// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) 2001, 2002, 2003 Broadcom Corporation
* Copyright (C) 2007 Ralf Baechle <[email protected]>
* Copyright (C) 2007 MIPS Technologies, Inc.
* written by Ralf Baechle <[email protected]>
*/
#undef DEBUG
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <asm/io.h>
#include <asm/sibyte/sb1250.h>
#ifdef CONFIG_SIBYTE_BCM1x80
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_scd.h>
#include <asm/sibyte/bcm1480_int.h>
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_scd.h>
#include <asm/sibyte/sb1250_int.h>
#else
#error invalid SiByte trace buffer profiler configuration
#endif
#ifdef CONFIG_SIBYTE_BCM1x80
#undef K_INT_TRACE_FREEZE
#define K_INT_TRACE_FREEZE K_BCM1480_INT_TRACE_FREEZE
#undef K_INT_PERF_CNT
#define K_INT_PERF_CNT K_BCM1480_INT_PERF_CNT
#endif
#include <linux/uaccess.h>
#define SBPROF_TB_MAJOR 240
typedef u64 tb_sample_t[6*256];
enum open_status {
SB_CLOSED,
SB_OPENING,
SB_OPEN
};
struct sbprof_tb {
wait_queue_head_t tb_sync;
wait_queue_head_t tb_read;
struct mutex lock;
enum open_status open;
tb_sample_t *sbprof_tbbuf;
int next_tb_sample;
volatile int tb_enable;
volatile int tb_armed;
};
static struct sbprof_tb sbp;
#define MAX_SAMPLE_BYTES (24*1024*1024)
#define MAX_TBSAMPLE_BYTES (12*1024*1024)
#define MAX_SAMPLES (MAX_SAMPLE_BYTES/sizeof(u_int32_t))
#define TB_SAMPLE_SIZE (sizeof(tb_sample_t))
#define MAX_TB_SAMPLES (MAX_TBSAMPLE_BYTES/TB_SAMPLE_SIZE)
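/*
 * For reference: sizeof(tb_sample_t) = 6 * 256 * 8 = 12288 bytes, so
 * MAX_TB_SAMPLES = (12 * 1024 * 1024) / 12288 = 1024 samples.
 */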
/* ioctls */
#define SBPROF_ZBSTART _IOW('s', 0, int)
#define SBPROF_ZBSTOP _IOW('s', 1, int)
#define SBPROF_ZBWAITFULL _IOW('s', 2, int)
/*
* Routines for using 40-bit SCD cycle counter
*
* Client responsible for either handling interrupts or making sure
* the cycles counter never saturates, e.g., by doing
* zclk_timer_init(0) at least every 2^40 - 1 ZCLKs.
*/
/*
* Configures SCD counter 0 to count ZCLKs starting from val;
* Configures SCD counters 1,2,3 to count nothing.
* Must not be called while gathering ZBbus profiles.
*/
#define zclk_timer_init(val) \
__asm__ __volatile__ (".set push;" \
".set mips64;" \
"la $8, 0xb00204c0;" /* SCD perf_cnt_cfg */ \
"sd %0, 0x10($8);" /* write val to counter0 */ \
"sd %1, 0($8);" /* config counter0 for zclks*/ \
".set pop" \
: /* no outputs */ \
/* enable, counter0 */ \
: /* inputs */ "r"(val), "r" ((1ULL << 33) | 1ULL) \
: /* modifies */ "$8" )
/* Reads SCD counter 0 and puts result in value
unsigned long long val; */
#define zclk_get(val) \
__asm__ __volatile__ (".set push;" \
".set mips64;" \
"la $8, 0xb00204c0;" /* SCD perf_cnt_cfg */ \
"ld %0, 0x10($8);" /* write val to counter0 */ \
".set pop" \
: /* outputs */ "=r"(val) \
: /* inputs */ \
: /* modifies */ "$8" )
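/*
 * Typical pairing of these macros (sketch only; the rest of this driver
 * programs the counters through __raw_writeq/__raw_readq instead):
 *
 *	u64 t0, t1;
 *	zclk_timer_init(0);
 *	zclk_get(t0);
 *	... code being timed ...
 *	zclk_get(t1);
 *
 * The elapsed time in ZCLKs is then t1 - t0, valid as long as the
 * counter has not wrapped past 2^40.
 */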
#define DEVNAME "sb_tbprof"
#define TB_FULL (sbp.next_tb_sample == MAX_TB_SAMPLES)
/*
* Support for ZBbus sampling using the trace buffer
*
* We use the SCD performance counter interrupt, caused by a Zclk counter
* overflow, to trigger the start of tracing.
*
* We set the trace buffer to sample everything and freeze on
* overflow.
*
* We map the interrupt for trace_buffer_freeze to handle it on CPU 0.
*
*/
static u64 tb_period;
static void arm_tb(void)
{
u64 scdperfcnt;
u64 next = (1ULL << 40) - tb_period;
u64 tb_options = M_SCD_TRACE_CFG_FREEZE_FULL;
/*
* Generate an SCD_PERFCNT interrupt in TB_PERIOD Zclks to
* trigger start of trace. XXX vary sampling period
*/
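/*
 * Worked example: with zbbus_mhz = 500, sbprof_tb_init() below sets
 * tb_period = 5,000,000 Zclks, so next = 2^40 - 5,000,000; counter 1
 * counts up from there and overflows after tb_period Zclks, raising
 * the perf-counter interrupt that starts the trace.
 */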
__raw_writeq(0, IOADDR(A_SCD_PERF_CNT_1));
scdperfcnt = __raw_readq(IOADDR(A_SCD_PERF_CNT_CFG));
/*
* Unfortunately, in Pass 2 we must clear all counters to knock down
* a previous interrupt request. This means that bus profiling
* requires ALL of the SCD perf counters.
*/
#ifdef CONFIG_SIBYTE_BCM1x80
__raw_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |
/* keep counters 0,2,3,4,5,6,7 as is */
V_SPC_CFG_SRC1(1), /* counter 1 counts cycles */
IOADDR(A_BCM1480_SCD_PERF_CNT_CFG0));
__raw_writeq(
M_SPC_CFG_ENABLE | /* enable counting */
M_SPC_CFG_CLEAR | /* clear all counters */
V_SPC_CFG_SRC1(1), /* counter 1 counts cycles */
IOADDR(A_BCM1480_SCD_PERF_CNT_CFG1));
#else
__raw_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |
/* keep counters 0,2,3 as is */
M_SPC_CFG_ENABLE | /* enable counting */
M_SPC_CFG_CLEAR | /* clear all counters */
V_SPC_CFG_SRC1(1), /* counter 1 counts cycles */
IOADDR(A_SCD_PERF_CNT_CFG));
#endif
__raw_writeq(next, IOADDR(A_SCD_PERF_CNT_1));
/* Reset the trace buffer */
__raw_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
#if 0 && defined(M_SCD_TRACE_CFG_FORCECNT)
/* XXXKW may want to expose control to the data-collector */
tb_options |= M_SCD_TRACE_CFG_FORCECNT;
#endif
__raw_writeq(tb_options, IOADDR(A_SCD_TRACE_CFG));
sbp.tb_armed = 1;
}
static irqreturn_t sbprof_tb_intr(int irq, void *dev_id)
{
int i;
pr_debug(DEVNAME ": tb_intr\n");
if (sbp.next_tb_sample < MAX_TB_SAMPLES) {
/* XXX should use XKPHYS to make writes bypass L2 */
u64 *p = sbp.sbprof_tbbuf[sbp.next_tb_sample++];
/* Read out trace */
__raw_writeq(M_SCD_TRACE_CFG_START_READ,
IOADDR(A_SCD_TRACE_CFG));
__asm__ __volatile__ ("sync" : : : "memory");
/* Loop runs backwards because bundles are read out in reverse order */
for (i = 256 * 6; i > 0; i -= 6) {
/* Subscripts decrease to put bundle in the order */
/* t0 lo, t0 hi, t1 lo, t1 hi, t2 lo, t2 hi */
p[i - 1] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
/* read t2 hi */
p[i - 2] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
/* read t2 lo */
p[i - 3] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
/* read t1 hi */
p[i - 4] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
/* read t1 lo */
p[i - 5] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
/* read t0 hi */
p[i - 6] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
/* read t0 lo */
}
if (!sbp.tb_enable) {
pr_debug(DEVNAME ": tb_intr shutdown\n");
__raw_writeq(M_SCD_TRACE_CFG_RESET,
IOADDR(A_SCD_TRACE_CFG));
sbp.tb_armed = 0;
wake_up_interruptible(&sbp.tb_sync);
} else {
/* knock down current interrupt and get another one later */
arm_tb();
}
} else {
/* No more trace buffer samples */
pr_debug(DEVNAME ": tb_intr full\n");
__raw_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
sbp.tb_armed = 0;
if (!sbp.tb_enable)
wake_up_interruptible(&sbp.tb_sync);
wake_up_interruptible(&sbp.tb_read);
}
return IRQ_HANDLED;
}
static irqreturn_t sbprof_pc_intr(int irq, void *dev_id)
{
printk(DEVNAME ": unexpected pc_intr");
return IRQ_NONE;
}
/*
* Requires: Already called zclk_timer_init with a value that won't
* saturate 40 bits. No subsequent use of SCD performance counters
* or trace buffer.
*/
static int sbprof_zbprof_start(struct file *filp)
{
u64 scdperfcnt;
int err;
if (xchg(&sbp.tb_enable, 1))
return -EBUSY;
pr_debug(DEVNAME ": starting\n");
sbp.next_tb_sample = 0;
filp->f_pos = 0;
err = request_irq(K_INT_TRACE_FREEZE, sbprof_tb_intr, 0,
DEVNAME " trace freeze", &sbp);
if (err)
return -EBUSY;
/* Make sure there isn't a perf-cnt interrupt waiting */
scdperfcnt = __raw_readq(IOADDR(A_SCD_PERF_CNT_CFG));
/* Disable and clear counters, override SRC_1 */
__raw_writeq((scdperfcnt & ~(M_SPC_CFG_SRC1 | M_SPC_CFG_ENABLE)) |
M_SPC_CFG_ENABLE | M_SPC_CFG_CLEAR | V_SPC_CFG_SRC1(1),
IOADDR(A_SCD_PERF_CNT_CFG));
/*
* We grab this interrupt to prevent others from trying to use
* it, even though we don't want to service the interrupts
* (they only feed into the trace-on-interrupt mechanism)
*/
if (request_irq(K_INT_PERF_CNT, sbprof_pc_intr, 0, DEVNAME " scd perfcnt", &sbp)) {
free_irq(K_INT_TRACE_FREEZE, &sbp);
return -EBUSY;
}
/*
* I need the core to mask these, but the interrupt mapper to
* pass them through. I am exploiting my knowledge that
* cp0_status masks out IP[5]. krw
*/
#ifdef CONFIG_SIBYTE_BCM1x80
__raw_writeq(K_BCM1480_INT_MAP_I3,
IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_INTERRUPT_MAP_BASE_L) +
((K_BCM1480_INT_PERF_CNT & 0x3f) << 3)));
#else
__raw_writeq(K_INT_MAP_I3,
IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
(K_INT_PERF_CNT << 3)));
#endif
/* Initialize address traps */
__raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_0));
__raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_1));
__raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_2));
__raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_3));
__raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_0));
__raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_1));
__raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_2));
__raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_3));
__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_0));
__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_1));
__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_2));
__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_3));
/* Initialize Trace Event 0-7 */
/* when interrupt */
__raw_writeq(M_SCD_TREVT_INTERRUPT, IOADDR(A_SCD_TRACE_EVENT_0));
__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_1));
__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_2));
__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_3));
__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_4));
__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_5));
__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_6));
__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_7));
/* Initialize Trace Sequence 0-7 */
/* Start on event 0 (interrupt) */
__raw_writeq(V_SCD_TRSEQ_FUNC_START | 0x0fff,
IOADDR(A_SCD_TRACE_SEQUENCE_0));
/* dsamp when d used | asamp when a used */
__raw_writeq(M_SCD_TRSEQ_ASAMPLE | M_SCD_TRSEQ_DSAMPLE |
K_SCD_TRSEQ_TRIGGER_ALL,
IOADDR(A_SCD_TRACE_SEQUENCE_1));
__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_2));
__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_3));
__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_4));
__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_5));
__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_6));
__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_7));
/* Now indicate the PERF_CNT interrupt as a trace-relevant interrupt */
#ifdef CONFIG_SIBYTE_BCM1x80
__raw_writeq(1ULL << (K_BCM1480_INT_PERF_CNT & 0x3f),
IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_INTERRUPT_TRACE_L)));
#else
__raw_writeq(1ULL << K_INT_PERF_CNT,
IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_TRACE)));
#endif
arm_tb();
pr_debug(DEVNAME ": done starting\n");
return 0;
}
static int sbprof_zbprof_stop(void)
{
int err = 0;
pr_debug(DEVNAME ": stopping\n");
if (sbp.tb_enable) {
/*
* XXXKW there is a window here where the intr handler may run,
* see the disable, and do the wake_up before this sleep
* happens.
*/
pr_debug(DEVNAME ": wait for disarm\n");
err = wait_event_interruptible(sbp.tb_sync, !sbp.tb_armed);
pr_debug(DEVNAME ": disarm complete, stat %d\n", err);
if (err)
return err;
sbp.tb_enable = 0;
free_irq(K_INT_TRACE_FREEZE, &sbp);
free_irq(K_INT_PERF_CNT, &sbp);
}
pr_debug(DEVNAME ": done stopping\n");
return err;
}
static int sbprof_tb_open(struct inode *inode, struct file *filp)
{
int minor;
minor = iminor(inode);
if (minor != 0)
return -ENODEV;
if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED)
return -EBUSY;
memset(&sbp, 0, sizeof(struct sbprof_tb));
sbp.sbprof_tbbuf = vzalloc(MAX_TBSAMPLE_BYTES);
if (!sbp.sbprof_tbbuf) {
sbp.open = SB_CLOSED;
wmb();
return -ENOMEM;
}
init_waitqueue_head(&sbp.tb_sync);
init_waitqueue_head(&sbp.tb_read);
mutex_init(&sbp.lock);
sbp.open = SB_OPEN;
wmb();
return 0;
}
static int sbprof_tb_release(struct inode *inode, struct file *filp)
{
int minor;
minor = iminor(inode);
if (minor != 0 || sbp.open != SB_OPEN)
return -ENODEV;
mutex_lock(&sbp.lock);
if (sbp.tb_armed || sbp.tb_enable)
sbprof_zbprof_stop();
vfree(sbp.sbprof_tbbuf);
sbp.open = SB_CLOSED;
wmb();
mutex_unlock(&sbp.lock);
return 0;
}
static ssize_t sbprof_tb_read(struct file *filp, char __user *buf,
size_t size, loff_t *offp)
{
int cur_sample, sample_off, cur_count, sample_left;
char *src;
int count = 0;
char __user *dest = buf;
long cur_off = *offp;
if (!access_ok(buf, size))
return -EFAULT;
mutex_lock(&sbp.lock);
count = 0;
cur_sample = cur_off / TB_SAMPLE_SIZE;
sample_off = cur_off % TB_SAMPLE_SIZE;
sample_left = TB_SAMPLE_SIZE - sample_off;
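/*
 * Example: with TB_SAMPLE_SIZE = 12288, an offset of 20000 yields
 * cur_sample = 1, sample_off = 7712 and sample_left = 4576.
 */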
while (size && (cur_sample < sbp.next_tb_sample)) {
int err;
cur_count = size < sample_left ? size : sample_left;
src = (char *)(((long)sbp.sbprof_tbbuf[cur_sample])+sample_off);
err = __copy_to_user(dest, src, cur_count);
if (err) {
*offp = cur_off + cur_count - err;
mutex_unlock(&sbp.lock);
return err;
}
pr_debug(DEVNAME ": read from sample %d, %d bytes\n",
cur_sample, cur_count);
size -= cur_count;
sample_left -= cur_count;
if (!sample_left) {
cur_sample++;
sample_off = 0;
sample_left = TB_SAMPLE_SIZE;
} else {
sample_off += cur_count;
}
cur_off += cur_count;
dest += cur_count;
count += cur_count;
}
*offp = cur_off;
mutex_unlock(&sbp.lock);
return count;
}
static long sbprof_tb_ioctl(struct file *filp,
unsigned int command,
unsigned long arg)
{
int err = 0;
switch (command) {
case SBPROF_ZBSTART:
mutex_lock(&sbp.lock);
err = sbprof_zbprof_start(filp);
mutex_unlock(&sbp.lock);
break;
case SBPROF_ZBSTOP:
mutex_lock(&sbp.lock);
err = sbprof_zbprof_stop();
mutex_unlock(&sbp.lock);
break;
case SBPROF_ZBWAITFULL: {
err = wait_event_interruptible(sbp.tb_read, TB_FULL);
if (err)
break;
err = put_user(TB_FULL, (int __user *) arg);
break;
}
default:
err = -EINVAL;
break;
}
return err;
}
static const struct file_operations sbprof_tb_fops = {
.owner = THIS_MODULE,
.open = sbprof_tb_open,
.release = sbprof_tb_release,
.read = sbprof_tb_read,
.unlocked_ioctl = sbprof_tb_ioctl,
.compat_ioctl = sbprof_tb_ioctl,
.mmap = NULL,
.llseek = default_llseek,
};
static struct class *tb_class;
static struct device *tb_dev;
static int __init sbprof_tb_init(void)
{
struct device *dev;
struct class *tbc;
int err;
if (register_chrdev(SBPROF_TB_MAJOR, DEVNAME, &sbprof_tb_fops)) {
printk(KERN_WARNING DEVNAME ": initialization failed (dev %d)\n",
SBPROF_TB_MAJOR);
return -EIO;
}
tbc = class_create("sb_tracebuffer");
if (IS_ERR(tbc)) {
err = PTR_ERR(tbc);
goto out_chrdev;
}
tb_class = tbc;
dev = device_create(tbc, NULL, MKDEV(SBPROF_TB_MAJOR, 0), NULL, "tb");
if (IS_ERR(dev)) {
err = PTR_ERR(dev);
goto out_class;
}
tb_dev = dev;
sbp.open = SB_CLOSED;
wmb();
tb_period = zbbus_mhz * 10000LL;
pr_info(DEVNAME ": initialized - tb_period = %lld\n",
(long long) tb_period);
return 0;
out_class:
class_destroy(tb_class);
out_chrdev:
unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME);
return err;
}
static void __exit sbprof_tb_cleanup(void)
{
device_destroy(tb_class, MKDEV(SBPROF_TB_MAJOR, 0));
unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME);
class_destroy(tb_class);
}
module_init(sbprof_tb_init);
module_exit(sbprof_tb_cleanup);
MODULE_ALIAS_CHARDEV_MAJOR(SBPROF_TB_MAJOR);
MODULE_AUTHOR("Ralf Baechle <[email protected]>");
MODULE_LICENSE("GPL");
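/*
 * Userspace usage sketch (illustrative only, not part of the driver;
 * it assumes the device node shows up as /dev/tb, matching the
 * device_create() name above, and that the SBPROF_ZB* ioctl numbers
 * are shared with the collector tool):
 *
 *	int full, n;
 *	int fd = open("/dev/tb", O_RDONLY);
 *	ioctl(fd, SBPROF_ZBSTART, 0);
 *	ioctl(fd, SBPROF_ZBWAITFULL, &full);
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		write(out_fd, buf, n);
 *	ioctl(fd, SBPROF_ZBSTOP, 0);
 *	close(fd);
 *
 * ZBSTART arms the trace buffer, ZBWAITFULL sleeps until all
 * MAX_TB_SAMPLES samples have been captured, and read() then drains
 * the sample buffer.
 */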
| linux-master | arch/mips/sibyte/common/sb_tbprof.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2002,2003 Broadcom Corporation
*/
/*
* The Bus Watcher monitors internal bus transactions and maintains
* counts of transactions with error status, logging details and
* causing one of several interrupts. This driver provides a handler
* for those interrupts which aggregates the counts (to avoid
* saturating the 8-bit counters) and provides a presence in
* /proc/bus_watcher if PROC_FS is on.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#include <asm/sibyte/sb1250_scd.h>
#ifdef CONFIG_SIBYTE_BCM1x80
#include <asm/sibyte/bcm1480_regs.h>
#endif
struct bw_stats_struct {
uint64_t status;
uint32_t l2_err;
uint32_t memio_err;
int status_printed;
unsigned long l2_cor_d;
unsigned long l2_bad_d;
unsigned long l2_cor_t;
unsigned long l2_bad_t;
unsigned long mem_cor_d;
unsigned long mem_bad_d;
unsigned long bus_error;
} bw_stats;
static void print_summary(uint32_t status, uint32_t l2_err,
uint32_t memio_err)
{
printk("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
printk("\nLast recorded signature:\n");
printk("Request %02x from %d, answered by %d with Dcode %d\n",
(unsigned int)(G_SCD_BERR_TID(status) & 0x3f),
(int)(G_SCD_BERR_TID(status) >> 6),
(int)G_SCD_BERR_RID(status),
(int)G_SCD_BERR_DCODE(status));
}
/*
* check_bus_watcher is exported for use in situations where we want
* to see the most recent status of the bus watcher, which might have
* already been destructively read out of the registers.
*
* notes: this is currently used by the cache error handler
* should provide locking against the interrupt handler
*/
void check_bus_watcher(void)
{
u32 status, l2_err, memio_err;
#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
/* Use non-destructive register */
status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS_DEBUG));
#elif defined(CONFIG_SIBYTE_BCM1x80)
/* Use non-destructive register */
/* Same as 1250 except BUS_ERR_STATUS_DEBUG is in a different place. */
status = csr_in32(IOADDR(A_BCM1480_BUS_ERR_STATUS_DEBUG));
#else
#error bus watcher being built for unknown Sibyte SOC!
#endif
if (!(status & 0x7fffffff)) {
printk("Using last values reaped by bus watcher driver\n");
status = bw_stats.status;
l2_err = bw_stats.l2_err;
memio_err = bw_stats.memio_err;
} else {
l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS));
memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
}
if (status & ~(1UL << 31))
print_summary(status, l2_err, memio_err);
else
printk("Bus watcher indicates no error\n");
}
#ifdef CONFIG_PROC_FS
/* For simplicity, I want to assume a single read is required each
time */
static int bw_proc_show(struct seq_file *m, void *v)
{
struct bw_stats_struct *stats = m->private;
seq_puts(m, "SiByte Bus Watcher statistics\n");
seq_puts(m, "-----------------------------\n");
seq_printf(m, "L2-d-cor %8ld\nL2-d-bad %8ld\n",
stats->l2_cor_d, stats->l2_bad_d);
seq_printf(m, "L2-t-cor %8ld\nL2-t-bad %8ld\n",
stats->l2_cor_t, stats->l2_bad_t);
seq_printf(m, "MC-d-cor %8ld\nMC-d-bad %8ld\n",
stats->mem_cor_d, stats->mem_bad_d);
seq_printf(m, "IO-err %8ld\n", stats->bus_error);
seq_puts(m, "\nLast recorded signature:\n");
seq_printf(m, "Request %02x from %d, answered by %d with Dcode %d\n",
(unsigned int)(G_SCD_BERR_TID(stats->status) & 0x3f),
(int)(G_SCD_BERR_TID(stats->status) >> 6),
(int)G_SCD_BERR_RID(stats->status),
(int)G_SCD_BERR_DCODE(stats->status));
/* XXXKW indicate multiple errors between printings, or stats
collection (or both)? */
if (stats->status & M_SCD_BERR_MULTERRS)
seq_puts(m, "Multiple errors observed since last check.\n");
if (stats->status_printed) {
seq_puts(m, "(no change since last printing)\n");
} else {
stats->status_printed = 1;
}
return 0;
}
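/*
 * A minimal consumer of this file is just a sequential read (sketch,
 * assuming procfs is mounted in the usual place):
 *
 *	char line[128];
 *	FILE *f = fopen("/proc/bus_watcher", "r");
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);
 */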
static void create_proc_decoder(struct bw_stats_struct *stats)
{
struct proc_dir_entry *ent;
ent = proc_create_single_data("bus_watcher", S_IWUSR | S_IRUGO, NULL,
bw_proc_show, stats);
if (!ent) {
printk(KERN_INFO "Unable to initialize bus_watcher /proc entry\n");
return;
}
}
#endif /* CONFIG_PROC_FS */
/*
* sibyte_bw_int - handle bus watcher interrupts and accumulate counts
*
* notes: possible re-entry due to multiple sources
* should check/indicate saturation
*/
static irqreturn_t sibyte_bw_int(int irq, void *data)
{
struct bw_stats_struct *stats = data;
unsigned long cntr;
#ifdef CONFIG_SIBYTE_BW_TRACE
int i;
#endif
#ifdef CONFIG_SIBYTE_BW_TRACE
csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
csr_out32(M_SCD_TRACE_CFG_START_READ, IOADDR(A_SCD_TRACE_CFG));
for (i=0; i<256*6; i++)
printk("%016llx\n",
(long long)__raw_readq(IOADDR(A_SCD_TRACE_READ)));
csr_out32(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
csr_out32(M_SCD_TRACE_CFG_START, IOADDR(A_SCD_TRACE_CFG));
#endif
/* Destructive read, clears register and interrupt */
stats->status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
stats->status_printed = 0;
stats->l2_err = cntr = csr_in32(IOADDR(A_BUS_L2_ERRORS));
stats->l2_cor_d += G_SCD_L2ECC_CORR_D(cntr);
stats->l2_bad_d += G_SCD_L2ECC_BAD_D(cntr);
stats->l2_cor_t += G_SCD_L2ECC_CORR_T(cntr);
stats->l2_bad_t += G_SCD_L2ECC_BAD_T(cntr);
csr_out32(0, IOADDR(A_BUS_L2_ERRORS));
stats->memio_err = cntr = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
stats->mem_cor_d += G_SCD_MEM_ECC_CORR(cntr);
stats->mem_bad_d += G_SCD_MEM_ECC_BAD(cntr);
stats->bus_error += G_SCD_MEM_BUSERR(cntr);
csr_out32(0, IOADDR(A_BUS_MEM_IO_ERRORS));
return IRQ_HANDLED;
}
int __init sibyte_bus_watcher(void)
{
memset(&bw_stats, 0, sizeof(struct bw_stats_struct));
bw_stats.status_printed = 1;
if (request_irq(K_INT_BAD_ECC, sibyte_bw_int, 0, "Bus watcher", &bw_stats)) {
printk("Failed to register bus watcher BAD_ECC irq\n");
return -1;
}
if (request_irq(K_INT_COR_ECC, sibyte_bw_int, 0, "Bus watcher", &bw_stats)) {
free_irq(K_INT_BAD_ECC, &bw_stats);
printk("Failed to register bus watcher COR_ECC irq\n");
return -1;
}
if (request_irq(K_INT_IO_BUS, sibyte_bw_int, 0, "Bus watcher", &bw_stats)) {
free_irq(K_INT_BAD_ECC, &bw_stats);
free_irq(K_INT_COR_ECC, &bw_stats);
printk("Failed to register bus watcher IO_BUS irq\n");
return -1;
}
#ifdef CONFIG_PROC_FS
create_proc_decoder(&bw_stats);
#endif
#ifdef CONFIG_SIBYTE_BW_TRACE
csr_out32((M_SCD_TRSEQ_ASAMPLE | M_SCD_TRSEQ_DSAMPLE |
K_SCD_TRSEQ_TRIGGER_ALL),
IOADDR(A_SCD_TRACE_SEQUENCE_0));
csr_out32(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
csr_out32(M_SCD_TRACE_CFG_START, IOADDR(A_SCD_TRACE_CFG));
#endif
return 0;
}
device_initcall(sibyte_bus_watcher);
| linux-master | arch/mips/sibyte/common/bus_watcher.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* DMA support for Broadcom SiByte platforms.
*
* Copyright (c) 2018 Maciej W. Rozycki
*/
#include <linux/swiotlb.h>
#include <asm/bootinfo.h>
void __init plat_swiotlb_setup(void)
{
swiotlb_init(true, SWIOTLB_VERBOSE);
}
| linux-master | arch/mips/sibyte/common/dma.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/console.h>
#include <asm/sibyte/board.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/fw/cfe/cfe_error.h>
extern int cfe_cons_handle;
static void cfe_console_write(struct console *cons, const char *str,
unsigned int count)
{
int i, last, written;
for (i=0, last=0; i<count; i++) {
if (!str[i])
/* XXXKW can/should this ever happen? */
return;
if (str[i] == '\n') {
do {
written = cfe_write(cfe_cons_handle, &str[last], i-last);
if (written < 0)
;
last += written;
} while (last < i);
while (cfe_write(cfe_cons_handle, "\r", 1) <= 0)
;
}
}
if (last != count) {
do {
written = cfe_write(cfe_cons_handle, &str[last], count-last);
if (written < 0)
;
last += written;
} while (last < count);
}
}
static int cfe_console_setup(struct console *cons, char *str)
{
char consdev[32];
/* XXXKW think about interaction with 'console=' cmdline arg */
/* If none of the console options are configured, the build will break. */
if (cfe_getenv("BOOT_CONSOLE", consdev, 32) >= 0) {
#ifdef CONFIG_SERIAL_SB1250_DUART
if (!strcmp(consdev, "uart0")) {
setleds("u0cn");
} else if (!strcmp(consdev, "uart1")) {
setleds("u1cn");
} else
#endif
#ifdef CONFIG_VGA_CONSOLE
if (!strcmp(consdev, "pcconsole0")) {
setleds("pccn");
} else
#endif
return -ENODEV;
}
return 0;
}
static struct console sb1250_cfe_cons = {
.name = "cfe",
.write = cfe_console_write,
.setup = cfe_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
};
static int __init sb1250_cfe_console_init(void)
{
register_console(&sb1250_cfe_cons);
return 0;
}
console_initcall(sb1250_cfe_console_init);
| linux-master | arch/mips/sibyte/common/cfe_console.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/pm.h>
#include <linux/smp.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/sibyte/board.h>
#include <asm/smp-ops.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/fw/cfe/cfe_error.h>
/* Max ram addressable in 32-bit segments */
#ifdef CONFIG_64BIT
#define MAX_RAM_SIZE (~0ULL)
#else
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define MAX_RAM_SIZE (~0ULL)
#else
#define MAX_RAM_SIZE (0xffffffffULL)
#endif
#else
#define MAX_RAM_SIZE (0x1fffffffULL)
#endif
#endif
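/*
 * On a 32-bit kernel without HIGHMEM this caps directly usable memory at
 * 0x1fffffff + 1 bytes, i.e. the 512 MB that KSEG0/KSEG1 can reach.
 */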
int cfe_cons_handle;
#ifdef CONFIG_BLK_DEV_INITRD
extern unsigned long initrd_start, initrd_end;
#endif
static void __noreturn cfe_linux_exit(void *arg)
{
int warm = *(int *)arg;
if (smp_processor_id()) {
static int reboot_smp;
/* Don't repeat the process from another CPU */
if (!reboot_smp) {
/* Get CPU 0 to do the cfe_exit */
reboot_smp = 1;
smp_call_function(cfe_linux_exit, arg, 0);
}
} else {
printk("Passing control back to CFE...\n");
cfe_exit(warm, 0);
printk("cfe_exit returned??\n");
}
while (1);
}
static void __noreturn cfe_linux_restart(char *command)
{
static const int zero;
cfe_linux_exit((void *)&zero);
}
static void __noreturn cfe_linux_halt(void)
{
static const int one = 1;
cfe_linux_exit((void *)&one);
}
static __init void prom_meminit(void)
{
u64 addr, size, type; /* regardless of PHYS_ADDR_T_64BIT */
int mem_flags = 0;
unsigned int idx;
int rd_flag;
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long initrd_pstart;
unsigned long initrd_pend;
initrd_pstart = CPHYSADDR(initrd_start);
initrd_pend = CPHYSADDR(initrd_end);
if (initrd_start &&
((initrd_pstart > MAX_RAM_SIZE)
|| (initrd_pend > MAX_RAM_SIZE))) {
panic("initrd out of addressable memory");
}
#endif /* INITRD */
for (idx = 0; cfe_enummem(idx, mem_flags, &addr, &size, &type) != CFE_ERR_NOMORE;
idx++) {
rd_flag = 0;
if (type == CFE_MI_AVAILABLE) {
/*
* See if this block contains (any portion of) the
* ramdisk
*/
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
if ((initrd_pstart > addr) &&
(initrd_pstart < (addr + size))) {
memblock_add(addr,
initrd_pstart - addr);
rd_flag = 1;
}
if ((initrd_pend > addr) &&
(initrd_pend < (addr + size))) {
memblock_add(initrd_pend,
(addr + size) - initrd_pend);
rd_flag = 1;
}
}
#endif
if (!rd_flag) {
if (addr > MAX_RAM_SIZE)
continue;
if (addr+size > MAX_RAM_SIZE)
size = MAX_RAM_SIZE - addr + 1;
/*
* memcpy/__copy_user prefetch, which
* will cause a bus error for
* KSEG/KUSEG addrs not backed by RAM.
* Hence, reserve some padding for the
* prefetch distance.
*/
if (size > 512)
size -= 512;
memblock_add(addr, size);
}
}
}
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
memblock_add(initrd_pstart, initrd_pend - initrd_pstart);
memblock_reserve(initrd_pstart, initrd_pend - initrd_pstart);
}
#endif
}
#ifdef CONFIG_BLK_DEV_INITRD
static int __init initrd_setup(char *str)
{
char rdarg[64];
int idx;
char *tmp, *endptr;
unsigned long initrd_size;
/* Make a copy of the initrd argument so we can smash it up here */
for (idx = 0; idx < sizeof(rdarg)-1; idx++) {
if (!str[idx] || (str[idx] == ' ')) break;
rdarg[idx] = str[idx];
}
rdarg[idx] = 0;
str = rdarg;
/*
* Initrd location comes in the form "<hex size of ramdisk in bytes>@<location in memory>"
* e.g. initrd=3abfd@80010000. This is set up by the loader.
*/
for (tmp = str; *tmp != '@'; tmp++) {
if (!*tmp) {
goto fail;
}
}
*tmp = 0;
tmp++;
if (!*tmp) {
goto fail;
}
initrd_size = simple_strtoul(str, &endptr, 16);
if (*endptr) {
*(tmp-1) = '@';
goto fail;
}
*(tmp-1) = '@';
initrd_start = simple_strtoul(tmp, &endptr, 16);
if (*endptr) {
goto fail;
}
initrd_end = initrd_start + initrd_size;
printk("Found initrd of %lx@%lx\n", initrd_size, initrd_start);
return 1;
fail:
printk("Bad initrd argument. Disabling initrd\n");
initrd_start = 0;
initrd_end = 0;
return 1;
}
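/*
 * Example: "initrd=3abfd@80010000" parses to initrd_size = 0x3abfd and
 * initrd_start = 0x80010000, so initrd_end = 0x8004abfd.
 */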
#endif
extern const struct plat_smp_ops sb_smp_ops;
extern const struct plat_smp_ops bcm1480_smp_ops;
/*
* prom_init is called just after the cpu type is determined, from setup_arch()
*/
void __init prom_init(void)
{
uint64_t cfe_ept, cfe_handle;
unsigned int cfe_eptseal;
int argc = fw_arg0;
char **envp = (char **) fw_arg2;
int *prom_vec = (int *) fw_arg3;
_machine_restart = cfe_linux_restart;
_machine_halt = cfe_linux_halt;
pm_power_off = cfe_linux_halt;
/*
* Check if a loader was used; if NOT, the 4 arguments are
* what CFE gives us (handle, 0, EPT and EPTSEAL)
*/
if (argc < 0) {
cfe_handle = (uint64_t)(long)argc;
cfe_ept = (long)envp;
cfe_eptseal = (uint32_t)(unsigned long)prom_vec;
} else {
if ((int32_t)(long)prom_vec < 0) {
/*
* Old loader; all it gives us is the handle,
* so use the "known" entrypoint and assume
* the seal.
*/
cfe_handle = (uint64_t)(long)prom_vec;
cfe_ept = (uint64_t)((int32_t)0x9fc00500);
cfe_eptseal = CFE_EPTSEAL;
} else {
/*
* Newer loaders bundle the handle/ept/eptseal
* Note: prom_vec is in the loader's useg
* which is still alive in the TLB.
*/
cfe_handle = (uint64_t)((int32_t *)prom_vec)[0];
cfe_ept = (uint64_t)((int32_t *)prom_vec)[2];
cfe_eptseal = (unsigned int)((uint32_t *)prom_vec)[3];
}
}
if (cfe_eptseal != CFE_EPTSEAL) {
/* too early for panic to do any good */
printk("CFE's entrypoint seal doesn't match. Spinning.");
while (1) ;
}
cfe_init(cfe_handle, cfe_ept);
/*
* Get the handle for (at least) prom_putchar, possibly for
* boot console
*/
cfe_cons_handle = cfe_getstdhandle(CFE_STDHANDLE_CONSOLE);
if (cfe_getenv("LINUX_CMDLINE", arcs_cmdline, COMMAND_LINE_SIZE) < 0) {
if (argc >= 0) {
/* The loader should have set the command line */
/* too early for panic to do any good */
printk("LINUX_CMDLINE not defined in cfe.");
while (1) ;
}
}
#ifdef CONFIG_BLK_DEV_INITRD
{
char *ptr;
/* Need to find out early whether we've got an initrd. So scan
the list looking now */
for (ptr = arcs_cmdline; *ptr; ptr++) {
while (*ptr == ' ') {
ptr++;
}
if (!strncmp(ptr, "initrd=", 7)) {
initrd_setup(ptr+7);
break;
} else {
while (*ptr && (*ptr != ' ')) {
ptr++;
}
}
}
}
#endif /* CONFIG_BLK_DEV_INITRD */
/* Not sure this is needed, but it's the safe way. */
arcs_cmdline[COMMAND_LINE_SIZE-1] = 0;
prom_meminit();
#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
register_smp_ops(&sb_smp_ops);
#endif
#ifdef CONFIG_SIBYTE_BCM1x80
register_smp_ops(&bcm1480_smp_ops);
#endif
}
void prom_putchar(char c)
{
int ret;
while ((ret = cfe_write(cfe_cons_handle, &c, 1)) == 0)
;
}
| linux-master | arch/mips/sibyte/common/cfe.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#include <asm/sibyte/sb1250_uart.h>
#include <asm/sibyte/sb1250_scd.h>
#include <asm/sibyte/sb1250.h>
/*
* These are the routines that handle all the low level interrupt stuff.
* Actions handled here are: initialization of the interrupt map, requesting of
* interrupt lines by handlers, dispatching of interrupts to handlers, probing
* for interrupt lines
*/
#ifdef CONFIG_SIBYTE_HAS_LDT
extern unsigned long ldt_eoi_space;
#endif
/* Store the CPU id (not the logical number) */
int sb1250_irq_owner[SB1250_NR_IRQS];
static DEFINE_RAW_SPINLOCK(sb1250_imr_lock);
void sb1250_mask_irq(int cpu, int irq)
{
unsigned long flags;
u64 cur_ints;
raw_spin_lock_irqsave(&sb1250_imr_lock, flags);
cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
R_IMR_INTERRUPT_MASK));
cur_ints |= (((u64) 1) << irq);
____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
R_IMR_INTERRUPT_MASK));
raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);
}
void sb1250_unmask_irq(int cpu, int irq)
{
unsigned long flags;
u64 cur_ints;
raw_spin_lock_irqsave(&sb1250_imr_lock, flags);
cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
R_IMR_INTERRUPT_MASK));
cur_ints &= ~(((u64) 1) << irq);
____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
R_IMR_INTERRUPT_MASK));
raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);
}
#ifdef CONFIG_SMP
static int sb1250_set_affinity(struct irq_data *d, const struct cpumask *mask,
bool force)
{
int i = 0, old_cpu, cpu, int_on;
unsigned int irq = d->irq;
u64 cur_ints;
unsigned long flags;
i = cpumask_first_and(mask, cpu_online_mask);
/* Convert logical CPU to physical CPU */
cpu = cpu_logical_map(i);
/* Protect against other affinity changers and IMR manipulation */
raw_spin_lock_irqsave(&sb1250_imr_lock, flags);
/* Swizzle each CPU's IMR (but leave the IP selection alone) */
old_cpu = sb1250_irq_owner[irq];
cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(old_cpu) +
R_IMR_INTERRUPT_MASK));
int_on = !(cur_ints & (((u64) 1) << irq));
if (int_on) {
/* If it was on, mask it */
cur_ints |= (((u64) 1) << irq);
____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(old_cpu) +
R_IMR_INTERRUPT_MASK));
}
sb1250_irq_owner[irq] = cpu;
if (int_on) {
/* unmask for the new CPU */
cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
R_IMR_INTERRUPT_MASK));
cur_ints &= ~(((u64) 1) << irq);
____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
R_IMR_INTERRUPT_MASK));
}
raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);
return 0;
}
#endif
static void disable_sb1250_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
sb1250_mask_irq(sb1250_irq_owner[irq], irq);
}
static void enable_sb1250_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
}
static void ack_sb1250_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
#ifdef CONFIG_SIBYTE_HAS_LDT
u64 pending;
/*
* If the interrupt was an HT interrupt, now is the time to
* clear it. NOTE: we assume the HT bridge was set up to
* deliver the interrupts to all CPUs (which makes affinity
* changing easier for us)
*/
pending = __raw_readq(IOADDR(A_IMR_REGISTER(sb1250_irq_owner[irq],
R_IMR_LDT_INTERRUPT)));
pending &= ((u64)1 << (irq));
if (pending) {
int i;
for (i=0; i<NR_CPUS; i++) {
int cpu;
#ifdef CONFIG_SMP
cpu = cpu_logical_map(i);
#else
cpu = i;
#endif
/*
* Clear for all CPUs so an affinity switch
* doesn't find an old status
*/
__raw_writeq(pending,
IOADDR(A_IMR_REGISTER(cpu,
R_IMR_LDT_INTERRUPT_CLR)));
}
/*
* Generate EOI. For Pass 1 parts, EOI is a nop. For
* Pass 2, the LDT world may be edge-triggered, but
* this EOI shouldn't hurt. If they are
* level-sensitive, the EOI is required.
*/
*(uint32_t *)(ldt_eoi_space+(irq<<16)+(7<<2)) = 0;
}
#endif
sb1250_mask_irq(sb1250_irq_owner[irq], irq);
}
static struct irq_chip sb1250_irq_type = {
.name = "SB1250-IMR",
.irq_mask_ack = ack_sb1250_irq,
.irq_unmask = enable_sb1250_irq,
.irq_mask = disable_sb1250_irq,
#ifdef CONFIG_SMP
.irq_set_affinity = sb1250_set_affinity
#endif
};
void __init init_sb1250_irqs(void)
{
int i;
for (i = 0; i < SB1250_NR_IRQS; i++) {
irq_set_chip_and_handler(i, &sb1250_irq_type,
handle_level_irq);
sb1250_irq_owner[i] = 0;
}
}
/*
* arch_init_irq is called early in the boot sequence from init/main.c via
* init_IRQ. It is responsible for setting up the interrupt mapper and
* installing the handler that will be responsible for dispatching interrupts
* to the "right" place.
*/
/*
* For now, map all interrupts to IP[2]. We could save
* some cycles by parceling out system interrupts to different
* IP lines, but keep it simple for bringup. We'll also direct
* all interrupts to a single CPU; we should probably route
* PCI and LDT to one cpu and everything else to the other
* to balance the load a bit.
*
* On the second cpu, everything is set to IP5, which is
* ignored, EXCEPT the mailbox interrupt. That one is
* set to IP[2] so it is handled. This is needed so we
* can do cross-cpu function calls, as required by SMP
*/
#define IMR_IP2_VAL K_INT_MAP_I0
#define IMR_IP3_VAL K_INT_MAP_I1
#define IMR_IP4_VAL K_INT_MAP_I2
#define IMR_IP5_VAL K_INT_MAP_I3
#define IMR_IP6_VAL K_INT_MAP_I4
void __init arch_init_irq(void)
{
unsigned int i;
u64 tmp;
unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
STATUSF_IP1 | STATUSF_IP0;
/* Default everything to IP2 */
for (i = 0; i < SB1250_NR_IRQS; i++) { /* was I0 */
__raw_writeq(IMR_IP2_VAL,
IOADDR(A_IMR_REGISTER(0,
R_IMR_INTERRUPT_MAP_BASE) +
(i << 3)));
__raw_writeq(IMR_IP2_VAL,
IOADDR(A_IMR_REGISTER(1,
R_IMR_INTERRUPT_MAP_BASE) +
(i << 3)));
}
init_sb1250_irqs();
/*
* Map the high 16 bits of the mailbox registers to IP[3], for
* inter-cpu messages
*/
/* Was I1 */
__raw_writeq(IMR_IP3_VAL,
IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
(K_INT_MBOX_0 << 3)));
__raw_writeq(IMR_IP3_VAL,
IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MAP_BASE) +
(K_INT_MBOX_0 << 3)));
/* Clear the mailboxes. The firmware may leave them dirty */
__raw_writeq(0xffffffffffffffffULL,
IOADDR(A_IMR_REGISTER(0, R_IMR_MAILBOX_CLR_CPU)));
__raw_writeq(0xffffffffffffffffULL,
IOADDR(A_IMR_REGISTER(1, R_IMR_MAILBOX_CLR_CPU)));
/* Mask everything except the mailbox registers for both cpus */
tmp = ~((u64) 0) ^ (((u64) 1) << K_INT_MBOX_0);
__raw_writeq(tmp, IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MASK)));
__raw_writeq(tmp, IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MASK)));
/* Enable necessary IPs, disable the rest */
change_c0_status(ST0_IM, imask);
}
extern void sb1250_mailbox_interrupt(void);
static inline void dispatch_ip2(void)
{
unsigned int cpu = smp_processor_id();
unsigned long long mask;
/*
* Default...we've hit an IP[2] interrupt, which means we've got to
* check the 1250 interrupt registers to figure out what to do. Need
* to detect which CPU we're on, now that smp_affinity is supported.
*/
mask = __raw_readq(IOADDR(A_IMR_REGISTER(cpu,
R_IMR_INTERRUPT_STATUS_BASE)));
if (mask)
do_IRQ(fls64(mask) - 1);
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int cpu = smp_processor_id();
unsigned int pending;
/*
* What a pain. We have to be really careful saving the upper 32 bits
* of any register across function calls if we don't want them
* trashed--since we're running in -o32, the calling routine never saves
* the full 64 bits of a register across a function call. Being the
* interrupt handler, we're guaranteed that interrupts are disabled
* during this code so we don't have to worry about random interrupts
* blasting the high 32 bits.
*/
pending = read_c0_cause() & read_c0_status() & ST0_IM;
if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
else if (pending & CAUSEF_IP4)
do_IRQ(K_INT_TIMER_0 + cpu); /* sb1250_timer_interrupt() */
#ifdef CONFIG_SMP
else if (pending & CAUSEF_IP3)
sb1250_mailbox_interrupt();
#endif
else if (pending & CAUSEF_IP2)
dispatch_ip2();
else
spurious_interrupt();
}
| linux-master | arch/mips/sibyte/sb1250/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/io.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_scd.h>
unsigned int sb1_pass;
unsigned int soc_pass;
unsigned int soc_type;
EXPORT_SYMBOL(soc_type);
unsigned int periph_rev;
EXPORT_SYMBOL_GPL(periph_rev);
unsigned int zbbus_mhz;
EXPORT_SYMBOL(zbbus_mhz);
static char *soc_str;
static char *pass_str;
static unsigned int war_pass; /* XXXKW don't overload PASS defines? */
static int __init setup_bcm1250(void)
{
int ret = 0;
switch (soc_pass) {
case K_SYS_REVISION_BCM1250_PASS1:
periph_rev = 1;
pass_str = "Pass 1";
break;
case K_SYS_REVISION_BCM1250_A10:
periph_rev = 2;
pass_str = "A8/A10";
/* XXXKW different war_pass? */
war_pass = K_SYS_REVISION_BCM1250_PASS2;
break;
case K_SYS_REVISION_BCM1250_PASS2_2:
periph_rev = 2;
pass_str = "B1";
break;
case K_SYS_REVISION_BCM1250_B2:
periph_rev = 2;
pass_str = "B2";
war_pass = K_SYS_REVISION_BCM1250_PASS2_2;
break;
case K_SYS_REVISION_BCM1250_PASS3:
periph_rev = 3;
pass_str = "C0";
break;
case K_SYS_REVISION_BCM1250_C1:
periph_rev = 3;
pass_str = "C1";
break;
default:
if (soc_pass < K_SYS_REVISION_BCM1250_PASS2_2) {
periph_rev = 2;
pass_str = "A0-A6";
war_pass = K_SYS_REVISION_BCM1250_PASS2;
} else {
printk("Unknown BCM1250 rev %x\n", soc_pass);
ret = 1;
}
break;
}
return ret;
}
int sb1250_m3_workaround_needed(void)
{
switch (soc_type) {
case K_SYS_SOC_TYPE_BCM1250:
case K_SYS_SOC_TYPE_BCM1250_ALT:
case K_SYS_SOC_TYPE_BCM1250_ALT2:
case K_SYS_SOC_TYPE_BCM1125:
case K_SYS_SOC_TYPE_BCM1125H:
return soc_pass < K_SYS_REVISION_BCM1250_C0;
default:
return 0;
}
}
static int __init setup_bcm112x(void)
{
int ret = 0;
switch (soc_pass) {
case 0:
/* Early build didn't have revid set */
periph_rev = 3;
pass_str = "A1";
war_pass = K_SYS_REVISION_BCM112x_A1;
break;
case K_SYS_REVISION_BCM112x_A1:
periph_rev = 3;
pass_str = "A1";
break;
case K_SYS_REVISION_BCM112x_A2:
periph_rev = 3;
pass_str = "A2";
break;
case K_SYS_REVISION_BCM112x_A3:
periph_rev = 3;
pass_str = "A3";
break;
case K_SYS_REVISION_BCM112x_A4:
periph_rev = 3;
pass_str = "A4";
break;
case K_SYS_REVISION_BCM112x_B0:
periph_rev = 3;
pass_str = "B0";
break;
default:
printk("Unknown %s rev %x\n", soc_str, soc_pass);
ret = 1;
}
return ret;
}
/* Setup code likely to be common to all SiByte platforms */
static int __init sys_rev_decode(void)
{
int ret = 0;
war_pass = soc_pass;
switch (soc_type) {
case K_SYS_SOC_TYPE_BCM1250:
case K_SYS_SOC_TYPE_BCM1250_ALT:
case K_SYS_SOC_TYPE_BCM1250_ALT2:
soc_str = "BCM1250";
ret = setup_bcm1250();
break;
case K_SYS_SOC_TYPE_BCM1120:
soc_str = "BCM1120";
ret = setup_bcm112x();
break;
case K_SYS_SOC_TYPE_BCM1125:
soc_str = "BCM1125";
ret = setup_bcm112x();
break;
case K_SYS_SOC_TYPE_BCM1125H:
soc_str = "BCM1125H";
ret = setup_bcm112x();
break;
default:
printk("Unknown SOC type %x\n", soc_type);
ret = 1;
break;
}
return ret;
}
void __init sb1250_setup(void)
{
uint64_t sys_rev;
int plldiv;
int bad_config = 0;
sb1_pass = read_c0_prid() & PRID_REV_MASK;
sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION));
soc_type = SYS_SOC_TYPE(sys_rev);
soc_pass = G_SYS_REVISION(sys_rev);
if (sys_rev_decode()) {
printk("Restart after failure to identify SiByte chip\n");
machine_restart(NULL);
}
plldiv = G_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
zbbus_mhz = ((plldiv >> 1) * 50) + ((plldiv & 1) * 25);
printk("Broadcom SiByte %s %s @ %d MHz (SB1 rev %d)\n",
soc_str, pass_str, zbbus_mhz * 2, sb1_pass);
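/*
 * Example: a PLL divider field of 20 gives zbbus_mhz = (10 * 50) + 0 =
 * 500, so the line above reports a 1000 MHz part on a 500 MHz ZBbus.
 */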
printk("Board type: %s\n", get_system_type());
switch (war_pass) {
case K_SYS_REVISION_BCM1250_PASS1:
printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, "
"and the kernel doesn't have the proper "
"workarounds compiled in. @@@@\n");
bad_config = 1;
break;
case K_SYS_REVISION_BCM1250_PASS2:
/* Pass 2 - easiest as default for now - so many numbers */
#if !defined(CONFIG_SB1_PASS_2_WORKAROUNDS) || \
!defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS)
printk("@@@@ This is a BCM1250 A3-A10 board, and the "
"kernel doesn't have the proper workarounds "
"compiled in. @@@@\n");
bad_config = 1;
#endif
#ifdef CONFIG_CPU_HAS_PREFETCH
printk("@@@@ Prefetches may be enabled in this kernel, "
"but are buggy on this board. @@@@\n");
bad_config = 1;
#endif
break;
case K_SYS_REVISION_BCM1250_PASS2_2:
#ifndef CONFIG_SB1_PASS_2_WORKAROUNDS
printk("@@@@ This is a BCM1250 B1/B2. board, and the "
"kernel doesn't have the proper workarounds "
"compiled in. @@@@\n");
bad_config = 1;
#endif
#if defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS) || \
!defined(CONFIG_CPU_HAS_PREFETCH)
printk("@@@@ This is a BCM1250 B1/B2, but the kernel is "
"conservatively configured for an 'A' stepping. "
"@@@@\n");
#endif
break;
default:
break;
}
if (bad_config) {
printk("Invalid configuration for this chip.\n");
machine_restart(NULL);
}
}
| linux-master | arch/mips/sibyte/sb1250/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2000, 2001 Broadcom Corporation
*/
#include <linux/init.h>
extern void sb1250_clocksource_init(void);
extern void sb1250_clockevent_init(void);
void __init plat_time_init(void)
{
sb1250_clocksource_init();
sb1250_clockevent_init();
}
| linux-master | arch/mips/sibyte/sb1250/time.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001, 2002, 2003 Broadcom Corporation
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/sched/task_stack.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
static void *mailbox_set_regs[] = {
IOADDR(A_IMR_CPU0_BASE + R_IMR_MAILBOX_SET_CPU),
IOADDR(A_IMR_CPU1_BASE + R_IMR_MAILBOX_SET_CPU)
};
static void *mailbox_clear_regs[] = {
IOADDR(A_IMR_CPU0_BASE + R_IMR_MAILBOX_CLR_CPU),
IOADDR(A_IMR_CPU1_BASE + R_IMR_MAILBOX_CLR_CPU)
};
static void *mailbox_regs[] = {
IOADDR(A_IMR_CPU0_BASE + R_IMR_MAILBOX_CPU),
IOADDR(A_IMR_CPU1_BASE + R_IMR_MAILBOX_CPU)
};
/*
* SMP init and finish on secondary CPUs
*/
void sb1250_smp_init(void)
{
unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
STATUSF_IP1 | STATUSF_IP0;
/* Set interrupt mask, but don't enable */
change_c0_status(ST0_IM, imask);
}
/*
* These are routines for dealing with the sb1250 smp capabilities
* independent of board/firmware
*/
/*
* Simple enough; everything is set up, so just poke the appropriate mailbox
* register, and we should be set
*/
static void sb1250_send_ipi_single(int cpu, unsigned int action)
{
__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
}
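/*
 * The action mask travels in bits 63:48 of the mailbox register; the
 * receiving CPU recovers it in sb1250_mailbox_interrupt() below as
 * (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff.
 */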
static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
sb1250_send_ipi_single(i, action);
}
/*
* Code to run on secondary just after probing the CPU
*/
static void sb1250_init_secondary(void)
{
extern void sb1250_smp_init(void);
sb1250_smp_init();
}
/*
* Do any tidying up before marking online and running the idle
* loop
*/
static void sb1250_smp_finish(void)
{
extern void sb1250_clockevent_init(void);
sb1250_clockevent_init();
local_irq_enable();
}
/*
* Setup the PC, SP, and GP of a secondary processor and start it
* running!
*/
static int sb1250_boot_secondary(int cpu, struct task_struct *idle)
{
int retval;
retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
__KSTK_TOS(idle),
(unsigned long)task_thread_info(idle), 0);
if (retval != 0)
printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
return retval;
}
/*
* Use CFE to find out how many CPUs are available, setting up
* cpu_possible_mask and the logical/physical mappings.
* XXXKW will the boot CPU ever not be physical 0?
*
* Common setup before any secondaries are started
*/
static void __init sb1250_smp_setup(void)
{
int i, num;
init_cpu_possible(cpumask_of(0));
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;
for (i = 1, num = 0; i < NR_CPUS; i++) {
if (cfe_cpu_stop(i) == 0) {
set_cpu_possible(i, true);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
}
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
static void __init sb1250_prepare_cpus(unsigned int max_cpus)
{
}
const struct plat_smp_ops sb_smp_ops = {
.send_ipi_single = sb1250_send_ipi_single,
.send_ipi_mask = sb1250_send_ipi_mask,
.init_secondary = sb1250_init_secondary,
.smp_finish = sb1250_smp_finish,
.boot_secondary = sb1250_boot_secondary,
.smp_setup = sb1250_smp_setup,
.prepare_cpus = sb1250_prepare_cpus,
};
void sb1250_mailbox_interrupt(void)
{
int cpu = smp_processor_id();
int irq = K_INT_MBOX_0;
unsigned int action;
kstat_incr_irq_this_cpu(irq);
/* Load the mailbox register to figure out what we're supposed to do */
action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
/* Clear the mailbox to clear the interrupt */
____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]);
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION) {
irq_enter();
generic_smp_call_function_interrupt();
irq_exit();
}
}
| linux-master | arch/mips/sibyte/sb1250/smp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2000,2001,2002,2003,2004 Broadcom Corporation
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <asm/errno.h>
#include <asm/irq_regs.h>
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#include <asm/sibyte/bcm1480_scd.h>
#include <asm/sibyte/sb1250_uart.h>
#include <asm/sibyte/sb1250.h>
/*
* These are the routines that handle all the low level interrupt stuff.
* Actions handled here are: initialization of the interrupt map, requesting of
* interrupt lines by handlers, dispatching of interrupts to handlers, probing
* for interrupt lines
*/
#ifdef CONFIG_PCI
extern unsigned long ht_eoi_space;
#endif
/* Store the CPU id (not the logical number) */
int bcm1480_irq_owner[BCM1480_NR_IRQS];
static DEFINE_RAW_SPINLOCK(bcm1480_imr_lock);
void bcm1480_mask_irq(int cpu, int irq)
{
unsigned long flags, hl_spacing;
u64 cur_ints;
raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
hl_spacing = 0;
if ((irq >= BCM1480_NR_IRQS_HALF) && (irq <= BCM1480_NR_IRQS)) {
hl_spacing = BCM1480_IMR_HL_SPACING;
irq -= BCM1480_NR_IRQS_HALF;
}
cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
cur_ints |= (((u64) 1) << irq);
____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
}
void bcm1480_unmask_irq(int cpu, int irq)
{
unsigned long flags, hl_spacing;
u64 cur_ints;
raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
hl_spacing = 0;
if ((irq >= BCM1480_NR_IRQS_HALF) && (irq <= BCM1480_NR_IRQS)) {
hl_spacing = BCM1480_IMR_HL_SPACING;
irq -= BCM1480_NR_IRQS_HALF;
}
cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
cur_ints &= ~(((u64) 1) << irq);
____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
}
#ifdef CONFIG_SMP
static int bcm1480_set_affinity(struct irq_data *d, const struct cpumask *mask,
bool force)
{
unsigned int irq_dirty, irq = d->irq;
int i = 0, old_cpu, cpu, int_on, k;
u64 cur_ints;
unsigned long flags;
i = cpumask_first_and(mask, cpu_online_mask);
/* Convert logical CPU to physical CPU */
cpu = cpu_logical_map(i);
/* Protect against other affinity changers and IMR manipulation */
raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
/* Swizzle each CPU's IMR (but leave the IP selection alone) */
old_cpu = bcm1480_irq_owner[irq];
irq_dirty = irq;
if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
irq_dirty -= BCM1480_NR_IRQS_HALF;
}
for (k=0; k<2; k++) { /* Loop through high and low interrupt mask register */
cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
int_on = !(cur_ints & (((u64) 1) << irq_dirty));
if (int_on) {
/* If it was on, mask it */
cur_ints |= (((u64) 1) << irq_dirty);
____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
}
bcm1480_irq_owner[irq] = cpu;
if (int_on) {
/* unmask for the new CPU */
cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
cur_ints &= ~(((u64) 1) << irq_dirty);
____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
}
}
raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
return 0;
}
#endif
/*****************************************************************************/
static void disable_bcm1480_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
}
static void enable_bcm1480_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
bcm1480_unmask_irq(bcm1480_irq_owner[irq], irq);
}
static void ack_bcm1480_irq(struct irq_data *d)
{
unsigned int irq_dirty, irq = d->irq;
u64 pending;
int k;
/*
* If the interrupt was an HT interrupt, now is the time to
* clear it. NOTE: we assume the HT bridge was set up to
* deliver the interrupts to all CPUs (which makes affinity
* changing easier for us)
*/
irq_dirty = irq;
if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
irq_dirty -= BCM1480_NR_IRQS_HALF;
}
for (k=0; k<2; k++) { /* Loop through high and low LDT interrupts */
pending = __raw_readq(IOADDR(A_BCM1480_IMR_REGISTER(bcm1480_irq_owner[irq],
R_BCM1480_IMR_LDT_INTERRUPT_H + (k*BCM1480_IMR_HL_SPACING))));
pending &= ((u64)1 << (irq_dirty));
if (pending) {
#ifdef CONFIG_SMP
int i;
for (i=0; i<NR_CPUS; i++) {
/*
* Clear for all CPUs so an affinity switch
* doesn't find an old status
*/
__raw_writeq(pending, IOADDR(A_BCM1480_IMR_REGISTER(cpu_logical_map(i),
R_BCM1480_IMR_LDT_INTERRUPT_CLR_H + (k*BCM1480_IMR_HL_SPACING))));
}
#else
__raw_writeq(pending, IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_LDT_INTERRUPT_CLR_H + (k*BCM1480_IMR_HL_SPACING))));
#endif
/*
* Generate EOI. For Pass 1 parts, EOI is a nop. For
* Pass 2, the LDT world may be edge-triggered, but
* this EOI shouldn't hurt. If they are
* level-sensitive, the EOI is required.
*/
#ifdef CONFIG_PCI
if (ht_eoi_space)
*(uint32_t *)(ht_eoi_space+(irq<<16)+(7<<2)) = 0;
#endif
}
}
bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
}
static struct irq_chip bcm1480_irq_type = {
.name = "BCM1480-IMR",
.irq_mask_ack = ack_bcm1480_irq,
.irq_mask = disable_bcm1480_irq,
.irq_unmask = enable_bcm1480_irq,
#ifdef CONFIG_SMP
.irq_set_affinity = bcm1480_set_affinity
#endif
};
void __init init_bcm1480_irqs(void)
{
int i;
for (i = 0; i < BCM1480_NR_IRQS; i++) {
irq_set_chip_and_handler(i, &bcm1480_irq_type,
handle_level_irq);
bcm1480_irq_owner[i] = 0;
}
}
/*
* init_IRQ is called early in the boot sequence from init/main.c. It
* is responsible for setting up the interrupt mapper and installing the
* handler that will be responsible for dispatching interrupts to the
* "right" place.
*/
/*
* For now, map all interrupts to IP[2]. We could save
* some cycles by parceling out system interrupts to different
* IP lines, but keep it simple for bringup. We'll also direct
* all interrupts to a single CPU; we should probably route
* PCI and LDT to one cpu and everything else to the other
* to balance the load a bit.
*
* On the second cpu, everything is set to IP5, which is
* ignored, EXCEPT the mailbox interrupt. That one is
* set to IP[2] so it is handled. This is needed so we
* can do cross-cpu function calls, as required by SMP
*/
#define IMR_IP2_VAL K_BCM1480_INT_MAP_I0
#define IMR_IP3_VAL K_BCM1480_INT_MAP_I1
#define IMR_IP4_VAL K_BCM1480_INT_MAP_I2
#define IMR_IP5_VAL K_BCM1480_INT_MAP_I3
#define IMR_IP6_VAL K_BCM1480_INT_MAP_I4
void __init arch_init_irq(void)
{
unsigned int i, cpu;
u64 tmp;
unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
STATUSF_IP1 | STATUSF_IP0;
/* Default everything to IP2 */
/* Start with _high registers which has no bit 0 interrupt source */
for (i = 1; i < BCM1480_NR_IRQS_HALF; i++) { /* was I0 */
for (cpu = 0; cpu < 4; cpu++) {
__raw_writeq(IMR_IP2_VAL,
IOADDR(A_BCM1480_IMR_REGISTER(cpu,
R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (i << 3)));
}
}
/* Now do _low registers */
for (i = 0; i < BCM1480_NR_IRQS_HALF; i++) {
for (cpu = 0; cpu < 4; cpu++) {
__raw_writeq(IMR_IP2_VAL,
IOADDR(A_BCM1480_IMR_REGISTER(cpu,
R_BCM1480_IMR_INTERRUPT_MAP_BASE_L) + (i << 3)));
}
}
init_bcm1480_irqs();
/*
* Map the high 16 bits of mailbox_0 registers to IP[3], for
* inter-cpu messages
*/
/* Was I1 */
for (cpu = 0; cpu < 4; cpu++) {
__raw_writeq(IMR_IP3_VAL, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) +
(K_BCM1480_INT_MBOX_0_0 << 3)));
}
/* Clear the mailboxes. The firmware may leave them dirty */
for (cpu = 0; cpu < 4; cpu++) {
__raw_writeq(0xffffffffffffffffULL,
IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_0_CLR_CPU)));
__raw_writeq(0xffffffffffffffffULL,
IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_1_CLR_CPU)));
}
/* Mask everything except the high 16 bit of mailbox_0 registers for all cpus */
tmp = ~((u64) 0) ^ ( (((u64) 1) << K_BCM1480_INT_MBOX_0_0));
for (cpu = 0; cpu < 4; cpu++) {
__raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_H)));
}
tmp = ~((u64) 0);
for (cpu = 0; cpu < 4; cpu++) {
__raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_L)));
}
/*
* Note that the timer interrupts are also mapped, but this is
* done in bcm1480_time_init(). Also, the profiling driver
* does its own management of IP7.
*/
/* Enable necessary IPs, disable the rest */
change_c0_status(ST0_IM, imask);
}
extern void bcm1480_mailbox_interrupt(void);
static inline void dispatch_ip2(void)
{
unsigned long long mask_h, mask_l;
unsigned int cpu = smp_processor_id();
unsigned long base;
/*
* Default...we've hit an IP[2] interrupt, which means we've got to
* check the 1480 interrupt registers to figure out what to do. Need
* to detect which CPU we're on, now that smp_affinity is supported.
*/
base = A_BCM1480_IMR_MAPPER(cpu);
mask_h = __raw_readq(
IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H));
mask_l = __raw_readq(
IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L));
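/*
 * Sources 0-63 report in the _H status word and 64-127 in the _L word.
 * Source 0 of the high half is never mapped (the loop in arch_init_irq()
 * starts at 1), so a mask_h value of exactly 1 means the pending source
 * is really in mask_l; hence the mask_h ^ 1 test below.
 */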
if (mask_h) {
if (mask_h ^ 1)
do_IRQ(fls64(mask_h) - 1);
else if (mask_l)
do_IRQ(63 + fls64(mask_l));
}
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int cpu = smp_processor_id();
unsigned int pending;
pending = read_c0_cause() & read_c0_status();
if (pending & CAUSEF_IP4)
do_IRQ(K_BCM1480_INT_TIMER_0 + cpu);
#ifdef CONFIG_SMP
else if (pending & CAUSEF_IP3)
bcm1480_mailbox_interrupt();
#endif
else if (pending & CAUSEF_IP2)
dispatch_ip2();
}
| linux-master | arch/mips/sibyte/bcm1480/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2000,2001,2002,2003,2004 Broadcom Corporation
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/reboot.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/io.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_scd.h>
#include <asm/sibyte/sb1250_scd.h>
unsigned int sb1_pass;
unsigned int soc_pass;
unsigned int soc_type;
EXPORT_SYMBOL(soc_type);
unsigned int periph_rev;
EXPORT_SYMBOL_GPL(periph_rev);
unsigned int zbbus_mhz;
EXPORT_SYMBOL(zbbus_mhz);
static unsigned int part_type;
static char *soc_str;
static char *pass_str;
static int __init setup_bcm1x80_bcm1x55(void)
{
switch (soc_pass) {
case K_SYS_REVISION_BCM1480_S0:
periph_rev = 1;
pass_str = "S0 (pass1)";
break;
case K_SYS_REVISION_BCM1480_A1:
periph_rev = 1;
pass_str = "A1 (pass1)";
break;
case K_SYS_REVISION_BCM1480_A2:
periph_rev = 1;
pass_str = "A2 (pass1)";
break;
case K_SYS_REVISION_BCM1480_A3:
periph_rev = 1;
pass_str = "A3 (pass1)";
break;
case K_SYS_REVISION_BCM1480_B0:
periph_rev = 1;
pass_str = "B0 (pass2)";
break;
default:
printk("Unknown %s rev %x\n", soc_str, soc_pass);
periph_rev = 1;
pass_str = "Unknown Revision";
break;
}
return 0;
}
/* Setup code likely to be common to all SiByte platforms */
static int __init sys_rev_decode(void)
{
int ret = 0;
switch (soc_type) {
case K_SYS_SOC_TYPE_BCM1x80:
if (part_type == K_SYS_PART_BCM1480)
soc_str = "BCM1480";
else if (part_type == K_SYS_PART_BCM1280)
soc_str = "BCM1280";
else
soc_str = "BCM1x80";
ret = setup_bcm1x80_bcm1x55();
break;
case K_SYS_SOC_TYPE_BCM1x55:
if (part_type == K_SYS_PART_BCM1455)
soc_str = "BCM1455";
else if (part_type == K_SYS_PART_BCM1255)
soc_str = "BCM1255";
else
soc_str = "BCM1x55";
ret = setup_bcm1x80_bcm1x55();
break;
default:
printk("Unknown part type %x\n", part_type);
ret = 1;
break;
}
return ret;
}
void __init bcm1480_setup(void)
{
uint64_t sys_rev;
int plldiv;
sb1_pass = read_c0_prid() & PRID_REV_MASK;
sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION));
soc_type = SYS_SOC_TYPE(sys_rev);
part_type = G_SYS_PART(sys_rev);
soc_pass = G_SYS_REVISION(sys_rev);
if (sys_rev_decode()) {
printk("Restart after failure to identify SiByte chip\n");
machine_restart(NULL);
}
plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
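/* The PLL divider counts in 25 MHz half-steps, so the ZBbus clock is
 * simply plldiv * 25 MHz; the printk below reports the CPU cores at
 * twice the ZBbus rate. */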
zbbus_mhz = ((plldiv >> 1) * 50) + ((plldiv & 1) * 25);
printk("Broadcom SiByte %s %s @ %d MHz (SB-1A rev %d)\n",
soc_str, pass_str, zbbus_mhz * 2, sb1_pass);
printk("Board type: %s\n", get_system_type());
}
| linux-master | arch/mips/sibyte/bcm1480/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2000,2001,2004 Broadcom Corporation
*/
#include <linux/init.h>
extern void sb1480_clockevent_init(void);
extern void sb1480_clocksource_init(void);
void __init plat_time_init(void)
{
sb1480_clocksource_init();
sb1480_clockevent_init();
}
| linux-master | arch/mips/sibyte/bcm1480/time.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001,2002,2004 Broadcom Corporation
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
/*
* These are routines for dealing with the bcm1480 smp capabilities
* independent of board/firmware
*/
static void *mailbox_0_set_regs[] = {
IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
};
static void *mailbox_0_clear_regs[] = {
IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
};
static void *mailbox_0_regs[] = {
IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
};
/*
* SMP init and finish on secondary CPUs
*/
void bcm1480_smp_init(void)
{
unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
STATUSF_IP1 | STATUSF_IP0;
/* Set interrupt mask, but don't enable */
change_c0_status(ST0_IM, imask);
}
/*
 * These are routines for dealing with the bcm1480 smp capabilities
 * independent of board/firmware
 */
/*
* Simple enough; everything is set up, so just poke the appropriate mailbox
* register, and we should be set
*/
static void bcm1480_send_ipi_single(int cpu, unsigned int action)
{
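/* The IPI action bits travel in the top 16 bits of the 64-bit mailbox
 * register; bcm1480_mailbox_interrupt() shifts them back down. */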
__raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
}
static void bcm1480_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
bcm1480_send_ipi_single(i, action);
}
/*
* Code to run on secondary just after probing the CPU
*/
static void bcm1480_init_secondary(void)
{
extern void bcm1480_smp_init(void);
bcm1480_smp_init();
}
/*
* Do any tidying up before marking online and running the idle
* loop
*/
static void bcm1480_smp_finish(void)
{
extern void sb1480_clockevent_init(void);
sb1480_clockevent_init();
local_irq_enable();
}
/*
* Setup the PC, SP, and GP of a secondary processor and start it
* running!
*/
static int bcm1480_boot_secondary(int cpu, struct task_struct *idle)
{
int retval;
retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
__KSTK_TOS(idle),
(unsigned long)task_thread_info(idle), 0);
if (retval != 0)
printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
return retval;
}
/*
* Use CFE to find out how many CPUs are available, setting up
* cpu_possible_mask and the logical/physical mappings.
* XXXKW will the boot CPU ever not be physical 0?
*
* Common setup before any secondaries are started
*/
static void __init bcm1480_smp_setup(void)
{
int i, num;
init_cpu_possible(cpumask_of(0));
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;
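/* cfe_cpu_stop() only succeeds for CPUs the firmware knows about, so it
 * doubles as an existence probe for the secondaries here. */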
for (i = 1, num = 0; i < NR_CPUS; i++) {
if (cfe_cpu_stop(i) == 0) {
set_cpu_possible(i, true);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
}
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
static void __init bcm1480_prepare_cpus(unsigned int max_cpus)
{
}
const struct plat_smp_ops bcm1480_smp_ops = {
.send_ipi_single = bcm1480_send_ipi_single,
.send_ipi_mask = bcm1480_send_ipi_mask,
.init_secondary = bcm1480_init_secondary,
.smp_finish = bcm1480_smp_finish,
.boot_secondary = bcm1480_boot_secondary,
.smp_setup = bcm1480_smp_setup,
.prepare_cpus = bcm1480_prepare_cpus,
};
void bcm1480_mailbox_interrupt(void)
{
int cpu = smp_processor_id();
int irq = K_BCM1480_INT_MBOX_0_0;
unsigned int action;
kstat_incr_irq_this_cpu(irq);
/* Load the mailbox register to figure out what we're supposed to do */
action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
/* Clear the mailbox to clear the interrupt */
__raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]);
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION) {
irq_enter();
generic_smp_call_function_interrupt();
irq_exit();
}
}
| linux-master | arch/mips/sibyte/bcm1480/smp.c |
/*
* BRIEF MODULE DESCRIPTION
* Serial port initialisation.
*
* Copyright 2004 IDT Inc. ([email protected])
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/irq.h>
#include <asm/serial.h>
#include <asm/mach-rc32434/rb.h>
extern unsigned int idt_cpu_freq;
static struct uart_port rb532_uart = {
.flags = UPF_BOOT_AUTOCONF,
.line = 0,
.irq = UART0_IRQ,
.iotype = UPIO_MEM,
.membase = (char *)KSEG1ADDR(REGBASE + UART0BASE),
.regshift = 2
};
int __init setup_serial_port(void)
{
rb532_uart.uartclk = idt_cpu_freq;
return early_serial_setup(&rb532_uart);
}
arch_initcall(setup_serial_port);
| linux-master | arch/mips/rb532/serial.c |
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Copyright 2002 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
* [email protected] or [email protected]
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <asm/bootinfo.h>
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mach-rc32434/irq.h>
#include <asm/mach-rc32434/gpio.h>
struct intr_group {
u32 mask; /* mask of valid bits in pending/mask registers */
volatile u32 *base_addr;
};
#define RC32434_NR_IRQS (GROUP4_IRQ_BASE + 32)
#if (NR_IRQS < RC32434_NR_IRQS)
#error Too few IRQs defined. Did you override <asm/irq.h>?
#endif
static const struct intr_group intr_group[NUM_INTR_GROUPS] = {
{
.mask = 0x0000efff,
.base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 0 * IC_GROUP_OFFSET)},
{
.mask = 0x00001fff,
.base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 1 * IC_GROUP_OFFSET)},
{
.mask = 0x00000007,
.base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 2 * IC_GROUP_OFFSET)},
{
.mask = 0x0003ffff,
.base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 3 * IC_GROUP_OFFSET)},
{
.mask = 0xffffffff,
.base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 4 * IC_GROUP_OFFSET)}
};
#define READ_PEND(base) (*(base))
#define READ_MASK(base) (*(base + 2))
#define WRITE_MASK(base, val) (*(base + 2) = (val))
static inline int irq_to_group(unsigned int irq_nr)
{
return (irq_nr - GROUP0_IRQ_BASE) >> 5;
}
static inline int group_to_ip(unsigned int group)
{
return group + 2;
}
static inline void enable_local_irq(unsigned int ip)
{
int ipnum = 0x100 << ip;
set_c0_status(ipnum);
}
static inline void disable_local_irq(unsigned int ip)
{
int ipnum = 0x100 << ip;
clear_c0_status(ipnum);
}
static inline void ack_local_irq(unsigned int ip)
{
int ipnum = 0x100 << ip;
clear_c0_cause(ipnum);
}
static void rb532_enable_irq(struct irq_data *d)
{
unsigned int group, intr_bit, irq_nr = d->irq;
int ip = irq_nr - GROUP0_IRQ_BASE;
volatile unsigned int *addr;
if (ip < 0)
enable_local_irq(irq_nr);
else {
group = ip >> 5;
ip &= (1 << 5) - 1;
intr_bit = 1 << ip;
enable_local_irq(group_to_ip(group));
addr = intr_group[group].base_addr;
WRITE_MASK(addr, READ_MASK(addr) & ~intr_bit);
}
}
static void rb532_disable_irq(struct irq_data *d)
{
unsigned int group, intr_bit, mask, irq_nr = d->irq;
int ip = irq_nr - GROUP0_IRQ_BASE;
volatile unsigned int *addr;
if (ip < 0) {
disable_local_irq(irq_nr);
} else {
group = ip >> 5;
ip &= (1 << 5) - 1;
intr_bit = 1 << ip;
addr = intr_group[group].base_addr;
mask = READ_MASK(addr);
mask |= intr_bit;
WRITE_MASK(addr, mask);
/* There is a maximum of 14 GPIO interrupts */
if (group == GPIO_MAPPED_IRQ_GROUP && irq_nr <= (GROUP4_IRQ_BASE + 13))
rb532_gpio_set_istat(0, irq_nr - GPIO_MAPPED_IRQ_BASE);
/*
* if there are no more interrupts enabled in this
* group, disable corresponding IP
*/
if (mask == intr_group[group].mask)
disable_local_irq(group_to_ip(group));
}
}
static void rb532_mask_and_ack_irq(struct irq_data *d)
{
rb532_disable_irq(d);
ack_local_irq(group_to_ip(irq_to_group(d->irq)));
}
static int rb532_set_type(struct irq_data *d, unsigned type)
{
int gpio = d->irq - GPIO_MAPPED_IRQ_BASE;
int group = irq_to_group(d->irq);
if (group != GPIO_MAPPED_IRQ_GROUP || d->irq > (GROUP4_IRQ_BASE + 13))
return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
rb532_gpio_set_ilevel(1, gpio);
break;
case IRQ_TYPE_LEVEL_LOW:
rb532_gpio_set_ilevel(0, gpio);
break;
default:
return -EINVAL;
}
return 0;
}
static struct irq_chip rc32434_irq_type = {
.name = "RB532",
.irq_ack = rb532_disable_irq,
.irq_mask = rb532_disable_irq,
.irq_mask_ack = rb532_mask_and_ack_irq,
.irq_unmask = rb532_enable_irq,
.irq_set_type = rb532_set_type,
};
void __init arch_init_irq(void)
{
int i;
pr_info("Initializing IRQ's: %d out of %d\n", RC32434_NR_IRQS, NR_IRQS);
for (i = 0; i < RC32434_NR_IRQS; i++)
irq_set_chip_and_handler(i, &rc32434_irq_type,
handle_level_irq);
}
/* Main Interrupt dispatcher */
asmlinkage void plat_irq_dispatch(void)
{
unsigned int ip, pend, group;
volatile unsigned int *addr;
unsigned int cp0_cause = read_c0_cause() & read_c0_status();
if (cp0_cause & CAUSEF_IP7) {
do_IRQ(7);
} else {
ip = (cp0_cause & 0x7c00);
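/* 0x7c00 selects CAUSE bits 10-14 (IP2-IP6). fls() is 1-based, so
 * 21 + (fls(ip) - 32) maps IP2..IP6 to groups 0..4, and
 * 39 + (fls(pend) - 32) is the pending bit number plus
 * GROUP0_IRQ_BASE (8). */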
if (ip) {
group = 21 + (fls(ip) - 32);
addr = intr_group[group].base_addr;
pend = READ_PEND(addr);
pend &= ~READ_MASK(addr); /* only unmasked interrupts */
pend = 39 + (fls(pend) - 32);
do_IRQ((group << 5) + pend);
}
}
}
| linux-master | arch/mips/rb532/irq.c |
/*
* Miscellaneous functions for IDT EB434 board
*
* Copyright 2004 IDT Inc. ([email protected])
* Copyright 2006 Phil Sutter <[email protected]>
* Copyright 2007 Florian Fainelli <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/gpio/driver.h>
#include <asm/mach-rc32434/rb.h>
#include <asm/mach-rc32434/gpio.h>
#define GPIOBASE 0x050000
/* Offsets relative to GPIOBASE */
#define GPIOFUNC 0x00
#define GPIOCFG 0x04
#define GPIOD 0x08
#define GPIOILEVEL 0x0C
#define GPIOISTAT 0x10
#define GPIONMIEN 0x14
#define IMASK6 0x38
struct rb532_gpio_chip {
struct gpio_chip chip;
void __iomem *regbase;
};
static struct resource rb532_gpio_reg0_res[] = {
{
.name = "gpio_reg0",
.start = REGBASE + GPIOBASE,
.end = REGBASE + GPIOBASE + sizeof(struct rb532_gpio_reg) - 1,
.flags = IORESOURCE_MEM,
}
};
/* rb532_set_bit - sanely set a bit
*
* bitval: new value for the bit
* offset: bit index in the 4 byte address range
* ioaddr: 4 byte aligned address being altered
*/
static inline void rb532_set_bit(unsigned bitval,
unsigned offset, void __iomem *ioaddr)
{
unsigned long flags;
u32 val;
local_irq_save(flags);
val = readl(ioaddr);
val &= ~(!bitval << offset); /* unset bit if bitval == 0 */
val |= (!!bitval << offset); /* set bit if bitval == 1 */
writel(val, ioaddr);
local_irq_restore(flags);
}
/* rb532_get_bit - read a bit
 *
 * returns the raw masked bit value: zero when the bit is clear,
 * nonzero (not necessarily 1) when it is set
 */
static inline int rb532_get_bit(unsigned offset, void __iomem *ioaddr)
{
return readl(ioaddr) & (1 << offset);
}
/*
 * Return GPIO level
 */
static int rb532_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct rb532_gpio_chip *gpch;
gpch = gpiochip_get_data(chip);
return !!rb532_get_bit(offset, gpch->regbase + GPIOD);
}
/*
* Set output GPIO level
*/
static void rb532_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
struct rb532_gpio_chip *gpch;
gpch = gpiochip_get_data(chip);
rb532_set_bit(value, offset, gpch->regbase + GPIOD);
}
/*
* Set GPIO direction to input
*/
static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct rb532_gpio_chip *gpch;
gpch = gpiochip_get_data(chip);
/* disable alternate function in case it's set */
rb532_set_bit(0, offset, gpch->regbase + GPIOFUNC);
rb532_set_bit(0, offset, gpch->regbase + GPIOCFG);
return 0;
}
/*
* Set GPIO direction to output
*/
static int rb532_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct rb532_gpio_chip *gpch;
gpch = gpiochip_get_data(chip);
/* disable alternate function in case it's set */
rb532_set_bit(0, offset, gpch->regbase + GPIOFUNC);
/* set the initial output value */
rb532_set_bit(value, offset, gpch->regbase + GPIOD);
rb532_set_bit(1, offset, gpch->regbase + GPIOCFG);
return 0;
}
static int rb532_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
return 8 + 4 * 32 + gpio;
}
static struct rb532_gpio_chip rb532_gpio_chip[] = {
[0] = {
.chip = {
.label = "gpio0",
.direction_input = rb532_gpio_direction_input,
.direction_output = rb532_gpio_direction_output,
.get = rb532_gpio_get,
.set = rb532_gpio_set,
.to_irq = rb532_gpio_to_irq,
.base = 0,
.ngpio = 32,
},
},
};
/*
* Set GPIO interrupt level
*/
void rb532_gpio_set_ilevel(int bit, unsigned gpio)
{
rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOILEVEL);
}
EXPORT_SYMBOL(rb532_gpio_set_ilevel);
/*
* Set GPIO interrupt status
*/
void rb532_gpio_set_istat(int bit, unsigned gpio)
{
rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOISTAT);
}
EXPORT_SYMBOL(rb532_gpio_set_istat);
/*
* Configure GPIO alternate function
*/
void rb532_gpio_set_func(unsigned gpio)
{
rb532_set_bit(1, gpio, rb532_gpio_chip->regbase + GPIOFUNC);
}
EXPORT_SYMBOL(rb532_gpio_set_func);
int __init rb532_gpio_init(void)
{
struct resource *r;
r = rb532_gpio_reg0_res;
rb532_gpio_chip->regbase = ioremap(r->start, resource_size(r));
if (!rb532_gpio_chip->regbase) {
printk(KERN_ERR "rb532: cannot remap GPIO register 0\n");
return -ENXIO;
}
/* Register our GPIO chip */
gpiochip_add_data(&rb532_gpio_chip->chip, rb532_gpio_chip);
return 0;
}
arch_initcall(rb532_gpio_init);
| linux-master | arch/mips/rb532/gpio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* setup.c - boot time setup code
*/
#include <linux/init.h>
#include <linux/export.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/time.h>
#include <linux/ioport.h>
#include <asm/mach-rc32434/rb.h>
#include <asm/mach-rc32434/pci.h>
struct pci_reg __iomem *pci_reg;
EXPORT_SYMBOL(pci_reg);
static struct resource pci0_res[] = {
{
.name = "pci_reg0",
.start = PCI0_BASE_ADDR,
.end = PCI0_BASE_ADDR + sizeof(struct pci_reg),
.flags = IORESOURCE_MEM,
}
};
static void rb_machine_restart(char *command)
{
/* just jump to the reset vector */
writel(0x80000001, IDT434_REG_BASE + RST);
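/* The RST register write triggers the soft reset; jumping to the boot
 * vector (KSEG1 0x1FC00000, i.e. 0xBFC00000) is the fallback in case
 * the write does not take effect immediately. */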
((void (*)(void)) KSEG1ADDR(0x1FC00000u))();
}
static void rb_machine_halt(void)
{
for (;;)
continue;
}
void __init plat_mem_setup(void)
{
u32 val;
_machine_restart = rb_machine_restart;
_machine_halt = rb_machine_halt;
pm_power_off = rb_machine_halt;
set_io_port_base(KSEG1);
pci_reg = ioremap(pci0_res[0].start,
pci0_res[0].end - pci0_res[0].start);
if (!pci_reg) {
printk(KERN_ERR "Could not remap PCI registers\n");
return;
}
val = __raw_readl(&pci_reg->pcic);
val &= 0xFFFFFF7;
__raw_writel(val, (void *)&pci_reg->pcic);
#ifdef CONFIG_PCI
/* Enable PCI interrupts in EPLD Mask register */
*epld_mask = 0x0;
*(epld_mask + 1) = 0x0;
#endif
write_c0_wired(0);
}
const char *get_system_type(void)
{
switch (mips_machtype) {
case MACH_MIKROTIK_RB532A:
return "Mikrotik RB532A";
break;
default:
return "Mikrotik RB532";
break;
}
}
| linux-master | arch/mips/rb532/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RouterBoard 500 Platform devices
*
* Copyright (C) 2006 Felix Fietkau <[email protected]>
* Copyright (C) 2007 Florian Fainelli <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/mtd/platnand.h>
#include <linux/mtd/mtd.h>
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/serial_8250.h>
#include <asm/bootinfo.h>
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/dma.h>
#include <asm/mach-rc32434/dma_v.h>
#include <asm/mach-rc32434/eth.h>
#include <asm/mach-rc32434/rb.h>
#include <asm/mach-rc32434/integ.h>
#include <asm/mach-rc32434/gpio.h>
#include <asm/mach-rc32434/irq.h>
#define ETH0_RX_DMA_ADDR (DMA0_BASE_ADDR + 0 * DMA_CHAN_OFFSET)
#define ETH0_TX_DMA_ADDR (DMA0_BASE_ADDR + 1 * DMA_CHAN_OFFSET)
extern unsigned int idt_cpu_freq;
static struct mpmc_device dev3;
void set_latch_u5(unsigned char or_mask, unsigned char nand_mask)
{
unsigned long flags;
spin_lock_irqsave(&dev3.lock, flags);
dev3.state = (dev3.state | or_mask) & ~nand_mask;
writeb(dev3.state, dev3.base);
spin_unlock_irqrestore(&dev3.lock, flags);
}
EXPORT_SYMBOL(set_latch_u5);
unsigned char get_latch_u5(void)
{
return dev3.state;
}
EXPORT_SYMBOL(get_latch_u5);
static struct resource korina_dev0_res[] = {
{
.name = "emac",
.start = ETH0_BASE_ADDR,
.end = ETH0_BASE_ADDR + sizeof(struct eth_regs),
.flags = IORESOURCE_MEM,
}, {
.name = "rx",
.start = ETH0_DMA_RX_IRQ,
.end = ETH0_DMA_RX_IRQ,
.flags = IORESOURCE_IRQ
}, {
.name = "tx",
.start = ETH0_DMA_TX_IRQ,
.end = ETH0_DMA_TX_IRQ,
.flags = IORESOURCE_IRQ
}, {
.name = "dma_rx",
.start = ETH0_RX_DMA_ADDR,
.end = ETH0_RX_DMA_ADDR + DMA_CHAN_OFFSET - 1,
.flags = IORESOURCE_MEM,
}, {
.name = "dma_tx",
.start = ETH0_TX_DMA_ADDR,
.end = ETH0_TX_DMA_ADDR + DMA_CHAN_OFFSET - 1,
.flags = IORESOURCE_MEM,
}
};
static struct korina_device korina_dev0_data = {
.name = "korina0",
.mac = {0xde, 0xca, 0xff, 0xc0, 0xff, 0xee}
};
static struct platform_device korina_dev0 = {
.id = -1,
.name = "korina",
.resource = korina_dev0_res,
.num_resources = ARRAY_SIZE(korina_dev0_res),
.dev = {
.platform_data = &korina_dev0_data.mac,
}
};
static struct resource cf_slot0_res[] = {
{
.name = "cf_membase",
.flags = IORESOURCE_MEM
}, {
.name = "cf_irq",
.start = (8 + 4 * 32 + CF_GPIO_NUM), /* 149 */
.end = (8 + 4 * 32 + CF_GPIO_NUM),
.flags = IORESOURCE_IRQ
}
};
static struct gpiod_lookup_table cf_slot0_gpio_table = {
.dev_id = "pata-rb532-cf",
.table = {
GPIO_LOOKUP("gpio0", CF_GPIO_NUM,
NULL, GPIO_ACTIVE_HIGH),
{ },
},
};
static struct platform_device cf_slot0 = {
.id = -1,
.name = "pata-rb532-cf",
.resource = cf_slot0_res,
.num_resources = ARRAY_SIZE(cf_slot0_res),
};
/* Resources and device for NAND */
static int rb532_dev_ready(struct nand_chip *chip)
{
return gpio_get_value(GPIO_RDY);
}
static void rb532_cmd_ctrl(struct nand_chip *chip, int cmd, unsigned int ctrl)
{
unsigned char orbits, nandbits;
if (ctrl & NAND_CTRL_CHANGE) {
orbits = (ctrl & NAND_CLE) << 1;
orbits |= (ctrl & NAND_ALE) >> 1;
nandbits = (~ctrl & NAND_CLE) << 1;
nandbits |= (~ctrl & NAND_ALE) >> 1;
set_latch_u5(orbits, nandbits);
}
if (cmd != NAND_CMD_NONE)
writeb(cmd, chip->legacy.IO_ADDR_W);
}
static struct resource nand_slot0_res[] = {
[0] = {
.name = "nand_membase",
.flags = IORESOURCE_MEM
}
};
static struct platform_nand_data rb532_nand_data = {
.ctrl.dev_ready = rb532_dev_ready,
.ctrl.cmd_ctrl = rb532_cmd_ctrl,
};
static struct platform_device nand_slot0 = {
.name = "gen_nand",
.id = -1,
.resource = nand_slot0_res,
.num_resources = ARRAY_SIZE(nand_slot0_res),
.dev.platform_data = &rb532_nand_data,
};
static struct mtd_partition rb532_partition_info[] = {
{
.name = "Routerboard NAND boot",
.offset = 0,
.size = 4 * 1024 * 1024,
}, {
.name = "rootfs",
.offset = MTDPART_OFS_NXTBLK,
.size = MTDPART_SIZ_FULL,
}
};
static struct platform_device rb532_led = {
.name = "rb532-led",
.id = -1,
};
static struct platform_device rb532_button = {
.name = "rb532-button",
.id = -1,
};
static struct resource rb532_wdt_res[] = {
{
.name = "rb532_wdt_res",
.start = INTEG0_BASE_ADDR,
.end = INTEG0_BASE_ADDR + sizeof(struct integ),
.flags = IORESOURCE_MEM,
}
};
static struct platform_device rb532_wdt = {
.name = "rc32434_wdt",
.id = -1,
.resource = rb532_wdt_res,
.num_resources = ARRAY_SIZE(rb532_wdt_res),
};
static struct plat_serial8250_port rb532_uart_res[] = {
{
.type = PORT_16550A,
.membase = (char *)KSEG1ADDR(REGBASE + UART0BASE),
.irq = UART0_IRQ,
.regshift = 2,
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF,
},
{
.flags = 0,
}
};
static struct platform_device rb532_uart = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev.platform_data = &rb532_uart_res,
};
static struct platform_device *rb532_devs[] = {
&korina_dev0,
&nand_slot0,
&cf_slot0,
&rb532_led,
&rb532_button,
&rb532_uart,
&rb532_wdt
};
/* NAND definitions */
#define NAND_CHIP_DELAY 25
static void __init rb532_nand_setup(void)
{
switch (mips_machtype) {
case MACH_MIKROTIK_RB532A:
set_latch_u5(LO_FOFF | LO_CEX,
LO_ULED | LO_ALE | LO_CLE | LO_WPX);
break;
default:
set_latch_u5(LO_WPX | LO_FOFF | LO_CEX,
LO_ULED | LO_ALE | LO_CLE);
break;
}
/* Setup NAND specific settings */
rb532_nand_data.chip.nr_chips = 1;
rb532_nand_data.chip.nr_partitions = ARRAY_SIZE(rb532_partition_info);
rb532_nand_data.chip.partitions = rb532_partition_info;
rb532_nand_data.chip.chip_delay = NAND_CHIP_DELAY;
}
static int __init plat_setup_devices(void)
{
/* Look for the CF card reader */
if (!readl(IDT434_REG_BASE + DEV1MASK))
rb532_devs[2] = NULL; /* disable cf_slot0 at index 2 */
else {
cf_slot0_res[0].start =
readl(IDT434_REG_BASE + DEV1BASE);
cf_slot0_res[0].end = cf_slot0_res[0].start + 0x1000;
}
/* Read the NAND resources from the device controller */
nand_slot0_res[0].start = readl(IDT434_REG_BASE + DEV2BASE);
nand_slot0_res[0].end = nand_slot0_res[0].start + 0x1000;
/* Read and map device controller 3 */
dev3.base = ioremap(readl(IDT434_REG_BASE + DEV3BASE), 1);
if (!dev3.base) {
printk(KERN_ERR "rb532: cannot remap device controller 3\n");
return -ENXIO;
}
/* Initialise the NAND device */
rb532_nand_setup();
/* set the uart clock to the current cpu frequency */
rb532_uart_res[0].uartclk = idt_cpu_freq;
gpiod_add_lookup_table(&cf_slot0_gpio_table);
return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
}
#ifdef CONFIG_NET
static int __init setup_kmac(char *s)
{
printk(KERN_INFO "korina mac = %s\n", s);
if (!mac_pton(s, korina_dev0_data.mac))
printk(KERN_ERR "Invalid mac\n");
return 1;
}
__setup("kmac=", setup_kmac);
#endif /* CONFIG_NET */
arch_initcall(plat_setup_devices);
| linux-master | arch/mips/rb532/devices.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Carsten Langgaard, [email protected]
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
*
* Setting up the clock on the MIPS boards.
*/
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mc146818rtc.h>
#include <linux/irq.h>
#include <linux/timex.h>
#include <asm/mipsregs.h>
#include <asm/time.h>
#include <asm/mach-rc32434/rc32434.h>
extern unsigned int idt_cpu_freq;
/*
* Figure out the r4k offset, the amount to increment the compare
* register for each time tick. There is no RTC available.
*
* The RC32434 counts at half the CPU *core* speed.
*/
static unsigned long __init cal_r4koff(void)
{
mips_hpt_frequency = idt_cpu_freq * IDT_CLOCK_MULT / 2;
return mips_hpt_frequency / HZ;
}
void __init plat_time_init(void)
{
unsigned int est_freq;
unsigned long flags, r4k_offset;
local_irq_save(flags);
printk(KERN_INFO "calculating r4koff... ");
r4k_offset = cal_r4koff();
printk("%08lx(%d)\n", r4k_offset, (int) r4k_offset);
est_freq = 2 * r4k_offset * HZ;
est_freq += 5000; /* round */
est_freq -= est_freq % 10000;
printk(KERN_INFO "CPU frequency %d.%02d MHz\n", est_freq / 1000000,
(est_freq % 1000000) * 100 / 1000000);
local_irq_restore(flags);
}
| linux-master | arch/mips/rb532/time.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RouterBoard 500 specific prom routines
*
* Copyright (C) 2003, Peter Sadik <[email protected]>
* Copyright (C) 2005-2006, P.Christeas <[email protected]>
* Copyright (C) 2007, Gabor Juhos <[email protected]>
* Felix Fietkau <[email protected]>
* Florian Fainelli <[email protected]>
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/console.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <asm/bootinfo.h>
#include <asm/mach-rc32434/ddr.h>
#include <asm/mach-rc32434/prom.h>
unsigned int idt_cpu_freq = 132000000;
EXPORT_SYMBOL(idt_cpu_freq);
static struct resource ddr_reg[] = {
{
.name = "ddr-reg",
.start = DDR0_PHYS_ADDR,
.end = DDR0_PHYS_ADDR + sizeof(struct ddr_ram),
.flags = IORESOURCE_MEM,
}
};
static inline int match_tag(char *arg, const char *tag)
{
return strncmp(arg, tag, strlen(tag)) == 0;
}
static inline unsigned long tag2ul(char *arg, const char *tag)
{
char *num;
num = arg + strlen(tag);
return simple_strtoul(num, 0, 10);
}
void __init prom_setup_cmdline(void)
{
static char cmd_line[COMMAND_LINE_SIZE] __initdata;
char *cp, *board;
int prom_argc;
char **prom_argv;
int i;
prom_argc = fw_arg0;
prom_argv = (char **) fw_arg1;
cp = cmd_line;
/* Note: parameters usually start at argv[1], not argv[0];
 * our ELF loader, however, starts at argv[0] */
for (i = 0; i < prom_argc; i++) {
if (match_tag(prom_argv[i], FREQ_TAG)) {
idt_cpu_freq = tag2ul(prom_argv[i], FREQ_TAG);
continue;
}
#ifdef IGNORE_CMDLINE_MEM
/* parses out the "mem=xx" arg */
if (match_tag(prom_argv[i], MEM_TAG))
continue;
#endif
if (i > 0)
*(cp++) = ' ';
if (match_tag(prom_argv[i], BOARD_TAG)) {
board = prom_argv[i] + strlen(BOARD_TAG);
if (match_tag(board, BOARD_RB532A))
mips_machtype = MACH_MIKROTIK_RB532A;
else
mips_machtype = MACH_MIKROTIK_RB532;
}
strcpy(cp, prom_argv[i]);
cp += strlen(prom_argv[i]);
}
*(cp++) = ' ';
i = strlen(arcs_cmdline);
if (i > 0) {
*(cp++) = ' ';
strcpy(cp, arcs_cmdline);
cp += strlen(arcs_cmdline);
}
cmd_line[COMMAND_LINE_SIZE - 1] = '\0';
strcpy(arcs_cmdline, cmd_line);
}
void __init prom_init(void)
{
struct ddr_ram __iomem *ddr;
phys_addr_t memsize;
phys_addr_t ddrbase;
ddr = ioremap(ddr_reg[0].start,
ddr_reg[0].end - ddr_reg[0].start);
if (!ddr) {
printk(KERN_ERR "Unable to remap DDR register\n");
return;
}
ddrbase = (phys_addr_t)&ddr->ddrbase;
memsize = (phys_addr_t)&ddr->ddrmask;
memsize = 0 - memsize;
prom_setup_cmdline();
/* give all RAM to boot allocator,
* except for the first 0x400 and the last 0x200 bytes */
memblock_add(ddrbase + 0x400, memsize - 0x600);
}
| linux-master | arch/mips/rb532/prom.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kobject.h>
#include <boot_param.h>
static ssize_t boardinfo_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char board_manufacturer[64] = {0};
char *tmp_board_manufacturer = board_manufacturer;
char bios_vendor[64] = {0};
char *tmp_bios_vendor = bios_vendor;
strcpy(board_manufacturer, eboard->name);
strcpy(bios_vendor, einter->description);
return sprintf(buf,
"Board Info\n"
"Manufacturer\t\t: %s\n"
"Board Name\t\t: %s\n"
"Family\t\t\t: LOONGSON3\n\n"
"BIOS Info\n"
"Vendor\t\t\t: %s\n"
"Version\t\t\t: %s\n"
"ROM Size\t\t: %d KB\n"
"Release Date\t\t: %s\n",
strsep(&tmp_board_manufacturer, "-"),
eboard->name,
strsep(&tmp_bios_vendor, "-"),
einter->description,
einter->size,
especial->special_name);
}
static struct kobj_attribute boardinfo_attr = __ATTR(boardinfo, 0444,
boardinfo_show, NULL);
static int __init boardinfo_init(void)
{
struct kobject *lefi_kobj;
lefi_kobj = kobject_create_and_add("lefi", firmware_kobj);
if (!lefi_kobj) {
pr_err("lefi: Firmware registration failed.\n");
return -ENOMEM;
}
return sysfs_create_file(lefi_kobj, &boardinfo_attr.attr);
}
late_initcall(boardinfo_init);
| linux-master | arch/mips/loongson64/boardinfo.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/hpet.h>
#include <asm/time.h>
#define SMBUS_CFG_BASE (loongson_sysconf.ht_control_base + 0x0300a000)
#define SMBUS_PCI_REG40 0x40
#define SMBUS_PCI_REG64 0x64
#define SMBUS_PCI_REGB4 0xb4
#define HPET_MIN_CYCLES 16
#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12)
static DEFINE_SPINLOCK(hpet_lock);
DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
static unsigned int smbus_read(int offset)
{
return *(volatile unsigned int *)(SMBUS_CFG_BASE + offset);
}
static void smbus_write(int offset, int data)
{
*(volatile unsigned int *)(SMBUS_CFG_BASE + offset) = data;
}
static void smbus_enable(int offset, int bit)
{
unsigned int cfg = smbus_read(offset);
cfg |= bit;
smbus_write(offset, cfg);
}
static int hpet_read(int offset)
{
return *(volatile unsigned int *)(HPET_MMIO_ADDR + offset);
}
static void hpet_write(int offset, int data)
{
*(volatile unsigned int *)(HPET_MMIO_ADDR + offset) = data;
}
static void hpet_start_counter(void)
{
unsigned int cfg = hpet_read(HPET_CFG);
cfg |= HPET_CFG_ENABLE;
hpet_write(HPET_CFG, cfg);
}
static void hpet_stop_counter(void)
{
unsigned int cfg = hpet_read(HPET_CFG);
cfg &= ~HPET_CFG_ENABLE;
hpet_write(HPET_CFG, cfg);
}
static void hpet_reset_counter(void)
{
hpet_write(HPET_COUNTER, 0);
hpet_write(HPET_COUNTER + 4, 0);
}
static void hpet_restart_counter(void)
{
hpet_stop_counter();
hpet_reset_counter();
hpet_start_counter();
}
static void hpet_enable_legacy_int(void)
{
/* Do nothing on Loongson-3 */
}
static int hpet_set_state_periodic(struct clock_event_device *evt)
{
int cfg;
spin_lock(&hpet_lock);
pr_info("set clock event to periodic mode!\n");
/* stop counter */
hpet_stop_counter();
/* enables the timer0 to generate a periodic interrupt */
cfg = hpet_read(HPET_T0_CFG);
cfg &= ~HPET_TN_LEVEL;
cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
HPET_TN_32BIT;
hpet_write(HPET_T0_CFG, cfg);
/* set the comparator */
hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
udelay(1);
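/* HPET_TN_SETVAL is consumed by the first comparator write; this second
 * write programs the periodic accumulator (the same two-write sequence
 * the x86 HPET driver uses). */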
hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
/* start counter */
hpet_start_counter();
spin_unlock(&hpet_lock);
return 0;
}
static int hpet_set_state_shutdown(struct clock_event_device *evt)
{
int cfg;
spin_lock(&hpet_lock);
cfg = hpet_read(HPET_T0_CFG);
cfg &= ~HPET_TN_ENABLE;
hpet_write(HPET_T0_CFG, cfg);
spin_unlock(&hpet_lock);
return 0;
}
static int hpet_set_state_oneshot(struct clock_event_device *evt)
{
int cfg;
spin_lock(&hpet_lock);
pr_info("set clock event to one shot mode!\n");
cfg = hpet_read(HPET_T0_CFG);
/*
* set timer0 type
* 1 : periodic interrupt
* 0 : non-periodic(oneshot) interrupt
*/
cfg &= ~HPET_TN_PERIODIC;
cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
hpet_write(HPET_T0_CFG, cfg);
spin_unlock(&hpet_lock);
return 0;
}
static int hpet_tick_resume(struct clock_event_device *evt)
{
spin_lock(&hpet_lock);
hpet_enable_legacy_int();
spin_unlock(&hpet_lock);
return 0;
}
static int hpet_next_event(unsigned long delta,
struct clock_event_device *evt)
{
u32 cnt;
s32 res;
cnt = hpet_read(HPET_COUNTER);
cnt += (u32) delta;
hpet_write(HPET_T0_CMP, cnt);
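/* Re-read the counter: if it has already caught up to the new comparator
 * (within HPET_MIN_CYCLES), return -ETIME so the clockevents core retries
 * with a larger delta. */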
res = (s32)(cnt - hpet_read(HPET_COUNTER));
return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
static irqreturn_t hpet_irq_handler(int irq, void *data)
{
int is_irq;
struct clock_event_device *cd;
unsigned int cpu = smp_processor_id();
is_irq = hpet_read(HPET_STATUS);
if (is_irq & HPET_T0_IRS) {
/* clear the TIMER0 irq status register */
hpet_write(HPET_STATUS, HPET_T0_IRS);
cd = &per_cpu(hpet_clockevent_device, cpu);
cd->event_handler(cd);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/*
 * HPET address assignment and IRQ routing should be done by the BIOS,
 * but PMON does not do this, so we set it up here directly.  The direct
 * setup below works fine; unfortunately hpet_setup() runs before PCI is
 * initialised, otherwise it could simply use the PCI config accessors:
 *
 * {
 * struct pci_dev *pdev;
 *
 * pdev = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
 * pci_write_config_word(pdev, SMBUS_PCI_REGB4, HPET_ADDR);
 *
 * ...
 * }
 */
static void hpet_setup(void)
{
/* set hpet base address */
smbus_write(SMBUS_PCI_REGB4, HPET_ADDR);
/* enable decoding of access to HPET MMIO*/
smbus_enable(SMBUS_PCI_REG40, (1 << 28));
/* HPET irq enable */
smbus_enable(SMBUS_PCI_REG64, (1 << 10));
hpet_enable_legacy_int();
}
void __init setup_hpet_timer(void)
{
unsigned long flags = IRQF_NOBALANCING | IRQF_TIMER;
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
hpet_setup();
cd = &per_cpu(hpet_clockevent_device, cpu);
cd->name = "hpet";
cd->rating = 100;
cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
cd->set_state_shutdown = hpet_set_state_shutdown;
cd->set_state_periodic = hpet_set_state_periodic;
cd->set_state_oneshot = hpet_set_state_oneshot;
cd->tick_resume = hpet_tick_resume;
cd->set_next_event = hpet_next_event;
cd->irq = HPET_T0_IRQ;
cd->cpumask = cpumask_of(cpu);
clockevent_set_clock(cd, HPET_FREQ);
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->max_delta_ticks = 0x7fffffff;
cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd);
cd->min_delta_ticks = HPET_MIN_PROG_DELTA;
clockevents_register_device(cd);
if (request_irq(HPET_T0_IRQ, hpet_irq_handler, flags, "hpet", NULL))
pr_err("Failed to request irq %d (hpet)\n", HPET_T0_IRQ);
pr_info("hpet clock event device register\n");
}
static u64 hpet_read_counter(struct clocksource *cs)
{
return (u64)hpet_read(HPET_COUNTER);
}
static void hpet_suspend(struct clocksource *cs)
{
}
static void hpet_resume(struct clocksource *cs)
{
hpet_setup();
hpet_restart_counter();
}
static struct clocksource csrc_hpet = {
.name = "hpet",
/* The MIPS clocksource rating is below 300, so the HPET is preferred. */
.rating = 300,
.read = hpet_read_counter,
.mask = CLOCKSOURCE_MASK(32),
/* oneshot mode works correctly with this flag */
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.suspend = hpet_suspend,
.resume = hpet_resume,
.mult = 0,
.shift = 10,
};
int __init init_hpet_clocksource(void)
{
csrc_hpet.mult = clocksource_hz2mult(HPET_FREQ, csrc_hpet.shift);
return clocksource_register_hz(&csrc_hpet, HPET_FREQ);
}
arch_initcall(init_hpet_clocksource);
| linux-master | arch/mips/loongson64/hpet.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <linux/irqchip.h>
#include <linux/logic_pio.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/bootinfo.h>
#include <asm/traps.h>
#include <asm/smp-ops.h>
#include <asm/cacheflush.h>
#include <asm/fw/fw.h>
#include <loongson.h>
#include <boot_param.h>
#define NODE_ID_OFFSET_ADDR ((void __iomem *)TO_UNCAC(0x1001041c))
u32 node_id_offset;
static void __init mips_nmi_setup(void)
{
void *base;
base = (void *)(CAC_BASE + 0x380);
memcpy(base, except_vec_nmi, 0x80);
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
void ls7a_early_config(void)
{
node_id_offset = ((readl(NODE_ID_OFFSET_ADDR) >> 8) & 0x1f) + 36;
}
void rs780e_early_config(void)
{
node_id_offset = 37;
}
void virtual_early_config(void)
{
node_id_offset = 44;
}
void __init szmem(unsigned int node)
{
u32 i, mem_type;
static unsigned long num_physpages;
u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
/* Otherwise the memory map comes from the DTB */
if (loongson_sysconf.fw_interface != LOONGSON_LEFI)
return;
/* Parse memory information and activate */
for (i = 0; i < loongson_memmap->nr_map; i++) {
node_id = loongson_memmap->map[i].node_id;
if (node_id != node)
continue;
mem_type = loongson_memmap->map[i].mem_type;
mem_size = loongson_memmap->map[i].mem_size;
mem_start = loongson_memmap->map[i].mem_start;
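/* Each node's RAM sits in its own 1 << 44 physical window; mem_start is
 * the offset inside that window and mem_size is given in MB. */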
switch (mem_type) {
case SYSTEM_RAM_LOW:
case SYSTEM_RAM_HIGH:
start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
node_psize = (mem_size << 20) >> PAGE_SHIFT;
end_pfn = start_pfn + node_psize;
num_physpages += node_psize;
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
(u32)node_id, mem_type, mem_start, mem_size);
pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
start_pfn, end_pfn, num_physpages);
memblock_add_node(PFN_PHYS(start_pfn),
PFN_PHYS(node_psize), node,
MEMBLOCK_NONE);
break;
case SYSTEM_RAM_RESERVED:
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
(u32)node_id, mem_type, mem_start, mem_size);
memblock_reserve(((node_id << 44) + mem_start), mem_size << 20);
break;
}
}
}
#ifndef CONFIG_NUMA
static void __init prom_init_memory(void)
{
szmem(0);
}
#endif
void __init prom_init(void)
{
fw_init_cmdline();
if (fw_arg2 == 0 || (fdt_magic(fw_arg2) == FDT_MAGIC)) {
loongson_sysconf.fw_interface = LOONGSON_DTB;
prom_dtb_init_env();
} else {
loongson_sysconf.fw_interface = LOONGSON_LEFI;
prom_lefi_init_env();
}
/* init base address of io space */
set_io_port_base(PCI_IOBASE);
if (loongson_sysconf.early_config)
loongson_sysconf.early_config();
#ifdef CONFIG_NUMA
prom_init_numa_memory();
#else
prom_init_memory();
#endif
/* Hardcode to CPU UART 0 */
if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE), 0, 1024);
else
setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024);
register_smp_ops(&loongson3_smp_ops);
board_nmi_handler_setup = mips_nmi_setup;
}
static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_t hw_start,
resource_size_t size)
{
int ret = 0;
struct logic_pio_hwaddr *range;
unsigned long vaddr;
range = kzalloc(sizeof(*range), GFP_ATOMIC);
if (!range)
return -ENOMEM;
range->fwnode = fwnode;
range->size = size = round_up(size, PAGE_SIZE);
range->hw_start = hw_start;
range->flags = LOGIC_PIO_CPU_MMIO;
ret = logic_pio_register_range(range);
if (ret) {
kfree(range);
return ret;
}
/* Legacy ISA must be placed at the start of PCI_IOBASE */
if (range->io_start != 0) {
logic_pio_unregister_range(range);
kfree(range);
return -EINVAL;
}
vaddr = PCI_IOBASE + range->io_start;
ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
return 0;
}
static __init void reserve_pio_range(void)
{
struct device_node *np;
for_each_node_by_name(np, "isa") {
struct of_range range;
struct of_range_parser parser;
pr_info("ISA Bridge: %pOF\n", np);
if (of_range_parser_init(&parser, np)) {
pr_info("Failed to parse resources.\n");
of_node_put(np);
break;
}
for_each_of_range(&parser, &range) {
switch (range.flags & IORESOURCE_TYPE_BITS) {
case IORESOURCE_IO:
pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
range.cpu_addr,
range.cpu_addr + range.size - 1,
range.bus_addr);
if (add_legacy_isa_io(&np->fwnode, range.cpu_addr, range.size))
pr_warn("Failed to reserve legacy IO in Logic PIO\n");
break;
case IORESOURCE_MEM:
pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx\n",
range.cpu_addr,
range.cpu_addr + range.size - 1,
range.bus_addr);
break;
}
}
}
}
void __init arch_init_irq(void)
{
reserve_pio_range();
irqchip_init();
}
| linux-master | arch/mips/loongson64/init.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2014 Lemote Corporation.
* written by Huacai Chen <[email protected]>
*
* based on arch/mips/cavium-octeon/cpu.c
* Copyright (C) 2009 Wind River Systems,
* written by Ralf Baechle <[email protected]>
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>
#include <asm/fpu.h>
#include <asm/cop2.h>
#include <asm/inst.h>
#include <asm/branch.h>
#include <asm/current.h>
#include <asm/mipsregs.h>
#include <asm/unaligned-emul.h>
static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
void *data)
{
unsigned int res, fpu_owned;
unsigned long ra, value, value_next;
union mips_instruction insn;
int fr = !test_thread_flag(TIF_32BIT_FPREGS);
struct pt_regs *regs = (struct pt_regs *)data;
void __user *addr = (void __user *)regs->cp0_badvaddr;
unsigned int __user *pc = (unsigned int __user *)exception_epc(regs);
ra = regs->regs[31];
__get_user(insn.word, pc);
switch (action) {
case CU2_EXCEPTION:
preempt_disable();
fpu_owned = __is_fpu_owner();
if (!fr)
set_c0_status(ST0_CU1 | ST0_CU2);
else
set_c0_status(ST0_CU1 | ST0_CU2 | ST0_FR);
enable_fpu_hazard();
KSTK_STATUS(current) |= (ST0_CU1 | ST0_CU2);
if (fr)
KSTK_STATUS(current) |= ST0_FR;
else
KSTK_STATUS(current) &= ~ST0_FR;
/* If FPU is owned, we needn't init or restore fp */
if (!fpu_owned) {
set_thread_flag(TIF_USEDFPU);
init_fp_ctx(current);
_restore_fp(current);
}
preempt_enable();
return NOTIFY_STOP; /* Don't call default notifier */
case CU2_LWC2_OP:
if (insn.loongson3_lswc2_format.ls == 0)
goto sigbus;
if (insn.loongson3_lswc2_format.fr == 0) { /* gslq */
if (!access_ok(addr, 16))
goto sigbus;
LoadDW(addr, value, res);
if (res)
goto fault;
LoadDW(addr + 8, value_next, res);
if (res)
goto fault;
regs->regs[insn.loongson3_lswc2_format.rt] = value;
regs->regs[insn.loongson3_lswc2_format.rq] = value_next;
compute_return_epc(regs);
} else { /* gslqc1 */
if (!access_ok(addr, 16))
goto sigbus;
lose_fpu(1);
LoadDW(addr, value, res);
if (res)
goto fault;
LoadDW(addr + 8, value_next, res);
if (res)
goto fault;
set_fpr64(¤t->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0, value);
set_fpr64(¤t->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
compute_return_epc(regs);
own_fpu(1);
}
return NOTIFY_STOP; /* Don't call default notifier */
case CU2_SWC2_OP:
if (insn.loongson3_lswc2_format.ls == 0)
goto sigbus;
if (insn.loongson3_lswc2_format.fr == 0) { /* gssq */
if (!access_ok(addr, 16))
goto sigbus;
/* write upper 8 bytes first */
value_next = regs->regs[insn.loongson3_lswc2_format.rq];
StoreDW(addr + 8, value_next, res);
if (res)
goto fault;
value = regs->regs[insn.loongson3_lswc2_format.rt];
StoreDW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
} else { /* gssqc1 */
if (!access_ok(addr, 16))
goto sigbus;
lose_fpu(1);
value_next = get_fpr64(¤t->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);
StoreDW(addr + 8, value_next, res);
if (res)
goto fault;
value = get_fpr64(¤t->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0);
StoreDW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
own_fpu(1);
}
return NOTIFY_STOP; /* Don't call default notifier */
case CU2_LDC2_OP:
switch (insn.loongson3_lsdc2_format.opcode1) {
/*
* Loongson-3 overridden ldc2 instructions.
* opcode1 instruction
* 0x1 gslhx: load 2 bytes to GPR
* 0x2 gslwx: load 4 bytes to GPR
* 0x3 gsldx: load 8 bytes to GPR
* 0x6 gslwxc1: load 4 bytes to FPR
* 0x7 gsldxc1: load 8 bytes to FPR
*/
case 0x1:
if (!access_ok(addr, 2))
goto sigbus;
LoadHW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.loongson3_lsdc2_format.rt] = value;
break;
case 0x2:
if (!access_ok(addr, 4))
goto sigbus;
LoadW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.loongson3_lsdc2_format.rt] = value;
break;
case 0x3:
if (!access_ok(addr, 8))
goto sigbus;
LoadDW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.loongson3_lsdc2_format.rt] = value;
break;
case 0x6:
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
if (!access_ok(addr, 4))
goto sigbus;
lose_fpu(1);
LoadW(addr, value, res);
if (res)
goto fault;
set_fpr64(¤t->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
compute_return_epc(regs);
own_fpu(1);
break;
case 0x7:
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
if (!access_ok(addr, 8))
goto sigbus;
lose_fpu(1);
LoadDW(addr, value, res);
if (res)
goto fault;
set_fpr64(¤t->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
compute_return_epc(regs);
own_fpu(1);
break;
}
return NOTIFY_STOP; /* Don't call default notifier */
case CU2_SDC2_OP:
switch (insn.loongson3_lsdc2_format.opcode1) {
/*
* Loongson-3 overridden sdc2 instructions.
* opcode1 instruction
* 0x1 gsshx: store 2 bytes from GPR
* 0x2 gsswx: store 4 bytes from GPR
* 0x3 gssdx: store 8 bytes from GPR
* 0x6 gsswxc1: store 4 bytes from FPR
* 0x7 gssdxc1: store 8 bytes from FPR
*/
case 0x1:
if (!access_ok(addr, 2))
goto sigbus;
compute_return_epc(regs);
value = regs->regs[insn.loongson3_lsdc2_format.rt];
StoreHW(addr, value, res);
if (res)
goto fault;
break;
case 0x2:
if (!access_ok(addr, 4))
goto sigbus;
compute_return_epc(regs);
value = regs->regs[insn.loongson3_lsdc2_format.rt];
StoreW(addr, value, res);
if (res)
goto fault;
break;
case 0x3:
if (!access_ok(addr, 8))
goto sigbus;
compute_return_epc(regs);
value = regs->regs[insn.loongson3_lsdc2_format.rt];
StoreDW(addr, value, res);
if (res)
goto fault;
break;
case 0x6:
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
if (!access_ok(addr, 4))
goto sigbus;
lose_fpu(1);
value = get_fpr64(¤t->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
StoreW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
own_fpu(1);
break;
case 0x7:
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
if (!access_ok(addr, 8))
goto sigbus;
lose_fpu(1);
value = get_fpr64(¤t->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
StoreDW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
own_fpu(1);
break;
}
return NOTIFY_STOP; /* Don't call default notifier */
}
return NOTIFY_OK; /* Let default notifier send signals */
fault:
/* roll back jump/branch */
regs->regs[31] = ra;
regs->cp0_epc = (unsigned long)pc;
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return NOTIFY_STOP; /* Don't call default notifier */
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGSEGV);
return NOTIFY_STOP; /* Don't call default notifier */
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGBUS);
return NOTIFY_STOP; /* Don't call default notifier */
}
static int __init loongson_cu2_setup(void)
{
return cu2_notifier(loongson_cu2_call, 0);
}
early_initcall(loongson_cu2_setup);
| linux-master | arch/mips/loongson64/cop2-ex.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
* Copyright (C) 2009 Lemote, Inc.
* Author: Zhangjin Wu, [email protected]
*/
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/reboot.h>
#include <asm/bug.h>
#include <loongson.h>
#include <boot_param.h>
static void loongson_restart(char *command)
{
void (*fw_restart)(void) = (void *)loongson_sysconf.restart_addr;
fw_restart();
while (1) {
if (cpu_wait)
cpu_wait();
}
}
static void loongson_poweroff(void)
{
void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
fw_poweroff();
while (1) {
if (cpu_wait)
cpu_wait();
}
}
static void loongson_halt(void)
{
pr_notice("\n\n** You can safely turn off the power now **\n\n");
while (1) {
if (cpu_wait)
cpu_wait();
}
}
#ifdef CONFIG_KEXEC
/* The 0x80000000 - 0x80200000 region is safe for the kexec control code and argument buffers */
#define MAX_ARGS 64
#define KEXEC_CTRL_CODE 0xFFFFFFFF80100000UL
#define KEXEC_ARGV_ADDR 0xFFFFFFFF80108000UL
#define KEXEC_ARGV_SIZE COMMAND_LINE_SIZE
#define KEXEC_ENVP_SIZE 4800
static int kexec_argc;
static int kdump_argc;
static void *kexec_argv;
static void *kdump_argv;
static void *kexec_envp;
static int loongson_kexec_prepare(struct kimage *image)
{
int i, argc = 0;
unsigned int *argv;
char *str, *ptr, *bootloader = "kexec";
/* argv at offset 0, argv[] at offset KEXEC_ARGV_SIZE/2 */
if (image->type == KEXEC_TYPE_DEFAULT)
argv = (unsigned int *)kexec_argv;
else
argv = (unsigned int *)kdump_argv;
argv[argc++] = (unsigned int)(KEXEC_ARGV_ADDR + KEXEC_ARGV_SIZE/2);
for (i = 0; i < image->nr_segments; i++) {
if (!strncmp(bootloader, (char *)image->segment[i].buf,
strlen(bootloader))) {
/*
* convert command line string to array
* of parameters (as bootloader does).
*/
int offt;
str = (char *)argv + KEXEC_ARGV_SIZE/2;
memcpy(str, image->segment[i].buf, KEXEC_ARGV_SIZE/2);
ptr = strchr(str, ' ');
while (ptr && (argc < MAX_ARGS)) {
*ptr = '\0';
if (ptr[1] != ' ') {
offt = (int)(ptr - str + 1);
argv[argc] = KEXEC_ARGV_ADDR + KEXEC_ARGV_SIZE/2 + offt;
argc++;
}
ptr = strchr(ptr + 1, ' ');
}
break;
}
}
if (image->type == KEXEC_TYPE_DEFAULT)
kexec_argc = argc;
else
kdump_argc = argc;
/* kexec/kdump need a safe page to save reboot_code_buffer */
image->control_code_page = virt_to_page((void *)KEXEC_CTRL_CODE);
return 0;
}
static void loongson_kexec_shutdown(void)
{
#ifdef CONFIG_SMP
int cpu;
/* All CPUs go to reboot_code_buffer */
for_each_possible_cpu(cpu)
if (!cpu_online(cpu))
cpu_device_up(get_cpu_device(cpu));
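/* TO_UNCAC(0x3ff01000) points at what the Loongson-3 SMP code uses as
 * the per-core IPI mailbox block, where parked secondaries poll for
 * their new entry point (assumption based on loongson64/smp.c). */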
secondary_kexec_args[0] = TO_UNCAC(0x3ff01000);
#endif
kexec_args[0] = kexec_argc;
kexec_args[1] = fw_arg1;
kexec_args[2] = fw_arg2;
memcpy((void *)fw_arg1, kexec_argv, KEXEC_ARGV_SIZE);
memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE);
}
static void loongson_crash_shutdown(struct pt_regs *regs)
{
default_machine_crash_shutdown(regs);
kexec_args[0] = kdump_argc;
kexec_args[1] = fw_arg1;
kexec_args[2] = fw_arg2;
#ifdef CONFIG_SMP
secondary_kexec_args[0] = TO_UNCAC(0x3ff01000);
#endif
memcpy((void *)fw_arg1, kdump_argv, KEXEC_ARGV_SIZE);
memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE);
}
#endif
static int __init mips_reboot_setup(void)
{
_machine_restart = loongson_restart;
_machine_halt = loongson_halt;
pm_power_off = loongson_poweroff;
#ifdef CONFIG_KEXEC
kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
if (WARN_ON(!kexec_argv))
return -ENOMEM;
kdump_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
if (WARN_ON(!kdump_argv))
return -ENOMEM;
kexec_envp = kmalloc(KEXEC_ENVP_SIZE, GFP_KERNEL);
if (WARN_ON(!kexec_envp))
return -ENOMEM;
fw_arg1 = KEXEC_ARGV_ADDR;
memcpy(kexec_envp, (void *)fw_arg2, KEXEC_ENVP_SIZE);
_machine_kexec_prepare = loongson_kexec_prepare;
_machine_kexec_shutdown = loongson_kexec_shutdown;
_machine_crash_shutdown = loongson_crash_shutdown;
#endif
return 0;
}
arch_initcall(mips_reboot_setup);
| linux-master | arch/mips/loongson64/reset.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
*/
#include <linux/export.h>
#include <linux/init.h>
#include <asm/bootinfo.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <asm/prom.h>
#include <loongson.h>
void *loongson_fdt_blob;
void __init plat_mem_setup(void)
{
if (loongson_fdt_blob)
__dt_setup_arch(loongson_fdt_blob);
}
| linux-master | arch/mips/loongson64/setup.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
#include <linux/init.h>
#include <linux/swiotlb.h>
#include <boot_param.h>
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	/* We extract the 2-bit node id (bits 44~47, only bits 44~45 are used
	 * now) from Loongson-3's 48-bit address space and embed it into the
	 * 40-bit DMA address */
long nid = (paddr >> 44) & 0x3;
return ((nid << 44) ^ paddr) | (nid << node_id_offset);
}
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	/* We extract the 2-bit node id (bits 44~47, only bits 44~45 are used
	 * now) from Loongson-3's 48-bit address space and embed it into the
	 * 40-bit DMA address */
long nid = (daddr >> node_id_offset) & 0x3;
return ((nid << node_id_offset) ^ daddr) | (nid << 44);
}
void __init plat_swiotlb_setup(void)
{
swiotlb_init(true, SWIOTLB_VERBOSE);
}
| linux-master | arch/mips/loongson64/dma.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* loongson-specific suspend support
*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin <[email protected]>
*/
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <asm/i8259.h>
#include <asm/mipsregs.h>
#include <loongson.h>
static unsigned int __maybe_unused cached_master_mask; /* i8259A */
static unsigned int __maybe_unused cached_slave_mask;
static unsigned int __maybe_unused cached_bonito_irq_mask; /* bonito */
void arch_suspend_disable_irqs(void)
{
/* disable all mips events */
local_irq_disable();
#ifdef CONFIG_I8259
/* disable all events of i8259A */
cached_slave_mask = inb(PIC_SLAVE_IMR);
cached_master_mask = inb(PIC_MASTER_IMR);
outb(0xff, PIC_SLAVE_IMR);
inb(PIC_SLAVE_IMR);
outb(0xff, PIC_MASTER_IMR);
inb(PIC_MASTER_IMR);
#endif
/* disable all events of bonito */
cached_bonito_irq_mask = LOONGSON_INTEN;
LOONGSON_INTENCLR = 0xffff;
(void)LOONGSON_INTENCLR;
}
void arch_suspend_enable_irqs(void)
{
/* enable all mips events */
local_irq_enable();
#ifdef CONFIG_I8259
/* only enable the cached events of i8259A */
outb(cached_slave_mask, PIC_SLAVE_IMR);
outb(cached_master_mask, PIC_MASTER_IMR);
#endif
/* enable all cached events of bonito */
LOONGSON_INTENSET = cached_bonito_irq_mask;
(void)LOONGSON_INTENSET;
}
/*
 * Set up the board-specific events for waking up Loongson from wait mode
*/
void __weak setup_wakeup_events(void)
{
}
void __weak mach_suspend(void)
{
}
void __weak mach_resume(void)
{
}
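/*
 * Enter the requested suspend state: the board-specific mach_suspend() hook
 * puts the platform to sleep and mach_resume() restores it afterwards.
 */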
static int loongson_pm_enter(suspend_state_t state)
{
mach_suspend();
mach_resume();
return 0;
}
static int loongson_pm_valid_state(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_ON:
case PM_SUSPEND_STANDBY:
case PM_SUSPEND_MEM:
return 1;
default:
return 0;
}
}
static const struct platform_suspend_ops loongson_pm_ops = {
.valid = loongson_pm_valid_state,
.enter = loongson_pm_enter,
};
static int __init loongson_pm_init(void)
{
suspend_set_ops(&loongson_pm_ops);
return 0;
}
arch_initcall(loongson_pm_init);
| linux-master | arch/mips/loongson64/pm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Based on Ocelot Linux port, which is
* Copyright 2001 MontaVista Software Inc.
* Author: [email protected] or [email protected]
*
* Copyright 2003 ICT CAS
* Author: Michael Guo <[email protected]>
*
* Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <linux/export.h>
#include <linux/pci_ids.h>
#include <asm/bootinfo.h>
#include <loongson.h>
#include <boot_param.h>
#include <builtin_dtbs.h>
#include <workarounds.h>
#define HOST_BRIDGE_CONFIG_ADDR ((void __iomem *)TO_UNCAC(0x1a000000))
u32 cpu_clock_freq;
EXPORT_SYMBOL(cpu_clock_freq);
struct efi_memory_map_loongson *loongson_memmap;
struct loongson_system_configuration loongson_sysconf;
struct board_devices *eboard;
struct interface_info *einter;
struct loongson_special_attribute *especial;
u64 loongson_chipcfg[MAX_PACKAGES] = {0xffffffffbfc00180};
u64 loongson_chiptemp[MAX_PACKAGES];
u64 loongson_freqctrl[MAX_PACKAGES];
unsigned long long smp_group[4];
const char *get_system_type(void)
{
return "Generic Loongson64 System";
}
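/*
 * Pick the device tree: use the blob passed by the firmware in fw_arg2 if it
 * looks like a valid kernel pointer, otherwise fall back to the built-in
 * 2-core 2K1000 DTB.
 */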
void __init prom_dtb_init_env(void)
{
if ((fw_arg2 < CKSEG0 || fw_arg2 > CKSEG1)
&& (fw_arg2 < XKPHYS || fw_arg2 > XKSEG))
loongson_fdt_blob = __dtb_loongson64_2core_2k1000_begin;
else
loongson_fdt_blob = (void *)fw_arg2;
}
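/*
 * Parse the LEFI boot_params structure passed by the firmware: fill in
 * loongson_sysconf (CPU topology, reset/poweroff entry points, DMA mask),
 * detect the bridge chip from its PCI vendor ID and select a matching
 * built-in DTB.
 */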
void __init prom_lefi_init_env(void)
{
struct boot_params *boot_p;
struct loongson_params *loongson_p;
struct system_loongson *esys;
struct efi_cpuinfo_loongson *ecpu;
struct irq_source_routing_table *eirq_source;
u32 id;
u16 vendor;
/* firmware arguments are initialized in head.S */
boot_p = (struct boot_params *)fw_arg2;
loongson_p = &(boot_p->efi.smbios.lp);
esys = (struct system_loongson *)
((u64)loongson_p + loongson_p->system_offset);
ecpu = (struct efi_cpuinfo_loongson *)
((u64)loongson_p + loongson_p->cpu_offset);
eboard = (struct board_devices *)
((u64)loongson_p + loongson_p->boarddev_table_offset);
einter = (struct interface_info *)
((u64)loongson_p + loongson_p->interface_offset);
especial = (struct loongson_special_attribute *)
((u64)loongson_p + loongson_p->special_offset);
eirq_source = (struct irq_source_routing_table *)
((u64)loongson_p + loongson_p->irq_offset);
loongson_memmap = (struct efi_memory_map_loongson *)
((u64)loongson_p + loongson_p->memory_offset);
cpu_clock_freq = ecpu->cpu_clock_freq;
loongson_sysconf.cputype = ecpu->cputype;
switch (ecpu->cputype) {
case Legacy_3A:
case Loongson_3A:
loongson_sysconf.cores_per_node = 4;
loongson_sysconf.cores_per_package = 4;
smp_group[0] = 0x900000003ff01000;
smp_group[1] = 0x900010003ff01000;
smp_group[2] = 0x900020003ff01000;
smp_group[3] = 0x900030003ff01000;
loongson_chipcfg[0] = 0x900000001fe00180;
loongson_chipcfg[1] = 0x900010001fe00180;
loongson_chipcfg[2] = 0x900020001fe00180;
loongson_chipcfg[3] = 0x900030001fe00180;
loongson_chiptemp[0] = 0x900000001fe0019c;
loongson_chiptemp[1] = 0x900010001fe0019c;
loongson_chiptemp[2] = 0x900020001fe0019c;
loongson_chiptemp[3] = 0x900030001fe0019c;
loongson_freqctrl[0] = 0x900000001fe001d0;
loongson_freqctrl[1] = 0x900010001fe001d0;
loongson_freqctrl[2] = 0x900020001fe001d0;
loongson_freqctrl[3] = 0x900030001fe001d0;
loongson_sysconf.workarounds = WORKAROUND_CPUFREQ;
break;
case Legacy_3B:
case Loongson_3B:
loongson_sysconf.cores_per_node = 4; /* One chip has 2 nodes */
loongson_sysconf.cores_per_package = 8;
smp_group[0] = 0x900000003ff01000;
smp_group[1] = 0x900010003ff05000;
smp_group[2] = 0x900020003ff09000;
smp_group[3] = 0x900030003ff0d000;
loongson_chipcfg[0] = 0x900000001fe00180;
loongson_chipcfg[1] = 0x900020001fe00180;
loongson_chipcfg[2] = 0x900040001fe00180;
loongson_chipcfg[3] = 0x900060001fe00180;
loongson_chiptemp[0] = 0x900000001fe0019c;
loongson_chiptemp[1] = 0x900020001fe0019c;
loongson_chiptemp[2] = 0x900040001fe0019c;
loongson_chiptemp[3] = 0x900060001fe0019c;
loongson_freqctrl[0] = 0x900000001fe001d0;
loongson_freqctrl[1] = 0x900020001fe001d0;
loongson_freqctrl[2] = 0x900040001fe001d0;
loongson_freqctrl[3] = 0x900060001fe001d0;
loongson_sysconf.workarounds = WORKAROUND_CPUHOTPLUG;
break;
default:
loongson_sysconf.cores_per_node = 1;
loongson_sysconf.cores_per_package = 1;
loongson_chipcfg[0] = 0x900000001fe00180;
}
loongson_sysconf.nr_cpus = ecpu->nr_cpus;
loongson_sysconf.boot_cpu_id = ecpu->cpu_startup_core_id;
loongson_sysconf.reserved_cpus_mask = ecpu->reserved_cores_mask;
if (ecpu->nr_cpus > NR_CPUS || ecpu->nr_cpus == 0)
loongson_sysconf.nr_cpus = NR_CPUS;
loongson_sysconf.nr_nodes = (loongson_sysconf.nr_cpus +
loongson_sysconf.cores_per_node - 1) /
loongson_sysconf.cores_per_node;
loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
if (loongson_sysconf.dma_mask_bits < 32 ||
loongson_sysconf.dma_mask_bits > 64)
loongson_sysconf.dma_mask_bits = 32;
loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;
loongson_sysconf.suspend_addr = boot_p->reset_system.DoSuspend;
loongson_sysconf.vgabios_addr = boot_p->efi.smbios.vga_bios;
pr_debug("Shutdown Addr: %llx, Restart Addr: %llx, VBIOS Addr: %llx\n",
loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr,
loongson_sysconf.vgabios_addr);
loongson_sysconf.workarounds |= esys->workarounds;
pr_info("CpuClock = %u\n", cpu_clock_freq);
/* Read the ID of PCI host bridge to detect bridge type */
id = readl(HOST_BRIDGE_CONFIG_ADDR);
vendor = id & 0xffff;
switch (vendor) {
case PCI_VENDOR_ID_LOONGSON:
pr_info("The bridge chip is LS7A\n");
loongson_sysconf.bridgetype = LS7A;
loongson_sysconf.early_config = ls7a_early_config;
break;
case PCI_VENDOR_ID_AMD:
case PCI_VENDOR_ID_ATI:
pr_info("The bridge chip is RS780E or SR5690\n");
loongson_sysconf.bridgetype = RS780E;
loongson_sysconf.early_config = rs780e_early_config;
break;
default:
pr_info("The bridge chip is VIRTUAL\n");
loongson_sysconf.bridgetype = VIRTUAL;
loongson_sysconf.early_config = virtual_early_config;
loongson_fdt_blob = __dtb_loongson64v_4core_virtio_begin;
break;
}
if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64C) {
switch (read_c0_prid() & PRID_REV_MASK) {
case PRID_REV_LOONGSON3A_R1:
case PRID_REV_LOONGSON3A_R2_0:
case PRID_REV_LOONGSON3A_R2_1:
case PRID_REV_LOONGSON3A_R3_0:
case PRID_REV_LOONGSON3A_R3_1:
switch (loongson_sysconf.bridgetype) {
case LS7A:
loongson_fdt_blob = __dtb_loongson64c_4core_ls7a_begin;
break;
case RS780E:
loongson_fdt_blob = __dtb_loongson64c_4core_rs780e_begin;
break;
default:
break;
}
break;
case PRID_REV_LOONGSON3B_R1:
case PRID_REV_LOONGSON3B_R2:
if (loongson_sysconf.bridgetype == RS780E)
loongson_fdt_blob = __dtb_loongson64c_8core_rs780e_begin;
break;
default:
break;
}
} else if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G) {
if (loongson_sysconf.bridgetype == LS7A)
loongson_fdt_blob = __dtb_loongson64g_4core_ls7a_begin;
}
if (!loongson_fdt_blob)
pr_err("Failed to determine built-in Loongson64 dtb\n");
}
| linux-master | arch/mips/loongson64/env.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <asm/time.h>
#include <asm/hpet.h>
#include <loongson.h>
#include <linux/clk.h>
#include <linux/of_clk.h>
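/*
 * When booted with a device tree, take the CPU clock rate from the DT clock
 * provider; the R4K CP0 timer is then programmed at half the CPU clock.
 */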
void __init plat_time_init(void)
{
struct clk *clk;
struct device_node *np;
if (loongson_sysconf.fw_interface == LOONGSON_DTB) {
of_clk_init(NULL);
np = of_get_cpu_node(0, NULL);
if (!np) {
pr_err("Failed to get CPU node\n");
return;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
return;
}
cpu_clock_freq = clk_get_rate(clk);
clk_put(clk);
}
/* setup mips r4k timer */
mips_hpt_frequency = cpu_clock_freq / 2;
#ifdef CONFIG_RS780_HPET
setup_hpet_timer();
#endif
}
| linux-master | arch/mips/loongson64/time.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
* Institute of Computing Technology
* Author: Xiang Gao, [email protected]
* Huacai Chen, [email protected]
* Xiaofu Meng, Shuangshuang Zhang
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <linux/irq.h>
#include <asm/bootinfo.h>
#include <asm/mc146818-time.h>
#include <asm/time.h>
#include <asm/wbflush.h>
#include <boot_param.h>
#include <loongson.h>
unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
EXPORT_SYMBOL(__node_distances);
struct pglist_data *__node_data[MAX_NUMNODES];
EXPORT_SYMBOL(__node_data);
cpumask_t __node_cpumask[MAX_NUMNODES];
EXPORT_SYMBOL(__node_cpumask);
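/* Mark loongson_sysconf.nr_nodes nodes as possible and online. */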
static void cpu_node_probe(void)
{
int i;
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
for (i = 0; i < loongson_sysconf.nr_nodes; i++) {
node_set_state(num_online_nodes(), N_POSSIBLE);
node_set_online(num_online_nodes());
}
pr_info("NUMA: Discovered %d cpus on %d nodes\n",
loongson_sysconf.nr_cpus, num_online_nodes());
}
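/*
 * NUMA distances: LOCAL_DISTANCE within a node, 40 between nodes of the same
 * package, 100 between different packages.
 */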
static int __init compute_node_distance(int row, int col)
{
int package_row = row * loongson_sysconf.cores_per_node /
loongson_sysconf.cores_per_package;
int package_col = col * loongson_sysconf.cores_per_node /
loongson_sysconf.cores_per_package;
if (col == row)
return LOCAL_DISTANCE;
else if (package_row == package_col)
return 40;
else
return 100;
}
static void __init init_topology_matrix(void)
{
int row, col;
for (row = 0; row < MAX_NUMNODES; row++)
for (col = 0; col < MAX_NUMNODES; col++)
__node_distances[row][col] = -1;
for_each_online_node(row) {
for_each_online_node(col) {
__node_distances[row][col] =
compute_node_distance(row, col);
}
}
}
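/*
 * Allocate and initialise the node's pg_data_t, record its pfn range and, on
 * node 0, reserve the kernel image, the pages below the node's first usable
 * pfn and the RS780E integrated GPU window.
 */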
static void __init node_mem_init(unsigned int node)
{
struct pglist_data *nd;
unsigned long node_addrspace_offset;
unsigned long start_pfn, end_pfn;
unsigned long nd_pa;
int tnid;
const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
node_addrspace_offset = nid_to_addrbase(node);
pr_info("Node%d's addrspace_offset is 0x%lx\n",
node, node_addrspace_offset);
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
node, start_pfn, end_pfn);
nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, node);
if (!nd_pa)
panic("Cannot allocate %zu bytes for node %d data\n",
nd_size, node);
nd = __va(nd_pa);
memset(nd, 0, sizeof(struct pglist_data));
tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
if (tnid != node)
pr_info("NODE_DATA(%d) on node %d\n", node, tnid);
__node_data[node] = nd;
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
if (node == 0) {
/* kernel start address */
unsigned long kernel_start_pfn = PFN_DOWN(__pa_symbol(&_text));
/* kernel end address */
unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end));
/* used by finalize_initrd() */
max_low_pfn = end_pfn;
/* Reserve the kernel text/data/bss */
memblock_reserve(kernel_start_pfn << PAGE_SHIFT,
((kernel_end_pfn - kernel_start_pfn) << PAGE_SHIFT));
/* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */
if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
memblock_reserve((node_addrspace_offset | 0xfe000000),
32 << 20);
/* Reserve pfn range 0~node[0]->node_start_pfn */
memblock_reserve(0, PAGE_SIZE * start_pfn);
}
}
static __init void prom_meminit(void)
{
unsigned int node, cpu, active_cpu = 0;
cpu_node_probe();
init_topology_matrix();
for (node = 0; node < loongson_sysconf.nr_nodes; node++) {
if (node_online(node)) {
szmem(node);
node_mem_init(node);
cpumask_clear(&__node_cpumask[node]);
}
}
max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
node = cpu / loongson_sysconf.cores_per_node;
if (node >= num_online_nodes())
node = 0;
if (loongson_sysconf.reserved_cpus_mask & (1<<cpu))
continue;
cpumask_set_cpu(active_cpu, &__node_cpumask[node]);
pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node);
active_cpu++;
}
}
void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = {0, };
pagetable_init();
zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
zones_size[ZONE_NORMAL] = max_low_pfn;
free_area_init(zones_size);
}
void __init mem_init(void)
{
high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
memblock_free_all();
setup_zero_pages(); /* This comes from node 0 */
}
/* All PCI devices belong to logical Node-0 */
int pcibus_to_node(struct pci_bus *bus)
{
return 0;
}
EXPORT_SYMBOL(pcibus_to_node);
void __init prom_init_numa_memory(void)
{
pr_info("CP0_Config3: CP0 16.3 (0x%x)\n", read_c0_config3());
pr_info("CP0_PageGrain: CP0 5.1 (0x%x)\n", read_c0_pagegrain());
prom_meminit();
}
pg_data_t * __init arch_alloc_nodedata(int nid)
{
return memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);
}
void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
__node_data[nid] = pgdat;
}
| linux-master | arch/mips/loongson64/numa.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/pci.h>
#include <loongson.h>
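/*
 * Point the VGA device's ROM resource at the VBIOS copy the firmware left in
 * memory, so the driver reads the shadowed ROM instead of fetching it from
 * the card.
 */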
static void pci_fixup_video(struct pci_dev *pdev)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
if (res->start)
return;
if (!loongson_sysconf.vgabios_addr)
return;
pci_disable_rom(pdev);
if (res->parent)
release_resource(res);
res->start = virt_to_phys((void *) loongson_sysconf.vgabios_addr);
res->end = res->start + 256*1024 - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
IORESOURCE_PCI_FIXED;
dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n", res);
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, 0x9615,
PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
| linux-master | arch/mips/loongson64/vbios_quirk.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/elf.h>
#include <loongson_regs.h>
#include <cpucfg-emul.h>
static bool is_loongson(struct cpuinfo_mips *c)
{
switch (c->processor_id & PRID_COMP_MASK) {
case PRID_COMP_LEGACY:
return ((c->processor_id & PRID_IMP_MASK) ==
PRID_IMP_LOONGSON_64C);
case PRID_COMP_LOONGSON:
return true;
default:
return false;
}
}
static u32 get_loongson_fprev(struct cpuinfo_mips *c)
{
return c->fpu_id & LOONGSON_FPREV_MASK;
}
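/*
 * Probe for uncached-accelerated (UCA) support by checking whether the UCAC
 * bit in the diag register can be set.
 */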
static bool cpu_has_uca(void)
{
u32 diag = read_c0_diag();
u32 new_diag;
if (diag & LOONGSON_DIAG_UCAC)
/* UCA is already enabled. */
return true;
/* See if UCAC bit can be flipped on. This should be safe. */
new_diag = diag | LOONGSON_DIAG_UCAC;
write_c0_diag(new_diag);
new_diag = read_c0_diag();
write_c0_diag(diag);
return (new_diag & LOONGSON_DIAG_UCAC) != 0;
}
static void probe_uca(struct cpuinfo_mips *c)
{
if (cpu_has_uca())
c->loongson3_cpucfg_data[0] |= LOONGSON_CFG1_LSUCA;
}
static void decode_loongson_config6(struct cpuinfo_mips *c)
{
u32 config6 = read_c0_config6();
if (config6 & LOONGSON_CONF6_SFBEN)
c->loongson3_cpucfg_data[0] |= LOONGSON_CFG1_SFBP;
if (config6 & LOONGSON_CONF6_LLEXC)
c->loongson3_cpucfg_data[0] |= LOONGSON_CFG1_LLEXC;
if (config6 & LOONGSON_CONF6_SCRAND)
c->loongson3_cpucfg_data[0] |= LOONGSON_CFG1_SCRAND;
}
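/*
 * Fold the dynamically probed FPU, MMI and MSA capabilities into the
 * synthesized CPUCFG select-1 word.
 */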
static void patch_cpucfg_sel1(struct cpuinfo_mips *c)
{
u64 ases = c->ases;
u64 options = c->options;
u32 data = c->loongson3_cpucfg_data[0];
if (options & MIPS_CPU_FPU) {
data |= LOONGSON_CFG1_FP;
data |= get_loongson_fprev(c) << LOONGSON_CFG1_FPREV_OFFSET;
}
if (ases & MIPS_ASE_LOONGSON_MMI)
data |= LOONGSON_CFG1_MMI;
if (ases & MIPS_ASE_MSA)
data |= LOONGSON_CFG1_MSA1;
c->loongson3_cpucfg_data[0] = data;
}
static void patch_cpucfg_sel2(struct cpuinfo_mips *c)
{
u64 ases = c->ases;
u64 options = c->options;
u32 data = c->loongson3_cpucfg_data[1];
if (ases & MIPS_ASE_LOONGSON_EXT)
data |= LOONGSON_CFG2_LEXT1;
if (ases & MIPS_ASE_LOONGSON_EXT2)
data |= LOONGSON_CFG2_LEXT2;
if (options & MIPS_CPU_LDPTE)
data |= LOONGSON_CFG2_LSPW;
if (ases & MIPS_ASE_VZ)
data |= LOONGSON_CFG2_LVZP;
else
data &= ~LOONGSON_CFG2_LVZREV;
c->loongson3_cpucfg_data[1] = data;
}
static void patch_cpucfg_sel3(struct cpuinfo_mips *c)
{
u64 ases = c->ases;
u32 data = c->loongson3_cpucfg_data[2];
if (ases & MIPS_ASE_LOONGSON_CAM) {
data |= LOONGSON_CFG3_LCAMP;
} else {
data &= ~LOONGSON_CFG3_LCAMREV;
data &= ~LOONGSON_CFG3_LCAMNUM;
data &= ~LOONGSON_CFG3_LCAMKW;
data &= ~LOONGSON_CFG3_LCAMVW;
}
c->loongson3_cpucfg_data[2] = data;
}
void loongson3_cpucfg_synthesize_data(struct cpuinfo_mips *c)
{
/* Only engage the logic on Loongson processors. */
if (!is_loongson(c))
return;
/* CPUs with CPUCFG support don't need to synthesize anything. */
if (cpu_has_cfg())
goto have_cpucfg_now;
c->loongson3_cpucfg_data[0] = 0;
c->loongson3_cpucfg_data[1] = 0;
c->loongson3_cpucfg_data[2] = 0;
/* Add CPUCFG features non-discoverable otherwise. */
switch (c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) {
case PRID_IMP_LOONGSON_64R | PRID_REV_LOONGSON2K_R1_0:
case PRID_IMP_LOONGSON_64R | PRID_REV_LOONGSON2K_R1_1:
case PRID_IMP_LOONGSON_64R | PRID_REV_LOONGSON2K_R1_2:
case PRID_IMP_LOONGSON_64R | PRID_REV_LOONGSON2K_R1_3:
decode_loongson_config6(c);
probe_uca(c);
c->loongson3_cpucfg_data[0] |= (LOONGSON_CFG1_LSLDR0 |
LOONGSON_CFG1_LSSYNCI | LOONGSON_CFG1_LLSYNC |
LOONGSON_CFG1_TGTSYNC);
c->loongson3_cpucfg_data[1] |= (LOONGSON_CFG2_LBT1 |
LOONGSON_CFG2_LBT2 | LOONGSON_CFG2_LPMP |
LOONGSON_CFG2_LPM_REV2);
c->loongson3_cpucfg_data[2] = 0;
break;
case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R1:
c->loongson3_cpucfg_data[0] |= (LOONGSON_CFG1_LSLDR0 |
LOONGSON_CFG1_LSSYNCI | LOONGSON_CFG1_LSUCA |
LOONGSON_CFG1_LLSYNC | LOONGSON_CFG1_TGTSYNC);
c->loongson3_cpucfg_data[1] |= (LOONGSON_CFG2_LBT1 |
LOONGSON_CFG2_LPMP | LOONGSON_CFG2_LPM_REV1);
c->loongson3_cpucfg_data[2] |= (
LOONGSON_CFG3_LCAM_REV1 |
LOONGSON_CFG3_LCAMNUM_REV1 |
LOONGSON_CFG3_LCAMKW_REV1 |
LOONGSON_CFG3_LCAMVW_REV1);
break;
case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R1:
case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R2:
c->loongson3_cpucfg_data[0] |= (LOONGSON_CFG1_LSLDR0 |
LOONGSON_CFG1_LSSYNCI | LOONGSON_CFG1_LSUCA |
LOONGSON_CFG1_LLSYNC | LOONGSON_CFG1_TGTSYNC);
c->loongson3_cpucfg_data[1] |= (LOONGSON_CFG2_LBT1 |
LOONGSON_CFG2_LPMP | LOONGSON_CFG2_LPM_REV1);
c->loongson3_cpucfg_data[2] |= (
LOONGSON_CFG3_LCAM_REV1 |
LOONGSON_CFG3_LCAMNUM_REV1 |
LOONGSON_CFG3_LCAMKW_REV1 |
LOONGSON_CFG3_LCAMVW_REV1);
break;
case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0:
case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_1:
case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R3_0:
case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R3_1:
decode_loongson_config6(c);
probe_uca(c);
c->loongson3_cpucfg_data[0] |= (LOONGSON_CFG1_CNT64 |
LOONGSON_CFG1_LSLDR0 | LOONGSON_CFG1_LSPREF |
LOONGSON_CFG1_LSPREFX | LOONGSON_CFG1_LSSYNCI |
LOONGSON_CFG1_LLSYNC | LOONGSON_CFG1_TGTSYNC);
c->loongson3_cpucfg_data[1] |= (LOONGSON_CFG2_LBT1 |
LOONGSON_CFG2_LBT2 | LOONGSON_CFG2_LBTMMU |
LOONGSON_CFG2_LPMP | LOONGSON_CFG2_LPM_REV1 |
LOONGSON_CFG2_LVZ_REV1);
c->loongson3_cpucfg_data[2] |= (LOONGSON_CFG3_LCAM_REV1 |
LOONGSON_CFG3_LCAMNUM_REV1 |
LOONGSON_CFG3_LCAMKW_REV1 |
LOONGSON_CFG3_LCAMVW_REV1);
break;
default:
/* It is possible that some future Loongson cores still do
* not have CPUCFG, so do not emulate anything for these
* cores.
*/
return;
}
/* This feature is set by firmware, but all known Loongson-64 systems
* are configured this way.
*/
c->loongson3_cpucfg_data[0] |= LOONGSON_CFG1_CDMAP;
/* Patch in dynamically probed bits. */
patch_cpucfg_sel1(c);
patch_cpucfg_sel2(c);
patch_cpucfg_sel3(c);
have_cpucfg_now:
/* We have usable CPUCFG now, emulated or not.
* Announce CPUCFG availability to userspace via hwcap.
*/
elf_hwcap |= HWCAP_LOONGSON_CPUCFG;
}
| linux-master | arch/mips/loongson64/cpucfg-emul.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2010, 2011, 2012, Lemote, Inc.
* Author: Chen Huacai, [email protected]
*/
#include <irq.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <loongson.h>
#include <loongson_regs.h>
#include <workarounds.h>
#include "smp.h"
DEFINE_PER_CPU(int, cpu_state);
#define LS_IPI_IRQ (MIPS_CPU_IRQ_BASE + 6)
static void __iomem *ipi_set0_regs[16];
static void __iomem *ipi_clear0_regs[16];
static void __iomem *ipi_status0_regs[16];
static void __iomem *ipi_en0_regs[16];
static void __iomem *ipi_mailbox_buf[16];
static uint32_t core0_c0count[NR_CPUS];
static u32 (*ipi_read_clear)(int cpu);
static void (*ipi_write_action)(int cpu, u32 action);
static void (*ipi_write_enable)(int cpu);
static void (*ipi_clear_buf)(int cpu);
static void (*ipi_write_buf)(int cpu, struct task_struct *idle);
/* send mail via Mail_Send register for 3A4000+ CPU */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
uint64_t val;
/* send high 32 bits */
val = CSR_MAIL_SEND_BLOCK;
val |= (CSR_MAIL_SEND_BOX_HIGH(mailbox) << CSR_MAIL_SEND_BOX_SHIFT);
val |= (cpu << CSR_MAIL_SEND_CPU_SHIFT);
val |= (data & CSR_MAIL_SEND_H32_MASK);
csr_writeq(val, LOONGSON_CSR_MAIL_SEND);
/* send low 32 bits */
val = CSR_MAIL_SEND_BLOCK;
val |= (CSR_MAIL_SEND_BOX_LOW(mailbox) << CSR_MAIL_SEND_BOX_SHIFT);
val |= (cpu << CSR_MAIL_SEND_CPU_SHIFT);
val |= (data << CSR_MAIL_SEND_BUF_SHIFT);
csr_writeq(val, LOONGSON_CSR_MAIL_SEND);
};
static u32 csr_ipi_read_clear(int cpu)
{
u32 action;
/* Load the ipi register to figure out what we're supposed to do */
action = csr_readl(LOONGSON_CSR_IPI_STATUS);
/* Clear the ipi register to clear the interrupt */
csr_writel(action, LOONGSON_CSR_IPI_CLEAR);
return action;
}
static void csr_ipi_write_action(int cpu, u32 action)
{
unsigned int irq = 0;
while ((irq = ffs(action))) {
uint32_t val = CSR_IPI_SEND_BLOCK;
val |= (irq - 1);
val |= (cpu << CSR_IPI_SEND_CPU_SHIFT);
csr_writel(val, LOONGSON_CSR_IPI_SEND);
action &= ~BIT(irq - 1);
}
}
static void csr_ipi_write_enable(int cpu)
{
csr_writel(0xffffffff, LOONGSON_CSR_IPI_EN);
}
static void csr_ipi_clear_buf(int cpu)
{
csr_writeq(0, LOONGSON_CSR_MAIL_BUF0);
}
static void csr_ipi_write_buf(int cpu, struct task_struct *idle)
{
unsigned long startargs[4];
/* startargs[] are initial PC, SP and GP for secondary CPU */
startargs[0] = (unsigned long)&smp_bootstrap;
startargs[1] = (unsigned long)__KSTK_TOS(idle);
startargs[2] = (unsigned long)task_thread_info(idle);
startargs[3] = 0;
pr_debug("CPU#%d, func_pc=%lx, sp=%lx, gp=%lx\n",
cpu, startargs[0], startargs[1], startargs[2]);
csr_mail_send(startargs[3], cpu_logical_map(cpu), 3);
csr_mail_send(startargs[2], cpu_logical_map(cpu), 2);
csr_mail_send(startargs[1], cpu_logical_map(cpu), 1);
csr_mail_send(startargs[0], cpu_logical_map(cpu), 0);
}
static u32 legacy_ipi_read_clear(int cpu)
{
u32 action;
/* Load the ipi register to figure out what we're supposed to do */
action = readl_relaxed(ipi_status0_regs[cpu_logical_map(cpu)]);
/* Clear the ipi register to clear the interrupt */
writel_relaxed(action, ipi_clear0_regs[cpu_logical_map(cpu)]);
nudge_writes();
return action;
}
static void legacy_ipi_write_action(int cpu, u32 action)
{
writel_relaxed((u32)action, ipi_set0_regs[cpu]);
nudge_writes();
}
static void legacy_ipi_write_enable(int cpu)
{
writel_relaxed(0xffffffff, ipi_en0_regs[cpu_logical_map(cpu)]);
}
static void legacy_ipi_clear_buf(int cpu)
{
writeq_relaxed(0, ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
}
static void legacy_ipi_write_buf(int cpu, struct task_struct *idle)
{
unsigned long startargs[4];
/* startargs[] are initial PC, SP and GP for secondary CPU */
startargs[0] = (unsigned long)&smp_bootstrap;
startargs[1] = (unsigned long)__KSTK_TOS(idle);
startargs[2] = (unsigned long)task_thread_info(idle);
startargs[3] = 0;
pr_debug("CPU#%d, func_pc=%lx, sp=%lx, gp=%lx\n",
cpu, startargs[0], startargs[1], startargs[2]);
writeq_relaxed(startargs[3],
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x18);
writeq_relaxed(startargs[2],
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x10);
writeq_relaxed(startargs[1],
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x8);
writeq_relaxed(startargs[0],
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
nudge_writes();
}
static void csr_ipi_probe(void)
{
if (cpu_has_csr() && csr_readl(LOONGSON_CSR_FEATURES) & LOONGSON_CSRF_IPI) {
ipi_read_clear = csr_ipi_read_clear;
ipi_write_action = csr_ipi_write_action;
ipi_write_enable = csr_ipi_write_enable;
ipi_clear_buf = csr_ipi_clear_buf;
ipi_write_buf = csr_ipi_write_buf;
} else {
ipi_read_clear = legacy_ipi_read_clear;
ipi_write_action = legacy_ipi_write_action;
ipi_write_enable = legacy_ipi_write_enable;
ipi_clear_buf = legacy_ipi_clear_buf;
ipi_write_buf = legacy_ipi_write_buf;
}
}
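/*
 * Legacy (pre-CSR) IPI registers are memory-mapped per core: four core
 * groups with four cores each, hence the 16 entries below.
 */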
static void ipi_set0_regs_init(void)
{
ipi_set0_regs[0] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + SET0);
ipi_set0_regs[1] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + SET0);
ipi_set0_regs[2] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + SET0);
ipi_set0_regs[3] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + SET0);
ipi_set0_regs[4] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + SET0);
ipi_set0_regs[5] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + SET0);
ipi_set0_regs[6] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + SET0);
ipi_set0_regs[7] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + SET0);
ipi_set0_regs[8] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + SET0);
ipi_set0_regs[9] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + SET0);
ipi_set0_regs[10] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + SET0);
ipi_set0_regs[11] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + SET0);
ipi_set0_regs[12] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + SET0);
ipi_set0_regs[13] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + SET0);
ipi_set0_regs[14] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + SET0);
ipi_set0_regs[15] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + SET0);
}
static void ipi_clear0_regs_init(void)
{
ipi_clear0_regs[0] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + CLEAR0);
ipi_clear0_regs[1] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + CLEAR0);
ipi_clear0_regs[2] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + CLEAR0);
ipi_clear0_regs[3] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + CLEAR0);
ipi_clear0_regs[4] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + CLEAR0);
ipi_clear0_regs[5] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + CLEAR0);
ipi_clear0_regs[6] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + CLEAR0);
ipi_clear0_regs[7] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + CLEAR0);
ipi_clear0_regs[8] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + CLEAR0);
ipi_clear0_regs[9] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + CLEAR0);
ipi_clear0_regs[10] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + CLEAR0);
ipi_clear0_regs[11] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + CLEAR0);
ipi_clear0_regs[12] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + CLEAR0);
ipi_clear0_regs[13] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + CLEAR0);
ipi_clear0_regs[14] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + CLEAR0);
ipi_clear0_regs[15] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + CLEAR0);
}
static void ipi_status0_regs_init(void)
{
ipi_status0_regs[0] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + STATUS0);
ipi_status0_regs[1] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + STATUS0);
ipi_status0_regs[2] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + STATUS0);
ipi_status0_regs[3] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + STATUS0);
ipi_status0_regs[4] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + STATUS0);
ipi_status0_regs[5] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + STATUS0);
ipi_status0_regs[6] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + STATUS0);
ipi_status0_regs[7] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + STATUS0);
ipi_status0_regs[8] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + STATUS0);
ipi_status0_regs[9] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + STATUS0);
ipi_status0_regs[10] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + STATUS0);
ipi_status0_regs[11] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + STATUS0);
ipi_status0_regs[12] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + STATUS0);
ipi_status0_regs[13] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + STATUS0);
ipi_status0_regs[14] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + STATUS0);
ipi_status0_regs[15] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + STATUS0);
}
static void ipi_en0_regs_init(void)
{
ipi_en0_regs[0] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + EN0);
ipi_en0_regs[1] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + EN0);
ipi_en0_regs[2] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + EN0);
ipi_en0_regs[3] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + EN0);
ipi_en0_regs[4] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + EN0);
ipi_en0_regs[5] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + EN0);
ipi_en0_regs[6] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + EN0);
ipi_en0_regs[7] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + EN0);
ipi_en0_regs[8] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + EN0);
ipi_en0_regs[9] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + EN0);
ipi_en0_regs[10] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + EN0);
ipi_en0_regs[11] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + EN0);
ipi_en0_regs[12] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + EN0);
ipi_en0_regs[13] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + EN0);
ipi_en0_regs[14] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + EN0);
ipi_en0_regs[15] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + EN0);
}
static void ipi_mailbox_buf_init(void)
{
ipi_mailbox_buf[0] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + BUF);
ipi_mailbox_buf[1] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + BUF);
ipi_mailbox_buf[2] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + BUF);
ipi_mailbox_buf[3] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + BUF);
ipi_mailbox_buf[4] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + BUF);
ipi_mailbox_buf[5] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + BUF);
ipi_mailbox_buf[6] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + BUF);
ipi_mailbox_buf[7] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + BUF);
ipi_mailbox_buf[8] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + BUF);
ipi_mailbox_buf[9] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + BUF);
ipi_mailbox_buf[10] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + BUF);
ipi_mailbox_buf[11] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + BUF);
ipi_mailbox_buf[12] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + BUF);
ipi_mailbox_buf[13] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + BUF);
ipi_mailbox_buf[14] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + BUF);
ipi_mailbox_buf[15] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + BUF);
}
/*
* Simple enough, just poke the appropriate ipi register
*/
static void loongson3_send_ipi_single(int cpu, unsigned int action)
{
ipi_write_action(cpu_logical_map(cpu), (u32)action);
}
static void
loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
ipi_write_action(cpu_logical_map(i), (u32)action);
}
static irqreturn_t loongson3_ipi_interrupt(int irq, void *dev_id)
{
int i, cpu = smp_processor_id();
unsigned int action, c0count;
action = ipi_read_clear(cpu);
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION) {
irq_enter();
generic_smp_call_function_interrupt();
irq_exit();
}
if (action & SMP_ASK_C0COUNT) {
BUG_ON(cpu != 0);
c0count = read_c0_count();
c0count = c0count ? c0count : 1;
for (i = 1; i < nr_cpu_ids; i++)
core0_c0count[i] = c0count;
nudge_writes(); /* Let others see the result ASAP */
}
return IRQ_HANDLED;
}
#define MAX_LOOPS 800
/*
* SMP init and finish on secondary CPUs
*/
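/*
 * Besides enabling IPIs, the secondary CPU asks core 0 for its Count value
 * (SMP_ASK_C0COUNT) so that the local CP0 Count register can be brought
 * roughly in sync with the boot CPU.
 */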
static void loongson3_init_secondary(void)
{
int i;
uint32_t initcount;
unsigned int cpu = smp_processor_id();
unsigned int imask = STATUSF_IP7 | STATUSF_IP6 |
STATUSF_IP3 | STATUSF_IP2;
/* Set interrupt mask, but don't enable */
change_c0_status(ST0_IM, imask);
ipi_write_enable(cpu);
per_cpu(cpu_state, cpu) = CPU_ONLINE;
cpu_set_core(&cpu_data[cpu],
cpu_logical_map(cpu) % loongson_sysconf.cores_per_package);
cpu_data[cpu].package =
cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
i = 0;
core0_c0count[cpu] = 0;
loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
while (!core0_c0count[cpu]) {
i++;
cpu_relax();
}
if (i > MAX_LOOPS)
i = MAX_LOOPS;
if (cpu_data[cpu].package)
initcount = core0_c0count[cpu] + i;
else /* Local access is faster for loops */
initcount = core0_c0count[cpu] + i/2;
write_c0_count(initcount);
}
static void loongson3_smp_finish(void)
{
int cpu = smp_processor_id();
write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
local_irq_enable();
ipi_clear_buf(cpu);
pr_info("CPU#%d finished, CP0_ST=%x\n",
smp_processor_id(), read_c0_status());
}
static void __init loongson3_smp_setup(void)
{
int i = 0, num = 0; /* i: physical id, num: logical id */
init_cpu_possible(cpu_none_mask);
	/* For a unified kernel, NR_CPUS is the maximum possible value while
	 * loongson_sysconf.nr_cpus is the number of CPUs actually present.
	 */
while (i < loongson_sysconf.nr_cpus) {
if (loongson_sysconf.reserved_cpus_mask & (1<<i)) {
/* Reserved physical CPU cores */
__cpu_number_map[i] = -1;
} else {
__cpu_number_map[i] = num;
__cpu_logical_map[num] = i;
set_cpu_possible(num, true);
/* Loongson processors are always grouped by 4 */
cpu_set_cluster(&cpu_data[num], i / 4);
num++;
}
i++;
}
pr_info("Detected %i available CPU(s)\n", num);
while (num < loongson_sysconf.nr_cpus) {
__cpu_logical_map[num] = -1;
num++;
}
csr_ipi_probe();
ipi_set0_regs_init();
ipi_clear0_regs_init();
ipi_status0_regs_init();
ipi_en0_regs_init();
ipi_mailbox_buf_init();
ipi_write_enable(0);
cpu_set_core(&cpu_data[0],
cpu_logical_map(0) % loongson_sysconf.cores_per_package);
cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
}
static void __init loongson3_prepare_cpus(unsigned int max_cpus)
{
if (request_irq(LS_IPI_IRQ, loongson3_ipi_interrupt,
IRQF_PERCPU | IRQF_NO_SUSPEND, "SMP_IPI", NULL))
pr_err("Failed to request IPI IRQ\n");
init_cpu_present(cpu_possible_mask);
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}
/*
 * Set up the PC, SP, and GP of a secondary processor and start it running!
*/
static int loongson3_boot_secondary(int cpu, struct task_struct *idle)
{
pr_info("Booting CPU#%d...\n", cpu);
ipi_write_buf(cpu, idle);
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static int loongson3_cpu_disable(void)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
set_cpu_online(cpu, false);
calculate_cpu_foreign_map();
local_irq_save(flags);
clear_c0_status(ST0_IM);
local_irq_restore(flags);
local_flush_tlb_all();
return 0;
}
static void loongson3_cpu_die(unsigned int cpu)
{
while (per_cpu(cpu_state, cpu) != CPU_DEAD)
cpu_relax();
mb();
}
/* To shut down a core in Loongson-3, the target core should go to CKSEG1 and
 * flush all L1 cache entries first. Then another core (usually core 0) can
 * safely disable the clock of the target core. loongson3_play_dead() is
 * called via CKSEG1 (uncached and unmapped).
 */
static void loongson3_type1_play_dead(int *state_addr)
{
register int val;
register long cpuid, core, node, count;
register void *addr, *base, *initfunc;
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" li %[addr], 0x80000000 \n" /* KSEG0 */
"1: cache 0, 0(%[addr]) \n" /* flush L1 ICache */
" cache 0, 1(%[addr]) \n"
" cache 0, 2(%[addr]) \n"
" cache 0, 3(%[addr]) \n"
" cache 1, 0(%[addr]) \n" /* flush L1 DCache */
" cache 1, 1(%[addr]) \n"
" cache 1, 2(%[addr]) \n"
" cache 1, 3(%[addr]) \n"
" addiu %[sets], %[sets], -1 \n"
" bnez %[sets], 1b \n"
" addiu %[addr], %[addr], 0x20 \n"
" li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
" sw %[val], (%[state_addr]) \n"
" sync \n"
" cache 21, (%[state_addr]) \n" /* flush entry of *state_addr */
" .set pop \n"
: [addr] "=&r" (addr), [val] "=&r" (val)
: [state_addr] "r" (state_addr),
[sets] "r" (cpu_data[smp_processor_id()].dcache.sets));
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set mips64 \n"
" mfc0 %[cpuid], $15, 1 \n"
" andi %[cpuid], 0x3ff \n"
" dli %[base], 0x900000003ff01000 \n"
" andi %[core], %[cpuid], 0x3 \n"
" sll %[core], 8 \n" /* get core id */
" or %[base], %[base], %[core] \n"
" andi %[node], %[cpuid], 0xc \n"
" dsll %[node], 42 \n" /* get node id */
" or %[base], %[base], %[node] \n"
"1: li %[count], 0x100 \n" /* wait for init loop */
"2: bnez %[count], 2b \n" /* limit mailbox access */
" addiu %[count], -1 \n"
" ld %[initfunc], 0x20(%[base]) \n" /* get PC via mailbox */
" beqz %[initfunc], 1b \n"
" nop \n"
" ld $sp, 0x28(%[base]) \n" /* get SP via mailbox */
" ld $gp, 0x30(%[base]) \n" /* get GP via mailbox */
" ld $a1, 0x38(%[base]) \n"
" jr %[initfunc] \n" /* jump to initial PC */
" nop \n"
" .set pop \n"
: [core] "=&r" (core), [node] "=&r" (node),
[base] "=&r" (base), [cpuid] "=&r" (cpuid),
[count] "=&r" (count), [initfunc] "=&r" (initfunc)
: /* No Input */
: "a1");
}
static void loongson3_type2_play_dead(int *state_addr)
{
register int val;
register long cpuid, core, node, count;
register void *addr, *base, *initfunc;
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" li %[addr], 0x80000000 \n" /* KSEG0 */
"1: cache 0, 0(%[addr]) \n" /* flush L1 ICache */
" cache 0, 1(%[addr]) \n"
" cache 0, 2(%[addr]) \n"
" cache 0, 3(%[addr]) \n"
" cache 1, 0(%[addr]) \n" /* flush L1 DCache */
" cache 1, 1(%[addr]) \n"
" cache 1, 2(%[addr]) \n"
" cache 1, 3(%[addr]) \n"
" addiu %[sets], %[sets], -1 \n"
" bnez %[sets], 1b \n"
" addiu %[addr], %[addr], 0x20 \n"
" li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
" sw %[val], (%[state_addr]) \n"
" sync \n"
" cache 21, (%[state_addr]) \n" /* flush entry of *state_addr */
" .set pop \n"
: [addr] "=&r" (addr), [val] "=&r" (val)
: [state_addr] "r" (state_addr),
[sets] "r" (cpu_data[smp_processor_id()].dcache.sets));
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set mips64 \n"
" mfc0 %[cpuid], $15, 1 \n"
" andi %[cpuid], 0x3ff \n"
" dli %[base], 0x900000003ff01000 \n"
" andi %[core], %[cpuid], 0x3 \n"
" sll %[core], 8 \n" /* get core id */
" or %[base], %[base], %[core] \n"
" andi %[node], %[cpuid], 0xc \n"
" dsll %[node], 42 \n" /* get node id */
" or %[base], %[base], %[node] \n"
" dsrl %[node], 30 \n" /* 15:14 */
" or %[base], %[base], %[node] \n"
"1: li %[count], 0x100 \n" /* wait for init loop */
"2: bnez %[count], 2b \n" /* limit mailbox access */
" addiu %[count], -1 \n"
" ld %[initfunc], 0x20(%[base]) \n" /* get PC via mailbox */
" beqz %[initfunc], 1b \n"
" nop \n"
" ld $sp, 0x28(%[base]) \n" /* get SP via mailbox */
" ld $gp, 0x30(%[base]) \n" /* get GP via mailbox */
" ld $a1, 0x38(%[base]) \n"
" jr %[initfunc] \n" /* jump to initial PC */
" nop \n"
" .set pop \n"
: [core] "=&r" (core), [node] "=&r" (node),
[base] "=&r" (base), [cpuid] "=&r" (cpuid),
[count] "=&r" (count), [initfunc] "=&r" (initfunc)
: /* No Input */
: "a1");
}
static void loongson3_type3_play_dead(int *state_addr)
{
register int val;
register long cpuid, core, node, count;
register void *addr, *base, *initfunc;
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" li %[addr], 0x80000000 \n" /* KSEG0 */
"1: cache 0, 0(%[addr]) \n" /* flush L1 ICache */
" cache 0, 1(%[addr]) \n"
" cache 0, 2(%[addr]) \n"
" cache 0, 3(%[addr]) \n"
" cache 1, 0(%[addr]) \n" /* flush L1 DCache */
" cache 1, 1(%[addr]) \n"
" cache 1, 2(%[addr]) \n"
" cache 1, 3(%[addr]) \n"
" addiu %[sets], %[sets], -1 \n"
" bnez %[sets], 1b \n"
" addiu %[addr], %[addr], 0x40 \n"
" li %[addr], 0x80000000 \n" /* KSEG0 */
"2: cache 2, 0(%[addr]) \n" /* flush L1 VCache */
" cache 2, 1(%[addr]) \n"
" cache 2, 2(%[addr]) \n"
" cache 2, 3(%[addr]) \n"
" cache 2, 4(%[addr]) \n"
" cache 2, 5(%[addr]) \n"
" cache 2, 6(%[addr]) \n"
" cache 2, 7(%[addr]) \n"
" cache 2, 8(%[addr]) \n"
" cache 2, 9(%[addr]) \n"
" cache 2, 10(%[addr]) \n"
" cache 2, 11(%[addr]) \n"
" cache 2, 12(%[addr]) \n"
" cache 2, 13(%[addr]) \n"
" cache 2, 14(%[addr]) \n"
" cache 2, 15(%[addr]) \n"
" addiu %[vsets], %[vsets], -1 \n"
" bnez %[vsets], 2b \n"
" addiu %[addr], %[addr], 0x40 \n"
" li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
" sw %[val], (%[state_addr]) \n"
" sync \n"
" cache 21, (%[state_addr]) \n" /* flush entry of *state_addr */
" .set pop \n"
: [addr] "=&r" (addr), [val] "=&r" (val)
: [state_addr] "r" (state_addr),
[sets] "r" (cpu_data[smp_processor_id()].dcache.sets),
[vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set mips64 \n"
" mfc0 %[cpuid], $15, 1 \n"
" andi %[cpuid], 0x3ff \n"
" dli %[base], 0x900000003ff01000 \n"
" andi %[core], %[cpuid], 0x3 \n"
" sll %[core], 8 \n" /* get core id */
" or %[base], %[base], %[core] \n"
" andi %[node], %[cpuid], 0xc \n"
" dsll %[node], 42 \n" /* get node id */
" or %[base], %[base], %[node] \n"
"1: li %[count], 0x100 \n" /* wait for init loop */
"2: bnez %[count], 2b \n" /* limit mailbox access */
" addiu %[count], -1 \n"
" lw %[initfunc], 0x20(%[base]) \n" /* check lower 32-bit as jump indicator */
" beqz %[initfunc], 1b \n"
" nop \n"
" ld %[initfunc], 0x20(%[base]) \n" /* get PC (whole 64-bit) via mailbox */
" ld $sp, 0x28(%[base]) \n" /* get SP via mailbox */
" ld $gp, 0x30(%[base]) \n" /* get GP via mailbox */
" ld $a1, 0x38(%[base]) \n"
" jr %[initfunc] \n" /* jump to initial PC */
" nop \n"
" .set pop \n"
: [core] "=&r" (core), [node] "=&r" (node),
[base] "=&r" (base), [cpuid] "=&r" (cpuid),
[count] "=&r" (count), [initfunc] "=&r" (initfunc)
: /* No Input */
: "a1");
}
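/*
 * Pick the play_dead variant matching this CPU revision and jump to it
 * through CKSEG1 so the dying core runs uncached code.
 */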
void play_dead(void)
{
int prid_imp, prid_rev, *state_addr;
unsigned int cpu = smp_processor_id();
void (*play_dead_at_ckseg1)(int *);
idle_task_exit();
cpuhp_ap_report_dead();
prid_imp = read_c0_prid() & PRID_IMP_MASK;
prid_rev = read_c0_prid() & PRID_REV_MASK;
if (prid_imp == PRID_IMP_LOONGSON_64G) {
play_dead_at_ckseg1 =
(void *)CKSEG1ADDR((unsigned long)loongson3_type3_play_dead);
goto out;
}
switch (prid_rev) {
case PRID_REV_LOONGSON3A_R1:
default:
play_dead_at_ckseg1 =
(void *)CKSEG1ADDR((unsigned long)loongson3_type1_play_dead);
break;
case PRID_REV_LOONGSON3B_R1:
case PRID_REV_LOONGSON3B_R2:
play_dead_at_ckseg1 =
(void *)CKSEG1ADDR((unsigned long)loongson3_type2_play_dead);
break;
case PRID_REV_LOONGSON3A_R2_0:
case PRID_REV_LOONGSON3A_R2_1:
case PRID_REV_LOONGSON3A_R3_0:
case PRID_REV_LOONGSON3A_R3_1:
play_dead_at_ckseg1 =
(void *)CKSEG1ADDR((unsigned long)loongson3_type3_play_dead);
break;
}
out:
state_addr = &per_cpu(cpu_state, cpu);
mb();
play_dead_at_ckseg1(state_addr);
BUG();
}
static int loongson3_disable_clock(unsigned int cpu)
{
uint64_t core_id = cpu_core(&cpu_data[cpu]);
uint64_t package_id = cpu_data[cpu].package;
if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id));
} else {
if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
}
return 0;
}
static int loongson3_enable_clock(unsigned int cpu)
{
uint64_t core_id = cpu_core(&cpu_data[cpu]);
uint64_t package_id = cpu_data[cpu].package;
if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id);
} else {
if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
}
return 0;
}
static int register_loongson3_notifier(void)
{
return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
"mips/loongson:prepare",
loongson3_enable_clock,
loongson3_disable_clock);
}
early_initcall(register_loongson3_notifier);
#endif
const struct plat_smp_ops loongson3_smp_ops = {
.send_ipi_single = loongson3_send_ipi_single,
.send_ipi_mask = loongson3_send_ipi_mask,
.init_secondary = loongson3_init_secondary,
.smp_finish = loongson3_smp_finish,
.boot_secondary = loongson3_boot_secondary,
.smp_setup = loongson3_smp_setup,
.prepare_cpus = loongson3_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = loongson3_cpu_disable,
.cpu_die = loongson3_cpu_die,
#endif
#ifdef CONFIG_KEXEC
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
| linux-master | arch/mips/loongson64/smp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SGI IP30 miscellaneous setup bits.
*
* Copyright (C) 2004-2007 Stanislaw Skowronek <[email protected]>
* 2007 Joshua Kinard <[email protected]>
* 2009 Johannes Dickgreber <[email protected]>
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <asm/smp-ops.h>
#include <asm/sgialib.h>
#include <asm/time.h>
#include <asm/sgi/heart.h>
#include "ip30-common.h"
/* Structure of accessible HEART registers located in XKPHYS space. */
struct ip30_heart_regs __iomem *heart_regs = HEART_XKPHYS_BASE;
/*
* ARCS will report up to the first 1GB of
* memory if queried. Anything beyond that
* is marked as reserved.
*/
#define IP30_MAX_PROM_MEMORY _AC(0x40000000, UL)
/*
* Memory in the Octane starts at 512MB
*/
#define IP30_MEMORY_BASE _AC(0x20000000, UL)
/*
* If using ARCS to probe for memory, then
* remaining memory will start at this offset.
*/
#define IP30_REAL_MEMORY_START (IP30_MEMORY_BASE + IP30_MAX_PROM_MEMORY)
#define MEM_SHIFT(x) ((x) >> 20)
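/*
 * Walk the HEART memory bank configuration registers and release to
 * memblock whatever lies above the first 1GB that ARCS already reported.
 */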
static void __init ip30_mem_init(void)
{
unsigned long total_mem;
phys_addr_t addr;
phys_addr_t size;
u32 memcfg;
int i;
total_mem = 0;
for (i = 0; i < HEART_MEMORY_BANKS; i++) {
memcfg = __raw_readl(&heart_regs->mem_cfg.l[i]);
if (!(memcfg & HEART_MEMCFG_VALID))
continue;
addr = memcfg & HEART_MEMCFG_ADDR_MASK;
addr <<= HEART_MEMCFG_UNIT_SHIFT;
addr += IP30_MEMORY_BASE;
size = memcfg & HEART_MEMCFG_SIZE_MASK;
size >>= HEART_MEMCFG_SIZE_SHIFT;
size += 1;
size <<= HEART_MEMCFG_UNIT_SHIFT;
total_mem += size;
if (addr >= IP30_REAL_MEMORY_START)
memblock_phys_free(addr, size);
else if ((addr + size) > IP30_REAL_MEMORY_START)
memblock_phys_free(IP30_REAL_MEMORY_START,
size - IP30_MAX_PROM_MEMORY);
}
pr_info("Detected %luMB of physical memory.\n", MEM_SHIFT(total_mem));
}
/**
* ip30_cpu_time_init - platform time initialization.
*/
static void __init ip30_cpu_time_init(void)
{
int cpu = smp_processor_id();
u64 heart_compare;
unsigned int start, end;
int time_diff;
heart_compare = (heart_read(&heart_regs->count) +
(HEART_CYCLES_PER_SEC / 10));
start = read_c0_count();
while ((heart_read(&heart_regs->count) - heart_compare) & 0x800000)
cpu_relax();
end = read_c0_count();
time_diff = (int)end - (int)start;
mips_hpt_frequency = time_diff * 10;
pr_info("IP30: CPU%d: %d MHz CPU detected.\n", cpu,
(mips_hpt_frequency * 2) / 1000000);
}
void __init ip30_per_cpu_init(void)
{
/* Disable all interrupts. */
clear_c0_status(ST0_IM);
ip30_cpu_time_init();
#ifdef CONFIG_SMP
ip30_install_ipi();
#endif
enable_percpu_irq(IP30_HEART_L0_IRQ, IRQ_TYPE_NONE);
enable_percpu_irq(IP30_HEART_L1_IRQ, IRQ_TYPE_NONE);
enable_percpu_irq(IP30_HEART_L2_IRQ, IRQ_TYPE_NONE);
enable_percpu_irq(IP30_HEART_ERR_IRQ, IRQ_TYPE_NONE);
}
/**
* plat_mem_setup - despite the name, misc setup happens here.
*/
void __init plat_mem_setup(void)
{
ip30_mem_init();
/* XXX: Hard lock on /sbin/init if this flag isn't specified. */
prom_flags |= PROM_FLAG_DONT_FREE_TEMP;
#ifdef CONFIG_SMP
register_smp_ops(&ip30_smp_ops);
#else
ip30_per_cpu_init();
#endif
ioport_resource.start = 0;
ioport_resource.end = ~0UL;
set_io_port_base(IO_BASE);
}
| linux-master | arch/mips/sgi-ip30/ip30-setup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ip30-power.c: Software powerdown and reset handling for IP30 architecture.
*
* Copyright (C) 2004-2007 Stanislaw Skowronek <[email protected]>
* 2014 Joshua Kinard <[email protected]>
* 2009 Johannes Dickgreber <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/rtc/ds1685.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <asm/reboot.h>
#include <asm/sgi/heart.h>
static void __noreturn ip30_machine_restart(char *cmd)
{
/*
* Execute HEART cold reset
* Yes, it's cold-HEARTed!
*/
heart_write((heart_read(&heart_regs->mode) | HM_COLD_RST),
&heart_regs->mode);
unreachable();
}
static int __init ip30_reboot_setup(void)
{
_machine_restart = ip30_machine_restart;
return 0;
}
subsys_initcall(ip30_reboot_setup);
| linux-master | arch/mips/sgi-ip30/ip30-power.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ip30-smp.c: SMP on IP30 architecture.
* Based off of the original IP30 SMP code, with inspiration from ip27-smp.c
* and smp-bmips.c.
*
* Copyright (C) 2005-2007 Stanislaw Skowronek <[email protected]>
* 2006-2007, 2014-2015 Joshua Kinard <[email protected]>
* 2009 Johannes Dickgreber <[email protected]>
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/time.h>
#include <asm/sgi/heart.h>
#include "ip30-common.h"
#define MPCONF_MAGIC 0xbaddeed2
#define MPCONF_ADDR 0xa800000000000600L
#define MPCONF_SIZE 0x80
#define MPCONF(x) (MPCONF_ADDR + (x) * MPCONF_SIZE)
/* HEART can theoretically do 4 CPUs, but only 2 are physically possible */
#define MP_NCPU 2
struct mpconf {
u32 magic;
u32 prid;
u32 physid;
u32 virtid;
u32 scachesz;
u16 fanloads;
u16 res;
void *launch;
void *rendezvous;
u64 res2[3];
void *stackaddr;
void *lnch_parm;
void *rndv_parm;
u32 idleflag;
};
static void ip30_smp_send_ipi_single(int cpu, u32 action)
{
int irq;
switch (action) {
case SMP_RESCHEDULE_YOURSELF:
irq = HEART_L2_INT_RESCHED_CPU_0;
break;
case SMP_CALL_FUNCTION:
irq = HEART_L2_INT_CALL_CPU_0;
break;
default:
panic("IP30: Unknown action value in %s!\n", __func__);
}
irq += cpu;
/* Poke the other CPU -- it's got mail! */
heart_write(BIT_ULL(irq), &heart_regs->set_isr);
}
static void ip30_smp_send_ipi_mask(const struct cpumask *mask, u32 action)
{
u32 i;
for_each_cpu(i, mask)
ip30_smp_send_ipi_single(i, action);
}
static void __init ip30_smp_setup(void)
{
int i;
int ncpu = 0;
struct mpconf *mpc;
init_cpu_possible(cpumask_of(0));
/* Scan the MPCONF structure and enumerate available CPUs. */
for (i = 0; i < MP_NCPU; i++) {
mpc = (struct mpconf *)MPCONF(i);
if (mpc->magic == MPCONF_MAGIC) {
set_cpu_possible(i, true);
__cpu_number_map[i] = ++ncpu;
__cpu_logical_map[ncpu] = i;
pr_info("IP30: Slot: %d, PrID: %.8x, PhyID: %d, VirtID: %d\n",
i, mpc->prid, mpc->physid, mpc->virtid);
}
}
pr_info("IP30: Detected %d CPU(s) present.\n", ncpu);
/*
* Set the coherency algorithm to '5' (cacheable coherent
* exclusive on write). This is needed on IP30 SMP, especially
* for R14000 CPUs, otherwise, instruction bus errors will
* occur upon reaching userland.
*/
change_c0_config(CONF_CM_CMASK, CONF_CM_CACHABLE_COW);
}
static void __init ip30_smp_prepare_cpus(unsigned int max_cpus)
{
/* nothing to do here */
}
static int __init ip30_smp_boot_secondary(int cpu, struct task_struct *idle)
{
struct mpconf *mpc = (struct mpconf *)MPCONF(cpu);
/* Stack pointer (sp). */
mpc->stackaddr = (void *)__KSTK_TOS(idle);
/* Global pointer (gp). */
mpc->lnch_parm = task_thread_info(idle);
mb(); /* make sure stack and lparm are written */
/* Boot CPUx. */
mpc->launch = smp_bootstrap;
/* CPUx now executes smp_bootstrap, then ip30_smp_finish */
return 0;
}
static void __init ip30_smp_init_cpu(void)
{
ip30_per_cpu_init();
}
static void __init ip30_smp_finish(void)
{
enable_percpu_irq(get_c0_compare_int(), IRQ_TYPE_NONE);
local_irq_enable();
}
struct plat_smp_ops __read_mostly ip30_smp_ops = {
.send_ipi_single = ip30_smp_send_ipi_single,
.send_ipi_mask = ip30_smp_send_ipi_mask,
.smp_setup = ip30_smp_setup,
.prepare_cpus = ip30_smp_prepare_cpus,
.boot_secondary = ip30_smp_boot_secondary,
.init_secondary = ip30_smp_init_cpu,
.smp_finish = ip30_smp_finish,
.prepare_boot_cpu = ip30_smp_init_cpu,
};
| linux-master | arch/mips/sgi-ip30/ip30-smp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ip30-xtalk.c - Very basic Crosstalk (XIO) detection support.
* Copyright (C) 2004-2007 Stanislaw Skowronek <[email protected]>
* Copyright (C) 2009 Johannes Dickgreber <[email protected]>
* Copyright (C) 2007, 2014-2016 Joshua Kinard <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/platform_data/sgi-w1.h>
#include <linux/platform_data/xtalk-bridge.h>
#include <asm/xtalk/xwidget.h>
#include <asm/pci/bridge.h>
#define IP30_SWIN_BASE(widget) \
(0x0000000010000000 | (((unsigned long)(widget)) << 24))
#define IP30_RAW_SWIN_BASE(widget) (IO_BASE + IP30_SWIN_BASE(widget))
#define IP30_SWIN_SIZE (1 << 24)
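/*
 * Each Crosstalk widget is reachable through a fixed 16 MB "small window":
 * widget n starts at physical 0x10000000 + (n << 24), and IP30_RAW_SWIN_BASE
 * adds IO_BASE to get the (presumably uncached) kernel mapping.
 */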
#define IP30_WIDGET_XBOW _AC(0x0, UL) /* XBow is always 0 */
#define IP30_WIDGET_HEART _AC(0x8, UL) /* HEART is always 8 */
#define IP30_WIDGET_PCI_BASE _AC(0xf, UL) /* BaseIO PCI is always 15 */
#define XTALK_NODEV 0xffffffff
#define XBOW_REG_LINK_STAT_0 0x114
#define XBOW_REG_LINK_BLK_SIZE 0x40
#define XBOW_REG_LINK_ALIVE 0x80000000
#define HEART_INTR_ADDR 0x00000080
#define xtalk_read __raw_readl
static void bridge_platform_create(int widget, int masterwid)
{
struct xtalk_bridge_platform_data *bd;
struct sgi_w1_platform_data *wd;
struct platform_device *pdev_wd;
struct platform_device *pdev_bd;
struct resource w1_res;
wd = kzalloc(sizeof(*wd), GFP_KERNEL);
if (!wd) {
pr_warn("xtalk:%x bridge create out of memory\n", widget);
return;
}
snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx",
IP30_SWIN_BASE(widget));
memset(&w1_res, 0, sizeof(w1_res));
w1_res.start = IP30_SWIN_BASE(widget) +
offsetof(struct bridge_regs, b_nic);
w1_res.end = w1_res.start + 3;
w1_res.flags = IORESOURCE_MEM;
pdev_wd = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
if (!pdev_wd) {
pr_warn("xtalk:%x bridge create out of memory\n", widget);
goto err_kfree_wd;
}
if (platform_device_add_resources(pdev_wd, &w1_res, 1)) {
pr_warn("xtalk:%x bridge failed to add platform resources.\n", widget);
goto err_put_pdev_wd;
}
if (platform_device_add_data(pdev_wd, wd, sizeof(*wd))) {
pr_warn("xtalk:%x bridge failed to add platform data.\n", widget);
goto err_put_pdev_wd;
}
if (platform_device_add(pdev_wd)) {
pr_warn("xtalk:%x bridge failed to add platform device.\n", widget);
goto err_put_pdev_wd;
}
/* platform_device_add_data() duplicates the data */
kfree(wd);
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
if (!bd) {
pr_warn("xtalk:%x bridge create out of memory\n", widget);
goto err_unregister_pdev_wd;
}
pdev_bd = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
if (!pdev_bd) {
pr_warn("xtalk:%x bridge create out of memory\n", widget);
goto err_kfree_bd;
}
bd->bridge_addr = IP30_RAW_SWIN_BASE(widget);
bd->intr_addr = HEART_INTR_ADDR;
bd->nasid = 0;
bd->masterwid = masterwid;
bd->mem.name = "Bridge PCI MEM";
bd->mem.start = IP30_SWIN_BASE(widget) + BRIDGE_DEVIO0;
bd->mem.end = IP30_SWIN_BASE(widget) + IP30_SWIN_SIZE - 1;
bd->mem.flags = IORESOURCE_MEM;
bd->mem_offset = IP30_SWIN_BASE(widget);
bd->io.name = "Bridge PCI IO";
bd->io.start = IP30_SWIN_BASE(widget) + BRIDGE_DEVIO0;
bd->io.end = IP30_SWIN_BASE(widget) + IP30_SWIN_SIZE - 1;
bd->io.flags = IORESOURCE_IO;
bd->io_offset = IP30_SWIN_BASE(widget);
if (platform_device_add_data(pdev_bd, bd, sizeof(*bd))) {
pr_warn("xtalk:%x bridge failed to add platform data.\n", widget);
goto err_put_pdev_bd;
}
if (platform_device_add(pdev_bd)) {
pr_warn("xtalk:%x bridge failed to add platform device.\n", widget);
goto err_put_pdev_bd;
}
/* platform_device_add_data() duplicates the data */
kfree(bd);
pr_info("xtalk:%x bridge widget\n", widget);
return;
err_put_pdev_bd:
platform_device_put(pdev_bd);
err_kfree_bd:
kfree(bd);
err_unregister_pdev_wd:
platform_device_unregister(pdev_wd);
return;
err_put_pdev_wd:
platform_device_put(pdev_wd);
err_kfree_wd:
kfree(wd);
return;
}
static unsigned int __init xbow_widget_active(s8 wid)
{
unsigned int link_stat;
link_stat = xtalk_read((void *)(IP30_RAW_SWIN_BASE(IP30_WIDGET_XBOW) +
XBOW_REG_LINK_STAT_0 +
XBOW_REG_LINK_BLK_SIZE *
(wid - 8)));
return (link_stat & XBOW_REG_LINK_ALIVE) ? 1 : 0;
}
static void __init xtalk_init_widget(s8 wid, s8 masterwid)
{
xwidget_part_num_t partnum;
widgetreg_t widget_id;
if (!xbow_widget_active(wid))
return;
widget_id = xtalk_read((void *)(IP30_RAW_SWIN_BASE(wid) + WIDGET_ID));
partnum = XWIDGET_PART_NUM(widget_id);
switch (partnum) {
case BRIDGE_WIDGET_PART_NUM:
case XBRIDGE_WIDGET_PART_NUM:
bridge_platform_create(wid, masterwid);
break;
default:
pr_info("xtalk:%x unknown widget (0x%x)\n", wid, partnum);
break;
}
}
static int __init ip30_xtalk_init(void)
{
int i;
/*
* Walk widget IDs backwards so that BaseIO is probed first. This
* ensures that the BaseIO IOC3 is always detected as eth0.
*/
for (i = IP30_WIDGET_PCI_BASE; i > IP30_WIDGET_HEART; i--)
xtalk_init_widget(i, IP30_WIDGET_HEART);
return 0;
}
arch_initcall(ip30_xtalk_init);
| linux-master | arch/mips/sgi-ip30/ip30-xtalk.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ip30-irq.c: Highlevel interrupt handling for IP30 architecture.
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
#include <linux/types.h>
#include <asm/irq_cpu.h>
#include <asm/sgi/heart.h>
#include "ip30-common.h"
struct heart_irq_data {
u64 *irq_mask;
int cpu;
};
static DECLARE_BITMAP(heart_irq_map, HEART_NUM_IRQS);
static DEFINE_PER_CPU(unsigned long, irq_enable_mask);
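/*
 * Hand out the lowest free HEART interrupt bit.  The find/test_and_set
 * retry loop below keeps the allocation safe against concurrent callers
 * without taking a lock.
 */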
static inline int heart_alloc_int(void)
{
int bit;
again:
bit = find_first_zero_bit(heart_irq_map, HEART_NUM_IRQS);
if (bit >= HEART_NUM_IRQS)
return -ENOSPC;
if (test_and_set_bit(bit, heart_irq_map))
goto again;
return bit;
}
static void ip30_error_irq(struct irq_desc *desc)
{
u64 pending, mask, cause, error_irqs, err_reg;
int cpu = smp_processor_id();
int i;
pending = heart_read(&heart_regs->isr);
mask = heart_read(&heart_regs->imr[cpu]);
cause = heart_read(&heart_regs->cause);
error_irqs = (pending & HEART_L4_INT_MASK & mask);
/* Bail if there's nothing to process (how did we get here, then?) */
if (unlikely(!error_irqs))
return;
/* Prevent any of the error IRQs from firing again. */
heart_write(mask & ~(pending), &heart_regs->imr[cpu]);
/* Ack all error IRQs. */
heart_write(HEART_L4_INT_MASK, &heart_regs->clear_isr);
/*
* If we also have a cause value, then something happened, so loop
* through the error IRQs and report a "heart attack" for each one
* and print the value of the HEART cause register. This is really
* primitive right now, but it should hopefully work until a more
* robust error handling routine can be put together.
*
* Refer to heart.h for the HC_* macros to work out the cause
* that got us here.
*/
if (cause) {
pr_alert("IP30: CPU%d: HEART ATTACK! ISR = 0x%.16llx, IMR = 0x%.16llx, CAUSE = 0x%.16llx\n",
cpu, pending, mask, cause);
if (cause & HC_COR_MEM_ERR) {
err_reg = heart_read(&heart_regs->mem_err_addr);
pr_alert(" HEART_MEMERR_ADDR = 0x%.16llx\n", err_reg);
}
/* i = 63; i >= 51; i-- */
for (i = HEART_ERR_MASK_END; i >= HEART_ERR_MASK_START; i--)
if ((pending >> i) & 1)
pr_alert(" HEART Error IRQ #%d\n", i);
/* XXX: Seems possible to loop forever here, so panic(). */
panic("IP30: Fatal Error !\n");
}
/* Unmask the error IRQs. */
heart_write(mask, &heart_regs->imr[cpu]);
}
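/*
 * Normal (non-error) HEART interrupt dispatch: IPIs are handled inline,
 * everything else is forwarded to the HEART irq domain, one (lowest
 * pending) source per invocation.
 */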
static void ip30_normal_irq(struct irq_desc *desc)
{
int cpu = smp_processor_id();
struct irq_domain *domain;
u64 pend, mask;
int ret;
pend = heart_read(&heart_regs->isr);
mask = (heart_read(&heart_regs->imr[cpu]) &
(HEART_L0_INT_MASK | HEART_L1_INT_MASK | HEART_L2_INT_MASK));
pend &= mask;
if (unlikely(!pend))
return;
#ifdef CONFIG_SMP
if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_0)) {
heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0),
&heart_regs->clear_isr);
scheduler_ipi();
} else if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_1)) {
heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_1),
&heart_regs->clear_isr);
scheduler_ipi();
} else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_0)) {
heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0),
&heart_regs->clear_isr);
generic_smp_call_function_interrupt();
} else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_1)) {
heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_1),
&heart_regs->clear_isr);
generic_smp_call_function_interrupt();
} else
#endif
{
domain = irq_desc_get_handler_data(desc);
ret = generic_handle_domain_irq(domain, __ffs(pend));
if (ret)
spurious_interrupt();
}
}
static void ip30_ack_heart_irq(struct irq_data *d)
{
heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}
static void ip30_mask_heart_irq(struct irq_data *d)
{
struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
clear_bit(d->hwirq, mask);
heart_write(*mask, &heart_regs->imr[hd->cpu]);
}
static void ip30_mask_and_ack_heart_irq(struct irq_data *d)
{
struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
clear_bit(d->hwirq, mask);
heart_write(*mask, &heart_regs->imr[hd->cpu]);
heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}
static void ip30_unmask_heart_irq(struct irq_data *d)
{
struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
set_bit(d->hwirq, mask);
heart_write(*mask, &heart_regs->imr[hd->cpu]);
}
static int ip30_set_heart_irq_affinity(struct irq_data *d,
const struct cpumask *mask, bool force)
{
struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
if (!hd)
return -EINVAL;
if (irqd_is_started(d))
ip30_mask_and_ack_heart_irq(d);
hd->cpu = cpumask_first_and(mask, cpu_online_mask);
if (irqd_is_started(d))
ip30_unmask_heart_irq(d);
irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
return 0;
}
static struct irq_chip heart_irq_chip = {
.name = "HEART",
.irq_ack = ip30_ack_heart_irq,
.irq_mask = ip30_mask_heart_irq,
.irq_mask_ack = ip30_mask_and_ack_heart_irq,
.irq_unmask = ip30_unmask_heart_irq,
.irq_set_affinity = ip30_set_heart_irq_affinity,
};
static int heart_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct irq_alloc_info *info = arg;
struct heart_irq_data *hd;
int hwirq;
if (nr_irqs > 1 || !info)
return -EINVAL;
hd = kzalloc(sizeof(*hd), GFP_KERNEL);
if (!hd)
return -ENOMEM;
hwirq = heart_alloc_int();
if (hwirq < 0) {
kfree(hd);
return -EAGAIN;
}
irq_domain_set_info(domain, virq, hwirq, &heart_irq_chip, hd,
handle_level_irq, NULL, NULL);
return 0;
}
static void heart_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *irqd;
if (nr_irqs > 1)
return;
irqd = irq_domain_get_irq_data(domain, virq);
if (irqd) {
clear_bit(irqd->hwirq, heart_irq_map);
kfree(irqd->chip_data);
}
}
static const struct irq_domain_ops heart_domain_ops = {
.alloc = heart_domain_alloc,
.free = heart_domain_free,
};
void __init ip30_install_ipi(void)
{
int cpu = smp_processor_id();
unsigned long *mask = &per_cpu(irq_enable_mask, cpu);
set_bit(HEART_L2_INT_RESCHED_CPU_0 + cpu, mask);
heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0 + cpu),
&heart_regs->clear_isr);
set_bit(HEART_L2_INT_CALL_CPU_0 + cpu, mask);
heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0 + cpu),
&heart_regs->clear_isr);
heart_write(*mask, &heart_regs->imr[cpu]);
}
void __init arch_init_irq(void)
{
struct irq_domain *domain;
struct fwnode_handle *fn;
unsigned long *mask;
int i;
mips_cpu_irq_init();
/* Mask all IRQs. */
heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[0]);
heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[1]);
heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[2]);
heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[3]);
/* Ack everything. */
heart_write(HEART_ACK_ALL_MASK, &heart_regs->clear_isr);
/* Enable specific HEART error IRQs for each CPU. */
mask = &per_cpu(irq_enable_mask, 0);
*mask |= HEART_CPU0_ERR_MASK;
heart_write(*mask, &heart_regs->imr[0]);
mask = &per_cpu(irq_enable_mask, 1);
*mask |= HEART_CPU1_ERR_MASK;
heart_write(*mask, &heart_regs->imr[1]);
/*
* Some HEART bits are reserved by hardware or by software convention.
* Mark these as reserved right away so they won't be accidentally
* used later.
*/
set_bit(HEART_L0_INT_GENERIC, heart_irq_map);
set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_0, heart_irq_map);
set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_1, heart_irq_map);
set_bit(HEART_L2_INT_RESCHED_CPU_0, heart_irq_map);
set_bit(HEART_L2_INT_RESCHED_CPU_1, heart_irq_map);
set_bit(HEART_L2_INT_CALL_CPU_0, heart_irq_map);
set_bit(HEART_L2_INT_CALL_CPU_1, heart_irq_map);
set_bit(HEART_L3_INT_TIMER, heart_irq_map);
/* Reserve the error interrupts (#51 to #63). */
for (i = HEART_L4_INT_XWID_ERR_9; i <= HEART_L4_INT_HEART_EXCP; i++)
set_bit(i, heart_irq_map);
fn = irq_domain_alloc_named_fwnode("HEART");
WARN_ON(fn == NULL);
if (!fn)
return;
domain = irq_domain_create_linear(fn, HEART_NUM_IRQS,
&heart_domain_ops, NULL);
WARN_ON(domain == NULL);
if (!domain)
return;
irq_set_default_host(domain);
irq_set_percpu_devid(IP30_HEART_L0_IRQ);
irq_set_chained_handler_and_data(IP30_HEART_L0_IRQ, ip30_normal_irq,
domain);
irq_set_percpu_devid(IP30_HEART_L1_IRQ);
irq_set_chained_handler_and_data(IP30_HEART_L1_IRQ, ip30_normal_irq,
domain);
irq_set_percpu_devid(IP30_HEART_L2_IRQ);
irq_set_chained_handler_and_data(IP30_HEART_L2_IRQ, ip30_normal_irq,
domain);
irq_set_percpu_devid(IP30_HEART_ERR_IRQ);
irq_set_chained_handler_and_data(IP30_HEART_ERR_IRQ, ip30_error_irq,
domain);
}
| linux-master | arch/mips/sgi-ip30/ip30-irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ip30-timer.c: Clocksource/clockevent support for the
* HEART chip in SGI Octane (IP30) systems.
*
* Copyright (C) 2004-2007 Stanislaw Skowronek <[email protected]>
* Copyright (C) 2009 Johannes Dickgreber <[email protected]>
* Copyright (C) 2011 Joshua Kinard <[email protected]>
*/
#include <linux/clocksource.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched_clock.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
#include <asm/sgi/heart.h>
static u64 ip30_heart_counter_read(struct clocksource *cs)
{
return heart_read(&heart_regs->count);
}
struct clocksource ip30_heart_clocksource = {
.name = "HEART",
.rating = 400,
.read = ip30_heart_counter_read,
.mask = CLOCKSOURCE_MASK(52),
.flags = (CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_VALID_FOR_HRES),
};
static u64 notrace ip30_heart_read_sched_clock(void)
{
return heart_read(&heart_regs->count);
}
static void __init ip30_heart_clocksource_init(void)
{
struct clocksource *cs = &ip30_heart_clocksource;
clocksource_register_hz(cs, HEART_CYCLES_PER_SEC);
sched_clock_register(ip30_heart_read_sched_clock, 52,
HEART_CYCLES_PER_SEC);
}
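/*
 * The CP0 count/compare clockevent stays in use for timer ticks; it is
 * rewired here as a per-CPU interrupt so each CPU gets its own timer,
 * and cp0_timer_irq_installed is set, which should keep the generic
 * cevt-r4k code from requesting the IRQ a second time.
 */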
void __init plat_time_init(void)
{
int irq = get_c0_compare_int();
cp0_timer_irq_installed = 1;
c0_compare_irqaction.percpu_dev_id = &mips_clockevent_device;
c0_compare_irqaction.flags &= ~IRQF_SHARED;
irq_set_handler(irq, handle_percpu_devid_irq);
irq_set_percpu_devid(irq);
setup_percpu_irq(irq, &c0_compare_irqaction);
enable_percpu_irq(irq, IRQ_TYPE_NONE);
ip30_heart_clocksource_init();
}
| linux-master | arch/mips/sgi-ip30/ip30-timer.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <asm/sn/ioc3.h>
static inline struct ioc3_uartregs *console_uart(void)
{
struct ioc3 *ioc3;
ioc3 = (struct ioc3 *)((void *)(0x900000001f600000));
return &ioc3->sregs.uarta;
}
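/*
 * Early console output: busy-wait until the line status register reports
 * transmit-holding-register empty (bit 0x20), then write the byte.  The
 * hard-coded address above is assumed to be the BaseIO IOC3 UART.
 */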
void prom_putchar(char c)
{
struct ioc3_uartregs *uart = console_uart();
while ((readb(&uart->iu_lsr) & 0x20) == 0)
cpu_relax();
writeb(c, &uart->iu_thr);
}
| linux-master | arch/mips/sgi-ip30/ip30-console.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Registration of Cobalt LED platform device.
*
* Copyright (C) 2007 Yoichi Yuasa <[email protected]>
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <cobalt.h>
static struct resource cobalt_led_resource __initdata = {
.start = 0x1c000000,
.end = 0x1c000000,
.flags = IORESOURCE_MEM,
};
static __init int cobalt_led_add(void)
{
struct platform_device *pdev;
int retval;
if (cobalt_board_id == COBALT_BRD_ID_QUBE1 ||
cobalt_board_id == COBALT_BRD_ID_QUBE2)
pdev = platform_device_alloc("cobalt-qube-leds", -1);
else
pdev = platform_device_alloc("cobalt-raq-leds", -1);
if (!pdev)
return -ENOMEM;
retval = platform_device_add_resources(pdev, &cobalt_led_resource, 1);
if (retval)
goto err_free_device;
retval = platform_device_add(pdev);
if (retval)
goto err_free_device;
return 0;
err_free_device:
platform_device_put(pdev);
return retval;
}
device_initcall(cobalt_led_add);
| linux-master | arch/mips/cobalt/led.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Registration of Cobalt UART platform device.
*
* Copyright (C) 2007 Yoichi Yuasa <[email protected]>
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <cobalt.h>
#include <irq.h>
static struct resource cobalt_uart_resource[] __initdata = {
{
.start = 0x1c800000,
.end = 0x1c800007,
.flags = IORESOURCE_MEM,
},
{
.start = SERIAL_IRQ,
.end = SERIAL_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static struct plat_serial8250_port cobalt_serial8250_port[] = {
{
.irq = SERIAL_IRQ,
.uartclk = 18432000,
.iotype = UPIO_MEM,
.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
.mapbase = 0x1c800000,
},
{},
};
static __init int cobalt_uart_add(void)
{
struct platform_device *pdev;
int retval;
/*
* Cobalt Qube1 has no UART.
*/
if (cobalt_board_id == COBALT_BRD_ID_QUBE1)
return 0;
pdev = platform_device_alloc("serial8250", -1);
if (!pdev)
return -ENOMEM;
pdev->id = PLAT8250_DEV_PLATFORM;
pdev->dev.platform_data = cobalt_serial8250_port;
retval = platform_device_add_resources(pdev, cobalt_uart_resource, ARRAY_SIZE(cobalt_uart_resource));
if (retval)
goto err_free_device;
retval = platform_device_add(pdev);
if (retval)
goto err_free_device;
return 0;
err_free_device:
platform_device_put(pdev);
return retval;
}
device_initcall(cobalt_uart_add);
| linux-master | arch/mips/cobalt/serial.c |
/*
* IRQ vector handles
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997, 2003 by Ralf Baechle
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/i8259.h>
#include <asm/irq_cpu.h>
#include <asm/irq_gt641xx.h>
#include <asm/gt64120.h>
#include <irq.h>
asmlinkage void plat_irq_dispatch(void)
{
unsigned pending = read_c0_status() & read_c0_cause() & ST0_IM;
int irq;
if (pending & CAUSEF_IP2)
gt641xx_irq_dispatch();
else if (pending & CAUSEF_IP6) {
irq = i8259_irq();
if (irq < 0)
spurious_interrupt();
else
do_IRQ(irq);
} else if (pending & CAUSEF_IP3)
do_IRQ(MIPS_CPU_IRQ_BASE + 3);
else if (pending & CAUSEF_IP4)
do_IRQ(MIPS_CPU_IRQ_BASE + 4);
else if (pending & CAUSEF_IP5)
do_IRQ(MIPS_CPU_IRQ_BASE + 5);
else if (pending & CAUSEF_IP7)
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
else
spurious_interrupt();
}
void __init arch_init_irq(void)
{
mips_cpu_irq_init();
gt641xx_irq_init();
init_i8259_irqs();
if (request_irq(GT641XX_CASCADE_IRQ, no_action, IRQF_NO_THREAD,
"cascade", NULL)) {
pr_err("Failed to request irq %d (cascade)\n",
GT641XX_CASCADE_IRQ);
}
if (request_irq(I8259_CASCADE_IRQ, no_action, IRQF_NO_THREAD,
"cascade", NULL)) {
pr_err("Failed to request irq %d (cascade)\n",
I8259_CASCADE_IRQ);
}
}
| linux-master | arch/mips/cobalt/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Registration of Cobalt RTC platform device.
*
* Copyright (C) 2007 Yoichi Yuasa <[email protected]>
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/platform_device.h>
static struct resource cobalt_rtc_resource[] __initdata = {
{
.start = 0x70,
.end = 0x77,
.flags = IORESOURCE_IO,
},
{
.start = RTC_IRQ,
.end = RTC_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static __init int cobalt_rtc_add(void)
{
struct platform_device *pdev;
int retval;
pdev = platform_device_alloc("rtc_cmos", -1);
if (!pdev)
return -ENOMEM;
retval = platform_device_add_resources(pdev, cobalt_rtc_resource,
ARRAY_SIZE(cobalt_rtc_resource));
if (retval)
goto err_free_device;
retval = platform_device_add(pdev);
if (retval)
goto err_free_device;
return 0;
err_free_device:
platform_device_put(pdev);
return retval;
}
device_initcall(cobalt_rtc_add);
| linux-master | arch/mips/cobalt/rtc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cobalt buttons platform device.
*
* Copyright (C) 2007 Yoichi Yuasa <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/init.h>
static struct resource cobalt_buttons_resource __initdata = {
.start = 0x1d000000,
.end = 0x1d000003,
.flags = IORESOURCE_MEM,
};
static __init int cobalt_add_buttons(void)
{
struct platform_device *pd;
int error;
pd = platform_device_alloc("Cobalt buttons", -1);
if (!pd)
return -ENOMEM;
error = platform_device_add_resources(pd, &cobalt_buttons_resource, 1);
if (error)
goto err_free_device;
error = platform_device_add(pd);
if (error)
goto err_free_device;
return 0;
err_free_device:
platform_device_put(pd);
return error;
}
device_initcall(cobalt_add_buttons);
| linux-master | arch/mips/cobalt/buttons.c |
/*
* Register PCI controller.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1997, 2004, 05 by Ralf Baechle ([email protected])
* Copyright (C) 2001, 2002, 2003 by Liam Davies ([email protected])
*
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/gt64120.h>
extern struct pci_ops gt64xxx_pci0_ops;
static struct resource cobalt_mem_resource = {
.start = GT_DEF_PCI0_MEM0_BASE,
.end = GT_DEF_PCI0_MEM0_BASE + GT_DEF_PCI0_MEM0_SIZE - 1,
.name = "PCI memory",
.flags = IORESOURCE_MEM,
};
static struct resource cobalt_io_resource = {
.start = 0x1000,
.end = 0xffffffUL,
.name = "PCI I/O",
.flags = IORESOURCE_IO,
};
static struct pci_controller cobalt_pci_controller = {
.pci_ops = &gt64xxx_pci0_ops,
.mem_resource = &cobalt_mem_resource,
.io_resource = &cobalt_io_resource,
.io_offset = 0 - GT_DEF_PCI0_IO_BASE,
.io_map_base = CKSEG1ADDR(GT_DEF_PCI0_IO_BASE),
};
static int __init cobalt_pci_init(void)
{
register_pci_controller(&cobalt_pci_controller);
return 0;
}
arch_initcall(cobalt_pci_init);
| linux-master | arch/mips/cobalt/pci.c |
/*
* Cobalt Reset operations
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997 by Ralf Baechle
* Copyright (C) 2001 by Liam Davies ([email protected])
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/leds.h>
#include <asm/idle.h>
#include <asm/processor.h>
#include <cobalt.h>
#define RESET_PORT ((void __iomem *)CKSEG1ADDR(0x1c000000))
#define RESET 0x0f
DEFINE_LED_TRIGGER(power_off_led_trigger);
static int __init ledtrig_power_off_init(void)
{
led_trigger_register_simple("power-off", &power_off_led_trigger);
return 0;
}
device_initcall(ledtrig_power_off_init);
void cobalt_machine_halt(void)
{
/*
* turn on power off LED on RaQ
*/
led_trigger_event(power_off_led_trigger, LED_FULL);
local_irq_disable();
while (1) {
if (cpu_wait)
cpu_wait();
}
}
void cobalt_machine_restart(char *command)
{
writeb(RESET, RESET_PORT);
/* we should never get here */
cobalt_machine_halt();
}
| linux-master | arch/mips/cobalt/reset.c |
/*
* Setup pointers to hardware dependent routines.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1997, 2004, 05 by Ralf Baechle ([email protected])
* Copyright (C) 2001, 2002, 2003 by Liam Davies ([email protected])
*
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/memblock.h>
#include <linux/pm.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/gt64120.h>
#include <cobalt.h>
extern void cobalt_machine_restart(char *command);
extern void cobalt_machine_halt(void);
const char *get_system_type(void)
{
switch (cobalt_board_id) {
case COBALT_BRD_ID_QUBE1:
return "Cobalt Qube";
case COBALT_BRD_ID_RAQ1:
return "Cobalt RaQ";
case COBALT_BRD_ID_QUBE2:
return "Cobalt Qube2";
case COBALT_BRD_ID_RAQ2:
return "Cobalt RaQ2";
}
return "MIPS Cobalt";
}
/*
* Cobalt doesn't have PS/2 keyboard/mouse interfaces, so the
* keyboard controller is never used.
* The PCI-ISA bridge DMA controller is never used either.
*/
static struct resource cobalt_reserved_resources[] = {
{ /* dma1 */
.start = 0x00,
.end = 0x1f,
.name = "reserved",
.flags = IORESOURCE_BUSY | IORESOURCE_IO,
},
{ /* keyboard */
.start = 0x60,
.end = 0x6f,
.name = "reserved",
.flags = IORESOURCE_BUSY | IORESOURCE_IO,
},
{ /* dma page reg */
.start = 0x80,
.end = 0x8f,
.name = "reserved",
.flags = IORESOURCE_BUSY | IORESOURCE_IO,
},
{ /* dma2 */
.start = 0xc0,
.end = 0xdf,
.name = "reserved",
.flags = IORESOURCE_BUSY | IORESOURCE_IO,
},
};
void __init plat_mem_setup(void)
{
int i;
_machine_restart = cobalt_machine_restart;
_machine_halt = cobalt_machine_halt;
pm_power_off = cobalt_machine_halt;
set_io_port_base(CKSEG1ADDR(GT_DEF_PCI0_IO_BASE));
/* I/O port resource */
ioport_resource.end = 0x01ffffff;
/* These resources have been reserved by VIA SuperI/O chip. */
for (i = 0; i < ARRAY_SIZE(cobalt_reserved_resources); i++)
request_resource(&ioport_resource, cobalt_reserved_resources + i);
}
/*
* Prom init. We read our one and only communication with the firmware.
* Grab the amount of installed memory.
* Better boot loaders (CoLo) pass a command line too :-)
*/
void __init prom_init(void)
{
unsigned long memsz;
int argc, i;
char **argv;
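/* fw_arg0 packs the installed memory size in its upper bits and argc in the low 16 bits. */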
memsz = fw_arg0 & 0x7fff0000;
argc = fw_arg0 & 0x0000ffff;
argv = (char **)fw_arg1;
for (i = 1; i < argc; i++) {
strlcat(arcs_cmdline, argv[i], COMMAND_LINE_SIZE);
if (i < (argc - 1))
strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
}
memblock_add(0, memsz);
setup_8250_early_printk_port(CKSEG1ADDR(0x1c800000), 0, 0);
}
| linux-master | arch/mips/cobalt/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cobalt time initialization.
*
* Copyright (C) 2007 Yoichi Yuasa <[email protected]>
*/
#include <linux/i8253.h>
#include <linux/init.h>
#include <asm/gt64120.h>
#include <asm/time.h>
#define GT641XX_BASE_CLOCK 50000000 /* 50MHz */
void __init plat_time_init(void)
{
u32 start, end;
int i = HZ / 10;
setup_pit_timer();
gt641xx_set_base_clock(GT641XX_BASE_CLOCK);
/*
* MIPS counter frequency is measured during a 100msec interval
* using GT64111 timer0.
*/
while (!gt641xx_timer0_state())
;
start = read_c0_count();
while (i--)
while (!gt641xx_timer0_state())
;
end = read_c0_count();
mips_hpt_frequency = (end - start) * 10;
printk(KERN_INFO "MIPS counter frequency %dHz\n", mips_hpt_frequency);
}
| linux-master | arch/mips/cobalt/time.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Registration of Cobalt MTD device.
*
* Copyright (C) 2006 Yoichi Yuasa <[email protected]>
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
static struct mtd_partition cobalt_mtd_partitions[] = {
{
.name = "firmware",
.offset = 0x0,
.size = 0x80000,
},
};
static struct physmap_flash_data cobalt_flash_data = {
.width = 1,
.nr_parts = 1,
.parts = cobalt_mtd_partitions,
};
static struct resource cobalt_mtd_resource = {
.start = 0x1fc00000,
.end = 0x1fc7ffff,
.flags = IORESOURCE_MEM,
};
static struct platform_device cobalt_mtd = {
.name = "physmap-flash",
.dev = {
.platform_data = &cobalt_flash_data,
},
.num_resources = 1,
.resource = &cobalt_mtd_resource,
};
static int __init cobalt_mtd_init(void)
{
platform_device_register(&cobalt_mtd);
return 0;
}
device_initcall(cobalt_mtd_init);
| linux-master | arch/mips/cobalt/mtd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Registration of Cobalt LCD platform device.
*
* Copyright (C) 2008 Yoichi Yuasa <[email protected]>
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
static struct resource cobalt_lcd_resource __initdata = {
.start = 0x1f000000,
.end = 0x1f00001f,
.flags = IORESOURCE_MEM,
};
static __init int cobalt_lcd_add(void)
{
struct platform_device *pdev;
int retval;
pdev = platform_device_alloc("cobalt-lcd", -1);
if (!pdev)
return -ENOMEM;
retval = platform_device_add_resources(pdev, &cobalt_lcd_resource, 1);
if (retval)
goto err_free_device;
retval = platform_device_add(pdev);
if (retval)
goto err_free_device;
return 0;
err_free_device:
platform_device_put(pdev);
return retval;
}
device_initcall(cobalt_lcd_add);
| linux-master | arch/mips/cobalt/lcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* N64 IRQ
*
* Copyright (C) 2021 Lauri Kasanen
*/
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq_cpu.h>
void __init arch_init_irq(void)
{
mips_cpu_irq_init();
}
| linux-master | arch/mips/n64/irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Nintendo 64 init.
*
* Copyright (C) 2021 Lauri Kasanen
*/
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/platform_device.h>
#include <linux/platform_data/simplefb.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
#include <asm/fw/fw.h>
#include <asm/time.h>
#define IO_MEM_RESOURCE_START 0UL
#define IO_MEM_RESOURCE_END 0x1fffffffUL
/*
* System-specific IRQ names for clarity
*/
#define MIPS_CPU_IRQ(x) (MIPS_CPU_IRQ_BASE + (x))
#define MIPS_SOFTINT0_IRQ MIPS_CPU_IRQ(0)
#define MIPS_SOFTINT1_IRQ MIPS_CPU_IRQ(1)
#define RCP_IRQ MIPS_CPU_IRQ(2)
#define CART_IRQ MIPS_CPU_IRQ(3)
#define PRENMI_IRQ MIPS_CPU_IRQ(4)
#define RDBR_IRQ MIPS_CPU_IRQ(5)
#define RDBW_IRQ MIPS_CPU_IRQ(6)
#define TIMER_IRQ MIPS_CPU_IRQ(7)
static void __init iomem_resource_init(void)
{
iomem_resource.start = IO_MEM_RESOURCE_START;
iomem_resource.end = IO_MEM_RESOURCE_END;
}
const char *get_system_type(void)
{
return "Nintendo 64";
}
void __init prom_init(void)
{
fw_init_cmdline();
}
#define W 320
#define H 240
#define REG_BASE ((u32 *) CKSEG1ADDR(0x4400000))
static void __init n64rdp_write_reg(const u8 reg, const u32 value)
{
__raw_writel(value, REG_BASE + reg);
}
#undef REG_BASE
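/*
 * Register values for a 320x240, 16 bpp NTSC mode, written to the video
 * interface register block mapped above at 0x4400000.  Entry 1 is a
 * placeholder: the init loop below writes the framebuffer's physical
 * address there instead, presumably the VI origin register.
 */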
static const u32 ntsc_320[] __initconst = {
0x00013212, 0x00000000, 0x00000140, 0x00000200,
0x00000000, 0x03e52239, 0x0000020d, 0x00000c15,
0x0c150c15, 0x006c02ec, 0x002501ff, 0x000e0204,
0x00000200, 0x00000400
};
#define MI_REG_BASE 0x4300000
#define NUM_MI_REGS 4
#define AI_REG_BASE 0x4500000
#define NUM_AI_REGS 6
#define PI_REG_BASE 0x4600000
#define NUM_PI_REGS 5
#define SI_REG_BASE 0x4800000
#define NUM_SI_REGS 7
static int __init n64_platform_init(void)
{
static const char simplefb_resname[] = "FB";
static const struct simplefb_platform_data mode = {
.width = W,
.height = H,
.stride = W * 2,
.format = "r5g5b5a1"
};
struct resource res[3];
void *orig;
unsigned long phys;
unsigned i;
memset(res, 0, sizeof(struct resource) * 3);
res[0].flags = IORESOURCE_MEM;
res[0].start = MI_REG_BASE;
res[0].end = MI_REG_BASE + NUM_MI_REGS * 4 - 1;
res[1].flags = IORESOURCE_MEM;
res[1].start = AI_REG_BASE;
res[1].end = AI_REG_BASE + NUM_AI_REGS * 4 - 1;
res[2].flags = IORESOURCE_IRQ;
res[2].start = RCP_IRQ;
res[2].end = RCP_IRQ;
platform_device_register_simple("n64audio", -1, res, 3);
memset(&res[0], 0, sizeof(res[0]));
res[0].flags = IORESOURCE_MEM;
res[0].start = PI_REG_BASE;
res[0].end = PI_REG_BASE + NUM_PI_REGS * 4 - 1;
platform_device_register_simple("n64cart", -1, res, 1);
memset(&res[0], 0, sizeof(res[0]));
res[0].flags = IORESOURCE_MEM;
res[0].start = SI_REG_BASE;
res[0].end = SI_REG_BASE + NUM_SI_REGS * 4 - 1;
platform_device_register_simple("n64joy", -1, res, 1);
/* The framebuffer needs 64-byte alignment */
orig = kzalloc(W * H * 2 + 63, GFP_DMA | GFP_KERNEL);
if (!orig)
return -ENOMEM;
phys = virt_to_phys(orig);
phys += 63;
phys &= ~63;
for (i = 0; i < ARRAY_SIZE(ntsc_320); i++) {
if (i == 1)
n64rdp_write_reg(i, phys);
else
n64rdp_write_reg(i, ntsc_320[i]);
}
/* setup IORESOURCE_MEM as framebuffer memory */
memset(&res[0], 0, sizeof(res[0]));
res[0].flags = IORESOURCE_MEM;
res[0].name = simplefb_resname;
res[0].start = phys;
res[0].end = phys + W * H * 2 - 1;
platform_device_register_resndata(NULL, "simple-framebuffer", 0,
&res[0], 1, &mode, sizeof(mode));
return 0;
}
#undef W
#undef H
arch_initcall(n64_platform_init);
void __init plat_mem_setup(void)
{
iomem_resource_init();
memblock_add(0x0, 8 * 1024 * 1024); /* Bootloader blocks the 4mb config */
}
void __init plat_time_init(void)
{
/* 93.75 MHz cpu, count register runs at half rate */
mips_hpt_frequency = 93750000 / 2;
}
| linux-master | arch/mips/n64/init.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1999, 2000, 05, 06 Ralf Baechle ([email protected])
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/bcd.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched_clock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/param.h>
#include <linux/smp.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <asm/time.h>
#include <asm/sgialib.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/arch.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include "ip27-common.h"
static int rt_next_event(unsigned long delta, struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
int slice = cputoslice(cpu);
unsigned long cnt;
cnt = LOCAL_HUB_L(PI_RT_COUNT);
cnt += delta;
LOCAL_HUB_S(PI_RT_COMPARE_A + PI_COUNT_OFFSET * slice, cnt);
return LOCAL_HUB_L(PI_RT_COUNT) >= cnt ? -ETIME : 0;
}
static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
static DEFINE_PER_CPU(char [11], hub_rt_name);
static irqreturn_t hub_rt_counter_handler(int irq, void *dev_id)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
int slice = cputoslice(cpu);
/*
* Ack
*/
LOCAL_HUB_S(PI_RT_PEND_A + PI_COUNT_OFFSET * slice, 0);
cd->event_handler(cd);
return IRQ_HANDLED;
}
struct irqaction hub_rt_irqaction = {
.handler = hub_rt_counter_handler,
.percpu_dev_id = &hub_rt_clockevent,
.flags = IRQF_PERCPU | IRQF_TIMER,
.name = "hub-rt",
};
/*
* This is a hack; we really need to figure these values out dynamically
*
* Since 800 ns works very well with various HUB frequencies, such as
* 360, 380, 390 and 400 MHz, we use an 800 ns RTC cycle time.
*
* Ralf: which clock rate is used to feed the counter?
*/
#define NSEC_PER_CYCLE 800
#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE)
void hub_rt_clock_event_init(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
unsigned char *name = per_cpu(hub_rt_name, cpu);
sprintf(name, "hub-rt %d", cpu);
cd->name = name;
cd->features = CLOCK_EVT_FEAT_ONESHOT;
clockevent_set_clock(cd, CYCLES_PER_SEC);
cd->max_delta_ns = clockevent_delta2ns(0xfffffffffffff, cd);
cd->max_delta_ticks = 0xfffffffffffff;
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->min_delta_ticks = 0x300;
cd->rating = 200;
cd->irq = IP27_RT_TIMER_IRQ;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = rt_next_event;
clockevents_register_device(cd);
enable_percpu_irq(IP27_RT_TIMER_IRQ, IRQ_TYPE_NONE);
}
static void __init hub_rt_clock_event_global_init(void)
{
irq_set_handler(IP27_RT_TIMER_IRQ, handle_percpu_devid_irq);
irq_set_percpu_devid(IP27_RT_TIMER_IRQ);
setup_percpu_irq(IP27_RT_TIMER_IRQ, &hub_rt_irqaction);
}
static u64 hub_rt_read(struct clocksource *cs)
{
return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
}
struct clocksource hub_rt_clocksource = {
.name = "HUB-RT",
.rating = 200,
.read = hub_rt_read,
.mask = CLOCKSOURCE_MASK(52),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static u64 notrace hub_rt_read_sched_clock(void)
{
return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
}
static void __init hub_rt_clocksource_init(void)
{
struct clocksource *cs = &hub_rt_clocksource;
clocksource_register_hz(cs, CYCLES_PER_SEC);
sched_clock_register(hub_rt_read_sched_clock, 52, CYCLES_PER_SEC);
}
void __init plat_time_init(void)
{
hub_rt_clocksource_init();
hub_rt_clock_event_global_init();
hub_rt_clock_event_init();
}
void hub_rtc_init(nasid_t nasid)
{
/*
* We only need to initialize the current node.
* If this is not the current node then it is a cpuless
* node and timeouts will not happen there.
*/
if (get_nasid() == nasid) {
LOCAL_HUB_S(PI_RT_EN_A, 1);
LOCAL_HUB_S(PI_RT_EN_B, 1);
LOCAL_HUB_S(PI_PROF_EN_A, 0);
LOCAL_HUB_S(PI_PROF_EN_B, 0);
LOCAL_HUB_S(PI_RT_COUNT, 0);
LOCAL_HUB_S(PI_RT_PEND_A, 0);
LOCAL_HUB_S(PI_RT_PEND_B, 0);
}
}
| linux-master | arch/mips/sgi-ip27/ip27-timer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Ported from IRIX to Linux by Kanoj Sarcar, 06/08/00.
* Copyright 2000 - 2001 Silicon Graphics, Inc.
* Copyright 2000 - 2001 Kanoj Sarcar ([email protected])
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/kernel.h>
#include <linux/nodemask.h>
#include <linux/string.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/sn/types.h>
#include <asm/sn/arch.h>
#include <asm/sn/gda.h>
#include <asm/sn/mapped_kernel.h>
#include "ip27-common.h"
static nodemask_t ktext_repmask;
/*
* XXX - This needs to be much smarter about where it puts copies of the
* kernel. For example, we should never put a copy on a headless node,
* and we should respect the topology of the machine.
*/
void __init setup_replication_mask(void)
{
/* Set only the master cnode's bit. The master cnode is always 0. */
nodes_clear(ktext_repmask);
node_set(0, ktext_repmask);
#ifdef CONFIG_REPLICATE_KTEXT
#ifndef CONFIG_MAPPED_KERNEL
#error Kernel replication works with mapped kernel support. No calias support.
#endif
{
nasid_t nasid;
for_each_online_node(nasid) {
if (nasid == 0)
continue;
/* Advertise that we have a copy of the kernel */
node_set(nasid, ktext_repmask);
}
}
#endif
/* Set up a GDA pointer to the replication mask. */
GDA->g_ktext_repmask = &ktext_repmask;
}
static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid)
{
kern_vars_t *kvp;
kvp = &hub_data(client_nasid)->kern_vars;
KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp;
kvp->kv_magic = KV_MAGIC;
kvp->kv_ro_nasid = server_nasid;
kvp->kv_rw_nasid = master_nasid;
kvp->kv_ro_baseaddr = NODE_CAC_BASE(server_nasid);
kvp->kv_rw_baseaddr = NODE_CAC_BASE(master_nasid);
printk("REPLICATION: ON nasid %d, ktext from nasid %d, kdata from nasid %d\n", client_nasid, server_nasid, master_nasid);
}
/* XXX - When the BTE works, we should use it instead of this. */
static __init void copy_kernel(nasid_t dest_nasid)
{
unsigned long dest_kern_start, source_start, source_end, kern_size;
source_start = (unsigned long) _stext;
source_end = (unsigned long) _etext;
kern_size = source_end - source_start;
dest_kern_start = CHANGE_ADDR_NASID(MAPPED_KERN_RO_TO_K0(source_start),
dest_nasid);
memcpy((void *)dest_kern_start, (void *)source_start, kern_size);
}
void __init replicate_kernel_text(void)
{
nasid_t client_nasid;
nasid_t server_nasid;
server_nasid = master_nasid;
/* Record where the master node should get its kernel text */
set_ktext_source(master_nasid, master_nasid);
for_each_online_node(client_nasid) {
if (client_nasid == 0)
continue;
/* Check if this node should get a copy of the kernel */
if (node_isset(client_nasid, ktext_repmask)) {
server_nasid = client_nasid;
copy_kernel(server_nasid);
}
/* Record where this node should get its kernel text */
set_ktext_source(client_nasid, server_nasid);
}
}
/*
* Return pfn of first free page of memory on a node. PROM may allocate
* data structures on the first couple of pages of the first slot of each
* node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
*/
unsigned long node_getfirstfree(nasid_t nasid)
{
unsigned long loadbase = REP_BASE;
unsigned long offset;
#ifdef CONFIG_MAPPED_KERNEL
loadbase += 16777216;
#endif
offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase;
if ((nasid == 0) || (node_isset(nasid, ktext_repmask)))
return TO_NODE(nasid, offset) >> PAGE_SHIFT;
else
return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT;
}
| linux-master | arch/mips/sgi-ip27/ip27-klnuma.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Reset an IP27.
*
* Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/smp.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/pm.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/reboot.h>
#include <asm/sgialib.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/gda.h>
#include "ip27-common.h"
void machine_restart(char *command) __noreturn;
void machine_halt(void) __noreturn;
void machine_power_off(void) __noreturn;
#define noreturn while(1); /* Silence gcc. */
/* XXX How to pass the reboot command to the firmware??? */
static void ip27_machine_restart(char *command)
{
#if 0
int i;
#endif
printk("Reboot started from CPU %d\n", smp_processor_id());
#ifdef CONFIG_SMP
smp_send_stop();
#endif
#if 0
for_each_online_node(i)
REMOTE_HUB_S(i, PROMOP_REG, PROMOP_REBOOT);
#else
LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
#endif
noreturn;
}
static void ip27_machine_halt(void)
{
int i;
#ifdef CONFIG_SMP
smp_send_stop();
#endif
for_each_online_node(i)
REMOTE_HUB_S(i, PROMOP_REG, PROMOP_RESTART);
LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
noreturn;
}
static void ip27_machine_power_off(void)
{
/* To do ... */
noreturn;
}
void ip27_reboot_setup(void)
{
_machine_restart = ip27_machine_restart;
_machine_halt = ip27_machine_halt;
pm_power_off = ip27_machine_power_off;
}
| linux-master | arch/mips/sgi-ip27/ip27-reset.c |
/*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* Copyright (C) 2000 - 2001 by Kanoj Sarcar ([email protected])
* Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/io.h>
#include <asm/sgialib.h>
#include <asm/time.h>
#include <asm/sn/agent.h>
#include <asm/sn/types.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/ioc3.h>
#include <asm/mipsregs.h>
#include <asm/sn/gda.h>
#include <asm/sn/intr.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/sn/launch.h>
#include <asm/sn/mapped_kernel.h>
#include "ip27-common.h"
#define CPU_NONE (cpuid_t)-1
static DECLARE_BITMAP(hub_init_mask, MAX_NUMNODES);
nasid_t master_nasid = INVALID_NASID;
struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
EXPORT_SYMBOL_GPL(sn_cpu_info);
static void per_hub_init(nasid_t nasid)
{
struct hub_data *hub = hub_data(nasid);
cpumask_set_cpu(smp_processor_id(), &hub->h_cpus);
if (test_and_set_bit(nasid, hub_init_mask))
return;
/*
* Set CRB timeout at 5ms, (< PI timeout of 10ms)
*/
REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
hub_rtc_init(nasid);
if (nasid) {
/* copy exception handlers from first node to current node */
memcpy((void *)NODE_OFFSET_TO_K0(nasid, 0),
(void *)CKSEG0, 0x200);
__flush_cache_all();
/* switch to node local exception handlers */
REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
}
}
void per_cpu_init(void)
{
int cpu = smp_processor_id();
nasid_t nasid = get_nasid();
clear_c0_status(ST0_IM);
per_hub_init(nasid);
pr_info("CPU %d clock is %dMHz.\n", cpu, sn_cpu_info[cpu].p_speed);
install_ipi();
/* Install our NMI handler if symmon hasn't installed one. */
install_cpu_nmi_handler(cputoslice(cpu));
enable_percpu_irq(IP27_HUB_PEND0_IRQ, IRQ_TYPE_NONE);
enable_percpu_irq(IP27_HUB_PEND1_IRQ, IRQ_TYPE_NONE);
}
void __init plat_mem_setup(void)
{
u64 p, e, n_mode;
nasid_t nid;
register_smp_ops(&ip27_smp_ops);
ip27_reboot_setup();
/*
* hub_rtc init and cpu clock intr enabled for later calibrate_delay.
*/
nid = get_nasid();
printk("IP27: Running on node %d.\n", nid);
p = LOCAL_HUB_L(PI_CPU_PRESENT_A) & 1;
e = LOCAL_HUB_L(PI_CPU_ENABLE_A) & 1;
printk("Node %d has %s primary CPU%s.\n", nid,
p ? "a" : "no",
e ? ", CPU is running" : "");
p = LOCAL_HUB_L(PI_CPU_PRESENT_B) & 1;
e = LOCAL_HUB_L(PI_CPU_ENABLE_B) & 1;
printk("Node %d has %s secondary CPU%s.\n", nid,
p ? "a" : "no",
e ? ", CPU is running" : "");
/*
* Try to catch kernel misconfigurations and give the user an
* indication of which option to select.
*/
n_mode = LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_MORENODES_MASK;
printk("Machine is in %c mode.\n", n_mode ? 'N' : 'M');
#ifdef CONFIG_SGI_SN_N_MODE
if (!n_mode)
panic("Kernel compiled for M mode.");
#else
if (n_mode)
panic("Kernel compiled for N mode.");
#endif
ioport_resource.start = 0;
ioport_resource.end = ~0UL;
set_io_port_base(IO_BASE);
}
const char *get_system_type(void)
{
return "SGI Origin";
}
void __init prom_init(void)
{
prom_init_cmdline(fw_arg0, (LONG *)fw_arg1);
prom_meminit();
}
| linux-master | arch/mips/sgi-ip27/ip27-init.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000, 05 by Ralf Baechle ([email protected])
* Copyright (C) 2000 by Silicon Graphics, Inc.
* Copyright (C) 2004 by Christoph Hellwig
*
* On SGI IP27 the ARC memory configuration data is completely bogus but
* alternate easier to use mechanisms are available.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/sn/arch.h>
#include <asm/sn/agent.h>
#include <asm/sn/klconfig.h>
#include "ip27-common.h"
#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
struct node_data *__node_data[MAX_NUMNODES];
EXPORT_SYMBOL(__node_data);
static u64 gen_region_mask(void)
{
int region_shift;
u64 region_mask;
nasid_t nasid;
region_shift = get_region_shift();
region_mask = 0;
for_each_online_node(nasid)
region_mask |= BIT_ULL(nasid >> region_shift);
return region_mask;
}
#define rou_rflag rou_flags
static int router_distance;
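/*
 * Depth-first walk of the router graph: recurse from router_a through its
 * ports and shrink router_distance whenever router_b is reached at a
 * smaller depth.  rou_rflag marks routers on the current path so cycles
 * are not followed.
 */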
static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
{
klrou_t *router;
lboard_t *brd;
int port;
if (router_a->rou_rflag == 1)
return;
if (depth >= router_distance)
return;
router_a->rou_rflag = 1;
for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
if (router_a->rou_port[port].port_nasid == INVALID_NASID)
continue;
brd = (lboard_t *)NODE_OFFSET_TO_K0(
router_a->rou_port[port].port_nasid,
router_a->rou_port[port].port_offset);
if (brd->brd_type == KLTYPE_ROUTER) {
router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
if (router == router_b) {
if (depth < router_distance)
router_distance = depth;
}
else
router_recurse(router, router_b, depth + 1);
}
}
router_a->rou_rflag = 0;
}
unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
EXPORT_SYMBOL(__node_distances);
static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
{
klrou_t *router, *router_a = NULL, *router_b = NULL;
lboard_t *brd, *dest_brd;
nasid_t nasid;
int port;
/* Figure out which routers nodes in question are connected to */
for_each_online_node(nasid) {
brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
KLTYPE_ROUTER);
if (!brd)
continue;
do {
if (brd->brd_flags & DUPLICATE_BOARD)
continue;
router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
router->rou_rflag = 0;
for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
if (router->rou_port[port].port_nasid == INVALID_NASID)
continue;
dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
router->rou_port[port].port_nasid,
router->rou_port[port].port_offset);
if (dest_brd->brd_type == KLTYPE_IP27) {
if (dest_brd->brd_nasid == nasid_a)
router_a = router;
if (dest_brd->brd_nasid == nasid_b)
router_b = router;
}
}
} while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
}
if (nasid_a == nasid_b)
return LOCAL_DISTANCE;
if (router_a == router_b)
return LOCAL_DISTANCE + 1;
if (router_a == NULL) {
pr_info("node_distance: router_a NULL\n");
return 255;
}
if (router_b == NULL) {
pr_info("node_distance: router_b NULL\n");
return 255;
}
router_distance = 100;
router_recurse(router_a, router_b, 2);
return LOCAL_DISTANCE + router_distance;
}
static void __init init_topology_matrix(void)
{
nasid_t row, col;
for (row = 0; row < MAX_NUMNODES; row++)
for (col = 0; col < MAX_NUMNODES; col++)
__node_distances[row][col] = -1;
for_each_online_node(row) {
for_each_online_node(col) {
__node_distances[row][col] =
compute_node_distance(row, col);
}
}
}
static void __init dump_topology(void)
{
nasid_t nasid;
lboard_t *brd, *dest_brd;
int port;
int router_num = 0;
klrou_t *router;
nasid_t row, col;
pr_info("************** Topology ********************\n");
pr_info(" ");
for_each_online_node(col)
pr_cont("%02d ", col);
pr_cont("\n");
for_each_online_node(row) {
pr_info("%02d ", row);
for_each_online_node(col)
pr_cont("%2d ", node_distance(row, col));
pr_cont("\n");
}
for_each_online_node(nasid) {
brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
KLTYPE_ROUTER);
if (!brd)
continue;
do {
if (brd->brd_flags & DUPLICATE_BOARD)
continue;
pr_cont("Router %d:", router_num);
router_num++;
router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
if (router->rou_port[port].port_nasid == INVALID_NASID)
continue;
dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
router->rou_port[port].port_nasid,
router->rou_port[port].port_offset);
if (dest_brd->brd_type == KLTYPE_IP27)
pr_cont(" %d", dest_brd->brd_nasid);
if (dest_brd->brd_type == KLTYPE_ROUTER)
pr_cont(" r");
}
pr_cont("\n");
} while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) );
}
}
static unsigned long __init slot_getbasepfn(nasid_t nasid, int slot)
{
return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
}
static unsigned long __init slot_psize_compute(nasid_t nasid, int slot)
{
lboard_t *brd;
klmembnk_t *banks;
unsigned long size;
/* Find the node board */
brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
if (!brd)
return 0;
/* Get the memory bank structure */
banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
if (!banks)
return 0;
/* Size in _Megabytes_ */
size = (unsigned long)banks->membnk_bnksz[slot/4];
/* hack for 128 dimm banks */
if (size <= 128) {
if (slot % 4 == 0) {
size <<= 20; /* size in bytes */
return size >> PAGE_SHIFT;
} else
return 0;
} else {
size /= 4;
size <<= 20;
return size >> PAGE_SHIFT;
}
}
static void __init mlreset(void)
{
u64 region_mask;
nasid_t nasid;
master_nasid = get_nasid();
/*
* Probe for all CPUs - this creates the cpumask and sets up the
* mapping tables. We need to do this as early as possible.
*/
#ifdef CONFIG_SMP
cpu_node_probe();
#endif
init_topology_matrix();
dump_topology();
region_mask = gen_region_mask();
setup_replication_mask();
/*
* Set all nodes' calias sizes to 8k
*/
for_each_online_node(nasid) {
/*
* Always have node 0 in the region mask, otherwise
* CALIAS accesses get exceptions since the hub
* thinks it is a node 0 address.
*/
REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
#ifdef LATER
/*
* Set up all hubs to have a big window pointing at
* widget 0. Memory mode, widget 0, offset 0
*/
REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
(0 << IIO_ITTE_WIDGET_SHIFT)));
#endif
}
}
static void __init szmem(void)
{
unsigned long slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
int slot;
nasid_t node;
for_each_online_node(node) {
nodebytes = 0;
for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
slot_psize = slot_psize_compute(node, slot);
if (slot == 0)
slot0sz = slot_psize;
/*
* We need to refine the hack when we have replicated
* kernel text.
*/
nodebytes += (1LL << SLOT_SHIFT);
if (!slot_psize)
continue;
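/*
 * All of this node's page structures have to fit into slot 0; once the
 * struct pages needed for the node's address span so far would overflow
 * slot 0, ignore the remaining slots on this node.
 */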
if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
(slot0sz << PAGE_SHIFT)) {
pr_info("Ignoring slot %d onwards on node %d\n",
slot, node);
slot = MAX_MEM_SLOTS;
continue;
}
memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
PFN_PHYS(slot_psize), node,
MEMBLOCK_NONE);
}
}
}
static void __init node_mem_init(nasid_t node)
{
unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
unsigned long slot_freepfn = node_getfirstfree(node);
unsigned long start_pfn, end_pfn;
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
/*
* Allocate the node data structures on the node first.
*/
__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
memset(__node_data[node], 0, PAGE_SIZE);
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
cpumask_clear(&hub_data(node)->h_cpus);
slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
sizeof(struct hub_data));
memblock_reserve(slot_firstpfn << PAGE_SHIFT,
((slot_freepfn - slot_firstpfn) << PAGE_SHIFT));
}
/*
* A node with nothing. We use it to avoid any special casing in
* cpumask_of_node
*/
static struct node_data null_node = {
.hub = {
.h_cpus = CPU_MASK_NONE
}
};
/*
* Currently, the intranode memory hole support assumes that each slot
* contains at least 32 MBytes of memory. We assume all bootmem data
* fits on the first slot.
*/
void __init prom_meminit(void)
{
nasid_t node;
mlreset();
szmem();
max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
for (node = 0; node < MAX_NUMNODES; node++) {
if (node_online(node)) {
node_mem_init(node);
continue;
}
__node_data[node] = &null_node;
}
}
extern void setup_zero_pages(void);
void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = {0, };
pagetable_init();
zones_size[ZONE_NORMAL] = max_low_pfn;
free_area_init(zones_size);
}
void __init mem_init(void)
{
high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
memblock_free_all();
setup_zero_pages(); /* This comes from node 0 */
}
pg_data_t * __init arch_alloc_nodedata(int nid)
{
return memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);
}
void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
__node_data[nid] = (struct node_data *)pgdat;
}
| linux-master | arch/mips/sgi-ip27/ip27-memory.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1999, 2000 Ralf Baechle ([email protected])
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/param.h>
#include <linux/timex.h>
#include <linux/mm.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/arch.h>
#include <asm/sn/gda.h>
klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char struct_type)
{
int index, j;
if (kli == (klinfo_t *)NULL) {
index = 0;
} else {
for (j = 0; j < KLCF_NUM_COMPS(brd); j++)
if (kli == KLCF_COMP(brd, j))
break;
index = j;
if (index == KLCF_NUM_COMPS(brd)) {
printk("find_component: Bad pointer: 0x%p\n", kli);
return (klinfo_t *)NULL;
}
index++; /* next component */
}
for (; index < KLCF_NUM_COMPS(brd); index++) {
kli = KLCF_COMP(brd, index);
if (KLCF_COMP_TYPE(kli) == struct_type)
return kli;
}
/* Didn't find it. */
return (klinfo_t *)NULL;
}
klinfo_t *find_first_component(lboard_t *brd, unsigned char struct_type)
{
return find_component(brd, (klinfo_t *)NULL, struct_type);
}
lboard_t *find_lboard(lboard_t *start, unsigned char brd_type)
{
/* Search all boards stored on this node. */
while (start) {
if (start->brd_type == brd_type)
return start;
start = KLCF_NEXT(start);
}
/* Didn't find it. */
return (lboard_t *)NULL;
}
lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_type)
{
/* Search all boards stored on this node. */
while (start) {
if (KLCLASS(start->brd_type) == KLCLASS(brd_type))
return start;
start = KLCF_NEXT(start);
}
/* Didn't find it. */
return (lboard_t *)NULL;
}
| linux-master | arch/mips/sgi-ip27/ip27-klconfig.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
*
* Copyright (C) 1999, 2000 Ralf Baechle ([email protected])
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 1999 - 2001 Kanoj Sarcar
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/irq_cpu.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/irq_alloc.h>
struct hub_irq_data {
u64 *irq_mask[2];
cpuid_t cpu;
};
static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);
static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask);
static inline int alloc_level(void)
{
int level;
again:
level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT);
if (level >= IP27_HUB_IRQ_COUNT)
return -ENOSPC;
if (test_and_set_bit(level, hub_irq_map))
goto again;
return level;
}
static void enable_hub_irq(struct irq_data *d)
{
struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
set_bit(d->hwirq, mask);
__raw_writeq(mask[0], hd->irq_mask[0]);
__raw_writeq(mask[1], hd->irq_mask[1]);
}
static void disable_hub_irq(struct irq_data *d)
{
struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
clear_bit(d->hwirq, mask);
__raw_writeq(mask[0], hd->irq_mask[0]);
__raw_writeq(mask[1], hd->irq_mask[1]);
}
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
nasid_t nasid;
int cpu;
cpu = cpumask_first_and(mask, cpu_online_mask);
if (cpu >= nr_cpu_ids)
cpu = cpumask_any(cpu_online_mask);
nasid = cpu_to_node(cpu);
hd->cpu = cpu;
if (!cputoslice(cpu)) {
hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
} else {
hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
}
}
static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
bool force)
{
struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
if (!hd)
return -EINVAL;
if (irqd_is_started(d))
disable_hub_irq(d);
setup_hub_mask(hd, mask);
if (irqd_is_started(d))
enable_hub_irq(d);
irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
return 0;
}
static struct irq_chip hub_irq_type = {
.name = "HUB",
.irq_mask = disable_hub_irq,
.irq_unmask = enable_hub_irq,
.irq_set_affinity = set_affinity_hub_irq,
};
static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct irq_alloc_info *info = arg;
struct hub_irq_data *hd;
struct hub_data *hub;
struct irq_desc *desc;
int swlevel;
if (nr_irqs > 1 || !info)
return -EINVAL;
hd = kzalloc(sizeof(*hd), GFP_KERNEL);
if (!hd)
return -ENOMEM;
swlevel = alloc_level();
if (unlikely(swlevel < 0)) {
kfree(hd);
return -EAGAIN;
}
irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
handle_level_irq, NULL, NULL);
/* use CPU connected to nearest hub */
hub = hub_data(info->nasid);
setup_hub_mask(hd, &hub->h_cpus);
info->nasid = cpu_to_node(hd->cpu);
/* Make sure it's not already pending when we connect it. */
REMOTE_HUB_CLR_INTR(info->nasid, swlevel);
desc = irq_to_desc(virq);
desc->irq_common_data.node = info->nasid;
cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);
return 0;
}
static void hub_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *irqd;
if (nr_irqs > 1)
return;
irqd = irq_domain_get_irq_data(domain, virq);
if (irqd && irqd->chip_data)
kfree(irqd->chip_data);
}
static const struct irq_domain_ops hub_domain_ops = {
.alloc = hub_domain_alloc,
.free = hub_domain_free,
};
/*
 * This code is unnecessarily complex, because we do the interrupt
 * enabling ourselves. Basically, once we grab the set of intrs we need
 * to service, we must mask _all_ of these interrupts; firstly, to make
 * sure the same intr does not trigger again and cause recursion that
 * can lead to stack overflow. Secondly, we cannot just mask the one
 * intr we are currently servicing, because the non-masked intrs in the
 * first set might trigger again, causing multiple servicings of the
 * same intr. This effect is mostly seen for intercpu intrs.
* Kanoj 05.13.00
*/
static void ip27_do_irq_mask0(struct irq_desc *desc)
{
cpuid_t cpu = smp_processor_id();
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
struct irq_domain *domain;
u64 pend0;
int ret;
/* copied from Irix intpend0() */
pend0 = LOCAL_HUB_L(PI_INT_PEND0);
pend0 &= mask[0]; /* Pick intrs we should look at */
if (!pend0)
return;
#ifdef CONFIG_SMP
if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
scheduler_ipi();
} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
scheduler_ipi();
} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
generic_smp_call_function_interrupt();
} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
generic_smp_call_function_interrupt();
} else
#endif
{
domain = irq_desc_get_handler_data(desc);
ret = generic_handle_domain_irq(domain, __ffs(pend0));
if (ret)
spurious_interrupt();
}
LOCAL_HUB_L(PI_INT_PEND0);
}
static void ip27_do_irq_mask1(struct irq_desc *desc)
{
cpuid_t cpu = smp_processor_id();
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
struct irq_domain *domain;
u64 pend1;
int ret;
/* copied from Irix intpend0() */
pend1 = LOCAL_HUB_L(PI_INT_PEND1);
pend1 &= mask[1]; /* Pick intrs we should look at */
if (!pend1)
return;
domain = irq_desc_get_handler_data(desc);
ret = generic_handle_domain_irq(domain, __ffs(pend1) + 64);
if (ret)
spurious_interrupt();
LOCAL_HUB_L(PI_INT_PEND1);
}
void install_ipi(void)
{
int cpu = smp_processor_id();
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
int slice = LOCAL_HUB_L(PI_CPU_NUM);
int resched, call;
resched = CPU_RESCHED_A_IRQ + slice;
set_bit(resched, mask);
LOCAL_HUB_CLR_INTR(resched);
call = CPU_CALL_A_IRQ + slice;
set_bit(call, mask);
LOCAL_HUB_CLR_INTR(call);
if (slice == 0) {
LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]);
LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]);
} else {
LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]);
LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]);
}
}
void __init arch_init_irq(void)
{
struct irq_domain *domain;
struct fwnode_handle *fn;
int i;
mips_cpu_irq_init();
/*
* Some interrupts are reserved by hardware or by software convention.
* Mark these as reserved right away so they won't be used accidentally
* later.
*/
for (i = 0; i <= CPU_CALL_B_IRQ; i++)
set_bit(i, hub_irq_map);
for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
set_bit(i, hub_irq_map);
fn = irq_domain_alloc_named_fwnode("HUB");
WARN_ON(fn == NULL);
if (!fn)
return;
domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
&hub_domain_ops, NULL);
WARN_ON(domain == NULL);
if (!domain)
return;
irq_set_default_host(domain);
irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
domain);
irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
domain);
}
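/*
 * Illustrative sketch, not part of the original file: roughly how a bus
 * driver (e.g. the xtalk PCI bridge) would obtain a HUB interrupt through
 * the domain registered above.  Everything except irq_alloc_info.nasid and
 * irq_domain_alloc_irqs() is a made-up name for the example.
 */
#if 0
static int example_alloc_hub_irq(struct irq_domain *domain, nasid_t nasid)
{
	struct irq_alloc_info info = { };
	int virq;

	info.nasid = nasid;
	virq = irq_domain_alloc_irqs(domain, 1, nasid, &info);
	if (virq <= 0)
		return -ENOSPC;

	return virq;	/* caller then request_irq()s this number */
}
#endif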
| linux-master | arch/mips/sgi-ip27/ip27-irq.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001, 2002 Ralf Baechle
*/
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/ioc3.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include "ip27-common.h"
#define IOC3_CLK (22000000 / 3)
#define IOC3_FLAGS (0)
static inline struct ioc3_uartregs *console_uart(void)
{
struct ioc3 *ioc3;
nasid_t nasid;
nasid = (master_nasid == INVALID_NASID) ? get_nasid() : master_nasid;
ioc3 = (struct ioc3 *)KL_CONFIG_CH_CONS_INFO(nasid)->memory_base;
return &ioc3->sregs.uarta;
}
void prom_putchar(char c)
{
struct ioc3_uartregs *uart = console_uart();
while ((readb(&uart->iu_lsr) & 0x20) == 0)
;
writeb(c, &uart->iu_thr);
}
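/*
 * Illustrative sketch, not part of the original file: a string helper that
 * a debugging patch might layer on top of prom_putchar().  Emitting CR
 * before LF is an assumption about the attached terminal, not something
 * this file requires.
 */
#if 0
static void example_prom_puts(const char *s)
{
	while (*s) {
		if (*s == '\n')
			prom_putchar('\r');
		prom_putchar(*s++);
	}
}
#endif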
| linux-master | arch/mips/sgi-ip27/ip27-console.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.
* Copyright (C) 2004 Christoph Hellwig.
*
* Support functions for the HUB ASIC - mostly PIO mapping related.
*/
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/mmzone.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/agent.h>
#include <asm/sn/io.h>
#include <asm/xtalk/xtalk.h>
static int force_fire_and_forget = 1;
/**
* hub_pio_map - establish a HUB PIO mapping
*
* @hub: hub to perform PIO mapping on
* @widget: widget ID to perform PIO mapping for
* @xtalk_addr: xtalk_address that needs to be mapped
* @size: size of the PIO mapping
*
**/
unsigned long hub_pio_map(nasid_t nasid, xwidgetnum_t widget,
unsigned long xtalk_addr, size_t size)
{
unsigned i;
/* use small-window mapping if possible */
if ((xtalk_addr % SWIN_SIZE) + size <= SWIN_SIZE)
return NODE_SWIN_BASE(nasid, widget) + (xtalk_addr % SWIN_SIZE);
if ((xtalk_addr % BWIN_SIZE) + size > BWIN_SIZE) {
printk(KERN_WARNING "PIO mapping at hub %d widget %d addr 0x%lx"
" too big (%ld)\n",
nasid, widget, xtalk_addr, size);
return 0;
}
xtalk_addr &= ~(BWIN_SIZE-1);
for (i = 0; i < HUB_NUM_BIG_WINDOW; i++) {
if (test_and_set_bit(i, hub_data(nasid)->h_bigwin_used))
continue;
/*
* The code below does a PIO write to setup an ITTE entry.
*
* We need to prevent other CPUs from seeing our updated
* memory shadow of the ITTE (in the piomap) until the ITTE
* entry is actually set up; otherwise, another CPU might
* attempt a PIO prematurely.
*
* Also, the only way we can know that an entry has been
* received by the hub and can be used by future PIO reads/
* writes is by reading back the ITTE entry after writing it.
*
* For these two reasons, we PIO read back the ITTE entry
* after we write it.
*/
IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
__raw_readq(IIO_ITTE_GET(nasid, i));
return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
}
printk(KERN_WARNING "unable to establish PIO mapping for at"
" hub %d widget %d addr 0x%lx\n",
nasid, widget, xtalk_addr);
return 0;
}
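/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * caller might use hub_pio_map().  The widget number, xtalk offset and
 * mapping size are made-up example values.
 */
#if 0
static u64 example_read_widget_reg(nasid_t nasid)
{
	unsigned long vaddr;

	/* Ask for 4 KiB of widget 0xc's xtalk space starting at offset 0. */
	vaddr = hub_pio_map(nasid, 0xc, 0, 0x1000);
	if (!vaddr)
		return 0;	/* neither a small nor a big window was available */

	return __raw_readq((void __iomem *)vaddr);
}
#endif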
/*
 * hub_setup_prb(nasid, prbnum, credits)
 *
 * Put a PRB into fire-and-forget mode if force_fire_and_forget is set.
 * Otherwise, put it into conveyor belt mode with the specified number of
 * credits.
*/
static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
{
union iprb_u prb;
int prb_offset;
/*
* Get the current register value.
*/
prb_offset = IIO_IOPRB(prbnum);
prb.iprb_regval = REMOTE_HUB_L(nasid, prb_offset);
/*
* Clear out some fields.
*/
prb.iprb_ovflow = 1;
prb.iprb_bnakctr = 0;
prb.iprb_anakctr = 0;
/*
* Enable or disable fire-and-forget mode.
*/
prb.iprb_ff = force_fire_and_forget ? 1 : 0;
/*
* Set the appropriate number of PIO credits for the widget.
*/
prb.iprb_xtalkctr = credits;
/*
* Store the new value to the register.
*/
REMOTE_HUB_S(nasid, prb_offset, prb.iprb_regval);
}
/**
* hub_set_piomode - set pio mode for a given hub
*
* @nasid: physical node ID for the hub in question
*
* Put the hub into either "PIO conveyor belt" mode or "fire-and-forget" mode.
* To do this, we have to make absolutely sure that no PIOs are in progress
* so we turn off access to all widgets for the duration of the function.
*
* XXX - This code should really check what kind of widget we're talking
* to. Bridges can only handle three requests, but XG will do more.
* How many can crossbow handle to widget 0? We're assuming 1.
*
* XXX - There is a bug in the crossbow that link reset PIOs do not
* return write responses. The easiest solution to this problem is to
* leave widget 0 (xbow) in fire-and-forget mode at all times. This
* only affects pio's to xbow registers, which should be rare.
**/
static void hub_set_piomode(nasid_t nasid)
{
u64 ii_iowa;
union hubii_wcr_u ii_wcr;
unsigned i;
ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, 0);
ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid, IIO_WCR);
if (ii_wcr.iwcr_dir_con) {
/*
* Assume a bridge here.
*/
hub_setup_prb(nasid, 0, 3);
} else {
/*
* Assume a crossbow here.
*/
hub_setup_prb(nasid, 0, 1);
}
/*
* XXX - Here's where we should take the widget type into
 * account when assigning credits.
*/
for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++)
hub_setup_prb(nasid, i, 3);
REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, ii_iowa);
}
/*
* hub_pio_init - PIO-related hub initialization
*
 * @nasid: physical node ID of the hub to initialize
*/
void hub_pio_init(nasid_t nasid)
{
unsigned i;
/* initialize big window piomaps for this hub */
bitmap_zero(hub_data(nasid)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
for (i = 0; i < HUB_NUM_BIG_WINDOW; i++)
IIO_ITTE_DISABLE(nasid, i);
hub_set_piomode(nasid);
}
| linux-master | arch/mips/sgi-ip27/ip27-hubio.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/nmi.h>
#include <asm/sn/arch.h>
#include <asm/sn/agent.h>
#if 0
#define NODE_NUM_CPUS(n) CNODE_NUM_CPUS(n)
#else
#define NODE_NUM_CPUS(n) CPUS_PER_NODE
#endif
#define SEND_NMI(_nasid, _slice) \
REMOTE_HUB_S((_nasid), (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
typedef unsigned long machreg_t;
static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/*
* Let's see what else we need to do here. Set up sp, gp?
*/
void nmi_dump(void)
{
void cont_nmi_dump(void);
cont_nmi_dump();
}
void install_cpu_nmi_handler(int slice)
{
nmi_t *nmi_addr;
nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
if (nmi_addr->call_addr)
return;
nmi_addr->magic = NMI_MAGIC;
nmi_addr->call_addr = (void *)nmi_dump;
nmi_addr->call_addr_c =
(void *)(~((unsigned long)(nmi_addr->call_addr)));
nmi_addr->call_parm = 0;
}
/*
* Copy the cpu registers which have been saved in the IP27prom format
* into the eframe format for the node under consideration.
*/
void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
struct reg_struct *nr;
int i;
/* Get the pointer to the current cpu's register set. */
nr = (struct reg_struct *)
(TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
slice * IP27_NMI_KREGS_CPU_SIZE);
pr_emerg("NMI nasid %d: slice %d\n", nasid, slice);
/*
* Saved main processor registers
*/
for (i = 0; i < 32; ) {
if ((i % 4) == 0)
pr_emerg("$%2d :", i);
pr_cont(" %016lx", nr->gpr[i]);
i++;
if ((i % 4) == 0)
pr_cont("\n");
}
pr_emerg("Hi : (value lost)\n");
pr_emerg("Lo : (value lost)\n");
/*
* Saved cp0 registers
*/
pr_emerg("epc : %016lx %pS\n", nr->epc, (void *)nr->epc);
pr_emerg("%s\n", print_tainted());
pr_emerg("ErrEPC: %016lx %pS\n", nr->error_epc, (void *)nr->error_epc);
pr_emerg("ra : %016lx %pS\n", nr->gpr[31], (void *)nr->gpr[31]);
pr_emerg("Status: %08lx ", nr->sr);
if (nr->sr & ST0_KX)
pr_cont("KX ");
if (nr->sr & ST0_SX)
pr_cont("SX ");
if (nr->sr & ST0_UX)
pr_cont("UX ");
switch (nr->sr & ST0_KSU) {
case KSU_USER:
pr_cont("USER ");
break;
case KSU_SUPERVISOR:
pr_cont("SUPERVISOR ");
break;
case KSU_KERNEL:
pr_cont("KERNEL ");
break;
default:
pr_cont("BAD_MODE ");
break;
}
if (nr->sr & ST0_ERL)
pr_cont("ERL ");
if (nr->sr & ST0_EXL)
pr_cont("EXL ");
if (nr->sr & ST0_IE)
pr_cont("IE ");
pr_cont("\n");
pr_emerg("Cause : %08lx\n", nr->cause);
pr_emerg("PrId : %08x\n", read_c0_prid());
pr_emerg("BadVA : %016lx\n", nr->badva);
pr_emerg("CErr : %016lx\n", nr->cache_err);
pr_emerg("NMI_SR: %016lx\n", nr->nmi_sr);
pr_emerg("\n");
}
void nmi_dump_hub_irq(nasid_t nasid, int slice)
{
u64 mask0, mask1, pend0, pend1;
if (slice == 0) { /* Slice A */
mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
} else { /* Slice B */
mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
}
pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);
pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1);
pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1);
pr_emerg("\n\n");
}
/*
* Copy the cpu registers which have been saved in the IP27prom format
* into the eframe format for the node under consideration.
*/
void nmi_node_eframe_save(nasid_t nasid)
{
int slice;
if (nasid == INVALID_NASID)
return;
/* Save the registers into eframe for each cpu */
for (slice = 0; slice < NODE_NUM_CPUS(nasid); slice++) {
nmi_cpu_eframe_save(nasid, slice);
nmi_dump_hub_irq(nasid, slice);
}
}
/*
* Save the nmi cpu registers for all cpus in the system.
*/
void
nmi_eframes_save(void)
{
nasid_t nasid;
for_each_online_node(nasid)
nmi_node_eframe_save(nasid);
}
void
cont_nmi_dump(void)
{
#ifndef REAL_NMI_SIGNAL
static atomic_t nmied_cpus = ATOMIC_INIT(0);
atomic_inc(&nmied_cpus);
#endif
/*
* Only allow 1 cpu to proceed
*/
arch_spin_lock(&nmi_lock);
#ifdef REAL_NMI_SIGNAL
/*
* Wait up to 15 seconds for the other cpus to respond to the NMI.
* If a cpu has not responded after 10 sec, send it 1 additional NMI.
* This is for 2 reasons:
 * - sometimes an MMSC fails to NMI all cpus.
 * - on a 512p SN0 system, the MMSC will only send NMIs to
 * half the cpus. Unfortunately, we don't know which cpus may be
 * NMIed - it depends on how the site chooses to configure.
 *
 * Note: it has been measured that it takes the MMSC up to 2.3 secs to
* send NMIs to all cpus on a 256p system.
*/
for (i=0; i < 1500; i++) {
for_each_online_node(node)
if (NODEPDA(node)->dump_count == 0)
break;
if (node == MAX_NUMNODES)
break;
if (i == 1000) {
for_each_online_node(node)
if (NODEPDA(node)->dump_count == 0) {
cpu = cpumask_first(cpumask_of_node(node));
for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
CPUMASK_SETB(nmied_cpus, cpu);
/*
* cputonasid, cputoslice
* needs kernel cpuid
*/
SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
}
}
}
udelay(10000);
}
#else
while (atomic_read(&nmied_cpus) != num_online_cpus());
#endif
/*
 * Save the nmi cpu registers for all cpus in the eframe format.
*/
nmi_eframes_save();
LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
}
| linux-master | arch/mips/sgi-ip27/ip27-nmi.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995, 1996, 1999, 2000 by Ralf Baechle
* Copyright (C) 1999, 2000 by Silicon Graphics
* Copyright (C) 2002 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/signal.h> /* for SIGBUS */
#include <linux/sched.h> /* show_regs(), force_sig() */
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <asm/ptrace.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
static void dump_hub_information(unsigned long errst0, unsigned long errst1)
{
static char *err_type[2][8] = {
{ NULL, "Uncached Partial Read PRERR", "DERR", "Read Timeout",
NULL, NULL, NULL, NULL },
{ "WERR", "Uncached Partial Write", "PWERR", "Write Timeout",
NULL, NULL, NULL, NULL }
};
union pi_err_stat0 st0;
union pi_err_stat1 st1;
st0.pi_stat0_word = errst0;
st1.pi_stat1_word = errst1;
if (!st0.pi_stat0_fmt.s0_valid) {
pr_info("Hub does not contain valid error information\n");
return;
}
pr_info("Hub has valid error information:\n");
if (st0.pi_stat0_fmt.s0_ovr_run)
pr_info("Overrun is set. Error stack may contain additional "
"information.\n");
pr_info("Hub error address is %08lx\n",
(unsigned long)st0.pi_stat0_fmt.s0_addr);
pr_info("Incoming message command 0x%lx\n",
(unsigned long)st0.pi_stat0_fmt.s0_cmd);
pr_info("Supplemental field of incoming message is 0x%lx\n",
(unsigned long)st0.pi_stat0_fmt.s0_supl);
pr_info("T5 Rn (for RRB only) is 0x%lx\n",
(unsigned long)st0.pi_stat0_fmt.s0_t5_req);
pr_info("Error type is %s\n", err_type[st1.pi_stat1_fmt.s1_rw_rb]
[st0.pi_stat0_fmt.s0_err_type] ? : "invalid");
}
int ip27_be_handler(struct pt_regs *regs, int is_fixup)
{
unsigned long errst0, errst1;
int data = regs->cp0_cause & 4;
int cpu = LOCAL_HUB_L(PI_CPU_NUM);
if (is_fixup)
return MIPS_BE_FIXUP;
printk("Slice %c got %cbe at 0x%lx\n", 'A' + cpu, data ? 'd' : 'i',
regs->cp0_epc);
printk("Hub information:\n");
printk("ERR_INT_PEND = 0x%06llx\n", LOCAL_HUB_L(PI_ERR_INT_PEND));
errst0 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS0_B : PI_ERR_STATUS0_A);
errst1 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS1_B : PI_ERR_STATUS1_A);
dump_hub_information(errst0, errst1);
show_regs(regs);
dump_tlb_all();
while(1);
force_sig(SIGBUS);
}
void __init ip27_be_init(void)
{
/* XXX Initialize all the Hub & Bridge error handling here. */
int cpu = LOCAL_HUB_L(PI_CPU_NUM);
int cpuoff = cpu << 8;
mips_set_be_handler(ip27_be_handler);
LOCAL_HUB_S(PI_ERR_INT_PEND,
cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
LOCAL_HUB_S(PI_ERR_INT_MASK_A + cpuoff, 0);
LOCAL_HUB_S(PI_ERR_STACK_ADDR_A + cpuoff, 0);
LOCAL_HUB_S(PI_ERR_STACK_SIZE, 0); /* Disable error stack */
LOCAL_HUB_S(PI_SYSAD_ERRCHK_EN, PI_SYSAD_CHECK_ALL);
}
| linux-master | arch/mips/sgi-ip27/ip27-berr.c |
/*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* Copyright (C) 2000 - 2001 by Kanoj Sarcar ([email protected])
* Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/gda.h>
#include <asm/sn/intr.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/launch.h>
#include <asm/sn/mapped_kernel.h>
#include <asm/sn/types.h>
#include "ip27-common.h"
static int node_scan_cpus(nasid_t nasid, int highest)
{
static int cpus_found;
lboard_t *brd;
klcpu_t *acpu;
cpuid_t cpuid;
brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
do {
acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
while (acpu) {
cpuid = acpu->cpu_info.virtid;
/* Only let it join in if it's marked enabled */
if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
(cpus_found != NR_CPUS)) {
if (cpuid > highest)
highest = cpuid;
set_cpu_possible(cpuid, true);
cputonasid(cpus_found) = nasid;
cputoslice(cpus_found) = acpu->cpu_info.physid;
sn_cpu_info[cpus_found].p_speed =
acpu->cpu_speed;
cpus_found++;
}
acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
KLSTRUCT_CPU);
}
brd = KLCF_NEXT(brd);
if (!brd)
break;
brd = find_lboard(brd, KLTYPE_IP27);
} while (brd);
return highest;
}
void cpu_node_probe(void)
{
int i, highest = 0;
gda_t *gdap = GDA;
nodes_clear(node_online_map);
for (i = 0; i < MAX_NUMNODES; i++) {
nasid_t nasid = gdap->g_nasidtable[i];
if (nasid == INVALID_NASID)
break;
node_set_online(nasid);
highest = node_scan_cpus(nasid, highest);
}
printk("Discovered %d cpus on %d nodes\n", highest + 1, num_online_nodes());
}
static __init void intr_clear_all(nasid_t nasid)
{
int i;
REMOTE_HUB_S(nasid, PI_INT_MASK0_A, 0);
REMOTE_HUB_S(nasid, PI_INT_MASK0_B, 0);
REMOTE_HUB_S(nasid, PI_INT_MASK1_A, 0);
REMOTE_HUB_S(nasid, PI_INT_MASK1_B, 0);
for (i = 0; i < 128; i++)
REMOTE_HUB_CLR_INTR(nasid, i);
}
static void ip27_send_ipi_single(int destid, unsigned int action)
{
int irq;
switch (action) {
case SMP_RESCHEDULE_YOURSELF:
irq = CPU_RESCHED_A_IRQ;
break;
case SMP_CALL_FUNCTION:
irq = CPU_CALL_A_IRQ;
break;
default:
panic("sendintr");
}
irq += cputoslice(destid);
/*
* Set the interrupt bit associated with the CPU we want to
* send the interrupt to.
*/
REMOTE_HUB_SEND_INTR(cpu_to_node(destid), irq);
}
static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
ip27_send_ipi_single(i, action);
}
static void ip27_init_cpu(void)
{
per_cpu_init();
}
static void ip27_smp_finish(void)
{
hub_rt_clock_event_init();
local_irq_enable();
}
/*
 * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we
 * set sp to the top of the kernel stack of the newly created idle process and
 * gp to its thread_info so that current_thread_info() will work.
*/
static int ip27_boot_secondary(int cpu, struct task_struct *idle)
{
unsigned long gp = (unsigned long)task_thread_info(idle);
unsigned long sp = __KSTK_TOS(idle);
LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu),
(launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
0, (void *) sp, (void *) gp);
return 0;
}
static void __init ip27_smp_setup(void)
{
nasid_t nasid;
for_each_online_node(nasid) {
if (nasid == 0)
continue;
intr_clear_all(nasid);
}
replicate_kernel_text();
/*
 * The PROM sets up the system so that the boot CPU is always the first CPU on nasid 0.
*/
cputonasid(0) = 0;
cputoslice(0) = LOCAL_HUB_L(PI_CPU_NUM);
}
static void __init ip27_prepare_cpus(unsigned int max_cpus)
{
/* We already did everything necessary earlier */
}
const struct plat_smp_ops ip27_smp_ops = {
.send_ipi_single = ip27_send_ipi_single,
.send_ipi_mask = ip27_send_ipi_mask,
.init_secondary = ip27_init_cpu,
.smp_finish = ip27_smp_finish,
.boot_secondary = ip27_boot_secondary,
.smp_setup = ip27_smp_setup,
.prepare_cpus = ip27_prepare_cpus,
.prepare_boot_cpu = ip27_init_cpu,
};
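/*
 * Note (not part of the original file): this ops table is consumed by the
 * generic MIPS SMP code; platform init code is expected to hand it over
 * early on with something like register_smp_ops(&ip27_smp_ops).
 */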
| linux-master | arch/mips/sgi-ip27/ip27-smp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1999, 2000 Ralf Baechle ([email protected])
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004 Christoph Hellwig.
*
* Generic XTALK initialization code
*/
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/platform_device.h>
#include <linux/platform_data/sgi-w1.h>
#include <linux/platform_data/xtalk-bridge.h>
#include <asm/sn/addrs.h>
#include <asm/sn/types.h>
#include <asm/sn/klconfig.h>
#include <asm/pci/bridge.h>
#include <asm/xtalk/xtalk.h>
#define XBOW_WIDGET_PART_NUM 0x0
#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbow in Xbridge */
#define BASE_XBOW_PORT 8 /* Lowest external port */
static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
{
struct xtalk_bridge_platform_data *bd;
struct sgi_w1_platform_data *wd;
struct platform_device *pdev_wd;
struct platform_device *pdev_bd;
struct resource w1_res;
unsigned long offset;
offset = NODE_OFFSET(nasid);
wd = kzalloc(sizeof(*wd), GFP_KERNEL);
if (!wd) {
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
return;
}
snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx",
offset + (widget << SWIN_SIZE_BITS));
memset(&w1_res, 0, sizeof(w1_res));
w1_res.start = offset + (widget << SWIN_SIZE_BITS) +
offsetof(struct bridge_regs, b_nic);
w1_res.end = w1_res.start + 3;
w1_res.flags = IORESOURCE_MEM;
pdev_wd = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
if (!pdev_wd) {
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
goto err_kfree_wd;
}
if (platform_device_add_resources(pdev_wd, &w1_res, 1)) {
pr_warn("xtalk:n%d/%x bridge failed to add platform resources.\n", nasid, widget);
goto err_put_pdev_wd;
}
if (platform_device_add_data(pdev_wd, wd, sizeof(*wd))) {
pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
goto err_put_pdev_wd;
}
if (platform_device_add(pdev_wd)) {
pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
goto err_put_pdev_wd;
}
/* platform_device_add_data() duplicates the data */
kfree(wd);
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
if (!bd) {
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
goto err_unregister_pdev_wd;
}
pdev_bd = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
if (!pdev_bd) {
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
goto err_kfree_bd;
}
bd->bridge_addr = RAW_NODE_SWIN_BASE(nasid, widget);
bd->intr_addr = BIT_ULL(47) + 0x01800000 + PI_INT_PEND_MOD;
bd->nasid = nasid;
bd->masterwid = masterwid;
bd->mem.name = "Bridge PCI MEM";
bd->mem.start = offset + (widget << SWIN_SIZE_BITS) + BRIDGE_DEVIO0;
bd->mem.end = offset + (widget << SWIN_SIZE_BITS) + SWIN_SIZE - 1;
bd->mem.flags = IORESOURCE_MEM;
bd->mem_offset = offset;
bd->io.name = "Bridge PCI IO";
bd->io.start = offset + (widget << SWIN_SIZE_BITS) + BRIDGE_DEVIO0;
bd->io.end = offset + (widget << SWIN_SIZE_BITS) + SWIN_SIZE - 1;
bd->io.flags = IORESOURCE_IO;
bd->io_offset = offset;
if (platform_device_add_data(pdev_bd, bd, sizeof(*bd))) {
pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
goto err_put_pdev_bd;
}
if (platform_device_add(pdev_bd)) {
pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
goto err_put_pdev_bd;
}
/* platform_device_add_data() duplicates the data */
kfree(bd);
pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget);
return;
err_put_pdev_bd:
platform_device_put(pdev_bd);
err_kfree_bd:
kfree(bd);
err_unregister_pdev_wd:
platform_device_unregister(pdev_wd);
return;
err_put_pdev_wd:
platform_device_put(pdev_wd);
err_kfree_wd:
kfree(wd);
return;
}
static int probe_one_port(nasid_t nasid, int widget, int masterwid)
{
widgetreg_t widget_id;
xwidget_part_num_t partnum;
widget_id = *(volatile widgetreg_t *)
(RAW_NODE_SWIN_BASE(nasid, widget) + WIDGET_ID);
partnum = XWIDGET_PART_NUM(widget_id);
switch (partnum) {
case BRIDGE_WIDGET_PART_NUM:
case XBRIDGE_WIDGET_PART_NUM:
bridge_platform_create(nasid, widget, masterwid);
break;
default:
pr_info("xtalk:n%d/%d unknown widget (0x%x)\n",
nasid, widget, partnum);
break;
}
return 0;
}
static int xbow_probe(nasid_t nasid)
{
lboard_t *brd;
klxbow_t *xbow_p;
unsigned masterwid, i;
/*
* found xbow, so may have multiple bridges
* need to probe xbow
*/
brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_MIDPLANE8);
if (!brd)
return -ENODEV;
xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW);
if (!xbow_p)
return -ENODEV;
/*
* Okay, here's a xbow. Let's arbitrate and find
* out if we should initialize it. Set enabled
* hub connected at highest or lowest widget as
* master.
*/
#ifdef WIDGET_A
i = HUB_WIDGET_ID_MAX + 1;
do {
i--;
} while ((!XBOW_PORT_TYPE_HUB(xbow_p, i)) ||
(!XBOW_PORT_IS_ENABLED(xbow_p, i)));
#else
i = HUB_WIDGET_ID_MIN - 1;
do {
i++;
} while ((!XBOW_PORT_TYPE_HUB(xbow_p, i)) ||
(!XBOW_PORT_IS_ENABLED(xbow_p, i)));
#endif
masterwid = i;
if (nasid != XBOW_PORT_NASID(xbow_p, i))
return 1;
for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++) {
if (XBOW_PORT_IS_ENABLED(xbow_p, i) &&
XBOW_PORT_TYPE_IO(xbow_p, i))
probe_one_port(nasid, i, masterwid);
}
return 0;
}
static void xtalk_probe_node(nasid_t nasid)
{
volatile u64 hubreg;
xwidget_part_num_t partnum;
widgetreg_t widget_id;
hubreg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
/* check whether the link is up */
if (!(hubreg & IIO_LLP_CSR_IS_UP))
return;
widget_id = *(volatile widgetreg_t *)
(RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
partnum = XWIDGET_PART_NUM(widget_id);
switch (partnum) {
case BRIDGE_WIDGET_PART_NUM:
bridge_platform_create(nasid, 0x8, 0xa);
break;
case XBOW_WIDGET_PART_NUM:
case XXBOW_WIDGET_PART_NUM:
pr_info("xtalk:n%d/0 xbow widget\n", nasid);
xbow_probe(nasid);
break;
default:
pr_info("xtalk:n%d/0 unknown widget (0x%x)\n", nasid, partnum);
break;
}
}
static int __init xtalk_init(void)
{
nasid_t nasid;
for_each_online_node(nasid)
xtalk_probe_node(nasid);
return 0;
}
arch_initcall(xtalk_init);
| linux-master | arch/mips/sgi-ip27/ip27-xtalk.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atheros AR7XXX/AR9XXX SoC early printk support
*
* Copyright (C) 2008-2011 Gabor Juhos <[email protected]>
* Copyright (C) 2008 Imre Kaloz <[email protected]>
*/
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <asm/addrspace.h>
#include <asm/setup.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ar933x_uart.h>
static void (*_prom_putchar)(char);
static inline void prom_putchar_wait(void __iomem *reg, u32 val)
{
u32 t;
do {
t = __raw_readl(reg);
if ((t & val) == val)
break;
} while (1);
}
static void prom_putchar_ar71xx(char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
prom_putchar_wait(base + UART_LSR * 4, UART_LSR_BOTH_EMPTY);
__raw_writel((unsigned char)ch, base + UART_TX * 4);
prom_putchar_wait(base + UART_LSR * 4, UART_LSR_BOTH_EMPTY);
}
static void prom_putchar_ar933x(char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR933X_UART_BASE));
prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR);
__raw_writel(AR933X_UART_DATA_TX_CSR | (unsigned char)ch,
base + AR933X_UART_DATA_REG);
prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR);
}
static void prom_putchar_dummy(char ch)
{
/* nothing to do */
}
static void prom_enable_uart(u32 id)
{
void __iomem *gpio_base;
u32 uart_en;
u32 t;
switch (id) {
case REV_ID_MAJOR_AR71XX:
uart_en = AR71XX_GPIO_FUNC_UART_EN;
break;
case REV_ID_MAJOR_AR7240:
case REV_ID_MAJOR_AR7241:
case REV_ID_MAJOR_AR7242:
uart_en = AR724X_GPIO_FUNC_UART_EN;
break;
case REV_ID_MAJOR_AR913X:
uart_en = AR913X_GPIO_FUNC_UART_EN;
break;
case REV_ID_MAJOR_AR9330:
case REV_ID_MAJOR_AR9331:
uart_en = AR933X_GPIO_FUNC_UART_EN;
break;
case REV_ID_MAJOR_AR9341:
case REV_ID_MAJOR_AR9342:
case REV_ID_MAJOR_AR9344:
/* TODO */
default:
return;
}
gpio_base = (void __iomem *)KSEG1ADDR(AR71XX_GPIO_BASE);
t = __raw_readl(gpio_base + AR71XX_GPIO_REG_FUNC);
t |= uart_en;
__raw_writel(t, gpio_base + AR71XX_GPIO_REG_FUNC);
}
static void prom_putchar_init(void)
{
void __iomem *base;
u32 id;
base = (void __iomem *)(KSEG1ADDR(AR71XX_RESET_BASE));
id = __raw_readl(base + AR71XX_RESET_REG_REV_ID);
id &= REV_ID_MAJOR_MASK;
switch (id) {
case REV_ID_MAJOR_AR71XX:
case REV_ID_MAJOR_AR7240:
case REV_ID_MAJOR_AR7241:
case REV_ID_MAJOR_AR7242:
case REV_ID_MAJOR_AR913X:
case REV_ID_MAJOR_AR9341:
case REV_ID_MAJOR_AR9342:
case REV_ID_MAJOR_AR9344:
case REV_ID_MAJOR_QCA9533:
case REV_ID_MAJOR_QCA9533_V2:
case REV_ID_MAJOR_QCA9556:
case REV_ID_MAJOR_QCA9558:
case REV_ID_MAJOR_TP9343:
case REV_ID_MAJOR_QCA956X:
case REV_ID_MAJOR_QCN550X:
_prom_putchar = prom_putchar_ar71xx;
break;
case REV_ID_MAJOR_AR9330:
case REV_ID_MAJOR_AR9331:
_prom_putchar = prom_putchar_ar933x;
break;
default:
_prom_putchar = prom_putchar_dummy;
return;
}
prom_enable_uart(id);
}
void prom_putchar(char ch)
{
if (!_prom_putchar)
prom_putchar_init();
_prom_putchar(ch);
}
| linux-master | arch/mips/ath79/early_printk.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atheros AR71XX/AR724X/AR913X common routines
*
* Copyright (C) 2010-2011 Jaiganesh Narayanan <[email protected]>
* Copyright (C) 2008-2011 Gabor Juhos <[email protected]>
* Copyright (C) 2008 Imre Kaloz <[email protected]>
*
* Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
static DEFINE_SPINLOCK(ath79_device_reset_lock);
u32 ath79_cpu_freq;
EXPORT_SYMBOL_GPL(ath79_cpu_freq);
u32 ath79_ahb_freq;
EXPORT_SYMBOL_GPL(ath79_ahb_freq);
u32 ath79_ddr_freq;
EXPORT_SYMBOL_GPL(ath79_ddr_freq);
enum ath79_soc_type ath79_soc;
unsigned int ath79_soc_rev;
void __iomem *ath79_pll_base;
void __iomem *ath79_reset_base;
EXPORT_SYMBOL_GPL(ath79_reset_base);
static void __iomem *ath79_ddr_base;
static void __iomem *ath79_ddr_wb_flush_base;
static void __iomem *ath79_ddr_pci_win_base;
void ath79_ddr_ctrl_init(void)
{
ath79_ddr_base = ioremap(AR71XX_DDR_CTRL_BASE,
AR71XX_DDR_CTRL_SIZE);
if (soc_is_ar913x() || soc_is_ar724x() || soc_is_ar933x()) {
ath79_ddr_wb_flush_base = ath79_ddr_base + 0x7c;
ath79_ddr_pci_win_base = NULL;
} else {
ath79_ddr_wb_flush_base = ath79_ddr_base + 0x9c;
ath79_ddr_pci_win_base = ath79_ddr_base + 0x7c;
}
}
EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
void ath79_ddr_wb_flush(u32 reg)
{
void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
/* Flush the DDR write buffer. */
__raw_writel(0x1, flush_reg);
while (__raw_readl(flush_reg) & 0x1)
;
/* It must be run twice. */
__raw_writel(0x1, flush_reg);
while (__raw_readl(flush_reg) & 0x1)
;
}
EXPORT_SYMBOL_GPL(ath79_ddr_wb_flush);
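/*
 * Illustrative sketch, not part of the original file: a DMA-capable driver
 * typically flushes its port's DDR write buffer before walking freshly
 * written descriptors.  The register index (3) and the handler are made-up
 * example values, not taken from a real driver.
 */
#if 0
static irqreturn_t example_dma_irq(int irq, void *dev)
{
	ath79_ddr_wb_flush(3);
	/* ... the DMA ring contents are now coherent, process them ... */
	return IRQ_HANDLED;
}
#endif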
void ath79_ddr_set_pci_windows(void)
{
BUG_ON(!ath79_ddr_pci_win_base);
__raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0x0);
__raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 0x4);
__raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 0x8);
__raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 0xc);
__raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 0x10);
__raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 0x14);
__raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 0x18);
__raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 0x1c);
}
EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows);
void ath79_device_reset_set(u32 mask)
{
unsigned long flags;
u32 reg;
u32 t;
if (soc_is_ar71xx())
reg = AR71XX_RESET_REG_RESET_MODULE;
else if (soc_is_ar724x())
reg = AR724X_RESET_REG_RESET_MODULE;
else if (soc_is_ar913x())
reg = AR913X_RESET_REG_RESET_MODULE;
else if (soc_is_ar933x())
reg = AR933X_RESET_REG_RESET_MODULE;
else if (soc_is_ar934x())
reg = AR934X_RESET_REG_RESET_MODULE;
else if (soc_is_qca953x())
reg = QCA953X_RESET_REG_RESET_MODULE;
else if (soc_is_qca955x())
reg = QCA955X_RESET_REG_RESET_MODULE;
else if (soc_is_qca956x() || soc_is_tp9343())
reg = QCA956X_RESET_REG_RESET_MODULE;
else
BUG();
spin_lock_irqsave(&ath79_device_reset_lock, flags);
t = ath79_reset_rr(reg);
ath79_reset_wr(reg, t | mask);
spin_unlock_irqrestore(&ath79_device_reset_lock, flags);
}
EXPORT_SYMBOL_GPL(ath79_device_reset_set);
void ath79_device_reset_clear(u32 mask)
{
unsigned long flags;
u32 reg;
u32 t;
if (soc_is_ar71xx())
reg = AR71XX_RESET_REG_RESET_MODULE;
else if (soc_is_ar724x())
reg = AR724X_RESET_REG_RESET_MODULE;
else if (soc_is_ar913x())
reg = AR913X_RESET_REG_RESET_MODULE;
else if (soc_is_ar933x())
reg = AR933X_RESET_REG_RESET_MODULE;
else if (soc_is_ar934x())
reg = AR934X_RESET_REG_RESET_MODULE;
else if (soc_is_qca953x())
reg = QCA953X_RESET_REG_RESET_MODULE;
else if (soc_is_qca955x())
reg = QCA955X_RESET_REG_RESET_MODULE;
else if (soc_is_qca956x() || soc_is_tp9343())
reg = QCA956X_RESET_REG_RESET_MODULE;
else
BUG();
spin_lock_irqsave(&ath79_device_reset_lock, flags);
t = ath79_reset_rr(reg);
ath79_reset_wr(reg, t & ~mask);
spin_unlock_irqrestore(&ath79_device_reset_lock, flags);
}
EXPORT_SYMBOL_GPL(ath79_device_reset_clear);
| linux-master | arch/mips/ath79/common.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atheros AR71XX/AR724X/AR913X common routines
*
* Copyright (C) 2010-2011 Jaiganesh Narayanan <[email protected]>
* Copyright (C) 2011 Gabor Juhos <[email protected]>
*
* Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/ath79-clk.h>
#include <asm/div64.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
#define AR71XX_BASE_FREQ 40000000
#define AR724X_BASE_FREQ 40000000
static struct clk *clks[ATH79_CLK_END];
static struct clk_onecell_data clk_data = {
.clks = clks,
.clk_num = ARRAY_SIZE(clks),
};
static const char * const clk_names[ATH79_CLK_END] = {
[ATH79_CLK_CPU] = "cpu",
[ATH79_CLK_DDR] = "ddr",
[ATH79_CLK_AHB] = "ahb",
[ATH79_CLK_REF] = "ref",
[ATH79_CLK_MDIO] = "mdio",
};
static const char * __init ath79_clk_name(int type)
{
BUG_ON(type >= ARRAY_SIZE(clk_names) || !clk_names[type]);
return clk_names[type];
}
static void __init __ath79_set_clk(int type, const char *name, struct clk *clk)
{
if (IS_ERR(clk))
panic("failed to allocate %s clock structure", clk_names[type]);
clks[type] = clk;
clk_register_clkdev(clk, name, NULL);
}
static struct clk * __init ath79_set_clk(int type, unsigned long rate)
{
const char *name = ath79_clk_name(type);
struct clk *clk;
clk = clk_register_fixed_rate(NULL, name, NULL, 0, rate);
__ath79_set_clk(type, name, clk);
return clk;
}
static struct clk * __init ath79_set_ff_clk(int type, const char *parent,
unsigned int mult, unsigned int div)
{
const char *name = ath79_clk_name(type);
struct clk *clk;
clk = clk_register_fixed_factor(NULL, name, parent, 0, mult, div);
__ath79_set_clk(type, name, clk);
return clk;
}
static unsigned long __init ath79_setup_ref_clk(unsigned long rate)
{
struct clk *clk = clks[ATH79_CLK_REF];
if (clk)
rate = clk_get_rate(clk);
else
clk = ath79_set_clk(ATH79_CLK_REF, rate);
return rate;
}
static void __init ar71xx_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
unsigned long ddr_rate;
unsigned long ahb_rate;
u32 pll;
u32 freq;
u32 div;
ref_rate = ath79_setup_ref_clk(AR71XX_BASE_FREQ);
pll = __raw_readl(pll_base + AR71XX_PLL_REG_CPU_CONFIG);
div = ((pll >> AR71XX_PLL_FB_SHIFT) & AR71XX_PLL_FB_MASK) + 1;
freq = div * ref_rate;
div = ((pll >> AR71XX_CPU_DIV_SHIFT) & AR71XX_CPU_DIV_MASK) + 1;
cpu_rate = freq / div;
div = ((pll >> AR71XX_DDR_DIV_SHIFT) & AR71XX_DDR_DIV_MASK) + 1;
ddr_rate = freq / div;
div = (((pll >> AR71XX_AHB_DIV_SHIFT) & AR71XX_AHB_DIV_MASK) + 1) * 2;
ahb_rate = cpu_rate / div;
ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
}
static void __init ar724x_clocks_init(void __iomem *pll_base)
{
u32 mult, div, ddr_div, ahb_div;
u32 pll;
ath79_setup_ref_clk(AR71XX_BASE_FREQ);
pll = __raw_readl(pll_base + AR724X_PLL_REG_CPU_CONFIG);
mult = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK);
div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK) * 2;
ddr_div = ((pll >> AR724X_DDR_DIV_SHIFT) & AR724X_DDR_DIV_MASK) + 1;
ahb_div = (((pll >> AR724X_AHB_DIV_SHIFT) & AR724X_AHB_DIV_MASK) + 1) * 2;
ath79_set_ff_clk(ATH79_CLK_CPU, "ref", mult, div);
ath79_set_ff_clk(ATH79_CLK_DDR, "ref", mult, div * ddr_div);
ath79_set_ff_clk(ATH79_CLK_AHB, "ref", mult, div * ahb_div);
}
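/*
 * Worked example for the fixed-factor clocks above (illustrative register
 * values, not read from real hardware): with a 40 MHz reference, an FB
 * multiplier of 20 and a REF_DIV field of 1 (so div = 2), the CPU clock is
 * 40 MHz * 20 / 2 = 400 MHz.  A DDR_DIV field of 0 gives ddr_div = 1, so
 * DDR also runs at 400 MHz, and an AHB_DIV field of 1 gives ahb_div = 4,
 * i.e. AHB = 40 MHz * 20 / (2 * 4) = 100 MHz.
 */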
static void __init ar933x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
u32 clock_ctrl;
u32 ref_div;
u32 ninit_mul;
u32 out_div;
u32 cpu_div;
u32 ddr_div;
u32 ahb_div;
u32 t;
t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
if (t & AR933X_BOOTSTRAP_REF_CLK_40)
ref_rate = (40 * 1000 * 1000);
else
ref_rate = (25 * 1000 * 1000);
ath79_setup_ref_clk(ref_rate);
clock_ctrl = __raw_readl(pll_base + AR933X_PLL_CLOCK_CTRL_REG);
if (clock_ctrl & AR933X_PLL_CLOCK_CTRL_BYPASS) {
ref_div = 1;
ninit_mul = 1;
out_div = 1;
cpu_div = 1;
ddr_div = 1;
ahb_div = 1;
} else {
u32 cpu_config;
u32 t;
cpu_config = __raw_readl(pll_base + AR933X_PLL_CPU_CONFIG_REG);
t = (cpu_config >> AR933X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
AR933X_PLL_CPU_CONFIG_REFDIV_MASK;
ref_div = t;
ninit_mul = (cpu_config >> AR933X_PLL_CPU_CONFIG_NINT_SHIFT) &
AR933X_PLL_CPU_CONFIG_NINT_MASK;
t = (cpu_config >> AR933X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
AR933X_PLL_CPU_CONFIG_OUTDIV_MASK;
if (t == 0)
t = 1;
out_div = (1 << t);
cpu_div = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_CPU_DIV_SHIFT) &
AR933X_PLL_CLOCK_CTRL_CPU_DIV_MASK) + 1;
ddr_div = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_DDR_DIV_SHIFT) &
AR933X_PLL_CLOCK_CTRL_DDR_DIV_MASK) + 1;
ahb_div = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT) &
AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK) + 1;
}
ath79_set_ff_clk(ATH79_CLK_CPU, "ref", ninit_mul,
ref_div * out_div * cpu_div);
ath79_set_ff_clk(ATH79_CLK_DDR, "ref", ninit_mul,
ref_div * out_div * ddr_div);
ath79_set_ff_clk(ATH79_CLK_AHB, "ref", ninit_mul,
ref_div * out_div * ahb_div);
}
static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac,
u32 frac, u32 out_div)
{
u64 t;
u32 ret;
t = ref;
t *= nint;
do_div(t, ref_div);
ret = t;
t = ref;
t *= nfrac;
do_div(t, ref_div * frac);
ret += t;
ret /= (1 << out_div);
return ret;
}
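/*
 * Worked example for ar934x_get_pll_freq() above (illustrative values):
 * the PLL rate is ref * nint / ref_div + ref * nfrac / (ref_div * frac),
 * divided by 2^out_div.  With ref = 40 MHz, ref_div = 1, nint = 14,
 * nfrac = 0 and out_div = 0 this gives 40 MHz * 14 = 560 MHz; the same
 * inputs with out_div = 1 would give 280 MHz.
 */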
static void __init ar934x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
unsigned long ddr_rate;
unsigned long ahb_rate;
u32 pll, out_div, ref_div, nint, nfrac, frac, clk_ctrl, postdiv;
u32 cpu_pll, ddr_pll;
u32 bootstrap;
void __iomem *dpll_base;
dpll_base = ioremap(AR934X_SRIF_BASE, AR934X_SRIF_SIZE);
bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
ref_rate = 40 * 1000 * 1000;
else
ref_rate = 25 * 1000 * 1000;
ref_rate = ath79_setup_ref_clk(ref_rate);
pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL2_REG);
if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) {
out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) &
AR934X_SRIF_DPLL2_OUTDIV_MASK;
pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL1_REG);
nint = (pll >> AR934X_SRIF_DPLL1_NINT_SHIFT) &
AR934X_SRIF_DPLL1_NINT_MASK;
nfrac = pll & AR934X_SRIF_DPLL1_NFRAC_MASK;
ref_div = (pll >> AR934X_SRIF_DPLL1_REFDIV_SHIFT) &
AR934X_SRIF_DPLL1_REFDIV_MASK;
frac = 1 << 18;
} else {
pll = __raw_readl(pll_base + AR934X_PLL_CPU_CONFIG_REG);
out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
AR934X_PLL_CPU_CONFIG_NINT_MASK;
nfrac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
frac = 1 << 6;
}
cpu_pll = ar934x_get_pll_freq(ref_rate, ref_div, nint,
nfrac, frac, out_div);
pll = __raw_readl(dpll_base + AR934X_SRIF_DDR_DPLL2_REG);
if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) {
out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) &
AR934X_SRIF_DPLL2_OUTDIV_MASK;
pll = __raw_readl(dpll_base + AR934X_SRIF_DDR_DPLL1_REG);
nint = (pll >> AR934X_SRIF_DPLL1_NINT_SHIFT) &
AR934X_SRIF_DPLL1_NINT_MASK;
nfrac = pll & AR934X_SRIF_DPLL1_NFRAC_MASK;
ref_div = (pll >> AR934X_SRIF_DPLL1_REFDIV_SHIFT) &
AR934X_SRIF_DPLL1_REFDIV_MASK;
frac = 1 << 18;
} else {
pll = __raw_readl(pll_base + AR934X_PLL_DDR_CONFIG_REG);
out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
AR934X_PLL_DDR_CONFIG_NINT_MASK;
nfrac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
frac = 1 << 10;
}
ddr_pll = ar934x_get_pll_freq(ref_rate, ref_div, nint,
nfrac, frac, out_div);
clk_ctrl = __raw_readl(pll_base + AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT) &
AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK;
if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS)
cpu_rate = ref_rate;
else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL)
cpu_rate = cpu_pll / (postdiv + 1);
else
cpu_rate = ddr_pll / (postdiv + 1);
postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT) &
AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK;
if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS)
ddr_rate = ref_rate;
else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL)
ddr_rate = ddr_pll / (postdiv + 1);
else
ddr_rate = cpu_pll / (postdiv + 1);
postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT) &
AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK;
if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS)
ahb_rate = ref_rate;
else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL)
ahb_rate = ddr_pll / (postdiv + 1);
else
ahb_rate = cpu_pll / (postdiv + 1);
ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
clk_ctrl = __raw_readl(pll_base + AR934X_PLL_SWITCH_CLOCK_CONTROL_REG);
if (clk_ctrl & AR934X_PLL_SWITCH_CLOCK_CONTROL_MDIO_CLK_SEL)
ath79_set_clk(ATH79_CLK_MDIO, 100 * 1000 * 1000);
iounmap(dpll_base);
}
static void __init qca953x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
unsigned long ddr_rate;
unsigned long ahb_rate;
u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
u32 cpu_pll, ddr_pll;
u32 bootstrap;
bootstrap = ath79_reset_rr(QCA953X_RESET_REG_BOOTSTRAP);
if (bootstrap & QCA953X_BOOTSTRAP_REF_CLK_40)
ref_rate = 40 * 1000 * 1000;
else
ref_rate = 25 * 1000 * 1000;
ref_rate = ath79_setup_ref_clk(ref_rate);
pll = __raw_readl(pll_base + QCA953X_PLL_CPU_CONFIG_REG);
out_div = (pll >> QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
QCA953X_PLL_CPU_CONFIG_REFDIV_MASK;
nint = (pll >> QCA953X_PLL_CPU_CONFIG_NINT_SHIFT) &
QCA953X_PLL_CPU_CONFIG_NINT_MASK;
frac = (pll >> QCA953X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
QCA953X_PLL_CPU_CONFIG_NFRAC_MASK;
cpu_pll = nint * ref_rate / ref_div;
cpu_pll += frac * (ref_rate >> 6) / ref_div;
cpu_pll /= (1 << out_div);
pll = __raw_readl(pll_base + QCA953X_PLL_DDR_CONFIG_REG);
out_div = (pll >> QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
QCA953X_PLL_DDR_CONFIG_REFDIV_MASK;
nint = (pll >> QCA953X_PLL_DDR_CONFIG_NINT_SHIFT) &
QCA953X_PLL_DDR_CONFIG_NINT_MASK;
frac = (pll >> QCA953X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
QCA953X_PLL_DDR_CONFIG_NFRAC_MASK;
ddr_pll = nint * ref_rate / ref_div;
ddr_pll += frac * (ref_rate >> 6) / (ref_div << 4);
ddr_pll /= (1 << out_div);
clk_ctrl = __raw_readl(pll_base + QCA953X_PLL_CLK_CTRL_REG);
postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
if (clk_ctrl & QCA953X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
cpu_rate = ref_rate;
else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL)
cpu_rate = cpu_pll / (postdiv + 1);
else
cpu_rate = ddr_pll / (postdiv + 1);
postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
if (clk_ctrl & QCA953X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
ddr_rate = ref_rate;
else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL)
ddr_rate = ddr_pll / (postdiv + 1);
else
ddr_rate = cpu_pll / (postdiv + 1);
postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
if (clk_ctrl & QCA953X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
ahb_rate = ref_rate;
else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
ahb_rate = ddr_pll / (postdiv + 1);
else
ahb_rate = cpu_pll / (postdiv + 1);
ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
}
static void __init qca955x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
unsigned long ddr_rate;
unsigned long ahb_rate;
u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
u32 cpu_pll, ddr_pll;
u32 bootstrap;
bootstrap = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP);
if (bootstrap & QCA955X_BOOTSTRAP_REF_CLK_40)
ref_rate = 40 * 1000 * 1000;
else
ref_rate = 25 * 1000 * 1000;
ref_rate = ath79_setup_ref_clk(ref_rate);
pll = __raw_readl(pll_base + QCA955X_PLL_CPU_CONFIG_REG);
out_div = (pll >> QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
QCA955X_PLL_CPU_CONFIG_REFDIV_MASK;
nint = (pll >> QCA955X_PLL_CPU_CONFIG_NINT_SHIFT) &
QCA955X_PLL_CPU_CONFIG_NINT_MASK;
frac = (pll >> QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
QCA955X_PLL_CPU_CONFIG_NFRAC_MASK;
cpu_pll = nint * ref_rate / ref_div;
cpu_pll += frac * ref_rate / (ref_div * (1 << 6));
cpu_pll /= (1 << out_div);
pll = __raw_readl(pll_base + QCA955X_PLL_DDR_CONFIG_REG);
out_div = (pll >> QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
QCA955X_PLL_DDR_CONFIG_REFDIV_MASK;
nint = (pll >> QCA955X_PLL_DDR_CONFIG_NINT_SHIFT) &
QCA955X_PLL_DDR_CONFIG_NINT_MASK;
frac = (pll >> QCA955X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
QCA955X_PLL_DDR_CONFIG_NFRAC_MASK;
ddr_pll = nint * ref_rate / ref_div;
ddr_pll += frac * ref_rate / (ref_div * (1 << 10));
ddr_pll /= (1 << out_div);
clk_ctrl = __raw_readl(pll_base + QCA955X_PLL_CLK_CTRL_REG);
postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
if (clk_ctrl & QCA955X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
cpu_rate = ref_rate;
else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL)
cpu_rate = ddr_pll / (postdiv + 1);
else
cpu_rate = cpu_pll / (postdiv + 1);
postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
if (clk_ctrl & QCA955X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
ddr_rate = ref_rate;
else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL)
ddr_rate = cpu_pll / (postdiv + 1);
else
ddr_rate = ddr_pll / (postdiv + 1);
postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
if (clk_ctrl & QCA955X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
ahb_rate = ref_rate;
else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
ahb_rate = ddr_pll / (postdiv + 1);
else
ahb_rate = cpu_pll / (postdiv + 1);
ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
}
static void __init qca956x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
unsigned long ddr_rate;
unsigned long ahb_rate;
u32 pll, out_div, ref_div, nint, hfrac, lfrac, clk_ctrl, postdiv;
u32 cpu_pll, ddr_pll;
u32 bootstrap;
/*
 * The QCA956x timer init workaround has to be applied right before setting
 * up the clocks; otherwise there will be no jiffies.
*/
u32 misc;
misc = ath79_reset_rr(AR71XX_RESET_REG_MISC_INT_ENABLE);
misc |= MISC_INT_MIPS_SI_TIMERINT_MASK;
ath79_reset_wr(AR71XX_RESET_REG_MISC_INT_ENABLE, misc);
bootstrap = ath79_reset_rr(QCA956X_RESET_REG_BOOTSTRAP);
if (bootstrap & QCA956X_BOOTSTRAP_REF_CLK_40)
ref_rate = 40 * 1000 * 1000;
else
ref_rate = 25 * 1000 * 1000;
ref_rate = ath79_setup_ref_clk(ref_rate);
pll = __raw_readl(pll_base + QCA956X_PLL_CPU_CONFIG_REG);
out_div = (pll >> QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
QCA956X_PLL_CPU_CONFIG_REFDIV_MASK;
pll = __raw_readl(pll_base + QCA956X_PLL_CPU_CONFIG1_REG);
nint = (pll >> QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT) &
QCA956X_PLL_CPU_CONFIG1_NINT_MASK;
hfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT) &
QCA956X_PLL_CPU_CONFIG1_NFRAC_H_MASK;
lfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_L_SHIFT) &
QCA956X_PLL_CPU_CONFIG1_NFRAC_L_MASK;
cpu_pll = nint * ref_rate / ref_div;
cpu_pll += (lfrac * ref_rate) / ((ref_div * 25) << 13);
cpu_pll += (hfrac >> 13) * ref_rate / ref_div;
cpu_pll /= (1 << out_div);
pll = __raw_readl(pll_base + QCA956X_PLL_DDR_CONFIG_REG);
out_div = (pll >> QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
QCA956X_PLL_DDR_CONFIG_REFDIV_MASK;
pll = __raw_readl(pll_base + QCA956X_PLL_DDR_CONFIG1_REG);
nint = (pll >> QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT) &
QCA956X_PLL_DDR_CONFIG1_NINT_MASK;
hfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT) &
QCA956X_PLL_DDR_CONFIG1_NFRAC_H_MASK;
lfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_L_SHIFT) &
QCA956X_PLL_DDR_CONFIG1_NFRAC_L_MASK;
ddr_pll = nint * ref_rate / ref_div;
ddr_pll += (lfrac * ref_rate) / ((ref_div * 25) << 13);
ddr_pll += (hfrac >> 13) * ref_rate / ref_div;
ddr_pll /= (1 << out_div);
clk_ctrl = __raw_readl(pll_base + QCA956X_PLL_CLK_CTRL_REG);
postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
cpu_rate = ref_rate;
else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_CPUPLL)
cpu_rate = ddr_pll / (postdiv + 1);
else
cpu_rate = cpu_pll / (postdiv + 1);
postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
if (clk_ctrl & QCA956X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
ddr_rate = ref_rate;
else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_DDRPLL)
ddr_rate = cpu_pll / (postdiv + 1);
else
ddr_rate = ddr_pll / (postdiv + 1);
postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
if (clk_ctrl & QCA956X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
ahb_rate = ref_rate;
else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
ahb_rate = ddr_pll / (postdiv + 1);
else
ahb_rate = cpu_pll / (postdiv + 1);
ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
}
static void __init ath79_clocks_init_dt(struct device_node *np)
{
struct clk *ref_clk;
void __iomem *pll_base;
ref_clk = of_clk_get(np, 0);
if (!IS_ERR(ref_clk))
clks[ATH79_CLK_REF] = ref_clk;
pll_base = of_iomap(np, 0);
if (!pll_base) {
pr_err("%pOF: can't map pll registers\n", np);
goto err_clk;
}
if (of_device_is_compatible(np, "qca,ar7100-pll"))
ar71xx_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,ar7240-pll") ||
of_device_is_compatible(np, "qca,ar9130-pll"))
ar724x_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,ar9330-pll"))
ar933x_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,ar9340-pll"))
ar934x_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,qca9530-pll"))
qca953x_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,qca9550-pll"))
qca955x_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,qca9560-pll"))
qca956x_clocks_init(pll_base);
if (!clks[ATH79_CLK_MDIO])
clks[ATH79_CLK_MDIO] = clks[ATH79_CLK_REF];
if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data)) {
pr_err("%pOF: could not register clk provider\n", np);
goto err_iounmap;
}
return;
err_iounmap:
iounmap(pll_base);
err_clk:
clk_put(ref_clk);
}
CLK_OF_DECLARE(ar7100_clk, "qca,ar7100-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar7240_clk, "qca,ar7240-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9130_clk, "qca,ar9130-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9330_clk, "qca,ar9330-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9340_clk, "qca,ar9340-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9530_clk, "qca,qca9530-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9550_clk, "qca,qca9550-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9560_clk, "qca,qca9560-pll", ath79_clocks_init_dt);
| linux-master | arch/mips/ath79/clock.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atheros AR71XX/AR724X/AR913X specific setup
*
* Copyright (C) 2010-2011 Jaiganesh Narayanan <[email protected]>
* Copyright (C) 2008-2011 Gabor Juhos <[email protected]>
* Copyright (C) 2008 Imre Kaloz <[email protected]>
*
* Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/of_clk.h>
#include <linux/of_fdt.h>
#include <linux/irqchip.h>
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/time.h> /* for mips_hpt_frequency */
#include <asm/reboot.h> /* for _machine_{restart,halt} */
#include <asm/prom.h>
#include <asm/fw/fw.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
#define ATH79_SYS_TYPE_LEN 64
static char ath79_sys_type[ATH79_SYS_TYPE_LEN];
static void ath79_halt(void)
{
while (1)
cpu_wait();
}
static void __init ath79_detect_sys_type(void)
{
char *chip = "????";
u32 id;
u32 major;
u32 minor;
u32 rev = 0;
u32 ver = 1;
id = ath79_reset_rr(AR71XX_RESET_REG_REV_ID);
major = id & REV_ID_MAJOR_MASK;
switch (major) {
case REV_ID_MAJOR_AR71XX:
minor = id & AR71XX_REV_ID_MINOR_MASK;
rev = id >> AR71XX_REV_ID_REVISION_SHIFT;
rev &= AR71XX_REV_ID_REVISION_MASK;
switch (minor) {
case AR71XX_REV_ID_MINOR_AR7130:
ath79_soc = ATH79_SOC_AR7130;
chip = "7130";
break;
case AR71XX_REV_ID_MINOR_AR7141:
ath79_soc = ATH79_SOC_AR7141;
chip = "7141";
break;
case AR71XX_REV_ID_MINOR_AR7161:
ath79_soc = ATH79_SOC_AR7161;
chip = "7161";
break;
}
break;
case REV_ID_MAJOR_AR7240:
ath79_soc = ATH79_SOC_AR7240;
chip = "7240";
rev = id & AR724X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_AR7241:
ath79_soc = ATH79_SOC_AR7241;
chip = "7241";
rev = id & AR724X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_AR7242:
ath79_soc = ATH79_SOC_AR7242;
chip = "7242";
rev = id & AR724X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_AR913X:
minor = id & AR913X_REV_ID_MINOR_MASK;
rev = id >> AR913X_REV_ID_REVISION_SHIFT;
rev &= AR913X_REV_ID_REVISION_MASK;
switch (minor) {
case AR913X_REV_ID_MINOR_AR9130:
ath79_soc = ATH79_SOC_AR9130;
chip = "9130";
break;
case AR913X_REV_ID_MINOR_AR9132:
ath79_soc = ATH79_SOC_AR9132;
chip = "9132";
break;
}
break;
case REV_ID_MAJOR_AR9330:
ath79_soc = ATH79_SOC_AR9330;
chip = "9330";
rev = id & AR933X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_AR9331:
ath79_soc = ATH79_SOC_AR9331;
chip = "9331";
rev = id & AR933X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_AR9341:
ath79_soc = ATH79_SOC_AR9341;
chip = "9341";
rev = id & AR934X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_AR9342:
ath79_soc = ATH79_SOC_AR9342;
chip = "9342";
rev = id & AR934X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_AR9344:
ath79_soc = ATH79_SOC_AR9344;
chip = "9344";
rev = id & AR934X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_QCA9533_V2:
ver = 2;
ath79_soc_rev = 2;
fallthrough;
case REV_ID_MAJOR_QCA9533:
ath79_soc = ATH79_SOC_QCA9533;
chip = "9533";
rev = id & QCA953X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_QCA9556:
ath79_soc = ATH79_SOC_QCA9556;
chip = "9556";
rev = id & QCA955X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_QCA9558:
ath79_soc = ATH79_SOC_QCA9558;
chip = "9558";
rev = id & QCA955X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_QCA956X:
ath79_soc = ATH79_SOC_QCA956X;
chip = "956X";
rev = id & QCA956X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_QCN550X:
ath79_soc = ATH79_SOC_QCA956X;
chip = "550X";
rev = id & QCA956X_REV_ID_REVISION_MASK;
break;
case REV_ID_MAJOR_TP9343:
ath79_soc = ATH79_SOC_TP9343;
chip = "9343";
rev = id & QCA956X_REV_ID_REVISION_MASK;
break;
default:
panic("ath79: unknown SoC, id:0x%08x", id);
}
if (ver == 1)
ath79_soc_rev = rev;
if (soc_is_qca953x() || soc_is_qca955x() || soc_is_qca956x())
sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s ver %u rev %u",
chip, ver, rev);
else if (soc_is_tp9343())
sprintf(ath79_sys_type, "Qualcomm Atheros TP%s rev %u",
chip, rev);
else
sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
pr_info("SoC: %s\n", ath79_sys_type);
}
const char *get_system_type(void)
{
return ath79_sys_type;
}
unsigned int get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;
}
void __init plat_mem_setup(void)
{
void *dtb;
set_io_port_base(KSEG1);
/* Get the position of the FDT passed by the bootloader */
dtb = (void *)fw_getenvl("fdt_start");
if (dtb == NULL)
dtb = get_fdt();
if (dtb)
__dt_setup_arch((void *)KSEG0ADDR(dtb));
ath79_reset_base = ioremap(AR71XX_RESET_BASE,
AR71XX_RESET_SIZE);
ath79_pll_base = ioremap(AR71XX_PLL_BASE,
AR71XX_PLL_SIZE);
ath79_detect_sys_type();
ath79_ddr_ctrl_init();
detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
_machine_halt = ath79_halt;
pm_power_off = ath79_halt;
}
void __init plat_time_init(void)
{
struct device_node *np;
struct clk *clk;
unsigned long cpu_clk_rate;
of_clk_init(NULL);
np = of_get_cpu_node(0, NULL);
if (!np) {
pr_err("Failed to get CPU node\n");
return;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
return;
}
cpu_clk_rate = clk_get_rate(clk);
pr_info("CPU clock: %lu.%03lu MHz\n",
cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000);
mips_hpt_frequency = cpu_clk_rate / 2;
clk_put(clk);
}
void __init arch_init_irq(void)
{
irqchip_init();
}
| linux-master | arch/mips/ath79/setup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atheros AR71XX/AR724X/AR913X specific prom routines
*
* Copyright (C) 2015 Laurent Fasnacht <[email protected]>
* Copyright (C) 2008-2010 Gabor Juhos <[email protected]>
* Copyright (C) 2008 Imre Kaloz <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/initrd.h>
#include <asm/bootinfo.h>
#include <asm/addrspace.h>
#include <asm/fw/fw.h>
#include "common.h"
void __init prom_init(void)
{
fw_init_cmdline();
#ifdef CONFIG_BLK_DEV_INITRD
/* Read the initrd address from the firmware environment */
initrd_start = fw_getenvl("initrd_start");
if (initrd_start) {
initrd_start = KSEG0ADDR(initrd_start);
initrd_end = initrd_start + fw_getenvl("initrd_size");
}
#endif
}
| linux-master | arch/mips/ath79/prom.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2010 Gabor Juhos <[email protected]>
*/
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/serial_reg.h>
#include <asm/setup.h>
#include "devices.h"
#include "ar2315_regs.h"
#include "ar5312_regs.h"
static inline void prom_uart_wr(void __iomem *base, unsigned reg,
unsigned char ch)
{
__raw_writel(ch, base + 4 * reg);
}
static inline unsigned char prom_uart_rr(void __iomem *base, unsigned reg)
{
return __raw_readl(base + 4 * reg);
}
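/*
* Early console output: the UART base is resolved lazily depending on
* the SoC, and the transmitter is polled before and after each byte.
*/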
void prom_putchar(char ch)
{
static void __iomem *base;
if (unlikely(base == NULL)) {
if (is_ar2315())
base = (void __iomem *)(KSEG1ADDR(AR2315_UART0_BASE));
else
base = (void __iomem *)(KSEG1ADDR(AR5312_UART0_BASE));
}
while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
;
prom_uart_wr(base, UART_TX, (unsigned char)ch);
while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
;
}
| linux-master | arch/mips/ath25/early_printk.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
* Copyright (C) 2006 FON Technology, SL.
* Copyright (C) 2006 Imre Kaloz <[email protected]>
* Copyright (C) 2006 Felix Fietkau <[email protected]>
* Copyright (C) 2012 Alexandros C. Couloumbis <[email protected]>
*/
/*
* Platform devices for Atheros AR2315 SoCs
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/time.h>
#include <ath25_platform.h>
#include "devices.h"
#include "ar2315.h"
#include "ar2315_regs.h"
static void __iomem *ar2315_rst_base;
static struct irq_domain *ar2315_misc_irq_domain;
static inline u32 ar2315_rst_reg_read(u32 reg)
{
return __raw_readl(ar2315_rst_base + reg);
}
static inline void ar2315_rst_reg_write(u32 reg, u32 val)
{
__raw_writel(val, ar2315_rst_base + reg);
}
static inline void ar2315_rst_reg_mask(u32 reg, u32 mask, u32 val)
{
u32 ret = ar2315_rst_reg_read(reg);
ret &= ~mask;
ret |= val;
ar2315_rst_reg_write(reg, ret);
}
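/* Fatal AHB bus error: acknowledge the error and restart the machine */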
static irqreturn_t ar2315_ahb_err_handler(int cpl, void *dev_id)
{
ar2315_rst_reg_write(AR2315_AHB_ERR0, AR2315_AHB_ERROR_DET);
ar2315_rst_reg_read(AR2315_AHB_ERR1);
pr_emerg("AHB fatal error\n");
machine_restart("AHB error"); /* Catastrophic failure */
return IRQ_HANDLED;
}
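/* Chained handler demultiplexing the AR2315 MISC interrupt sources */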
static void ar2315_misc_irq_handler(struct irq_desc *desc)
{
u32 pending = ar2315_rst_reg_read(AR2315_ISR) &
ar2315_rst_reg_read(AR2315_IMR);
unsigned nr;
int ret = 0;
if (pending) {
struct irq_domain *domain = irq_desc_get_handler_data(desc);
nr = __ffs(pending);
if (nr == AR2315_MISC_IRQ_GPIO)
ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_GPIO);
else if (nr == AR2315_MISC_IRQ_WATCHDOG)
ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_WD);
ret = generic_handle_domain_irq(domain, nr);
}
if (!pending || ret)
spurious_interrupt();
}
static void ar2315_misc_irq_unmask(struct irq_data *d)
{
ar2315_rst_reg_mask(AR2315_IMR, 0, BIT(d->hwirq));
}
static void ar2315_misc_irq_mask(struct irq_data *d)
{
ar2315_rst_reg_mask(AR2315_IMR, BIT(d->hwirq), 0);
}
static struct irq_chip ar2315_misc_irq_chip = {
.name = "ar2315-misc",
.irq_unmask = ar2315_misc_irq_unmask,
.irq_mask = ar2315_misc_irq_mask,
};
static int ar2315_misc_irq_map(struct irq_domain *d, unsigned irq,
irq_hw_number_t hw)
{
irq_set_chip_and_handler(irq, &ar2315_misc_irq_chip, handle_level_irq);
return 0;
}
static const struct irq_domain_ops ar2315_misc_irq_domain_ops = {
.map = ar2315_misc_irq_map,
};
/*
* Called when an interrupt is received, this function
* determines exactly which interrupt it was, and it
* invokes the appropriate handler.
*
* Implicitly, we also define interrupt priority by
* choosing which to dispatch first.
*/
static void ar2315_irq_dispatch(void)
{
u32 pending = read_c0_status() & read_c0_cause();
if (pending & CAUSEF_IP3)
do_IRQ(AR2315_IRQ_WLAN0);
#ifdef CONFIG_PCI_AR2315
else if (pending & CAUSEF_IP5)
do_IRQ(AR2315_IRQ_LCBUS_PCI);
#endif
else if (pending & CAUSEF_IP2)
do_IRQ(AR2315_IRQ_MISC);
else if (pending & CAUSEF_IP7)
do_IRQ(ATH25_IRQ_CPU_CLOCK);
else
spurious_interrupt();
}
void __init ar2315_arch_init_irq(void)
{
struct irq_domain *domain;
unsigned irq;
ath25_irq_dispatch = ar2315_irq_dispatch;
domain = irq_domain_add_linear(NULL, AR2315_MISC_IRQ_COUNT,
&ar2315_misc_irq_domain_ops, NULL);
if (!domain)
panic("Failed to add IRQ domain");
irq = irq_create_mapping(domain, AR2315_MISC_IRQ_AHB);
if (request_irq(irq, ar2315_ahb_err_handler, 0, "ar2315-ahb-error",
NULL))
pr_err("Failed to register ar2315-ahb-error interrupt\n");
irq_set_chained_handler_and_data(AR2315_IRQ_MISC,
ar2315_misc_irq_handler, domain);
ar2315_misc_irq_domain = domain;
}
void __init ar2315_init_devices(void)
{
/* Find board configuration */
ath25_find_config(AR2315_SPI_READ_BASE, AR2315_SPI_READ_SIZE);
ath25_add_wmac(0, AR2315_WLAN0_BASE, AR2315_IRQ_WLAN0);
}
static void ar2315_restart(char *command)
{
void (*mips_reset_vec)(void) = (void *)0xbfc00000;
local_irq_disable();
/* try reset the system via reset control */
ar2315_rst_reg_write(AR2315_COLD_RESET, AR2317_RESET_SYSTEM);
/* Cold reset does not work on the AR2315/6, use the GPIO reset bits
* as a workaround. Give it some time to attempt a gpio based hardware
* reset (atheros reference design workaround) */
/* TODO: implement the GPIO reset workaround */
/* Some boards (e.g. Senao EOC-2610) don't implement the reset logic
* workaround. Attempt to jump to the mips reset location -
* the boot loader itself might be able to recover the system */
mips_reset_vec();
}
/*
* This table is indexed by bits 5..4 of the CLOCKCTL1 register
* to determine the predivisor value.
*/
static int clockctl1_predivide_table[4] __initdata = { 1, 2, 4, 5 };
static int pllc_divide_table[5] __initdata = { 2, 3, 4, 6, 3 };
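/*
* Derive a clock rate from the PLLC configuration and the given
* clock control register value.
*/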
static unsigned __init ar2315_sys_clk(u32 clock_ctl)
{
unsigned int pllc_ctrl, cpu_div;
unsigned int pllc_out, refdiv, fdiv, divby2;
unsigned int clk_div;
pllc_ctrl = ar2315_rst_reg_read(AR2315_PLLC_CTL);
refdiv = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_REF_DIV);
refdiv = clockctl1_predivide_table[refdiv];
fdiv = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_FDBACK_DIV);
divby2 = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_ADD_FDBACK_DIV) + 1;
pllc_out = (40000000 / refdiv) * (2 * divby2) * fdiv;
/* clkm input selected */
switch (clock_ctl & AR2315_CPUCLK_CLK_SEL_M) {
case 0:
case 1:
clk_div = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_CLKM_DIV);
clk_div = pllc_divide_table[clk_div];
break;
case 2:
clk_div = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_CLKC_DIV);
clk_div = pllc_divide_table[clk_div];
break;
default:
pllc_out = 40000000;
clk_div = 1;
break;
}
cpu_div = ATH25_REG_MS(clock_ctl, AR2315_CPUCLK_CLK_DIV);
cpu_div = cpu_div * 2 ?: 1;
return pllc_out / (clk_div * cpu_div);
}
static inline unsigned ar2315_cpu_frequency(void)
{
return ar2315_sys_clk(ar2315_rst_reg_read(AR2315_CPUCLK));
}
static inline unsigned ar2315_apb_frequency(void)
{
return ar2315_sys_clk(ar2315_rst_reg_read(AR2315_AMBACLK));
}
void __init ar2315_plat_time_init(void)
{
mips_hpt_frequency = ar2315_cpu_frequency() / 2;
}
void __init ar2315_plat_mem_setup(void)
{
void __iomem *sdram_base;
u32 memsize, memcfg;
u32 devid;
u32 config;
/* Detect memory size */
sdram_base = ioremap(AR2315_SDRAMCTL_BASE,
AR2315_SDRAMCTL_SIZE);
memcfg = __raw_readl(sdram_base + AR2315_MEM_CFG);
memsize = 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_DATA_WIDTH);
memsize <<= 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_COL_WIDTH);
memsize <<= 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_ROW_WIDTH);
memsize <<= 3;
memblock_add(0, memsize);
iounmap(sdram_base);
ar2315_rst_base = ioremap(AR2315_RST_BASE, AR2315_RST_SIZE);
/* Detect the hardware based on the device ID */
devid = ar2315_rst_reg_read(AR2315_SREV) & AR2315_REV_CHIP;
switch (devid) {
case 0x91: /* Need to check */
ath25_soc = ATH25_SOC_AR2318;
break;
case 0x90:
ath25_soc = ATH25_SOC_AR2317;
break;
case 0x87:
ath25_soc = ATH25_SOC_AR2316;
break;
case 0x86:
default:
ath25_soc = ATH25_SOC_AR2315;
break;
}
ath25_board.devid = devid;
/* Clear any lingering AHB errors */
config = read_c0_config();
write_c0_config(config & ~0x3);
ar2315_rst_reg_write(AR2315_AHB_ERR0, AR2315_AHB_ERROR_DET);
ar2315_rst_reg_read(AR2315_AHB_ERR1);
ar2315_rst_reg_write(AR2315_WDT_CTRL, AR2315_WDT_CTRL_IGNORE);
_machine_restart = ar2315_restart;
}
#ifdef CONFIG_PCI_AR2315
static struct resource ar2315_pci_res[] = {
{
.name = "ar2315-pci-ctrl",
.flags = IORESOURCE_MEM,
.start = AR2315_PCI_BASE,
.end = AR2315_PCI_BASE + AR2315_PCI_SIZE - 1,
},
{
.name = "ar2315-pci-ext",
.flags = IORESOURCE_MEM,
.start = AR2315_PCI_EXT_BASE,
.end = AR2315_PCI_EXT_BASE + AR2315_PCI_EXT_SIZE - 1,
},
{
.name = "ar2315-pci",
.flags = IORESOURCE_IRQ,
.start = AR2315_IRQ_LCBUS_PCI,
.end = AR2315_IRQ_LCBUS_PCI,
},
};
#endif
void __init ar2315_arch_init(void)
{
unsigned irq = irq_create_mapping(ar2315_misc_irq_domain,
AR2315_MISC_IRQ_UART0);
ath25_serial_setup(AR2315_UART0_BASE, irq, ar2315_apb_frequency());
#ifdef CONFIG_PCI_AR2315
if (ath25_soc == ATH25_SOC_AR2315) {
/* Reset PCI DMA logic */
ar2315_rst_reg_mask(AR2315_RESET, 0, AR2315_RESET_PCIDMA);
msleep(20);
ar2315_rst_reg_mask(AR2315_RESET, AR2315_RESET_PCIDMA, 0);
msleep(20);
/* Configure endians */
ar2315_rst_reg_mask(AR2315_ENDIAN_CTL, 0, AR2315_CONFIG_PCIAHB |
AR2315_CONFIG_PCIAHB_BRIDGE);
/* Configure as PCI host with DMA */
ar2315_rst_reg_write(AR2315_PCICLK, AR2315_PCICLK_PLLC_CLKM |
(AR2315_PCICLK_IN_FREQ_DIV_6 <<
AR2315_PCICLK_DIV_S));
ar2315_rst_reg_mask(AR2315_AHB_ARB_CTL, 0, AR2315_ARB_PCI);
ar2315_rst_reg_mask(AR2315_IF_CTL, AR2315_IF_PCI_CLK_MASK |
AR2315_IF_MASK, AR2315_IF_PCI |
AR2315_IF_PCI_HOST | AR2315_IF_PCI_INTR |
(AR2315_IF_PCI_CLK_OUTPUT_CLK <<
AR2315_IF_PCI_CLK_SHIFT));
platform_device_register_simple("ar2315-pci", -1,
ar2315_pci_res,
ARRAY_SIZE(ar2315_pci_res));
}
#endif
}
| linux-master | arch/mips/ath25/ar2315.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
* Copyright (C) 2006 FON Technology, SL.
* Copyright (C) 2006 Imre Kaloz <[email protected]>
* Copyright (C) 2006-2009 Felix Fietkau <[email protected]>
* Copyright (C) 2012 Alexandros C. Couloumbis <[email protected]>
*/
/*
* Platform devices for Atheros AR5312 SoCs
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/reboot.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/time.h>
#include <ath25_platform.h>
#include "devices.h"
#include "ar5312.h"
#include "ar5312_regs.h"
static void __iomem *ar5312_rst_base;
static struct irq_domain *ar5312_misc_irq_domain;
static inline u32 ar5312_rst_reg_read(u32 reg)
{
return __raw_readl(ar5312_rst_base + reg);
}
static inline void ar5312_rst_reg_write(u32 reg, u32 val)
{
__raw_writel(val, ar5312_rst_base + reg);
}
static inline void ar5312_rst_reg_mask(u32 reg, u32 mask, u32 val)
{
u32 ret = ar5312_rst_reg_read(reg);
ret &= ~mask;
ret |= val;
ar5312_rst_reg_write(reg, ret);
}
static irqreturn_t ar5312_ahb_err_handler(int cpl, void *dev_id)
{
u32 proc1 = ar5312_rst_reg_read(AR5312_PROC1);
u32 proc_addr = ar5312_rst_reg_read(AR5312_PROCADDR); /* clears error */
u32 dma1 = ar5312_rst_reg_read(AR5312_DMA1);
u32 dma_addr = ar5312_rst_reg_read(AR5312_DMAADDR); /* clears error */
pr_emerg("AHB interrupt: PROCADDR=0x%8.8x PROC1=0x%8.8x DMAADDR=0x%8.8x DMA1=0x%8.8x\n",
proc_addr, proc1, dma_addr, dma1);
machine_restart("AHB error"); /* Catastrophic failure */
return IRQ_HANDLED;
}
static void ar5312_misc_irq_handler(struct irq_desc *desc)
{
u32 pending = ar5312_rst_reg_read(AR5312_ISR) &
ar5312_rst_reg_read(AR5312_IMR);
unsigned nr;
int ret = 0;
if (pending) {
struct irq_domain *domain = irq_desc_get_handler_data(desc);
nr = __ffs(pending);
ret = generic_handle_domain_irq(domain, nr);
if (nr == AR5312_MISC_IRQ_TIMER)
ar5312_rst_reg_read(AR5312_TIMER);
}
if (!pending || ret)
spurious_interrupt();
}
/* Enable the specified AR5312_MISC_IRQ interrupt */
static void ar5312_misc_irq_unmask(struct irq_data *d)
{
ar5312_rst_reg_mask(AR5312_IMR, 0, BIT(d->hwirq));
}
/* Disable the specified AR5312_MISC_IRQ interrupt */
static void ar5312_misc_irq_mask(struct irq_data *d)
{
ar5312_rst_reg_mask(AR5312_IMR, BIT(d->hwirq), 0);
ar5312_rst_reg_read(AR5312_IMR); /* flush write buffer */
}
static struct irq_chip ar5312_misc_irq_chip = {
.name = "ar5312-misc",
.irq_unmask = ar5312_misc_irq_unmask,
.irq_mask = ar5312_misc_irq_mask,
};
static int ar5312_misc_irq_map(struct irq_domain *d, unsigned irq,
irq_hw_number_t hw)
{
irq_set_chip_and_handler(irq, &ar5312_misc_irq_chip, handle_level_irq);
return 0;
}
static const struct irq_domain_ops ar5312_misc_irq_domain_ops = {
.map = ar5312_misc_irq_map,
};
static void ar5312_irq_dispatch(void)
{
u32 pending = read_c0_status() & read_c0_cause();
if (pending & CAUSEF_IP2)
do_IRQ(AR5312_IRQ_WLAN0);
else if (pending & CAUSEF_IP5)
do_IRQ(AR5312_IRQ_WLAN1);
else if (pending & CAUSEF_IP6)
do_IRQ(AR5312_IRQ_MISC);
else if (pending & CAUSEF_IP7)
do_IRQ(ATH25_IRQ_CPU_CLOCK);
else
spurious_interrupt();
}
void __init ar5312_arch_init_irq(void)
{
struct irq_domain *domain;
unsigned irq;
ath25_irq_dispatch = ar5312_irq_dispatch;
domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT,
&ar5312_misc_irq_domain_ops, NULL);
if (!domain)
panic("Failed to add IRQ domain");
irq = irq_create_mapping(domain, AR5312_MISC_IRQ_AHB_PROC);
if (request_irq(irq, ar5312_ahb_err_handler, 0, "ar5312-ahb-error",
NULL))
pr_err("Failed to register ar5312-ahb-error interrupt\n");
irq_set_chained_handler_and_data(AR5312_IRQ_MISC,
ar5312_misc_irq_handler, domain);
ar5312_misc_irq_domain = domain;
}
static struct physmap_flash_data ar5312_flash_data = {
.width = 2,
};
static struct resource ar5312_flash_resource = {
.start = AR5312_FLASH_BASE,
.end = AR5312_FLASH_BASE + AR5312_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device ar5312_physmap_flash = {
.name = "physmap-flash",
.id = 0,
.dev.platform_data = &ar5312_flash_data,
.resource = &ar5312_flash_resource,
.num_resources = 1,
};
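/* Detect the flash bus width and set up bank 0; banks 1 and 2 are disabled */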
static void __init ar5312_flash_init(void)
{
void __iomem *flashctl_base;
u32 ctl;
flashctl_base = ioremap(AR5312_FLASHCTL_BASE,
AR5312_FLASHCTL_SIZE);
ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL0);
ctl &= AR5312_FLASHCTL_MW;
/* fixup flash width */
switch (ctl) {
case AR5312_FLASHCTL_MW16:
ar5312_flash_data.width = 2;
break;
case AR5312_FLASHCTL_MW8:
default:
ar5312_flash_data.width = 1;
break;
}
/*
* Configure flash bank 0.
* Assume 8M window size. Flash will be aliased if it's smaller
*/
ctl |= AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC_8M | AR5312_FLASHCTL_RBLE;
ctl |= 0x01 << AR5312_FLASHCTL_IDCY_S;
ctl |= 0x07 << AR5312_FLASHCTL_WST1_S;
ctl |= 0x07 << AR5312_FLASHCTL_WST2_S;
__raw_writel(ctl, flashctl_base + AR5312_FLASHCTL0);
/* Disable other flash banks */
ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL1);
ctl &= ~(AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC);
__raw_writel(ctl, flashctl_base + AR5312_FLASHCTL1);
ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL2);
ctl &= ~(AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC);
__raw_writel(ctl, flashctl_base + AR5312_FLASHCTL2);
iounmap(flashctl_base);
}
void __init ar5312_init_devices(void)
{
struct ath25_boarddata *config;
ar5312_flash_init();
/* Locate board/radio config data */
ath25_find_config(AR5312_FLASH_BASE, AR5312_FLASH_SIZE);
config = ath25_board.config;
/* AR2313 has CPU minor rev. 10 */
if ((current_cpu_data.processor_id & 0xff) == 0x0a)
ath25_soc = ATH25_SOC_AR2313;
/* AR2312 shares the same Silicon ID as AR5312 */
else if (config->flags & BD_ISCASPER)
ath25_soc = ATH25_SOC_AR2312;
/* Everything else is probably AR5312 or compatible */
else
ath25_soc = ATH25_SOC_AR5312;
platform_device_register(&ar5312_physmap_flash);
switch (ath25_soc) {
case ATH25_SOC_AR5312:
if (!ath25_board.radio)
return;
if (!(config->flags & BD_WLAN0))
break;
ath25_add_wmac(0, AR5312_WLAN0_BASE, AR5312_IRQ_WLAN0);
break;
case ATH25_SOC_AR2312:
case ATH25_SOC_AR2313:
if (!ath25_board.radio)
return;
break;
default:
break;
}
if (config->flags & BD_WLAN1)
ath25_add_wmac(1, AR5312_WLAN1_BASE, AR5312_IRQ_WLAN1);
}
static void ar5312_restart(char *command)
{
/* reset the system */
local_irq_disable();
while (1)
ar5312_rst_reg_write(AR5312_RESET, AR5312_RESET_SYSTEM);
}
/*
* This table is indexed by bits 5..4 of the CLOCKCTL1 register
* to determine the predivisor value.
*/
static unsigned clockctl1_predivide_table[4] __initdata = { 1, 2, 4, 5 };
static unsigned __init ar5312_cpu_frequency(void)
{
u32 scratch, devid, clock_ctl1;
u32 predivide_mask, multiplier_mask, doubler_mask;
unsigned predivide_shift, multiplier_shift;
unsigned predivide_select, predivisor, multiplier;
/* Trust the bootrom's idea of cpu frequency. */
scratch = ar5312_rst_reg_read(AR5312_SCRATCH);
if (scratch)
return scratch;
devid = ar5312_rst_reg_read(AR5312_REV);
devid = (devid & AR5312_REV_MAJ) >> AR5312_REV_MAJ_S;
if (devid == AR5312_REV_MAJ_AR2313) {
predivide_mask = AR2313_CLOCKCTL1_PREDIVIDE_MASK;
predivide_shift = AR2313_CLOCKCTL1_PREDIVIDE_SHIFT;
multiplier_mask = AR2313_CLOCKCTL1_MULTIPLIER_MASK;
multiplier_shift = AR2313_CLOCKCTL1_MULTIPLIER_SHIFT;
doubler_mask = AR2313_CLOCKCTL1_DOUBLER_MASK;
} else { /* AR5312 and AR2312 */
predivide_mask = AR5312_CLOCKCTL1_PREDIVIDE_MASK;
predivide_shift = AR5312_CLOCKCTL1_PREDIVIDE_SHIFT;
multiplier_mask = AR5312_CLOCKCTL1_MULTIPLIER_MASK;
multiplier_shift = AR5312_CLOCKCTL1_MULTIPLIER_SHIFT;
doubler_mask = AR5312_CLOCKCTL1_DOUBLER_MASK;
}
/*
* Clocking is derived from a fixed 40MHz input clock.
*
* cpu_freq = input_clock * MULT (where MULT is PLL multiplier)
* sys_freq = cpu_freq / 4 (used for APB clock, serial,
* flash, Timer, Watchdog Timer)
*
* cnt_freq = cpu_freq / 2 (used for CPU count/compare)
*
* So, for example, with a PLL multiplier of 5, we have
*
* cpu_freq = 200MHz
* sys_freq = 50MHz
* cnt_freq = 100MHz
*
* We compute the CPU frequency, based on PLL settings.
*/
clock_ctl1 = ar5312_rst_reg_read(AR5312_CLOCKCTL1);
predivide_select = (clock_ctl1 & predivide_mask) >> predivide_shift;
predivisor = clockctl1_predivide_table[predivide_select];
multiplier = (clock_ctl1 & multiplier_mask) >> multiplier_shift;
if (clock_ctl1 & doubler_mask)
multiplier <<= 1;
return (40000000 / predivisor) * multiplier;
}
static inline unsigned ar5312_sys_frequency(void)
{
return ar5312_cpu_frequency() / 4;
}
void __init ar5312_plat_time_init(void)
{
mips_hpt_frequency = ar5312_cpu_frequency() / 2;
}
void __init ar5312_plat_mem_setup(void)
{
void __iomem *sdram_base;
u32 memsize, memcfg, bank0_ac, bank1_ac;
u32 devid;
/* Detect memory size */
sdram_base = ioremap(AR5312_SDRAMCTL_BASE,
AR5312_SDRAMCTL_SIZE);
memcfg = __raw_readl(sdram_base + AR5312_MEM_CFG1);
bank0_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC0);
bank1_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC1);
memsize = (bank0_ac ? (1 << (bank0_ac + 1)) : 0) +
(bank1_ac ? (1 << (bank1_ac + 1)) : 0);
memsize <<= 20;
memblock_add(0, memsize);
iounmap(sdram_base);
ar5312_rst_base = ioremap(AR5312_RST_BASE, AR5312_RST_SIZE);
devid = ar5312_rst_reg_read(AR5312_REV);
devid >>= AR5312_REV_WMAC_MIN_S;
devid &= AR5312_REV_CHIP;
ath25_board.devid = (u16)devid;
/* Clear any lingering AHB errors */
ar5312_rst_reg_read(AR5312_PROCADDR);
ar5312_rst_reg_read(AR5312_DMAADDR);
ar5312_rst_reg_write(AR5312_WDT_CTRL, AR5312_WDT_CTRL_IGNORE);
_machine_restart = ar5312_restart;
}
void __init ar5312_arch_init(void)
{
unsigned irq = irq_create_mapping(ar5312_misc_irq_domain,
AR5312_MISC_IRQ_UART0);
ath25_serial_setup(AR5312_UART0_BASE, irq, ar5312_sys_frequency());
}
| linux-master | arch/mips/ath25/ar5312.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
* Copyright (C) 2006 FON Technology, SL.
* Copyright (C) 2006 Imre Kaloz <[email protected]>
* Copyright (C) 2006-2009 Felix Fietkau <[email protected]>
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/irq_cpu.h>
#include <asm/reboot.h>
#include <asm/bootinfo.h>
#include <asm/time.h>
#include <ath25_platform.h>
#include "devices.h"
#include "ar5312.h"
#include "ar2315.h"
void (*ath25_irq_dispatch)(void);
static inline bool check_radio_magic(const void __iomem *addr)
{
addr += 0x7a; /* offset for flash magic */
return (__raw_readb(addr) == 0x5a) && (__raw_readb(addr + 1) == 0xa5);
}
static inline bool check_notempty(const void __iomem *addr)
{
return __raw_readl(addr) != 0xffffffff;
}
static inline bool check_board_data(const void __iomem *addr, bool broken)
{
/* config magic found */
if (__raw_readl(addr) == ATH25_BD_MAGIC)
return true;
if (!broken)
return false;
/* broken board data detected, use radio data to find the
* offset, user will fix this */
if (check_radio_magic(addr + 0x1000))
return true;
if (check_radio_magic(addr + 0xf8))
return true;
return false;
}
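/*
* Search backwards from the top of flash in 4 KiB steps for the board
* configuration data.
*/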
static const void __iomem * __init find_board_config(const void __iomem *limit,
const bool broken)
{
const void __iomem *addr;
const void __iomem *begin = limit - 0x1000;
const void __iomem *end = limit - 0x30000;
for (addr = begin; addr >= end; addr -= 0x1000)
if (check_board_data(addr, broken))
return addr;
return NULL;
}
static const void __iomem * __init find_radio_config(const void __iomem *limit,
const void __iomem *bcfg)
{
const void __iomem *rcfg, *begin, *end;
/*
* Now find the start of Radio Configuration data, using heuristics:
* Search forward from Board Configuration data by 0x1000 bytes
* at a time until we find non-0xffffffff.
*/
begin = bcfg + 0x1000;
end = limit;
for (rcfg = begin; rcfg < end; rcfg += 0x1000)
if (check_notempty(rcfg) && check_radio_magic(rcfg))
return rcfg;
/* AR2316 relocates radio config to new location */
begin = bcfg + 0xf8;
end = limit - 0x1000 + 0xf8;
for (rcfg = begin; rcfg < end; rcfg += 0x1000)
if (check_notempty(rcfg) && check_radio_magic(rcfg))
return rcfg;
return NULL;
}
/*
* NB: Search region size could be larger than the actual flash size,
* but this shouldn't be a problem here, because the flash
* will simply be mapped multiple times.
*/
int __init ath25_find_config(phys_addr_t base, unsigned long size)
{
const void __iomem *flash_base, *flash_limit;
struct ath25_boarddata *config;
unsigned int rcfg_size;
int broken_boarddata = 0;
const void __iomem *bcfg, *rcfg;
u8 *board_data;
u8 *radio_data;
u8 *mac_addr;
u32 offset;
flash_base = ioremap(base, size);
flash_limit = flash_base + size;
ath25_board.config = NULL;
ath25_board.radio = NULL;
/* Copy the board and radio data to RAM, because accessing the mapped
* memory of the flash directly after booting is not safe */
/* Try to find valid board and radio data */
bcfg = find_board_config(flash_limit, false);
/* If that fails, try to at least find valid radio data */
if (!bcfg) {
bcfg = find_board_config(flash_limit, true);
broken_boarddata = 1;
}
if (!bcfg) {
pr_warn("WARNING: No board configuration data found!\n");
goto error;
}
board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
if (!board_data)
goto error;
ath25_board.config = (struct ath25_boarddata *)board_data;
memcpy_fromio(board_data, bcfg, 0x100);
if (broken_boarddata) {
pr_warn("WARNING: broken board data detected\n");
config = ath25_board.config;
if (is_zero_ether_addr(config->enet0_mac)) {
pr_info("Fixing up empty mac addresses\n");
config->reset_config_gpio = 0xffff;
config->sys_led_gpio = 0xffff;
eth_random_addr(config->wlan0_mac);
config->wlan0_mac[0] &= ~0x06;
eth_random_addr(config->enet0_mac);
eth_random_addr(config->enet1_mac);
}
}
/* Radio config starts 0x100 bytes after board config, regardless
* of what the physical layout on the flash chip looks like */
rcfg = find_radio_config(flash_limit, bcfg);
if (!rcfg) {
pr_warn("WARNING: Could not find Radio Configuration data\n");
goto error;
}
radio_data = board_data + 0x100 + ((rcfg - bcfg) & 0xfff);
ath25_board.radio = radio_data;
offset = radio_data - board_data;
pr_info("Radio config found at offset 0x%x (0x%x)\n", rcfg - bcfg,
offset);
rcfg_size = BOARD_CONFIG_BUFSZ - offset;
memcpy_fromio(radio_data, rcfg, rcfg_size);
mac_addr = &radio_data[0x1d * 2];
if (is_broadcast_ether_addr(mac_addr)) {
pr_info("Radio MAC is blank; using board-data\n");
ether_addr_copy(mac_addr, ath25_board.config->wlan0_mac);
}
iounmap(flash_base);
return 0;
error:
iounmap(flash_base);
return -ENODEV;
}
static void ath25_halt(void)
{
local_irq_disable();
unreachable();
}
void __init plat_mem_setup(void)
{
_machine_halt = ath25_halt;
pm_power_off = ath25_halt;
if (is_ar5312())
ar5312_plat_mem_setup();
else
ar2315_plat_mem_setup();
/* Disable data watchpoints */
write_c0_watchlo0(0);
}
asmlinkage void plat_irq_dispatch(void)
{
ath25_irq_dispatch();
}
void __init plat_time_init(void)
{
if (is_ar5312())
ar5312_plat_time_init();
else
ar2315_plat_time_init();
}
unsigned int get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;
}
void __init arch_init_irq(void)
{
clear_c0_status(ST0_IM);
mips_cpu_irq_init();
/* Initialize interrupt controllers */
if (is_ar5312())
ar5312_arch_init_irq();
else
ar2315_arch_init_irq();
}
| linux-master | arch/mips/ath25/board.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/serial_8250.h>
#include <linux/platform_device.h>
#include <asm/bootinfo.h>
#include <ath25_platform.h>
#include "devices.h"
#include "ar5312.h"
#include "ar2315.h"
struct ar231x_board_config ath25_board;
enum ath25_soc_type ath25_soc = ATH25_SOC_UNKNOWN;
static struct resource ath25_wmac0_res[] = {
{
.name = "wmac0_membase",
.flags = IORESOURCE_MEM,
},
{
.name = "wmac0_irq",
.flags = IORESOURCE_IRQ,
}
};
static struct resource ath25_wmac1_res[] = {
{
.name = "wmac1_membase",
.flags = IORESOURCE_MEM,
},
{
.name = "wmac1_irq",
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device ath25_wmac[] = {
{
.id = 0,
.name = "ar231x-wmac",
.resource = ath25_wmac0_res,
.num_resources = ARRAY_SIZE(ath25_wmac0_res),
.dev.platform_data = &ath25_board,
},
{
.id = 1,
.name = "ar231x-wmac",
.resource = ath25_wmac1_res,
.num_resources = ARRAY_SIZE(ath25_wmac1_res),
.dev.platform_data = &ath25_board,
},
};
static const char * const soc_type_strings[] = {
[ATH25_SOC_AR5312] = "Atheros AR5312",
[ATH25_SOC_AR2312] = "Atheros AR2312",
[ATH25_SOC_AR2313] = "Atheros AR2313",
[ATH25_SOC_AR2315] = "Atheros AR2315",
[ATH25_SOC_AR2316] = "Atheros AR2316",
[ATH25_SOC_AR2317] = "Atheros AR2317",
[ATH25_SOC_AR2318] = "Atheros AR2318",
[ATH25_SOC_UNKNOWN] = "Atheros (unknown)",
};
const char *get_system_type(void)
{
if ((ath25_soc >= ARRAY_SIZE(soc_type_strings)) ||
!soc_type_strings[ath25_soc])
return soc_type_strings[ATH25_SOC_UNKNOWN];
return soc_type_strings[ath25_soc];
}
void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
{
#ifdef CONFIG_SERIAL_8250_CONSOLE
struct uart_port s;
memset(&s, 0, sizeof(s));
s.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP;
s.iotype = UPIO_MEM32;
s.irq = irq;
s.regshift = 2;
s.mapbase = mapbase;
s.uartclk = uartclk;
early_serial_setup(&s);
#endif /* CONFIG_SERIAL_8250_CONSOLE */
}
int __init ath25_add_wmac(int nr, u32 base, int irq)
{
struct resource *res;
ath25_wmac[nr].dev.platform_data = &ath25_board;
res = &ath25_wmac[nr].resource[0];
res->start = base;
res->end = base + 0x10000 - 1;
res++;
res->start = irq;
res->end = irq;
return platform_device_register(&ath25_wmac[nr]);
}
static int __init ath25_register_devices(void)
{
if (is_ar5312())
ar5312_init_devices();
else
ar2315_init_devices();
return 0;
}
device_initcall(ath25_register_devices);
static int __init ath25_arch_init(void)
{
if (is_ar5312())
ar5312_arch_init();
else
ar2315_arch_init();
return 0;
}
arch_initcall(ath25_arch_init);
| linux-master | arch/mips/ath25/devices.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright MontaVista Software Inc
* Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
* Copyright (C) 2006 FON Technology, SL.
* Copyright (C) 2006 Imre Kaloz <[email protected]>
* Copyright (C) 2006 Felix Fietkau <[email protected]>
*/
/*
* Prom setup file for AR5312/AR231x SoCs
*/
#include <linux/init.h>
#include <asm/bootinfo.h>
void __init prom_init(void)
{
}
| linux-master | arch/mips/ath25/prom.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2010 John Crispin <[email protected]>
*/
#include <linux/cpu.h>
#include <lantiq_soc.h>
#include <asm/setup.h>
#define ASC_BUF 1024
#define LTQ_ASC_FSTAT ((u32 *)(LTQ_EARLY_ASC + 0x0048))
#ifdef __BIG_ENDIAN
#define LTQ_ASC_TBUF ((u32 *)(LTQ_EARLY_ASC + 0x0020 + 3))
#else
#define LTQ_ASC_TBUF ((u32 *)(LTQ_EARLY_ASC + 0x0020))
#endif
#define TXMASK 0x3F00
#define TXOFFSET 8
void prom_putchar(char c)
{
unsigned long flags;
local_irq_save(flags);
do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET);
if (c == '\n')
ltq_w8('\r', LTQ_ASC_TBUF);
ltq_w8(c, LTQ_ASC_TBUF);
local_irq_restore(flags);
}
| linux-master | arch/mips/lantiq/early_printk.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2010 John Crispin <[email protected]>
* Copyright (C) 2010 Thomas Langer <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>
#include <lantiq_soc.h>
#include <irq.h>
/* register definitions - internal irqs */
#define LTQ_ICU_ISR 0x0000
#define LTQ_ICU_IER 0x0008
#define LTQ_ICU_IOSR 0x0010
#define LTQ_ICU_IRSR 0x0018
#define LTQ_ICU_IMR 0x0020
#define LTQ_ICU_IM_SIZE 0x28
/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C 0x0000
#define LTQ_EIU_EXIN_INIC 0x0004
#define LTQ_EIU_EXIN_INC 0x0008
#define LTQ_EIU_EXIN_INEN 0x000C
/* number of external interrupts */
#define MAX_EIU 6
/* the performance counter */
#define LTQ_PERF_IRQ (INT_NUM_IM4_IRL0 + 31)
/*
* irqs generated by devices attached to the EBU need to be acked in
* a special manner
*/
#define LTQ_ICU_EBU_IRQ 22
#define ltq_icu_w32(vpe, m, x, y) \
ltq_w32((x), ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (y))
#define ltq_icu_r32(vpe, m, x) \
ltq_r32(ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (x))
#define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x))
/* we have a cascade of 8 irqs */
#define MIPS_CPU_IRQ_CASCADE 8
static int exin_avail;
static u32 ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[NR_CPUS];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
static DEFINE_SPINLOCK(ltq_eiu_lock);
static DEFINE_RAW_SPINLOCK(ltq_icu_lock);
static int ltq_perfcount_irq;
int ltq_eiu_get_irq(int exin)
{
if (exin < exin_avail)
return ltq_eiu_irq[exin];
return -1;
}
void ltq_disable_irq(struct irq_data *d)
{
unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
unsigned long im = offset / INT_NUM_IM_OFFSET;
unsigned long flags;
int vpe;
offset %= INT_NUM_IM_OFFSET;
raw_spin_lock_irqsave(<q_icu_lock, flags);
for_each_present_cpu(vpe) {
ltq_icu_w32(vpe, im,
ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
LTQ_ICU_IER);
}
raw_spin_unlock_irqrestore(<q_icu_lock, flags);
}
void ltq_mask_and_ack_irq(struct irq_data *d)
{
unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
unsigned long im = offset / INT_NUM_IM_OFFSET;
unsigned long flags;
int vpe;
offset %= INT_NUM_IM_OFFSET;
raw_spin_lock_irqsave(<q_icu_lock, flags);
for_each_present_cpu(vpe) {
ltq_icu_w32(vpe, im,
ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
LTQ_ICU_IER);
ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
}
raw_spin_unlock_irqrestore(<q_icu_lock, flags);
}
static void ltq_ack_irq(struct irq_data *d)
{
unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
unsigned long im = offset / INT_NUM_IM_OFFSET;
unsigned long flags;
int vpe;
offset %= INT_NUM_IM_OFFSET;
raw_spin_lock_irqsave(<q_icu_lock, flags);
for_each_present_cpu(vpe) {
ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
}
raw_spin_unlock_irqrestore(<q_icu_lock, flags);
}
void ltq_enable_irq(struct irq_data *d)
{
unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
unsigned long im = offset / INT_NUM_IM_OFFSET;
unsigned long flags;
int vpe;
offset %= INT_NUM_IM_OFFSET;
vpe = cpumask_first(irq_data_get_effective_affinity_mask(d));
/* This shouldn't even be possible, except maybe during CPU hotplug spam */
if (unlikely(vpe >= nr_cpu_ids))
vpe = smp_processor_id();
raw_spin_lock_irqsave(<q_icu_lock, flags);
ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
LTQ_ICU_IER);
raw_spin_unlock_irqrestore(<q_icu_lock, flags);
}
static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
int i;
unsigned long flags;
for (i = 0; i < exin_avail; i++) {
if (d->hwirq == ltq_eiu_irq[i]) {
int val = 0;
int edge = 0;
switch (type) {
case IRQF_TRIGGER_NONE:
break;
case IRQF_TRIGGER_RISING:
val = 1;
edge = 1;
break;
case IRQF_TRIGGER_FALLING:
val = 2;
edge = 1;
break;
case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
val = 3;
edge = 1;
break;
case IRQF_TRIGGER_HIGH:
val = 5;
break;
case IRQF_TRIGGER_LOW:
val = 6;
break;
default:
pr_err("invalid type %d for irq %ld\n",
type, d->hwirq);
return -EINVAL;
}
if (edge)
irq_set_handler(d->hwirq, handle_edge_irq);
spin_lock_irqsave(<q_eiu_lock, flags);
ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
(~(7 << (i * 4)))) | (val << (i * 4)),
LTQ_EIU_EXIN_C);
spin_unlock_irqrestore(<q_eiu_lock, flags);
}
}
return 0;
}
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
int i;
ltq_enable_irq(d);
for (i = 0; i < exin_avail; i++) {
if (d->hwirq == ltq_eiu_irq[i]) {
/* by default we are low level triggered */
ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
/* clear all pending */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
LTQ_EIU_EXIN_INC);
/* enable */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
LTQ_EIU_EXIN_INEN);
break;
}
}
return 0;
}
static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
int i;
ltq_disable_irq(d);
for (i = 0; i < exin_avail; i++) {
if (d->hwirq == ltq_eiu_irq[i]) {
/* disable */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
LTQ_EIU_EXIN_INEN);
break;
}
}
}
#if defined(CONFIG_SMP)
static int ltq_icu_irq_set_affinity(struct irq_data *d,
const struct cpumask *cpumask, bool force)
{
struct cpumask tmask;
if (!cpumask_and(&tmask, cpumask, cpu_online_mask))
return -EINVAL;
irq_data_update_effective_affinity(d, &tmask);
return IRQ_SET_MASK_OK;
}
#endif
static struct irq_chip ltq_irq_type = {
.name = "icu",
.irq_enable = ltq_enable_irq,
.irq_disable = ltq_disable_irq,
.irq_unmask = ltq_enable_irq,
.irq_ack = ltq_ack_irq,
.irq_mask = ltq_disable_irq,
.irq_mask_ack = ltq_mask_and_ack_irq,
#if defined(CONFIG_SMP)
.irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};
static struct irq_chip ltq_eiu_type = {
.name = "eiu",
.irq_startup = ltq_startup_eiu_irq,
.irq_shutdown = ltq_shutdown_eiu_irq,
.irq_enable = ltq_enable_irq,
.irq_disable = ltq_disable_irq,
.irq_unmask = ltq_enable_irq,
.irq_ack = ltq_ack_irq,
.irq_mask = ltq_disable_irq,
.irq_mask_ack = ltq_mask_and_ack_irq,
.irq_set_type = ltq_eiu_settype,
#if defined(CONFIG_SMP)
.irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};
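/*
* Chained handler for one interrupt module: read the ICU status and
* dispatch the highest pending interrupt.
*/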
static void ltq_hw_irq_handler(struct irq_desc *desc)
{
unsigned int module = irq_desc_get_irq(desc) - 2;
u32 irq;
irq_hw_number_t hwirq;
int vpe = smp_processor_id();
irq = ltq_icu_r32(vpe, module, LTQ_ICU_IOSR);
if (irq == 0)
return;
/*
* a silicon bug causes only the most significant set bit to be valid;
* all other bits might be bogus
*/
irq = __fls(irq);
hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
generic_handle_domain_irq(ltq_domain, hwirq);
/* if this is an EBU irq, we need to ack it or get a deadlock */
if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
LTQ_EBU_PCC_ISTAT);
}
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
struct irq_chip *chip = <q_irq_type;
struct irq_data *data;
int i;
if (hw < MIPS_CPU_IRQ_CASCADE)
return 0;
for (i = 0; i < exin_avail; i++)
if (hw == ltq_eiu_irq[i])
chip = <q_eiu_type;
data = irq_get_irq_data(irq);
irq_data_update_effective_affinity(data, cpumask_of(0));
irq_set_chip_and_handler(irq, chip, handle_level_irq);
return 0;
}
static const struct irq_domain_ops irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
.map = icu_map,
};
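/*
* Map the per-VPE ICU register ranges, mask all interrupts and register
* the irq domain; the external interrupt unit (EIU) is optional.
*/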
int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
struct device_node *eiu_node;
struct resource res;
int i, ret, vpe;
/* load register regions of available ICUs */
for_each_possible_cpu(vpe) {
if (of_address_to_resource(node, vpe, &res))
panic("Failed to get icu%i memory range", vpe);
if (!request_mem_region(res.start, resource_size(&res),
res.name))
pr_err("Failed to request icu%i memory\n", vpe);
ltq_icu_membase[vpe] = ioremap(res.start,
resource_size(&res));
if (!ltq_icu_membase[vpe])
panic("Failed to remap icu%i memory", vpe);
}
/* turn off all irqs by default */
for_each_possible_cpu(vpe) {
for (i = 0; i < MAX_IM; i++) {
/* make sure all irqs are turned off by default */
ltq_icu_w32(vpe, i, 0, LTQ_ICU_IER);
/* clear all possibly pending interrupts */
ltq_icu_w32(vpe, i, ~0, LTQ_ICU_ISR);
ltq_icu_w32(vpe, i, ~0, LTQ_ICU_IMR);
/* clear resend */
ltq_icu_w32(vpe, i, 0, LTQ_ICU_IRSR);
}
}
mips_cpu_irq_init();
for (i = 0; i < MAX_IM; i++)
irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
ltq_domain = irq_domain_add_linear(node,
(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
&irq_domain_ops, 0);
/* tell oprofile which irq to use */
ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
/* the external interrupts are optional and xway only */
eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
/* find out how many external irq sources we have */
exin_avail = of_property_count_u32_elems(eiu_node,
"lantiq,eiu-irqs");
if (exin_avail > MAX_EIU)
exin_avail = MAX_EIU;
ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
ltq_eiu_irq, exin_avail);
if (ret)
panic("failed to load external irq resources");
if (!request_mem_region(res.start, resource_size(&res),
res.name))
pr_err("Failed to request eiu memory");
ltq_eiu_membase = ioremap(res.start,
resource_size(&res));
if (!ltq_eiu_membase)
panic("Failed to remap eiu memory");
}
of_node_put(eiu_node);
return 0;
}
int get_c0_perfcount_int(void)
{
return ltq_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;
}
IRQCHIP_DECLARE(lantiq_icu, "lantiq,icu", icu_of_init);
void __init arch_init_irq(void)
{
irqchip_init();
}
| linux-master | arch/mips/lantiq/irq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2010 Thomas Langer <[email protected]>
* Copyright (C) 2010 John Crispin <[email protected]>
*/
#include <linux/io.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/list.h>
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <lantiq_soc.h>
#include "clk.h"
#include "prom.h"
/* lantiq socs have 3 static clocks */
static struct clk cpu_clk_generic[4];
void clkdev_add_static(unsigned long cpu, unsigned long fpi,
unsigned long io, unsigned long ppe)
{
cpu_clk_generic[0].rate = cpu;
cpu_clk_generic[1].rate = fpi;
cpu_clk_generic[2].rate = io;
cpu_clk_generic[3].rate = ppe;
}
struct clk *clk_get_cpu(void)
{
return &cpu_clk_generic[0];
}
struct clk *clk_get_fpi(void)
{
return &cpu_clk_generic[1];
}
EXPORT_SYMBOL_GPL(clk_get_fpi);
struct clk *clk_get_io(void)
{
return &cpu_clk_generic[2];
}
EXPORT_SYMBOL_GPL(clk_get_io);
struct clk *clk_get_ppe(void)
{
return &cpu_clk_generic[3];
}
EXPORT_SYMBOL_GPL(clk_get_ppe);
static inline int clk_good(struct clk *clk)
{
return clk && !IS_ERR(clk);
}
unsigned long clk_get_rate(struct clk *clk)
{
if (unlikely(!clk_good(clk)))
return 0;
if (clk->rate != 0)
return clk->rate;
if (clk->get_rate != NULL)
return clk->get_rate();
return 0;
}
EXPORT_SYMBOL(clk_get_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
if (unlikely(!clk_good(clk)))
return 0;
if (clk->rates && *clk->rates) {
unsigned long *r = clk->rates;
while (*r && (*r != rate))
r++;
if (!*r) {
pr_err("clk %s.%s: trying to set invalid rate %ld\n",
clk->cl.dev_id, clk->cl.con_id, rate);
return -1;
}
}
clk->rate = rate;
return 0;
}
EXPORT_SYMBOL(clk_set_rate);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
if (unlikely(!clk_good(clk)))
return 0;
if (clk->rates && *clk->rates) {
unsigned long *r = clk->rates;
while (*r && (*r != rate))
r++;
if (!*r) {
return clk->rate;
}
}
return rate;
}
EXPORT_SYMBOL(clk_round_rate);
int clk_enable(struct clk *clk)
{
if (unlikely(!clk_good(clk)))
return -1;
if (clk->enable)
return clk->enable(clk);
return -1;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
if (unlikely(!clk_good(clk)))
return;
if (clk->disable)
clk->disable(clk);
}
EXPORT_SYMBOL(clk_disable);
int clk_activate(struct clk *clk)
{
if (unlikely(!clk_good(clk)))
return -1;
if (clk->activate)
return clk->activate(clk);
return -1;
}
EXPORT_SYMBOL(clk_activate);
void clk_deactivate(struct clk *clk)
{
if (unlikely(!clk_good(clk)))
return;
if (clk->deactivate)
clk->deactivate(clk);
}
EXPORT_SYMBOL(clk_deactivate);
struct clk *clk_get_parent(struct clk *clk)
{
return NULL;
}
EXPORT_SYMBOL(clk_get_parent);
int clk_set_parent(struct clk *clk, struct clk *parent)
{
return 0;
}
EXPORT_SYMBOL(clk_set_parent);
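/* Read the CP0 counter resolution (cycles per count tick) via rdhwr $3 */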
static inline u32 get_counter_resolution(void)
{
u32 res;
__asm__ __volatile__(
".set push\n"
".set mips32r2\n"
"rdhwr %0, $3\n"
".set pop\n"
: "=&r" (res)
: /* no input */
: "memory");
return res;
}
void __init plat_time_init(void)
{
struct clk *clk;
ltq_soc_init();
clk = clk_get_cpu();
mips_hpt_frequency = clk_get_rate(clk) / get_counter_resolution();
write_c0_compare(read_c0_count());
pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
clk_put(clk);
}
| linux-master | arch/mips/lantiq/clk.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2010 John Crispin <[email protected]>
*/
#include <linux/export.h>
#include <linux/clk.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <asm/bootinfo.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <lantiq.h>
#include "prom.h"
#include "clk.h"
/* access to the ebu needs to be locked between different drivers */
DEFINE_SPINLOCK(ebu_lock);
EXPORT_SYMBOL_GPL(ebu_lock);
/*
* this struct is filled by the soc specific detection code and holds
* information about the specific soc type, revision and name
*/
static struct ltq_soc_info soc_info;
/*
* These structs are used to override vsmp_init_secondary()
*/
#if defined(CONFIG_MIPS_MT_SMP)
extern const struct plat_smp_ops vsmp_smp_ops;
static struct plat_smp_ops lantiq_smp_ops;
#endif
const char *get_system_type(void)
{
return soc_info.sys_type;
}
int ltq_soc_type(void)
{
return soc_info.type;
}
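/* Rebuild the kernel command line from the argc/argv pair left by the bootloader */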
static void __init prom_init_cmdline(void)
{
int argc = fw_arg0;
char **argv = (char **) KSEG1ADDR(fw_arg1);
int i;
arcs_cmdline[0] = '\0';
for (i = 0; i < argc; i++) {
char *p = (char *) KSEG1ADDR(argv[i]);
if (CPHYSADDR(p) && *p) {
strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
}
}
}
void __init plat_mem_setup(void)
{
void *dtb;
ioport_resource.start = IOPORT_RESOURCE_START;
ioport_resource.end = IOPORT_RESOURCE_END;
iomem_resource.start = IOMEM_RESOURCE_START;
iomem_resource.end = IOMEM_RESOURCE_END;
set_io_port_base((unsigned long) KSEG1);
dtb = get_fdt();
if (dtb == NULL)
panic("no dtb found");
/*
* Load the devicetree. This causes the chosen node to be
* parsed resulting in our memory appearing
*/
__dt_setup_arch(dtb);
}
#if defined(CONFIG_MIPS_MT_SMP)
static void lantiq_init_secondary(void)
{
/*
* MIPS CPU startup function vsmp_init_secondary() will only
* enable some of the interrupts for the second CPU/VPE.
*/
set_c0_status(ST0_IM);
}
#endif
void __init prom_init(void)
{
/* call the SoC specific detection code and get it to fill soc_info */
ltq_soc_detect(&soc_info);
snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev %s",
soc_info.name, soc_info.rev_type);
soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0';
pr_info("SoC: %s\n", soc_info.sys_type);
prom_init_cmdline();
#if defined(CONFIG_MIPS_MT_SMP)
if (cpu_has_mipsmt) {
lantiq_smp_ops = vsmp_smp_ops;
lantiq_smp_ops.init_secondary = lantiq_init_secondary;
register_smp_ops(&lantiq_smp_ops);
}
#endif
}
| linux-master | arch/mips/lantiq/prom.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2011 John Crispin <[email protected]>
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of.h>
#include <lantiq_soc.h>
#include <xway_dma.h>
#define LTQ_DMA_ID 0x08
#define LTQ_DMA_CTRL 0x10
#define LTQ_DMA_CPOLL 0x14
#define LTQ_DMA_CS 0x18
#define LTQ_DMA_CCTRL 0x1C
#define LTQ_DMA_CDBA 0x20
#define LTQ_DMA_CDLEN 0x24
#define LTQ_DMA_CIS 0x28
#define LTQ_DMA_CIE 0x2C
#define LTQ_DMA_PS 0x40
#define LTQ_DMA_PCTRL 0x44
#define LTQ_DMA_IRNEN 0xf4
#define DMA_ID_CHNR GENMASK(26, 20) /* channel number */
#define DMA_DESCPT BIT(3) /* descriptor complete irq */
#define DMA_TX BIT(8) /* TX channel direction */
#define DMA_CHAN_ON BIT(0) /* channel on / off bit */
#define DMA_PDEN BIT(6) /* enable packet drop */
#define DMA_CHAN_RST BIT(1) /* channel reset bit */
#define DMA_RESET BIT(0) /* DMA engine reset */
#define DMA_IRQ_ACK 0x7e /* IRQ status ack value */
#define DMA_POLL BIT(31) /* turn on channel polling */
#define DMA_CLK_DIV4 BIT(6) /* polling clock divider */
#define DMA_PCTRL_2W_BURST 0x1 /* 2 word burst length */
#define DMA_PCTRL_4W_BURST 0x2 /* 4 word burst length */
#define DMA_PCTRL_8W_BURST 0x3 /* 8 word burst length */
#define DMA_TX_BURST_SHIFT 4 /* tx burst shift */
#define DMA_RX_BURST_SHIFT 2 /* rx burst shift */
#define DMA_ETOP_ENDIANNESS (0xf << 8) /* endianness swap etop channels */
#define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel weight */
#define ltq_dma_r32(x) ltq_r32(ltq_dma_membase + (x))
#define ltq_dma_w32(x, y) ltq_w32(x, ltq_dma_membase + (y))
#define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \
ltq_dma_membase + (z))
static void __iomem *ltq_dma_membase;
static DEFINE_SPINLOCK(ltq_dma_lock);
void
ltq_dma_enable_irq(struct ltq_dma_channel *ch)
{
unsigned long flags;
spin_lock_irqsave(<q_dma_lock, flags);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
spin_unlock_irqrestore(<q_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
void
ltq_dma_disable_irq(struct ltq_dma_channel *ch)
{
unsigned long flags;
spin_lock_irqsave(<q_dma_lock, flags);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
spin_unlock_irqrestore(<q_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
void
ltq_dma_ack_irq(struct ltq_dma_channel *ch)
{
unsigned long flags;
spin_lock_irqsave(<q_dma_lock, flags);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
spin_unlock_irqrestore(<q_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
void
ltq_dma_open(struct ltq_dma_channel *ch)
{
unsigned long flag;
spin_lock_irqsave(<q_dma_lock, flag);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
spin_unlock_irqrestore(<q_dma_lock, flag);
}
EXPORT_SYMBOL_GPL(ltq_dma_open);
void
ltq_dma_close(struct ltq_dma_channel *ch)
{
unsigned long flag;
spin_lock_irqsave(<q_dma_lock, flag);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
spin_unlock_irqrestore(<q_dma_lock, flag);
}
EXPORT_SYMBOL_GPL(ltq_dma_close);
static void
ltq_dma_alloc(struct ltq_dma_channel *ch)
{
unsigned long flags;
ch->desc = 0;
ch->desc_base = dma_alloc_coherent(ch->dev,
LTQ_DESC_NUM * LTQ_DESC_SIZE,
&ch->phys, GFP_ATOMIC);
spin_lock_irqsave(<q_dma_lock, flags);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
wmb();
ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
;
spin_unlock_irqrestore(<q_dma_lock, flags);
}
void
ltq_dma_alloc_tx(struct ltq_dma_channel *ch)
{
unsigned long flags;
ltq_dma_alloc(ch);
spin_lock_irqsave(<q_dma_lock, flags);
ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
spin_unlock_irqrestore(<q_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
void
ltq_dma_alloc_rx(struct ltq_dma_channel *ch)
{
unsigned long flags;
ltq_dma_alloc(ch);
spin_lock_irqsave(<q_dma_lock, flags);
ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
spin_unlock_irqrestore(<q_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
void
ltq_dma_free(struct ltq_dma_channel *ch)
{
if (!ch->desc_base)
return;
ltq_dma_close(ch);
dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
ch->desc_base, ch->phys);
}
EXPORT_SYMBOL_GPL(ltq_dma_free);
void
ltq_dma_init_port(int p, int tx_burst, int rx_burst)
{
ltq_dma_w32(p, LTQ_DMA_PS);
switch (p) {
case DMA_PORT_ETOP:
/*
* Tell the DMA engine to swap the endianness of data frames and
* drop packets if the channel arbitration fails.
*/
ltq_dma_w32_mask(0, (DMA_ETOP_ENDIANNESS | DMA_PDEN),
LTQ_DMA_PCTRL);
break;
default:
break;
}
switch (rx_burst) {
case 8:
ltq_dma_w32_mask(0x0c, (DMA_PCTRL_8W_BURST << DMA_RX_BURST_SHIFT),
LTQ_DMA_PCTRL);
break;
case 4:
ltq_dma_w32_mask(0x0c, (DMA_PCTRL_4W_BURST << DMA_RX_BURST_SHIFT),
LTQ_DMA_PCTRL);
break;
case 2:
ltq_dma_w32_mask(0x0c, (DMA_PCTRL_2W_BURST << DMA_RX_BURST_SHIFT),
LTQ_DMA_PCTRL);
break;
default:
break;
}
switch (tx_burst) {
case 8:
ltq_dma_w32_mask(0x30, (DMA_PCTRL_8W_BURST << DMA_TX_BURST_SHIFT),
LTQ_DMA_PCTRL);
break;
case 4:
ltq_dma_w32_mask(0x30, (DMA_PCTRL_4W_BURST << DMA_TX_BURST_SHIFT),
LTQ_DMA_PCTRL);
break;
case 2:
ltq_dma_w32_mask(0x30, (DMA_PCTRL_2W_BURST << DMA_TX_BURST_SHIFT),
LTQ_DMA_PCTRL);
break;
default:
break;
}
}
EXPORT_SYMBOL_GPL(ltq_dma_init_port);
static int
ltq_dma_init(struct platform_device *pdev)
{
struct clk *clk;
unsigned int id, nchannels;
int i;
ltq_dma_membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(ltq_dma_membase))
panic("Failed to remap dma resource");
/* power up and reset the dma engine */
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
panic("Failed to get dma clock");
clk_enable(clk);
ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);
usleep_range(1, 10);
/* disable all interrupts */
ltq_dma_w32(0, LTQ_DMA_IRNEN);
/* reset/configure each channel */
id = ltq_dma_r32(LTQ_DMA_ID);
nchannels = ((id & DMA_ID_CHNR) >> 20);
for (i = 0; i < nchannels; i++) {
ltq_dma_w32(i, LTQ_DMA_CS);
ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL);
ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
}
dev_info(&pdev->dev,
"Init done - hw rev: %X, ports: %d, channels: %d\n",
id & 0x1f, (id >> 16) & 0xf, nchannels);
return 0;
}
static const struct of_device_id dma_match[] = {
{ .compatible = "lantiq,dma-xway" },
{},
};
static struct platform_driver dma_driver = {
.probe = ltq_dma_init,
.driver = {
.name = "dma-xway",
.of_match_table = dma_match,
},
};
int __init
dma_init(void)
{
return platform_driver_register(&dma_driver);
}
postcore_initcall(dma_init);
| linux-master | arch/mips/lantiq/xway/dma.c |
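/*
 * Illustrative sketch, not part of dma.c above: one way a client driver
 * might drive the exported ltq_dma_* helpers. The struct ltq_dma_channel
 * fields (nr, dev) are assumed from xway_dma.h, and the channel number 1 is
 * a placeholder.
 */
static struct ltq_dma_channel example_ch;

static int example_open_tx_channel(struct device *dev)
{
	example_ch.nr = 1;			/* placeholder channel */
	example_ch.dev = dev;
	ltq_dma_alloc_tx(&example_ch);		/* descriptor ring + TX setup */
	ltq_dma_enable_irq(&example_ch);
	ltq_dma_open(&example_ch);		/* sets DMA_CHAN_ON */
	return 0;
}

static void example_close_channel(void)
{
	ltq_dma_close(&example_ch);
	ltq_dma_free(&example_ch);		/* frees the descriptor ring */
}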
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2010 John Crispin <[email protected]>
* Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
*/
#include <linux/io.h>
#include <linux/export.h>
#include <linux/clk.h>
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <lantiq_soc.h>
#include "../clk.h"
static unsigned int ram_clocks[] = {
CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
#define DDR_HZ ram_clocks[ltq_cgu_r32(CGU_SYS) & 0x3]
/* legacy xway clock */
#define CGU_SYS 0x10
/* vr9, ar10/grx390 clock */
#define CGU_SYS_XRX 0x0c
#define CGU_IF_CLK_AR10 0x24
unsigned long ltq_danube_fpi_hz(void)
{
unsigned long ddr_clock = DDR_HZ;
if (ltq_cgu_r32(CGU_SYS) & 0x40)
return ddr_clock >> 1;
return ddr_clock;
}
unsigned long ltq_danube_cpu_hz(void)
{
switch (ltq_cgu_r32(CGU_SYS) & 0xc) {
case 0:
return CLOCK_333M;
case 4:
return DDR_HZ;
case 8:
return DDR_HZ << 1;
default:
return DDR_HZ >> 1;
}
}
unsigned long ltq_danube_pp32_hz(void)
{
unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 7) & 3;
unsigned long clk;
switch (clksys) {
case 1:
clk = CLOCK_240M;
break;
case 2:
clk = CLOCK_222M;
break;
case 3:
clk = CLOCK_133M;
break;
default:
clk = CLOCK_266M;
break;
}
return clk;
}
unsigned long ltq_ar9_sys_hz(void)
{
if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
return CLOCK_393M;
return CLOCK_333M;
}
unsigned long ltq_ar9_fpi_hz(void)
{
unsigned long sys = ltq_ar9_sys_hz();
if (ltq_cgu_r32(CGU_SYS) & BIT(0))
return sys / 3;
else
return sys / 2;
}
unsigned long ltq_ar9_cpu_hz(void)
{
if (ltq_cgu_r32(CGU_SYS) & BIT(2))
return ltq_ar9_fpi_hz();
else
return ltq_ar9_sys_hz();
}
unsigned long ltq_vr9_cpu_hz(void)
{
unsigned int cpu_sel;
unsigned long clk;
cpu_sel = (ltq_cgu_r32(CGU_SYS_XRX) >> 4) & 0xf;
switch (cpu_sel) {
case 0:
clk = CLOCK_600M;
break;
case 1:
clk = CLOCK_500M;
break;
case 2:
clk = CLOCK_393M;
break;
case 3:
clk = CLOCK_333M;
break;
case 5:
case 6:
clk = CLOCK_196_608M;
break;
case 7:
clk = CLOCK_167M;
break;
case 4:
case 8:
case 9:
clk = CLOCK_125M;
break;
default:
clk = 0;
break;
}
return clk;
}
unsigned long ltq_vr9_fpi_hz(void)
{
unsigned int ocp_sel, cpu_clk;
unsigned long clk;
cpu_clk = ltq_vr9_cpu_hz();
ocp_sel = ltq_cgu_r32(CGU_SYS_XRX) & 0x3;
switch (ocp_sel) {
case 0:
/* OCP ratio 1 */
clk = cpu_clk;
break;
case 2:
/* OCP ratio 2 */
clk = cpu_clk / 2;
break;
case 3:
/* OCP ratio 2.5 */
clk = (cpu_clk * 2) / 5;
break;
case 4:
/* OCP ratio 3 */
clk = cpu_clk / 3;
break;
default:
clk = 0;
break;
}
return clk;
}
unsigned long ltq_vr9_pp32_hz(void)
{
unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 0x7;
unsigned long clk;
switch (clksys) {
case 0:
clk = CLOCK_500M;
break;
case 1:
clk = CLOCK_432M;
break;
case 2:
clk = CLOCK_288M;
break;
default:
clk = CLOCK_500M;
break;
}
return clk;
}
unsigned long ltq_ar10_cpu_hz(void)
{
unsigned int clksys;
int cpu_fs = (ltq_cgu_r32(CGU_SYS_XRX) >> 8) & 0x1;
int freq_div = (ltq_cgu_r32(CGU_SYS_XRX) >> 4) & 0x7;
switch (cpu_fs) {
case 0:
clksys = CLOCK_500M;
break;
case 1:
clksys = CLOCK_600M;
break;
default:
clksys = CLOCK_500M;
break;
}
switch (freq_div) {
case 0:
return clksys;
case 1:
return clksys >> 1;
case 2:
return clksys >> 2;
default:
return clksys;
}
}
unsigned long ltq_ar10_fpi_hz(void)
{
int freq_fpi = (ltq_cgu_r32(CGU_IF_CLK_AR10) >> 25) & 0xf;
switch (freq_fpi) {
case 1:
return CLOCK_300M;
case 5:
return CLOCK_250M;
case 2:
return CLOCK_150M;
case 6:
return CLOCK_125M;
default:
return CLOCK_125M;
}
}
unsigned long ltq_ar10_pp32_hz(void)
{
unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 0x7;
unsigned long clk;
switch (clksys) {
case 1:
clk = CLOCK_250M;
break;
case 4:
clk = CLOCK_400M;
break;
default:
clk = CLOCK_250M;
break;
}
return clk;
}
unsigned long ltq_grx390_cpu_hz(void)
{
unsigned int clksys;
int cpu_fs = ((ltq_cgu_r32(CGU_SYS_XRX) >> 9) & 0x3);
int freq_div = ((ltq_cgu_r32(CGU_SYS_XRX) >> 4) & 0x7);
switch (cpu_fs) {
case 0:
clksys = CLOCK_600M;
break;
case 1:
clksys = CLOCK_666M;
break;
case 2:
clksys = CLOCK_720M;
break;
default:
clksys = CLOCK_600M;
break;
}
switch (freq_div) {
case 0:
return clksys;
case 1:
return clksys >> 1;
case 2:
return clksys >> 2;
default:
return clksys;
}
}
unsigned long ltq_grx390_fpi_hz(void)
{
/* fpi clock is derived from ddr_clk */
unsigned int clksys;
int cpu_fs = ((ltq_cgu_r32(CGU_SYS_XRX) >> 9) & 0x3);
int freq_div = ((ltq_cgu_r32(CGU_SYS_XRX)) & 0x7);
switch (cpu_fs) {
case 0:
clksys = CLOCK_600M;
break;
case 1:
clksys = CLOCK_666M;
break;
case 2:
clksys = CLOCK_720M;
break;
default:
clksys = CLOCK_600M;
break;
}
switch (freq_div) {
case 1:
return clksys >> 1;
case 2:
return clksys >> 2;
default:
return clksys >> 1;
}
}
unsigned long ltq_grx390_pp32_hz(void)
{
unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 0x7;
unsigned long clk;
switch (clksys) {
case 1:
clk = CLOCK_250M;
break;
case 2:
clk = CLOCK_432M;
break;
case 4:
clk = CLOCK_400M;
break;
default:
clk = CLOCK_250M;
break;
}
return clk;
}
| linux-master | arch/mips/lantiq/xway/clk.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2011-2012 John Crispin <[email protected]>
* Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
*/
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/clkdev.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <lantiq_soc.h>
#include "../clk.h"
#include "../prom.h"
/* clock control register for legacy */
#define CGU_IFCCR 0x0018
#define CGU_IFCCR_VR9 0x0024
/* system clock register for legacy */
#define CGU_SYS 0x0010
/* pci control register */
#define CGU_PCICR 0x0034
#define CGU_PCICR_VR9 0x0038
/* ephy configuration register */
#define CGU_EPHY 0x10
/* Legacy PMU register for ar9, ase, danube */
/* power control register */
#define PMU_PWDCR 0x1C
/* power status register */
#define PMU_PWDSR 0x20
/* power control register */
#define PMU_PWDCR1 0x24
/* power status register */
#define PMU_PWDSR1 0x28
/* power control register */
#define PWDCR(x) ((x) ? (PMU_PWDCR1) : (PMU_PWDCR))
/* power status register */
#define PWDSR(x) ((x) ? (PMU_PWDSR1) : (PMU_PWDSR))
/* PMU register for ar10 and grx390 */
/* First register set */
#define PMU_CLK_SR 0x20 /* status */
#define PMU_CLK_CR_A 0x24 /* Enable */
#define PMU_CLK_CR_B 0x28 /* Disable */
/* Second register set */
#define PMU_CLK_SR1 0x30 /* status */
#define PMU_CLK_CR1_A 0x34 /* Enable */
#define PMU_CLK_CR1_B 0x38 /* Disable */
/* Third register set */
#define PMU_ANA_SR 0x40 /* status */
#define PMU_ANA_CR_A 0x44 /* Enable */
#define PMU_ANA_CR_B 0x48 /* Disable */
/* Status */
static u32 pmu_clk_sr[] = {
PMU_CLK_SR,
PMU_CLK_SR1,
PMU_ANA_SR,
};
/* Enable */
static u32 pmu_clk_cr_a[] = {
PMU_CLK_CR_A,
PMU_CLK_CR1_A,
PMU_ANA_CR_A,
};
/* Disable */
static u32 pmu_clk_cr_b[] = {
PMU_CLK_CR_B,
PMU_CLK_CR1_B,
PMU_ANA_CR_B,
};
#define PWDCR_EN_XRX(x) (pmu_clk_cr_a[(x)])
#define PWDCR_DIS_XRX(x) (pmu_clk_cr_b[(x)])
#define PWDSR_XRX(x) (pmu_clk_sr[(x)])
/* clock gates that we can en/disable */
#define PMU_USB0_P BIT(0)
#define PMU_ASE_SDIO BIT(2) /* ASE special */
#define PMU_PCI BIT(4)
#define PMU_DMA BIT(5)
#define PMU_USB0 BIT(6)
#define PMU_ASC0 BIT(7)
#define PMU_EPHY BIT(7) /* ase */
#define PMU_USIF BIT(7) /* from vr9 until grx390 */
#define PMU_SPI BIT(8)
#define PMU_DFE BIT(9)
#define PMU_EBU BIT(10)
#define PMU_STP BIT(11)
#define PMU_GPT BIT(12)
#define PMU_AHBS BIT(13) /* vr9 */
#define PMU_FPI BIT(14)
#define PMU_AHBM BIT(15)
#define PMU_SDIO BIT(16) /* danube, ar9, vr9 */
#define PMU_ASC1 BIT(17)
#define PMU_PPE_QSB BIT(18)
#define PMU_PPE_SLL01 BIT(19)
#define PMU_DEU BIT(20)
#define PMU_PPE_TC BIT(21)
#define PMU_PPE_EMA BIT(22)
#define PMU_PPE_DPLUM BIT(23)
#define PMU_PPE_DP BIT(23)
#define PMU_PPE_DPLUS BIT(24)
#define PMU_USB1_P BIT(26)
#define PMU_GPHY3 BIT(26) /* grx390 */
#define PMU_USB1 BIT(27)
#define PMU_SWITCH BIT(28)
#define PMU_PPE_TOP BIT(29)
#define PMU_GPHY0 BIT(29) /* ar10, xrx390 */
#define PMU_GPHY BIT(30)
#define PMU_GPHY1 BIT(30) /* ar10, xrx390 */
#define PMU_PCIE_CLK BIT(31)
#define PMU_GPHY2 BIT(31) /* ar10, xrx390 */
#define PMU1_PCIE_PHY		BIT(0)	/* vr9-specific, moved in ar10/grx390 */
#define PMU1_PCIE_CTL BIT(1)
#define PMU1_PCIE_PDI BIT(4)
#define PMU1_PCIE_MSI BIT(5)
#define PMU1_CKE BIT(6)
#define PMU1_PCIE1_CTL BIT(17)
#define PMU1_PCIE1_PDI BIT(20)
#define PMU1_PCIE1_MSI BIT(21)
#define PMU1_PCIE2_CTL BIT(25)
#define PMU1_PCIE2_PDI BIT(26)
#define PMU1_PCIE2_MSI BIT(27)
#define PMU_ANALOG_USB0_P BIT(0)
#define PMU_ANALOG_USB1_P BIT(1)
#define PMU_ANALOG_PCIE0_P BIT(8)
#define PMU_ANALOG_PCIE1_P BIT(9)
#define PMU_ANALOG_PCIE2_P BIT(10)
#define PMU_ANALOG_DSL_AFE BIT(16)
#define PMU_ANALOG_DCDC_2V5 BIT(17)
#define PMU_ANALOG_DCDC_1VX BIT(18)
#define PMU_ANALOG_DCDC_1V0 BIT(19)
#define pmu_w32(x, y) ltq_w32((x), pmu_membase + (y))
#define pmu_r32(x) ltq_r32(pmu_membase + (x))
static void __iomem *pmu_membase;
void __iomem *ltq_cgu_membase;
void __iomem *ltq_ebu_membase;
static u32 ifccr = CGU_IFCCR;
static u32 pcicr = CGU_PCICR;
static DEFINE_SPINLOCK(g_pmu_lock);
/* legacy function kept alive to ease clkdev transition */
void ltq_pmu_enable(unsigned int module)
{
int retry = 1000000;
spin_lock(&g_pmu_lock);
pmu_w32(pmu_r32(PMU_PWDCR) & ~module, PMU_PWDCR);
do {} while (--retry && (pmu_r32(PMU_PWDSR) & module));
spin_unlock(&g_pmu_lock);
if (!retry)
panic("activating PMU module failed!");
}
EXPORT_SYMBOL(ltq_pmu_enable);
/* legacy function kept alive to ease clkdev transition */
void ltq_pmu_disable(unsigned int module)
{
int retry = 1000000;
spin_lock(&g_pmu_lock);
pmu_w32(pmu_r32(PMU_PWDCR) | module, PMU_PWDCR);
do {} while (--retry && (!(pmu_r32(PMU_PWDSR) & module)));
spin_unlock(&g_pmu_lock);
if (!retry)
pr_warn("deactivating PMU module failed!");
}
EXPORT_SYMBOL(ltq_pmu_disable);
/* enable a hw clock */
static int cgu_enable(struct clk *clk)
{
ltq_cgu_w32(ltq_cgu_r32(ifccr) | clk->bits, ifccr);
return 0;
}
/* disable a hw clock */
static void cgu_disable(struct clk *clk)
{
ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~clk->bits, ifccr);
}
/* enable a clock gate */
static int pmu_enable(struct clk *clk)
{
int retry = 1000000;
if (of_machine_is_compatible("lantiq,ar10")
|| of_machine_is_compatible("lantiq,grx390")) {
pmu_w32(clk->bits, PWDCR_EN_XRX(clk->module));
do {} while (--retry &&
(!(pmu_r32(PWDSR_XRX(clk->module)) & clk->bits)));
} else {
spin_lock(&g_pmu_lock);
pmu_w32(pmu_r32(PWDCR(clk->module)) & ~clk->bits,
PWDCR(clk->module));
do {} while (--retry &&
(pmu_r32(PWDSR(clk->module)) & clk->bits));
spin_unlock(&g_pmu_lock);
}
if (!retry)
panic("activating PMU module failed!");
return 0;
}
/* disable a clock gate */
static void pmu_disable(struct clk *clk)
{
int retry = 1000000;
if (of_machine_is_compatible("lantiq,ar10")
|| of_machine_is_compatible("lantiq,grx390")) {
pmu_w32(clk->bits, PWDCR_DIS_XRX(clk->module));
do {} while (--retry &&
(pmu_r32(PWDSR_XRX(clk->module)) & clk->bits));
} else {
spin_lock(&g_pmu_lock);
pmu_w32(pmu_r32(PWDCR(clk->module)) | clk->bits,
PWDCR(clk->module));
do {} while (--retry &&
(!(pmu_r32(PWDSR(clk->module)) & clk->bits)));
spin_unlock(&g_pmu_lock);
}
if (!retry)
pr_warn("deactivating PMU module failed!");
}
/* the pci enable helper */
static int pci_enable(struct clk *clk)
{
unsigned int val = ltq_cgu_r32(ifccr);
/* set bus clock speed */
if (of_machine_is_compatible("lantiq,ar9") ||
of_machine_is_compatible("lantiq,vr9")) {
val &= ~0x1f00000;
if (clk->rate == CLOCK_33M)
val |= 0xe00000;
else
val |= 0x700000; /* 62.5M */
} else {
val &= ~0xf00000;
if (clk->rate == CLOCK_33M)
val |= 0x800000;
else
val |= 0x400000; /* 62.5M */
}
ltq_cgu_w32(val, ifccr);
pmu_enable(clk);
return 0;
}
/* enable the external clock as a source */
static int pci_ext_enable(struct clk *clk)
{
ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~(1 << 16), ifccr);
ltq_cgu_w32((1 << 30), pcicr);
return 0;
}
/* disable the external clock as a source */
static void pci_ext_disable(struct clk *clk)
{
ltq_cgu_w32(ltq_cgu_r32(ifccr) | (1 << 16), ifccr);
ltq_cgu_w32((1 << 31) | (1 << 30), pcicr);
}
/* enable a clockout source */
static int clkout_enable(struct clk *clk)
{
int i;
/* get the correct rate */
for (i = 0; i < 4; i++) {
if (clk->rates[i] == clk->rate) {
int shift = 14 - (2 * clk->module);
int enable = 7 - clk->module;
unsigned int val = ltq_cgu_r32(ifccr);
val &= ~(3 << shift);
val |= i << shift;
val |= enable;
ltq_cgu_w32(val, ifccr);
return 0;
}
}
return -1;
}
/* manage the clock gates via PMU */
static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
unsigned int module, unsigned int bits)
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
return;
clk->cl.dev_id = dev;
clk->cl.con_id = con;
clk->cl.clk = clk;
clk->enable = pmu_enable;
clk->disable = pmu_disable;
clk->module = module;
clk->bits = bits;
if (deactivate) {
		/*
		 * Disable it during initialization. The consuming module
		 * should enable it when needed.
		 */
pmu_disable(clk);
}
clkdev_add(&clk->cl);
}
/* manage the clock generator */
static void clkdev_add_cgu(const char *dev, const char *con,
unsigned int bits)
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
return;
clk->cl.dev_id = dev;
clk->cl.con_id = con;
clk->cl.clk = clk;
clk->enable = cgu_enable;
clk->disable = cgu_disable;
clk->bits = bits;
clkdev_add(&clk->cl);
}
/* pci needs its own enable function as the setup is a bit more complex */
static unsigned long valid_pci_rates[] = {CLOCK_33M, CLOCK_62_5M, 0};
static void clkdev_add_pci(void)
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
/* main pci clock */
if (clk) {
clk->cl.dev_id = "17000000.pci";
clk->cl.con_id = NULL;
clk->cl.clk = clk;
clk->rate = CLOCK_33M;
clk->rates = valid_pci_rates;
clk->enable = pci_enable;
clk->disable = pmu_disable;
clk->module = 0;
clk->bits = PMU_PCI;
clkdev_add(&clk->cl);
}
/* use internal/external bus clock */
if (clk_ext) {
clk_ext->cl.dev_id = "17000000.pci";
clk_ext->cl.con_id = "external";
clk_ext->cl.clk = clk_ext;
clk_ext->enable = pci_ext_enable;
clk_ext->disable = pci_ext_disable;
clkdev_add(&clk_ext->cl);
}
}
/* xway socs can generate clocks on gpio pins */
static unsigned long valid_clkout_rates[4][5] = {
{CLOCK_32_768K, CLOCK_1_536M, CLOCK_2_5M, CLOCK_12M, 0},
{CLOCK_40M, CLOCK_12M, CLOCK_24M, CLOCK_48M, 0},
{CLOCK_25M, CLOCK_40M, CLOCK_30M, CLOCK_60M, 0},
{CLOCK_12M, CLOCK_50M, CLOCK_32_768K, CLOCK_25M, 0},
};
static void clkdev_add_clkout(void)
{
int i;
for (i = 0; i < 4; i++) {
struct clk *clk;
char *name;
name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
if (!name)
continue;
sprintf(name, "clkout%d", i);
clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk) {
kfree(name);
continue;
}
clk->cl.dev_id = "1f103000.cgu";
clk->cl.con_id = name;
clk->cl.clk = clk;
clk->rate = 0;
clk->rates = valid_clkout_rates[i];
clk->enable = clkout_enable;
clk->module = i;
clkdev_add(&clk->cl);
}
}
/* bring up all register ranges that we need for basic system control */
void __init ltq_soc_init(void)
{
struct resource res_pmu, res_cgu, res_ebu;
struct device_node *np_pmu =
of_find_compatible_node(NULL, NULL, "lantiq,pmu-xway");
struct device_node *np_cgu =
of_find_compatible_node(NULL, NULL, "lantiq,cgu-xway");
struct device_node *np_ebu =
of_find_compatible_node(NULL, NULL, "lantiq,ebu-xway");
/* check if all the core register ranges are available */
if (!np_pmu || !np_cgu || !np_ebu)
panic("Failed to load core nodes from devicetree");
if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
of_address_to_resource(np_cgu, 0, &res_cgu) ||
of_address_to_resource(np_ebu, 0, &res_ebu))
panic("Failed to get core resources");
of_node_put(np_pmu);
of_node_put(np_cgu);
of_node_put(np_ebu);
if (!request_mem_region(res_pmu.start, resource_size(&res_pmu),
res_pmu.name) ||
!request_mem_region(res_cgu.start, resource_size(&res_cgu),
res_cgu.name) ||
!request_mem_region(res_ebu.start, resource_size(&res_ebu),
res_ebu.name))
pr_err("Failed to request core resources");
pmu_membase = ioremap(res_pmu.start, resource_size(&res_pmu));
ltq_cgu_membase = ioremap(res_cgu.start,
resource_size(&res_cgu));
ltq_ebu_membase = ioremap(res_ebu.start,
resource_size(&res_ebu));
if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase)
panic("Failed to remap core resources");
/* make sure to unprotect the memory region where flash is located */
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
/* add our generic xway clocks */
clkdev_add_pmu("10000000.fpi", NULL, 0, 0, PMU_FPI);
clkdev_add_pmu("1e100a00.gptu", NULL, 1, 0, PMU_GPT);
clkdev_add_pmu("1e100bb0.stp", NULL, 1, 0, PMU_STP);
clkdev_add_pmu("1e100c00.serial", NULL, 0, 0, PMU_ASC1);
clkdev_add_pmu("1e104100.dma", NULL, 1, 0, PMU_DMA);
clkdev_add_pmu("1e100800.spi", NULL, 1, 0, PMU_SPI);
clkdev_add_pmu("1e105300.ebu", NULL, 0, 0, PMU_EBU);
clkdev_add_clkout();
/* add the soc dependent clocks */
if (of_machine_is_compatible("lantiq,vr9")) {
ifccr = CGU_IFCCR_VR9;
pcicr = CGU_PCICR_VR9;
} else {
clkdev_add_pmu("1e180000.etop", NULL, 1, 0, PMU_PPE);
}
if (!of_machine_is_compatible("lantiq,ase"))
clkdev_add_pci();
if (of_machine_is_compatible("lantiq,grx390") ||
of_machine_is_compatible("lantiq,ar10")) {
clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY0);
clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY1);
clkdev_add_pmu("1e108000.switch", "gphy2", 0, 0, PMU_GPHY2);
clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB0_P);
clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB1_P);
/* rc 0 */
clkdev_add_pmu("1f106800.phy", "phy", 1, 2, PMU_ANALOG_PCIE0_P);
clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI);
clkdev_add_pmu("1f106800.phy", "pdi", 1, 1, PMU1_PCIE_PDI);
clkdev_add_pmu("1d900000.pcie", "ctl", 1, 1, PMU1_PCIE_CTL);
/* rc 1 */
clkdev_add_pmu("1f700400.phy", "phy", 1, 2, PMU_ANALOG_PCIE1_P);
clkdev_add_pmu("19000000.pcie", "msi", 1, 1, PMU1_PCIE1_MSI);
clkdev_add_pmu("1f700400.phy", "pdi", 1, 1, PMU1_PCIE1_PDI);
clkdev_add_pmu("19000000.pcie", "ctl", 1, 1, PMU1_PCIE1_CTL);
}
if (of_machine_is_compatible("lantiq,ase")) {
if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
clkdev_add_static(CLOCK_266M, CLOCK_133M,
CLOCK_133M, CLOCK_266M);
else
clkdev_add_static(CLOCK_133M, CLOCK_133M,
CLOCK_133M, CLOCK_133M);
clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
clkdev_add_pmu("1e180000.etop", "ppe", 1, 0, PMU_PPE);
clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY);
clkdev_add_pmu("1e180000.etop", "ephy", 1, 0, PMU_EPHY);
clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_ASE_SDIO);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
} else if (of_machine_is_compatible("lantiq,grx390")) {
clkdev_add_static(ltq_grx390_cpu_hz(), ltq_grx390_fpi_hz(),
ltq_grx390_fpi_hz(), ltq_grx390_pp32_hz());
clkdev_add_pmu("1e108000.switch", "gphy3", 0, 0, PMU_GPHY3);
clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
/* rc 2 */
clkdev_add_pmu("1f106a00.pcie", "phy", 1, 2, PMU_ANALOG_PCIE2_P);
clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI);
clkdev_add_pmu("1f106a00.pcie", "pdi", 1, 1, PMU1_PCIE2_PDI);
clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL);
clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP);
clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
} else if (of_machine_is_compatible("lantiq,ar10")) {
clkdev_add_static(ltq_ar10_cpu_hz(), ltq_ar10_fpi_hz(),
ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz());
clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH |
PMU_PPE_DP | PMU_PPE_TC);
clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
} else if (of_machine_is_compatible("lantiq,vr9")) {
clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
ltq_vr9_fpi_hz(), ltq_vr9_pp32_hz());
clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
clkdev_add_pmu("1f106800.phy", "phy", 1, 1, PMU1_PCIE_PHY);
clkdev_add_pmu("1d900000.pcie", "bus", 1, 0, PMU_PCIE_CLK);
clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI);
clkdev_add_pmu("1f106800.phy", "pdi", 1, 1, PMU1_PCIE_PDI);
clkdev_add_pmu("1d900000.pcie", "ctl", 1, 1, PMU1_PCIE_CTL);
clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS);
clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
clkdev_add_pmu("1e10b308.eth", NULL, 0, 0,
PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
PMU_PPE_QSB | PMU_PPE_TOP);
clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
} else if (of_machine_is_compatible("lantiq,ar9")) {
clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
ltq_ar9_fpi_hz(), CLOCK_250M);
clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH);
clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
clkdev_add_pmu("1e100400.serial", NULL, 1, 0, PMU_ASC0);
} else {
clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
ltq_danube_fpi_hz(), ltq_danube_pp32_hz());
clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
clkdev_add_pmu("1e100400.serial", NULL, 1, 0, PMU_ASC0);
}
}
| linux-master | arch/mips/lantiq/xway/sysctrl.c |
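/*
 * Illustrative sketch, not part of sysctrl.c above: consuming one of the PMU
 * clock gates registered by clkdev_add_pmu(). The lookup key is the
 * dev_id/con_id pair, e.g. the SDIO gate added as ("1e103000.sdio", NULL).
 * clk_enable() ends up in pmu_enable(), which clears the PWDCR bit and polls
 * PWDSR until the module reports powered up.
 */
static int example_enable_sdio_gate(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);	/* dev must be 1e103000.sdio */

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	return clk_enable(clk);
}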
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2010 John Crispin <[email protected]>
* Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
*/
#include <linux/export.h>
#include <linux/clk.h>
#include <asm/bootinfo.h>
#include <asm/time.h>
#include <lantiq_soc.h>
#include "../prom.h"
#define SOC_DANUBE "Danube"
#define SOC_TWINPASS "Twinpass"
#define SOC_AMAZON_SE "Amazon_SE"
#define SOC_AR9 "AR9"
#define SOC_GR9 "GRX200"
#define SOC_VR9 "xRX200"
#define SOC_VRX220 "xRX220"
#define SOC_AR10 "xRX300"
#define SOC_GRX390 "xRX330"
#define COMP_DANUBE "lantiq,danube"
#define COMP_TWINPASS "lantiq,twinpass"
#define COMP_AMAZON_SE "lantiq,ase"
#define COMP_AR9 "lantiq,ar9"
#define COMP_GR9 "lantiq,gr9"
#define COMP_VR9 "lantiq,vr9"
#define COMP_AR10 "lantiq,ar10"
#define COMP_GRX390 "lantiq,grx390"
#define PART_SHIFT 12
#define PART_MASK 0x0FFFFFFF
#define REV_SHIFT 28
#define REV_MASK 0xF0000000
void __init ltq_soc_detect(struct ltq_soc_info *i)
{
i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
sprintf(i->rev_type, "1.%d", i->rev);
switch (i->partnum) {
case SOC_ID_DANUBE1:
case SOC_ID_DANUBE2:
i->name = SOC_DANUBE;
i->type = SOC_TYPE_DANUBE;
i->compatible = COMP_DANUBE;
break;
case SOC_ID_TWINPASS:
i->name = SOC_TWINPASS;
i->type = SOC_TYPE_DANUBE;
i->compatible = COMP_TWINPASS;
break;
case SOC_ID_ARX188:
case SOC_ID_ARX168_1:
case SOC_ID_ARX168_2:
case SOC_ID_ARX182:
i->name = SOC_AR9;
i->type = SOC_TYPE_AR9;
i->compatible = COMP_AR9;
break;
case SOC_ID_GRX188:
case SOC_ID_GRX168:
i->name = SOC_GR9;
i->type = SOC_TYPE_AR9;
i->compatible = COMP_GR9;
break;
case SOC_ID_AMAZON_SE_1:
case SOC_ID_AMAZON_SE_2:
#ifdef CONFIG_PCI
panic("ase is only supported for non pci kernels");
#endif
i->name = SOC_AMAZON_SE;
i->type = SOC_TYPE_AMAZON_SE;
i->compatible = COMP_AMAZON_SE;
break;
case SOC_ID_VRX282:
case SOC_ID_VRX268:
case SOC_ID_VRX288:
i->name = SOC_VR9;
i->type = SOC_TYPE_VR9;
i->compatible = COMP_VR9;
break;
case SOC_ID_GRX268:
case SOC_ID_GRX288:
i->name = SOC_GR9;
i->type = SOC_TYPE_VR9;
i->compatible = COMP_GR9;
break;
case SOC_ID_VRX268_2:
case SOC_ID_VRX288_2:
i->name = SOC_VR9;
i->type = SOC_TYPE_VR9_2;
i->compatible = COMP_VR9;
break;
case SOC_ID_VRX220:
i->name = SOC_VRX220;
i->type = SOC_TYPE_VRX220;
i->compatible = COMP_VR9;
break;
case SOC_ID_GRX282_2:
case SOC_ID_GRX288_2:
i->name = SOC_GR9;
i->type = SOC_TYPE_VR9_2;
i->compatible = COMP_GR9;
break;
case SOC_ID_ARX362:
case SOC_ID_ARX368:
case SOC_ID_ARX382:
case SOC_ID_ARX388:
case SOC_ID_URX388:
i->name = SOC_AR10;
i->type = SOC_TYPE_AR10;
i->compatible = COMP_AR10;
break;
case SOC_ID_GRX383:
case SOC_ID_GRX369:
case SOC_ID_GRX387:
case SOC_ID_GRX389:
i->name = SOC_GRX390;
i->type = SOC_TYPE_GRX390;
i->compatible = COMP_GRX390;
break;
default:
unreachable();
break;
}
}
| linux-master | arch/mips/lantiq/xway/prom.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2012 John Crispin <[email protected]>
* Copyright (C) 2012 Lantiq GmbH
*/
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <lantiq_soc.h>
#include "../clk.h"
/* the magic ID byte of the core */
#define GPTU_MAGIC 0x59
/* clock control register */
#define GPTU_CLC 0x00
/* id register */
#define GPTU_ID 0x08
/* interrupt node enable */
#define GPTU_IRNEN 0xf4
/* interrupt control register */
#define GPTU_IRCR 0xf8
/* interrupt capture register */
#define GPTU_IRNCR 0xfc
/* there are 3 identical blocks of 2 timers. calculate register offsets */
#define GPTU_SHIFT(x) (x % 2 ? 4 : 0)
#define GPTU_BASE(x) (((x >> 1) * 0x20) + 0x10)
/* timer control register */
#define GPTU_CON(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x00)
/* timer auto reload register */
#define GPTU_RUN(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x08)
/* timer manual reload register */
#define GPTU_RLD(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x10)
/* timer count register */
#define GPTU_CNT(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x18)
/* GPTU_CON(x) */
#define CON_CNT BIT(2)
#define CON_EDGE_ANY (BIT(7) | BIT(6))
#define CON_SYNC BIT(8)
#define CON_CLK_INT BIT(10)
/* GPTU_RUN(x) */
#define RUN_SEN BIT(0)
#define RUN_RL BIT(2)
/* set clock to runmode */
#define CLC_RMC BIT(8)
/* bring core out of suspend */
#define CLC_SUSPEND BIT(4)
/* the disable bit */
#define CLC_DISABLE BIT(0)
#define gptu_w32(x, y) ltq_w32((x), gptu_membase + (y))
#define gptu_r32(x) ltq_r32(gptu_membase + (x))
enum gptu_timer {
TIMER1A = 0,
TIMER1B,
TIMER2A,
TIMER2B,
TIMER3A,
TIMER3B
};
static void __iomem *gptu_membase;
static struct resource irqres[6];
static irqreturn_t timer_irq_handler(int irq, void *priv)
{
int timer = irq - irqres[0].start;
gptu_w32(1 << timer, GPTU_IRNCR);
return IRQ_HANDLED;
}
static void gptu_hwinit(void)
{
gptu_w32(0x00, GPTU_IRNEN);
gptu_w32(0xff, GPTU_IRNCR);
gptu_w32(CLC_RMC | CLC_SUSPEND, GPTU_CLC);
}
static void gptu_hwexit(void)
{
gptu_w32(0x00, GPTU_IRNEN);
gptu_w32(0xff, GPTU_IRNCR);
gptu_w32(CLC_DISABLE, GPTU_CLC);
}
static int gptu_enable(struct clk *clk)
{
int ret = request_irq(irqres[clk->bits].start, timer_irq_handler,
IRQF_TIMER, "gtpu", NULL);
if (ret) {
pr_err("gptu: failed to request irq\n");
return ret;
}
gptu_w32(CON_CNT | CON_EDGE_ANY | CON_SYNC | CON_CLK_INT,
GPTU_CON(clk->bits));
gptu_w32(1, GPTU_RLD(clk->bits));
gptu_w32(gptu_r32(GPTU_IRNEN) | BIT(clk->bits), GPTU_IRNEN);
gptu_w32(RUN_SEN | RUN_RL, GPTU_RUN(clk->bits));
return 0;
}
static void gptu_disable(struct clk *clk)
{
gptu_w32(0, GPTU_RUN(clk->bits));
gptu_w32(0, GPTU_CON(clk->bits));
gptu_w32(0, GPTU_RLD(clk->bits));
gptu_w32(gptu_r32(GPTU_IRNEN) & ~BIT(clk->bits), GPTU_IRNEN);
free_irq(irqres[clk->bits].start, NULL);
}
static inline void clkdev_add_gptu(struct device *dev, const char *con,
unsigned int timer)
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
return;
clk->cl.dev_id = dev_name(dev);
clk->cl.con_id = con;
clk->cl.clk = clk;
clk->enable = gptu_enable;
clk->disable = gptu_disable;
clk->bits = timer;
clkdev_add(&clk->cl);
}
static int gptu_probe(struct platform_device *pdev)
{
struct clk *clk;
if (of_irq_to_resource_table(pdev->dev.of_node, irqres, 6) != 6) {
dev_err(&pdev->dev, "Failed to get IRQ list\n");
return -EINVAL;
}
/* remap gptu register range */
gptu_membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(gptu_membase))
return PTR_ERR(gptu_membase);
/* enable our clock */
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Failed to get clock\n");
return -ENOENT;
}
clk_enable(clk);
/* power up the core */
gptu_hwinit();
/* the gptu has a ID register */
if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) {
dev_err(&pdev->dev, "Failed to find magic\n");
gptu_hwexit();
clk_disable(clk);
clk_put(clk);
return -ENAVAIL;
}
/* register the clocks */
clkdev_add_gptu(&pdev->dev, "timer1a", TIMER1A);
clkdev_add_gptu(&pdev->dev, "timer1b", TIMER1B);
clkdev_add_gptu(&pdev->dev, "timer2a", TIMER2A);
clkdev_add_gptu(&pdev->dev, "timer2b", TIMER2B);
clkdev_add_gptu(&pdev->dev, "timer3a", TIMER3A);
clkdev_add_gptu(&pdev->dev, "timer3b", TIMER3B);
dev_info(&pdev->dev, "gptu: 6 timers loaded\n");
return 0;
}
static const struct of_device_id gptu_match[] = {
{ .compatible = "lantiq,gptu-xway" },
{},
};
static struct platform_driver dma_driver = {
.probe = gptu_probe,
.driver = {
.name = "gptu-xway",
.of_match_table = gptu_match,
},
};
int __init gptu_init(void)
{
int ret = platform_driver_register(&dma_driver);
if (ret)
pr_info("gptu: Error registering platform driver\n");
return ret;
}
arch_initcall(gptu_init);
| linux-master | arch/mips/lantiq/xway/gptu.c |
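/*
 * Illustrative sketch, not part of gptu.c above: claiming one of the six
 * timers registered by clkdev_add_gptu(). The con_id strings
 * ("timer1a" .. "timer3b") come from gptu_probe(); because the clkdev entry
 * is keyed to dev_name(&pdev->dev), this assumes dev is the gptu platform
 * device itself.
 */
static int example_start_timer1a(struct device *dev)
{
	struct clk *t = clk_get(dev, "timer1a");

	if (IS_ERR(t))
		return PTR_ERR(t);
	return clk_enable(t);	/* requests the timer irq and starts it */
}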
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2012 John Crispin <[email protected]>
* Copyright (C) 2010 Sameer Ahmad, Lantiq GmbH
*/
#include <linux/ioport.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <lantiq_soc.h>
/* Bias and regulator Setup Register */
#define DCDC_BIAS_VREG0 0xa
/* Bias and regulator Setup Register */
#define DCDC_BIAS_VREG1 0xb
#define dcdc_w8(x, y) ltq_w8((x), dcdc_membase + (y))
#define dcdc_r8(x) ltq_r8(dcdc_membase + (x))
static void __iomem *dcdc_membase;
static int dcdc_probe(struct platform_device *pdev)
{
dcdc_membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(dcdc_membase))
return PTR_ERR(dcdc_membase);
dev_info(&pdev->dev, "Core Voltage : %d mV\n",
dcdc_r8(DCDC_BIAS_VREG1) * 8);
return 0;
}
static const struct of_device_id dcdc_match[] = {
{ .compatible = "lantiq,dcdc-xrx200" },
{},
};
static struct platform_driver dcdc_driver = {
.probe = dcdc_probe,
.driver = {
.name = "dcdc-xrx200",
.of_match_table = dcdc_match,
},
};
int __init dcdc_init(void)
{
int ret = platform_driver_register(&dcdc_driver);
if (ret)
pr_info("dcdc: Error registering platform driver\n");
return ret;
}
arch_initcall(dcdc_init);
| linux-master | arch/mips/lantiq/xway/dcdc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2012 John Crispin <[email protected]>
*/
#include <linux/err.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <lantiq_soc.h>
static unsigned int *cp1_base;
unsigned int *ltq_get_cp1_base(void)
{
if (!cp1_base)
panic("no cp1 base was set\n");
return cp1_base;
}
EXPORT_SYMBOL(ltq_get_cp1_base);
static int vmmc_probe(struct platform_device *pdev)
{
#define CP1_SIZE (1 << 20)
struct gpio_desc *gpio;
int gpio_count;
dma_addr_t dma;
int error;
cp1_base =
(void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE,
&dma, GFP_KERNEL));
gpio_count = gpiod_count(&pdev->dev, NULL);
while (gpio_count > 0) {
gpio = devm_gpiod_get_index(&pdev->dev,
NULL, --gpio_count, GPIOD_OUT_HIGH);
error = PTR_ERR_OR_ZERO(gpio);
if (error) {
dev_err(&pdev->dev,
"failed to request GPIO idx %d: %d\n",
gpio_count, error);
continue;
}
gpiod_set_consumer_name(gpio, "vmmc-relay");
}
dev_info(&pdev->dev, "reserved %dMB at 0x%p", CP1_SIZE >> 20, cp1_base);
return 0;
}
static const struct of_device_id vmmc_match[] = {
{ .compatible = "lantiq,vmmc-xway" },
{},
};
static struct platform_driver vmmc_driver = {
.probe = vmmc_probe,
.driver = {
.name = "lantiq,vmmc",
.of_match_table = vmmc_match,
},
};
builtin_platform_driver(vmmc_driver);
| linux-master | arch/mips/lantiq/xway/vmmc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2012 Thomas Langer <[email protected]>
* Copyright (C) 2012 John Crispin <[email protected]>
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <asm/reboot.h>
#include <linux/export.h>
#include <lantiq_soc.h>
/*
* Dummy implementation. Used to allow platform code to find out what
* source was booted from
*/
unsigned char ltq_boot_select(void)
{
return BS_SPI;
}
#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
#define BOOT_PW1_REG (BOOT_REG_BASE | 0x20)
#define BOOT_PW2_REG (BOOT_REG_BASE | 0x24)
#define BOOT_PW1 0x4C545100
#define BOOT_PW2 0x0051544C
#define WDT_REG_BASE (KSEG1 | 0x1F8803F0)
#define WDT_PW1 0x00BE0000
#define WDT_PW2 0x00DC0000
static void machine_restart(char *command)
{
local_irq_disable();
/* reboot magic */
ltq_w32(BOOT_PW1, (void *)BOOT_PW1_REG); /* 'LTQ\0' */
ltq_w32(BOOT_PW2, (void *)BOOT_PW2_REG); /* '\0QTL' */
ltq_w32(0, (void *)BOOT_REG_BASE); /* reset Bootreg RVEC */
/* watchdog magic */
ltq_w32(WDT_PW1, (void *)WDT_REG_BASE);
ltq_w32(WDT_PW2 |
(0x3 << 26) | /* PWL */
(0x2 << 24) | /* CLKDIV */
(0x1 << 31) | /* enable */
(1), /* reload */
(void *)WDT_REG_BASE);
unreachable();
}
static void machine_halt(void)
{
local_irq_disable();
unreachable();
}
static void machine_power_off(void)
{
local_irq_disable();
unreachable();
}
static int __init mips_reboot_setup(void)
{
_machine_restart = machine_restart;
_machine_halt = machine_halt;
pm_power_off = machine_power_off;
return 0;
}
arch_initcall(mips_reboot_setup);
| linux-master | arch/mips/lantiq/falcon/reset.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2011 Thomas Langer <[email protected]>
* Copyright (C) 2011 John Crispin <[email protected]>
*/
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/clkdev.h>
#include <linux/of_address.h>
#include <asm/delay.h>
#include <lantiq_soc.h>
#include "../clk.h"
/* infrastructure control register */
#define SYS1_INFRAC 0x00bc
/* Configuration fuses for drivers and pll */
#define STATUS_CONFIG 0x0040
/* GPE frequency selection */
#define GPPC_OFFSET 24
#define GPEFREQ_MASK 0x0000C00
#define GPEFREQ_OFFSET 10
/* Clock status register */
#define SYSCTL_CLKS 0x0000
/* Clock enable register */
#define SYSCTL_CLKEN 0x0004
/* Clock clear register */
#define SYSCTL_CLKCLR 0x0008
/* Activation Status Register */
#define SYSCTL_ACTS 0x0020
/* Activation Register */
#define SYSCTL_ACT 0x0024
/* Deactivation Register */
#define SYSCTL_DEACT 0x0028
/* reboot Register */
#define SYSCTL_RBT 0x002c
/* CPU0 Clock Control Register */
#define SYS1_CPU0CC 0x0040
/* HRST_OUT_N Control Register */
#define SYS1_HRSTOUTC 0x00c0
/* clock divider bit */
#define CPU0CC_CPUDIV 0x0001
/* Activation Status Register */
#define ACTS_ASC0_ACT 0x00001000
#define ACTS_SSC0 0x00002000
#define ACTS_ASC1_ACT 0x00000800
#define ACTS_I2C_ACT 0x00004000
#define ACTS_P0 0x00010000
#define ACTS_P1 0x00010000
#define ACTS_P2 0x00020000
#define ACTS_P3 0x00020000
#define ACTS_P4 0x00040000
#define ACTS_PADCTRL0 0x00100000
#define ACTS_PADCTRL1 0x00100000
#define ACTS_PADCTRL2 0x00200000
#define ACTS_PADCTRL3 0x00200000
#define ACTS_PADCTRL4 0x00400000
#define sysctl_w32(m, x, y) ltq_w32((x), sysctl_membase[m] + (y))
#define sysctl_r32(m, x) ltq_r32(sysctl_membase[m] + (x))
#define sysctl_w32_mask(m, clear, set, reg) \
sysctl_w32(m, (sysctl_r32(m, reg) & ~(clear)) | (set), reg)
#define status_w32(x, y) ltq_w32((x), status_membase + (y))
#define status_r32(x) ltq_r32(status_membase + (x))
static void __iomem *sysctl_membase[3], *status_membase;
void __iomem *ltq_sys1_membase, *ltq_ebu_membase;
void falcon_trigger_hrst(int level)
{
sysctl_w32(SYSCTL_SYS1, level & 1, SYS1_HRSTOUTC);
}
static inline void sysctl_wait(struct clk *clk,
unsigned int test, unsigned int reg)
{
int err = 1000000;
do {} while (--err && ((sysctl_r32(clk->module, reg)
& clk->bits) != test));
if (!err)
pr_err("module de/activation failed %d %08X %08X %08X\n",
clk->module, clk->bits, test,
sysctl_r32(clk->module, reg) & clk->bits);
}
static int sysctl_activate(struct clk *clk)
{
sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
sysctl_w32(clk->module, clk->bits, SYSCTL_ACT);
sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
return 0;
}
static void sysctl_deactivate(struct clk *clk)
{
sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
sysctl_w32(clk->module, clk->bits, SYSCTL_DEACT);
sysctl_wait(clk, 0, SYSCTL_ACTS);
}
static int sysctl_clken(struct clk *clk)
{
sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
sysctl_w32(clk->module, clk->bits, SYSCTL_ACT);
sysctl_wait(clk, clk->bits, SYSCTL_CLKS);
return 0;
}
static void sysctl_clkdis(struct clk *clk)
{
sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
sysctl_wait(clk, 0, SYSCTL_CLKS);
}
static void sysctl_reboot(struct clk *clk)
{
unsigned int act;
unsigned int bits;
act = sysctl_r32(clk->module, SYSCTL_ACT);
bits = ~act & clk->bits;
if (bits != 0) {
sysctl_w32(clk->module, bits, SYSCTL_CLKEN);
sysctl_w32(clk->module, bits, SYSCTL_ACT);
sysctl_wait(clk, bits, SYSCTL_ACTS);
}
sysctl_w32(clk->module, act & clk->bits, SYSCTL_RBT);
sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
}
/* enable the ONU core */
static void falcon_gpe_enable(void)
{
unsigned int freq;
unsigned int status;
/* if the clock is already enabled */
status = sysctl_r32(SYSCTL_SYS1, SYS1_INFRAC);
if (status & (1 << (GPPC_OFFSET + 1)))
return;
freq = (status_r32(STATUS_CONFIG) &
GPEFREQ_MASK) >>
GPEFREQ_OFFSET;
if (freq == 0)
freq = 1; /* use 625MHz on unfused chip */
/* apply new frequency */
sysctl_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1),
			freq << (GPPC_OFFSET + 2), SYS1_INFRAC);
udelay(1);
/* enable new frequency */
sysctl_w32_mask(SYSCTL_SYS1, 0, 1 << (GPPC_OFFSET + 1), SYS1_INFRAC);
udelay(1);
}
static inline void clkdev_add_sys(const char *dev, unsigned int module,
unsigned int bits)
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
return;
clk->cl.dev_id = dev;
clk->cl.con_id = NULL;
clk->cl.clk = clk;
clk->module = module;
clk->bits = bits;
clk->activate = sysctl_activate;
clk->deactivate = sysctl_deactivate;
clk->enable = sysctl_clken;
clk->disable = sysctl_clkdis;
clk->reboot = sysctl_reboot;
clkdev_add(&clk->cl);
}
void __init ltq_soc_init(void)
{
struct device_node *np_status =
of_find_compatible_node(NULL, NULL, "lantiq,status-falcon");
struct device_node *np_ebu =
of_find_compatible_node(NULL, NULL, "lantiq,ebu-falcon");
struct device_node *np_sys1 =
of_find_compatible_node(NULL, NULL, "lantiq,sys1-falcon");
struct device_node *np_syseth =
of_find_compatible_node(NULL, NULL, "lantiq,syseth-falcon");
struct device_node *np_sysgpe =
of_find_compatible_node(NULL, NULL, "lantiq,sysgpe-falcon");
struct resource res_status, res_ebu, res_sys[3];
int i;
/* check if all the core register ranges are available */
if (!np_status || !np_ebu || !np_sys1 || !np_syseth || !np_sysgpe)
panic("Failed to load core nodes from devicetree");
if (of_address_to_resource(np_status, 0, &res_status) ||
of_address_to_resource(np_ebu, 0, &res_ebu) ||
of_address_to_resource(np_sys1, 0, &res_sys[0]) ||
of_address_to_resource(np_syseth, 0, &res_sys[1]) ||
of_address_to_resource(np_sysgpe, 0, &res_sys[2]))
panic("Failed to get core resources");
of_node_put(np_status);
of_node_put(np_ebu);
of_node_put(np_sys1);
of_node_put(np_syseth);
of_node_put(np_sysgpe);
if ((request_mem_region(res_status.start, resource_size(&res_status),
res_status.name) < 0) ||
(request_mem_region(res_ebu.start, resource_size(&res_ebu),
res_ebu.name) < 0) ||
(request_mem_region(res_sys[0].start,
resource_size(&res_sys[0]),
res_sys[0].name) < 0) ||
(request_mem_region(res_sys[1].start,
resource_size(&res_sys[1]),
res_sys[1].name) < 0) ||
(request_mem_region(res_sys[2].start,
resource_size(&res_sys[2]),
res_sys[2].name) < 0))
pr_err("Failed to request core resources");
status_membase = ioremap(res_status.start,
resource_size(&res_status));
ltq_ebu_membase = ioremap(res_ebu.start,
resource_size(&res_ebu));
if (!status_membase || !ltq_ebu_membase)
panic("Failed to remap core resources");
for (i = 0; i < 3; i++) {
sysctl_membase[i] = ioremap(res_sys[i].start,
resource_size(&res_sys[i]));
if (!sysctl_membase[i])
panic("Failed to remap sysctrl resources");
}
ltq_sys1_membase = sysctl_membase[0];
falcon_gpe_enable();
/* get our 3 static rates for cpu, fpi and io clocks */
if (ltq_sys1_r32(SYS1_CPU0CC) & CPU0CC_CPUDIV)
clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M, 0);
else
clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M, 0);
/* add our clock domains */
clkdev_add_sys("1d810000.gpio", SYSCTL_SYSETH, ACTS_P0);
clkdev_add_sys("1d810100.gpio", SYSCTL_SYSETH, ACTS_P2);
clkdev_add_sys("1e800100.gpio", SYSCTL_SYS1, ACTS_P1);
clkdev_add_sys("1e800200.gpio", SYSCTL_SYS1, ACTS_P3);
clkdev_add_sys("1e800300.gpio", SYSCTL_SYS1, ACTS_P4);
clkdev_add_sys("1db01000.pad", SYSCTL_SYSETH, ACTS_PADCTRL0);
clkdev_add_sys("1db02000.pad", SYSCTL_SYSETH, ACTS_PADCTRL2);
clkdev_add_sys("1e800400.pad", SYSCTL_SYS1, ACTS_PADCTRL1);
clkdev_add_sys("1e800500.pad", SYSCTL_SYS1, ACTS_PADCTRL3);
clkdev_add_sys("1e800600.pad", SYSCTL_SYS1, ACTS_PADCTRL4);
clkdev_add_sys("1e100b00.serial", SYSCTL_SYS1, ACTS_ASC1_ACT);
clkdev_add_sys("1e100c00.serial", SYSCTL_SYS1, ACTS_ASC0_ACT);
clkdev_add_sys("1e100d00.spi", SYSCTL_SYS1, ACTS_SSC0);
clkdev_add_sys("1e200000.i2c", SYSCTL_SYS1, ACTS_I2C_ACT);
}
| linux-master | arch/mips/lantiq/falcon/sysctrl.c |
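/*
 * Illustrative sketch, not part of sysctrl.c above: one of the SYS1
 * activation domains registered by clkdev_add_sys(), consumed through the
 * plain clk API. The dev_id "1e100c00.serial" is taken from the table above;
 * clk_enable() lands in sysctl_clken(), which sets the CLKEN bit and waits
 * for SYSCTL_CLKS to report the clock running.
 */
static int example_enable_asc0(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);	/* dev must be 1e100c00.serial */

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	return clk_enable(clk);
}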
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2012 Thomas Langer <[email protected]>
* Copyright (C) 2012 John Crispin <[email protected]>
*/
#include <linux/kernel.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <lantiq_soc.h>
#include "../prom.h"
#define SOC_FALCON "Falcon"
#define SOC_FALCON_D "Falcon-D"
#define SOC_FALCON_V "Falcon-V"
#define SOC_FALCON_M "Falcon-M"
#define COMP_FALCON "lantiq,falcon"
#define PART_SHIFT 12
#define PART_MASK 0x0FFFF000
#define REV_SHIFT 28
#define REV_MASK 0xF0000000
#define SREV_SHIFT 22
#define SREV_MASK 0x03C00000
#define TYPE_SHIFT 26
#define TYPE_MASK 0x3C000000
/* reset, nmi and ejtag exception vectors */
#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
#define BOOT_RVEC (BOOT_REG_BASE | 0x00)
#define BOOT_NVEC (BOOT_REG_BASE | 0x04)
#define BOOT_EVEC (BOOT_REG_BASE | 0x08)
void __init ltq_soc_nmi_setup(void)
{
extern void (*nmi_handler)(void);
ltq_w32((unsigned long)&nmi_handler, (void *)BOOT_NVEC);
}
void __init ltq_soc_ejtag_setup(void)
{
extern void (*ejtag_debug_handler)(void);
ltq_w32((unsigned long)&ejtag_debug_handler, (void *)BOOT_EVEC);
}
void __init ltq_soc_detect(struct ltq_soc_info *i)
{
u32 type;
i->partnum = (ltq_r32(FALCON_CHIPID) & PART_MASK) >> PART_SHIFT;
i->rev = (ltq_r32(FALCON_CHIPID) & REV_MASK) >> REV_SHIFT;
i->srev = ((ltq_r32(FALCON_CHIPCONF) & SREV_MASK) >> SREV_SHIFT);
i->compatible = COMP_FALCON;
i->type = SOC_TYPE_FALCON;
sprintf(i->rev_type, "%c%d%d", (i->srev & 0x4) ? ('B') : ('A'),
i->rev & 0x7, (i->srev & 0x3) + 1);
switch (i->partnum) {
case SOC_ID_FALCON:
type = (ltq_r32(FALCON_CHIPTYPE) & TYPE_MASK) >> TYPE_SHIFT;
switch (type) {
case 0:
i->name = SOC_FALCON_D;
break;
case 1:
i->name = SOC_FALCON_V;
break;
case 2:
i->name = SOC_FALCON_M;
break;
default:
i->name = SOC_FALCON;
break;
}
break;
default:
unreachable();
break;
}
board_nmi_handler_setup = ltq_soc_nmi_setup;
board_ejtag_handler_setup = ltq_soc_ejtag_setup;
}
| linux-master | arch/mips/lantiq/falcon/prom.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Loongson-3 Virtual IPI interrupt support.
*
* Copyright (C) 2019 Loongson Technologies, Inc. All rights reserved.
*
* Authors: Chen Zhu <[email protected]>
* Authors: Huacai Chen <[email protected]>
*/
#include <linux/kvm_host.h>
#define IPI_BASE 0x3ff01000ULL
#define CORE0_STATUS_OFF 0x000
#define CORE0_EN_OFF 0x004
#define CORE0_SET_OFF 0x008
#define CORE0_CLEAR_OFF 0x00c
#define CORE0_BUF_20 0x020
#define CORE0_BUF_28 0x028
#define CORE0_BUF_30 0x030
#define CORE0_BUF_38 0x038
#define CORE1_STATUS_OFF 0x100
#define CORE1_EN_OFF 0x104
#define CORE1_SET_OFF 0x108
#define CORE1_CLEAR_OFF 0x10c
#define CORE1_BUF_20 0x120
#define CORE1_BUF_28 0x128
#define CORE1_BUF_30 0x130
#define CORE1_BUF_38 0x138
#define CORE2_STATUS_OFF 0x200
#define CORE2_EN_OFF 0x204
#define CORE2_SET_OFF 0x208
#define CORE2_CLEAR_OFF 0x20c
#define CORE2_BUF_20 0x220
#define CORE2_BUF_28 0x228
#define CORE2_BUF_30 0x230
#define CORE2_BUF_38 0x238
#define CORE3_STATUS_OFF 0x300
#define CORE3_EN_OFF 0x304
#define CORE3_SET_OFF 0x308
#define CORE3_CLEAR_OFF 0x30c
#define CORE3_BUF_20 0x320
#define CORE3_BUF_28 0x328
#define CORE3_BUF_30 0x330
#define CORE3_BUF_38 0x338
static int loongson_vipi_read(struct loongson_kvm_ipi *ipi,
gpa_t addr, int len, void *val)
{
uint32_t core = (addr >> 8) & 3;
uint32_t node = (addr >> 44) & 3;
uint32_t id = core + node * 4;
uint64_t offset = addr & 0xff;
void *pbuf;
struct ipi_state *s = &(ipi->ipistate[id]);
BUG_ON(offset & (len - 1));
switch (offset) {
case CORE0_STATUS_OFF:
*(uint64_t *)val = s->status;
break;
case CORE0_EN_OFF:
*(uint64_t *)val = s->en;
break;
case CORE0_SET_OFF:
*(uint64_t *)val = 0;
break;
case CORE0_CLEAR_OFF:
*(uint64_t *)val = 0;
break;
case CORE0_BUF_20 ... CORE0_BUF_38:
pbuf = (void *)s->buf + (offset - 0x20);
if (len == 8)
*(uint64_t *)val = *(uint64_t *)pbuf;
else /* Assume len == 4 */
*(uint32_t *)val = *(uint32_t *)pbuf;
break;
default:
pr_notice("%s with unknown addr %llx\n", __func__, addr);
break;
}
return 0;
}
static int loongson_vipi_write(struct loongson_kvm_ipi *ipi,
gpa_t addr, int len, const void *val)
{
uint32_t core = (addr >> 8) & 3;
uint32_t node = (addr >> 44) & 3;
uint32_t id = core + node * 4;
uint64_t data, offset = addr & 0xff;
void *pbuf;
struct kvm *kvm = ipi->kvm;
struct kvm_mips_interrupt irq;
struct ipi_state *s = &(ipi->ipistate[id]);
data = *(uint64_t *)val;
BUG_ON(offset & (len - 1));
switch (offset) {
case CORE0_STATUS_OFF:
break;
case CORE0_EN_OFF:
s->en = data;
break;
case CORE0_SET_OFF:
s->status |= data;
irq.cpu = id;
irq.irq = 6;
kvm_vcpu_ioctl_interrupt(kvm_get_vcpu(kvm, id), &irq);
break;
case CORE0_CLEAR_OFF:
s->status &= ~data;
if (!s->status) {
irq.cpu = id;
irq.irq = -6;
kvm_vcpu_ioctl_interrupt(kvm_get_vcpu(kvm, id), &irq);
}
break;
case CORE0_BUF_20 ... CORE0_BUF_38:
pbuf = (void *)s->buf + (offset - 0x20);
if (len == 8)
*(uint64_t *)pbuf = (uint64_t)data;
else /* Assume len == 4 */
*(uint32_t *)pbuf = (uint32_t)data;
break;
default:
pr_notice("%s with unknown addr %llx\n", __func__, addr);
break;
}
return 0;
}
static int kvm_ipi_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
unsigned long flags;
struct loongson_kvm_ipi *ipi;
struct ipi_io_device *ipi_device;
ipi_device = container_of(dev, struct ipi_io_device, device);
ipi = ipi_device->ipi;
spin_lock_irqsave(&ipi->lock, flags);
loongson_vipi_read(ipi, addr, len, val);
spin_unlock_irqrestore(&ipi->lock, flags);
return 0;
}
static int kvm_ipi_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
unsigned long flags;
struct loongson_kvm_ipi *ipi;
struct ipi_io_device *ipi_device;
ipi_device = container_of(dev, struct ipi_io_device, device);
ipi = ipi_device->ipi;
spin_lock_irqsave(&ipi->lock, flags);
loongson_vipi_write(ipi, addr, len, val);
spin_unlock_irqrestore(&ipi->lock, flags);
return 0;
}
static const struct kvm_io_device_ops kvm_ipi_ops = {
.read = kvm_ipi_read,
.write = kvm_ipi_write,
};
void kvm_init_loongson_ipi(struct kvm *kvm)
{
int i;
unsigned long addr;
struct loongson_kvm_ipi *s;
struct kvm_io_device *device;
s = &kvm->arch.ipi;
s->kvm = kvm;
spin_lock_init(&s->lock);
/*
* Initialize IPI device
*/
for (i = 0; i < 4; i++) {
device = &s->dev_ipi[i].device;
kvm_iodevice_init(device, &kvm_ipi_ops);
addr = (((unsigned long)i) << 44) + IPI_BASE;
mutex_lock(&kvm->slots_lock);
kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, 0x400, device);
mutex_unlock(&kvm->slots_lock);
s->dev_ipi[i].ipi = s;
s->dev_ipi[i].node_id = i;
}
}
| linux-master | arch/mips/kvm/loongson_ipi.c |
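/*
 * Illustrative sketch, not part of loongson_ipi.c above: the core/node
 * decode shared by both MMIO handlers, written out as a helper. The shifts
 * mirror loongson_vipi_read()/loongson_vipi_write(); the returned id is what
 * gets passed to kvm_get_vcpu().
 */
static inline unsigned int example_vipi_target_vcpu(gpa_t addr)
{
	unsigned int core = (addr >> 8) & 3;
	unsigned int node = (addr >> 44) & 3;

	return core + node * 4;
}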
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS: Interrupt delivery
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <linux/kvm_host.h>
#include "interrupt.h"
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
unsigned int priority;
if (!(*pending) && !(*pending_clr))
return;
priority = __ffs(*pending_clr);
while (priority <= MIPS_EXC_MAX) {
kvm_mips_callbacks->irq_clear(vcpu, priority, cause);
priority = find_next_bit(pending_clr,
BITS_PER_BYTE * sizeof(*pending_clr),
priority + 1);
}
priority = __ffs(*pending);
while (priority <= MIPS_EXC_MAX) {
kvm_mips_callbacks->irq_deliver(vcpu, priority, cause);
priority = find_next_bit(pending,
BITS_PER_BYTE * sizeof(*pending),
priority + 1);
}
}
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
{
return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
}
| linux-master | arch/mips/kvm/interrupt.c |
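/*
 * Illustrative sketch, not part of interrupt.c above: how an interrupt
 * source typically lands in the two bitmaps drained by
 * kvm_mips_deliver_interrupts(). The helper name and the priority argument
 * are placeholders; the real queue/dequeue helpers live in the callback
 * implementations.
 */
static inline void example_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	/* mark the exception pending and cancel any pending clear */
	set_bit(priority, &vcpu->arch.pending_exceptions);
	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}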
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS: COP0 access histogram
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <[email protected]>
*/
#include <linux/kvm_host.h>
char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
"Index",
"Random",
"EntryLo0",
"EntryLo1",
"Context",
"PG Mask",
"Wired",
"HWREna",
"BadVAddr",
"Count",
"EntryHI",
"Compare",
"Status",
"Cause",
"EXC PC",
"PRID",
"Config",
"LLAddr",
"Watch Lo",
"Watch Hi",
"X Context",
"Reserved",
"Impl Dep",
"Debug",
"DEPC",
"PerfCnt",
"ErrCtl",
"CacheErr",
"TagLo",
"TagHi",
"ErrorEPC",
"DESAVE"
};
void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
int i, j;
kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
if (vcpu->arch.cop0.stat[i][j])
kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
vcpu->arch.cop0.stat[i][j]);
}
}
#endif
}
| linux-master | arch/mips/kvm/stats.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS: MIPS specific KVM APIs
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/kvm_host.h>
#include "interrupt.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
#ifndef VECTORSPACING
#define VECTORSPACING 0x100 /* for EI/VI mode */
#endif
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS()
};
const struct kvm_stats_header kvm_vm_stats_header = {
.name_size = KVM_STATS_NAME_SIZE,
.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
.id_offset = sizeof(struct kvm_stats_header),
.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
sizeof(kvm_vm_stats_desc),
};
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
STATS_DESC_COUNTER(VCPU, wait_exits),
STATS_DESC_COUNTER(VCPU, cache_exits),
STATS_DESC_COUNTER(VCPU, signal_exits),
STATS_DESC_COUNTER(VCPU, int_exits),
STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
STATS_DESC_COUNTER(VCPU, tlbmod_exits),
STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
STATS_DESC_COUNTER(VCPU, syscall_exits),
STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
STATS_DESC_COUNTER(VCPU, break_inst_exits),
STATS_DESC_COUNTER(VCPU, trap_inst_exits),
STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
STATS_DESC_COUNTER(VCPU, fpe_exits),
STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
STATS_DESC_COUNTER(VCPU, vz_hc_exits),
STATS_DESC_COUNTER(VCPU, vz_grr_exits),
STATS_DESC_COUNTER(VCPU, vz_gva_exits),
STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
#ifdef CONFIG_CPU_LOONGSON64
STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
#endif
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
.name_size = KVM_STATS_NAME_SIZE,
.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
.id_offset = sizeof(struct kvm_stats_header),
.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
sizeof(kvm_vcpu_stats_desc),
};
bool kvm_trace_guest_mode_change;
int kvm_guest_mode_change_trace_reg(void)
{
kvm_trace_guest_mode_change = true;
return 0;
}
void kvm_guest_mode_change_trace_unreg(void)
{
kvm_trace_guest_mode_change = false;
}
/*
* XXXKYMA: We are simulating a processor that has the WII bit set in
* Config7, so we are "runnable" if interrupts are pending
*/
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return !!(vcpu->arch.pending_exceptions);
}
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
return false;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return 1;
}
int kvm_arch_hardware_enable(void)
{
return kvm_mips_callbacks->hardware_enable();
}
void kvm_arch_hardware_disable(void)
{
kvm_mips_callbacks->hardware_disable();
}
extern void kvm_init_loongson_ipi(struct kvm *kvm);
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
case KVM_VM_MIPS_AUTO:
break;
case KVM_VM_MIPS_VZ:
break;
default:
/* Unsupported KVM type */
return -EINVAL;
}
/* Allocate page table to map GPA -> RPA */
kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
if (!kvm->arch.gpa_mm.pgd)
return -ENOMEM;
#ifdef CONFIG_CPU_LOONGSON64
kvm_init_loongson_ipi(kvm);
#endif
return 0;
}
static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
/* It should always be safe to remove after flushing the whole range */
WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_destroy_vcpus(kvm);
kvm_mips_free_gpa_pt(kvm);
}
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
return -ENOIOCTLCMD;
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
/* Flush whole GPA */
kvm_mips_flush_gpa_pt(kvm, 0, ~0);
kvm_flush_remote_tlbs(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
/*
* The slot has been made invalid (ready for moving or deletion), so we
* need to ensure that it can no longer be accessed by any guest VCPUs.
*/
spin_lock(&kvm->mmu_lock);
/* Flush slot from GPA */
kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
slot->base_gfn + slot->npages - 1);
kvm_flush_remote_tlbs_memslot(kvm, slot);
spin_unlock(&kvm->mmu_lock);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
int needs_flush;
/*
* If dirty page logging is enabled, write protect all pages in the slot
* ready for dirty logging.
*
* There is no need to do this in any of the following cases:
* CREATE: No dirty mappings will already exist.
* MOVE/DELETE: The old mappings will already have been cleaned up by
* kvm_arch_flush_shadow_memslot()
*/
if (change == KVM_MR_FLAGS_ONLY &&
(!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
spin_lock(&kvm->mmu_lock);
/* Write protect GPA page table entries */
needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
new->base_gfn + new->npages - 1);
if (needs_flush)
kvm_flush_remote_tlbs_memslot(kvm, new);
spin_unlock(&kvm->mmu_lock);
}
}
static inline void dump_handler(const char *symbol, void *start, void *end)
{
u32 *p;
pr_debug("LEAF(%s)\n", symbol);
pr_debug("\t.set push\n");
pr_debug("\t.set noreorder\n");
for (p = start; p < (u32 *)end; ++p)
pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);
pr_debug("\t.set\tpop\n");
pr_debug("\tEND(%s)\n", symbol);
}
/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
struct kvm_vcpu *vcpu;
vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
kvm_mips_callbacks->queue_timer_int(vcpu);
vcpu->arch.wait = 0;
rcuwait_wake_up(&vcpu->wait);
return kvm_mips_count_timeout(vcpu);
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
int err, size;
void *gebase, *p, *handler, *refill_start, *refill_end;
int i;
kvm_debug("kvm @ %p: create cpu %d at %p\n",
vcpu->kvm, vcpu->vcpu_id, vcpu);
err = kvm_mips_callbacks->vcpu_init(vcpu);
if (err)
return err;
hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
/*
* Allocate space for host mode exception handlers that handle
* guest mode exits
*/
if (cpu_has_veic || cpu_has_vint)
size = 0x200 + VECTORSPACING * 64;
else
size = 0x4000;
gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
if (!gebase) {
err = -ENOMEM;
goto out_uninit_vcpu;
}
kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
ALIGN(size, PAGE_SIZE), gebase);
/*
* Check new ebase actually fits in CP0_EBase. The lack of a write gate
* limits us to the low 512MB of physical address space. If the memory
* we allocate is out of range, just give up now.
*/
if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
gebase);
err = -ENOMEM;
goto out_free_gebase;
}
/* Save new ebase */
vcpu->arch.guest_ebase = gebase;
/* Build guest exception vectors dynamically in unmapped memory */
handler = gebase + 0x2000;
/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
refill_start = gebase;
if (IS_ENABLED(CONFIG_64BIT))
refill_start += 0x080;
refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
/* General Exception Entry point */
kvm_mips_build_exception(gebase + 0x180, handler);
/* For vectored interrupts poke the exception code @ all offsets 0-7 */
for (i = 0; i < 8; i++) {
kvm_debug("L1 Vectored handler @ %p\n",
gebase + 0x200 + (i * VECTORSPACING));
kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
handler);
}
/* General exit handler */
p = handler;
p = kvm_mips_build_exit(p);
/* Guest entry routine */
vcpu->arch.vcpu_run = p;
p = kvm_mips_build_vcpu_run(p);
/* Dump the generated code */
pr_debug("#include <asm/asm.h>\n");
pr_debug("#include <asm/regdef.h>\n");
pr_debug("\n");
dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
dump_handler("kvm_tlb_refill", refill_start, refill_end);
dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
/* Invalidate the icache for these ranges */
flush_icache_range((unsigned long)gebase,
(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
/* Init */
vcpu->arch.last_sched_cpu = -1;
vcpu->arch.last_exec_cpu = -1;
/* Initial guest state */
err = kvm_mips_callbacks->vcpu_setup(vcpu);
if (err)
goto out_free_gebase;
return 0;
out_free_gebase:
kfree(gebase);
out_uninit_vcpu:
kvm_mips_callbacks->vcpu_uninit(vcpu);
return err;
}
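/*
 * For reference, the exception region (gebase) assembled above ends up laid
 * out roughly as follows (offsets taken from the code in this function; the
 * generated exit handler is expected to call kvm_mips_handle_exit()):
 *
 *   gebase + 0x000 (or 0x080 on 64-bit)  TLB/XTLB refill handler
 *   gebase + 0x180                       general exception entry
 *   gebase + 0x200 + i * VECTORSPACING   vectored interrupt entries, i = 0..7
 *   gebase + 0x2000                      common exit handler, immediately
 *                                        followed by the generated vcpu_run
 *                                        entry routine
 */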
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
hrtimer_cancel(&vcpu->arch.comparecount_timer);
kvm_mips_dump_stats(vcpu);
kvm_mmu_free_memory_caches(vcpu);
kfree(vcpu->arch.guest_ebase);
kvm_mips_callbacks->vcpu_uninit(vcpu);
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
return -ENOIOCTLCMD;
}
/*
* Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
* the vCPU is running.
*
* This must be noinstr as instrumentation may make use of RCU, and this is not
* safe during the EQS.
*/
static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
int ret;
guest_state_enter_irqoff();
ret = kvm_mips_callbacks->vcpu_run(vcpu);
guest_state_exit_irqoff();
return ret;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
int r = -EINTR;
vcpu_load(vcpu);
kvm_sigset_activate(vcpu);
if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
kvm_mips_complete_mmio_load(vcpu);
vcpu->mmio_needed = 0;
}
if (vcpu->run->immediate_exit)
goto out;
lose_fpu(1);
local_irq_disable();
guest_timing_enter_irqoff();
trace_kvm_enter(vcpu);
/*
* Make sure the read of VCPU requests in vcpu_run() callback is not
* reordered ahead of the write to vcpu->mode, or we could miss a TLB
* flush request while the requester sees the VCPU as outside of guest
* mode and not needing an IPI.
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
r = kvm_mips_vcpu_enter_exit(vcpu);
/*
* We must ensure that any pending interrupts are taken before
* we exit guest timing so that timer ticks are accounted as
* guest time. Transiently unmask interrupts so that any
* pending interrupts are taken.
*
* TODO: is there a barrier which ensures that pending interrupts are
* recognised? Currently this just hopes that the CPU takes any pending
* interrupts between the enable and disable.
*/
local_irq_enable();
local_irq_disable();
trace_kvm_out(vcpu);
guest_timing_exit_irqoff();
local_irq_enable();
out:
kvm_sigset_deactivate(vcpu);
vcpu_put(vcpu);
return r;
}
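/*
 * A hedged sketch (not part of this file) of how userspace typically drives
 * the ioctl above. It assumes vcpu_fd came from KVM_CREATE_VCPU and
 * mmap_size from KVM_GET_VCPU_MMAP_SIZE; a real VMM would service
 * KVM_EXIT_MMIO before re-entering rather than just looping:
 *
 *   struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *                              MAP_SHARED, vcpu_fd, 0);
 *
 *   for (;;) {
 *       if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *           break;
 *       if (run->exit_reason != KVM_EXIT_INTR &&
 *           run->exit_reason != KVM_EXIT_MMIO)
 *           break;
 *   }
 */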
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq)
{
int intr = (int)irq->irq;
struct kvm_vcpu *dvcpu = NULL;
if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
(int)intr);
if (irq->cpu == -1)
dvcpu = vcpu;
else
dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu);
if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
kvm_mips_callbacks->queue_io_int(dvcpu, irq);
} else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
} else {
kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
irq->cpu, irq->irq);
return -EINVAL;
}
dvcpu->arch.wait = 0;
rcuwait_wake_up(&dvcpu->wait);
return 0;
}
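/*
 * Illustrative userspace usage of the interrupt ioctl handled above (a
 * sketch, not part of this file): a positive irq number queues the
 * corresponding I/O interrupt, its negation dequeues it again, and
 * cpu == -1 targets the vCPU the ioctl is issued on. The first call below
 * raises I/O interrupt 2, the second lowers it again.
 *
 *   struct kvm_mips_interrupt irq = {
 *       .cpu = -1,
 *       .irq = 2,
 *   };
 *   ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 *
 *   irq.irq = -2;
 *   ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 */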
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
return -ENOIOCTLCMD;
}
static u64 kvm_mips_get_one_regs[] = {
KVM_REG_MIPS_R0,
KVM_REG_MIPS_R1,
KVM_REG_MIPS_R2,
KVM_REG_MIPS_R3,
KVM_REG_MIPS_R4,
KVM_REG_MIPS_R5,
KVM_REG_MIPS_R6,
KVM_REG_MIPS_R7,
KVM_REG_MIPS_R8,
KVM_REG_MIPS_R9,
KVM_REG_MIPS_R10,
KVM_REG_MIPS_R11,
KVM_REG_MIPS_R12,
KVM_REG_MIPS_R13,
KVM_REG_MIPS_R14,
KVM_REG_MIPS_R15,
KVM_REG_MIPS_R16,
KVM_REG_MIPS_R17,
KVM_REG_MIPS_R18,
KVM_REG_MIPS_R19,
KVM_REG_MIPS_R20,
KVM_REG_MIPS_R21,
KVM_REG_MIPS_R22,
KVM_REG_MIPS_R23,
KVM_REG_MIPS_R24,
KVM_REG_MIPS_R25,
KVM_REG_MIPS_R26,
KVM_REG_MIPS_R27,
KVM_REG_MIPS_R28,
KVM_REG_MIPS_R29,
KVM_REG_MIPS_R30,
KVM_REG_MIPS_R31,
#ifndef CONFIG_CPU_MIPSR6
KVM_REG_MIPS_HI,
KVM_REG_MIPS_LO,
#endif
KVM_REG_MIPS_PC,
};
static u64 kvm_mips_get_one_regs_fpu[] = {
KVM_REG_MIPS_FCR_IR,
KVM_REG_MIPS_FCR_CSR,
};
static u64 kvm_mips_get_one_regs_msa[] = {
KVM_REG_MIPS_MSA_IR,
KVM_REG_MIPS_MSA_CSR,
};
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
unsigned long ret;
ret = ARRAY_SIZE(kvm_mips_get_one_regs);
if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
/* odd doubles */
if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
ret += 16;
}
if (kvm_mips_guest_can_have_msa(&vcpu->arch))
ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
ret += kvm_mips_callbacks->num_regs(vcpu);
return ret;
}
static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
u64 index;
unsigned int i;
if (copy_to_user(indices, kvm_mips_get_one_regs,
sizeof(kvm_mips_get_one_regs)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_mips_get_one_regs);
if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
sizeof(kvm_mips_get_one_regs_fpu)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);
for (i = 0; i < 32; ++i) {
index = KVM_REG_MIPS_FPR_32(i);
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
/* skip odd doubles if no F64 */
if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
continue;
index = KVM_REG_MIPS_FPR_64(i);
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
}
if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
sizeof(kvm_mips_get_one_regs_msa)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);
for (i = 0; i < 32; ++i) {
index = KVM_REG_MIPS_VEC_128(i);
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
}
return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
int ret;
s64 v;
s64 vs[2];
unsigned int idx;
switch (reg->id) {
/* General purpose registers */
case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
break;
#ifndef CONFIG_CPU_MIPSR6
case KVM_REG_MIPS_HI:
v = (long)vcpu->arch.hi;
break;
case KVM_REG_MIPS_LO:
v = (long)vcpu->arch.lo;
break;
#endif
case KVM_REG_MIPS_PC:
v = (long)vcpu->arch.pc;
break;
/* Floating point registers */
case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
return -EINVAL;
idx = reg->id - KVM_REG_MIPS_FPR_32(0);
/* Odd singles in top of even double when FR=0 */
if (kvm_read_c0_guest_status(cop0) & ST0_FR)
v = get_fpr32(&fpu->fpr[idx], 0);
else
v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
break;
case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
return -EINVAL;
idx = reg->id - KVM_REG_MIPS_FPR_64(0);
/* Can't access odd doubles in FR=0 mode */
if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
return -EINVAL;
v = get_fpr64(&fpu->fpr[idx], 0);
break;
case KVM_REG_MIPS_FCR_IR:
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
return -EINVAL;
v = boot_cpu_data.fpu_id;
break;
case KVM_REG_MIPS_FCR_CSR:
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
return -EINVAL;
v = fpu->fcr31;
break;
/* MIPS SIMD Architecture (MSA) registers */
case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
if (!kvm_mips_guest_has_msa(&vcpu->arch))
return -EINVAL;
/* Can't access MSA registers in FR=0 mode */
if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
return -EINVAL;
idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
/* least significant byte first */
vs[0] = get_fpr64(&fpu->fpr[idx], 0);
vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
/* most significant byte first */
vs[0] = get_fpr64(&fpu->fpr[idx], 1);
vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
break;
case KVM_REG_MIPS_MSA_IR:
if (!kvm_mips_guest_has_msa(&vcpu->arch))
return -EINVAL;
v = boot_cpu_data.msa_id;
break;
case KVM_REG_MIPS_MSA_CSR:
if (!kvm_mips_guest_has_msa(&vcpu->arch))
return -EINVAL;
v = fpu->msacsr;
break;
/* registers to be handled specially */
default:
ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
if (ret)
return ret;
break;
}
if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
return put_user(v, uaddr64);
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
u32 v32 = (u32)v;
return put_user(v32, uaddr32);
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
void __user *uaddr = (void __user *)(long)reg->addr;
return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
} else {
return -EINVAL;
}
}
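/*
 * Illustrative userspace counterpart of the accessor above (a sketch, not
 * part of this file): reading the guest PC with KVM_GET_ONE_REG. reg.addr
 * carries a user pointer cast to u64, and the transfer width is taken from
 * the KVM_REG_SIZE_* bits of the register id.
 *
 *   __u64 pc;
 *   struct kvm_one_reg reg = {
 *       .id   = KVM_REG_MIPS_PC,
 *       .addr = (__u64)(unsigned long)&pc,
 *   };
 *   if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
 *       printf("guest pc = 0x%llx\n", (unsigned long long)pc);
 */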
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
s64 v;
s64 vs[2];
unsigned int idx;
if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
if (get_user(v, uaddr64) != 0)
return -EFAULT;
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
s32 v32;
if (get_user(v32, uaddr32) != 0)
return -EFAULT;
v = (s64)v32;
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
void __user *uaddr = (void __user *)(long)reg->addr;
return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
} else {
return -EINVAL;
}
switch (reg->id) {
/* General purpose registers */
case KVM_REG_MIPS_R0:
/* Silently ignore requests to set $0 */
break;
case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
break;
#ifndef CONFIG_CPU_MIPSR6
case KVM_REG_MIPS_HI:
vcpu->arch.hi = v;
break;
case KVM_REG_MIPS_LO:
vcpu->arch.lo = v;
break;
#endif
case KVM_REG_MIPS_PC:
vcpu->arch.pc = v;
break;
/* Floating point registers */
case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
return -EINVAL;
idx = reg->id - KVM_REG_MIPS_FPR_32(0);
/* Odd singles in top of even double when FR=0 */
if (kvm_read_c0_guest_status(cop0) & ST0_FR)
set_fpr32(&fpu->fpr[idx], 0, v);
else
set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
break;
case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
return -EINVAL;
idx = reg->id - KVM_REG_MIPS_FPR_64(0);
/* Can't access odd doubles in FR=0 mode */
if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
return -EINVAL;
set_fpr64(&fpu->fpr[idx], 0, v);
break;
case KVM_REG_MIPS_FCR_IR:
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
return -EINVAL;
/* Read-only */
break;
case KVM_REG_MIPS_FCR_CSR:
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
return -EINVAL;
fpu->fcr31 = v;
break;
/* MIPS SIMD Architecture (MSA) registers */
case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
if (!kvm_mips_guest_has_msa(&vcpu->arch))
return -EINVAL;
idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
/* least significant byte first */
set_fpr64(&fpu->fpr[idx], 0, vs[0]);
set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
/* most significant byte first */
set_fpr64(&fpu->fpr[idx], 1, vs[0]);
set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
break;
case KVM_REG_MIPS_MSA_IR:
if (!kvm_mips_guest_has_msa(&vcpu->arch))
return -EINVAL;
/* Read-only */
break;
case KVM_REG_MIPS_MSA_CSR:
if (!kvm_mips_guest_has_msa(&vcpu->arch))
return -EINVAL;
fpu->msacsr = v;
break;
/* registers to be handled specially */
default:
return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
}
return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
struct kvm_enable_cap *cap)
{
int r = 0;
if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
return -EINVAL;
if (cap->flags)
return -EINVAL;
if (cap->args[0])
return -EINVAL;
switch (cap->cap) {
case KVM_CAP_MIPS_FPU:
vcpu->arch.fpu_enabled = true;
break;
case KVM_CAP_MIPS_MSA:
vcpu->arch.msa_enabled = true;
break;
default:
r = -EINVAL;
break;
}
return r;
}
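/*
 * Illustrative userspace usage of the capability handler above (sketch
 * only, assuming the host actually advertises the capability): enabling
 * the guest FPU for one vCPU. flags and args[0] must be zero, as enforced
 * above.
 *
 *   struct kvm_enable_cap cap = {
 *       .cap = KVM_CAP_MIPS_FPU,
 *   };
 *   ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */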
long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
if (ioctl == KVM_INTERRUPT) {
struct kvm_mips_interrupt irq;
if (copy_from_user(&irq, argp, sizeof(irq)))
return -EFAULT;
kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
irq.irq);
return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
}
return -ENOIOCTLCMD;
}
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
long r;
vcpu_load(vcpu);
switch (ioctl) {
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
struct kvm_one_reg reg;
r = -EFAULT;
if (copy_from_user(&reg, argp, sizeof(reg)))
break;
if (ioctl == KVM_SET_ONE_REG)
r = kvm_mips_set_reg(vcpu, &reg);
else
r = kvm_mips_get_reg(vcpu, &reg);
break;
}
case KVM_GET_REG_LIST: {
struct kvm_reg_list __user *user_list = argp;
struct kvm_reg_list reg_list;
unsigned n;
r = -EFAULT;
if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
break;
n = reg_list.n;
reg_list.n = kvm_mips_num_regs(vcpu);
if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
break;
r = -E2BIG;
if (n < reg_list.n)
break;
r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
break;
}
case KVM_ENABLE_CAP: {
struct kvm_enable_cap cap;
r = -EFAULT;
if (copy_from_user(&cap, argp, sizeof(cap)))
break;
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
break;
}
default:
r = -ENOIOCTLCMD;
}
vcpu_put(vcpu);
return r;
}
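/*
 * The KVM_GET_REG_LIST case above follows KVM's usual two-call protocol.
 * A hedged userspace sketch (not part of this file): the first call is
 * expected to fail with E2BIG but still reports the required count.
 *
 *   struct kvm_reg_list probe = { .n = 0 }, *list;
 *   __u64 i;
 *
 *   ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);
 *   list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *   list->n = probe.n;
 *   if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0)
 *       for (i = 0; i < list->n; i++)
 *           printf("reg id 0x%llx\n", (unsigned long long)list->reg[i]);
 */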
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}
int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
kvm_mips_callbacks->prepare_flush_shadow(kvm);
return 1;
}
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
int r;
switch (ioctl) {
default:
r = -ENOIOCTLCMD;
}
return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
return -ENOIOCTLCMD;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -ENOIOCTLCMD;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r;
switch (ext) {
case KVM_CAP_ONE_REG:
case KVM_CAP_ENABLE_CAP:
case KVM_CAP_READONLY_MEM:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_IMMEDIATE_EXIT:
r = 1;
break;
case KVM_CAP_NR_VCPUS:
r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_MAX_VCPU_ID:
r = KVM_MAX_VCPU_IDS;
break;
case KVM_CAP_MIPS_FPU:
/* We don't handle systems with inconsistent cpu_has_fpu */
r = !!raw_cpu_has_fpu;
break;
case KVM_CAP_MIPS_MSA:
/*
* We don't support MSA vector partitioning yet:
* 1) It would require explicit support which can't be tested
* yet due to lack of support in current hardware.
* 2) It extends the state that would need to be saved/restored
* by e.g. QEMU for migration.
*
* When vector partitioning hardware becomes available, support
* could be added by requiring a flag when enabling
* KVM_CAP_MIPS_MSA capability to indicate that userland knows
* to save/restore the appropriate extra state.
*/
r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
break;
default:
r = kvm_mips_callbacks->check_extension(kvm, ext);
break;
}
return r;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return kvm_mips_pending_timer(vcpu) ||
kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI;
}
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
int i;
struct mips_coproc *cop0;
if (!vcpu)
return -1;
kvm_debug("VCPU Register Dump:\n");
kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
for (i = 0; i < 32; i += 4) {
kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
vcpu->arch.gprs[i],
vcpu->arch.gprs[i + 1],
vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
}
kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
cop0 = &vcpu->arch.cop0;
kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
kvm_read_c0_guest_status(cop0),
kvm_read_c0_guest_cause(cop0));
kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
vcpu_load(vcpu);
for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
vcpu->arch.gprs[i] = regs->gpr[i];
vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
vcpu->arch.hi = regs->hi;
vcpu->arch.lo = regs->lo;
vcpu->arch.pc = regs->pc;
vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
vcpu_load(vcpu);
for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
regs->gpr[i] = vcpu->arch.gprs[i];
regs->hi = vcpu->arch.hi;
regs->lo = vcpu->arch.lo;
regs->pc = vcpu->arch.pc;
vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
return 0;
}
static void kvm_mips_set_c0_status(void)
{
u32 status = read_c0_status();
if (cpu_has_dsp)
status |= (ST0_MX);
write_c0_status(status);
ehb();
}
/*
* Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
*/
static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
enum emulation_result er = EMULATE_DONE;
u32 inst;
int ret = RESUME_GUEST;
vcpu->mode = OUTSIDE_GUEST_MODE;
/* Set a default exit reason */
run->exit_reason = KVM_EXIT_UNKNOWN;
run->ready_for_interrupt_injection = 1;
/*
* Set the appropriate status bits based on host CPU features,
* before we hit the scheduler
*/
kvm_mips_set_c0_status();
local_irq_enable();
kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
cause, opc, run, vcpu);
trace_kvm_exit(vcpu, exccode);
switch (exccode) {
case EXCCODE_INT:
kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
++vcpu->stat.int_exits;
if (need_resched())
cond_resched();
ret = RESUME_GUEST;
break;
case EXCCODE_CPU:
kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
++vcpu->stat.cop_unusable_exits;
ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
/* XXXKYMA: Might need to return to user space */
if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
ret = RESUME_HOST;
break;
case EXCCODE_MOD:
++vcpu->stat.tlbmod_exits;
ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
break;
case EXCCODE_TLBS:
kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc,
badvaddr);
++vcpu->stat.tlbmiss_st_exits;
ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
break;
case EXCCODE_TLBL:
kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
++vcpu->stat.tlbmiss_ld_exits;
ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
break;
case EXCCODE_ADES:
++vcpu->stat.addrerr_st_exits;
ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
break;
case EXCCODE_ADEL:
++vcpu->stat.addrerr_ld_exits;
ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
break;
case EXCCODE_SYS:
++vcpu->stat.syscall_exits;
ret = kvm_mips_callbacks->handle_syscall(vcpu);
break;
case EXCCODE_RI:
++vcpu->stat.resvd_inst_exits;
ret = kvm_mips_callbacks->handle_res_inst(vcpu);
break;
case EXCCODE_BP:
++vcpu->stat.break_inst_exits;
ret = kvm_mips_callbacks->handle_break(vcpu);
break;
case EXCCODE_TR:
++vcpu->stat.trap_inst_exits;
ret = kvm_mips_callbacks->handle_trap(vcpu);
break;
case EXCCODE_MSAFPE:
++vcpu->stat.msa_fpe_exits;
ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
break;
case EXCCODE_FPE:
++vcpu->stat.fpe_exits;
ret = kvm_mips_callbacks->handle_fpe(vcpu);
break;
case EXCCODE_MSADIS:
++vcpu->stat.msa_disabled_exits;
ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
break;
case EXCCODE_GE:
/* defer exit accounting to handler */
ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
break;
default:
if (cause & CAUSEF_BD)
opc += 1;
inst = 0;
kvm_get_badinstr(opc, vcpu, &inst);
kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
exccode, opc, inst, badvaddr,
kvm_read_c0_guest_status(&vcpu->arch.cop0));
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
}
local_irq_disable();
if (ret == RESUME_GUEST)
kvm_vz_acquire_htimer(vcpu);
if (er == EMULATE_DONE && !(ret & RESUME_HOST))
kvm_mips_deliver_interrupts(vcpu, cause);
if (!(ret & RESUME_HOST)) {
/* Only check for signals if not already exiting to userspace */
if (signal_pending(current)) {
run->exit_reason = KVM_EXIT_INTR;
ret = (-EINTR << 2) | RESUME_HOST;
++vcpu->stat.signal_exits;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
}
}
if (ret == RESUME_GUEST) {
trace_kvm_reenter(vcpu);
/*
* Make sure the read of VCPU requests in vcpu_reenter()
* callback is not reordered ahead of the write to vcpu->mode,
* or we could miss a TLB flush request while the requester sees
* the VCPU as outside of guest mode and not needing an IPI.
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
kvm_mips_callbacks->vcpu_reenter(vcpu);
/*
* If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
* is live), restore FCR31 / MSACSR.
*
* This should be before returning to the guest exception
* vector, as it may well cause an [MSA] FP exception if there
* are pending exception bits unmasked. (see
* kvm_mips_csr_die_notifier() for how that is handled).
*/
if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
read_c0_status() & ST0_CU1)
__kvm_restore_fcsr(&vcpu->arch);
if (kvm_mips_guest_has_msa(&vcpu->arch) &&
read_c0_config5() & MIPS_CONF5_MSAEN)
__kvm_restore_msacsr(&vcpu->arch);
}
return ret;
}
int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
int ret;
guest_state_exit_irqoff();
ret = __kvm_mips_handle_exit(vcpu);
guest_state_enter_irqoff();
return ret;
}
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int sr, cfg5;
preempt_disable();
sr = kvm_read_c0_guest_status(cop0);
/*
* If MSA state is already live, it is undefined how it interacts with
* FR=0 FPU state, and we don't want to hit reserved instruction
* exceptions trying to save the MSA state later when CU=1 && FR=1, so
* play it safe and save it first.
*/
if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
kvm_lose_fpu(vcpu);
/*
* Enable FPU for guest
* We set FR and FRE according to guest context
*/
change_c0_status(ST0_CU1 | ST0_FR, sr);
if (cpu_has_fre) {
cfg5 = kvm_read_c0_guest_config5(cop0);
change_c0_config5(MIPS_CONF5_FRE, cfg5);
}
enable_fpu_hazard();
/* If guest FPU state not active, restore it now */
if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
__kvm_restore_fpu(&vcpu->arch);
vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
} else {
trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
}
preempt_enable();
}
#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int sr, cfg5;
preempt_disable();
/*
* Enable FPU if enabled in guest, since we're restoring FPU context
* anyway. We set FR and FRE according to guest context.
*/
if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
sr = kvm_read_c0_guest_status(cop0);
/*
* If FR=0 FPU state is already live, it is undefined how it
* interacts with MSA state, so play it safe and save it first.
*/
if (!(sr & ST0_FR) &&
(vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
kvm_lose_fpu(vcpu);
change_c0_status(ST0_CU1 | ST0_FR, sr);
if (sr & ST0_CU1 && cpu_has_fre) {
cfg5 = kvm_read_c0_guest_config5(cop0);
change_c0_config5(MIPS_CONF5_FRE, cfg5);
}
}
/* Enable MSA for guest */
set_c0_config5(MIPS_CONF5_MSAEN);
enable_fpu_hazard();
switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
case KVM_MIPS_AUX_FPU:
/*
* Guest FPU state already loaded, only restore upper MSA state
*/
__kvm_restore_msa_upper(&vcpu->arch);
vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
break;
case 0:
/* Neither FPU nor MSA already active, restore full MSA state */
__kvm_restore_msa(&vcpu->arch);
vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
if (kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
KVM_TRACE_AUX_FPU_MSA);
break;
default:
trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
break;
}
preempt_enable();
}
#endif
/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
disable_msa();
trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
}
if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
clear_c0_status(ST0_CU1 | ST0_FR);
trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
}
preempt_enable();
}
/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
/*
* With T&E, FPU & MSA get disabled in root context (hardware) when it
* is disabled in guest context (software), but the register state in
* the hardware may still be in use.
* This is why we explicitly re-enable the hardware before saving.
*/
preempt_disable();
if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
__kvm_save_msa(&vcpu->arch);
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
/* Disable MSA & FPU */
disable_msa();
if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
clear_c0_status(ST0_CU1 | ST0_FR);
disable_fpu_hazard();
}
vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
__kvm_save_fpu(&vcpu->arch);
vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
/* Disable FPU */
clear_c0_status(ST0_CU1 | ST0_FR);
disable_fpu_hazard();
}
preempt_enable();
}
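/*
 * Quick summary of the lazy FPU/MSA handling implemented by kvm_own_fpu(),
 * kvm_own_msa(), kvm_drop_fpu() and kvm_lose_fpu() above:
 * vcpu->arch.aux_inuse records which guest register state is currently
 * live in the hardware (KVM_MIPS_AUX_FPU and/or KVM_MIPS_AUX_MSA).
 * kvm_own_*() enable the relevant unit and restore state on first guest
 * use, kvm_lose_fpu() saves whatever is live and disables the units
 * (e.g. around context switch), and kvm_drop_fpu() discards the live
 * state without saving it when it no longer needs preserving.
 */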
/*
* Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
* used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
* exception if cause bits are set in the value being written.
*/
static int kvm_mips_csr_die_notify(struct notifier_block *self,
unsigned long cmd, void *ptr)
{
struct die_args *args = (struct die_args *)ptr;
struct pt_regs *regs = args->regs;
unsigned long pc;
/* Only interested in FPE and MSAFPE */
if (cmd != DIE_FP && cmd != DIE_MSAFP)
return NOTIFY_DONE;
/* Return immediately if guest context isn't active */
if (!(current->flags & PF_VCPU))
return NOTIFY_DONE;
/* Should never get here from user mode */
BUG_ON(user_mode(regs));
pc = instruction_pointer(regs);
switch (cmd) {
case DIE_FP:
/* match 2nd instruction in __kvm_restore_fcsr */
if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
return NOTIFY_DONE;
break;
case DIE_MSAFP:
/* match 2nd/3rd instruction in __kvm_restore_msacsr */
if (!cpu_has_msa ||
pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
pc > (unsigned long)&__kvm_restore_msacsr + 8)
return NOTIFY_DONE;
break;
}
/* Move PC forward a little and continue executing */
instruction_pointer(regs) += 4;
return NOTIFY_STOP;
}
static struct notifier_block kvm_mips_csr_die_notifier = {
.notifier_call = kvm_mips_csr_die_notify,
};
static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
[MIPS_EXC_INT_TIMER] = C_IRQ5,
[MIPS_EXC_INT_IO_1] = C_IRQ0,
[MIPS_EXC_INT_IPI_1] = C_IRQ1,
[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};
static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
[MIPS_EXC_INT_TIMER] = C_IRQ5,
[MIPS_EXC_INT_IO_1] = C_IRQ0,
[MIPS_EXC_INT_IO_2] = C_IRQ1,
[MIPS_EXC_INT_IPI_1] = C_IRQ4,
};
u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;
u32 kvm_irq_to_priority(u32 irq)
{
int i;
for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
return i;
}
return MIPS_EXC_MAX;
}
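/*
 * Worked example for the mapping above, using the default table: the
 * ioctl-visible irq number n selects CP0_Cause bit IP(n), i.e.
 * 1 << (n + 8). So irq 2 matches C_IRQ0 (IP2) and yields
 * MIPS_EXC_INT_IO_1, while irq 7 matches C_IRQ5 (IP7), the timer.
 * An irq with no match returns MIPS_EXC_MAX, which callers treat as
 * invalid.
 */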
static int __init kvm_mips_init(void)
{
int ret;
if (cpu_has_mmid) {
pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
return -EOPNOTSUPP;
}
ret = kvm_mips_entry_setup();
if (ret)
return ret;
ret = kvm_mips_emulation_init();
if (ret)
return ret;
if (boot_cpu_type() == CPU_LOONGSON64)
kvm_priority_to_irq = kvm_loongson3_priority_to_irq;
register_die_notifier(&kvm_mips_csr_die_notifier);
ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
if (ret) {
unregister_die_notifier(&kvm_mips_csr_die_notifier);
return ret;
}
return 0;
}
static void __exit kvm_mips_exit(void)
{
kvm_exit();
unregister_die_notifier(&kvm_mips_csr_die_notifier);
}
module_init(kvm_mips_init);
module_exit(kvm_mips_exit);
EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
| linux-master | arch/mips/kvm/mips.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS: Instruction/Exception emulation
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
#include "interrupt.h"
#include "trace.h"
/*
* Compute the return address, emulating the branch if required.
* This function should only be called while a branch delay slot is active.
*/
static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
unsigned long *out)
{
unsigned int dspcontrol;
union mips_instruction insn;
struct kvm_vcpu_arch *arch = &vcpu->arch;
long epc = instpc;
long nextpc;
int err;
if (epc & 3) {
kvm_err("%s: unaligned epc\n", __func__);
return -EINVAL;
}
/* Read the instruction */
err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
if (err)
return err;
switch (insn.i_format.opcode) {
/* jr and jalr are in r_format format. */
case spec_op:
switch (insn.r_format.func) {
case jalr_op:
arch->gprs[insn.r_format.rd] = epc + 8;
fallthrough;
case jr_op:
nextpc = arch->gprs[insn.r_format.rs];
break;
default:
return -EINVAL;
}
break;
/*
* This group contains:
* bltz_op, bgez_op, bltzl_op, bgezl_op,
* bltzal_op, bgezal_op, bltzall_op, bgezall_op.
*/
case bcond_op:
switch (insn.i_format.rt) {
case bltz_op:
case bltzl_op:
if ((long)arch->gprs[insn.i_format.rs] < 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
nextpc = epc;
break;
case bgez_op:
case bgezl_op:
if ((long)arch->gprs[insn.i_format.rs] >= 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
nextpc = epc;
break;
case bltzal_op:
case bltzall_op:
arch->gprs[31] = epc + 8;
if ((long)arch->gprs[insn.i_format.rs] < 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
nextpc = epc;
break;
case bgezal_op:
case bgezall_op:
arch->gprs[31] = epc + 8;
if ((long)arch->gprs[insn.i_format.rs] >= 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
nextpc = epc;
break;
case bposge32_op:
if (!cpu_has_dsp) {
kvm_err("%s: DSP branch but not DSP ASE\n",
__func__);
return -EINVAL;
}
dspcontrol = rddsp(0x01);
if (dspcontrol >= 32)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
nextpc = epc;
break;
default:
return -EINVAL;
}
break;
/* These are unconditional and in j_format. */
case jal_op:
arch->gprs[31] = instpc + 8;
fallthrough;
case j_op:
epc += 4;
epc >>= 28;
epc <<= 28;
epc |= (insn.j_format.target << 2);
nextpc = epc;
break;
/* These are conditional and in i_format. */
case beq_op:
case beql_op:
if (arch->gprs[insn.i_format.rs] ==
arch->gprs[insn.i_format.rt])
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
nextpc = epc;
break;
case bne_op:
case bnel_op:
if (arch->gprs[insn.i_format.rs] !=
arch->gprs[insn.i_format.rt])
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
nextpc = epc;
break;
case blez_op: /* POP06 */
#ifndef CONFIG_CPU_MIPSR6
case blezl_op: /* removed in R6 */
#endif
if (insn.i_format.rt != 0)
goto compact_branch;
if ((long)arch->gprs[insn.i_format.rs] <= 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
nextpc = epc;
break;
case bgtz_op: /* POP07 */
#ifndef CONFIG_CPU_MIPSR6
case bgtzl_op: /* removed in R6 */
#endif
if (insn.i_format.rt != 0)
goto compact_branch;
if ((long)arch->gprs[insn.i_format.rs] > 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
nextpc = epc;
break;
/* And now the FPA/cp1 branch instructions. */
case cop1_op:
kvm_err("%s: unsupported cop1_op\n", __func__);
return -EINVAL;
#ifdef CONFIG_CPU_MIPSR6
/* R6 added the following compact branches with forbidden slots */
case blezl_op: /* POP26 */
case bgtzl_op: /* POP27 */
/* only rt == 0 isn't compact branch */
if (insn.i_format.rt != 0)
goto compact_branch;
return -EINVAL;
case pop10_op:
case pop30_op:
/* only rs == rt == 0 is reserved, rest are compact branches */
if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
goto compact_branch;
return -EINVAL;
case pop66_op:
case pop76_op:
/* only rs == 0 isn't compact branch */
if (insn.i_format.rs != 0)
goto compact_branch;
return -EINVAL;
compact_branch:
/*
* If we've hit an exception on the forbidden slot, then
* the branch must not have been taken.
*/
epc += 8;
nextpc = epc;
break;
#else
compact_branch:
/* Fall through - Compact branches not supported before R6 */
#endif
default:
return -EINVAL;
}
*out = nextpc;
return 0;
}
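/*
 * Worked example for the branch emulation above: for a taken
 * "beq rs, rt, offset" at address epc, the delay slot sits at epc + 4 and
 * the target is
 *
 *   nextpc = epc + 4 + (simmediate << 2)
 *
 * while a not-taken branch (including an annulled branch-likely) resumes
 * at epc + 8, skipping the delay slot.
 */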
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
int err;
if (cause & CAUSEF_BD) {
err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
&vcpu->arch.pc);
if (err)
return EMULATE_FAIL;
} else {
vcpu->arch.pc += 4;
}
kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
return EMULATE_DONE;
}
/**
* kvm_get_badinstr() - Get bad instruction encoding.
* @opc: Guest pointer to faulting instruction.
* @vcpu: KVM VCPU information.
*
* Gets the instruction encoding of the faulting instruction from the saved
* BadInstr register value. CPUs without a BadInstr register are not
* supported here and trigger a one-time warning.
*
* Returns: 0 on success, with the encoding written to @out.
* -EINVAL if the CPU has no BadInstr register.
*/
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
if (cpu_has_badinstr) {
*out = vcpu->arch.host_cp0_badinstr;
return 0;
} else {
WARN_ONCE(1, "CPU doesn't have BadInstr register\n");
return -EINVAL;
}
}
/**
* kvm_get_badinstrp() - Get bad prior instruction encoding.
* @opc: Guest pointer to prior faulting instruction.
* @vcpu: KVM VCPU information.
*
* Gets the instruction encoding of the prior faulting instruction (the branch
* containing the delay slot which faulted) from the saved BadInstrP register
* value. CPUs without a BadInstrP register are not supported here and
* trigger a one-time warning.
*
* Returns: 0 on success, with the encoding written to @out.
* -EINVAL if the CPU has no BadInstrP register.
*/
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
if (cpu_has_badinstrp) {
*out = vcpu->arch.host_cp0_badinstrp;
return 0;
} else {
WARN_ONCE(1, "CPU doesn't have BadInstrp register\n");
return -EINVAL;
}
}
/**
* kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
* @vcpu: Virtual CPU.
*
* Returns: 1 if the CP0_Count timer is disabled by either the guest
* CP0_Cause.DC bit or the count_ctl.DC bit.
* 0 otherwise (in which case CP0_Count timer is running).
*/
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}
/**
* kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
*
* Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
*
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
*/
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
s64 now_ns, periods;
u64 delta;
now_ns = ktime_to_ns(now);
delta = now_ns + vcpu->arch.count_dyn_bias;
if (delta >= vcpu->arch.count_period) {
/* If delta is out of safe range the bias needs adjusting */
periods = div64_s64(now_ns, vcpu->arch.count_period);
vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
/* Recalculate delta with new bias */
delta = now_ns + vcpu->arch.count_dyn_bias;
}
/*
* We've ensured that:
* delta < count_period
*
* Therefore the intermediate delta*count_hz will never overflow since
* at the boundary condition:
* delta = count_period
* delta = NSEC_PER_SEC * 2^32 / count_hz
* delta * count_hz = NSEC_PER_SEC * 2^32
*/
return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
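/*
 * Worked numbers for the overflow argument above, assuming a (typical but
 * purely illustrative) 100 MHz guest timer: count_period =
 * NSEC_PER_SEC * 2^32 / 100e6, roughly 42.9 seconds worth of nanoseconds,
 * so delta * count_hz never exceeds NSEC_PER_SEC * 2^32 (about 4.3e18),
 * which comfortably fits in the u64 passed to div_u64().
 */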
/**
* kvm_mips_count_time() - Get effective current time.
* @vcpu: Virtual CPU.
*
* Get effective monotonic ktime. This is usually a straightforward ktime_get(),
* except when the master disable bit is set in count_ctl, in which case it is
* count_resume, i.e. the time that the count was disabled.
*
* Returns: Effective monotonic ktime for CP0_Count.
*/
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
return vcpu->arch.count_resume;
return ktime_get();
}
/**
* kvm_mips_read_count_running() - Read the current count value as if running.
* @vcpu: Virtual CPU.
* @now: Kernel time to read CP0_Count at.
*
* Returns the current guest CP0_Count register at time @now and handles if the
* timer interrupt is pending and hasn't been handled yet.
*
* Returns: The current value of the guest CP0_Count register.
*/
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
ktime_t expires, threshold;
u32 count, compare;
int running;
/* Calculate the biased and scaled guest CP0_Count */
count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
compare = kvm_read_c0_guest_compare(cop0);
/*
* Find whether CP0_Count has reached the closest timer interrupt. If
* not, we shouldn't inject it.
*/
if ((s32)(count - compare) < 0)
return count;
/*
* The CP0_Count we're going to return has already reached the closest
* timer interrupt. Quickly check if it really is a new interrupt by
* looking at whether the interval until the hrtimer expiry time is
* less than 1/4 of the timer period.
*/
expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
if (ktime_before(expires, threshold)) {
/*
* Cancel it while we handle it so there's no chance of
* interference with the timeout handler.
*/
running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
/* Nothing should be waiting on the timeout */
kvm_mips_callbacks->queue_timer_int(vcpu);
/*
* Restart the timer if it was running based on the expiry time
* we read, so that we don't push it back 2 periods.
*/
if (running) {
expires = ktime_add_ns(expires,
vcpu->arch.count_period);
hrtimer_start(&vcpu->arch.comparecount_timer, expires,
HRTIMER_MODE_ABS);
}
}
return count;
}
/**
* kvm_mips_read_count() - Read the current count value.
* @vcpu: Virtual CPU.
*
* Read the current guest CP0_Count value, taking into account whether the timer
* is stopped.
*
* Returns: The current guest CP0_Count value.
*/
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
/* If count disabled just read static copy of count */
if (kvm_mips_count_disabled(vcpu))
return kvm_read_c0_guest_count(cop0);
return kvm_mips_read_count_running(vcpu, ktime_get());
}
/**
* kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
* @vcpu: Virtual CPU.
* @count: Output pointer for CP0_Count value at point of freeze.
*
* Freeze the hrtimer safely and return both the ktime and the CP0_Count value
* at the point it was frozen. It is guaranteed that any pending interrupts at
* the point it was frozen are handled, and none after that point.
*
* This is useful where the time/CP0_Count is needed in the calculation of the
* new parameters.
*
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
*
* Returns: The ktime at the point of freeze.
*/
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
ktime_t now;
/* stop hrtimer before finding time */
hrtimer_cancel(&vcpu->arch.comparecount_timer);
now = ktime_get();
/* find count at this point and handle pending hrtimer */
*count = kvm_mips_read_count_running(vcpu, now);
return now;
}
/**
* kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
* @vcpu: Virtual CPU.
* @now: ktime at point of resume.
* @count: CP0_Count at point of resume.
*
* Resumes the timer and updates the timer expiry based on @now and @count.
* This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
* parameters need to be changed.
*
* It is guaranteed that a timer interrupt immediately after resume will be
* handled, but not if CP0_Compare is exactly at @count. That case is already
* handled by kvm_mips_freeze_hrtimer().
*
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
*/
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
ktime_t now, u32 count)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 compare;
u64 delta;
ktime_t expire;
/* Calculate timeout (wrap 0 to 2^32) */
compare = kvm_read_c0_guest_compare(cop0);
delta = (u64)(u32)(compare - count - 1) + 1;
delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
expire = ktime_add_ns(now, delta);
/* Update hrtimer to use new timeout */
hrtimer_cancel(&vcpu->arch.comparecount_timer);
hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}
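/*
 * The timeout computation above wraps modulo 2^32 on purpose. A short
 * worked example: with compare == count the expression
 * (u64)(u32)(compare - count - 1) + 1 evaluates to 0xffffffff + 1 = 2^32,
 * i.e. a full timer period rather than an immediate zero-length timeout;
 * with compare == count + 1 it evaluates to exactly one tick.
 */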
/**
* kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
* @vcpu: Virtual CPU.
* @before: Time before Count was saved, lower bound of drift calculation.
* @count: CP0_Count at point of restore.
* @min_drift: Minimum amount of drift permitted before correction.
* Must be <= 0.
*
* Restores the timer from a particular @count, accounting for drift. This can
* be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer is
* to be used for a period of time, but the exact ktime corresponding to the
* final Count that must be restored is not known.
*
* It is guaranteed that a timer interrupt immediately after restore will be
* handled, but not if CP0_Compare is exactly at @count. That case should
* already be handled when the hardware timer state is saved.
*
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
* stopped).
*
* Returns: Amount of correction to count_bias due to drift.
*/
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
u32 count, int min_drift)
{
ktime_t now, count_time;
u32 now_count, before_count;
u64 delta;
int drift, ret = 0;
/* Calculate expected count at before */
before_count = vcpu->arch.count_bias +
kvm_mips_ktime_to_count(vcpu, before);
/*
* Detect significantly negative drift, where count is lower than
* expected. Some negative drift is expected when hardware counter is
* set after kvm_mips_freeze_timer(), and it is harmless to allow the
* time to jump forwards a little, within reason. If the drift is too
* significant, adjust the bias to avoid a big Guest.CP0_Count jump.
*/
drift = count - before_count;
if (drift < min_drift) {
count_time = before;
vcpu->arch.count_bias += drift;
ret = drift;
goto resume;
}
/* Calculate expected count right now */
now = ktime_get();
now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
/*
* Detect positive drift, where count is higher than expected, and
* adjust the bias to avoid guest time going backwards.
*/
drift = count - now_count;
if (drift > 0) {
count_time = now;
vcpu->arch.count_bias += drift;
ret = drift;
goto resume;
}
/* Subtract nanosecond delta to find ktime when count was read */
delta = (u64)(u32)(now_count - count);
delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
count_time = ktime_sub_ns(now, delta);
resume:
/* Resume using the calculated ktime */
kvm_mips_resume_hrtimer(vcpu, count_time, count);
return ret;
}
/**
* kvm_mips_write_count() - Modify the count and update timer.
* @vcpu: Virtual CPU.
* @count: Guest CP0_Count value to set.
*
* Sets the CP0_Count value and updates the timer accordingly.
*/
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
ktime_t now;
/* Calculate bias */
now = kvm_mips_count_time(vcpu);
vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
if (kvm_mips_count_disabled(vcpu))
/* The timer's disabled, adjust the static count */
kvm_write_c0_guest_count(cop0, count);
else
/* Update timeout */
kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
* kvm_mips_init_count() - Initialise timer.
* @vcpu: Virtual CPU.
* @count_hz: Frequency of timer.
*
* Initialise the timer to the specified frequency, zero it, and set it going if
* it's enabled.
*/
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
{
vcpu->arch.count_hz = count_hz;
vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
vcpu->arch.count_dyn_bias = 0;
/* Starting at 0 */
kvm_mips_write_count(vcpu, 0);
}
/**
* kvm_mips_set_count_hz() - Update the frequency of the timer.
* @vcpu: Virtual CPU.
* @count_hz: Frequency of CP0_Count timer in Hz.
*
* Change the frequency of the CP0_Count timer. This is done atomically so that
* CP0_Count is continuous and no timer interrupt is lost.
*
* Returns: -EINVAL if @count_hz is out of range.
* 0 on success.
*/
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
int dc;
ktime_t now;
u32 count;
/* ensure the frequency is in a sensible range... */
if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
return -EINVAL;
/* ... and has actually changed */
if (vcpu->arch.count_hz == count_hz)
return 0;
/* Safely freeze timer so we can keep it continuous */
dc = kvm_mips_count_disabled(vcpu);
if (dc) {
now = kvm_mips_count_time(vcpu);
count = kvm_read_c0_guest_count(cop0);
} else {
now = kvm_mips_freeze_hrtimer(vcpu, &count);
}
/* Update the frequency */
vcpu->arch.count_hz = count_hz;
vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
vcpu->arch.count_dyn_bias = 0;
/* Calculate adjusted bias so dynamic count is unchanged */
vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
/* Update and resume hrtimer */
if (!dc)
kvm_mips_resume_hrtimer(vcpu, now, count);
return 0;
}
/**
* kvm_mips_write_compare() - Modify compare and update timer.
* @vcpu: Virtual CPU.
* @compare: New CP0_Compare value.
* @ack: Whether to acknowledge timer interrupt.
*
* Update CP0_Compare to a new value and update the timeout.
* If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
* any pending timer interrupt is preserved.
*/
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
int dc;
u32 old_compare = kvm_read_c0_guest_compare(cop0);
s32 delta = compare - old_compare;
u32 cause;
ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
u32 count;
/* if unchanged, must just be an ack */
if (old_compare == compare) {
if (!ack)
return;
kvm_mips_callbacks->dequeue_timer_int(vcpu);
kvm_write_c0_guest_compare(cop0, compare);
return;
}
/*
* If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
* too to prevent guest CP0_Count hitting guest CP0_Compare.
*
* The new GTOffset corresponds to the new value of CP0_Compare, and is
* set prior to it being written into the guest context. We disable
* preemption until the new value is written to prevent restore of a
* GTOffset corresponding to the old CP0_Compare value.
*/
if (delta > 0) {
preempt_disable();
write_c0_gtoffset(compare - read_c0_count());
back_to_back_c0_hazard();
}
/* freeze_hrtimer() takes care of timer interrupts <= count */
dc = kvm_mips_count_disabled(vcpu);
if (!dc)
now = kvm_mips_freeze_hrtimer(vcpu, &count);
if (ack)
kvm_mips_callbacks->dequeue_timer_int(vcpu);
else
/*
* With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
* preserve guest CP0_Cause.TI if we don't want to ack it.
*/
cause = kvm_read_c0_guest_cause(cop0);
kvm_write_c0_guest_compare(cop0, compare);
if (delta > 0)
preempt_enable();
back_to_back_c0_hazard();
if (!ack && cause & CAUSEF_TI)
kvm_write_c0_guest_cause(cop0, cause);
/* resume_hrtimer() takes care of timer interrupts > count */
if (!dc)
kvm_mips_resume_hrtimer(vcpu, now, count);
/*
* If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
* until after the new CP0_Compare is written, otherwise new guest
* CP0_Count could hit new guest CP0_Compare.
*/
if (delta <= 0)
write_c0_gtoffset(compare - read_c0_count());
}
/**
* kvm_mips_count_disable() - Disable count.
* @vcpu: Virtual CPU.
*
* Disable the CP0_Count timer. A timer interrupt on or before the final stop
* time will be handled but not after.
*
* Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
* count_ctl.DC has been set (count disabled).
*
* Returns: The time that the timer was stopped.
*/
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 count;
ktime_t now;
/* Stop hrtimer */
hrtimer_cancel(&vcpu->arch.comparecount_timer);
/* Set the static count from the dynamic count, handling pending TI */
now = ktime_get();
count = kvm_mips_read_count_running(vcpu, now);
kvm_write_c0_guest_count(cop0, count);
return now;
}
/**
* kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
* @vcpu: Virtual CPU.
*
* Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
* before the final stop time will be handled if the timer isn't disabled by
* count_ctl.DC, but not after.
*
* Assumes CP0_Cause.DC is clear (count enabled).
*/
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
kvm_mips_count_disable(vcpu);
}
/**
* kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
* @vcpu: Virtual CPU.
*
* Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
* the start time will be handled if the timer isn't disabled by count_ctl.DC,
* potentially before even returning, so the caller should be careful with
* ordering of CP0_Cause modifications so as not to lose it.
*
* Assumes CP0_Cause.DC is set (count disabled).
*/
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 count;
kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
/*
* Set the dynamic count to match the static count.
* This starts the hrtimer if count_ctl.DC allows it.
* Otherwise it conveniently updates the biases.
*/
count = kvm_read_c0_guest_count(cop0);
kvm_mips_write_count(vcpu, count);
}
/**
* kvm_mips_set_count_ctl() - Update the count control KVM register.
* @vcpu: Virtual CPU.
* @count_ctl: Count control register new value.
*
* Set the count control KVM register. The timer is updated accordingly.
*
* Returns: -EINVAL if reserved bits are set.
* 0 on success.
*/
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
s64 changed = count_ctl ^ vcpu->arch.count_ctl;
s64 delta;
ktime_t expire, now;
u32 count, compare;
/* Only allow defined bits to be changed */
if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
return -EINVAL;
/* Apply new value */
vcpu->arch.count_ctl = count_ctl;
/* Master CP0_Count disable */
if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
/* Is CP0_Cause.DC already disabling CP0_Count? */
if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
/* Just record the current time */
vcpu->arch.count_resume = ktime_get();
} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
/* disable timer and record current time */
vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
} else {
/*
* Calculate timeout relative to static count at resume
* time (wrap 0 to 2^32).
*/
count = kvm_read_c0_guest_count(cop0);
compare = kvm_read_c0_guest_compare(cop0);
delta = (u64)(u32)(compare - count - 1) + 1;
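/*
 * e.g. compare == count yields (u32)0xffffffff + 1 = 2^32 ticks,
 * a full CP0_Count wrap rather than an immediate timeout.
 */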
delta = div_u64(delta * NSEC_PER_SEC,
vcpu->arch.count_hz);
expire = ktime_add_ns(vcpu->arch.count_resume, delta);
/* Handle pending interrupt */
now = ktime_get();
if (ktime_compare(now, expire) >= 0)
/* Nothing should be waiting on the timeout */
kvm_mips_callbacks->queue_timer_int(vcpu);
/* Resume hrtimer without changing bias */
count = kvm_mips_read_count_running(vcpu, now);
kvm_mips_resume_hrtimer(vcpu, now, count);
}
}
return 0;
}
/**
* kvm_mips_set_count_resume() - Update the count resume KVM register.
* @vcpu: Virtual CPU.
* @count_resume: Count resume register new value.
*
* Set the count resume KVM register.
*
* Returns: -EINVAL if out of valid range (0..now).
* 0 on success.
*/
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
/*
* It doesn't make sense for the resume time to be in the future, as it
* would be possible for the next interrupt to be more than a full
* period in the future.
*/
if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
return -EINVAL;
vcpu->arch.count_resume = ns_to_ktime(count_resume);
return 0;
}
/**
* kvm_mips_count_timeout() - Push timer forward on timeout.
* @vcpu: Virtual CPU.
*
* Handle an hrtimer event by pushing the hrtimer forward one period.
*
* Returns: The hrtimer_restart value to return to the hrtimer subsystem.
*/
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
/* Add the Count period to the current expiry time */
hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
vcpu->arch.count_period);
return HRTIMER_RESTART;
}
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
vcpu->arch.pending_exceptions);
++vcpu->stat.wait_exits;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
if (!vcpu->arch.pending_exceptions) {
kvm_vz_lose_htimer(vcpu);
vcpu->arch.wait = 1;
kvm_vcpu_halt(vcpu);
/*
* If we are runnable once more, go off to user space to check
* whether any I/O interrupts are pending.
*/
if (kvm_arch_vcpu_runnable(vcpu))
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
}
return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
u32 cause,
struct kvm_vcpu *vcpu)
{
int r;
enum emulation_result er;
u32 rt;
struct kvm_run *run = vcpu->run;
void *data = run->mmio.data;
unsigned int imme;
unsigned long curr_pc;
/*
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
rt = inst.i_format.rt;
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr);
if (run->mmio.phys_addr == KVM_INVALID_ADDR)
goto out_fail;
switch (inst.i_format.opcode) {
#if defined(CONFIG_64BIT)
case sd_op:
run->mmio.len = 8;
*(u64 *)data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u64 *)data);
break;
#endif
case sw_op:
run->mmio.len = 4;
*(u32 *)data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u32 *)data);
break;
case sh_op:
run->mmio.len = 2;
*(u16 *)data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u16 *)data);
break;
case sb_op:
run->mmio.len = 1;
*(u8 *)data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u8 *)data);
break;
case swl_op:
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr) & (~0x3);
run->mmio.len = 4;
imme = vcpu->arch.host_cp0_badvaddr & 0x3;
switch (imme) {
case 0:
*(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
(vcpu->arch.gprs[rt] >> 24);
break;
case 1:
*(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
(vcpu->arch.gprs[rt] >> 16);
break;
case 2:
*(u32 *)data = ((*(u32 *)data) & 0xff000000) |
(vcpu->arch.gprs[rt] >> 8);
break;
case 3:
*(u32 *)data = vcpu->arch.gprs[rt];
break;
default:
break;
}
kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u32 *)data);
break;
case swr_op:
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr) & (~0x3);
run->mmio.len = 4;
imme = vcpu->arch.host_cp0_badvaddr & 0x3;
switch (imme) {
case 0:
*(u32 *)data = vcpu->arch.gprs[rt];
break;
case 1:
*(u32 *)data = ((*(u32 *)data) & 0xff) |
(vcpu->arch.gprs[rt] << 8);
break;
case 2:
*(u32 *)data = ((*(u32 *)data) & 0xffff) |
(vcpu->arch.gprs[rt] << 16);
break;
case 3:
*(u32 *)data = ((*(u32 *)data) & 0xffffff) |
(vcpu->arch.gprs[rt] << 24);
break;
default:
break;
}
kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u32 *)data);
break;
#if defined(CONFIG_64BIT)
case sdl_op:
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr) & (~0x7);
run->mmio.len = 8;
imme = vcpu->arch.host_cp0_badvaddr & 0x7;
switch (imme) {
case 0:
*(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
((vcpu->arch.gprs[rt] >> 56) & 0xff);
break;
case 1:
*(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
((vcpu->arch.gprs[rt] >> 48) & 0xffff);
break;
case 2:
*(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
break;
case 3:
*(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
break;
case 4:
*(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
break;
case 5:
*(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
break;
case 6:
*(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
break;
case 7:
*(u64 *)data = vcpu->arch.gprs[rt];
break;
default:
break;
}
kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u64 *)data);
break;
case sdr_op:
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr) & (~0x7);
run->mmio.len = 8;
imme = vcpu->arch.host_cp0_badvaddr & 0x7;
switch (imme) {
case 0:
*(u64 *)data = vcpu->arch.gprs[rt];
break;
case 1:
*(u64 *)data = ((*(u64 *)data) & 0xff) |
(vcpu->arch.gprs[rt] << 8);
break;
case 2:
*(u64 *)data = ((*(u64 *)data) & 0xffff) |
(vcpu->arch.gprs[rt] << 16);
break;
case 3:
*(u64 *)data = ((*(u64 *)data) & 0xffffff) |
(vcpu->arch.gprs[rt] << 24);
break;
case 4:
*(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
(vcpu->arch.gprs[rt] << 32);
break;
case 5:
*(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
(vcpu->arch.gprs[rt] << 40);
break;
case 6:
*(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
(vcpu->arch.gprs[rt] << 48);
break;
case 7:
*(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
(vcpu->arch.gprs[rt] << 56);
break;
default:
break;
}
kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u64 *)data);
break;
#endif
#ifdef CONFIG_CPU_LOONGSON64
case sdc2_op:
rt = inst.loongson3_lsdc2_format.rt;
switch (inst.loongson3_lsdc2_format.opcode1) {
/*
* Loongson-3 overridden sdc2 instructions.
* opcode1 instruction
* 0x0 gssbx: store 1 byte from GPR
* 0x1 gsshx: store 2 bytes from GPR
* 0x2 gsswx: store 4 bytes from GPR
* 0x3 gssdx: store 8 bytes from GPR
*/
case 0x0:
run->mmio.len = 1;
*(u8 *)data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u8 *)data);
break;
case 0x1:
run->mmio.len = 2;
*(u16 *)data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u16 *)data);
break;
case 0x2:
run->mmio.len = 4;
*(u32 *)data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u32 *)data);
break;
case 0x3:
run->mmio.len = 8;
*(u64 *)data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u64 *)data);
break;
default:
kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
inst.word);
break;
}
break;
#endif
default:
kvm_err("Store not yet supported (inst=0x%08x)\n",
inst.word);
goto out_fail;
}
vcpu->mmio_needed = 1;
run->mmio.is_write = 1;
vcpu->mmio_is_write = 1;
r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
run->mmio.phys_addr, run->mmio.len, data);
if (!r) {
vcpu->mmio_needed = 0;
return EMULATE_DONE;
}
return EMULATE_DO_MMIO;
out_fail:
/* Rollback PC if emulation was unsuccessful */
vcpu->arch.pc = curr_pc;
return EMULATE_FAIL;
}
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
u32 cause, struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
int r;
enum emulation_result er;
unsigned long curr_pc;
u32 op, rt;
unsigned int imme;
rt = inst.i_format.rt;
op = inst.i_format.opcode;
/*
* Find the resume PC now while we have safe and easy access to the
* prior branch instruction, and save it for
* kvm_mips_complete_mmio_load() to restore later.
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
vcpu->arch.io_pc = vcpu->arch.pc;
vcpu->arch.pc = curr_pc;
vcpu->arch.io_gpr = rt;
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr);
if (run->mmio.phys_addr == KVM_INVALID_ADDR)
return EMULATE_FAIL;
vcpu->mmio_needed = 2; /* signed */
switch (op) {
#if defined(CONFIG_64BIT)
case ld_op:
run->mmio.len = 8;
break;
case lwu_op:
vcpu->mmio_needed = 1; /* unsigned */
fallthrough;
#endif
case lw_op:
run->mmio.len = 4;
break;
case lhu_op:
vcpu->mmio_needed = 1; /* unsigned */
fallthrough;
case lh_op:
run->mmio.len = 2;
break;
case lbu_op:
vcpu->mmio_needed = 1; /* unsigned */
fallthrough;
case lb_op:
run->mmio.len = 1;
break;
case lwl_op:
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr) & (~0x3);
run->mmio.len = 4;
imme = vcpu->arch.host_cp0_badvaddr & 0x3;
switch (imme) {
case 0:
vcpu->mmio_needed = 3; /* 1 byte */
break;
case 1:
vcpu->mmio_needed = 4; /* 2 bytes */
break;
case 2:
vcpu->mmio_needed = 5; /* 3 bytes */
break;
case 3:
vcpu->mmio_needed = 6; /* 4 bytes */
break;
default:
break;
}
break;
case lwr_op:
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr) & (~0x3);
run->mmio.len = 4;
imme = vcpu->arch.host_cp0_badvaddr & 0x3;
switch (imme) {
case 0:
vcpu->mmio_needed = 7; /* 4 bytes */
break;
case 1:
vcpu->mmio_needed = 8; /* 3 bytes */
break;
case 2:
vcpu->mmio_needed = 9; /* 2 bytes */
break;
case 3:
vcpu->mmio_needed = 10; /* 1 byte */
break;
default:
break;
}
break;
#if defined(CONFIG_64BIT)
case ldl_op:
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr) & (~0x7);
run->mmio.len = 8;
imme = vcpu->arch.host_cp0_badvaddr & 0x7;
switch (imme) {
case 0:
vcpu->mmio_needed = 11; /* 1 byte */
break;
case 1:
vcpu->mmio_needed = 12; /* 2 bytes */
break;
case 2:
vcpu->mmio_needed = 13; /* 3 bytes */
break;
case 3:
vcpu->mmio_needed = 14; /* 4 bytes */
break;
case 4:
vcpu->mmio_needed = 15; /* 5 bytes */
break;
case 5:
vcpu->mmio_needed = 16; /* 6 bytes */
break;
case 6:
vcpu->mmio_needed = 17; /* 7 bytes */
break;
case 7:
vcpu->mmio_needed = 18; /* 8 bytes */
break;
default:
break;
}
break;
case ldr_op:
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr) & (~0x7);
run->mmio.len = 8;
imme = vcpu->arch.host_cp0_badvaddr & 0x7;
switch (imme) {
case 0:
vcpu->mmio_needed = 19; /* 8 bytes */
break;
case 1:
vcpu->mmio_needed = 20; /* 7 bytes */
break;
case 2:
vcpu->mmio_needed = 21; /* 6 bytes */
break;
case 3:
vcpu->mmio_needed = 22; /* 5 bytes */
break;
case 4:
vcpu->mmio_needed = 23; /* 4 bytes */
break;
case 5:
vcpu->mmio_needed = 24; /* 3 bytes */
break;
case 6:
vcpu->mmio_needed = 25; /* 2 bytes */
break;
case 7:
vcpu->mmio_needed = 26; /* 1 byte */
break;
default:
break;
}
break;
#endif
#ifdef CONFIG_CPU_LOONGSON64
case ldc2_op:
rt = inst.loongson3_lsdc2_format.rt;
switch (inst.loongson3_lsdc2_format.opcode1) {
/*
* Loongson-3 overridden ldc2 instructions.
* opcode1 instruction
* 0x0 gslbx: load 1 byte to GPR
* 0x1 gslhx: load 2 bytes to GPR
* 0x2 gslwx: load 4 bytes to GPR
* 0x3 gsldx: load 8 bytes to GPR
*/
case 0x0:
run->mmio.len = 1;
vcpu->mmio_needed = 27; /* signed */
break;
case 0x1:
run->mmio.len = 2;
vcpu->mmio_needed = 28; /* signed */
break;
case 0x2:
run->mmio.len = 4;
vcpu->mmio_needed = 29; /* signed */
break;
case 0x3:
run->mmio.len = 8;
vcpu->mmio_needed = 30; /* signed */
break;
default:
kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
inst.word);
break;
}
break;
#endif
default:
kvm_err("Load not yet supported (inst=0x%08x)\n",
inst.word);
vcpu->mmio_needed = 0;
return EMULATE_FAIL;
}
run->mmio.is_write = 0;
vcpu->mmio_is_write = 0;
r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
run->mmio.phys_addr, run->mmio.len, run->mmio.data);
if (!r) {
kvm_mips_complete_mmio_load(vcpu);
vcpu->mmio_needed = 0;
return EMULATE_DONE;
}
return EMULATE_DO_MMIO;
}
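/*
 * Note on the surrounding MMIO flow (summary, assumptions hedged): when
 * kvm_io_bus_read() above cannot satisfy the access with an in-kernel
 * device, EMULATE_DO_MMIO is returned and the exit handler is expected to
 * bounce out to userspace with KVM_EXIT_MMIO; on the next KVM_RUN,
 * kvm_mips_complete_mmio_load() below merges the data userspace placed in
 * run->mmio.data back into the destination GPR.
 */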
enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
enum emulation_result er = EMULATE_DONE;
if (run->mmio.len > sizeof(*gpr)) {
kvm_err("Bad MMIO length: %d", run->mmio.len);
er = EMULATE_FAIL;
goto done;
}
/* Restore saved resume PC */
vcpu->arch.pc = vcpu->arch.io_pc;
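/*
 * For reference, the vcpu->mmio_needed encoding established by
 * kvm_mips_emulate_load() above: 1 = zero-extend, 2 = sign-extend the full
 * access, 3-6 = lwl merging 1-4 bytes into the high end of the word,
 * 7-10 = lwr merging 4-1 bytes into the low end, 11-18 = ldl merging 1-8
 * bytes, 19-26 = ldr merging 8-1 bytes, 27-30 = Loongson gs*x loads
 * (sign-extended).
 */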
switch (run->mmio.len) {
case 8:
switch (vcpu->mmio_needed) {
case 11:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
(((*(s64 *)run->mmio.data) & 0xff) << 56);
break;
case 12:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
(((*(s64 *)run->mmio.data) & 0xffff) << 48);
break;
case 13:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
(((*(s64 *)run->mmio.data) & 0xffffff) << 40);
break;
case 14:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
(((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
break;
case 15:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
(((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
break;
case 16:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
(((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
break;
case 17:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
(((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
break;
case 18:
case 19:
*gpr = *(s64 *)run->mmio.data;
break;
case 20:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
break;
case 21:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
break;
case 22:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
break;
case 23:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
break;
case 24:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
break;
case 25:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
break;
case 26:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
break;
default:
*gpr = *(s64 *)run->mmio.data;
}
break;
case 4:
switch (vcpu->mmio_needed) {
case 1:
*gpr = *(u32 *)run->mmio.data;
break;
case 2:
*gpr = *(s32 *)run->mmio.data;
break;
case 3:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
(((*(s32 *)run->mmio.data) & 0xff) << 24);
break;
case 4:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
(((*(s32 *)run->mmio.data) & 0xffff) << 16);
break;
case 5:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
(((*(s32 *)run->mmio.data) & 0xffffff) << 8);
break;
case 6:
case 7:
*gpr = *(s32 *)run->mmio.data;
break;
case 8:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
break;
case 9:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
break;
case 10:
*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
break;
default:
*gpr = *(s32 *)run->mmio.data;
}
break;
case 2:
if (vcpu->mmio_needed == 1)
*gpr = *(u16 *)run->mmio.data;
else
*gpr = *(s16 *)run->mmio.data;
break;
case 1:
if (vcpu->mmio_needed == 1)
*gpr = *(u8 *)run->mmio.data;
else
*gpr = *(s8 *)run->mmio.data;
break;
}
done:
return er;
}
| linux-master | arch/mips/kvm/emulate.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS: Support for hardware virtualization extensions
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Yann Le Du <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>
#include <linux/kvm_host.h>
#include "interrupt.h"
#ifdef CONFIG_CPU_LOONGSON64
#include "loongson_regs.h"
#endif
#include "trace.h"
/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];
/*
* Number of guest VTLB entries to use, so we can catch inconsistency between
* CPUs.
*/
static unsigned int kvm_vz_guest_vtlb_size;
static inline long kvm_vz_read_gc0_ebase(void)
{
if (sizeof(long) == 8 && cpu_has_ebase_wg)
return read_gc0_ebase_64();
else
return read_gc0_ebase();
}
static inline void kvm_vz_write_gc0_ebase(long v)
{
/*
* First write with WG=1 to write upper bits, then write again in case
* WG should be left at 0.
* write_gc0_ebase_64() is no longer UNDEFINED since R6.
*/
if (sizeof(long) == 8 &&
(cpu_has_mips64r6 || cpu_has_ebase_wg)) {
write_gc0_ebase_64(v | MIPS_EBASE_WG);
write_gc0_ebase_64(v);
} else {
write_gc0_ebase(v | MIPS_EBASE_WG);
write_gc0_ebase(v);
}
}
/*
* These Config bits may be writable by the guest:
* Config: [K23, KU] (!TLB), K0
* Config1: (none)
* Config2: [TU, SU] (impl)
* Config3: ISAOnExc
* Config4: FTLBPageSize
* Config5: K, CV, MSAEn, UFE, FRE, SBRI, UFR
*/
static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
return CONF_CM_CMASK;
}
static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
return 0;
}
static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
return 0;
}
static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
return MIPS_CONF3_ISA_OE;
}
static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
/* no need to be exact */
return MIPS_CONF4_VFTLBPAGESIZE;
}
static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;
/* Permit MSAEn changes if MSA supported and enabled */
if (kvm_mips_guest_has_msa(&vcpu->arch))
mask |= MIPS_CONF5_MSAEN;
/*
* Permit guest FPU mode changes if FPU is enabled and the relevant
* feature exists according to FIR register.
*/
if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
if (cpu_has_ufr)
mask |= MIPS_CONF5_UFR;
if (cpu_has_fre)
mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
}
return mask;
}
static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
{
return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER;
}
/*
* VZ optionally allows these additional Config bits to be written by root:
* Config: M, [MT]
* Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP
* Config2: M
* Config3: M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
* VInt, SP, CDMM, MT, SM, TL]
* Config4: M, [VTLBSizeExt, MMUSizeExt]
* Config5: MRP
*/
static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}
static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;
/* Permit FPU to be present if FPU is supported */
if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
mask |= MIPS_CONF1_FP;
return mask;
}
static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}
static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;
/* Permit MSA to be present if MSA is supported */
if (kvm_mips_guest_can_have_msa(&vcpu->arch))
mask |= MIPS_CONF3_MSA;
return mask;
}
static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}
static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}
static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
{
return kvm_vz_config6_guest_wrmask(vcpu) |
LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS;
}
static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
/* VZ guest has already converted gva to gpa */
return gva;
}
static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
set_bit(priority, &vcpu->arch.pending_exceptions);
clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}
static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
clear_bit(priority, &vcpu->arch.pending_exceptions);
set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}
static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
/*
* timer expiry is asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
/*
* timer expiry is asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq)
{
int intr = (int)irq->irq;
/*
* interrupts are asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}
static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq)
{
int intr = (int)irq->irq;
/*
* interrupts are asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}
static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
u32 irq = (priority < MIPS_EXC_MAX) ?
kvm_priority_to_irq[priority] : 0;
switch (priority) {
case MIPS_EXC_INT_TIMER:
set_gc0_cause(C_TI);
break;
case MIPS_EXC_INT_IO_1:
case MIPS_EXC_INT_IO_2:
case MIPS_EXC_INT_IPI_1:
case MIPS_EXC_INT_IPI_2:
if (cpu_has_guestctl2)
set_c0_guestctl2(irq);
else
set_gc0_cause(irq);
break;
default:
break;
}
clear_bit(priority, &vcpu->arch.pending_exceptions);
return 1;
}
static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
u32 irq = (priority < MIPS_EXC_MAX) ?
kvm_priority_to_irq[priority] : 0;
switch (priority) {
case MIPS_EXC_INT_TIMER:
/*
* Explicitly clear irq associated with Cause.IP[IPTI]
* if GuestCtl2 virtual interrupt register not
* supported or if not using GuestCtl2 Hardware Clear.
*/
if (cpu_has_guestctl2) {
if (!(read_c0_guestctl2() & (irq << 14)))
clear_c0_guestctl2(irq);
} else {
clear_gc0_cause(irq);
}
break;
case MIPS_EXC_INT_IO_1:
case MIPS_EXC_INT_IO_2:
case MIPS_EXC_INT_IPI_1:
case MIPS_EXC_INT_IPI_2:
/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
if (cpu_has_guestctl2) {
if (!(read_c0_guestctl2() & (irq << 14)))
clear_c0_guestctl2(irq);
} else {
clear_gc0_cause(irq);
}
break;
default:
break;
}
clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
return 1;
}
/*
* VZ guest timer handling.
*/
/**
* kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
* @vcpu: Virtual CPU.
*
* Returns: true if the VZ GTOffset & real guest CP0_Count should be used
* instead of software emulation of guest timer.
* false otherwise.
*/
static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{
if (kvm_mips_count_disabled(vcpu))
return false;
/* Chosen frequency must match real frequency */
if (mips_hpt_frequency != vcpu->arch.count_hz)
return false;
/* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
if (current_cpu_data.gtoffset_mask != 0xffffffff)
return false;
return true;
}
/**
* _kvm_vz_restore_stimer() - Restore soft timer state.
* @vcpu: Virtual CPU.
* @compare: CP0_Compare register value, restored by caller.
* @cause: CP0_Cause register to restore.
*
* Restore VZ state relating to the soft timer. The hard timer can be enabled
* later.
*/
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
u32 cause)
{
/*
* Avoid spurious counter interrupts by setting Guest CP0_Count to just
* after Guest CP0_Compare.
*/
write_c0_gtoffset(compare - read_c0_count());
back_to_back_c0_hazard();
write_gc0_cause(cause);
}
/**
* _kvm_vz_restore_htimer() - Restore hard timer state.
* @vcpu: Virtual CPU.
* @compare: CP0_Compare register value, restored by caller.
* @cause: CP0_Cause register to restore.
*
* Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
* value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
*/
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
u32 compare, u32 cause)
{
u32 start_count, after_count;
unsigned long flags;
/*
* Freeze the soft-timer and sync the guest CP0_Count with it. We do
* this with interrupts disabled to avoid latency.
*/
local_irq_save(flags);
kvm_mips_freeze_hrtimer(vcpu, &start_count);
write_c0_gtoffset(start_count - read_c0_count());
local_irq_restore(flags);
/* restore guest CP0_Cause, as TI may already be set */
back_to_back_c0_hazard();
write_gc0_cause(cause);
/*
* The above sequence isn't atomic and would result in lost timer
* interrupts if we're not careful. Detect if a timer interrupt is due
* and assert it.
*/
back_to_back_c0_hazard();
after_count = read_gc0_count();
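/*
 * In modulo-2^32 arithmetic the check below is true exactly when
 * CP0_Compare lies in (start_count, after_count], i.e. the guest timer
 * would have fired while Guest.Count was being resynchronised.
 */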
if (after_count - start_count > compare - start_count - 1)
kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
/**
* kvm_vz_restore_timer() - Restore timer state.
* @vcpu: Virtual CPU.
*
* Restore soft timer state from saved context.
*/
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 cause, compare;
compare = kvm_read_sw_gc0_compare(cop0);
cause = kvm_read_sw_gc0_cause(cop0);
write_gc0_compare(compare);
_kvm_vz_restore_stimer(vcpu, compare, cause);
}
/**
* kvm_vz_acquire_htimer() - Switch to hard timer state.
* @vcpu: Virtual CPU.
*
* Restore hard timer state on top of existing soft timer state if possible.
*
* Since hard timer won't remain active over preemption, preemption should be
* disabled by the caller.
*/
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
u32 gctl0;
gctl0 = read_c0_guestctl0();
if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
/* enable guest access to hard timer */
write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);
_kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
read_gc0_cause());
}
}
/**
* _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
* @vcpu: Virtual CPU.
* @out_compare: Pointer to write compare value to.
* @out_cause: Pointer to write cause value to.
*
* Save VZ guest timer state and switch to software emulation of guest CP0
* timer. The hard timer must already be in use, so preemption should be
* disabled.
*/
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
u32 *out_compare, u32 *out_cause)
{
u32 cause, compare, before_count, end_count;
ktime_t before_time;
compare = read_gc0_compare();
*out_compare = compare;
before_time = ktime_get();
/*
* Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
* at which no pending timer interrupt is missing.
*/
before_count = read_gc0_count();
back_to_back_c0_hazard();
cause = read_gc0_cause();
*out_cause = cause;
/*
* Record a final CP0_Count which we will transfer to the soft-timer.
* This is recorded *after* saving CP0_Cause, so we don't get any timer
* interrupts from just after the final CP0_Count point.
*/
back_to_back_c0_hazard();
end_count = read_gc0_count();
/*
* The above sequence isn't atomic, so we could miss a timer interrupt
* between reading CP0_Cause and end_count. Detect and record any timer
* interrupt due between before_count and end_count.
*/
if (end_count - before_count > compare - before_count - 1)
kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
/*
* Restore soft-timer, ignoring a small amount of negative drift due to
* delay between freeze_hrtimer and setting CP0_GTOffset.
*/
kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}
/**
* kvm_vz_save_timer() - Save guest timer state.
* @vcpu: Virtual CPU.
*
* Save VZ guest timer state and switch to soft guest timer if hard timer was in
* use.
*/
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 gctl0, compare, cause;
gctl0 = read_c0_guestctl0();
if (gctl0 & MIPS_GCTL0_GT) {
/* disable guest use of hard timer */
write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
/* save hard timer state */
_kvm_vz_save_htimer(vcpu, &compare, &cause);
} else {
compare = read_gc0_compare();
cause = read_gc0_cause();
}
/* save timer-related state to VCPU context */
kvm_write_sw_gc0_cause(cop0, cause);
kvm_write_sw_gc0_compare(cop0, compare);
}
/**
* kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
* @vcpu: Virtual CPU.
*
* Transfers the state of the hard guest timer to the soft guest timer, leaving
* guest state intact so it can continue to be used with the soft timer.
*/
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
u32 gctl0, compare, cause;
preempt_disable();
gctl0 = read_c0_guestctl0();
if (gctl0 & MIPS_GCTL0_GT) {
/* disable guest use of timer */
write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
/* switch to soft timer */
_kvm_vz_save_htimer(vcpu, &compare, &cause);
/* leave soft timer in usable state */
_kvm_vz_restore_stimer(vcpu, compare, cause);
}
preempt_enable();
}
/**
* is_eva_access() - Find whether an instruction is an EVA memory accessor.
* @inst: 32-bit instruction encoding.
*
* Finds whether @inst encodes an EVA memory access instruction, which would
* indicate that emulation of it should access the user mode address space
* instead of the kernel mode address space. This matters for MUSUK segments
* which are TLB mapped for user mode but unmapped for kernel mode.
*
* Returns: Whether @inst encodes an EVA accessor instruction.
*/
static bool is_eva_access(union mips_instruction inst)
{
if (inst.spec3_format.opcode != spec3_op)
return false;
switch (inst.spec3_format.func) {
case lwle_op:
case lwre_op:
case cachee_op:
case sbe_op:
case she_op:
case sce_op:
case swe_op:
case swle_op:
case swre_op:
case prefe_op:
case lbue_op:
case lhue_op:
case lbe_op:
case lhe_op:
case lle_op:
case lwe_op:
return true;
default:
return false;
}
}
/**
* is_eva_am_mapped() - Find whether an access mode is mapped.
* @vcpu: KVM VCPU state.
* @am: 3-bit encoded access mode.
* @eu: Segment becomes unmapped and uncached when Status.ERL=1.
*
* Decode @am to find whether it encodes a mapped segment for the current VCPU
* state. Where necessary @eu and the actual instruction causing the fault are
* taken into account to make the decision.
*
* Returns: Whether the VCPU faulted on a TLB mapped address.
*/
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
u32 am_lookup;
int err;
/*
* Interpret access control mode. We assume address errors will already
* have been caught by the guest, leaving us with:
* AM UM SM KM 31..24 23..16
* UK 0 000 Unm 0 0
* MK 1 001 TLB 1
* MSK 2 010 TLB TLB 1
* MUSK 3 011 TLB TLB TLB 1
* MUSUK 4 100 TLB TLB Unm 0 1
* USK 5 101 Unm Unm 0 0
* - 6 110 0 0
* UUSK 7 111 Unm Unm Unm 0 0
*
* We shift a magic value by AM across the sign bit to find if always
* TLB mapped, and if not shift by 8 again to find if it depends on KM.
*/
am_lookup = 0x70080000 << am;
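/*
 * Decoding the constant: bits 30, 29 and 28 of 0x70080000 reach the sign
 * bit for AM = 1 (MK), 2 (MSK) and 3 (MUSK) respectively ("always TLB
 * mapped"), while bit 19 reaches the sign bit after the additional << 8
 * when AM = 4 (MUSUK) ("TLB mapped unless in kernel mode").
 */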
if ((s32)am_lookup < 0) {
/*
* MK, MSK, MUSK
* Always TLB mapped, unless SegCtl.EU && ERL
*/
if (!eu || !(read_gc0_status() & ST0_ERL))
return true;
} else {
am_lookup <<= 8;
if ((s32)am_lookup < 0) {
union mips_instruction inst;
unsigned int status;
u32 *opc;
/*
* MUSUK
* TLB mapped if not in kernel mode
*/
status = read_gc0_status();
if (!(status & (ST0_EXL | ST0_ERL)) &&
(status & ST0_KSU))
return true;
/*
* EVA access instructions in kernel
* mode access user address space.
*/
opc = (u32 *)vcpu->arch.pc;
if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (!err && is_eva_access(inst))
return true;
}
}
return false;
}
/**
* kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
* @vcpu: KVM VCPU state.
* @gva: Guest virtual address to convert.
* @gpa: Output guest physical address.
*
* Convert a guest virtual address (GVA) which is valid according to the guest
* context, to a guest physical address (GPA).
*
* Returns: 0 on success.
* -errno on failure.
*/
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
unsigned long *gpa)
{
u32 gva32 = gva;
unsigned long segctl;
if ((long)gva == (s32)gva32) {
/* Handle canonical 32-bit virtual address */
if (cpu_guest_has_segments) {
unsigned long mask, pa;
switch (gva32 >> 29) {
case 0:
case 1: /* CFG5 (1GB) */
segctl = read_gc0_segctl2() >> 16;
mask = (unsigned long)0xfc0000000ull;
break;
case 2:
case 3: /* CFG4 (1GB) */
segctl = read_gc0_segctl2();
mask = (unsigned long)0xfc0000000ull;
break;
case 4: /* CFG3 (512MB) */
segctl = read_gc0_segctl1() >> 16;
mask = (unsigned long)0xfe0000000ull;
break;
case 5: /* CFG2 (512MB) */
segctl = read_gc0_segctl1();
mask = (unsigned long)0xfe0000000ull;
break;
case 6: /* CFG1 (512MB) */
segctl = read_gc0_segctl0() >> 16;
mask = (unsigned long)0xfe0000000ull;
break;
case 7: /* CFG0 (512MB) */
segctl = read_gc0_segctl0();
mask = (unsigned long)0xfe0000000ull;
break;
default:
/*
* GCC 4.9 isn't smart enough to figure out that
* segctl and mask are always initialised.
*/
unreachable();
}
if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
segctl & 0x0008))
goto tlb_mapped;
/* Unmapped, find guest physical address */
pa = (segctl << 20) & mask;
pa |= gva32 & ~mask;
*gpa = pa;
return 0;
} else if ((s32)gva32 < (s32)0xc0000000) {
/* legacy unmapped KSeg0 or KSeg1 */
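/* e.g. GVA 0x80001000 (kseg0) and 0xa0001000 (kseg1) both yield GPA 0x00001000 */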
*gpa = gva32 & 0x1fffffff;
return 0;
}
#ifdef CONFIG_64BIT
} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
/* XKPHYS */
if (cpu_guest_has_segments) {
/*
* Each of the 8 regions can be overridden by SegCtl2.XR
* to use SegCtl1.XAM.
*/
segctl = read_gc0_segctl2();
if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
segctl = read_gc0_segctl1();
if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
0))
goto tlb_mapped;
}
}
/*
* Traditionally fully unmapped.
* Bits 61:59 specify the CCA, which we can just mask off here.
* Bits 58:PABITS should be zero, but we shouldn't have got here
* if it wasn't.
*/
*gpa = gva & 0x07ffffffffffffff;
return 0;
#endif
}
tlb_mapped:
return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}
/**
* kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
* @vcpu: KVM VCPU state.
* @badvaddr: Root BadVAddr.
* @gpa: Output guest physical address.
*
* VZ implementations are permitted to report guest virtual addresses (GVA) in
* BadVAddr on a root exception during guest execution, instead of the more
* convenient guest physical addresses (GPA). When we get a GVA, this function
* converts it to a GPA, taking into account guest segmentation and guest TLB
* state.
*
* Returns: 0 on success.
* -errno on failure.
*/
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
unsigned long *gpa)
{
unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
/* If BadVAddr is GPA, then all is well in the world */
if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
*gpa = badvaddr;
return 0;
}
/* Otherwise we'd expect it to be GVA ... */
if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
"Unexpected gexccode %#x\n", gexccode))
return -EINVAL;
/* ... and we need to perform the GVA->GPA translation in software */
return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}
static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
u32 inst = 0;
/*
* Fetch the instruction.
*/
if (cause & CAUSEF_BD)
opc += 1;
kvm_get_badinstr(opc, vcpu, &inst);
kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
exccode, opc, inst, badvaddr,
read_gc0_status());
kvm_arch_vcpu_dump_regs(vcpu);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
static unsigned long mips_process_maar(unsigned int op, unsigned long val)
{
/* Mask off unused bits */
unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;
if (read_gc0_pagegrain() & PG_ELPA)
mask |= 0x00ffffff00000000ull;
if (cpu_guest_has_mvh)
mask |= MIPS_MAAR_VH;
/* Set or clear VH */
if (op == mtc_op) {
/* clear VH */
val &= ~MIPS_MAAR_VH;
} else if (op == dmtc_op) {
/* set VH to match VL */
val &= ~MIPS_MAAR_VH;
if (val & MIPS_MAAR_VL)
val |= MIPS_MAAR_VH;
}
return val & mask;
}
static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
val &= MIPS_MAARI_INDEX;
if (val == MIPS_MAARI_INDEX)
kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
else if (val < ARRAY_SIZE(vcpu->arch.maar))
kvm_write_sw_gc0_maari(cop0, val);
}
static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
u32 *opc, u32 cause,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
u32 rt, rd, sel;
unsigned long curr_pc;
unsigned long val;
/*
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
if (inst.co_format.co) {
switch (inst.co_format.func) {
case wait_op:
er = kvm_mips_emul_wait(vcpu);
break;
default:
er = EMULATE_FAIL;
}
} else {
rt = inst.c0r_format.rt;
rd = inst.c0r_format.rd;
sel = inst.c0r_format.sel;
switch (inst.c0r_format.rs) {
case dmfc_op:
case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
#endif
if (rd == MIPS_CP0_COUNT &&
sel == 0) { /* Count */
val = kvm_mips_read_count(vcpu);
} else if (rd == MIPS_CP0_COMPARE &&
sel == 0) { /* Compare */
val = read_gc0_compare();
} else if (rd == MIPS_CP0_LLADDR &&
sel == 0) { /* LLAddr */
if (cpu_guest_has_rw_llb)
val = read_gc0_lladdr() &
MIPS_LLADDR_LLB;
else
val = 0;
} else if (rd == MIPS_CP0_LLADDR &&
sel == 1 && /* MAAR */
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) {
/* MAARI must be in range */
BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
ARRAY_SIZE(vcpu->arch.maar));
val = vcpu->arch.maar[
kvm_read_sw_gc0_maari(cop0)];
} else if ((rd == MIPS_CP0_PRID &&
(sel == 0 || /* PRid */
sel == 2 || /* CDMMBase */
sel == 3)) || /* CMGCRBase */
(rd == MIPS_CP0_STATUS &&
(sel == 2 || /* SRSCtl */
sel == 3)) || /* SRSMap */
(rd == MIPS_CP0_CONFIG &&
(sel == 6 || /* Config6 */
sel == 7)) || /* Config7 */
(rd == MIPS_CP0_LLADDR &&
(sel == 2) && /* MAARI */
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) ||
(rd == MIPS_CP0_ERRCTL &&
(sel == 0))) { /* ErrCtl */
val = cop0->reg[rd][sel];
#ifdef CONFIG_CPU_LOONGSON64
} else if (rd == MIPS_CP0_DIAG &&
(sel == 0)) { /* Diag */
val = cop0->reg[rd][sel];
#endif
} else {
val = 0;
er = EMULATE_FAIL;
}
if (er != EMULATE_FAIL) {
/* Sign extend */
if (inst.c0r_format.rs == mfc_op)
val = (int)val;
vcpu->arch.gprs[rt] = val;
}
trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
KVM_TRACE_COP0(rd, sel), val);
break;
case dmtc_op:
case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
#endif
val = vcpu->arch.gprs[rt];
trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
KVM_TRACE_COP0(rd, sel), val);
if (rd == MIPS_CP0_COUNT &&
sel == 0) { /* Count */
kvm_vz_lose_htimer(vcpu);
kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
} else if (rd == MIPS_CP0_COMPARE &&
sel == 0) { /* Compare */
kvm_mips_write_compare(vcpu,
vcpu->arch.gprs[rt],
true);
} else if (rd == MIPS_CP0_LLADDR &&
sel == 0) { /* LLAddr */
/*
* P5600 generates GPSI on guest MTC0 LLAddr.
* Only allow the guest to clear LLB.
*/
if (cpu_guest_has_rw_llb &&
!(val & MIPS_LLADDR_LLB))
write_gc0_lladdr(0);
} else if (rd == MIPS_CP0_LLADDR &&
sel == 1 && /* MAAR */
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) {
val = mips_process_maar(inst.c0r_format.rs,
val);
/* MAARI must be in range */
BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
ARRAY_SIZE(vcpu->arch.maar));
vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
val;
} else if (rd == MIPS_CP0_LLADDR &&
(sel == 2) && /* MAARI */
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) {
kvm_write_maari(vcpu, val);
} else if (rd == MIPS_CP0_CONFIG &&
(sel == 6)) {
cop0->reg[rd][sel] = (int)val;
} else if (rd == MIPS_CP0_ERRCTL &&
(sel == 0)) { /* ErrCtl */
/* ignore the written value */
#ifdef CONFIG_CPU_LOONGSON64
} else if (rd == MIPS_CP0_DIAG &&
(sel == 0)) { /* Diag */
unsigned long flags;
local_irq_save(flags);
if (val & LOONGSON_DIAG_BTB) {
/* Flush BTB */
set_c0_diag(LOONGSON_DIAG_BTB);
}
if (val & LOONGSON_DIAG_ITLB) {
/* Flush ITLB */
set_c0_diag(LOONGSON_DIAG_ITLB);
}
if (val & LOONGSON_DIAG_DTLB) {
/* Flush DTLB */
set_c0_diag(LOONGSON_DIAG_DTLB);
}
if (val & LOONGSON_DIAG_VTLB) {
/* Flush VTLB */
kvm_loongson_clear_guest_vtlb();
}
if (val & LOONGSON_DIAG_FTLB) {
/* Flush FTLB */
kvm_loongson_clear_guest_ftlb();
}
local_irq_restore(flags);
#endif
} else {
er = EMULATE_FAIL;
}
break;
default:
er = EMULATE_FAIL;
break;
}
}
/* Rollback PC only if emulation was unsuccessful */
if (er == EMULATE_FAIL) {
kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
curr_pc, __func__, inst.word);
vcpu->arch.pc = curr_pc;
}
return er;
}
static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
u32 *opc, u32 cause,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
u32 cache, op_inst, op, base;
s16 offset;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long va, curr_pc;
/*
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
base = inst.i_format.rs;
op_inst = inst.i_format.rt;
if (cpu_has_mips_r6)
offset = inst.spec3_format.simmediate;
else
offset = inst.i_format.simmediate;
cache = op_inst & CacheOp_Cache;
op = op_inst & CacheOp_Op;
va = arch->gprs[base] + offset;
kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
cache, op, base, arch->gprs[base], offset);
/* Secondary or tertiary cache ops ignored */
if (cache != Cache_I && cache != Cache_D)
return EMULATE_DONE;
switch (op_inst) {
case Index_Invalidate_I:
flush_icache_line_indexed(va);
return EMULATE_DONE;
case Index_Writeback_Inv_D:
flush_dcache_line_indexed(va);
return EMULATE_DONE;
case Hit_Invalidate_I:
case Hit_Invalidate_D:
case Hit_Writeback_Inv_D:
if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
/* We can just flush entire icache */
local_flush_icache_range(0, 0);
return EMULATE_DONE;
}
/*
 * So far, other platforms handle guest hit cache ops in hardware,
 * so a GPSI for them here is unexpected.
 */
break;
default:
break;
}
kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
offset);
/* Rollback PC */
vcpu->arch.pc = curr_pc;
return EMULATE_FAIL;
}
#ifdef CONFIG_CPU_LOONGSON64
static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
u32 *opc, u32 cause,
struct kvm_vcpu *vcpu)
{
unsigned int rs, rd;
unsigned int hostcfg;
unsigned long curr_pc;
enum emulation_result er = EMULATE_DONE;
/*
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
rs = inst.loongson3_lscsr_format.rs;
rd = inst.loongson3_lscsr_format.rd;
switch (inst.loongson3_lscsr_format.fr) {
case 0x8: /* Read CPUCFG */
++vcpu->stat.vz_cpucfg_exits;
hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
switch (vcpu->arch.gprs[rs]) {
case LOONGSON_CFG0:
vcpu->arch.gprs[rd] = 0x14c000;
break;
case LOONGSON_CFG1:
hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
LOONGSON_CFG1_SFBP);
vcpu->arch.gprs[rd] = hostcfg;
break;
case LOONGSON_CFG2:
hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
vcpu->arch.gprs[rd] = hostcfg;
break;
case LOONGSON_CFG3:
vcpu->arch.gprs[rd] = hostcfg;
break;
default:
/* Don't export any other advanced features to guest */
vcpu->arch.gprs[rd] = 0;
break;
}
break;
default:
kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
er = EMULATE_FAIL;
break;
}
/* Rollback PC only if emulation was unsuccessful */
if (er == EMULATE_FAIL) {
kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);
vcpu->arch.pc = curr_pc;
}
return er;
}
#endif
static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
struct kvm_vcpu_arch *arch = &vcpu->arch;
union mips_instruction inst;
int rd, rt, sel;
int err;
/*
* Fetch the instruction.
*/
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err)
return EMULATE_FAIL;
switch (inst.r_format.opcode) {
case cop0_op:
er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
break;
#ifndef CONFIG_CPU_MIPSR6
case cache_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
break;
#endif
#ifdef CONFIG_CPU_LOONGSON64
case lwc2_op:
er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu);
break;
#endif
case spec3_op:
switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
case cache6_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
break;
#endif
case rdhwr_op:
if (inst.r_format.rs || (inst.r_format.re >> 3))
goto unknown;
rd = inst.r_format.rd;
rt = inst.r_format.rt;
sel = inst.r_format.re & 0x7;
switch (rd) {
case MIPS_HWR_CC: /* Read count register */
arch->gprs[rt] =
(long)(int)kvm_mips_read_count(vcpu);
break;
default:
trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
KVM_TRACE_HWR(rd, sel), 0);
goto unknown;
}
trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
er = update_pc(vcpu, cause);
break;
default:
goto unknown;
}
break;
unknown:
default:
kvm_err("GPSI exception not supported (%p/%#x)\n",
opc, inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
er = EMULATE_FAIL;
break;
}
return er;
}
static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
struct kvm_vcpu_arch *arch = &vcpu->arch;
union mips_instruction inst;
int err;
/*
* Fetch the instruction.
*/
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err)
return EMULATE_FAIL;
/* complete MTC0 on behalf of guest and advance EPC */
if (inst.c0r_format.opcode == cop0_op &&
inst.c0r_format.rs == mtc_op &&
inst.c0r_format.z == 0) {
int rt = inst.c0r_format.rt;
int rd = inst.c0r_format.rd;
int sel = inst.c0r_format.sel;
unsigned int val = arch->gprs[rt];
unsigned int old_val, change;
trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
val);
if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
/* FR bit should read as zero if no FPU */
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
val &= ~(ST0_CU1 | ST0_FR);
/*
* Also don't allow FR to be set if host doesn't support
* it.
*/
if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
val &= ~ST0_FR;
old_val = read_gc0_status();
change = val ^ old_val;
if (change & ST0_FR) {
/*
* FPU and Vector register state is made
* UNPREDICTABLE by a change of FR, so don't
* even bother saving it.
*/
kvm_drop_fpu(vcpu);
}
/*
* If MSA state is already live, it is undefined how it
* interacts with FR=0 FPU state, and we don't want to
* hit reserved instruction exceptions trying to save
* the MSA state later when CU=1 && FR=1, so play it
* safe and save it first.
*/
if (change & ST0_CU1 && !(val & ST0_FR) &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
kvm_lose_fpu(vcpu);
write_gc0_status(val);
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
u32 old_cause = read_gc0_cause();
u32 change = old_cause ^ val;
/* DC bit enabling/disabling timer? */
if (change & CAUSEF_DC) {
if (val & CAUSEF_DC) {
kvm_vz_lose_htimer(vcpu);
kvm_mips_count_disable_cause(vcpu);
} else {
kvm_mips_count_enable_cause(vcpu);
}
}
/* Only certain bits are RW to the guest */
change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
CAUSEF_IP0 | CAUSEF_IP1);
/* WP can only be cleared */
change &= ~CAUSEF_WP | old_cause;
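/*
 * i.e. the WP bit of change survives the mask only if old_cause already
 * had WP set, so a 1->0 (clear) transition is applied while a 0->1 (set)
 * attempt is dropped.
 */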
write_gc0_cause(old_cause ^ change);
} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
write_gc0_intctl(val);
} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
old_val = read_gc0_config5();
change = val ^ old_val;
/* Handle changes in FPU/MSA modes */
preempt_disable();
/*
* Propagate FRE changes immediately if the FPU
* context is already loaded.
*/
if (change & MIPS_CONF5_FRE &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
change_c0_config5(MIPS_CONF5_FRE, val);
preempt_enable();
val = old_val ^
(change & kvm_vz_config5_guest_wrmask(vcpu));
write_gc0_config5(val);
} else {
kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
opc, inst.word);
er = EMULATE_FAIL;
}
if (er != EMULATE_FAIL)
er = update_pc(vcpu, cause);
} else {
kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
opc, inst.word);
er = EMULATE_FAIL;
}
return er;
}
static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
/*
* Presumably this is due to MC (guest mode change), so let's trace some
* relevant info.
*/
trace_kvm_guest_mode_change(vcpu);
return EMULATE_DONE;
}
static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
enum emulation_result er;
union mips_instruction inst;
unsigned long curr_pc;
int err;
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err)
return EMULATE_FAIL;
/*
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
er = kvm_mips_emul_hypcall(vcpu, inst);
if (er == EMULATE_FAIL)
vcpu->arch.pc = curr_pc;
return er;
}
static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
u32 cause,
u32 *opc,
struct kvm_vcpu *vcpu)
{
u32 inst;
/*
* Fetch the instruction.
*/
if (cause & CAUSEF_BD)
opc += 1;
kvm_get_badinstr(opc, vcpu, &inst);
kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
gexccode, opc, inst, read_gc0_status());
return EMULATE_FAIL;
}
static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
int ret = RESUME_GUEST;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
switch (gexccode) {
case MIPS_GCTL0_GEXC_GPSI:
++vcpu->stat.vz_gpsi_exits;
er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_GSFC:
++vcpu->stat.vz_gsfc_exits;
er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_HC:
++vcpu->stat.vz_hc_exits;
er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_GRR:
++vcpu->stat.vz_grr_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu);
break;
case MIPS_GCTL0_GEXC_GVA:
++vcpu->stat.vz_gva_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu);
break;
case MIPS_GCTL0_GEXC_GHFC:
++vcpu->stat.vz_ghfc_exits;
er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_GPA:
++vcpu->stat.vz_gpa_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu);
break;
default:
++vcpu->stat.vz_resvd_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu);
break;
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else if (er == EMULATE_HYPERCALL) {
ret = kvm_mips_handle_hypcall(vcpu);
} else {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
/**
* kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use a coprocessor which hasn't been allowed
* by the root context.
*
* Return: value indicating whether to resume the host or the guest
* (RESUME_HOST or RESUME_GUEST)
*/
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_FAIL;
int ret = RESUME_GUEST;
if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
/*
* If guest FPU not present, the FPU operation should have been
* treated as a reserved instruction!
* If FPU already in use, we shouldn't get this at all.
*/
if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
preempt_enable();
return EMULATE_FAIL;
}
kvm_own_fpu(vcpu);
er = EMULATE_DONE;
}
/* other coprocessors not handled */
switch (er) {
case EMULATE_DONE:
ret = RESUME_GUEST;
break;
case EMULATE_FAIL:
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
default:
BUG();
}
return ret;
}
/**
* kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use MSA when it is disabled in the root
* context.
*
* Return: value indicating whether to resume the host or the guest
* (RESUME_HOST or RESUME_GUEST)
*/
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
/*
* If MSA not present or not exposed to guest or FR=0, the MSA operation
* should have been treated as a reserved instruction!
* Same if CU1=1, FR=0.
* If MSA already in use, we shouldn't get this at all.
*/
if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
(read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
!(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
kvm_own_msa(vcpu);
return RESUME_GUEST;
}
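/*
 * Guest TLB load miss that reached the root context: if a root GPA mapping
 * cannot be established, the access is treated as MMIO and the load is
 * emulated, exiting to userspace with KVM_EXIT_MMIO when required.
 */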
static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
union mips_instruction inst;
enum emulation_result er = EMULATE_DONE;
int err, ret = RESUME_GUEST;
if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
/* A code fetch fault doesn't count as an MMIO */
if (kvm_is_ifetch_fault(&vcpu->arch)) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Fetch the instruction */
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Treat as MMIO */
er = kvm_mips_emulate_load(inst, cause, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
}
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else if (er == EMULATE_DO_MMIO) {
run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
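/*
 * Guest TLB store miss (also used for TLB modified exceptions): translate
 * BadVAddr to a GPA, simply retrying the access if translation fails, then
 * either establish the root mapping or emulate the store as MMIO.
 */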
static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
union mips_instruction inst;
enum emulation_result er = EMULATE_DONE;
int err;
int ret = RESUME_GUEST;
/* Just try the access again if we couldn't do the translation */
if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
return RESUME_GUEST;
vcpu->arch.host_cp0_badvaddr = badvaddr;
if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
/* Fetch the instruction */
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Treat as MMIO */
er = kvm_mips_emulate_store(inst, cause, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
}
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else if (er == EMULATE_DO_MMIO) {
run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static u64 kvm_vz_get_one_regs[] = {
KVM_REG_MIPS_CP0_INDEX,
KVM_REG_MIPS_CP0_ENTRYLO0,
KVM_REG_MIPS_CP0_ENTRYLO1,
KVM_REG_MIPS_CP0_CONTEXT,
KVM_REG_MIPS_CP0_PAGEMASK,
KVM_REG_MIPS_CP0_PAGEGRAIN,
KVM_REG_MIPS_CP0_WIRED,
KVM_REG_MIPS_CP0_HWRENA,
KVM_REG_MIPS_CP0_BADVADDR,
KVM_REG_MIPS_CP0_COUNT,
KVM_REG_MIPS_CP0_ENTRYHI,
KVM_REG_MIPS_CP0_COMPARE,
KVM_REG_MIPS_CP0_STATUS,
KVM_REG_MIPS_CP0_INTCTL,
KVM_REG_MIPS_CP0_CAUSE,
KVM_REG_MIPS_CP0_EPC,
KVM_REG_MIPS_CP0_PRID,
KVM_REG_MIPS_CP0_EBASE,
KVM_REG_MIPS_CP0_CONFIG,
KVM_REG_MIPS_CP0_CONFIG1,
KVM_REG_MIPS_CP0_CONFIG2,
KVM_REG_MIPS_CP0_CONFIG3,
KVM_REG_MIPS_CP0_CONFIG4,
KVM_REG_MIPS_CP0_CONFIG5,
KVM_REG_MIPS_CP0_CONFIG6,
#ifdef CONFIG_64BIT
KVM_REG_MIPS_CP0_XCONTEXT,
#endif
KVM_REG_MIPS_CP0_ERROREPC,
KVM_REG_MIPS_COUNT_CTL,
KVM_REG_MIPS_COUNT_RESUME,
KVM_REG_MIPS_COUNT_HZ,
};
static u64 kvm_vz_get_one_regs_contextconfig[] = {
KVM_REG_MIPS_CP0_CONTEXTCONFIG,
#ifdef CONFIG_64BIT
KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
#endif
};
static u64 kvm_vz_get_one_regs_segments[] = {
KVM_REG_MIPS_CP0_SEGCTL0,
KVM_REG_MIPS_CP0_SEGCTL1,
KVM_REG_MIPS_CP0_SEGCTL2,
};
static u64 kvm_vz_get_one_regs_htw[] = {
KVM_REG_MIPS_CP0_PWBASE,
KVM_REG_MIPS_CP0_PWFIELD,
KVM_REG_MIPS_CP0_PWSIZE,
KVM_REG_MIPS_CP0_PWCTL,
};
static u64 kvm_vz_get_one_regs_kscratch[] = {
KVM_REG_MIPS_CP0_KSCRATCH1,
KVM_REG_MIPS_CP0_KSCRATCH2,
KVM_REG_MIPS_CP0_KSCRATCH3,
KVM_REG_MIPS_CP0_KSCRATCH4,
KVM_REG_MIPS_CP0_KSCRATCH5,
KVM_REG_MIPS_CP0_KSCRATCH6,
};
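/**
 * kvm_vz_num_regs() - Count one_reg indices exposed to userspace.
 * @vcpu:	Virtual CPU context.
 *
 * Count the base register indices plus those depending on optional guest CP0
 * features (UserLocal, BadInstr/BadInstrP, ContextConfig, SegCtl, HTW, MAAR
 * and the implemented KScratch registers).
 *
 * Returns:	Number of indices kvm_vz_copy_reg_indices() will write.
 */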
static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
unsigned long ret;
ret = ARRAY_SIZE(kvm_vz_get_one_regs);
if (cpu_guest_has_userlocal)
++ret;
if (cpu_guest_has_badinstr)
++ret;
if (cpu_guest_has_badinstrp)
++ret;
if (cpu_guest_has_contextconfig)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
if (cpu_guest_has_segments)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
if (cpu_guest_has_htw || cpu_guest_has_ldpte)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
return ret;
}
static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
u64 index;
unsigned int i;
if (copy_to_user(indices, kvm_vz_get_one_regs,
sizeof(kvm_vz_get_one_regs)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs);
if (cpu_guest_has_userlocal) {
index = KVM_REG_MIPS_CP0_USERLOCAL;
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
if (cpu_guest_has_badinstr) {
index = KVM_REG_MIPS_CP0_BADINSTR;
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
if (cpu_guest_has_badinstrp) {
index = KVM_REG_MIPS_CP0_BADINSTRP;
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
if (cpu_guest_has_contextconfig) {
if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
sizeof(kvm_vz_get_one_regs_contextconfig)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
}
if (cpu_guest_has_segments) {
if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
sizeof(kvm_vz_get_one_regs_segments)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
}
if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
sizeof(kvm_vz_get_one_regs_htw)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
}
if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
index = KVM_REG_MIPS_CP0_MAAR(i);
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
index = KVM_REG_MIPS_CP0_MAARI;
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
for (i = 0; i < 6; ++i) {
if (!cpu_guest_has_kscr(i + 2))
continue;
if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
sizeof(kvm_vz_get_one_regs_kscratch[i])))
return -EFAULT;
++indices;
}
return 0;
}
static inline s64 entrylo_kvm_to_user(unsigned long v)
{
s64 mask, ret = v;
if (BITS_PER_LONG == 32) {
/*
* KVM API exposes 64-bit version of the register, so move the
* RI/XI bits up into place.
*/
mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
ret &= ~mask;
ret |= ((s64)v & mask) << 32;
}
return ret;
}
static inline unsigned long entrylo_user_to_kvm(s64 v)
{
unsigned long mask, ret = v;
if (BITS_PER_LONG == 32) {
/*
		 * KVM API exposes 64-bit version of the register, so move the
* RI/XI bits down into place.
*/
mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
ret &= ~mask;
ret |= (v >> 32) & mask;
}
return ret;
}
static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 *v)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int idx;
switch (reg->id) {
case KVM_REG_MIPS_CP0_INDEX:
*v = (long)read_gc0_index();
break;
case KVM_REG_MIPS_CP0_ENTRYLO0:
*v = entrylo_kvm_to_user(read_gc0_entrylo0());
break;
case KVM_REG_MIPS_CP0_ENTRYLO1:
*v = entrylo_kvm_to_user(read_gc0_entrylo1());
break;
case KVM_REG_MIPS_CP0_CONTEXT:
*v = (long)read_gc0_context();
break;
case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
if (!cpu_guest_has_contextconfig)
return -EINVAL;
*v = read_gc0_contextconfig();
break;
case KVM_REG_MIPS_CP0_USERLOCAL:
if (!cpu_guest_has_userlocal)
return -EINVAL;
*v = read_gc0_userlocal();
break;
#ifdef CONFIG_64BIT
case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
if (!cpu_guest_has_contextconfig)
return -EINVAL;
*v = read_gc0_xcontextconfig();
break;
#endif
case KVM_REG_MIPS_CP0_PAGEMASK:
*v = (long)read_gc0_pagemask();
break;
case KVM_REG_MIPS_CP0_PAGEGRAIN:
*v = (long)read_gc0_pagegrain();
break;
case KVM_REG_MIPS_CP0_SEGCTL0:
if (!cpu_guest_has_segments)
return -EINVAL;
*v = read_gc0_segctl0();
break;
case KVM_REG_MIPS_CP0_SEGCTL1:
if (!cpu_guest_has_segments)
return -EINVAL;
*v = read_gc0_segctl1();
break;
case KVM_REG_MIPS_CP0_SEGCTL2:
if (!cpu_guest_has_segments)
return -EINVAL;
*v = read_gc0_segctl2();
break;
case KVM_REG_MIPS_CP0_PWBASE:
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwbase();
break;
case KVM_REG_MIPS_CP0_PWFIELD:
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwfield();
break;
case KVM_REG_MIPS_CP0_PWSIZE:
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwsize();
break;
case KVM_REG_MIPS_CP0_WIRED:
*v = (long)read_gc0_wired();
break;
case KVM_REG_MIPS_CP0_PWCTL:
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwctl();
break;
case KVM_REG_MIPS_CP0_HWRENA:
*v = (long)read_gc0_hwrena();
break;
case KVM_REG_MIPS_CP0_BADVADDR:
*v = (long)read_gc0_badvaddr();
break;
case KVM_REG_MIPS_CP0_BADINSTR:
if (!cpu_guest_has_badinstr)
return -EINVAL;
*v = read_gc0_badinstr();
break;
case KVM_REG_MIPS_CP0_BADINSTRP:
if (!cpu_guest_has_badinstrp)
return -EINVAL;
*v = read_gc0_badinstrp();
break;
case KVM_REG_MIPS_CP0_COUNT:
*v = kvm_mips_read_count(vcpu);
break;
case KVM_REG_MIPS_CP0_ENTRYHI:
*v = (long)read_gc0_entryhi();
break;
case KVM_REG_MIPS_CP0_COMPARE:
*v = (long)read_gc0_compare();
break;
case KVM_REG_MIPS_CP0_STATUS:
*v = (long)read_gc0_status();
break;
case KVM_REG_MIPS_CP0_INTCTL:
*v = read_gc0_intctl();
break;
case KVM_REG_MIPS_CP0_CAUSE:
*v = (long)read_gc0_cause();
break;
case KVM_REG_MIPS_CP0_EPC:
*v = (long)read_gc0_epc();
break;
case KVM_REG_MIPS_CP0_PRID:
switch (boot_cpu_type()) {
case CPU_CAVIUM_OCTEON3:
/* Octeon III has a read-only guest.PRid */
*v = read_gc0_prid();
break;
default:
*v = (long)kvm_read_c0_guest_prid(cop0);
break;
}
break;
case KVM_REG_MIPS_CP0_EBASE:
*v = kvm_vz_read_gc0_ebase();
break;
case KVM_REG_MIPS_CP0_CONFIG:
*v = read_gc0_config();
break;
case KVM_REG_MIPS_CP0_CONFIG1:
if (!cpu_guest_has_conf1)
return -EINVAL;
*v = read_gc0_config1();
break;
case KVM_REG_MIPS_CP0_CONFIG2:
if (!cpu_guest_has_conf2)
return -EINVAL;
*v = read_gc0_config2();
break;
case KVM_REG_MIPS_CP0_CONFIG3:
if (!cpu_guest_has_conf3)
return -EINVAL;
*v = read_gc0_config3();
break;
case KVM_REG_MIPS_CP0_CONFIG4:
if (!cpu_guest_has_conf4)
return -EINVAL;
*v = read_gc0_config4();
break;
case KVM_REG_MIPS_CP0_CONFIG5:
if (!cpu_guest_has_conf5)
return -EINVAL;
*v = read_gc0_config5();
break;
case KVM_REG_MIPS_CP0_CONFIG6:
*v = kvm_read_sw_gc0_config6(cop0);
break;
case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
if (idx >= ARRAY_SIZE(vcpu->arch.maar))
return -EINVAL;
*v = vcpu->arch.maar[idx];
break;
case KVM_REG_MIPS_CP0_MAARI:
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
*v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0);
break;
#ifdef CONFIG_64BIT
case KVM_REG_MIPS_CP0_XCONTEXT:
*v = read_gc0_xcontext();
break;
#endif
case KVM_REG_MIPS_CP0_ERROREPC:
*v = (long)read_gc0_errorepc();
break;
case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
if (!cpu_guest_has_kscr(idx))
return -EINVAL;
switch (idx) {
case 2:
*v = (long)read_gc0_kscratch1();
break;
case 3:
*v = (long)read_gc0_kscratch2();
break;
case 4:
*v = (long)read_gc0_kscratch3();
break;
case 5:
*v = (long)read_gc0_kscratch4();
break;
case 6:
*v = (long)read_gc0_kscratch5();
break;
case 7:
*v = (long)read_gc0_kscratch6();
break;
}
break;
case KVM_REG_MIPS_COUNT_CTL:
*v = vcpu->arch.count_ctl;
break;
case KVM_REG_MIPS_COUNT_RESUME:
*v = ktime_to_ns(vcpu->arch.count_resume);
break;
case KVM_REG_MIPS_COUNT_HZ:
*v = vcpu->arch.count_hz;
break;
default:
return -EINVAL;
}
return 0;
}
static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 v)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int idx;
int ret = 0;
unsigned int cur, change;
switch (reg->id) {
case KVM_REG_MIPS_CP0_INDEX:
write_gc0_index(v);
break;
case KVM_REG_MIPS_CP0_ENTRYLO0:
write_gc0_entrylo0(entrylo_user_to_kvm(v));
break;
case KVM_REG_MIPS_CP0_ENTRYLO1:
write_gc0_entrylo1(entrylo_user_to_kvm(v));
break;
case KVM_REG_MIPS_CP0_CONTEXT:
write_gc0_context(v);
break;
case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
if (!cpu_guest_has_contextconfig)
return -EINVAL;
write_gc0_contextconfig(v);
break;
case KVM_REG_MIPS_CP0_USERLOCAL:
if (!cpu_guest_has_userlocal)
return -EINVAL;
write_gc0_userlocal(v);
break;
#ifdef CONFIG_64BIT
case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
if (!cpu_guest_has_contextconfig)
return -EINVAL;
write_gc0_xcontextconfig(v);
break;
#endif
case KVM_REG_MIPS_CP0_PAGEMASK:
write_gc0_pagemask(v);
break;
case KVM_REG_MIPS_CP0_PAGEGRAIN:
write_gc0_pagegrain(v);
break;
case KVM_REG_MIPS_CP0_SEGCTL0:
if (!cpu_guest_has_segments)
return -EINVAL;
write_gc0_segctl0(v);
break;
case KVM_REG_MIPS_CP0_SEGCTL1:
if (!cpu_guest_has_segments)
return -EINVAL;
write_gc0_segctl1(v);
break;
case KVM_REG_MIPS_CP0_SEGCTL2:
if (!cpu_guest_has_segments)
return -EINVAL;
write_gc0_segctl2(v);
break;
case KVM_REG_MIPS_CP0_PWBASE:
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwbase(v);
break;
case KVM_REG_MIPS_CP0_PWFIELD:
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwfield(v);
break;
case KVM_REG_MIPS_CP0_PWSIZE:
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwsize(v);
break;
case KVM_REG_MIPS_CP0_WIRED:
change_gc0_wired(MIPSR6_WIRED_WIRED, v);
break;
case KVM_REG_MIPS_CP0_PWCTL:
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwctl(v);
break;
case KVM_REG_MIPS_CP0_HWRENA:
write_gc0_hwrena(v);
break;
case KVM_REG_MIPS_CP0_BADVADDR:
write_gc0_badvaddr(v);
break;
case KVM_REG_MIPS_CP0_BADINSTR:
if (!cpu_guest_has_badinstr)
return -EINVAL;
write_gc0_badinstr(v);
break;
case KVM_REG_MIPS_CP0_BADINSTRP:
if (!cpu_guest_has_badinstrp)
return -EINVAL;
write_gc0_badinstrp(v);
break;
case KVM_REG_MIPS_CP0_COUNT:
kvm_mips_write_count(vcpu, v);
break;
case KVM_REG_MIPS_CP0_ENTRYHI:
write_gc0_entryhi(v);
break;
case KVM_REG_MIPS_CP0_COMPARE:
kvm_mips_write_compare(vcpu, v, false);
break;
case KVM_REG_MIPS_CP0_STATUS:
write_gc0_status(v);
break;
case KVM_REG_MIPS_CP0_INTCTL:
write_gc0_intctl(v);
break;
case KVM_REG_MIPS_CP0_CAUSE:
/*
* If the timer is stopped or started (DC bit) it must look
* atomic with changes to the timer interrupt pending bit (TI).
* A timer interrupt should not happen in between.
*/
if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
if (v & CAUSEF_DC) {
/* disable timer first */
kvm_mips_count_disable_cause(vcpu);
change_gc0_cause((u32)~CAUSEF_DC, v);
} else {
/* enable timer last */
change_gc0_cause((u32)~CAUSEF_DC, v);
kvm_mips_count_enable_cause(vcpu);
}
} else {
write_gc0_cause(v);
}
break;
case KVM_REG_MIPS_CP0_EPC:
write_gc0_epc(v);
break;
case KVM_REG_MIPS_CP0_PRID:
switch (boot_cpu_type()) {
case CPU_CAVIUM_OCTEON3:
			/* Octeon III has a guest.PRid, but it's read-only */
break;
default:
kvm_write_c0_guest_prid(cop0, v);
break;
}
break;
case KVM_REG_MIPS_CP0_EBASE:
kvm_vz_write_gc0_ebase(v);
break;
case KVM_REG_MIPS_CP0_CONFIG:
cur = read_gc0_config();
change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
write_gc0_config(v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG1:
if (!cpu_guest_has_conf1)
break;
cur = read_gc0_config1();
change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
write_gc0_config1(v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG2:
if (!cpu_guest_has_conf2)
break;
cur = read_gc0_config2();
change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
write_gc0_config2(v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG3:
if (!cpu_guest_has_conf3)
break;
cur = read_gc0_config3();
change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
write_gc0_config3(v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG4:
if (!cpu_guest_has_conf4)
break;
cur = read_gc0_config4();
change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
write_gc0_config4(v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG5:
if (!cpu_guest_has_conf5)
break;
cur = read_gc0_config5();
change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
write_gc0_config5(v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG6:
cur = kvm_read_sw_gc0_config6(cop0);
change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
kvm_write_sw_gc0_config6(cop0, (int)v);
}
break;
case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
if (idx >= ARRAY_SIZE(vcpu->arch.maar))
return -EINVAL;
vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
break;
case KVM_REG_MIPS_CP0_MAARI:
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
kvm_write_maari(vcpu, v);
break;
#ifdef CONFIG_64BIT
case KVM_REG_MIPS_CP0_XCONTEXT:
write_gc0_xcontext(v);
break;
#endif
case KVM_REG_MIPS_CP0_ERROREPC:
write_gc0_errorepc(v);
break;
case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
if (!cpu_guest_has_kscr(idx))
return -EINVAL;
switch (idx) {
case 2:
write_gc0_kscratch1(v);
break;
case 3:
write_gc0_kscratch2(v);
break;
case 4:
write_gc0_kscratch3(v);
break;
case 5:
write_gc0_kscratch4(v);
break;
case 6:
write_gc0_kscratch5(v);
break;
case 7:
write_gc0_kscratch6(v);
break;
}
break;
case KVM_REG_MIPS_COUNT_CTL:
ret = kvm_mips_set_count_ctl(vcpu, v);
break;
case KVM_REG_MIPS_COUNT_RESUME:
ret = kvm_mips_set_count_resume(vcpu, v);
break;
case KVM_REG_MIPS_COUNT_HZ:
ret = kvm_mips_set_count_hz(vcpu, v);
break;
default:
return -EINVAL;
}
return ret;
}
#define guestid_cache(cpu) (cpu_data[cpu].guestid_cache)
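/*
 * Allocate the next GuestID from this CPU's cache, starting a new GuestID
 * version (and flushing the root and guest TLBs) when the ID space wraps.
 * GuestID 0 is reserved for root and is skipped.
 */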
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
unsigned long guestid = guestid_cache(cpu);
if (!(++guestid & GUESTID_MASK)) {
if (cpu_has_vtag_icache)
flush_icache_all();
if (!guestid) /* fix version if needed */
guestid = GUESTID_FIRST_VERSION;
++guestid; /* guestid 0 reserved for root */
/* start new guestid cycle */
kvm_vz_local_flush_roottlb_all_guests();
kvm_vz_local_flush_guesttlb_all();
}
guestid_cache(cpu) = guestid;
}
/* Returns 1 if the guest TLB may be clobbered */
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{
int ret = 0;
int i;
if (!kvm_request_pending(vcpu))
return 0;
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
if (cpu_has_guestid) {
/* Drop all GuestIDs for this VCPU */
for_each_possible_cpu(i)
vcpu->arch.vzguestid[i] = 0;
/* This will clobber guest TLB contents too */
ret = 1;
}
/*
* For Root ASID Dealias (RAD) we don't do anything here, but we
* still need the request to ensure we recheck asid_flush_mask.
* We can still return 0 as only the root TLB will be affected
* by a root ASID flush.
*/
}
return ret;
}
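/*
 * Save the guest's wired TLB entries so they can be restored later, growing
 * the backing array on demand and invalidating any previously saved entries
 * beyond the new wired count.
 */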
static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
{
unsigned int wired = read_gc0_wired();
struct kvm_mips_tlb *tlbs;
int i;
/* Expand the wired TLB array if necessary */
wired &= MIPSR6_WIRED_WIRED;
if (wired > vcpu->arch.wired_tlb_limit) {
tlbs = krealloc(vcpu->arch.wired_tlb, wired *
sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
if (WARN_ON(!tlbs)) {
/* Save whatever we can */
wired = vcpu->arch.wired_tlb_limit;
} else {
vcpu->arch.wired_tlb = tlbs;
vcpu->arch.wired_tlb_limit = wired;
}
}
if (wired)
/* Save wired entries from the guest TLB */
kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
/* Invalidate any dropped entries since last time */
for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
vcpu->arch.wired_tlb[i].tlb_mask = 0;
}
vcpu->arch.wired_tlb_used = wired;
}
static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
/* Load wired entries into the guest TLB */
if (vcpu->arch.wired_tlb)
kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
vcpu->arch.wired_tlb_used);
}
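/*
 * Prepare guest TLB / root GPA state for running this VCPU on this CPU: with
 * GuestID support, install a (possibly freshly allocated) GuestID in
 * GuestCtl1; otherwise flush the shared guest TLB if another VCPU or CPU ran
 * last, and refresh the GPA ASID if a flush was requested.
 */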
static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm *kvm = vcpu->kvm;
struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
bool migrated;
/*
* Are we entering guest context on a different CPU to last time?
* If so, the VCPU's guest TLB state on this CPU may be stale.
*/
migrated = (vcpu->arch.last_exec_cpu != cpu);
vcpu->arch.last_exec_cpu = cpu;
/*
* A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
* remains set until another vcpu is loaded in. As a rule GuestRID
* remains zeroed when in root context unless the kernel is busy
* manipulating guest tlb entries.
*/
if (cpu_has_guestid) {
/*
* Check if our GuestID is of an older version and thus invalid.
*
* We also discard the stored GuestID if we've executed on
* another CPU, as the guest mappings may have changed without
* hypervisor knowledge.
*/
if (migrated ||
(vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
GUESTID_VERSION_MASK) {
kvm_vz_get_new_guestid(cpu, vcpu);
vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
trace_kvm_guestid_change(vcpu,
vcpu->arch.vzguestid[cpu]);
}
/* Restore GuestID */
change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
} else {
/*
* The Guest TLB only stores a single guest's TLB state, so
* flush it if another VCPU has executed on this CPU.
*
* We also flush if we've executed on another CPU, as the guest
* mappings may have changed without hypervisor knowledge.
*/
if (migrated || last_exec_vcpu[cpu] != vcpu)
kvm_vz_local_flush_guesttlb_all();
last_exec_vcpu[cpu] = vcpu;
/*
* Root ASID dealiases guest GPA mappings in the root TLB.
* Allocate new root ASID if needed.
*/
if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
get_new_mmu_context(gpa_mm);
else
check_mmu_context(gpa_mm);
}
}
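/*
 * vcpu_load callback: restore the guest CP0 context into the hardware guest
 * registers. Wired and timer state are always restored; the remaining
 * registers are only rewritten if another VCPU has run on this CPU (or we
 * have migrated) since this VCPU was last loaded.
 */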
static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
bool migrated, all;
/*
* Have we migrated to a different CPU?
* If so, any old guest TLB state may be stale.
*/
migrated = (vcpu->arch.last_sched_cpu != cpu);
/*
* Was this the last VCPU to run on this CPU?
* If not, any old guest state from this VCPU will have been clobbered.
*/
all = migrated || (last_vcpu[cpu] != vcpu);
last_vcpu[cpu] = vcpu;
/*
* Restore CP0_Wired unconditionally as we clear it after use, and
* restore wired guest TLB entries (while in guest context).
*/
kvm_restore_gc0_wired(cop0);
if (current->flags & PF_VCPU) {
tlbw_use_hazard();
kvm_vz_vcpu_load_tlb(vcpu, cpu);
kvm_vz_vcpu_load_wired(vcpu);
}
/*
* Restore timer state regardless, as e.g. Cause.TI can change over time
* if left unmaintained.
*/
kvm_vz_restore_timer(vcpu);
/* Set MC bit if we want to trace guest mode changes */
if (kvm_trace_guest_mode_change)
set_c0_guestctl0(MIPS_GCTL0_MC);
else
clear_c0_guestctl0(MIPS_GCTL0_MC);
/* Don't bother restoring registers multiple times unless necessary */
if (!all)
return 0;
/*
* Restore config registers first, as some implementations restrict
* writes to other registers when the corresponding feature bits aren't
* set. For example Status.CU1 cannot be set unless Config1.FP is set.
*/
kvm_restore_gc0_config(cop0);
if (cpu_guest_has_conf1)
kvm_restore_gc0_config1(cop0);
if (cpu_guest_has_conf2)
kvm_restore_gc0_config2(cop0);
if (cpu_guest_has_conf3)
kvm_restore_gc0_config3(cop0);
if (cpu_guest_has_conf4)
kvm_restore_gc0_config4(cop0);
if (cpu_guest_has_conf5)
kvm_restore_gc0_config5(cop0);
if (cpu_guest_has_conf6)
kvm_restore_gc0_config6(cop0);
if (cpu_guest_has_conf7)
kvm_restore_gc0_config7(cop0);
kvm_restore_gc0_index(cop0);
kvm_restore_gc0_entrylo0(cop0);
kvm_restore_gc0_entrylo1(cop0);
kvm_restore_gc0_context(cop0);
if (cpu_guest_has_contextconfig)
kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
kvm_restore_gc0_xcontext(cop0);
if (cpu_guest_has_contextconfig)
kvm_restore_gc0_xcontextconfig(cop0);
#endif
kvm_restore_gc0_pagemask(cop0);
kvm_restore_gc0_pagegrain(cop0);
kvm_restore_gc0_hwrena(cop0);
kvm_restore_gc0_badvaddr(cop0);
kvm_restore_gc0_entryhi(cop0);
kvm_restore_gc0_status(cop0);
kvm_restore_gc0_intctl(cop0);
kvm_restore_gc0_epc(cop0);
kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
if (cpu_guest_has_userlocal)
kvm_restore_gc0_userlocal(cop0);
kvm_restore_gc0_errorepc(cop0);
/* restore KScratch registers if enabled in guest */
if (cpu_guest_has_conf4) {
if (cpu_guest_has_kscr(2))
kvm_restore_gc0_kscratch1(cop0);
if (cpu_guest_has_kscr(3))
kvm_restore_gc0_kscratch2(cop0);
if (cpu_guest_has_kscr(4))
kvm_restore_gc0_kscratch3(cop0);
if (cpu_guest_has_kscr(5))
kvm_restore_gc0_kscratch4(cop0);
if (cpu_guest_has_kscr(6))
kvm_restore_gc0_kscratch5(cop0);
if (cpu_guest_has_kscr(7))
kvm_restore_gc0_kscratch6(cop0);
}
if (cpu_guest_has_badinstr)
kvm_restore_gc0_badinstr(cop0);
if (cpu_guest_has_badinstrp)
kvm_restore_gc0_badinstrp(cop0);
if (cpu_guest_has_segments) {
kvm_restore_gc0_segctl0(cop0);
kvm_restore_gc0_segctl1(cop0);
kvm_restore_gc0_segctl2(cop0);
}
/* restore HTW registers */
if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
kvm_restore_gc0_pwbase(cop0);
kvm_restore_gc0_pwfield(cop0);
kvm_restore_gc0_pwsize(cop0);
kvm_restore_gc0_pwctl(cop0);
}
/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
if (cpu_has_guestctl2)
write_c0_guestctl2(
cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);
/*
* We should clear linked load bit to break interrupted atomics. This
* prevents a SC on the next VCPU from succeeding by matching a LL on
* the previous VCPU.
*/
if (vcpu->kvm->created_vcpus > 1)
write_gc0_lladdr(0);
return 0;
}
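/*
 * vcpu_put callback: save the hardware guest CP0 context back into the
 * software copy, clearing CP0_Wired so the wired guest TLB entries may be
 * overwritten while this VCPU is scheduled out.
 */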
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
if (current->flags & PF_VCPU)
kvm_vz_vcpu_save_wired(vcpu);
kvm_lose_fpu(vcpu);
kvm_save_gc0_index(cop0);
kvm_save_gc0_entrylo0(cop0);
kvm_save_gc0_entrylo1(cop0);
kvm_save_gc0_context(cop0);
if (cpu_guest_has_contextconfig)
kvm_save_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
kvm_save_gc0_xcontext(cop0);
if (cpu_guest_has_contextconfig)
kvm_save_gc0_xcontextconfig(cop0);
#endif
kvm_save_gc0_pagemask(cop0);
kvm_save_gc0_pagegrain(cop0);
kvm_save_gc0_wired(cop0);
/* allow wired TLB entries to be overwritten */
clear_gc0_wired(MIPSR6_WIRED_WIRED);
kvm_save_gc0_hwrena(cop0);
kvm_save_gc0_badvaddr(cop0);
kvm_save_gc0_entryhi(cop0);
kvm_save_gc0_status(cop0);
kvm_save_gc0_intctl(cop0);
kvm_save_gc0_epc(cop0);
kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
if (cpu_guest_has_userlocal)
kvm_save_gc0_userlocal(cop0);
/* only save implemented config registers */
kvm_save_gc0_config(cop0);
if (cpu_guest_has_conf1)
kvm_save_gc0_config1(cop0);
if (cpu_guest_has_conf2)
kvm_save_gc0_config2(cop0);
if (cpu_guest_has_conf3)
kvm_save_gc0_config3(cop0);
if (cpu_guest_has_conf4)
kvm_save_gc0_config4(cop0);
if (cpu_guest_has_conf5)
kvm_save_gc0_config5(cop0);
if (cpu_guest_has_conf6)
kvm_save_gc0_config6(cop0);
if (cpu_guest_has_conf7)
kvm_save_gc0_config7(cop0);
kvm_save_gc0_errorepc(cop0);
/* save KScratch registers if enabled in guest */
if (cpu_guest_has_conf4) {
if (cpu_guest_has_kscr(2))
kvm_save_gc0_kscratch1(cop0);
if (cpu_guest_has_kscr(3))
kvm_save_gc0_kscratch2(cop0);
if (cpu_guest_has_kscr(4))
kvm_save_gc0_kscratch3(cop0);
if (cpu_guest_has_kscr(5))
kvm_save_gc0_kscratch4(cop0);
if (cpu_guest_has_kscr(6))
kvm_save_gc0_kscratch5(cop0);
if (cpu_guest_has_kscr(7))
kvm_save_gc0_kscratch6(cop0);
}
if (cpu_guest_has_badinstr)
kvm_save_gc0_badinstr(cop0);
if (cpu_guest_has_badinstrp)
kvm_save_gc0_badinstrp(cop0);
if (cpu_guest_has_segments) {
kvm_save_gc0_segctl0(cop0);
kvm_save_gc0_segctl1(cop0);
kvm_save_gc0_segctl2(cop0);
}
/* save HTW registers if enabled in guest */
if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
kvm_save_gc0_pwbase(cop0);
kvm_save_gc0_pwfield(cop0);
kvm_save_gc0_pwsize(cop0);
kvm_save_gc0_pwctl(cop0);
}
kvm_vz_save_timer(vcpu);
/* save Root.GuestCtl2 in unused Guest guestctl2 register */
if (cpu_has_guestctl2)
cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
read_c0_guestctl2();
return 0;
}
/**
* kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
* @size: Number of guest VTLB entries (0 < @size <= root VTLB entries).
*
* Attempt to resize the guest VTLB by writing guest Config registers. This is
* necessary for cores with a shared root/guest TLB to avoid overlap with wired
* entries in the root VTLB.
*
* Returns: The resulting guest VTLB size.
*/
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
unsigned int config4 = 0, ret = 0, limit;
/* Write MMUSize - 1 into guest Config registers */
if (cpu_guest_has_conf1)
change_gc0_config1(MIPS_CONF1_TLBS,
(size - 1) << MIPS_CONF1_TLBS_SHIFT);
if (cpu_guest_has_conf4) {
config4 = read_gc0_config4();
if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
MIPS_CONF4_VTLBSIZEEXT_SHIFT;
} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
config4 &= ~MIPS_CONF4_MMUSIZEEXT;
config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
MIPS_CONF4_MMUSIZEEXT_SHIFT;
}
write_gc0_config4(config4);
}
/*
* Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
* would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write
* not dropped)
*/
if (cpu_has_mips_r6) {
limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
MIPSR6_WIRED_LIMIT_SHIFT;
if (size - 1 <= limit)
limit = 0;
write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
}
/* Read back MMUSize - 1 */
back_to_back_c0_hazard();
if (cpu_guest_has_conf1)
ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
MIPS_CONF1_TLBS_SHIFT;
if (config4) {
if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
MIPS_CONF1_TLBS_SIZE;
else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
MIPS_CONF1_TLBS_SIZE;
}
return ret + 1;
}
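/*
 * Per-CPU hardware enable: partition (Octeon III) or resize (shared-TLB
 * cores) the TLB between root and guest, grant the guest direct control of
 * selected features via GuestCtl0, and initialise GuestID state if present.
 */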
static int kvm_vz_hardware_enable(void)
{
unsigned int mmu_size, guest_mmu_size, ftlb_size;
u64 guest_cvmctl, cvmvmconfig;
switch (current_cpu_type()) {
case CPU_CAVIUM_OCTEON3:
/* Set up guest timer/perfcount IRQ lines */
guest_cvmctl = read_gc0_cvmctl();
guest_cvmctl &= ~CVMCTL_IPTI;
guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
guest_cvmctl &= ~CVMCTL_IPPCI;
guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
write_gc0_cvmctl(guest_cvmctl);
cvmvmconfig = read_c0_cvmvmconfig();
/* No I/O hole translation. */
cvmvmconfig |= CVMVMCONF_DGHT;
/* Halve the root MMU size */
mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
>> CVMVMCONF_MMUSIZEM1_S) + 1;
guest_mmu_size = mmu_size / 2;
mmu_size -= guest_mmu_size;
cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
cvmvmconfig |= mmu_size - 1;
write_c0_cvmvmconfig(cvmvmconfig);
/* Update our records */
current_cpu_data.tlbsize = mmu_size;
current_cpu_data.tlbsizevtlb = mmu_size;
current_cpu_data.guest.tlbsize = guest_mmu_size;
/* Flush moved entries in new (guest) context */
kvm_vz_local_flush_guesttlb_all();
break;
default:
/*
* ImgTec cores tend to use a shared root/guest TLB. To avoid
* overlap of root wired and guest entries, the guest TLB may
* need resizing.
*/
mmu_size = current_cpu_data.tlbsizevtlb;
ftlb_size = current_cpu_data.tlbsize - mmu_size;
/* Try switching to maximum guest VTLB size for flush */
guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
kvm_vz_local_flush_guesttlb_all();
/*
* Reduce to make space for root wired entries and at least 2
* root non-wired entries. This does assume that long-term wired
* entries won't be added later.
*/
guest_mmu_size = mmu_size - num_wired_entries() - 2;
guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
/*
* Write the VTLB size, but if another CPU has already written,
* check it matches or we won't provide a consistent view to the
* guest. If this ever happens it suggests an asymmetric number
* of wired entries.
*/
if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
"Available guest VTLB size mismatch"))
return -EINVAL;
break;
}
/*
* Enable virtualization features granting guest direct control of
* certain features:
* CP0=1: Guest coprocessor 0 context.
* AT=Guest: Guest MMU.
* CG=1: Hit (virtual address) CACHE operations (optional).
* CF=1: Guest Config registers.
* CGI=1: Indexed flush CACHE operations (optional).
*/
write_c0_guestctl0(MIPS_GCTL0_CP0 |
(MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
MIPS_GCTL0_CG | MIPS_GCTL0_CF);
if (cpu_has_guestctl0ext) {
if (current_cpu_type() != CPU_LOONGSON64)
set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
else
clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
}
if (cpu_has_guestid) {
write_c0_guestctl1(0);
kvm_vz_local_flush_roottlb_all_guests();
GUESTID_MASK = current_cpu_data.guestid_mask;
GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
GUESTID_VERSION_MASK = ~GUESTID_MASK;
current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
}
/* clear any pending injected virtual guest interrupts */
if (cpu_has_guestctl2)
clear_c0_guestctl2(0x3f << 10);
#ifdef CONFIG_CPU_LOONGSON64
/* Control guest CCA attribute */
if (cpu_has_csr())
csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
#endif
return 0;
}
static void kvm_vz_hardware_disable(void)
{
u64 cvmvmconfig;
unsigned int mmu_size;
/* Flush any remaining guest TLB entries */
kvm_vz_local_flush_guesttlb_all();
switch (current_cpu_type()) {
case CPU_CAVIUM_OCTEON3:
/*
* Allocate whole TLB for root. Existing guest TLB entries will
* change ownership to the root TLB. We should be safe though as
* they've already been flushed above while in guest TLB.
*/
cvmvmconfig = read_c0_cvmvmconfig();
mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
>> CVMVMCONF_MMUSIZEM1_S) + 1;
cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
cvmvmconfig |= mmu_size - 1;
write_c0_cvmvmconfig(cvmvmconfig);
/* Update our records */
current_cpu_data.tlbsize = mmu_size;
current_cpu_data.tlbsizevtlb = mmu_size;
current_cpu_data.guest.tlbsize = 0;
/* Flush moved entries in new (root) context */
local_flush_tlb_all();
break;
}
if (cpu_has_guestid) {
write_c0_guestctl1(0);
kvm_vz_local_flush_roottlb_all_guests();
}
}
static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
int r;
switch (ext) {
case KVM_CAP_MIPS_VZ:
/* we wouldn't be here unless cpu_has_vz */
r = 1;
break;
#ifdef CONFIG_64BIT
case KVM_CAP_MIPS_64BIT:
/* We support 64-bit registers/operations and addresses */
r = 2;
break;
#endif
case KVM_CAP_IOEVENTFD:
r = 1;
break;
default:
r = 0;
break;
}
return r;
}
static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
int i;
for_each_possible_cpu(i)
vcpu->arch.vzguestid[i] = 0;
return 0;
}
static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
int cpu;
/*
* If the VCPU is freed and reused as another VCPU, we don't want the
* matching pointer wrongly hanging around in last_vcpu[] or
* last_exec_vcpu[].
*/
for_each_possible_cpu(cpu) {
if (last_vcpu[cpu] == vcpu)
last_vcpu[cpu] = NULL;
if (last_exec_vcpu[cpu] == vcpu)
last_exec_vcpu[cpu] = NULL;
}
}
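/*
 * One-time VCPU setup: start the virtual Count/Compare timer (defaulting to
 * 100 MHz when the host frequency is unknown or beyond what the soft timer
 * handles) and initialise the software copy of the guest CP0 registers to
 * architectural reset state, with the PC at the reset vector.
 */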
static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
/*
* Start off the timer at the same frequency as the host timer, but the
* soft timer doesn't handle frequencies greater than 1GHz yet.
*/
if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
count_hz = mips_hpt_frequency;
kvm_mips_init_count(vcpu, count_hz);
/*
* Initialize guest register state to valid architectural reset state.
*/
/* PageGrain */
if (cpu_has_mips_r5 || cpu_has_mips_r6)
kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
/* Wired */
if (cpu_has_mips_r6)
kvm_write_sw_gc0_wired(cop0,
read_gc0_wired() & MIPSR6_WIRED_LIMIT);
/* Status */
kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
if (cpu_has_mips_r5 || cpu_has_mips_r6)
kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
/* IntCtl */
kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
/* PRId */
kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
/* EBase */
kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
/* Config */
kvm_save_gc0_config(cop0);
/* architecturally writable (e.g. from guest) */
kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
_page_cachable_default >> _CACHE_SHIFT);
/* architecturally read only, but maybe writable from root */
kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
if (cpu_guest_has_conf1) {
kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
/* Config1 */
kvm_save_gc0_config1(cop0);
/* architecturally read only, but maybe writable from root */
kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
MIPS_CONF1_MD |
MIPS_CONF1_PC |
MIPS_CONF1_WR |
MIPS_CONF1_CA |
MIPS_CONF1_FP);
}
if (cpu_guest_has_conf2) {
kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
/* Config2 */
kvm_save_gc0_config2(cop0);
}
if (cpu_guest_has_conf3) {
kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
/* Config3 */
kvm_save_gc0_config3(cop0);
/* architecturally writable (e.g. from guest) */
kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
/* architecturally read only, but maybe writable from root */
kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
MIPS_CONF3_BPG |
MIPS_CONF3_ULRI |
MIPS_CONF3_DSP |
MIPS_CONF3_CTXTC |
MIPS_CONF3_ITL |
MIPS_CONF3_LPA |
MIPS_CONF3_VEIC |
MIPS_CONF3_VINT |
MIPS_CONF3_SP |
MIPS_CONF3_CDMM |
MIPS_CONF3_MT |
MIPS_CONF3_SM |
MIPS_CONF3_TL);
}
if (cpu_guest_has_conf4) {
kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
/* Config4 */
kvm_save_gc0_config4(cop0);
}
if (cpu_guest_has_conf5) {
kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
/* Config5 */
kvm_save_gc0_config5(cop0);
/* architecturally writable (e.g. from guest) */
kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
MIPS_CONF5_CV |
MIPS_CONF5_MSAEN |
MIPS_CONF5_UFE |
MIPS_CONF5_FRE |
MIPS_CONF5_SBRI |
MIPS_CONF5_UFR);
/* architecturally read only, but maybe writable from root */
kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
}
if (cpu_guest_has_contextconfig) {
/* ContextConfig */
kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
#ifdef CONFIG_64BIT
/* XContextConfig */
/* bits SEGBITS-13+3:4 set */
kvm_write_sw_gc0_xcontextconfig(cop0,
((1ull << (cpu_vmbits - 13)) - 1) << 4);
#endif
}
/* Implementation dependent, use the legacy layout */
if (cpu_guest_has_segments) {
/* SegCtl0, SegCtl1, SegCtl2 */
kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
(_page_cachable_default >> _CACHE_SHIFT) <<
(16 + MIPS_SEGCFG_C_SHIFT));
kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
}
/* reset HTW registers */
if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
/* PWField */
kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
/* PWSize */
kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
}
/* start with no pending virtual guest interrupts */
if (cpu_has_guestctl2)
cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;
/* Put PC at reset vector */
vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
return 0;
}
static void kvm_vz_prepare_flush_shadow(struct kvm *kvm)
{
if (!cpu_has_guestid) {
/*
* For each CPU there is a single GPA ASID used by all VCPUs in
* the VM, so it doesn't make sense for the VCPUs to handle
* invalidation of these ASIDs individually.
*
* Instead mark all CPUs as needing ASID invalidation in
* asid_flush_mask, and kvm_flush_remote_tlbs(kvm) will
* kick any running VCPUs so they check asid_flush_mask.
*/
cpumask_setall(&kvm->arch.asid_flush_mask);
}
}
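/*
 * Re-entry path after handling an exit without returning to userspace:
 * re-check pending requests and reload TLB state, preserving the wired guest
 * TLB entries if the requests may have clobbered the guest TLB.
 */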
static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
int preserve_guest_tlb;
preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
if (preserve_guest_tlb)
kvm_vz_vcpu_save_wired(vcpu);
kvm_vz_vcpu_load_tlb(vcpu, cpu);
if (preserve_guest_tlb)
kvm_vz_vcpu_load_wired(vcpu);
}
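/*
 * Enter the guest: deliver pending interrupts, honour outstanding requests,
 * load guest TLB state for this CPU, then call the low-level guest entry
 * code, saving the wired guest TLB entries again on return.
 */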
static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
int r;
kvm_vz_acquire_htimer(vcpu);
/* Check if we have any exceptions/interrupts pending */
kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());
kvm_vz_check_requests(vcpu, cpu);
kvm_vz_vcpu_load_tlb(vcpu, cpu);
kvm_vz_vcpu_load_wired(vcpu);
r = vcpu->arch.vcpu_run(vcpu);
kvm_vz_vcpu_save_wired(vcpu);
return r;
}
static struct kvm_mips_callbacks kvm_vz_callbacks = {
.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
.handle_addr_err_st = kvm_trap_vz_no_handler,
.handle_addr_err_ld = kvm_trap_vz_no_handler,
.handle_syscall = kvm_trap_vz_no_handler,
.handle_res_inst = kvm_trap_vz_no_handler,
.handle_break = kvm_trap_vz_no_handler,
.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
.handle_guest_exit = kvm_trap_vz_handle_guest_exit,
.hardware_enable = kvm_vz_hardware_enable,
.hardware_disable = kvm_vz_hardware_disable,
.check_extension = kvm_vz_check_extension,
.vcpu_init = kvm_vz_vcpu_init,
.vcpu_uninit = kvm_vz_vcpu_uninit,
.vcpu_setup = kvm_vz_vcpu_setup,
.prepare_flush_shadow = kvm_vz_prepare_flush_shadow,
.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
.queue_timer_int = kvm_vz_queue_timer_int_cb,
.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
.queue_io_int = kvm_vz_queue_io_int_cb,
.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
.irq_deliver = kvm_vz_irq_deliver_cb,
.irq_clear = kvm_vz_irq_clear_cb,
.num_regs = kvm_vz_num_regs,
.copy_reg_indices = kvm_vz_copy_reg_indices,
.get_one_reg = kvm_vz_get_one_reg,
.set_one_reg = kvm_vz_set_one_reg,
.vcpu_load = kvm_vz_vcpu_load,
.vcpu_put = kvm_vz_vcpu_put,
.vcpu_run = kvm_vz_vcpu_run,
.vcpu_reenter = kvm_vz_vcpu_reenter,
};
/* FIXME: Get rid of the callbacks now that trap-and-emulate is gone. */
const struct kvm_mips_callbacks * const kvm_mips_callbacks = &kvm_vz_callbacks;
int kvm_mips_emulation_init(void)
{
if (!cpu_has_vz)
return -ENODEV;
/*
* VZ requires at least 2 KScratch registers, so it should have been
* possible to allocate pgd_reg.
*/
if (WARN(pgd_reg == -1,
"pgd_reg not allocated even though cpu_has_vz\n"))
return -ENODEV;
pr_info("Starting KVM with MIPS VZ extensions\n");
return 0;
}
| linux-master | arch/mips/kvm/vz.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS: Hypercall handling.
*
* Copyright (C) 2015 Imagination Technologies Ltd.
*/
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm_para.h>
#define MAX_HYPCALL_ARGS 4
enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
union mips_instruction inst)
{
unsigned int code = (inst.co_format.code >> 5) & 0x3ff;
kvm_debug("[%#lx] HYPCALL %#03x\n", vcpu->arch.pc, code);
switch (code) {
case 0:
return EMULATE_HYPERCALL;
default:
return EMULATE_FAIL;
};
}
static int kvm_mips_hypercall(struct kvm_vcpu *vcpu, unsigned long num,
const unsigned long *args, unsigned long *hret)
{
/* Report unimplemented hypercall to guest */
*hret = -KVM_ENOSYS;
return RESUME_GUEST;
}
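/*
 * Register convention as read below: the guest passes the hypercall number
 * in v0 ($2) and up to MAX_HYPCALL_ARGS arguments in a0-a3 ($4-$7); the
 * result is written back to v0. Hypothetical guest-side sketch (assumes an
 * assembler that accepts the VZ hypcall mnemonic via .set virt):
 *
 *	register unsigned long v0 asm("$2") = 0;   // hypercall number
 *	register unsigned long a0 asm("$4") = 1;   // first argument
 *	asm volatile(".set push; .set virt; hypcall; .set pop"
 *		     : "+r"(v0) : "r"(a0) : "memory");
 *	// v0 now holds the result (-KVM_ENOSYS from this handler)
 */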
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu)
{
unsigned long num, args[MAX_HYPCALL_ARGS];
/* read hypcall number and arguments */
num = vcpu->arch.gprs[2]; /* v0 */
args[0] = vcpu->arch.gprs[4]; /* a0 */
args[1] = vcpu->arch.gprs[5]; /* a1 */
args[2] = vcpu->arch.gprs[6]; /* a2 */
args[3] = vcpu->arch.gprs[7]; /* a3 */
return kvm_mips_hypercall(vcpu, num,
args, &vcpu->arch.gprs[2] /* v0 */);
}
| linux-master | arch/mips/kvm/hypcall.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
* TLB handlers run from KSEG0
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <[email protected]>
*/
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);
static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;
if (cpu_has_guestid)
return 0;
else
return cpu_asid(smp_processor_id(), gpa_mm);
}
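/*
 * Probe the root TLB for @entryhi and, on a match, overwrite the entry with
 * a unique invalid one. Clobbers EntryHi (and EntryLo0/1 on a match); the
 * caller saves and restores these with interrupts disabled. Returns the
 * probed index (negative if nothing matched).
 */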
static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
int idx;
write_c0_entryhi(entryhi);
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
idx = read_c0_index();
BUG_ON(idx >= current_cpu_data.tlbsize);
if (idx >= 0) {
write_c0_entryhi(UNIQUE_ENTRYHI(idx));
write_c0_entrylo0(0);
write_c0_entrylo1(0);
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
}
return idx;
}
/* GuestID management */
/**
* clear_root_gid() - Set GuestCtl1.RID for normal root operation.
*/
static inline void clear_root_gid(void)
{
if (cpu_has_guestid) {
clear_c0_guestctl1(MIPS_GCTL1_RID);
mtc0_tlbw_hazard();
}
}
/**
* set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
*
* Sets the root GuestID to match the current guest GuestID, for TLB operation
* on the GPA->RPA mappings in the root TLB.
*
* The caller must be sure to disable HTW while the root GID is set, and
* possibly longer if TLB registers are modified.
*/
static inline void set_root_gid_to_guest_gid(void)
{
unsigned int guestctl1;
if (cpu_has_guestid) {
back_to_back_c0_hazard();
guestctl1 = read_c0_guestctl1();
guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
<< MIPS_GCTL1_RID_SHIFT;
write_c0_guestctl1(guestctl1);
mtc0_tlbw_hazard();
}
}
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
int idx;
unsigned long flags, old_entryhi;
local_irq_save(flags);
htw_stop();
/* Set root GuestID for root probe and write of guest TLB entry */
set_root_gid_to_guest_gid();
old_entryhi = read_c0_entryhi();
idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
kvm_mips_get_root_asid(vcpu));
write_c0_entryhi(old_entryhi);
clear_root_gid();
mtc0_tlbw_hazard();
htw_start();
local_irq_restore(flags);
/*
* We don't want to get reserved instruction exceptions for missing tlb
* entries.
*/
if (cpu_has_vtag_icache)
flush_icache_all();
if (idx > 0)
kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
__func__, (va & VPN2_MASK) |
kvm_mips_get_root_asid(vcpu), idx);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
/**
* kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
* @vcpu: KVM VCPU pointer.
 * @gva:	Guest virtual address in a TLB mapped guest segment.
* @gpa: Pointer to output guest physical address it maps to.
*
* Converts a guest virtual address in a guest TLB mapped segment to a guest
* physical address, by probing the guest TLB.
*
* Returns: 0 if guest TLB mapping exists for @gva. *@gpa will have been
* written.
* -EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
* have been written.
*/
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
unsigned long *gpa)
{
unsigned long o_entryhi, o_entrylo[2], o_pagemask;
unsigned int o_index;
unsigned long entrylo[2], pagemask, pagemaskbit, pa;
unsigned long flags;
int index;
/* Probe the guest TLB for a mapping */
local_irq_save(flags);
/* Set root GuestID for root probe of guest TLB entry */
htw_stop();
set_root_gid_to_guest_gid();
o_entryhi = read_gc0_entryhi();
o_index = read_gc0_index();
write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
mtc0_tlbw_hazard();
guest_tlb_probe();
tlb_probe_hazard();
index = read_gc0_index();
if (index < 0) {
/* No match, fail */
write_gc0_entryhi(o_entryhi);
write_gc0_index(o_index);
clear_root_gid();
htw_start();
local_irq_restore(flags);
return -EFAULT;
}
/* Match! read the TLB entry */
o_entrylo[0] = read_gc0_entrylo0();
o_entrylo[1] = read_gc0_entrylo1();
o_pagemask = read_gc0_pagemask();
mtc0_tlbr_hazard();
guest_tlb_read();
tlb_read_hazard();
entrylo[0] = read_gc0_entrylo0();
entrylo[1] = read_gc0_entrylo1();
pagemask = ~read_gc0_pagemask() & ~0x1fffl;
write_gc0_entryhi(o_entryhi);
write_gc0_index(o_index);
write_gc0_entrylo0(o_entrylo[0]);
write_gc0_entrylo1(o_entrylo[1]);
write_gc0_pagemask(o_pagemask);
clear_root_gid();
htw_start();
local_irq_restore(flags);
/* Select one of the EntryLo values and interpret the GPA */
pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
pa = entrylo[!!(gva & pagemaskbit)];
/*
* TLB entry may have become invalid since TLB probe if physical FTLB
* entries are shared between threads (e.g. I6400).
*/
if (!(pa & ENTRYLO_V))
return -EFAULT;
/*
* Note, this doesn't take guest MIPS32 XPA into account, where PFN is
* split with XI/RI in the middle.
*/
pa = (pa << 6) & ~0xfffl;
pa |= gva & ~(pagemask | pagemaskbit);
*gpa = pa;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);
/**
* kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
* guests.
*
* Invalidate all entries in root tlb which are GPA mappings.
*/
void kvm_vz_local_flush_roottlb_all_guests(void)
{
unsigned long flags;
unsigned long old_entryhi, old_pagemask, old_guestctl1;
int entry;
if (WARN_ON(!cpu_has_guestid))
return;
local_irq_save(flags);
htw_stop();
/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
old_entryhi = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
old_guestctl1 = read_c0_guestctl1();
/*
* Invalidate guest entries in root TLB while leaving root entries
* intact when possible.
*/
for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
write_c0_index(entry);
mtc0_tlbw_hazard();
tlb_read();
tlb_read_hazard();
/* Don't invalidate non-guest (RVA) mappings in the root TLB */
if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
continue;
/* Make sure all entries differ. */
write_c0_entryhi(UNIQUE_ENTRYHI(entry));
write_c0_entrylo0(0);
write_c0_entrylo1(0);
write_c0_guestctl1(0);
mtc0_tlbw_hazard();
tlb_write_indexed();
}
write_c0_entryhi(old_entryhi);
write_c0_pagemask(old_pagemask);
write_c0_guestctl1(old_guestctl1);
tlbw_use_hazard();
htw_start();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);
/**
* kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
*
* Invalidate all entries in guest tlb irrespective of guestid.
*/
void kvm_vz_local_flush_guesttlb_all(void)
{
unsigned long flags;
unsigned long old_index;
unsigned long old_entryhi;
unsigned long old_entrylo[2];
unsigned long old_pagemask;
int entry;
u64 cvmmemctl2 = 0;
local_irq_save(flags);
/* Preserve all clobbered guest registers */
old_index = read_gc0_index();
old_entryhi = read_gc0_entryhi();
old_entrylo[0] = read_gc0_entrylo0();
old_entrylo[1] = read_gc0_entrylo1();
old_pagemask = read_gc0_pagemask();
switch (current_cpu_type()) {
case CPU_CAVIUM_OCTEON3:
/* Inhibit machine check due to multiple matching TLB entries */
cvmmemctl2 = read_c0_cvmmemctl2();
cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
write_c0_cvmmemctl2(cvmmemctl2);
break;
}
/* Invalidate guest entries in guest TLB */
write_gc0_entrylo0(0);
write_gc0_entrylo1(0);
write_gc0_pagemask(0);
for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
/* Make sure all entries differ. */
write_gc0_index(entry);
write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
mtc0_tlbw_hazard();
guest_tlb_write_indexed();
}
if (cvmmemctl2) {
cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
write_c0_cvmmemctl2(cvmmemctl2);
}
write_gc0_index(old_index);
write_gc0_entryhi(old_entryhi);
write_gc0_entrylo0(old_entrylo[0]);
write_gc0_entrylo1(old_entrylo[1]);
write_gc0_pagemask(old_pagemask);
tlbw_use_hazard();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);
/**
* kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
* @buf: Buffer to write TLB entries into.
* @index: Start index.
* @count: Number of entries to save.
*
* Save a range of guest TLB entries. The caller must ensure interrupts are
* disabled.
*/
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
unsigned int count)
{
unsigned int end = index + count;
unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
unsigned int guestctl1 = 0;
int old_index, i;
/* Save registers we're about to clobber */
old_index = read_gc0_index();
old_entryhi = read_gc0_entryhi();
old_entrylo0 = read_gc0_entrylo0();
old_entrylo1 = read_gc0_entrylo1();
old_pagemask = read_gc0_pagemask();
/* Set root GuestID for root probe */
htw_stop();
set_root_gid_to_guest_gid();
if (cpu_has_guestid)
guestctl1 = read_c0_guestctl1();
/* Read each entry from guest TLB */
for (i = index; i < end; ++i, ++buf) {
write_gc0_index(i);
mtc0_tlbr_hazard();
guest_tlb_read();
tlb_read_hazard();
if (cpu_has_guestid &&
(read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
/* Entry invalid or belongs to another guest */
buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
buf->tlb_lo[0] = 0;
buf->tlb_lo[1] = 0;
buf->tlb_mask = 0;
} else {
/* Entry belongs to the right guest */
buf->tlb_hi = read_gc0_entryhi();
buf->tlb_lo[0] = read_gc0_entrylo0();
buf->tlb_lo[1] = read_gc0_entrylo1();
buf->tlb_mask = read_gc0_pagemask();
}
}
/* Clear root GuestID again */
clear_root_gid();
htw_start();
/* Restore clobbered registers */
write_gc0_index(old_index);
write_gc0_entryhi(old_entryhi);
write_gc0_entrylo0(old_entrylo0);
write_gc0_entrylo1(old_entrylo1);
write_gc0_pagemask(old_pagemask);
tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);
/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
* @buf: Buffer to read TLB entries from.
* @index: Start index.
* @count: Number of entries to load.
*
* Load a range of guest TLB entries. The caller must ensure interrupts are
* disabled.
*/
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
unsigned int count)
{
unsigned int end = index + count;
unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
int old_index, i;
/* Save registers we're about to clobber */
old_index = read_gc0_index();
old_entryhi = read_gc0_entryhi();
old_entrylo0 = read_gc0_entrylo0();
old_entrylo1 = read_gc0_entrylo1();
old_pagemask = read_gc0_pagemask();
/* Set root GuestID for root probe */
htw_stop();
set_root_gid_to_guest_gid();
/* Write each entry to guest TLB */
for (i = index; i < end; ++i, ++buf) {
write_gc0_index(i);
write_gc0_entryhi(buf->tlb_hi);
write_gc0_entrylo0(buf->tlb_lo[0]);
write_gc0_entrylo1(buf->tlb_lo[1]);
write_gc0_pagemask(buf->tlb_mask);
mtc0_tlbw_hazard();
guest_tlb_write_indexed();
}
/* Clear root GuestID again */
clear_root_gid();
htw_start();
/* Restore clobbered registers */
write_gc0_index(old_index);
write_gc0_entryhi(old_entryhi);
write_gc0_entrylo0(old_entrylo0);
write_gc0_entrylo1(old_entrylo1);
write_gc0_pagemask(old_pagemask);
tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
#ifdef CONFIG_CPU_LOONGSON64
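/*
 * Loongson-3 helpers: invalidate guest TLB entries with guest_tlbinvf()
 * under the root GuestID, then flush the ITLB/DTLB via the Diag register.
 */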
void kvm_loongson_clear_guest_vtlb(void)
{
int idx = read_gc0_index();
/* Set root GuestID for root probe and write of guest TLB entry */
set_root_gid_to_guest_gid();
write_gc0_index(0);
guest_tlbinvf();
write_gc0_index(idx);
clear_root_gid();
set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
}
EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_vtlb);
void kvm_loongson_clear_guest_ftlb(void)
{
int i;
int idx = read_gc0_index();
/* Set root GuestID for root probe and write of guest TLB entry */
set_root_gid_to_guest_gid();
for (i = current_cpu_data.tlbsizevtlb;
i < (current_cpu_data.tlbsizevtlb +
current_cpu_data.tlbsizeftlbsets);
i++) {
write_gc0_index(i);
guest_tlbinvf();
}
write_gc0_index(idx);
clear_root_gid();
set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
}
EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb);
#endif
| linux-master | arch/mips/kvm/tlb.c |