python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
* Copyright (C) 2009 Lemote, Inc.
* Author: Zhangjin Wu, [email protected]
*/
#include <linux/init.h>
#include <linux/pm.h>
#include <asm/idle.h>
#include <asm/reboot.h>
#include <loongson.h>
/*
 * Reboot by jumping to the firmware entry point at LOONGSON_BOOT_BASE.
 *
 * With CONFIG_CPU_JUMP_WORKAROUNDS set (Loongson2F jump-instruction
 * erratum) the jump is issued explicitly from inline asm via a register
 * ("jr") instead of an indirect C call.  Does not return.
 */
static inline void loongson_reboot(void)
{
#ifndef CONFIG_CPU_JUMP_WORKAROUNDS
/* call straight through the ioremapped boot vector */
((void (*)(void))ioremap(LOONGSON_BOOT_BASE, 4)) ();
#else
void (*func)(void);
func = (void *)ioremap(LOONGSON_BOOT_BASE, 4);
/* register-indirect jump; control never comes back */
__asm__ __volatile__(
" .set noat \n"
" jr %[func] \n"
" .set at \n"
: /* No outputs */
: [func] "r" (func));
#endif
}
/*
 * _machine_restart hook: run the board-specific preparation, then jump
 * back into the firmware.  @command is accepted but unused here.
 */
static void loongson_restart(char *command)
{
/* do preparation for reboot */
mach_prepare_reboot();
/* reboot via jumping to boot base address */
loongson_reboot();
}
/*
 * pm_power_off hook: trigger the board-specific shutdown sequence.
 * The generic MIPS reset code spins for us afterwards.
 */
static void loongson_poweroff(void)
{
mach_prepare_shutdown();
/*
* It needs a wait loop here, but mips/kernel/reset.c already calls
* a generic delay loop, machine_hang(), so simply return.
*/
return;
}
/*
 * _machine_halt hook: tell the user it is safe to cut power, then idle
 * the CPU forever (using the platform wait instruction when available).
 */
static void loongson_halt(void)
{
pr_notice("\n\n** You can safely turn off the power now **\n\n");
while (1) {
/* cpu_wait saves power compared to a busy spin */
if (cpu_wait)
cpu_wait();
}
}
/* Install the restart/halt/poweroff handlers during arch initcalls. */
static int __init mips_reboot_setup(void)
{
_machine_restart = loongson_restart;
_machine_halt = loongson_halt;
pm_power_off = loongson_poweroff;
return 0;
}
arch_initcall(mips_reboot_setup);
| linux-master | arch/mips/loongson2ef/common/reset.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
*/
#include <linux/export.h>
#include <linux/init.h>
#include <asm/wbflush.h>
#include <asm/bootinfo.h>
#include <loongson.h>
/*
 * __wbflush implementation: drain the CPU write buffer with a "sync"
 * (plus a nop) before subsequent accesses proceed.
 */
static void wbflush_loongson(void)
{
asm(".set\tpush\n\t"
".set\tnoreorder\n\t"
".set mips3\n\t"
"sync\n\t"
"nop\n\t"
".set\tpop\n\t"
".set mips0\n\t");
}
void (*__wbflush)(void) = wbflush_loongson;
EXPORT_SYMBOL(__wbflush);
/* No board-specific memory setup is required on these machines. */
void __init plat_mem_setup(void)
{
}
| linux-master | arch/mips/loongson2ef/common/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <linux/export.h>
#include <asm/bootinfo.h>
#include <asm/setup.h>
#include <loongson.h>
/* raw */
unsigned long loongson_uart_base;
/* ioremapped */
unsigned long _loongson_uart_base;
EXPORT_SYMBOL(loongson_uart_base);
EXPORT_SYMBOL(_loongson_uart_base);
/*
 * Select the early-console UART base address for the detected machine:
 * the fuloong2e uses a PCI I/O UART at 0x3f8, the fuloong2f/lemote2f
 * boards one at 0x2f8, and the remaining machines the CPU-provided LPC
 * serial port.  The uncached mapping is then handed to the early 8250
 * console.
 */
void prom_init_loongson_uart_base(void)
{
switch (mips_machtype) {
case MACH_LEMOTE_FL2E:
loongson_uart_base = LOONGSON_PCIIO_BASE + 0x3f8;
break;
case MACH_LEMOTE_FL2F:
case MACH_LEMOTE_LL2F:
loongson_uart_base = LOONGSON_PCIIO_BASE + 0x2f8;
break;
case MACH_LEMOTE_ML2F7:
case MACH_LEMOTE_YL2F89:
case MACH_DEXXON_GDIUM2F10:
case MACH_LEMOTE_NAS:
default:
/* The CPU provided serial port (LPC) */
loongson_uart_base = LOONGSON_LIO1_BASE + 0x3f8;
break;
}
_loongson_uart_base = TO_UNCAC(loongson_uart_base);
setup_8250_early_printk_port(_loongson_uart_base, 0, 1024);
}
| linux-master | arch/mips/loongson2ef/common/uart_base.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* loongson-specific suspend support
*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin <[email protected]>
*/
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <asm/i8259.h>
#include <asm/mipsregs.h>
#include <loongson.h>
static unsigned int __maybe_unused cached_master_mask; /* i8259A */
static unsigned int __maybe_unused cached_slave_mask;
static unsigned int __maybe_unused cached_bonito_irq_mask; /* bonito */
/*
 * Mask every interrupt source before suspend: CPU interrupts, the
 * i8259A pair (when present) and the Bonito north bridge.  The current
 * masks are cached so arch_suspend_enable_irqs() can restore them.
 */
void arch_suspend_disable_irqs(void)
{
/* disable all mips events */
local_irq_disable();
#ifdef CONFIG_I8259
/* disable all events of i8259A */
cached_slave_mask = inb(PIC_SLAVE_IMR);
cached_master_mask = inb(PIC_MASTER_IMR);
outb(0xff, PIC_SLAVE_IMR);
inb(PIC_SLAVE_IMR);
outb(0xff, PIC_MASTER_IMR);
inb(PIC_MASTER_IMR);
#endif
/* disable all events of bonito */
cached_bonito_irq_mask = LOONGSON_INTEN;
LOONGSON_INTENCLR = 0xffff;
/* read back so the write is posted before we continue */
(void)LOONGSON_INTENCLR;
}
/*
 * Undo arch_suspend_disable_irqs(): restore the cached i8259A and
 * Bonito interrupt masks and re-enable CPU interrupts.
 */
void arch_suspend_enable_irqs(void)
{
/* enable all mips events */
local_irq_enable();
#ifdef CONFIG_I8259
/* only enable the cached events of i8259A */
outb(cached_slave_mask, PIC_SLAVE_IMR);
outb(cached_master_mask, PIC_MASTER_IMR);
#endif
/* enable all cached events of bonito */
LOONGSON_INTENSET = cached_bonito_irq_mask;
/* read back so the write is posted before we continue */
(void)LOONGSON_INTENSET;
}
/*
* Setup the board-specific events for waking up loongson from wait mode
*/
/* Weak stub: board code overrides this to arm its wakeup sources. */
void __weak setup_wakeup_events(void)
{
}
/*
* Check wakeup events
*/
/*
 * Weak stub: board code overrides this to report whether a genuine
 * wakeup event occurred.  The default claims an immediate wakeup so
 * suspend can never hang on boards without an override.
 */
int __weak wakeup_loongson(void)
{
return 1;
}
/*
* If the events are really what we want to wakeup the CPU, wake it up
* otherwise put the CPU asleep again.
*/
/*
 * If the events are really what we want to wakeup the CPU, wake it up
 * otherwise put the CPU asleep again (by clearing the low CHIPCFG bits,
 * the same operation loongson_suspend_enter() uses to enter wait mode).
 */
static void wait_for_wakeup_events(void)
{
while (!wakeup_loongson())
writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
}
/*
* Stop all perf counters
*
* $24 is the control register of Loongson perf counter
*/
/*
 * Stop all perf counters before entering wait mode.
 *
 * $24 is the control register of Loongson perf counter; writing zero
 * disables it.
 */
static inline void stop_perf_counters(void)
{
__write_64bit_c0_register($24, 0, 0);
}
/*
 * Core suspend sequence: arm the wakeup sources, stop the perf
 * counters, save the CHIPCFG clock/frequency setting, drop into wait
 * mode until a real wakeup event arrives, then restore the saved
 * CHIPCFG value.
 */
static void loongson_suspend_enter(void)
{
unsigned int cached_cpu_freq;
/* setup wakeup events via enabling the IRQs */
setup_wakeup_events();
stop_perf_counters();
cached_cpu_freq = readl(LOONGSON_CHIPCFG);
/* Put CPU into wait mode */
writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
/* wait for the given events to wakeup cpu from wait mode */
wait_for_wakeup_events();
writel(cached_cpu_freq, LOONGSON_CHIPCFG);
/* make sure the MMIO writes above are posted before returning */
mmiowb();
}
/* Weak stub: board-specific pre-suspend hook. */
void __weak mach_suspend(void)
{
}
/* Weak stub: board-specific post-resume hook. */
void __weak mach_resume(void)
{
}
/*
 * platform_suspend_ops.enter: wrap the processor-specific suspend core
 * with the board suspend/resume hooks.  Always reports success.
 */
static int loongson_pm_enter(suspend_state_t state)
{
mach_suspend();
/* processor specific suspend */
loongson_suspend_enter();
mach_resume();
return 0;
}
/*
 * platform_suspend_ops.valid: this platform supports the always-on
 * state, standby and suspend-to-RAM; everything else is rejected.
 */
static int loongson_pm_valid_state(suspend_state_t state)
{
	if (state == PM_SUSPEND_ON)
		return 1;
	if (state == PM_SUSPEND_STANDBY)
		return 1;
	if (state == PM_SUSPEND_MEM)
		return 1;
	return 0;
}
static const struct platform_suspend_ops loongson_pm_ops = {
.valid = loongson_pm_valid_state,
.enter = loongson_pm_enter,
};
/* Register the Loongson suspend operations with the PM core. */
static int __init loongson_pm_init(void)
{
suspend_set_ops(&loongson_pm_ops);
return 0;
}
arch_initcall(loongson_pm_init);
| linux-master | arch/mips/loongson2ef/common/pm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Based on Ocelot Linux port, which is
* Copyright 2001 MontaVista Software Inc.
* Author: [email protected] or [email protected]
*
* Copyright 2003 ICT CAS
* Author: Michael Guo <[email protected]>
*
* Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <linux/export.h>
#include <asm/bootinfo.h>
#include <asm/fw/fw.h>
#include <loongson.h>
u32 cpu_clock_freq;
EXPORT_SYMBOL(cpu_clock_freq);
/*
 * Pull the boot parameters out of the PMON firmware environment.
 *
 * Reads "cpuclock", "memsize" and "highmemsize".  A missing memsize
 * falls back to 256 (MB); a missing cpuclock falls back to a default
 * frequency chosen per CPU revision.
 */
void __init prom_init_env(void)
{
/* pmon passes arguments in 32bit pointers */
unsigned int processor_id;
cpu_clock_freq = fw_getenvl("cpuclock");
memsize = fw_getenvl("memsize");
highmemsize = fw_getenvl("highmemsize");
if (memsize == 0)
memsize = 256;
pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
if (cpu_clock_freq == 0) {
/* was garbled to "(¤t_cpu_data)" by a bad HTML-entity decode */
processor_id = (&current_cpu_data)->processor_id;
switch (processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON2E:
cpu_clock_freq = 533080000;
break;
case PRID_REV_LOONGSON2F:
cpu_clock_freq = 797000000;
break;
default:
/* conservative fallback for unknown revisions */
cpu_clock_freq = 100000000;
break;
}
}
pr_info("CpuClock = %u\n", cpu_clock_freq);
}
| linux-master | arch/mips/loongson2ef/common/env.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <asm/mc146818-time.h>
#include <asm/time.h>
#include <asm/hpet.h>
#include <loongson.h>
#include <cs5536/cs5536_mfgpt.h>
/*
 * Clock setup: the MIPS r4k counter ticks at half the CPU clock; the
 * CS5536 MFGPT0 is set up as the tick/clockevent source.
 */
void __init plat_time_init(void)
{
/* setup mips r4k timer */
mips_hpt_frequency = cpu_clock_freq / 2;
setup_mfgpt0_timer();
}
/* Wall-clock time at boot/resume comes from the MC146818 CMOS RTC. */
void read_persistent_clock64(struct timespec64 *ts)
{
ts->tv_sec = mc146818_get_cmos_time();
/* RTC has whole-second resolution only */
ts->tv_nsec = 0;
}
| linux-master | arch/mips/loongson2ef/common/time.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* the IDE Virtual Support Module of AMD CS5536
*
* Copyright (C) 2007 Lemote, Inc.
* Author : jlliu, [email protected]
*
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <cs5536/cs5536.h>
#include <cs5536/cs5536_pci.h>
/*
 * Virtual IDE function: emulate a 32-bit write to its PCI config space
 * by translating each config register into the corresponding CS5536
 * MSR write.  Unimplemented registers are silently ignored.
 */
void pci_ide_write_reg(int reg, u32 value)
{
u32 hi = 0, lo = value;
switch (reg) {
case PCI_COMMAND:
/* bus-master enable maps to two bits in the GLIU PAE MSR */
_rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
if (value & PCI_COMMAND_MASTER)
lo |= (0x03 << 4);
else
lo &= ~(0x03 << 4);
_wrmsr(GLIU_MSR_REG(GLIU_PAE), hi, lo);
break;
case PCI_STATUS:
/* writing PCI_STATUS_PARITY clears the SB parity error flag */
if (value & PCI_STATUS_PARITY) {
_rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
if (lo & SB_PARE_ERR_FLAG) {
lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
_wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
}
}
break;
case PCI_CACHE_LINE_SIZE:
/* only the latency-timer byte is stored, in SB_CTRL's hi word */
value &= 0x0000ff00;
_rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
hi &= 0xffffff00;
hi |= (value >> 8);
_wrmsr(SB_MSR_REG(SB_CTRL), hi, lo);
break;
case PCI_BAR4_REG:
/*
 * BAR sizing handshake: an all-ones write sets a soft flag so
 * the next read reports the BAR's range instead of its address.
 */
if (value == PCI_BAR_RANGE_MASK) {
_rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
lo |= SOFT_BAR_IDE_FLAG;
_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
} else if (value & 0x01) {
/* program the I/O BAR and the matching GLIU descriptor */
_rdmsr(IDE_MSR_REG(IDE_IO_BAR), &hi, &lo);
lo = (value & 0xfffffff0) | 0x1;
_wrmsr(IDE_MSR_REG(IDE_IO_BAR), hi, lo);
value &= 0xfffffffc;
hi = 0x60000000 | ((value & 0x000ff000) >> 12);
lo = 0x000ffff0 | ((value & 0x00000fff) << 20);
_wrmsr(GLIU_MSR_REG(GLIU_IOD_BM2), hi, lo);
}
break;
case PCI_IDE_CFG_REG:
/* magic signature routes the pins to flash instead of IDE */
if (value == CS5536_IDE_FLASH_SIGNATURE) {
_rdmsr(DIVIL_MSR_REG(DIVIL_BALL_OPTS), &hi, &lo);
lo |= 0x01;
_wrmsr(DIVIL_MSR_REG(DIVIL_BALL_OPTS), hi, lo);
} else {
_rdmsr(IDE_MSR_REG(IDE_CFG), &hi, &lo);
lo = value;
_wrmsr(IDE_MSR_REG(IDE_CFG), hi, lo);
}
break;
/* the remaining registers map 1:1 onto the low word of an IDE MSR */
case PCI_IDE_DTC_REG:
_rdmsr(IDE_MSR_REG(IDE_DTC), &hi, &lo);
lo = value;
_wrmsr(IDE_MSR_REG(IDE_DTC), hi, lo);
break;
case PCI_IDE_CAST_REG:
_rdmsr(IDE_MSR_REG(IDE_CAST), &hi, &lo);
lo = value;
_wrmsr(IDE_MSR_REG(IDE_CAST), hi, lo);
break;
case PCI_IDE_ETC_REG:
_rdmsr(IDE_MSR_REG(IDE_ETC), &hi, &lo);
lo = value;
_wrmsr(IDE_MSR_REG(IDE_ETC), hi, lo);
break;
case PCI_IDE_PM_REG:
_rdmsr(IDE_MSR_REG(IDE_INTERNAL_PM), &hi, &lo);
lo = value;
_wrmsr(IDE_MSR_REG(IDE_INTERNAL_PM), hi, lo);
break;
default:
break;
}
}
/*
 * Virtual IDE function: emulate a 32-bit read of its PCI config space
 * by assembling the value from CS5536 MSRs and fixed identifiers.
 * Unimplemented registers read as 0.
 */
u32 pci_ide_read_reg(int reg)
{
u32 conf_data = 0;
u32 hi, lo;
switch (reg) {
case PCI_VENDOR_ID:
conf_data =
CFG_PCI_VENDOR_ID(CS5536_IDE_DEVICE_ID, CS5536_VENDOR_ID);
break;
case PCI_COMMAND:
/* I/O decode is "enabled" when the I/O BAR holds an address */
_rdmsr(IDE_MSR_REG(IDE_IO_BAR), &hi, &lo);
if (lo & 0xfffffff0)
conf_data |= PCI_COMMAND_IO;
/* bus mastering state lives in the GLIU PAE MSR (bits 5:4) */
_rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
if ((lo & 0x30) == 0x30)
conf_data |= PCI_COMMAND_MASTER;
break;
case PCI_STATUS:
conf_data |= PCI_STATUS_66MHZ;
conf_data |= PCI_STATUS_FAST_BACK;
_rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
if (lo & SB_PARE_ERR_FLAG)
conf_data |= PCI_STATUS_PARITY;
conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
break;
case PCI_CLASS_REVISION:
/* revision byte from the IDE capability MSR, fixed class code */
_rdmsr(IDE_MSR_REG(IDE_CAP), &hi, &lo);
conf_data = lo & 0x000000ff;
conf_data |= (CS5536_IDE_CLASS_CODE << 8);
break;
case PCI_CACHE_LINE_SIZE:
_rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
hi &= 0x000000f8;
conf_data = CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE, hi);
break;
case PCI_BAR4_REG:
/*
 * If the sizing flag set by a BAR write is pending, report the
 * BAR range once and clear the flag; otherwise report the
 * programmed I/O address.
 */
_rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
if (lo & SOFT_BAR_IDE_FLAG) {
conf_data = CS5536_IDE_RANGE |
PCI_BASE_ADDRESS_SPACE_IO;
lo &= ~SOFT_BAR_IDE_FLAG;
_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
} else {
_rdmsr(IDE_MSR_REG(IDE_IO_BAR), &hi, &lo);
conf_data = lo & 0xfffffff0;
conf_data |= 0x01;
conf_data &= ~0x02;
}
break;
case PCI_CARDBUS_CIS:
conf_data = PCI_CARDBUS_CIS_POINTER;
break;
case PCI_SUBSYSTEM_VENDOR_ID:
conf_data =
CFG_PCI_VENDOR_ID(CS5536_IDE_SUB_ID, CS5536_SUB_VENDOR_ID);
break;
case PCI_ROM_ADDRESS:
conf_data = PCI_EXPANSION_ROM_BAR;
break;
case PCI_CAPABILITY_LIST:
conf_data = PCI_CAPLIST_POINTER;
break;
case PCI_INTERRUPT_LINE:
conf_data =
CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_IDE_INTR);
break;
/* device-specific registers map 1:1 onto the low word of an MSR */
case PCI_IDE_CFG_REG:
_rdmsr(IDE_MSR_REG(IDE_CFG), &hi, &lo);
conf_data = lo;
break;
case PCI_IDE_DTC_REG:
_rdmsr(IDE_MSR_REG(IDE_DTC), &hi, &lo);
conf_data = lo;
break;
case PCI_IDE_CAST_REG:
_rdmsr(IDE_MSR_REG(IDE_CAST), &hi, &lo);
conf_data = lo;
break;
case PCI_IDE_ETC_REG:
_rdmsr(IDE_MSR_REG(IDE_ETC), &hi, &lo);
conf_data = lo;
break;
case PCI_IDE_PM_REG:
_rdmsr(IDE_MSR_REG(IDE_INTERNAL_PM), &hi, &lo);
conf_data = lo;
break;
default:
break;
}
return conf_data;
}
| linux-master | arch/mips/loongson2ef/common/cs5536/cs5536_ide.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* the OHCI Virtual Support Module of AMD CS5536
*
* Copyright (C) 2007 Lemote, Inc.
* Author : jlliu, [email protected]
*
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <cs5536/cs5536.h>
#include <cs5536/cs5536_pci.h>
/*
 * Virtual OHCI function: emulate a 32-bit write to its PCI config
 * space by translating each register into the matching CS5536 MSR
 * write.  Unimplemented registers are silently ignored.
 */
void pci_ohci_write_reg(int reg, u32 value)
{
u32 hi = 0, lo = value;
switch (reg) {
case PCI_COMMAND:
/* master/memory enables live in the hi word of the OHCI MSR */
_rdmsr(USB_MSR_REG(USB_OHCI), &hi, &lo);
if (value & PCI_COMMAND_MASTER)
hi |= PCI_COMMAND_MASTER;
else
hi &= ~PCI_COMMAND_MASTER;
if (value & PCI_COMMAND_MEMORY)
hi |= PCI_COMMAND_MEMORY;
else
hi &= ~PCI_COMMAND_MEMORY;
_wrmsr(USB_MSR_REG(USB_OHCI), hi, lo);
break;
case PCI_STATUS:
/* writing PCI_STATUS_PARITY clears the SB parity error flag */
if (value & PCI_STATUS_PARITY) {
_rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
if (lo & SB_PARE_ERR_FLAG) {
lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
_wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
}
}
break;
case PCI_BAR0_REG:
/*
 * BAR sizing handshake: an all-ones write sets a soft flag so
 * the next read reports the BAR range.  Otherwise (memory BAR,
 * bit 0 clear) program the MSR and the GLIU mapping descriptor.
 */
if (value == PCI_BAR_RANGE_MASK) {
_rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
lo |= SOFT_BAR_OHCI_FLAG;
_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
} else if ((value & 0x01) == 0x00) {
_rdmsr(USB_MSR_REG(USB_OHCI), &hi, &lo);
lo = value;
_wrmsr(USB_MSR_REG(USB_OHCI), hi, lo);
value &= 0xfffffff0;
hi = 0x40000000 | ((value & 0xff000000) >> 24);
lo = 0x000fffff | ((value & 0x00fff000) << 8);
_wrmsr(GLIU_MSR_REG(GLIU_P2D_BM3), hi, lo);
}
break;
case PCI_OHCI_INT_REG:
/* route (or unroute) the USB interrupt in the PIC mapper */
_rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
lo &= ~(0xf << PIC_YSEL_LOW_USB_SHIFT);
if (value) /* enable all the usb interrupt in PIC */
lo |= (CS5536_USB_INTR << PIC_YSEL_LOW_USB_SHIFT);
_wrmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), hi, lo);
break;
default:
break;
}
}
/*
 * Virtual OHCI function: emulate a 32-bit read of its PCI config space
 * from CS5536 MSRs and fixed identifiers.  Unimplemented registers
 * read as 0.
 */
u32 pci_ohci_read_reg(int reg)
{
u32 conf_data = 0;
u32 hi, lo;
switch (reg) {
case PCI_VENDOR_ID:
conf_data =
CFG_PCI_VENDOR_ID(CS5536_OHCI_DEVICE_ID, CS5536_VENDOR_ID);
break;
case PCI_COMMAND:
/* enables are mirrored in the hi word of the OHCI MSR */
_rdmsr(USB_MSR_REG(USB_OHCI), &hi, &lo);
if (hi & PCI_COMMAND_MASTER)
conf_data |= PCI_COMMAND_MASTER;
if (hi & PCI_COMMAND_MEMORY)
conf_data |= PCI_COMMAND_MEMORY;
break;
case PCI_STATUS:
conf_data |= PCI_STATUS_66MHZ;
conf_data |= PCI_STATUS_FAST_BACK;
_rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
if (lo & SB_PARE_ERR_FLAG)
conf_data |= PCI_STATUS_PARITY;
conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
break;
case PCI_CLASS_REVISION:
_rdmsr(USB_MSR_REG(USB_CAP), &hi, &lo);
conf_data = lo & 0x000000ff;
conf_data |= (CS5536_OHCI_CLASS_CODE << 8);
break;
case PCI_CACHE_LINE_SIZE:
conf_data =
CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE,
PCI_NORMAL_LATENCY_TIMER);
break;
case PCI_BAR0_REG:
/*
 * Report the BAR range once after a sizing write (and clear the
 * soft flag), otherwise the programmed 32-bit memory address.
 */
_rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
if (lo & SOFT_BAR_OHCI_FLAG) {
conf_data = CS5536_OHCI_RANGE |
PCI_BASE_ADDRESS_SPACE_MEMORY;
lo &= ~SOFT_BAR_OHCI_FLAG;
_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
} else {
_rdmsr(USB_MSR_REG(USB_OHCI), &hi, &lo);
conf_data = lo & 0xffffff00;
conf_data &= ~0x0000000f; /* 32bit mem */
}
break;
case PCI_CARDBUS_CIS:
conf_data = PCI_CARDBUS_CIS_POINTER;
break;
case PCI_SUBSYSTEM_VENDOR_ID:
conf_data =
CFG_PCI_VENDOR_ID(CS5536_OHCI_SUB_ID, CS5536_SUB_VENDOR_ID);
break;
case PCI_ROM_ADDRESS:
conf_data = PCI_EXPANSION_ROM_BAR;
break;
case PCI_CAPABILITY_LIST:
conf_data = PCI_CAPLIST_USB_POINTER;
break;
case PCI_INTERRUPT_LINE:
conf_data =
CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_USB_INTR);
break;
case PCI_OHCI_INT_REG:
/* reports 1 when the USB interrupt is routed in the PIC mapper */
_rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
if (((lo >> PIC_YSEL_LOW_USB_SHIFT) & 0xf) == CS5536_USB_INTR)
conf_data = 1;
break;
default:
break;
}
return conf_data;
}
| linux-master | arch/mips/loongson2ef/common/cs5536/cs5536_ohci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* the ISA Virtual Support Module of AMD CS5536
*
* Copyright (C) 2007 Lemote, Inc.
* Author : jlliu, [email protected]
*
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <linux/pci.h>
#include <cs5536/cs5536.h>
#include <cs5536/cs5536_pci.h>
/* common variables for PCI_ISA_READ/WRITE_BAR */
static const u32 divil_msr_reg[6] = {
DIVIL_MSR_REG(DIVIL_LBAR_SMB), DIVIL_MSR_REG(DIVIL_LBAR_GPIO),
DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), DIVIL_MSR_REG(DIVIL_LBAR_IRQ),
DIVIL_MSR_REG(DIVIL_LBAR_PMS), DIVIL_MSR_REG(DIVIL_LBAR_ACPI),
};
static const u32 soft_bar_flag[6] = {
SOFT_BAR_SMB_FLAG, SOFT_BAR_GPIO_FLAG, SOFT_BAR_MFGPT_FLAG,
SOFT_BAR_IRQ_FLAG, SOFT_BAR_PMS_FLAG, SOFT_BAR_ACPI_FLAG,
};
static const u32 sb_msr_reg[6] = {
SB_MSR_REG(SB_R0), SB_MSR_REG(SB_R1), SB_MSR_REG(SB_R2),
SB_MSR_REG(SB_R3), SB_MSR_REG(SB_R4), SB_MSR_REG(SB_R5),
};
static const u32 bar_space_range[6] = {
CS5536_SMB_RANGE, CS5536_GPIO_RANGE, CS5536_MFGPT_RANGE,
CS5536_IRQ_RANGE, CS5536_PMS_RANGE, CS5536_ACPI_RANGE,
};
static const int bar_space_len[6] = {
CS5536_SMB_LENGTH, CS5536_GPIO_LENGTH, CS5536_MFGPT_LENGTH,
CS5536_IRQ_LENGTH, CS5536_PMS_LENGTH, CS5536_ACPI_LENGTH,
};
/*
* enable the divil module bar space.
*
* For all the DIVIL module LBAR, you should control the DIVIL LBAR reg
* and the RCONFx(0~5) reg to use the modules.
*/
/*
 * enable the divil module bar space: set the enable bit (hi bit 0) in
 * each DIVIL LBAR MSR from SMB through PMS.
 */
static void divil_lbar_enable(void)
{
u32 hi, lo;
int offset;
/*
 * The DIVIL IRQ is not used yet. and make the RCONF0 reserved.
 */
for (offset = DIVIL_LBAR_SMB; offset <= DIVIL_LBAR_PMS; offset++) {
_rdmsr(DIVIL_MSR_REG(offset), &hi, &lo);
hi |= 0x01;
_wrmsr(DIVIL_MSR_REG(offset), hi, lo);
}
}
/*
* disable the divil module bar space.
*/
/*
 * disable the divil module bar space: clear the enable bit (hi bit 0)
 * in each DIVIL LBAR MSR from SMB through PMS.
 */
static void divil_lbar_disable(void)
{
u32 hi, lo;
int offset;
for (offset = DIVIL_LBAR_SMB; offset <= DIVIL_LBAR_PMS; offset++) {
_rdmsr(DIVIL_MSR_REG(offset), &hi, &lo);
hi &= ~0x01;
_wrmsr(DIVIL_MSR_REG(offset), hi, lo);
}
}
/*
* BAR write: write value to the n BAR
*/
/*
 * BAR write: write @value to ISA BAR @n (0..5).
 *
 * An all-ones write arms the sizing flag for the next read; an I/O
 * BAR write (bit 0 set) programs both the DIVIL LBAR MSR and the
 * matching RCONFx region descriptor.
 */
void pci_isa_write_bar(int n, u32 value)
{
u32 hi = 0, lo = value;
if (value == PCI_BAR_RANGE_MASK) {
_rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
lo |= soft_bar_flag[n];
_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
} else if (value & 0x01) {
/* NATIVE reg */
hi = 0x0000f001;
lo &= bar_space_range[n];
_wrmsr(divil_msr_reg[n], hi, lo);
/* RCONFx is 4bytes in units for I/O space */
hi = ((value & 0x000ffffc) << 12) |
((bar_space_len[n] - 4) << 12) | 0x01;
lo = ((value & 0x000ffffc) << 12) | 0x01;
_wrmsr(sb_msr_reg[n], hi, lo);
}
}
/*
* BAR read: read the n BAR
*/
/*
 * BAR read: read ISA BAR @n (0..5).
 *
 * After a sizing write this reports the BAR's range once and clears
 * the soft flag; otherwise it reports the programmed I/O address with
 * the I/O-space indicator bit set.
 */
u32 pci_isa_read_bar(int n)
{
u32 conf_data = 0;
u32 hi, lo;
_rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
if (lo & soft_bar_flag[n]) {
conf_data = bar_space_range[n] | PCI_BASE_ADDRESS_SPACE_IO;
lo &= ~soft_bar_flag[n];
_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
} else {
_rdmsr(divil_msr_reg[n], &hi, &lo);
conf_data = lo & bar_space_range[n];
conf_data |= 0x01;
conf_data &= ~0x02;
}
return conf_data;
}
/*
* isa_write: ISA write transfer
*
* We assume that this is not a bus master transfer.
*/
/*
 * isa_write: ISA write transfer
 *
 * Emulate a 32-bit write to the virtual ISA function's PCI config
 * space by translating each register into CS5536 MSR writes.
 * We assume that this is not a bus master transfer.
 */
void pci_isa_write_reg(int reg, u32 value)
{
u32 hi = 0, lo = value;
u32 temp;
switch (reg) {
case PCI_COMMAND:
/* I/O enable toggles all DIVIL local BARs at once */
if (value & PCI_COMMAND_IO)
divil_lbar_enable();
else
divil_lbar_disable();
break;
case PCI_STATUS:
/* write-one-to-clear of the enabled SB error flags */
_rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
temp = lo & 0x0000ffff;
if ((value & PCI_STATUS_SIG_TARGET_ABORT) &&
(lo & SB_TAS_ERR_EN))
temp |= SB_TAS_ERR_FLAG;
if ((value & PCI_STATUS_REC_TARGET_ABORT) &&
(lo & SB_TAR_ERR_EN))
temp |= SB_TAR_ERR_FLAG;
if ((value & PCI_STATUS_REC_MASTER_ABORT)
&& (lo & SB_MAR_ERR_EN))
temp |= SB_MAR_ERR_FLAG;
if ((value & PCI_STATUS_DETECTED_PARITY)
&& (lo & SB_PARE_ERR_EN))
temp |= SB_PARE_ERR_FLAG;
lo = temp;
_wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
break;
case PCI_CACHE_LINE_SIZE:
/* only the latency-timer byte is stored, in SB_CTRL's hi word */
value &= 0x0000ff00;
_rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
hi &= 0xffffff00;
hi |= (value >> 8);
_wrmsr(SB_MSR_REG(SB_CTRL), hi, lo);
break;
/* the six BARs share one helper */
case PCI_BAR0_REG:
pci_isa_write_bar(0, value);
break;
case PCI_BAR1_REG:
pci_isa_write_bar(1, value);
break;
case PCI_BAR2_REG:
pci_isa_write_bar(2, value);
break;
case PCI_BAR3_REG:
pci_isa_write_bar(3, value);
break;
case PCI_BAR4_REG:
pci_isa_write_bar(4, value);
break;
case PCI_BAR5_REG:
pci_isa_write_bar(5, value);
break;
case PCI_UART1_INT_REG:
_rdmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), &hi, &lo);
/* disable uart1 interrupt in PIC */
lo &= ~(0xf << 24);
if (value) /* enable uart1 interrupt in PIC */
lo |= (CS5536_UART1_INTR << 24);
_wrmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), hi, lo);
break;
case PCI_UART2_INT_REG:
_rdmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), &hi, &lo);
/* disable uart2 interrupt in PIC */
lo &= ~(0xf << 28);
if (value) /* enable uart2 interrupt in PIC */
lo |= (CS5536_UART2_INTR << 28);
_wrmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), hi, lo);
break;
case PCI_ISA_FIXUP_REG:
if (value) {
/* enable the TARGET ABORT/MASTER ABORT etc. */
_rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
lo |= 0x00000063;
_wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
}
break;
default:
/* ALL OTHER PCI CONFIG SPACE HEADER IS NOT IMPLEMENTED. */
break;
}
}
/*
* isa_read: ISA read transfers
*
* We assume that this is not a bus master transfer.
*/
/*
 * isa_read: ISA read transfers
 *
 * Emulate a 32-bit read of the virtual ISA function's PCI config
 * space from CS5536 MSRs and fixed identifiers.
 * We assume that this is not a bus master transfer.
 */
u32 pci_isa_read_reg(int reg)
{
u32 conf_data = 0;
u32 hi, lo;
switch (reg) {
case PCI_VENDOR_ID:
conf_data =
CFG_PCI_VENDOR_ID(CS5536_ISA_DEVICE_ID, CS5536_VENDOR_ID);
break;
case PCI_COMMAND:
/* we just check the first LBAR for the IO enable bit, */
/* maybe we should changed later. */
_rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_SMB), &hi, &lo);
if (hi & 0x01)
conf_data |= PCI_COMMAND_IO;
break;
case PCI_STATUS:
conf_data |= PCI_STATUS_66MHZ;
conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
conf_data |= PCI_STATUS_FAST_BACK;
_rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
if (lo & SB_TAS_ERR_FLAG)
conf_data |= PCI_STATUS_SIG_TARGET_ABORT;
if (lo & SB_TAR_ERR_FLAG)
conf_data |= PCI_STATUS_REC_TARGET_ABORT;
if (lo & SB_MAR_ERR_FLAG)
conf_data |= PCI_STATUS_REC_MASTER_ABORT;
if (lo & SB_PARE_ERR_FLAG)
conf_data |= PCI_STATUS_DETECTED_PARITY;
break;
case PCI_CLASS_REVISION:
_rdmsr(GLCP_MSR_REG(GLCP_CHIP_REV_ID), &hi, &lo);
conf_data = lo & 0x000000ff;
conf_data |= (CS5536_ISA_CLASS_CODE << 8);
break;
case PCI_CACHE_LINE_SIZE:
_rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
hi &= 0x000000f8;
conf_data = CFG_PCI_CACHE_LINE_SIZE(PCI_BRIDGE_HEADER_TYPE, hi);
break;
/*
 * we only use the LBAR of DIVIL, no RCONF used.
 * all of them are IO space.
 */
case PCI_BAR0_REG:
return pci_isa_read_bar(0);
break;
case PCI_BAR1_REG:
return pci_isa_read_bar(1);
break;
case PCI_BAR2_REG:
return pci_isa_read_bar(2);
break;
case PCI_BAR3_REG:
/*
 * NOTE(review): BAR3 reads as 0 here although the write path
 * does forward to pci_isa_write_bar(3, ...) — confirm the
 * asymmetry is intended.
 */
break;
case PCI_BAR4_REG:
return pci_isa_read_bar(4);
break;
case PCI_BAR5_REG:
return pci_isa_read_bar(5);
break;
case PCI_CARDBUS_CIS:
conf_data = PCI_CARDBUS_CIS_POINTER;
break;
case PCI_SUBSYSTEM_VENDOR_ID:
conf_data =
CFG_PCI_VENDOR_ID(CS5536_ISA_SUB_ID, CS5536_SUB_VENDOR_ID);
break;
case PCI_ROM_ADDRESS:
conf_data = PCI_EXPANSION_ROM_BAR;
break;
case PCI_CAPABILITY_LIST:
conf_data = PCI_CAPLIST_POINTER;
break;
case PCI_INTERRUPT_LINE:
/* no interrupt used here */
conf_data = CFG_PCI_INTERRUPT_LINE(0x00, 0x00);
break;
default:
break;
}
return conf_data;
}
/*
* The mfgpt timer interrupt is running early, so we must keep the south bridge
* mmio always enabled. Otherwise we may race with the PCI configuration which
* may temporarily disable it. When that happens and the timer interrupt fires,
* we are not able to clear it and the system will hang.
*/
/* Early fixup: keep the ISA bridge's MMIO decoding permanently on. */
static void cs5536_isa_mmio_always_on(struct pci_dev *dev)
{
dev->mmio_always_on = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA,
PCI_CLASS_BRIDGE_ISA, 8, cs5536_isa_mmio_always_on);
| linux-master | arch/mips/loongson2ef/common/cs5536/cs5536_isa.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* the EHCI Virtual Support Module of AMD CS5536
*
* Copyright (C) 2007 Lemote, Inc.
* Author : jlliu, [email protected]
*
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <cs5536/cs5536.h>
#include <cs5536/cs5536_pci.h>
/*
 * Virtual EHCI function: emulate a 32-bit write to its PCI config
 * space by translating each register into the matching CS5536 MSR
 * write.  Unimplemented registers are silently ignored.
 */
void pci_ehci_write_reg(int reg, u32 value)
{
u32 hi = 0, lo = value;
switch (reg) {
case PCI_COMMAND:
/* master/memory enables live in the hi word of the EHCI MSR */
_rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
if (value & PCI_COMMAND_MASTER)
hi |= PCI_COMMAND_MASTER;
else
hi &= ~PCI_COMMAND_MASTER;
if (value & PCI_COMMAND_MEMORY)
hi |= PCI_COMMAND_MEMORY;
else
hi &= ~PCI_COMMAND_MEMORY;
_wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
break;
case PCI_STATUS:
/* writing PCI_STATUS_PARITY clears the SB parity error flag */
if (value & PCI_STATUS_PARITY) {
_rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
if (lo & SB_PARE_ERR_FLAG) {
lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
_wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
}
}
break;
case PCI_BAR0_REG:
/*
 * BAR sizing handshake: an all-ones write sets a soft flag so
 * the next read reports the BAR range.  Otherwise (memory BAR,
 * bit 0 clear) program the MSR and the GLIU mapping descriptor.
 */
if (value == PCI_BAR_RANGE_MASK) {
_rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
lo |= SOFT_BAR_EHCI_FLAG;
_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
} else if ((value & 0x01) == 0x00) {
_rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
lo = value;
_wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
value &= 0xfffffff0;
hi = 0x40000000 | ((value & 0xff000000) >> 24);
lo = 0x000fffff | ((value & 0x00fff000) << 8);
_wrmsr(GLIU_MSR_REG(GLIU_P2D_BM4), hi, lo);
}
break;
case PCI_EHCI_LEGSMIEN_REG:
_rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
/*
 * NOTE(review): this mask is not inverted (unlike the FLADJ
 * case below), so every hi bit outside 21:16 is cleared here —
 * confirm against the CS5536 data book.
 */
hi &= 0x003f0000;
hi |= (value & 0x3f) << 16;
_wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
break;
case PCI_EHCI_FLADJ_REG:
/* frame-length adjustment field lives in hi bits 13:8 */
_rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
hi &= ~0x00003f00;
hi |= value & 0x00003f00;
_wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
break;
default:
break;
}
}
/*
 * Virtual EHCI function: emulate a 32-bit read of its PCI config space
 * from CS5536 MSRs and fixed identifiers.  Unimplemented registers
 * read as 0.
 */
u32 pci_ehci_read_reg(int reg)
{
u32 conf_data = 0;
u32 hi, lo;
switch (reg) {
case PCI_VENDOR_ID:
conf_data =
CFG_PCI_VENDOR_ID(CS5536_EHCI_DEVICE_ID, CS5536_VENDOR_ID);
break;
case PCI_COMMAND:
/* enables are mirrored in the hi word of the EHCI MSR */
_rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
if (hi & PCI_COMMAND_MASTER)
conf_data |= PCI_COMMAND_MASTER;
if (hi & PCI_COMMAND_MEMORY)
conf_data |= PCI_COMMAND_MEMORY;
break;
case PCI_STATUS:
conf_data |= PCI_STATUS_66MHZ;
conf_data |= PCI_STATUS_FAST_BACK;
_rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
if (lo & SB_PARE_ERR_FLAG)
conf_data |= PCI_STATUS_PARITY;
conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
break;
case PCI_CLASS_REVISION:
_rdmsr(USB_MSR_REG(USB_CAP), &hi, &lo);
conf_data = lo & 0x000000ff;
conf_data |= (CS5536_EHCI_CLASS_CODE << 8);
break;
case PCI_CACHE_LINE_SIZE:
conf_data =
CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE,
PCI_NORMAL_LATENCY_TIMER);
break;
case PCI_BAR0_REG:
/*
 * Report the BAR range once after a sizing write (and clear the
 * soft flag), otherwise the programmed memory address.
 */
_rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
if (lo & SOFT_BAR_EHCI_FLAG) {
conf_data = CS5536_EHCI_RANGE |
PCI_BASE_ADDRESS_SPACE_MEMORY;
lo &= ~SOFT_BAR_EHCI_FLAG;
_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
} else {
_rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
conf_data = lo & 0xfffff000;
}
break;
case PCI_CARDBUS_CIS:
conf_data = PCI_CARDBUS_CIS_POINTER;
break;
case PCI_SUBSYSTEM_VENDOR_ID:
conf_data =
CFG_PCI_VENDOR_ID(CS5536_EHCI_SUB_ID, CS5536_SUB_VENDOR_ID);
break;
case PCI_ROM_ADDRESS:
conf_data = PCI_EXPANSION_ROM_BAR;
break;
case PCI_CAPABILITY_LIST:
conf_data = PCI_CAPLIST_USB_POINTER;
break;
case PCI_INTERRUPT_LINE:
conf_data =
CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_USB_INTR);
break;
/* legacy-SMI and frame-adjust fields live in the EHCI MSR hi word */
case PCI_EHCI_LEGSMIEN_REG:
_rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
conf_data = (hi & 0x003f0000) >> 16;
break;
case PCI_EHCI_LEGSMISTS_REG:
_rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
conf_data = (hi & 0x3f000000) >> 24;
break;
case PCI_EHCI_FLADJ_REG:
_rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
conf_data = hi & 0x00003f00;
break;
default:
break;
}
return conf_data;
}
| linux-master | arch/mips/loongson2ef/common/cs5536/cs5536_ehci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* read/write operation to the PCI config space of CS5536
*
* Copyright (C) 2007 Lemote, Inc.
* Author : jlliu, [email protected]
*
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, [email protected]
*
* the Virtual Support Module(VSM) for virtulizing the PCI
* configure space are defined in cs5536_modulename.c respectively,
*
* after this virtulizing, user can access the PCI configure space
* directly as a normal multi-function PCI device which follows
* the PCI-2.2 spec.
*/
#include <linux/types.h>
#include <cs5536/cs5536_pci.h>
#include <cs5536/cs5536_vsm.h>
enum {
CS5536_FUNC_START = -1,
CS5536_ISA_FUNC,
reserved_func,
CS5536_IDE_FUNC,
CS5536_ACC_FUNC,
CS5536_OHCI_FUNC,
CS5536_EHCI_FUNC,
CS5536_FUNC_END,
};
static const cs5536_pci_vsm_write vsm_conf_write[] = {
[CS5536_ISA_FUNC] = pci_isa_write_reg,
[reserved_func] = NULL,
[CS5536_IDE_FUNC] = pci_ide_write_reg,
[CS5536_ACC_FUNC] = pci_acc_write_reg,
[CS5536_OHCI_FUNC] = pci_ohci_write_reg,
[CS5536_EHCI_FUNC] = pci_ehci_write_reg,
};
static const cs5536_pci_vsm_read vsm_conf_read[] = {
[CS5536_ISA_FUNC] = pci_isa_read_reg,
[reserved_func] = NULL,
[CS5536_IDE_FUNC] = pci_ide_read_reg,
[CS5536_ACC_FUNC] = pci_acc_read_reg,
[CS5536_OHCI_FUNC] = pci_ohci_read_reg,
[CS5536_EHCI_FUNC] = pci_ehci_read_reg,
};
/*
* write to PCI config space and transfer it to MSR write.
*/
void cs5536_pci_conf_write4(int function, int reg, u32 value)
{
	cs5536_pci_vsm_write write_fn;

	/* reject out-of-range functions and bad/unaligned registers */
	if (function <= CS5536_FUNC_START || function >= CS5536_FUNC_END)
		return;
	if (reg < 0 || reg > 0x100 || (reg & 0x03) != 0)
		return;

	write_fn = vsm_conf_write[function];
	if (write_fn)
		write_fn(reg, value);
}
/*
* read PCI config space and transfer it to MSR access.
*/
u32 cs5536_pci_conf_read4(int function, int reg)
{
u32 data = 0;
if ((function <= CS5536_FUNC_START) || (function >= CS5536_FUNC_END))
return 0;
if ((reg < 0) || ((reg & 0x03) != 0))
return 0;
if (reg > 0x100)
return 0xffffffff;
if (vsm_conf_read[function] != NULL)
data = vsm_conf_read[function](reg);
return data;
}
| linux-master | arch/mips/loongson2ef/common/cs5536/cs5536_pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* CS5536 General timer functions
*
* Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Yanhua, [email protected]
*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu zhangjin, [email protected]
*
* Reference: AMD Geode(TM) CS5536 Companion Device Data Book
*/
#include <linux/io.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <asm/time.h>
#include <cs5536/cs5536_mfgpt.h>
static DEFINE_RAW_SPINLOCK(mfgpt_lock);
static u32 mfgpt_base;
/*
* Initialize the MFGPT timer.
*
* This is also called after resume to bring the MFGPT into operation again.
*/
/* disable counter */
/* disable counter: clear the MFGPT0 counter-enable bit (bit 15) */
void disable_mfgpt0_counter(void)
{
outw(inw(MFGPT0_SETUP) & 0x7fff, MFGPT0_SETUP);
}
EXPORT_SYMBOL(disable_mfgpt0_counter);
/* enable counter, comparator2 to event mode, 14.318MHz clock */
/* enable counter, comparator2 to event mode, 14.318MHz clock */
void enable_mfgpt0_counter(void)
{
outw(0xe310, MFGPT0_SETUP);
}
EXPORT_SYMBOL(enable_mfgpt0_counter);
/*
 * clockevent set_state_periodic: program comparator 2 for the tick
 * period, reset the counter and start it.
 */
static int mfgpt_timer_set_periodic(struct clock_event_device *evt)
{
raw_spin_lock(&mfgpt_lock);
outw(COMPARE, MFGPT0_CMP2); /* set comparator2 */
outw(0, MFGPT0_CNT); /* set counter to 0 */
enable_mfgpt0_counter();
raw_spin_unlock(&mfgpt_lock);
return 0;
}
/*
 * clockevent set_state_shutdown: stop the counter, but only when it
 * was actually running (periodic or oneshot).
 */
static int mfgpt_timer_shutdown(struct clock_event_device *evt)
{
if (clockevent_state_periodic(evt) || clockevent_state_oneshot(evt)) {
raw_spin_lock(&mfgpt_lock);
disable_mfgpt0_counter();
raw_spin_unlock(&mfgpt_lock);
}
return 0;
}
static struct clock_event_device mfgpt_clockevent = {
.name = "mfgpt",
.features = CLOCK_EVT_FEAT_PERIODIC,
/* The oneshot mode have very high deviation, don't use it! */
.set_state_shutdown = mfgpt_timer_shutdown,
.set_state_periodic = mfgpt_timer_set_periodic,
.irq = CS5536_MFGPT_INTR,
};
/*
 * MFGPT0 tick handler: refresh the (variable) MFGPT base address, ack
 * the comparator-2 event and invoke the clockevent handler.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
u32 basehi;
/*
* get MFGPT base address
*
* NOTE: do not remove me, it's need for the value of mfgpt_base is
* variable
*/
_rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), &basehi, &mfgpt_base);
/* ack */
outw(inw(MFGPT0_SETUP) | 0x4000, MFGPT0_SETUP);
mfgpt_clockevent.event_handler(&mfgpt_clockevent);
return IRQ_HANDLED;
}
/*
 * Initialize the conversion factor and the min/max deltas of the clock event
 * structure and register the clock event source with the framework.
 */
void __init setup_mfgpt0_timer(void)
{
    u32 basehi;
    struct clock_event_device *cd = &mfgpt_clockevent;
    unsigned int cpu = smp_processor_id();

    cd->cpumask = cpumask_of(cpu);
    clockevent_set_clock(cd, MFGPT_TICK_RATE);
    /* The MFGPT counter is 16 bits wide, hence the 0xffff ceiling. */
    cd->max_delta_ns = clockevent_delta2ns(0xffff, cd);
    cd->max_delta_ticks = 0xffff;
    cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
    cd->min_delta_ticks = 0xf;
    /* Enable MFGPT0 Comparator 2 Output to the Interrupt Mapper */
    _wrmsr(DIVIL_MSR_REG(MFGPT_IRQ), 0, 0x100);
    /* Enable Interrupt Gate 5 */
    _wrmsr(DIVIL_MSR_REG(PIC_ZSEL_LOW), 0, 0x50000);
    /* get MFGPT base address */
    _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), &basehi, &mfgpt_base);
    clockevents_register_device(cd);
    if (request_irq(CS5536_MFGPT_INTR, timer_interrupt,
            IRQF_NOBALANCING | IRQF_TIMER, "timer", NULL))
        pr_err("Failed to register timer interrupt\n");
}
/*
 * Since the MFGPT overflows every tick, its not very useful
 * to just read by itself. So use jiffies to emulate a free
 * running counter:
 */
/*
 * Clocksource read callback: combines the jiffies count (whole ticks)
 * with the latched hardware counter (fraction of a tick).  The read
 * order (jiffies before counter) and the old_count/old_jifs clamping
 * below are both required for monotonicity.
 */
static u64 mfgpt_read(struct clocksource *cs)
{
    unsigned long flags;
    int count;
    u32 jifs;
    static int old_count;
    static u32 old_jifs;

    raw_spin_lock_irqsave(&mfgpt_lock, flags);
    /*
     * Although our caller may have the read side of xtime_lock,
     * this is now a seqlock, and we are cheating in this routine
     * by having side effects on state that we cannot undo if
     * there is a collision on the seqlock and our caller has to
     * retry. (Namely, old_jifs and old_count.) So we must treat
     * jiffies as volatile despite the lock. We read jiffies
     * before latching the timer count to guarantee that although
     * the jiffies value might be older than the count (that is,
     * the counter may underflow between the last point where
     * jiffies was incremented and the point where we latch the
     * count), it cannot be newer.
     */
    jifs = jiffies;
    /* read the count */
    count = inw(MFGPT0_CNT);
    /*
     * It's possible for count to appear to go the wrong way for this
     * reason:
     *
     * The timer counter underflows, but we haven't handled the resulting
     * interrupt and incremented jiffies yet.
     *
     * Previous attempts to handle these cases intelligently were buggy, so
     * we just do the simple thing now.
     */
    if (count < old_count && jifs == old_jifs)
        count = old_count;
    old_count = count;
    old_jifs = jifs;
    raw_spin_unlock_irqrestore(&mfgpt_lock, flags);
    /* NOTE(review): jifs * COMPARE is a 32-bit product that may wrap;
     * presumably acceptable for this low-rated clocksource — confirm. */
    return (u64) (jifs * COMPARE) + count;
}
/* Jiffies-assisted MFGPT clocksource; low rating, see comment above. */
static struct clocksource clocksource_mfgpt = {
    .name = "mfgpt",
    .rating = 120, /* Functional for real use, but not desired */
    .read = mfgpt_read,
    .mask = CLOCKSOURCE_MASK(32),
};
/*
 * Register the MFGPT counter as a clocksource.  The MFGPT is a single
 * board-level timer and cannot serve multiple CPUs, so skip registration
 * entirely on SMP configurations.
 */
int __init init_mfgpt_clocksource(void)
{
    if (num_possible_cpus() > 1) /* MFGPT does not scale! */
        return 0;

    return clocksource_register_hz(&clocksource_mfgpt, MFGPT_TICK_RATE);
}
arch_initcall(init_mfgpt_clocksource);
| linux-master | arch/mips/loongson2ef/common/cs5536/cs5536_mfgpt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* the ACC Virtual Support Module of AMD CS5536
*
* Copyright (C) 2007 Lemote, Inc.
* Author : jlliu, [email protected]
*
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <cs5536/cs5536.h>
#include <cs5536/cs5536_pci.h>
/*
 * Emulate a PCI config-space write for the virtual CS5536 ACC (audio)
 * function by translating the access into the corresponding MSR writes.
 *
 * @reg:   PCI config register offset being written
 * @value: value the caller is storing
 *
 * Unhandled registers are silently ignored (the default case), matching
 * read-only PCI config behaviour.
 */
void pci_acc_write_reg(int reg, u32 value)
{
    u32 hi = 0, lo = value;

    switch (reg) {
    case PCI_COMMAND:
        /* Bus-master enable maps to bits 8-9 of the GLIU PAE MSR. */
        _rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
        if (value & PCI_COMMAND_MASTER)
            lo |= (0x03 << 8);
        else
            lo &= ~(0x03 << 8);
        _wrmsr(GLIU_MSR_REG(GLIU_PAE), hi, lo);
        break;
    case PCI_STATUS:
        /* Writing PARITY acks a pending parity error, if one is latched. */
        if (value & PCI_STATUS_PARITY) {
            _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
            if (lo & SB_PARE_ERR_FLAG) {
                lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
                _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
            }
        }
        break;
    case PCI_BAR0_REG:
        if (value == PCI_BAR_RANGE_MASK) {
            /* BAR sizing probe: remember it via a soft flag so the
             * next read of BAR0 can report the range mask. */
            _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
            lo |= SOFT_BAR_ACC_FLAG;
            _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
        } else if (value & 0x01) {
            /* Program the new I/O BAR base into the GLIU I/O descriptor. */
            value &= 0xfffffffc;
            hi = 0xA0000000 | ((value & 0x000ff000) >> 12);
            lo = 0x000fff80 | ((value & 0x00000fff) << 20);
            _wrmsr(GLIU_MSR_REG(GLIU_IOD_BM1), hi, lo);
        }
        break;
    case PCI_ACC_INT_REG:
        _rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
        /* disable all the usb interrupt in PIC */
        lo &= ~(0xf << PIC_YSEL_LOW_ACC_SHIFT);
        if (value) /* enable all the acc interrupt in PIC */
            lo |= (CS5536_ACC_INTR << PIC_YSEL_LOW_ACC_SHIFT);
        _wrmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), hi, lo);
        break;
    default:
        break;
    }
}
/*
 * Emulate a PCI config-space read for the virtual CS5536 ACC (audio)
 * function by synthesising the register value from MSR state.
 *
 * @reg: PCI config register offset being read
 *
 * Returns the emulated register value; unhandled offsets read as 0.
 * Note: reading BAR0 right after a sizing probe consumes (clears) the
 * SOFT_BAR_ACC_FLAG, so that read is not side-effect free.
 */
u32 pci_acc_read_reg(int reg)
{
    u32 hi, lo;
    u32 conf_data = 0;

    switch (reg) {
    case PCI_VENDOR_ID:
        conf_data =
            CFG_PCI_VENDOR_ID(CS5536_ACC_DEVICE_ID, CS5536_VENDOR_ID);
        break;
    case PCI_COMMAND:
        /* I/O-space enable is inferred from a programmed IOD BM1. */
        _rdmsr(GLIU_MSR_REG(GLIU_IOD_BM1), &hi, &lo);
        if (((lo & 0xfff00000) || (hi & 0x000000ff))
            && ((hi & 0xf0000000) == 0xa0000000))
            conf_data |= PCI_COMMAND_IO;
        /* Bus-master enable mirrors bits 8-9 of GLIU PAE. */
        _rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
        if ((lo & 0x300) == 0x300)
            conf_data |= PCI_COMMAND_MASTER;
        break;
    case PCI_STATUS:
        conf_data |= PCI_STATUS_66MHZ;
        conf_data |= PCI_STATUS_FAST_BACK;
        _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
        if (lo & SB_PARE_ERR_FLAG)
            conf_data |= PCI_STATUS_PARITY;
        conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
        break;
    case PCI_CLASS_REVISION:
        /* Revision ID comes from the ACC capability MSR. */
        _rdmsr(ACC_MSR_REG(ACC_CAP), &hi, &lo);
        conf_data = lo & 0x000000ff;
        conf_data |= (CS5536_ACC_CLASS_CODE << 8);
        break;
    case PCI_CACHE_LINE_SIZE:
        conf_data =
            CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE,
                        PCI_NORMAL_LATENCY_TIMER);
        break;
    case PCI_BAR0_REG:
        _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
        if (lo & SOFT_BAR_ACC_FLAG) {
            /* Sizing probe in progress: report the range, then
             * clear the soft flag. */
            conf_data = CS5536_ACC_RANGE |
                PCI_BASE_ADDRESS_SPACE_IO;
            lo &= ~SOFT_BAR_ACC_FLAG;
            _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
        } else {
            /* Reconstruct the I/O BAR from the GLIU descriptor. */
            _rdmsr(GLIU_MSR_REG(GLIU_IOD_BM1), &hi, &lo);
            conf_data = (hi & 0x000000ff) << 12;
            conf_data |= (lo & 0xfff00000) >> 20;
            conf_data |= 0x01;
            conf_data &= ~0x02;
        }
        break;
    case PCI_CARDBUS_CIS:
        conf_data = PCI_CARDBUS_CIS_POINTER;
        break;
    case PCI_SUBSYSTEM_VENDOR_ID:
        conf_data =
            CFG_PCI_VENDOR_ID(CS5536_ACC_SUB_ID, CS5536_SUB_VENDOR_ID);
        break;
    case PCI_ROM_ADDRESS:
        conf_data = PCI_EXPANSION_ROM_BAR;
        break;
    case PCI_CAPABILITY_LIST:
        conf_data = PCI_CAPLIST_USB_POINTER;
        break;
    case PCI_INTERRUPT_LINE:
        conf_data =
            CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_ACC_INTR);
        break;
    default:
        break;
    }
    return conf_data;
}
| linux-master | arch/mips/loongson2ef/common/cs5536/cs5536_acc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2014 Broadcom Corporation
* Author: Kevin Cernekee <[email protected]>
*/
#include <linux/of.h>
#include <linux/irqchip.h>
#include <asm/bmips.h>
#include <asm/irq.h>
#include <asm/irq_cpu.h>
#include <asm/time.h>
/* Level-1 interrupt controllers that support SMP IRQ affinity. */
static const struct of_device_id smp_intc_dt_match[] = {
    { .compatible = "brcm,bcm7038-l1-intc" },
    { .compatible = "brcm,bcm6345-l1-intc" },
    {}
};
/* CP0 count/compare timer uses the legacy fixed compare IRQ on BMIPS. */
unsigned int get_c0_compare_int(void)
{
    return CP0_LEGACY_COMPARE_IRQ;
}
/*
 * Board interrupt setup: without one of the SMP-capable L1 interrupt
 * controllers in the device tree, route all IRQs to thread 0.
 */
void __init arch_init_irq(void)
{
    struct device_node *np;

    /* Only these controllers support SMP IRQ affinity */
    np = of_find_matching_node(NULL, smp_intc_dt_match);
    if (!np)
        bmips_tp1_irqs = 0;
    else
        of_node_put(np);

    irqchip_init();
}
/* Register the MIPS per-CPU interrupt controller with the irqchip core. */
IRQCHIP_DECLARE(mips_cpu_intc, "mti,cpu-interrupt-controller",
        mips_cpu_irq_of_init);
| linux-master | arch/mips/bmips/irq.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
* Copyright (C) 2014 Kevin Cernekee <[email protected]>
*/
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_clk.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/smp.h>
#include <asm/addrspace.h>
#include <asm/bmips.h>
#include <asm/bootinfo.h>
#include <asm/cpu-type.h>
#include <asm/mipsregs.h>
#include <asm/prom.h>
#include <asm/smp-ops.h>
#include <asm/time.h>
#include <asm/traps.h>
#include <asm/fw/cfe/cfe_api.h>
/* Bit in BMIPS_RELO_VECTOR_CONTROL_1 selecting relocated normal vectors. */
#define RELO_NORMAL_VEC BIT(18)
/* BCM6328 OTP register; bit 9 reports whether thread 1 is fused off. */
#define REG_BCM6328_OTP ((void __iomem *)CKSEG1ADDR(0x1000062c))
#define BCM6328_TP1_DISABLED BIT(9)
extern bool bmips_rac_flush_disable;
/* 1 MB-aligned kernel load region; used as relocated exception base. */
static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;
/* Maps a DT root "compatible" string to a board-specific fixup. */
struct bmips_quirk {
    const char *compatible;
    void (*quirk_fn)(void);
};
/*
 * Relocate the exception vectors to the kernel's own load region and
 * record the new exception base (ebase) for the MIPS trap code.
 */
static void kbase_setup(void)
{
    __raw_writel(kbase | RELO_NORMAL_VEC,
             BMIPS_GET_CBR() + BMIPS_RELO_VECTOR_CONTROL_1);
    ebase = kbase;
}
/* Viper boards: relocate vectors and force-disable SMP (see below). */
static void bcm3384_viper_quirks(void)
{
    /*
     * Some experimental CM boxes are set up to let CM own the Viper TP0
     * and let Linux own TP1. This requires moving the kernel
     * load address to a non-conflicting region (e.g. via
     * CONFIG_PHYSICAL_START) and supplying an alternate DTB.
     * If we detect this condition, we need to move the MIPS exception
     * vectors up to an area that we own.
     *
     * This is distinct from the OTHER special case mentioned in
     * smp-bmips.c (boot on TP1, but enable SMP, then TP0 becomes our
     * logical CPU#1). For the Viper TP1 case, SMP is off limits.
     *
     * Also note that many BMIPS435x CPUs do not have a
     * BMIPS_RELO_VECTOR_CONTROL_1 register, so it isn't safe to just
     * write VMLINUX_LOAD_ADDRESS into that register on every SoC.
     */
    board_ebase_setup = &kbase_setup;
    bmips_smp_enabled = 0;
}
/* Relocate CPU1's bootloader-provided reset vector out of the IV slot. */
static void bcm63xx_fixup_cpu1(void)
{
    /*
     * The bootloader has set up the CPU1 reset vector at
     * 0xa000_0200.
     * This conflicts with the special interrupt vector (IV).
     * The bootloader has also set up CPU1 to respond to the wrong
     * IPI interrupt.
     * Here we will start up CPU1 in the background and ask it to
     * reconfigure itself then go back to sleep.
     */
    memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
    __sync();
    /* Kick CPU1 via software interrupt 0 so it runs the movevec stub. */
    set_c0_cause(C_SW0);
    cpumask_set_cpu(1, &bmips_booted_mask);
}
static void bcm6328_quirks(void)
{
/* Check CPU1 status in OTP (it is usually disabled) */
if (__raw_readl(REG_BCM6328_OTP) & BCM6328_TP1_DISABLED)
bmips_smp_enabled = 0;
else
bcm63xx_fixup_cpu1();
}
/*
 * BCM3368/BCM6358: SMP is unusable and the RAC flush must be skipped
 * when booting from TP1.
 */
static void bcm6358_quirks(void)
{
    /*
     * BCM3368/BCM6358 need special handling for their shared TLB, so
     * disable SMP for now
     */
    bmips_smp_enabled = 0;
    /*
     * RAC flush causes kernel panics on BCM6358 when booting from TP1
     * because the bootloader is not initializing it properly.
     *
     * Use BIT(31) instead of (1 << 31): shifting a 1 into the sign bit
     * of a signed int is undefined behaviour in C.
     */
    bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & BIT(31));
}
/* BCM6368 family: only the CPU1 reset-vector fixup is needed. */
static void bcm6368_quirks(void)
{
    bcm63xx_fixup_cpu1();
}
/* Per-SoC fixups, matched against the DT root compatible in plat_mem_setup(). */
static const struct bmips_quirk bmips_quirk_list[] = {
    { "brcm,bcm3368", &bcm6358_quirks },
    { "brcm,bcm3384-viper", &bcm3384_viper_quirks },
    { "brcm,bcm33843-viper", &bcm3384_viper_quirks },
    { "brcm,bcm6328", &bcm6328_quirks },
    { "brcm,bcm6358", &bcm6358_quirks },
    { "brcm,bcm6362", &bcm6368_quirks },
    { "brcm,bcm6368", &bcm6368_quirks },
    { "brcm,bcm63168", &bcm6368_quirks },
    { "brcm,bcm63268", &bcm6368_quirks },
    { },
};
/*
 * Attach to the CFE firmware, but only when the handle seal passed in
 * fw_arg3 proves CFE actually booted us.
 */
static void __init bmips_init_cfe(void)
{
    cfe_seal = fw_arg3;
    if (cfe_seal == CFE_EPTSEAL)
        cfe_init(fw_arg0, fw_arg2);
}
/* Early firmware/CPU bring-up: CFE handle, BMIPS CPU setup, SMP ops. */
void __init prom_init(void)
{
    bmips_init_cfe();
    bmips_cpu_setup();
    register_bmips_smp_ops();
}
/* All DT-based BMIPS boards report the same generic machine name. */
const char *get_system_type(void)
{
    static const char sys_type[] = "Generic BMIPS kernel";

    return sys_type;
}
/*
 * Read the CP0 timer frequency from the "mips-hpt-frequency" property of
 * the /cpus node; this value is mandatory on BMIPS device trees.
 */
void __init plat_time_init(void)
{
    struct device_node *np;
    u32 freq;

    np = of_find_node_by_name(NULL, "cpus");
    if (!np)
        panic("missing 'cpus' DT node");
    if (of_property_read_u32(np, "mips-hpt-frequency", &freq) < 0)
        panic("missing 'mips-hpt-frequency' property");
    of_node_put(np);
    mips_hpt_frequency = freq;
}
/*
 * Locate the device tree (either passed ARM-style in fw_arg2 or built
 * in / appended), hand it to the MIPS DT setup code, and run any
 * board-specific quirk matching the root compatible string.
 */
void __init plat_mem_setup(void)
{
    void *dtb;
    const struct bmips_quirk *q;

    set_io_port_base(0);
    ioport_resource.start = 0;
    ioport_resource.end = ~0;
    /*
     * intended to somewhat resemble ARM; see
     * Documentation/arch/arm/booting.rst
     */
    if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
        dtb = phys_to_virt(fw_arg2);
    else
        dtb = get_fdt();
    if (!dtb)
        cfe_die("no dtb found");

    __dt_setup_arch(dtb);

    for (q = bmips_quirk_list; q->quirk_fn; q++) {
        if (of_flat_dt_is_compatible(of_get_flat_dt_root(),
                         q->compatible)) {
            q->quirk_fn();
        }
    }
}
/*
 * Unflatten the DT and, unless both CPUs are listed and available in
 * /cpus, keep SMP boot disabled.
 */
void __init device_tree_init(void)
{
    struct device_node *np;

    unflatten_and_copy_device_tree();
    /* Disable SMP boot unless both CPUs are listed in DT and !disabled */
    np = of_find_node_by_name(NULL, "cpus");
    if (np && of_get_available_child_count(np) <= 1)
        bmips_smp_enabled = 0;
    of_node_put(np);
}
/* Register the fixed-rate clocks described in the device tree. */
static int __init plat_dev_init(void)
{
    of_clk_init(NULL);
    return 0;
}
arch_initcall(plat_dev_init);
| linux-master | arch/mips/bmips/setup.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/types.h>
#include <linux/dma-map-ops.h>
#include <asm/bmips.h>
#include <asm/io.h>
/* Set by board quirks (e.g. BCM6358 booted from TP1) to skip the RAC flush. */
bool bmips_rac_flush_disable;
/*
 * Flush the BMIPS read-ahead cache (RAC) before the CPU inspects DMA'd
 * memory, so stale prefetched data cannot be observed.  Only the
 * BMIPS3300/4350/4380 cores are handled here.
 */
void arch_sync_dma_for_cpu_all(void)
{
    void __iomem *cbr = BMIPS_GET_CBR();
    u32 cfg;

    if (boot_cpu_type() != CPU_BMIPS3300 &&
        boot_cpu_type() != CPU_BMIPS4350 &&
        boot_cpu_type() != CPU_BMIPS4380)
        return;
    if (unlikely(bmips_rac_flush_disable))
        return;
    /* Flush stale data out of the readahead cache */
    cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
    __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
    /* Read back so the flush write is posted before we return. */
    __raw_readl(cbr + BMIPS_RAC_CONFIG);
}
| linux-master | arch/mips/bmips/dma.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MIPS accelerated ChaCha and XChaCha stream ciphers,
* including ChaCha20 (RFC7539)
*
* Copyright (C) 2019 Linaro, Ltd. <[email protected]>
*/
#include <asm/byteorder.h>
#include <crypto/algapi.h>
#include <crypto/internal/chacha.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
/* Core routines implemented in MIPS assembly (separate .S file, presumably). */
asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
                  unsigned int bytes, int nrounds);
EXPORT_SYMBOL(chacha_crypt_arch);
asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds);
EXPORT_SYMBOL(hchacha_block_arch);
/* No arch-specific state layout: defer to the generic initialiser. */
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
{
    chacha_init_generic(state, key, iv);
}
EXPORT_SYMBOL(chacha_init_arch);
/*
 * Walk the scatterlists of @req and XOR the ChaCha keystream (keyed by
 * @ctx, with nonce/counter @iv) over the data.  Intermediate chunks are
 * rounded down to the walk stride so the block counter stays aligned.
 *
 * Returns 0 on success or the error from the skcipher walk.
 */
static int chacha_mips_stream_xor(struct skcipher_request *req,
                  const struct chacha_ctx *ctx, const u8 *iv)
{
    struct skcipher_walk walk;
    u32 state[16];
    int err;

    err = skcipher_walk_virt(&walk, req, false);

    chacha_init_generic(state, ctx->key, iv);

    while (walk.nbytes > 0) {
        unsigned int nbytes = walk.nbytes;

        if (nbytes < walk.total)
            nbytes = round_down(nbytes, walk.stride);

        chacha_crypt(state, walk.dst.virt.addr, walk.src.virt.addr,
                 nbytes, ctx->nrounds);
        err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
    }
    return err;
}
/* skcipher entry point for plain ChaCha: IV is used directly. */
static int chacha_mips(struct skcipher_request *req)
{
    struct chacha_ctx *ctx =
        crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));

    return chacha_mips_stream_xor(req, ctx, req->iv);
}
/*
 * skcipher entry point for XChaCha: derive a subkey from the first 16
 * IV bytes via HChaCha, then run ChaCha with the remaining 8 IV bytes
 * (swapped into the low words of the 16-byte real IV).
 */
static int xchacha_mips(struct skcipher_request *req)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
    struct chacha_ctx subctx;
    u32 state[16];
    u8 real_iv[16];

    chacha_init_generic(state, ctx->key, req->iv);
    hchacha_block(state, subctx.key, ctx->nrounds);
    subctx.nrounds = ctx->nrounds;

    memcpy(&real_iv[0], req->iv + 24, 8);
    memcpy(&real_iv[8], req->iv + 16, 8);
    return chacha_mips_stream_xor(req, &subctx, real_iv);
}
/* chacha20, xchacha20 and xchacha12 skcipher registrations. */
static struct skcipher_alg algs[] = {
    {
        .base.cra_name = "chacha20",
        .base.cra_driver_name = "chacha20-mips",
        .base.cra_priority = 200,
        .base.cra_blocksize = 1,
        .base.cra_ctxsize = sizeof(struct chacha_ctx),
        .base.cra_module = THIS_MODULE,
        .min_keysize = CHACHA_KEY_SIZE,
        .max_keysize = CHACHA_KEY_SIZE,
        .ivsize = CHACHA_IV_SIZE,
        .chunksize = CHACHA_BLOCK_SIZE,
        .setkey = chacha20_setkey,
        .encrypt = chacha_mips,
        .decrypt = chacha_mips,
    }, {
        .base.cra_name = "xchacha20",
        .base.cra_driver_name = "xchacha20-mips",
        .base.cra_priority = 200,
        .base.cra_blocksize = 1,
        .base.cra_ctxsize = sizeof(struct chacha_ctx),
        .base.cra_module = THIS_MODULE,
        .min_keysize = CHACHA_KEY_SIZE,
        .max_keysize = CHACHA_KEY_SIZE,
        .ivsize = XCHACHA_IV_SIZE,
        .chunksize = CHACHA_BLOCK_SIZE,
        .setkey = chacha20_setkey,
        .encrypt = xchacha_mips,
        .decrypt = xchacha_mips,
    }, {
        .base.cra_name = "xchacha12",
        .base.cra_driver_name = "xchacha12-mips",
        .base.cra_priority = 200,
        .base.cra_blocksize = 1,
        .base.cra_ctxsize = sizeof(struct chacha_ctx),
        .base.cra_module = THIS_MODULE,
        .min_keysize = CHACHA_KEY_SIZE,
        .max_keysize = CHACHA_KEY_SIZE,
        .ivsize = XCHACHA_IV_SIZE,
        .chunksize = CHACHA_BLOCK_SIZE,
        .setkey = chacha12_setkey,
        .encrypt = xchacha_mips,
        .decrypt = xchacha_mips,
    }
};
/* Register the skciphers only when the skcipher layer is reachable. */
static int __init chacha_simd_mod_init(void)
{
    if (!IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER))
        return 0;

    return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}
/* Mirror of chacha_simd_mod_init(): unregister when the layer exists. */
static void __exit chacha_simd_mod_fini(void)
{
    if (!IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER))
        return;

    crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
/* Module hookup and metadata. */
module_init(chacha_simd_mod_init);
module_exit(chacha_simd_mod_fini);
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (MIPS accelerated)");
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-mips");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-mips");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-mips");
| linux-master | arch/mips/crypto/chacha-glue.c |
// SPDX-License-Identifier: GPL-2.0
/*
* OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS
*
* Copyright (C) 2019 Linaro Ltd. <[email protected]>
*/
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
/* Core routines implemented in MIPS assembly (separate .S file, presumably). */
asmlinkage void poly1305_init_mips(void *state, const u8 *key);
asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
/*
 * Library entry point: absorb the r-part of the key into the asm state
 * and stash the s-part (final nonce addend) as four LE words.
 */
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
    poly1305_init_mips(&dctx->h, key);
    dctx->s[0] = get_unaligned_le32(key + 16);
    dctx->s[1] = get_unaligned_le32(key + 20);
    dctx->s[2] = get_unaligned_le32(key + 24);
    dctx->s[3] = get_unaligned_le32(key + 28);
    dctx->buflen = 0;
}
EXPORT_SYMBOL(poly1305_init_arch);
/* shash init: empty buffer, key not yet absorbed (set lazily by update). */
static int mips_poly1305_init(struct shash_desc *desc)
{
    struct poly1305_desc_ctx *ctx = shash_desc_ctx(desc);

    ctx->buflen = 0;
    ctx->rset = 0;
    ctx->sset = false;
    return 0;
}
/*
 * Process full blocks for the shash interface.  The first 32 bytes of
 * "data" are really the one-time key: the first 16 set r, the next 16
 * set s.  Only after both are absorbed is remaining data hashed.
 */
static void mips_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
                 u32 len, u32 hibit)
{
    if (unlikely(!dctx->sset)) {
        if (!dctx->rset) {
            poly1305_init_mips(&dctx->h, src);
            src += POLY1305_BLOCK_SIZE;
            len -= POLY1305_BLOCK_SIZE;
            dctx->rset = 1;
        }
        if (len >= POLY1305_BLOCK_SIZE) {
            dctx->s[0] = get_unaligned_le32(src + 0);
            dctx->s[1] = get_unaligned_le32(src + 4);
            dctx->s[2] = get_unaligned_le32(src + 8);
            dctx->s[3] = get_unaligned_le32(src + 12);
            src += POLY1305_BLOCK_SIZE;
            len -= POLY1305_BLOCK_SIZE;
            dctx->sset = true;
        }
        if (len < POLY1305_BLOCK_SIZE)
            return;
    }
    /* Round down to whole blocks; the caller buffers any remainder. */
    len &= ~(POLY1305_BLOCK_SIZE - 1);
    poly1305_blocks_mips(&dctx->h, src, len, hibit);
}
/*
 * shash update: top up and drain the partial-block buffer, hash whole
 * blocks directly from the input, then stash the tail for later.
 */
static int mips_poly1305_update(struct shash_desc *desc, const u8 *src,
                unsigned int len)
{
    struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);

    if (unlikely(dctx->buflen)) {
        u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);

        memcpy(dctx->buf + dctx->buflen, src, bytes);
        src += bytes;
        len -= bytes;
        dctx->buflen += bytes;
        if (dctx->buflen == POLY1305_BLOCK_SIZE) {
            mips_poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 1);
            dctx->buflen = 0;
        }
    }
    if (likely(len >= POLY1305_BLOCK_SIZE)) {
        mips_poly1305_blocks(dctx, src, len, 1);
        src += round_down(len, POLY1305_BLOCK_SIZE);
        len %= POLY1305_BLOCK_SIZE;
    }
    if (unlikely(len)) {
        dctx->buflen = len;
        memcpy(dctx->buf, src, len);
    }
    return 0;
}
/*
 * Library update path (key already absorbed by poly1305_init_arch):
 * same buffering scheme as the shash path, but calls the asm block
 * routine directly.
 */
void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
              unsigned int nbytes)
{
    if (unlikely(dctx->buflen)) {
        u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);

        memcpy(dctx->buf + dctx->buflen, src, bytes);
        src += bytes;
        nbytes -= bytes;
        dctx->buflen += bytes;
        if (dctx->buflen == POLY1305_BLOCK_SIZE) {
            poly1305_blocks_mips(&dctx->h, dctx->buf,
                         POLY1305_BLOCK_SIZE, 1);
            dctx->buflen = 0;
        }
    }
    if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
        unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);

        poly1305_blocks_mips(&dctx->h, src, len, 1);
        src += len;
        nbytes %= POLY1305_BLOCK_SIZE;
    }
    if (unlikely(nbytes)) {
        dctx->buflen = nbytes;
        memcpy(dctx->buf, src, nbytes);
    }
}
EXPORT_SYMBOL(poly1305_update_arch);
/*
 * Finalise: pad and hash any buffered tail (hibit = 0 marks the final
 * partial block), emit the tag, then wipe the descriptor state.
 */
void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
{
    if (unlikely(dctx->buflen)) {
        dctx->buf[dctx->buflen++] = 1;
        memset(dctx->buf + dctx->buflen, 0,
               POLY1305_BLOCK_SIZE - dctx->buflen);
        poly1305_blocks_mips(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
    }
    poly1305_emit_mips(&dctx->h, dst, dctx->s);
    /* Zeroise the whole context so key material does not linger. */
    *dctx = (struct poly1305_desc_ctx){};
}
EXPORT_SYMBOL(poly1305_final_arch);
/* shash final: refuse to emit a tag before the full key was absorbed. */
static int mips_poly1305_final(struct shash_desc *desc, u8 *dst)
{
    struct poly1305_desc_ctx *ctx = shash_desc_ctx(desc);

    if (!ctx->sset)
        return -ENOKEY;

    poly1305_final_arch(ctx, dst);
    return 0;
}
/* shash registration for the MIPS-accelerated Poly1305. */
static struct shash_alg mips_poly1305_alg = {
    .init = mips_poly1305_init,
    .update = mips_poly1305_update,
    .final = mips_poly1305_final,
    .digestsize = POLY1305_DIGEST_SIZE,
    .descsize = sizeof(struct poly1305_desc_ctx),
    .base.cra_name = "poly1305",
    .base.cra_driver_name = "poly1305-mips",
    .base.cra_priority = 200,
    .base.cra_blocksize = POLY1305_BLOCK_SIZE,
    .base.cra_module = THIS_MODULE,
};
/* Register the shash only when the generic hash layer is reachable. */
static int __init mips_poly1305_mod_init(void)
{
    return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
        crypto_register_shash(&mips_poly1305_alg) : 0;
}
static void __exit mips_poly1305_mod_exit(void)
{
    if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
        crypto_unregister_shash(&mips_poly1305_alg);
}
module_init(mips_poly1305_mod_init);
module_exit(mips_poly1305_mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("poly1305");
MODULE_ALIAS_CRYPTO("poly1305-mips");
| linux-master | arch/mips/crypto/poly1305-glue.c |
// SPDX-License-Identifier: GPL-2.0
/*
* crc32-mips.c - CRC32 and CRC32C using optional MIPSr6 instructions
*
* Module based on arm64/crypto/crc32-arm.c
*
* Copyright (C) 2014 Linaro Ltd <[email protected]>
* Copyright (C) 2018 MIPS Tech, LLC
*/
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/mipsregs.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
/* Operand-size selector encoded into the crc32 instruction: byte..doubleword. */
enum crc_op_size {
    b, h, w, d,
};
/* Polynomial selector: IEEE CRC32 (crc32) vs. Castagnoli (crc32c). */
enum crc_type {
    crc32,
    crc32c,
};
/*
 * Helper macros emitting the MIPS R6 crc32* instructions.  When the
 * assembler lacks CRC support the raw opcodes are encoded by hand via
 * _ASM_MACRO_3R; otherwise ".set crc" enables the mnemonics directly.
 */
#ifndef TOOLCHAIN_SUPPORTS_CRC
#define _ASM_SET_CRC(OP, SZ, TYPE)                    \
_ASM_MACRO_3R(OP, rt, rs, rt2,                        \
    ".ifnc    \\rt, \\rt2\n\t"                    \
    ".error    \"invalid operands \\\"" #OP " \\rt,\\rs,\\rt2\\\"\"\n\t"    \
    ".endif\n\t"                            \
    _ASM_INSN_IF_MIPS(0x7c00000f | (__rt << 16) | (__rs << 21) |    \
              ((SZ) << 6) | ((TYPE) << 8))            \
    _ASM_INSN32_IF_MM(0x00000030 | (__rs << 16) | (__rt << 21) |    \
              ((SZ) << 14) | ((TYPE) << 3)))
#define _ASM_UNSET_CRC(op, SZ, TYPE)    ".purgem " #op "\n\t"
#else /* !TOOLCHAIN_SUPPORTS_CRC */
#define _ASM_SET_CRC(op, SZ, TYPE) ".set\tcrc\n\t"
#define _ASM_UNSET_CRC(op, SZ, TYPE)
#endif
/* Fold one value into crc with the given instruction (crc is in/out). */
#define __CRC32(crc, value, op, SZ, TYPE)        \
do {                            \
    __asm__ __volatile__(                \
        ".set    push\n\t"            \
        _ASM_SET_CRC(op, SZ, TYPE)        \
        #op "    %0, %1, %0\n\t"        \
        _ASM_UNSET_CRC(op, SZ, TYPE)        \
        ".set    pop"                \
        : "+r" (crc)                \
        : "r" (value));                \
} while (0)
#define _CRC32_crc32b(crc, value)    __CRC32(crc, value, crc32b, 0, 0)
#define _CRC32_crc32h(crc, value)    __CRC32(crc, value, crc32h, 1, 0)
#define _CRC32_crc32w(crc, value)    __CRC32(crc, value, crc32w, 2, 0)
#define _CRC32_crc32d(crc, value)    __CRC32(crc, value, crc32d, 3, 0)
#define _CRC32_crc32cb(crc, value)    __CRC32(crc, value, crc32cb, 0, 1)
#define _CRC32_crc32ch(crc, value)    __CRC32(crc, value, crc32ch, 1, 1)
#define _CRC32_crc32cw(crc, value)    __CRC32(crc, value, crc32cw, 2, 1)
#define _CRC32_crc32cd(crc, value)    __CRC32(crc, value, crc32cd, 3, 1)
#define _CRC32(crc, value, size, op) \
    _CRC32_##op##size(crc, value)
/* Convenience wrappers: CRC32 = IEEE polynomial, CRC32C = Castagnoli. */
#define CRC32(crc, value, size) \
    _CRC32(crc, value, size, crc32)
#define CRC32C(crc, value, size) \
    _CRC32(crc, value, size, crc32c)
/*
 * Compute little-endian IEEE CRC32 of @p[0..len) starting from @crc_.
 *
 * Note the unusual #ifdef: on 64-bit the main loop consumes 8 bytes at
 * a time and the open "if (len & sizeof(u32)) {" pairs with the closing
 * brace after the 32-bit body; on 32-bit the loop header itself is the
 * 4-byte "while".  The trailing tests mop up the 2- and 1-byte tails.
 */
static u32 crc32_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
{
    u32 crc = crc_;
#ifdef CONFIG_64BIT
    while (len >= sizeof(u64)) {
        u64 value = get_unaligned_le64(p);

        CRC32(crc, value, d);
        p += sizeof(u64);
        len -= sizeof(u64);
    }

    if (len & sizeof(u32)) {
#else /* !CONFIG_64BIT */
    while (len >= sizeof(u32)) {
#endif
        u32 value = get_unaligned_le32(p);

        CRC32(crc, value, w);
        p += sizeof(u32);
        len -= sizeof(u32);
    }
    if (len & sizeof(u16)) {
        u16 value = get_unaligned_le16(p);

        CRC32(crc, value, h);
        p += sizeof(u16);
    }
    if (len & sizeof(u8)) {
        u8 value = *p++;

        CRC32(crc, value, b);
    }
    return crc;
}
/*
 * Same structure as crc32_mips_le_hw() above, but using the Castagnoli
 * (CRC32C) instruction variants.  See that function for the explanation
 * of the brace-crossing #ifdef.
 */
static u32 crc32c_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
{
    u32 crc = crc_;
#ifdef CONFIG_64BIT
    while (len >= sizeof(u64)) {
        u64 value = get_unaligned_le64(p);

        CRC32C(crc, value, d);
        p += sizeof(u64);
        len -= sizeof(u64);
    }

    if (len & sizeof(u32)) {
#else /* !CONFIG_64BIT */
    while (len >= sizeof(u32)) {
#endif
        u32 value = get_unaligned_le32(p);

        CRC32C(crc, value, w);
        p += sizeof(u32);
        len -= sizeof(u32);
    }
    if (len & sizeof(u16)) {
        u16 value = get_unaligned_le16(p);

        CRC32C(crc, value, h);
        p += sizeof(u16);
    }
    if (len & sizeof(u8)) {
        u8 value = *p++;

        CRC32C(crc, value, b);
    }
    return crc;
}
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
/* Per-tfm state: the CRC seed ("key") shared by all requests. */
struct chksum_ctx {
    u32 key;
};
/* Per-request state: the running CRC value. */
struct chksum_desc_ctx {
    u32 crc;
};
/* Start a new digest from the tfm's configured seed. */
static int chksum_init(struct shash_desc *desc)
{
    struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
    struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

    ctx->crc = mctx->key;
    return 0;
}
/*
 * Install a custom 32-bit CRC seed, little-endian encoded.
 *
 * Setting the seed allows arbitrary accumulators and a flexible XOR
 * policy: if your algorithm starts with ~0, XOR with ~0 before you set
 * the seed.
 */
static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
             unsigned int keylen)
{
    struct chksum_ctx *ctx = crypto_shash_ctx(tfm);

    if (keylen != sizeof(ctx->key))
        return -EINVAL;

    ctx->key = get_unaligned_le32(key);
    return 0;
}
/* Fold more data into the running CRC32 (IEEE polynomial). */
static int chksum_update(struct shash_desc *desc, const u8 *data,
             unsigned int length)
{
    struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

    ctx->crc = crc32_mips_le_hw(ctx->crc, data, length);
    return 0;
}
/* Fold more data into the running CRC32C (Castagnoli polynomial). */
static int chksumc_update(struct shash_desc *desc, const u8 *data,
              unsigned int length)
{
    struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

    ctx->crc = crc32c_mips_le_hw(ctx->crc, data, length);
    return 0;
}
/* Emit the CRC32 digest as-is (no final inversion), little-endian. */
static int chksum_final(struct shash_desc *desc, u8 *out)
{
    struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

    put_unaligned_le32(ctx->crc, out);
    return 0;
}
/* Emit the CRC32C digest with the conventional final bit inversion. */
static int chksumc_final(struct shash_desc *desc, u8 *out)
{
    struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

    put_unaligned_le32(~ctx->crc, out);
    return 0;
}
/* Combined update+final for CRC32: hash @data and store the result. */
static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
{
    put_unaligned_le32(crc32_mips_le_hw(crc, data, len), out);
    return 0;
}
/* Combined update+final for CRC32C (with final inversion). */
static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
{
    put_unaligned_le32(~crc32c_mips_le_hw(crc, data, len), out);
    return 0;
}
/* shash finup callbacks: continue from the request's running CRC. */
static int chksum_finup(struct shash_desc *desc, const u8 *data,
            unsigned int len, u8 *out)
{
    struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

    return __chksum_finup(ctx->crc, data, len, out);
}
static int chksumc_finup(struct shash_desc *desc, const u8 *data,
             unsigned int len, u8 *out)
{
    struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

    return __chksumc_finup(ctx->crc, data, len, out);
}
/* shash one-shot digest callbacks: start from the tfm's seed. */
static int chksum_digest(struct shash_desc *desc, const u8 *data,
             unsigned int length, u8 *out)
{
    struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);

    return __chksum_finup(mctx->key, data, length, out);
}
static int chksumc_digest(struct shash_desc *desc, const u8 *data,
              unsigned int length, u8 *out)
{
    struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);

    return __chksumc_finup(mctx->key, data, length, out);
}
/* tfm constructor: seed defaults to all-ones, the standard CRC start value. */
static int chksum_cra_init(struct crypto_tfm *tfm)
{
    struct chksum_ctx *ctx = crypto_tfm_ctx(tfm);

    ctx->key = ~0;
    return 0;
}
/* shash registrations for the hardware-accelerated CRC32 and CRC32C. */
static struct shash_alg crc32_alg = {
    .digestsize = CHKSUM_DIGEST_SIZE,
    .setkey = chksum_setkey,
    .init = chksum_init,
    .update = chksum_update,
    .final = chksum_final,
    .finup = chksum_finup,
    .digest = chksum_digest,
    .descsize = sizeof(struct chksum_desc_ctx),
    .base = {
        .cra_name = "crc32",
        .cra_driver_name = "crc32-mips-hw",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
        .cra_blocksize = CHKSUM_BLOCK_SIZE,
        .cra_alignmask = 0,
        .cra_ctxsize = sizeof(struct chksum_ctx),
        .cra_module = THIS_MODULE,
        .cra_init = chksum_cra_init,
    }
};
static struct shash_alg crc32c_alg = {
    .digestsize = CHKSUM_DIGEST_SIZE,
    .setkey = chksum_setkey,
    .init = chksum_init,
    .update = chksumc_update,
    .final = chksumc_final,
    .finup = chksumc_finup,
    .digest = chksumc_digest,
    .descsize = sizeof(struct chksum_desc_ctx),
    .base = {
        .cra_name = "crc32c",
        .cra_driver_name = "crc32c-mips-hw",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
        .cra_blocksize = CHKSUM_BLOCK_SIZE,
        .cra_alignmask = 0,
        .cra_ctxsize = sizeof(struct chksum_ctx),
        .cra_module = THIS_MODULE,
        .cra_init = chksum_cra_init,
    }
};
/*
 * Register both shash algorithms; if the second registration fails,
 * roll back the first so the module leaves no partial state behind.
 */
static int __init crc32_mod_init(void)
{
    int err = crypto_register_shash(&crc32_alg);

    if (err)
        return err;

    err = crypto_register_shash(&crc32c_alg);
    if (err)
        crypto_unregister_shash(&crc32_alg);

    return err;
}
/* Unregister both algorithms on module unload. */
static void __exit crc32_mod_exit(void)
{
    crypto_unregister_shash(&crc32_alg);
    crypto_unregister_shash(&crc32c_alg);
}
/* Fix: the author e-mail address was missing its closing '>'. */
MODULE_AUTHOR("Marcin Nowakowski <[email protected]>");
MODULE_DESCRIPTION("CRC32 and CRC32C using optional MIPS instructions");
MODULE_LICENSE("GPL v2");
module_cpu_feature_match(MIPS_CRC32, crc32_mod_init);
module_exit(crc32_mod_exit);
| linux-master | arch/mips/crypto/crc32-mips.c |
/*
*
* BRIEF MODULE DESCRIPTION
* Board specific pci fixups for the Toshiba rbtx4927
*
* Copyright 2001 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
* [email protected] or [email protected]
*
* Copyright (C) 2000-2001 Toshiba Corporation
*
* Copyright (C) 2004 MontaVista Software Inc.
* Author: Manish Lachwani ([email protected])
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/types.h>
#include <asm/txx9/pci.h>
#include <asm/txx9/rbtx4927.h>
/*
 * Map a PCI slot/pin pair to one of the four RBTX4927 IOC PCI IRQs.
 * The interrupt pin is rotated per slot (standard PCI backplane IRQ
 * swizzling), with a distinct rotation for PICMG backplanes.
 */
int rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
    static const int ioc_irq[4] = {
        RBTX4927_IRQ_IOC_PCIA,
        RBTX4927_IRQ_IOC_PCIB,
        RBTX4927_IRQ_IOC_PCIC,
        RBTX4927_IRQ_IOC_PCID,
    };
    unsigned char irq = pin - 1;    /* rotate in 0-3 space */

    if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(23)) {
        /* PCI CardSlot (IDSEL=A23): PCIA => PCIA */
        irq = (irq + 0 + slot) % 4;
    } else {
        /* PCI Backplane */
        if (txx9_pci_option & TXX9_PCI_OPT_PICMG)
            irq = (irq + 33 - slot) % 4;
        else
            irq = (irq + 3 + slot) % 4;
    }

    return ioc_irq[irq];
}
| linux-master | arch/mips/pci/fixup-rbtx4927.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007, 2008, 2009, 2010, 2011 Cavium Networks
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pciercx-defs.h>
#include <asm/octeon/cvmx-pescx-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-pemx-defs.h>
#include <asm/octeon/cvmx-dpi-defs.h>
#include <asm/octeon/cvmx-sli-defs.h>
#include <asm/octeon/cvmx-sriox-defs.h>
#include <asm/octeon/cvmx-helper-errata.h>
#include <asm/octeon/pci-octeon.h>
#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
#define MPS_CN5XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */
#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
#define MPS_CN6XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */
/* Module parameter to disable PCI probing */
static int pcie_disable;
module_param(pcie_disable, int, S_IRUGO);
static int enable_pcie_14459_war;
static int enable_pcie_bus_num_war[2];
/*
 * 64-bit XKPHYS address layout used to generate PCIe requests on
 * Octeon.  The subdid field selects the request type: 1 = config
 * space, 2 = IO space, 3-6 = memory space (one per port).  Do not
 * reorder the bitfields - they must match the hardware decode.
 */
union cvmx_pcie_address {
	uint64_t u64;
	struct {
		uint64_t upper:2;	/* Normally 2 for XKPHYS */
		uint64_t reserved_49_61:13;	/* Must be zero */
		uint64_t io:1;		/* 1 for IO space access */
		uint64_t did:5;		/* PCIe DID = 3 */
		uint64_t subdid:3;	/* PCIe SubDID = 1 */
		uint64_t reserved_36_39:4;	/* Must be zero */
		uint64_t es:2;		/* Endian swap = 1 */
		uint64_t port:2;	/* PCIe port 0,1 */
		uint64_t reserved_29_31:3;	/* Must be zero */
		/*
		 * Selects the type of the configuration request (0 = type 0,
		 * 1 = type 1).
		 */
		uint64_t ty:1;
		/* Target bus number sent in the ID in the request. */
		uint64_t bus:8;
		/*
		 * Target device number sent in the ID in the
		 * request. Note that Dev must be zero for type 0
		 * configuration requests.
		 */
		uint64_t dev:5;
		/* Target function number sent in the ID in the request. */
		uint64_t func:3;
		/*
		 * Selects a register in the configuration space of
		 * the target.
		 */
		uint64_t reg:12;
	} config;
	struct {
		uint64_t upper:2;	/* Normally 2 for XKPHYS */
		uint64_t reserved_49_61:13;	/* Must be zero */
		uint64_t io:1;		/* 1 for IO space access */
		uint64_t did:5;		/* PCIe DID = 3 */
		uint64_t subdid:3;	/* PCIe SubDID = 2 */
		uint64_t reserved_36_39:4;	/* Must be zero */
		uint64_t es:2;		/* Endian swap = 1 */
		uint64_t port:2;	/* PCIe port 0,1 */
		uint64_t address:32;	/* PCIe IO address */
	} io;
	struct {
		uint64_t upper:2;	/* Normally 2 for XKPHYS */
		uint64_t reserved_49_61:13;	/* Must be zero */
		uint64_t io:1;		/* 1 for IO space access */
		uint64_t did:5;		/* PCIe DID = 3 */
		uint64_t subdid:3;	/* PCIe SubDID = 3-6 */
		uint64_t reserved_36_39:4;	/* Must be zero */
		uint64_t address:36;	/* PCIe Mem address */
	} mem;
};
static int cvmx_pcie_rc_initialize(int pcie_port);
/**
* Return the Core virtual base address for PCIe IO access. IOs are
* read/written as an offset from this address.
*
* @pcie_port: PCIe port the IO is for
*
* Returns 64bit Octeon IO base address for read/write
*/
static inline uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
{
union cvmx_pcie_address pcie_addr;
pcie_addr.u64 = 0;
pcie_addr.io.upper = 0;
pcie_addr.io.io = 1;
pcie_addr.io.did = 3;
pcie_addr.io.subdid = 2;
pcie_addr.io.es = 1;
pcie_addr.io.port = pcie_port;
return pcie_addr.u64;
}
/**
 * Size of the IO address region returned at address
 * cvmx_pcie_get_io_base_address()
 *
 * @pcie_port: PCIe port the IO is for (every port gets the same size)
 *
 * Returns Size of the IO window
 */
static inline uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
	/* Each port decodes a fixed 4GB window of IO space. */
	return 0x100000000ull;
}
/**
* Return the Core virtual base address for PCIe MEM access. Memory is
* read/written as an offset from this address.
*
* @pcie_port: PCIe port the IO is for
*
* Returns 64bit Octeon IO base address for read/write
*/
static inline uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
{
union cvmx_pcie_address pcie_addr;
pcie_addr.u64 = 0;
pcie_addr.mem.upper = 0;
pcie_addr.mem.io = 1;
pcie_addr.mem.did = 3;
pcie_addr.mem.subdid = 3 + pcie_port;
return pcie_addr.u64;
}
/**
 * Size of the Mem address region returned at address
 * cvmx_pcie_get_mem_base_address()
 *
 * @pcie_port: PCIe port the IO is for (every port gets the same size)
 *
 * Returns Size of the Mem window
 */
static inline uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
	/* Each port decodes a fixed 64GB window of memory space. */
	return 0x1000000000ull;
}
/**
* Read a PCIe config space register indirectly. This is used for
* registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
*
* @pcie_port: PCIe port to read from
* @cfg_offset: Address to read
*
* Returns Value read
*/
static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
{
if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
union cvmx_pescx_cfg_rd pescx_cfg_rd;
pescx_cfg_rd.u64 = 0;
pescx_cfg_rd.s.addr = cfg_offset;
cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
return pescx_cfg_rd.s.data;
} else {
union cvmx_pemx_cfg_rd pemx_cfg_rd;
pemx_cfg_rd.u64 = 0;
pemx_cfg_rd.s.addr = cfg_offset;
cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
return pemx_cfg_rd.s.data;
}
}
/**
* Write a PCIe config space register indirectly. This is used for
* registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
*
* @pcie_port: PCIe port to write to
* @cfg_offset: Address to write
* @val: Value to write
*/
static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset,
uint32_t val)
{
if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
union cvmx_pescx_cfg_wr pescx_cfg_wr;
pescx_cfg_wr.u64 = 0;
pescx_cfg_wr.s.addr = cfg_offset;
pescx_cfg_wr.s.data = val;
cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
} else {
union cvmx_pemx_cfg_wr pemx_cfg_wr;
pemx_cfg_wr.u64 = 0;
pemx_cfg_wr.s.addr = cfg_offset;
pemx_cfg_wr.s.data = val;
cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
}
}
/**
* Build a PCIe config space request address for a device
*
* @pcie_port: PCIe port to access
* @bus: Sub bus
* @dev: Device ID
* @fn: Device sub function
* @reg: Register to access
*
* Returns 64bit Octeon IO address
*/
static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
int dev, int fn, int reg)
{
union cvmx_pcie_address pcie_addr;
union cvmx_pciercx_cfg006 pciercx_cfg006;
pciercx_cfg006.u32 =
cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
return 0;
pcie_addr.u64 = 0;
pcie_addr.config.upper = 2;
pcie_addr.config.io = 1;
pcie_addr.config.did = 3;
pcie_addr.config.subdid = 1;
pcie_addr.config.es = 1;
pcie_addr.config.port = pcie_port;
pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
pcie_addr.config.bus = bus;
pcie_addr.config.dev = dev;
pcie_addr.config.func = fn;
pcie_addr.config.reg = reg;
return pcie_addr.u64;
}
/**
 * Read 8bits from a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus: Sub bus
 * @dev: Device ID
 * @fn: Device sub function
 * @reg: Register to access
 *
 * Returns Result of the read
 */
static uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev,
				      int fn, int reg)
{
	uint64_t addr = __cvmx_pcie_build_config_addr(pcie_port, bus, dev,
						      fn, reg);

	/* Nonexistent targets read as all-ones, like a PCI master abort. */
	return addr ? cvmx_read64_uint8(addr) : 0xff;
}
/**
 * Read 16bits from a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus: Sub bus
 * @dev: Device ID
 * @fn: Device sub function
 * @reg: Register to access
 *
 * Returns Result of the read
 */
static uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev,
					int fn, int reg)
{
	uint64_t addr = __cvmx_pcie_build_config_addr(pcie_port, bus, dev,
						      fn, reg);

	/* Nonexistent targets read as all-ones, like a PCI master abort. */
	return addr ? le16_to_cpu(cvmx_read64_uint16(addr)) : 0xffff;
}
/**
 * Read 32bits from a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus: Sub bus
 * @dev: Device ID
 * @fn: Device sub function
 * @reg: Register to access
 *
 * Returns Result of the read
 */
static uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev,
					int fn, int reg)
{
	uint64_t addr = __cvmx_pcie_build_config_addr(pcie_port, bus, dev,
						      fn, reg);

	/* Nonexistent targets read as all-ones, like a PCI master abort. */
	return addr ? le32_to_cpu(cvmx_read64_uint32(addr)) : 0xffffffff;
}
/**
 * Write 8bits to a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus: Sub bus
 * @dev: Device ID
 * @fn: Device sub function
 * @reg: Register to access
 * @val: Value to write
 */
static void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn,
				    int reg, uint8_t val)
{
	uint64_t addr = __cvmx_pcie_build_config_addr(pcie_port, bus, dev,
						      fn, reg);

	/* Writes to nonexistent targets are silently dropped. */
	if (addr)
		cvmx_write64_uint8(addr, val);
}
/**
 * Write 16bits to a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus: Sub bus
 * @dev: Device ID
 * @fn: Device sub function
 * @reg: Register to access
 * @val: Value to write
 */
static void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn,
				     int reg, uint16_t val)
{
	uint64_t addr = __cvmx_pcie_build_config_addr(pcie_port, bus, dev,
						      fn, reg);

	/* Writes to nonexistent targets are silently dropped. */
	if (addr)
		cvmx_write64_uint16(addr, cpu_to_le16(val));
}
/**
 * Write 32bits to a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus: Sub bus
 * @dev: Device ID
 * @fn: Device sub function
 * @reg: Register to access
 * @val: Value to write
 */
static void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn,
				     int reg, uint32_t val)
{
	uint64_t addr = __cvmx_pcie_build_config_addr(pcie_port, bus, dev,
						      fn, reg);

	/* Writes to nonexistent targets are silently dropped. */
	if (addr)
		cvmx_write64_uint32(addr, cpu_to_le32(val));
}
/**
 * Initialize the RC config space CSRs
 *
 * @pcie_port: PCIe port to initialize
 *
 * Programs payload/read-request sizes, error reporting, interrupt
 * enables, bus numbers and the RC BARs.  The memory-mapped and
 * prefetchable BARs are deliberately set with limit < base so they
 * decode nothing; inbound traffic is handled by the Octeon P2N BARs
 * configured by the caller.
 */
static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
{
	union cvmx_pciercx_cfg030 pciercx_cfg030;
	union cvmx_pciercx_cfg070 pciercx_cfg070;
	union cvmx_pciercx_cfg001 pciercx_cfg001;
	union cvmx_pciercx_cfg032 pciercx_cfg032;
	union cvmx_pciercx_cfg006 pciercx_cfg006;
	union cvmx_pciercx_cfg008 pciercx_cfg008;
	union cvmx_pciercx_cfg009 pciercx_cfg009;
	union cvmx_pciercx_cfg010 pciercx_cfg010;
	union cvmx_pciercx_cfg011 pciercx_cfg011;
	union cvmx_pciercx_cfg035 pciercx_cfg035;
	union cvmx_pciercx_cfg075 pciercx_cfg075;
	union cvmx_pciercx_cfg034 pciercx_cfg034;

	/* Max Payload Size (PCIE*_CFG030[MPS]) */
	/* Max Read Request Size (PCIE*_CFG030[MRRS]) */
	/* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
	/* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
	pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		pciercx_cfg030.s.mps = MPS_CN5XXX;
		pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
	} else {
		pciercx_cfg030.s.mps = MPS_CN6XXX;
		pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
	}
	/*
	 * Enable relaxed order processing. This will allow devices to
	 * affect read response ordering.
	 */
	pciercx_cfg030.s.ro_en = 1;
	/* Enable no snoop processing. Not used by Octeon */
	pciercx_cfg030.s.ns_en = 1;
	/* Correctable error reporting enable. */
	pciercx_cfg030.s.ce_en = 1;
	/* Non-fatal error reporting enable. */
	pciercx_cfg030.s.nfe_en = 1;
	/* Fatal error reporting enable. */
	pciercx_cfg030.s.fe_en = 1;
	/* Unsupported request reporting enable. */
	pciercx_cfg030.s.ur_en = 1;
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);

	/*
	 * The DMA engine's MPS/MRRS must agree with what was just
	 * programmed into CFG030; the register block differs by chip
	 * generation (NPEI on CN5XXX, DPI/SLI on CN6XXX).
	 */
	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
		union cvmx_npei_ctl_status2 npei_ctl_status2;
		/*
		 * Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match
		 * PCIE*_CFG030[MPS]. Max Read Request Size
		 * (NPEI_CTL_STATUS2[MRRS]) must not exceed
		 * PCIE*_CFG030[MRRS]
		 */
		npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
		/* Max payload size = 128 bytes for best Octeon DMA performance */
		npei_ctl_status2.s.mps = MPS_CN5XXX;
		/* Max read request size = 128 bytes for best Octeon DMA performance */
		npei_ctl_status2.s.mrrs = MRRS_CN5XXX;
		if (pcie_port)
			npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
		else
			npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */

		cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
	} else {
		/*
		 * Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match
		 * PCIE*_CFG030[MPS]. Max Read Request Size
		 * (DPI_SLI_PRTX_CFG[MRRS]) must not exceed
		 * PCIE*_CFG030[MRRS].
		 */
		union cvmx_dpi_sli_prtx_cfg prt_cfg;
		union cvmx_sli_s2m_portx_ctl sli_s2m_portx_ctl;
		prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
		prt_cfg.s.mps = MPS_CN6XXX;
		prt_cfg.s.mrrs = MRRS_CN6XXX;
		/* Max outstanding load request. */
		prt_cfg.s.molr = 32;
		cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

		sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
		sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
		cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
	}

	/* ECRC Generation (PCIE*_CFG070[GE,CE]) */
	pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
	pciercx_cfg070.s.ge = 1;	/* ECRC generation enable. */
	pciercx_cfg070.s.ce = 1;	/* ECRC check enable. */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);

	/*
	 * Access Enables (PCIE*_CFG001[MSAE,ME])
	 * ME and MSAE should always be set.
	 * Interrupt Disable (PCIE*_CFG001[I_DIS])
	 * System Error Message Enable (PCIE*_CFG001[SEE])
	 */
	pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
	pciercx_cfg001.s.msae = 1;	/* Memory space enable. */
	pciercx_cfg001.s.me = 1;	/* Bus master enable. */
	pciercx_cfg001.s.i_dis = 1;	/* INTx assertion disable. */
	pciercx_cfg001.s.see = 1;	/* SERR# enable */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);

	/* Advanced Error Recovery Message Enables */
	/* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
	/* Use CVMX_PCIERCX_CFG067 hardware default */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);

	/* Active State Power Management (PCIE*_CFG032[ASLPC]) */
	pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
	pciercx_cfg032.s.aslpc = 0;	/* Active state Link PM control. */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);

	/*
	 * Link Width Mode (PCIERCn_CFG452[LME]) - Set during
	 * cvmx_pcie_rc_initialize_link()
	 *
	 * Primary Bus Number (PCIERCn_CFG006[PBNUM])
	 *
	 * We set the primary bus number to 1 so IDT bridges are
	 * happy. They don't like zero.
	 */
	pciercx_cfg006.u32 = 0;
	pciercx_cfg006.s.pbnum = 1;
	pciercx_cfg006.s.sbnum = 1;
	pciercx_cfg006.s.subbnum = 1;
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);

	/*
	 * Memory-mapped I/O BAR (PCIERCn_CFG008)
	 * Most applications should disable the memory-mapped I/O BAR by
	 * setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR]
	 */
	pciercx_cfg008.u32 = 0;
	pciercx_cfg008.s.mb_addr = 0x100;
	pciercx_cfg008.s.ml_addr = 0;
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);

	/*
	 * Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011)
	 * Most applications should disable the prefetchable BAR by setting
	 * PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] <
	 * PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE]
	 */
	pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
	pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
	pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
	pciercx_cfg009.s.lmem_base = 0x100;
	pciercx_cfg009.s.lmem_limit = 0;
	pciercx_cfg010.s.umem_base = 0x100;
	pciercx_cfg011.s.umem_limit = 0;
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);

	/*
	 * System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE])
	 * PME Interrupt Enables (PCIERCn_CFG035[PMEIE])
	 */
	pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
	pciercx_cfg035.s.secee = 1;	/* System error on correctable error enable. */
	pciercx_cfg035.s.sefee = 1;	/* System error on fatal error enable. */
	pciercx_cfg035.s.senfee = 1;	/* System error on non-fatal error enable. */
	pciercx_cfg035.s.pmeie = 1;	/* PME interrupt enable. */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);

	/*
	 * Advanced Error Recovery Interrupt Enables
	 * (PCIERCn_CFG075[CERE,NFERE,FERE])
	 */
	pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
	pciercx_cfg075.s.cere = 1;	/* Correctable error reporting enable. */
	pciercx_cfg075.s.nfere = 1;	/* Non-fatal error reporting enable. */
	pciercx_cfg075.s.fere = 1;	/* Fatal error reporting enable. */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);

	/*
	 * HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN],
	 * PCIERCn_CFG034[DLLS_EN,CCINT_EN])
	 */
	pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
	pciercx_cfg034.s.hpint_en = 1;	/* Hot-plug interrupt enable. */
	pciercx_cfg034.s.dlls_en = 1;	/* Data Link Layer state changed enable */
	pciercx_cfg034.s.ccint_en = 1;	/* Command completed interrupt enable. */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
}
/**
 * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
 * port from reset to a link up state. Software can then begin
 * configuring the rest of the link.
 *
 * @pcie_port: PCIe port to initialize
 *
 * Returns Zero on success, -1 if the link never trains (likely an
 * empty slot) - the errors are reported via cvmx_dprintf().
 */
static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
{
	uint64_t start_cycle;
	union cvmx_pescx_ctl_status pescx_ctl_status;
	union cvmx_pciercx_cfg452 pciercx_cfg452;
	union cvmx_pciercx_cfg032 pciercx_cfg032;
	union cvmx_pciercx_cfg448 pciercx_cfg448;

	/* Set the lane width (derived from the QLM configuration) */
	pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
	pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
	if (pescx_ctl_status.s.qlm_cfg == 0)
		/* We're in 8 lane (56XX) or 4 lane (54XX) mode */
		pciercx_cfg452.s.lme = 0xf;
	else
		/* We're in 4 lane (56XX) or 2 lane (52XX) mode */
		pciercx_cfg452.s.lme = 0x7;
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);

	/*
	 * CN52XX pass 1.x has an errata where length mismatches on UR
	 * responses can cause bus errors on 64bit memory
	 * reads. Turning off length error checking fixes this.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		union cvmx_pciercx_cfg455 pciercx_cfg455;
		pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
		pciercx_cfg455.s.m_cpl_len_err = 1;
		cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
	}

	/* Lane swap needs to be manually enabled for CN52XX */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1)) {
		pescx_ctl_status.s.lane_swp = 1;
		cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
	}

	/* Bring up the link */
	pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
	pescx_ctl_status.s.lnk_enb = 1;
	cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

	/*
	 * CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to
	 * be disabled.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
		__cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

	/*
	 * Wait for the link to come up: poll the data link layer
	 * active bit (CFG032[DLLA]) with a 2-second timeout
	 * (2 * octeon_get_clock_rate() cycles).
	 */
	start_cycle = cvmx_get_cycle();
	do {
		if (cvmx_get_cycle() - start_cycle > 2 * octeon_get_clock_rate()) {
			cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
			return -1;
		}
		__delay(10000);
		pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
	} while (pciercx_cfg032.s.dlla == 0);

	/* Clear all pending errors */
	cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));

	/*
	 * Update the Replay Time Limit. Empirically, some PCIe
	 * devices take a little longer to respond than expected under
	 * load. As a workaround for this we configure the Replay Time
	 * Limit to the value expected for a 512 byte MPS instead of
	 * our actual 256 byte MPS. The numbers below are directly
	 * from the PCIe spec table 3-4.
	 */
	pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
	switch (pciercx_cfg032.s.nlw) {
	case 1:		/* 1 lane */
		pciercx_cfg448.s.rtl = 1677;
		break;
	case 2:		/* 2 lanes */
		pciercx_cfg448.s.rtl = 867;
		break;
	case 4:		/* 4 lanes */
		pciercx_cfg448.s.rtl = 462;
		break;
	case 8:		/* 8 lanes */
		pciercx_cfg448.s.rtl = 258;
		break;
	/* Other negotiated widths keep the hardware default RTL. */
	}
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);

	return 0;
}
/*
 * Bump the base-address bits of a SLI mem-access SUBID register.
 * CN68XX lays the ba field out differently from the other models,
 * so pick the matching union member.
 */
static void __cvmx_increment_ba(union cvmx_sli_mem_access_subidx *pmas)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		pmas->cn68xx.ba += 1;
	else
		pmas->s.ba += 1;
}
/**
* Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't
* enumerate the bus.
*
* @pcie_port: PCIe port to initialize
*
* Returns Zero on success
*/
static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
{
int i;
int base;
u64 addr_swizzle;
union cvmx_ciu_soft_prst ciu_soft_prst;
union cvmx_pescx_bist_status pescx_bist_status;
union cvmx_pescx_bist_status2 pescx_bist_status2;
union cvmx_npei_ctl_status npei_ctl_status;
union cvmx_npei_mem_access_ctl npei_mem_access_ctl;
union cvmx_npei_mem_access_subidx mem_access_subid;
union cvmx_npei_dbg_data npei_dbg_data;
union cvmx_pescx_ctl_status2 pescx_ctl_status2;
union cvmx_pciercx_cfg032 pciercx_cfg032;
union cvmx_npei_bar1_indexx bar1_index;
retry:
/*
* Make sure we aren't trying to setup a target mode interface
* in host mode.
*/
npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
if ((pcie_port == 0) && !npei_ctl_status.s.host_mode) {
cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
return -1;
}
/*
* Make sure a CN52XX isn't trying to bring up port 1 when it
* is disabled.
*/
if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
if ((pcie_port == 1) && npei_dbg_data.cn52xx.qlm0_link_width) {
cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
return -1;
}
}
/*
* PCIe switch arbitration mode. '0' == fixed priority NPEI,
* PCIe0, then PCIe1. '1' == round robin.
*/
npei_ctl_status.s.arb = 1;
/* Allow up to 0x20 config retries */
npei_ctl_status.s.cfg_rtry = 0x20;
/*
* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS
* don't reset.
*/
if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
npei_ctl_status.s.p0_ntags = 0x20;
npei_ctl_status.s.p1_ntags = 0x20;
}
cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
/* Bring the PCIe out of reset */
if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) {
/*
* The EBH5200 board swapped the PCIe reset lines on
* the board. As a workaround for this bug, we bring
* both PCIe ports out of reset at the same time
* instead of on separate calls. So for port 0, we
* bring both out of reset and do nothing on port 1
*/
if (pcie_port == 0) {
ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
/*
* After a chip reset the PCIe will also be in
* reset. If it isn't, most likely someone is
* trying to init it again without a proper
* PCIe reset.
*/
if (ciu_soft_prst.s.soft_prst == 0) {
/* Reset the ports */
ciu_soft_prst.s.soft_prst = 1;
cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
ciu_soft_prst.s.soft_prst = 1;
cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
/* Wait until pcie resets the ports. */
udelay(2000);
}
ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
ciu_soft_prst.s.soft_prst = 0;
cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
ciu_soft_prst.s.soft_prst = 0;
cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
}
} else {
/*
* The normal case: The PCIe ports are completely
* separate and can be brought out of reset
* independently.
*/
if (pcie_port)
ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
else
ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
/*
* After a chip reset the PCIe will also be in
* reset. If it isn't, most likely someone is trying
* to init it again without a proper PCIe reset.
*/
if (ciu_soft_prst.s.soft_prst == 0) {
/* Reset the port */
ciu_soft_prst.s.soft_prst = 1;
if (pcie_port)
cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
else
cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
/* Wait until pcie resets the ports. */
udelay(2000);
}
if (pcie_port) {
ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
ciu_soft_prst.s.soft_prst = 0;
cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
} else {
ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
ciu_soft_prst.s.soft_prst = 0;
cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
}
}
/*
* Wait for PCIe reset to complete. Due to errata PCIE-700, we
* don't poll PESCX_CTL_STATUS2[PCIERST], but simply wait a
* fixed number of cycles.
*/
__delay(400000);
/*
* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of
* CN56XX and CN52XX, so we only probe it on newer chips
*/
if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
/* Clear PCLK_RUN so we can check if the clock is running */
pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
pescx_ctl_status2.s.pclk_run = 1;
cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
/* Now that we cleared PCLK_RUN, wait for it to be set
* again telling us the clock is running
*/
if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
union cvmx_pescx_ctl_status2, pclk_run, ==, 1, 10000)) {
cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
return -1;
}
}
/*
* Check and make sure PCIe came out of reset. If it doesn't
* the board probably hasn't wired the clocks up and the
* interface should be skipped.
*/
pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
if (pescx_ctl_status2.s.pcierst) {
cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
return -1;
}
/*
* Check BIST2 status. If any bits are set skip this
* interface. This is an attempt to catch PCIE-813 on pass 1
* parts.
*/
pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
if (pescx_bist_status2.u64) {
cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n",
pcie_port);
return -1;
}
/* Check BIST status */
pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
if (pescx_bist_status.u64)
cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n",
pcie_port, CAST64(pescx_bist_status.u64));
/* Initialize the config space CSRs */
__cvmx_pcie_rc_initialize_config_space(pcie_port);
/* Bring the link up */
if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port)) {
cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n",
pcie_port);
return -1;
}
/* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
/* Setup Mem access SubDIDs */
mem_access_subid.u64 = 0;
mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */
mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
mem_access_subid.s.ba = 0; /* PCIe Address Bits <63:34>. */
/*
* Setup mem access 12-15 for port 0, 16-19 for port 1,
* supplying 36 bits of address space.
*/
for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) {
cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
}
/*
* Disable the peer to peer forwarding register. This must be
* setup by the OS after it enumerates the bus and assigns
* addresses to the PCIe busses.
*/
for (i = 0; i < 4; i++) {
cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
}
/* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
/* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
bar1_index.u32 = 0;
bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
bar1_index.s.ca = 1; /* Not Cached */
bar1_index.s.end_swp = 1; /* Endian Swap mode */
bar1_index.s.addr_v = 1; /* Valid entry */
base = pcie_port ? 16 : 0;
/* Big endian swizzle for 32-bit PEXP_NCB register. */
#ifdef __MIPSEB__
addr_swizzle = 4;
#else
addr_swizzle = 0;
#endif
for (i = 0; i < 16; i++) {
cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle),
bar1_index.u32);
base++;
/* 256MB / 16 >> 22 == 4 */
bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
}
/*
* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take
* precedence where they overlap. It also overlaps with the
* device addresses, so make sure the peer to peer forwarding
* is set right.
*/
cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
/*
* Setup BAR2 attributes
*
* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM])
* - PTLP_RO,CTLP_RO should normally be set (except for debug).
* - WAIT_COM=0 will likely work for all applications.
*
* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]).
*/
if (pcie_port) {
union cvmx_npei_ctl_port1 npei_ctl_port;
npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
npei_ctl_port.s.bar2_enb = 1;
npei_ctl_port.s.bar2_esx = 1;
npei_ctl_port.s.bar2_cax = 0;
npei_ctl_port.s.ptlp_ro = 1;
npei_ctl_port.s.ctlp_ro = 1;
npei_ctl_port.s.wait_com = 0;
npei_ctl_port.s.waitl_com = 0;
cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
} else {
union cvmx_npei_ctl_port0 npei_ctl_port;
npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
npei_ctl_port.s.bar2_enb = 1;
npei_ctl_port.s.bar2_esx = 1;
npei_ctl_port.s.bar2_cax = 0;
npei_ctl_port.s.ptlp_ro = 1;
npei_ctl_port.s.ctlp_ro = 1;
npei_ctl_port.s.wait_com = 0;
npei_ctl_port.s.waitl_com = 0;
cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
}
/*
* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata
* that causes TLP ordering to not be preserved after multiple
* PCIe port resets. This code detects this fault and corrects
* it by aligning the TLP counters properly. Another link
* reset is then performed. See PCIE-13340
*/
if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) ||
OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
union cvmx_npei_dbg_data dbg_data;
int old_in_fif_p_count;
int in_fif_p_count;
int out_p_count;
int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
int i;
/*
* Choose a write address of 1MB. It should be
* harmless as all bars haven't been setup.
*/
uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
/*
* Make sure at least in_p_offset have been executed before we try and
* read in_fif_p_count
*/
i = in_p_offset;
while (i--) {
cvmx_write64_uint32(write_address, 0);
__delay(10000);
}
/*
* Read the IN_FIF_P_COUNT from the debug
* select. IN_FIF_P_COUNT can be unstable sometimes so
* read it twice with a write between the reads. This
* way we can tell the value is good as it will
* increment by one due to the write
*/
cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
do {
dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
old_in_fif_p_count = dbg_data.s.data & 0xff;
cvmx_write64_uint32(write_address, 0);
__delay(10000);
dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
in_fif_p_count = dbg_data.s.data & 0xff;
} while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
/* Update in_fif_p_count for it's offset with respect to out_p_count */
in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
/* Read the OUT_P_COUNT from the debug select */
cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
out_p_count = (dbg_data.s.data>>1) & 0xff;
/* Check that the two counters are aligned */
if (out_p_count != in_fif_p_count) {
cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
while (in_fif_p_count != 0) {
cvmx_write64_uint32(write_address, 0);
__delay(10000);
in_fif_p_count = (in_fif_p_count + 1) & 0xff;
}
/*
* The EBH5200 board swapped the PCIe reset
* lines on the board. This means we must
* bring both links down and up, which will
* cause the PCIe0 to need alignment
* again. Lots of messages will be displayed,
* but everything should work
*/
if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
(pcie_port == 1))
cvmx_pcie_rc_initialize(0);
/* Rety bringing this port up */
goto retry;
}
}
/* Display the link status */
pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
return 0;
}
/**
* Initialize a host mode PCIe gen 2 link. This function takes a PCIe
* port from reset to a link up state. Software can then begin
* configuring the rest of the link.
*
* @pcie_port: PCIe port to initialize
*
* Return Zero on success.
*/
static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
{
uint64_t start_cycle;
union cvmx_pemx_ctl_status pem_ctl_status;
union cvmx_pciercx_cfg032 pciercx_cfg032;
union cvmx_pciercx_cfg448 pciercx_cfg448;
/* Bring up the link */
pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
pem_ctl_status.s.lnk_enb = 1;
cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
/* Wait for the link to come up */
start_cycle = cvmx_get_cycle();
do {
if (cvmx_get_cycle() - start_cycle > octeon_get_clock_rate())
return -1;
__delay(10000);
pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
} while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1));
/*
* Update the Replay Time Limit. Empirically, some PCIe
* devices take a little longer to respond than expected under
* load. As a workaround for this we configure the Replay Time
* Limit to the value expected for a 512 byte MPS instead of
* our actual 256 byte MPS. The numbers below are directly
* from the PCIe spec table 3-4
*/
pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
switch (pciercx_cfg032.s.nlw) {
case 1: /* 1 lane */
pciercx_cfg448.s.rtl = 1677;
break;
case 2: /* 2 lanes */
pciercx_cfg448.s.rtl = 867;
break;
case 4: /* 4 lanes */
pciercx_cfg448.s.rtl = 462;
break;
case 8: /* 8 lanes */
pciercx_cfg448.s.rtl = 258;
break;
}
cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
return 0;
}
/**
 * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
 * the bus.
 *
 * @pcie_port: PCIe port to initialize
 *
 * Returns Zero on success, -1 if the port is not usable (wrong QLM mode,
 * endpoint mode, stuck in reset, or the link never comes up).
 */
static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
{
	int i;
	union cvmx_ciu_soft_prst ciu_soft_prst;
	union cvmx_mio_rst_ctlx mio_rst_ctl;
	union cvmx_pemx_bar_ctl pemx_bar_ctl;
	union cvmx_pemx_ctl_status pemx_ctl_status;
	union cvmx_pemx_bist_status pemx_bist_status;
	union cvmx_pemx_bist_status2 pemx_bist_status2;
	union cvmx_pciercx_cfg032 pciercx_cfg032;
	union cvmx_pciercx_cfg515 pciercx_cfg515;
	union cvmx_sli_ctl_portx sli_ctl_portx;
	union cvmx_sli_mem_access_ctl sli_mem_access_ctl;
	union cvmx_sli_mem_access_subidx mem_access_subid;
	union cvmx_sriox_status_reg sriox_status_reg;
	union cvmx_pemx_bar1_indexx bar1_index;

	/*
	 * The QLM serving this port may be wired for SRIO/SGMII/XAUI
	 * instead of PCIe; bail out early in that case.
	 */
	if (octeon_has_feature(OCTEON_FEATURE_SRIO)) {
		/* Make sure this interface isn't SRIO */
		if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
			/*
			 * The CN66XX requires reading the
			 * MIO_QLMX_CFG register to figure out the
			 * port type.
			 */
			union cvmx_mio_qlmx_cfg qlmx_cfg;
			qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(pcie_port));

			if (qlmx_cfg.s.qlm_spd == 15) {
				pr_notice("PCIe: Port %d is disabled, skipping.\n", pcie_port);
				return -1;
			}

			switch (qlmx_cfg.s.qlm_spd) {
			case 0x1: /* SRIO 1x4 short */
			case 0x3: /* SRIO 1x4 long */
			case 0x4: /* SRIO 2x2 short */
			case 0x6: /* SRIO 2x2 long */
				pr_notice("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
				return -1;
			case 0x9: /* SGMII */
				pr_notice("PCIe: Port %d is SGMII, skipping.\n", pcie_port);
				return -1;
			case 0xb: /* XAUI */
				pr_notice("PCIe: Port %d is XAUI, skipping.\n", pcie_port);
				return -1;
			case 0x0: /* PCIE gen2 */
			case 0x8: /* PCIE gen2 (alias) */
			case 0x2: /* PCIE gen1 */
			case 0xa: /* PCIE gen1 (alias) */
				break;
			default:
				pr_notice("PCIe: Port %d is unknown, skipping.\n", pcie_port);
				return -1;
			}
		} else {
			sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(pcie_port));
			if (sriox_status_reg.s.srio) {
				pr_notice("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
				return -1;
			}
		}
	}

#if 0
	/* This code is so that the PCIe analyzer is able to see 63XX traffic */
	pr_notice("PCIE : init for pcie analyzer.\n");
	cvmx_helper_qlm_jtag_init();
	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
	cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
	cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
	cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
	cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
	cvmx_helper_qlm_jtag_update(pcie_port);
#endif

	/* Make sure we aren't trying to setup a target mode interface in host mode */
	mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
	if (!mio_rst_ctl.s.host_mode) {
		pr_notice("PCIe: Port %d in endpoint mode.\n", pcie_port);
		return -1;
	}

	/* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
		if (pcie_port) {
			union cvmx_ciu_qlm ciu_qlm;
			ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
			ciu_qlm.s.txbypass = 1;
			ciu_qlm.s.txdeemph = 5;
			ciu_qlm.s.txmargin = 0x17;
			cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
		} else {
			union cvmx_ciu_qlm ciu_qlm;
			ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
			ciu_qlm.s.txbypass = 1;
			ciu_qlm.s.txdeemph = 5;
			ciu_qlm.s.txmargin = 0x17;
			cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
		}
	}

	/* Bring the PCIe out of reset */
	if (pcie_port)
		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
	else
		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
	/*
	 * After a chip reset the PCIe will also be in reset. If it
	 * isn't, most likely someone is trying to init it again
	 * without a proper PCIe reset
	 */
	if (ciu_soft_prst.s.soft_prst == 0) {
		/* Reset the port */
		ciu_soft_prst.s.soft_prst = 1;
		if (pcie_port)
			cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
		else
			cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
		/* Wait until pcie resets the ports. */
		udelay(2000);
	}
	/* Deassert the soft reset to let the port come up */
	if (pcie_port) {
		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
		ciu_soft_prst.s.soft_prst = 0;
		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
	} else {
		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
		ciu_soft_prst.s.soft_prst = 0;
		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
	}

	/* Wait for PCIe reset to complete */
	udelay(1000);

	/*
	 * Check and make sure PCIe came out of reset. If it doesn't
	 * the board probably hasn't wired the clocks up and the
	 * interface should be skipped.
	 */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_RST_CTLX(pcie_port), union cvmx_mio_rst_ctlx, rst_done, ==, 1, 10000)) {
		pr_notice("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
		return -1;
	}

	/* Check BIST status; failures are reported but not fatal here */
	pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
	if (pemx_bist_status.u64)
		pr_notice("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
	pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
	/* Errata PCIE-14766 may cause the lower 6 bits to be randomly set on CN63XXp1 */
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
		pemx_bist_status2.u64 &= ~0x3full;
	if (pemx_bist_status2.u64)
		pr_notice("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));

	/* Initialize the config space CSRs */
	__cvmx_pcie_rc_initialize_config_space(pcie_port);

	/* Enable gen2 speed selection */
	pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
	pciercx_cfg515.s.dsc = 1;
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);

	/* Bring the link up */
	if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) {
		/*
		 * Some gen1 devices don't handle the gen 2 training
		 * correctly. Disable gen2 and try again with only
		 * gen1
		 */
		union cvmx_pciercx_cfg031 pciercx_cfg031;
		pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
		pciercx_cfg031.s.mls = 1;
		cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u32);
		if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) {
			pr_notice("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
			return -1;
		}
	}

	/* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
	sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
	sli_mem_access_ctl.s.max_word = 0;	/* Allow 16 words to combine */
	sli_mem_access_ctl.s.timer = 127;	/* Wait up to 127 cycles for more data */
	cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);

	/* Setup Mem access SubDIDs */
	mem_access_subid.u64 = 0;
	mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
	mem_access_subid.s.nmerge = 0;	/* Allow merging as it works on CN6XXX. */
	mem_access_subid.s.esr = 1;	/* Endian-swap for Reads. */
	mem_access_subid.s.esw = 1;	/* Endian-swap for Writes. */
	mem_access_subid.s.wtype = 0;	/* "No snoop" and "Relaxed ordering" are not set */
	mem_access_subid.s.rtype = 0;	/* "No snoop" and "Relaxed ordering" are not set */
	/* PCIe Address Bits <63:34>. */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		mem_access_subid.cn68xx.ba = 0;
	else
		mem_access_subid.s.ba = 0;

	/*
	 * Setup mem access 12-15 for port 0, 16-19 for port 1,
	 * supplying 36 bits of address space.
	 */
	for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) {
		cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
		/* Set each SUBID to extend the addressable range */
		__cvmx_increment_ba(&mem_access_subid);
	}

	/*
	 * Disable the peer to peer forwarding register. This must be
	 * setup by the OS after it enumerates the bus and assigns
	 * addresses to the PCIe busses.
	 */
	for (i = 0; i < 4; i++) {
		cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
		cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
	}

	/* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
	cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);

	/*
	 * Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take
	 * precedence where they overlap. It also overlaps with the
	 * device addresses, so make sure the peer to peer forwarding
	 * is set right.
	 */
	cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);

	/*
	 * Setup BAR2 attributes
	 * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM])
	 * - PTLP_RO,CTLP_RO should normally be set (except for debug).
	 * - WAIT_COM=0 will likely work for all applications.
	 * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM])
	 */
	pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
	pemx_bar_ctl.s.bar1_siz = 3;  /* 256MB BAR1 */
	pemx_bar_ctl.s.bar2_enb = 1;
	pemx_bar_ctl.s.bar2_esx = 1;
	pemx_bar_ctl.s.bar2_cax = 0;
	cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
	sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
	sli_ctl_portx.s.ptlp_ro = 1;
	sli_ctl_portx.s.ctlp_ro = 1;
	sli_ctl_portx.s.wait_com = 0;
	sli_ctl_portx.s.waitl_com = 0;
	cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);

	/* BAR1 follows BAR2 */
	cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);

	/* Program the 16 BAR1 index entries; each covers 256MB/16 = 16MB */
	bar1_index.u64 = 0;
	bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
	bar1_index.s.ca = 1;	   /* Not Cached */
	bar1_index.s.end_swp = 1;  /* Endian Swap mode */
	bar1_index.s.addr_v = 1;   /* Valid entry */

	for (i = 0; i < 16; i++) {
		cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
		/* 256MB / 16 >> 22 == 4 */
		bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
	}

	/*
	 * Allow config retries for 250ms. Count is based off the 5Ghz
	 * SERDES clock.
	 */
	pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
	pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
	cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);

	/* Display the link status */
	pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
	pr_notice("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);

	return 0;
}
/**
 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
 *
 * @pcie_port: PCIe port to initialize
 *
 * Returns Zero on success
 */
static int cvmx_pcie_rc_initialize(int pcie_port)
{
	/* NPEI-based chips use the gen1 bringup path, others gen2 */
	if (octeon_has_feature(OCTEON_FEATURE_NPEI))
		return __cvmx_pcie_rc_initialize_gen1(pcie_port);
	return __cvmx_pcie_rc_initialize_gen2(pcie_port);
}
/* Above was cvmx-pcie.c, below original pcie.c */
/**
 * Map a PCI device to the appropriate interrupt line
 *
 * @dev: The Linux PCI device structure for the device to map
 * @slot: The slot number for this device on __BUS 0__. Linux
 *	  enumerates through all the bridges and figures out the
 *	  slot on Bus 0 where this device eventually hooks to.
 * @pin: The PCI interrupt pin read from the device, then swizzled
 *	 as it goes through each bridge.
 * Returns Interrupt number for the device
 */
int octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	/*
	 * The EBH5600 board with the PCI to PCIe bridge mistakenly
	 * wires the first slot for both device id 2 and interrupt
	 * A. According to the PCI spec, device id 2 should be C. The
	 * following kludge attempts to fix this.
	 */
	if (strstr(octeon_board_type_string(), "EBH5600") &&
	    dev->bus && dev->bus->parent) {
		/*
		 * Iterate all the way up the device chain and find
		 * the root bus.
		 */
		while (dev->bus && dev->bus->parent)
			dev = to_pci_dev(dev->bus->bridge);
		/*
		 * If the root bus is number 1 (bus 0 is consumed by
		 * the dummy controller registered in
		 * octeon_pcie_setup()) and the PEX 8114 is the root,
		 * assume we are behind the miswired bus. We need to
		 * correct the swizzle level by two. Yuck.
		 */
		if ((dev->bus->number == 1) &&
		    (dev->vendor == 0x10b5) && (dev->device == 0x8114)) {
			/*
			 * The pin field is one based, not zero. We
			 * need to swizzle it by minus two.
			 */
			pin = ((pin - 3) & 3) + 1;
		}
	}
	/*
	 * The -1 is because pin starts with one, not zero. It might
	 * be that this equation needs to include the slot number, but
	 * I don't have hardware to check that against.
	 */
	return pin - 1 + OCTEON_IRQ_PCI_INT0;
}
/* Restore the PEM1 config-read retry counter (CFG_RTRY) */
static void set_cfg_read_retry(u32 retry_cnt)
{
	union cvmx_pemx_ctl_status ctl;

	ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1));
	ctl.s.cfg_rtry = retry_cnt;
	cvmx_write_csr(CVMX_PEMX_CTL_STATUS(1), ctl.u64);
}
static u32 disable_cfg_read_retry(void)
{
u32 retry_cnt;
union cvmx_pemx_ctl_status pemx_ctl;
pemx_ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1));
retry_cnt = pemx_ctl.s.cfg_rtry;
pemx_ctl.s.cfg_rtry = 0;
cvmx_write_csr(CVMX_PEMX_CTL_STATUS(1), pemx_ctl.u64);
return retry_cnt;
}
static int is_cfg_retry(void)
{
union cvmx_pemx_int_sum pemx_int_sum;
pemx_int_sum.u64 = cvmx_read_csr(CVMX_PEMX_INT_SUM(1));
if (pemx_int_sum.s.crs_dr)
return 1;
return 0;
}
/*
 * Read a value from configuration space
 *
 * Returns PCIBIOS_SUCCESSFUL on success, PCIBIOS_FUNC_NOT_SUPPORTED
 * for accesses that are filtered out by the errata workarounds below.
 */
static int octeon_pcie_read_config(unsigned int pcie_port, struct pci_bus *bus,
				   unsigned int devfn, int reg, int size,
				   u32 *val)
{
	union octeon_cvmemctl cvmmemctl;
	union octeon_cvmemctl cvmmemctl_save;
	int bus_number = bus->number;
	int cfg_retry = 0;
	int retry_cnt = 0;
	int max_retry_cnt = 10;
	u32 cfg_retry_cnt = 0;

	cvmmemctl_save.u64 = 0;
	BUG_ON(pcie_port >= ARRAY_SIZE(enable_pcie_bus_num_war));
	/*
	 * For the top level bus make sure our hardware bus number
	 * matches the software one
	 */
	if (bus->parent == NULL) {
		if (enable_pcie_bus_num_war[pcie_port])
			bus_number = 0;
		else {
			union cvmx_pciercx_cfg006 pciercx_cfg006;
			pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port,
					     CVMX_PCIERCX_CFG006(pcie_port));
			if (pciercx_cfg006.s.pbnum != bus_number) {
				pciercx_cfg006.s.pbnum = bus_number;
				pciercx_cfg006.s.sbnum = bus_number;
				pciercx_cfg006.s.subbnum = bus_number;
				cvmx_pcie_cfgx_write(pcie_port,
					    CVMX_PCIERCX_CFG006(pcie_port),
					    pciercx_cfg006.u32);
			}
		}
	}

	/*
	 * PCIe only has a single device connected to Octeon. It is
	 * always device ID 0. Don't bother doing reads for other
	 * device IDs on the first segment.
	 */
	if ((bus->parent == NULL) && (devfn >> 3 != 0))
		return PCIBIOS_FUNC_NOT_SUPPORTED;

	/*
	 * The following is a workaround for the CN57XX, CN56XX,
	 * CN55XX, and CN54XX errata with PCIe config reads from non
	 * existent devices. These chips will hang the PCIe link if a
	 * config read is performed that causes a UR response.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1)) {
		/*
		 * For our EBH5600 board, port 0 has a bridge with two
		 * PCI-X slots. We need a new special checks to make
		 * sure we only probe valid stuff. The PCIe->PCI-X
		 * bridge only respondes to device ID 0, function
		 * 0-1
		 */
		if ((bus->parent == NULL) && (devfn >= 2))
			return PCIBIOS_FUNC_NOT_SUPPORTED;
		/*
		 * The PCI-X slots are device ID 2,3. Choose one of
		 * the below "if" blocks based on what is plugged into
		 * the board.
		 */
#if 1
		/* Use this option if you aren't using either slot */
		if (bus_number == 2)
			return PCIBIOS_FUNC_NOT_SUPPORTED;
#elif 0
		/*
		 * Use this option if you are using the first slot but
		 * not the second.
		 */
		if ((bus_number == 2) && (devfn >> 3 != 2))
			return PCIBIOS_FUNC_NOT_SUPPORTED;
#elif 0
		/*
		 * Use this option if you are using the second slot
		 * but not the first.
		 */
		if ((bus_number == 2) && (devfn >> 3 != 3))
			return PCIBIOS_FUNC_NOT_SUPPORTED;
#elif 0
		/* Use this opion if you are using both slots */
		if ((bus_number == 2) &&
		    !((devfn == (2 << 3)) || (devfn == (3 << 3))))
			return PCIBIOS_FUNC_NOT_SUPPORTED;
#endif
		/* The following #if gives a more complicated example. This is
		   the required checks for running a Nitrox CN16XX-NHBX in the
		   slot of the EBH5600. This card has a PLX PCIe bridge with
		   four Nitrox PLX parts behind it */
#if 0
		/* PLX bridge with 4 ports */
		if ((bus_number == 4) &&
		    !((devfn >> 3 >= 1) && (devfn >> 3 <= 4)))
			return PCIBIOS_FUNC_NOT_SUPPORTED;
		/* Nitrox behind PLX 1 */
		if ((bus_number == 5) && (devfn >> 3 != 0))
			return PCIBIOS_FUNC_NOT_SUPPORTED;
		/* Nitrox behind PLX 2 */
		if ((bus_number == 6) && (devfn >> 3 != 0))
			return PCIBIOS_FUNC_NOT_SUPPORTED;
		/* Nitrox behind PLX 3 */
		if ((bus_number == 7) && (devfn >> 3 != 0))
			return PCIBIOS_FUNC_NOT_SUPPORTED;
		/* Nitrox behind PLX 4 */
		if ((bus_number == 8) && (devfn >> 3 != 0))
			return PCIBIOS_FUNC_NOT_SUPPORTED;
#endif
		/*
		 * Shorten the DID timeout so bus errors for PCIe
		 * config reads from non existent devices happen
		 * faster. This allows us to continue booting even if
		 * the above "if" checks are wrong. Once one of these
		 * errors happens, the PCIe port is dead.
		 */
		cvmmemctl_save.u64 = __read_64bit_c0_register($11, 7);
		cvmmemctl.u64 = cvmmemctl_save.u64;
		cvmmemctl.s.didtto = 2;
		__write_64bit_c0_register($11, 7, cvmmemctl.u64);
	}

	/* CN63XX errata PCIE-14459: disable hardware config retries and
	   handle them in software via the loop below */
	if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) && (enable_pcie_14459_war))
		cfg_retry_cnt = disable_cfg_read_retry();

	pr_debug("pcie_cfg_rd port=%d b=%d devfn=0x%03x reg=0x%03x"
		 " size=%d ", pcie_port, bus_number, devfn, reg, size);
	do {
		switch (size) {
		case 4:
			*val = cvmx_pcie_config_read32(pcie_port, bus_number,
				devfn >> 3, devfn & 0x7, reg);
			break;
		case 2:
			*val = cvmx_pcie_config_read16(pcie_port, bus_number,
				devfn >> 3, devfn & 0x7, reg);
			break;
		case 1:
			*val = cvmx_pcie_config_read8(pcie_port, bus_number,
				devfn >> 3, devfn & 0x7, reg);
			break;
		default:
			if (OCTEON_IS_MODEL(OCTEON_CN63XX))
				set_cfg_read_retry(cfg_retry_cnt);
			return PCIBIOS_FUNC_NOT_SUPPORTED;
		}
		/* Retry the read (bounded by max_retry_cnt) while the
		   hardware reports a config-retry status */
		if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) &&
		    (enable_pcie_14459_war)) {
			cfg_retry = is_cfg_retry();
			retry_cnt++;
			if (retry_cnt > max_retry_cnt) {
				pr_err(" pcie cfg_read retries failed. retry_cnt=%d\n",
				       retry_cnt);
				cfg_retry = 0;
			}
		}
	} while (cfg_retry);

	if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) && (enable_pcie_14459_war))
		set_cfg_read_retry(cfg_retry_cnt);
	pr_debug("val=%08x  : tries=%02d\n", *val, retry_cnt);
	/* Restore the saved DID timeout on the affected CN56XX passes */
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1))
		write_c0_cvmmemctl(cvmmemctl_save.u64);
	return PCIBIOS_SUCCESSFUL;
}
/* pci_ops .read hook for PCIe port 0 */
static int octeon_pcie0_read_config(struct pci_bus *bus, unsigned int devfn,
				    int reg, int size, u32 *val)
{
	return octeon_pcie_read_config(0, bus, devfn, reg, size, val);
}
/* pci_ops .read hook for PCIe port 1 */
static int octeon_pcie1_read_config(struct pci_bus *bus, unsigned int devfn,
				    int reg, int size, u32 *val)
{
	return octeon_pcie_read_config(1, bus, devfn, reg, size, val);
}
/* .read hook for the dummy controller that swallows bus 0; always fails */
static int octeon_dummy_read_config(struct pci_bus *bus, unsigned int devfn,
				    int reg, int size, u32 *val)
{
	return PCIBIOS_FUNC_NOT_SUPPORTED;
}
/*
 * Write a value to PCI configuration space
 */
static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus,
				    unsigned int devfn, int reg,
				    int size, u32 val)
{
	int busno = bus->number;
	unsigned int device = devfn >> 3;
	unsigned int func = devfn & 0x7;

	BUG_ON(pcie_port >= ARRAY_SIZE(enable_pcie_bus_num_war));

	/* Top-level bus may need renumbering to 0 (IDT bridge workaround) */
	if ((bus->parent == NULL) && (enable_pcie_bus_num_war[pcie_port]))
		busno = 0;

	pr_debug("pcie_cfg_wr port=%d b=%d devfn=0x%03x"
		 " reg=0x%03x size=%d val=%08x\n", pcie_port, busno, devfn,
		 reg, size, val);

	if (size == 4)
		cvmx_pcie_config_write32(pcie_port, busno, device, func,
					 reg, val);
	else if (size == 2)
		cvmx_pcie_config_write16(pcie_port, busno, device, func,
					 reg, val);
	else if (size == 1)
		cvmx_pcie_config_write8(pcie_port, busno, device, func,
					reg, val);
	else
		return PCIBIOS_FUNC_NOT_SUPPORTED;

	return PCIBIOS_SUCCESSFUL;
}
/* pci_ops .write hook for PCIe port 0 */
static int octeon_pcie0_write_config(struct pci_bus *bus, unsigned int devfn,
				     int reg, int size, u32 val)
{
	return octeon_pcie_write_config(0, bus, devfn, reg, size, val);
}
/* pci_ops .write hook for PCIe port 1 */
static int octeon_pcie1_write_config(struct pci_bus *bus, unsigned int devfn,
				     int reg, int size, u32 val)
{
	return octeon_pcie_write_config(1, bus, devfn, reg, size, val);
}
/* .write hook for the dummy controller that swallows bus 0; always fails */
static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
				     int reg, int size, u32 val)
{
	return PCIBIOS_FUNC_NOT_SUPPORTED;
}
/* Config accessors, resources and controller for PCIe port 0 */
static struct pci_ops octeon_pcie0_ops = {
	.read = octeon_pcie0_read_config,
	.write = octeon_pcie0_write_config,
};

static struct resource octeon_pcie0_mem_resource = {
	.name = "Octeon PCIe0 MEM",
	.flags = IORESOURCE_MEM,
};

static struct resource octeon_pcie0_io_resource = {
	.name = "Octeon PCIe0 IO",
	.flags = IORESOURCE_IO,
};

static struct pci_controller octeon_pcie0_controller = {
	.pci_ops = &octeon_pcie0_ops,
	.mem_resource = &octeon_pcie0_mem_resource,
	.io_resource = &octeon_pcie0_io_resource,
};

/* Config accessors, resources and controller for PCIe port 1 */
static struct pci_ops octeon_pcie1_ops = {
	.read = octeon_pcie1_read_config,
	.write = octeon_pcie1_write_config,
};

static struct resource octeon_pcie1_mem_resource = {
	.name = "Octeon PCIe1 MEM",
	.flags = IORESOURCE_MEM,
};

static struct resource octeon_pcie1_io_resource = {
	.name = "Octeon PCIe1 IO",
	.flags = IORESOURCE_IO,
};

static struct pci_controller octeon_pcie1_controller = {
	.pci_ops = &octeon_pcie1_ops,
	.mem_resource = &octeon_pcie1_mem_resource,
	.io_resource = &octeon_pcie1_io_resource,
};

/*
 * Dummy controller registered first so the kernel assigns it bus 0;
 * IDT bridges misbehave when their primary bus number is zero.
 */
static struct pci_ops octeon_dummy_ops = {
	.read = octeon_dummy_read_config,
	.write = octeon_dummy_write_config,
};

static struct resource octeon_dummy_mem_resource = {
	.name = "Virtual PCIe MEM",
	.flags = IORESOURCE_MEM,
};

static struct resource octeon_dummy_io_resource = {
	.name = "Virtual PCIe IO",
	.flags = IORESOURCE_IO,
};

static struct pci_controller octeon_dummy_controller = {
	.pci_ops = &octeon_dummy_ops,
	.mem_resource = &octeon_dummy_mem_resource,
	.io_resource = &octeon_dummy_io_resource,
};
/*
 * Return 1 when the device identified by a config-space dword read at
 * offset 0 (vendor ID in the low 16 bits) needs the bus-number
 * workaround; IDT bridges require it.
 */
static int device_needs_bus_num_war(uint32_t deviceid)
{
#define IDT_VENDOR_ID 0x111d
	return (deviceid & 0xffff) == IDT_VENDOR_ID;
}
/**
 * Initialize the Octeon PCIe controllers
 *
 * Returns zero; ports that fail to initialize are simply skipped.
 */
static int __init octeon_pcie_setup(void)
{
	int result;
	int host_mode;
	int srio_war15205 = 0, port;
	union cvmx_sli_ctl_portx sli_ctl_portx;
	union cvmx_sriox_status_reg sriox_status_reg;

	/* These chips don't have PCIe */
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE))
		return 0;

	/* No PCIe simulation */
	if (octeon_is_simulation())
		return 0;

	/* Disable PCI if instructed on the command line */
	if (pcie_disable)
		return 0;

	/* Point pcibios_map_irq() to the PCIe version of it */
	octeon_pcibios_map_irq = octeon_pcie_pcibios_map_irq;

	/*
	 * PCIe I/O range. It is based on port 0 but includes up until
	 * port 1's end.
	 */
	set_io_port_base(CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0)));
	ioport_resource.start = 0;
	ioport_resource.end =
		cvmx_pcie_get_io_base_address(1) -
		cvmx_pcie_get_io_base_address(0) + cvmx_pcie_get_io_size(1) - 1;

	/*
	 * Create a dummy PCIe controller to swallow up bus 0. IDT bridges
	 * don't work if the primary bus number is zero. Here we add a fake
	 * PCIe controller that the kernel will give bus 0. This allows
	 * us to not change the normal kernel bus enumeration
	 */
	octeon_dummy_controller.io_map_base = -1;
	octeon_dummy_controller.mem_resource->start = (1ull<<48);
	octeon_dummy_controller.mem_resource->end = (1ull<<48);
	register_pci_controller(&octeon_dummy_controller);

	/* Determine host/endpoint mode and the DMA BAR style for port 0 */
	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
		union cvmx_npei_ctl_status npei_ctl_status;
		npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
		host_mode = npei_ctl_status.s.host_mode;
		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE;
	} else {
		union cvmx_mio_rst_ctlx mio_rst_ctl;
		mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(0));
		host_mode = mio_rst_ctl.s.host_mode;
		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE2;
	}

	if (host_mode) {
		pr_notice("PCIe: Initializing port 0\n");
		/* CN63XX pass 1_x/2.0 errata PCIe-15205 */
		if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
			OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
			sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(0));
			if (sriox_status_reg.s.srio) {
				srio_war15205 += 1;	 /* Port is SRIO */
				port = 0;
			}
		}
		result = cvmx_pcie_rc_initialize(0);
		if (result == 0) {
			uint32_t device0;
			/* Memory offsets are physical addresses */
			octeon_pcie0_controller.mem_offset =
				cvmx_pcie_get_mem_base_address(0);
			/* IO offsets are Mips virtual addresses */
			octeon_pcie0_controller.io_map_base =
				CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address
						(0));
			octeon_pcie0_controller.io_offset = 0;
			/*
			 * To keep things similar to PCI, we start
			 * device addresses at the same place as PCI
			 * uisng big bar support. This normally
			 * translates to 4GB-256MB, which is the same
			 * as most x86 PCs.
			 */
			octeon_pcie0_controller.mem_resource->start =
				cvmx_pcie_get_mem_base_address(0) +
				(4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20);
			octeon_pcie0_controller.mem_resource->end =
				cvmx_pcie_get_mem_base_address(0) +
				cvmx_pcie_get_mem_size(0) - 1;
			/*
			 * Ports must be above 16KB for the ISA bus
			 * filtering in the PCI-X to PCI bridge.
			 * NOTE(review): 4 << 10 is 4KB, not 16KB as
			 * the comment above says -- confirm intended
			 * threshold.
			 */
			octeon_pcie0_controller.io_resource->start = 4 << 10;
			octeon_pcie0_controller.io_resource->end =
				cvmx_pcie_get_io_size(0) - 1;
			msleep(100); /* Some devices need extra time */
			register_pci_controller(&octeon_pcie0_controller);
			device0 = cvmx_pcie_config_read32(0, 0, 0, 0, 0);
			enable_pcie_bus_num_war[0] =
				device_needs_bus_num_war(device0);
		}
	} else {
		pr_notice("PCIe: Port 0 in endpoint mode, skipping.\n");
		/* CN63XX pass 1_x/2.0 errata PCIe-15205 */
		if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
			OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
			srio_war15205 += 1;
			port = 0;
		}
	}

	/* Determine host/endpoint mode for port 1 */
	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
		host_mode = 1;
		/* Skip the 2nd port on CN52XX if port 0 is in 4 lane mode */
		if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
			union cvmx_npei_dbg_data dbg_data;
			dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
			if (dbg_data.cn52xx.qlm0_link_width)
				host_mode = 0;
		}
	} else {
		union cvmx_mio_rst_ctlx mio_rst_ctl;
		mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(1));
		host_mode = mio_rst_ctl.s.host_mode;
	}

	if (host_mode) {
		pr_notice("PCIe: Initializing port 1\n");
		/* CN63XX pass 1_x/2.0 errata PCIe-15205 */
		if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
			OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
			sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(1));
			if (sriox_status_reg.s.srio) {
				srio_war15205 += 1;	 /* Port is SRIO */
				port = 1;
			}
		}
		result = cvmx_pcie_rc_initialize(1);
		if (result == 0) {
			uint32_t device0;
			/* Memory offsets are physical addresses */
			octeon_pcie1_controller.mem_offset =
				cvmx_pcie_get_mem_base_address(1);
			/*
			 * To calculate the address for accessing the 2nd PCIe device,
			 * either 'io_map_base' (pci_iomap()), or 'mips_io_port_base'
			 * (ioport_map()) value is added to
			 * pci_resource_start(dev,bar)). The 'mips_io_port_base' is set
			 * only once based on first PCIe. Also changing 'io_map_base'
			 * based on first slot's value so that both the routines will
			 * work properly.
			 */
			octeon_pcie1_controller.io_map_base =
				CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0));
			/* IO offsets are Mips virtual addresses */
			octeon_pcie1_controller.io_offset =
				cvmx_pcie_get_io_base_address(1) -
				cvmx_pcie_get_io_base_address(0);
			/*
			 * To keep things similar to PCI, we start device
			 * addresses at the same place as PCI uisng big bar
			 * support. This normally translates to 4GB-256MB,
			 * which is the same as most x86 PCs.
			 */
			octeon_pcie1_controller.mem_resource->start =
				cvmx_pcie_get_mem_base_address(1) + (4ul << 30) -
				(OCTEON_PCI_BAR1_HOLE_SIZE << 20);
			octeon_pcie1_controller.mem_resource->end =
				cvmx_pcie_get_mem_base_address(1) +
				cvmx_pcie_get_mem_size(1) - 1;
			/*
			 * Ports must be above 16KB for the ISA bus filtering
			 * in the PCI-X to PCI bridge.
			 */
			octeon_pcie1_controller.io_resource->start =
				cvmx_pcie_get_io_base_address(1) -
				cvmx_pcie_get_io_base_address(0);
			octeon_pcie1_controller.io_resource->end =
				octeon_pcie1_controller.io_resource->start +
				cvmx_pcie_get_io_size(1) - 1;
			msleep(100); /* Some devices need extra time */
			register_pci_controller(&octeon_pcie1_controller);
			device0 = cvmx_pcie_config_read32(1, 0, 0, 0, 0);
			enable_pcie_bus_num_war[1] =
				device_needs_bus_num_war(device0);
		}
	} else {
		pr_notice("PCIe: Port 1 not in root complex mode, skipping.\n");
		/* CN63XX pass 1_x/2.0 errata PCIe-15205 */
		if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
			OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
			srio_war15205 += 1;
			port = 1;
		}
	}

	/*
	 * CN63XX pass 1_x/2.0 errata PCIe-15205 requires setting all
	 * of SRIO MACs SLI_CTL_PORT*[INT*_MAP] to similar value and
	 * all of PCIe Macs SLI_CTL_PORT*[INT*_MAP] to different value
	 * from the previous set values
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
		OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
		if (srio_war15205 == 1) {
			sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(port));
			sli_ctl_portx.s.inta_map = 1;
			sli_ctl_portx.s.intb_map = 1;
			sli_ctl_portx.s.intc_map = 1;
			sli_ctl_portx.s.intd_map = 1;
			cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(port), sli_ctl_portx.u64);

			sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(!port));
			sli_ctl_portx.s.inta_map = 0;
			sli_ctl_portx.s.intb_map = 0;
			sli_ctl_portx.s.intc_map = 0;
			sli_ctl_portx.s.intd_map = 0;
			cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(!port), sli_ctl_portx.u64);
		}
	}

	octeon_pci_dma_init();

	return 0;
}
arch_initcall(octeon_pcie_setup);
| linux-master | arch/mips/pci/pcie-octeon.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ralink MT7620A SoC PCI support
*
* Copyright (C) 2007-2013 Bruce Chang (Mediatek)
* Copyright (C) 2013-2016 John Crispin <[email protected]>
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/mt7620.h>
#define RALINK_PCI_IO_MAP_BASE 0x10160000
#define RALINK_PCI_MEMORY_BASE 0x0
#define RALINK_INT_PCIE0 4
#define RALINK_CLKCFG1 0x30
#define RALINK_GPIOMODE 0x60
#define PPLL_CFG1 0x9c
#define PPLL_LD BIT(23)
#define PPLL_DRV 0xa0
#define PDRV_SW_SET BIT(31)
#define LC_CKDRVPD BIT(19)
#define LC_CKDRVOHZ BIT(18)
#define LC_CKDRVHZ BIT(17)
#define LC_CKTEST BIT(16)
/* PCI Bridge registers */
#define RALINK_PCI_PCICFG_ADDR 0x00
#define PCIRST BIT(1)
#define RALINK_PCI_PCIENA 0x0C
#define PCIINT2 BIT(20)
#define RALINK_PCI_CONFIG_ADDR 0x20
#define RALINK_PCI_CONFIG_DATA_VIRT_REG 0x24
#define RALINK_PCI_MEMBASE 0x28
#define RALINK_PCI_IOBASE 0x2C
/* PCI RC registers */
#define RALINK_PCI0_BAR0SETUP_ADDR 0x10
#define RALINK_PCI0_IMBASEBAR0_ADDR 0x18
#define RALINK_PCI0_ID 0x30
#define RALINK_PCI0_CLASS 0x34
#define RALINK_PCI0_SUBID 0x38
#define RALINK_PCI0_STATUS 0x50
#define PCIE_LINK_UP_ST BIT(0)
#define PCIEPHY0_CFG 0x90
#define RALINK_PCIEPHY_P0_CTL_OFFSET 0x7498
#define RALINK_PCIE0_CLK_EN BIT(26)
#define BUSY 0x80000000
#define WAITRETRY_MAX 10
#define WRITE_MODE (1UL << 23)
#define DATA_SHIFT 0
#define ADDR_SHIFT 8
static void __iomem *bridge_base;
static void __iomem *pcie_base;
static struct reset_control *rstpcie0;
/* Write a 32-bit value to a register of the PCI-bridge (host) block. */
static inline void bridge_w32(u32 val, unsigned reg)
{
	iowrite32(val, bridge_base + reg);
}
/* Read a 32-bit value from a register of the PCI-bridge (host) block. */
static inline u32 bridge_r32(unsigned reg)
{
	return ioread32(bridge_base + reg);
}
/* Write a 32-bit value to a register of the PCIe root-complex block. */
static inline void pcie_w32(u32 val, unsigned reg)
{
	iowrite32(val, pcie_base + reg);
}
/* Read a 32-bit value from a register of the PCIe root-complex block. */
static inline u32 pcie_r32(unsigned reg)
{
	return ioread32(pcie_base + reg);
}
/*
 * Read-modify-write helper for PCIe RC registers: clear the bits in
 * @clr, then set the bits in @set.
 */
static inline void pcie_m32(u32 clr, u32 set, unsigned reg)
{
	pcie_w32((pcie_r32(reg) & ~clr) | set, reg);
}
/*
 * Poll the indirect PHY access port until its BUSY flag clears,
 * sleeping 100ms between polls.  Gives up after WAITRETRY_MAX
 * attempts.  Returns 0 when the port is idle, -1 on timeout.
 */
static int wait_pciephy_busy(void)
{
	unsigned long tries = 0;

	while (pcie_r32(PCIEPHY0_CFG) & BUSY) {
		mdelay(100);
		if (tries++ > WAITRETRY_MAX) {
			pr_warn("PCIE-PHY retry failed.\n");
			return -1;
		}
	}

	return 0;
}
/*
 * Write @val to PCIe PHY register @addr through the indirect
 * PCIEPHY0_CFG port, waiting for the port to go idle before and
 * after the write.  A failed busy-wait is not propagated; the
 * subsequent write is best-effort (matches vendor SDK behaviour).
 */
static void pcie_phy(unsigned long addr, unsigned long val)
{
	wait_pciephy_busy();
	pcie_w32(WRITE_MODE | (val << DATA_SHIFT) | (addr << ADDR_SHIFT),
		 PCIEPHY0_CFG);
	mdelay(1);
	wait_pciephy_busy();
}
/*
 * Read @size bytes at config-space offset @where of @devfn through the
 * bridge's indirect CONFIG_ADDR/CONFIG_DATA window.  A NULL @bus is
 * treated as bus 0 (used internally before the bus is registered).
 */
static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where,
			   int size, u32 *val)
{
	u32 busnum = bus ? bus->number : 0;
	int shift = (where & 3) << 3;
	u32 addr, raw;

	addr = (((where & 0xF00) >> 8) << 24) | (busnum << 16) |
	       (PCI_SLOT(devfn) << 11) | (PCI_FUNC(devfn) << 8) |
	       (where & 0xfc) | 0x80000000;
	bridge_w32(addr, RALINK_PCI_CONFIG_ADDR);
	raw = bridge_r32(RALINK_PCI_CONFIG_DATA_VIRT_REG);

	switch (size) {
	case 1:
		*val = (raw >> shift) & 0xff;
		break;
	case 2:
		*val = (raw >> shift) & 0xffff;
		break;
	case 4:
		*val = raw;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write @size bytes of @val at config-space offset @where of @devfn,
 * using a read-modify-write of the containing 32-bit dword for
 * sub-word accesses.  A NULL @bus is treated as bus 0.
 */
static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where,
			    int size, u32 val)
{
	u32 busnum = bus ? bus->number : 0;
	int shift = (where & 3) << 3;
	u32 addr, raw;

	addr = (((where & 0xF00) >> 8) << 24) | (busnum << 16) |
	       (PCI_SLOT(devfn) << 11) | (PCI_FUNC(devfn) << 8) |
	       (where & 0xfc) | 0x80000000;
	bridge_w32(addr, RALINK_PCI_CONFIG_ADDR);
	raw = bridge_r32(RALINK_PCI_CONFIG_DATA_VIRT_REG);

	switch (size) {
	case 1:
		raw = (raw & ~(0xff << shift)) | (val << shift);
		break;
	case 2:
		raw = (raw & ~(0xffff << shift)) | (val << shift);
		break;
	case 4:
		raw = val;
		break;
	}
	bridge_w32(raw, RALINK_PCI_CONFIG_DATA_VIRT_REG);

	return PCIBIOS_SUCCESSFUL;
}
/* Indirect config-space accessors handed to the MIPS PCI core. */
struct pci_ops mt7620_pci_ops = {
	.read = pci_config_read,
	.write = pci_config_write,
};

/* MEM/IO apertures; filled from the DT "ranges" property at probe time. */
static struct resource mt7620_res_pci_mem1;
static struct resource mt7620_res_pci_io1;

/* Host-controller descriptor registered with the MIPS PCI core. */
struct pci_controller mt7620_controller = {
	.pci_ops = &mt7620_pci_ops,
	.mem_resource = &mt7620_res_pci_mem1,
	.mem_offset = 0x00000000UL,
	.io_resource = &mt7620_res_pci_io1,
	.io_offset = 0x00000000UL,
	.io_map_base = 0xa0000000,
};
/*
 * Power-up sequence for the PCIe core on MT7620A: program the PHY,
 * cycle the core through reset with power and clocks gated, then
 * verify the PCIe PLL locks before driving the bus.
 * Returns 0 on success, -1 if the PLL fails to lock.
 */
static int mt7620_pci_hw_init(struct platform_device *pdev)
{
	/* bypass PCIe DLL */
	pcie_phy(0x0, 0x80);
	pcie_phy(0x1, 0x04);

	/* Elastic buffer control */
	pcie_phy(0x68, 0xB4);

	/* put core into reset */
	pcie_m32(0, PCIRST, RALINK_PCI_PCICFG_ADDR);
	reset_control_assert(rstpcie0);

	/* disable power and all clocks */
	rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
	rt_sysc_m32(LC_CKDRVPD, PDRV_SW_SET, PPLL_DRV);

	/* bring core out of reset */
	reset_control_deassert(rstpcie0);
	rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
	mdelay(100);

	/* without PLL lock the link cannot come up - back out cleanly */
	if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
		dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n");
		reset_control_assert(rstpcie0);
		rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
		return -1;
	}

	/* power up the bus */
	rt_sysc_m32(LC_CKDRVHZ | LC_CKDRVOHZ, LC_CKDRVPD | PDRV_SW_SET,
		    PPLL_DRV);

	return 0;
}
/*
 * Power-up sequence for the PCIe core on MT7628/MT7688: release the
 * reset, ungate the clock and apply the PHY/FTS tweaks inherited from
 * the vendor SDK.  Always returns 0.
 */
static int mt7628_pci_hw_init(struct platform_device *pdev)
{
	u32 val = 0;

	/* bring the core out of reset */
	rt_sysc_m32(BIT(16), 0, RALINK_GPIOMODE);
	reset_control_deassert(rstpcie0);

	/* enable the pci clk */
	rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
	mdelay(100);

	/* voodoo from the SDK driver */
	pcie_m32(~0xff, 0x5, RALINK_PCIEPHY_P0_CTL_OFFSET);
	pci_config_read(NULL, 0, 0x70c, 4, &val);
	/*
	 * Replace only bits 15:8 with 0x50.  The previous mask,
	 * "~(0xff) << 8", evaluated to 0xffff0000 because unary ~
	 * binds tighter than <<, so bits 7:0 were cleared as well.
	 */
	val &= ~(0xff << 8);
	val |= 0x50 << 8;
	pci_config_write(NULL, 0, 0x70c, 4, val);

	return 0;
}
/*
 * Probe: map the bridge and root-complex register windows, run the
 * SoC-specific power-up sequence, and register the PCI controller if
 * the link comes up.  Returns 0 on success, a -errno from resource
 * setup, or -1 on hardware init/link failure.
 */
static int mt7620_pci_probe(struct platform_device *pdev)
{
	u32 val = 0;

	rstpcie0 = devm_reset_control_get_exclusive(&pdev->dev, "pcie0");
	if (IS_ERR(rstpcie0))
		return PTR_ERR(rstpcie0);

	/* resource 0: bridge registers, resource 1: RC registers */
	bridge_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(bridge_base))
		return PTR_ERR(bridge_base);

	pcie_base = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
	if (IS_ERR(pcie_base))
		return PTR_ERR(pcie_base);

	iomem_resource.start = 0;
	iomem_resource.end = ~0;
	ioport_resource.start = 0;
	ioport_resource.end = ~0;

	/* bring up the pci core */
	switch (ralink_soc) {
	case MT762X_SOC_MT7620A:
		if (mt7620_pci_hw_init(pdev))
			return -1;
		break;

	case MT762X_SOC_MT7628AN:
	case MT762X_SOC_MT7688:
		if (mt7628_pci_hw_init(pdev))
			return -1;
		break;

	default:
		dev_err(&pdev->dev, "pcie is not supported on this hardware\n");
		return -1;
	}
	mdelay(50);

	/* enable write access */
	pcie_m32(PCIRST, 0, RALINK_PCI_PCICFG_ADDR);
	mdelay(100);

	/* check if there is a card present */
	if ((pcie_r32(RALINK_PCI0_STATUS) & PCIE_LINK_UP_ST) == 0) {
		/* no link - power the port back down */
		reset_control_assert(rstpcie0);
		rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
		if (ralink_soc == MT762X_SOC_MT7620A)
			rt_sysc_m32(LC_CKDRVPD, PDRV_SW_SET, PPLL_DRV);
		dev_info(&pdev->dev, "PCIE0 no card, disable it(RST&CLK)\n");
		return -1;
	}

	/* setup ranges */
	bridge_w32(0xffffffff, RALINK_PCI_MEMBASE);
	bridge_w32(RALINK_PCI_IO_MAP_BASE, RALINK_PCI_IOBASE);

	pcie_w32(0x7FFF0001, RALINK_PCI0_BAR0SETUP_ADDR);
	pcie_w32(RALINK_PCI_MEMORY_BASE, RALINK_PCI0_IMBASEBAR0_ADDR);
	pcie_w32(0x06040001, RALINK_PCI0_CLASS);

	/* enable interrupts */
	pcie_m32(0, PCIINT2, RALINK_PCI_PCIENA);

	/* voodoo from the SDK driver */
	pci_config_read(NULL, 0, 4, 4, &val);
	pci_config_write(NULL, 0, 4, 4, val | 0x7);

	pci_load_of_ranges(&mt7620_controller, pdev->dev.of_node);
	register_pci_controller(&mt7620_controller);

	return 0;
}
/*
 * Map a PCI device to its interrupt line and finish per-device setup.
 *
 * Slot 0 on bus 0 is the root complex itself: its BAR0 gets programmed
 * here and it is assigned no interrupt.  Slot 0 on bus 1 is the single
 * endpoint behind the PCIe port and uses RALINK_INT_PCIE0.  Returns the
 * IRQ number, or 0 when none could be assigned.
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	u16 cmd;
	u32 val;
	int irq = 0;

	if ((dev->bus->number == 0) && (slot == 0)) {
		pcie_w32(0x7FFF0001, RALINK_PCI0_BAR0SETUP_ADDR);
		pci_config_write(dev->bus, 0, PCI_BASE_ADDRESS_0, 4,
				 RALINK_PCI_MEMORY_BASE);
		/* read-back flushes the BAR write */
		pci_config_read(dev->bus, 0, PCI_BASE_ADDRESS_0, 4, &val);
	} else if ((dev->bus->number == 1) && (slot == 0x0)) {
		irq = RALINK_INT_PCIE0;
	} else {
		dev_err(&dev->dev, "no irq found - bus=0x%x, slot = 0x%x\n",
			dev->bus->number, slot);
		return 0;
	}
	dev_info(&dev->dev, "card - bus=0x%x, slot = 0x%x irq=%d\n",
		 dev->bus->number, slot, irq);

	/* configure the cache line size to 0x14 */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x14);

	/* configure latency timer to 0xff */
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xff);
	pci_read_config_word(dev, PCI_COMMAND, &cmd);

	/* setup the slot */
	cmd = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, cmd);
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);

	return irq;
}
/* No extra platform-specific per-device init is needed on this SoC. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
/* DT match table for the MT7620/MT7628 PCI(e) host controller. */
static const struct of_device_id mt7620_pci_ids[] = {
	{ .compatible = "mediatek,mt7620-pci" },
	{},
};

static struct platform_driver mt7620_pci_driver = {
	.probe = mt7620_pci_probe,
	.driver = {
		.name = "mt7620-pci",
		.of_match_table = of_match_ptr(mt7620_pci_ids),
	},
};

/* Registered at arch_initcall time so the bus exists before drivers. */
static int __init mt7620_pci_init(void)
{
	return platform_driver_register(&mt7620_pci_driver);
}

arch_initcall(mt7620_pci_init);
| linux-master | arch/mips/pci/pci-mt7620.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000, 2001 Keith M Wesolowski
* Copyright (C) 2004 by Ralf Baechle ([email protected])
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>
#undef DEBUG_MACE_PCI
/*
* Handle errors from the bridge. This includes master and target aborts,
* various command and address errors, and the interrupt test. This gets
* registered on the bridge error irq. It's conceivable that some of these
* conditions warrant a panic. Anybody care to say which ones?
*/
static irqreturn_t macepci_error(int irq, void *dev)
{
char s;
unsigned int flags = mace->pci.error;
unsigned int addr = mace->pci.error_addr;
if (flags & MACEPCI_ERROR_MEMORY_ADDR)
s = 'M';
else if (flags & MACEPCI_ERROR_CONFIG_ADDR)
s = 'C';
else
s = 'X';
if (flags & MACEPCI_ERROR_MASTER_ABORT) {
printk("MACEPCI: Master abort at 0x%08x (%c)\n", addr, s);
flags &= ~MACEPCI_ERROR_MASTER_ABORT;
}
if (flags & MACEPCI_ERROR_TARGET_ABORT) {
printk("MACEPCI: Target abort at 0x%08x (%c)\n", addr, s);
flags &= ~MACEPCI_ERROR_TARGET_ABORT;
}
if (flags & MACEPCI_ERROR_DATA_PARITY_ERR) {
printk("MACEPCI: Data parity error at 0x%08x (%c)\n", addr, s);
flags &= ~MACEPCI_ERROR_DATA_PARITY_ERR;
}
if (flags & MACEPCI_ERROR_RETRY_ERR) {
printk("MACEPCI: Retry error at 0x%08x (%c)\n", addr, s);
flags &= ~MACEPCI_ERROR_RETRY_ERR;
}
if (flags & MACEPCI_ERROR_ILLEGAL_CMD) {
printk("MACEPCI: Illegal command at 0x%08x (%c)\n", addr, s);
flags &= ~MACEPCI_ERROR_ILLEGAL_CMD;
}
if (flags & MACEPCI_ERROR_SYSTEM_ERR) {
printk("MACEPCI: System error at 0x%08x (%c)\n", addr, s);
flags &= ~MACEPCI_ERROR_SYSTEM_ERR;
}
if (flags & MACEPCI_ERROR_PARITY_ERR) {
printk("MACEPCI: Parity error at 0x%08x (%c)\n", addr, s);
flags &= ~MACEPCI_ERROR_PARITY_ERR;
}
if (flags & MACEPCI_ERROR_OVERRUN) {
printk("MACEPCI: Overrun error at 0x%08x (%c)\n", addr, s);
flags &= ~MACEPCI_ERROR_OVERRUN;
}
if (flags & MACEPCI_ERROR_SIG_TABORT) {
printk("MACEPCI: Signaled target abort (clearing)\n");
flags &= ~MACEPCI_ERROR_SIG_TABORT;
}
if (flags & MACEPCI_ERROR_INTERRUPT_TEST) {
printk("MACEPCI: Interrupt test triggered (clearing)\n");
flags &= ~MACEPCI_ERROR_INTERRUPT_TEST;
}
mace->pci.error = flags;
return IRQ_HANDLED;
}
extern struct pci_ops mace_pci_ops;

#ifdef CONFIG_64BIT
/* 64-bit kernels can address the full high PCI memory window. */
static struct resource mace_pci_mem_resource = {
	.name = "SGI O2 PCI MEM",
	.start = MACEPCI_HI_MEMORY,
	.end = 0x2FFFFFFFFUL,
	.flags = IORESOURCE_MEM,
};
static struct resource mace_pci_io_resource = {
	.name = "SGI O2 PCI IO",
	.start = 0x00000000UL,
	.end = 0xffffffffUL,
	.flags = IORESOURCE_IO,
};
#define MACE_PCI_MEM_OFFSET 0x200000000
#else
/* 32-bit kernels are limited to the 32 MB low PCI memory window. */
static struct resource mace_pci_mem_resource = {
	.name = "SGI O2 PCI MEM",
	.start = MACEPCI_LOW_MEMORY,
	.end = MACEPCI_LOW_MEMORY + 0x2000000 - 1,
	.flags = IORESOURCE_MEM,
};
static struct resource mace_pci_io_resource = {
	.name = "SGI O2 PCI IO",
	.start = 0x00000000,
	.end = 0xFFFFFFFF,
	.flags = IORESOURCE_IO,
};
#define MACE_PCI_MEM_OFFSET (MACEPCI_LOW_MEMORY - 0x80000000)
#endif
static struct pci_controller mace_pci_controller = {
	.pci_ops = &mace_pci_ops,
	.mem_resource = &mace_pci_mem_resource,
	.io_resource = &mace_pci_io_resource,
	.mem_offset = MACE_PCI_MEM_OFFSET,
	.io_offset = 0,
	.io_map_base = CKSEG1ADDR(MACEPCI_LOW_IO),
};
/*
 * Bring up the MACE PCI bridge: clear latched errors, enable the
 * controller, hook the bridge-error interrupt and register the bus.
 */
static int __init mace_init(void)
{
	PCIBIOS_MIN_IO = 0x1000;

	/* Clear any outstanding errors and enable interrupts */
	mace->pci.error_addr = 0;
	mace->pci.error = 0;
	mace->pci.control = 0xff008500;

	printk("MACE PCI rev %d\n", mace->pci.rev);

	BUG_ON(request_irq(MACE_PCI_BRIDGE_IRQ, macepci_error, 0,
			   "MACE PCI error", NULL));

	/* extend memory resources */
	iomem_resource.end = mace_pci_mem_resource.end;
	ioport_resource = mace_pci_io_resource;

	register_pci_controller(&mace_pci_controller);
	return 0;
}

arch_initcall(mace_init);
| linux-master | arch/mips/pci/pci-ip32.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2010 John Crispin <[email protected]>
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/addrspace.h>
#include <linux/vmalloc.h>
#include <lantiq_soc.h>
#include "pci-lantiq.h"
#define LTQ_PCI_CFG_BUSNUM_SHF 16
#define LTQ_PCI_CFG_DEVNUM_SHF 11
#define LTQ_PCI_CFG_FUNNUM_SHF 8
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/*
 * Perform one 32-bit configuration cycle through the memory-mapped
 * config window.  Data is byte-swapped for the bus, and a read/write
 * sequence afterwards clears any latched master abort.  Returns 0 on
 * success, 1 for filtered devices or an all-ones (aborted) read.
 */
static int ltq_pci_config_access(unsigned char access_type, struct pci_bus *bus,
	unsigned int devfn, unsigned int where, u32 *data)
{
	unsigned long cfg_base;
	unsigned long flags;
	u32 temp;

	/* we support slot from 0 to 15 dev_fn & 0x68 (AD29) is the
	   SoC itself */
	if ((bus->number != 0) || ((devfn & 0xf8) > 0x78)
		|| ((devfn & 0xf8) == 0) || ((devfn & 0xf8) == 0x68))
		return 1;

	spin_lock_irqsave(&ebu_lock, flags);

	/* bus/devfn/register are encoded into the window address */
	cfg_base = (unsigned long) ltq_pci_mapped_cfg;
	cfg_base |= (bus->number << LTQ_PCI_CFG_BUSNUM_SHF) | (devfn <<
		LTQ_PCI_CFG_FUNNUM_SHF) | (where & ~0x3);

	/* Perform access */
	if (access_type == PCI_ACCESS_WRITE) {
		ltq_w32(swab32(*data), ((u32 *)cfg_base));
	} else {
		*data = ltq_r32(((u32 *)(cfg_base)));
		*data = swab32(*data);
	}
	wmb();

	/* clean possible Master abort */
	cfg_base = (unsigned long) ltq_pci_mapped_cfg;
	cfg_base |= (0x0 << LTQ_PCI_CFG_FUNNUM_SHF) + 4;
	temp = ltq_r32(((u32 *)(cfg_base)));
	temp = swab32(temp);
	cfg_base = (unsigned long) ltq_pci_mapped_cfg;
	cfg_base |= (0x68 << LTQ_PCI_CFG_FUNNUM_SHF) + 4;
	ltq_w32(temp, ((u32 *)cfg_base));
	spin_unlock_irqrestore(&ebu_lock, flags);

	if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ))
		return 1;

	return 0;
}
/*
 * Read @size bytes at config offset @where; sub-word reads extract the
 * requested lane from the 32-bit config dword.
 */
int ltq_pci_read_config_dword(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	int shift = (where & 3) << 3;
	u32 data = 0;

	if (ltq_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*val = (data >> shift) & 0xff;
		break;
	case 2:
		*val = (data >> shift) & 0xffff;
		break;
	default:
		*val = data;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write @size bytes at config offset @where; sub-word writes do a
 * read-modify-write of the containing 32-bit config dword.
 */
int ltq_pci_write_config_dword(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	int shift = (where & 3) << 3;
	u32 data = 0;

	if (size == 4) {
		data = val;
	} else {
		if (ltq_pci_config_access(PCI_ACCESS_READ, bus,
				devfn, where, &data))
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (size == 1)
			data = (data & ~(0xff << shift)) | (val << shift);
		else if (size == 2)
			data = (data & ~(0xffff << shift)) | (val << shift);
	}

	if (ltq_pci_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return PCIBIOS_DEVICE_NOT_FOUND;

	return PCIBIOS_SUCCESSFUL;
}
| linux-master | arch/mips/pci/ops-lantiq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) 2003, 04, 11 Ralf Baechle ([email protected])
* Copyright (C) 2011 Wind River Systems,
* written by Ralf Baechle ([email protected])
*/
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/of_address.h>
#include <asm/cpu-info.h>
/*
* If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource
* assignments.
*/
/*
* The PCI controller list.
*/
static LIST_HEAD(controllers);
static int pci_initialized;
/*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*
* Why? Because some silly external IO cards only decode
* the low 10 bits of the IO address. The 0x00-0xff region
* is reserved for motherboard devices that decode all 16
* bits, so it's ok to allocate at, say, 0x2800-0x28ff,
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might have be mirrored at 0x0100-0x03ff..
*/
/*
 * Align a PCI resource allocation, avoiding ISA-aliased I/O ranges.
 *
 * Some external IO cards only decode the low 10 bits of the IO
 * address, so keep I/O allocations inside the 0x000-0x0ff region
 * modulo 0x400 and respect each hose's PCIBIOS minimums.
 */
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
		       resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_controller *hose = dev->sysdata;
	resource_size_t start = res->start;
	resource_size_t lo;

	if (res->flags & IORESOURCE_IO) {
		/* Make sure we start at our min on all hoses */
		lo = PCIBIOS_MIN_IO + hose->io_resource->start;
		if (start < lo)
			start = lo;
		/* Put everything into 0x00-0xff region modulo 0x400 */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	} else if (res->flags & IORESOURCE_MEM) {
		/* Make sure we start at our min on all hoses */
		lo = PCIBIOS_MIN_MEM + hose->mem_resource->start;
		if (start < lo)
			start = lo;
	}

	return start;
}
/*
 * Allocate a host bridge for @hose, scan its root bus and assign (or,
 * with PCI_PROBE_ONLY, merely claim) resources.  Bus numbers are handed
 * out sequentially across all hoses via the static next_busno; once
 * they would overflow, further hoses get their own PCI domain.
 */
static void pcibios_scanbus(struct pci_controller *hose)
{
	static int next_busno;
	static int need_domain_info;
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct pci_host_bridge *bridge;
	int ret;

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return;

	/* firmware-assigned bus numbers take precedence when probing only */
	if (hose->get_busno && pci_has_flag(PCI_PROBE_ONLY))
		next_busno = (*hose->get_busno)();

	pci_add_resource_offset(&resources,
				hose->mem_resource, hose->mem_offset);
	pci_add_resource_offset(&resources,
				hose->io_resource, hose->io_offset);
	list_splice_init(&resources, &bridge->windows);
	bridge->dev.parent = NULL;
	bridge->sysdata = hose;
	bridge->busnr = next_busno;
	bridge->ops = hose->pci_ops;
	bridge->swizzle_irq = pci_common_swizzle;
	bridge->map_irq = pcibios_map_irq;
	ret = pci_scan_root_bus_bridge(bridge);
	if (ret) {
		pci_free_host_bridge(bridge);
		return;
	}

	hose->bus = bus = bridge->bus;

	need_domain_info = need_domain_info || pci_domain_nr(bus);
	set_pci_need_domain_info(hose, need_domain_info);

	next_busno = bus->busn_res.end + 1;
	/* Don't allow 8-bit bus number overflow inside the hose -
	   reserve some space for bridges. */
	if (next_busno > 224) {
		next_busno = 0;
		need_domain_info = 1;
	}

	/*
	 * We insert PCI resources into the iomem_resource and
	 * ioport_resource trees in either pci_bus_claim_resources()
	 * or pci_bus_assign_resources().
	 */
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		pci_bus_claim_resources(bus);
	} else {
		struct pci_bus *child;

		pci_bus_size_bridges(bus);
		pci_bus_assign_resources(bus);
		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child);
	}
	pci_bus_add_devices(bus);
}
#ifdef CONFIG_OF
/*
 * Fill @hose's io/mem resources from the "ranges" property of @node.
 * The I/O window is additionally ioremapped so hose->io_map_base is
 * usable for port accesses.
 */
void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
{
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	hose->of_node = node;

	if (of_pci_range_parser_init(&parser, node))
		return;

	for_each_of_pci_range(&parser, &range) {
		struct resource *res = NULL;

		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			hose->io_map_base =
				(unsigned long)ioremap(range.cpu_addr,
						       range.size);
			res = hose->io_resource;
			break;
		case IORESOURCE_MEM:
			res = hose->mem_resource;
			break;
		}
		if (res != NULL) {
			res->name = node->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}
}
/* Hand the PCI core the DT node of the bus's host controller. */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return of_node_get(hose->of_node);
}
#endif
/* Serializes hot-registered controller scans against pcibios_init(). */
static DEFINE_MUTEX(pci_scan_mutex);

/*
 * Claim the controller's MEM and IO windows and add it to the global
 * controller list.  If the PCI subsystem is already initialized, the
 * bus is scanned immediately; otherwise pcibios_init() will scan it.
 */
void register_pci_controller(struct pci_controller *hose)
{
	struct resource *parent;

	parent = hose->mem_resource->parent;
	if (!parent)
		parent = &iomem_resource;

	if (request_resource(parent, hose->mem_resource) < 0)
		goto out;
	parent = hose->io_resource->parent;
	if (!parent)
		parent = &ioport_resource;

	if (request_resource(parent, hose->io_resource) < 0) {
		/* undo the MEM claim so registration is all-or-nothing */
		release_resource(hose->mem_resource);
		goto out;
	}

	INIT_LIST_HEAD(&hose->list);
	list_add_tail(&hose->list, &controllers);

	/*
	 * Do not panic here but later - this might happen before console init.
	 */
	if (!hose->io_map_base) {
		printk(KERN_WARNING
		       "registering PCI controller with io_map_base unset\n");
	}

	/*
	 * Scan the bus if it was registered after the PCI subsystem
	 * initialization.
	 */
	if (pci_initialized) {
		mutex_lock(&pci_scan_mutex);
		pcibios_scanbus(hose);
		mutex_unlock(&pci_scan_mutex);
	}

	return;

out:
	printk(KERN_WARNING
	       "Skipping PCI bus scan due to resource conflict\n");
}
/* Scan every controller that registered before the PCI subsystem came up. */
static int __init pcibios_init(void)
{
	struct pci_controller *hose;

	/* Scan all of the recorded PCI controllers. */
	list_for_each_entry(hose, &controllers, list)
		pcibios_scanbus(hose);

	/* later register_pci_controller() calls scan their bus directly */
	pci_initialized = 1;

	return 0;
}

subsys_initcall(pcibios_init);
/*
 * Turn on PCI_COMMAND_IO/MEMORY decoding for the BARs selected in
 * @mask, refusing devices whose resources were never assigned
 * (start == 0 but a non-zero end indicates a collision).
 */
static int pcibios_enable_resources(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	pci_dev_for_each_resource(dev, r, idx) {
		/* Only set up the requested stuff */
		if (!(mask & (1<<idx)))
			continue;

		if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
			continue;
		/* expansion ROMs only decode when explicitly enabled */
		if ((idx == PCI_ROM_RESOURCE) &&
				(!(r->flags & IORESOURCE_ROM_ENABLE)))
			continue;
		if (!r->start && r->end) {
			pci_err(dev,
				"can't enable device: resource collisions\n");
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd) {
		pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
/* Enable decoding for the requested BARs, then run platform fixups. */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int err = pcibios_enable_resources(dev, mask);

	if (err < 0)
		return err;

	return pcibios_plat_dev_init(dev);
}
/*
 * With PCI_PROBE_ONLY, trust the firmware's bridge windows: read them
 * back from PCI-PCI bridges instead of reassigning.
 */
void pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;

	if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_read_bridge_bases(bus);
	}
}
/* Optional platform hook for parsing "pci=" style command-line options. */
char * (*pcibios_plat_setup)(char *str) __initdata;

/* Forward command-line option @str to the platform hook, if any. */
char *__init pcibios_setup(char *str)
{
	if (pcibios_plat_setup)
		return pcibios_plat_setup(str);
	return str;
}
| linux-master | arch/mips/pci/pci-legacy.c |
/*
* Based on linux/arch/mips/txx9/rbtx4938/setup.c,
* and RBTX49xx patch from CELF patch archive.
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Copyright (C) 2004 by Ralf Baechle ([email protected])
* (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/tx4927.h>
/*
 * Print the PCIC clock configuration.  Returns the internal PCICLK
 * rate in Hz (derived from the CPU clock and the CCFG divider), or -1
 * when an external PCI clock is used.
 */
int __init tx4927_report_pciclk(void)
{
	int pciclk = 0;

	pr_info("PCIC --%s PCICLK:",
		(__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCI66) ?
		" PCI66" : "");
	if (__raw_readq(&tx4927_ccfgptr->pcfg) & TX4927_PCFG_PCICLKEN_ALL) {
		u64 ccfg = __raw_readq(&tx4927_ccfgptr->ccfg);

		switch ((unsigned long)ccfg &
			TX4927_CCFG_PCIDIVMODE_MASK) {
		case TX4927_CCFG_PCIDIVMODE_2_5:
			pciclk = txx9_cpu_clock * 2 / 5; break;
		case TX4927_CCFG_PCIDIVMODE_3:
			pciclk = txx9_cpu_clock / 3; break;
		case TX4927_CCFG_PCIDIVMODE_5:
			pciclk = txx9_cpu_clock / 5; break;
		case TX4927_CCFG_PCIDIVMODE_6:
			pciclk = txx9_cpu_clock / 6; break;
		}
		/* +50000 rounds to the nearest 0.1 MHz for display */
		pr_cont("Internal(%u.%uMHz)",
			(pciclk + 50000) / 1000000,
			((pciclk + 50000) / 100000) % 10);
	} else {
		pr_cont("External");
		pciclk = -1;
	}
	pr_cont("\n");
	return pciclk;
}
/*
 * Switch to 66MHz PCI operation: assert M66EN and, when the PCI clock
 * is generated internally, halve the divider (5 -> 2.5, 6 -> 3) to
 * roughly double the clock rate.  Returns the resulting PCICLK in Hz,
 * or -1 when an external PCI clock is used.
 */
int __init tx4927_pciclk66_setup(void)
{
	int pciclk;

	/* Assert M66EN */
	tx4927_ccfg_set(TX4927_CCFG_PCI66);

	/* Double PCICLK (if possible) */
	if (__raw_readq(&tx4927_ccfgptr->pcfg) & TX4927_PCFG_PCICLKEN_ALL) {
		unsigned int pcidivmode = 0;
		u64 ccfg = __raw_readq(&tx4927_ccfgptr->ccfg);

		pcidivmode = (unsigned long)ccfg &
			TX4927_CCFG_PCIDIVMODE_MASK;
		switch (pcidivmode) {
		case TX4927_CCFG_PCIDIVMODE_5:
		case TX4927_CCFG_PCIDIVMODE_2_5:
			pcidivmode = TX4927_CCFG_PCIDIVMODE_2_5;
			pciclk = txx9_cpu_clock * 2 / 5;
			break;
		case TX4927_CCFG_PCIDIVMODE_6:
		case TX4927_CCFG_PCIDIVMODE_3:
		default:
			pcidivmode = TX4927_CCFG_PCIDIVMODE_3;
			pciclk = txx9_cpu_clock / 3;
		}
		tx4927_ccfg_change(TX4927_CCFG_PCIDIVMODE_MASK,
				   pcidivmode);
		pr_debug("PCICLK: ccfg:%08lx\n",
			 (unsigned long)__raw_readq(&tx4927_ccfgptr->ccfg));
	} else
		pciclk = -1;
	return pciclk;
}
/* Hook the PCI controller's error interrupt line. */
void __init tx4927_setup_pcierr_irq(void)
{
	if (request_irq(TXX9_IRQ_BASE + TX4927_IR_PCIERR,
			tx4927_pcierr_interrupt,
			0, "PCI error",
			(void *)TX4927_PCIC_REG))
		pr_warn("Failed to request irq for PCIERR\n");
}
| linux-master | arch/mips/pci/pci-tx4927.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <[email protected]>
* Maciej W. Rozycki <[email protected]>
*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin <[email protected]>
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <loongson.h>
#ifdef CONFIG_CS5536
#include <cs5536/cs5536_pci.h>
#include <cs5536/cs5536.h>
#endif
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
#define CFG_SPACE_REG(offset) \
(void *)CKSEG1ADDR(LOONGSON_PCICFG_BASE | (offset))
#define ID_SEL_BEGIN 11
#define MAX_DEV_NUM (31 - ID_SEL_BEGIN)
/*
 * Perform one 32-bit config-space cycle through the Bonito north
 * bridge.  Bus 0 uses Type 0 cycles (one IDSEL line per device), other
 * buses use Type 1.  On CS5536 systems the low registers of the south
 * bridge are reached through MSRs instead.  Returns 0 on success, -1
 * on master/target abort or an out-of-range device.
 */
static int loongson_pcibios_config_access(unsigned char access_type,
					  struct pci_bus *bus,
					  unsigned int devfn, int where,
					  u32 *data)
{
	u32 busnum = bus->number;
	u32 addr, type;
	u32 dummy;
	void *addrp;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int reg = where & ~3;

	if (busnum == 0) {
		/* board-specific part,currently,only fuloong2f,yeeloong2f
		 * use CS5536, fuloong2e use via686b, gdium has no
		 * south bridge
		 */
#ifdef CONFIG_CS5536
		/* cs5536_pci_conf_read4/write4() will call _rdmsr/_wrmsr() to
		 * access the regsters PCI_MSR_ADDR, PCI_MSR_DATA_LO,
		 * PCI_MSR_DATA_HI, which is bigger than PCI_MSR_CTRL, so, it
		 * will not go this branch, but the others. so, no calling dead
		 * loop here.
		 */
		if ((PCI_IDSEL_CS5536 == device) && (reg < PCI_MSR_CTRL)) {
			switch (access_type) {
			case PCI_ACCESS_READ:
				*data = cs5536_pci_conf_read4(function, reg);
				break;
			case PCI_ACCESS_WRITE:
				cs5536_pci_conf_write4(function, reg, *data);
				break;
			}
			return 0;
		}
#endif
		/* Type 0 configuration for onboard PCI bus */
		if (device > MAX_DEV_NUM)
			return -1;

		addr = (1 << (device + ID_SEL_BEGIN)) | (function << 8) | reg;
		type = 0;
	} else {
		/* Type 1 configuration for offboard PCI bus */
		addr = (busnum << 16) | (device << 11) | (function << 8) | reg;
		type = 0x10000;
	}

	/* Clear aborts */
	LOONGSON_PCICMD |= LOONGSON_PCICMD_MABORT_CLR | \
				LOONGSON_PCICMD_MTABORT_CLR;

	LOONGSON_PCIMAP_CFG = (addr >> 16) | type;

	/* Flush Bonito register block */
	dummy = LOONGSON_PCIMAP_CFG;
	mmiowb();

	addrp = CFG_SPACE_REG(addr & 0xffff);
	if (access_type == PCI_ACCESS_WRITE)
		writel(cpu_to_le32(*data), addrp);
	else
		*data = le32_to_cpu(readl(addrp));

	/* Detect Master/Target abort */
	if (LOONGSON_PCICMD & (LOONGSON_PCICMD_MABORT_CLR |
			       LOONGSON_PCICMD_MTABORT_CLR)) {
		/* Error occurred */

		/* Clear bits */
		LOONGSON_PCICMD |= (LOONGSON_PCICMD_MABORT_CLR |
				    LOONGSON_PCICMD_MTABORT_CLR);

		return -1;
	}

	return 0;
}
/*
* We can't address 8 and 16 bit words directly. Instead we have to
* read/write a 32bit word and mask/modify the data we actually want.
*/
/*
 * Read @size bytes from config space.  We can't address 8 and 16 bit
 * words directly, so the enclosing 32-bit word is read and the wanted
 * bytes extracted.  Returns a PCIBIOS_* status code.
 */
static int loongson_pcibios_read(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	u32 data = 0;

	if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
	else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (loongson_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
					   &data))
		/* report aborts with a proper PCIBIOS code, not raw -1 */
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 1)
		*val = (data >> ((where & 3) << 3)) & 0xff;
	else if (size == 2)
		*val = (data >> ((where & 3) << 3)) & 0xffff;
	else
		*val = data;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write @size bytes to config space, using a read-modify-write of the
 * enclosing 32-bit word for sub-word accesses.  Returns a PCIBIOS_*
 * status code.
 */
static int loongson_pcibios_write(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 val)
{
	u32 data = 0;

	if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
	else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		data = val;
	else {
		if (loongson_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
						   where, &data))
			/* report aborts with a proper PCIBIOS code, not raw -1 */
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (size == 1)
			data = (data & ~(0xff << ((where & 3) << 3))) |
				(val << ((where & 3) << 3));
		else if (size == 2)
			data = (data & ~(0xffff << ((where & 3) << 3))) |
				(val << ((where & 3) << 3));
	}

	if (loongson_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
					   &data))
		return PCIBIOS_DEVICE_NOT_FOUND;

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors handed to the generic MIPS PCI code. */
struct pci_ops loongson_pci_ops = {
	.read = loongson_pcibios_read,
	.write = loongson_pcibios_write
};
#ifdef CONFIG_CS5536
/* Serializes the multi-register MSR access sequences below. */
DEFINE_RAW_SPINLOCK(msr_lock);

/*
 * Read a CS5536 MSR.  The south bridge exposes MSRs through three PCI
 * config registers (address, data low, data high), so the three-step
 * sequence must be atomic with respect to other MSR users.
 */
void _rdmsr(u32 msr, u32 *hi, u32 *lo)
{
	struct pci_bus bus = {
		.number = PCI_BUS_CS5536
	};
	u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0);
	unsigned long flags;

	raw_spin_lock_irqsave(&msr_lock, flags);
	loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr);
	loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_LO, 4, lo);
	loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_HI, 4, hi);
	raw_spin_unlock_irqrestore(&msr_lock, flags);
}
EXPORT_SYMBOL(_rdmsr);
/*
 * Write a CS5536 MSR via the PCI config address/data-low/data-high
 * registers; the sequence is serialized by msr_lock.
 */
void _wrmsr(u32 msr, u32 hi, u32 lo)
{
	struct pci_bus bus = {
		.number = PCI_BUS_CS5536
	};
	u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0);
	unsigned long flags;

	raw_spin_lock_irqsave(&msr_lock, flags);
	loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr);
	loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_LO, 4, lo);
	loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_HI, 4, hi);
	raw_spin_unlock_irqrestore(&msr_lock, flags);
}
EXPORT_SYMBOL(_wrmsr);
#endif
| linux-master | arch/mips/pci/ops-loongson2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001,2002,2005 Broadcom Corporation
* Copyright (C) 2004 by Ralf Baechle ([email protected])
*/
/*
* BCM1480/1455-specific HT support (looking like PCI)
*
* This module provides the glue between Linux's PCI subsystem
* and the hardware. We basically provide glue for accessing
* configuration space, and set up the translation for I/O
* space accesses.
*
* To access configuration space, we use ioremap. In the 32-bit
* kernel, this consumes either 4 or 8 page table pages, and 16MB of
* kernel mapped memory. Hopefully neither of these should be a huge
* problem.
*
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_scd.h>
#include <asm/sibyte/board.h>
#include <asm/io.h>
/*
* Macros for calculating offsets into config space given a device
* structure or dev/fun/reg
*/
/*
 * Byte offset of a config register inside the mapped 16MB window:
 * bus<<16 | devfn<<8 | reg.
 */
#define CFGOFFSET(bus, devfn, where) (((bus)<<16)+((devfn)<<8)+(where))
#define CFGADDR(bus, devfn, where) CFGOFFSET((bus)->number, (devfn), where)
/* ioremap()ed base of the HT configuration-space window */
static void *ht_cfg_space;
#define PCI_BUS_ENABLED 1
#define PCI_DEVICE_MODE 2
/* bitmask of PCI_BUS_ENABLED / PCI_DEVICE_MODE state flags */
static int bcm1480ht_bus_status;
#define PCI_BRIDGE_DEVICE 0
#define HT_BRIDGE_DEVICE 1
/*
 * HT's level-sensitive interrupts require EOI, which is generated
 * through a 4MB memory-mapped region
 */
unsigned long ht_eoi_space;
/*
* Read/write 32-bit values in config space.
*/
/* Fetch the aligned 32-bit config-space word containing @addr. */
static inline u32 READCFG32(u32 addr)
{
	u32 aligned = addr & ~3;

	return *(u32 *)(ht_cfg_space + aligned);
}
/* Store @data into the aligned 32-bit config-space word containing @addr. */
static inline void WRITECFG32(u32 addr, u32 data)
{
	u32 aligned = addr & ~3;

	*(u32 *)(ht_cfg_space + aligned) = data;
}
/*
* Some checks before doing config cycles:
* In PCI Device Mode, hide everything on bus 0 except the LDT host
* bridge. Otherwise, access is controlled by bridge MasterEn bits.
*/
/*
 * Decide whether a config cycle to @bus/@devfn is allowed.
 *
 * Nothing is accessible until the bus has been enabled or we are in
 * PCI device mode.  In device mode everything on bus 0 is hidden;
 * elsewhere access is controlled by the bridge MasterEn bits.
 *
 * Returns 1 if the access may proceed, 0 otherwise.
 *
 * Fix: the original computed devno = PCI_SLOT(devfn) but never used it
 * (-Wunused-but-set-variable); the dead local is removed and the bus-0
 * check folded into one condition.  Behavior is unchanged.
 */
static int bcm1480ht_can_access(struct pci_bus *bus, int devfn)
{
	if (!(bcm1480ht_bus_status & (PCI_BUS_ENABLED | PCI_DEVICE_MODE)))
		return 0;

	/* In PCI device mode, hide everything on bus 0. */
	if (bus->number == 0 && (bcm1480ht_bus_status & PCI_DEVICE_MODE))
		return 0;

	return 1;
}
/*
* Read/write access functions for various sizes of values
* in config space. Return all 1's for disallowed accesses
* for a kludgy but adequate simulation of master aborts.
*/
/*
 * Read @size bytes from config space.  Disallowed accesses return all
 * 1's to simulate a master abort.
 */
static int bcm1480ht_pcibios_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 * val)
{
	u32 data;

	/* Reject misaligned 16- and 32-bit accesses. */
	if (((size == 2) && (where & 1)) || ((size == 4) && (where & 3)))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	data = bcm1480ht_can_access(bus, devfn) ?
		READCFG32(CFGADDR(bus, devfn, where)) : 0xFFFFFFFF;

	/* Extract the requested byte lanes from the 32-bit word. */
	switch (size) {
	case 1:
		*val = (data >> ((where & 3) << 3)) & 0xff;
		break;
	case 2:
		*val = (data >> ((where & 3) << 3)) & 0xffff;
		break;
	default:
		*val = data;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write @size bytes to config space via read-modify-write of the
 * containing 32-bit word.  Disallowed accesses fail like a master abort.
 */
static int bcm1480ht_pcibios_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 val)
{
	u32 cfgaddr = CFGADDR(bus, devfn, where);
	u32 shift = (where & 3) << 3;
	u32 data;

	/* Reject misaligned 16- and 32-bit accesses. */
	if (((size == 2) && (where & 1)) || ((size == 4) && (where & 3)))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (!bcm1480ht_can_access(bus, devfn))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	data = READCFG32(cfgaddr);
	switch (size) {
	case 1:
		data = (data & ~(0xff << shift)) | (val << shift);
		break;
	case 2:
		data = (data & ~(0xffff << shift)) | (val << shift);
		break;
	default:
		data = val;
		break;
	}
	WRITECFG32(cfgaddr, data);

	return PCIBIOS_SUCCESSFUL;
}
/* The HT root bus is always bus 0 for this controller. */
static int bcm1480ht_pcibios_get_busno(void)
{
	return 0;
}
/* Config-space accessors handed to the generic MIPS PCI code. */
struct pci_ops bcm1480ht_pci_ops = {
	.read = bcm1480ht_pcibios_read,
	.write = bcm1480ht_pcibios_write,
};
/* 512MB HT memory window (byte-match region). */
static struct resource bcm1480ht_mem_resource = {
	.name = "BCM1480 HT MEM",
	.start = A_BCM1480_PHYS_HT_MEM_MATCH_BYTES,
	.end = A_BCM1480_PHYS_HT_MEM_MATCH_BYTES + 0x1fffffffUL,
	.flags = IORESOURCE_MEM,
};
/* 32MB HT I/O window (byte-match region). */
static struct resource bcm1480ht_io_resource = {
	.name = "BCM1480 HT I/O",
	.start = A_BCM1480_PHYS_HT_IO_MATCH_BYTES,
	.end = A_BCM1480_PHYS_HT_IO_MATCH_BYTES + 0x01ffffffUL,
	.flags = IORESOURCE_IO,
};
/* Controller descriptor registered with the generic MIPS PCI layer. */
struct pci_controller bcm1480ht_controller = {
	.pci_ops = &bcm1480ht_pci_ops,
	.mem_resource = &bcm1480ht_mem_resource,
	.io_resource = &bcm1480ht_io_resource,
	.index = 1,
	.get_busno = bcm1480ht_pcibios_get_busno,
	.io_offset = A_BCM1480_PHYS_HT_IO_MATCH_BYTES,
};
/*
 * Map the HT windows and register the controller.
 *
 * Fix: the ioremap() of the 16MB configuration window was unchecked;
 * on failure every READCFG32/WRITECFG32 would dereference offsets from
 * a NULL base.  Bail out with -ENOMEM instead of registering a
 * controller that cannot perform config cycles.
 */
static int __init bcm1480ht_pcibios_init(void)
{
	/* 16MB window covering type-0/type-1 config space. */
	ht_cfg_space = ioremap(A_BCM1480_PHYS_HT_CFG_MATCH_BITS, 16*1024*1024);
	if (!ht_cfg_space)
		return -ENOMEM;

	/* CFE doesn't always init all HT paths, so we always scan */
	bcm1480ht_bus_status |= PCI_BUS_ENABLED;

	/* 4MB EOI region for HT level-sensitive interrupts. */
	ht_eoi_space = (unsigned long)
		ioremap(A_BCM1480_PHYS_HT_SPECIAL_MATCH_BYTES,
			4 * 1024 * 1024);
	bcm1480ht_controller.io_map_base = (unsigned long)
		ioremap(A_BCM1480_PHYS_HT_IO_MATCH_BYTES, 65536);
	bcm1480ht_controller.io_map_base -= bcm1480ht_controller.io_offset;

	register_pci_controller(&bcm1480ht_controller);

	return 0;
}
arch_initcall(bcm1480ht_pcibios_init);
| linux-master | arch/mips/pci/pci-bcm1480ht.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2008 Lemote Technology
* Copyright (C) 2004 ICT CAS
* Author: Li xiaoyu, [email protected]
*
* Copyright (C) 2007 Lemote, Inc.
* Author: Fuxin Zhang, [email protected]
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <loongson.h>
#include <cs5536/cs5536.h>
#include <cs5536/cs5536_pci.h>
/* PCI interrupt pins
*
* These should not be changed, or you should consider loongson2f interrupt
* register and your pci card dispatch
*/
/* Loongson interrupt-controller inputs used for the four PCI INTx pins. */
#define PCIA 4
#define PCIB 5
#define PCIC 6
#define PCID 7
/* all the pci device has the PCIA pin, check the datasheet. */
/*
 * Per-slot INTx routing table; column 0 is unused, columns 1..4 map
 * INTA..INTD.  0 means "no interrupt routed".
 * NOTE(review): rows are indexed by the 'slot' argument of
 * pcibios_map_irq() while the comments label device numbers 11..27 —
 * verify the slot-to-row offset against the board wiring.
 */
static char irq_tab[][5] = {
	/* INTA INTB INTC INTD */
	{0, 0, 0, 0, 0},	/* 11: Unused */
	{0, 0, 0, 0, 0},	/* 12: Unused */
	{0, 0, 0, 0, 0},	/* 13: Unused */
	{0, 0, 0, 0, 0},	/* 14: Unused */
	{0, 0, 0, 0, 0},	/* 15: Unused */
	{0, 0, 0, 0, 0},	/* 16: Unused */
	{0, PCIA, 0, 0, 0},	/* 17: RTL8110-0 */
	{0, PCIB, 0, 0, 0},	/* 18: RTL8110-1 */
	{0, PCIC, 0, 0, 0},	/* 19: SiI3114 */
	{0, PCID, 0, 0, 0},	/* 20: 3-ports nec usb */
	{0, PCIA, PCIB, PCIC, PCID},	/* 21: PCI-SLOT */
	{0, 0, 0, 0, 0},	/* 22: Unused */
	{0, 0, 0, 0, 0},	/* 23: Unused */
	{0, 0, 0, 0, 0},	/* 24: Unused */
	{0, 0, 0, 0, 0},	/* 25: Unused */
	{0, 0, 0, 0, 0},	/* 26: Unused */
	{0, 0, 0, 0, 0},	/* 27: Unused */
};
/*
 * Map a PCI device's INTx pin to a Linux IRQ number.
 *
 * Ordinary slots are looked up in irq_tab (0 = no interrupt); the
 * on-chip CS5536 functions get fixed IRQs which are also written back
 * into PCI_INTERRUPT_LINE so the device sees the same number.
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	int virq;
	if ((PCI_SLOT(dev->devfn) != PCI_IDSEL_CS5536)
	    && (PCI_SLOT(dev->devfn) < 32)) {
		/* Regular PCI device: table lookup relative to the
		 * Loongson interrupt base. */
		virq = irq_tab[slot][pin];
		printk(KERN_INFO "slot: %d, pin: %d, irq: %d\n", slot, pin,
		       virq + LOONGSON_IRQ_BASE);
		if (virq != 0)
			return LOONGSON_IRQ_BASE + virq;
		else
			return 0;
	} else if (PCI_SLOT(dev->devfn) == PCI_IDSEL_CS5536) { /* cs5536 */
		switch (PCI_FUNC(dev->devfn)) {
		case 2:
			pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
					      CS5536_IDE_INTR);
			return CS5536_IDE_INTR;	/* for IDE */
		case 3:
			pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
					      CS5536_ACC_INTR);
			return CS5536_ACC_INTR;	/* for AUDIO */
		case 4:	/* for OHCI */
		case 5:	/* for EHCI */
		case 6:	/* for UDC */
		case 7:	/* for OTG */
			/* all four USB functions share one interrupt */
			pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
					      CS5536_USB_INTR);
			return CS5536_USB_INTR;
		}
		return dev->irq;
	} else {
		printk(KERN_INFO "strange PCI slot number.\n");
		return 0;
	}
}
/* Do platform specific device initialization at pci_enable_device() time */
/* Nothing board-specific is needed here on this platform. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
/* CS5536 SPEC. fixup */
/* Enable the UART1/UART2 interrupt lines in the CS5536 PIC. */
static void loongson_cs5536_isa_fixup(struct pci_dev *pdev)
{
	/* the uart1 and uart2 interrupt in PIC is enabled as default */
	pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1);
	pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1);
}
/* Route the shared IDE/flash pins to the IDE function. */
static void loongson_cs5536_ide_fixup(struct pci_dev *pdev)
{
	/* setting the mutex pin as IDE function */
	pci_write_config_dword(pdev, PCI_IDE_CFG_REG,
			       CS5536_IDE_FLASH_SIGNATURE);
}
/* Enable the audio interrupt and bump its PCI latency timer. */
static void loongson_cs5536_acc_fixup(struct pci_dev *pdev)
{
	/* enable the AUDIO interrupt in PIC */
	pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1);
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0);
}
static void loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
{
	/* enable the OHCI interrupt in PIC */
	/* THE OHCI, EHCI, UDC, OTG are shared with interrupt in PIC */
	pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1);
}
static void loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
{
	u32 hi, lo;
	/* Serial short detect enable */
	_rdmsr(USB_MSR_REG(USB_CONFIG), &hi, &lo);
	/*
	 * NOTE(review): the read-back 'hi' is discarded and replaced by
	 * the constant (1 << 1) | (1 << 3); 'lo' is written back
	 * unchanged.  Presumably intentional — confirm against the
	 * CS5536 data book for USB_CONFIG.
	 */
	_wrmsr(USB_MSR_REG(USB_CONFIG), (1 << 1) | (1 << 3), lo);
	/* setting the USB2.0 micro frame length */
	pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000);
}
/* Limit the NEC USB controller to its two wired ports. */
static void loongson_nec_fixup(struct pci_dev *pdev)
{
	unsigned int val;
	pci_read_config_dword(pdev, 0xe0, &val);
	/* Only 2 port be used */
	pci_write_config_dword(pdev, 0xe0, (val & ~3) | 0x2);
}
/* Register the header fixups above for the matching vendor/device IDs. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA,
			 loongson_cs5536_isa_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_OHC,
			 loongson_cs5536_ohci_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_EHC,
			 loongson_cs5536_ehci_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_AUDIO,
			 loongson_cs5536_acc_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE,
			 loongson_cs5536_ide_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
			 loongson_nec_fixup);
| linux-master | arch/mips/pci/fixup-lemote2f.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atheros AR71xx PCI host controller driver
*
* Copyright (C) 2008-2011 Gabor Juhos <[email protected]>
* Copyright (C) 2008 Imre Kaloz <[email protected]>
*
* Parts of this file are based on Atheros' 2.6.15 BSP
*/
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ath79.h>
/* PCI host-controller register offsets (CRP = local config registers). */
#define AR71XX_PCI_REG_CRP_AD_CBE	0x00
#define AR71XX_PCI_REG_CRP_WRDATA	0x04
#define AR71XX_PCI_REG_CRP_RDDATA	0x08
#define AR71XX_PCI_REG_CFG_AD		0x0c
#define AR71XX_PCI_REG_CFG_CBE		0x10
#define AR71XX_PCI_REG_CFG_WRDATA	0x14
#define AR71XX_PCI_REG_CFG_RDDATA	0x18
#define AR71XX_PCI_REG_PCI_ERR		0x1c
#define AR71XX_PCI_REG_PCI_ERR_ADDR	0x20
#define AR71XX_PCI_REG_AHB_ERR		0x24
#define AR71XX_PCI_REG_AHB_ERR_ADDR	0x28
/* Command values latched into the AD_CBE/CBE registers. */
#define AR71XX_PCI_CRP_CMD_WRITE	0x00010000
#define AR71XX_PCI_CRP_CMD_READ		0x00000000
#define AR71XX_PCI_CFG_CMD_READ		0x0000000a
#define AR71XX_PCI_CFG_CMD_WRITE	0x0000000b
/* Interrupt-status/enable bits in the reset block. */
#define AR71XX_PCI_INT_CORE		BIT(4)
#define AR71XX_PCI_INT_DEV2		BIT(2)
#define AR71XX_PCI_INT_DEV1		BIT(1)
#define AR71XX_PCI_INT_DEV0		BIT(0)
#define AR71XX_PCI_IRQ_COUNT		5
/* Per-controller state: mapped registers, IRQ wiring and resources. */
struct ar71xx_pci_controller {
	void __iomem *cfg_base;		/* mapped controller registers */
	int irq;			/* upstream (chained) interrupt */
	int irq_base;			/* first of the demuxed PCI IRQs */
	struct pci_controller pci_ctrl;
	struct resource io_res;
	struct resource mem_res;
};
/* Byte lane enable bits */
/* Indexed by [size & 3][offset & 3]; 0xf marks invalid combinations. */
static const u8 ar71xx_pci_ble_table[4][4] = {
	{0x0, 0xf, 0xf, 0xf},
	{0xe, 0xd, 0xb, 0x7},
	{0xc, 0xf, 0x3, 0xf},
	{0xf, 0xf, 0xf, 0xf},
};
/* Result mask for a read of 'size' bytes (indexed by size). */
static const u32 ar71xx_pci_read_mask[8] = {
	0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0
};
/*
 * Build the byte-lane-enable field for a @size-byte access at @where.
 * @local selects the shift used for local (CRP) vs. normal config
 * cycles.  An invalid size/offset combination (0xf in the table) is
 * a caller bug.
 */
static inline u32 ar71xx_pci_get_ble(int where, int size, int local)
{
	u32 ble = ar71xx_pci_ble_table[size & 3][where & 3];

	BUG_ON(ble == 0xf);

	return ble << (local ? 20 : 4);
}
/*
 * Encode a config-cycle address: type 0 (one-hot IDSEL) on the root
 * bus, type 1 (bus/slot encoded, low bit set) behind bridges.
 */
static inline u32 ar71xx_pci_bus_addr(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	u32 addr = (PCI_FUNC(devfn) << 8) | (where & ~3);

	if (bus->number == 0) {
		/* type 0: select the device via its slot bit */
		addr |= 1 << PCI_SLOT(devfn);
	} else {
		/* type 1 */
		addr |= (bus->number << 16) | (PCI_SLOT(devfn) << 11) | 1;
	}

	return addr;
}
/* Recover our controller struct from the bus's generic hose pointer. */
static inline struct ar71xx_pci_controller *
pci_bus_to_ar71xx_controller(struct pci_bus *bus)
{
	struct pci_controller *hose;
	hose = (struct pci_controller *) bus->sysdata;
	/* pci_ctrl is embedded, so container_of() gets the wrapper */
	return container_of(hose, struct ar71xx_pci_controller, pci_ctrl);
}
/*
 * Check and clear latched PCI and AHB bus-error status.
 * @quiet suppresses the log messages (used while probing devices,
 * where aborts are expected).  Returns nonzero if any error was seen.
 */
static int ar71xx_pci_check_error(struct ar71xx_pci_controller *apc, int quiet)
{
	void __iomem *base = apc->cfg_base;
	u32 pci_err;
	u32 ahb_err;
	pci_err = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR) & 3;
	if (pci_err) {
		if (!quiet) {
			u32 addr;
			addr = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR_ADDR);
			pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
				"PCI", pci_err, addr);
		}
		/* clear PCI error status */
		__raw_writel(pci_err, base + AR71XX_PCI_REG_PCI_ERR);
	}
	ahb_err = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR) & 1;
	if (ahb_err) {
		if (!quiet) {
			u32 addr;
			addr = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR_ADDR);
			pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
				"AHB", ahb_err, addr);
		}
		/* clear AHB error status */
		__raw_writel(ahb_err, base + AR71XX_PCI_REG_AHB_ERR);
	}
	return !!(ahb_err | pci_err);
}
/*
 * Write to the controller's own (local/CRP) config registers:
 * latch address+byte-enables, then the lane-aligned value.
 */
static inline void ar71xx_pci_local_write(struct ar71xx_pci_controller *apc,
					  int where, int size, u32 value)
{
	void __iomem *base = apc->cfg_base;
	u32 ad_cbe;
	/* shift the value onto the byte lanes selected by 'where' */
	value = value << (8 * (where & 3));
	ad_cbe = AR71XX_PCI_CRP_CMD_WRITE | (where & ~3);
	ad_cbe |= ar71xx_pci_get_ble(where, size, 1);
	__raw_writel(ad_cbe, base + AR71XX_PCI_REG_CRP_AD_CBE);
	__raw_writel(value, base + AR71XX_PCI_REG_CRP_WRDATA);
}
/*
 * Latch the config-cycle address and command/byte-enables, then
 * return nonzero if the cycle raised a (quietly cleared) bus error,
 * i.e. the target device does not exist.
 */
static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
					 unsigned int devfn,
					 int where, int size, u32 cmd)
{
	struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
	void __iomem *base = apc->cfg_base;
	u32 addr;
	addr = ar71xx_pci_bus_addr(bus, devfn, where);
	__raw_writel(addr, base + AR71XX_PCI_REG_CFG_AD);
	__raw_writel(cmd | ar71xx_pci_get_ble(where, size, 0),
		     base + AR71XX_PCI_REG_CFG_CBE);
	return ar71xx_pci_check_error(apc, 1);
}
/*
 * Read @size bytes from a device's config space.  Missing devices
 * (bus error during the address phase) read back as all 1's.
 */
static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
	void __iomem *base = apc->cfg_base;
	u32 data = ~0;
	int ret = PCIBIOS_SUCCESSFUL;

	if (ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
				   AR71XX_PCI_CFG_CMD_READ))
		ret = PCIBIOS_DEVICE_NOT_FOUND;
	else
		data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA);

	/* extract the requested byte lanes */
	*value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7];

	return ret;
}
/*
 * Write @size bytes to a device's config space; the byte enables
 * select the lanes, so the value is pre-shifted to match 'where'.
 */
static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
	void __iomem *base = apc->cfg_base;

	value <<= 8 * (where & 3);

	if (ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
				   AR71XX_PCI_CFG_CMD_WRITE))
		return PCIBIOS_DEVICE_NOT_FOUND;

	__raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA);

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors handed to the generic PCI core. */
static struct pci_ops ar71xx_pci_ops = {
	.read = ar71xx_pci_read_config,
	.write = ar71xx_pci_write_config,
};
/*
 * Chained handler demultiplexing the shared PCI interrupt.
 * Exactly one pending-and-enabled source is dispatched per invocation;
 * note CORE maps to irq_base + 4 (index 3 has no source bit here).
 */
static void ar71xx_pci_irq_handler(struct irq_desc *desc)
{
	struct ar71xx_pci_controller *apc;
	void __iomem *base = ath79_reset_base;
	u32 pending;
	apc = irq_desc_get_handler_data(desc);
	/* only sources that are both pending and enabled */
	pending = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_STATUS) &
		  __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
	if (pending & AR71XX_PCI_INT_DEV0)
		generic_handle_irq(apc->irq_base + 0);
	else if (pending & AR71XX_PCI_INT_DEV1)
		generic_handle_irq(apc->irq_base + 1);
	else if (pending & AR71XX_PCI_INT_DEV2)
		generic_handle_irq(apc->irq_base + 2);
	else if (pending & AR71XX_PCI_INT_CORE)
		generic_handle_irq(apc->irq_base + 4);
	else
		spurious_interrupt();
}
/* Enable one demuxed PCI interrupt source in the reset block. */
static void ar71xx_pci_irq_unmask(struct irq_data *d)
{
	struct ar71xx_pci_controller *apc;
	unsigned int irq;
	void __iomem *base = ath79_reset_base;
	u32 t;
	apc = irq_data_get_irq_chip_data(d);
	/* hw bit index = offset from our first virtual IRQ */
	irq = d->irq - apc->irq_base;
	t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
	__raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
	/* flush write */
	__raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
}
/* Disable one demuxed PCI interrupt source in the reset block. */
static void ar71xx_pci_irq_mask(struct irq_data *d)
{
	struct ar71xx_pci_controller *apc;
	unsigned int irq;
	void __iomem *base = ath79_reset_base;
	u32 t;
	apc = irq_data_get_irq_chip_data(d);
	irq = d->irq - apc->irq_base;
	t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
	__raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
	/* flush write */
	__raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
}
/* irq_chip for the demuxed PCI interrupts (level type: mask acks). */
static struct irq_chip ar71xx_pci_irq_chip = {
	.name = "AR71XX PCI",
	.irq_mask = ar71xx_pci_irq_mask,
	.irq_unmask = ar71xx_pci_irq_unmask,
	.irq_mask_ack = ar71xx_pci_irq_mask,
};
/*
 * Quiesce the hardware, register handlers for the fixed virtual IRQ
 * range and chain our demux handler onto the upstream interrupt.
 */
static void ar71xx_pci_irq_init(struct ar71xx_pci_controller *apc)
{
	void __iomem *base = ath79_reset_base;
	int i;
	/* mask everything and clear stale status before wiring handlers */
	__raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_ENABLE);
	__raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_STATUS);
	BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR71XX_PCI_IRQ_COUNT);
	apc->irq_base = ATH79_PCI_IRQ_BASE;
	for (i = apc->irq_base;
	     i < apc->irq_base + AR71XX_PCI_IRQ_COUNT; i++) {
		irq_set_chip_and_handler(i, &ar71xx_pci_irq_chip,
					 handle_level_irq);
		irq_set_chip_data(i, apc);
	}
	irq_set_chained_handler_and_data(apc->irq, ar71xx_pci_irq_handler,
					 apc);
}
/* Pulse the PCI bus/core reset lines and reprogram the DDR windows. */
static void ar71xx_pci_reset(void)
{
	ath79_device_reset_set(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
	mdelay(100);
	ath79_device_reset_clear(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
	mdelay(100);
	ath79_ddr_set_pci_windows();
	mdelay(100);
}
/*
 * Platform probe: map registers, collect IRQ and window resources,
 * reset the bus, program the local COMMAND register, set up the
 * interrupt demux and register the controller with the PCI core.
 */
static int ar71xx_pci_probe(struct platform_device *pdev)
{
	struct ar71xx_pci_controller *apc;
	struct resource *res;
	u32 t;
	apc = devm_kzalloc(&pdev->dev, sizeof(struct ar71xx_pci_controller),
			   GFP_KERNEL);
	if (!apc)
		return -ENOMEM;
	apc->cfg_base = devm_platform_ioremap_resource_byname(pdev,
							      "cfg_base");
	if (IS_ERR(apc->cfg_base))
		return PTR_ERR(apc->cfg_base);
	apc->irq = platform_get_irq(pdev, 0);
	if (apc->irq < 0)
		return -EINVAL;
	/* I/O and memory windows come from the platform resources */
	res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
	if (!res)
		return -EINVAL;
	apc->io_res.parent = res;
	apc->io_res.name = "PCI IO space";
	apc->io_res.start = res->start;
	apc->io_res.end = res->end;
	apc->io_res.flags = IORESOURCE_IO;
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base");
	if (!res)
		return -EINVAL;
	apc->mem_res.parent = res;
	apc->mem_res.name = "PCI memory space";
	apc->mem_res.start = res->start;
	apc->mem_res.end = res->end;
	apc->mem_res.flags = IORESOURCE_MEM;
	ar71xx_pci_reset();
	/* setup COMMAND register */
	t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
	  | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
	ar71xx_pci_local_write(apc, PCI_COMMAND, 4, t);
	/* clear bus errors */
	ar71xx_pci_check_error(apc, 1);
	ar71xx_pci_irq_init(apc);
	apc->pci_ctrl.pci_ops = &ar71xx_pci_ops;
	apc->pci_ctrl.mem_resource = &apc->mem_res;
	apc->pci_ctrl.io_resource = &apc->io_res;
	register_pci_controller(&apc->pci_ctrl);
	return 0;
}
/* Platform driver registration; must run before PCI device probing. */
static struct platform_driver ar71xx_pci_driver = {
	.probe = ar71xx_pci_probe,
	.driver = {
		.name = "ar71xx-pci",
	},
};
static int __init ar71xx_pci_init(void)
{
	return platform_driver_register(&ar71xx_pci_driver);
}
postcore_initcall(ar71xx_pci_init);
| linux-master | arch/mips/pci/pci-ar71xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <[email protected]>
* Maciej W. Rozycki <[email protected]>
* Copyright (C) 2005 Ralf Baechle ([email protected])
*
* MIPS boards specific PCI support.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <asm/mips-boards/msc01_pci.h>
/* Direction selector for msc_pcibios_config_access(). */
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/*
 * PCI configuration cycle AD bus definition
 */
/* Type 0 */
#define PCI_CFG_TYPE0_REG_SHF 0
#define PCI_CFG_TYPE0_FUNC_SHF 8
/* Type 1 */
#define PCI_CFG_TYPE1_REG_SHF 0
#define PCI_CFG_TYPE1_FUNC_SHF 8
#define PCI_CFG_TYPE1_DEV_SHF 11
#define PCI_CFG_TYPE1_BUS_SHF 16
/*
 * Perform one 32-bit config-space access through the MSC01 bridge.
 * Latches the cycle address, does the read/write, then checks the
 * interrupt-status register for master/target aborts.
 * Returns 0 on success, -1 if an abort was detected (status cleared).
 */
static int msc_pcibios_config_access(unsigned char access_type,
	struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
{
	unsigned char busnum = bus->number;
	u32 intr;
	/* Clear status register bits. */
	MSC_WRITE(MSC01_PCI_INTSTAT,
		  (MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT));
	MSC_WRITE(MSC01_PCI_CFGADDR,
		  ((busnum << MSC01_PCI_CFGADDR_BNUM_SHF) |
		   (PCI_SLOT(devfn) << MSC01_PCI_CFGADDR_DNUM_SHF) |
		   (PCI_FUNC(devfn) << MSC01_PCI_CFGADDR_FNUM_SHF) |
		   ((where / 4) << MSC01_PCI_CFGADDR_RNUM_SHF)));
	/* Perform access */
	if (access_type == PCI_ACCESS_WRITE)
		MSC_WRITE(MSC01_PCI_CFGDATA, *data);
	else
		MSC_READ(MSC01_PCI_CFGDATA, *data);
	/* Detect Master/Target abort */
	MSC_READ(MSC01_PCI_INTSTAT, intr);
	if (intr & (MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT)) {
		/* Error occurred */
		/* Clear bits */
		MSC_WRITE(MSC01_PCI_INTSTAT,
			  (MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT));
		return -1;
	}
	return 0;
}
/*
* We can't address 8 and 16 bit words directly. Instead we have to
* read/write a 32bit word and mask/modify the data we actually want.
*/
/*
 * Read @size bytes from config space by fetching the containing
 * 32-bit word and extracting the requested lanes.
 */
static int msc_pcibios_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 * val)
{
	u32 data = 0;

	/* Reject misaligned 16- and 32-bit accesses. */
	if (((size == 2) && (where & 1)) || ((size == 4) && (where & 3)))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
				      &data))
		return -1;

	switch (size) {
	case 1:
		*val = (data >> ((where & 3) << 3)) & 0xff;
		break;
	case 2:
		*val = (data >> ((where & 3) << 3)) & 0xffff;
		break;
	default:
		*val = data;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write @size bytes to config space.  Full-word writes go straight
 * through; smaller ones read-modify-write the containing word.
 */
static int msc_pcibios_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	u32 shift = (where & 3) << 3;
	u32 data = 0;

	/* Reject misaligned 16- and 32-bit accesses. */
	if (((size == 2) && (where & 1)) || ((size == 4) && (where & 3)))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4) {
		data = val;
	} else {
		/* partial write: fetch, then merge the new lanes */
		if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
					      where, &data))
			return -1;

		if (size == 1)
			data = (data & ~(0xff << shift)) | (val << shift);
		else
			data = (data & ~(0xffff << shift)) | (val << shift);
	}

	if (msc_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
				      &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors handed to the generic MIPS PCI code. */
struct pci_ops msc_pci_ops = {
	.read = msc_pcibios_read,
	.write = msc_pcibios_write
};
| linux-master | arch/mips/pci/ops-msc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2003 Christoph Hellwig ([email protected])
* Copyright (C) 1999, 2000, 04 Ralf Baechle ([email protected])
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/dma-direct.h>
#include <linux/platform_device.h>
#include <linux/platform_data/xtalk-bridge.h>
#include <linux/nvmem-consumer.h>
#include <linux/crc16.h>
#include <linux/irqdomain.h>
#include <asm/pci/bridge.h>
#include <asm/paccess.h>
#include <asm/sn/irq_alloc.h>
#include <asm/sn/ioc3.h>
#define CRC16_INIT 0
#define CRC16_VALID 0xb001
/*
 * Common phys<->dma mapping for platforms using pci xtalk bridge
 */
/* DMA address = bridge's direct-map base plus the physical address. */
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
	return bc->baddr + paddr;
}
/* Strip the direct-map attribute bits (top byte) from a DMA address. */
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
	const dma_addr_t attr_mask = 0xffUL << 56;

	return dma_addr & ~attr_mask;
}
/*
 * Most of the IOC3 PCI config register aren't present
 * we emulate what is needed for a normal PCI enumeration
 */
/*
 * Emulated config read for the IOC3.  Only the registers the chip
 * actually decodes are read from hardware (via the fault-protected
 * get_dbe()); the subsystem ID is substituted from @sid and the
 * interrupt pin is synthesized.  Everything else reads as 0.
 */
static int ioc3_cfg_rd(void *addr, int where, int size, u32 *value, u32 sid)
{
	u32 cf, shift, mask;
	switch (where & ~3) {
	case 0x00 ... 0x10:
	case 0x40 ... 0x44:
		if (get_dbe(cf, (u32 *)addr))
			return PCIBIOS_DEVICE_NOT_FOUND;
		break;
	case 0x2c:
		/* subsystem vendor/device ID supplied by the caller */
		cf = sid;
		break;
	case 0x3c:
		/* emulate sane interrupt pin value */
		cf = 0x00000100;
		break;
	default:
		cf = 0;
		break;
	}
	shift = (where & 3) << 3;
	mask = 0xffffffffU >> ((4 - size) << 3);
	*value = (cf >> shift) & mask;
	return PCIBIOS_SUCCESSFUL;
}
/*
 * Emulated config write for the IOC3: writes outside the decoded
 * ranges are silently accepted; decoded registers are read-modify-
 * written through the fault-protected get_dbe()/put_dbe() helpers.
 */
static int ioc3_cfg_wr(void *addr, int where, int size, u32 value)
{
	u32 cf, shift, mask, smask;
	/* registers the IOC3 does not decode: pretend success */
	if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
		return PCIBIOS_SUCCESSFUL;
	if (get_dbe(cf, (u32 *)addr))
		return PCIBIOS_DEVICE_NOT_FOUND;
	shift = ((where & 3) << 3);
	mask = (0xffffffffU >> ((4 - size) << 3));
	smask = mask << shift;
	cf = (cf & ~smask) | ((value & mask) << shift);
	if (put_dbe(cf, (u32 *)addr))
		return PCIBIOS_DEVICE_NOT_FOUND;
	return PCIBIOS_SUCCESSFUL;
}
/* Fixup: disable the bridge's per-slot byte swapping for IOC3 devices. */
static void bridge_disable_swapping(struct pci_dev *dev)
{
	struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
	int slot = PCI_SLOT(dev->devfn);
	/* Turn off byte swapping */
	bridge_clr(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
	bridge_read(bc, b_widget.w_tflush);	/* Flush */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
	bridge_disable_swapping);
/*
 * The Bridge ASIC supports both type 0 and type 1 access. Type 1 is
 * not really documented, so right now I can't write code which uses it.
 * Therefore we use type 0 accesses for now even though they won't work
 * correctly for PCI-to-PCI bridges.
 *
 * The function is complicated by the ultimate brokenness of the IOC3 chip
 * which is used in SGI systems. The IOC3 can only handle 32-bit PCI
 * accesses and does only decode parts of it's address space.
 */
static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *value)
{
	struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
	struct bridge_regs *bridge = bc->base;
	int slot = PCI_SLOT(devfn);
	int fn = PCI_FUNC(devfn);
	void *addr;
	u32 cf;
	int res;
	/* probe the vendor ID first; a bus error means no device */
	addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
	if (get_dbe(cf, (u32 *)addr))
		return PCIBIOS_DEVICE_NOT_FOUND;
	/*
	 * IOC3 is broken beyond belief ... Don't even give the
	 * generic PCI code a chance to look at it for real ...
	 */
	if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) {
		addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
		return ioc3_cfg_rd(addr, where, size, value,
				   bc->ioc3_sid[slot]);
	}
	/* byte-address XOR compensates for the bridge's big-endian lanes */
	addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
	if (size == 1)
		res = get_dbe(*value, (u8 *)addr);
	else if (size == 2)
		res = get_dbe(*value, (u16 *)addr);
	else
		res = get_dbe(*value, (u32 *)addr);
	return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}
/*
 * Type-1 config read for devices behind PCI-to-PCI bridges: the
 * bus/slot is latched into b_pci_cfg, the function/register into the
 * type-1 window offset.
 */
static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *value)
{
	struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
	struct bridge_regs *bridge = bc->base;
	int busno = bus->number;
	int slot = PCI_SLOT(devfn);
	int fn = PCI_FUNC(devfn);
	void *addr;
	u32 cf;
	int res;
	bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
	addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
	if (get_dbe(cf, (u32 *)addr))
		return PCIBIOS_DEVICE_NOT_FOUND;
	/*
	 * IOC3 is broken beyond belief ... Don't even give the
	 * generic PCI code a chance to look at it for real ...
	 */
	if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) {
		addr = &bridge->b_type1_cfg.c[(fn << 8) | (where & ~3)];
		return ioc3_cfg_rd(addr, where, size, value,
				   bc->ioc3_sid[slot]);
	}
	/* byte-address XOR compensates for the bridge's big-endian lanes */
	addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
	if (size == 1)
		res = get_dbe(*value, (u8 *)addr);
	else if (size == 2)
		res = get_dbe(*value, (u16 *)addr);
	else
		res = get_dbe(*value, (u32 *)addr);
	return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}
/* Dispatch: type-0 cycles on the root bus, type-1 below bridges. */
static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
			   int where, int size, u32 *value)
{
	if (pci_is_root_bus(bus))
		return pci_conf0_read_config(bus, devfn, where, size, value);

	return pci_conf1_read_config(bus, devfn, where, size, value);
}
/*
 * Type-0 config write; mirrors pci_conf0_read_config() including the
 * IOC3 emulation detour and the endian-compensating address XOR.
 */
static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 value)
{
	struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
	struct bridge_regs *bridge = bc->base;
	int slot = PCI_SLOT(devfn);
	int fn = PCI_FUNC(devfn);
	void *addr;
	u32 cf;
	int res;
	addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
	if (get_dbe(cf, (u32 *)addr))
		return PCIBIOS_DEVICE_NOT_FOUND;
	/*
	 * IOC3 is broken beyond belief ... Don't even give the
	 * generic PCI code a chance to look at it for real ...
	 */
	if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) {
		addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
		return ioc3_cfg_wr(addr, where, size, value);
	}
	addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
	if (size == 1)
		res = put_dbe(value, (u8 *)addr);
	else if (size == 2)
		res = put_dbe(value, (u16 *)addr);
	else
		res = put_dbe(value, (u32 *)addr);
	if (res)
		return PCIBIOS_DEVICE_NOT_FOUND;
	return PCIBIOS_SUCCESSFUL;
}
/*
 * Type-1 config write for devices behind PCI-to-PCI bridges.
 */
static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 value)
{
	struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
	struct bridge_regs *bridge = bc->base;
	int slot = PCI_SLOT(devfn);
	int fn = PCI_FUNC(devfn);
	int busno = bus->number;
	void *addr;
	u32 cf;
	int res;
	bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
	addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
	if (get_dbe(cf, (u32 *)addr))
		return PCIBIOS_DEVICE_NOT_FOUND;
	/*
	 * IOC3 is broken beyond belief ... Don't even give the
	 * generic PCI code a chance to look at it for real ...
	 */
	if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) {
		/*
		 * NOTE(review): this IOC3 branch addresses the *type-0*
		 * window (b_type0_cfg_dev) although this is the type-1
		 * path; pci_conf1_read_config() uses b_type1_cfg here.
		 * Confirm whether this asymmetry is intentional.
		 */
		addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
		return ioc3_cfg_wr(addr, where, size, value);
	}
	addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
	if (size == 1)
		res = put_dbe(value, (u8 *)addr);
	else if (size == 2)
		res = put_dbe(value, (u16 *)addr);
	else
		res = put_dbe(value, (u32 *)addr);
	if (res)
		return PCIBIOS_DEVICE_NOT_FOUND;
	return PCIBIOS_SUCCESSFUL;
}
/* Dispatch: type-0 cycles on the root bus, type-1 below bridges. */
static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 value)
{
	if (pci_is_root_bus(bus))
		return pci_conf0_write_config(bus, devfn, where, size, value);

	return pci_conf1_write_config(bus, devfn, where, size, value);
}
/* Config-space accessors handed to the generic PCI core. */
static struct pci_ops bridge_pci_ops = {
	.read	 = pci_read_config,
	.write	 = pci_write_config,
};
/* chip_data carried by each bridge IRQ: its controller and home node. */
struct bridge_irq_chip_data {
	struct bridge_controller *bc;
	nasid_t nasid;
};
/*
 * Move an interrupt to another CPU.  On NUMA the bridge's interrupt
 * destination register must also be retargeted at the new CPU's node.
 */
static int bridge_set_affinity(struct irq_data *d, const struct cpumask *mask,
			       bool force)
{
#ifdef CONFIG_NUMA
	struct bridge_irq_chip_data *data = d->chip_data;
	int bit = d->parent_data->hwirq;
	int pin = d->hwirq;
	int ret, cpu;
	ret = irq_chip_set_affinity_parent(d, mask, force);
	if (ret >= 0) {
		cpu = cpumask_first_and(mask, cpu_online_mask);
		data->nasid = cpu_to_node(cpu);
		/* reprogram the per-pin interrupt destination address */
		bridge_write(data->bc, b_int_addr[pin].addr,
			     (((data->bc->intr_addr >> 30) & 0x30000) |
			      bit | (data->nasid << 8)));
		bridge_read(data->bc, b_wid_tflush);
	}
	return ret;
#else
	return irq_chip_set_affinity_parent(d, mask, force);
#endif
}
/* irq_chip: mask/unmask delegate to the parent (hub) domain. */
struct irq_chip bridge_irq_chip = {
	.name             = "BRIDGE",
	.irq_mask         = irq_chip_mask_parent,
	.irq_unmask       = irq_chip_unmask_parent,
	.irq_set_affinity = bridge_set_affinity
};
/*
 * Allocate one IRQ in the bridge domain (only single allocations are
 * supported).  The parent (hub) domain allocates the hardware vector;
 * we attach the controller/node info as chip_data.
 */
static int bridge_domain_alloc(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs, void *arg)
{
	struct bridge_irq_chip_data *data;
	struct irq_alloc_info *info = arg;
	int ret;
	if (nr_irqs > 1 || !info)
		return -EINVAL;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret >= 0) {
		data->bc = info->ctrl;
		data->nasid = info->nasid;
		irq_domain_set_info(domain, virq, info->pin, &bridge_irq_chip,
				    data, handle_level_irq, NULL, NULL);
	} else {
		kfree(data);
	}
	return ret;
}
/*
 * Free a bridge-domain IRQ and its chip_data.
 * NOTE(review): the early return fires whenever nr_irqs != 0, yet
 * bridge_domain_alloc() only permits nr_irqs == 1 — which would make
 * the kfree/free below unreachable on the normal path.  Confirm the
 * intended condition before relying on this for cleanup.
 */
static void bridge_domain_free(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs)
{
	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
	if (nr_irqs)
		return;
	kfree(irqd->chip_data);
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
/*
 * Program the bridge hardware for one interrupt pin: destination
 * address, enable bit, interrupt mode and the pin-to-device mapping.
 */
static int bridge_domain_activate(struct irq_domain *domain,
				  struct irq_data *irqd, bool reserve)
{
	struct bridge_irq_chip_data *data = irqd->chip_data;
	struct bridge_controller *bc = data->bc;
	int bit = irqd->parent_data->hwirq;
	int pin = irqd->hwirq;
	u32 device;
	bridge_write(bc, b_int_addr[pin].addr,
		     (((bc->intr_addr >> 30) & 0x30000) |
		      bit | (data->nasid << 8)));
	bridge_set(bc, b_int_enable, (1 << pin));
	bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */
	/*
	 * Enable sending of an interrupt clear packet to the hub on a high to
	 * low transition of the interrupt pin.
	 *
	 * IRIX sets additional bits in the address which are documented as
	 * reserved in the bridge docs.
	 */
	bridge_set(bc, b_int_mode, (1UL << pin));
	/*
	 * We assume the bridge to have a 1:1 mapping between devices
	 * (slots) and intr pins.
	 */
	device = bridge_read(bc, b_int_device);
	device &= ~(7 << (pin*3));
	device |= (pin << (pin*3));
	bridge_write(bc, b_int_device, device);
	bridge_read(bc, b_wid_tflush);
	return 0;
}
/* Disable the pin's enable bit and flush the posted write. */
static void bridge_domain_deactivate(struct irq_domain *domain,
				     struct irq_data *irqd)
{
	struct bridge_irq_chip_data *data = irqd->chip_data;
	bridge_clr(data->bc, b_int_enable, (1 << irqd->hwirq));
	bridge_read(data->bc, b_wid_tflush);
}
/* irq_domain callbacks for the bridge interrupt domain. */
static const struct irq_domain_ops bridge_domain_ops = {
	.alloc      = bridge_domain_alloc,
	.free       = bridge_domain_free,
	.activate   = bridge_domain_activate,
	.deactivate = bridge_domain_deactivate
};
/*
 * All observed requests have pin == 1. We could have a global here, that
 * gets incremented and returned every time - unfortunately, pci_map_irq
 * may be called on the same device over and over, and need to return the
 * same value. On O2000, pin can be 0 or 1, and PCI slots can be [0..7].
 *
 * A given PCI device, in general, should be able to intr any of the cpus
 * on any one of the hubs connected to its xbow.
 */
/*
 * Map a slot/pin to an IRQ, allocating one from the bridge domain on
 * first use and caching it in pci_int so repeat calls are stable.
 */
static int bridge_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
	struct irq_alloc_info info;
	int irq;
	/* fold the four INTx pins onto the bridge's two intr lines */
	switch (pin) {
	case PCI_INTERRUPT_UNKNOWN:
	case PCI_INTERRUPT_INTA:
	case PCI_INTERRUPT_INTC:
		pin = 0;
		break;
	case PCI_INTERRUPT_INTB:
	case PCI_INTERRUPT_INTD:
		pin = 1;
	}
	irq = bc->pci_int[slot][pin];
	if (irq == -1) {
		info.ctrl = bc;
		info.nasid = bc->nasid;
		info.pin = bc->int_mapping[slot][pin];
		irq = irq_domain_alloc_irqs(bc->domain, 1, bc->nasid, &info);
		if (irq < 0)
			return irq;
		bc->pci_int[slot][pin] = irq;
	}
	return irq;
}
/* Build an IOC3 subsystem ID word from an SGI subsystem code. */
#define IOC3_SID(sid)	(PCI_VENDOR_ID_SGI | ((sid) << 16))

/*
 * Per-board setup helpers, selected by board part number (see
 * bridge_ioc3_devid below).  Each one records the IOC3 subsystem IDs
 * of the slots populated on that board and, where the board wires an
 * interrupt differently, overrides the default int_mapping entry.
 */

static void bridge_setup_ip27_baseio6g(struct bridge_controller *bc)
{
	bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP27_BASEIO6G);
	bc->ioc3_sid[6] = IOC3_SID(IOC3_SUBSYS_IP27_MIO);
	bc->int_mapping[2][1] = 4;
	bc->int_mapping[6][1] = 6;
}

static void bridge_setup_ip27_baseio(struct bridge_controller *bc)
{
	bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP27_BASEIO);
	bc->int_mapping[2][1] = 4;
}

static void bridge_setup_ip29_baseio(struct bridge_controller *bc)
{
	bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP29_SYSBOARD);
	bc->int_mapping[2][1] = 3;
}

static void bridge_setup_ip30_sysboard(struct bridge_controller *bc)
{
	bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP30_SYSBOARD);
	bc->int_mapping[2][1] = 4;
}

/* MENET: four IOC3s in slots 0-3, the last one a MENET4 variant. */
static void bridge_setup_menet(struct bridge_controller *bc)
{
	bc->ioc3_sid[0] = IOC3_SID(IOC3_SUBSYS_MENET);
	bc->ioc3_sid[1] = IOC3_SID(IOC3_SUBSYS_MENET);
	bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_MENET);
	bc->ioc3_sid[3] = IOC3_SID(IOC3_SUBSYS_MENET4);
}

static void bridge_setup_io7(struct bridge_controller *bc)
{
	bc->ioc3_sid[4] = IOC3_SID(IOC3_SUBSYS_IO7);
}

static void bridge_setup_io8(struct bridge_controller *bc)
{
	bc->ioc3_sid[4] = IOC3_SID(IOC3_SUBSYS_IO8);
}

static void bridge_setup_io9(struct bridge_controller *bc)
{
	bc->ioc3_sid[1] = IOC3_SID(IOC3_SUBSYS_IO9);
}

static void bridge_setup_ip34_fuel_sysboard(struct bridge_controller *bc)
{
	bc->ioc3_sid[4] = IOC3_SID(IOC3_SUBSYS_IP34_SYSBOARD);
}
#define BRIDGE_BOARD_SETUP(_partno, _setup)	\
	{ .match = _partno, .setup = _setup }

/*
 * Board part-number prefix -> setup helper table, consulted by
 * bridge_setup_board() with the part number read from the one-wire PROM.
 */
static const struct {
	char *match;
	void (*setup)(struct bridge_controller *bc);
} bridge_ioc3_devid[] = {
	BRIDGE_BOARD_SETUP("030-0734-", bridge_setup_ip27_baseio6g),
	BRIDGE_BOARD_SETUP("030-0880-", bridge_setup_ip27_baseio6g),
	BRIDGE_BOARD_SETUP("030-1023-", bridge_setup_ip27_baseio),
	BRIDGE_BOARD_SETUP("030-1124-", bridge_setup_ip27_baseio),
	BRIDGE_BOARD_SETUP("030-1025-", bridge_setup_ip29_baseio),
	BRIDGE_BOARD_SETUP("030-1244-", bridge_setup_ip29_baseio),
	BRIDGE_BOARD_SETUP("030-1389-", bridge_setup_ip29_baseio),
	BRIDGE_BOARD_SETUP("030-0887-", bridge_setup_ip30_sysboard),
	BRIDGE_BOARD_SETUP("030-1467-", bridge_setup_ip30_sysboard),
	BRIDGE_BOARD_SETUP("030-0873-", bridge_setup_menet),
	BRIDGE_BOARD_SETUP("030-1557-", bridge_setup_io7),
	BRIDGE_BOARD_SETUP("030-1673-", bridge_setup_io8),
	BRIDGE_BOARD_SETUP("030-1771-", bridge_setup_io9),
	BRIDGE_BOARD_SETUP("030-1707-", bridge_setup_ip34_fuel_sysboard),
};
/*
 * Run the board-specific setup helper(s) whose part-number prefix
 * matches @partnum.  All matching table entries are applied.
 */
static void bridge_setup_board(struct bridge_controller *bc, char *partnum)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(bridge_ioc3_devid); idx++) {
		const char *prefix = bridge_ioc3_devid[idx].match;

		if (strncmp(partnum, prefix, strlen(prefix)) == 0)
			bridge_ioc3_devid[idx].setup(bc);
	}
}
/*
 * nvmem lookup predicate: match a device whose name starts with the
 * "bridge-<addr>-0b-" prefix passed in @data.
 *
 * Returns 1 on a prefix match, 0 otherwise.
 *
 * Simplification: the old code guarded a memcmp() with an explicit
 * strlen() length check and called dev_name() twice.  strncmp() stops
 * at the first NUL, so a device name shorter than the prefix can never
 * compare equal and no separate guard is required.
 */
static int bridge_nvmem_match(struct device *dev, const void *data)
{
	const char *prefix = data;

	return strncmp(dev_name(dev), prefix, strlen(prefix)) == 0;
}
/*
 * Read the bridge's identification PROM via nvmem and extract the board
 * part number into @partnum (NUL-terminated, with spaces stripped; the
 * result needs at most 26 bytes).
 *
 * Returns 0 on success, PTR_ERR() from the nvmem lookup on failure, and
 * -EINVAL when either 32-byte half of the PROM fails its CRC check.
 */
static int bridge_get_partnum(u64 baddr, char *partnum)
{
	struct nvmem_device *nvmem;
	char prefix[24];
	u8 prom[64];
	int i, j;
	int ret;

	snprintf(prefix, sizeof(prefix), "bridge-%012llx-0b-", baddr);

	nvmem = nvmem_device_find(prefix, bridge_nvmem_match);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	ret = nvmem_device_read(nvmem, 0, 64, prom);
	nvmem_device_put(nvmem);

	/*
	 * NOTE(review): a short read returns a positive byte count to the
	 * caller; bridge_probe() only tests for non-zero, so this still
	 * counts as failure -- confirm if other callers ever appear.
	 */
	if (ret != 64)
		return ret;

	/* Both 32-byte halves carry their own CRC16. */
	if (crc16(CRC16_INIT, prom, 32) != CRC16_VALID ||
	    crc16(CRC16_INIT, prom + 32, 32) != CRC16_VALID)
		return -EINVAL;

	/* Assemble part number */
	j = 0;
	for (i = 0; i < 19; i++)
		if (prom[i + 11] != ' ')
			partnum[j++] = prom[i + 11];

	for (i = 0; i < 6; i++)
		if (prom[i + 32] != ' ')
			partnum[j++] = prom[i + 32];

	partnum[j] = 0;
	return 0;
}
/*
 * Probe a xtalk bridge: identify the board from its one-wire PROM,
 * create the hierarchical IRQ domain, initialize the bridge hardware
 * (clear interrupts, set up address windows and byte-swapping), and
 * scan the PCI bus behind it.
 *
 * Returns 0 on success, -EPROBE_DEFER while the PROM is not yet
 * readable, or a negative errno on failure.
 */
static int bridge_probe(struct platform_device *pdev)
{
	struct xtalk_bridge_platform_data *bd = dev_get_platdata(&pdev->dev);
	struct device *dev = &pdev->dev;
	struct bridge_controller *bc;
	struct pci_host_bridge *host;
	struct irq_domain *domain, *parent;
	struct fwnode_handle *fn;
	char partnum[26];
	int slot;
	int err;

	/* get part number from one wire prom */
	if (bridge_get_partnum(virt_to_phys((void *)bd->bridge_addr), partnum))
		return -EPROBE_DEFER; /* not available yet */

	parent = irq_get_default_host();
	if (!parent)
		return -ENODEV;
	fn = irq_domain_alloc_named_fwnode("BRIDGE");
	if (!fn)
		return -ENOMEM;
	/* 8 interrupt pins, stacked on top of the default (hub) domain */
	domain = irq_domain_create_hierarchy(parent, 0, 8, fn,
					     &bridge_domain_ops, NULL);
	if (!domain) {
		irq_domain_free_fwnode(fn);
		return -ENOMEM;
	}

	pci_set_flags(PCI_PROBE_ONLY);

	host = devm_pci_alloc_host_bridge(dev, sizeof(*bc));
	if (!host) {
		err = -ENOMEM;
		goto err_remove_domain;
	}

	bc = pci_host_bridge_priv(host);

	bc->busn.name = "Bridge PCI busn";
	bc->busn.start = 0;
	bc->busn.end = 0xff;
	bc->busn.flags = IORESOURCE_BUS;

	bc->domain = domain;

	pci_add_resource_offset(&host->windows, &bd->mem, bd->mem_offset);
	pci_add_resource_offset(&host->windows, &bd->io, bd->io_offset);
	pci_add_resource(&host->windows, &bc->busn);

	err = devm_request_pci_bus_resources(dev, &host->windows);
	if (err < 0)
		goto err_free_resource;

	bc->nasid = bd->nasid;

	bc->baddr = (u64)bd->masterwid << 60 | PCI64_ATTR_BAR;
	bc->base = (struct bridge_regs *)bd->bridge_addr;
	bc->intr_addr = bd->intr_addr;

	/*
	 * Clear all pending interrupts.
	 */
	bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_ALL_CLR);

	/*
	 * Until otherwise set up, assume all interrupts are from slot 0
	 */
	bridge_write(bc, b_int_device, 0x0);

	/*
	 * disable swapping for big windows
	 */
	bridge_clr(bc, b_wid_control,
		   BRIDGE_CTRL_IO_SWAP | BRIDGE_CTRL_MEM_SWAP);
#ifdef CONFIG_PAGE_SIZE_4KB
	bridge_clr(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
#else /* 16kB or larger */
	bridge_set(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
#endif

	/*
	 * Hmm...  IRIX sets additional bits in the address which
	 * are documented as reserved in the bridge docs.
	 */
	bridge_write(bc, b_wid_int_upper,
		     ((bc->intr_addr >> 32) & 0xffff) | (bd->masterwid << 16));
	bridge_write(bc, b_wid_int_lower, bc->intr_addr & 0xffffffff);
	bridge_write(bc, b_dir_map, (bd->masterwid << 20));	/* DMA */
	bridge_write(bc, b_int_enable, 0);

	for (slot = 0; slot < 8; slot++) {
		bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
		bc->pci_int[slot][0] = -1;
		bc->pci_int[slot][1] = -1;
		/* default interrupt pin mapping */
		bc->int_mapping[slot][0] = slot;
		bc->int_mapping[slot][1] = slot ^ 4;
	}
	bridge_read(bc, b_wid_tflush);	  /* wait until Bridge PIO complete */

	/* apply board-specific IOC3 IDs / interrupt overrides */
	bridge_setup_board(bc, partnum);

	host->dev.parent = dev;
	host->sysdata = bc;
	host->busnr = 0;
	host->ops = &bridge_pci_ops;
	host->map_irq = bridge_map_irq;
	host->swizzle_irq = pci_common_swizzle;

	err = pci_scan_root_bus_bridge(host);
	if (err < 0)
		goto err_free_resource;

	pci_bus_claim_resources(host->bus);
	pci_bus_add_devices(host->bus);

	platform_set_drvdata(pdev, host->bus);

	return 0;

err_free_resource:
	pci_free_resource_list(&host->windows);
err_remove_domain:
	irq_domain_remove(domain);
	irq_domain_free_fwnode(fn);

	return err;
}
/*
 * Tear down a bridge: destroy its IRQ domain (and the fwnode created
 * for it), then stop and remove the PCI root bus under the rescan lock.
 */
static void bridge_remove(struct platform_device *pdev)
{
	struct pci_bus *bus = platform_get_drvdata(pdev);
	struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
	struct fwnode_handle *fn = bc->domain->fwnode;

	irq_domain_remove(bc->domain);
	irq_domain_free_fwnode(fn);
	pci_lock_rescan_remove();
	pci_stop_root_bus(bus);
	pci_remove_root_bus(bus);
	pci_unlock_rescan_remove();
}
/* Platform driver bound to the "xtalk-bridge" device, built-in only. */
static struct platform_driver bridge_driver = {
	.probe  = bridge_probe,
	.remove_new = bridge_remove,
	.driver = {
		.name = "xtalk-bridge",
	}
};
builtin_platform_driver(bridge_driver);
| linux-master | arch/mips/pci/pci-xtalk-bridge.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Define the pci_ops for the PCIC on Toshiba TX4927, TX4938, etc.
*
* Based on linux/arch/mips/pci/ops-tx4938.c,
* linux/arch/mips/pci/fixup-rbtx4938.c,
* linux/arch/mips/txx9/rbtx4938/setup.c,
* and RBTX49xx patch from CELF patch archive.
*
* 2003-2005 (c) MontaVista Software, Inc.
* Copyright (C) 2004 by Ralf Baechle ([email protected])
* (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/txx9/pci.h>
#include <asm/txx9/tx4927pcic.h>
/* Per-channel register base cache; looked up by pci_controller pointer. */
static struct {
	struct pci_controller *channel;
	struct tx4927_pcic_reg __iomem *pcicptr;
} pcicptrs[2];	/* TX4938 has 2 pcic */
/*
 * Record the register base for @channel in the pcicptrs[] cache,
 * updating an existing entry or claiming a free slot.  Running out of
 * slots is a configuration error and triggers BUG().
 */
static void __init set_tx4927_pcicptr(struct pci_controller *channel,
				      struct tx4927_pcic_reg __iomem *pcicptr)
{
	int slot;

	/* Update an existing entry for this channel, if one exists. */
	for (slot = 0; slot < ARRAY_SIZE(pcicptrs); slot++) {
		if (pcicptrs[slot].channel == channel) {
			pcicptrs[slot].pcicptr = pcicptr;
			return;
		}
	}

	/* Otherwise claim the first unused slot. */
	for (slot = 0; slot < ARRAY_SIZE(pcicptrs); slot++) {
		if (pcicptrs[slot].channel == NULL) {
			pcicptrs[slot].channel = channel;
			pcicptrs[slot].pcicptr = pcicptr;
			return;
		}
	}

	BUG();
}
/*
 * Look up the register base previously recorded for @channel.
 * Returns NULL when the channel is unknown.
 */
struct tx4927_pcic_reg __iomem *get_tx4927_pcicptr(
	struct pci_controller *channel)
{
	int slot;

	for (slot = 0; slot < ARRAY_SIZE(pcicptrs); slot++)
		if (pcicptrs[slot].channel == channel)
			return pcicptrs[slot].pcicptr;

	return NULL;
}
/*
 * Program the configuration-cycle address register for a bus/devfn/reg
 * triple.  Bit 0 selects a type-1 cycle for buses behind a bridge.
 * Also clears a stale master-abort status so check_abort() only sees
 * aborts caused by the upcoming access.
 *
 * Returns 0 on success, -1 for device numbers beyond the controller's
 * limit on the root bus.
 */
static int mkaddr(struct pci_bus *bus, unsigned int devfn, int where,
		  struct tx4927_pcic_reg __iomem *pcicptr)
{
	if (bus->parent == NULL &&
	    devfn >= PCI_DEVFN(TX4927_PCIC_MAX_DEVNU, 0))
		return -1;
	__raw_writel(((bus->number & 0xff) << 0x10)
		     | ((devfn & 0xff) << 0x08) | (where & 0xfc)
		     | (bus->parent ? 1 : 0),
		     &pcicptr->g2pcfgadrs);
	/* clear M_ABORT and Disable M_ABORT Int. */
	__raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
		     | (PCI_STATUS_REC_MASTER_ABORT << 16),
		     &pcicptr->pcistatus);
	return 0;
}
/*
 * After a config access: wait for the write buffer to drain, then test
 * whether the cycle master-aborted (i.e. no device responded).
 *
 * Returns PCIBIOS_SUCCESSFUL, or PCIBIOS_DEVICE_NOT_FOUND on abort
 * (with the abort status cleared again for the next access).
 */
static int check_abort(struct tx4927_pcic_reg __iomem *pcicptr)
{
	int code = PCIBIOS_SUCCESSFUL;

	/* wait write cycle completion before checking error status */
	while (__raw_readl(&pcicptr->pcicstatus) & TX4927_PCIC_PCICSTATUS_IWB)
		;
	if (__raw_readl(&pcicptr->pcistatus)
	    & (PCI_STATUS_REC_MASTER_ABORT << 16)) {
		__raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
			     | (PCI_STATUS_REC_MASTER_ABORT << 16),
			     &pcicptr->pcistatus);
		/* flush write buffer */
		iob();
		code = PCIBIOS_DEVICE_NOT_FOUND;
	}
	return code;
}
/*
 * Sub-word accessors for the 32-bit g2pcfgdata configuration data
 * register.  On big-endian kernels the byte offset within the word is
 * flipped (^3 for bytes, ^2 for halfwords) so that PCI's little-endian
 * byte lanes land in the right place.
 */

static u8 icd_readb(int offset, struct tx4927_pcic_reg __iomem *pcicptr)
{
#ifdef __BIG_ENDIAN
	offset ^= 3;
#endif
	return __raw_readb((void __iomem *)&pcicptr->g2pcfgdata + offset);
}

static u16 icd_readw(int offset, struct tx4927_pcic_reg __iomem *pcicptr)
{
#ifdef __BIG_ENDIAN
	offset ^= 2;
#endif
	return __raw_readw((void __iomem *)&pcicptr->g2pcfgdata + offset);
}

static u32 icd_readl(struct tx4927_pcic_reg __iomem *pcicptr)
{
	return __raw_readl(&pcicptr->g2pcfgdata);
}

static void icd_writeb(u8 val, int offset,
		       struct tx4927_pcic_reg __iomem *pcicptr)
{
#ifdef __BIG_ENDIAN
	offset ^= 3;
#endif
	__raw_writeb(val, (void __iomem *)&pcicptr->g2pcfgdata + offset);
}

static void icd_writew(u16 val, int offset,
		       struct tx4927_pcic_reg __iomem *pcicptr)
{
#ifdef __BIG_ENDIAN
	offset ^= 2;
#endif
	__raw_writew(val, (void __iomem *)&pcicptr->g2pcfgdata + offset);
}

static void icd_writel(u32 val, struct tx4927_pcic_reg __iomem *pcicptr)
{
	__raw_writel(val, &pcicptr->g2pcfgdata);
}
/* Resolve a pci_bus to its controller's register base via sysdata. */
static struct tx4927_pcic_reg __iomem *pci_bus_to_pcicptr(struct pci_bus *bus)
{
	struct pci_controller *channel = bus->sysdata;

	return get_tx4927_pcicptr(channel);
}
/*
 * pci_ops.read: perform a 1/2/4-byte configuration read.
 * On an invalid address, return all-ones in *val (master-abort
 * emulation) and -1.
 */
static int tx4927_pci_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *val)
{
	struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(bus);

	if (mkaddr(bus, devfn, where, pcicptr)) {
		*val = 0xffffffff;
		return -1;
	}
	switch (size) {
	case 1:
		*val = icd_readb(where & 3, pcicptr);
		break;
	case 2:
		*val = icd_readw(where & 3, pcicptr);
		break;
	default:
		*val = icd_readl(pcicptr);
	}
	return check_abort(pcicptr);
}
/*
 * pci_ops.write: perform a 1/2/4-byte configuration write.
 * Returns -1 on an invalid address, otherwise the check_abort() result.
 */
static int tx4927_pci_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 val)
{
	struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(bus);

	if (mkaddr(bus, devfn, where, pcicptr))
		return -1;
	switch (size) {
	case 1:
		icd_writeb(val, where & 3, pcicptr);
		break;
	case 2:
		icd_writew(val, where & 3, pcicptr);
		break;
	default:
		icd_writel(val, pcicptr);
	}
	return check_abort(pcicptr);
}
/* Config-space accessors installed on every TX4927/38 PCI channel. */
static struct pci_ops tx4927_pci_ops = {
	.read = tx4927_pci_config_read,
	.write = tx4927_pci_config_write,
};
/*
 * Tunables set from the kernel command line via tx4927_pcibios_setup():
 * TRDY/RETRY timeout counters and the GBUS watchdog counter.
 */
static struct {
	u8 trdyto;
	u8 retryto;
	u16 gbwc;
} tx4927_pci_opts = {
	.trdyto = 0,
	.retryto = 0,
	.gbwc = 0xfe0,	/* 4064 GBUSCLK for CCFG.GTOT=0b11 */
};
/*
 * Parse "trdyto=", "retryto=" and "gbwc=" PCI command-line options into
 * tx4927_pci_opts.  Returns NULL when the option was consumed, or the
 * unmodified string when it is not one of ours.
 */
char *tx4927_pcibios_setup(char *str)
{
	if (strncmp(str, "trdyto=", 7) == 0) {
		u8 byteval = 0;

		if (kstrtou8(str + 7, 0, &byteval) == 0)
			tx4927_pci_opts.trdyto = byteval;
		return NULL;
	}

	if (strncmp(str, "retryto=", 8) == 0) {
		u8 byteval = 0;

		if (kstrtou8(str + 8, 0, &byteval) == 0)
			tx4927_pci_opts.retryto = byteval;
		return NULL;
	}

	if (strncmp(str, "gbwc=", 5) == 0) {
		u16 wordval;

		if (kstrtou16(str + 5, 0, &wordval) == 0)
			tx4927_pci_opts.gbwc = wordval;
		return NULL;
	}

	return str;
}
/*
 * One-time hardware initialization of a TX4927/38 PCI controller
 * channel: registers the channel in the pcicptrs cache, programs the
 * GBUS<->PCI address windows from the channel's io/mem resources,
 * clears and unmasks all status/interrupt sources, and (unless
 * @extarb) enables the on-chip bus arbiter.  Runs with local IRQs
 * disabled around the register programming.
 */
void __init tx4927_pcic_setup(struct tx4927_pcic_reg __iomem *pcicptr,
			      struct pci_controller *channel, int extarb)
{
	int i;
	unsigned long flags;

	set_tx4927_pcicptr(channel, pcicptr);

	if (!channel->pci_ops)
		printk(KERN_INFO
		       "PCIC -- DID:%04x VID:%04x RID:%02x Arbiter:%s\n",
		       __raw_readl(&pcicptr->pciid) >> 16,
		       __raw_readl(&pcicptr->pciid) & 0xffff,
		       __raw_readl(&pcicptr->pciccrev) & 0xff,
		       extarb ? "External" : "Internal");
	channel->pci_ops = &tx4927_pci_ops;

	local_irq_save(flags);

	/* Disable All Initiator Space */
	__raw_writel(__raw_readl(&pcicptr->pciccfg)
		     & ~(TX4927_PCIC_PCICCFG_G2PMEN(0)
			 | TX4927_PCIC_PCICCFG_G2PMEN(1)
			 | TX4927_PCIC_PCICCFG_G2PMEN(2)
			 | TX4927_PCIC_PCICCFG_G2PIOEN),
		     &pcicptr->pciccfg);

	/* GB->PCI mappings */
	__raw_writel((channel->io_resource->end - channel->io_resource->start)
		     >> 4,
		     &pcicptr->g2piomask);
	____raw_writeq((channel->io_resource->start +
			channel->io_map_base - IO_BASE) |
#ifdef __BIG_ENDIAN
		       TX4927_PCIC_G2PIOGBASE_ECHG
#else
		       TX4927_PCIC_G2PIOGBASE_BSDIS
#endif
		       , &pcicptr->g2piogbase);
	____raw_writeq(channel->io_resource->start - channel->io_offset,
		       &pcicptr->g2piopbase);
	/* clear all three memory windows, then set up window 0 if used */
	for (i = 0; i < 3; i++) {
		__raw_writel(0, &pcicptr->g2pmmask[i]);
		____raw_writeq(0, &pcicptr->g2pmgbase[i]);
		____raw_writeq(0, &pcicptr->g2pmpbase[i]);
	}
	if (channel->mem_resource->end) {
		__raw_writel((channel->mem_resource->end
			      - channel->mem_resource->start) >> 4,
			     &pcicptr->g2pmmask[0]);
		____raw_writeq(channel->mem_resource->start |
#ifdef __BIG_ENDIAN
			       TX4927_PCIC_G2PMnGBASE_ECHG
#else
			       TX4927_PCIC_G2PMnGBASE_BSDIS
#endif
			       , &pcicptr->g2pmgbase[0]);
		____raw_writeq(channel->mem_resource->start -
			       channel->mem_offset,
			       &pcicptr->g2pmpbase[0]);
	}
	/* PCI->GB mappings (I/O 256B) */
	__raw_writel(0, &pcicptr->p2giopbase); /* 256B */
	____raw_writeq(0, &pcicptr->p2giogbase);
	/* PCI->GB mappings (MEM 512MB (64MB on R1.x)) */
	__raw_writel(0, &pcicptr->p2gm0plbase);
	__raw_writel(0, &pcicptr->p2gm0pubase);
	____raw_writeq(TX4927_PCIC_P2GMnGBASE_TMEMEN |
#ifdef __BIG_ENDIAN
		       TX4927_PCIC_P2GMnGBASE_TECHG
#else
		       TX4927_PCIC_P2GMnGBASE_TBSDIS
#endif
		       , &pcicptr->p2gmgbase[0]);
	/* PCI->GB mappings (MEM 16MB) */
	__raw_writel(0xffffffff, &pcicptr->p2gm1plbase);
	__raw_writel(0xffffffff, &pcicptr->p2gm1pubase);
	____raw_writeq(0, &pcicptr->p2gmgbase[1]);
	/* PCI->GB mappings (MEM 1MB) */
	__raw_writel(0xffffffff, &pcicptr->p2gm2pbase); /* 1MB */
	____raw_writeq(0, &pcicptr->p2gmgbase[2]);

	/* Clear all (including IRBER) except for GBWC */
	__raw_writel((tx4927_pci_opts.gbwc << 16)
		     & TX4927_PCIC_PCICCFG_GBWC_MASK,
		     &pcicptr->pciccfg);
	/* Enable Initiator Memory Space */
	if (channel->mem_resource->end)
		__raw_writel(__raw_readl(&pcicptr->pciccfg)
			     | TX4927_PCIC_PCICCFG_G2PMEN(0),
			     &pcicptr->pciccfg);
	/* Enable Initiator I/O Space */
	if (channel->io_resource->end)
		__raw_writel(__raw_readl(&pcicptr->pciccfg)
			     | TX4927_PCIC_PCICCFG_G2PIOEN,
			     &pcicptr->pciccfg);
	/* Enable Initiator Config */
	__raw_writel(__raw_readl(&pcicptr->pciccfg)
		     | TX4927_PCIC_PCICCFG_ICAEN | TX4927_PCIC_PCICCFG_TCAR,
		     &pcicptr->pciccfg);

	/* Do not use MEMMUL, MEMINF: YMFPCI card causes M_ABORT. */
	__raw_writel(0, &pcicptr->pcicfg1);

	__raw_writel((__raw_readl(&pcicptr->g2ptocnt) & ~0xffff)
		     | (tx4927_pci_opts.trdyto & 0xff)
		     | ((tx4927_pci_opts.retryto & 0xff) << 8),
		     &pcicptr->g2ptocnt);

	/* Clear All Local Bus Status */
	__raw_writel(TX4927_PCIC_PCICSTATUS_ALL, &pcicptr->pcicstatus);
	/* Enable All Local Bus Interrupts */
	__raw_writel(TX4927_PCIC_PCICSTATUS_ALL, &pcicptr->pcicmask);
	/* Clear All Initiator Status */
	__raw_writel(TX4927_PCIC_G2PSTATUS_ALL, &pcicptr->g2pstatus);
	/* Enable All Initiator Interrupts */
	__raw_writel(TX4927_PCIC_G2PSTATUS_ALL, &pcicptr->g2pmask);
	/* Clear All PCI Status Error */
	__raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
		     | (TX4927_PCIC_PCISTATUS_ALL << 16),
		     &pcicptr->pcistatus);
	/* Enable All PCI Status Error Interrupts */
	__raw_writel(TX4927_PCIC_PCISTATUS_ALL, &pcicptr->pcimask);

	if (!extarb) {
		/* Reset Bus Arbiter */
		__raw_writel(TX4927_PCIC_PBACFG_RPBA, &pcicptr->pbacfg);
		__raw_writel(0, &pcicptr->pbabm);
		/* Enable Bus Arbiter */
		__raw_writel(TX4927_PCIC_PBACFG_PBAEN, &pcicptr->pbacfg);
	}

	__raw_writel(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
		     | PCI_COMMAND_PARITY | PCI_COMMAND_SERR,
		     &pcicptr->pcistatus);
	local_irq_restore(flags);

	printk(KERN_DEBUG
	       "PCI: COMMAND=%04x,PCIMASK=%04x,"
	       "TRDYTO=%02x,RETRYTO=%02x,GBWC=%03x\n",
	       __raw_readl(&pcicptr->pcistatus) & 0xffff,
	       __raw_readl(&pcicptr->pcimask) & 0xffff,
	       __raw_readl(&pcicptr->g2ptocnt) & 0xff,
	       (__raw_readl(&pcicptr->g2ptocnt) & 0xff00) >> 8,
	       (__raw_readl(&pcicptr->pciccfg) >> 16) & 0xfff);
}
/*
 * Decode and print (KERN_ERR) the PCI status, initiator status and
 * local-bus status registers of one controller, naming each error bit
 * that is set.  Only registers with at least one error bit set are
 * printed.
 */
static void tx4927_report_pcic_status1(struct tx4927_pcic_reg __iomem *pcicptr)
{
	__u16 pcistatus = (__u16)(__raw_readl(&pcicptr->pcistatus) >> 16);
	__u32 g2pstatus = __raw_readl(&pcicptr->g2pstatus);
	__u32 pcicstatus = __raw_readl(&pcicptr->pcicstatus);
	static struct {
		__u32 flag;
		const char *str;
	} pcistat_tbl[] = {
		{ PCI_STATUS_DETECTED_PARITY,	"DetectedParityError" },
		{ PCI_STATUS_SIG_SYSTEM_ERROR,	"SignaledSystemError" },
		{ PCI_STATUS_REC_MASTER_ABORT,	"ReceivedMasterAbort" },
		{ PCI_STATUS_REC_TARGET_ABORT,	"ReceivedTargetAbort" },
		{ PCI_STATUS_SIG_TARGET_ABORT,	"SignaledTargetAbort" },
		{ PCI_STATUS_PARITY,	"MasterParityError" },
	}, g2pstat_tbl[] = {
		{ TX4927_PCIC_G2PSTATUS_TTOE,	"TIOE" },
		{ TX4927_PCIC_G2PSTATUS_RTOE,	"RTOE" },
	}, pcicstat_tbl[] = {
		{ TX4927_PCIC_PCICSTATUS_PME,	"PME" },
		{ TX4927_PCIC_PCICSTATUS_TLB,	"TLB" },
		{ TX4927_PCIC_PCICSTATUS_NIB,	"NIB" },
		{ TX4927_PCIC_PCICSTATUS_ZIB,	"ZIB" },
		{ TX4927_PCIC_PCICSTATUS_PERR,	"PERR" },
		{ TX4927_PCIC_PCICSTATUS_SERR,	"SERR" },
		{ TX4927_PCIC_PCICSTATUS_GBE,	"GBE" },
		{ TX4927_PCIC_PCICSTATUS_IWB,	"IWB" },
	};
	int i, cont;

	/* start the line at error priority; details follow via KERN_CONT */
	printk(KERN_ERR "");
	if (pcistatus & TX4927_PCIC_PCISTATUS_ALL) {
		printk(KERN_CONT "pcistat:%04x(", pcistatus);
		for (i = 0, cont = 0; i < ARRAY_SIZE(pcistat_tbl); i++)
			if (pcistatus & pcistat_tbl[i].flag)
				printk(KERN_CONT "%s%s",
				       cont++ ? " " : "", pcistat_tbl[i].str);
		printk(KERN_CONT ") ");
	}
	if (g2pstatus & TX4927_PCIC_G2PSTATUS_ALL) {
		printk(KERN_CONT "g2pstatus:%08x(", g2pstatus);
		for (i = 0, cont = 0; i < ARRAY_SIZE(g2pstat_tbl); i++)
			if (g2pstatus & g2pstat_tbl[i].flag)
				printk(KERN_CONT "%s%s",
				       cont++ ? " " : "", g2pstat_tbl[i].str);
		printk(KERN_CONT ") ");
	}
	if (pcicstatus & TX4927_PCIC_PCICSTATUS_ALL) {
		printk(KERN_CONT "pcicstatus:%08x(", pcicstatus);
		for (i = 0, cont = 0; i < ARRAY_SIZE(pcicstat_tbl); i++)
			if (pcicstatus & pcicstat_tbl[i].flag)
				printk(KERN_CONT "%s%s",
				       cont++ ? " " : "", pcicstat_tbl[i].str);
		printk(KERN_CONT ")");
	}
	printk(KERN_CONT "\n");
}
void tx4927_report_pcic_status(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) {
if (pcicptrs[i].pcicptr)
tx4927_report_pcic_status1(pcicptrs[i].pcicptr);
}
}
/*
 * Hex-dump one controller's whole register block, 8 words per line.
 * Registers whose read has side effects (interrupt-ack, special cycle,
 * config address/data) are printed as XXXXXXXX instead of being read.
 */
static void tx4927_dump_pcic_settings1(struct tx4927_pcic_reg __iomem *pcicptr)
{
	int i;
	__u32 __iomem *preg = (__u32 __iomem *)pcicptr;

	printk(KERN_INFO "tx4927 pcic (0x%p) settings:", pcicptr);
	for (i = 0; i < sizeof(struct tx4927_pcic_reg); i += 4, preg++) {
		if (i % 32 == 0) {
			printk(KERN_CONT "\n");
			printk(KERN_INFO "%04x:", i);
		}
		/* skip registers with side-effects */
		if (i == offsetof(struct tx4927_pcic_reg, g2pintack)
		    || i == offsetof(struct tx4927_pcic_reg, g2pspc)
		    || i == offsetof(struct tx4927_pcic_reg, g2pcfgadrs)
		    || i == offsetof(struct tx4927_pcic_reg, g2pcfgdata)) {
			printk(KERN_CONT " XXXXXXXX");
			continue;
		}
		printk(KERN_CONT " %08x", __raw_readl(preg));
	}
	printk(KERN_CONT "\n");
}
void tx4927_dump_pcic_settings(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) {
if (pcicptrs[i].pcicptr)
tx4927_dump_pcic_settings1(pcicptrs[i].pcicptr);
}
}
/*
 * PCI error interrupt handler.  @dev_id is the controller's register
 * base.  Behavior depends on txx9_pci_err_action: report and/or clear
 * the error status, or dump all registers and panic.
 */
irqreturn_t tx4927_pcierr_interrupt(int irq, void *dev_id)
{
	struct pt_regs *regs = get_irq_regs();
	struct tx4927_pcic_reg __iomem *pcicptr =
		(struct tx4927_pcic_reg __iomem *)(unsigned long)dev_id;

	if (txx9_pci_err_action != TXX9_PCI_ERR_IGNORE) {
		printk(KERN_WARNING "PCIERR interrupt at 0x%0*lx\n",
		       (int)(2 * sizeof(unsigned long)), regs->cp0_epc);
		tx4927_report_pcic_status1(pcicptr);
	}
	if (txx9_pci_err_action != TXX9_PCI_ERR_PANIC) {
		/* clear all pci errors */
		__raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
			     | (TX4927_PCIC_PCISTATUS_ALL << 16),
			     &pcicptr->pcistatus);
		__raw_writel(TX4927_PCIC_G2PSTATUS_ALL, &pcicptr->g2pstatus);
		__raw_writel(TX4927_PCIC_PBASTATUS_ALL, &pcicptr->pbastatus);
		__raw_writel(TX4927_PCIC_PCICSTATUS_ALL, &pcicptr->pcicstatus);
		return IRQ_HANDLED;
	}
	console_verbose();
	tx4927_dump_pcic_settings1(pcicptr);
	panic("PCI error.");
}
#ifdef CONFIG_TOSHIBA_FPCIB0
/*
 * PCI fixup for the EFAR SLC90E66 PCI-ISA bridge on the FPCIB0
 * backplane: when the internal arbiter is in use, reprogram its
 * request-port priorities and switch to fixed park-master mode, which
 * the SLC90E66 requires.
 */
static void tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
{
	struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(dev->bus);

	if (!pcicptr)
		return;
	if (__raw_readl(&pcicptr->pbacfg) & TX4927_PCIC_PBACFG_PBAEN) {
		/* Reset Bus Arbiter */
		__raw_writel(TX4927_PCIC_PBACFG_RPBA, &pcicptr->pbacfg);
		/*
		 * swap reqBP and reqXP (raise priority of SLC90E66).
		 * SLC90E66(PCI-ISA bridge) is connected to REQ2 on
		 * PCI Backplane board.
		 */
		__raw_writel(0x72543610, &pcicptr->pbareqport);
		__raw_writel(0, &pcicptr->pbabm);
		/* Use Fixed ParkMaster (required by SLC90E66) */
		__raw_writel(TX4927_PCIC_PBACFG_FIXPA, &pcicptr->pbacfg);
		/* Enable Bus Arbiter */
		__raw_writel(TX4927_PCIC_PBACFG_FIXPA |
			     TX4927_PCIC_PBACFG_PBAEN,
			     &pcicptr->pbacfg);
		printk(KERN_INFO "PCI: Use Fixed Park Master (REQPORT %08x)\n",
		       __raw_readl(&pcicptr->pbareqport));
	}
}
#define PCI_DEVICE_ID_EFAR_SLC90E66_0 0x9460
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_0,
			tx4927_quirk_slc90e66_bridge);
#endif
| linux-master | arch/mips/pci/ops-tx4927.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) 2003, 04, 11 Ralf Baechle ([email protected])
* Copyright (C) 2011 Wind River Systems,
* written by Ralf Baechle ([email protected])
*/
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/of_address.h>
#include <asm/cpu-info.h>
/*
 * Lowest I/O port and memory addresses handed out to PCI devices;
 * assigned by the individual controller setup code (see e.g.
 * bcm1480_pcibios_init()).
 */
unsigned long PCIBIOS_MIN_IO;
EXPORT_SYMBOL(PCIBIOS_MIN_IO);

unsigned long PCIBIOS_MIN_MEM;
EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
/*
 * Derive the default PCI cache line size from the CPU's cache
 * hierarchy (largest line size of D-cache, S-cache, T-cache).
 * pci_dfl_cache_line_size is in 32-bit dwords, hence the >> 2.
 */
static int __init pcibios_set_cache_line_size(void)
{
	unsigned int lsize;

	/*
	 * Set PCI cacheline size to that of the highest level in the
	 * cache hierarchy.
	 */
	lsize = cpu_dcache_line_size();
	lsize = cpu_scache_line_size() ? : lsize;
	lsize = cpu_tcache_line_size() ? : lsize;

	BUG_ON(!lsize);

	pci_dfl_cache_line_size = lsize >> 2;

	pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
	return 0;
}
arch_initcall(pcibios_set_cache_line_size);
/*
 * Translate a BAR resource into the address range exposed to user
 * space: the start is run through fixup_bigphys_addr() while the end
 * is computed from the raw resource start and its size.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc, resource_size_t *start,
			  resource_size_t *end)
{
	phys_addr_t size = resource_size(rsrc);

	*start = fixup_bigphys_addr(rsrc->start, size);
	*end = rsrc->start + size - 1;
}
| linux-master | arch/mips/pci/pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001,2002,2005 Broadcom Corporation
* Copyright (C) 2004 by Ralf Baechle ([email protected])
*/
/*
* BCM1x80/1x55-specific PCI support
*
* This module provides the glue between Linux's PCI subsystem
* and the hardware. We basically provide glue for accessing
* configuration space, and set up the translation for I/O
* space accesses.
*
* To access configuration space, we use ioremap. In the 32-bit
* kernel, this consumes either 4 or 8 page table pages, and 16MB of
* kernel mapped memory. Hopefully neither of these should be a huge
* problem.
*
* XXX: AT THIS TIME, ONLY the NATIVE PCI-X INTERFACE IS SUPPORTED.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/vt.h>
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_scd.h>
#include <asm/sibyte/board.h>
#include <asm/io.h>
/*
* Macros for calculating offsets into config space given a device
* structure or dev/fun/reg
*/
#define CFGOFFSET(bus, devfn, where) (((bus)<<16)+((devfn)<<8)+(where))
#define CFGADDR(bus, devfn, where) CFGOFFSET((bus)->number, (devfn), where)

/* ioremap()ed base of the 16MB PCI configuration space window. */
static void *cfg_space;

/* bcm1480_bus_status flag bits */
#define PCI_BUS_ENABLED 1	/* firmware initialized the bus (host mode) */
#define PCI_DEVICE_MODE 2	/* we are a PCI device, not the bus host */

static int bcm1480_bus_status;

/* device number used for the host bridge's own config header */
#define PCI_BRIDGE_DEVICE 0
/*
* Read/write 32-bit values in config space.
*/
/* 32-bit accessors into the mapped config window; addr is rounded
 * down to a word boundary. */
static inline u32 READCFG32(u32 addr)
{
	return *(u32 *)(cfg_space + (addr&~3));
}

static inline void WRITECFG32(u32 addr, u32 data)
{
	*(u32 *)(cfg_space + (addr & ~3)) = data;
}
/*
 * Map a PCI interrupt pin (1..4 = INTA..INTD) to its BCM1480 interrupt
 * number; pin 0 (no interrupt) maps to -1.
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return pin ? K_BCM1480_INT_PCI_INTA - 1 + pin : -1;
}
/* Do platform specific device initialization at pci_enable_device() time */
/* Nothing to do on BCM1480; always succeeds. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
/*
* Some checks before doing config cycles:
* In PCI Device Mode, hide everything on bus 0 except the LDT host
* bridge. Otherwise, access is controlled by bridge MasterEn bits.
*/
/*
 * Returns nonzero when a config access to @bus/@devfn is allowed.
 *
 * Fix: the previous version computed PCI_SLOT(devfn) into a local
 * ("devno") that was never read -- a dead store that triggers
 * -Wunused-but-set-variable.  The nested if/else is also flattened;
 * the decision reduces to "bus 0 is hidden while in PCI Device Mode".
 */
static int bcm1480_pci_can_access(struct pci_bus *bus, int devfn)
{
	if (!(bcm1480_bus_status & (PCI_BUS_ENABLED | PCI_DEVICE_MODE)))
		return 0;

	/* In PCI Device Mode, hide all of bus 0 from config cycles. */
	if (bus->number == 0 && (bcm1480_bus_status & PCI_DEVICE_MODE))
		return 0;

	return 1;
}
/*
* Read/write access functions for various sizes of values
* in config space. Return all 1's for disallowed accesses
* for a kludgy but adequate simulation of master aborts.
*/
/*
 * pci_ops.read: fetch the containing 32-bit word from config space and
 * extract the requested 1/2/4-byte value.  Disallowed accesses return
 * all-ones, simulating a master abort.
 */
static int bcm1480_pcibios_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 * val)
{
	u32 data = 0;

	/* reject misaligned sub-word accesses */
	if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
	else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (bcm1480_pci_can_access(bus, devfn))
		data = READCFG32(CFGADDR(bus, devfn, where));
	else
		data = 0xFFFFFFFF;

	if (size == 1)
		*val = (data >> ((where & 3) << 3)) & 0xff;
	else if (size == 2)
		*val = (data >> ((where & 3) << 3)) & 0xffff;
	else
		*val = data;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * pci_ops.write: read-modify-write the containing 32-bit config word to
 * store a 1/2/4-byte value.  Disallowed accesses are reported as
 * PCIBIOS_BAD_REGISTER_NUMBER.
 */
static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	u32 cfgaddr = CFGADDR(bus, devfn, where);
	u32 data = 0;

	/* reject misaligned sub-word accesses */
	if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
	else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (!bcm1480_pci_can_access(bus, devfn))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	data = READCFG32(cfgaddr);

	if (size == 1)
		data = (data & ~(0xff << ((where & 3) << 3))) |
		    (val << ((where & 3) << 3));
	else if (size == 2)
		data = (data & ~(0xffff << ((where & 3) << 3))) |
		    (val << ((where & 3) << 3));
	else
		data = val;

	WRITECFG32(cfgaddr, data);

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors for the BCM1480 host bridge. */
struct pci_ops bcm1480_pci_ops = {
	.read = bcm1480_pcibios_read,
	.write = bcm1480_pcibios_write,
};

/* PCI memory window ("match bytes" region, see bcm1480_pcibios_init) */
static struct resource bcm1480_mem_resource = {
	.name	= "BCM1480 PCI MEM",
	.start	= A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES,
	.end	= A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES + 0xfffffffUL,
	.flags	= IORESOURCE_MEM,
};

/* PCI I/O window */
static struct resource bcm1480_io_resource = {
	.name	= "BCM1480 PCI I/O",
	.start	= A_BCM1480_PHYS_PCI_IO_MATCH_BYTES,
	.end	= A_BCM1480_PHYS_PCI_IO_MATCH_BYTES + 0x1ffffffUL,
	.flags	= IORESOURCE_IO,
};

struct pci_controller bcm1480_controller = {
	.pci_ops	= &bcm1480_pci_ops,
	.mem_resource	= &bcm1480_mem_resource,
	.io_resource	= &bcm1480_io_resource,
	.io_offset	= A_BCM1480_PHYS_PCI_IO_MATCH_BYTES,
};
/*
 * Initialize the BCM1480 PCI-X host bridge: map config space, detect
 * host vs. device mode from the SCD system config register, verify the
 * firmware enabled bus mastering, enable ExpMemEn, map the I/O window
 * and register the controller.  Resources themselves were assigned by
 * CFE, so probing runs with PCI_PROBE_ONLY.
 */
static int __init bcm1480_pcibios_init(void)
{
	uint32_t cmdreg;
	uint64_t reg;

	/* CFE will assign PCI resources */
	pci_set_flags(PCI_PROBE_ONLY);

	/* Avoid ISA compat ranges.  */
	PCIBIOS_MIN_IO = 0x00008000UL;
	PCIBIOS_MIN_MEM = 0x01000000UL;

	/* Set I/O resource limits. - unlimited for now to accommodate HT */
	ioport_resource.end = 0xffffffffUL;
	iomem_resource.end = 0xffffffffUL;

	cfg_space = ioremap(A_BCM1480_PHYS_PCI_CFG_MATCH_BITS, 16*1024*1024);

	/*
	 * See if the PCI bus has been configured by the firmware.
	 */
	reg = __raw_readq(IOADDR(A_SCD_SYSTEM_CFG));
	if (!(reg & M_BCM1480_SYS_PCI_HOST)) {
		bcm1480_bus_status |= PCI_DEVICE_MODE;
	} else {
		cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0),
					     PCI_COMMAND));
		if (!(cmdreg & PCI_COMMAND_MASTER)) {
			printk
			    ("PCI: Skipping PCI probe.	Bus is not initialized.\n");
			iounmap(cfg_space);
			return 1; /* XXX */
		}
		bcm1480_bus_status |= PCI_BUS_ENABLED;
	}

	/* turn on ExpMemEn */
	cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), 0x40));
	WRITECFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), 0x40),
		   cmdreg | 0x10);
	cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), 0x40));

	/*
	 * Establish mappings in KSEG2 (kernel virtual) to PCI I/O
	 * space.  Use "match bytes" policy to make everything look
	 * little-endian.  So, you need to also set
	 * CONFIG_SWAP_IO_SPACE, but this is the combination that
	 * works correctly with most of Linux's drivers.
	 * XXX ehs: Should this happen in PCI Device mode?
	 */
	bcm1480_controller.io_map_base = (unsigned long)
		ioremap(A_BCM1480_PHYS_PCI_IO_MATCH_BYTES, 65536);
	bcm1480_controller.io_map_base -= bcm1480_controller.io_offset;
	set_io_port_base(bcm1480_controller.io_map_base);

	register_pci_controller(&bcm1480_controller);

#ifdef CONFIG_VGA_CONSOLE
	console_lock();
	do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1);
	console_unlock();
#endif
	return 0;
}
arch_initcall(bcm1480_pcibios_init);
| linux-master | arch/mips/pci/pci-bcm1480.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <[email protected]>
* Maciej W. Rozycki <[email protected]>
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <asm/gt64120.h>
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/*
* PCI configuration cycle AD bus definition
*/
/* Type 0 */
#define PCI_CFG_TYPE0_REG_SHF 0
#define PCI_CFG_TYPE0_FUNC_SHF 8
/* Type 1 */
#define PCI_CFG_TYPE1_REG_SHF 0
#define PCI_CFG_TYPE1_FUNC_SHF 8
#define PCI_CFG_TYPE1_DEV_SHF 11
#define PCI_CFG_TYPE1_BUS_SHF 16
/*
 * Perform one 32-bit config-space access (read or write, selected by
 * @access_type) through the GT-64xxx CFGADDR/CFGDATA register pair.
 * Slot 0 on bus 0 is the Galileo itself and needs unswapped accesses
 * (GT_WRITE/GT_READ rather than the __GT_* raw variants).
 *
 * Returns 0 on success, -1 on master/target abort or for the broken
 * slot 31.
 */
static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type,
		struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
{
	unsigned char busnum = bus->number;
	u32 intr;

	if ((busnum == 0) && (devfn >= PCI_DEVFN(31, 0)))
		return -1;	/* Because of a bug in the galileo (for slot 31). */

	/* Clear cause register bits */
	GT_WRITE(GT_INTRCAUSE_OFS, ~(GT_INTRCAUSE_MASABORT0_BIT |
				     GT_INTRCAUSE_TARABORT0_BIT));

	/* Setup address */
	GT_WRITE(GT_PCI0_CFGADDR_OFS,
		 (busnum << GT_PCI0_CFGADDR_BUSNUM_SHF) |
		 (devfn << GT_PCI0_CFGADDR_FUNCTNUM_SHF) |
		 ((where / 4) << GT_PCI0_CFGADDR_REGNUM_SHF) |
		 GT_PCI0_CFGADDR_CONFIGEN_BIT);

	if (access_type == PCI_ACCESS_WRITE) {
		if (busnum == 0 && PCI_SLOT(devfn) == 0) {
			/*
			 * The Galileo system controller is acting
			 * differently than other devices.
			 */
			GT_WRITE(GT_PCI0_CFGDATA_OFS, *data);
		} else
			__GT_WRITE(GT_PCI0_CFGDATA_OFS, *data);
	} else {
		if (busnum == 0 && PCI_SLOT(devfn) == 0) {
			/*
			 * The Galileo system controller is acting
			 * differently than other devices.
			 */
			*data = GT_READ(GT_PCI0_CFGDATA_OFS);
		} else
			*data = __GT_READ(GT_PCI0_CFGDATA_OFS);
	}

	/* Check for master or target abort */
	intr = GT_READ(GT_INTRCAUSE_OFS);

	if (intr & (GT_INTRCAUSE_MASABORT0_BIT | GT_INTRCAUSE_TARABORT0_BIT)) {
		/* Error occurred */

		/* Clear bits */
		GT_WRITE(GT_INTRCAUSE_OFS, ~(GT_INTRCAUSE_MASABORT0_BIT |
					     GT_INTRCAUSE_TARABORT0_BIT));

		return -1;
	}

	return 0;
}
/*
* We can't address 8 and 16 bit words directly. Instead we have to
* read/write a 32bit word and mask/modify the data we actually want.
*/
/*
 * Read 8/16/32 bits from PCI configuration space.  The controller only
 * performs dword cycles, so narrower reads fetch the containing dword
 * and shift/mask out the requested bytes.
 */
static int gt64xxx_pci0_pcibios_read(struct pci_bus *bus, unsigned int devfn,
		int where, int size, u32 * val)
{
	u32 dword = 0;

	if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
					       where, &dword))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*val = (dword >> ((where & 3) << 3)) & 0xff;
		break;
	case 2:
		*val = (dword >> ((where & 3) << 3)) & 0xffff;
		break;
	default:
		*val = dword;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write 8/16/32 bits to PCI configuration space.  Sub-dword writes are
 * implemented as read-modify-write of the containing dword.
 */
static int gt64xxx_pci0_pcibios_write(struct pci_bus *bus, unsigned int devfn,
		int where, int size, u32 val)
{
	int shift = (where & 3) << 3;
	u32 dword = 0;

	if (size == 4) {
		dword = val;
	} else {
		/* Fetch the surrounding dword, then merge in the new bytes */
		if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus,
						       devfn, where, &dword))
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (size == 1)
			dword = (dword & ~(0xff << shift)) | (val << shift);
		else if (size == 2)
			dword = (dword & ~(0xffff << shift)) | (val << shift);
	}

	if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn,
					       where, &dword))
		return PCIBIOS_DEVICE_NOT_FOUND;

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors registered with the generic MIPS PCI core. */
struct pci_ops gt64xxx_pci0_ops = {
	.read = gt64xxx_pci0_pcibios_read,
	.write = gt64xxx_pci0_pcibios_write
};
| linux-master | arch/mips/pci/ops-gt64xxx_pci0.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <asm/ip32/ip32_ints.h>
/*
* O2 has up to 5 PCI devices connected into the MACE bridge. The device
* map looks like this:
*
* 0 aic7xxx 0
* 1 aic7xxx 1
* 2 expansion slot
* 3 N/C
* 4 N/C
*/
#define SCSI0 MACEPCI_SCSI0_IRQ
#define SCSI1 MACEPCI_SCSI1_IRQ
#define INTA0 MACEPCI_SLOT0_IRQ
#define INTA1 MACEPCI_SLOT1_IRQ
#define INTA2 MACEPCI_SLOT2_IRQ
#define INTB MACEPCI_SHARED0_IRQ
#define INTC MACEPCI_SHARED1_IRQ
#define INTD MACEPCI_SHARED2_IRQ
/*
 * IRQ routing table, indexed by [slot][pin] (pin 1-4 => INT#A-D);
 * column 0 is a dummy because PCI pin numbering starts at 1.
 */
static char irq_tab_mace[][5] = {
      /* Dummy	INT#A  INT#B  INT#C  INT#D */
	{0,	    0,	   0,	  0,	 0}, /* This is placeholder row - never used */
	{0,	SCSI0, SCSI0, SCSI0, SCSI0},
	{0,	SCSI1, SCSI1, SCSI1, SCSI1},
	{0,	INTA0,	INTB,  INTC,  INTD},
	{0,	INTA1,	INTC,  INTD,  INTB},
	{0,	INTA2,	INTD,  INTB,  INTC},
};
/*
* Given a PCI slot number (a la PCI_SLOT(...)) and the interrupt pin of
* the device (1-4 => A-D), tell what irq to use. Note that we don't
* in theory have slots 4 and 5, and we never normally use the shared
* irqs. I suppose a device without a pin A will thank us for doing it
* right if there exists such a broken piece of crap.
*/
/* Resolve a device's IRQ from its slot number and interrupt pin. */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	const char *row = irq_tab_mace[slot];

	return row[pin];
}
/* Do platform specific device initialization at pci_enable_device() time */
/* Do platform specific device initialization at pci_enable_device() time.
 * The O2/MACE needs none, so this is a no-op that always succeeds. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
| linux-master | arch/mips/pci/fixup-ip32.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001,2002,2003 Broadcom Corporation
* Copyright (C) 2004 by Ralf Baechle ([email protected])
*/
/*
* BCM1250-specific PCI support
*
* This module provides the glue between Linux's PCI subsystem
* and the hardware. We basically provide glue for accessing
* configuration space, and set up the translation for I/O
* space accesses.
*
* To access configuration space, we use ioremap. In the 32-bit
* kernel, this consumes either 4 or 8 page table pages, and 16MB of
* kernel mapped memory. Hopefully neither of these should be a huge
* problem.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/vt.h>
#include <asm/io.h>
#include <asm/sibyte/sb1250_defs.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_scd.h>
#include <asm/sibyte/board.h>
/*
* Macros for calculating offsets into config space given a device
* structure or dev/fun/reg
*/
#define CFGOFFSET(bus, devfn, where) (((bus)<<16) + ((devfn)<<8) + (where))
#define CFGADDR(bus, devfn, where) CFGOFFSET((bus)->number, (devfn), where)
static void *cfg_space;
#define PCI_BUS_ENABLED 1
#define LDT_BUS_ENABLED 2
#define PCI_DEVICE_MODE 4
static int sb1250_bus_status;
#define PCI_BRIDGE_DEVICE 0
#define LDT_BRIDGE_DEVICE 1
#ifdef CONFIG_SIBYTE_HAS_LDT
/*
* HT's level-sensitive interrupts require EOI, which is generated
* through a 4MB memory-mapped region
*/
unsigned long ldt_eoi_space;
#endif
/*
* Read/write 32-bit values in config space.
*/
/* Read a 32-bit config-space value via the ioremapped window; the
 * low two address bits are dropped to force dword alignment. */
static inline u32 READCFG32(u32 addr)
{
	return *(u32 *) (cfg_space + (addr & ~3));
}
/* Write a 32-bit config-space value via the ioremapped window; the
 * low two address bits are dropped to force dword alignment. */
static inline void WRITECFG32(u32 addr, u32 data)
{
	*(u32 *) (cfg_space + (addr & ~3)) = data;
}
/* IRQs on the SB1250 are assigned per-device by firmware/core code,
 * so simply hand back whatever is already in dev->irq. */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return dev->irq;
}
/* Do platform specific device initialization at pci_enable_device() time */
/* Do platform specific device initialization at pci_enable_device() time.
 * Nothing is required on SB1250. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
/*
* Some checks before doing config cycles:
* In PCI Device Mode, hide everything on bus 0 except the LDT host
* bridge. Otherwise, access is controlled by bridge MasterEn bits.
*/
/*
 * Decide whether a config access to @bus/@devfn is permitted.
 *
 * In PCI Device Mode everything on bus 0 is hidden except the LDT
 * host bridge; otherwise access is gated by the bridge enable bits.
 * Returns non-zero when the access may proceed.
 */
static int sb1250_pci_can_access(struct pci_bus *bus, int devfn)
{
	u32 devno;

	if (!(sb1250_bus_status & (PCI_BUS_ENABLED | PCI_DEVICE_MODE)))
		return 0;

	/* Anything behind a bridge is always reachable. */
	if (bus->number != 0)
		return 1;

	devno = PCI_SLOT(devfn);
	if (devno == LDT_BRIDGE_DEVICE)
		return (sb1250_bus_status & LDT_BUS_ENABLED) != 0;

	/* Remaining bus-0 devices are hidden in PCI device mode. */
	return (sb1250_bus_status & PCI_DEVICE_MODE) ? 0 : 1;
}
/*
* Read/write access functions for various sizes of values
* in config space. Return all 1's for disallowed accesses
* for a kludgy but adequate simulation of master aborts.
*/
/*
 * Read 8/16/32 bits from config space.  Disallowed accesses return all
 * ones, a kludgy but adequate simulation of a master abort.
 */
static int sb1250_pcibios_read(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 * val)
{
	u32 dword;

	/* Reject misaligned 16- and 32-bit accesses. */
	if (((size == 2) && (where & 1)) || ((size == 4) && (where & 3)))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	dword = sb1250_pci_can_access(bus, devfn) ?
		READCFG32(CFGADDR(bus, devfn, where)) : 0xFFFFFFFF;

	switch (size) {
	case 1:
		*val = (dword >> ((where & 3) << 3)) & 0xff;
		break;
	case 2:
		*val = (dword >> ((where & 3) << 3)) & 0xffff;
		break;
	default:
		*val = dword;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write 8/16/32 bits to config space.  Sub-dword writes are performed
 * as read-modify-write of the containing dword; disallowed accesses
 * are reported as bad register numbers.
 */
static int sb1250_pcibios_write(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	u32 cfgaddr = CFGADDR(bus, devfn, where);
	int shift = (where & 3) << 3;
	u32 dword;

	/* Reject misaligned 16- and 32-bit accesses. */
	if (((size == 2) && (where & 1)) || ((size == 4) && (where & 3)))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (!sb1250_pci_can_access(bus, devfn))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	dword = READCFG32(cfgaddr);

	switch (size) {
	case 1:
		dword = (dword & ~(0xff << shift)) | (val << shift);
		break;
	case 2:
		dword = (dword & ~(0xffff << shift)) | (val << shift);
		break;
	default:
		dword = val;
		break;
	}

	WRITECFG32(cfgaddr, dword);

	return PCIBIOS_SUCCESSFUL;
}
/* Config accessors handed to the generic MIPS PCI layer. */
struct pci_ops sb1250_pci_ops = {
	.read = sb1250_pcibios_read,
	.write = sb1250_pcibios_write,
};

/* Fixed physical window for PCI memory space on the SB1250. */
static struct resource sb1250_mem_resource = {
	.name = "SB1250 PCI MEM",
	.start = 0x40000000UL,
	.end = 0x5fffffffUL,
	.flags = IORESOURCE_MEM,
};

/* PCI I/O port space; 32MB window, see sb1250_pcibios_init(). */
static struct resource sb1250_io_resource = {
	.name = "SB1250 PCI I/O",
	.start = 0x00000000UL,
	.end = 0x01ffffffUL,
	.flags = IORESOURCE_IO,
};

/* Host controller descriptor registered during initialization. */
struct pci_controller sb1250_controller = {
	.pci_ops = &sb1250_pci_ops,
	.mem_resource = &sb1250_mem_resource,
	.io_resource = &sb1250_io_resource,
};
/*
 * Probe-time initialization of the SB1250 PCI/LDT host bridge.
 *
 * Maps the 16MB configuration window, decides between PCI host and
 * device mode from the SCD system config register, establishes the
 * I/O port mapping and (optionally) the HT/LDT EOI window, and
 * finally registers the controller.  Resource assignment itself is
 * left to the CFE firmware.
 */
static int __init sb1250_pcibios_init(void)
{
	void __iomem *io_map_base;
	uint32_t cmdreg;
	uint64_t reg;

	/* CFE will assign PCI resources */
	pci_set_flags(PCI_PROBE_ONLY);

	/* Avoid ISA compat ranges.	 */
	PCIBIOS_MIN_IO = 0x00008000UL;
	PCIBIOS_MIN_MEM = 0x01000000UL;

	/* Set I/O resource limits. */
	ioport_resource.end = 0x01ffffffUL;	/* 32MB accessible by sb1250 */
	iomem_resource.end = 0xffffffffUL;	/* no HT support yet */

	/* NOTE(review): the ioremap() result is not checked before use
	 * below -- confirm failure is impossible this early on sibyte. */
	cfg_space =
	    ioremap(A_PHYS_LDTPCI_CFG_MATCH_BITS, 16 * 1024 * 1024);

	/*
	 * See if the PCI bus has been configured by the firmware.
	 */
	reg = __raw_readq(IOADDR(A_SCD_SYSTEM_CFG));
	if (!(reg & M_SYS_PCI_HOST)) {
		sb1250_bus_status |= PCI_DEVICE_MODE;
	} else {
		cmdreg =
		    READCFG32(CFGOFFSET
			      (0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0),
			       PCI_COMMAND));
		if (!(cmdreg & PCI_COMMAND_MASTER)) {
			printk
			    ("PCI: Skipping PCI probe. Bus is not initialized.\n");
			iounmap(cfg_space);
			return 0;
		}
		sb1250_bus_status |= PCI_BUS_ENABLED;
	}

	/*
	 * Establish mappings in KSEG2 (kernel virtual) to PCI I/O
	 * space. Use "match bytes" policy to make everything look
	 * little-endian. So, you need to also set
	 * CONFIG_SWAP_IO_SPACE, but this is the combination that
	 * works correctly with most of Linux's drivers.
	 * XXX ehs: Should this happen in PCI Device mode?
	 */
	io_map_base = ioremap(A_PHYS_LDTPCI_IO_MATCH_BYTES, 1024 * 1024);
	sb1250_controller.io_map_base = (unsigned long)io_map_base;
	set_io_port_base((unsigned long)io_map_base);

#ifdef CONFIG_SIBYTE_HAS_LDT
	/*
	 * Also check the LDT bridge's enable, just in case we didn't
	 * initialize that one.
	 */
	cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(LDT_BRIDGE_DEVICE, 0),
				     PCI_COMMAND));
	if (cmdreg & PCI_COMMAND_MASTER) {
		sb1250_bus_status |= LDT_BUS_ENABLED;

		/*
		 * Need bits 23:16 to convey vector number.  Note that
		 * this consumes 4MB of kernel-mapped memory
		 * (Kseg2/Kseg3) for 32-bit kernel.
		 */
		ldt_eoi_space = (unsigned long)
		    ioremap(A_PHYS_LDT_SPECIAL_MATCH_BYTES,
			    4 * 1024 * 1024);
	}
#endif

	register_pci_controller(&sb1250_controller);

#ifdef CONFIG_VGA_CONSOLE
	console_lock();
	do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES - 1, 1);
	console_unlock();
#endif
	return 0;
}
| linux-master | arch/mips/pci/pci-sb1250.c |
/*
* BRIEF MODULE DESCRIPTION
* PCI initialization for IDT EB434 board
*
* Copyright 2004 IDT Inc. ([email protected])
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/pci.h>
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/* define an unsigned array for the PCI registers */
/* define an unsigned array for the PCI registers */
/* Board-specific config-register values pushed into the bridge via the
 * pcicfga/pcicfgd indirect pair in rc32434_pcibridge_init().
 * NOTE(review): the array is declared with 25 slots but only 24
 * initializers, and the init loop writes exactly 24 -- the trailing
 * zero element looks unused; confirm before shrinking. */
static unsigned int korina_cnfg_regs[25] = {
	KORINA_CNFG1, KORINA_CNFG2, KORINA_CNFG3, KORINA_CNFG4,
	KORINA_CNFG5, KORINA_CNFG6, KORINA_CNFG7, KORINA_CNFG8,
	KORINA_CNFG9, KORINA_CNFG10, KORINA_CNFG11, KORINA_CNFG12,
	KORINA_CNFG13, KORINA_CNFG14, KORINA_CNFG15, KORINA_CNFG16,
	KORINA_CNFG17, KORINA_CNFG18, KORINA_CNFG19, KORINA_CNFG20,
	KORINA_CNFG21, KORINA_CNFG22, KORINA_CNFG23, KORINA_CNFG24
};
/* Forward declarations: mem1's .child points at mem2 and vice versa,
 * so both names must be visible before either initializer. */
static struct resource rc32434_res_pci_mem1;
static struct resource rc32434_res_pci_mem2;

/* First 256MB PCI memory window; parent of mem2 in the resource tree. */
static struct resource rc32434_res_pci_mem1 = {
	.name = "PCI MEM1",
	.start = 0x50000000,
	.end = 0x5FFFFFFF,
	.flags = IORESOURCE_MEM,
	.sibling = NULL,
	.child = &rc32434_res_pci_mem2
};

/* Second 256MB PCI memory window, nested under mem1. */
static struct resource rc32434_res_pci_mem2 = {
	.name = "PCI Mem2",
	.start = 0x60000000,
	.end = 0x6FFFFFFF,
	.flags = IORESOURCE_MEM,
	.parent = &rc32434_res_pci_mem1,
	.sibling = NULL,
	.child = NULL
};

/* 1MB PCI I/O window. */
static struct resource rc32434_res_pci_io1 = {
	.name = "PCI I/O1",
	.start = 0x18800000,
	.end = 0x188FFFFF,
	.flags = IORESOURCE_IO,
};
extern struct pci_ops rc32434_pci_ops;
#define PCI_MEM1_START PCI_ADDR_START
#define PCI_MEM1_END (PCI_ADDR_START + CPUTOPCI_MEM_WIN - 1)
#define PCI_MEM2_START (PCI_ADDR_START + CPUTOPCI_MEM_WIN)
#define PCI_MEM2_END (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) - 1)
#define PCI_IO1_START (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN))
#define PCI_IO1_END \
(PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN - 1)
#define PCI_IO2_START \
(PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN)
#define PCI_IO2_END \
(PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + (2 * CPUTOPCI_IO_WIN) - 1)
/* NOTE(review): rc32434_controller2 is defined but never initialized or
 * registered in this file -- possibly referenced elsewhere; verify. */
struct pci_controller rc32434_controller2;

/* Primary host controller registered in rc32434_pci_init(). */
struct pci_controller rc32434_controller = {
	.pci_ops = &rc32434_pci_ops,
	.mem_resource = &rc32434_res_pci_mem1,
	.io_resource = &rc32434_res_pci_io1,
	.mem_offset = 0,
	.io_offset = 0,
};
#ifdef __MIPSEB__
#define PCI_ENDIAN_FLAG PCILBAC_sb_m
#else
#define PCI_ENDIAN_FLAG 0
#endif
/*
 * Program the RC32434 PCI bridge for host mode.
 *
 * Verifies the bridge is strapped as a PCI host, enables the bus
 * interface and arbiter, clears/masks status and message interrupts,
 * programs the four local-bus address (PCILBA) windows, pushes the
 * board's config-register table through the indirect pcicfga/pcicfgd
 * pair, and sets the retry/disconnect timers.
 *
 * Returns 0 on success, -1 when not in host mode.  The register write
 * order and the read-back "flush" loads are required by the hardware;
 * do not reorder.
 */
static int __init rc32434_pcibridge_init(void)
{
	unsigned int pcicvalue, pcicdata = 0;
	unsigned int dummyread, pcicntlval;
	int loopCount;
	unsigned int pci_config_addr;

	pcicvalue = rc32434_pci->pcic;
	pcicvalue = (pcicvalue >> PCIM_SHFT) & PCIM_BIT_LEN;
	if (!((pcicvalue == PCIM_H_EA) ||
	      (pcicvalue == PCIM_H_IA_FIX) ||
	      (pcicvalue == PCIM_H_IA_RR))) {
		pr_err("PCI init error!!!\n");
		/* Not in Host Mode, return ERROR */
		return -1;
	}

	/* Enables the Idle Grant mode, Arbiter Parking */
	pcicdata |= (PCI_CTL_IGM | PCI_CTL_EAP | PCI_CTL_EN);
	rc32434_pci->pcic = pcicdata;	/* Enable the PCI bus Interface */
	/* Zero out the PCI status & PCI Status Mask */
	/* NOTE(review): busy-wait with no timeout -- hangs if RIP never
	 * clears; presumably guaranteed by hardware, confirm. */
	for (;;) {
		pcicdata = rc32434_pci->pcis;
		if (!(pcicdata & PCI_STAT_RIP))
			break;
	}

	rc32434_pci->pcis = 0;
	rc32434_pci->pcism = 0xFFFFFFFF;
	/* Zero out the PCI decoupled registers */
	rc32434_pci->pcidac = 0;	/*
					 * disable PCI decoupled accesses at
					 * initialization
					 */
	rc32434_pci->pcidas = 0;	/* clear the status */
	rc32434_pci->pcidasm = 0x0000007F;	/* Mask all the interrupts */
	/* Mask PCI Messaging Interrupts */
	rc32434_pci_msg->pciiic = 0;
	rc32434_pci_msg->pciiim = 0xFFFFFFFF;
	rc32434_pci_msg->pciioic = 0;
	rc32434_pci_msg->pciioim = 0;

	/* Setup PCILB0 as Memory Window */
	rc32434_pci->pcilba[0].address = (unsigned int) (PCI_ADDR_START);

	/* setup the PCI map address as same as the local address */
	rc32434_pci->pcilba[0].mapping = (unsigned int) (PCI_ADDR_START);

	/* Setup PCILBA1 as MEM */
	rc32434_pci->pcilba[0].control =
	    (((SIZE_256MB & 0x1f) << PCI_LBAC_SIZE_BIT) | PCI_ENDIAN_FLAG);
	dummyread = rc32434_pci->pcilba[0].control;	/* flush the CPU write Buffers */
	rc32434_pci->pcilba[1].address = 0x60000000;
	rc32434_pci->pcilba[1].mapping = 0x60000000;

	/* setup PCILBA2 as IO Window */
	rc32434_pci->pcilba[1].control =
	    (((SIZE_256MB & 0x1f) << PCI_LBAC_SIZE_BIT) | PCI_ENDIAN_FLAG);
	dummyread = rc32434_pci->pcilba[1].control;	/* flush the CPU write Buffers */
	rc32434_pci->pcilba[2].address = 0x18C00000;
	rc32434_pci->pcilba[2].mapping = 0x18FFFFFF;

	/* setup PCILBA2 as IO Window */
	rc32434_pci->pcilba[2].control =
	    (((SIZE_4MB & 0x1f) << PCI_LBAC_SIZE_BIT) | PCI_ENDIAN_FLAG);
	dummyread = rc32434_pci->pcilba[2].control;	/* flush the CPU write Buffers */

	/* Setup PCILBA3 as IO Window */
	rc32434_pci->pcilba[3].address = 0x18800000;
	rc32434_pci->pcilba[3].mapping = 0x18800000;
	rc32434_pci->pcilba[3].control =
	    ((((SIZE_1MB & 0x1ff) << PCI_LBAC_SIZE_BIT) | PCI_LBAC_MSI) |
	     PCI_ENDIAN_FLAG);
	dummyread = rc32434_pci->pcilba[3].control;	/* flush the CPU write Buffers */

	/* Push the 24 board config registers through the indirect pair;
	 * 0x80000004 selects the bridge's own config space. */
	pci_config_addr = (unsigned int) (0x80000004);
	for (loopCount = 0; loopCount < 24; loopCount++) {
		rc32434_pci->pcicfga = pci_config_addr;
		dummyread = rc32434_pci->pcicfga;
		rc32434_pci->pcicfgd = korina_cnfg_regs[loopCount];
		dummyread = rc32434_pci->pcicfgd;
		pci_config_addr += 4;
	}
	rc32434_pci->pcitc =
	    (unsigned int) ((PCITC_RTIMER_VAL & 0xff) << PCI_TC_RTIMER_BIT) |
	    ((PCITC_DTIMER_VAL & 0xff) << PCI_TC_DTIMER_BIT);

	/* Clear the target-not-ready bit; the final read flushes the write. */
	pcicntlval = rc32434_pci->pcic;
	pcicntlval &= ~PCI_CTL_TNR;
	rc32434_pci->pcic = pcicntlval;
	pcicntlval = rc32434_pci->pcic;

	return 0;
}
/*
 * Initcall: bring up the RC32434 PCI bridge and register the host
 * controller.
 *
 * Fix over the original: the return value of rc32434_pcibridge_init()
 * was ignored, so the controller was registered even when the bridge
 * is not strapped for host mode.  We now abort with -ENODEV instead.
 *
 * Returns 0 on success, -ENODEV when not in host mode, -ENOMEM when
 * the I/O window cannot be mapped.
 */
static int __init rc32434_pci_init(void)
{
	void __iomem *io_map_base;

	pr_info("PCI: Initializing PCI\n");

	ioport_resource.start = rc32434_res_pci_io1.start;
	ioport_resource.end = rc32434_res_pci_io1.end;

	/* Bail out if the bridge refused host-mode initialization. */
	if (rc32434_pcibridge_init())
		return -ENODEV;

	io_map_base = ioremap(rc32434_res_pci_io1.start,
			      resource_size(&rc32434_res_pci_io1));
	if (!io_map_base)
		return -ENOMEM;

	/* io_map_base is biased so that adding a port number yields the
	 * mapped virtual address. */
	rc32434_controller.io_map_base =
		(unsigned long)io_map_base - rc32434_res_pci_io1.start;

	register_pci_controller(&rc32434_controller);

	rc32434_sync();

	return 0;
}
| linux-master | arch/mips/pci/pci-rc32434.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <asm/bootinfo.h>
#include <bcm63xx_reset.h>
#include "pci-bcm63xx.h"
/*
* Allow PCI to be disabled at runtime depending on board nvram
* configuration
*/
int bcm63xx_pci_enabled;
static struct resource bcm_pci_mem_resource = {
.name = "bcm63xx PCI memory space",
.start = BCM_PCI_MEM_BASE_PA,
.end = BCM_PCI_MEM_END_PA,
.flags = IORESOURCE_MEM
};
static struct resource bcm_pci_io_resource = {
.name = "bcm63xx PCI IO space",
.start = BCM_PCI_IO_BASE_PA,
#ifdef CONFIG_CARDBUS
.end = BCM_PCI_IO_HALF_PA,
#else
.end = BCM_PCI_IO_END_PA,
#endif
.flags = IORESOURCE_IO
};
struct pci_controller bcm63xx_controller = {
.pci_ops = &bcm63xx_pci_ops,
.io_resource = &bcm_pci_io_resource,
.mem_resource = &bcm_pci_mem_resource,
};
/*
* We handle cardbus via a fake Cardbus bridge, memory and io spaces
* have to be clearly separated from PCI one since we have different
* memory decoder.
*/
#ifdef CONFIG_CARDBUS
static struct resource bcm_cb_mem_resource = {
.name = "bcm63xx Cardbus memory space",
.start = BCM_CB_MEM_BASE_PA,
.end = BCM_CB_MEM_END_PA,
.flags = IORESOURCE_MEM
};
static struct resource bcm_cb_io_resource = {
.name = "bcm63xx Cardbus IO space",
.start = BCM_PCI_IO_HALF_PA + 1,
.end = BCM_PCI_IO_END_PA,
.flags = IORESOURCE_IO
};
struct pci_controller bcm63xx_cb_controller = {
.pci_ops = &bcm63xx_cb_ops,
.io_resource = &bcm_cb_io_resource,
.mem_resource = &bcm_cb_mem_resource,
};
#endif
static struct resource bcm_pcie_mem_resource = {
.name = "bcm63xx PCIe memory space",
.start = BCM_PCIE_MEM_BASE_PA,
.end = BCM_PCIE_MEM_END_PA,
.flags = IORESOURCE_MEM,
};
static struct resource bcm_pcie_io_resource = {
.name = "bcm63xx PCIe IO space",
.start = 0,
.end = 0,
.flags = 0,
};
struct pci_controller bcm63xx_pcie_controller = {
.pci_ops = &bcm63xx_pcie_ops,
.io_resource = &bcm_pcie_io_resource,
.mem_resource = &bcm_pcie_mem_resource,
};
/*
 * Read one of the bridge's own config registers through the internal
 * PCICFGCTL/PCICFGDATA indirect pair.  The iob() barrier orders the
 * address latch before the data read.
 */
static u32 bcm63xx_int_cfg_readl(u32 reg)
{
	u32 ctl;

	ctl = (reg & MPI_PCICFGCTL_CFGADDR_MASK) | MPI_PCICFGCTL_WRITEEN_MASK;
	bcm_mpi_writel(ctl, MPI_PCICFGCTL_REG);
	iob();
	return bcm_mpi_readl(MPI_PCICFGDATA_REG);
}
/*
 * Write one of the bridge's own config registers through the internal
 * PCICFGCTL/PCICFGDATA indirect pair: latch the address, then store
 * the value.
 */
static void bcm63xx_int_cfg_writel(u32 val, u32 reg)
{
	u32 ctl;

	ctl = (reg & MPI_PCICFGCTL_CFGADDR_MASK) | MPI_PCICFGCTL_WRITEEN_MASK;
	bcm_mpi_writel(ctl, MPI_PCICFGCTL_REG);
	bcm_mpi_writel(val, MPI_PCICFGDATA_REG);
}
void __iomem *pci_iospace_start;
/*
 * Hard-reset the PCIe core: enable the SERDES lanes, then pulse the
 * core and external resets.  The mdelay() settle times are part of the
 * bring-up sequence; do not shorten or reorder them.
 */
static void __init bcm63xx_reset_pcie(void)
{
	u32 val;
	u32 reg;

	/* enable SERDES */
	if (BCMCPU_IS_6328())
		reg = MISC_SERDES_CTRL_6328_REG;
	else
		reg = MISC_SERDES_CTRL_6362_REG;

	val = bcm_misc_readl(reg);
	val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
	bcm_misc_writel(val, reg);

	/* reset the PCIe core */
	bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1);
	bcm63xx_core_set_reset(BCM63XX_RESET_PCIE_EXT, 1);
	mdelay(10);

	bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 0);
	mdelay(10);

	bcm63xx_core_set_reset(BCM63XX_RESET_PCIE_EXT, 0);
	mdelay(200);
}
static struct clk *pcie_clk;
/*
 * Bring up the PCIe root complex (BCM6328/BCM6362): enable the clock,
 * reset the core, configure bridge options, interrupts, class code and
 * the BAR0 remap window, then register the controller.
 *
 * Returns 0 on success, -ENODEV when the "pcie" clock is unavailable.
 */
static int __init bcm63xx_register_pcie(void)
{
	u32 val;

	/* enable clock */
	pcie_clk = clk_get(NULL, "pcie");
	if (IS_ERR_OR_NULL(pcie_clk))
		return -ENODEV;

	clk_prepare_enable(pcie_clk);

	bcm63xx_reset_pcie();

	/* configure the PCIe bridge */
	val = bcm_pcie_readl(PCIE_BRIDGE_OPT1_REG);
	val |= OPT1_RD_BE_OPT_EN;
	val |= OPT1_RD_REPLY_BE_FIX_EN;
	val |= OPT1_PCIE_BRIDGE_HOLE_DET_EN;
	val |= OPT1_L1_INT_STATUS_MASK_POL;
	bcm_pcie_writel(val, PCIE_BRIDGE_OPT1_REG);

	/* setup the interrupts */
	val = bcm_pcie_readl(PCIE_BRIDGE_RC_INT_MASK_REG);
	val |= PCIE_RC_INT_A | PCIE_RC_INT_B | PCIE_RC_INT_C | PCIE_RC_INT_D;
	bcm_pcie_writel(val, PCIE_BRIDGE_RC_INT_MASK_REG);

	val = bcm_pcie_readl(PCIE_BRIDGE_OPT2_REG);
	/* enable credit checking and error checking */
	val |= OPT2_TX_CREDIT_CHK_EN;
	val |= OPT2_UBUS_UR_DECODE_DIS;

	/* set device bus/func for the pcie device */
	val |= (PCIE_BUS_DEVICE << OPT2_CFG_TYPE1_BUS_NO_SHIFT);
	val |= OPT2_CFG_TYPE1_BD_SEL;
	bcm_pcie_writel(val, PCIE_BRIDGE_OPT2_REG);

	/* setup class code as bridge */
	val = bcm_pcie_readl(PCIE_IDVAL3_REG);
	val &= ~IDVAL3_CLASS_CODE_MASK;
	val |= PCI_CLASS_BRIDGE_PCI_NORMAL;
	bcm_pcie_writel(val, PCIE_IDVAL3_REG);

	/* disable bar1 size */
	val = bcm_pcie_readl(PCIE_CONFIG2_REG);
	val &= ~CONFIG2_BAR1_SIZE_MASK;
	bcm_pcie_writel(val, PCIE_CONFIG2_REG);

	/* set bar0 to little endian */
	val = (BCM_PCIE_MEM_BASE_PA >> 20) << BASEMASK_BASE_SHIFT;
	val |= (BCM_PCIE_MEM_BASE_PA >> 20) << BASEMASK_MASK_SHIFT;
	val |= BASEMASK_REMAP_EN;
	bcm_pcie_writel(val, PCIE_BRIDGE_BAR0_BASEMASK_REG);

	val = (BCM_PCIE_MEM_BASE_PA >> 20) << REBASE_ADDR_BASE_SHIFT;
	bcm_pcie_writel(val, PCIE_BRIDGE_BAR0_REBASE_ADDR_REG);

	register_pci_controller(&bcm63xx_pcie_controller);

	return 0;
}
/*
 * Bring up the classic PCI (and optional Cardbus) host bridge found on
 * BCM3368/6348/6358/6368: map the config-cycle window, program the
 * local-bus-to-PCI address windows, set up bus-mastering targets to
 * local RAM, enable the bridge, and register the controller(s).
 *
 * Returns 0 on success, -ENOMEM if the config window cannot be mapped.
 */
static int __init bcm63xx_register_pci(void)
{
	unsigned int mem_size;
	u32 val;
	/*
	 * configuration access are done through IO space, remap 4
	 * first bytes to access it from CPU.
	 *
	 * this means that no io access from CPU should happen while
	 * we do a configuration cycle,	 but there's no way we can add
	 * a spinlock for each io access, so this is currently kind of
	 * broken on SMP.
	 */
	pci_iospace_start = ioremap(BCM_PCI_IO_BASE_PA, 4);
	if (!pci_iospace_start)
		return -ENOMEM;

	/* setup local bus to PCI access (PCI memory) */
	val = BCM_PCI_MEM_BASE_PA & MPI_L2P_BASE_MASK;
	bcm_mpi_writel(val, MPI_L2PMEMBASE1_REG);
	bcm_mpi_writel(~(BCM_PCI_MEM_SIZE - 1), MPI_L2PMEMRANGE1_REG);
	bcm_mpi_writel(val | MPI_L2PREMAP_ENABLED_MASK, MPI_L2PMEMREMAP1_REG);

	/* set Cardbus IDSEL (type 0 cfg access on primary bus for
	 * this IDSEL will be done on Cardbus instead) */
	val = bcm_pcmcia_readl(PCMCIA_C1_REG);
	val &= ~PCMCIA_C1_CBIDSEL_MASK;
	val |= (CARDBUS_PCI_IDSEL << PCMCIA_C1_CBIDSEL_SHIFT);
	bcm_pcmcia_writel(val, PCMCIA_C1_REG);

#ifdef CONFIG_CARDBUS
	/* setup local bus to PCI access (Cardbus memory) */
	val = BCM_CB_MEM_BASE_PA & MPI_L2P_BASE_MASK;
	bcm_mpi_writel(val, MPI_L2PMEMBASE2_REG);
	bcm_mpi_writel(~(BCM_CB_MEM_SIZE - 1), MPI_L2PMEMRANGE2_REG);
	val |= MPI_L2PREMAP_ENABLED_MASK | MPI_L2PREMAP_IS_CARDBUS_MASK;
	bcm_mpi_writel(val, MPI_L2PMEMREMAP2_REG);
#else
	/* disable second access windows */
	bcm_mpi_writel(0, MPI_L2PMEMREMAP2_REG);
#endif

	/* setup local bus to PCI access (IO memory), we have only 1
	 * IO window for both PCI and cardbus, but it cannot handle
	 * both at the same time, assume standard PCI for now, if
	 * cardbus card has IO zone, PCI fixup will change window to
	 * cardbus */
	val = BCM_PCI_IO_BASE_PA & MPI_L2P_BASE_MASK;
	bcm_mpi_writel(val, MPI_L2PIOBASE_REG);
	bcm_mpi_writel(~(BCM_PCI_IO_SIZE - 1), MPI_L2PIORANGE_REG);
	bcm_mpi_writel(val | MPI_L2PREMAP_ENABLED_MASK, MPI_L2PIOREMAP_REG);

	/* enable PCI related GPIO pins */
	bcm_mpi_writel(MPI_LOCBUSCTL_EN_PCI_GPIO_MASK, MPI_LOCBUSCTL_REG);

	/* setup PCI to local bus access, used by PCI device to target
	 * local RAM while bus mastering */
	bcm63xx_int_cfg_writel(0, PCI_BASE_ADDRESS_3);
	if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6368())
		val = MPI_SP0_REMAP_ENABLE_MASK;
	else
		val = 0;
	bcm_mpi_writel(val, MPI_SP0_REMAP_REG);

	bcm63xx_int_cfg_writel(0x0, PCI_BASE_ADDRESS_4);
	bcm_mpi_writel(0, MPI_SP1_REMAP_REG);

	mem_size = bcm63xx_get_memory_size();

	/* 6348 before rev b0 exposes only 16 MB of RAM memory through
	 * PCI, throw a warning if we have more memory */
	if (BCMCPU_IS_6348() && (bcm63xx_get_cpu_rev() & 0xf0) == 0xa0) {
		if (mem_size > (16 * 1024 * 1024))
			printk(KERN_WARNING "bcm63xx: this CPU "
			       "revision cannot handle more than 16MB "
			       "of RAM for PCI bus mastering\n");
	} else {
		/* setup sp0 range to local RAM size */
		bcm_mpi_writel(~(mem_size - 1), MPI_SP0_RANGE_REG);
		bcm_mpi_writel(0, MPI_SP1_RANGE_REG);
	}

	/* change host bridge retry counter to infinite number of
	 * retry, needed for some broadcom wifi cards with Silicon
	 * Backplane bus where access to srom seems very slow  */
	val = bcm63xx_int_cfg_readl(BCMPCI_REG_TIMERS);
	val &= ~REG_TIMER_RETRY_MASK;
	bcm63xx_int_cfg_writel(val, BCMPCI_REG_TIMERS);

	/* enable memory decoder and bus mastering */
	val = bcm63xx_int_cfg_readl(PCI_COMMAND);
	val |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	bcm63xx_int_cfg_writel(val, PCI_COMMAND);

	/* enable read prefetching & disable byte swapping for bus
	 * mastering transfers */
	val = bcm_mpi_readl(MPI_PCIMODESEL_REG);
	val &= ~MPI_PCIMODESEL_BAR1_NOSWAP_MASK;
	val &= ~MPI_PCIMODESEL_BAR2_NOSWAP_MASK;
	val &= ~MPI_PCIMODESEL_PREFETCH_MASK;
	val |= (8 << MPI_PCIMODESEL_PREFETCH_SHIFT);
	bcm_mpi_writel(val, MPI_PCIMODESEL_REG);

	/* enable pci interrupt */
	val = bcm_mpi_readl(MPI_LOCINT_REG);
	val |= MPI_LOCINT_MASK(MPI_LOCINT_EXT_PCI_INT);
	bcm_mpi_writel(val, MPI_LOCINT_REG);

	register_pci_controller(&bcm63xx_controller);

#ifdef CONFIG_CARDBUS
	register_pci_controller(&bcm63xx_cb_controller);
#endif

	/* mark memory space used for IO mapping as reserved */
	request_mem_region(BCM_PCI_IO_BASE_PA, BCM_PCI_IO_SIZE,
			   "bcm63xx PCI IO space");

	return 0;
}
/*
 * Initcall: dispatch to the PCIe or classic-PCI bring-up path based on
 * the SoC variant, unless PCI was disabled via board nvram.
 */
static int __init bcm63xx_pci_init(void)
{
	u32 cpu_id;

	if (!bcm63xx_pci_enabled)
		return -ENODEV;

	cpu_id = bcm63xx_get_cpu_id();

	/* PCIe-only SoCs */
	if (cpu_id == BCM6328_CPU_ID || cpu_id == BCM6362_CPU_ID)
		return bcm63xx_register_pcie();

	/* Classic PCI/Cardbus SoCs */
	if (cpu_id == BCM3368_CPU_ID || cpu_id == BCM6348_CPU_ID ||
	    cpu_id == BCM6358_CPU_ID || cpu_id == BCM6368_CPU_ID)
		return bcm63xx_register_pci();

	return -ENODEV;
}
| linux-master | arch/mips/pci/pci-bcm63xx.c |
/*
* Cobalt Qube/Raq PCI support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997, 2002, 2003 by Ralf Baechle
* Copyright (C) 2001, 2002, 2003 by Liam Davies ([email protected])
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/gt64120.h>
#include <cobalt.h>
#include <irq.h>
/*
* PCI slot numbers
*/
#define COBALT_PCICONF_CPU 0x06
#define COBALT_PCICONF_ETH0 0x07
#define COBALT_PCICONF_RAQSCSI 0x08
#define COBALT_PCICONF_VIA 0x09
#define COBALT_PCICONF_PCISLOT 0x0A
#define COBALT_PCICONF_ETH1 0x0C
/*
* The Cobalt board ID information. The boards have an ID number wired
* into the VIA that is available in the high nibble of register 94.
*/
#define VIA_COBALT_BRD_ID_REG 0x94
#define VIA_COBALT_BRD_REG_to_ID(reg) ((unsigned char)(reg) >> 4)
/*
* Default value of PCI Class Code on GT64111 is PCI_CLASS_MEMORY_OTHER (0x0580)
* instead of PCI_CLASS_BRIDGE_HOST (0x0600). Galileo explained this choice in
* document "GT-64111 System Controller for RC4640, RM523X and VR4300 CPUs",
* section "6.5.3 PCI Autoconfiguration at RESET":
*
* Some PCs refuse to configure host bridges if they are found plugged into
* a PCI slot (ask the BIOS vendors why...). The "Memory Controller" Class
* Code does not cause a problem for these non-compliant BIOSes, so we used
* this as the default in the GT-64111.
*
* So fix the incorrect default value of PCI Class Code. More details are on:
* https://lore.kernel.org/r/20211102154831.xtrlgrmrizl5eidl@pali/
* https://lore.kernel.org/r/[email protected]/
*/
/*
 * Correct the GT64111's bogus default class code (MEMORY_OTHER) to
 * BRIDGE_HOST, keeping the programming-interface byte intact.
 */
static void qube_raq_galileo_early_fixup(struct pci_dev *dev)
{
	if (dev->devfn != PCI_DEVFN(0, 0))
		return;
	if ((dev->class >> 8) != PCI_CLASS_MEMORY_OTHER)
		return;

	dev->class = (PCI_CLASS_BRIDGE_HOST << 8) | (dev->class & 0xff);
	printk(KERN_INFO "Galileo: fixed bridge class\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_GT64111,
qube_raq_galileo_early_fixup);
/*
 * Fixup for the VIA 82C586 IDE function: enable bus mastering and
 * fast back-to-back, turn on the secondary IDE channel (firmware only
 * enables the primary), and set sane latency/cache-line values.
 */
static void qube_raq_via_bmIDE_fixup(struct pci_dev *dev)
{
	unsigned short cfgword;
	unsigned char lt;

	/* Enable Bus Mastering and fast back to back. */
	pci_read_config_word(dev, PCI_COMMAND, &cfgword);
	cfgword |= (PCI_COMMAND_FAST_BACK | PCI_COMMAND_MASTER);
	pci_write_config_word(dev, PCI_COMMAND, cfgword);

	/* Enable both ide interfaces.	ROM only enables primary one.  */
	pci_write_config_byte(dev, 0x40, 0xb);

	/* Set latency timer to reasonable value. */
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lt);
	if (lt < 64)
		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1,
qube_raq_via_bmIDE_fixup);
/*
 * Fixup for the GT64111 host bridge itself: program latency timer and
 * cache line size, then set PCI retry/timeout to their maxima (see the
 * long comment below for why the revision-based branch was disabled).
 */
static void qube_raq_galileo_fixup(struct pci_dev *dev)
{
	if (dev->devfn != PCI_DEVFN(0, 0))
		return;

	/* Fix PCI latency-timer and cache-line-size values in Galileo
	 * host bridge.
	 */
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8);

	/*
	 * The code described by the comment below has been removed
	 * as it causes bus mastering by the Ethernet controllers
	 * to break under any kind of network load. We always set
	 * the retry timeouts to their maximum.
	 *
	 * --x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--
	 *
	 * On all machines prior to Q2, we had the STOP line disconnected
	 * from Galileo to VIA on PCI.	The new Galileo does not function
	 * correctly unless we have it connected.
	 *
	 * Therefore we must set the disconnect/retry cycle values to
	 * something sensible when using the new Galileo.
	 */
	printk(KERN_INFO "Galileo: revision %u\n", dev->revision);

#if 0
	if (dev->revision >= 0x10) {
		/* New Galileo, assumes PCI stop line to VIA is connected. */
		GT_WRITE(GT_PCI0_TOR_OFS, 0x4020);
	} else if (dev->revision == 0x1 || dev->revision == 0x2)
#endif
	{
		signed int timeo;
		/* XXX WE MUST DO THIS ELSE GALILEO LOCKS UP! -DaveM */
		/* Read-back appears required before reprogramming TOR. */
		timeo = GT_READ(GT_PCI0_TOR_OFS);
		/* Old Galileo, assumes PCI STOP line to VIA is disconnected. */
		GT_WRITE(GT_PCI0_TOR_OFS,
			 (0xff << 16) |		/* retry count */
			 (0xff << 8) |		/* timeout 1 */
			 0xff);			/* timeout 0 */

		/* enable PCI retry exceeded interrupt */
		GT_WRITE(GT_INTRMASK_OFS, GT_INTR_RETRYCTR0_MSK | GT_READ(GT_INTRMASK_OFS));
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_GT64111,
qube_raq_galileo_fixup);
int cobalt_board_id;
/*
 * Read the Cobalt board ID wired into the VIA southbridge (high nibble
 * of register 0x94) and cache it in cobalt_board_id for IRQ routing.
 *
 * Fix over the original: the `return;` after panic() was unreachable
 * dead code (panic() never returns) and has been removed.
 */
static void qube_raq_via_board_id_fixup(struct pci_dev *dev)
{
	u8 id;
	int retval;

	retval = pci_read_config_byte(dev, VIA_COBALT_BRD_ID_REG, &id);
	if (retval)
		panic("Cannot read board ID");	/* noreturn */

	cobalt_board_id = VIA_COBALT_BRD_REG_to_ID(id);

	printk(KERN_INFO "Cobalt board ID: %d\n", cobalt_board_id);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0,
qube_raq_via_board_id_fixup);
/* Per-board IRQ routing tables, indexed by PCI slot number.  Which
 * table applies is chosen at runtime from cobalt_board_id. */
static char irq_tab_qube1[] = {
	[COBALT_PCICONF_CPU]	 = 0,
	[COBALT_PCICONF_ETH0]	 = QUBE1_ETH0_IRQ,
	[COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
	[COBALT_PCICONF_VIA]	 = 0,
	[COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
	[COBALT_PCICONF_ETH1]	 = 0
};

static char irq_tab_cobalt[] = {
	[COBALT_PCICONF_CPU]	 = 0,
	[COBALT_PCICONF_ETH0]	 = ETH0_IRQ,
	[COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
	[COBALT_PCICONF_VIA]	 = 0,
	[COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
	[COBALT_PCICONF_ETH1]	 = ETH1_IRQ
};

static char irq_tab_raq2[] = {
	[COBALT_PCICONF_CPU]	 = 0,
	[COBALT_PCICONF_ETH0]	 = ETH0_IRQ,
	[COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ,
	[COBALT_PCICONF_VIA]	 = 0,
	[COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
	[COBALT_PCICONF_ETH1]	 = ETH1_IRQ
};
/* Pick the routing table matching the detected board, then look the
 * IRQ up by slot (the pin is ignored on these boards). */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	const char *tab;

	if (cobalt_board_id <= COBALT_BRD_ID_QUBE1)
		tab = irq_tab_qube1;
	else if (cobalt_board_id == COBALT_BRD_ID_RAQ2)
		tab = irq_tab_raq2;
	else
		tab = irq_tab_cobalt;

	return tab[slot];
}
/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	/* Nothing to do on Cobalt boards. */
	return 0;
}
| linux-master | arch/mips/pci/fixup-cobalt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2018 John Crispin <[email protected]>
*/
#include <linux/pci.h>
//#include <linux/of_irq.h>
#include <linux/of_pci.h>
/* No per-device platform setup is needed at pci_enable_device() time. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return PCIBIOS_SUCCESSFUL;
}

/* PCI interrupts are routed via the interrupt-map in the device tree. */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return of_irq_parse_and_map_pci(dev, slot, pin);
}
| linux-master | arch/mips/pci/fixup-ath79.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2010 John Crispin <[email protected]>
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <asm/addrspace.h>
#include <lantiq_soc.h>
#include <lantiq_irq.h>
#include "pci-lantiq.h"
#define PCI_CR_FCI_ADDR_MAP0 0x00C0
#define PCI_CR_FCI_ADDR_MAP1 0x00C4
#define PCI_CR_FCI_ADDR_MAP2 0x00C8
#define PCI_CR_FCI_ADDR_MAP3 0x00CC
#define PCI_CR_FCI_ADDR_MAP4 0x00D0
#define PCI_CR_FCI_ADDR_MAP5 0x00D4
#define PCI_CR_FCI_ADDR_MAP6 0x00D8
#define PCI_CR_FCI_ADDR_MAP7 0x00DC
#define PCI_CR_CLK_CTRL 0x0000
#define PCI_CR_PCI_MOD 0x0030
#define PCI_CR_PC_ARB 0x0080
#define PCI_CR_FCI_ADDR_MAP11hg 0x00E4
#define PCI_CR_BAR11MASK 0x0044
#define PCI_CR_BAR12MASK 0x0048
#define PCI_CR_BAR13MASK 0x004C
#define PCI_CS_BASE_ADDR1 0x0010
#define PCI_CR_PCI_ADDR_MAP11 0x0064
#define PCI_CR_FCI_BURST_LENGTH 0x00E8
#define PCI_CR_PCI_EOI 0x002C
#define PCI_CS_STS_CMD 0x0004
#define PCI_MASTER0_REQ_MASK_2BITS 8
#define PCI_MASTER1_REQ_MASK_2BITS 10
#define PCI_MASTER2_REQ_MASK_2BITS 12
#define INTERNAL_ARB_ENABLE_BIT 0
#define LTQ_CGU_IFCCR 0x0018
#define LTQ_CGU_PCICR 0x0034
#define ltq_pci_w32(x, y) ltq_w32((x), ltq_pci_membase + (y))
#define ltq_pci_r32(x) ltq_r32(ltq_pci_membase + (x))
#define ltq_pci_cfg_w32(x, y) ltq_w32((x), ltq_pci_mapped_cfg + (y))
#define ltq_pci_cfg_r32(x) ltq_r32(ltq_pci_mapped_cfg + (x))
/* Mapped PCI configuration window (non-static: declared in pci-lantiq.h). */
__iomem void *ltq_pci_mapped_cfg;
/* Controller memory-mapped registers. */
static __iomem void *ltq_pci_membase;

/* Optional GPIO driving the external PCI reset line. */
static struct gpio_desc *reset_gpio;
static struct clk *clk_pci, *clk_external;
static struct resource pci_io_resource;
static struct resource pci_mem_resource;

/* Config accessors are declared in pci-lantiq.h (defined elsewhere). */
static struct pci_ops pci_ops = {
	.read = ltq_pci_read_config_dword,
	.write = ltq_pci_write_config_dword
};

static struct pci_controller pci_controller = {
	.pci_ops = &pci_ops,
	.mem_resource = &pci_mem_resource,
	.mem_offset = 0x00000000UL,
	.io_resource = &pci_io_resource,
	.io_offset = 0x00000000UL,
};
/*
 * Compute the BAR11 mask for the amount of SDRAM present: the window
 * must cover all of system memory rounded up to a power of two.
 */
static inline u32 ltq_calc_bar11mask(void)
{
	u32 sdram_bytes = get_num_physpages() * PAGE_SIZE;

	return (0x0ffffff0 & ~((1 << (fls(sdram_bytes) - 1)) - 1)) | 8;
}
/*
 * Bring up the XWAY PCI core: clocks, reset GPIO, bus arbiter, BAR
 * address windows and the EBU interrupt line.  Called once from
 * ltq_pci_probe().  Returns 0 on success or a negative errno.
 *
 * NOTE(review): on the gpio error path below, the clk_pci/clk_external
 * references obtained earlier are neither clk_put() nor disabled before
 * returning -- confirm whether cleanup should be added there.
 */
static int ltq_pci_startup(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const __be32 *req_mask, *bus_clk;
	u32 temp_buffer;
	int error;

	/* get our clocks */
	clk_pci = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk_pci)) {
		dev_err(&pdev->dev, "failed to get pci clock\n");
		return PTR_ERR(clk_pci);
	}

	clk_external = clk_get(&pdev->dev, "external");
	if (IS_ERR(clk_external)) {
		clk_put(clk_pci);
		dev_err(&pdev->dev, "failed to get external pci clock\n");
		return PTR_ERR(clk_external);
	}

	/* read the bus speed that we want */
	bus_clk = of_get_property(node, "lantiq,bus-clock", NULL);
	if (bus_clk)
		clk_set_rate(clk_pci, *bus_clk);

	/* and enable the clocks */
	clk_enable(clk_pci);
	if (of_property_read_bool(node, "lantiq,external-clock"))
		clk_enable(clk_external);
	else
		clk_disable(clk_external);

	/* setup reset gpio used by pci */
	reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
					     GPIOD_OUT_LOW);
	error = PTR_ERR_OR_ZERO(reset_gpio);
	if (error) {
		dev_err(&pdev->dev, "failed to request gpio: %d\n", error);
		return error;
	}
	gpiod_set_consumer_name(reset_gpio, "pci_reset");

	/* enable auto-switching between PCI and EBU */
	ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);

	/* busy, i.e. configuration is not done, PCI access has to be retried */
	ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) & ~(1 << 24), PCI_CR_PCI_MOD);
	wmb();
	/* BUS Master/IO/MEM access */
	ltq_pci_cfg_w32(ltq_pci_cfg_r32(PCI_CS_STS_CMD) | 7, PCI_CS_STS_CMD);

	/* enable external 2 PCI masters */
	temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB);
	/* setup the request mask */
	req_mask = of_get_property(node, "req-mask", NULL);
	if (req_mask)
		temp_buffer &= ~((*req_mask & 0xf) << 16);
	else
		temp_buffer &= ~0xf0000;

	/* enable internal arbiter */
	temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT);
	/* enable internal PCI master reqest */
	temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS));

	/* enable EBU request */
	temp_buffer &= (~(3 << PCI_MASTER1_REQ_MASK_2BITS));

	/* enable all external masters request */
	temp_buffer &= (~(3 << PCI_MASTER2_REQ_MASK_2BITS));
	ltq_pci_w32(temp_buffer, PCI_CR_PC_ARB);
	wmb();

	/* setup BAR memory regions */
	ltq_pci_w32(0x18000000, PCI_CR_FCI_ADDR_MAP0);
	ltq_pci_w32(0x18400000, PCI_CR_FCI_ADDR_MAP1);
	ltq_pci_w32(0x18800000, PCI_CR_FCI_ADDR_MAP2);
	ltq_pci_w32(0x18c00000, PCI_CR_FCI_ADDR_MAP3);
	ltq_pci_w32(0x19000000, PCI_CR_FCI_ADDR_MAP4);
	ltq_pci_w32(0x19400000, PCI_CR_FCI_ADDR_MAP5);
	ltq_pci_w32(0x19800000, PCI_CR_FCI_ADDR_MAP6);
	ltq_pci_w32(0x19c00000, PCI_CR_FCI_ADDR_MAP7);
	ltq_pci_w32(0x1ae00000, PCI_CR_FCI_ADDR_MAP11hg);
	ltq_pci_w32(ltq_calc_bar11mask(), PCI_CR_BAR11MASK);
	ltq_pci_w32(0, PCI_CR_PCI_ADDR_MAP11);
	ltq_pci_w32(0, PCI_CS_BASE_ADDR1);
	/* both TX and RX endian swap are enabled */
	ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_EOI) | 3, PCI_CR_PCI_EOI);
	wmb();
	ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR12MASK) | 0x80000000,
		    PCI_CR_BAR12MASK);
	ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR13MASK) | 0x80000000,
		    PCI_CR_BAR13MASK);
	/*use 8 dw burst length */
	ltq_pci_w32(0x303, PCI_CR_FCI_BURST_LENGTH);
	/* configuration done: clear the "busy" bit set above */
	ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) | (1 << 24), PCI_CR_PCI_MOD);
	wmb();

	/* setup irq line */
	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_CON) | 0xc, LTQ_EBU_PCC_CON);
	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);

	/* toggle reset pin */
	if (reset_gpio) {
		gpiod_set_value_cansleep(reset_gpio, 1);
		wmb();
		mdelay(1);
		gpiod_set_value_cansleep(reset_gpio, 0);
	}
	return 0;
}
/*
 * Platform probe: map the controller register and config-space windows,
 * initialise the hardware and register the controller with the MIPS
 * PCI core.  Returns 0 on success or a negative errno.
 *
 * Fix: the return value of ltq_pci_startup() was previously ignored, so
 * a failed hardware bring-up (missing clocks/gpio) still registered the
 * controller.  Propagate the error instead.
 */
static int ltq_pci_probe(struct platform_device *pdev)
{
	int err;

	pci_clear_flags(PCI_PROBE_ONLY);

	ltq_pci_membase = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
	if (IS_ERR(ltq_pci_membase))
		return PTR_ERR(ltq_pci_membase);

	ltq_pci_mapped_cfg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(ltq_pci_mapped_cfg))
		return PTR_ERR(ltq_pci_mapped_cfg);

	err = ltq_pci_startup(pdev);
	if (err)
		return err;

	pci_load_of_ranges(&pci_controller, pdev->dev.of_node);
	register_pci_controller(&pci_controller);
	return 0;
}
/* Device-tree match table: the XWAY PCI core. */
static const struct of_device_id ltq_pci_match[] = {
	{ .compatible = "lantiq,pci-xway" },
	{},
};

static struct platform_driver ltq_pci_driver = {
	.probe = ltq_pci_probe,
	.driver = {
		.name = "pci-xway",
		.of_match_table = ltq_pci_match,
	},
};
/*
 * Register the platform driver at arch_initcall time so the PCI bus is
 * up before ordinary device initcalls run.
 *
 * Fixes: "&ltq_pci_driver" had been corrupted to "<q_pci_driver" by an
 * HTML-escape round-trip (does not compile); the failure message was
 * also logged at info level without a trailing newline -- use pr_err.
 */
int __init pcibios_init(void)
{
	int ret = platform_driver_register(&ltq_pci_driver);

	if (ret)
		pr_err("pci-xway: Error registering platform driver!\n");
	return ret;
}

arch_initcall(pcibios_init);
| linux-master | arch/mips/pci/pci-lantiq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*/
/*
* Both AR2315 and AR2316 chips have PCI interface unit, which supports DMA
* and interrupt. PCI interface supports MMIO access method, but does not
* seem to support I/O ports.
*
* Read/write operation in the region 0x80000000-0xBFFFFFFF causes
* a memory read/write command on the PCI bus. 30 LSBs of address on
* the bus are taken from memory read/write request and 2 MSBs are
* determined by PCI unit configuration.
*
* To work with the configuration space instead of memory is necessary set
* the CFG_SEL bit in the PCI_MISC_CONFIG register.
*
* Devices on the bus can perform DMA requests via chip BAR1. PCI host
* controller BARs are programmend as if an external device is programmed.
* Which means that during configuration, IDSEL pin of the chip should be
* asserted.
*
* We know (and support) only one board that uses the PCI interface -
* Fonera 2.0g (FON2202). It has a USB EHCI controller connected to the
* AR2315 PCI bus. IDSEL pin of USB controller is connected to AD[13] line
* and IDSEL pin of AR2315 is connected to AD[16] line.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <asm/paccess.h>
/*
* PCI Bus Interface Registers
*/
#define AR2315_PCI_1MS_REG 0x0008
#define AR2315_PCI_1MS_MASK 0x3FFFF /* # of AHB clk cycles in 1ms */
#define AR2315_PCI_MISC_CONFIG 0x000c
#define AR2315_PCIMISC_TXD_EN 0x00000001 /* Enable TXD for fragments */
#define AR2315_PCIMISC_CFG_SEL 0x00000002 /* Mem or Config cycles */
#define AR2315_PCIMISC_GIG_MASK 0x0000000C /* bits 31-30 for pci req */
#define AR2315_PCIMISC_RST_MODE 0x00000030
#define AR2315_PCIRST_INPUT 0x00000000 /* 4:5=0 rst is input */
#define AR2315_PCIRST_LOW 0x00000010 /* 4:5=1 rst to GND */
#define AR2315_PCIRST_HIGH 0x00000020 /* 4:5=2 rst to VDD */
#define AR2315_PCIGRANT_EN 0x00000000 /* 6:7=0 early grant en */
#define AR2315_PCIGRANT_FRAME 0x00000040 /* 6:7=1 grant waits 4 frame */
#define AR2315_PCIGRANT_IDLE 0x00000080 /* 6:7=2 grant waits 4 idle */
#define AR2315_PCIGRANT_GAP 0x00000000 /* 6:7=2 grant waits 4 idle */
#define AR2315_PCICACHE_DIS 0x00001000 /* PCI external access cache
* disable */
#define AR2315_PCI_OUT_TSTAMP 0x0010
#define AR2315_PCI_UNCACHE_CFG 0x0014
#define AR2315_PCI_IN_EN 0x0100
#define AR2315_PCI_IN_EN0 0x01 /* Enable chain 0 */
#define AR2315_PCI_IN_EN1 0x02 /* Enable chain 1 */
#define AR2315_PCI_IN_EN2 0x04 /* Enable chain 2 */
#define AR2315_PCI_IN_EN3 0x08 /* Enable chain 3 */
#define AR2315_PCI_IN_DIS 0x0104
#define AR2315_PCI_IN_DIS0 0x01 /* Disable chain 0 */
#define AR2315_PCI_IN_DIS1 0x02 /* Disable chain 1 */
#define AR2315_PCI_IN_DIS2 0x04 /* Disable chain 2 */
#define AR2315_PCI_IN_DIS3 0x08 /* Disable chain 3 */
#define AR2315_PCI_IN_PTR 0x0200
#define AR2315_PCI_OUT_EN 0x0400
#define AR2315_PCI_OUT_EN0 0x01 /* Enable chain 0 */
#define AR2315_PCI_OUT_DIS 0x0404
#define AR2315_PCI_OUT_DIS0 0x01 /* Disable chain 0 */
#define AR2315_PCI_OUT_PTR 0x0408
/* PCI interrupt status (write one to clear) */
#define AR2315_PCI_ISR 0x0500
#define AR2315_PCI_INT_TX 0x00000001 /* Desc In Completed */
#define AR2315_PCI_INT_TXOK 0x00000002 /* Desc In OK */
#define AR2315_PCI_INT_TXERR 0x00000004 /* Desc In ERR */
#define AR2315_PCI_INT_TXEOL 0x00000008 /* Desc In End-of-List */
#define AR2315_PCI_INT_RX 0x00000010 /* Desc Out Completed */
#define AR2315_PCI_INT_RXOK 0x00000020 /* Desc Out OK */
#define AR2315_PCI_INT_RXERR 0x00000040 /* Desc Out ERR */
#define AR2315_PCI_INT_RXEOL 0x00000080 /* Desc Out EOL */
#define AR2315_PCI_INT_TXOOD 0x00000200 /* Desc In Out-of-Desc */
#define AR2315_PCI_INT_DESCMASK 0x0000FFFF /* Desc Mask */
#define AR2315_PCI_INT_EXT 0x02000000 /* Extern PCI INTA */
#define AR2315_PCI_INT_ABORT 0x04000000 /* PCI bus abort event */
/* PCI interrupt mask */
#define AR2315_PCI_IMR 0x0504
/* Global PCI interrupt enable */
#define AR2315_PCI_IER 0x0508
#define AR2315_PCI_IER_DISABLE 0x00 /* disable pci interrupts */
#define AR2315_PCI_IER_ENABLE 0x01 /* enable pci interrupts */
#define AR2315_PCI_HOST_IN_EN 0x0800
#define AR2315_PCI_HOST_IN_DIS 0x0804
#define AR2315_PCI_HOST_IN_PTR 0x0810
#define AR2315_PCI_HOST_OUT_EN 0x0900
#define AR2315_PCI_HOST_OUT_DIS 0x0904
#define AR2315_PCI_HOST_OUT_PTR 0x0908
/*
* PCI interrupts, which share IP5
* Keep ordered according to AR2315_PCI_INT_XXX bits
*/
#define AR2315_PCI_IRQ_EXT 25
#define AR2315_PCI_IRQ_ABORT 26
#define AR2315_PCI_IRQ_COUNT 27
/* Arbitrary size of memory region to access the configuration space */
#define AR2315_PCI_CFG_SIZE 0x00100000
#define AR2315_PCI_HOST_SLOT 3
#define AR2315_PCI_HOST_DEVID ((0xff18 << 16) | PCI_VENDOR_ID_ATHEROS)
/*
* We need some arbitrary non-zero value to be programmed to the BAR1 register
* of PCI host controller to enable DMA. The same value should be used as the
* offset to calculate the physical address of DMA buffer for PCI devices.
*/
#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000
/* ??? access BAR */
#define AR2315_PCI_HOST_MBAR0 0x10000000
/* RAM access BAR */
#define AR2315_PCI_HOST_MBAR1 AR2315_PCI_HOST_SDRAM_BASEADDR
/* ??? access BAR */
#define AR2315_PCI_HOST_MBAR2 0x30000000
/*
 * Per-controller state.  The embedded pci_controller is what the MIPS
 * PCI core stores in bus->sysdata, so container_of() recovers this
 * structure from any bus (see ar2315_pci_bus_to_apc()).
 */
struct ar2315_pci_ctrl {
	void __iomem *cfg_mem;		/* remapped configuration window */
	void __iomem *mmr_mem;		/* PCI interface unit registers */
	unsigned irq;			/* parent (chained) interrupt */
	unsigned irq_ext;		/* mapped external PCI INTA irq */
	struct irq_domain *domain;
	struct pci_controller pci_ctrl;
	struct resource mem_res;
	struct resource io_res;
};
/*
 * DMA address offset for a device: PCI bus masters reach SDRAM through
 * the BAR1 window, every other device sees physical addresses directly.
 */
static inline dma_addr_t ar2315_dev_offset(struct device *dev)
{
	if (!dev || !dev_is_pci(dev))
		return 0;

	return AR2315_PCI_HOST_SDRAM_BASEADDR;
}
/* Arch overrides: translate CPU physical <-> PCI bus addresses. */
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + ar2315_dev_offset(dev);
}

phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr - ar2315_dev_offset(dev);
}

/* Recover the controller state from a bus' sysdata pointer. */
static inline struct ar2315_pci_ctrl *ar2315_pci_bus_to_apc(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return container_of(hose, struct ar2315_pci_ctrl, pci_ctrl);
}
/* Raw accessors for the PCI interface unit's memory-mapped registers. */
static inline u32 ar2315_pci_reg_read(struct ar2315_pci_ctrl *apc, u32 reg)
{
	return __raw_readl(apc->mmr_mem + reg);
}

static inline void ar2315_pci_reg_write(struct ar2315_pci_ctrl *apc, u32 reg,
					u32 val)
{
	__raw_writel(val, apc->mmr_mem + reg);
}

/* Read-modify-write: clear the bits in @mask, then set those in @val. */
static inline void ar2315_pci_reg_mask(struct ar2315_pci_ctrl *apc, u32 reg,
				       u32 mask, u32 val)
{
	u32 ret = ar2315_pci_reg_read(apc, reg);

	ret &= ~mask;
	ret |= val;
	ar2315_pci_reg_write(apc, reg, ret);
}
/*
 * Perform one configuration-space access through the remapped window.
 * The target device's IDSEL is driven by address bit (13 + dev); a bus
 * abort (AR2315_PCI_INT_ABORT in ISR) indicates an absent device and is
 * acknowledged before returning.
 *
 * @ptr: value read into (or, when @write is true, written from).
 * Returns PCIBIOS_SUCCESSFUL or PCIBIOS_DEVICE_NOT_FOUND.
 */
static int ar2315_pci_cfg_access(struct ar2315_pci_ctrl *apc, unsigned devfn,
				 int where, int size, u32 *ptr, bool write)
{
	int func = PCI_FUNC(devfn);
	int dev = PCI_SLOT(devfn);
	u32 addr = (1 << (13 + dev)) | (func << 8) | (where & ~3);
	u32 mask = 0xffffffff >> 8 * (4 - size);
	u32 sh = (where & 3) * 8;
	u32 value, isr;

	/* Prevent access past the remapped area */
	if (addr >= AR2315_PCI_CFG_SIZE || dev > 18)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Clear pending errors */
	ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT);
	/* Select Configuration access */
	ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, 0,
			    AR2315_PCIMISC_CFG_SEL);

	mb();	/* PCI must see space change before we begin */

	/* Reads and sub-word writes both need the current dword first. */
	value = __raw_readl(apc->cfg_mem + addr);
	isr = ar2315_pci_reg_read(apc, AR2315_PCI_ISR);
	if (isr & AR2315_PCI_INT_ABORT)
		goto exit_err;

	if (write) {
		/* Merge the new bytes into the dword and write it back. */
		value = (value & ~(mask << sh)) | *ptr << sh;
		__raw_writel(value, apc->cfg_mem + addr);
		isr = ar2315_pci_reg_read(apc, AR2315_PCI_ISR);
		if (isr & AR2315_PCI_INT_ABORT)
			goto exit_err;
	} else {
		*ptr = (value >> sh) & mask;
	}

	goto exit;

exit_err:
	/* Acknowledge the abort; reads return all-ones like real PCI. */
	ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT);
	if (!write)
		*ptr = 0xffffffff;

exit:
	/* Select Memory access */
	ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, AR2315_PCIMISC_CFG_SEL,
			    0);

	return isr & AR2315_PCI_INT_ABORT ? PCIBIOS_DEVICE_NOT_FOUND :
					    PCIBIOS_SUCCESSFUL;
}
/* 32-bit config accessors used to program the host bridge itself. */
static inline int ar2315_pci_local_cfg_rd(struct ar2315_pci_ctrl *apc,
					  unsigned devfn, int where, u32 *val)
{
	return ar2315_pci_cfg_access(apc, devfn, where, sizeof(u32), val,
				     false);
}

static inline int ar2315_pci_local_cfg_wr(struct ar2315_pci_ctrl *apc,
					  unsigned devfn, int where, u32 val)
{
	return ar2315_pci_cfg_access(apc, devfn, where, sizeof(u32), &val,
				     true);
}

/* pci_ops .read: hide the host bridge slot from generic enumeration. */
static int ar2315_pci_cfg_read(struct pci_bus *bus, unsigned devfn, int where,
			       int size, u32 *value)
{
	struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(bus);

	if (PCI_SLOT(devfn) == AR2315_PCI_HOST_SLOT)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return ar2315_pci_cfg_access(apc, devfn, where, size, value, false);
}

/* pci_ops .write: likewise skips the host bridge slot. */
static int ar2315_pci_cfg_write(struct pci_bus *bus, unsigned devfn, int where,
				int size, u32 value)
{
	struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(bus);

	if (PCI_SLOT(devfn) == AR2315_PCI_HOST_SLOT)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return ar2315_pci_cfg_access(apc, devfn, where, size, &value, true);
}

static struct pci_ops ar2315_pci_ops = {
	.read = ar2315_pci_cfg_read,
	.write = ar2315_pci_cfg_write,
};
/*
 * Program the host bridge's own PCI header: verify its vendor/device
 * ID, set up the three MBARs (BAR1 exposes SDRAM to bus masters) and
 * enable memory access plus bus mastering.  Returns -ENODEV when the
 * expected ID is not found.
 */
static int ar2315_pci_host_setup(struct ar2315_pci_ctrl *apc)
{
	unsigned devfn = PCI_DEVFN(AR2315_PCI_HOST_SLOT, 0);
	int res;
	u32 id;

	res = ar2315_pci_local_cfg_rd(apc, devfn, PCI_VENDOR_ID, &id);
	if (res != PCIBIOS_SUCCESSFUL || id != AR2315_PCI_HOST_DEVID)
		return -ENODEV;

	/* Program MBARs */
	ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_0,
				AR2315_PCI_HOST_MBAR0);
	ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_1,
				AR2315_PCI_HOST_MBAR1);
	ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_2,
				AR2315_PCI_HOST_MBAR2);

	/* Run */
	ar2315_pci_local_cfg_wr(apc, devfn, PCI_COMMAND, PCI_COMMAND_MEMORY |
				PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL |
				PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY |
				PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK);

	return 0;
}
/*
 * Chained handler for the parent PCI interrupt: dispatch the lowest
 * pending-and-unmasked source into the linear IRQ domain; anything else
 * is reported as spurious.
 */
static void ar2315_pci_irq_handler(struct irq_desc *desc)
{
	struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc);
	u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) &
		      ar2315_pci_reg_read(apc, AR2315_PCI_IMR);
	int ret = 0;

	if (pending)
		ret = generic_handle_domain_irq(apc->domain, __ffs(pending));

	if (!pending || ret)
		spurious_interrupt();
}
/* Mask one PCI interrupt source in IMR. */
static void ar2315_pci_irq_mask(struct irq_data *d)
{
	struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d);

	ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, BIT(d->hwirq), 0);
}

/* Mask the source and acknowledge it (ISR is write-one-to-clear). */
static void ar2315_pci_irq_mask_ack(struct irq_data *d)
{
	struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d);
	u32 m = BIT(d->hwirq);

	ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, m, 0);
	ar2315_pci_reg_write(apc, AR2315_PCI_ISR, m);
}

/* Unmask one PCI interrupt source in IMR. */
static void ar2315_pci_irq_unmask(struct irq_data *d)
{
	struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d);

	ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, 0, BIT(d->hwirq));
}

static struct irq_chip ar2315_pci_irq_chip = {
	.name = "AR2315-PCI",
	.irq_mask = ar2315_pci_irq_mask,
	.irq_mask_ack = ar2315_pci_irq_mask_ack,
	.irq_unmask = ar2315_pci_irq_unmask,
};

/* Domain .map: every source is a level interrupt on the same chip. */
static int ar2315_pci_irq_map(struct irq_domain *d, unsigned irq,
			      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &ar2315_pci_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, d->host_data);

	return 0;
}

static const struct irq_domain_ops ar2315_pci_irq_domain_ops = {
	.map = ar2315_pci_irq_map,
};
/*
 * Disable and clear all PCI interrupt sources, create the mapping for
 * the external INTA line, install the chained handler, then enable
 * interrupt processing.
 */
static void ar2315_pci_irq_init(struct ar2315_pci_ctrl *apc)
{
	ar2315_pci_reg_mask(apc, AR2315_PCI_IER, AR2315_PCI_IER_ENABLE, 0);
	ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, (AR2315_PCI_INT_ABORT |
			    AR2315_PCI_INT_EXT), 0);

	apc->irq_ext = irq_create_mapping(apc->domain, AR2315_PCI_IRQ_EXT);

	irq_set_chained_handler_and_data(apc->irq, ar2315_pci_irq_handler,
					 apc);

	/* Clear any pending Abort or external Interrupts
	 * and enable interrupt processing */
	ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT |
						  AR2315_PCI_INT_EXT);
	ar2315_pci_reg_mask(apc, AR2315_PCI_IER, 0, AR2315_PCI_IER_ENABLE);
}
/*
 * Platform probe: map the controller registers and config window, reset
 * the PCI bus, program the host bridge, set up the IRQ domain and
 * register the controller with the MIPS PCI core.
 *
 * NOTE(review): a platform_get_irq() failure returns -EINVAL rather
 * than propagating the negative value it produced -- confirm whether
 * the original error code should be returned instead.
 */
static int ar2315_pci_probe(struct platform_device *pdev)
{
	struct ar2315_pci_ctrl *apc;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int irq, err;

	apc = devm_kzalloc(dev, sizeof(*apc), GFP_KERNEL);
	if (!apc)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -EINVAL;
	apc->irq = irq;

	apc->mmr_mem = devm_platform_ioremap_resource_byname(pdev,
							     "ar2315-pci-ctrl");
	if (IS_ERR(apc->mmr_mem))
		return PTR_ERR(apc->mmr_mem);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "ar2315-pci-ext");
	if (!res)
		return -EINVAL;

	apc->mem_res.name = "AR2315 PCI mem space";
	apc->mem_res.parent = res;
	apc->mem_res.start = res->start;
	apc->mem_res.end = res->end;
	apc->mem_res.flags = IORESOURCE_MEM;

	/* Remap PCI config space */
	apc->cfg_mem = devm_ioremap(dev, res->start,
				    AR2315_PCI_CFG_SIZE);
	if (!apc->cfg_mem) {
		dev_err(dev, "failed to remap PCI config space\n");
		return -ENOMEM;
	}

	/* Reset the PCI bus by setting bits 5-4 in PCI_MCFG */
	ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG,
			    AR2315_PCIMISC_RST_MODE,
			    AR2315_PCIRST_LOW);
	msleep(100);

	/* Bring the PCI out of reset */
	ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG,
			    AR2315_PCIMISC_RST_MODE,
			    AR2315_PCIRST_HIGH | AR2315_PCICACHE_DIS | 0x8);

	ar2315_pci_reg_write(apc, AR2315_PCI_UNCACHE_CFG,
			     0x1E | /* 1GB uncached */
			     (1 << 5) | /* Enable uncached */
			     (0x2 << 30) /* Base: 0x80000000 */);
	/* read back, presumably to flush the posted write -- confirm */
	ar2315_pci_reg_read(apc, AR2315_PCI_UNCACHE_CFG);
	msleep(500);

	err = ar2315_pci_host_setup(apc);
	if (err)
		return err;

	apc->domain = irq_domain_add_linear(NULL, AR2315_PCI_IRQ_COUNT,
					    &ar2315_pci_irq_domain_ops, apc);
	if (!apc->domain) {
		dev_err(dev, "failed to add IRQ domain\n");
		return -ENOMEM;
	}

	ar2315_pci_irq_init(apc);

	/* PCI controller does not support I/O ports */
	apc->io_res.name = "AR2315 IO space";
	apc->io_res.start = 0;
	apc->io_res.end = 0;
	apc->io_res.flags = IORESOURCE_IO;

	apc->pci_ctrl.pci_ops = &ar2315_pci_ops;
	apc->pci_ctrl.mem_resource = &apc->mem_res;
	apc->pci_ctrl.io_resource = &apc->io_res;

	register_pci_controller(&apc->pci_ctrl);

	dev_info(dev, "register PCI controller\n");

	return 0;
}
static struct platform_driver ar2315_pci_driver = {
	.probe = ar2315_pci_probe,
	.driver = {
		.name = "ar2315-pci",
	},
};

/* Registered at arch_initcall time so PCI is up before device probes. */
static int __init ar2315_pci_init(void)
{
	return platform_driver_register(&ar2315_pci_driver);
}
arch_initcall(ar2315_pci_init);

/* Only slot 0 (the external device on AD[13]) has an interrupt line. */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(dev->bus);

	return slot ? 0 : apc->irq_ext;
}

/* No per-device platform setup needed at pci_enable_device() time. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
| linux-master | arch/mips/pci/pci-ar2315.c |
/*
* Copyright 2001 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
* [email protected] or [email protected]
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/irq.h>
/*
 * irq_map[bus][slot]: GROUP4 interrupt offset for the first two PCI
 * buses, slots 0..11; added to GROUP4_IRQ_BASE + 4 in pcibios_map_irq().
 */
static int irq_map[2][12] = {
	{0, 0, 2, 3, 2, 3, 0, 0, 0, 0, 0, 1},
	{0, 0, 1, 3, 0, 2, 1, 3, 0, 2, 1, 3}
};
/*
 * Look up the interrupt for a device; slots outside the routing table
 * fall back to offset 0.
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	int busno = dev->bus->number;
	int devslot = PCI_SLOT(dev->devfn);
	int irq = 0;

	if (busno < 2 && devslot < 12)
		irq = irq_map[busno][devslot];

	return GROUP4_IRQ_BASE + 4 + irq;
}
/*
 * For the device in slot 6 on bus 0: disable its prefetched memory
 * range and set the cache line size to 4.  Registered for every header
 * (see below) but guarded to that one slot.
 */
static void rc32434_pci_early_fixup(struct pci_dev *dev)
{
	if (PCI_SLOT(dev->devfn) == 6 && dev->bus->number == 0) {
		/* disable prefetched memory range */
		pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, 0);
		pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, 0x10);

		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 4);
	}
}

/*
 * The fixup applies to both the IDT and VIA devices present on the board
 */
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, rc32434_pci_early_fixup);

/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
| linux-master | arch/mips/pci/fixup-rc32434.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <bcm63xx_cpu.h>
/* All devices share the single bcm63xx PCI interrupt line. */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return bcm63xx_get_irq_number(IRQ_PCI);
}

/* No per-device platform setup needed at pci_enable_device() time. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
| linux-master | arch/mips/pci/fixup-bcm63xx.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/mips-boards/piix4.h>
/* PCI interrupt pins */
#define PCIA 1
#define PCIB 2
#define PCIC 3
#define PCID 4
/* This table is filled in by interrogating the PIIX4 chip */
/*
 * pci_irq[PCIA..PCID] caches the ISA IRQ each PIIX4 PIRQ line is
 * routed to (0 = disabled); populated by malta_piix_func0_fixup().
 */
static char pci_irq[5] = {
};

/* irq_tab[slot][pin] selects a PIRQ line (PCIA..PCID), 0 = no irq. */
static char irq_tab[][5] = {
	/*	INTA	INTB	INTC	INTD */
	{0,	0,	0,	0,	0 },	/*  0: GT64120 PCI bridge */
	{0,	0,	0,	0,	0 },	/*  1: Unused */
	{0,	0,	0,	0,	0 },	/*  2: Unused */
	{0,	0,	0,	0,	0 },	/*  3: Unused */
	{0,	0,	0,	0,	0 },	/*  4: Unused */
	{0,	0,	0,	0,	0 },	/*  5: Unused */
	{0,	0,	0,	0,	0 },	/*  6: Unused */
	{0,	0,	0,	0,	0 },	/*  7: Unused */
	{0,	0,	0,	0,	0 },	/*  8: Unused */
	{0,	0,	0,	0,	0 },	/*  9: Unused */
	{0,	0,	0,	0,	PCID },	/* 10: PIIX4 USB */
	{0,	PCIB,	0,	0,	0 },	/* 11: AMD 79C973 Ethernet */
	{0,	PCIC,	0,	0,	0 },	/* 12: Crystal 4281 Sound */
	{0,	0,	0,	0,	0 },	/* 13: Unused */
	{0,	0,	0,	0,	0 },	/* 14: Unused */
	{0,	0,	0,	0,	0 },	/* 15: Unused */
	{0,	0,	0,	0,	0 },	/* 16: Unused */
	{0,	0,	0,	0,	0 },	/* 17: Bonito/SOC-it PCI Bridge*/
	{0,	PCIA,	PCIB,	PCIC,	PCID },	/* 18: PCI Slot 1 */
	{0,	PCIB,	PCIC,	PCID,	PCIA },	/* 19: PCI Slot 2 */
	{0,	PCIC,	PCID,	PCIA,	PCIB },	/* 20: PCI Slot 3 */
	{0,	PCID,	PCIA,	PCIB,	PCIC }	/* 21: PCI Slot 4 */
};
/*
 * Route a device's interrupt pin to its ISA IRQ: the slot/pin pair
 * picks a PIRQ line, whose current routing was cached in pci_irq[].
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return pci_irq[irq_tab[slot][pin]];
}
/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	/* Nothing to do on Malta. */
	return 0;
}

/*
 * PIIX4 function 3 (power management): give the PM I/O block a sane
 * base address and enable access to that region.
 */
static void malta_piix_func3_base_fixup(struct pci_dev *dev)
{
	/* Set a sane PM I/O base address */
	pci_write_config_word(dev, PIIX4_FUNC3_PMBA, 0x1000);

	/* Enable access to the PM I/O region */
	pci_write_config_byte(dev, PIIX4_FUNC3_PMREGMISC,
			      PIIX4_FUNC3_PMREGMISC_EN);
}

DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3,
	malta_piix_func3_base_fixup);
/*
 * PIIX4 function 0 (ISA bridge) fixup: read back the PIRQ[A:D] routing
 * the bootloader programmed and cache it in pci_irq[], set the ISA/DMA
 * top-of-memory, mux and enable SERIRQ, and enable special cycles.
 *
 * Fix: five occurrences of "&reg_val*" had been corrupted to
 * "(registered-sign)_val*" by a bad encoding round-trip, which does
 * not compile; the address-of operators are restored.
 */
static void malta_piix_func0_fixup(struct pci_dev *pdev)
{
	unsigned char reg_val;
	u32 reg_val32;
	u16 reg_val16;
	/* PIIX PIRQC[A:D] irq mappings */
	static int piixirqmap[PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX] = {
		0,  0,  0,  3,
		4,  5,  6,  7,
		0,  9,  10, 11,
		12, 0,  14, 15
	};
	int i;

	/* Interrogate PIIX4 to get PCI IRQ mapping */
	for (i = 0; i <= 3; i++) {
		pci_read_config_byte(pdev, PIIX4_FUNC0_PIRQRC+i, &reg_val);
		if (reg_val & PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE)
			pci_irq[PCIA+i] = 0;	/* Disabled */
		else
			pci_irq[PCIA+i] = piixirqmap[reg_val &
				PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK];
	}

	/* Done by YAMON 2.00 onwards */
	if (PCI_SLOT(pdev->devfn) == 10) {
		/*
		 * Set top of main memory accessible by ISA or DMA
		 * devices to 16 Mb.
		 */
		pci_read_config_byte(pdev, PIIX4_FUNC0_TOM, &reg_val);
		pci_write_config_byte(pdev, PIIX4_FUNC0_TOM, reg_val |
				PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK);
	}

	/* Mux SERIRQ to its pin */
	pci_read_config_dword(pdev, PIIX4_FUNC0_GENCFG, &reg_val32);
	pci_write_config_dword(pdev, PIIX4_FUNC0_GENCFG,
			       reg_val32 | PIIX4_FUNC0_GENCFG_SERIRQ);

	/* Enable SERIRQ */
	pci_read_config_byte(pdev, PIIX4_FUNC0_SERIRQC, &reg_val);
	reg_val |= PIIX4_FUNC0_SERIRQC_EN | PIIX4_FUNC0_SERIRQC_CONT;
	pci_write_config_byte(pdev, PIIX4_FUNC0_SERIRQC, reg_val);

	/* Enable response to special cycles */
	pci_read_config_word(pdev, PCI_COMMAND, &reg_val16);
	pci_write_config_word(pdev, PCI_COMMAND,
			      reg_val16 | PCI_COMMAND_SPECIAL);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
	 malta_piix_func0_fixup);
/*
 * PIIX4 function 1 (IDE controller) fixup: enable IDE decode on both
 * channels when the bridge sits in slot 10 (newer YAMON does this
 * already; kept for older bootloaders).
 *
 * Fix: two occurrences of "&reg_val" had been corrupted to
 * "(registered-sign)_val" by a bad encoding round-trip, which does not
 * compile; the address-of operators are restored.
 */
static void malta_piix_func1_fixup(struct pci_dev *pdev)
{
	unsigned char reg_val;

	/* Done by YAMON 2.02 onwards */
	if (PCI_SLOT(pdev->devfn) == 10) {
		/*
		 * IDE Decode enable.
		 */
		pci_read_config_byte(pdev, PIIX4_FUNC1_IDETIM_PRIMARY_HI,
			&reg_val);
		pci_write_config_byte(pdev, PIIX4_FUNC1_IDETIM_PRIMARY_HI,
			reg_val|PIIX4_FUNC1_IDETIM_PRIMARY_HI_IDE_DECODE_EN);
		pci_read_config_byte(pdev, PIIX4_FUNC1_IDETIM_SECONDARY_HI,
			&reg_val);
		pci_write_config_byte(pdev, PIIX4_FUNC1_IDETIM_SECONDARY_HI,
			reg_val|PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB,
	 malta_piix_func1_fixup);
/*
 * Enable PCI 2.1 compatibility in the PIIX4: turn on USB passive
 * release, passive release and delayed transactions in the DLC
 * register.
 */
static void quirk_dlcsetup(struct pci_dev *dev)
{
	u8 dlc;

	pci_read_config_byte(dev, PIIX4_FUNC0_DLC, &dlc);
	dlc |= PIIX4_FUNC0_DLC_USBPR_EN |
	       PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN |
	       PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN;
	pci_write_config_byte(dev, PIIX4_FUNC0_DLC, dlc);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
quirk_dlcsetup);
| linux-master | arch/mips/pci/fixup-malta.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000, 2001 Keith M Wesolowski
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <asm/ip32/mace.h>
#if 0
# define DPRINTK(args...) printk(args);
#else
# define DPRINTK(args...)
#endif
/*
* O2 has up to 5 PCI devices connected into the MACE bridge. The device
* map looks like this:
*
* 0 aic7xxx 0
* 1 aic7xxx 1
* 2 expansion slot
* 3 N/C
* 4 N/C
*/
/*
 * Build the MACE config-address register value: bus number in bits
 * 23:16, devfn in 15:8, dword-aligned register offset in the low byte.
 */
static inline int mkaddr(struct pci_bus *bus, unsigned int devfn,
			 unsigned int reg)
{
	unsigned int addr;

	addr = (bus->number & 0xff) << 16;
	addr |= (devfn & 0xff) << 8;
	addr |= reg & 0xfc;

	return addr;
}
/*
 * Read @size bytes from a device's config space.  Master-abort
 * interrupts are masked for the duration so probing an empty slot does
 * not raise one; a pending abort is acknowledged afterwards.
 */
static int
mace_pci_read_config(struct pci_bus *bus, unsigned int devfn,
		     int reg, int size, u32 *val)
{
	u32 control = mace->pci.control;

	/* disable master aborts interrupts during config read */
	mace->pci.control = control & ~MACEPCI_CONTROL_MAR_INT;
	mace->pci.config_addr = mkaddr(bus, devfn, reg);
	switch (size) {
	case 1:
		/* ^3 / ^1 index flips: presumably byte-lane swapping for
		 * endianness -- confirm against MACE documentation. */
		*val = mace->pci.config_data.b[(reg & 3) ^ 3];
		break;
	case 2:
		*val = mace->pci.config_data.w[((reg >> 1) & 1) ^ 1];
		break;
	case 4:
		*val = mace->pci.config_data.l;
		break;
	}
	/* ack possible master abort */
	mace->pci.error &= ~MACEPCI_ERROR_MASTER_ABORT;
	mace->pci.control = control;
	/*
	 * someone forgot to set the ultra bit for the onboard
	 * scsi chips; we fake it here
	 */
	if (bus->number == 0 && reg == 0x40 && size == 4 &&
	    (devfn == (1 << 3) || devfn == (2 << 3)))
		*val |= 0x1000;

	DPRINTK("read%d: reg=%08x,val=%02x\n", size * 8, reg, *val);

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write @size bytes to a device's config space, mirroring the byte-lane
 * indexing used by the read path.
 */
static int
mace_pci_write_config(struct pci_bus *bus, unsigned int devfn,
		      int reg, int size, u32 val)
{
	mace->pci.config_addr = mkaddr(bus, devfn, reg);
	switch (size) {
	case 1:
		mace->pci.config_data.b[(reg & 3) ^ 3] = val;
		break;
	case 2:
		mace->pci.config_data.w[((reg >> 1) & 1) ^ 1] = val;
		break;
	case 4:
		mace->pci.config_data.l = val;
		break;
	}

	DPRINTK("write%d: reg=%08x,val=%02x\n", size * 8, reg, val);

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops mace_pci_ops = {
	.read = mace_pci_read_config,
	.write = mace_pci_write_config,
};
| linux-master | arch/mips/pci/ops-mace.c |
/*
* Copyright (C) 2008 Aurelien Jarno <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/ssb/ssb.h>
#include <linux/bcma/bcma.h>
#include <bcm47xx.h>
/*
 * IRQ mapping for BCM47xx is performed later, in pcibios_plat_dev_init(),
 * via the SSB/BCMA bus code; nothing to do at this stage.
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return 0;
}
#ifdef CONFIG_BCM47XX_SSB
/*
 * Finish platform setup of a PCI device sitting behind an SSB bus:
 * run the SSB-specific init, then resolve and assign its IRQ line.
 *
 * Returns 0 on success, a negative error from ssb_pcibios_plat_dev_init(),
 * or the failed mapping result.
 */
static int bcm47xx_pcibios_plat_dev_init_ssb(struct pci_dev *dev)
{
	int res;
	u8 slot, pin;

	res = ssb_pcibios_plat_dev_init(dev);
	if (res < 0) {
		pci_alert(dev, "PCI: Failed to init device\n");
		return res;
	}

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	slot = PCI_SLOT(dev->devfn);
	res = ssb_pcibios_map_irq(dev, slot, pin);

	/* IRQ-0 and IRQ-1 are software interrupts. */
	if (res < 2) {
		pci_alert(dev, "PCI: Failed to map IRQ of device\n");
		/*
		 * NOTE(review): res is 0 or 1 here, which the caller will
		 * treat as success - verify whether a negative error code
		 * was intended.
		 */
		return res;
	}

	dev->irq = res;
	return 0;
}
#endif
#ifdef CONFIG_BCM47XX_BCMA
/*
 * Finish platform setup of a PCI device sitting behind a BCMA bus:
 * run the BCMA-specific init, then resolve and assign its IRQ line.
 *
 * Returns 0 on success, a negative error from bcma_core_pci_plat_dev_init(),
 * or the failed mapping result.
 */
static int bcm47xx_pcibios_plat_dev_init_bcma(struct pci_dev *dev)
{
	int res;

	res = bcma_core_pci_plat_dev_init(dev);
	if (res < 0) {
		pci_alert(dev, "PCI: Failed to init device\n");
		return res;
	}

	res = bcma_core_pci_pcibios_map_irq(dev);

	/* IRQ-0 and IRQ-1 are software interrupts. */
	if (res < 2) {
		pci_alert(dev, "PCI: Failed to map IRQ of device\n");
		/* NOTE(review): res is 0 or 1 here - looks like success to
		 * the caller; verify a negative error was not intended. */
		return res;
	}

	dev->irq = res;
	return 0;
}
#endif
/*
 * Per-device platform init hook: dispatch to the SSB or BCMA variant
 * depending on which system bus type was detected at boot.
 */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
#ifdef CONFIG_BCM47XX_SSB
	if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_SSB)
		return bcm47xx_pcibios_plat_dev_init_ssb(dev);
#endif
#ifdef CONFIG_BCM47XX_BCMA
	if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA)
		return bcm47xx_pcibios_plat_dev_init_bcma(dev);
#endif
	return 0;
}
| linux-master | arch/mips/pci/pci-bcm47xx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Alchemy PCI host mode support.
*
* Copyright 2001-2003, 2007-2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <[email protected]>
*
* Support for all devices (greater than 16) added by David Gathright.
*/
#include <linux/clk.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/syscore_ops.h>
#include <linux/vmalloc.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <asm/mach-au1x00/au1000.h>
#include <asm/tlbmisc.h>
#ifdef CONFIG_PCI_DEBUG
#define DBG(x...) printk(KERN_DEBUG x)
#else
#define DBG(x...) do {} while (0)
#endif
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
struct alchemy_pci_context {
struct pci_controller alchemy_pci_ctrl; /* leave as first member! */
void __iomem *regs; /* ctrl base */
/* tools for wired entry for config space access */
unsigned long last_elo0;
unsigned long last_elo1;
int wired_entry;
struct vm_struct *pci_cfg_vm;
unsigned long pm[12];
int (*board_map_irq)(const struct pci_dev *d, u8 slot, u8 pin);
int (*board_pci_idsel)(unsigned int devsel, int assert);
};
/* for syscore_ops. There's only one PCI controller on Alchemy chips, so this
* should suffice for now.
*/
static struct alchemy_pci_context *__alchemy_pci_ctx;
/* IO/MEM resources for PCI. Keep the memres in sync with fixup_bigphys_addr
* in arch/mips/alchemy/common/setup.c
*/
static struct resource alchemy_pci_def_memres = {
.start = ALCHEMY_PCI_MEMWIN_START,
.end = ALCHEMY_PCI_MEMWIN_END,
.name = "PCI memory space",
.flags = IORESOURCE_MEM
};
static struct resource alchemy_pci_def_iores = {
.start = ALCHEMY_PCI_IOWIN_START,
.end = ALCHEMY_PCI_IOWIN_END,
.name = "PCI IO space",
.flags = IORESOURCE_IO
};
/*
 * Rewrite one wired TLB entry in place.  The c0 register write order
 * (index, pagemask, entryhi, entrylo0/1, then tlbwi) is mandated by the
 * MIPS TLB programming model - do not reorder.  The previous ASID and
 * pagemask are saved and restored so the running context is unaffected.
 * Caller must hold off interrupts while this runs.
 */
static void mod_wired_entry(int entry, unsigned long entrylo0,
		unsigned long entrylo1, unsigned long entryhi,
		unsigned long pagemask)
{
	unsigned long old_pagemask;
	unsigned long old_ctx;

	/* Save the current ASID and pagemask for restoration below. */
	old_ctx = read_c0_entryhi() & MIPS_ENTRYHI_ASID;
	old_pagemask = read_c0_pagemask();
	write_c0_index(entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	tlb_write_indexed();
	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
}
/*
 * Reserve a wired TLB entry that will map the PCI config-space window.
 * It is installed with zero EntryLo values (i.e. pointing nowhere yet);
 * invalidating the cached last_elo0/1 forces config_access() to program
 * real mappings on the next config cycle.
 */
static void alchemy_pci_wired_entry(struct alchemy_pci_context *ctx)
{
	ctx->wired_entry = read_c0_wired();
	add_wired_entry(0, 0, (unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
	ctx->last_elo0 = ctx->last_elo1 = ~0;
}
/*
 * Perform one PCI configuration read or write.
 *
 * The 36-bit config space is reached through a single wired 4K TLB pair
 * (see alchemy_pci_wired_entry()) which is retargeted here whenever the
 * addressed device changes.  On any failure (bad device, IDSEL veto,
 * master/target abort) *data is set to 0xffffffff and -1 is returned;
 * PCIBIOS_SUCCESSFUL otherwise.  Runs with local IRQs disabled because
 * the wired entry and the controller registers are shared state.
 */
static int config_access(unsigned char access_type, struct pci_bus *bus,
			 unsigned int dev_fn, unsigned char where, u32 *data)
{
	struct alchemy_pci_context *ctx = bus->sysdata;
	unsigned int device = PCI_SLOT(dev_fn);
	unsigned int function = PCI_FUNC(dev_fn);
	unsigned long offset, status, cfg_base, flags, entryLo0, entryLo1, r;
	int error = PCIBIOS_SUCCESSFUL;

	/* Only IDSEL lines for devices 0..19 exist on this controller. */
	if (device > 19) {
		*data = 0xffffffff;
		return -1;
	}

	local_irq_save(flags);
	/* Clear any stale abort status before starting the cycle. */
	r = __raw_readl(ctx->regs + PCI_REG_STATCMD) & 0x0000ffff;
	r |= PCI_STATCMD_STATUS(0x2000);
	__raw_writel(r, ctx->regs + PCI_REG_STATCMD);
	wmb();

	/* Allow board vendors to implement their own off-chip IDSEL.
	 * If it doesn't succeed, may as well bail out at this point.
	 */
	if (ctx->board_pci_idsel(device, 1) == 0) {
		*data = 0xffffffff;
		local_irq_restore(flags);
		return -1;
	}

	/* Setup the config window: type 0 cycle on bus 0 (one-hot IDSEL),
	 * type 1 cycle (bit 31 set) for subordinate buses. */
	if (bus->number == 0)
		cfg_base = (1 << device) << 11;
	else
		cfg_base = 0x80000000 | (bus->number << 16) | (device << 11);

	/* Setup the lower bits of the 36-bit address */
	offset = (function << 8) | (where & ~0x3);
	/* Pick up any address that falls below the page mask */
	offset |= cfg_base & ~PAGE_MASK;
	/* Page boundary */
	cfg_base = cfg_base & PAGE_MASK;

	/* To improve performance, if the current device is the same as
	 * the last device accessed, we don't touch the TLB.
	 */
	entryLo0 = (6 << 26) | (cfg_base >> 6) | (2 << 3) | 7;
	entryLo1 = (6 << 26) | (cfg_base >> 6) | (0x1000 >> 6) | (2 << 3) | 7;
	if ((entryLo0 != ctx->last_elo0) || (entryLo1 != ctx->last_elo1)) {
		mod_wired_entry(ctx->wired_entry, entryLo0, entryLo1,
				(unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
		ctx->last_elo0 = entryLo0;
		ctx->last_elo1 = entryLo1;
	}

	if (access_type == PCI_ACCESS_WRITE)
		__raw_writel(*data, ctx->pci_cfg_vm->addr + offset);
	else
		*data = __raw_readl(ctx->pci_cfg_vm->addr + offset);
	wmb();

	DBG("alchemy-pci: cfg access %d bus %u dev %u at %x dat %x conf %lx\n",
	    access_type, bus->number, device, where, *data, offset);

	/* check for errors, master abort */
	status = __raw_readl(ctx->regs + PCI_REG_STATCMD);
	if (status & (1 << 29)) {
		*data = 0xffffffff;
		error = -1;
		DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",
		    access_type, bus->number, device);
	} else if ((status >> 28) & 0xf) {
		DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
		    device, (status >> 28) & 0xf);

		/* clear errors */
		__raw_writel(status & 0xf000ffff, ctx->regs + PCI_REG_STATCMD);

		*data = 0xffffffff;
		error = -1;
	}

	/* Take away the IDSEL. */
	(void)ctx->board_pci_idsel(device, 0);

	local_irq_restore(flags);
	return error;
}
/*
 * Read one byte of config space: fetch the containing dword and
 * extract the lane selected by the low two address bits.
 */
static int read_config_byte(struct pci_bus *bus, unsigned int devfn,
			    int where, u8 *val)
{
	u32 dword;
	unsigned int shift = (where & 3) << 3;
	int rc = config_access(PCI_ACCESS_READ, bus, devfn, where, &dword);

	*val = (dword >> shift) & 0xff;
	return rc;
}
/*
 * Read one 16-bit word of config space: fetch the containing dword and
 * extract either the low or high half, selected by bit 1 of the address.
 */
static int read_config_word(struct pci_bus *bus, unsigned int devfn,
			    int where, u16 *val)
{
	u32 dword;
	unsigned int shift = (where & 2) << 3;
	int rc = config_access(PCI_ACCESS_READ, bus, devfn, where, &dword);

	*val = (dword >> shift) & 0xffff;
	return rc;
}
/* Read a full 32-bit dword of config space; no lane extraction needed. */
static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
			     int where, u32 *val)
{
	return config_access(PCI_ACCESS_READ, bus, devfn, where, val);
}
/*
 * Write one byte of config space via read-modify-write of the containing
 * dword.  Returns -1 if either config cycle fails, PCIBIOS_SUCCESSFUL
 * otherwise.
 *
 * Fix: use an unsigned mask constant.  The previous `0xff << 24` (for
 * where % 4 == 3) left-shifted into the sign bit of an int, which is
 * undefined behaviour in C.
 */
static int write_config_byte(struct pci_bus *bus, unsigned int devfn,
			     int where, u8 val)
{
	u32 data = 0;
	unsigned int shift = (where & 3) << 3;

	if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return -1;

	data = (data & ~(0xffu << shift)) | ((u32)val << shift);

	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write one 16-bit word of config space via read-modify-write of the
 * containing dword.  Returns -1 if either config cycle fails,
 * PCIBIOS_SUCCESSFUL otherwise.
 *
 * Fix: use an unsigned mask constant.  The previous `0xffff << 16` (for
 * where % 4 == 2) left-shifted into the sign bit of an int, which is
 * undefined behaviour in C.
 */
static int write_config_word(struct pci_bus *bus, unsigned int devfn,
			     int where, u16 val)
{
	u32 data = 0;
	unsigned int shift = (where & 3) << 3;

	if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return -1;

	data = (data & ~(0xffffu << shift)) | ((u32)val << shift);

	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}
/* Write a full 32-bit dword of config space; no read-modify-write needed. */
static int write_config_dword(struct pci_bus *bus, unsigned int devfn,
			      int where, u32 val)
{
	return config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val);
}
/*
 * pci_ops .read entry point: dispatch to the width-specific helper and
 * widen the result to the u32 the PCI core expects.
 */
static int alchemy_pci_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	u8 byte_val;
	u16 word_val;
	int rc;

	switch (size) {
	case 1:
		rc = read_config_byte(bus, devfn, where, &byte_val);
		*val = byte_val;
		break;
	case 2:
		rc = read_config_word(bus, devfn, where, &word_val);
		*val = word_val;
		break;
	default:
		rc = read_config_dword(bus, devfn, where, val);
		break;
	}

	return rc;
}
/*
 * pci_ops .write entry point: narrow the value and dispatch to the
 * width-specific helper.
 */
static int alchemy_pci_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	if (size == 1)
		return write_config_byte(bus, devfn, where, (u8) val);
	if (size == 2)
		return write_config_word(bus, devfn, where, (u16) val);
	return write_config_dword(bus, devfn, where, val);
}
/* Config-space accessors handed to the generic PCI core. */
static struct pci_ops alchemy_pci_ops = {
	.read = alchemy_pci_read,
	.write = alchemy_pci_write,
};
/* Default IDSEL hook for boards without off-chip IDSEL logic: always OK. */
static int alchemy_pci_def_idsel(unsigned int devsel, int assert)
{
	return 1;	/* success */
}
/* save PCI controller register contents. */
/* save PCI controller register contents. */
static int alchemy_pci_suspend(void)
{
	struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
	if (!ctx)
		return 0;

	/* Snapshot all controller registers into ctx->pm[] for resume.
	 * CONFIG is masked to the bits that are restored last (see
	 * alchemy_pci_resume()). */
	ctx->pm[0]  = __raw_readl(ctx->regs + PCI_REG_CMEM);
	ctx->pm[1]  = __raw_readl(ctx->regs + PCI_REG_CONFIG) & 0x0009ffff;
	ctx->pm[2]  = __raw_readl(ctx->regs + PCI_REG_B2BMASK_CCH);
	ctx->pm[3]  = __raw_readl(ctx->regs + PCI_REG_B2BBASE0_VID);
	ctx->pm[4]  = __raw_readl(ctx->regs + PCI_REG_B2BBASE1_SID);
	ctx->pm[5]  = __raw_readl(ctx->regs + PCI_REG_MWMASK_DEV);
	ctx->pm[6]  = __raw_readl(ctx->regs + PCI_REG_MWBASE_REV_CCL);
	ctx->pm[7]  = __raw_readl(ctx->regs + PCI_REG_ID);
	ctx->pm[8]  = __raw_readl(ctx->regs + PCI_REG_CLASSREV);
	ctx->pm[9]  = __raw_readl(ctx->regs + PCI_REG_PARAM);
	ctx->pm[10] = __raw_readl(ctx->regs + PCI_REG_MBAR);
	ctx->pm[11] = __raw_readl(ctx->regs + PCI_REG_TIMEOUT);

	return 0;
}
/*
 * Restore the controller registers saved in alchemy_pci_suspend().
 * CONFIG (pm[1]) is written last, after a barrier, so the controller is
 * re-enabled only once everything else is back in place.
 */
static void alchemy_pci_resume(void)
{
	struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
	if (!ctx)
		return;

	__raw_writel(ctx->pm[0],  ctx->regs + PCI_REG_CMEM);
	__raw_writel(ctx->pm[2],  ctx->regs + PCI_REG_B2BMASK_CCH);
	__raw_writel(ctx->pm[3],  ctx->regs + PCI_REG_B2BBASE0_VID);
	__raw_writel(ctx->pm[4],  ctx->regs + PCI_REG_B2BBASE1_SID);
	__raw_writel(ctx->pm[5],  ctx->regs + PCI_REG_MWMASK_DEV);
	__raw_writel(ctx->pm[6],  ctx->regs + PCI_REG_MWBASE_REV_CCL);
	__raw_writel(ctx->pm[7],  ctx->regs + PCI_REG_ID);
	__raw_writel(ctx->pm[8],  ctx->regs + PCI_REG_CLASSREV);
	__raw_writel(ctx->pm[9],  ctx->regs + PCI_REG_PARAM);
	__raw_writel(ctx->pm[10], ctx->regs + PCI_REG_MBAR);
	__raw_writel(ctx->pm[11], ctx->regs + PCI_REG_TIMEOUT);
	wmb();
	__raw_writel(ctx->pm[1],  ctx->regs + PCI_REG_CONFIG);
	wmb();

	/* YAMON on all db1xxx boards wipes the TLB and writes zero to C0_wired
	 * on resume, making it necessary to recreate it as soon as possible.
	 */
	ctx->wired_entry = 8191;	/* impossibly high value */
	alchemy_pci_wired_entry(ctx);	/* install it */
}
/* System-wide suspend/resume hooks for the (single) PCI controller. */
static struct syscore_ops alchemy_pci_pmops = {
	.suspend	= alchemy_pci_suspend,
	.resume		= alchemy_pci_resume,
};
/*
 * Platform-device probe: claim and map the controller registers, enable
 * the PCI clock, map the fixed PCI I/O window, install the wired TLB
 * entry for config-space access, apply board-specific config bits and
 * register the controller with the PCI core.
 *
 * On failure the goto labels unwind in reverse acquisition order; note
 * the label numbers are not sequential (out5/out6 were inserted later)
 * but the unwind chain itself is correct.
 */
static int alchemy_pci_probe(struct platform_device *pdev)
{
	struct alchemy_pci_platdata *pd = pdev->dev.platform_data;
	struct alchemy_pci_context *ctx;
	void __iomem *virt_io;
	unsigned long val;
	struct resource *r;
	struct clk *c;
	int ret;

	/* need at least PCI IRQ mapping table */
	if (!pd) {
		dev_err(&pdev->dev, "need platform data for PCI setup\n");
		ret = -ENODEV;
		goto out;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		dev_err(&pdev->dev, "no memory for pcictl context\n");
		ret = -ENOMEM;
		goto out;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no  pcictl ctrl regs resource\n");
		ret = -ENODEV;
		goto out1;
	}

	if (!request_mem_region(r->start, resource_size(r), pdev->name)) {
		dev_err(&pdev->dev, "cannot claim pci regs\n");
		ret = -ENODEV;
		goto out1;
	}

	c = clk_get(&pdev->dev, "pci_clko");
	if (IS_ERR(c)) {
		dev_err(&pdev->dev, "unable to find PCI clock\n");
		ret = PTR_ERR(c);
		goto out2;
	}

	ret = clk_prepare_enable(c);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI clock\n");
		goto out6;
	}

	ctx->regs = ioremap(r->start, resource_size(r));
	if (!ctx->regs) {
		dev_err(&pdev->dev, "cannot map pci regs\n");
		ret = -ENODEV;
		goto out5;
	}

	/* map parts of the PCI IO area */
	/* REVISIT: if this changes with a newer variant (doubt it) make this
	 * a platform resource.
	 */
	virt_io = ioremap(AU1500_PCI_IO_PHYS_ADDR, 0x00100000);
	if (!virt_io) {
		dev_err(&pdev->dev, "cannot remap pci io space\n");
		ret = -ENODEV;
		goto out3;
	}
	ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io;

	/* Au1500 revisions older than AD have borked coherent PCI */
	if (alchemy_get_cputype() == ALCHEMY_CPU_AU1500 &&
	    read_c0_prid() < 0x01030202 && !dma_default_coherent) {
		val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
		val |= PCI_CONFIG_NC;
		__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
		wmb();
		dev_info(&pdev->dev, "non-coherent PCI on Au1500 AA/AB/AC\n");
	}

	if (pd->board_map_irq)
		ctx->board_map_irq = pd->board_map_irq;

	if (pd->board_pci_idsel)
		ctx->board_pci_idsel = pd->board_pci_idsel;
	else
		ctx->board_pci_idsel = alchemy_pci_def_idsel;

	/* fill in relevant pci_controller members */
	ctx->alchemy_pci_ctrl.pci_ops = &alchemy_pci_ops;
	ctx->alchemy_pci_ctrl.mem_resource = &alchemy_pci_def_memres;
	ctx->alchemy_pci_ctrl.io_resource = &alchemy_pci_def_iores;

	/* we can't ioremap the entire pci config space because it's too large,
	 * nor can we dynamically ioremap it because some drivers use the
	 * PCI config routines from within atomic contex and that becomes a
	 * problem in get_vm_area().  Instead we use one wired TLB entry to
	 * handle all config accesses for all busses.
	 */
	ctx->pci_cfg_vm = get_vm_area(0x2000, VM_IOREMAP);
	if (!ctx->pci_cfg_vm) {
		dev_err(&pdev->dev, "unable to get vm area\n");
		ret = -ENOMEM;
		goto out4;
	}
	ctx->wired_entry = 8191;	/* impossibly high value */
	alchemy_pci_wired_entry(ctx);	/* install it */

	set_io_port_base((unsigned long)ctx->alchemy_pci_ctrl.io_map_base);

	/* board may want to modify bits in the config register, do it now */
	val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
	val &= ~pd->pci_cfg_clr;
	val |= pd->pci_cfg_set;
	val &= ~PCI_CONFIG_PD;		/* clear disable bit */
	__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
	wmb();

	__alchemy_pci_ctx = ctx;
	platform_set_drvdata(pdev, ctx);
	register_syscore_ops(&alchemy_pci_pmops);
	register_pci_controller(&ctx->alchemy_pci_ctrl);

	dev_info(&pdev->dev, "PCI controller at %ld MHz\n",
		 clk_get_rate(c) / 1000000);

	return 0;

out4:
	iounmap(virt_io);
out3:
	iounmap(ctx->regs);
out5:
	clk_disable_unprepare(c);
out6:
	clk_put(c);
out2:
	release_mem_region(r->start, resource_size(r));
out1:
	kfree(ctx);
out:
	return ret;
}
/* Platform driver glue; matched against the "alchemy-pci" device. */
static struct platform_driver alchemy_pcictl_driver = {
	.probe		= alchemy_pci_probe,
	.driver	= {
		.name	= "alchemy-pci",
	},
};
/*
 * Register the platform driver, but only on Alchemy variants that
 * actually have a PCI controller; other CPUs silently succeed.
 */
static int __init alchemy_pci_init(void)
{
	/* Au1500/Au1550 have PCI */
	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1500:
	case ALCHEMY_CPU_AU1550:
		return platform_driver_register(&alchemy_pcictl_driver);
	}
	return 0;
}
/*
 * Map a device's interrupt pin to an IRQ by delegating to the
 * board-supplied table; -1 when no mapping callback was registered.
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct alchemy_pci_context *ctx = dev->sysdata;

	if (!ctx || !ctx->board_map_irq)
		return -1;

	return ctx->board_map_irq(dev, slot, pin);
}
/* No per-device platform fixups are needed on Alchemy. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
| linux-master | arch/mips/pci/pci-alchemy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <[email protected]>
* Maciej W. Rozycki <[email protected]>
*
* MIPS boards specific PCI support.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <asm/mips-boards/bonito64.h>
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
#define CFG_SPACE_REG(offset) (void *)CKSEG1ADDR(_pcictrl_bonito_pcicfg + (offset))
#define ID_SEL_BEGIN 10
#define MAX_DEV_NUM (31 - ID_SEL_BEGIN)
/*
 * Perform one config-space access through the Bonito64 north bridge.
 * Bus 0 uses type 0 cycles with a one-hot IDSEL starting at AD line
 * ID_SEL_BEGIN; other buses use type 1 cycles.  Master/target aborts
 * are detected afterwards and reported as -1; 0 on success.
 */
static int bonito64_pcibios_config_access(unsigned char access_type,
					  struct pci_bus *bus,
					  unsigned int devfn, int where,
					  u32 * data)
{
	u32 busnum = bus->number;
	u32 addr, type;
	u32 dummy;
	void *addrp;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int reg = where & ~3;

	if (busnum == 0) {
		/* Type 0 configuration for onboard PCI bus */
		if (device > MAX_DEV_NUM)
			return -1;

		addr = (1 << (device + ID_SEL_BEGIN)) | (function << 8) | reg;
		type = 0;
	} else {
		/* Type 1 configuration for offboard PCI bus */
		addr = (busnum << 16) | (device << 11) | (function << 8) | reg;
		type = 0x10000;
	}

	/* Clear aborts */
	BONITO_PCICMD |= BONITO_PCICMD_MABORT_CLR | BONITO_PCICMD_MTABORT_CLR;

	BONITO_PCIMAP_CFG = (addr >> 16) | type;

	/* Flush Bonito register block (the read itself is the flush;
	 * 'dummy' only exists to force it). */
	dummy = BONITO_PCIMAP_CFG;
	mmiowb();

	addrp = CFG_SPACE_REG(addr & 0xffff);
	if (access_type == PCI_ACCESS_WRITE) {
		writel(cpu_to_le32(*data), addrp);
		/* Wait till done */
		while (BONITO_PCIMSTAT & 0xF);
	} else {
		*data = le32_to_cpu(readl(addrp));
	}

	/* Detect Master/Target abort */
	if (BONITO_PCICMD & (BONITO_PCICMD_MABORT_CLR |
			     BONITO_PCICMD_MTABORT_CLR)) {
		/* Error occurred */

		/* Clear bits */
		BONITO_PCICMD |= (BONITO_PCICMD_MABORT_CLR |
				  BONITO_PCICMD_MTABORT_CLR);

		return -1;
	}

	return 0;
}
/*
* We can't address 8 and 16 bit words directly. Instead we have to
* read/write a 32bit word and mask/modify the data we actually want.
*/
/*
 * pci_ops .read: reject misaligned accesses, fetch the containing dword
 * and extract the requested byte/word lane.
 */
static int bonito64_pcibios_read(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 * val)
{
	u32 data = 0;
	unsigned int shift = (where & 3) << 3;

	if (((size == 2) && (where & 1)) || ((size == 4) && (where & 3)))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (bonito64_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
					   &data))
		return -1;

	switch (size) {
	case 1:
		*val = (data >> shift) & 0xff;
		break;
	case 2:
		*val = (data >> shift) & 0xffff;
		break;
	default:
		*val = data;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * pci_ops .write: reject misaligned accesses; sub-dword writes are done
 * as read-modify-write of the containing dword.
 *
 * Fix: use unsigned mask constants.  The previous `0xff << 24` /
 * `0xffff << 16` left-shifted into the sign bit of an int, which is
 * undefined behaviour in C.
 */
static int bonito64_pcibios_write(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 val)
{
	u32 data = 0;
	unsigned int shift = (where & 3) << 3;

	if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
	else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		data = val;
	else {
		if (bonito64_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
						   where, &data))
			return -1;

		if (size == 1)
			data = (data & ~(0xffu << shift)) | (val << shift);
		else if (size == 2)
			data = (data & ~(0xffffu << shift)) | (val << shift);
	}

	if (bonito64_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
					   &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors handed to the generic PCI core. */
struct pci_ops bonito64_pci_ops = {
	.read = bonito64_pcibios_read,
	.write = bonito64_pcibios_write
};
| linux-master | arch/mips/pci/ops-bonito64.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* SNI specific PCI support for RM200/RM300.
*
* Copyright (C) 1997 - 2000, 2003 Ralf Baechle <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <asm/sni.h>
/*
* It seems that on the RM200 only lower 3 bits of the 5 bit PCI device
* address are decoded. We therefore manually have to reject attempts at
* reading outside this range. Being on the paranoid side we only do this
* test for bus 0 and hope forwarding and decoding work properly for any
* subordinated busses.
*
* ASIC PCI only supports type 1 config cycles.
*/
/*
 * Latch a type 1 config address into the PCIMT address register.
 * Devices >= 8 on bus 0 are rejected up front because only the lower
 * 3 bits of the device address decode on the RM200 (see file header).
 */
static int set_config_address(unsigned int busno, unsigned int devfn, int reg)
{
	if ((devfn > 255) || (reg > 255))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (busno == 0 && devfn >= PCI_DEVFN(8, 0))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Volatile MMIO store; register offset must be dword-aligned. */
	*(volatile u32 *)PCIMT_CONFIG_ADDRESS =
		((busno    & 0xff) << 16) |
		((devfn & 0xff) <<  8) |
		(reg   & 0xfc);

	return PCIBIOS_SUCCESSFUL;
}
/*
 * PCIMT config read: latch the address, then pull the value from the
 * data port, offsetting by the low address bits for sub-dword sizes.
 */
static int pcimt_read(struct pci_bus *bus, unsigned int devfn, int reg,
		      int size, u32 * val)
{
	int res = set_config_address(bus->number, devfn, reg);

	if (res)
		return res;

	switch (size) {
	case 1:
		*val = inb(PCIMT_CONFIG_DATA + (reg & 3));
		break;
	case 2:
		*val = inw(PCIMT_CONFIG_DATA + (reg & 2));
		break;
	case 4:
		*val = inl(PCIMT_CONFIG_DATA);
		break;
	}

	return 0;
}
/*
 * PCIMT config write: latch the address, then push the value through the
 * data port, offsetting by the low address bits for sub-dword sizes.
 */
static int pcimt_write(struct pci_bus *bus, unsigned int devfn, int reg,
		       int size, u32 val)
{
	int res = set_config_address(bus->number, devfn, reg);

	if (res)
		return res;

	switch (size) {
	case 1:
		outb(val, PCIMT_CONFIG_DATA + (reg & 3));
		break;
	case 2:
		outw(val, PCIMT_CONFIG_DATA + (reg & 2));
		break;
	case 4:
		outl(val, PCIMT_CONFIG_DATA);
		break;
	}

	return 0;
}
/* Config-space accessors for the PCIMT (RM200/RM300) flavour. */
struct pci_ops sni_pcimt_ops = {
	.read = pcimt_read,
	.write = pcimt_write,
};
/*
 * Latch a type 1 config address into the standard 0xcf8 address port:
 * enable bit | bus | devfn | dword-aligned register.
 *
 * Fix: the enable bit was written as `1 << 31`, which left-shifts into
 * the sign bit of an int - undefined behaviour in C.  Use an unsigned
 * constant instead; the emitted value is unchanged.
 */
static int pcit_set_config_address(unsigned int busno, unsigned int devfn, int reg)
{
	if ((devfn > 255) || (reg > 255) || (busno > 255))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	outl(0x80000000u | ((busno & 0xff) << 16) | ((devfn & 0xff) << 8) | (reg & 0xfc), 0xcf8);
	return PCIBIOS_SUCCESSFUL;
}
/*
 * PCIT config read.  On bus 0 the devfn is first probed (dummy write of
 * all-ones, then a status check via register 0x68 of device 0) because
 * reading a non-existent device would otherwise raise a data bus error.
 * The exact sequence is hardware-mandated - do not reorder.
 */
static int pcit_read(struct pci_bus *bus, unsigned int devfn, int reg,
		     int size, u32 * val)
{
	int res;

	/*
	 * on bus 0 we need to check, whether there is a device answering
	 * for the devfn by doing a config write and checking the result. If
	 * we don't do it, we will get a data bus error
	 */
	if (bus->number == 0) {
		pcit_set_config_address(0, 0, 0x68);
		outl(inl(0xcfc) | 0xc0000000, 0xcfc);
		if ((res = pcit_set_config_address(0, devfn, 0)))
			return res;
		outl(0xffffffff, 0xcfc);
		pcit_set_config_address(0, 0, 0x68);
		if (inl(0xcfc) & 0x100000)
			return PCIBIOS_DEVICE_NOT_FOUND;
	}
	if ((res = pcit_set_config_address(bus->number, devfn, reg)))
		return res;

	switch (size) {
	case 1:
		*val = inb(PCIMT_CONFIG_DATA + (reg & 3));
		break;
	case 2:
		*val = inw(PCIMT_CONFIG_DATA + (reg & 2));
		break;
	case 4:
		*val = inl(PCIMT_CONFIG_DATA);
		break;
	}
	return 0;
}
/*
 * PCIT config write: latch the address, then push the value through the
 * data port, offsetting by the low address bits for sub-dword sizes.
 */
static int pcit_write(struct pci_bus *bus, unsigned int devfn, int reg,
		      int size, u32 val)
{
	int res = pcit_set_config_address(bus->number, devfn, reg);

	if (res)
		return res;

	switch (size) {
	case 1:
		outb(val, PCIMT_CONFIG_DATA + (reg & 3));
		break;
	case 2:
		outw(val, PCIMT_CONFIG_DATA + (reg & 2));
		break;
	case 4:
		outl(val, PCIMT_CONFIG_DATA);
		break;
	}

	return 0;
}
/* Config-space accessors for the PCIT flavour. */
struct pci_ops sni_pcit_ops = {
	.read = pcit_read,
	.write = pcit_write,
};
| linux-master | arch/mips/pci/ops-sni.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005-2009, 2010 Cavium Networks
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>
#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-sli-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/pci-octeon.h>
/*
* Each bit in msi_free_irq_bitmask represents a MSI interrupt that is
* in use.
*/
static u64 msi_free_irq_bitmask[4];
/*
* Each bit in msi_multiple_irq_bitmask tells that the device using
* this bit in msi_free_irq_bitmask is also using the next bit. This
* is used so we can disable all of the MSI interrupts when a device
* uses multiple.
*/
static u64 msi_multiple_irq_bitmask[4];
/*
* This lock controls updates to msi_free_irq_bitmask and
* msi_multiple_irq_bitmask.
*/
static DEFINE_SPINLOCK(msi_free_irq_bitmask_lock);
/*
* Number of MSI IRQs used. This variable is set up in
* the module init time.
*/
static int msi_irq_size;
/**
* arch_setup_msi_irq() - setup MSI IRQs for a device
* @dev: Device requesting MSI interrupts
* @desc: MSI descriptor
*
* Called when a driver requests MSI interrupts instead of the
* legacy INT A-D. This routine will allocate multiple interrupts
* for MSI devices that support them. A device can override this by
* programming the MSI control bits [6:4] before calling
* pci_enable_msi().
*
* Return: %0 on success, non-%0 on error.
*/
/**
 * arch_setup_msi_irq() - setup MSI IRQs for a device
 * @dev:    Device requesting MSI interrupts
 * @desc:   MSI descriptor
 *
 * Called when a driver requests MSI interrupts instead of the
 * legacy INT A-D. This routine will allocate multiple interrupts
 * for MSI devices that support them. A device can override this by
 * programming the MSI control bits [6:4] before calling
 * pci_enable_msi().
 *
 * Fix: search_mask was built as `(1 << irq_step) - 1` with int
 * arithmetic; for a device requesting 32 vectors (request_private_bits
 * == 5, irq_step == 32) this shifts by the full width of int, which is
 * undefined behaviour.  Build it in 64-bit to match the u64 bitmaps.
 *
 * Return: %0 on success, non-%0 on error.
 */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct msi_msg msg;
	u16 control;
	int configured_private_bits;
	int request_private_bits;
	int irq = 0;
	int irq_step;
	u64 search_mask;
	int index;

	if (desc->pci.msi_attrib.is_msix)
		return -EINVAL;

	/*
	 * Read the MSI config to figure out how many IRQs this device
	 * wants.  Most devices only want 1, which will give
	 * configured_private_bits and request_private_bits equal 0.
	 */
	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);

	/*
	 * If the number of private bits has been configured then use
	 * that value instead of the requested number. This gives the
	 * driver the chance to override the number of interrupts
	 * before calling pci_enable_msi().
	 */
	configured_private_bits = (control & PCI_MSI_FLAGS_QSIZE) >> 4;
	if (configured_private_bits == 0) {
		/* Nothing is configured, so use the hardware requested size */
		request_private_bits = (control & PCI_MSI_FLAGS_QMASK) >> 1;
	} else {
		/*
		 * Use the number of configured bits, assuming the
		 * driver wanted to override the hardware request
		 * value.
		 */
		request_private_bits = configured_private_bits;
	}

	/*
	 * The PCI 2.3 spec mandates that there are at most 32
	 * interrupts. If this device asks for more, only give it one.
	 */
	if (request_private_bits > 5)
		request_private_bits = 0;

try_only_one:
	/*
	 * The IRQs have to be aligned on a power of two based on the
	 * number being requested.
	 */
	irq_step = 1 << request_private_bits;

	/* Mask with one bit for each IRQ; 64-bit to allow irq_step == 32. */
	search_mask = (1ull << irq_step) - 1;

	/*
	 * We're going to search msi_free_irq_bitmask_lock for zero
	 * bits. This represents an MSI interrupt number that isn't in
	 * use.
	 */
	spin_lock(&msi_free_irq_bitmask_lock);
	for (index = 0; index < msi_irq_size/64; index++) {
		for (irq = 0; irq < 64; irq += irq_step) {
			if ((msi_free_irq_bitmask[index] & (search_mask << irq)) == 0) {
				msi_free_irq_bitmask[index] |= search_mask << irq;
				msi_multiple_irq_bitmask[index] |= (search_mask >> 1) << irq;
				goto msi_irq_allocated;
			}
		}
	}
msi_irq_allocated:
	spin_unlock(&msi_free_irq_bitmask_lock);

	/* Make sure the search for available interrupts didn't fail */
	if (irq >= 64) {
		if (request_private_bits) {
			pr_err("arch_setup_msi_irq: Unable to find %d free interrupts, trying just one",
			       1 << request_private_bits);
			request_private_bits = 0;
			goto try_only_one;
		} else
			panic("arch_setup_msi_irq: Unable to find a free MSI interrupt");
	}

	/* MSI interrupts start at logical IRQ OCTEON_IRQ_MSI_BIT0 */
	irq += index*64;
	irq += OCTEON_IRQ_MSI_BIT0;

	/* The MSI write target depends on how BAR0 is mapped. */
	switch (octeon_dma_bar_type) {
	case OCTEON_DMA_BAR_TYPE_SMALL:
		/* When not using big bar, Bar 0 is based at 128MB */
		msg.address_lo =
			((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
		msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
		break;
	case OCTEON_DMA_BAR_TYPE_BIG:
		/* When using big bar, Bar 0 is based at 0 */
		msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;
		msg.address_hi = (0 + CVMX_PCI_MSI_RCV) >> 32;
		break;
	case OCTEON_DMA_BAR_TYPE_PCIE:
		/* When using PCIe, Bar 0 is based at 0 */
		/* FIXME CVMX_NPEI_MSI_RCV* other than 0? */
		msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff;
		msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32;
		break;
	case OCTEON_DMA_BAR_TYPE_PCIE2:
		/* When using PCIe2, Bar 0 is based at 0 */
		msg.address_lo = (0 + CVMX_SLI_PCIE_MSI_RCV) & 0xffffffff;
		msg.address_hi = (0 + CVMX_SLI_PCIE_MSI_RCV) >> 32;
		break;
	default:
		panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type");
	}
	msg.data = irq - OCTEON_IRQ_MSI_BIT0;

	/* Update the number of IRQs the device has available to it */
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= request_private_bits << 4;
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);

	irq_set_msi_desc(irq, desc);
	pci_write_msi_msg(irq, &msg);
	return 0;
}
/**
* arch_teardown_msi_irq() - release MSI IRQs for a device
* @irq: The devices first irq number. There may be multiple in sequence.
*
* Called when a device no longer needs its MSI interrupts. All
* MSI interrupts for the device are freed.
*/
/**
 * arch_teardown_msi_irq() - release MSI IRQs for a device
 * @irq:    The devices first irq number. There may be multiple in sequence.
 *
 * Called when a device no longer needs its MSI interrupts. All
 * MSI interrupts for the device are freed.
 *
 * Fixes: (1) the upper range check used `>`, accepting the first IRQ
 * past the valid window (valid IRQs are OCTEON_IRQ_MSI_BIT0 ..
 * OCTEON_IRQ_MSI_BIT0 + msi_irq_size - 1); (2) the free bitmask was
 * built as `(1 << number_irqs) - 1` with int arithmetic, which
 * overflows for allocations of 31 or 32 vectors - build it in 64-bit
 * to match the u64 bitmaps.
 */
void arch_teardown_msi_irq(unsigned int irq)
{
	int number_irqs;
	u64 bitmask;
	int index = 0;
	int irq0;

	if ((irq < OCTEON_IRQ_MSI_BIT0)
		|| (irq >= msi_irq_size + OCTEON_IRQ_MSI_BIT0))
		panic("arch_teardown_msi_irq: Attempted to teardown illegal "
		      "MSI interrupt (%d)", irq);

	irq -= OCTEON_IRQ_MSI_BIT0;
	index = irq / 64;
	irq0 = irq % 64;

	/*
	 * Count the number of IRQs we need to free by looking at the
	 * msi_multiple_irq_bitmask. Each bit set means that the next
	 * IRQ is also owned by this device.
	 */
	number_irqs = 0;
	while ((irq0 + number_irqs < 64) &&
	       (msi_multiple_irq_bitmask[index]
		& (1ull << (irq0 + number_irqs))))
		number_irqs++;
	number_irqs++;
	/* Mask with one bit for each IRQ */
	bitmask = (1ull << number_irqs) - 1;
	/* Shift the mask to the correct bit location */
	bitmask <<= irq0;
	if ((msi_free_irq_bitmask[index] & bitmask) != bitmask)
		panic("arch_teardown_msi_irq: Attempted to teardown MSI "
		      "interrupt (%d) not in use", irq);

	/* Checks are done, update the in use bitmask */
	spin_lock(&msi_free_irq_bitmask_lock);
	msi_free_irq_bitmask[index] &= ~bitmask;
	msi_multiple_irq_bitmask[index] &= ~bitmask;
	spin_unlock(&msi_free_irq_bitmask_lock);
}
static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock);
static u64 msi_rcv_reg[4];
static u64 mis_ena_reg[4];
/*
 * Unmask one MSI vector in the PCIe MSI enable CSR (read-modify-write
 * of the 64-bit enable register covering this vector, under the shared
 * lock).  The trailing read flushes the write before IRQs are restored.
 */
static void octeon_irq_msi_enable_pcie(struct irq_data *data)
{
	u64 en;
	unsigned long flags;
	int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0;
	int irq_index = msi_number >> 6;
	int irq_bit = msi_number & 0x3f;

	raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
	en = cvmx_read_csr(mis_ena_reg[irq_index]);
	en |= 1ull << irq_bit;
	cvmx_write_csr(mis_ena_reg[irq_index], en);
	cvmx_read_csr(mis_ena_reg[irq_index]);
	raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
}
/*
 * Mask one MSI vector in the PCIe MSI enable CSR; mirror image of
 * octeon_irq_msi_enable_pcie().
 */
static void octeon_irq_msi_disable_pcie(struct irq_data *data)
{
	u64 en;
	unsigned long flags;
	int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0;
	int irq_index = msi_number >> 6;
	int irq_bit = msi_number & 0x3f;

	raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
	en = cvmx_read_csr(mis_ena_reg[irq_index]);
	en &= ~(1ull << irq_bit);
	cvmx_write_csr(mis_ena_reg[irq_index], en);
	cvmx_read_csr(mis_ena_reg[irq_index]);
	raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
}
/* irq_chip used when the Octeon is in PCIe mode (per-vector masking). */
static struct irq_chip octeon_irq_chip_msi_pcie = {
	.name = "MSI",
	.irq_enable = octeon_irq_msi_enable_pcie,
	.irq_disable = octeon_irq_msi_disable_pcie,
};
static void octeon_irq_msi_enable_pci(struct irq_data *data)
{
	/*
	 * Octeon PCI doesn't have the ability to mask/unmask MSI
	 * interrupts individually. Instead of masking/unmasking them
	 * in groups of 16, we simply assume MSI devices are well
	 * behaved. MSI interrupts are always enabled and the ACK is
	 * assumed to be enough.
	 */
}
static void octeon_irq_msi_disable_pci(struct irq_data *data)
{
	/* See comment in enable: PCI mode has no per-vector mask. */
}
/* irq_chip used when the Octeon is in legacy PCI mode (no masking). */
static struct irq_chip octeon_irq_chip_msi_pci = {
	.name = "MSI",
	.irq_enable = octeon_irq_msi_enable_pci,
	.irq_disable = octeon_irq_msi_disable_pci,
};
/*
* Called by the interrupt handling code when an MSI interrupt
* occurs.
*/
static irqreturn_t __octeon_msi_do_interrupt(int index, u64 msi_bits)
{
int irq;
int bit;
bit = fls64(msi_bits);
if (bit) {
bit--;
/* Acknowledge it first. */
cvmx_write_csr(msi_rcv_reg[index], 1ull << bit);
irq = bit + OCTEON_IRQ_MSI_BIT0 + 64 * index;
do_IRQ(irq);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/*
 * Generate one top-level interrupt handler per 64-vector MSI group:
 * read that group's pending bits and hand them to
 * __octeon_msi_do_interrupt().
 */
#define OCTEON_MSI_INT_HANDLER_X(x)					\
static irqreturn_t octeon_msi_interrupt##x(int cpl, void *dev_id)	\
{									\
	u64 msi_bits = cvmx_read_csr(msi_rcv_reg[(x)]);			\
	return __octeon_msi_do_interrupt((x), msi_bits);		\
}

/*
 * Create octeon_msi_interrupt{0-3} function body
 */
OCTEON_MSI_INT_HANDLER_X(0);
OCTEON_MSI_INT_HANDLER_X(1);
OCTEON_MSI_INT_HANDLER_X(2);
OCTEON_MSI_INT_HANDLER_X(3);
/*
* Initializes the MSI interrupt handling code
*/
/*
 * Initializes the MSI interrupt handling code.
 *
 * Selects the MSI receive/enable CSR addresses and the matching
 * irq_chip based on the DMA BAR type, installs a simple-irq handler
 * on every MSI IRQ, and hooks the four top-level MSI summary
 * interrupts.
 *
 * Return: 0 (also when MSI is unavailable); panics on request_irq
 * failure during early boot.
 */
int __init octeon_msi_initialize(void)
{
	int irq;
	struct irq_chip *msi;

	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
		/* No usable DMA BAR configuration: MSI not supported. */
		return 0;
	} else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
		msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
		msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
		msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
		msi_rcv_reg[3] = CVMX_PEXP_NPEI_MSI_RCV3;
		mis_ena_reg[0] = CVMX_PEXP_NPEI_MSI_ENB0;
		mis_ena_reg[1] = CVMX_PEXP_NPEI_MSI_ENB1;
		mis_ena_reg[2] = CVMX_PEXP_NPEI_MSI_ENB2;
		mis_ena_reg[3] = CVMX_PEXP_NPEI_MSI_ENB3;
		msi = &octeon_irq_chip_msi_pcie;
	} else {
		msi_rcv_reg[0] = CVMX_NPI_NPI_MSI_RCV;
/*
 * Classic PCI only has one MSI receive register and no enable
 * registers.  Fill the remaining slots with an address whose access
 * faults (address error exception) so any accidental use is caught
 * immediately.  Note: no trailing semicolon in the macro definition,
 * otherwise every use expands to an extra empty statement.
 */
#define INVALID_GENERATE_ADE 0x8700000000000000ULL
		msi_rcv_reg[1] = INVALID_GENERATE_ADE;
		msi_rcv_reg[2] = INVALID_GENERATE_ADE;
		msi_rcv_reg[3] = INVALID_GENERATE_ADE;
		mis_ena_reg[0] = INVALID_GENERATE_ADE;
		mis_ena_reg[1] = INVALID_GENERATE_ADE;
		mis_ena_reg[2] = INVALID_GENERATE_ADE;
		mis_ena_reg[3] = INVALID_GENERATE_ADE;
		msi = &octeon_irq_chip_msi_pci;
	}

	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_LAST; irq++)
		irq_set_chip_and_handler(irq, msi, handle_simple_irq);

	if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* PCIe: four summary interrupts, 64 MSIs each. */
		if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0,
				0, "MSI[0:63]", octeon_msi_interrupt0))
			panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed");

		if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt1,
				0, "MSI[64:127]", octeon_msi_interrupt1))
			panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed");

		if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt2,
				0, "MSI[128:191]", octeon_msi_interrupt2))
			panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed");

		if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt3,
				0, "MSI[192:255]", octeon_msi_interrupt3))
			panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed");

		msi_irq_size = 256;
	} else if (octeon_is_pci_host()) {
		/*
		 * Classic PCI: all four summary interrupts funnel into
		 * the single receive register, so they share handler 0.
		 */
		if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0,
				0, "MSI[0:15]", octeon_msi_interrupt0))
			panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed");

		if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt0,
				0, "MSI[16:31]", octeon_msi_interrupt0))
			panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed");

		if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt0,
				0, "MSI[32:47]", octeon_msi_interrupt0))
			panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed");

		if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt0,
				0, "MSI[48:63]", octeon_msi_interrupt0))
			panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed");

		msi_irq_size = 64;
	}
	return 0;
}
subsys_initcall(octeon_msi_initialize);
| linux-master | arch/mips/pci/msi-octeon.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ralink RT3662/RT3883 SoC PCI support
*
* Copyright (C) 2011-2013 Gabor Juhos <[email protected]>
*
* Parts of this file are based on Ralink's 2.6.21 BSP
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <asm/mach-ralink/rt3883.h>
#include <asm/mach-ralink/ralink_regs.h>
/* PCI memory window base/size presented to devices */
#define RT3883_MEMORY_BASE		0x00000000
#define RT3883_MEMORY_SIZE		0x02000000

/* Host controller register offsets */
#define RT3883_PCI_REG_PCICFG		0x00
/* PCICFG fields: P2P bridge device number mask/shift, bus reset bit */
#define	  RT3883_PCICFG_P2P_BR_DEVNUM_M	0xf
#define	  RT3883_PCICFG_P2P_BR_DEVNUM_S	16
#define	  RT3883_PCICFG_PCIRST		BIT(1)
#define RT3883_PCI_REG_PCIRAW		0x04
#define RT3883_PCI_REG_PCIINT		0x08	/* raw interrupt status */
#define RT3883_PCI_REG_PCIENA		0x0c	/* interrupt enable mask */

#define RT3883_PCI_REG_CFGADDR		0x20	/* config space address */
#define RT3883_PCI_REG_CFGDATA		0x24	/* config space data */
#define RT3883_PCI_REG_MEMBASE		0x28
#define RT3883_PCI_REG_IOBASE		0x2c
#define RT3883_PCI_REG_ARBCTL		0x80

/* Per-port register blocks: port 0 = PCI, port 1 = PCIe */
#define RT3883_PCI_REG_BASE(_x)		(0x1000 + (_x) * 0x1000)
#define RT3883_PCI_REG_BAR0SETUP(_x)	(RT3883_PCI_REG_BASE((_x)) + 0x10)
#define RT3883_PCI_REG_IMBASEBAR0(_x)	(RT3883_PCI_REG_BASE((_x)) + 0x18)
#define RT3883_PCI_REG_ID(_x)		(RT3883_PCI_REG_BASE((_x)) + 0x30)
#define RT3883_PCI_REG_CLASS(_x)	(RT3883_PCI_REG_BASE((_x)) + 0x34)
#define RT3883_PCI_REG_SUBID(_x)	(RT3883_PCI_REG_BASE((_x)) + 0x38)
#define RT3883_PCI_REG_STATUS(_x)	(RT3883_PCI_REG_BASE((_x)) + 0x50)

/* Interface mode bitmask derived from the devicetree (see probe) */
#define RT3883_PCI_MODE_NONE	0
#define RT3883_PCI_MODE_PCI	BIT(0)
#define RT3883_PCI_MODE_PCIE	BIT(1)
#define RT3883_PCI_MODE_BOTH	(RT3883_PCI_MODE_PCI | RT3883_PCI_MODE_PCIE)

#define RT3883_PCI_IRQ_COUNT	32

/* Device number assigned to the internal PCI-to-PCI bridge */
#define RT3883_P2P_BR_DEVNUM	1
/* Per-controller state for the RT3883 PCI/PCIe host. */
struct rt3883_pci_controller {
	void __iomem *base;			/* mapped controller registers */

	struct device_node *intc_of_node;	/* interrupt-controller child */
	struct irq_domain *irq_domain;		/* domain for the 32 PCI IRQs */

	struct pci_controller pci_controller;	/* generic MIPS PCI hose */
	struct resource io_res;			/* PCI I/O window (from DT) */
	struct resource mem_res;		/* PCI memory window (from DT) */

	bool pcie_ready;			/* PCIe link came up in preinit */
};
/* Recover our controller state from a pci_bus: sysdata holds the
 * embedded generic hose, so container_of() gets us back. */
static inline struct rt3883_pci_controller *
pci_bus_to_rt3883_controller(struct pci_bus *bus)
{
	return container_of((struct pci_controller *)bus->sysdata,
			    struct rt3883_pci_controller, pci_controller);
}
/* Read a 32-bit host controller register at byte offset @reg. */
static inline u32 rt3883_pci_r32(struct rt3883_pci_controller *rpc,
				 unsigned reg)
{
	return ioread32(rpc->base + reg);
}
/* Write @val to the 32-bit host controller register at byte offset @reg. */
static inline void rt3883_pci_w32(struct rt3883_pci_controller *rpc,
				  u32 val, unsigned reg)
{
	iowrite32(val, rpc->base + reg);
}
/*
 * Build the CFGADDR register value for a config space access:
 * enable bit (31), bus[23:16], slot[15:11], func[10:8] and the
 * dword-aligned register offset.
 */
static inline u32 rt3883_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
					 unsigned int func, unsigned int where)
{
	u32 addr = 0x80000000;

	addr |= bus << 16;
	addr |= slot << 11;
	addr |= func << 8;
	addr |= where & 0xfc;

	return addr;
}
/* Raw 32-bit config space read via the CFGADDR/CFGDATA pair. */
static u32 rt3883_pci_read_cfg32(struct rt3883_pci_controller *rpc,
				 unsigned bus, unsigned slot,
				 unsigned func, unsigned reg)
{
	rt3883_pci_w32(rpc, rt3883_pci_get_cfgaddr(bus, slot, func, reg),
		       RT3883_PCI_REG_CFGADDR);
	return rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);
}
/* Raw 32-bit config space write via the CFGADDR/CFGDATA pair. */
static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc,
				   unsigned bus, unsigned slot,
				   unsigned func, unsigned reg, u32 val)
{
	rt3883_pci_w32(rpc, rt3883_pci_get_cfgaddr(bus, slot, func, reg),
		       RT3883_PCI_REG_CFGADDR);
	rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA);
}
/*
 * Chained handler for the single upstream PCI interrupt: dispatch
 * every pending-and-enabled PCI source through our irq domain.
 */
static void rt3883_pci_irq_handler(struct irq_desc *desc)
{
	struct rt3883_pci_controller *rpc = irq_desc_get_handler_data(desc);
	u32 pending;

	pending = rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIINT) &
		  rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA);

	if (!pending) {
		spurious_interrupt();
		return;
	}

	while (pending) {
		generic_handle_domain_irq(rpc->irq_domain, __ffs(pending));
		pending &= pending - 1;	/* clear lowest set bit */
	}
}
/* Unmask one PCI interrupt source by setting its PCIENA bit. */
static void rt3883_pci_irq_unmask(struct irq_data *d)
{
	struct rt3883_pci_controller *rpc = irq_data_get_irq_chip_data(d);
	u32 ena;

	ena = rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA);
	ena |= BIT(d->hwirq);
	rt3883_pci_w32(rpc, ena, RT3883_PCI_REG_PCIENA);
	/* flush write */
	rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA);
}
/* Mask one PCI interrupt source by clearing its PCIENA bit. */
static void rt3883_pci_irq_mask(struct irq_data *d)
{
	struct rt3883_pci_controller *rpc = irq_data_get_irq_chip_data(d);
	u32 ena;

	ena = rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA);
	ena &= ~BIT(d->hwirq);
	rt3883_pci_w32(rpc, ena, RT3883_PCI_REG_PCIENA);
	/* flush write */
	rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA);
}
/* irq_chip for the 32 PCI interrupt sources behind this controller. */
static struct irq_chip rt3883_pci_irq_chip = {
	.name		= "RT3883 PCI",
	.irq_mask	= rt3883_pci_irq_mask,
	.irq_unmask	= rt3883_pci_irq_unmask,
	.irq_mask_ack	= rt3883_pci_irq_mask,
};
/*
 * Domain .map callback: wire a virq to our level-triggered PCI chip
 * and stash the controller (host_data) as chip data for mask/unmask.
 */
static int rt3883_pci_irq_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &rt3883_pci_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, d->host_data);

	return 0;
}
/* One-cell DT interrupt specifiers map directly to hwirq numbers. */
static const struct irq_domain_ops rt3883_pci_irq_domain_ops = {
	.map = rt3883_pci_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
/*
 * Set up the PCI interrupt path: map the controller's upstream IRQ
 * from the DT interrupt-controller child, mask all sources, create
 * the linear irq domain and install the chained handler.
 *
 * Returns 0 on success or a negative errno.
 */
static int rt3883_pci_irq_init(struct device *dev,
			       struct rt3883_pci_controller *rpc)
{
	int irq;

	irq = irq_of_parse_and_map(rpc->intc_of_node, 0);
	if (irq == 0) {
		dev_err(dev, "%pOF has no IRQ", rpc->intc_of_node);
		return -EINVAL;
	}

	/* disable all interrupts */
	rt3883_pci_w32(rpc, 0, RT3883_PCI_REG_PCIENA);

	rpc->irq_domain =
		irq_domain_add_linear(rpc->intc_of_node, RT3883_PCI_IRQ_COUNT,
				      &rt3883_pci_irq_domain_ops,
				      rpc);
	if (!rpc->irq_domain) {
		dev_err(dev, "unable to add IRQ domain\n");
		return -ENODEV;
	}

	irq_set_chained_handler_and_data(irq, rt3883_pci_irq_handler, rpc);

	return 0;
}
/*
 * pci_ops .read: fetch the 32-bit config dword via CFGADDR/CFGDATA
 * and extract the requested 1/2/4-byte lane.  Accesses to bus 1
 * (PCIe) are rejected while the link is down.
 */
static int rt3883_pci_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *val)
{
	struct rt3883_pci_controller *rpc = pci_bus_to_rt3883_controller(bus);
	unsigned int shift = (where & 3) << 3;
	u32 data;

	if (!rpc->pcie_ready && bus->number == 1)
		return PCIBIOS_DEVICE_NOT_FOUND;

	rt3883_pci_w32(rpc,
		       rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
					      PCI_FUNC(devfn), where),
		       RT3883_PCI_REG_CFGADDR);
	data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);

	switch (size) {
	case 1:
		*val = (data >> shift) & 0xff;
		break;
	case 2:
		*val = (data >> shift) & 0xffff;
		break;
	case 4:
		*val = data;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * pci_ops .write: read-modify-write the 32-bit config dword so that
 * 1/2-byte writes only touch the addressed lane.  Accesses to bus 1
 * (PCIe) are rejected while the link is down.
 */
static int rt3883_pci_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 val)
{
	struct rt3883_pci_controller *rpc = pci_bus_to_rt3883_controller(bus);
	unsigned int shift = (where & 3) << 3;
	u32 data;

	if (!rpc->pcie_ready && bus->number == 1)
		return PCIBIOS_DEVICE_NOT_FOUND;

	rt3883_pci_w32(rpc,
		       rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
					      PCI_FUNC(devfn), where),
		       RT3883_PCI_REG_CFGADDR);
	data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);

	switch (size) {
	case 1:
		data = (data & ~(0xff << shift)) | (val << shift);
		break;
	case 2:
		data = (data & ~(0xffff << shift)) | (val << shift);
		break;
	case 4:
		data = val;
		break;
	}

	rt3883_pci_w32(rpc, data, RT3883_PCI_REG_CFGDATA);

	return PCIBIOS_SUCCESSFUL;
}
/* Config space accessors handed to the generic MIPS PCI layer. */
static struct pci_ops rt3883_pci_ops = {
	.read	= rt3883_pci_config_read,
	.write	= rt3883_pci_config_write,
};
/*
 * One-time hardware bring-up of the PCI/PCIe host block.
 *
 * @rpc:  controller state (register window must already be mapped)
 * @mode: bitmask of RT3883_PCI_MODE_{PCI,PCIE} selecting which
 *        interfaces to clock, reset and enable
 *
 * NOTE(review): the reset pulses, clock-generator writes and long
 * msleep()s appear to follow the vendor bring-up sequence; the
 * ordering looks deliberate, so do not reorder without hardware docs.
 */
static void rt3883_pci_preinit(struct rt3883_pci_controller *rpc, unsigned mode)
{
	u32 syscfg1;
	u32 rstctrl;
	u32 clkcfg1;
	u32 t;

	rstctrl = rt_sysc_r32(RT3883_SYSC_REG_RSTCTRL);
	syscfg1 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
	clkcfg1 = rt_sysc_r32(RT3883_SYSC_REG_CLKCFG1);

	if (mode & RT3883_PCI_MODE_PCIE) {
		/* hold the PCIe block in reset while reprogramming clocks */
		rstctrl |= RT3883_RSTCTRL_PCIE;
		rt_sysc_w32(rstctrl, RT3883_SYSC_REG_RSTCTRL);

		/* setup PCI PAD drive mode */
		syscfg1 &= ~(0x30);
		syscfg1 |= (2 << 4);
		rt_sysc_w32(syscfg1, RT3883_SYSC_REG_SYSCFG1);

		/* gate the PCIe clock generator (bit 31) during reprogramming */
		t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN0);
		t &= ~BIT(31);
		rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN0);

		t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN1);
		t &= 0x80ffffff;
		rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN1);

		t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN1);
		t |= 0xa << 24;
		rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN1);

		/* re-enable the clock generator */
		t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN0);
		t |= BIT(31);
		rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN0);

		msleep(50);

		/* release the PCIe block from reset */
		rstctrl &= ~RT3883_RSTCTRL_PCIE;
		rt_sysc_w32(rstctrl, RT3883_SYSC_REG_RSTCTRL);
	}

	/* run both interfaces in host / root-complex mode */
	syscfg1 |= (RT3883_SYSCFG1_PCIE_RC_MODE | RT3883_SYSCFG1_PCI_HOST_MODE);

	/* enable clocks and release resets only for the requested modes */
	clkcfg1 &= ~(RT3883_CLKCFG1_PCI_CLK_EN | RT3883_CLKCFG1_PCIE_CLK_EN);

	if (mode & RT3883_PCI_MODE_PCI) {
		clkcfg1 |= RT3883_CLKCFG1_PCI_CLK_EN;
		rstctrl &= ~RT3883_RSTCTRL_PCI;
	}

	if (mode & RT3883_PCI_MODE_PCIE) {
		clkcfg1 |= RT3883_CLKCFG1_PCIE_CLK_EN;
		rstctrl &= ~RT3883_RSTCTRL_PCIE;
	}

	rt_sysc_w32(syscfg1, RT3883_SYSC_REG_SYSCFG1);
	rt_sysc_w32(rstctrl, RT3883_SYSC_REG_RSTCTRL);
	rt_sysc_w32(clkcfg1, RT3883_SYSC_REG_CLKCFG1);

	msleep(500);

	/*
	 * setup the device number of the P2P bridge
	 * and de-assert the reset line
	 */
	t = (RT3883_P2P_BR_DEVNUM << RT3883_PCICFG_P2P_BR_DEVNUM_S);
	rt3883_pci_w32(rpc, t, RT3883_PCI_REG_PCICFG);

	/* flush write */
	rt3883_pci_r32(rpc, RT3883_PCI_REG_PCICFG);
	msleep(500);

	if (mode & RT3883_PCI_MODE_PCIE) {
		msleep(500);

		/* port 1 status BIT(0) — presumably PCIe link-up; verify */
		t = rt3883_pci_r32(rpc, RT3883_PCI_REG_STATUS(1));

		rpc->pcie_ready = t & BIT(0);

		if (!rpc->pcie_ready) {
			/* reset the PCIe block */
			t = rt_sysc_r32(RT3883_SYSC_REG_RSTCTRL);
			t |= RT3883_RSTCTRL_PCIE;
			rt_sysc_w32(t, RT3883_SYSC_REG_RSTCTRL);
			t &= ~RT3883_RSTCTRL_PCIE;
			rt_sysc_w32(t, RT3883_SYSC_REG_RSTCTRL);

			/* turn off PCIe clock */
			t = rt_sysc_r32(RT3883_SYSC_REG_CLKCFG1);
			t &= ~RT3883_CLKCFG1_PCIE_CLK_EN;
			rt_sysc_w32(t, RT3883_SYSC_REG_CLKCFG1);

			t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN0);
			t &= ~0xf000c080;
			rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN0);
		}
	}

	/* enable PCI arbiter */
	rt3883_pci_w32(rpc, 0x79, RT3883_PCI_REG_ARBCTL);
}
/*
 * Platform probe: map the controller registers, locate the DT
 * interrupt-controller and host-bridge children, derive the PCI/PCIe
 * mode from the children's device numbers, bring the hardware up and
 * register the PCI controller with the generic MIPS PCI layer.
 *
 * Returns 0 on success or a negative errno (DT node references are
 * dropped on the error paths; on success they stay held for the
 * lifetime of the controller).
 */
static int rt3883_pci_probe(struct platform_device *pdev)
{
	struct rt3883_pci_controller *rpc;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *child;
	u32 val;
	int err;
	int mode;

	rpc = devm_kzalloc(dev, sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return -ENOMEM;

	rpc->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(rpc->base))
		return PTR_ERR(rpc->base);

	/* find the interrupt controller child node */
	/* note: the loop break keeps the child reference held on purpose */
	for_each_child_of_node(np, child) {
		if (of_property_read_bool(child, "interrupt-controller")) {
			rpc->intc_of_node = child;
			break;
		}
	}

	if (!rpc->intc_of_node) {
		dev_err(dev, "%pOF has no %s child node",
			np, "interrupt controller");
		return -EINVAL;
	}

	/* find the PCI host bridge child node */
	for_each_child_of_node(np, child) {
		if (of_node_is_type(child, "pci")) {
			rpc->pci_controller.of_node = child;
			break;
		}
	}

	if (!rpc->pci_controller.of_node) {
		dev_err(dev, "%pOF has no %s child node",
			np, "PCI host bridge");
		err = -EINVAL;
		goto err_put_intc_node;
	}

	/*
	 * Derive the interface mode from the devices described in the
	 * DT: slot 1 sits on PCIe, slots 17/18 on classic PCI.
	 */
	mode = RT3883_PCI_MODE_NONE;
	for_each_available_child_of_node(rpc->pci_controller.of_node, child) {
		int devfn;

		if (!of_node_is_type(child, "pci"))
			continue;

		devfn = of_pci_get_devfn(child);
		if (devfn < 0)
			continue;

		switch (PCI_SLOT(devfn)) {
		case 1:
			mode |= RT3883_PCI_MODE_PCIE;
			break;

		case 17:
		case 18:
			mode |= RT3883_PCI_MODE_PCI;
			break;
		}
	}

	if (mode == RT3883_PCI_MODE_NONE) {
		dev_err(dev, "unable to determine PCI mode\n");
		err = -EINVAL;
		goto err_put_hb_node;
	}

	dev_info(dev, "mode:%s%s\n",
		 (mode & RT3883_PCI_MODE_PCI) ? " PCI" : "",
		 (mode & RT3883_PCI_MODE_PCIE) ? " PCIe" : "");

	rt3883_pci_preinit(rpc, mode);

	rpc->pci_controller.pci_ops = &rt3883_pci_ops;
	rpc->pci_controller.io_resource = &rpc->io_res;
	rpc->pci_controller.mem_resource = &rpc->mem_res;

	/* Load PCI I/O and memory resources from DT */
	pci_load_of_ranges(&rpc->pci_controller,
			   rpc->pci_controller.of_node);

	rt3883_pci_w32(rpc, rpc->mem_res.start, RT3883_PCI_REG_MEMBASE);
	rt3883_pci_w32(rpc, rpc->io_res.start, RT3883_PCI_REG_IOBASE);

	ioport_resource.start = rpc->io_res.start;
	ioport_resource.end = rpc->io_res.end;

	/* PCI: program port 0's BAR setup, inbound window and IDs */
	rt3883_pci_w32(rpc, 0x03ff0000, RT3883_PCI_REG_BAR0SETUP(0));
	rt3883_pci_w32(rpc, RT3883_MEMORY_BASE, RT3883_PCI_REG_IMBASEBAR0(0));
	rt3883_pci_w32(rpc, 0x08021814, RT3883_PCI_REG_ID(0));
	rt3883_pci_w32(rpc, 0x00800001, RT3883_PCI_REG_CLASS(0));
	rt3883_pci_w32(rpc, 0x28801814, RT3883_PCI_REG_SUBID(0));

	/* PCIe: same for port 1 */
	rt3883_pci_w32(rpc, 0x03ff0000, RT3883_PCI_REG_BAR0SETUP(1));
	rt3883_pci_w32(rpc, RT3883_MEMORY_BASE, RT3883_PCI_REG_IMBASEBAR0(1));
	rt3883_pci_w32(rpc, 0x08021814, RT3883_PCI_REG_ID(1));
	rt3883_pci_w32(rpc, 0x06040001, RT3883_PCI_REG_CLASS(1));
	rt3883_pci_w32(rpc, 0x28801814, RT3883_PCI_REG_SUBID(1));

	err = rt3883_pci_irq_init(dev, rpc);
	if (err)
		goto err_put_hb_node;

	/* PCIe: enable I/O, memory and bus mastering on the root */
	val = rt3883_pci_read_cfg32(rpc, 0, 0x01, 0, PCI_COMMAND);
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
	rt3883_pci_write_cfg32(rpc, 0, 0x01, 0, PCI_COMMAND, val);

	/* PCI: same for the classic PCI side */
	val = rt3883_pci_read_cfg32(rpc, 0, 0x00, 0, PCI_COMMAND);
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
	rt3883_pci_write_cfg32(rpc, 0, 0x00, 0, PCI_COMMAND, val);

	if (mode == RT3883_PCI_MODE_PCIE) {
		rt3883_pci_w32(rpc, 0x03ff0001, RT3883_PCI_REG_BAR0SETUP(0));
		rt3883_pci_w32(rpc, 0x03ff0001, RT3883_PCI_REG_BAR0SETUP(1));

		rt3883_pci_write_cfg32(rpc, 0, RT3883_P2P_BR_DEVNUM, 0,
				       PCI_BASE_ADDRESS_0,
				       RT3883_MEMORY_BASE);
		/* flush write */
		rt3883_pci_read_cfg32(rpc, 0, RT3883_P2P_BR_DEVNUM, 0,
				      PCI_BASE_ADDRESS_0);
	} else {
		rt3883_pci_write_cfg32(rpc, 0, RT3883_P2P_BR_DEVNUM, 0,
				       PCI_IO_BASE, 0x00000101);
	}

	register_pci_controller(&rpc->pci_controller);

	return 0;

err_put_hb_node:
	of_node_put(rpc->pci_controller.of_node);
err_put_intc_node:
	of_node_put(rpc->intc_of_node);
	return err;
}
/* Route INTx lines using the interrupt map from the devicetree. */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return of_irq_parse_and_map_pci(dev, slot, pin);
}
/* No per-device platform fixups are needed on RT3883. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
/* Devicetree match table for this host controller. */
static const struct of_device_id rt3883_pci_ids[] = {
	{ .compatible = "ralink,rt3883-pci" },
	{},
};
/* Platform driver glue; no remove — the controller lives forever. */
static struct platform_driver rt3883_pci_driver = {
	.probe = rt3883_pci_probe,
	.driver = {
		.name = "rt3883-pci",
		.of_match_table = of_match_ptr(rt3883_pci_ids),
	},
};
/* Register early (postcore) so PCI is available before device probe. */
static int __init rt3883_pci_init(void)
{
	return platform_driver_register(&rt3883_pci_driver);
}

postcore_initcall(rt3883_pci_init);
| linux-master | arch/mips/pci/pci-rt3883.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005-2009 Cavium Networks
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/swiotlb.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>
#include <asm/octeon/pci-octeon.h>
/* Use Octeon's on-chip PCI bus arbiter (enabled in octeon_pci_initialize). */
#define USE_OCTEON_INTERNAL_ARBITER

/*
 * Octeon's PCI controller uses did=3, subdid=2 for PCI IO
 * addresses. Use PCI endian swapping 1 so no address swapping is
 * necessary. The Linux io routines will endian swap the data.
 */
#define OCTEON_PCI_IOSPACE_BASE	    0x80011a0400000000ull
#define OCTEON_PCI_IOSPACE_SIZE	    (1ull<<32)

/* Octeon's PCI controller uses did=3, subdid=3 for PCI memory. */
#define OCTEON_PCI_MEMSPACE_OFFSET  (0x00011b0000000000ull)

/* Physical address backing PCI BAR1 (exported for DMA mapping code). */
u64 octeon_bar1_pci_phys;
/**
 * This is the bit decoding used for the Octeon PCI controller addresses.
 * The config accessors below set upper=2, io=1, did=3, subdid=1 and
 * endian_swap=1, then fill in bus/dev/func/reg from the request.
 */
union octeon_pci_address {
	uint64_t u64;
	struct {
		uint64_t upper:2;	/* address space selector */
		uint64_t reserved:13;
		uint64_t io:1;
		uint64_t did:5;		/* device ID */
		uint64_t subdid:3;	/* sub device ID */
		uint64_t reserved2:4;
		uint64_t endian_swap:2;
		uint64_t reserved3:10;
		uint64_t bus:8;		/* PCI bus number */
		uint64_t dev:5;		/* PCI device (slot) */
		uint64_t func:3;	/* PCI function */
		uint64_t reg:8;		/* config register offset */
	} s;
};
/* Board-specific hook used by pcibios_map_irq() to route INTx lines. */
int (*octeon_pcibios_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
/* DMA BAR layout in use; presumably assigned during PCI bring-up. */
enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
/**
 * pcibios_map_irq - map a PCI device and pin to an interrupt line
 * @dev:  the Linux PCI device structure for the device to map
 * @slot: the slot number for this device on __BUS 0__. Linux
 *        enumerates through all the bridges and figures out the
 *        slot on Bus 0 where this device eventually hooks to.
 * @pin:  the PCI interrupt pin read from the device, then swizzled
 *        as it goes through each bridge.
 *
 * Delegates to the board-specific hook in octeon_pcibios_map_irq.
 *
 * Return: interrupt number for the device.
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	if (!octeon_pcibios_map_irq)
		panic("octeon_pcibios_map_irq not set.");

	return octeon_pcibios_map_irq(dev, slot, pin);
}
/*
* Called to perform platform specific PCI setup
*/
/*
 * Called to perform platform specific PCI setup: normalize cache
 * line size and latency timers, enable parity/SERR reporting, and —
 * when the device has the AER extended capability — clear stale
 * error status and enable full error reporting.
 *
 * Bug fix: the ECRC section previously tested and set the capability
 * bits on the 16-bit 'config' variable (which held unrelated DEVCTL
 * flags) and then wrote back the unmodified 'dconfig', so ECRC
 * generation/checking was never actually enabled. It now operates on
 * 'dconfig', the value read from PCI_ERR_CAP.
 */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	uint16_t config;
	uint32_t dconfig;
	int pos;
	/*
	 * Force the Cache line setting to 64 bytes. The standard
	 * Linux bus scan doesn't seem to set it. Octeon really has
	 * 128 byte lines, but Intel bridges get really upset if you
	 * try and set values above 64 bytes. Value is specified in
	 * 32bit words.
	 */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
	/* Set latency timers for all devices */
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);

	/* Enable reporting System errors and parity errors on all devices */
	/* Enable parity checking and error reporting */
	pci_read_config_word(dev, PCI_COMMAND, &config);
	config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
	pci_write_config_word(dev, PCI_COMMAND, config);

	if (dev->subordinate) {
		/* Set latency timers on sub bridges */
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 64);
		/* More bridge error detection */
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
		config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
	}

	/* Enable the PCIe normal error reporting */
	config = PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */
	config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */
	config |= PCI_EXP_DEVCTL_FERE;	/* Fatal Error Reporting */
	config |= PCI_EXP_DEVCTL_URRE;	/* Unsupported Request */
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config);

	/* Find the Advanced Error Reporting capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		/* Clear Uncorrectable Error Status */
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
				      &dconfig);
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
				       dconfig);
		/* Enable reporting of all uncorrectable errors */
		/* Uncorrectable Error Mask - turned on bits disable errors */
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
		/*
		 * Leave severity at HW default. This only controls if
		 * errors are reported as uncorrectable or
		 * correctable, not if the error is reported.
		 */
		/* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
		/* Clear Correctable Error Status */
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
		pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
		/* Enable reporting of all correctable errors */
		/* Correctable Error Mask - turned on bits disable errors */
		pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
		/* Advanced Error Capabilities */
		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
		/* ECRC Generation Enable (only if the device is capable) */
		if (dconfig & PCI_ERR_CAP_ECRC_GENC)
			dconfig |= PCI_ERR_CAP_ECRC_GENE;
		/* ECRC Check Enable (only if the device is capable) */
		if (dconfig & PCI_ERR_CAP_ECRC_CHKC)
			dconfig |= PCI_ERR_CAP_ECRC_CHKE;
		pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
		/* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */

		/* Report all errors to the root complex */
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
				       PCI_ERR_ROOT_CMD_COR_EN |
				       PCI_ERR_ROOT_CMD_NONFATAL_EN |
				       PCI_ERR_ROOT_CMD_FATAL_EN);
		/* Clear the Root status register */
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
	}

	return 0;
}
/**
 * Return the mapping of PCI device number to IRQ line. Each
 * character in the return string represents the interrupt
 * line for the device at that position. Device 1 maps to the
 * first character, etc. The characters A-D are used for PCI
 * interrupts.
 *
 * Returns PCI interrupt mapping
 */
const char *octeon_get_pci_interrupts(void)
{
	/*
	 * Returning an empty string causes the interrupts to be
	 * routed based on the PCI specification. From the PCI spec:
	 *
	 * INTA# of Device Number 0 is connected to IRQW on the system
	 * board. (Device Number has no significance regarding being
	 * located on the system board or in a connector.) INTA# of
	 * Device Number 1 is connected to IRQX on the system
	 * board. INTA# of Device Number 2 is connected to IRQY on the
	 * system board. INTA# of Device Number 3 is connected to IRQZ
	 * on the system board. The table below describes how each
	 * agent's INTx# lines are connected to the system board
	 * interrupt lines. The following equation can be used to
	 * determine to which INTx# signal on the system board a given
	 * device's INTx# line(s) is connected.
	 *
	 * MB = (D + I) MOD 4 MB = System board Interrupt (IRQW = 0,
	 * IRQX = 1, IRQY = 2, and IRQZ = 3) D = Device Number I =
	 * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and
	 * INTD# = 3)
	 */
	/* Devicetree-identified boards take precedence over bootinfo. */
	if (of_machine_is_compatible("dlink,dsr-500n"))
		return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
	switch (octeon_bootinfo->board_type) {
	case CVMX_BOARD_TYPE_NAO38:
		/* This is really the NAC38 */
		return "AAAAADABAAAAAAAAAAAAAAAAAAAAAAAA";
	case CVMX_BOARD_TYPE_EBH3100:
	case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
	case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
		return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
	case CVMX_BOARD_TYPE_BBGW_REF:
		return "AABCD";
	case CVMX_BOARD_TYPE_CUST_DSR1000N:
		return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
	case CVMX_BOARD_TYPE_THUNDER:
	case CVMX_BOARD_TYPE_EBH3000:
	default:
		return "";
	}
}
/**
 * octeon_pci_pcibios_map_irq - map a PCI device and pin to an IRQ line
 * @dev:  the Linux PCI device structure for the device to map
 * @slot: the slot number for this device on __BUS 0__. Linux
 *        enumerates through all the bridges and figures out the
 *        slot on Bus 0 where this device eventually hooks to.
 * @pin:  the PCI interrupt pin read from the device, then swizzled
 *        as it goes through each bridge.
 *
 * Uses the board-specific mapping string when the device number has
 * an entry, otherwise the standard PCI-spec (slot + pin) rotation.
 *
 * Return: interrupt number for the device.
 */
int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev,
				      u8 slot, u8 pin)
{
	const char *interrupts = octeon_get_pci_interrupts();
	int dev_num = dev->devfn >> 3;

	if (dev_num < strlen(interrupts))
		return ((interrupts[dev_num] - 'A' + pin - 1) & 3) +
		       OCTEON_IRQ_PCI_INT0;

	return ((slot + pin - 3) & 3) + OCTEON_IRQ_PCI_INT0;
}
/*
 * Read a value from PCI configuration space. The request is encoded
 * into a 64-bit physical address (see union octeon_pci_address) and
 * issued as a direct 1/2/4-byte load.
 */
static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
			      int reg, int size, u32 *val)
{
	union octeon_pci_address addr;

	addr.u64 = 0;
	addr.s.upper = 2;
	addr.s.io = 1;
	addr.s.did = 3;
	addr.s.subdid = 1;
	addr.s.endian_swap = 1;
	addr.s.bus = bus->number;
	addr.s.dev = devfn >> 3;
	addr.s.func = devfn & 0x7;
	addr.s.reg = reg;

	switch (size) {
	case 1:
		*val = cvmx_read64_uint8(addr.u64);
		break;
	case 2:
		*val = le16_to_cpu(cvmx_read64_uint16(addr.u64));
		break;
	case 4:
		*val = le32_to_cpu(cvmx_read64_uint32(addr.u64));
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write a value to PCI configuration space using the same 64-bit
 * encoded address scheme as octeon_read_config().
 */
static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
			       int reg, int size, u32 val)
{
	union octeon_pci_address addr;

	addr.u64 = 0;
	addr.s.upper = 2;
	addr.s.io = 1;
	addr.s.did = 3;
	addr.s.subdid = 1;
	addr.s.endian_swap = 1;
	addr.s.bus = bus->number;
	addr.s.dev = devfn >> 3;
	addr.s.func = devfn & 0x7;
	addr.s.reg = reg;

	switch (size) {
	case 1:
		cvmx_write64_uint8(addr.u64, val);
		break;
	case 2:
		cvmx_write64_uint16(addr.u64, cpu_to_le16(val));
		break;
	case 4:
		cvmx_write64_uint32(addr.u64, cpu_to_le32(val));
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
/* Config space accessors handed to the generic MIPS PCI layer. */
static struct pci_ops octeon_pci_ops = {
	.read = octeon_read_config,
	.write = octeon_write_config,
};
/* PCI memory window; start/end are filled in during initialization. */
static struct resource octeon_pci_mem_resource = {
	.start = 0,
	.end = 0,
	.name = "Octeon PCI MEM",
	.flags = IORESOURCE_MEM,
};
/*
 * PCI I/O ports must start above 16KB so that the ISA-range filtering
 * done by the PCI-X to PCI bridge does not swallow the accesses.
 */
static struct resource octeon_pci_io_resource = {
	.start = 0x4000,
	.end = OCTEON_PCI_IOSPACE_SIZE - 1,
	.name = "Octeon PCI IO",
	.flags = IORESOURCE_IO,
};
/* The MIPS PCI hose for the classic PCI/PCI-X host interface. */
static struct pci_controller octeon_pci_controller = {
	.pci_ops = &octeon_pci_ops,
	.mem_resource = &octeon_pci_mem_resource,
	.mem_offset = OCTEON_PCI_MEMSPACE_OFFSET,
	.io_resource = &octeon_pci_io_resource,
	.io_offset = 0,
	.io_map_base = OCTEON_PCI_IOSPACE_BASE,
};
/*
* Low level initialize the Octeon PCI controller
*/
static void octeon_pci_initialize(void)
{
union cvmx_pci_cfg01 cfg01;
union cvmx_npi_ctl_status ctl_status;
union cvmx_pci_ctl_status_2 ctl_status_2;
union cvmx_pci_cfg19 cfg19;
union cvmx_pci_cfg16 cfg16;
union cvmx_pci_cfg22 cfg22;
union cvmx_pci_cfg56 cfg56;
/* Reset the PCI Bus */
cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1);
cvmx_read_csr(CVMX_CIU_SOFT_PRST);
udelay(2000); /* Hold PCI reset for 2 ms */
ctl_status.u64 = 0; /* cvmx_read_csr(CVMX_NPI_CTL_STATUS); */
ctl_status.s.max_word = 1;
ctl_status.s.timer = 1;
cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64);
/* Deassert PCI reset and advertize PCX Host Mode Device Capability
(64b) */
cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4);
cvmx_read_csr(CVMX_CIU_SOFT_PRST);
udelay(2000); /* Wait 2 ms after deasserting PCI reset */
ctl_status_2.u32 = 0;
ctl_status_2.s.tsr_hwm = 1; /* Initializes to 0. Must be set
before any PCI reads. */
ctl_status_2.s.bar2pres = 1; /* Enable BAR2 */
ctl_status_2.s.bar2_enb = 1;
ctl_status_2.s.bar2_cax = 1; /* Don't use L2 */
ctl_status_2.s.bar2_esx = 1;
ctl_status_2.s.pmo_amod = 1; /* Round robin priority */
if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
/* BAR1 hole */
ctl_status_2.s.bb1_hole = OCTEON_PCI_BAR1_HOLE_BITS;
ctl_status_2.s.bb1_siz = 1; /* BAR1 is 2GB */
ctl_status_2.s.bb_ca = 1; /* Don't use L2 with big bars */
ctl_status_2.s.bb_es = 1; /* Big bar in byte swap mode */
ctl_status_2.s.bb1 = 1; /* BAR1 is big */
ctl_status_2.s.bb0 = 1; /* BAR0 is big */
}
octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
udelay(2000); /* Wait 2 ms before doing PCI reads */
ctl_status_2.u32 = octeon_npi_read32(CVMX_NPI_PCI_CTL_STATUS_2);
pr_notice("PCI Status: %s %s-bit\n",
ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI",
ctl_status_2.s.ap_64ad ? "64" : "32");
if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
union cvmx_pci_cnt_reg cnt_reg_start;
union cvmx_pci_cnt_reg cnt_reg_end;
unsigned long cycles, pci_clock;
cnt_reg_start.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
cycles = read_c0_cvmcount();
udelay(1000);
cnt_reg_end.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
cycles = read_c0_cvmcount() - cycles;
pci_clock = (cnt_reg_end.s.pcicnt - cnt_reg_start.s.pcicnt) /
(cycles / (mips_hpt_frequency / 1000000));
pr_notice("PCI Clock: %lu MHz\n", pci_clock);
}
/*
* TDOMC must be set to one in PCI mode. TDOMC should be set to 4
* in PCI-X mode to allow four outstanding splits. Otherwise,
* should not change from its reset value. Don't write PCI_CFG19
* in PCI mode (0x82000001 reset value), write it to 0x82000004
* after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero.
* MRBCM -> must be one.
*/
if (ctl_status_2.s.ap_pcix) {
cfg19.u32 = 0;
/*
* Target Delayed/Split request outstanding maximum
* count. [1..31] and 0=32. NOTE: If the user
* programs these bits beyond the Designed Maximum
* outstanding count, then the designed maximum table
* depth will be used instead. No additional
* Deferred/Split transactions will be accepted if
* this outstanding maximum count is
* reached. Furthermore, no additional deferred/split
* transactions will be accepted if the I/O delay/ I/O
* Split Request outstanding maximum is reached.
*/
cfg19.s.tdomc = 4;
/*
* Master Deferred Read Request Outstanding Max Count
* (PCI only). CR4C[26:24] Max SAC cycles MAX DAC
* cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101
* 5 2 110 6 3 111 7 3 For example, if these bits are
* programmed to 100, the core can support 2 DAC
* cycles, 4 SAC cycles or a combination of 1 DAC and
* 2 SAC cycles. NOTE: For the PCI-X maximum
* outstanding split transactions, refer to
* CRE0[22:20].
*/
cfg19.s.mdrrmc = 2;
/*
* Master Request (Memory Read) Byte Count/Byte Enable
* select. 0 = Byte Enables valid. In PCI mode, a
* burst transaction cannot be performed using Memory
* Read command=4?h6. 1 = DWORD Byte Count valid
* (default). In PCI Mode, the memory read byte
* enables are automatically generated by the
* core. Note: N3 Master Request transaction sizes are
* always determined through the
* am_attr[<35:32>|<7:0>] field.
*/
cfg19.s.mrbcm = 1;
octeon_npi_write32(CVMX_NPI_PCI_CFG19, cfg19.u32);
}
cfg01.u32 = 0;
cfg01.s.msae = 1; /* Memory Space Access Enable */
cfg01.s.me = 1; /* Master Enable */
cfg01.s.pee = 1; /* PERR# Enable */
cfg01.s.see = 1; /* System Error Enable */
cfg01.s.fbbe = 1; /* Fast Back to Back Transaction Enable */
octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
#ifdef USE_OCTEON_INTERNAL_ARBITER
/*
* When OCTEON is a PCI host, most systems will use OCTEON's
* internal arbiter, so must enable it before any PCI/PCI-X
* traffic can occur.
*/
{
union cvmx_npi_pci_int_arb_cfg pci_int_arb_cfg;
pci_int_arb_cfg.u64 = 0;
pci_int_arb_cfg.s.en = 1; /* Internal arbiter enable */
cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, pci_int_arb_cfg.u64);
}
#endif /* USE_OCTEON_INTERNAL_ARBITER */
/*
* Preferably written to 1 to set MLTD. [RDSATI,TRTAE,
* TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to
* 1..7.
*/
cfg16.u32 = 0;
cfg16.s.mltd = 1; /* Master Latency Timer Disable */
octeon_npi_write32(CVMX_NPI_PCI_CFG16, cfg16.u32);
/*
* Should be written to 0x4ff00. MTTV -> must be zero.
* FLUSH -> must be 1. MRV -> should be 0xFF.
*/
cfg22.u32 = 0;
/* Master Retry Value [1..255] and 0=infinite */
cfg22.s.mrv = 0xff;
/*
* AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper
* N3K operation.
*/
cfg22.s.flush = 1;
octeon_npi_write32(CVMX_NPI_PCI_CFG22, cfg22.u32);
/*
* MOST Indicates the maximum number of outstanding splits (in -1
* notation) when OCTEON is in PCI-X mode. PCI-X performance is
* affected by the MOST selection. Should generally be written
* with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807,
* depending on the desired MOST of 3, 2, 1, or 0, respectively.
*/
cfg56.u32 = 0;
cfg56.s.pxcid = 7; /* RO - PCI-X Capability ID */
cfg56.s.ncp = 0xe8; /* RO - Next Capability Pointer */
cfg56.s.dpere = 1; /* Data Parity Error Recovery Enable */
cfg56.s.roe = 1; /* Relaxed Ordering Enable */
cfg56.s.mmbc = 1; /* Maximum Memory Byte Count
[0=512B,1=1024B,2=2048B,3=4096B] */
cfg56.s.most = 3; /* Maximum outstanding Split transactions [0=1
.. 7=32] */
octeon_npi_write32(CVMX_NPI_PCI_CFG56, cfg56.u32);
/*
* Affects PCI performance when OCTEON services reads to its
* BAR1/BAR2. Refer to Section 10.6.1. The recommended values are
* 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
* PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700,
* these values need to be changed so they won't possibly prefetch off
* of the end of memory if PCI is DMAing a buffer at the end of
* memory. Note that these values differ from their reset values.
*/
octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_6, 0x21);
octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_C, 0x31);
octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_E, 0x31);
}
/*
 * Initialize the Octeon PCI controller.
 *
 * Brings up the NPI PCI host bridge, programs the DMA BARs (big- or
 * small-bar layout depending on chip model), and registers the bus
 * with the MIPS PCI core.  Returns 0 in all cases; on PCIe-capable
 * chips or when not in host mode nothing is initialized.
 */
static int __init octeon_pci_setup(void)
{
	union cvmx_npi_mem_access_subidx mem_access;
	int index;

	/* Only these chips have PCI */
	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
		return 0;

	/* PCI setup only applies when this Octeon is the bus host. */
	if (!octeon_is_pci_host()) {
		pr_notice("Not in host mode, PCI Controller not initialized\n");
		return 0;
	}

	/* Point pcibios_map_irq() to the PCI version of it */
	octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;

	/* Only use the big bars on chips that support it */
	if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
	    OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL;
	else
		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;

	/* PCI I/O and PCI MEM values */
	set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
	ioport_resource.start = 0;
	ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1;

	pr_notice("%s Octeon big bar support\n",
		  (octeon_dma_bar_type ==
		  OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling");

	octeon_pci_initialize();

	/* Configure how the core accesses PCI memory space (subid 3). */
	mem_access.u64 = 0;
	mem_access.s.esr = 1;	/* Endian-Swap on read. */
	mem_access.s.esw = 1;	/* Endian-Swap on write. */
	mem_access.s.nsr = 0;	/* No-Snoop on read. */
	mem_access.s.nsw = 0;	/* No-Snoop on write. */
	mem_access.s.ror = 0;	/* Relax Read on read. */
	mem_access.s.row = 0;	/* Relax Order on write. */
	mem_access.s.ba = 0;	/* PCI Address bits [63:36]. */
	cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, mem_access.u64);

	/*
	 * Remap the Octeon BAR 2 above all 32 bit devices
	 * (0x8000000000ul). This is done here so it is remapped
	 * before the readl()'s below. We don't want BAR2 overlapping
	 * with BAR0/BAR1 during these reads.
	 */
	octeon_npi_write32(CVMX_NPI_PCI_CFG08,
			   (u32)(OCTEON_BAR2_PCI_ADDRESS & 0xffffffffull));
	octeon_npi_write32(CVMX_NPI_PCI_CFG09,
			   (u32)(OCTEON_BAR2_PCI_ADDRESS >> 32));

	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
		/* Remap the Octeon BAR 0 to 0-2GB */
		octeon_npi_write32(CVMX_NPI_PCI_CFG04, 0);
		octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);

		/*
		 * Remap the Octeon BAR 1 to map 2GB-4GB (minus the
		 * BAR 1 hole).
		 */
		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30);
		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);

		/* BAR1 movable mappings set for identity mapping */
		octeon_bar1_pci_phys = 0x80000000ull;
		for (index = 0; index < 32; index++) {
			union cvmx_pci_bar1_indexx bar1_index;

			bar1_index.u32 = 0;
			/* Address bits[35:22] sent to L2C */
			bar1_index.s.addr_idx =
				(octeon_bar1_pci_phys >> 22) + index;
			/* Don't put PCI accesses in L2. */
			bar1_index.s.ca = 1;
			/* Endian Swap Mode */
			bar1_index.s.end_swp = 1;
			/* Set '1' when the selected address range is valid. */
			bar1_index.s.addr_v = 1;
			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
					   bar1_index.u32);
		}

		/* Devices go after BAR1 */
		octeon_pci_mem_resource.start =
			OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) -
			(OCTEON_PCI_BAR1_HOLE_SIZE << 20);
		octeon_pci_mem_resource.end =
			octeon_pci_mem_resource.start + (1ul << 30);
	} else {
		/* Remap the Octeon BAR 0 to map 128MB-(128MB+4KB) */
		octeon_npi_write32(CVMX_NPI_PCI_CFG04, 128ul << 20);
		octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);

		/* Remap the Octeon BAR 1 to map 0-128MB */
		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0);
		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);

		/* BAR1 movable regions contiguous to cover the swiotlb */
		octeon_bar1_pci_phys =
			default_swiotlb_base() & ~((1ull << 22) - 1);
		for (index = 0; index < 32; index++) {
			union cvmx_pci_bar1_indexx bar1_index;

			bar1_index.u32 = 0;
			/* Address bits[35:22] sent to L2C */
			bar1_index.s.addr_idx =
				(octeon_bar1_pci_phys >> 22) + index;
			/* Don't put PCI accesses in L2. */
			bar1_index.s.ca = 1;
			/* Endian Swap Mode */
			bar1_index.s.end_swp = 1;
			/* Set '1' when the selected address range is valid. */
			bar1_index.s.addr_v = 1;
			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
					   bar1_index.u32);
		}

		/* Devices go after BAR0 */
		octeon_pci_mem_resource.start =
			OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) +
			(4ul << 10);
		octeon_pci_mem_resource.end =
			octeon_pci_mem_resource.start + (1ul << 30);
	}

	register_pci_controller(&octeon_pci_controller);

	/*
	 * Clear any errors that might be pending from before the bus
	 * was setup properly.
	 */
	cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);

	/* EDAC reporting device; a registration failure is non-fatal. */
	if (IS_ERR(platform_device_register_simple("octeon_pci_edac",
						   -1, NULL, 0)))
		pr_err("Registration of co_pci_edac failed!\n");

	octeon_pci_dma_init();

	return 0;
}
arch_initcall(octeon_pci_setup);
| linux-master | arch/mips/pci/pci-octeon.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2004, 2006 MIPS Technologies, Inc. All rights reserved.
* Author: Maciej W. Rozycki <[email protected]>
* Copyright (C) 2018 Maciej W. Rozycki
*/
#include <linux/dma-mapping.h>
#include <linux/pci.h>
/*
 * Set the BCM1250, etc. PCI host bridge's TRDY timeout
 * to the finite max.
 */
static void quirk_sb1250_pci(struct pci_dev *dev)
{
	/* Device-specific config register 0x40; 0xff = largest finite value. */
	pci_write_config_byte(dev, 0x40, 0xff);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
			quirk_sb1250_pci);
/*
 * The BCM1250, etc. PCI host bridge does not support DAC on its 32-bit
 * bus, so we set the bus's DMA limit accordingly.  However the HT link
 * down the artificial PCI-HT bridge supports 40-bit addressing and the
 * SP1011 HT-PCI bridge downstream supports both DAC and a 64-bit bus
 * width, so we record the PCI-HT bridge's secondary and subordinate bus
 * numbers and do not set the limit for devices present in the inclusive
 * range of those.
 */
struct sb1250_bus_dma_limit_exclude {
	bool set;		/* true once the HT bridge range is recorded */
	unsigned char start;	/* first bus number exempt from the limit */
	unsigned char end;	/* last bus number exempt from the limit */
};
/*
 * pci_walk_bus() callback: apply the 32-bit DMA limit to every device
 * except those behind the PCI-HT bridge, whose bus range is recorded
 * into the shared exclusion state the first time the bridge is seen.
 */
static int sb1250_bus_dma_limit(struct pci_dev *dev, void *data)
{
	struct sb1250_bus_dma_limit_exclude *range = data;

	if (range->set) {
		/* Devices within the recorded HT range keep DAC support. */
		if (dev->bus->number >= range->start &&
		    dev->bus->number <= range->end) {
			dev_dbg(&dev->dev, "not disabling DAC for device");
			return 0;
		}
	} else if (dev->vendor == PCI_VENDOR_ID_SIBYTE &&
		   dev->device == PCI_DEVICE_ID_BCM1250_HT) {
		/* First sight of the PCI-HT bridge: record its bus span. */
		range->start = dev->subordinate->number;
		range->end = pci_bus_max_busnr(dev->subordinate);
		range->set = true;
		dev_dbg(&dev->dev, "not disabling DAC for [bus %02x-%02x]",
			range->start, range->end);
		return 0;
	}

	dev_dbg(&dev->dev, "disabling DAC for device");
	dev->dev.bus_dma_limit = DMA_BIT_MASK(32);
	return 0;
}
/* Walk the whole bus and apply the 32-bit DMA limit where appropriate. */
static void quirk_sb1250_pci_dac(struct pci_dev *dev)
{
	/* Start with no recorded HT exclusion range. */
	struct sb1250_bus_dma_limit_exclude exclude = { .set = false };

	pci_walk_bus(dev->bus, sb1250_bus_dma_limit, &exclude);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
			quirk_sb1250_pci_dac);
/*
 * The BCM1250, etc. PCI/HT bridge reports as a host bridge.
 */
static void quirk_sb1250_ht(struct pci_dev *dev)
{
	/* Reclassify so the PCI core handles it as a normal PCI-PCI bridge. */
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT,
			quirk_sb1250_ht);

/*
 * Set the SP1011 HT/PCI bridge's TRDY timeout to the finite max.
 */
static void quirk_sp1011(struct pci_dev *dev)
{
	/* Device-specific config register 0x64; 0xff = largest finite value. */
	pci_write_config_byte(dev, 0x64, 0xff);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIPACKETS, PCI_DEVICE_ID_SP1011,
			quirk_sp1011);
| linux-master | arch/mips/pci/fixup-sb1250.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atheros AR724X PCI host controller driver
*
* Copyright (C) 2011 René Bolldorf <[email protected]>
* Copyright (C) 2009-2011 Gabor Juhos <[email protected]>
*/
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#define AR724X_PCI_REG_APP 0x00
#define AR724X_PCI_REG_RESET 0x18
#define AR724X_PCI_REG_INT_STATUS 0x4c
#define AR724X_PCI_REG_INT_MASK 0x50
#define AR724X_PCI_APP_LTSSM_ENABLE BIT(0)
#define AR724X_PCI_RESET_LINK_UP BIT(0)
#define AR724X_PCI_INT_DEV0 BIT(14)
#define AR724X_PCI_IRQ_COUNT 1
#define AR7240_BAR0_WAR_VALUE 0xffff
#define AR724X_PCI_CMD_INIT (PCI_COMMAND_MEMORY | \
PCI_COMMAND_MASTER | \
PCI_COMMAND_INVALIDATE | \
PCI_COMMAND_PARITY | \
PCI_COMMAND_SERR | \
PCI_COMMAND_FAST_BACK)
/* Per-instance state of one AR724x PCIe root complex. */
struct ar724x_pci_controller {
	void __iomem *devcfg_base;	/* device config space window */
	void __iomem *ctrl_base;	/* APP/RESET/INT control registers */
	void __iomem *crp_base;		/* root port local config space */

	int irq;			/* parent (chained) interrupt line */
	int irq_base;			/* first virq of this port's range */

	bool link_up;			/* PCIe link state sampled at probe */
	bool bar0_is_cached;		/* AR7240 BAR0 WAR: cache is valid */
	u32 bar0_value;			/* BAR0 value the driver last wrote */

	struct pci_controller pci_controller;
	struct resource io_res;
	struct resource mem_res;
};
/* Return true when the RESET register reports the PCIe link as up. */
static inline bool ar724x_pci_check_link(struct ar724x_pci_controller *apc)
{
	return __raw_readl(apc->ctrl_base + AR724X_PCI_REG_RESET) &
	       AR724X_PCI_RESET_LINK_UP;
}
/* Recover our controller from the pci_controller stored in bus->sysdata. */
static inline struct ar724x_pci_controller *
pci_bus_to_ar724x_controller(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return container_of(hose, struct ar724x_pci_controller,
			    pci_controller);
}
/*
 * Read-modify-write a 1/2/4-byte value into the root port's local (CRP)
 * configuration space.  @where must be naturally aligned to @size.
 * Returns PCIBIOS_DEVICE_NOT_FOUND while the PCIe link is down.
 */
static int ar724x_pci_local_write(struct ar724x_pci_controller *apc,
				  int where, int size, u32 value)
{
	void __iomem *base;
	u32 data;
	int s;

	WARN_ON(where & (size - 1));

	if (!apc->link_up)
		return PCIBIOS_DEVICE_NOT_FOUND;

	base = apc->crp_base;

	/* Registers are 32 bits wide; merge sub-word writes into a dword. */
	data = __raw_readl(base + (where & ~3));
	switch (size) {
	case 1:
		s = ((where & 3) * 8);
		data &= ~(0xff << s);
		data |= ((value & 0xff) << s);
		break;
	case 2:
		s = ((where & 2) * 8);
		data &= ~(0xffff << s);
		data |= ((value & 0xffff) << s);
		break;
	case 4:
		data = value;
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	__raw_writel(data, base + (where & ~3));
	/* flush write */
	__raw_readl(base + (where & ~3));

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Read @size bytes at config offset @where of the single device
 * (devfn 0) behind this root port.  A dword read of BAR0 returns the
 * value cached by ar724x_pci_write() when the AR7240 BAR0 workaround
 * is active.
 */
static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
			   int size, uint32_t *value)
{
	struct ar724x_pci_controller *apc;
	void __iomem *base;
	u32 data;

	apc = pci_bus_to_ar724x_controller(bus);
	if (!apc->link_up)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Only one device (devfn 0) hangs off this root port. */
	if (devfn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	base = apc->devcfg_base;

	/* Registers are 32 bits wide; extract the requested sub-word. */
	data = __raw_readl(base + (where & ~3));
	switch (size) {
	case 1:
		if (where & 1)
			data >>= 8;
		if (where & 2)
			data >>= 16;
		data &= 0xff;
		break;
	case 2:
		if (where & 2)
			data >>= 16;
		data &= 0xffff;
		break;
	case 4:
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
	    apc->bar0_is_cached) {
		/* use the cached value */
		*value = apc->bar0_value;
	} else {
		*value = data;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write @size bytes at config offset @where of the single device
 * (devfn 0) behind this root port, applying the AR7240 BAR0 erratum
 * workaround for dword writes to BAR0.
 */
static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
			    int size, uint32_t value)
{
	struct ar724x_pci_controller *apc;
	void __iomem *base;
	u32 data;
	int s;

	apc = pci_bus_to_ar724x_controller(bus);
	if (!apc->link_up)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Only one device (devfn 0) hangs off this root port. */
	if (devfn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (soc_is_ar7240() && where == PCI_BASE_ADDRESS_0 && size == 4) {
		if (value != 0xffffffff) {
			/*
			 * WAR for a hw issue. If the BAR0 register of the
			 * device is set to the proper base address, the
			 * memory space of the device is not accessible.
			 *
			 * Cache the intended value so it can be read back,
			 * and write a SoC specific constant value to the
			 * BAR0 register in order to make the device memory
			 * accessible.
			 */
			apc->bar0_is_cached = true;
			apc->bar0_value = value;

			value = AR7240_BAR0_WAR_VALUE;
		} else {
			/* All-ones is a BAR sizing probe; don't cache it. */
			apc->bar0_is_cached = false;
		}
	}

	base = apc->devcfg_base;

	/* Registers are 32 bits wide; merge sub-word writes into a dword. */
	data = __raw_readl(base + (where & ~3));
	switch (size) {
	case 1:
		s = ((where & 3) * 8);
		data &= ~(0xff << s);
		data |= ((value & 0xff) << s);
		break;
	case 2:
		s = ((where & 2) * 8);
		data &= ~(0xffff << s);
		data |= ((value & 0xffff) << s);
		break;
	case 4:
		data = value;
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	__raw_writel(data, base + (where & ~3));
	/* flush write */
	__raw_readl(base + (where & ~3));

	return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops ar724x_pci_ops = {
.read = ar724x_pci_read,
.write = ar724x_pci_write,
};
/*
 * Chained handler for the parent interrupt: demultiplex the
 * controller's interrupt status to the per-device virq.
 */
static void ar724x_pci_irq_handler(struct irq_desc *desc)
{
	struct ar724x_pci_controller *apc;
	void __iomem *base;
	u32 pending;

	apc = irq_desc_get_handler_data(desc);
	base = apc->ctrl_base;

	/* Only bits that are both pending and unmasked are serviced. */
	pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) &
		  __raw_readl(base + AR724X_PCI_REG_INT_MASK);

	if (pending & AR724X_PCI_INT_DEV0)
		generic_handle_irq(apc->irq_base + 0);
	else
		spurious_interrupt();
}
/*
 * irq_chip unmask: enable the DEV0 interrupt bit in the controller's
 * mask register for the given virq.
 */
static void ar724x_pci_irq_unmask(struct irq_data *d)
{
	struct ar724x_pci_controller *apc;
	void __iomem *base;
	int offset;
	u32 t;

	apc = irq_data_get_irq_chip_data(d);
	base = apc->ctrl_base;

	/*
	 * Index of this virq within the controller's range.  The original
	 * computed irq_base - d->irq; with AR724X_PCI_IRQ_COUNT == 1 both
	 * forms yield 0, but irq - irq_base is the correct offset should
	 * the IRQ count ever grow (the inverted form would map
	 * irq_base + 1 to -1 instead of 1).
	 */
	offset = d->irq - apc->irq_base;
	switch (offset) {
	case 0:
		t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
		__raw_writel(t | AR724X_PCI_INT_DEV0,
			     base + AR724X_PCI_REG_INT_MASK);
		/* flush write */
		__raw_readl(base + AR724X_PCI_REG_INT_MASK);
	}
}
/*
 * irq_chip mask (and mask_ack): disable the DEV0 interrupt bit and
 * clear any latched status for the given virq.
 */
static void ar724x_pci_irq_mask(struct irq_data *d)
{
	struct ar724x_pci_controller *apc;
	void __iomem *base;
	int offset;
	u32 t;

	apc = irq_data_get_irq_chip_data(d);
	base = apc->ctrl_base;

	/*
	 * Index of this virq within the controller's range.  The original
	 * computed irq_base - d->irq; with AR724X_PCI_IRQ_COUNT == 1 both
	 * forms yield 0, but irq - irq_base is the correct offset should
	 * the IRQ count ever grow.
	 */
	offset = d->irq - apc->irq_base;
	switch (offset) {
	case 0:
		t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
		__raw_writel(t & ~AR724X_PCI_INT_DEV0,
			     base + AR724X_PCI_REG_INT_MASK);
		/* flush write */
		__raw_readl(base + AR724X_PCI_REG_INT_MASK);

		/* Acknowledge (clear) any pending DEV0 status. */
		t = __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
		__raw_writel(t | AR724X_PCI_INT_DEV0,
			     base + AR724X_PCI_REG_INT_STATUS);
		/* flush write */
		__raw_readl(base + AR724X_PCI_REG_INT_STATUS);
	}
}
static struct irq_chip ar724x_pci_irq_chip = {
.name = "AR724X PCI ",
.irq_mask = ar724x_pci_irq_mask,
.irq_unmask = ar724x_pci_irq_unmask,
.irq_mask_ack = ar724x_pci_irq_mask,
};
/*
 * Set up this controller's interrupt demultiplexing: quiesce the
 * hardware, map our virq range to the level-IRQ handler, and chain
 * the parent interrupt to ar724x_pci_irq_handler().
 */
static void ar724x_pci_irq_init(struct ar724x_pci_controller *apc,
				int id)
{
	void __iomem *base = apc->ctrl_base;
	int n;

	/* Start with all interrupts masked and no stale status latched. */
	__raw_writel(0, base + AR724X_PCI_REG_INT_MASK);
	__raw_writel(0, base + AR724X_PCI_REG_INT_STATUS);

	apc->irq_base = ATH79_PCI_IRQ_BASE + (id * AR724X_PCI_IRQ_COUNT);

	for (n = 0; n < AR724X_PCI_IRQ_COUNT; n++) {
		int virq = apc->irq_base + n;

		irq_set_chip_and_handler(virq, &ar724x_pci_irq_chip,
					 handle_level_irq);
		irq_set_chip_data(virq, apc);
	}

	irq_set_chained_handler_and_data(apc->irq, ar724x_pci_irq_handler,
					 apc);
}
/*
 * Full PCIe root complex initialization: release the controller and
 * PHY from reset, take the PCIe PLL out of reset/bypass, enable link
 * training (LTSSM), then poll up to 100ms for the PHY link.
 */
static void ar724x_pci_hw_init(struct ar724x_pci_controller *apc)
{
	u32 ppl, app;
	int wait = 0;

	/* deassert PCIe host controller and PCIe PHY reset */
	ath79_device_reset_clear(AR724X_RESET_PCIE);
	ath79_device_reset_clear(AR724X_RESET_PCIE_PHY);

	/* remove the reset of the PCIE PLL */
	ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG);
	ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_RESET;
	ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl);

	/* deassert bypass for the PCIE PLL */
	ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG);
	ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_BYPASS;
	ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl);

	/* set PCIE Application Control to ready */
	app = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_APP);
	app |= AR724X_PCI_APP_LTSSM_ENABLE;
	__raw_writel(app, apc->ctrl_base + AR724X_PCI_REG_APP);

	/* wait up to 100ms for PHY link up */
	do {
		mdelay(10);
		wait++;
	} while (wait < 10 && !ar724x_pci_check_link(apc));
}
/*
 * Platform-driver probe: map the controller register windows, build
 * the I/O and memory resources from the platform resources, bring the
 * hardware out of reset if needed, set up interrupts, and register
 * the PCI controller with the MIPS PCI core.
 */
static int ar724x_pci_probe(struct platform_device *pdev)
{
	struct ar724x_pci_controller *apc;
	struct resource *res;
	int id;

	/* pdev->id selects this instance's virq range; -1 means "only one". */
	id = pdev->id;
	if (id == -1)
		id = 0;

	apc = devm_kzalloc(&pdev->dev, sizeof(struct ar724x_pci_controller),
			    GFP_KERNEL);
	if (!apc)
		return -ENOMEM;

	apc->ctrl_base = devm_platform_ioremap_resource_byname(pdev, "ctrl_base");
	if (IS_ERR(apc->ctrl_base))
		return PTR_ERR(apc->ctrl_base);

	apc->devcfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg_base");
	if (IS_ERR(apc->devcfg_base))
		return PTR_ERR(apc->devcfg_base);

	apc->crp_base = devm_platform_ioremap_resource_byname(pdev, "crp_base");
	if (IS_ERR(apc->crp_base))
		return PTR_ERR(apc->crp_base);

	apc->irq = platform_get_irq(pdev, 0);
	if (apc->irq < 0)
		return -EINVAL;

	res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
	if (!res)
		return -EINVAL;

	apc->io_res.parent = res;
	apc->io_res.name = "PCI IO space";
	apc->io_res.start = res->start;
	apc->io_res.end = res->end;
	apc->io_res.flags = IORESOURCE_IO;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base");
	if (!res)
		return -EINVAL;

	apc->mem_res.parent = res;
	apc->mem_res.name = "PCI memory space";
	apc->mem_res.start = res->start;
	apc->mem_res.end = res->end;
	apc->mem_res.flags = IORESOURCE_MEM;

	apc->pci_controller.pci_ops = &ar724x_pci_ops;
	apc->pci_controller.io_resource = &apc->io_res;
	apc->pci_controller.mem_resource = &apc->mem_res;

	/*
	 * Do the full PCIE Root Complex Initialization Sequence if the PCIe
	 * host controller is in reset.
	 */
	if (ath79_reset_rr(AR724X_RESET_REG_RESET_MODULE) & AR724X_RESET_PCIE)
		ar724x_pci_hw_init(apc);

	/* A down link is not fatal: config accessors will just fail. */
	apc->link_up = ar724x_pci_check_link(apc);
	if (!apc->link_up)
		dev_warn(&pdev->dev, "PCIe link is down\n");

	ar724x_pci_irq_init(apc, id);

	/* Enable the root port (memory/IO, bus mastering, error lines). */
	ar724x_pci_local_write(apc, PCI_COMMAND, 4, AR724X_PCI_CMD_INIT);

	register_pci_controller(&apc->pci_controller);

	return 0;
}
static struct platform_driver ar724x_pci_driver = {
.probe = ar724x_pci_probe,
.driver = {
.name = "ar724x-pci",
},
};
/* Register the driver early (postcore) so PCI is up before device probe. */
static int __init ar724x_pci_init(void)
{
	return platform_driver_register(&ar724x_pci_driver);
}
postcore_initcall(ar724x_pci_init);
| linux-master | arch/mips/pci/pci-ar724x.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Imagination Technologies
* Author: Paul Burton <[email protected]>
*
* pcibios_align_resource taken from arch/arm/kernel/bios32.c.
*/
#include <linux/pci.h>
/*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*
* Why? Because some silly external IO cards only decode
* the low 10 bits of the IO address. The 0x00-0xff region
* is reserved for motherboard devices that decode all 16
* bits, so it's ok to allocate at, say, 0x2800-0x28ff,
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might have be mirrored at 0x0100-0x03ff..
*/
/*
 * Align a proposed resource allocation.  I/O allocations avoid the
 * 0x100-0x3ff mirror region (see the comment above), then the start
 * is rounded up to @align, and finally the host bridge may apply its
 * own alignment callback.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_host_bridge *bridge;
	resource_size_t start = res->start;

	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
		start = (start + 0x3ff) & ~0x3ff;

	start = (start + align - 1) & ~(align - 1);

	bridge = pci_find_host_bridge(dev->bus);
	if (bridge->align_resource)
		start = bridge->align_resource(dev, res, start, size, align);

	return start;
}
/* Per-bus fixup hook: read back the bridge windows from hardware. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
	pci_read_bridge_bases(bus);
}
#ifdef pci_remap_iospace
/*
 * MIPS uses a single flat I/O port window: map the bridge's I/O space
 * with ioremap() and install it as the global I/O port base.  Only a
 * resource starting at port 0 is supported.
 */
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
	unsigned long vaddr;

	if (res->start != 0) {
		WARN_ONCE(1, "resource start address is not zero\n");
		return -ENODEV;
	}

	vaddr = (unsigned long)ioremap(phys_addr, resource_size(res));
	set_io_port_base(vaddr);
	return 0;
}
#endif
| linux-master | arch/mips/pci/pci-generic.c |
/*
* Based on linux/arch/mips/txx9/rbtx4938/setup.c,
* and RBTX49xx patch from CELF patch archive.
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Copyright (C) 2004 by Ralf Baechle ([email protected])
* (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/tx4938.h>
/*
 * Report the PCIC clock source and rate.  Returns the PCICLK rate in
 * Hz when the internal clock is used, or -1 for an external clock.
 */
int __init tx4938_report_pciclk(void)
{
	int pciclk = 0;

	pr_info("PCIC --%s PCICLK:",
		(__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI66) ?
		" PCI66" : "");
	if (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_PCICLKEN_ALL) {
		u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg);

		/* Internal clock: PCICLK = CPU clock / PCIDIVMODE divisor. */
		switch ((unsigned long)ccfg &
			TX4938_CCFG_PCIDIVMODE_MASK) {
		case TX4938_CCFG_PCIDIVMODE_4:
			pciclk = txx9_cpu_clock / 4; break;
		case TX4938_CCFG_PCIDIVMODE_4_5:
			pciclk = txx9_cpu_clock * 2 / 9; break;
		case TX4938_CCFG_PCIDIVMODE_5:
			pciclk = txx9_cpu_clock / 5; break;
		case TX4938_CCFG_PCIDIVMODE_5_5:
			pciclk = txx9_cpu_clock * 2 / 11; break;
		case TX4938_CCFG_PCIDIVMODE_8:
			pciclk = txx9_cpu_clock / 8; break;
		case TX4938_CCFG_PCIDIVMODE_9:
			pciclk = txx9_cpu_clock / 9; break;
		case TX4938_CCFG_PCIDIVMODE_10:
			pciclk = txx9_cpu_clock / 10; break;
		case TX4938_CCFG_PCIDIVMODE_11:
			pciclk = txx9_cpu_clock / 11; break;
		}
		/* +50000 rounds the rate to one decimal place in MHz. */
		pr_cont("Internal(%u.%uMHz)",
			(pciclk + 50000) / 1000000,
			((pciclk + 50000) / 100000) % 10);
	} else {
		pr_cont("External");
		pciclk = -1;
	}
	pr_cont("\n");
	return pciclk;
}
/* Report the PCIC1 clock rate, rounded to one decimal place in MHz. */
void __init tx4938_report_pci1clk(void)
{
	__u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg);
	unsigned int divisor = (ccfg & TX4938_CCFG_PCI1DMD) ? 4 : 2;
	unsigned int pciclk = txx9_gbus_clock / divisor;
	unsigned int rounded = pciclk + 50000;

	pr_info("PCIC1 -- %sPCICLK:%u.%uMHz\n",
		(ccfg & TX4938_CCFG_PCI1_66) ? "PCI66 " : "",
		rounded / 1000000,
		(rounded / 100000) % 10);
}
/*
 * Switch the PCIC to 66MHz operation: assert M66EN and, when the
 * internal clock is used, halve the PCIDIVMODE divisor.  Returns the
 * resulting PCICLK rate in Hz, or -1 for an external clock.
 */
int __init tx4938_pciclk66_setup(void)
{
	int pciclk;

	/* Assert M66EN */
	tx4938_ccfg_set(TX4938_CCFG_PCI66);
	/* Double PCICLK (if possible) */
	if (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_PCICLKEN_ALL) {
		unsigned int pcidivmode = 0;
		u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg);

		pcidivmode = (unsigned long)ccfg &
			TX4938_CCFG_PCIDIVMODE_MASK;
		/* Map each 33MHz divisor to its matching 66MHz divisor. */
		switch (pcidivmode) {
		case TX4938_CCFG_PCIDIVMODE_8:
		case TX4938_CCFG_PCIDIVMODE_4:
			pcidivmode = TX4938_CCFG_PCIDIVMODE_4;
			pciclk = txx9_cpu_clock / 4;
			break;
		case TX4938_CCFG_PCIDIVMODE_9:
		case TX4938_CCFG_PCIDIVMODE_4_5:
			pcidivmode = TX4938_CCFG_PCIDIVMODE_4_5;
			pciclk = txx9_cpu_clock * 2 / 9;
			break;
		case TX4938_CCFG_PCIDIVMODE_10:
		case TX4938_CCFG_PCIDIVMODE_5:
			pcidivmode = TX4938_CCFG_PCIDIVMODE_5;
			pciclk = txx9_cpu_clock / 5;
			break;
		case TX4938_CCFG_PCIDIVMODE_11:
		case TX4938_CCFG_PCIDIVMODE_5_5:
		default:
			pcidivmode = TX4938_CCFG_PCIDIVMODE_5_5;
			pciclk = txx9_cpu_clock * 2 / 11;
			break;
		}
		tx4938_ccfg_change(TX4938_CCFG_PCIDIVMODE_MASK,
				   pcidivmode);
		pr_debug("PCICLK: ccfg:%08lx\n",
			 (unsigned long)__raw_readq(&tx4938_ccfgptr->ccfg));
	} else
		pciclk = -1;
	return pciclk;
}
/*
 * IRQ mapping for devices on the internal PCIC1 controller.  Returns
 * the virq for the on-chip ethernet MACs when they are selected, 0
 * for other PCIC1 slots, and -1 when @dev is not behind PCIC1.
 */
int tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
{
	if (get_tx4927_pcicptr(dev->bus->sysdata) != tx4938_pcic1ptr)
		return -1;

	/* ETH0 sits at AD31, ETH1 at AD30, when selected in PCFG. */
	if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(31)) {
		if (__raw_readq(&tx4938_ccfgptr->pcfg) &
		    TX4938_PCFG_ETH0_SEL)
			return TXX9_IRQ_BASE + TX4938_IR_ETH0;
	} else if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(30)) {
		if (__raw_readq(&tx4938_ccfgptr->pcfg) &
		    TX4938_PCFG_ETH1_SEL)
			return TXX9_IRQ_BASE + TX4938_IR_ETH1;
	}

	return 0;
}
/* Install the PCI error interrupt handler (shared with the TX4927 PCIC). */
void __init tx4938_setup_pcierr_irq(void)
{
	if (request_irq(TXX9_IRQ_BASE + TX4938_IR_PCIERR,
			tx4927_pcierr_interrupt,
			0, "PCI error",
			(void *)TX4927_PCIC_REG))
		pr_warn("Failed to request irq for PCIERR\n");
}
| linux-master | arch/mips/pci/pci-tx4938.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* SNI specific PCI support for RM200/RM300.
*
* Copyright (C) 1997 - 2000, 2003, 04 Ralf Baechle ([email protected])
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/mipsregs.h>
#include <asm/sni.h>
#include <irq.h>
/*
* PCIMT Shortcuts ...
*/
#define SCSI PCIMT_IRQ_SCSI
#define ETH PCIMT_IRQ_ETHERNET
#define INTA PCIMT_IRQ_INTA
#define INTB PCIMT_IRQ_INTB
#define INTC PCIMT_IRQ_INTC
#define INTD PCIMT_IRQ_INTD
/*
* Device 0: PCI EISA Bridge (directly routed)
* Device 1: NCR53c810 SCSI (directly routed)
* Device 2: PCnet32 Ethernet (directly routed)
* Device 3: VGA (routed to INTB)
* Device 4: Unused
* Device 5: Slot 2
* Device 6: Slot 3
* Device 7: Slot 4
*
* Documentation says the VGA is device 5 and device 3 is unused but that
* seem to be a documentation error. At least on my RM200C the Cirrus
* Logic CL-GD5434 VGA is device 3.
*/
static char irq_tab_rm200[8][5] = {
/* INTA INTB INTC INTD */
{ 0, 0, 0, 0, 0 }, /* EISA bridge */
{ SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
{ ETH, ETH, ETH, ETH, ETH }, /* Ethernet */
{ INTB, INTB, INTB, INTB, INTB }, /* VGA */
{ 0, 0, 0, 0, 0 }, /* Unused */
{ 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
{ 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
{ 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
};
/*
* In Revision D of the RM300 Device 2 has become a normal purpose Slot 1
*
* The VGA card is optional for RM300 systems.
*/
static char irq_tab_rm300d[8][5] = {
/* INTA INTB INTC INTD */
{ 0, 0, 0, 0, 0 }, /* EISA bridge */
{ SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
{ 0, INTC, INTD, INTA, INTB }, /* Slot 1 */
{ INTB, INTB, INTB, INTB, INTB }, /* VGA */
{ 0, 0, 0, 0, 0 }, /* Unused */
{ 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
{ 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
{ 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
};
static char irq_tab_rm300e[5][5] = {
/* INTA INTB INTC INTD */
{ 0, 0, 0, 0, 0 }, /* HOST bridge */
{ SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
{ 0, INTC, INTD, INTA, INTB }, /* Bridge/i960 */
{ 0, INTD, INTA, INTB, INTC }, /* Slot 1 */
{ 0, INTA, INTB, INTC, INTD }, /* Slot 2 */
};
#undef SCSI
#undef ETH
#undef INTA
#undef INTB
#undef INTC
#undef INTD
/*
* PCIT Shortcuts ...
*/
#define SCSI0 PCIT_IRQ_SCSI0
#define SCSI1 PCIT_IRQ_SCSI1
#define ETH PCIT_IRQ_ETHERNET
#define INTA PCIT_IRQ_INTA
#define INTB PCIT_IRQ_INTB
#define INTC PCIT_IRQ_INTC
#define INTD PCIT_IRQ_INTD
static char irq_tab_pcit[13][5] = {
/* INTA INTB INTC INTD */
{ 0, 0, 0, 0, 0 }, /* HOST bridge */
{ SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 }, /* SCSI */
{ SCSI1, SCSI1, SCSI1, SCSI1, SCSI1 }, /* SCSI */
{ ETH, ETH, ETH, ETH, ETH }, /* Ethernet */
{ 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */
{ 0, 0, 0, 0, 0 }, /* Unused */
{ 0, 0, 0, 0, 0 }, /* Unused */
{ 0, 0, 0, 0, 0 }, /* Unused */
{ 0, INTA, INTB, INTC, INTD }, /* Slot 1 */
{ 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
{ 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
{ 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
{ 0, INTA, INTB, INTC, INTD }, /* Slot 5 */
};
static char irq_tab_pcit_cplus[13][5] = {
	/* INTA INTB INTC INTD */
	{ 0, 0, 0, 0, 0 },	/* HOST bridge */
	{ 0, INTB, INTC, INTD, INTA },	/* PCI Slot 9 */
	{ 0, 0, 0, 0, 0 },	/* PCI-EISA */
	{ 0, 0, 0, 0, 0 },	/* Unused */
	{ 0, INTA, INTB, INTC, INTD },	/* PCI-PCI bridge */
	/* Row 5 is reached only via pcibios_map_irq()'s bus-1 wiring fixup. */
	{ 0, INTB, INTC, INTD, INTA },	/* fixup */
};
/* Detect a revision D RM300 board from the CSMSR identification bits. */
static inline int is_rm300_revd(void)
{
	volatile unsigned char *csmsr =
		(volatile unsigned char *)PCIMT_CSMSR;

	return (*csmsr & 0xa0) == 0x20;
}
/*
 * Look up the interrupt line for @dev in the routing table selected
 * by the detected SNI board type.  Returns 0 for devices without an
 * interrupt (and for unknown board types).
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	switch (sni_brd_type) {
	case SNI_BRD_PCI_TOWER_CPLUS:
		if (slot == 4) {
			/*
			 * SNI messed up interrupt wiring for onboard
			 * PCI bus 1; we need to fix this up here
			 */
			while (dev && dev->bus->number != 1)
				dev = dev->bus->self;
			if (dev && dev->devfn >= PCI_DEVFN(4, 0))
				slot = 5;
		}
		return irq_tab_pcit_cplus[slot][pin];
	case SNI_BRD_PCI_TOWER:
		return irq_tab_pcit[slot][pin];
	case SNI_BRD_PCI_MTOWER:
		/* Revision D RM300 boards use a different routing. */
		if (is_rm300_revd())
			return irq_tab_rm300d[slot][pin];
		fallthrough;
	case SNI_BRD_PCI_DESKTOP:
		return irq_tab_rm200[slot][pin];
	case SNI_BRD_PCI_MTOWER_CPLUS:
		return irq_tab_rm300e[slot][pin];
	}
	return 0;
}
/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	/* Nothing to do on SNI systems. */
	return 0;
}
| linux-master | arch/mips/pci/fixup-sni.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ralink RT288x SoC PCI register definitions
*
* Copyright (C) 2009 John Crispin <[email protected]>
* Copyright (C) 2009 Gabor Juhos <[email protected]>
*
* Parts of this file are based on Ralink's 2.6.21 BSP
*/
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <asm/mach-ralink/rt288x.h>
#define RT2880_PCI_BASE 0x00440000
#define RT288X_CPU_IRQ_PCI 4
#define RT2880_PCI_MEM_BASE 0x20000000
#define RT2880_PCI_MEM_SIZE 0x10000000
#define RT2880_PCI_IO_BASE 0x00460000
#define RT2880_PCI_IO_SIZE 0x00010000
#define RT2880_PCI_REG_PCICFG_ADDR 0x00
#define RT2880_PCI_REG_PCIMSK_ADDR 0x0c
#define RT2880_PCI_REG_BAR0SETUP_ADDR 0x10
#define RT2880_PCI_REG_IMBASEBAR0_ADDR 0x18
#define RT2880_PCI_REG_CONFIG_ADDR 0x20
#define RT2880_PCI_REG_CONFIG_DATA 0x24
#define RT2880_PCI_REG_MEMBASE 0x28
#define RT2880_PCI_REG_IOBASE 0x2c
#define RT2880_PCI_REG_ID 0x30
#define RT2880_PCI_REG_CLASS 0x34
#define RT2880_PCI_REG_SUBID 0x38
#define RT2880_PCI_REG_ARBCTL 0x80
/* MMIO mapping of the RT2880 PCI controller registers (set in probe). */
static void __iomem *rt2880_pci_base;

/* Read the 32-bit controller register at byte offset @reg. */
static u32 rt2880_pci_reg_read(u32 reg)
{
	return readl(rt2880_pci_base + reg);
}

/* Write @val to the 32-bit controller register at byte offset @reg. */
static void rt2880_pci_reg_write(u32 val, u32 reg)
{
	writel(val, rt2880_pci_base + reg);
}
/*
 * Build a configuration-cycle address: enable bit (bit 31), bus,
 * device, function, and the dword-aligned register offset.
 */
static inline u32 rt2880_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
					 unsigned int func, unsigned int where)
{
	u32 addr = 0x80000000;

	addr |= bus << 16;
	addr |= slot << 11;
	addr |= func << 8;
	addr |= where & 0xfc;

	return addr;
}
/*
 * pci_ops read: fetch @size bytes at config offset @where for @devfn
 * on @bus.  The controller exposes an address/data register pair; the
 * hardware returns a full dword, from which sub-word reads are
 * extracted by shifting and masking.
 */
static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *val)
{
	u32 address;
	u32 data;

	address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
					 PCI_FUNC(devfn), where);

	/* Latch the target address, then read through the data window. */
	rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
	data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);

	switch (size) {
	case 1:
		*val = (data >> ((where & 3) << 3)) & 0xff;
		break;
	case 2:
		*val = (data >> ((where & 3) << 3)) & 0xffff;
		break;
	case 4:
		*val = data;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * pci_ops write: store @size bytes at config offset @where for @devfn
 * on @bus.  Sub-word writes are merged into the current dword via a
 * read-modify-write through the address/data register pair.
 */
static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 val)
{
	u32 address;
	u32 data;

	address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
					 PCI_FUNC(devfn), where);

	/* Latch the target address, then read-modify-write the dword. */
	rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
	data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);

	switch (size) {
	case 1:
		data = (data & ~(0xff << ((where & 3) << 3))) |
		       (val << ((where & 3) << 3));
		break;
	case 2:
		data = (data & ~(0xffff << ((where & 3) << 3))) |
		       (val << ((where & 3) << 3));
		break;
	case 4:
		data = val;
		break;
	}

	rt2880_pci_reg_write(data, RT2880_PCI_REG_CONFIG_DATA);

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors used by the generic PCI core. */
static struct pci_ops rt2880_pci_ops = {
	.read = rt2880_pci_config_read,
	.write = rt2880_pci_config_write,
};

/* PCI memory window of the RT2880 host bridge. */
static struct resource rt2880_pci_mem_resource = {
	.name = "PCI MEM space",
	.start = RT2880_PCI_MEM_BASE,
	.end = RT2880_PCI_MEM_BASE + RT2880_PCI_MEM_SIZE - 1,
	.flags = IORESOURCE_MEM,
};

/* PCI I/O window of the RT2880 host bridge. */
static struct resource rt2880_pci_io_resource = {
	.name = "PCI IO space",
	.start = RT2880_PCI_IO_BASE,
	.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1,
	.flags = IORESOURCE_IO,
};

/* Host controller descriptor registered with the MIPS PCI core. */
static struct pci_controller rt2880_pci_controller = {
	.pci_ops = &rt2880_pci_ops,
	.mem_resource = &rt2880_pci_mem_resource,
	.io_resource = &rt2880_pci_io_resource,
};
/* Read a config dword of the host bridge itself (bus 0, dev 0, fn 0). */
static inline u32 rt2880_pci_read_u32(unsigned long reg)
{
	u32 address;
	u32 ret;

	address = rt2880_pci_get_cfgaddr(0, 0, 0, reg);

	rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
	ret = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);

	return ret;
}

/* Write a config dword of the host bridge itself (bus 0, dev 0, fn 0). */
static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
{
	u32 address;

	address = rt2880_pci_get_cfgaddr(0, 0, 0, reg);

	rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
	rt2880_pci_reg_write(val, RT2880_PCI_REG_CONFIG_DATA);
}
/*
 * Map a PCI device to its interrupt line.  Only root-bus devices are
 * routed here; slot 0 is the host bridge (no IRQ) and slot 0x11 is the
 * single external PCI slot wired to the CPU's PCI interrupt.
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	int irq = -1;

	if (dev->bus->number != 0)
		return irq;

	switch (PCI_SLOT(dev->devfn)) {
	case 0x00:
		/* host bridge: no interrupt */
		break;
	case 0x11:
		irq = RT288X_CPU_IRQ_PCI;
		break;
	default:
		/* unexpected slot on this board: hard failure */
		pr_err("%s:%s[%d] trying to alloc unknown pci irq\n",
		       __FILE__, __func__, __LINE__);
		BUG();
		break;
	}

	return irq;
}
/*
 * Map the controller registers and I/O window, program the host bridge
 * (BARs, windows, IDs, interrupt mask) and register the controller.
 * The register write order follows the hardware bring-up sequence.
 */
static int rt288x_pci_probe(struct platform_device *pdev)
{
	void __iomem *io_map_base;

	rt2880_pci_base = ioremap(RT2880_PCI_BASE, PAGE_SIZE);

	io_map_base = ioremap(RT2880_PCI_IO_BASE, RT2880_PCI_IO_SIZE);
	rt2880_pci_controller.io_map_base = (unsigned long) io_map_base;
	set_io_port_base((unsigned long) io_map_base);

	ioport_resource.start = RT2880_PCI_IO_BASE;
	ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1;

	/* reset controller configuration, then let it settle */
	rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR);
	udelay(1);

	rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL);
	rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR);
	rt2880_pci_reg_write(RT2880_PCI_MEM_BASE, RT2880_PCI_REG_MEMBASE);
	rt2880_pci_reg_write(RT2880_PCI_IO_BASE, RT2880_PCI_REG_IOBASE);
	rt2880_pci_reg_write(0x08000000, RT2880_PCI_REG_IMBASEBAR0_ADDR);
	rt2880_pci_reg_write(0x08021814, RT2880_PCI_REG_ID);
	rt2880_pci_reg_write(0x00800001, RT2880_PCI_REG_CLASS);
	rt2880_pci_reg_write(0x28801814, RT2880_PCI_REG_SUBID);
	rt2880_pci_reg_write(0x000c0000, RT2880_PCI_REG_PCIMSK_ADDR);

	/* program the bridge's own BAR0; read back to flush the write */
	rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
	(void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);

	rt2880_pci_controller.of_node = pdev->dev.of_node;

	register_pci_controller(&rt2880_pci_controller);
	return 0;
}
/* Platform hook invoked from pci_enable_device(). */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	static bool slot0_init;

	/*
	 * Nobody seems to initialize slot 0, but this platform requires it, so
	 * do it once when some other slot is being enabled. The PCI subsystem
	 * should configure other slots properly, so no need to do anything
	 * special for those.
	 */
	if (!slot0_init && dev->bus->number == 0) {
		u16 cmd;
		u32 bar0;

		slot0_init = true;

		/* program slot 0's BAR0 and enable bus mastering + I/O + MEM */
		pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
					   0x08000000);
		pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
					  &bar0);

		pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd);
		cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
		pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd);
	}

	return 0;
}
/* Device-tree match table for the RT288x PCI host controller. */
static const struct of_device_id rt288x_pci_match[] = {
	{ .compatible = "ralink,rt288x-pci" },
	{},
};

static struct platform_driver rt288x_pci_driver = {
	.probe = rt288x_pci_probe,
	.driver = {
		.name = "rt288x-pci",
		.of_match_table = rt288x_pci_match,
	},
};
/*
 * Register the rt288x PCI platform driver at arch_initcall time so the
 * bus is set up before ordinary device initcalls run.
 */
int __init pcibios_init(void)
{
	int ret = platform_driver_register(&rt288x_pci_driver);

	if (ret)
		/* was pr_info and lacked a newline; this is an error path */
		pr_err("rt288x-pci: Error registering platform driver!\n");

	return ret;
}

arch_initcall(pcibios_init);
| linux-master | arch/mips/pci/pci-rt2880.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2012 John Crispin <[email protected]>
*/
#include <linux/of_irq.h>
#include <linux/of_pci.h>
/* Optional platform hooks, installed by Lantiq SoC specific setup code. */
int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL;
int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL;

/*
 * Called from pci_enable_device(); dispatches to whichever platform
 * hook is installed (the arch hook takes precedence).
 */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	if (ltq_pci_plat_arch_init)
		return ltq_pci_plat_arch_init(dev);

	if (ltq_pci_plat_dev_init)
		return ltq_pci_plat_dev_init(dev);

	return 0;
}
/* IRQ routing comes straight from the device tree on Lantiq. */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return of_irq_parse_and_map_pci(dev, slot, pin);
}
| linux-master | arch/mips/pci/fixup-lantiq.c |
/*
* BRIEF MODULE DESCRIPTION
* pci_ops for IDT EB434 board
*
* Copyright 2004 IDT Inc. ([email protected])
* Copyright 2006 Felix Fietkau <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/pci.h>
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1

/* Latch the configuration address into the RC32434 pcicfga register. */
#define PCI_CFG_SET(bus, slot, func, off) \
	(rc32434_pci->pcicfga = (0x80000000 | \
				((bus) << 16) | ((slot)<<11) | \
				((func)<<8) | (off)))

/*
 * Perform one 32-bit configuration access (read or write) through the
 * pcicfga/pcicfgd register pair.  Always returns 0 (success).
 */
static inline int config_access(unsigned char access_type,
				struct pci_bus *bus, unsigned int devfn,
				unsigned char where, u32 *data)
{
	unsigned int slot = PCI_SLOT(devfn);
	u8 func = PCI_FUNC(devfn);

	/* Setup address */
	PCI_CFG_SET(bus->number, slot, func, where);
	rc32434_sync();

	if (access_type == PCI_ACCESS_WRITE)
		rc32434_pci->pcicfgd = *data;
	else
		*data = rc32434_pci->pcicfgd;

	rc32434_sync();

	return 0;
}
/*
* We can't address 8 and 16 bit words directly. Instead we have to
* read/write a 32bit word and mask/modify the data we actually want.
*/
/* Read one byte by extracting it from the containing config dword. */
static int read_config_byte(struct pci_bus *bus, unsigned int devfn,
			    int where, u8 *val)
{
	unsigned int shift = (where & 3) << 3;
	u32 data;
	int ret;

	ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);
	*val = (data >> shift) & 0xff;
	return ret;
}
/* Read one halfword by extracting it from the containing config dword. */
static int read_config_word(struct pci_bus *bus, unsigned int devfn,
			    int where, u16 *val)
{
	unsigned int shift = (where & 3) << 3;
	u32 data;
	int ret;

	ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);
	*val = (data >> shift) & 0xffff;
	return ret;
}
/*
 * Read a full configuration dword.
 *
 * Some devices answer late during bus scan, so vendor-ID reads that
 * return an obviously invalid value are retried with increasing delay.
 */
static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
			     int where, u32 *val)
{
	int ret;
	int delay = 1;

	/*
	 * Don't scan too far, else there will be errors with plugged in
	 * daughterboard (rb564).
	 */
	if (bus->number == 0 && PCI_SLOT(devfn) > 21) {
		*val = 0xffffffff;	/* was left uninitialized; report "no device" */
		return 0;
	}

retry:
	ret = config_access(PCI_ACCESS_READ, bus, devfn, where, val);

	/*
	 * Certain devices react delayed at device scan time, this
	 * gives them time to settle.
	 *
	 * Fix: the retry condition used to test 'ret', which
	 * config_access() always returns as 0, so it never fired.
	 * The value actually read (*val) is what must be checked.
	 */
	if (where == PCI_VENDOR_ID) {
		if (*val == 0xffffffff || *val == 0x00000000 ||
		    *val == 0x0000ffff || *val == 0xffff0000) {
			if (delay > 4)
				return 0;
			delay *= 2;
			msleep(delay);
			goto retry;
		}
	}

	return ret;
}
/* Write one byte via read-modify-write of the containing config dword. */
static int
write_config_byte(struct pci_bus *bus, unsigned int devfn, int where,
		  u8 val)
{
	u32 data = 0;

	if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return -1;

	data = (data & ~(0xff << ((where & 3) << 3))) |
	       (val << ((where & 3) << 3));

	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}
/* Write one halfword via read-modify-write of the containing config dword. */
static int
write_config_word(struct pci_bus *bus, unsigned int devfn, int where,
		  u16 val)
{
	u32 data = 0;

	if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return -1;

	data = (data & ~(0xffff << ((where & 3) << 3))) |
	       (val << ((where & 3) << 3));

	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}
/* Write a full configuration dword directly. */
static int
write_config_dword(struct pci_bus *bus, unsigned int devfn, int where,
		   u32 val)
{
	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}
/* Dispatch a config read to the helper matching the access size. */
static int pci_config_read(struct pci_bus *bus, unsigned int devfn,
			   int where, int size, u32 *val)
{
	if (size == 1)
		return read_config_byte(bus, devfn, where, (u8 *) val);
	if (size == 2)
		return read_config_word(bus, devfn, where, (u16 *) val);
	return read_config_dword(bus, devfn, where, val);
}
/* Dispatch a config write to the helper matching the access size. */
static int pci_config_write(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 val)
{
	if (size == 1)
		return write_config_byte(bus, devfn, where, (u8) val);
	if (size == 2)
		return write_config_word(bus, devfn, where, (u16) val);
	return write_config_dword(bus, devfn, where, val);
}
/* Config-space accessors exported to the MIPS PCI core. */
struct pci_ops rc32434_pci_ops = {
	.read = pci_config_read,
	.write = pci_config_write,
};
| linux-master | arch/mips/pci/ops-rc32434.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/io.h>
#include "pci-bcm63xx.h"
/*
* swizzle 32bits data to return only the needed part
*/
/*
 * Extract the byte/halfword/word of interest from a full 32-bit value
 * read from configuration space; any other size yields 0.
 */
static int postprocess_read(u32 data, int where, unsigned int size)
{
	unsigned int shift = (where & 3) << 3;

	if (size == 1)
		return (data >> shift) & 0xff;
	if (size == 2)
		return (data >> shift) & 0xffff;
	if (size == 4)
		return data;
	return 0;
}
/*
 * Merge a byte/halfword/word into the full 32-bit value that will be
 * written back to configuration space; any other size yields 0.
 */
static int preprocess_write(u32 orig_data, u32 val, int where,
			    unsigned int size)
{
	unsigned int shift = (where & 3) << 3;

	if (size == 1)
		return (orig_data & ~(0xff << shift)) | (val << shift);
	if (size == 2)
		return (orig_data & ~(0xffff << shift)) | (val << shift);
	if (size == 4)
		return val;
	return 0;
}
/*
* setup hardware for a configuration cycle with given parameters
*/
/*
 * Program the MPI L2PCFG register for a configuration cycle with the
 * given parameters.  Returns 0 on success, 1 if the target coordinates
 * do not fit in the register fields.
 */
static int bcm63xx_setup_cfg_access(int type, unsigned int busn,
				    unsigned int devfn, int where)
{
	unsigned int slot, func, reg;
	u32 val;

	slot = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where >> 2;

	/* sanity check */
	if (slot > (MPI_L2PCFG_DEVNUM_MASK >> MPI_L2PCFG_DEVNUM_SHIFT))
		return 1;

	if (func > (MPI_L2PCFG_FUNC_MASK >> MPI_L2PCFG_FUNC_SHIFT))
		return 1;

	if (reg > (MPI_L2PCFG_REG_MASK >> MPI_L2PCFG_REG_SHIFT))
		return 1;

	/* ok, setup config access */
	val = (reg << MPI_L2PCFG_REG_SHIFT);
	val |= (func << MPI_L2PCFG_FUNC_SHIFT);
	val |= (slot << MPI_L2PCFG_DEVNUM_SHIFT);
	val |= MPI_L2PCFG_CFG_USEREG_MASK;
	val |= MPI_L2PCFG_CFG_SEL_MASK;
	/* type 0 cycle for local bus, type 1 cycle for anything else */
	if (type != 0) {
		/* FIXME: how to specify bus ??? */
		val |= (1 << MPI_L2PCFG_CFG_TYPE_SHIFT);
	}
	bcm_mpi_writel(val, MPI_L2PCFG_REG);

	return 0;
}
/*
 * Execute a configuration read: set up the cycle, read the dword from
 * the (temporarily repurposed) I/O space window, then extract the
 * requested sub-word.
 */
static int bcm63xx_do_cfg_read(int type, unsigned int busn,
				unsigned int devfn, int where, int size,
				u32 *val)
{
	u32 data;

	/* two phase cycle, first we write address, then read data at
	 * another location, caller already has a spinlock so no need
	 * to add one here */
	if (bcm63xx_setup_cfg_access(type, busn, devfn, where))
		return PCIBIOS_DEVICE_NOT_FOUND;
	iob();
	data = le32_to_cpu(__raw_readl(pci_iospace_start));
	/* restore IO space normal behaviour */
	bcm_mpi_writel(0, MPI_L2PCFG_REG);

	*val = postprocess_read(data, where, size);

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Execute a configuration write: read the current dword, merge in the
 * new sub-word and write it back through the I/O space window.
 */
static int bcm63xx_do_cfg_write(int type, unsigned int busn,
				 unsigned int devfn, int where, int size,
				 u32 val)
{
	u32 data;

	/* two phase cycle, first we write address, then write data to
	 * another location, caller already has a spinlock so no need
	 * to add one here */
	if (bcm63xx_setup_cfg_access(type, busn, devfn, where))
		return PCIBIOS_DEVICE_NOT_FOUND;
	iob();

	data = le32_to_cpu(__raw_readl(pci_iospace_start));
	data = preprocess_write(data, val, where, size);

	__raw_writel(cpu_to_le32(data), pci_iospace_start);
	wmb();
	/* no way to know the access is done, we have to wait */
	udelay(500);
	/* restore IO space normal behaviour */
	bcm_mpi_writel(0, MPI_L2PCFG_REG);

	return PCIBIOS_SUCCESSFUL;
}
/*
 * PCI config read entry point.  Root-bus accesses use a type 0 cycle;
 * the cardbus IDSEL slot is hidden from the normal PCI bus.
 */
static int bcm63xx_pci_read(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 *val)
{
	int type = bus->parent ? 1 : 0;

	if (!type && PCI_SLOT(devfn) == CARDBUS_PCI_IDSEL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return bcm63xx_do_cfg_read(type, bus->number, devfn, where, size, val);
}
/*
 * PCI config write entry point.  Mirrors bcm63xx_pci_read(): type 0 on
 * the root bus, and the cardbus IDSEL slot is not reachable here.
 */
static int bcm63xx_pci_write(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 val)
{
	int type = bus->parent ? 1 : 0;

	if (!type && PCI_SLOT(devfn) == CARDBUS_PCI_IDSEL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return bcm63xx_do_cfg_write(type, bus->number, devfn, where, size, val);
}
/* Config-space accessors for the regular PCI bus. */
struct pci_ops bcm63xx_pci_ops = {
	.read = bcm63xx_pci_read,
	.write = bcm63xx_pci_write
};
#ifdef CONFIG_CARDBUS
/*
* emulate configuration read access on a cardbus bridge
*/
#define FAKE_CB_BRIDGE_SLOT 0x1e

/* Root-bus number on which the fake bridge was last seen (-1 until seen). */
static int fake_cb_bridge_bus_number = -1;

/* Software-emulated configuration registers of the fake cardbus bridge. */
static struct {
	u16 pci_command;
	u8 cb_latency;
	u8 subordinate_busn;
	u8 cardbus_busn;
	u8 pci_busn;
	int bus_assigned;
	u16 bridge_control;

	u32 mem_base0;
	u32 mem_limit0;
	u32 mem_base1;
	u32 mem_limit1;

	u32 io_base0;
	u32 io_limit0;
	u32 io_base1;
	u32 io_limit1;
} fake_cb_bridge_regs;
/*
 * Emulate a configuration read from the fake cardbus bridge by
 * synthesizing each register dword from the software state above.
 */
static int fake_cb_bridge_read(int where, int size, u32 *val)
{
	unsigned int reg;
	u32 data;

	data = 0;
	reg = where >> 2;
	switch (reg) {
	case (PCI_VENDOR_ID >> 2):
	case (PCI_CB_SUBSYSTEM_VENDOR_ID >> 2):
		/* create dummy vendor/device id from our cpu id */
		data = (bcm63xx_get_cpu_id() << 16) | PCI_VENDOR_ID_BROADCOM;
		break;

	case (PCI_COMMAND >> 2):
		data = (PCI_STATUS_DEVSEL_SLOW << 16);
		data |= fake_cb_bridge_regs.pci_command;
		break;

	case (PCI_CLASS_REVISION >> 2):
		data = (PCI_CLASS_BRIDGE_CARDBUS << 16);
		break;

	case (PCI_CACHE_LINE_SIZE >> 2):
		/* header type lives in bits 23:16 of this dword */
		data = (PCI_HEADER_TYPE_CARDBUS << 16);
		break;

	case (PCI_INTERRUPT_LINE >> 2):
		/* bridge control */
		data = (fake_cb_bridge_regs.bridge_control << 16);
		/* pin:intA line:0xff */
		data |= (0x1 << 8) | 0xff;
		break;

	case (PCI_CB_PRIMARY_BUS >> 2):
		data = (fake_cb_bridge_regs.cb_latency << 24);
		data |= (fake_cb_bridge_regs.subordinate_busn << 16);
		data |= (fake_cb_bridge_regs.cardbus_busn << 8);
		data |= fake_cb_bridge_regs.pci_busn;
		break;

	case (PCI_CB_MEMORY_BASE_0 >> 2):
		data = fake_cb_bridge_regs.mem_base0;
		break;

	case (PCI_CB_MEMORY_LIMIT_0 >> 2):
		data = fake_cb_bridge_regs.mem_limit0;
		break;

	case (PCI_CB_MEMORY_BASE_1 >> 2):
		data = fake_cb_bridge_regs.mem_base1;
		break;

	case (PCI_CB_MEMORY_LIMIT_1 >> 2):
		data = fake_cb_bridge_regs.mem_limit1;
		break;

	case (PCI_CB_IO_BASE_0 >> 2):
		/* | 1 for 32bits io support */
		data = fake_cb_bridge_regs.io_base0 | 0x1;
		break;

	case (PCI_CB_IO_LIMIT_0 >> 2):
		data = fake_cb_bridge_regs.io_limit0;
		break;

	case (PCI_CB_IO_BASE_1 >> 2):
		/* | 1 for 32bits io support */
		data = fake_cb_bridge_regs.io_base1 | 0x1;
		break;

	case (PCI_CB_IO_LIMIT_1 >> 2):
		data = fake_cb_bridge_regs.io_limit1;
		break;
	}

	*val = postprocess_read(data, where, size);
	return PCIBIOS_SUCCESSFUL;
}
/*
* emulate configuration write access on a cardbus bridge
*/
/*
 * Emulate a configuration write to the fake cardbus bridge: read the
 * current register dword, merge the new value, then update the
 * corresponding software state field(s).
 */
static int fake_cb_bridge_write(int where, int size, u32 val)
{
	unsigned int reg;
	u32 data, tmp;
	int ret;

	ret = fake_cb_bridge_read((where & ~0x3), 4, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	data = preprocess_write(data, val, where, size);

	reg = where >> 2;
	switch (reg) {
	case (PCI_COMMAND >> 2):
		fake_cb_bridge_regs.pci_command = (data & 0xffff);
		break;

	case (PCI_CB_PRIMARY_BUS >> 2):
		fake_cb_bridge_regs.cb_latency = (data >> 24) & 0xff;
		fake_cb_bridge_regs.subordinate_busn = (data >> 16) & 0xff;
		fake_cb_bridge_regs.cardbus_busn = (data >> 8) & 0xff;
		fake_cb_bridge_regs.pci_busn = data & 0xff;
		/* once a cardbus bus number is assigned, config cycles
		 * for the cardbus device can be routed */
		if (fake_cb_bridge_regs.cardbus_busn)
			fake_cb_bridge_regs.bus_assigned = 1;
		break;

	case (PCI_INTERRUPT_LINE >> 2):
		tmp = (data >> 16) & 0xffff;
		/* disable memory prefetch support */
		tmp &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
		tmp &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1;
		fake_cb_bridge_regs.bridge_control = tmp;
		break;

	case (PCI_CB_MEMORY_BASE_0 >> 2):
		fake_cb_bridge_regs.mem_base0 = data;
		break;

	case (PCI_CB_MEMORY_LIMIT_0 >> 2):
		fake_cb_bridge_regs.mem_limit0 = data;
		break;

	case (PCI_CB_MEMORY_BASE_1 >> 2):
		fake_cb_bridge_regs.mem_base1 = data;
		break;

	case (PCI_CB_MEMORY_LIMIT_1 >> 2):
		fake_cb_bridge_regs.mem_limit1 = data;
		break;

	case (PCI_CB_IO_BASE_0 >> 2):
		fake_cb_bridge_regs.io_base0 = data;
		break;

	case (PCI_CB_IO_LIMIT_0 >> 2):
		fake_cb_bridge_regs.io_limit0 = data;
		break;

	case (PCI_CB_IO_BASE_1 >> 2):
		fake_cb_bridge_regs.io_base1 = data;
		break;

	case (PCI_CB_IO_LIMIT_1 >> 2):
		fake_cb_bridge_regs.io_limit1 = data;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Cardbus config read: emulate the bridge itself, or forward to the
 * single device behind it via a type 0 cycle on the primary bus.
 */
static int bcm63xx_cb_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	/* snoop access to slot 0x1e on root bus, we fake a cardbus
	 * bridge at this location */
	if (!bus->parent && PCI_SLOT(devfn) == FAKE_CB_BRIDGE_SLOT) {
		fake_cb_bridge_bus_number = bus->number;
		return fake_cb_bridge_read(where, size, val);
	}

	/* a configuration cycle for the device behind the cardbus
	 * bridge is actually done as a type 0 cycle on the primary
	 * bus. This means that only one device can be on the cardbus
	 * bus */
	if (fake_cb_bridge_regs.bus_assigned &&
	    bus->number == fake_cb_bridge_regs.cardbus_busn &&
	    PCI_SLOT(devfn) == 0)
		return bcm63xx_do_cfg_read(0, 0,
					   PCI_DEVFN(CARDBUS_PCI_IDSEL, 0),
					   where, size, val);

	return PCIBIOS_DEVICE_NOT_FOUND;
}
/* Cardbus config write; mirrors the routing in bcm63xx_cb_read(). */
static int bcm63xx_cb_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	if (!bus->parent && PCI_SLOT(devfn) == FAKE_CB_BRIDGE_SLOT) {
		fake_cb_bridge_bus_number = bus->number;
		return fake_cb_bridge_write(where, size, val);
	}

	if (fake_cb_bridge_regs.bus_assigned &&
	    bus->number == fake_cb_bridge_regs.cardbus_busn &&
	    PCI_SLOT(devfn) == 0)
		return bcm63xx_do_cfg_write(0, 0,
					    PCI_DEVFN(CARDBUS_PCI_IDSEL, 0),
					    where, size, val);

	return PCIBIOS_DEVICE_NOT_FOUND;
}
/* Config-space accessors for the emulated cardbus bus. */
struct pci_ops bcm63xx_cb_ops = {
	.read = bcm63xx_cb_read,
	.write = bcm63xx_cb_write,
};
/*
* only one IO window, so it cannot be shared by PCI and cardbus, use
* fixup to choose and detect unhandled configuration
*/
/*
 * The hardware has a single IO window, so it cannot be shared between
 * PCI and cardbus.  Assign it to whichever side first needs IO and
 * complain if the other side needs it too.
 */
static void bcm63xx_fixup(struct pci_dev *dev)
{
	static int io_window = -1;
	int found, new_io_window;
	struct resource *r;
	u32 val;

	/* look for any io resource */
	found = 0;
	pci_dev_for_each_resource(dev, r) {
		if (resource_type(r) == IORESOURCE_IO) {
			found = 1;
			break;
		}
	}

	if (!found)
		return;

	/* skip our fake bus with only cardbus bridge on it */
	if (dev->bus->number == fake_cb_bridge_bus_number)
		return;

	/* find on which bus the device is */
	if (fake_cb_bridge_regs.bus_assigned &&
	    dev->bus->number == fake_cb_bridge_regs.cardbus_busn &&
	    PCI_SLOT(dev->devfn) == 0)
		new_io_window = 1;
	else
		new_io_window = 0;

	if (new_io_window == io_window)
		return;

	if (io_window != -1) {
		printk(KERN_ERR "bcm63xx: both PCI and cardbus devices "
		       "need IO, which hardware cannot do\n");
		return;
	}

	printk(KERN_INFO "bcm63xx: PCI IO window assigned to %s\n",
	       (new_io_window == 0) ? "PCI" : "cardbus");

	val = bcm_mpi_readl(MPI_L2PIOREMAP_REG);
	/*
	 * Fix: io_window is guaranteed to still be -1 (non-zero) here,
	 * so testing it always routed the window to cardbus.  The
	 * decision must be based on the newly computed owner.
	 */
	if (new_io_window)
		val |= MPI_L2PREMAP_IS_CARDBUS_MASK;
	else
		val &= ~MPI_L2PREMAP_IS_CARDBUS_MASK;
	bcm_mpi_writel(val, MPI_L2PIOREMAP_REG);

	io_window = new_io_window;
}

DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, bcm63xx_fixup);
#endif
/*
 * Only slot 0 exists on the PCIe buses; accesses to the device bus are
 * additionally gated on the physical link being up.
 */
static int bcm63xx_pcie_can_access(struct pci_bus *bus, int devfn)
{
	switch (bus->number) {
	case PCIE_BUS_BRIDGE:
		return PCI_SLOT(devfn) == 0;
	case PCIE_BUS_DEVICE:
		if (PCI_SLOT(devfn) == 0)
			return bcm_pcie_readl(PCIE_DLSTATUS_REG)
					& DLSTATUS_PHYLINKUP;
		fallthrough;
	default:
		return false;
	}
}
/* PCIe config read via memory-mapped configuration registers. */
static int bcm63xx_pcie_read(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 *val)
{
	u32 data;
	u32 reg = where & ~3;	/* dword-aligned register offset */

	if (!bcm63xx_pcie_can_access(bus, devfn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* the device's config space sits at a fixed offset in the window */
	if (bus->number == PCIE_BUS_DEVICE)
		reg += PCIE_DEVICE_OFFSET;

	data = bcm_pcie_readl(reg);

	*val = postprocess_read(data, where, size);

	return PCIBIOS_SUCCESSFUL;
}
/* PCIe config write; read-modify-write of the aligned register dword. */
static int bcm63xx_pcie_write(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 val)
{
	u32 data;
	u32 reg = where & ~3;

	if (!bcm63xx_pcie_can_access(bus, devfn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == PCIE_BUS_DEVICE)
		reg += PCIE_DEVICE_OFFSET;


	data = bcm_pcie_readl(reg);

	data = preprocess_write(data, val, where, size);
	bcm_pcie_writel(data, reg);

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors for the PCIe port. */
struct pci_ops bcm63xx_pcie_ops = {
	.read = bcm63xx_pcie_read,
	.write = bcm63xx_pcie_write
};
| linux-master | arch/mips/pci/ops-bcm63xx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2004 ICT CAS
* Author: Li xiaoyu, ICT CAS
* [email protected]
*
* Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <loongson.h>
/* South bridge slot number is set by the pci probe process */
static u8 sb_slot = 5;

/*
 * Route interrupts: VIA 686B south bridge functions get fixed legacy
 * IRQs; every other device uses the Loongson CPU interrupt pins.
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	int irq = 0;

	if (slot == sb_slot) {
		switch (PCI_FUNC(dev->devfn)) {
		case 2:
			irq = 10;	/* USB */
			break;
		case 3:
			irq = 11;	/* USB */
			break;
		case 5:
			irq = 9;	/* AC97/modem */
			break;
		}
	} else {
		irq = LOONGSON_IRQ_BASE + 25 + pin;
	}
	return irq;
}
/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	/* nothing needed on this board */
	return 0;
}
/* Enable the downstream ports and clock source of the NEC USB chip. */
static void loongson2e_nec_fixup(struct pci_dev *pdev)
{
	unsigned int val;

	/* Configures port 1, 2, 3, 4 to be validate*/
	pci_read_config_dword(pdev, 0xe0, &val);
	pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x4);

	/* System clock is 48-MHz Oscillator. */
	pci_write_config_dword(pdev, 0xe4, 1 << 5);
}
/*
 * Set up the VIA 686B ISA bridge (function 0): timing, buffering, IRQ
 * trigger modes and PIRQ routing.  Also records the south bridge slot
 * number used by pcibios_map_irq().
 */
static void loongson2e_686b_func0_fixup(struct pci_dev *pdev)
{
	unsigned char c;

	sb_slot = PCI_SLOT(pdev->devfn);

	printk(KERN_INFO "via686b fix: ISA bridge\n");

	/*  Enable I/O Recovery time */
	pci_write_config_byte(pdev, 0x40, 0x08);

	/*  Enable ISA refresh */
	pci_write_config_byte(pdev, 0x41, 0x01);

	/*  disable ISA line buffer */
	pci_write_config_byte(pdev, 0x45, 0x00);

	/*  Gate INTR, and flush line buffer */
	pci_write_config_byte(pdev, 0x46, 0xe0);

	/*  Disable PCI Delay Transaction, Enable EISA ports 4D0/4D1. */
	/* pci_write_config_byte(pdev, 0x47, 0x20); */

	/*
	 * enable PCI Delay Transaction, Enable EISA ports 4D0/4D1.
	 * enable time-out timer
	 */
	pci_write_config_byte(pdev, 0x47, 0xe6);

	/*
	 * enable level trigger on pci irqs: 9,10,11,13
	 * important! without this PCI interrupts won't work
	 */
	outb(0x2e, 0x4d1);

	/*  512 K PCI Decode */
	pci_write_config_byte(pdev, 0x48, 0x01);

	/*  Wait for PGNT before grant to ISA Master/DMA */
	pci_write_config_byte(pdev, 0x4a, 0x84);

	/*
	 * Plug'n'Play
	 *
	 * Parallel DRQ 3, Floppy DRQ 2 (default)
	 */
	pci_write_config_byte(pdev, 0x50, 0x0e);

	/*
	 * IRQ Routing for Floppy and Parallel port
	 *
	 * IRQ 6 for floppy, IRQ 7 for parallel port
	 */
	pci_write_config_byte(pdev, 0x51, 0x76);

	/* IRQ Routing for serial ports (take IRQ 3 and 4) */
	pci_write_config_byte(pdev, 0x52, 0x34);

	/*  All IRQ's level triggered. */
	pci_write_config_byte(pdev, 0x54, 0x00);

	/* route PIRQA-D irq */
	pci_write_config_byte(pdev, 0x55, 0x90);	/* bit 7-4, PIRQA */
	pci_write_config_byte(pdev, 0x56, 0xba);	/* bit 7-4, PIRQC; */
							/* 3-0, PIRQB */
	pci_write_config_byte(pdev, 0x57, 0xd0);	/* bit 7-4, PIRQD */

	/* enable function 5/6, audio/modem */
	pci_read_config_byte(pdev, 0x85, &c);
	c &= ~(0x3 << 2);
	pci_write_config_byte(pdev, 0x85, c);

	printk(KERN_INFO"via686b fix: ISA bridge done\n");
}
/* Set up the VIA 686B IDE controller (function 1) in legacy mode. */
static void loongson2e_686b_func1_fixup(struct pci_dev *pdev)
{
	printk(KERN_INFO"via686b fix: IDE\n");

	/* Modify IDE controller setup */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 48);
	pci_write_config_byte(pdev, PCI_COMMAND,
			      PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
			      PCI_COMMAND_MASTER);
	pci_write_config_byte(pdev, 0x40, 0x0b);
	/* legacy mode */
	pci_write_config_byte(pdev, 0x42, 0x09);

#if 1/* play safe, otherwise we may see notebook's usb keyboard lockup */
	/* disable read prefetch/write post buffers */
	pci_write_config_byte(pdev, 0x41, 0x02);

	/* use 3/4 as fifo thresh hold  */
	pci_write_config_byte(pdev, 0x43, 0x0a);
	pci_write_config_byte(pdev, 0x44, 0x00);

	pci_write_config_byte(pdev, 0x45, 0x00);
#else
	pci_write_config_byte(pdev, 0x41, 0xc2);
	pci_write_config_byte(pdev, 0x43, 0x35);
	pci_write_config_byte(pdev, 0x44, 0x1c);

	pci_write_config_byte(pdev, 0x45, 0x10);
#endif

	printk(KERN_INFO"via686b fix: IDE done\n");
}
/* Route the first USB controller (function 2) to IRQ 10. */
static void loongson2e_686b_func2_fixup(struct pci_dev *pdev)
{
	/* irq routing */
	pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 10);
}

/* Route the second USB controller (function 3) to IRQ 11. */
static void loongson2e_686b_func3_fixup(struct pci_dev *pdev)
{
	/* irq routing */
	pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 11);
}
/* Set up the VIA 686B AC97 audio/modem (function 5): IRQ, link, codec ID. */
static void loongson2e_686b_func5_fixup(struct pci_dev *pdev)
{
	unsigned int val;
	unsigned char c;

	/* enable IO */
	pci_write_config_byte(pdev, PCI_COMMAND,
			      PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
			      PCI_COMMAND_MASTER);
	pci_read_config_dword(pdev, 0x4, &val);
	pci_write_config_dword(pdev, 0x4, val | 1);

	/* route ac97 IRQ */
	pci_write_config_byte(pdev, 0x3c, 9);

	pci_read_config_byte(pdev, 0x8, &c);

	/* link control: enable link & SGD PCM output */
	pci_write_config_byte(pdev, 0x41, 0xcc);

	/* disable game port, FM, midi, sb, enable write to reg2c-2f */
	pci_write_config_byte(pdev, 0x42, 0x20);

	/* we are using Avance logic codec */
	pci_write_config_word(pdev, 0x2c, 0x1005);
	pci_write_config_word(pdev, 0x2e, 0x4710);
	pci_read_config_dword(pdev, 0x2c, &val);

	/* lock reg2c-2f again after programming the subsystem ID */
	pci_write_config_byte(pdev, 0x42, 0x0);
}
/* Register the VIA 686B per-function fixups and the NEC USB fixup. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686,
			 loongson2e_686b_func0_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1,
			 loongson2e_686b_func1_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2,
			 loongson2e_686b_func2_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3,
			 loongson2e_686b_func3_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5,
			 loongson2e_686b_func5_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
			 loongson2e_nec_fixup);
| linux-master | arch/mips/pci/fixup-fuloong2e.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <[email protected]>
* Maciej W. Rozycki <[email protected]>
*
* Copyright (C) 2004 by Ralf Baechle ([email protected])
*
* MIPS boards specific PCI support.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/gt64120.h>
#include <asm/mips-cps.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/bonito64.h>
#include <asm/mips-boards/msc01_pci.h>
/*
 * Resource templates and controller descriptors for the three possible
 * Malta system controllers; ranges are filled in at init time from the
 * controller's registers.  (Mangled '&gt' identifiers restored.)
 */
static struct resource bonito64_mem_resource = {
	.name = "Bonito PCI MEM",
	.flags = IORESOURCE_MEM,
};

static struct resource bonito64_io_resource = {
	.name = "Bonito PCI I/O",
	.start = 0x00000000UL,
	.end = 0x000fffffUL,
	.flags = IORESOURCE_IO,
};

static struct resource gt64120_mem_resource = {
	.name = "GT-64120 PCI MEM",
	.flags = IORESOURCE_MEM,
};

static struct resource gt64120_io_resource = {
	.name = "GT-64120 PCI I/O",
	.flags = IORESOURCE_IO,
};

static struct resource msc_mem_resource = {
	.name = "MSC PCI MEM",
	.flags = IORESOURCE_MEM,
};

static struct resource msc_io_resource = {
	.name = "MSC PCI I/O",
	.flags = IORESOURCE_IO,
};

extern struct pci_ops bonito64_pci_ops;
extern struct pci_ops gt64xxx_pci0_ops;
extern struct pci_ops msc_pci_ops;

static struct pci_controller bonito64_controller = {
	.pci_ops = &bonito64_pci_ops,
	.io_resource = &bonito64_io_resource,
	.mem_resource = &bonito64_mem_resource,
	.io_offset = 0x00000000UL,
};

static struct pci_controller gt64120_controller = {
	.pci_ops = &gt64xxx_pci0_ops,
	.io_resource = &gt64120_io_resource,
	.mem_resource = &gt64120_mem_resource,
};

static struct pci_controller msc_controller = {
	.pci_ops = &msc_pci_ops,
	.io_resource = &msc_io_resource,
	.mem_resource = &msc_mem_resource,
};
/*
 * Detect the board's system controller, derive its PCI memory and I/O
 * windows from the controller registers and register the matching host
 * controller.  (Mangled '&gt' identifier restored.)
 */
void __init mips_pcibios_init(void)
{
	struct pci_controller *controller;
	resource_size_t start, end, map, start1, end1, map1, map2, map3, mask;

	switch (mips_revision_sconid) {
	case MIPS_REVISION_SCON_GT64120:
		/*
		 * Due to a bug in the Galileo system controller, we need
		 * to setup the PCI BAR for the Galileo internal registers.
		 * This should be done in the bios/bootprom and will be
		 * fixed in a later revision of YAMON (the MIPS boards
		 * boot prom).
		 */
		GT_WRITE(GT_PCI0_CFGADDR_OFS,
			 (0 << GT_PCI0_CFGADDR_BUSNUM_SHF) | /* Local bus */
			 (0 << GT_PCI0_CFGADDR_DEVNUM_SHF) | /* GT64120 dev */
			 (0 << GT_PCI0_CFGADDR_FUNCTNUM_SHF) | /* Function 0*/
			 ((0x20/4) << GT_PCI0_CFGADDR_REGNUM_SHF) | /* BAR 4*/
			 GT_PCI0_CFGADDR_CONFIGEN_BIT);

		/* Perform the write */
		GT_WRITE(GT_PCI0_CFGDATA_OFS, CPHYSADDR(MIPS_GT_BASE));

		/* Set up resource ranges from the controller's registers.  */
		start = GT_READ(GT_PCI0M0LD_OFS);
		end = GT_READ(GT_PCI0M0HD_OFS);
		map = GT_READ(GT_PCI0M0REMAP_OFS);
		end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK);
		start1 = GT_READ(GT_PCI0M1LD_OFS);
		end1 = GT_READ(GT_PCI0M1HD_OFS);
		map1 = GT_READ(GT_PCI0M1REMAP_OFS);
		end1 = (end1 & GT_PCI_HD_MSK) | (start1 & ~GT_PCI_HD_MSK);
		/* Cannot support multiple windows, use the wider.  */
		if (end1 - start1 > end - start) {
			start = start1;
			end = end1;
			map = map1;
		}
		mask = ~(start ^ end);
		/* We don't support remapping with a discontiguous mask.  */
		BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
		       mask != ~((mask & -mask) - 1));
		gt64120_mem_resource.start = start;
		gt64120_mem_resource.end = end;
		gt64120_controller.mem_offset = (start & mask) - (map & mask);
		/* Addresses are 36-bit, so do shifts in the destinations.  */
		gt64120_mem_resource.start <<= GT_PCI_DCRM_SHF;
		gt64120_mem_resource.end <<= GT_PCI_DCRM_SHF;
		gt64120_mem_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1;
		gt64120_controller.mem_offset <<= GT_PCI_DCRM_SHF;

		start = GT_READ(GT_PCI0IOLD_OFS);
		end = GT_READ(GT_PCI0IOHD_OFS);
		map = GT_READ(GT_PCI0IOREMAP_OFS);
		end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK);
		mask = ~(start ^ end);
		/* We don't support remapping with a discontiguous mask.  */
		BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
		       mask != ~((mask & -mask) - 1));
		gt64120_io_resource.start = map & mask;
		gt64120_io_resource.end = (map & mask) | ~mask;
		gt64120_controller.io_offset = 0;
		/* Addresses are 36-bit, so do shifts in the destinations.  */
		gt64120_io_resource.start <<= GT_PCI_DCRM_SHF;
		gt64120_io_resource.end <<= GT_PCI_DCRM_SHF;
		gt64120_io_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1;

		controller = &gt64120_controller;
		break;

	case MIPS_REVISION_SCON_BONITO:
		/* Set up resource ranges from the controller's registers.  */
		map = BONITO_PCIMAP;
		map1 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO0) >>
		       BONITO_PCIMAP_PCIMAP_LO0_SHIFT;
		map2 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO1) >>
		       BONITO_PCIMAP_PCIMAP_LO1_SHIFT;
		map3 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO2) >>
		       BONITO_PCIMAP_PCIMAP_LO2_SHIFT;
		/* Combine as many adjacent windows as possible.  */
		map = map1;
		start = BONITO_PCILO0_BASE;
		end = 1;
		if (map3 == map2 + 1) {
			map = map2;
			start = BONITO_PCILO1_BASE;
			end++;
		}
		if (map2 == map1 + 1) {
			map = map1;
			start = BONITO_PCILO0_BASE;
			end++;
		}
		bonito64_mem_resource.start = start;
		bonito64_mem_resource.end = start +
					    BONITO_PCIMAP_WINBASE(end) - 1;
		bonito64_controller.mem_offset = start -
						 BONITO_PCIMAP_WINBASE(map);

		controller = &bonito64_controller;
		break;

	case MIPS_REVISION_SCON_SOCIT:
	case MIPS_REVISION_SCON_ROCIT:
	case MIPS_REVISION_SCON_SOCITSC:
	case MIPS_REVISION_SCON_SOCITSCP:
		/* Set up resource ranges from the controller's registers.  */
		MSC_READ(MSC01_PCI_SC2PMBASL, start);
		MSC_READ(MSC01_PCI_SC2PMMSKL, mask);
		MSC_READ(MSC01_PCI_SC2PMMAPL, map);
		msc_mem_resource.start = start & mask;
		msc_mem_resource.end = (start & mask) | ~mask;
		msc_controller.mem_offset = (start & mask) - (map & mask);
		if (mips_cps_numiocu(0)) {
			write_gcr_reg0_base(start);
			write_gcr_reg0_mask(mask |
					    CM_GCR_REGn_MASK_CMTGT_IOCU0);
		}
		MSC_READ(MSC01_PCI_SC2PIOBASL, start);
		MSC_READ(MSC01_PCI_SC2PIOMSKL, mask);
		MSC_READ(MSC01_PCI_SC2PIOMAPL, map);
		msc_io_resource.start = map & mask;
		msc_io_resource.end = (map & mask) | ~mask;
		msc_controller.io_offset = 0;
		ioport_resource.end = ~mask;
		if (mips_cps_numiocu(0)) {
			write_gcr_reg1_base(start);
			write_gcr_reg1_mask(mask |
					    CM_GCR_REGn_MASK_CMTGT_IOCU0);
		}
		/* If ranges overlap I/O takes precedence.  */
		start = start & mask;
		end = start | ~mask;
		if ((start >= msc_mem_resource.start &&
		     start <= msc_mem_resource.end) ||
		    (end >= msc_mem_resource.start &&
		     end <= msc_mem_resource.end)) {
			/* Use the larger space.  */
			start = max(start, msc_mem_resource.start);
			end = min(end, msc_mem_resource.end);
			if (start - msc_mem_resource.start >=
			    msc_mem_resource.end - end)
				msc_mem_resource.end = start - 1;
			else
				msc_mem_resource.start = end + 1;
		}

		controller = &msc_controller;
		break;

	default:
		/* unknown system controller: leave PCI unconfigured */
		return;
	}

	/* PIIX4 ACPI starts at 0x1000 */
	if (controller->io_resource->start < 0x00001000UL)
		controller->io_resource->start = 0x00001000UL;

	iomem_resource.end &= 0xfffffffffULL;		/* 64 GB */
	ioport_resource.end = controller->io_resource->end;

	controller->io_map_base = mips_io_port_base;

	register_pci_controller(controller);
}
| linux-master | arch/mips/pci/pci-malta.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 Christoph Hellwig ([email protected])
* Copyright (C) 1999, 2000, 04 Ralf Baechle ([email protected])
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <asm/sn/addrs.h>
#include <asm/sn/types.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/agent.h>
#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>
#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
return bc->nasid;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif /* CONFIG_NUMA */
/*
 * IP29 quirk: the Ethernet PHY of the IOC3 on the second module's
 * system board must be powered up explicitly once the device has been
 * configured.
 */
static void ip29_fixup_phy(struct pci_dev *dev)
{
	int node = pcibus_to_node(dev->bus);
	u32 subsys;

	/* Only the second module needs this workaround. */
	if (node != 1)
		return;

	/* Act only on the IP29 system board flavour of the IOC3. */
	pci_read_config_dword(dev, PCI_SUBSYSTEM_VENDOR_ID, &subsys);
	if (subsys != (PCI_VENDOR_ID_SGI | (IOC3_SUBSYS_IP29_SYSBOARD) << 16))
		return;

	/* enable ethernet PHY on IP29 systemboard */
	REMOTE_HUB_S(node, MD_LED0, 0x09);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
			ip29_fixup_phy);
| linux-master | arch/mips/pci/pci-ip27.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754sp.h"
#include "ieee754dp.h"
/*
 * Widen a single-precision NaN to double precision: keep the sign,
 * use the all-ones (NaN/Inf) exponent, and left-align the mantissa
 * payload into the wider fraction field.
 */
static inline union ieee754dp ieee754dp_nan_fsp(int xs, u64 xm)
{
	u64 dm = xm << (DP_FBITS - SP_FBITS);

	return builddp(xs, DP_EMAX + 1 + DP_EBIAS, dm);
}
/*
 * ieee754dp_fsp - convert single to double precision (cvt.d.s).
 *
 * Widening is exact: every single value is representable as a double,
 * so no rounding, overflow or underflow can occur.  Only NaN inputs
 * need special (signalling) treatment.
 */
union ieee754dp ieee754dp_fsp(union ieee754sp x)
{
	COMPXSP;

	EXPLODEXSP;

	ieee754_clearcx();

	FLUSHXSP;

	switch (xc) {
	case IEEE754_CLASS_QNAN:
		return ieee754dp_nan_fsp(xs, xm);

	case IEEE754_CLASS_SNAN:
		/* Signalling NaN: raise the invalid-operation exception. */
		return ieee754dp_nanxcpt(ieee754dp_nan_fsp(xs, xm));

	case IEEE754_CLASS_ZERO:
		return ieee754dp_zero(xs);

	case IEEE754_CLASS_INF:
		return ieee754dp_inf(xs);

	case IEEE754_CLASS_DNORM:
		/* Normalize: shift until the hidden-bit position is set. */
		for (; (xm >> SP_FBITS) == 0; xe--)
			xm <<= 1;
		break;

	case IEEE754_CLASS_NORM:
		break;
	}

	/*
	 * Can't possibly overflow, underflow, or need rounding.
	 * Drop the (now implicit) hidden bit, rebias the exponent for the
	 * double format, and left-align the fraction.
	 */
	xm &= ~SP_HIDDEN_BIT;

	return builddp(xs, xe + DP_EBIAS,
		       (u64) xm << (DP_FBITS - SP_FBITS));
}
| linux-master | arch/mips/math-emu/dp_fsp.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* single precision square root
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754sp.h"
/*
 * ieee754sp_sqrt - single-precision square root.
 *
 * After disposing of the special classes (NaN, Inf, zero, negative),
 * the result is computed bit by bit with a shift-and-subtract digit
 * recurrence on the raw IEEE bit pattern, producing one quotient bit
 * per iteration plus a final guard bit used for rounding.
 */
union ieee754sp ieee754sp_sqrt(union ieee754sp x)
{
	int ix, s, q, m, t, i;
	unsigned int r;

	COMPXSP;

	/* take care of Inf and NaN */

	EXPLODEXSP;

	ieee754_clearcx();

	FLUSHXSP;

	/* x == INF or NAN? */
	switch (xc) {
	case IEEE754_CLASS_SNAN:
		return ieee754sp_nanxcpt(x);

	case IEEE754_CLASS_QNAN:
		/* sqrt(Nan) = Nan */
		return x;

	case IEEE754_CLASS_ZERO:
		/* sqrt(0) = 0 */
		return x;

	case IEEE754_CLASS_INF:
		if (xs) {
			/* sqrt(-Inf) = Nan */
			ieee754_setcx(IEEE754_INVALID_OPERATION);
			return ieee754sp_indef();
		}
		/* sqrt(+Inf) = Inf */
		return x;

	case IEEE754_CLASS_DNORM:
	case IEEE754_CLASS_NORM:
		if (xs) {
			/* sqrt(-x) = Nan */
			ieee754_setcx(IEEE754_INVALID_OPERATION);
			return ieee754sp_indef();
		}
		break;
	}

	/* Work on the raw IEEE single bit pattern from here on. */
	ix = x.bits;

	/* normalize x */
	m = (ix >> 23);
	if (m == 0) {		/* subnormal x */
		for (i = 0; (ix & 0x00800000) == 0; i++)
			ix <<= 1;
		m -= i - 1;	/* effective exponent of the subnormal */
	}
	m -= 127;	/* unbias exponent */
	ix = (ix & 0x007fffff) | 0x00800000;	/* restore the hidden bit */
	if (m & 1)	/* odd m, double x to make it even */
		ix += ix;
	m >>= 1;	/* m = [m/2] */

	/* generate sqrt(x) bit by bit */
	ix += ix;
	s = 0;
	q = 0;		/* q = sqrt(x) */
	r = 0x01000000;	/* r = moving bit from right to left */

	/* Restoring shift-subtract loop: one result bit per iteration. */
	while (r != 0) {
		t = s + r;
		if (t <= ix) {
			s = t + r;
			ix -= t;
			q += r;
		}
		ix += ix;
		r >>= 1;
	}

	/*
	 * A non-zero remainder means the result is inexact; round per the
	 * current mode.  q carries one extra guard bit (dropped by the
	 * final q >> 1), so round-up adds 2 and round-to-nearest-even
	 * adds q & 1.
	 */
	if (ix != 0) {
		ieee754_setcx(IEEE754_INEXACT);
		switch (ieee754_csr.rm) {
		case FPU_CSR_RU:
			q += 2;
			break;

		case FPU_CSR_RN:
			q += (q & 1);
			break;
		}
	}

	/* Drop the guard bit and reassemble the IEEE single encoding. */
	ix = (q >> 1) + 0x3f000000;
	ix += (m << 23);
	x.bits = ix;
	return x;
}
| linux-master | arch/mips/math-emu/sp_sqrt.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <asm/debug.h>
#include <asm/fpu_emulator.h>
#include <asm/local.h>
DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
/*
 * debugfs read callback: sum one emulator statistics counter over all
 * online CPUs.  @data carries the byte offset of the counter within
 * struct mips_fpu_emulator_stats (see FPU_EMU_STAT_OFFSET).
 */
static int fpuemu_stat_get(void *data, u64 *val)
{
	unsigned long offset = (unsigned long)data;
	unsigned long total = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		struct mips_fpu_emulator_stats *stats;
		local_t *counter;

		stats = &per_cpu(fpuemustats, cpu);
		counter = (void *)stats + offset;
		total += local_read(counter);
	}

	*val = total;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n");
/*
* Used to obtain names for a debugfs instruction counter, given field name
* in fpuemustats structure. For example, for input "cmp_sueq_d", the output
* would be "cmp.sueq.d". This is needed since dots are not allowed to be
* used in structure field names, and are, on the other hand, desired to be
* used in debugfs item names to be clearly associated to corresponding
* MIPS FPU instructions.
*/
/*
 * Used to obtain names for a debugfs instruction counter, given field name
 * in fpuemustats structure. For example, for input "cmp_sueq_d", the output
 * would be "cmp.sueq.d". This is needed since dots are not allowed to be
 * used in structure field names, and are, on the other hand, desired to be
 * used in debugfs item names to be clearly associated to corresponding
 * MIPS FPU instructions.
 *
 * Done in a single pass rather than strcpy() followed by a rescan of the
 * whole string.  @out_name must hold at least strlen(in_name) + 1 bytes.
 */
static void adjust_instruction_counter_name(char *out_name, char *in_name)
{
	size_t i;

	for (i = 0; in_name[i] != '\0'; i++)
		out_name[i] = (in_name[i] == '_') ? '.' : in_name[i];
	out_name[i] = '\0';
}
/*
 * debugfs "fpuemustats_clear" show callback: reading the file resets
 * the FPU emulation statistics counters to zero.  Each local_t field
 * is cleared individually, hence the long explicit list below.
 *
 * NOTE(review): __this_cpu_write() only clears the counters of the CPU
 * the read happens to run on — counters on other CPUs keep their
 * values.  Presumably intentional (local_t is per-cpu-only safe);
 * confirm this is the expected semantics.
 */
static int fpuemustats_clear_show(struct seq_file *s, void *unused)
{
	/* Core emulation event counters. */
	__this_cpu_write((fpuemustats).emulated, 0);
	__this_cpu_write((fpuemustats).loads, 0);
	__this_cpu_write((fpuemustats).stores, 0);
	__this_cpu_write((fpuemustats).branches, 0);
	__this_cpu_write((fpuemustats).cp1ops, 0);
	__this_cpu_write((fpuemustats).cp1xops, 0);
	__this_cpu_write((fpuemustats).errors, 0);
	/* IEEE754 exception counters. */
	__this_cpu_write((fpuemustats).ieee754_inexact, 0);
	__this_cpu_write((fpuemustats).ieee754_underflow, 0);
	__this_cpu_write((fpuemustats).ieee754_overflow, 0);
	__this_cpu_write((fpuemustats).ieee754_zerodiv, 0);
	__this_cpu_write((fpuemustats).ieee754_invalidop, 0);
	__this_cpu_write((fpuemustats).ds_emul, 0);

	/* Per-instruction counters (one per emulated FPU mnemonic). */
	__this_cpu_write((fpuemustats).abs_s, 0);
	__this_cpu_write((fpuemustats).abs_d, 0);
	__this_cpu_write((fpuemustats).add_s, 0);
	__this_cpu_write((fpuemustats).add_d, 0);
	__this_cpu_write((fpuemustats).bc1eqz, 0);
	__this_cpu_write((fpuemustats).bc1nez, 0);
	__this_cpu_write((fpuemustats).ceil_w_s, 0);
	__this_cpu_write((fpuemustats).ceil_w_d, 0);
	__this_cpu_write((fpuemustats).ceil_l_s, 0);
	__this_cpu_write((fpuemustats).ceil_l_d, 0);
	__this_cpu_write((fpuemustats).class_s, 0);
	__this_cpu_write((fpuemustats).class_d, 0);
	__this_cpu_write((fpuemustats).cmp_af_s, 0);
	__this_cpu_write((fpuemustats).cmp_af_d, 0);
	__this_cpu_write((fpuemustats).cmp_eq_s, 0);
	__this_cpu_write((fpuemustats).cmp_eq_d, 0);
	__this_cpu_write((fpuemustats).cmp_le_s, 0);
	__this_cpu_write((fpuemustats).cmp_le_d, 0);
	__this_cpu_write((fpuemustats).cmp_lt_s, 0);
	__this_cpu_write((fpuemustats).cmp_lt_d, 0);
	__this_cpu_write((fpuemustats).cmp_ne_s, 0);
	__this_cpu_write((fpuemustats).cmp_ne_d, 0);
	__this_cpu_write((fpuemustats).cmp_or_s, 0);
	__this_cpu_write((fpuemustats).cmp_or_d, 0);
	__this_cpu_write((fpuemustats).cmp_ueq_s, 0);
	__this_cpu_write((fpuemustats).cmp_ueq_d, 0);
	__this_cpu_write((fpuemustats).cmp_ule_s, 0);
	__this_cpu_write((fpuemustats).cmp_ule_d, 0);
	__this_cpu_write((fpuemustats).cmp_ult_s, 0);
	__this_cpu_write((fpuemustats).cmp_ult_d, 0);
	__this_cpu_write((fpuemustats).cmp_un_s, 0);
	__this_cpu_write((fpuemustats).cmp_un_d, 0);
	__this_cpu_write((fpuemustats).cmp_une_s, 0);
	__this_cpu_write((fpuemustats).cmp_une_d, 0);
	__this_cpu_write((fpuemustats).cmp_saf_s, 0);
	__this_cpu_write((fpuemustats).cmp_saf_d, 0);
	__this_cpu_write((fpuemustats).cmp_seq_s, 0);
	__this_cpu_write((fpuemustats).cmp_seq_d, 0);
	__this_cpu_write((fpuemustats).cmp_sle_s, 0);
	__this_cpu_write((fpuemustats).cmp_sle_d, 0);
	__this_cpu_write((fpuemustats).cmp_slt_s, 0);
	__this_cpu_write((fpuemustats).cmp_slt_d, 0);
	__this_cpu_write((fpuemustats).cmp_sne_s, 0);
	__this_cpu_write((fpuemustats).cmp_sne_d, 0);
	__this_cpu_write((fpuemustats).cmp_sor_s, 0);
	__this_cpu_write((fpuemustats).cmp_sor_d, 0);
	__this_cpu_write((fpuemustats).cmp_sueq_s, 0);
	__this_cpu_write((fpuemustats).cmp_sueq_d, 0);
	__this_cpu_write((fpuemustats).cmp_sule_s, 0);
	__this_cpu_write((fpuemustats).cmp_sule_d, 0);
	__this_cpu_write((fpuemustats).cmp_sult_s, 0);
	__this_cpu_write((fpuemustats).cmp_sult_d, 0);
	__this_cpu_write((fpuemustats).cmp_sun_s, 0);
	__this_cpu_write((fpuemustats).cmp_sun_d, 0);
	__this_cpu_write((fpuemustats).cmp_sune_s, 0);
	__this_cpu_write((fpuemustats).cmp_sune_d, 0);
	__this_cpu_write((fpuemustats).cvt_d_l, 0);
	__this_cpu_write((fpuemustats).cvt_d_s, 0);
	__this_cpu_write((fpuemustats).cvt_d_w, 0);
	__this_cpu_write((fpuemustats).cvt_l_s, 0);
	__this_cpu_write((fpuemustats).cvt_l_d, 0);
	__this_cpu_write((fpuemustats).cvt_s_d, 0);
	__this_cpu_write((fpuemustats).cvt_s_l, 0);
	__this_cpu_write((fpuemustats).cvt_s_w, 0);
	__this_cpu_write((fpuemustats).cvt_w_s, 0);
	__this_cpu_write((fpuemustats).cvt_w_d, 0);
	__this_cpu_write((fpuemustats).div_s, 0);
	__this_cpu_write((fpuemustats).div_d, 0);
	__this_cpu_write((fpuemustats).floor_w_s, 0);
	__this_cpu_write((fpuemustats).floor_w_d, 0);
	__this_cpu_write((fpuemustats).floor_l_s, 0);
	__this_cpu_write((fpuemustats).floor_l_d, 0);
	__this_cpu_write((fpuemustats).maddf_s, 0);
	__this_cpu_write((fpuemustats).maddf_d, 0);
	__this_cpu_write((fpuemustats).max_s, 0);
	__this_cpu_write((fpuemustats).max_d, 0);
	__this_cpu_write((fpuemustats).maxa_s, 0);
	__this_cpu_write((fpuemustats).maxa_d, 0);
	__this_cpu_write((fpuemustats).min_s, 0);
	__this_cpu_write((fpuemustats).min_d, 0);
	__this_cpu_write((fpuemustats).mina_s, 0);
	__this_cpu_write((fpuemustats).mina_d, 0);
	__this_cpu_write((fpuemustats).mov_s, 0);
	__this_cpu_write((fpuemustats).mov_d, 0);
	__this_cpu_write((fpuemustats).msubf_s, 0);
	__this_cpu_write((fpuemustats).msubf_d, 0);
	__this_cpu_write((fpuemustats).mul_s, 0);
	__this_cpu_write((fpuemustats).mul_d, 0);
	__this_cpu_write((fpuemustats).neg_s, 0);
	__this_cpu_write((fpuemustats).neg_d, 0);
	__this_cpu_write((fpuemustats).recip_s, 0);
	__this_cpu_write((fpuemustats).recip_d, 0);
	__this_cpu_write((fpuemustats).rint_s, 0);
	__this_cpu_write((fpuemustats).rint_d, 0);
	__this_cpu_write((fpuemustats).round_w_s, 0);
	__this_cpu_write((fpuemustats).round_w_d, 0);
	__this_cpu_write((fpuemustats).round_l_s, 0);
	__this_cpu_write((fpuemustats).round_l_d, 0);
	__this_cpu_write((fpuemustats).rsqrt_s, 0);
	__this_cpu_write((fpuemustats).rsqrt_d, 0);
	__this_cpu_write((fpuemustats).sel_s, 0);
	__this_cpu_write((fpuemustats).sel_d, 0);
	__this_cpu_write((fpuemustats).seleqz_s, 0);
	__this_cpu_write((fpuemustats).seleqz_d, 0);
	__this_cpu_write((fpuemustats).selnez_s, 0);
	__this_cpu_write((fpuemustats).selnez_d, 0);
	__this_cpu_write((fpuemustats).sqrt_s, 0);
	__this_cpu_write((fpuemustats).sqrt_d, 0);
	__this_cpu_write((fpuemustats).sub_s, 0);
	__this_cpu_write((fpuemustats).sub_d, 0);
	__this_cpu_write((fpuemustats).trunc_w_s, 0);
	__this_cpu_write((fpuemustats).trunc_w_d, 0);
	__this_cpu_write((fpuemustats).trunc_l_s, 0);
	__this_cpu_write((fpuemustats).trunc_l_d, 0);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(fpuemustats_clear);
/*
 * Create the debugfs hierarchy for the FPU emulator statistics:
 *
 *   <mips_debugfs_dir>/fpuemustats_clear        - read to reset counters
 *   <mips_debugfs_dir>/fpuemustats/<event>      - aggregate event counters
 *   <mips_debugfs_dir>/fpuemustats/instructions/<mnemonic>
 *                                               - per-instruction counters
 *
 * Every counter file reads the per-cpu sum via fops_fpuemu_stat, which
 * receives the field's byte offset within struct mips_fpu_emulator_stats
 * as its private data.
 */
static int __init debugfs_fpuemu(void)
{
	struct dentry *fpuemu_debugfs_base_dir;
	struct dentry *fpuemu_debugfs_inst_dir;
	char name[32];

	fpuemu_debugfs_base_dir = debugfs_create_dir("fpuemustats",
						     mips_debugfs_dir);

	debugfs_create_file("fpuemustats_clear", 0444, mips_debugfs_dir, NULL,
			    &fpuemustats_clear_fops);

/* Byte offset of field @m inside the per-cpu statistics structure. */
#define FPU_EMU_STAT_OFFSET(m)						\
	offsetof(struct mips_fpu_emulator_stats, m)

/* Counter file named after the field, in the fpuemustats directory. */
#define FPU_STAT_CREATE(m)						\
do {									\
	debugfs_create_file(#m, 0444, fpuemu_debugfs_base_dir,		\
			    (void *)FPU_EMU_STAT_OFFSET(m),		\
			    &fops_fpuemu_stat);				\
} while (0)

	FPU_STAT_CREATE(emulated);
	FPU_STAT_CREATE(loads);
	FPU_STAT_CREATE(stores);
	FPU_STAT_CREATE(branches);
	FPU_STAT_CREATE(cp1ops);
	FPU_STAT_CREATE(cp1xops);
	FPU_STAT_CREATE(errors);
	FPU_STAT_CREATE(ieee754_inexact);
	FPU_STAT_CREATE(ieee754_underflow);
	FPU_STAT_CREATE(ieee754_overflow);
	FPU_STAT_CREATE(ieee754_zerodiv);
	FPU_STAT_CREATE(ieee754_invalidop);
	FPU_STAT_CREATE(ds_emul);

	fpuemu_debugfs_inst_dir = debugfs_create_dir("instructions",
						     fpuemu_debugfs_base_dir);

/*
 * As FPU_STAT_CREATE, but in the instructions/ subdirectory and with
 * '_' in the field name rewritten to '.' so the file matches the MIPS
 * mnemonic (e.g. cmp_sueq_d -> cmp.sueq.d).
 */
#define FPU_STAT_CREATE_EX(m)						\
do {									\
	adjust_instruction_counter_name(name, #m);			\
									\
	debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir,	\
			    (void *)FPU_EMU_STAT_OFFSET(m),		\
			    &fops_fpuemu_stat);				\
} while (0)

	FPU_STAT_CREATE_EX(abs_s);
	FPU_STAT_CREATE_EX(abs_d);
	FPU_STAT_CREATE_EX(add_s);
	FPU_STAT_CREATE_EX(add_d);
	FPU_STAT_CREATE_EX(bc1eqz);
	FPU_STAT_CREATE_EX(bc1nez);
	FPU_STAT_CREATE_EX(ceil_w_s);
	FPU_STAT_CREATE_EX(ceil_w_d);
	FPU_STAT_CREATE_EX(ceil_l_s);
	FPU_STAT_CREATE_EX(ceil_l_d);
	FPU_STAT_CREATE_EX(class_s);
	FPU_STAT_CREATE_EX(class_d);
	FPU_STAT_CREATE_EX(cmp_af_s);
	FPU_STAT_CREATE_EX(cmp_af_d);
	FPU_STAT_CREATE_EX(cmp_eq_s);
	FPU_STAT_CREATE_EX(cmp_eq_d);
	FPU_STAT_CREATE_EX(cmp_le_s);
	FPU_STAT_CREATE_EX(cmp_le_d);
	FPU_STAT_CREATE_EX(cmp_lt_s);
	FPU_STAT_CREATE_EX(cmp_lt_d);
	FPU_STAT_CREATE_EX(cmp_ne_s);
	FPU_STAT_CREATE_EX(cmp_ne_d);
	FPU_STAT_CREATE_EX(cmp_or_s);
	FPU_STAT_CREATE_EX(cmp_or_d);
	FPU_STAT_CREATE_EX(cmp_ueq_s);
	FPU_STAT_CREATE_EX(cmp_ueq_d);
	FPU_STAT_CREATE_EX(cmp_ule_s);
	FPU_STAT_CREATE_EX(cmp_ule_d);
	FPU_STAT_CREATE_EX(cmp_ult_s);
	FPU_STAT_CREATE_EX(cmp_ult_d);
	FPU_STAT_CREATE_EX(cmp_un_s);
	FPU_STAT_CREATE_EX(cmp_un_d);
	FPU_STAT_CREATE_EX(cmp_une_s);
	FPU_STAT_CREATE_EX(cmp_une_d);
	FPU_STAT_CREATE_EX(cmp_saf_s);
	FPU_STAT_CREATE_EX(cmp_saf_d);
	FPU_STAT_CREATE_EX(cmp_seq_s);
	FPU_STAT_CREATE_EX(cmp_seq_d);
	FPU_STAT_CREATE_EX(cmp_sle_s);
	FPU_STAT_CREATE_EX(cmp_sle_d);
	FPU_STAT_CREATE_EX(cmp_slt_s);
	FPU_STAT_CREATE_EX(cmp_slt_d);
	FPU_STAT_CREATE_EX(cmp_sne_s);
	FPU_STAT_CREATE_EX(cmp_sne_d);
	FPU_STAT_CREATE_EX(cmp_sor_s);
	FPU_STAT_CREATE_EX(cmp_sor_d);
	FPU_STAT_CREATE_EX(cmp_sueq_s);
	FPU_STAT_CREATE_EX(cmp_sueq_d);
	FPU_STAT_CREATE_EX(cmp_sule_s);
	FPU_STAT_CREATE_EX(cmp_sule_d);
	FPU_STAT_CREATE_EX(cmp_sult_s);
	FPU_STAT_CREATE_EX(cmp_sult_d);
	FPU_STAT_CREATE_EX(cmp_sun_s);
	FPU_STAT_CREATE_EX(cmp_sun_d);
	FPU_STAT_CREATE_EX(cmp_sune_s);
	FPU_STAT_CREATE_EX(cmp_sune_d);
	FPU_STAT_CREATE_EX(cvt_d_l);
	FPU_STAT_CREATE_EX(cvt_d_s);
	FPU_STAT_CREATE_EX(cvt_d_w);
	FPU_STAT_CREATE_EX(cvt_l_s);
	FPU_STAT_CREATE_EX(cvt_l_d);
	FPU_STAT_CREATE_EX(cvt_s_d);
	FPU_STAT_CREATE_EX(cvt_s_l);
	FPU_STAT_CREATE_EX(cvt_s_w);
	FPU_STAT_CREATE_EX(cvt_w_s);
	FPU_STAT_CREATE_EX(cvt_w_d);
	FPU_STAT_CREATE_EX(div_s);
	FPU_STAT_CREATE_EX(div_d);
	FPU_STAT_CREATE_EX(floor_w_s);
	FPU_STAT_CREATE_EX(floor_w_d);
	FPU_STAT_CREATE_EX(floor_l_s);
	FPU_STAT_CREATE_EX(floor_l_d);
	FPU_STAT_CREATE_EX(maddf_s);
	FPU_STAT_CREATE_EX(maddf_d);
	FPU_STAT_CREATE_EX(max_s);
	FPU_STAT_CREATE_EX(max_d);
	FPU_STAT_CREATE_EX(maxa_s);
	FPU_STAT_CREATE_EX(maxa_d);
	FPU_STAT_CREATE_EX(min_s);
	FPU_STAT_CREATE_EX(min_d);
	FPU_STAT_CREATE_EX(mina_s);
	FPU_STAT_CREATE_EX(mina_d);
	FPU_STAT_CREATE_EX(mov_s);
	FPU_STAT_CREATE_EX(mov_d);
	FPU_STAT_CREATE_EX(msubf_s);
	FPU_STAT_CREATE_EX(msubf_d);
	FPU_STAT_CREATE_EX(mul_s);
	FPU_STAT_CREATE_EX(mul_d);
	FPU_STAT_CREATE_EX(neg_s);
	FPU_STAT_CREATE_EX(neg_d);
	FPU_STAT_CREATE_EX(recip_s);
	FPU_STAT_CREATE_EX(recip_d);
	FPU_STAT_CREATE_EX(rint_s);
	FPU_STAT_CREATE_EX(rint_d);
	FPU_STAT_CREATE_EX(round_w_s);
	FPU_STAT_CREATE_EX(round_w_d);
	FPU_STAT_CREATE_EX(round_l_s);
	FPU_STAT_CREATE_EX(round_l_d);
	FPU_STAT_CREATE_EX(rsqrt_s);
	FPU_STAT_CREATE_EX(rsqrt_d);
	FPU_STAT_CREATE_EX(sel_s);
	FPU_STAT_CREATE_EX(sel_d);
	FPU_STAT_CREATE_EX(seleqz_s);
	FPU_STAT_CREATE_EX(seleqz_d);
	FPU_STAT_CREATE_EX(selnez_s);
	FPU_STAT_CREATE_EX(selnez_d);
	FPU_STAT_CREATE_EX(sqrt_s);
	FPU_STAT_CREATE_EX(sqrt_d);
	FPU_STAT_CREATE_EX(sub_s);
	FPU_STAT_CREATE_EX(sub_d);
	FPU_STAT_CREATE_EX(trunc_w_s);
	FPU_STAT_CREATE_EX(trunc_w_d);
	FPU_STAT_CREATE_EX(trunc_l_s);
	FPU_STAT_CREATE_EX(trunc_l_d);

	return 0;
}
arch_initcall(debugfs_fpuemu);
| linux-master | arch/mips/math-emu/me-debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cp1emu.c: a MIPS coprocessor 1 (FPU) instruction emulator
*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* Kevin D. Kissell, [email protected] and Carsten Langgaard, [email protected]
* Copyright (C) 2000 MIPS Technologies, Inc.
*
* A complete emulator for MIPS coprocessor 1 instructions. This is
* required for #float(switch) or #float(trap), where it catches all
* COP1 instructions via the "CoProcessor Unusable" exception.
*
* More surprisingly it is also required for #float(ieee), to help out
* the hardware FPU at the boundaries of the IEEE-754 representation
* (denormalised values, infinities, underflow, etc). It is made
* quite nasty because emulation of some non-COP1 instructions is
* required, e.g. in branch delay slots.
*
* Note if you know that you won't have an FPU, then you'll get much
* better performance by compiling with -msoft-float!
*/
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/percpu-defs.h>
#include <linux/perf_event.h>
#include <asm/branch.h>
#include <asm/inst.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <linux/uaccess.h>
#include <asm/cpu-info.h>
#include <asm/processor.h>
#include <asm/fpu_emulator.h>
#include <asm/fpu.h>
#include <asm/mips-r2-to-r6-emul.h>
#include "ieee754.h"
/* Function which emulates a floating point instruction. */
static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
mips_instruction);
static int fpux_emu(struct pt_regs *,
struct mips_fpu_struct *, mips_instruction, void __user **);
/* Control registers */
#define FPCREG_RID 0 /* $0 = revision id */
#define FPCREG_FCCR 25 /* $25 = fccr */
#define FPCREG_FEXR 26 /* $26 = fexr */
#define FPCREG_FENR 28 /* $28 = fenr */
#define FPCREG_CSR 31 /* $31 = csr */
/* convert condition code register number to csr bit */
/* Condition code N maps to its FPU_CSR_CONDn bit mask in the FCSR. */
const unsigned int fpucondbit[8] = {
	FPU_CSR_COND,
	FPU_CSR_COND1,
	FPU_CSR_COND2,
	FPU_CSR_COND3,
	FPU_CSR_COND4,
	FPU_CSR_COND5,
	FPU_CSR_COND6,
	FPU_CSR_COND7
};

/*
 * (microMIPS) Convert certain microMIPS instructions to MIPS32 format.
 * Each table maps a microMIPS fmt field to the equivalent MIPS32 fmt
 * value (16 = S, 17 = D, 20 = W, 21 = L, 22 = PS); 0 entries are
 * invalid encodings.
 */
static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0};
static const int sdps_format[] = {16, 17, 22, 0, 0, 0, 0, 0};
static const int dwl_format[] = {17, 20, 21, 0, 0, 0, 0, 0};
static const int swl_format[] = {16, 20, 21, 0, 0, 0, 0, 0};
/*
* This functions translates a 32-bit microMIPS instruction
* into a 32-bit MIPS32 instruction. Returns 0 on success
* and SIGILL otherwise.
*/
/*
 * Rewrite the 32-bit microMIPS FPU instruction at *insn_ptr into its
 * classic MIPS32 encoding in place, so the rest of the emulator only
 * has to decode one instruction format.  Returns 0 on success and
 * SIGILL for encodings with no recognized MIPS32 equivalent.
 */
static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
{
	union mips_instruction insn = *insn_ptr;
	/* Start from a copy; unmentioned bit-fields keep their values. */
	union mips_instruction mips32_insn = insn;
	int func, fmt, op;

	switch (insn.mm_i_format.opcode) {
	/*
	 * FPU loads/stores: same I-type layout, but microMIPS swaps the
	 * rs/rt field positions relative to MIPS32.
	 */
	case mm_ldc132_op:
		mips32_insn.mm_i_format.opcode = ldc1_op;
		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
		break;
	case mm_lwc132_op:
		mips32_insn.mm_i_format.opcode = lwc1_op;
		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
		break;
	case mm_sdc132_op:
		mips32_insn.mm_i_format.opcode = sdc1_op;
		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
		break;
	case mm_swc132_op:
		mips32_insn.mm_i_format.opcode = swc1_op;
		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
		break;
	case mm_pool32i_op:
		/* NOTE: offset is << by 1 if in microMIPS mode. */
		/* bc1f/bc1t: FPU condition branches. */
		if ((insn.mm_i_format.rt == mm_bc1f_op) ||
		    (insn.mm_i_format.rt == mm_bc1t_op)) {
			mips32_insn.fb_format.opcode = cop1_op;
			mips32_insn.fb_format.bc = bc_op;
			mips32_insn.fb_format.flag =
				(insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
		} else
			return SIGILL;
		break;
	case mm_pool32f_op:
		switch (insn.mm_fp0_format.func) {
		/* Fused multiply-add family -> COP1X encodings. */
		case mm_32f_01_op:
		case mm_32f_11_op:
		case mm_32f_02_op:
		case mm_32f_12_op:
		case mm_32f_41_op:
		case mm_32f_51_op:
		case mm_32f_42_op:
		case mm_32f_52_op:
			op = insn.mm_fp0_format.func;
			if (op == mm_32f_01_op)
				func = madd_s_op;
			else if (op == mm_32f_11_op)
				func = madd_d_op;
			else if (op == mm_32f_02_op)
				func = nmadd_s_op;
			else if (op == mm_32f_12_op)
				func = nmadd_d_op;
			else if (op == mm_32f_41_op)
				func = msub_s_op;
			else if (op == mm_32f_51_op)
				func = msub_d_op;
			else if (op == mm_32f_42_op)
				func = nmsub_s_op;
			else
				func = nmsub_d_op;
			mips32_insn.fp6_format.opcode = cop1x_op;
			mips32_insn.fp6_format.fr = insn.mm_fp6_format.fr;
			mips32_insn.fp6_format.ft = insn.mm_fp6_format.ft;
			mips32_insn.fp6_format.fs = insn.mm_fp6_format.fs;
			mips32_insn.fp6_format.fd = insn.mm_fp6_format.fd;
			mips32_insn.fp6_format.func = func;
			break;
		/* Indexed FPU loads/stores (l/sdxc1, l/swxc1) -> COP1X. */
		case mm_32f_10_op:
			func = -1;	/* Invalid */
			op = insn.mm_fp5_format.op & 0x7;
			if (op == mm_ldxc1_op)
				func = ldxc1_op;
			else if (op == mm_sdxc1_op)
				func = sdxc1_op;
			else if (op == mm_lwxc1_op)
				func = lwxc1_op;
			else if (op == mm_swxc1_op)
				func = swxc1_op;
			if (func != -1) {
				mips32_insn.r_format.opcode = cop1x_op;
				mips32_insn.r_format.rs =
					insn.mm_fp5_format.base;
				mips32_insn.r_format.rt =
					insn.mm_fp5_format.index;
				mips32_insn.r_format.rd = 0;
				mips32_insn.r_format.re = insn.mm_fp5_format.fd;
				mips32_insn.r_format.func = func;
			} else
				return SIGILL;
			break;
		/* movt.fmt / movf.fmt -> fmovc; ft encodes cc and t/f bit. */
		case mm_32f_40_op:
			op = -1;	/* Invalid */
			if (insn.mm_fp2_format.op == mm_fmovt_op)
				op = 1;
			else if (insn.mm_fp2_format.op == mm_fmovf_op)
				op = 0;
			if (op != -1) {
				mips32_insn.fp0_format.opcode = cop1_op;
				mips32_insn.fp0_format.fmt =
					sdps_format[insn.mm_fp2_format.fmt];
				mips32_insn.fp0_format.ft =
					(insn.mm_fp2_format.cc<<2) + op;
				mips32_insn.fp0_format.fs =
					insn.mm_fp2_format.fs;
				mips32_insn.fp0_format.fd =
					insn.mm_fp2_format.fd;
				mips32_insn.fp0_format.func = fmovc_op;
			} else
				return SIGILL;
			break;
		/* Basic arithmetic: add/sub/mul/div.fmt. */
		case mm_32f_60_op:
			func = -1;	/* Invalid */
			if (insn.mm_fp0_format.op == mm_fadd_op)
				func = fadd_op;
			else if (insn.mm_fp0_format.op == mm_fsub_op)
				func = fsub_op;
			else if (insn.mm_fp0_format.op == mm_fmul_op)
				func = fmul_op;
			else if (insn.mm_fp0_format.op == mm_fdiv_op)
				func = fdiv_op;
			if (func != -1) {
				mips32_insn.fp0_format.opcode = cop1_op;
				mips32_insn.fp0_format.fmt =
					sdps_format[insn.mm_fp0_format.fmt];
				mips32_insn.fp0_format.ft =
					insn.mm_fp0_format.ft;
				mips32_insn.fp0_format.fs =
					insn.mm_fp0_format.fs;
				mips32_insn.fp0_format.fd =
					insn.mm_fp0_format.fd;
				mips32_insn.fp0_format.func = func;
			} else
				return SIGILL;
			break;
		/* Conditional moves on GPR value: movn.fmt / movz.fmt. */
		case mm_32f_70_op:
			func = -1;	/* Invalid */
			if (insn.mm_fp0_format.op == mm_fmovn_op)
				func = fmovn_op;
			else if (insn.mm_fp0_format.op == mm_fmovz_op)
				func = fmovz_op;
			if (func != -1) {
				mips32_insn.fp0_format.opcode = cop1_op;
				mips32_insn.fp0_format.fmt =
					sdps_format[insn.mm_fp0_format.fmt];
				mips32_insn.fp0_format.ft =
					insn.mm_fp0_format.ft;
				mips32_insn.fp0_format.fs =
					insn.mm_fp0_format.fs;
				mips32_insn.fp0_format.fd =
					insn.mm_fp0_format.fd;
				mips32_insn.fp0_format.func = func;
			} else
				return SIGILL;
			break;
		case mm_32f_73_op:    /* POOL32FXF */
			switch (insn.mm_fp1_format.op) {
			/* GPR conditional moves: movf/movt -> SPECIAL movc. */
			case mm_movf0_op:
			case mm_movf1_op:
			case mm_movt0_op:
			case mm_movt1_op:
				if ((insn.mm_fp1_format.op & 0x7f) ==
				    mm_movf0_op)
					op = 0;
				else
					op = 1;
				mips32_insn.r_format.opcode = spec_op;
				mips32_insn.r_format.rs = insn.mm_fp4_format.fs;
				mips32_insn.r_format.rt =
					(insn.mm_fp4_format.cc << 2) + op;
				mips32_insn.r_format.rd = insn.mm_fp4_format.rt;
				mips32_insn.r_format.re = 0;
				mips32_insn.r_format.func = movc_op;
				break;
			/* Format conversions cvt.d.fmt / cvt.s.fmt. */
			case mm_fcvtd0_op:
			case mm_fcvtd1_op:
			case mm_fcvts0_op:
			case mm_fcvts1_op:
				if ((insn.mm_fp1_format.op & 0x7f) ==
				    mm_fcvtd0_op) {
					func = fcvtd_op;
					fmt = swl_format[insn.mm_fp3_format.fmt];
				} else {
					func = fcvts_op;
					fmt = dwl_format[insn.mm_fp3_format.fmt];
				}
				mips32_insn.fp0_format.opcode = cop1_op;
				mips32_insn.fp0_format.fmt = fmt;
				mips32_insn.fp0_format.ft = 0;
				mips32_insn.fp0_format.fs =
					insn.mm_fp3_format.fs;
				mips32_insn.fp0_format.fd =
					insn.mm_fp3_format.rt;
				mips32_insn.fp0_format.func = func;
				break;
			/* Unary sign ops: mov/abs/neg.fmt. */
			case mm_fmov0_op:
			case mm_fmov1_op:
			case mm_fabs0_op:
			case mm_fabs1_op:
			case mm_fneg0_op:
			case mm_fneg1_op:
				if ((insn.mm_fp1_format.op & 0x7f) ==
				    mm_fmov0_op)
					func = fmov_op;
				else if ((insn.mm_fp1_format.op & 0x7f) ==
					 mm_fabs0_op)
					func = fabs_op;
				else
					func = fneg_op;
				mips32_insn.fp0_format.opcode = cop1_op;
				mips32_insn.fp0_format.fmt =
					sdps_format[insn.mm_fp3_format.fmt];
				mips32_insn.fp0_format.ft = 0;
				mips32_insn.fp0_format.fs =
					insn.mm_fp3_format.fs;
				mips32_insn.fp0_format.fd =
					insn.mm_fp3_format.rt;
				mips32_insn.fp0_format.func = func;
				break;
			/* FP -> integer conversions with explicit rounding. */
			case mm_ffloorl_op:
			case mm_ffloorw_op:
			case mm_fceill_op:
			case mm_fceilw_op:
			case mm_ftruncl_op:
			case mm_ftruncw_op:
			case mm_froundl_op:
			case mm_froundw_op:
			case mm_fcvtl_op:
			case mm_fcvtw_op:
				if (insn.mm_fp1_format.op == mm_ffloorl_op)
					func = ffloorl_op;
				else if (insn.mm_fp1_format.op == mm_ffloorw_op)
					func = ffloor_op;
				else if (insn.mm_fp1_format.op == mm_fceill_op)
					func = fceill_op;
				else if (insn.mm_fp1_format.op == mm_fceilw_op)
					func = fceil_op;
				else if (insn.mm_fp1_format.op == mm_ftruncl_op)
					func = ftruncl_op;
				else if (insn.mm_fp1_format.op == mm_ftruncw_op)
					func = ftrunc_op;
				else if (insn.mm_fp1_format.op == mm_froundl_op)
					func = froundl_op;
				else if (insn.mm_fp1_format.op == mm_froundw_op)
					func = fround_op;
				else if (insn.mm_fp1_format.op == mm_fcvtl_op)
					func = fcvtl_op;
				else
					func = fcvtw_op;
				mips32_insn.fp0_format.opcode = cop1_op;
				mips32_insn.fp0_format.fmt =
					sd_format[insn.mm_fp1_format.fmt];
				mips32_insn.fp0_format.ft = 0;
				mips32_insn.fp0_format.fs =
					insn.mm_fp1_format.fs;
				mips32_insn.fp0_format.fd =
					insn.mm_fp1_format.rt;
				mips32_insn.fp0_format.func = func;
				break;
			/* Reciprocal/square-root family. */
			case mm_frsqrt_op:
			case mm_fsqrt_op:
			case mm_frecip_op:
				if (insn.mm_fp1_format.op == mm_frsqrt_op)
					func = frsqrt_op;
				else if (insn.mm_fp1_format.op == mm_fsqrt_op)
					func = fsqrt_op;
				else
					func = frecip_op;
				mips32_insn.fp0_format.opcode = cop1_op;
				mips32_insn.fp0_format.fmt =
					sdps_format[insn.mm_fp1_format.fmt];
				mips32_insn.fp0_format.ft = 0;
				mips32_insn.fp0_format.fs =
					insn.mm_fp1_format.fs;
				mips32_insn.fp0_format.fd =
					insn.mm_fp1_format.rt;
				mips32_insn.fp0_format.func = func;
				break;
			/* GPR <-> FPU register/control moves. */
			case mm_mfc1_op:
			case mm_mtc1_op:
			case mm_cfc1_op:
			case mm_ctc1_op:
			case mm_mfhc1_op:
			case mm_mthc1_op:
				if (insn.mm_fp1_format.op == mm_mfc1_op)
					op = mfc_op;
				else if (insn.mm_fp1_format.op == mm_mtc1_op)
					op = mtc_op;
				else if (insn.mm_fp1_format.op == mm_cfc1_op)
					op = cfc_op;
				else if (insn.mm_fp1_format.op == mm_ctc1_op)
					op = ctc_op;
				else if (insn.mm_fp1_format.op == mm_mfhc1_op)
					op = mfhc_op;
				else
					op = mthc_op;
				mips32_insn.fp1_format.opcode = cop1_op;
				mips32_insn.fp1_format.op = op;
				mips32_insn.fp1_format.rt =
					insn.mm_fp1_format.rt;
				mips32_insn.fp1_format.fs =
					insn.mm_fp1_format.fs;
				mips32_insn.fp1_format.fd = 0;
				mips32_insn.fp1_format.func = 0;
				break;
			default:
				return SIGILL;
			}
			break;
		case mm_32f_74_op:	/* c.cond.fmt */
			mips32_insn.fp0_format.opcode = cop1_op;
			mips32_insn.fp0_format.fmt =
				sdps_format[insn.mm_fp4_format.fmt];
			mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt;
			mips32_insn.fp0_format.fs = insn.mm_fp4_format.fs;
			mips32_insn.fp0_format.fd = insn.mm_fp4_format.cc << 2;
			mips32_insn.fp0_format.func =
				insn.mm_fp4_format.cond | MM_MIPS32_COND_FC;
			break;
		default:
			return SIGILL;
		}
		break;
	default:
		return SIGILL;
	}

	*insn_ptr = mips32_insn;
	return 0;
}
/*
* Redundant with logic already in kernel/branch.c,
* embedded in compute_return_epc. At some point,
* a single subroutine should be used across both
* modules.
*/
int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc)
{
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
unsigned int fcr31;
unsigned int bit = 0;
unsigned int bit0;
union fpureg *fpr;
switch (insn.i_format.opcode) {
case spec_op:
switch (insn.r_format.func) {
case jalr_op:
if (insn.r_format.rd != 0) {
regs->regs[insn.r_format.rd] =
regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
}
fallthrough;
case jr_op:
/* For R6, JR already emulated in jalr_op */
if (NO_R6EMU && insn.r_format.func == jr_op)
break;
*contpc = regs->regs[insn.r_format.rs];
return 1;
}
break;
case bcond_op:
switch (insn.i_format.rt) {
case bltzal_op:
case bltzall_op:
if (NO_R6EMU && (insn.i_format.rs ||
insn.i_format.rt == bltzall_op))
break;
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
fallthrough;
case bltzl_op:
if (NO_R6EMU)
break;
fallthrough;
case bltz_op:
if ((long)regs->regs[insn.i_format.rs] < 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case bgezal_op:
case bgezall_op:
if (NO_R6EMU && (insn.i_format.rs ||
insn.i_format.rt == bgezall_op))
break;
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
fallthrough;
case bgezl_op:
if (NO_R6EMU)
break;
fallthrough;
case bgez_op:
if ((long)regs->regs[insn.i_format.rs] >= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
}
break;
case jalx_op:
set_isa16_mode(bit);
fallthrough;
case jal_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
fallthrough;
case j_op:
*contpc = regs->cp0_epc + dec_insn.pc_inc;
*contpc >>= 28;
*contpc <<= 28;
*contpc |= (insn.j_format.target << 2);
/* Set microMIPS mode bit: XOR for jalx. */
*contpc ^= bit;
return 1;
case beql_op:
if (NO_R6EMU)
break;
fallthrough;
case beq_op:
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt])
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case bnel_op:
if (NO_R6EMU)
break;
fallthrough;
case bne_op:
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt])
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case blezl_op:
if (!insn.i_format.rt && NO_R6EMU)
break;
fallthrough;
case blez_op:
/*
* Compact branches for R6 for the
* blez and blezl opcodes.
* BLEZ | rs = 0 | rt != 0 == BLEZALC
* BLEZ | rs = rt != 0 == BGEZALC
* BLEZ | rs != 0 | rt != 0 == BGEUC
* BLEZL | rs = 0 | rt != 0 == BLEZC
* BLEZL | rs = rt != 0 == BGEZC
* BLEZL | rs != 0 | rt != 0 == BGEC
*
* For real BLEZ{,L}, rt is always 0.
*/
if (cpu_has_mips_r6 && insn.i_format.rt) {
if ((insn.i_format.opcode == blez_op) &&
((!insn.i_format.rs && insn.i_format.rt) ||
(insn.i_format.rs == insn.i_format.rt)))
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc;
*contpc = regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
}
if ((long)regs->regs[insn.i_format.rs] <= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case bgtzl_op:
if (!insn.i_format.rt && NO_R6EMU)
break;
fallthrough;
case bgtz_op:
/*
* Compact branches for R6 for the
* bgtz and bgtzl opcodes.
* BGTZ | rs = 0 | rt != 0 == BGTZALC
* BGTZ | rs = rt != 0 == BLTZALC
* BGTZ | rs != 0 | rt != 0 == BLTUC
* BGTZL | rs = 0 | rt != 0 == BGTZC
* BGTZL | rs = rt != 0 == BLTZC
* BGTZL | rs != 0 | rt != 0 == BLTC
*
* *ZALC varint for BGTZ &&& rt != 0
* For real GTZ{,L}, rt is always 0.
*/
if (cpu_has_mips_r6 && insn.i_format.rt) {
if ((insn.i_format.opcode == blez_op) &&
((!insn.i_format.rs && insn.i_format.rt) ||
(insn.i_format.rs == insn.i_format.rt)))
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc;
*contpc = regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
}
if ((long)regs->regs[insn.i_format.rs] > 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case pop10_op:
case pop30_op:
if (!cpu_has_mips_r6)
break;
if (insn.i_format.rt && !insn.i_format.rs)
regs->regs[31] = regs->cp0_epc + 4;
*contpc = regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
#ifdef CONFIG_CPU_CAVIUM_OCTEON
case lwc2_op: /* This is bbit0 on Octeon */
if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
*contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc + 8;
return 1;
case ldc2_op: /* This is bbit032 on Octeon */
if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
*contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc + 8;
return 1;
case swc2_op: /* This is bbit1 on Octeon */
if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
*contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc + 8;
return 1;
case sdc2_op: /* This is bbit132 on Octeon */
if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
*contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc + 8;
return 1;
#else
case bc6_op:
/*
* Only valid for MIPS R6 but we can still end up
* here from a broken userland so just tell emulator
* this is not a branch and let it break later on.
*/
if (!cpu_has_mips_r6)
break;
*contpc = regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case balc6_op:
if (!cpu_has_mips_r6)
break;
regs->regs[31] = regs->cp0_epc + 4;
*contpc = regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case pop66_op:
if (!cpu_has_mips_r6)
break;
*contpc = regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case pop76_op:
if (!cpu_has_mips_r6)
break;
if (!insn.i_format.rs)
regs->regs[31] = regs->cp0_epc + 4;
*contpc = regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
#endif
case cop0_op:
case cop1_op:
/* Need to check for R6 bc1nez and bc1eqz branches */
if (cpu_has_mips_r6 &&
((insn.i_format.rs == bc1eqz_op) ||
(insn.i_format.rs == bc1nez_op))) {
bit = 0;
fpr = ¤t->thread.fpu.fpr[insn.i_format.rt];
bit0 = get_fpr32(fpr, 0) & 0x1;
switch (insn.i_format.rs) {
case bc1eqz_op:
bit = bit0 == 0;
break;
case bc1nez_op:
bit = bit0 != 0;
break;
}
if (bit)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
}
/* R2/R6 compatible cop1 instruction */
fallthrough;
case cop2_op:
case cop1x_op:
if (insn.i_format.rs == bc_op) {
preempt_disable();
if (is_fpu_owner())
fcr31 = read_32bit_cp1_register(CP1_STATUS);
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
bit = (insn.i_format.rt >> 2);
bit += (bit != 0);
bit += 23;
switch (insn.i_format.rt & 3) {
case 0: /* bc1f */
case 2: /* bc1fl */
if (~fcr31 & (1 << bit))
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case 1: /* bc1t */
case 3: /* bc1tl */
if (fcr31 & (1 << bit))
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.i_format.simmediate << 2);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
}
}
break;
}
return 0;
}
/*
* In the Linux kernel, we support selection of FPR format on the
* basis of the Status.FR bit. If an FPU is not present, the FR bit
* is hardwired to zero, which would imply a 32-bit FPU even for
* 64-bit CPUs so we rather look at TIF_32BIT_FPREGS.
* FPU emu is slow and bulky and optimizing this function offers fairly
* sizeable benefits so we try to be clever and make this function return
* a constant whenever possible, that is on 64-bit kernels without O32
* compatibility enabled and on 32-bit without 64-bit FPU support.
*/
/*
 * Decide whether the current task sees 64-bit FPU registers.  Where the
 * kernel configuration fixes the answer this collapses to a compile-time
 * constant, letting the accessor macros below drop the dead code path.
 */
static inline int cop1_64bit(struct pt_regs *xcp)
{
	/* 64-bit kernel without O32 compat: FPRs are always 64-bit. */
	if (IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_MIPS32_O32))
		return 1;
	/* 32-bit kernel without FP64 support: FPRs are always 32-bit. */
	if (IS_ENABLED(CONFIG_32BIT) &&
	    !IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return 0;
	/* Otherwise it is a per-task property. */
	return !test_thread_flag(TIF_32BIT_FPREGS);
}
/*
 * True if the current task runs in hybrid FPR mode (TIF_HYBRID_FPREGS),
 * in which 32-bit reads/writes alias register pairs even though the FPRs
 * themselves are 64 bits wide.
 */
static inline bool hybrid_fprs(void)
{
	return test_thread_flag(TIF_HYBRID_FPREGS);
}
/*
 * FPR accessor macros.  "SI" moves a 32-bit word, "DI" a 64-bit
 * doubleword, "HREG" the upper 32 bits of a 64-bit FPR (MFHC1/MTHC1).
 * All of them expect `xcp` (struct pt_regs *) and `ctx`
 * (struct mips_fpu_struct *) to be in scope at the expansion site.
 *
 * With 64-bit FPRs (and no hybrid mode) every FPR is an independent
 * register and a 32-bit store clears the remaining words.  With 32-bit
 * FPRs, odd/even register pairs alias the two halves of one 64-bit
 * storage element — hence the "(x) & ~1" / "(x) & 1" index selection.
 */
#define SIFROMREG(si, x) \
do { \
if (cop1_64bit(xcp) && !hybrid_fprs()) \
(si) = (int)get_fpr32(&ctx->fpr[x], 0); \
else \
(si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \
} while (0)
#define SITOREG(si, x) \
do { \
if (cop1_64bit(xcp) && !hybrid_fprs()) { \
unsigned int i; \
set_fpr32(&ctx->fpr[x], 0, si); \
for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
set_fpr32(&ctx->fpr[x], i, 0); \
} else { \
set_fpr32(&ctx->fpr[(x) & ~1], (x) & 1, si); \
} \
} while (0)
#define SIFROMHREG(si, x) ((si) = (int)get_fpr32(&ctx->fpr[x], 1))
/* Write the high word; zero any further words (e.g. MSA lanes) above it. */
#define SITOHREG(si, x) \
do { \
unsigned int i; \
set_fpr32(&ctx->fpr[x], 1, si); \
for (i = 2; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
set_fpr32(&ctx->fpr[x], i, 0); \
} while (0)
/* In 32-bit FPR mode, ~(cop1_64bit ^ 1) == ~1 forces the even register. */
#define DIFROMREG(di, x) \
((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) ^ 1)], 0))
#define DITOREG(di, x) \
do { \
unsigned int fpr, i; \
fpr = (x) & ~(cop1_64bit(xcp) ^ 1); \
set_fpr64(&ctx->fpr[fpr], 0, di); \
for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val64); i++) \
set_fpr64(&ctx->fpr[fpr], i, 0); \
} while (0)
/* Single/double precision wrappers over the raw-bit movers above. */
#define SPFROMREG(sp, x) SIFROMREG((sp).bits, x)
#define SPTOREG(sp, x) SITOREG((sp).bits, x)
#define DPFROMREG(dp, x) DIFROMREG((dp).bits, x)
#define DPTOREG(dp, x) DITOREG((dp).bits, x)
/*
* Emulate a CFC1 instruction.
*/
/*
 * Read an FPU control register into GPR rt.  FCSR is always readable;
 * the FENR/FEXR/FCCR views and their bit repackings exist only on
 * MIPS R1 and later (cpu_has_mips_r), otherwise they read as zero.
 */
static inline void cop1_cfc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
			    mips_instruction ir)
{
	u32 fcr31 = ctx->fcr31;
	u32 value = 0;
	switch (MIPSInst_RD(ir)) {
	case FPCREG_CSR:
		/* Full FCSR image. */
		value = fcr31;
		pr_debug("%p	gpr[%d]<-csr=%08x\n",
			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
		break;
	case FPCREG_FENR:
		if (!cpu_has_mips_r)
			break;
		/* FS bit relocated into the FENR layout, plus enables + RM. */
		value = (fcr31 >> (FPU_CSR_FS_S - MIPS_FENR_FS_S)) &
			MIPS_FENR_FS;
		value |= fcr31 & (FPU_CSR_ALL_E | FPU_CSR_RM);
		pr_debug("%p	gpr[%d]<-enr=%08x\n",
			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
		break;
	case FPCREG_FEXR:
		if (!cpu_has_mips_r)
			break;
		/* Cause and sticky flag fields only. */
		value = fcr31 & (FPU_CSR_ALL_X | FPU_CSR_ALL_S);
		pr_debug("%p	gpr[%d]<-exr=%08x\n",
			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
		break;
	case FPCREG_FCCR:
		if (!cpu_has_mips_r)
			break;
		/* Gather the eight condition bits into FCCR bits 7..0. */
		value = (fcr31 >> (FPU_CSR_COND_S - MIPS_FCCR_COND0_S)) &
			MIPS_FCCR_COND0;
		value |= (fcr31 >> (FPU_CSR_COND1_S - MIPS_FCCR_COND1_S)) &
			 (MIPS_FCCR_CONDX & ~MIPS_FCCR_COND0);
		pr_debug("%p	gpr[%d]<-ccr=%08x\n",
			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
		break;
	case FPCREG_RID:
		/* FIR: implementation/revision ID probed at boot. */
		value = boot_cpu_data.fpu_id;
		break;
	default:
		break;
	}
	/* Writes to $0 are discarded, as in hardware. */
	if (MIPSInst_RT(ir))
		xcp->regs[MIPSInst_RT(ir)] = value;
}
/*
* Emulate a CTC1 instruction.
*/
/*
 * Write GPR rt into an FPU control register.  Each alternate view
 * (FENR/FEXR/FCCR, MIPS R1+ only) updates just its own fields of the
 * underlying FCSR; a write to FCSR itself preserves the CPU's
 * read-only mask bits (fpu_msk31).
 */
static inline void cop1_ctc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
			    mips_instruction ir)
{
	u32 fcr31 = ctx->fcr31;
	u32 value;
	u32 mask;
	/* Reading $0 always yields zero. */
	if (MIPSInst_RT(ir) == 0)
		value = 0;
	else
		value = xcp->regs[MIPSInst_RT(ir)];
	switch (MIPSInst_RD(ir)) {
	case FPCREG_CSR:
		pr_debug("%p	gpr[%d]->csr=%08x\n",
			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
		/* Preserve read-only bits. */
		mask = boot_cpu_data.fpu_msk31;
		fcr31 = (value & ~mask) | (fcr31 & mask);
		break;
	case FPCREG_FENR:
		if (!cpu_has_mips_r)
			break;
		pr_debug("%p	gpr[%d]->enr=%08x\n",
			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
		/* Replace FS, the exception enables and the rounding mode. */
		fcr31 &= ~(FPU_CSR_FS | FPU_CSR_ALL_E | FPU_CSR_RM);
		fcr31 |= (value << (FPU_CSR_FS_S - MIPS_FENR_FS_S)) &
			 FPU_CSR_FS;
		fcr31 |= value & (FPU_CSR_ALL_E | FPU_CSR_RM);
		break;
	case FPCREG_FEXR:
		if (!cpu_has_mips_r)
			break;
		pr_debug("%p	gpr[%d]->exr=%08x\n",
			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
		/* Replace cause and sticky flags only. */
		fcr31 &= ~(FPU_CSR_ALL_X | FPU_CSR_ALL_S);
		fcr31 |= value & (FPU_CSR_ALL_X | FPU_CSR_ALL_S);
		break;
	case FPCREG_FCCR:
		if (!cpu_has_mips_r)
			break;
		pr_debug("%p	gpr[%d]->ccr=%08x\n",
			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
		/* Scatter FCCR bits 7..0 back into the FCSR cond fields. */
		fcr31 &= ~(FPU_CSR_CONDX | FPU_CSR_COND);
		fcr31 |= (value << (FPU_CSR_COND_S - MIPS_FCCR_COND0_S)) &
			 FPU_CSR_COND;
		fcr31 |= (value << (FPU_CSR_COND1_S - MIPS_FCCR_COND1_S)) &
			 FPU_CSR_CONDX;
		break;
	default:
		break;
	}
	ctx->fcr31 = fcr31;
}
/*
* Emulate the single floating point instruction pointed at by EPC.
* Two instructions if the instruction is in a branch delay slot.
*/
/*
 * Emulate one COP1 instruction (plus, when it sits in a branch delay
 * slot, the branch that owns the slot).  Returns 0 on success with
 * cp0_epc advanced to `contpc`, or a signal number (SIGILL/SIGBUS/
 * SIGSEGV/SIGFPE); on memory faults *fault_addr is set for siginfo.
 */
static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
		struct mm_decoded_insn dec_insn, void __user **fault_addr)
{
	unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
	unsigned int cond, cbit, bit0;
	mips_instruction ir;
	int likely, pc_inc;
	union fpureg *fpr;
	u32 __user *wva;
	u64 __user *dva;
	u32 wval;
	u64 dval;
	int sig;
	/*
	 * These are giving gcc a gentle hint about what to expect in
	 * dec_inst in order to do better optimization.
	 */
	if (!cpu_has_mmips && dec_insn.micro_mips_mode)
		unreachable();
	/* XXX NEC Vr54xx bug workaround */
	if (delay_slot(xcp)) {
		if (dec_insn.micro_mips_mode) {
			if (!mm_isBranchInstr(xcp, dec_insn, &contpc))
				clear_delay_slot(xcp);
		} else {
			if (!isBranchInstr(xcp, dec_insn, &contpc))
				clear_delay_slot(xcp);
		}
	}
	if (delay_slot(xcp)) {
		/*
		 * The instruction to be emulated is in a branch delay slot
		 * which means that we have to emulate the branch instruction
		 * BEFORE we do the cop1 instruction.
		 *
		 * This branch could be a COP1 branch, but in that case we
		 * would have had a trap for that instruction, and would not
		 * come through this route.
		 *
		 * Linux MIPS branch emulator operates on context, updating the
		 * cp0_epc.
		 */
		ir = dec_insn.next_insn;  /* process delay slot instr */
		pc_inc = dec_insn.next_pc_inc;
	} else {
		ir = dec_insn.insn;       /* process current instr */
		pc_inc = dec_insn.pc_inc;
	}
	/*
	 * Since microMIPS FPU instructions are a subset of MIPS32 FPU
	 * instructions, we want to convert microMIPS FPU instructions
	 * into MIPS32 instructions so that we could reuse all of the
	 * FPU emulation code.
	 *
	 * NOTE: We cannot do this for branch instructions since they
	 *       are not a subset. Example: Cannot emulate a 16-bit
	 *       aligned target address with a MIPS32 instruction.
	 */
	if (dec_insn.micro_mips_mode) {
		/*
		 * If next instruction is a 16-bit instruction, then
		 * it cannot be a FPU instruction. This could happen
		 * since we can be called for non-FPU instructions.
		 */
		if ((pc_inc == 2) ||
			(microMIPS32_to_MIPS32((union mips_instruction *)&ir)
			 == SIGILL))
			return SIGILL;
	}
emul:
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
	MIPS_FPU_EMU_INC_STATS(emulated);
	switch (MIPSInst_OPCODE(ir)) {
	case ldc1_op:
		/* LDC1: load a 64-bit doubleword into FPR rt. */
		dva = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
				      MIPSInst_SIMM(ir));
		MIPS_FPU_EMU_INC_STATS(loads);
		if (!access_ok(dva, sizeof(u64))) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = dva;
			return SIGBUS;
		}
		if (__get_user(dval, dva)) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = dva;
			return SIGSEGV;
		}
		DITOREG(dval, MIPSInst_RT(ir));
		break;
	case sdc1_op:
		/* SDC1: store the 64-bit contents of FPR rt. */
		dva = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
				      MIPSInst_SIMM(ir));
		MIPS_FPU_EMU_INC_STATS(stores);
		DIFROMREG(dval, MIPSInst_RT(ir));
		if (!access_ok(dva, sizeof(u64))) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = dva;
			return SIGBUS;
		}
		if (__put_user(dval, dva)) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = dva;
			return SIGSEGV;
		}
		break;
	case lwc1_op:
		/* LWC1: load a 32-bit word into FPR rt. */
		wva = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
				      MIPSInst_SIMM(ir));
		MIPS_FPU_EMU_INC_STATS(loads);
		if (!access_ok(wva, sizeof(u32))) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = wva;
			return SIGBUS;
		}
		if (__get_user(wval, wva)) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = wva;
			return SIGSEGV;
		}
		SITOREG(wval, MIPSInst_RT(ir));
		break;
	case swc1_op:
		/* SWC1: store the 32-bit contents of FPR rt. */
		wva = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
				      MIPSInst_SIMM(ir));
		MIPS_FPU_EMU_INC_STATS(stores);
		SIFROMREG(wval, MIPSInst_RT(ir));
		if (!access_ok(wva, sizeof(u32))) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = wva;
			return SIGBUS;
		}
		if (__put_user(wval, wva)) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = wva;
			return SIGSEGV;
		}
		break;
	case cop1_op:
		/* The rs field selects the COP1 sub-operation. */
		switch (MIPSInst_RS(ir)) {
		case dmfc_op:
			if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
				return SIGILL;
			/* copregister fs -> gpr[rt] */
			if (MIPSInst_RT(ir) != 0) {
				DIFROMREG(xcp->regs[MIPSInst_RT(ir)],
					  MIPSInst_RD(ir));
			}
			break;
		case dmtc_op:
			if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
				return SIGILL;
			/* copregister fs <- rt */
			DITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
			break;
		case mfhc_op:
			if (!cpu_has_mips_r2_r6)
				return SIGILL;
			/* copregister rd -> gpr[rt] */
			if (MIPSInst_RT(ir) != 0) {
				SIFROMHREG(xcp->regs[MIPSInst_RT(ir)],
					   MIPSInst_RD(ir));
			}
			break;
		case mthc_op:
			if (!cpu_has_mips_r2_r6)
				return SIGILL;
			/* copregister rd <- gpr[rt] */
			SITOHREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
			break;
		case mfc_op:
			/* copregister rd -> gpr[rt] */
			if (MIPSInst_RT(ir) != 0) {
				SIFROMREG(xcp->regs[MIPSInst_RT(ir)],
					  MIPSInst_RD(ir));
			}
			break;
		case mtc_op:
			/* copregister rd <- rt */
			SITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
			break;
		case cfc_op:
			/* cop control register rd -> gpr[rt] */
			cop1_cfc(xcp, ctx, ir);
			break;
		case ctc_op:
			/* copregister rd <- rt */
			cop1_ctc(xcp, ctx, ir);
			/* Raise SIGFPE if the write enabled a pending cause. */
			if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
				return SIGFPE;
			}
			break;
		case bc1eqz_op:
		case bc1nez_op:
			/* R6 compact FP branches; illegal inside a delay slot. */
			if (!cpu_has_mips_r6 || delay_slot(xcp))
				return SIGILL;
			likely = 0;
			cond = 0;
			/* Condition is bit 0 of FPR ft. */
			fpr = &current->thread.fpu.fpr[MIPSInst_RT(ir)];
			bit0 = get_fpr32(fpr, 0) & 0x1;
			switch (MIPSInst_RS(ir)) {
			case bc1eqz_op:
				MIPS_FPU_EMU_INC_STATS(bc1eqz);
				cond = bit0 == 0;
				break;
			case bc1nez_op:
				MIPS_FPU_EMU_INC_STATS(bc1nez);
				cond = bit0 != 0;
				break;
			}
			goto branch_common;
		case bc_op:
			/* Classic BC1F/BC1T[L] on the FCSR condition bits. */
			if (delay_slot(xcp))
				return SIGILL;
			if (cpu_has_mips_4_5_r)
				cbit = fpucondbit[MIPSInst_RT(ir) >> 2];
			else
				cbit = FPU_CSR_COND;
			cond = ctx->fcr31 & cbit;
			likely = 0;
			switch (MIPSInst_RT(ir) & 3) {
			case bcfl_op:
				if (cpu_has_mips_2_3_4_5_r)
					likely = 1;
				fallthrough;
			case bcf_op:
				cond = !cond;
				break;
			case bctl_op:
				if (cpu_has_mips_2_3_4_5_r)
					likely = 1;
				fallthrough;
			case bct_op:
				break;
			}
branch_common:
			MIPS_FPU_EMU_INC_STATS(branches);
			set_delay_slot(xcp);
			if (cond) {
				/*
				 * Branch taken: emulate dslot instruction
				 */
				unsigned long bcpc;
				/*
				 * Remember EPC at the branch to point back
				 * at so that any delay-slot instruction
				 * signal is not silently ignored.
				 */
				bcpc = xcp->cp0_epc;
				xcp->cp0_epc += dec_insn.pc_inc;
				contpc = MIPSInst_SIMM(ir);
				ir = dec_insn.next_insn;
				if (dec_insn.micro_mips_mode) {
					contpc = (xcp->cp0_epc + (contpc << 1));
					/* If 16-bit instruction, not FPU. */
					if ((dec_insn.next_pc_inc == 2) ||
						(microMIPS32_to_MIPS32((union mips_instruction *)&ir) == SIGILL)) {
						/*
						 * Since this instruction will
						 * be put on the stack with
						 * 32-bit words, get around
						 * this problem by putting a
						 * NOP16 as the second one.
						 */
						if (dec_insn.next_pc_inc == 2)
							ir = (ir & (~0xffff)) | MM_NOP16;
						/*
						 * Single step the non-CP1
						 * instruction in the dslot.
						 */
						sig = mips_dsemul(xcp, ir,
								  bcpc, contpc);
						if (sig < 0)
							break;
						if (sig)
							xcp->cp0_epc = bcpc;
						/*
						 * SIGILL forces out of
						 * the emulation loop.
						 */
						return sig ? sig : SIGILL;
					}
				} else
					contpc = (xcp->cp0_epc + (contpc << 2));
				switch (MIPSInst_OPCODE(ir)) {
				case lwc1_op:
				case swc1_op:
					goto emul;
				case ldc1_op:
				case sdc1_op:
					if (cpu_has_mips_2_3_4_5_r)
						goto emul;
					goto bc_sigill;
				case cop1_op:
					goto emul;
				case cop1x_op:
					if (cpu_has_mips_4_5_64_r2_r6)
						/* its one of ours */
						goto emul;
					goto bc_sigill;
				case spec_op:
					switch (MIPSInst_FUNC(ir)) {
					case movc_op:
						if (cpu_has_mips_4_5_r)
							goto emul;
						goto bc_sigill;
					}
					break;
				bc_sigill:
					xcp->cp0_epc = bcpc;
					return SIGILL;
				}
				/*
				 * Single step the non-cp1
				 * instruction in the dslot
				 */
				sig = mips_dsemul(xcp, ir, bcpc, contpc);
				if (sig < 0)
					break;
				if (sig)
					xcp->cp0_epc = bcpc;
				/* SIGILL forces out of the emulation loop.  */
				return sig ? sig : SIGILL;
			} else if (likely) {	/* branch not taken */
				/*
				 * branch likely nullifies
				 * dslot if not taken
				 */
				xcp->cp0_epc += dec_insn.pc_inc;
				contpc += dec_insn.pc_inc;
				/*
				 * else continue & execute
				 * dslot as normal insn
				 */
			}
			break;
		default:
			if (!(MIPSInst_RS(ir) & 0x10))
				return SIGILL;
			/* a real fpu computation instruction */
			sig = fpu_emu(xcp, ctx, ir);
			if (sig)
				return sig;
		}
		break;
	case cop1x_op:
		if (!cpu_has_mips_4_5_64_r2_r6)
			return SIGILL;
		sig = fpux_emu(xcp, ctx, ir, fault_addr);
		if (sig)
			return sig;
		break;
	case spec_op:
		/* MOVF/MOVT: conditional GPR move on an FP condition bit. */
		if (!cpu_has_mips_4_5_r)
			return SIGILL;
		if (MIPSInst_FUNC(ir) != movc_op)
			return SIGILL;
		cond = fpucondbit[MIPSInst_RT(ir) >> 2];
		if (((ctx->fcr31 & cond) != 0) == ((MIPSInst_RT(ir) & 1) != 0))
			xcp->regs[MIPSInst_RD(ir)] =
				xcp->regs[MIPSInst_RS(ir)];
		break;
	default:
		return SIGILL;
	}
	/* we did it !! */
	xcp->cp0_epc = contpc;
	clear_delay_slot(xcp);
	return 0;
}
/*
* Conversion table from MIPS compare ops 48-63
* cond = ieee754dp_cmp(x,y,IEEE754_UN,sig);
*/
/* Indexed by the low three bits of the compare condition code; bit 3 of
 * the condition selects the signalling variant (handled by the caller). */
static const unsigned char cmptab[8] = {
	0,			/* cmp_0 (sig) cmp_sf */
	IEEE754_CUN,		/* cmp_un (sig) cmp_ngle */
	IEEE754_CEQ,		/* cmp_eq (sig) cmp_seq */
	IEEE754_CEQ | IEEE754_CUN,	/* cmp_ueq (sig) cmp_ngl  */
	IEEE754_CLT,		/* cmp_olt (sig) cmp_lt */
	IEEE754_CLT | IEEE754_CUN,	/* cmp_ult (sig) cmp_nge */
	IEEE754_CLT | IEEE754_CEQ,	/* cmp_ole (sig) cmp_le */
	IEEE754_CLT | IEEE754_CEQ | IEEE754_CUN,	/* cmp_ule (sig) cmp_ngt */
};
/* Negated-predicate compare masks; entries 4..7 are implicitly zero
 * because only the first four initializers are provided. */
static const unsigned char negative_cmptab[8] = {
	0, /* Reserved */
	IEEE754_CLT | IEEE754_CGT | IEEE754_CEQ,
	IEEE754_CLT | IEEE754_CGT | IEEE754_CUN,
	IEEE754_CLT | IEEE754_CGT,
	/* Reserved */
};
/*
* Additional MIPS4 instructions
*/
/*
 * Generate a fused three-operand helper: s = f3(f2(f1(s, t), r)).
 * The IEEE754 csr is snapshotted between the two arithmetic steps and
 * the cause/sticky bits of both steps are merged back, so exceptions
 * from neither step are lost.  f3 may be empty (plain madd/msub).
 */
#define DEF3OP(name, p, f1, f2, f3)					\
static union ieee754##p fpemu_##p##_##name(union ieee754##p r,		\
	union ieee754##p s, union ieee754##p t)				\
{									\
	struct _ieee754_csr ieee754_csr_save;				\
	s = f1(s, t);							\
	ieee754_csr_save = ieee754_csr;					\
	s = f2(s, r);							\
	ieee754_csr_save.cx |= ieee754_csr.cx;				\
	ieee754_csr_save.sx |= ieee754_csr.sx;				\
	s = f3(s);							\
	ieee754_csr.cx |= ieee754_csr_save.cx;				\
	ieee754_csr.sx |= ieee754_csr_save.sx;				\
	return s;							\
}
static union ieee754dp fpemu_dp_recip(union ieee754dp d)
{
return ieee754dp_div(ieee754dp_one(0), d);
}
static union ieee754dp fpemu_dp_rsqrt(union ieee754dp d)
{
return ieee754dp_div(ieee754dp_one(0), ieee754dp_sqrt(d));
}
static union ieee754sp fpemu_sp_recip(union ieee754sp s)
{
return ieee754sp_div(ieee754sp_one(0), s);
}
static union ieee754sp fpemu_sp_rsqrt(union ieee754sp s)
{
return ieee754sp_div(ieee754sp_one(0), ieee754sp_sqrt(s));
}
/* Instantiate the legacy (non-2008) fused multiply-add family for both
 * precisions; the empty third argument means "no final negation". */
DEF3OP(madd, sp, ieee754sp_mul, ieee754sp_add, );
DEF3OP(msub, sp, ieee754sp_mul, ieee754sp_sub, );
DEF3OP(nmadd, sp, ieee754sp_mul, ieee754sp_add, ieee754sp_neg);
DEF3OP(nmsub, sp, ieee754sp_mul, ieee754sp_sub, ieee754sp_neg);
DEF3OP(madd, dp, ieee754dp_mul, ieee754dp_add, );
DEF3OP(msub, dp, ieee754dp_mul, ieee754dp_sub, );
DEF3OP(nmadd, dp, ieee754dp_mul, ieee754dp_add, ieee754dp_neg);
DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg);
/*
 * Emulate a COP1X (MIPS IV) instruction: indexed loads/stores and the
 * legacy fused multiply-add family.  Returns 0 on success or a signal
 * number; on memory faults *fault_addr is set for siginfo delivery.
 */
static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
	mips_instruction ir, void __user **fault_addr)
{
	unsigned int rcsr = 0;	/* resulting csr */
	MIPS_FPU_EMU_INC_STATS(cp1xops);
	switch (MIPSInst_FMA_FFMT(ir)) {
	case s_fmt:{		/* 0 */
		union ieee754sp(*handler) (union ieee754sp, union ieee754sp, union ieee754sp);
		union ieee754sp fd, fr, fs, ft;
		u32 __user *va;
		u32 val;
		switch (MIPSInst_FUNC(ir)) {
		case lwxc1_op:
			/* LWXC1: load word, address = GPR fr + GPR ft. */
			va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
					      xcp->regs[MIPSInst_FT(ir)]);
			MIPS_FPU_EMU_INC_STATS(loads);
			if (!access_ok(va, sizeof(u32))) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGBUS;
			}
			if (__get_user(val, va)) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGSEGV;
			}
			SITOREG(val, MIPSInst_FD(ir));
			break;
		case swxc1_op:
			/* SWXC1: store word, address = GPR fr + GPR ft. */
			va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
					      xcp->regs[MIPSInst_FT(ir)]);
			MIPS_FPU_EMU_INC_STATS(stores);
			SIFROMREG(val, MIPSInst_FS(ir));
			if (!access_ok(va, sizeof(u32))) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGBUS;
			}
			if (put_user(val, va)) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGSEGV;
			}
			break;
		case madd_s_op:
			/* 2008-MAC cores fuse; legacy cores round twice. */
			if (cpu_has_mac2008_only)
				handler = ieee754sp_madd;
			else
				handler = fpemu_sp_madd;
			goto scoptop;
		case msub_s_op:
			if (cpu_has_mac2008_only)
				handler = ieee754sp_msub;
			else
				handler = fpemu_sp_msub;
			goto scoptop;
		case nmadd_s_op:
			if (cpu_has_mac2008_only)
				handler = ieee754sp_nmadd;
			else
				handler = fpemu_sp_nmadd;
			goto scoptop;
		case nmsub_s_op:
			if (cpu_has_mac2008_only)
				handler = ieee754sp_nmsub;
			else
				handler = fpemu_sp_nmsub;
			goto scoptop;
		scoptop:
			/* Common tail: fd = handler(fr, fs, ft). */
			SPFROMREG(fr, MIPSInst_FR(ir));
			SPFROMREG(fs, MIPSInst_FS(ir));
			SPFROMREG(ft, MIPSInst_FT(ir));
			fd = (*handler) (fr, fs, ft);
			SPTOREG(fd, MIPSInst_FD(ir));
		copcsr:
			/* Fold IEEE754 exception state into the emulated FCSR. */
			if (ieee754_cxtest(IEEE754_INEXACT)) {
				MIPS_FPU_EMU_INC_STATS(ieee754_inexact);
				rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
			}
			if (ieee754_cxtest(IEEE754_UNDERFLOW)) {
				MIPS_FPU_EMU_INC_STATS(ieee754_underflow);
				rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
			}
			if (ieee754_cxtest(IEEE754_OVERFLOW)) {
				MIPS_FPU_EMU_INC_STATS(ieee754_overflow);
				rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
			}
			if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) {
				MIPS_FPU_EMU_INC_STATS(ieee754_invalidop);
				rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
			}
			ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
			/* SIGFPE if a raised cause bit has its enable set. */
			if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
				/*printk ("SIGFPE: FPU csr = %08x\n",
				   ctx->fcr31); */
				return SIGFPE;
			}
			break;
		default:
			return SIGILL;
		}
		break;
	}
	case d_fmt:{		/* 1 */
		union ieee754dp(*handler) (union ieee754dp, union ieee754dp, union ieee754dp);
		union ieee754dp fd, fr, fs, ft;
		u64 __user *va;
		u64 val;
		switch (MIPSInst_FUNC(ir)) {
		case ldxc1_op:
			/* LDXC1: load doubleword, address = GPR fr + GPR ft. */
			va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
					      xcp->regs[MIPSInst_FT(ir)]);
			MIPS_FPU_EMU_INC_STATS(loads);
			if (!access_ok(va, sizeof(u64))) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGBUS;
			}
			if (__get_user(val, va)) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGSEGV;
			}
			DITOREG(val, MIPSInst_FD(ir));
			break;
		case sdxc1_op:
			/* SDXC1: store doubleword, address = GPR fr + GPR ft. */
			va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
					      xcp->regs[MIPSInst_FT(ir)]);
			MIPS_FPU_EMU_INC_STATS(stores);
			DIFROMREG(val, MIPSInst_FS(ir));
			if (!access_ok(va, sizeof(u64))) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGBUS;
			}
			if (__put_user(val, va)) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGSEGV;
			}
			break;
		case madd_d_op:
			/* 2008-MAC cores fuse; legacy cores round twice. */
			if (cpu_has_mac2008_only)
				handler = ieee754dp_madd;
			else
				handler = fpemu_dp_madd;
			goto dcoptop;
		case msub_d_op:
			if (cpu_has_mac2008_only)
				handler = ieee754dp_msub;
			else
				handler = fpemu_dp_msub;
			goto dcoptop;
		case nmadd_d_op:
			if (cpu_has_mac2008_only)
				handler = ieee754dp_nmadd;
			else
				handler = fpemu_dp_nmadd;
			goto dcoptop;
		case nmsub_d_op:
			if (cpu_has_mac2008_only)
				handler = ieee754dp_nmsub;
			else
				handler = fpemu_dp_nmsub;
			goto dcoptop;
		dcoptop:
			/* Common tail: fd = handler(fr, fs, ft), then CSR fold. */
			DPFROMREG(fr, MIPSInst_FR(ir));
			DPFROMREG(fs, MIPSInst_FS(ir));
			DPFROMREG(ft, MIPSInst_FT(ir));
			fd = (*handler) (fr, fs, ft);
			DPTOREG(fd, MIPSInst_FD(ir));
			goto copcsr;
		default:
			return SIGILL;
		}
		break;
	}
	case 0x3:
		if (MIPSInst_FUNC(ir) != pfetch_op)
			return SIGILL;
		/* ignore prefx operation */
		break;
	default:
		return SIGILL;
	}
	return 0;
}
/*
* Emulate a single COP1 arithmetic instruction.
*/
static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
mips_instruction ir)
{
int rfmt; /* resulting format */
unsigned int rcsr = 0; /* resulting csr */
unsigned int oldrm;
unsigned int cbit;
unsigned int cond;
union {
union ieee754dp d;
union ieee754sp s;
int w;
s64 l;
} rv; /* resulting value */
u64 bits;
MIPS_FPU_EMU_INC_STATS(cp1ops);
switch (rfmt = (MIPSInst_FFMT(ir) & 0xf)) {
case s_fmt: { /* 0 */
union {
union ieee754sp(*b) (union ieee754sp, union ieee754sp);
union ieee754sp(*u) (union ieee754sp);
} handler;
union ieee754sp fd, fs, ft;
switch (MIPSInst_FUNC(ir)) {
/* binary ops */
case fadd_op:
MIPS_FPU_EMU_INC_STATS(add_s);
handler.b = ieee754sp_add;
goto scopbop;
case fsub_op:
MIPS_FPU_EMU_INC_STATS(sub_s);
handler.b = ieee754sp_sub;
goto scopbop;
case fmul_op:
MIPS_FPU_EMU_INC_STATS(mul_s);
handler.b = ieee754sp_mul;
goto scopbop;
case fdiv_op:
MIPS_FPU_EMU_INC_STATS(div_s);
handler.b = ieee754sp_div;
goto scopbop;
/* unary ops */
case fsqrt_op:
if (!cpu_has_mips_2_3_4_5_r)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(sqrt_s);
handler.u = ieee754sp_sqrt;
goto scopuop;
/*
* Note that on some MIPS IV implementations such as the
* R5000 and R8000 the FSQRT and FRECIP instructions do not
* achieve full IEEE-754 accuracy - however this emulator does.
*/
case frsqrt_op:
if (!cpu_has_mips_4_5_64_r2_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(rsqrt_s);
handler.u = fpemu_sp_rsqrt;
goto scopuop;
case frecip_op:
if (!cpu_has_mips_4_5_64_r2_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(recip_s);
handler.u = fpemu_sp_recip;
goto scopuop;
case fmovc_op:
if (!cpu_has_mips_4_5_r)
return SIGILL;
cond = fpucondbit[MIPSInst_FT(ir) >> 2];
if (((ctx->fcr31 & cond) != 0) !=
((MIPSInst_FT(ir) & 1) != 0))
return 0;
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
case fmovz_op:
if (!cpu_has_mips_4_5_r)
return SIGILL;
if (xcp->regs[MIPSInst_FT(ir)] != 0)
return 0;
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
case fmovn_op:
if (!cpu_has_mips_4_5_r)
return SIGILL;
if (xcp->regs[MIPSInst_FT(ir)] == 0)
return 0;
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
case fseleqz_op:
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(seleqz_s);
SPFROMREG(rv.s, MIPSInst_FT(ir));
if (rv.w & 0x1)
rv.w = 0;
else
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
case fselnez_op:
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(selnez_s);
SPFROMREG(rv.s, MIPSInst_FT(ir));
if (rv.w & 0x1)
SPFROMREG(rv.s, MIPSInst_FS(ir));
else
rv.w = 0;
break;
case fmaddf_op: {
union ieee754sp ft, fs, fd;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(maddf_s);
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(fd, MIPSInst_FD(ir));
rv.s = ieee754sp_maddf(fd, fs, ft);
goto copcsr;
}
case fmsubf_op: {
union ieee754sp ft, fs, fd;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(msubf_s);
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(fd, MIPSInst_FD(ir));
rv.s = ieee754sp_msubf(fd, fs, ft);
goto copcsr;
}
case frint_op: {
union ieee754sp fs;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(rint_s);
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_rint(fs);
goto copcsr;
}
case fclass_op: {
union ieee754sp fs;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(class_s);
SPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754sp_2008class(fs);
rfmt = w_fmt;
goto copcsr;
}
case fmin_op: {
union ieee754sp fs, ft;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(min_s);
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmin(fs, ft);
goto copcsr;
}
case fmina_op: {
union ieee754sp fs, ft;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(mina_s);
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmina(fs, ft);
goto copcsr;
}
case fmax_op: {
union ieee754sp fs, ft;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(max_s);
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmax(fs, ft);
goto copcsr;
}
case fmaxa_op: {
union ieee754sp fs, ft;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(maxa_s);
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmaxa(fs, ft);
goto copcsr;
}
case fabs_op:
MIPS_FPU_EMU_INC_STATS(abs_s);
handler.u = ieee754sp_abs;
goto scopuop;
case fneg_op:
MIPS_FPU_EMU_INC_STATS(neg_s);
handler.u = ieee754sp_neg;
goto scopuop;
case fmov_op:
/* an easy one */
MIPS_FPU_EMU_INC_STATS(mov_s);
SPFROMREG(rv.s, MIPSInst_FS(ir));
goto copcsr;
/* binary op on handler */
scopbop:
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
rv.s = (*handler.b) (fs, ft);
goto copcsr;
scopuop:
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = (*handler.u) (fs);
goto copcsr;
copcsr:
if (ieee754_cxtest(IEEE754_INEXACT)) {
MIPS_FPU_EMU_INC_STATS(ieee754_inexact);
rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
}
if (ieee754_cxtest(IEEE754_UNDERFLOW)) {
MIPS_FPU_EMU_INC_STATS(ieee754_underflow);
rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
}
if (ieee754_cxtest(IEEE754_OVERFLOW)) {
MIPS_FPU_EMU_INC_STATS(ieee754_overflow);
rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
}
if (ieee754_cxtest(IEEE754_ZERO_DIVIDE)) {
MIPS_FPU_EMU_INC_STATS(ieee754_zerodiv);
rcsr |= FPU_CSR_DIV_X | FPU_CSR_DIV_S;
}
if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) {
MIPS_FPU_EMU_INC_STATS(ieee754_invalidop);
rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
}
break;
/* unary conv ops */
case fcvts_op:
return SIGILL; /* not defined */
case fcvtd_op:
MIPS_FPU_EMU_INC_STATS(cvt_d_s);
SPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fsp(fs);
rfmt = d_fmt;
goto copcsr;
case fcvtw_op:
MIPS_FPU_EMU_INC_STATS(cvt_w_s);
SPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754sp_tint(fs);
rfmt = w_fmt;
goto copcsr;
case fround_op:
case ftrunc_op:
case fceil_op:
case ffloor_op:
if (!cpu_has_mips_2_3_4_5_r)
return SIGILL;
if (MIPSInst_FUNC(ir) == fceil_op)
MIPS_FPU_EMU_INC_STATS(ceil_w_s);
if (MIPSInst_FUNC(ir) == ffloor_op)
MIPS_FPU_EMU_INC_STATS(floor_w_s);
if (MIPSInst_FUNC(ir) == fround_op)
MIPS_FPU_EMU_INC_STATS(round_w_s);
if (MIPSInst_FUNC(ir) == ftrunc_op)
MIPS_FPU_EMU_INC_STATS(trunc_w_s);
oldrm = ieee754_csr.rm;
SPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = MIPSInst_FUNC(ir);
rv.w = ieee754sp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
goto copcsr;
case fsel_op:
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(sel_s);
SPFROMREG(fd, MIPSInst_FD(ir));
if (fd.bits & 0x1)
SPFROMREG(rv.s, MIPSInst_FT(ir));
else
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
case fcvtl_op:
if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(cvt_l_s);
SPFROMREG(fs, MIPSInst_FS(ir));
rv.l = ieee754sp_tlong(fs);
rfmt = l_fmt;
goto copcsr;
case froundl_op:
case ftruncl_op:
case fceill_op:
case ffloorl_op:
if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
if (MIPSInst_FUNC(ir) == fceill_op)
MIPS_FPU_EMU_INC_STATS(ceil_l_s);
if (MIPSInst_FUNC(ir) == ffloorl_op)
MIPS_FPU_EMU_INC_STATS(floor_l_s);
if (MIPSInst_FUNC(ir) == froundl_op)
MIPS_FPU_EMU_INC_STATS(round_l_s);
if (MIPSInst_FUNC(ir) == ftruncl_op)
MIPS_FPU_EMU_INC_STATS(trunc_l_s);
oldrm = ieee754_csr.rm;
SPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = MIPSInst_FUNC(ir);
rv.l = ieee754sp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
goto copcsr;
default:
if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
unsigned int cmpop;
union ieee754sp fs, ft;
cmpop = MIPSInst_FUNC(ir) - fcmp_op;
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
rv.w = ieee754sp_cmp(fs, ft,
cmptab[cmpop & 0x7], cmpop & 0x8);
rfmt = -1;
if ((cmpop & 0x8) && ieee754_cxtest
(IEEE754_INVALID_OPERATION))
rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
else
goto copcsr;
} else
return SIGILL;
break;
}
break;
}
case d_fmt: {
union ieee754dp fd, fs, ft;
union {
union ieee754dp(*b) (union ieee754dp, union ieee754dp);
union ieee754dp(*u) (union ieee754dp);
} handler;
switch (MIPSInst_FUNC(ir)) {
/* binary ops */
case fadd_op:
MIPS_FPU_EMU_INC_STATS(add_d);
handler.b = ieee754dp_add;
goto dcopbop;
case fsub_op:
MIPS_FPU_EMU_INC_STATS(sub_d);
handler.b = ieee754dp_sub;
goto dcopbop;
case fmul_op:
MIPS_FPU_EMU_INC_STATS(mul_d);
handler.b = ieee754dp_mul;
goto dcopbop;
case fdiv_op:
MIPS_FPU_EMU_INC_STATS(div_d);
handler.b = ieee754dp_div;
goto dcopbop;
/* unary ops */
case fsqrt_op:
if (!cpu_has_mips_2_3_4_5_r)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(sqrt_d);
handler.u = ieee754dp_sqrt;
goto dcopuop;
/*
* Note that on some MIPS IV implementations such as the
* R5000 and R8000 the FSQRT and FRECIP instructions do not
* achieve full IEEE-754 accuracy - however this emulator does.
*/
case frsqrt_op:
if (!cpu_has_mips_4_5_64_r2_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(rsqrt_d);
handler.u = fpemu_dp_rsqrt;
goto dcopuop;
case frecip_op:
if (!cpu_has_mips_4_5_64_r2_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(recip_d);
handler.u = fpemu_dp_recip;
goto dcopuop;
case fmovc_op:
if (!cpu_has_mips_4_5_r)
return SIGILL;
cond = fpucondbit[MIPSInst_FT(ir) >> 2];
if (((ctx->fcr31 & cond) != 0) !=
((MIPSInst_FT(ir) & 1) != 0))
return 0;
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
case fmovz_op:
if (!cpu_has_mips_4_5_r)
return SIGILL;
if (xcp->regs[MIPSInst_FT(ir)] != 0)
return 0;
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
case fmovn_op:
if (!cpu_has_mips_4_5_r)
return SIGILL;
if (xcp->regs[MIPSInst_FT(ir)] == 0)
return 0;
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
case fseleqz_op:
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(seleqz_d);
DPFROMREG(rv.d, MIPSInst_FT(ir));
if (rv.l & 0x1)
rv.l = 0;
else
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
case fselnez_op:
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(selnez_d);
DPFROMREG(rv.d, MIPSInst_FT(ir));
if (rv.l & 0x1)
DPFROMREG(rv.d, MIPSInst_FS(ir));
else
rv.l = 0;
break;
case fmaddf_op: {
union ieee754dp ft, fs, fd;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(maddf_d);
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(fd, MIPSInst_FD(ir));
rv.d = ieee754dp_maddf(fd, fs, ft);
goto copcsr;
}
case fmsubf_op: {
union ieee754dp ft, fs, fd;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(msubf_d);
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(fd, MIPSInst_FD(ir));
rv.d = ieee754dp_msubf(fd, fs, ft);
goto copcsr;
}
case frint_op: {
union ieee754dp fs;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(rint_d);
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_rint(fs);
goto copcsr;
}
case fclass_op: {
union ieee754dp fs;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(class_d);
DPFROMREG(fs, MIPSInst_FS(ir));
rv.l = ieee754dp_2008class(fs);
rfmt = l_fmt;
goto copcsr;
}
case fmin_op: {
union ieee754dp fs, ft;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(min_d);
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmin(fs, ft);
goto copcsr;
}
case fmina_op: {
union ieee754dp fs, ft;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(mina_d);
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmina(fs, ft);
goto copcsr;
}
case fmax_op: {
union ieee754dp fs, ft;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(max_d);
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmax(fs, ft);
goto copcsr;
}
case fmaxa_op: {
union ieee754dp fs, ft;
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(maxa_d);
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmaxa(fs, ft);
goto copcsr;
}
case fabs_op:
MIPS_FPU_EMU_INC_STATS(abs_d);
handler.u = ieee754dp_abs;
goto dcopuop;
case fneg_op:
MIPS_FPU_EMU_INC_STATS(neg_d);
handler.u = ieee754dp_neg;
goto dcopuop;
case fmov_op:
/* an easy one */
MIPS_FPU_EMU_INC_STATS(mov_d);
DPFROMREG(rv.d, MIPSInst_FS(ir));
goto copcsr;
/* binary op on handler */
dcopbop:
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
rv.d = (*handler.b) (fs, ft);
goto copcsr;
dcopuop:
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = (*handler.u) (fs);
goto copcsr;
/*
* unary conv ops
*/
case fcvts_op:
MIPS_FPU_EMU_INC_STATS(cvt_s_d);
DPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fdp(fs);
rfmt = s_fmt;
goto copcsr;
case fcvtd_op:
return SIGILL; /* not defined */
case fcvtw_op:
MIPS_FPU_EMU_INC_STATS(cvt_w_d);
DPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754dp_tint(fs); /* wrong */
rfmt = w_fmt;
goto copcsr;
case fround_op:
case ftrunc_op:
case fceil_op:
case ffloor_op:
if (!cpu_has_mips_2_3_4_5_r)
return SIGILL;
if (MIPSInst_FUNC(ir) == fceil_op)
MIPS_FPU_EMU_INC_STATS(ceil_w_d);
if (MIPSInst_FUNC(ir) == ffloor_op)
MIPS_FPU_EMU_INC_STATS(floor_w_d);
if (MIPSInst_FUNC(ir) == fround_op)
MIPS_FPU_EMU_INC_STATS(round_w_d);
if (MIPSInst_FUNC(ir) == ftrunc_op)
MIPS_FPU_EMU_INC_STATS(trunc_w_d);
oldrm = ieee754_csr.rm;
DPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = MIPSInst_FUNC(ir);
rv.w = ieee754dp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
goto copcsr;
case fsel_op:
if (!cpu_has_mips_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(sel_d);
DPFROMREG(fd, MIPSInst_FD(ir));
if (fd.bits & 0x1)
DPFROMREG(rv.d, MIPSInst_FT(ir));
else
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
case fcvtl_op:
if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
MIPS_FPU_EMU_INC_STATS(cvt_l_d);
DPFROMREG(fs, MIPSInst_FS(ir));
rv.l = ieee754dp_tlong(fs);
rfmt = l_fmt;
goto copcsr;
case froundl_op:
case ftruncl_op:
case fceill_op:
case ffloorl_op:
if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
if (MIPSInst_FUNC(ir) == fceill_op)
MIPS_FPU_EMU_INC_STATS(ceil_l_d);
if (MIPSInst_FUNC(ir) == ffloorl_op)
MIPS_FPU_EMU_INC_STATS(floor_l_d);
if (MIPSInst_FUNC(ir) == froundl_op)
MIPS_FPU_EMU_INC_STATS(round_l_d);
if (MIPSInst_FUNC(ir) == ftruncl_op)
MIPS_FPU_EMU_INC_STATS(trunc_l_d);
oldrm = ieee754_csr.rm;
DPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = MIPSInst_FUNC(ir);
rv.l = ieee754dp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
goto copcsr;
default:
if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
unsigned int cmpop;
union ieee754dp fs, ft;
cmpop = MIPSInst_FUNC(ir) - fcmp_op;
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
rv.w = ieee754dp_cmp(fs, ft,
cmptab[cmpop & 0x7], cmpop & 0x8);
rfmt = -1;
if ((cmpop & 0x8)
&&
ieee754_cxtest
(IEEE754_INVALID_OPERATION))
rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
else
goto copcsr;
}
else {
return SIGILL;
}
break;
}
break;
}
case w_fmt: {
union ieee754dp fs;
switch (MIPSInst_FUNC(ir)) {
case fcvts_op:
/* convert word to single precision real */
MIPS_FPU_EMU_INC_STATS(cvt_s_w);
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fint(fs.bits);
rfmt = s_fmt;
goto copcsr;
case fcvtd_op:
/* convert word to double precision real */
MIPS_FPU_EMU_INC_STATS(cvt_d_w);
SPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fint(fs.bits);
rfmt = d_fmt;
goto copcsr;
default: {
/* Emulating the new CMP.condn.fmt R6 instruction */
#define CMPOP_MASK 0x7
#define SIGN_BIT (0x1 << 3)
#define PREDICATE_BIT (0x1 << 4)
int cmpop = MIPSInst_FUNC(ir) & CMPOP_MASK;
int sig = MIPSInst_FUNC(ir) & SIGN_BIT;
union ieee754sp fs, ft;
/* This is an R6 only instruction */
if (!cpu_has_mips_r6 ||
(MIPSInst_FUNC(ir) & 0x20))
return SIGILL;
if (!sig) {
if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
switch (cmpop) {
case 0:
MIPS_FPU_EMU_INC_STATS(cmp_af_s);
break;
case 1:
MIPS_FPU_EMU_INC_STATS(cmp_un_s);
break;
case 2:
MIPS_FPU_EMU_INC_STATS(cmp_eq_s);
break;
case 3:
MIPS_FPU_EMU_INC_STATS(cmp_ueq_s);
break;
case 4:
MIPS_FPU_EMU_INC_STATS(cmp_lt_s);
break;
case 5:
MIPS_FPU_EMU_INC_STATS(cmp_ult_s);
break;
case 6:
MIPS_FPU_EMU_INC_STATS(cmp_le_s);
break;
case 7:
MIPS_FPU_EMU_INC_STATS(cmp_ule_s);
break;
}
} else {
switch (cmpop) {
case 1:
MIPS_FPU_EMU_INC_STATS(cmp_or_s);
break;
case 2:
MIPS_FPU_EMU_INC_STATS(cmp_une_s);
break;
case 3:
MIPS_FPU_EMU_INC_STATS(cmp_ne_s);
break;
}
}
} else {
if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
switch (cmpop) {
case 0:
MIPS_FPU_EMU_INC_STATS(cmp_saf_s);
break;
case 1:
MIPS_FPU_EMU_INC_STATS(cmp_sun_s);
break;
case 2:
MIPS_FPU_EMU_INC_STATS(cmp_seq_s);
break;
case 3:
MIPS_FPU_EMU_INC_STATS(cmp_sueq_s);
break;
case 4:
MIPS_FPU_EMU_INC_STATS(cmp_slt_s);
break;
case 5:
MIPS_FPU_EMU_INC_STATS(cmp_sult_s);
break;
case 6:
MIPS_FPU_EMU_INC_STATS(cmp_sle_s);
break;
case 7:
MIPS_FPU_EMU_INC_STATS(cmp_sule_s);
break;
}
} else {
switch (cmpop) {
case 1:
MIPS_FPU_EMU_INC_STATS(cmp_sor_s);
break;
case 2:
MIPS_FPU_EMU_INC_STATS(cmp_sune_s);
break;
case 3:
MIPS_FPU_EMU_INC_STATS(cmp_sne_s);
break;
}
}
}
/* fmt is w_fmt for single precision so fix it */
rfmt = s_fmt;
/* default to false */
rv.w = 0;
/* CMP.condn.S */
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
/* positive predicates */
if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
if (ieee754sp_cmp(fs, ft, cmptab[cmpop],
sig))
rv.w = -1; /* true, all 1s */
if ((sig) &&
ieee754_cxtest(IEEE754_INVALID_OPERATION))
rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
else
goto copcsr;
} else {
/* negative predicates */
switch (cmpop) {
case 1:
case 2:
case 3:
if (ieee754sp_cmp(fs, ft,
negative_cmptab[cmpop],
sig))
rv.w = -1; /* true, all 1s */
if (sig &&
ieee754_cxtest(IEEE754_INVALID_OPERATION))
rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
else
goto copcsr;
break;
default:
/* Reserved R6 ops */
return SIGILL;
}
}
break;
}
}
break;
}
case l_fmt:
if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
DIFROMREG(bits, MIPSInst_FS(ir));
switch (MIPSInst_FUNC(ir)) {
case fcvts_op:
/* convert long to single precision real */
MIPS_FPU_EMU_INC_STATS(cvt_s_l);
rv.s = ieee754sp_flong(bits);
rfmt = s_fmt;
goto copcsr;
case fcvtd_op:
/* convert long to double precision real */
MIPS_FPU_EMU_INC_STATS(cvt_d_l);
rv.d = ieee754dp_flong(bits);
rfmt = d_fmt;
goto copcsr;
default: {
/* Emulating the new CMP.condn.fmt R6 instruction */
int cmpop = MIPSInst_FUNC(ir) & CMPOP_MASK;
int sig = MIPSInst_FUNC(ir) & SIGN_BIT;
union ieee754dp fs, ft;
if (!cpu_has_mips_r6 ||
(MIPSInst_FUNC(ir) & 0x20))
return SIGILL;
if (!sig) {
if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
switch (cmpop) {
case 0:
MIPS_FPU_EMU_INC_STATS(cmp_af_d);
break;
case 1:
MIPS_FPU_EMU_INC_STATS(cmp_un_d);
break;
case 2:
MIPS_FPU_EMU_INC_STATS(cmp_eq_d);
break;
case 3:
MIPS_FPU_EMU_INC_STATS(cmp_ueq_d);
break;
case 4:
MIPS_FPU_EMU_INC_STATS(cmp_lt_d);
break;
case 5:
MIPS_FPU_EMU_INC_STATS(cmp_ult_d);
break;
case 6:
MIPS_FPU_EMU_INC_STATS(cmp_le_d);
break;
case 7:
MIPS_FPU_EMU_INC_STATS(cmp_ule_d);
break;
}
} else {
switch (cmpop) {
case 1:
MIPS_FPU_EMU_INC_STATS(cmp_or_d);
break;
case 2:
MIPS_FPU_EMU_INC_STATS(cmp_une_d);
break;
case 3:
MIPS_FPU_EMU_INC_STATS(cmp_ne_d);
break;
}
}
} else {
if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
switch (cmpop) {
case 0:
MIPS_FPU_EMU_INC_STATS(cmp_saf_d);
break;
case 1:
MIPS_FPU_EMU_INC_STATS(cmp_sun_d);
break;
case 2:
MIPS_FPU_EMU_INC_STATS(cmp_seq_d);
break;
case 3:
MIPS_FPU_EMU_INC_STATS(cmp_sueq_d);
break;
case 4:
MIPS_FPU_EMU_INC_STATS(cmp_slt_d);
break;
case 5:
MIPS_FPU_EMU_INC_STATS(cmp_sult_d);
break;
case 6:
MIPS_FPU_EMU_INC_STATS(cmp_sle_d);
break;
case 7:
MIPS_FPU_EMU_INC_STATS(cmp_sule_d);
break;
}
} else {
switch (cmpop) {
case 1:
MIPS_FPU_EMU_INC_STATS(cmp_sor_d);
break;
case 2:
MIPS_FPU_EMU_INC_STATS(cmp_sune_d);
break;
case 3:
MIPS_FPU_EMU_INC_STATS(cmp_sne_d);
break;
}
}
}
/* fmt is l_fmt for double precision so fix it */
rfmt = d_fmt;
/* default to false */
rv.l = 0;
/* CMP.condn.D */
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
/* positive predicates */
if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
if (ieee754dp_cmp(fs, ft,
cmptab[cmpop], sig))
rv.l = -1LL; /* true, all 1s */
if (sig &&
ieee754_cxtest(IEEE754_INVALID_OPERATION))
rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
else
goto copcsr;
} else {
/* negative predicates */
switch (cmpop) {
case 1:
case 2:
case 3:
if (ieee754dp_cmp(fs, ft,
negative_cmptab[cmpop],
sig))
rv.l = -1LL; /* true, all 1s */
if (sig &&
ieee754_cxtest(IEEE754_INVALID_OPERATION))
rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
else
goto copcsr;
break;
default:
/* Reserved R6 ops */
return SIGILL;
}
}
break;
}
}
break;
default:
return SIGILL;
}
/*
* Update the fpu CSR register for this operation.
* If an exception is required, generate a tidy SIGFPE exception,
* without updating the result register.
* Note: cause exception bits do not accumulate, they are rewritten
* for each op; only the flag/sticky bits accumulate.
*/
ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
/*printk ("SIGFPE: FPU csr = %08x\n",ctx->fcr31); */
return SIGFPE;
}
/*
* Now we can safely write the result back to the register file.
*/
switch (rfmt) {
case -1:
if (cpu_has_mips_4_5_r)
cbit = fpucondbit[MIPSInst_FD(ir) >> 2];
else
cbit = FPU_CSR_COND;
if (rv.w)
ctx->fcr31 |= cbit;
else
ctx->fcr31 &= ~cbit;
break;
case d_fmt:
DPTOREG(rv.d, MIPSInst_FD(ir));
break;
case s_fmt:
SPTOREG(rv.s, MIPSInst_FD(ir));
break;
case w_fmt:
SITOREG(rv.w, MIPSInst_FD(ir));
break;
case l_fmt:
if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
DITOREG(rv.l, MIPSInst_FD(ir));
break;
default:
return SIGILL;
}
return 0;
}
/*
* Emulate FPU instructions.
*
* If we use FPU hardware, then we have been typically called to handle
* an unimplemented operation, such as where an operand is a NaN or
* denormalized. In that case exit the emulation loop after a single
* iteration so as to let hardware execute any subsequent instructions.
*
* If we have no FPU hardware or it has been disabled, then continue
* emulating floating-point instructions until one of these conditions
* has occurred:
*
* - a non-FPU instruction has been encountered,
*
* - an attempt to emulate has ended with a signal,
*
* - the ISA mode has been switched.
*
* We need to terminate the emulation loop if we got switched to the
* MIPS16 mode, whether supported or not, so that we do not attempt
* to emulate a MIPS16 instruction as a regular MIPS FPU instruction.
* Similarly if we got switched to the microMIPS mode and only the
* regular MIPS mode is supported, so that we do not attempt to emulate
* a microMIPS instruction as a regular MIPS FPU instruction. Or if
* we got switched to the regular MIPS mode and only the microMIPS mode
* is supported, so that we do not attempt to emulate a regular MIPS
* instruction that should cause an Address Error exception instead.
* For simplicity we always terminate upon an ISA mode switch.
*/
/*
 * Fetch and emulate coprocessor-1 (FPU) instructions starting at
 * xcp->cp0_epc.  Without FPU hardware the loop keeps emulating while
 * EPC advances; with hardware present (has_fpu) exactly one iteration
 * runs so hardware can resume afterwards.
 *
 * Returns 0 on success or a signal number: SIGBUS on instruction-fetch
 * failure, otherwise whatever cop1Emulate() reported (SIGILL for a
 * non-FPU instruction is suppressed if EPC already advanced).
 */
int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
	int has_fpu, void __user **fault_addr)
{
	unsigned long oldepc, prevepc;
	struct mm_decoded_insn dec_insn;
	u16 instr[4];
	u16 *instr_ptr;
	int sig = 0;

	/*
	 * Initialize context if it hasn't been used already, otherwise ensure
	 * it has been saved to struct thread_struct.
	 */
	if (!init_fp_ctx(current))
		lose_fpu(1);

	oldepc = xcp->cp0_epc;
	do {
		prevepc = xcp->cp0_epc;

		if (get_isa16_mode(prevepc) && cpu_has_mmips) {
			/*
			 * Get next 2 microMIPS instructions and convert them
			 * into 32-bit instructions.
			 */
			if ((get_user(instr[0], (u16 __user *)msk_isa16_mode(xcp->cp0_epc))) ||
			    (get_user(instr[1], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 2))) ||
			    (get_user(instr[2], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 4))) ||
			    (get_user(instr[3], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 6)))) {
				MIPS_FPU_EMU_INC_STATS(errors);
				return SIGBUS;
			}
			instr_ptr = instr;

			/* Get first instruction. */
			if (mm_insn_16bit(*instr_ptr)) {
				/* Duplicate the half-word. */
				dec_insn.insn = (*instr_ptr << 16) |
					(*instr_ptr);
				/* 16-bit instruction. */
				dec_insn.pc_inc = 2;
				instr_ptr += 1;
			} else {
				dec_insn.insn = (*instr_ptr << 16) |
					*(instr_ptr+1);
				/* 32-bit instruction. */
				dec_insn.pc_inc = 4;
				instr_ptr += 2;
			}
			/* Get second instruction. */
			if (mm_insn_16bit(*instr_ptr)) {
				/* Duplicate the half-word. */
				dec_insn.next_insn = (*instr_ptr << 16) |
					(*instr_ptr);
				/* 16-bit instruction. */
				dec_insn.next_pc_inc = 2;
			} else {
				dec_insn.next_insn = (*instr_ptr << 16) |
					*(instr_ptr+1);
				/* 32-bit instruction. */
				dec_insn.next_pc_inc = 4;
			}
			dec_insn.micro_mips_mode = 1;
		} else {
			/* Classic MIPS mode: fetch current and next word. */
			if ((get_user(dec_insn.insn,
			    (mips_instruction __user *) xcp->cp0_epc)) ||
			    (get_user(dec_insn.next_insn,
			    (mips_instruction __user *)(xcp->cp0_epc+4)))) {
				MIPS_FPU_EMU_INC_STATS(errors);
				return SIGBUS;
			}
			dec_insn.pc_inc = 4;
			dec_insn.next_pc_inc = 4;
			dec_insn.micro_mips_mode = 0;
		}

		if ((dec_insn.insn == 0) ||
		   ((dec_insn.pc_inc == 2) &&
		   ((dec_insn.insn & 0xffff) == MM_NOP16)))
			xcp->cp0_epc += dec_insn.pc_inc;	/* Skip NOPs */
		else {
			/*
			 * The 'ieee754_csr' is an alias of ctx->fcr31.
			 * No need to copy ctx->fcr31 to ieee754_csr.
			 */
			sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr);
		}

		if (has_fpu)
			break;
		if (sig)
			break;

		/*
		 * We have to check for the ISA bit explicitly here,
		 * because `get_isa16_mode' may return 0 if support
		 * for code compression has been globally disabled,
		 * or otherwise we may produce the wrong signal or
		 * even proceed successfully where we must not.
		 */
		if ((xcp->cp0_epc ^ prevepc) & 0x1)
			break;

		cond_resched();
	} while (xcp->cp0_epc > prevepc);

	/* SIGILL indicates a non-fpu instruction */
	if (sig == SIGILL && xcp->cp0_epc != oldepc)
		/* but if EPC has advanced, then ignore it */
		sig = 0;

	return sig;
}
| linux-master | arch/mips/math-emu/cp1emu.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754dp.h"
/*
 * Double-precision multiply: returns x * y rounded per ieee754_csr.rm.
 *
 * After class dispatch both significands are normalized (hidden bit
 * set) and shunted to the top of a 64-bit word; the product is formed
 * as a full 64x64 -> 128-bit multiply from four 32x32 -> 64 partial
 * products, then sticky-shifted down to the hidden+fraction+GRS
 * precision that ieee754dp_format() expects.
 */
union ieee754dp ieee754dp_mul(union ieee754dp x, union ieee754dp y)
{
	int re;			/* result exponent */
	int rs;			/* result sign */
	u64 rm;			/* result mantissa incl. guard/round/sticky */
	unsigned int lxm;	/* 32-bit halves of the shifted operands */
	unsigned int hxm;
	unsigned int lym;
	unsigned int hym;
	u64 lrm;		/* low 64 bits of the 128-bit product */
	u64 hrm;		/* high 64 bits of the 128-bit product */
	u64 t;			/* current cross partial product */
	u64 at;			/* addition temporary for carry detection */

	COMPXDP;
	COMPYDP;

	EXPLODEXDP;
	EXPLODEYDP;

	ieee754_clearcx();

	FLUSHXDP;
	FLUSHYDP;

	/* NaNs, infinities and zeros are fully resolved by class pair. */
	switch (CLPAIR(xc, yc)) {
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
		return ieee754dp_nanxcpt(y);

	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		return ieee754dp_nanxcpt(x);

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return y;

	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return x;

	/*
	 * Infinity handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
		/* inf * 0 is invalid */
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754dp_indef();

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		return ieee754dp_inf(xs ^ ys);

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		return ieee754dp_zero(xs ^ ys);

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		DPDNORMX;
		fallthrough;
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		DPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		DPDNORMX;
		break;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
		break;
	}
	/* rm = xm * ym, re = xe+ye basically */
	assert(xm & DP_HIDDEN_BIT);
	assert(ym & DP_HIDDEN_BIT);

	re = xe + ye;
	rs = xs ^ ys;

	/* shunt to top of word */
	xm <<= 64 - (DP_FBITS + 1);
	ym <<= 64 - (DP_FBITS + 1);

	/*
	 * Multiply 64 bits xm, ym to give high 64 bits rm with stickness.
	 */
	lxm = xm;
	hxm = xm >> 32;
	lym = ym;
	hym = ym >> 32;

	lrm = DPXMULT(lxm, lym);
	hrm = DPXMULT(hxm, hym);

	/* Accumulate cross product lxm*hym; 'at < lrm' detects the carry. */
	t = DPXMULT(lxm, hym);

	at = lrm + (t << 32);
	hrm += at < lrm;
	lrm = at;

	hrm = hrm + (t >> 32);

	/* Accumulate cross product hxm*lym the same way. */
	t = DPXMULT(hxm, lym);

	at = lrm + (t << 32);
	hrm += at < lrm;
	lrm = at;

	hrm = hrm + (t >> 32);

	/* Fold any non-zero low half into the sticky bit. */
	rm = hrm | (lrm != 0);

	/*
	 * Sticky shift down to normal rounding precision.
	 */
	if ((s64) rm < 0) {
		/* product in [2,4): shift one less, bump the exponent */
		rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
		     ((rm << (DP_FBITS + 1 + 3)) != 0);
		re++;
	} else {
		rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
		     ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
	}
	assert(rm & (DP_HIDDEN_BIT << 3));

	return ieee754dp_format(rs, re, rm);
}
| linux-master | arch/mips/math-emu/dp_mul.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754sp.h"
/*
 * Convert a signed 64-bit integer to single precision.
 * Values not exactly representable in 24 significand bits are rounded
 * by ieee754sp_format() per the current rounding mode.
 */
union ieee754sp ieee754sp_flong(s64 x)
{
	u64 xm;		/* <--- need 64-bit mantissa temp */
	int xe;
	int xs;

	ieee754_clearcx();

	/* Fast paths for the common constants 0, +/-1 and +/-10. */
	if (x == 0)
		return ieee754sp_zero(0);
	if (x == 1 || x == -1)
		return ieee754sp_one(x < 0);
	if (x == 10 || x == -10)
		return ieee754sp_ten(x < 0);

	xs = (x < 0);
	if (xs) {
		if (x == (1ULL << 63))
			xm = (1ULL << 63);	/* max neg can't be safely negated */
		else
			xm = -x;
	} else {
		xm = x;
	}

	/* Binary point starts just above the guard/round/sticky bits. */
	xe = SP_FBITS + 3;

	if (xm >> (SP_FBITS + 1 + 3)) {
		/* shunt out overflow bits
		 */
		while (xm >> (SP_FBITS + 1 + 3)) {
			SPXSRSX1();
		}
	} else {
		/* normalize in grs extended single precision */
		while ((xm >> (SP_FBITS + 3)) == 0) {
			xm <<= 1;
			xe--;
		}
	}
	return ieee754sp_format(xs, xe, xm);
}
| linux-master | arch/mips/math-emu/sp_flong.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE754 floating point arithmetic
* single precision: MAX{,A}.f
* MAX : Scalar Floating-Point Maximum
* MAXA: Scalar Floating-Point argument with Maximum Absolute Value
*
* MAX.S : FPR[fd] = maxNum(FPR[fs],FPR[ft])
* MAXA.S: FPR[fd] = maxNumMag(FPR[fs],FPR[ft])
*
* MIPS floating point support
* Copyright (C) 2015 Imagination Technologies, Ltd.
* Author: Markos Chandras <[email protected]>
*/
#include "ieee754sp.h"
/*
 * MAX.S: IEEE 754-2008 maxNum.  Returns the larger of x and y; a quiet
 * NaN in exactly one operand is ignored (the number is preferred),
 * while any signalling NaN raises Invalid Operation via nanxcpt.
 */
union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
{
	COMPXSP;
	COMPYSP;

	EXPLODEXSP;
	EXPLODEYSP;

	FLUSHXSP;
	FLUSHYSP;

	ieee754_clearcx();

	switch (CLPAIR(xc, yc)) {
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
		return ieee754sp_nanxcpt(y);

	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		return ieee754sp_nanxcpt(x);

	/*
	 * Quiet NaN handling
	 */

	/*
	 * The case of both inputs quiet NaNs
	 */
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
		return x;

	/*
	 * The cases of exactly one input quiet NaN (numbers
	 * are here preferred as returned values to NaNs)
	 */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return x;

	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return y;

	/*
	 * Infinity and zero handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		/* x has the strictly larger magnitude class: it wins
		   unless it is negative */
		return xs ? y : x;

	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
		/* y has the (at least) equally large magnitude class */
		return ys ? x : y;

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
		/* max(+0, -0) is +0: negative only when both signs set */
		return ieee754sp_zero(xs & ys);

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		SPDNORMX;
		fallthrough;
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		SPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		SPDNORMX;
	}

	/* Finally get to do some computation */

	assert(xm & SP_HIDDEN_BIT);
	assert(ym & SP_HIDDEN_BIT);

	/* Compare signs */
	if (xs > ys)
		return y;
	else if (xs < ys)
		return x;

	/* Signs of inputs are equal, let's compare exponents */
	if (xs == 0) {
		/* Inputs are both positive */
		if (xe > ye)
			return x;
		else if (xe < ye)
			return y;
	} else {
		/* Inputs are both negative */
		if (xe > ye)
			return y;
		else if (xe < ye)
			return x;
	}

	/* Signs and exponents of inputs are equal, let's compare mantissas */
	if (xs == 0) {
		/* Inputs are both positive, with equal signs and exponents */
		if (xm <= ym)
			return y;
		return x;
	}
	/* Inputs are both negative, with equal signs and exponents */
	if (xm <= ym)
		return x;
	return y;
}
/*
 * MAXA.S: IEEE 754-2008 maxNumMag.  Returns the argument with the
 * larger absolute value; on equal magnitudes the non-negative operand
 * is preferred.  NaN handling matches ieee754sp_fmax().
 */
union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
{
	COMPXSP;
	COMPYSP;

	EXPLODEXSP;
	EXPLODEYSP;

	FLUSHXSP;
	FLUSHYSP;

	ieee754_clearcx();

	switch (CLPAIR(xc, yc)) {
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
		return ieee754sp_nanxcpt(y);

	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		return ieee754sp_nanxcpt(x);

	/*
	 * Quiet NaN handling
	 */

	/*
	 * The case of both inputs quiet NaNs
	 */
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
		return x;

	/*
	 * The cases of exactly one input quiet NaN (numbers
	 * are here preferred as returned values to NaNs)
	 */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return x;

	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return y;

	/*
	 * Infinity and zero handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		/* equal magnitudes: negative only when both signs set */
		return ieee754sp_inf(xs & ys);

	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		/* |x| is in the strictly larger magnitude class */
		return x;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
		/* |y| is in the strictly larger magnitude class */
		return y;

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
		return ieee754sp_zero(xs & ys);

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		SPDNORMX;
		fallthrough;
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		SPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		SPDNORMX;
	}

	/* Finally get to do some computation */

	assert(xm & SP_HIDDEN_BIT);
	assert(ym & SP_HIDDEN_BIT);

	/* Compare exponent */
	if (xe > ye)
		return x;
	else if (xe < ye)
		return y;

	/* Compare mantissa */
	if (xm < ym)
		return y;
	else if (xm > ym)
		return x;
	else if (xs == 0)
		/* equal magnitudes: prefer the non-negative operand */
		return x;
	return y;
}
| linux-master | arch/mips/math-emu/sp_fmax.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754dp.h"
/*
 * Convert double precision to a signed 64-bit integer, rounding per
 * the current rounding mode (ieee754_csr.rm).  NaN and out-of-range
 * inputs raise Invalid Operation and return the indefinite/overflow
 * value; inexact conversions raise the Inexact flag.
 */
s64 ieee754dp_tlong(union ieee754dp x)
{
	u64 residue;	/* fraction bits shifted out of the result */
	int round;	/* round bit: MSB of the residue */
	int sticky;	/* any residue bits below the round bit */
	int odd;	/* LSB of the truncated result (ties-to-even) */

	COMPXDP;

	ieee754_clearcx();

	EXPLODEXDP;
	FLUSHXDP;

	switch (xc) {
	case IEEE754_CLASS_SNAN:
	case IEEE754_CLASS_QNAN:
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754di_indef();

	case IEEE754_CLASS_INF:
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754di_overflow(xs);

	case IEEE754_CLASS_ZERO:
		return 0;

	case IEEE754_CLASS_DNORM:
	case IEEE754_CLASS_NORM:
		break;
	}
	if (xe >= 63) {
		/* look for valid corner case */
		if (xe == 63 && xs && xm == DP_HIDDEN_BIT)
			return -0x8000000000000000LL;
		/* Set invalid. We will only use overflow for floating
		   point overflow */
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754di_overflow(xs);
	}
	/* oh gawd */
	if (xe > DP_FBITS) {
		/* integer part wider than the fraction: exact, shift up */
		xm <<= xe - DP_FBITS;
	} else if (xe < DP_FBITS) {
		if (xe < -1) {
			/* |x| < 0.25: only the sticky bit can be set */
			residue = xm;
			round = 0;
			sticky = residue != 0;
			xm = 0;
		} else {
			/* Shifting a u64 64 times does not work,
			* so we do it in two steps. Be aware that xe
			* may be -1 */
			residue = xm << (xe + 1);
			residue <<= 63 - DP_FBITS;
			round = (residue >> 63) != 0;
			sticky = (residue << 1) != 0;
			xm >>= DP_FBITS - xe;
		}
		odd = (xm & 0x1) != 0x0;
		switch (ieee754_csr.rm) {
		case FPU_CSR_RN:
			/* round to nearest, ties to even */
			if (round && (sticky || odd))
				xm++;
			break;
		case FPU_CSR_RZ:
			break;
		case FPU_CSR_RU:	/* toward +Infinity */
			if ((round || sticky) && !xs)
				xm++;
			break;
		case FPU_CSR_RD:	/* toward -Infinity */
			if ((round || sticky) && xs)
				xm++;
			break;
		}
		if ((xm >> 63) != 0) {
			/* This can happen after rounding */
			ieee754_setcx(IEEE754_INVALID_OPERATION);
			return ieee754di_overflow(xs);
		}
		if (round || sticky)
			ieee754_setcx(IEEE754_INEXACT);
	}
	if (xs)
		return -xm;
	else
		return xm;
}
| linux-master | arch/mips/math-emu/dp_tlong.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754sp.h"
union ieee754sp ieee754sp_neg(union ieee754sp x)
{
union ieee754sp y;
if (ieee754_csr.abs2008) {
y = x;
SPSIGN(y) = !SPSIGN(x);
} else {
unsigned int oldrm;
oldrm = ieee754_csr.rm;
ieee754_csr.rm = FPU_CSR_RD;
y = ieee754sp_sub(ieee754sp_zero(0), x);
ieee754_csr.rm = oldrm;
}
return y;
}
union ieee754sp ieee754sp_abs(union ieee754sp x)
{
union ieee754sp y;
if (ieee754_csr.abs2008) {
y = x;
SPSIGN(y) = 0;
} else {
unsigned int oldrm;
oldrm = ieee754_csr.rm;
ieee754_csr.rm = FPU_CSR_RD;
if (SPSIGN(x))
y = ieee754sp_sub(ieee754sp_zero(0), x);
else
y = ieee754sp_add(ieee754sp_zero(0), x);
ieee754_csr.rm = oldrm;
}
return y;
}
| linux-master | arch/mips/math-emu/sp_simple.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754dp.h"
/*
 * ieee754dp_div - IEEE 754 double precision division, x / y.
 *
 * NaNs, infinities and zeros are dispatched through the class-pair switch
 * below; finite operands fall through to a restoring long division on the
 * normalized mantissas, with the final rounding done by ieee754dp_format().
 */
union ieee754dp ieee754dp_div(union ieee754dp x, union ieee754dp y)
{
	u64 rm;		/* result mantissa, built up bit by bit */
	int re;		/* result exponent */
	u64 bm;		/* current quotient bit being tried */

	COMPXDP;
	COMPYDP;

	EXPLODEXDP;
	EXPLODEYDP;

	ieee754_clearcx();

	FLUSHXDP;
	FLUSHYDP;

	switch (CLPAIR(xc, yc)) {
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
		return ieee754dp_nanxcpt(y);

	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		return ieee754dp_nanxcpt(x);

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return y;

	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return x;

	/*
	 * Infinity handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		/* inf / inf is invalid */
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754dp_indef();

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
		/* finite / inf = signed zero */
		return ieee754dp_zero(xs ^ ys);

	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
		/* inf / finite = signed inf */
		return ieee754dp_inf(xs ^ ys);

	/*
	 * Zero handling
	 */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
		/* 0 / 0 is invalid */
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754dp_indef();

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		/* non-zero / 0 raises divide-by-zero */
		ieee754_setcx(IEEE754_ZERO_DIVIDE);
		return ieee754dp_inf(xs ^ ys);

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
		return ieee754dp_zero(xs == ys ? 0 : 1);

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		DPDNORMX;
		fallthrough;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		DPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		DPDNORMX;
		break;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
		break;
	}
	/* Both mantissas are now normalized with the hidden bit set */
	assert(xm & DP_HIDDEN_BIT);
	assert(ym & DP_HIDDEN_BIT);

	/* provide rounding space */
	xm <<= 3;
	ym <<= 3;

	/*
	 * now the dirty work
	 *
	 * Restoring long division: try each quotient bit from the MSB down,
	 * subtracting the divisor whenever the running remainder allows it.
	 */
	rm = 0;
	re = xe - ye;

	for (bm = DP_MBIT(DP_FBITS + 2); bm; bm >>= 1) {
		if (xm >= ym) {
			xm -= ym;
			rm |= bm;
			if (xm == 0)
				break;
		}
		xm <<= 1;
	}

	rm <<= 1;
	if (xm)
		rm |= 1;	/* have remainder, set sticky */

	assert(rm);

	/*
	 * Normalise rm to rounding precision ?
	 */
	while ((rm >> (DP_FBITS + 3)) == 0) {
		rm <<= 1;
		re--;
	}

	return ieee754dp_format(xs == ys ? 0 : 1, re, rm);
}
| linux-master | arch/mips/math-emu/dp_div.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE754 floating point arithmetic
* double precision: MADDF.f (Fused Multiply Add)
* MADDF.fmt: FPR[fd] = FPR[fd] + (FPR[fs] x FPR[ft])
*
* MIPS floating point support
* Copyright (C) 2015 Imagination Technologies, Ltd.
* Author: Markos Chandras <[email protected]>
*/
#include "ieee754dp.h"
/*
 * Logical right shift of the 128-bit value *hptr:*lptr by @count bits,
 * ORing any bits shifted out into the result's least significant bit
 * (sticky bit) so later rounding still sees them.
 */
static void srl128(u64 *hptr, u64 *lptr, int count)
{
	u64 orig_lo;

	if (count < 64) {
		/* In-word shift; bits move from high word into low word */
		orig_lo = *lptr;
		*lptr = orig_lo >> count | *hptr << (64 - count);
		*lptr |= (orig_lo << (64 - count)) != 0;
		*hptr = *hptr >> count;
	} else if (count < 128) {
		/* High word becomes the low word */
		if (count == 64) {
			*lptr = *hptr | (*lptr != 0);
		} else {
			orig_lo = *lptr;
			*lptr = *hptr >> (count - 64);
			*lptr |= (*hptr << (128 - count)) != 0 || orig_lo != 0;
		}
		*hptr = 0;
	} else {
		/* Everything shifted out; only the sticky bit survives */
		*lptr = *hptr != 0 || *lptr != 0;
		*hptr = 0;
	}
}
/*
 * _dp_maddf - Fused multiply-add core: z + (x * y), with optional negation
 * of the product and/or the addend selected by @flags.
 *
 * The product is computed exactly in 128 bits (hrm:lrm), the addend is
 * widened to the same fixed-point position (hzm:lzm), and only a single
 * rounding happens at the end in ieee754dp_format() - i.e. this is a true
 * fused operation.
 */
static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
				union ieee754dp y, enum maddf_flags flags)
{
	int re;			/* result exponent */
	int rs;			/* result (product) sign */
	unsigned int lxm;	/* 32-bit halves of the mantissas ... */
	unsigned int hxm;
	unsigned int lym;
	unsigned int hym;
	u64 lrm;		/* ... and 128-bit product accumulator */
	u64 hrm;
	u64 lzm;		/* addend widened to 128 bits */
	u64 hzm;
	u64 t;
	u64 at;
	int s;

	COMPXDP;
	COMPYDP;
	COMPZDP;

	EXPLODEXDP;
	EXPLODEYDP;
	EXPLODEZDP;

	FLUSHXDP;
	FLUSHYDP;
	FLUSHZDP;

	ieee754_clearcx();

	/* Apply the requested negations to the signs up front */
	rs = xs ^ ys;
	if (flags & MADDF_NEGATE_PRODUCT)
		rs ^= 1;
	if (flags & MADDF_NEGATE_ADDITION)
		zs ^= 1;

	/*
	 * Handle the cases when at least one of x, y or z is a NaN.
	 * Order of precedence is sNaN, qNaN and z, x, y.
	 */
	if (zc == IEEE754_CLASS_SNAN)
		return ieee754dp_nanxcpt(z);
	if (xc == IEEE754_CLASS_SNAN)
		return ieee754dp_nanxcpt(x);
	if (yc == IEEE754_CLASS_SNAN)
		return ieee754dp_nanxcpt(y);
	if (zc == IEEE754_CLASS_QNAN)
		return z;
	if (xc == IEEE754_CLASS_QNAN)
		return x;
	if (yc == IEEE754_CLASS_QNAN)
		return y;

	if (zc == IEEE754_CLASS_DNORM)
		DPDNORMZ;
	/* ZERO z cases are handled separately below */

	switch (CLPAIR(xc, yc)) {

	/*
	 * Infinity handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
		/* inf * 0 is invalid regardless of z */
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754dp_indef();

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		if ((zc == IEEE754_CLASS_INF) && (zs != rs)) {
			/*
			 * Cases of addition of infinities with opposite signs
			 * or subtraction of infinities with same signs.
			 */
			ieee754_setcx(IEEE754_INVALID_OPERATION);
			return ieee754dp_indef();
		}
		/*
		 * z is here either not an infinity, or an infinity having the
		 * same sign as product (x*y). The result must be an infinity,
		 * and its sign is determined only by the sign of product (x*y).
		 */
		return ieee754dp_inf(rs);

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		if (zc == IEEE754_CLASS_INF)
			return ieee754dp_inf(zs);
		if (zc == IEEE754_CLASS_ZERO) {
			/* Handle cases +0 + (-0) and similar ones. */
			if (zs == rs)
				/*
				 * Cases of addition of zeros of equal signs
				 * or subtraction of zeroes of opposite signs.
				 * The sign of the resulting zero is in any
				 * such case determined only by the sign of z.
				 */
				return z;

			return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
		}
		/* x*y is here 0, and z is not 0, so just return z */
		return z;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		DPDNORMX;
		fallthrough;
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		if (zc == IEEE754_CLASS_INF)
			return ieee754dp_inf(zs);
		DPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		if (zc == IEEE754_CLASS_INF)
			return ieee754dp_inf(zs);
		DPDNORMX;
		break;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
		if (zc == IEEE754_CLASS_INF)
			return ieee754dp_inf(zs);
		/* continue to real computations */
	}

	/* Finally get to do some computation */

	/*
	 * Do the multiplication bit first
	 *
	 * rm = xm * ym, re = xe + ye basically
	 *
	 * At this point xm and ym should have been normalized.
	 */
	assert(xm & DP_HIDDEN_BIT);
	assert(ym & DP_HIDDEN_BIT);

	re = xe + ye;

	/* shunt to top of word */
	xm <<= 64 - (DP_FBITS + 1);
	ym <<= 64 - (DP_FBITS + 1);

	/*
	 * Multiply 64 bits xm and ym to give 128 bits result in hrm:lrm.
	 * Schoolbook multiply on 32-bit halves; the "at < lrm" tests
	 * detect carry out of the low 64-bit accumulator.
	 */
	lxm = xm;
	hxm = xm >> 32;
	lym = ym;
	hym = ym >> 32;

	lrm = DPXMULT(lxm, lym);
	hrm = DPXMULT(hxm, hym);

	t = DPXMULT(lxm, hym);

	at = lrm + (t << 32);
	hrm += at < lrm;
	lrm = at;

	hrm = hrm + (t >> 32);

	t = DPXMULT(hxm, lym);

	at = lrm + (t << 32);
	hrm += at < lrm;
	lrm = at;

	hrm = hrm + (t >> 32);

	/* Put explicit bit at bit 126 if necessary */
	if ((int64_t)hrm < 0) {
		lrm = (hrm << 63) | (lrm >> 1);
		hrm = hrm >> 1;
		re++;
	}

	assert(hrm & (1 << 62));

	if (zc == IEEE754_CLASS_ZERO) {
		/*
		 * Move explicit bit from bit 126 to bit 55 since the
		 * ieee754dp_format code expects the mantissa to be
		 * 56 bits wide (53 + 3 rounding bits).
		 */
		srl128(&hrm, &lrm, (126 - 55));
		return ieee754dp_format(rs, re, lrm);
	}

	/* Move explicit bit from bit 52 to bit 126 */
	lzm = 0;
	hzm = zm << 10;
	assert(hzm & (1 << 62));

	/* Make the exponents the same */
	if (ze > re) {
		/*
		 * Have to shift y fraction right to align.
		 */
		s = ze - re;
		srl128(&hrm, &lrm, s);
		re += s;
	} else if (re > ze) {
		/*
		 * Have to shift x fraction right to align.
		 */
		s = re - ze;
		srl128(&hzm, &lzm, s);
		ze += s;
	}
	assert(ze == re);
	assert(ze <= DP_EMAX);

	/* Do the addition */
	if (zs == rs) {
		/*
		 * Generate 128 bit result by adding two 127 bit numbers
		 * leaving result in hzm:lzm, zs and ze.
		 */
		hzm = hzm + hrm + (lzm > (lzm + lrm));
		lzm = lzm + lrm;
		if ((int64_t)hzm < 0) {	/* carry out */
			srl128(&hzm, &lzm, 1);
			ze++;
		}
	} else {
		/* Opposite signs: subtract the smaller magnitude */
		if (hzm > hrm || (hzm == hrm && lzm >= lrm)) {
			hzm = hzm - hrm - (lzm < lrm);
			lzm = lzm - lrm;
		} else {
			hzm = hrm - hzm - (lrm < lzm);
			lzm = lrm - lzm;
			zs = rs;
		}
		if (lzm == 0 && hzm == 0)
			return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);

		/*
		 * Put explicit bit at bit 126 if necessary.
		 */
		if (hzm == 0) {
			/* left shift by 63 or 64 bits */
			if ((int64_t)lzm < 0) {
				/* MSB of lzm is the explicit bit */
				hzm = lzm >> 1;
				lzm = lzm << 63;
				ze -= 63;
			} else {
				hzm = lzm;
				lzm = 0;
				ze -= 64;
			}
		}

		t = 0;
		while ((hzm >> (62 - t)) == 0)
			t++;

		assert(t <= 62);
		if (t) {
			hzm = hzm << t | lzm >> (64 - t);
			lzm = lzm << t;
			ze -= t;
		}
	}

	/*
	 * Move explicit bit from bit 126 to bit 55 since the
	 * ieee754dp_format code expects the mantissa to be
	 * 56 bits wide (53 + 3 rounding bits).
	 */
	srl128(&hzm, &lzm, (126 - 55));

	return ieee754dp_format(zs, ze, lzm);
}
/* MADDF.D: z + (x * y), fused (single final rounding). */
union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
				union ieee754dp y)
{
	return _dp_maddf(z, x, y, 0);
}
/* MSUBF.D: z - (x * y), fused - the product is negated before the add. */
union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
				union ieee754dp y)
{
	return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
}
/* MADD.D: z + (x * y), same computation as MADDF.D. */
union ieee754dp ieee754dp_madd(union ieee754dp z, union ieee754dp x,
				union ieee754dp y)
{
	return _dp_maddf(z, x, y, 0);
}
/* MSUB.D: (x * y) - z - the addend is negated before the add. */
union ieee754dp ieee754dp_msub(union ieee754dp z, union ieee754dp x,
				union ieee754dp y)
{
	return _dp_maddf(z, x, y, MADDF_NEGATE_ADDITION);
}
/* NMADD.D: -((x * y) + z) - both product and addend are negated. */
union ieee754dp ieee754dp_nmadd(union ieee754dp z, union ieee754dp x,
				union ieee754dp y)
{
	return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT|MADDF_NEGATE_ADDITION);
}
/* NMSUB.D: -((x * y) - z), i.e. z - (x * y) - only the product is negated. */
union ieee754dp ieee754dp_nmsub(union ieee754dp z, union ieee754dp x,
				union ieee754dp y)
{
	return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
}
| linux-master | arch/mips/math-emu/dp_maddf.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754sp.h"
/*
 * ieee754sp_sub - IEEE 754 single precision subtraction, x - y.
 *
 * Special operands are handled through the class-pair switch; for finite
 * operands y's sign is flipped and the operation proceeds as an addition:
 * mantissas are aligned with sticky-preserving shifts, combined by sign,
 * renormalized, and rounded by ieee754sp_format().
 */
union ieee754sp ieee754sp_sub(union ieee754sp x, union ieee754sp y)
{
	int s;

	COMPXSP;
	COMPYSP;

	EXPLODEXSP;
	EXPLODEYSP;

	ieee754_clearcx();

	FLUSHXSP;
	FLUSHYSP;

	switch (CLPAIR(xc, yc)) {
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
		return ieee754sp_nanxcpt(y);

	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		return ieee754sp_nanxcpt(x);

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return y;

	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return x;

	/*
	 * Infinity handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		/* inf - inf with equal signs is invalid */
		if (xs != ys)
			return x;
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754sp_indef();

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
		/* finite - inf = inf of the opposite sign */
		return ieee754sp_inf(ys ^ 1);

	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
		return x;

	/*
	 * Zero handling
	 */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
		if (xs != ys)
			return x;
		else
			/* same-signed zeros cancel; result sign depends on rounding mode */
			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		return x;

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
		/* quick fix up */

		/* 0 - y = -y: flip y's sign in place */
		SPSIGN(y) ^= 1;
		return y;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		SPDNORMX;
		fallthrough;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		SPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		SPDNORMX;
		break;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
		break;
	}
	/* flip sign of y and handle as add */
	ys ^= 1;

	assert(xm & SP_HIDDEN_BIT);
	assert(ym & SP_HIDDEN_BIT);

	/* provide guard,round and stick bit space */
	xm <<= 3;
	ym <<= 3;

	if (xe > ye) {
		/*
		 * have to shift y fraction right to align
		 */
		s = xe - ye;
		ym = XSPSRS(ym, s);
		ye += s;
	} else if (ye > xe) {
		/*
		 * have to shift x fraction right to align
		 */
		s = ye - xe;
		xm = XSPSRS(xm, s);
		xe += s;
	}
	assert(xe == ye);
	assert(xe <= SP_EMAX);

	if (xs == ys) {
		/* generate 28 bit result of adding two 27 bit numbers
		 */
		xm = xm + ym;

		if (xm >> (SP_FBITS + 1 + 3)) {	/* carry out */
			SPXSRSX1();	/* shift preserving sticky */
		}
	} else {
		/* opposite effective signs: subtract smaller magnitude */
		if (xm >= ym) {
			xm = xm - ym;
		} else {
			xm = ym - xm;
			xs = ys;
		}
		if (xm == 0) {
			if (ieee754_csr.rm == FPU_CSR_RD)
				return ieee754sp_zero(1);	/* round negative inf. => sign = -1 */
			else
				return ieee754sp_zero(0);	/* other round modes => sign = 1 */
		}

		/* normalize to rounding precision
		 */
		while ((xm >> (SP_FBITS + 3)) == 0) {
			xm <<= 1;
			xe--;
		}
	}

	return ieee754sp_format(xs, xe, xm);
}
| linux-master | arch/mips/math-emu/sp_sub.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE754 floating point arithmetic
* double precision: CLASS.f
* FPR[fd] = class(FPR[fs])
*
* MIPS floating point support
* Copyright (C) 2015 Imagination Technologies, Ltd.
* Author: Markos Chandras <[email protected]>
*/
#include "ieee754dp.h"
/*
 * ieee754dp_2008class - CLASS.D: classify a double precision value.
 *
 * Returns a 10 bit mask:
 *   bit0 = SNAN    bit1 = QNAN
 *   bit2 = -INF    bit3 = -NORM   bit4 = -DNORM   bit5 = -ZERO
 *   bit6 = +INF    bit7 = +NORM   bit8 = +DNORM   bit9 = +ZERO
 *
 * Each negative class bit sits 4 positions below its positive
 * counterpart, so positive inputs shift the base bit up by 4.
 */
int ieee754dp_2008class(union ieee754dp x)
{
	unsigned int pos_shift;

	COMPXDP;

	EXPLODEXDP;

	pos_shift = xs ? 0 : 4;

	switch (xc) {
	case IEEE754_CLASS_SNAN:
		return 0x01;
	case IEEE754_CLASS_QNAN:
		return 0x02;
	case IEEE754_CLASS_INF:
		return 0x04 << pos_shift;
	case IEEE754_CLASS_NORM:
		return 0x08 << pos_shift;
	case IEEE754_CLASS_DNORM:
		return 0x10 << pos_shift;
	case IEEE754_CLASS_ZERO:
		return 0x20 << pos_shift;
	default:
		pr_err("Unknown class: %d\n", xc);
		return 0;
	}
}
| linux-master | arch/mips/math-emu/dp_2008class.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE754 floating point arithmetic
 * double precision: MAX{,A}.f
 * MAX : Scalar Floating-Point Maximum
 * MAXA: Scalar Floating-Point argument with Maximum Absolute Value
 *
 * MAX.D : FPR[fd] = maxNum(FPR[fs],FPR[ft])
 * MAXA.D: FPR[fd] = maxNumMag(FPR[fs],FPR[ft])
*
* MIPS floating point support
* Copyright (C) 2015 Imagination Technologies, Ltd.
* Author: Markos Chandras <[email protected]>
*/
#include "ieee754dp.h"
/*
 * ieee754dp_fmax - MAX.D: the larger of x and y (IEEE 754-2008 maxNum).
 *
 * A single quiet NaN input yields the other, numeric, operand; a
 * signalling NaN raises Invalid Operation via ieee754dp_nanxcpt().
 */
union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
{
	COMPXDP;
	COMPYDP;

	EXPLODEXDP;
	EXPLODEYDP;

	FLUSHXDP;
	FLUSHYDP;

	ieee754_clearcx();

	switch (CLPAIR(xc, yc)) {
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
		return ieee754dp_nanxcpt(y);

	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		return ieee754dp_nanxcpt(x);

	/*
	 * Quiet NaN handling
	 */

	/*
	 * The case of both inputs quiet NaNs
	 */
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
		return x;

	/*
	 * The cases of exactly one input quiet NaN (numbers
	 * are here preferred as returned values to NaNs)
	 */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return x;

	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return y;

	/*
	 * Infinity and zero handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		/* |x| > |y|: x wins unless x is negative */
		return xs ? y : x;

	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
		/* |y| >= |x|: y wins unless y is negative */
		return ys ? x : y;

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
		/* max(+0,-0) = +0; -0 only when both are -0 */
		return ieee754dp_zero(xs & ys);

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		DPDNORMX;
		fallthrough;
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		DPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		DPDNORMX;
	}

	/* Finally get to do some computation */

	assert(xm & DP_HIDDEN_BIT);
	assert(ym & DP_HIDDEN_BIT);

	/* Compare signs */
	if (xs > ys)
		return y;
	else if (xs < ys)
		return x;

	/* Signs of inputs are equal, let's compare exponents */
	if (xs == 0) {
		/* Inputs are both positive */
		if (xe > ye)
			return x;
		else if (xe < ye)
			return y;
	} else {
		/* Inputs are both negative */
		if (xe > ye)
			return y;
		else if (xe < ye)
			return x;
	}

	/* Signs and exponents of inputs are equal, let's compare mantissas */
	if (xs == 0) {
		/* Inputs are both positive, with equal signs and exponents */
		if (xm <= ym)
			return y;
		return x;
	}
	/* Inputs are both negative, with equal signs and exponents */
	if (xm <= ym)
		return x;
	return y;
}
/*
 * ieee754dp_fmaxa - MAXA.D: the argument with the larger absolute value
 * (IEEE 754-2008 maxNumMag).  On equal magnitudes the non-negative
 * argument is preferred.  NaN handling matches ieee754dp_fmax().
 */
union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
{
	COMPXDP;
	COMPYDP;

	EXPLODEXDP;
	EXPLODEYDP;

	FLUSHXDP;
	FLUSHYDP;

	ieee754_clearcx();

	switch (CLPAIR(xc, yc)) {
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
		return ieee754dp_nanxcpt(y);

	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		return ieee754dp_nanxcpt(x);

	/*
	 * Quiet NaN handling
	 */

	/*
	 * The case of both inputs quiet NaNs
	 */
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
		return x;

	/*
	 * The cases of exactly one input quiet NaN (numbers
	 * are here preferred as returned values to NaNs)
	 */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return x;

	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return y;

	/*
	 * Infinity and zero handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		/* +inf preferred; -inf only when both are -inf */
		return ieee754dp_inf(xs & ys);

	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		/* |x| > |y|, sign irrelevant for magnitude compare */
		return x;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
		/* |y| > |x| */
		return y;

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
		return ieee754dp_zero(xs & ys);

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		DPDNORMX;
		fallthrough;
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		DPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		DPDNORMX;
	}

	/* Finally get to do some computation */

	assert(xm & DP_HIDDEN_BIT);
	assert(ym & DP_HIDDEN_BIT);

	/* Compare exponent */
	if (xe > ye)
		return x;
	else if (xe < ye)
		return y;

	/* Compare mantissa */
	if (xm < ym)
		return y;
	else if (xm > ym)
		return x;
	else if (xs == 0)
		/* Equal magnitudes: prefer the non-negative argument */
		return x;
	return y;
}
| linux-master | arch/mips/math-emu/dp_fmax.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754sp.h"
/*
 * ieee754sp_cmp - IEEE 754 single precision comparison.
 * @x, @y: operands
 * @cmp:   bitmask of relations to accept (IEEE754_CLT/CEQ/CGT/CUN)
 * @sig:   non-zero for signalling predicates: raise Invalid Operation on
 *         any NaN operand, not just on signalling NaNs
 *
 * Returns non-zero if the actual relation between x and y is one of
 * those selected in @cmp.
 */
int ieee754sp_cmp(union ieee754sp x, union ieee754sp y, int cmp, int sig)
{
	int vx;
	int vy;

	COMPXSP;
	COMPYSP;

	EXPLODEXSP;
	EXPLODEYSP;
	FLUSHXSP;
	FLUSHYSP;
	ieee754_clearcx();	/* Even clear inexact flag here */

	if (ieee754_class_nan(xc) || ieee754_class_nan(yc)) {
		/* Unordered: only the CUN relation can hold */
		if (sig ||
		    xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN)
			ieee754_setcx(IEEE754_INVALID_OPERATION);
		return (cmp & IEEE754_CUN) != 0;
	} else {
		vx = x.bits;
		vy = y.bits;

		/*
		 * Fold the sign-magnitude float bit patterns onto a
		 * monotonic two's-complement scale so plain integer
		 * compares give the IEEE ordering; note that +0 and -0
		 * both map to 0 and therefore compare equal.
		 */
		if (vx < 0)
			vx = -vx ^ SP_SIGN_BIT;
		if (vy < 0)
			vy = -vy ^ SP_SIGN_BIT;

		if (vx < vy)
			return (cmp & IEEE754_CLT) != 0;
		else if (vx == vy)
			return (cmp & IEEE754_CEQ) != 0;
		else
			return (cmp & IEEE754_CGT) != 0;
	}
}
| linux-master | arch/mips/math-emu/sp_cmp.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754sp.h"
/*
 * ieee754sp_add - IEEE 754 single precision addition, x + y.
 *
 * Special operands are handled through the class-pair switch; finite
 * operands are aligned to a common exponent with sticky-preserving
 * shifts, added or subtracted according to sign, renormalized and
 * rounded by ieee754sp_format().
 */
union ieee754sp ieee754sp_add(union ieee754sp x, union ieee754sp y)
{
	int s;

	COMPXSP;
	COMPYSP;

	EXPLODEXSP;
	EXPLODEYSP;

	ieee754_clearcx();

	FLUSHXSP;
	FLUSHYSP;

	switch (CLPAIR(xc, yc)) {
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
		return ieee754sp_nanxcpt(y);

	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		return ieee754sp_nanxcpt(x);

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return y;

	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return x;

	/*
	 * Infinity handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		/* inf + (-inf) is invalid */
		if (xs == ys)
			return x;
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754sp_indef();

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
		return y;

	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
		return x;

	/*
	 * Zero handling
	 */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
		if (xs == ys)
			return x;
		else
			/* (+0) + (-0): result sign depends on rounding mode */
			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		return x;

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
		return y;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		SPDNORMX;
		fallthrough;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		SPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		SPDNORMX;
		break;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
		break;
	}
	assert(xm & SP_HIDDEN_BIT);
	assert(ym & SP_HIDDEN_BIT);

	/*
	 * Provide guard, round and stick bit space.
	 */
	xm <<= 3;
	ym <<= 3;

	if (xe > ye) {
		/*
		 * Have to shift y fraction right to align.
		 */
		s = xe - ye;
		ym = XSPSRS(ym, s);
		ye += s;
	} else if (ye > xe) {
		/*
		 * Have to shift x fraction right to align.
		 */
		s = ye - xe;
		xm = XSPSRS(xm, s);
		xe += s;
	}
	assert(xe == ye);
	assert(xe <= SP_EMAX);

	if (xs == ys) {
		/*
		 * Generate 28 bit result of adding two 27 bit numbers
		 * leaving result in xm, xs and xe.
		 */
		xm = xm + ym;

		if (xm >> (SP_FBITS + 1 + 3)) {	/* carry out */
			SPXSRSX1();
		}
	} else {
		/* opposite signs: subtract smaller magnitude */
		if (xm >= ym) {
			xm = xm - ym;
		} else {
			xm = ym - xm;
			xs = ys;
		}
		if (xm == 0)
			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);

		/*
		 * Normalize in extended single precision
		 */
		while ((xm >> (SP_FBITS + 3)) == 0) {
			xm <<= 1;
			xe--;
		}
	}

	return ieee754sp_format(xs, xe, xm);
}
| linux-master | arch/mips/math-emu/sp_add.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/sched/task.h>
#include <asm/branch.h>
#include <asm/cacheflush.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/mipsregs.h>
#include <linux/uaccess.h>
/**
* struct emuframe - The 'emulation' frame structure
* @emul: The instruction to 'emulate'.
* @badinst: A break instruction to cause a return to the kernel.
*
* This structure defines the frames placed within the delay slot emulation
* page in response to a call to mips_dsemul(). Each thread may be allocated
* only one frame at any given time. The kernel stores within it the
* instruction to be 'emulated' followed by a break instruction, then
* executes the frame in user mode. The break causes a trap to the kernel
* which leads to do_dsemulret() being called unless the instruction in
* @emul causes a trap itself, is a branch, or a signal is delivered to
* the thread. In these cases the allocated frame will either be reused by
* a subsequent delay slot 'emulation', or be freed during signal delivery or
* upon thread exit.
*
* This approach is used because:
*
* - Actually emulating all instructions isn't feasible. We would need to
* be able to handle instructions from all revisions of the MIPS ISA,
* all ASEs & all vendor instruction set extensions. This would be a
* whole lot of work & continual maintenance burden as new instructions
* are introduced, and in the case of some vendor extensions may not
* even be possible. Thus we need to take the approach of actually
* executing the instruction.
*
* - We must execute the instruction within user context. If we were to
* execute the instruction in kernel mode then it would have access to
* kernel resources without very careful checks, leaving us with a
* high potential for security or stability issues to arise.
*
* - We used to place the frame on the users stack, but this requires
* that the stack be executable. This is bad for security so the
* per-process page is now used instead.
*
* - The instruction in @emul may be something entirely invalid for a
* delay slot. The user may (intentionally or otherwise) place a branch
* in a delay slot, or a kernel mode instruction, or something else
* which generates an exception. Thus we can't rely upon the break in
* @badinst always being hit. For this reason we track the index of the
* frame allocated to each thread, allowing us to clean it up at later
* points such as signal delivery or thread exit.
*
* - The user may generate a fake struct emuframe if they wish, invoking
* the BRK_MEMU break instruction themselves. We must therefore not
* trust that BRK_MEMU means there's actually a valid frame allocated
* to the thread, and must not allow the user to do anything they
* couldn't already.
*/
struct emuframe {
	mips_instruction emul;		/* instruction to execute in user mode */
	mips_instruction badinst;	/* BRK_MEMU break; traps back to kernel */
};
/* Number of struct emuframe slots that fit in the per-mm emulation page */
static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe);
/*
 * Userspace base address of the delay-slot emulation page, located at
 * STACK_TOP in the current process' address space.
 */
static inline __user struct emuframe *dsemul_page(void)
{
	return (__user struct emuframe *)STACK_TOP;
}
/*
 * alloc_emuframe() - Allocate an emulation frame index for the current task.
 *
 * Searches the per-mm allocation bitmap (created lazily on first use) for a
 * free frame in the emulation page. If the page is full, sleeps killably
 * until another thread frees a frame, then retries.
 *
 * Note: the original text contained HTML-entity mojibake ("&curren;t" for
 * "&current") which is restored here so the code compiles.
 *
 * Return: the allocated frame index, or BD_EMUFRAME_NONE if the bitmap
 * could not be allocated or a fatal signal arrived while waiting.
 */
static int alloc_emuframe(void)
{
	mm_context_t *mm_ctx = &current->mm->context;
	int idx;

retry:
	spin_lock(&mm_ctx->bd_emupage_lock);

	/* Ensure we have an allocation bitmap */
	if (!mm_ctx->bd_emupage_allocmap) {
		/* GFP_ATOMIC: we hold bd_emupage_lock */
		mm_ctx->bd_emupage_allocmap = bitmap_zalloc(emupage_frame_count,
							    GFP_ATOMIC);

		if (!mm_ctx->bd_emupage_allocmap) {
			idx = BD_EMUFRAME_NONE;
			goto out_unlock;
		}
	}

	/* Attempt to allocate a single bit/frame */
	idx = bitmap_find_free_region(mm_ctx->bd_emupage_allocmap,
				      emupage_frame_count, 0);
	if (idx < 0) {
		/*
		 * Failed to allocate a frame. We'll wait until one becomes
		 * available. We unlock the page so that other threads actually
		 * get the opportunity to free their frames, which means
		 * technically the result of bitmap_full may be incorrect.
		 * However the worst case is that we repeat all this and end up
		 * back here again.
		 */
		spin_unlock(&mm_ctx->bd_emupage_lock);
		if (!wait_event_killable(mm_ctx->bd_emupage_queue,
			!bitmap_full(mm_ctx->bd_emupage_allocmap,
				     emupage_frame_count)))
			goto retry;

		/* Received a fatal signal - just give in */
		return BD_EMUFRAME_NONE;
	}

	/* Success! */
	pr_debug("allocate emuframe %d to %d\n", idx, current->pid);
out_unlock:
	spin_unlock(&mm_ctx->bd_emupage_lock);
	return idx;
}
/*
 * free_emuframe() - Return frame @idx of @mm's emulation page to the pool
 * and wake any thread sleeping in alloc_emuframe() for a free slot.
 */
static void free_emuframe(int idx, struct mm_struct *mm)
{
	mm_context_t *mm_ctx = &mm->context;

	spin_lock(&mm_ctx->bd_emupage_lock);

	pr_debug("free emuframe %d from %d\n", idx, current->pid);
	bitmap_clear(mm_ctx->bd_emupage_allocmap, idx, 1);

	/* If some thread is waiting for a frame, now's its chance */
	wake_up(&mm_ctx->bd_emupage_queue);

	spin_unlock(&mm_ctx->bd_emupage_lock);
}
/*
 * Does the saved exception PC lie within the delay-slot emulation page?
 */
static bool within_emuframe(struct pt_regs *regs)
{
	unsigned long base = (unsigned long)dsemul_page();

	return regs->cp0_epc >= base &&
	       regs->cp0_epc < (base + PAGE_SIZE);
}
/*
 * dsemul_thread_cleanup() - Release @tsk's emulation frame, if any.
 *
 * Atomically takes ownership of the task's frame index, then frees the
 * frame back to the mm's pool (when the task still has an mm).
 *
 * Return: true if a frame was allocated & cleared, else false.
 */
bool dsemul_thread_cleanup(struct task_struct *tsk)
{
	int fr_idx;

	/* Clear any allocated frame, retrieving its index */
	fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);

	/* If no frame was allocated, we're done */
	if (fr_idx == BD_EMUFRAME_NONE)
		return false;

	task_lock(tsk);

	/* Free the frame that this thread had allocated */
	if (tsk->mm)
		free_emuframe(fr_idx, tsk->mm);

	task_unlock(tsk);
	return true;
}
/*
 * dsemul_thread_rollback() - Rewind the current thread out of its
 * emulation frame, adjusting @regs->cp0_epc as appropriate, and free
 * the frame.
 *
 * Note: the original text contained HTML-entity mojibake ("&curren;t" for
 * "&current") in the atomic_read/atomic_set calls, restored here so the
 * code compiles.
 *
 * Return: true if a frame was found & freed, else false.
 */
bool dsemul_thread_rollback(struct pt_regs *regs)
{
	struct emuframe __user *fr;
	int fr_idx;

	/* Do nothing if we're not executing from a frame */
	if (!within_emuframe(regs))
		return false;

	/* Find the frame being executed */
	fr_idx = atomic_read(&current->thread.bd_emu_frame);
	if (fr_idx == BD_EMUFRAME_NONE)
		return false;
	fr = &dsemul_page()[fr_idx];

	/*
	 * If the PC is at the emul instruction, roll back to the branch. If
	 * PC is at the badinst (break) instruction, we've already emulated the
	 * instruction so progress to the continue PC. If it's anything else
	 * then something is amiss & the user has branched into some other area
	 * of the emupage - we'll free the allocated frame anyway.
	 */
	if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->emul)
		regs->cp0_epc = current->thread.bd_emu_branch_pc;
	else if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->badinst)
		regs->cp0_epc = current->thread.bd_emu_cont_pc;

	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
	free_emuframe(fr_idx, current->mm);
	return true;
}
/* Free the frame-allocation bitmap when an address space is torn down */
void dsemul_mm_cleanup(struct mm_struct *mm)
{
	bitmap_free(mm->context.bd_emupage_allocmap);
}
/*
 * Emulate a branch-delay-slot instruction @ir by writing it, followed by
 * a trapping break instruction, into a per-thread frame in the user
 * emupage and redirecting the user PC there.
 *
 * Returns -1 when the instruction needs no frame (NOP, or ADDIUPC which
 * is emulated directly here), SIGBUS on frame allocation/write failure,
 * or 0 on success (user context now set to execute the frame).
 */
int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
	unsigned long branch_pc, unsigned long cont_pc)
{
	int isa16 = get_isa16_mode(regs->cp0_epc);
	mips_instruction break_math;
	unsigned long fr_uaddr;
	struct emuframe fr;
	int fr_idx, ret;
	/* NOP is easy */
	if (ir == 0)
		return -1;
	/* microMIPS instructions */
	if (isa16) {
		union mips_instruction insn = { .word = ir };
		/* NOP16 aka MOVE16 $0, $0 */
		if ((ir >> 16) == MM_NOP16)
			return -1;
		/* ADDIUPC: PC-relative, so emulate it here rather than in the frame */
		if (insn.mm_a_format.opcode == mm_addiupc_op) {
			unsigned int rs;
			s32 v;
			rs = (((insn.mm_a_format.rs + 0xe) & 0xf) + 2);
			v = regs->cp0_epc & ~3;
			v += insn.mm_a_format.simmediate << 2;
			regs->regs[rs] = (long)v;
			return -1;
		}
	}
	pr_debug("dsemul 0x%08lx cont at 0x%08lx\n", regs->cp0_epc, cont_pc);
	/* Allocate a frame if we don't already have one */
	fr_idx = atomic_read(&current->thread.bd_emu_frame);
	if (fr_idx == BD_EMUFRAME_NONE)
		fr_idx = alloc_emuframe();
	if (fr_idx == BD_EMUFRAME_NONE)
		return SIGBUS;
	/* Retrieve the appropriately encoded break instruction */
	break_math = BREAK_MATH(isa16);
	/* Write the instructions to the frame */
	if (isa16) {
		/* microMIPS: store each 32-bit word as two halfwords */
		union mips_instruction _emul = {
			.halfword = { ir >> 16, ir }
		};
		union mips_instruction _badinst = {
			.halfword = { break_math >> 16, break_math }
		};
		fr.emul = _emul.word;
		fr.badinst = _badinst.word;
	} else {
		fr.emul = ir;
		fr.badinst = break_math;
	}
	/* Write the frame to user memory */
	fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
	ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
				FOLL_FORCE | FOLL_WRITE);
	if (unlikely(ret != sizeof(fr))) {
		MIPS_FPU_EMU_INC_STATS(errors);
		free_emuframe(fr_idx, current->mm);
		return SIGBUS;
	}
	/* Record the PC of the branch, PC to continue from & frame index */
	current->thread.bd_emu_branch_pc = branch_pc;
	current->thread.bd_emu_cont_pc = cont_pc;
	atomic_set(&current->thread.bd_emu_frame, fr_idx);
	/* Change user register context to execute the frame (low bit = ISA mode) */
	regs->cp0_epc = fr_uaddr | isa16;
	return 0;
}
/*
 * Handle the break trap taken after a delay-slot emulation frame has
 * executed: free the frame and resume the user at the continuation PC.
 * Returns false (and counts an error) if no frame was allocated.
 */
bool do_dsemulret(struct pt_regs *xcp)
{
	/* Cleanup the allocated frame, returning if there wasn't one */
	if (!dsemul_thread_cleanup(current)) {
		MIPS_FPU_EMU_INC_STATS(errors);
		return false;
	}
	/* Set EPC to return to post-branch instruction */
	xcp->cp0_epc = current->thread.bd_emu_cont_pc;
	pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
	MIPS_FPU_EMU_INC_STATS(ds_emul);
	return true;
}
| linux-master | arch/mips/math-emu/dsemul.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* double precision square root
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754dp.h"
/*
 * Correction table used by ieee754dp_sqrt() below to refine the "magic"
 * initial estimate to almost 8 significant bits; indexed by bits of the
 * high word of that estimate.
 */
static const unsigned int table[] = {
	0, 1204, 3062, 5746, 9193, 13348, 18162, 23592,
	29598, 36145, 43202, 50740, 58733, 67158, 75992,
	85215, 83599, 71378, 60428, 50647, 41945, 34246,
	27478, 21581, 16499, 12183, 8588, 5674, 3403,
	1742, 661, 130
};
/*
 * Double-precision square root: handle the special classes, then compute
 * via a table-seeded initial estimate refined by Heron's rule, with a
 * final twiddle to obtain the correctly rounded result in the caller's
 * rounding mode.
 */
union ieee754dp ieee754dp_sqrt(union ieee754dp x)
{
	struct _ieee754_csr oldcsr;
	union ieee754dp y, z, t;
	unsigned int scalx, yh;
	COMPXDP;
	EXPLODEXDP;
	ieee754_clearcx();
	FLUSHXDP;
	/* x == INF or NAN? */
	switch (xc) {
	case IEEE754_CLASS_SNAN:
		return ieee754dp_nanxcpt(x);
	case IEEE754_CLASS_QNAN:
		/* sqrt(Nan) = Nan */
		return x;
	case IEEE754_CLASS_ZERO:
		/* sqrt(0) = 0 */
		return x;
	case IEEE754_CLASS_INF:
		if (xs) {
			/* sqrt(-Inf) = Nan */
			ieee754_setcx(IEEE754_INVALID_OPERATION);
			return ieee754dp_indef();
		}
		/* sqrt(+Inf) = Inf */
		return x;
	case IEEE754_CLASS_DNORM:
		DPDNORMX;
		fallthrough;
	case IEEE754_CLASS_NORM:
		if (xs) {
			/* sqrt(-x) = Nan */
			ieee754_setcx(IEEE754_INVALID_OPERATION);
			return ieee754dp_indef();
		}
		break;
	}
	/* save old csr; switch off INX enable & flag; set RN rounding */
	oldcsr = ieee754_csr;
	ieee754_csr.mx &= ~IEEE754_INEXACT;
	ieee754_csr.sx &= ~IEEE754_INEXACT;
	ieee754_csr.rm = FPU_CSR_RN;
	/* adjust exponent to prevent overflow */
	scalx = 0;
	if (xe > 512) {		/* x > 2**512? */
		xe -= 512;	/* x = x / 2**512 */
		scalx += 256;
	} else if (xe < -512) {	/* x < 2**-512? */
		xe += 512;	/* x = x * 2**512 */
		scalx -= 256;
	}
	x = builddp(0, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
	y = x;
	/* magic initial approximation to almost 8 sig. bits */
	yh = y.bits >> 32;
	yh = (yh >> 1) + 0x1ff80000;
	yh = yh - table[(yh >> 15) & 31];
	y.bits = ((u64) yh << 32) | (y.bits & 0xffffffff);
	/* Heron's rule once with correction to improve to ~18 sig. bits */
	/* t=x/y; y=y+t; py[n0]=py[n0]-0x00100006; py[n1]=0; */
	t = ieee754dp_div(x, y);
	y = ieee754dp_add(y, t);
	y.bits -= 0x0010000600000000LL;
	y.bits &= 0xffffffff00000000LL;
	/* triple to almost 56 sig. bits: y ~= sqrt(x) to within 1 ulp */
	/* t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y; */
	t = ieee754dp_mul(y, y);
	z = t;
	t.bexp += 0x001;
	t = ieee754dp_add(t, z);
	z = ieee754dp_mul(ieee754dp_sub(x, z), y);
	/* t=z/(t+x) ;  pt[n0]+=0x00100000; y+=t; */
	t = ieee754dp_div(z, ieee754dp_add(t, x));
	t.bexp += 0x001;
	y = ieee754dp_add(y, t);
	/* twiddle last bit to force y correctly rounded */
	/* set RZ, clear INEX flag */
	ieee754_csr.rm = FPU_CSR_RZ;
	ieee754_csr.sx &= ~IEEE754_INEXACT;
	/* t=x/y; ...chopped quotient, possibly inexact */
	t = ieee754dp_div(x, y);
	if (ieee754_csr.sx & IEEE754_INEXACT || t.bits != y.bits) {
		if (!(ieee754_csr.sx & IEEE754_INEXACT))
			/* t = t-ulp */
			t.bits -= 1;
		/* add inexact to result status */
		oldcsr.cx |= IEEE754_INEXACT;
		oldcsr.sx |= IEEE754_INEXACT;
		switch (oldcsr.rm) {
		case FPU_CSR_RU:
			y.bits += 1;
			fallthrough;
		case FPU_CSR_RN:
			t.bits += 1;
			break;
		}
		/* y=y+t; ...chopped sum */
		y = ieee754dp_add(y, t);
		/* adjust scalx for correctly rounded sqrt(x) */
		scalx -= 1;
	}
	/* py[n0]=py[n0]+scalx; ...scale back y */
	y.bexp += scalx;
	/* restore rounding mode, possibly set inexact */
	ieee754_csr = oldcsr;
	return y;
}
| linux-master | arch/mips/math-emu/dp_sqrt.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include <linux/compiler.h>
#include "ieee754dp.h"
/* Classify @x: returns one of the IEEE754_CLASS_* constants */
int ieee754dp_class(union ieee754dp x)
{
	COMPXDP;
	EXPLODEXDP;
	return xc;
}
/* True if @x is any NaN (quiet or signalling) */
static inline int ieee754dp_isnan(union ieee754dp x)
{
	return ieee754_class_nan(ieee754dp_class(x));
}
/*
 * True if @x is a signalling NaN. The meaning of the quiet bit is
 * inverted between legacy and IEEE 754-2008 NaN encodings, hence the
 * XOR with the nan2008 mode flag.
 */
static inline int ieee754dp_issnan(union ieee754dp x)
{
	int qbit;
	assert(ieee754dp_isnan(x));
	qbit = (DPMANT(x) & DP_MBIT(DP_FBITS - 1)) == DP_MBIT(DP_FBITS - 1);
	return ieee754_csr.nan2008 ^ qbit;
}
/*
* Raise the Invalid Operation IEEE 754 exception
* and convert the signaling NaN supplied to a quiet NaN.
*/
/*
 * Raise the Invalid Operation IEEE 754 exception
 * and convert the signaling NaN supplied to a quiet NaN.
 */
union ieee754dp __cold ieee754dp_nanxcpt(union ieee754dp r)
{
	assert(ieee754dp_issnan(r));
	ieee754_setcx(IEEE754_INVALID_OPERATION);
	if (ieee754_csr.nan2008) {
		/* 2008 encoding: set the quiet bit */
		DPMANT(r) |= DP_MBIT(DP_FBITS - 1);
	} else {
		/* legacy encoding: clear the quiet bit... */
		DPMANT(r) &= ~DP_MBIT(DP_FBITS - 1);
		/* ...but keep the result a NaN if the mantissa became zero */
		if (!ieee754dp_isnan(r))
			DPMANT(r) |= DP_MBIT(DP_FBITS - 2);
	}
	return r;
}
/*
 * Apply the current rounding mode to a mantissa @xm carrying 3 extra
 * guard/round/sticky bits; @sn is the sign. Returns the (possibly
 * incremented) mantissa, still in grs-extended form.
 */
static u64 ieee754dp_get_rounding(int sn, u64 xm)
{
	/* inexact must round of 3 bits
	 */
	if (xm & (DP_MBIT(3) - 1)) {
		switch (ieee754_csr.rm) {
		case FPU_CSR_RZ:
			break;
		case FPU_CSR_RN:
			/* round to nearest, ties to even */
			xm += 0x3 + ((xm >> 3) & 1);
			/* xm += (xm&0x8)?0x4:0x3 */
			break;
		case FPU_CSR_RU:	/* toward +Infinity */
			if (!sn)	/* ?? */
				xm += 0x8;
			break;
		case FPU_CSR_RD:	/* toward -Infinity */
			if (sn) /* ?? */
				xm += 0x8;
			break;
		}
	}
	return xm;
}
/* generate a normal/denormal number with over,under handling
* sn is sign
* xe is an unbiased exponent
* xm is 3bit extended precision value.
*/
/* generate a normal/denormal number with over,under handling
 * sn is sign
 * xe is an unbiased exponent
 * xm is 3bit extended precision value.
 */
union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
{
	assert(xm);		/* we don't gen exact zeros (probably should) */
	assert((xm >> (DP_FBITS + 1 + 3)) == 0);	/* no excess */
	assert(xm & (DP_HIDDEN_BIT << 3));
	if (xe < DP_EMIN) {
		/* strip lower bits */
		int es = DP_EMIN - xe;
		if (ieee754_csr.nod) {
			/* flush-to-zero mode: return zero or the minimum
			 * normal depending on sign & rounding direction */
			ieee754_setcx(IEEE754_UNDERFLOW);
			ieee754_setcx(IEEE754_INEXACT);
			switch(ieee754_csr.rm) {
			case FPU_CSR_RN:
			case FPU_CSR_RZ:
				return ieee754dp_zero(sn);
			case FPU_CSR_RU:    /* toward +Infinity */
				if (sn == 0)
					return ieee754dp_min(0);
				else
					return ieee754dp_zero(1);
			case FPU_CSR_RD:    /* toward -Infinity */
				if (sn == 0)
					return ieee754dp_zero(0);
				else
					return ieee754dp_min(1);
			}
		}
		if (xe == DP_EMIN - 1 &&
		    ieee754dp_get_rounding(sn, xm) >> (DP_FBITS + 1 + 3))
		{
			/* Not tiny after rounding */
			ieee754_setcx(IEEE754_INEXACT);
			xm = ieee754dp_get_rounding(sn, xm);
			xm >>= 1;
			/* Clear grs bits */
			xm &= ~(DP_MBIT(3) - 1);
			xe++;
		}
		else {
			/* sticky right shift es bits
			 */
			xm = XDPSRS(xm, es);
			xe += es;
			assert((xm & (DP_HIDDEN_BIT << 3)) == 0);
			assert(xe == DP_EMIN);
		}
	}
	if (xm & (DP_MBIT(3) - 1)) {
		ieee754_setcx(IEEE754_INEXACT);
		if ((xm & (DP_HIDDEN_BIT << 3)) == 0) {
			ieee754_setcx(IEEE754_UNDERFLOW);
		}
		/* inexact must round of 3 bits
		 */
		xm = ieee754dp_get_rounding(sn, xm);
		/* adjust exponent for rounding add overflowing
		 */
		if (xm >> (DP_FBITS + 3 + 1)) {
			/* add causes mantissa overflow */
			xm >>= 1;
			xe++;
		}
	}
	/* strip grs bits */
	xm >>= 3;
	assert((xm >> (DP_FBITS + 1)) == 0);	/* no excess */
	assert(xe >= DP_EMIN);
	if (xe > DP_EMAX) {
		ieee754_setcx(IEEE754_OVERFLOW);
		ieee754_setcx(IEEE754_INEXACT);
		/* -O can be table indexed by (rm,sn) */
		switch (ieee754_csr.rm) {
		case FPU_CSR_RN:
			return ieee754dp_inf(sn);
		case FPU_CSR_RZ:
			return ieee754dp_max(sn);
		case FPU_CSR_RU:	/* toward +Infinity */
			if (sn == 0)
				return ieee754dp_inf(0);
			else
				return ieee754dp_max(1);
		case FPU_CSR_RD:	/* toward -Infinity */
			if (sn == 0)
				return ieee754dp_max(0);
			else
				return ieee754dp_inf(1);
		}
	}
	/* gen norm/denorm/zero */
	if ((xm & DP_HIDDEN_BIT) == 0) {
		/* we underflow (tiny/zero) */
		assert(xe == DP_EMIN);
		if (ieee754_csr.mx & IEEE754_UNDERFLOW)
			ieee754_setcx(IEEE754_UNDERFLOW);
		return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm);
	} else {
		assert((xm >> (DP_FBITS + 1)) == 0);	/* no excess */
		assert(xm & DP_HIDDEN_BIT);
		return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
	}
}
| linux-master | arch/mips/math-emu/ieee754dp.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754sp.h"
/*
 * Single-precision multiply: dispatch the special-class pairs, then do a
 * 32x32 -> high-32-with-sticky mantissa multiply built from 16-bit
 * partial products, and hand the result to ieee754sp_format() to round.
 */
union ieee754sp ieee754sp_mul(union ieee754sp x, union ieee754sp y)
{
	int re;
	int rs;
	unsigned int rm;
	unsigned short lxm;
	unsigned short hxm;
	unsigned short lym;
	unsigned short hym;
	unsigned int lrm;
	unsigned int hrm;
	unsigned int t;
	unsigned int at;
	COMPXSP;
	COMPYSP;
	EXPLODEXSP;
	EXPLODEYSP;
	ieee754_clearcx();
	FLUSHXSP;
	FLUSHYSP;
	switch (CLPAIR(xc, yc)) {
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
		return ieee754sp_nanxcpt(y);
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		return ieee754sp_nanxcpt(x);
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return y;
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return x;
	/*
	 * Infinity handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
		/* Inf * 0 is invalid */
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754sp_indef();
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		return ieee754sp_inf(xs ^ ys);
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		return ieee754sp_zero(xs ^ ys);
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		SPDNORMX;
		fallthrough;
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		SPDNORMY;
		break;
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		SPDNORMX;
		break;
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
		break;
	}
	/* rm = xm * ym, re = xe+ye basically */
	assert(xm & SP_HIDDEN_BIT);
	assert(ym & SP_HIDDEN_BIT);
	re = xe + ye;
	rs = xs ^ ys;
	/* shunt to top of word */
	xm <<= 32 - (SP_FBITS + 1);
	ym <<= 32 - (SP_FBITS + 1);
	/*
	 * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
	 */
	lxm = xm & 0xffff;
	hxm = xm >> 16;
	lym = ym & 0xffff;
	hym = ym >> 16;
	lrm = lxm * lym;	/* 16 * 16 => 32 */
	hrm = hxm * hym;	/* 16 * 16 => 32 */
	t = lxm * hym; /* 16 * 16 => 32 */
	at = lrm + (t << 16);
	hrm += at < lrm;	/* carry out of the low word */
	lrm = at;
	hrm = hrm + (t >> 16);
	t = hxm * lym; /* 16 * 16 => 32 */
	at = lrm + (t << 16);
	hrm += at < lrm;	/* carry out of the low word */
	lrm = at;
	hrm = hrm + (t >> 16);
	/* fold the discarded low word into the sticky bit */
	rm = hrm | (lrm != 0);
	/*
	 * Sticky shift down to normal rounding precision.
	 */
	if ((int) rm < 0) {
		rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
		    ((rm << (SP_FBITS + 1 + 3)) != 0);
		re++;
	} else {
		rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
		     ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
	}
	assert(rm & (SP_HIDDEN_BIT << 3));
	return ieee754sp_format(rs, re, rm);
}
| linux-master | arch/mips/math-emu/sp_mul.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE754 floating point arithmetic
* single precision: MIN{,A}.f
* MIN : Scalar Floating-Point Minimum
* MINA: Scalar Floating-Point argument with Minimum Absolute Value
*
* MIN.S : FPR[fd] = minNum(FPR[fs],FPR[ft])
 * MINA.S: FPR[fd] = minNumMag(FPR[fs],FPR[ft])
*
* MIPS floating point support
* Copyright (C) 2015 Imagination Technologies, Ltd.
* Author: Markos Chandras <[email protected]>
*/
#include "ieee754sp.h"
/*
 * MIN.S: return the smaller of @x and @y (NaNs lose to numbers;
 * signalling NaNs raise Invalid Operation).
 */
union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
{
	COMPXSP;
	COMPYSP;
	EXPLODEXSP;
	EXPLODEYSP;
	FLUSHXSP;
	FLUSHYSP;
	ieee754_clearcx();
	switch (CLPAIR(xc, yc)) {
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
		return ieee754sp_nanxcpt(y);
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		return ieee754sp_nanxcpt(x);
	/*
	 * Quiet NaN handling
	 */
	/*
	 * The case of both inputs quiet NaNs
	 */
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
		return x;
	/*
	 * The cases of exactly one input quiet NaN (numbers
	 * are here preferred as returned values to NaNs)
	 */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return x;
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return y;
	/*
	 * Infinity and zero handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		/* x is the min only when it is negative */
		return xs ? x : y;
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
		/* y is the min only when it is negative */
		return ys ? y : x;
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
		/* min(-0, +0) = -0: OR the signs */
		return ieee754sp_zero(xs | ys);
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		SPDNORMX;
		fallthrough;
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		SPDNORMY;
		break;
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		SPDNORMX;
	}
	/* Finally get to do some computation */
	assert(xm & SP_HIDDEN_BIT);
	assert(ym & SP_HIDDEN_BIT);
	/* Compare signs */
	if (xs > ys)
		return x;
	else if (xs < ys)
		return y;
	/* Signs of inputs are the same, let's compare exponents */
	if (xs == 0) {
		/* Inputs are both positive */
		if (xe > ye)
			return y;
		else if (xe < ye)
			return x;
	} else {
		/* Inputs are both negative */
		if (xe > ye)
			return x;
		else if (xe < ye)
			return y;
	}
	/* Signs and exponents of inputs are equal, let's compare mantissas */
	if (xs == 0) {
		/* Inputs are both positive, with equal signs and exponents */
		if (xm <= ym)
			return x;
		return y;
	}
	/* Inputs are both negative, with equal signs and exponents */
	if (xm <= ym)
		return y;
	return x;
}
/*
 * MINA.S: return whichever of @x and @y has the smaller magnitude
 * (NaNs lose to numbers; signalling NaNs raise Invalid Operation).
 */
union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
{
	COMPXSP;
	COMPYSP;
	EXPLODEXSP;
	EXPLODEYSP;
	FLUSHXSP;
	FLUSHYSP;
	ieee754_clearcx();
	switch (CLPAIR(xc, yc)) {
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
		return ieee754sp_nanxcpt(y);
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		return ieee754sp_nanxcpt(x);
	/*
	 * Quiet NaN handling
	 */
	/*
	 * The case of both inputs quiet NaNs
	 */
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
		return x;
	/*
	 * The cases of exactly one input quiet NaN (numbers
	 * are here preferred as returned values to NaNs)
	 */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return x;
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return y;
	/*
	 * Infinity and zero handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		return ieee754sp_inf(xs | ys);
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		/* |y| is smaller regardless of sign */
		return y;
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
		/* |x| is smaller regardless of sign */
		return x;
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
		return ieee754sp_zero(xs | ys);
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		SPDNORMX;
		fallthrough;
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		SPDNORMY;
		break;
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		SPDNORMX;
	}
	/* Finally get to do some computation */
	assert(xm & SP_HIDDEN_BIT);
	assert(ym & SP_HIDDEN_BIT);
	/* Compare exponent */
	if (xe > ye)
		return y;
	else if (xe < ye)
		return x;
	/* Compare mantissa */
	if (xm < ym)
		return x;
	else if (xm > ym)
		return y;
	else if (xs == 1)
		/* equal magnitudes: prefer the negative input */
		return x;
	return y;
}
| linux-master | arch/mips/math-emu/sp_fmin.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754dp.h"
/*
 * Convert a signed 32-bit integer to double precision. Always exact:
 * every s32 fits in the 52-bit mantissa, so no rounding is needed.
 */
union ieee754dp ieee754dp_fint(int x)
{
	u64 xm;
	int xe;
	int xs;
	ieee754_clearcx();
	if (x == 0)
		return ieee754dp_zero(0);
	if (x == 1 || x == -1)
		return ieee754dp_one(x < 0);
	if (x == 10 || x == -10)
		return ieee754dp_ten(x < 0);
	xs = (x < 0);
	if (xs) {
		/* NOTE(review): 1 << 31 relies on compiler-defined signed
		 * shift behaviour to produce INT_MIN */
		if (x == (1 << 31))
			xm = ((unsigned) 1 << 31);	/* max neg can't be safely negated */
		else
			xm = -x;
	} else {
		xm = x;
	}
	/* normalize - result can never be inexact or overflow */
	xe = DP_FBITS;
	while ((xm >> DP_FBITS) == 0) {
		xm <<= 1;
		xe--;
	}
	return builddp(xs, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
}
| linux-master | arch/mips/math-emu/dp_fint.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754sp.h"
/*
 * Convert single precision @x to a signed 32-bit integer, rounding per
 * the current mode. NaN/Inf/out-of-range raise Invalid Operation and
 * return the indef/overflow sentinel.
 */
int ieee754sp_tint(union ieee754sp x)
{
	u32 residue;
	int round;
	int sticky;
	int odd;
	COMPXSP;
	ieee754_clearcx();
	EXPLODEXSP;
	FLUSHXSP;
	switch (xc) {
	case IEEE754_CLASS_SNAN:
	case IEEE754_CLASS_QNAN:
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754si_indef();
	case IEEE754_CLASS_INF:
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754si_overflow(xs);
	case IEEE754_CLASS_ZERO:
		return 0;
	case IEEE754_CLASS_DNORM:
	case IEEE754_CLASS_NORM:
		break;
	}
	if (xe >= 31) {
		/* look for valid corner case: exactly INT_MIN */
		if (xe == 31 && xs && xm == SP_HIDDEN_BIT)
			return -0x80000000;
		/* Set invalid. We will only use overflow for floating
		   point overflow */
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754si_overflow(xs);
	}
	/* oh gawd */
	if (xe > SP_FBITS) {
		xm <<= xe - SP_FBITS;
	} else {
		if (xe < -1) {
			/* |x| < 0.25: everything is residue */
			residue = xm;
			round = 0;
			sticky = residue != 0;
			xm = 0;
		} else {
			/* Shifting a u32 32 times does not work,
			 * so we do it in two steps. Be aware that xe
			 * may be -1 */
			residue = xm << (xe + 1);
			residue <<= 31 - SP_FBITS;
			round = (residue >> 31) != 0;
			sticky = (residue << 1) != 0;
			xm >>= SP_FBITS - xe;
		}
		odd = (xm & 0x1) != 0x0;
		switch (ieee754_csr.rm) {
		case FPU_CSR_RN:
			if (round && (sticky || odd))
				xm++;
			break;
		case FPU_CSR_RZ:
			break;
		case FPU_CSR_RU:	/* toward +Infinity */
			if ((round || sticky) && !xs)
				xm++;
			break;
		case FPU_CSR_RD:	/* toward -Infinity */
			if ((round || sticky) && xs)
				xm++;
			break;
		}
		if ((xm >> 31) != 0) {
			/* This can happen after rounding */
			ieee754_setcx(IEEE754_INVALID_OPERATION);
			return ieee754si_overflow(xs);
		}
		if (round || sticky)
			ieee754_setcx(IEEE754_INEXACT);
	}
	if (xs)
		return -xm;
	else
		return xm;
}
| linux-master | arch/mips/math-emu/sp_tint.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754sp.h"
/*
 * Convert single precision @x to a signed 64-bit integer, rounding per
 * the current mode. NaN/Inf/out-of-range raise Invalid Operation and
 * return the indef/overflow sentinel.
 */
s64 ieee754sp_tlong(union ieee754sp x)
{
	u32 residue;
	int round;
	int sticky;
	int odd;
	COMPXDP;		/* <-- need 64-bit mantissa tmp */
	ieee754_clearcx();
	EXPLODEXSP;
	FLUSHXSP;
	switch (xc) {
	case IEEE754_CLASS_SNAN:
	case IEEE754_CLASS_QNAN:
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754di_indef();
	case IEEE754_CLASS_INF:
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754di_overflow(xs);
	case IEEE754_CLASS_ZERO:
		return 0;
	case IEEE754_CLASS_DNORM:
	case IEEE754_CLASS_NORM:
		break;
	}
	if (xe >= 63) {
		/* look for valid corner case: exactly S64_MIN */
		if (xe == 63 && xs && xm == SP_HIDDEN_BIT)
			return -0x8000000000000000LL;
		/* Set invalid. We will only use overflow for floating
		   point overflow */
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754di_overflow(xs);
	}
	/* oh gawd */
	if (xe > SP_FBITS) {
		xm <<= xe - SP_FBITS;
	} else if (xe < SP_FBITS) {
		/* (xe == SP_FBITS: already an exact integer, fall through) */
		if (xe < -1) {
			residue = xm;
			round = 0;
			sticky = residue != 0;
			xm = 0;
		} else {
			residue = xm << (32 - SP_FBITS + xe);
			round = (residue >> 31) != 0;
			sticky = (residue << 1) != 0;
			xm >>= SP_FBITS - xe;
		}
		odd = (xm & 0x1) != 0x0;
		switch (ieee754_csr.rm) {
		case FPU_CSR_RN:
			if (round && (sticky || odd))
				xm++;
			break;
		case FPU_CSR_RZ:
			break;
		case FPU_CSR_RU:	/* toward +Infinity */
			if ((round || sticky) && !xs)
				xm++;
			break;
		case FPU_CSR_RD:	/* toward -Infinity */
			if ((round || sticky) && xs)
				xm++;
			break;
		}
		if ((xm >> 63) != 0) {
			/* This can happen after rounding */
			ieee754_setcx(IEEE754_INVALID_OPERATION);
			return ieee754di_overflow(xs);
		}
		if (round || sticky)
			ieee754_setcx(IEEE754_INEXACT);
	}
	if (xs)
		return -xm;
	else
		return xm;
}
| linux-master | arch/mips/math-emu/sp_tlong.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
* Copyright (C) 2017 Imagination Technologies, Ltd.
* Author: Aleksandar Markovic <[email protected]>
*/
#include "ieee754dp.h"
/*
 * Round @x to an integral value (still in double precision), using the
 * current rounding mode; sets INEXACT when any fraction was discarded.
 */
union ieee754dp ieee754dp_rint(union ieee754dp x)
{
	union ieee754dp ret;
	u64 residue;
	int sticky;
	int round;
	int odd;
	COMPXDP;
	ieee754_clearcx();
	EXPLODEXDP;
	FLUSHXDP;
	if (xc == IEEE754_CLASS_SNAN)
		return ieee754dp_nanxcpt(x);
	/* QNaN, Inf and zero are already integral */
	if ((xc == IEEE754_CLASS_QNAN) ||
	    (xc == IEEE754_CLASS_INF) ||
	    (xc == IEEE754_CLASS_ZERO))
		return x;
	/* |x| >= 2^52: no fractional bits left */
	if (xe >= DP_FBITS)
		return x;
	if (xe < -1) {
		/* |x| < 0.25: everything is residue */
		residue = xm;
		round = 0;
		sticky = residue != 0;
		xm = 0;
	} else {
		residue = xm << (64 - DP_FBITS + xe);
		round = (residue >> 63) != 0;
		sticky = (residue << 1) != 0;
		xm >>= DP_FBITS - xe;
	}
	odd = (xm & 0x1) != 0x0;
	switch (ieee754_csr.rm) {
	case FPU_CSR_RN:	/* toward nearest */
		if (round && (sticky || odd))
			xm++;
		break;
	case FPU_CSR_RZ:	/* toward zero */
		break;
	case FPU_CSR_RU:	/* toward +infinity */
		if ((round || sticky) && !xs)
			xm++;
		break;
	case FPU_CSR_RD:	/* toward -infinity */
		if ((round || sticky) && xs)
			xm++;
		break;
	}
	if (round || sticky)
		ieee754_setcx(IEEE754_INEXACT);
	/* rebuild as a double from the integer, reapplying the sign */
	ret = ieee754dp_flong(xm);
	DPSIGN(ret) = xs;
	return ret;
}
| linux-master | arch/mips/math-emu/dp_rint.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754dp.h"
/*
 * Convert a signed 64-bit integer to double precision. Values wider
 * than the 52-bit mantissa are rounded by ieee754dp_format().
 */
union ieee754dp ieee754dp_flong(s64 x)
{
	u64 xm;
	int xe;
	int xs;
	ieee754_clearcx();
	if (x == 0)
		return ieee754dp_zero(0);
	if (x == 1 || x == -1)
		return ieee754dp_one(x < 0);
	if (x == 10 || x == -10)
		return ieee754dp_ten(x < 0);
	xs = (x < 0);
	if (xs) {
		if (x == (1ULL << 63))
			xm = (1ULL << 63);	/* max neg can't be safely negated */
		else
			xm = -x;
	} else {
		xm = x;
	}
	/* normalize */
	xe = DP_FBITS + 3;
	if (xm >> (DP_FBITS + 1 + 3)) {
		/* shunt out overflow bits (sticky shift - rounding later) */
		while (xm >> (DP_FBITS + 1 + 3)) {
			XDPSRSX1();
		}
	} else {
		/* normalize in grs extended double precision */
		while ((xm >> (DP_FBITS + 3)) == 0) {
			xm <<= 1;
			xe--;
		}
	}
	return ieee754dp_format(xs, xe, xm);
}
| linux-master | arch/mips/math-emu/dp_flong.c |
// SPDX-License-Identifier: GPL-2.0-only
/* ieee754 floating point arithmetic
* single and double precision
*
* BUGS
* not much dp done
* doesn't generate IEEE754_INEXACT
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include <linux/compiler.h>
#include "ieee754.h"
#include "ieee754sp.h"
#include "ieee754dp.h"
/*
* Special constants
*/
/*
* Older GCC requires the inner braces for initialization of union ieee754dp's
* anonymous struct member. Without an error will result.
*/
/*
 * Build a constant from sign @s, unbiased exponent @b and mantissa @m.
 * Older GCC requires the inner braces for initialization of the union's
 * anonymous struct member (see comment above).
 */
#define xPCNST(s, b, m, ebias)						\
{									\
	{								\
		.sign	= (s),						\
		.bexp	= (b) + ebias,					\
		.mant	= (m)						\
	}								\
}
#define DPCNST(s, b, m)							\
	xPCNST(s, b, m, DP_EBIAS)
/* Table of double-precision special values, indexed by the IEEE754_SPCVAL_* enum */
const union ieee754dp __ieee754dp_spcvals[] = {
	DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL),	/* + zero   */
	DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL),	/* - zero   */
	DPCNST(0, 0, 0x0000000000000ULL),		/* + 1.0   */
	DPCNST(1, 0, 0x0000000000000ULL),		/* - 1.0   */
	DPCNST(0, 3, 0x4000000000000ULL),		/* + 10.0   */
	DPCNST(1, 3, 0x4000000000000ULL),		/* - 10.0   */
	DPCNST(0, DP_EMAX + 1, 0x0000000000000ULL),	/* + infinity */
	DPCNST(1, DP_EMAX + 1, 0x0000000000000ULL),	/* - infinity */
	DPCNST(0, DP_EMAX + 1, 0x7FFFFFFFFFFFFULL),	/* + ind legacy qNaN */
	DPCNST(0, DP_EMAX + 1, 0x8000000000000ULL),	/* + indef 2008 qNaN */
	DPCNST(0, DP_EMAX, 0xFFFFFFFFFFFFFULL),		/* + max */
	DPCNST(1, DP_EMAX, 0xFFFFFFFFFFFFFULL),		/* - max */
	DPCNST(0, DP_EMIN, 0x0000000000000ULL),		/* + min normal */
	DPCNST(1, DP_EMIN, 0x0000000000000ULL),		/* - min normal */
	DPCNST(0, DP_EMIN - 1, 0x0000000000001ULL),	/* + min denormal */
	DPCNST(1, DP_EMIN - 1, 0x0000000000001ULL),	/* - min denormal */
	DPCNST(0, 31, 0x0000000000000ULL),		/* + 1.0e31 */
	DPCNST(0, 63, 0x0000000000000ULL),		/* + 1.0e63 */
};
#define SPCNST(s, b, m)							\
	xPCNST(s, b, m, SP_EBIAS)
/* Table of single-precision special values, same index order as the DP table */
const union ieee754sp __ieee754sp_spcvals[] = {
	SPCNST(0, SP_EMIN - 1, 0x000000),	/* + zero   */
	SPCNST(1, SP_EMIN - 1, 0x000000),	/* - zero   */
	SPCNST(0, 0, 0x000000),			/* + 1.0   */
	SPCNST(1, 0, 0x000000),			/* - 1.0   */
	SPCNST(0, 3, 0x200000),			/* + 10.0   */
	SPCNST(1, 3, 0x200000),			/* - 10.0   */
	SPCNST(0, SP_EMAX + 1, 0x000000),	/* + infinity */
	SPCNST(1, SP_EMAX + 1, 0x000000),	/* - infinity */
	SPCNST(0, SP_EMAX + 1, 0x3FFFFF),	/* + indef legacy quiet NaN */
	SPCNST(0, SP_EMAX + 1, 0x400000),	/* + indef 2008 quiet NaN */
	SPCNST(0, SP_EMAX, 0x7FFFFF),		/* + max normal */
	SPCNST(1, SP_EMAX, 0x7FFFFF),		/* - max normal */
	SPCNST(0, SP_EMIN, 0x000000),		/* + min normal */
	SPCNST(1, SP_EMIN, 0x000000),		/* - min normal */
	SPCNST(0, SP_EMIN - 1, 0x000001),	/* + min denormal */
	SPCNST(1, SP_EMIN - 1, 0x000001),	/* - min denormal */
	SPCNST(0, 31, 0x000000),		/* + 1.0e31 */
	SPCNST(0, 63, 0x000000),		/* + 1.0e63 */
};
| linux-master | arch/mips/math-emu/ieee754.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754dp.h"
/*
 * Negate @x. In 2008 mode this is a pure sign-bit flip; in legacy mode
 * it is computed as (0 - x) with round-down, matching the older
 * arithmetic-negate semantics (which can quieten/transform NaNs).
 */
union ieee754dp ieee754dp_neg(union ieee754dp x)
{
	union ieee754dp y;
	if (ieee754_csr.abs2008) {
		y = x;
		DPSIGN(y) = !DPSIGN(x);
	} else {
		unsigned int oldrm;
		/* force round-down so the subtraction is sign-exact */
		oldrm = ieee754_csr.rm;
		ieee754_csr.rm = FPU_CSR_RD;
		y = ieee754dp_sub(ieee754dp_zero(0), x);
		ieee754_csr.rm = oldrm;
	}
	return y;
}
/*
 * Absolute value of @x. In 2008 mode this just clears the sign bit; in
 * legacy mode it is computed arithmetically ((0 - x) for negatives,
 * (0 + x) otherwise) with round-down.
 */
union ieee754dp ieee754dp_abs(union ieee754dp x)
{
	union ieee754dp y;
	if (ieee754_csr.abs2008) {
		y = x;
		DPSIGN(y) = 0;
	} else {
		unsigned int oldrm;
		/* force round-down so the arithmetic is sign-exact */
		oldrm = ieee754_csr.rm;
		ieee754_csr.rm = FPU_CSR_RD;
		if (DPSIGN(x))
			y = ieee754dp_sub(ieee754dp_zero(0), x);
		else
			y = ieee754dp_add(ieee754dp_zero(0), x);
		ieee754_csr.rm = oldrm;
	}
	return y;
}
| linux-master | arch/mips/math-emu/dp_simple.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
* Copyright (C) 2017 Imagination Technologies, Ltd.
* Author: Aleksandar Markovic <[email protected]>
*/
#include "ieee754sp.h"
/*
 * Round @x to an integral value (still in single precision), using the
 * current rounding mode; sets INEXACT when any fraction was discarded.
 */
union ieee754sp ieee754sp_rint(union ieee754sp x)
{
	union ieee754sp ret;
	u32 residue;
	int sticky;
	int round;
	int odd;
	COMPXDP;		/* <-- DP needed for 64-bit mantissa tmp */
	ieee754_clearcx();
	EXPLODEXSP;
	FLUSHXSP;
	if (xc == IEEE754_CLASS_SNAN)
		return ieee754sp_nanxcpt(x);
	/* QNaN, Inf and zero are already integral */
	if ((xc == IEEE754_CLASS_QNAN) ||
	    (xc == IEEE754_CLASS_INF) ||
	    (xc == IEEE754_CLASS_ZERO))
		return x;
	/* |x| >= 2^23: no fractional bits left */
	if (xe >= SP_FBITS)
		return x;
	if (xe < -1) {
		/* |x| < 0.25: everything is residue */
		residue = xm;
		round = 0;
		sticky = residue != 0;
		xm = 0;
	} else {
		/* two-step shift: xe may be -1, so a single shift could be 32 */
		residue = xm << (xe + 1);
		residue <<= 31 - SP_FBITS;
		round = (residue >> 31) != 0;
		sticky = (residue << 1) != 0;
		xm >>= SP_FBITS - xe;
	}
	odd = (xm & 0x1) != 0x0;
	switch (ieee754_csr.rm) {
	case FPU_CSR_RN:	/* toward nearest */
		if (round && (sticky || odd))
			xm++;
		break;
	case FPU_CSR_RZ:	/* toward zero */
		break;
	case FPU_CSR_RU:	/* toward +infinity */
		if ((round || sticky) && !xs)
			xm++;
		break;
	case FPU_CSR_RD:	/* toward -infinity */
		if ((round || sticky) && xs)
			xm++;
		break;
	}
	if (round || sticky)
		ieee754_setcx(IEEE754_INEXACT);
	/* rebuild as a float from the integer, reapplying the sign */
	ret = ieee754sp_flong(xm);
	SPSIGN(ret) = xs;
	return ret;
}
| linux-master | arch/mips/math-emu/sp_rint.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754sp.h"
#include "ieee754dp.h"
/*
 * Narrow a double-precision NaN to single precision: keep the sign and
 * the high-order payload bits, and force the all-ones SP exponent.
 */
static inline union ieee754sp ieee754sp_nan_fdp(int xs, u64 xm)
{
	u64 payload = xm >> (DP_FBITS - SP_FBITS);

	return buildsp(xs, SP_EMAX + 1 + SP_EBIAS, payload);
}
/*
 * Convert a double-precision value to single precision (CVT.S.D),
 * narrowing the mantissa with a sticky right shift and letting
 * ieee754sp_format() perform rounding, overflow and underflow handling.
 */
union ieee754sp ieee754sp_fdp(union ieee754dp x)
{
	union ieee754sp y;
	u32 rm;

	COMPXDP;
	COMPYSP;

	EXPLODEXDP;

	ieee754_clearcx();

	FLUSHXDP;

	switch (xc) {
	case IEEE754_CLASS_SNAN:
		/* Signal the sNaN, then propagate it like a qNaN below */
		x = ieee754dp_nanxcpt(x);
		EXPLODEXDP;
		fallthrough;
	case IEEE754_CLASS_QNAN:
		y = ieee754sp_nan_fdp(xs, xm);
		if (!ieee754_csr.nan2008) {
			/*
			 * Legacy-NaN mode: the truncated payload may no
			 * longer encode a NaN in SP; substitute the
			 * indefinite qNaN in that case.
			 */
			EXPLODEYSP;
			if (!ieee754_class_nan(yc))
				y = ieee754sp_indef();
		}
		return y;
	case IEEE754_CLASS_INF:
		return ieee754sp_inf(xs);
	case IEEE754_CLASS_ZERO:
		return ieee754sp_zero(xs);
	case IEEE754_CLASS_DNORM:
		/* can't possibly be sp representable */
		ieee754_setcx(IEEE754_UNDERFLOW);
		ieee754_setcx(IEEE754_INEXACT);
		/* Round away from zero only in the matching directed mode */
		if ((ieee754_csr.rm == FPU_CSR_RU && !xs) ||
		    (ieee754_csr.rm == FPU_CSR_RD && xs))
			return ieee754sp_mind(xs);
		return ieee754sp_zero(xs);
	case IEEE754_CLASS_NORM:
		break;
	}

	/*
	 * Convert from DP_FBITS to SP_FBITS+3 with sticky right shift.
	 */
	rm = (xm >> (DP_FBITS - (SP_FBITS + 3))) |
	     ((xm << (64 - (DP_FBITS - (SP_FBITS + 3)))) != 0);

	return ieee754sp_format(xs, xe, rm);
}
| linux-master | arch/mips/math-emu/sp_fdp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE754 floating point arithmetic
* single precision: MADDF.f (Fused Multiply Add)
* MADDF.fmt: FPR[fd] = FPR[fd] + (FPR[fs] x FPR[ft])
*
* MIPS floating point support
* Copyright (C) 2015 Imagination Technologies, Ltd.
* Author: Markos Chandras <[email protected]>
*/
#include "ieee754sp.h"
/*
 * Core of the single-precision fused multiply-add family: computes
 * z + (x * y) with a single final rounding step. @flags may negate the
 * product (MADDF_NEGATE_PRODUCT) and/or the addend
 * (MADDF_NEGATE_ADDITION); all MADDF/MSUBF/MADD/MSUB/NMADD/NMSUB entry
 * points funnel through here.
 *
 * Fix: the bit-62 asserts previously used (1 << 62), which left-shifts
 * a 32-bit int by 62 — undefined behaviour in C and not a valid 64-bit
 * mask. Use 1ULL so the asserts actually test bit 62 of the u64 values.
 */
static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
				 union ieee754sp y, enum maddf_flags flags)
{
	int re;
	int rs;
	unsigned int rm;
	u64 rm64;
	u64 zm64;
	int s;

	COMPXSP;
	COMPYSP;
	COMPZSP;

	EXPLODEXSP;
	EXPLODEYSP;
	EXPLODEZSP;

	FLUSHXSP;
	FLUSHYSP;
	FLUSHZSP;

	ieee754_clearcx();

	/* Sign of the product, with any requested negations applied */
	rs = xs ^ ys;
	if (flags & MADDF_NEGATE_PRODUCT)
		rs ^= 1;
	if (flags & MADDF_NEGATE_ADDITION)
		zs ^= 1;

	/*
	 * Handle the cases when at least one of x, y or z is a NaN.
	 * Order of precedence is sNaN, qNaN and z, x, y.
	 */
	if (zc == IEEE754_CLASS_SNAN)
		return ieee754sp_nanxcpt(z);
	if (xc == IEEE754_CLASS_SNAN)
		return ieee754sp_nanxcpt(x);
	if (yc == IEEE754_CLASS_SNAN)
		return ieee754sp_nanxcpt(y);
	if (zc == IEEE754_CLASS_QNAN)
		return z;
	if (xc == IEEE754_CLASS_QNAN)
		return x;
	if (yc == IEEE754_CLASS_QNAN)
		return y;

	if (zc == IEEE754_CLASS_DNORM)
		SPDNORMZ;
	/* ZERO z cases are handled separately below */

	switch (CLPAIR(xc, yc)) {

	/*
	 * Infinity handling
	 */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
		/* inf * 0 is invalid regardless of z */
		ieee754_setcx(IEEE754_INVALID_OPERATION);
		return ieee754sp_indef();

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		if ((zc == IEEE754_CLASS_INF) && (zs != rs)) {
			/*
			 * Cases of addition of infinities with opposite signs
			 * or subtraction of infinities with same signs.
			 */
			ieee754_setcx(IEEE754_INVALID_OPERATION);
			return ieee754sp_indef();
		}
		/*
		 * z is here either not an infinity, or an infinity having the
		 * same sign as product (x*y). The result must be an infinity,
		 * and its sign is determined only by the sign of product (x*y).
		 */
		return ieee754sp_inf(rs);

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		if (zc == IEEE754_CLASS_INF)
			return ieee754sp_inf(zs);
		if (zc == IEEE754_CLASS_ZERO) {
			/* Handle cases +0 + (-0) and similar ones. */
			if (zs == rs)
				/*
				 * Cases of addition of zeros of equal signs
				 * or subtraction of zeroes of opposite signs.
				 * The sign of the resulting zero is in any
				 * such case determined only by the sign of z.
				 */
				return z;

			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
		}
		/* x*y is here 0, and z is not 0, so just return z */
		return z;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		SPDNORMX;
		fallthrough;
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		if (zc == IEEE754_CLASS_INF)
			return ieee754sp_inf(zs);
		SPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		if (zc == IEEE754_CLASS_INF)
			return ieee754sp_inf(zs);
		SPDNORMX;
		break;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
		if (zc == IEEE754_CLASS_INF)
			return ieee754sp_inf(zs);
		/* continue to real computations */
	}

	/* Finally get to do some computation */

	/*
	 * Do the multiplication bit first
	 *
	 * rm = xm * ym, re = xe + ye basically
	 *
	 * At this point xm and ym should have been normalized.
	 */
	assert(xm & SP_HIDDEN_BIT);
	assert(ym & SP_HIDDEN_BIT);

	re = xe + ye;

	/* Multiple 24 bit xm and ym to give 48 bit results */
	rm64 = (uint64_t)xm * ym;

	/* Shunt to top of word */
	rm64 = rm64 << 16;

	/* Put explicit bit at bit 62 if necessary */
	if ((int64_t) rm64 < 0) {
		rm64 = rm64 >> 1;
		re++;
	}

	/* 1ULL: (1 << 62) on a 32-bit int would be undefined behaviour */
	assert(rm64 & (1ULL << 62));

	if (zc == IEEE754_CLASS_ZERO) {
		/*
		 * Move explicit bit from bit 62 to bit 26 since the
		 * ieee754sp_format code expects the mantissa to be
		 * 27 bits wide (24 + 3 rounding bits).
		 */
		rm = XSPSRS64(rm64, (62 - 26));
		return ieee754sp_format(rs, re, rm);
	}

	/* Move explicit bit from bit 23 to bit 62 */
	zm64 = (uint64_t)zm << (62 - 23);
	assert(zm64 & (1ULL << 62));

	/* Make the exponents the same */
	if (ze > re) {
		/*
		 * Have to shift r fraction right to align.
		 */
		s = ze - re;
		rm64 = XSPSRS64(rm64, s);
		re += s;
	} else if (re > ze) {
		/*
		 * Have to shift z fraction right to align.
		 */
		s = re - ze;
		zm64 = XSPSRS64(zm64, s);
		ze += s;
	}
	assert(ze == re);
	assert(ze <= SP_EMAX);

	/* Do the addition */
	if (zs == rs) {
		/*
		 * Generate 64 bit result by adding two 63 bit numbers
		 * leaving result in zm64, zs and ze.
		 */
		zm64 = zm64 + rm64;
		if ((int64_t)zm64 < 0) {	/* carry out */
			zm64 = XSPSRS1(zm64);
			ze++;
		}
	} else {
		/* Opposite signs: subtract the smaller magnitude */
		if (zm64 >= rm64) {
			zm64 = zm64 - rm64;
		} else {
			zm64 = rm64 - zm64;
			zs = rs;
		}
		if (zm64 == 0)
			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);

		/*
		 * Put explicit bit at bit 62 if necessary.
		 */
		while ((zm64 >> 62) == 0) {
			zm64 <<= 1;
			ze--;
		}
	}

	/*
	 * Move explicit bit from bit 62 to bit 26 since the
	 * ieee754sp_format code expects the mantissa to be
	 * 27 bits wide (24 + 3 rounding bits).
	 */
	zm = XSPSRS64(zm64, (62 - 26));

	return ieee754sp_format(zs, ze, zm);
}
/* MADDF.S: fd = z + (x * y), single final rounding (no flags) */
union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
				union ieee754sp y)
{
	return _sp_maddf(z, x, y, 0);
}
/* MSUBF.S: fd = z - (x * y), via product negation */
union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
				union ieee754sp y)
{
	return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
}
/* Legacy MADD.S: fd = z + (x * y) (same core path as MADDF) */
union ieee754sp ieee754sp_madd(union ieee754sp z, union ieee754sp x,
				union ieee754sp y)
{
	return _sp_maddf(z, x, y, 0);
}
/* Legacy MSUB.S: fd = (x * y) - z, via addend negation */
union ieee754sp ieee754sp_msub(union ieee754sp z, union ieee754sp x,
				union ieee754sp y)
{
	return _sp_maddf(z, x, y, MADDF_NEGATE_ADDITION);
}
/* Legacy NMADD.S: fd = -(z + (x * y)), both product and addend negated */
union ieee754sp ieee754sp_nmadd(union ieee754sp z, union ieee754sp x,
				union ieee754sp y)
{
	return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT|MADDF_NEGATE_ADDITION);
}
/* Legacy NMSUB.S: fd = -((x * y) - z) == z - (x * y), via product negation */
union ieee754sp ieee754sp_nmsub(union ieee754sp z, union ieee754sp x,
				union ieee754sp y)
{
	return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
}
| linux-master | arch/mips/math-emu/sp_maddf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Some debug functions
*
* MIPS floating point support
*
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* Nov 7, 2000
* Modified to build and operate in Linux kernel environment.
*
* Kevin D. Kissell, [email protected] and Carsten Langgaard, [email protected]
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/printk.h>
#include "ieee754.h"
#include "ieee754sp.h"
#include "ieee754dp.h"
/*
 * Debug helper: print the raw bits of a double-precision value and a
 * human-readable decomposition (sign, mantissa bits, unbiased exponent).
 * Returns x unchanged so the call can be dropped into an expression.
 */
union ieee754dp ieee754dp_dump(char *m, union ieee754dp x)
{
	const char sign = DPSIGN(x) ? '-' : '+';
	int bit;

	printk("%s", m);
	printk("<%08x,%08x>\n", (unsigned) (x.bits >> 32),
	       (unsigned) x.bits);
	printk("\t=");

	switch (ieee754dp_class(x)) {
	case IEEE754_CLASS_QNAN:
	case IEEE754_CLASS_SNAN:
		/* NaN: sign plus the full payload in binary */
		printk("Nan %c", sign);
		for (bit = DP_FBITS - 1; bit >= 0; bit--)
			printk("%c", DPMANT(x) & DP_MBIT(bit) ? '1' : '0');
		break;
	case IEEE754_CLASS_INF:
		printk("%cInfinity", sign);
		break;
	case IEEE754_CLASS_ZERO:
		printk("%cZero", sign);
		break;
	case IEEE754_CLASS_DNORM:
		/* denormal: implicit integer part is 0 */
		printk("%c0.", sign);
		for (bit = DP_FBITS - 1; bit >= 0; bit--)
			printk("%c", DPMANT(x) & DP_MBIT(bit) ? '1' : '0');
		printk("e%d", DPBEXP(x) - DP_EBIAS);
		break;
	case IEEE754_CLASS_NORM:
		/* normal: implicit integer part is 1 */
		printk("%c1.", sign);
		for (bit = DP_FBITS - 1; bit >= 0; bit--)
			printk("%c", DPMANT(x) & DP_MBIT(bit) ? '1' : '0');
		printk("e%d", DPBEXP(x) - DP_EBIAS);
		break;
	default:
		printk("Illegal/Unknown IEEE754 value class");
	}
	printk("\n");

	return x;
}
/*
 * Debug helper: single-precision counterpart of ieee754dp_dump().
 * Prints the raw bits and a decomposed view, then returns x unchanged.
 */
union ieee754sp ieee754sp_dump(char *m, union ieee754sp x)
{
	const char sign = SPSIGN(x) ? '-' : '+';
	int bit;

	printk("%s=", m);
	printk("<%08x>\n", (unsigned) x.bits);
	printk("\t=");

	switch (ieee754sp_class(x)) {
	case IEEE754_CLASS_QNAN:
	case IEEE754_CLASS_SNAN:
		/* NaN: sign plus the full payload in binary */
		printk("Nan %c", sign);
		for (bit = SP_FBITS - 1; bit >= 0; bit--)
			printk("%c", SPMANT(x) & SP_MBIT(bit) ? '1' : '0');
		break;
	case IEEE754_CLASS_INF:
		printk("%cInfinity", sign);
		break;
	case IEEE754_CLASS_ZERO:
		printk("%cZero", sign);
		break;
	case IEEE754_CLASS_DNORM:
		/* denormal: implicit integer part is 0 */
		printk("%c0.", sign);
		for (bit = SP_FBITS - 1; bit >= 0; bit--)
			printk("%c", SPMANT(x) & SP_MBIT(bit) ? '1' : '0');
		printk("e%d", SPBEXP(x) - SP_EBIAS);
		break;
	case IEEE754_CLASS_NORM:
		/* normal: implicit integer part is 1 */
		printk("%c1.", sign);
		for (bit = SP_FBITS - 1; bit >= 0; bit--)
			printk("%c", SPMANT(x) & SP_MBIT(bit) ? '1' : '0');
		printk("e%d", SPBEXP(x) - SP_EBIAS);
		break;
	default:
		printk("Illegal/Unknown IEEE754 value class");
	}
	printk("\n");

	return x;
}
| linux-master | arch/mips/math-emu/ieee754d.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE754 floating point arithmetic
* single precision: CLASS.f
* FPR[fd] = class(FPR[fs])
*
* MIPS floating point support
* Copyright (C) 2015 Imagination Technologies, Ltd.
* Author: Markos Chandras <[email protected]>
*/
#include "ieee754sp.h"
/*
 * CLASS.S: return the IEEE 754-2008 10-bit class mask for x.
 * Exactly one bit is set, identifying the (class, sign) combination.
 */
int ieee754sp_2008class(union ieee754sp x)
{
	int shift;

	COMPXSP;

	EXPLODEXSP;

	/*
	 * 10 bit mask as follows:
	 *
	 * bit0 = SNAN
	 * bit1 = QNAN
	 * bit2 = -INF
	 * bit3 = -NORM
	 * bit4 = -DNORM
	 * bit5 = -ZERO
	 * bit6 = INF
	 * bit7 = NORM
	 * bit8 = DNORM
	 * bit9 = ZERO
	 */

	/* Positive values report four bits higher than negative ones */
	shift = xs ? 0 : 4;

	switch (xc) {
	case IEEE754_CLASS_SNAN:
		return 0x01;
	case IEEE754_CLASS_QNAN:
		return 0x02;
	case IEEE754_CLASS_INF:
		return 0x04 << shift;
	case IEEE754_CLASS_NORM:
		return 0x08 << shift;
	case IEEE754_CLASS_DNORM:
		return 0x10 << shift;
	case IEEE754_CLASS_ZERO:
		return 0x20 << shift;
	default:
		pr_err("Unknown class: %d\n", xc);
		return 0;
	}
}
| linux-master | arch/mips/math-emu/sp_2008class.c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.