/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Generation of main entry point for the guest, exception handling.
*
* Copyright (C) 2012 MIPS Technologies, Inc.
* Authors: Sanjay Lal <[email protected]>
*
* Copyright (C) 2016 Imagination Technologies Ltd.
*/
#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>
/* Register names */
#define ZERO 0
#define AT 1
#define V0 2
#define V1 3
#define A0 4
#define A1 5
#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0 12
#define T1 13
#define T2 14
#define T3 15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
#define S0 16
#define S1 17
#define T9 25
#define K0 26
#define K1 27
#define GP 28
#define SP 29
#define RA 31
/* Some CP0 registers */
#define C0_PWBASE 5, 5
#define C0_HWRENA 7, 0
#define C0_BADVADDR 8, 0
#define C0_BADINSTR 8, 1
#define C0_BADINSTRP 8, 2
#define C0_PGD 9, 7
#define C0_ENTRYHI 10, 0
#define C0_GUESTCTL1 10, 4
#define C0_STATUS 12, 0
#define C0_GUESTCTL0 12, 6
#define C0_CAUSE 13, 0
#define C0_EPC 14, 0
#define C0_EBASE 15, 1
#define C0_CONFIG5 16, 5
#define C0_DDATA_LO 28, 3
#define C0_ERROREPC 30, 0
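/* Stack space reserved for the call frame when calling into C code */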
#define CALLFRAME_SIZ 32
#ifdef CONFIG_64BIT
#define ST0_KX_IF_64 ST0_KX
#else
#define ST0_KX_IF_64 0
#endif
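/*
 * CP0 registers used to stash the VCPU pointer and a temporary value while
 * running guest code. The defaults below (DDATA_LO/ErrorEPC) are replaced
 * with KScratch registers by kvm_mips_entry_setup() when the core has them.
 */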
static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };
enum label_id {
label_fpu_1 = 1,
label_msa_1,
label_return_to_host,
label_kernel_asid,
label_exit_common,
};
UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)
static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);
/*
* The version of this function in tlbex.c uses current_cpu_type(), but for KVM
* we assume symmetry.
*/
static int c0_kscratch(void)
{
return 31;
}
/**
* kvm_mips_entry_setup() - Perform global setup for entry code.
*
* Perform global setup for entry code, such as choosing a scratch register.
*
* Returns: 0 on success.
* -errno on failure.
*/
int kvm_mips_entry_setup(void)
{
/*
* We prefer to use KScratchN registers if they are available over the
* defaults above, which may not work on all cores.
*/
unsigned int kscratch_mask = cpu_data[0].kscratch_mask;
if (pgd_reg != -1)
kscratch_mask &= ~BIT(pgd_reg);
/* Pick a scratch register for storing VCPU */
if (kscratch_mask) {
scratch_vcpu[0] = c0_kscratch();
scratch_vcpu[1] = ffs(kscratch_mask) - 1;
kscratch_mask &= ~BIT(scratch_vcpu[1]);
}
/* Pick a scratch register to use as a temp for saving state */
if (kscratch_mask) {
scratch_tmp[0] = c0_kscratch();
scratch_tmp[1] = ffs(kscratch_mask) - 1;
kscratch_mask &= ~BIT(scratch_tmp[1]);
}
return 0;
}
static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
unsigned int frame)
{
/* Save the VCPU scratch register value in cp0_epc of the stack frame */
UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
/* Save the temp scratch register value in cp0_cause of the stack frame */
if (scratch_tmp[0] == c0_kscratch()) {
UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
}
}
static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
unsigned int frame)
{
/*
* Restore host scratch register values saved by
* kvm_mips_build_save_scratch().
*/
UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
if (scratch_tmp[0] == c0_kscratch()) {
UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
}
}
/**
* build_set_exc_base() - Assemble code to write exception base address.
* @p: Code buffer pointer.
* @reg: Source register (generated code may set WG bit in @reg).
*
* Assemble code to modify the exception base address in the EBase register,
* using the appropriately sized access and setting the WG bit if necessary.
*/
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
if (cpu_has_ebase_wg) {
/* Set WG so that all the bits get written */
uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
UASM_i_MTC0(p, reg, C0_EBASE);
} else {
uasm_i_mtc0(p, reg, C0_EBASE);
}
}
/**
* kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
* @addr: Address to start writing code.
*
* Assemble the start of the vcpu_run function to run a guest VCPU. The function
* conforms to the following prototype:
*
* int vcpu_run(struct kvm_vcpu *vcpu);
*
* The exit from the guest and return to the caller is handled by the code
* generated by kvm_mips_build_ret_to_host().
*
* Returns: Next address after end of written function.
*/
void *kvm_mips_build_vcpu_run(void *addr)
{
u32 *p = addr;
unsigned int i;
/*
* A0: vcpu
*/
/* k0/k1 not being used in host kernel context */
UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
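/*
 * Save the callee-saved registers s0-s7 ($16-$23) and gp/sp/fp/ra
 * ($28-$31) into the pt_regs frame; $24-$27 (t8/t9/k0/k1) are skipped.
 */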
for (i = 16; i < 32; ++i) {
if (i == 24)
i = 28;
UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
}
/* Save host status */
uasm_i_mfc0(&p, V0, C0_STATUS);
UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);
/* Save scratch registers; they will be used to store the vcpu pointer etc. */
kvm_mips_build_save_scratch(&p, V1, K1);
/* VCPU scratch register has pointer to vcpu */
UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);
/* Offset into vcpu->arch */
UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));
/*
* Save the host stack to VCPU, used for exception processing
* when we exit from the Guest
*/
UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
/* Save the kernel gp as well */
UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
/*
* Setup status register for running the guest in UM, interrupts
* are disabled
*/
UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
uasm_i_mtc0(&p, K0, C0_STATUS);
uasm_i_ehb(&p);
/* load up the new EBASE */
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
build_set_exc_base(&p, K0);
/*
* Now that the new EBASE has been loaded, unset BEV, set
* interrupt mask as it was but make sure that timer interrupts
* are enabled
*/
uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
uasm_i_andi(&p, V0, V0, ST0_IM);
uasm_i_or(&p, K0, K0, V0);
uasm_i_mtc0(&p, K0, C0_STATUS);
uasm_i_ehb(&p);
p = kvm_mips_build_enter_guest(p);
return p;
}
/**
* kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
* @addr: Address to start writing code.
*
* Assemble the code to resume guest execution. This code is common between the
* initial entry into the guest from the host, and returning from the exit
* handler back to the guest.
*
* Returns: Next address after end of written function.
*/
static void *kvm_mips_build_enter_guest(void *addr)
{
u32 *p = addr;
unsigned int i;
struct uasm_label labels[2];
struct uasm_reloc relocs[2];
struct uasm_label __maybe_unused *l = labels;
struct uasm_reloc __maybe_unused *r = relocs;
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
/* Set Guest EPC */
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
UASM_i_MTC0(&p, T0, C0_EPC);
/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
if (cpu_has_ldpte)
UASM_i_MFC0(&p, K0, C0_PWBASE);
else
UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
/*
* Set up KVM GPA pgd.
* This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
* - call tlbmiss_handler_setup_pgd(mm->pgd)
* - write mm->pgd into CP0_PWBase
*
* We keep S0 pointing at struct kvm so we can load the ASID below.
*/
UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
(int)offsetof(struct kvm_vcpu, arch), K1);
UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
uasm_i_jalr(&p, RA, T9);
/* delay slot */
if (cpu_has_htw)
UASM_i_MTC0(&p, A0, C0_PWBASE);
else
uasm_i_nop(&p);
/* Set GM bit to setup eret to VZ guest context */
uasm_i_addiu(&p, V1, ZERO, 1);
uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
if (cpu_has_guestid) {
/*
* Set root mode GuestID, so that root TLB refill handler can
* use the correct GuestID in the root TLB.
*/
/* Get current GuestID */
uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
/* Set GuestCtl1.RID = GuestCtl1.ID */
uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
MIPS_GCTL1_ID_WIDTH);
uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
MIPS_GCTL1_RID_WIDTH);
uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
/* GuestID handles dealiasing so we don't need to touch ASID */
goto skip_asid_restore;
}
/* Root ASID Dealias (RAD) */
/* Save host ASID */
UASM_i_MFC0(&p, K0, C0_ENTRYHI);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
K1);
/* Set the root ASID for the Guest */
UASM_i_ADDIU(&p, T1, S0,
offsetof(struct kvm, arch.gpa_mm.context.asid));
/* t1: contains the base of the ASID array, need to get the cpu id */
/* smp_processor_id */
uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
/* index the ASID array */
uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
UASM_i_ADDU(&p, T3, T1, T2);
UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
/*
* reuse ASID array offset
* cpuinfo_mips is a multiple of sizeof(long)
*/
uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
uasm_i_mul(&p, T2, T2, T3);
UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
UASM_i_ADDU(&p, AT, AT, T2);
UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
uasm_i_and(&p, K0, K0, T2);
#else
uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
/* Set up KVM VZ root ASID (!guestid) */
uasm_i_mtc0(&p, K0, C0_ENTRYHI);
skip_asid_restore:
uasm_i_ehb(&p);
/* Disable RDHWR access */
uasm_i_mtc0(&p, ZERO, C0_HWRENA);
/* load the guest context from VCPU and return */
for (i = 1; i < 32; ++i) {
/* Guest k0/k1 loaded later */
if (i == K0 || i == K1)
continue;
UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
}
#ifndef CONFIG_CPU_MIPSR6
/* Restore hi/lo */
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
uasm_i_mthi(&p, K0);
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
uasm_i_mtlo(&p, K0);
#endif
/* Restore the guest's k0/k1 registers */
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
/* Jump to guest */
uasm_i_eret(&p);
uasm_resolve_relocs(relocs, labels);
return p;
}
/**
* kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
* @addr: Address to start writing code.
* @handler: Address of common handler (within range of @addr).
*
* Assemble TLB refill exception fast path handler for guest execution.
*
* Returns: Next address after end of written function.
*/
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
{
u32 *p = addr;
struct uasm_label labels[2];
struct uasm_reloc relocs[2];
#ifndef CONFIG_CPU_LOONGSON64
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
#endif
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
/* Save guest k1 into scratch register */
UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
/* Get the VCPU pointer from the VCPU scratch register */
UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
/* Save guest k0 into VCPU structure */
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
/*
* Some of the common tlbex code uses current_cpu_type(). For KVM we
* assume symmetry and just disable preemption to silence the warning.
*/
preempt_disable();
#ifdef CONFIG_CPU_LOONGSON64
UASM_i_MFC0(&p, K1, C0_PGD);
uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
#endif
uasm_i_ldpte(&p, K1, 0); /* even */
uasm_i_ldpte(&p, K1, 1); /* odd */
uasm_i_tlbwr(&p);
#else
/*
* Now for the actual refill bit. A lot of this can be common with the
* Linux TLB refill handler, however we don't need to handle so many
* cases. We only need to handle user mode refills, and user mode runs
* with 32-bit addressing.
*
* Therefore the branch to label_vmalloc generated by build_get_pmde64()
* that isn't resolved should never actually get taken and is harmless
* to leave in place for now.
*/
#ifdef CONFIG_64BIT
build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif
/* we don't support huge pages yet */
build_get_ptep(&p, K0, K1);
build_update_entries(&p, K0, K1);
build_tlb_write_entry(&p, &l, &r, tlb_random);
#endif
preempt_enable();
/* Get the VCPU pointer from the VCPU scratch register again */
UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
/* Restore the guest's k0/k1 registers */
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
uasm_i_ehb(&p);
UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
/* Jump to guest */
uasm_i_eret(&p);
return p;
}
/**
* kvm_mips_build_exception() - Assemble first level guest exception handler.
* @addr: Address to start writing code.
* @handler: Address of common handler (within range of @addr).
*
* Assemble exception vector code for guest execution. The generated vector will
* branch to the common exception handler generated by kvm_mips_build_exit().
*
* Returns: Next address after end of written function.
*/
void *kvm_mips_build_exception(void *addr, void *handler)
{
u32 *p = addr;
struct uasm_label labels[2];
struct uasm_reloc relocs[2];
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
/* Save guest k1 into scratch register */
UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
/* Get the VCPU pointer from the VCPU scratch register */
UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
/* Save guest k0 into VCPU structure */
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
/* Branch to the common handler */
uasm_il_b(&p, &r, label_exit_common);
uasm_i_nop(&p);
uasm_l_exit_common(&l, handler);
uasm_resolve_relocs(relocs, labels);
return p;
}
/**
* kvm_mips_build_exit() - Assemble common guest exit handler.
* @addr: Address to start writing code.
*
* Assemble the generic guest exit handling code. This is called by the
* exception vectors (generated by kvm_mips_build_exception()), and calls
* kvm_mips_handle_exit(), then either resumes the guest or returns to the host
* depending on the return value.
*
* Returns: Next address after end of written function.
*/
void *kvm_mips_build_exit(void *addr)
{
u32 *p = addr;
unsigned int i;
struct uasm_label labels[3];
struct uasm_reloc relocs[3];
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
/*
* Generic Guest exception handler. We end up here when the guest
* does something that causes a trap to kernel mode.
*
* Both k0/k1 registers will have already been saved (k0 into the vcpu
* structure, and k1 into the scratch_tmp register).
*
* The k1 register will already contain the kvm_vcpu_arch pointer.
*/
/* Start saving Guest context to VCPU */
for (i = 0; i < 32; ++i) {
/* Guest k0/k1 saved later */
if (i == K0 || i == K1)
continue;
UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
}
#ifndef CONFIG_CPU_MIPSR6
/* We need to save hi/lo and restore them on the way out */
uasm_i_mfhi(&p, T0);
UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);
uasm_i_mflo(&p, T0);
UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif
/* Finally save guest k1 to VCPU */
uasm_i_ehb(&p);
UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
/* Now that context has been saved, we can use other registers */
/* Restore vcpu */
UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
/*
* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
* the exception
*/
UASM_i_MFC0(&p, K0, C0_EPC);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
UASM_i_MFC0(&p, K0, C0_BADVADDR);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
K1);
uasm_i_mfc0(&p, K0, C0_CAUSE);
uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
if (cpu_has_badinstr) {
uasm_i_mfc0(&p, K0, C0_BADINSTR);
uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
host_cp0_badinstr), K1);
}
if (cpu_has_badinstrp) {
uasm_i_mfc0(&p, K0, C0_BADINSTRP);
uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
host_cp0_badinstrp), K1);
}
/* Now restore the host state just enough to run the handlers */
/* Switch EBASE to the one used by Linux */
/* load up the host EBASE */
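/* Set BEV first so exceptions use the boot vector while EBase is switched */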
uasm_i_mfc0(&p, V0, C0_STATUS);
uasm_i_lui(&p, AT, ST0_BEV >> 16);
uasm_i_or(&p, K0, V0, AT);
uasm_i_mtc0(&p, K0, C0_STATUS);
uasm_i_ehb(&p);
UASM_i_LA_mostly(&p, K0, (long)&ebase);
UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
build_set_exc_base(&p, K0);
if (raw_cpu_has_fpu) {
/*
* If FPU is enabled, save FCR31 and clear it so that later
* ctc1's don't trigger FPE for pending exceptions.
*/
uasm_i_lui(&p, AT, ST0_CU1 >> 16);
uasm_i_and(&p, V1, V0, AT);
uasm_il_beqz(&p, &r, V1, label_fpu_1);
uasm_i_nop(&p);
uasm_i_cfc1(&p, T0, 31);
uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
K1);
uasm_i_ctc1(&p, ZERO, 31);
uasm_l_fpu_1(&l, p);
}
if (cpu_has_msa) {
/*
* If MSA is enabled, save MSACSR and clear it so that later
* instructions don't trigger MSAFPE for pending exceptions.
*/
uasm_i_mfc0(&p, T0, C0_CONFIG5);
uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
uasm_il_beqz(&p, &r, T0, label_msa_1);
uasm_i_nop(&p);
uasm_i_cfcmsa(&p, T0, MSA_CSR);
uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
K1);
uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
uasm_l_msa_1(&l, p);
}
/* Restore host ASID */
if (!cpu_has_guestid) {
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
K1);
UASM_i_MTC0(&p, K0, C0_ENTRYHI);
}
/*
* Set up normal Linux process pgd.
* This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
* - call tlbmiss_handler_setup_pgd(mm->pgd)
* - write mm->pgd into CP0_PWBase
*/
UASM_i_LW(&p, A0,
offsetof(struct kvm_vcpu_arch, host_pgd), K1);
UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
uasm_i_jalr(&p, RA, T9);
/* delay slot */
if (cpu_has_htw)
UASM_i_MTC0(&p, A0, C0_PWBASE);
else
uasm_i_nop(&p);
/* Clear GM bit so we don't enter guest mode when EXL is cleared */
uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
/* Save GuestCtl0 so we can access GExcCode after CPU migration */
uasm_i_sw(&p, K0,
offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);
if (cpu_has_guestid) {
/*
* Clear root mode GuestID, so that root TLB operations use the
* root GuestID in the root TLB.
*/
uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
MIPS_GCTL1_RID_WIDTH);
uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
}
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
uasm_i_and(&p, V0, V0, AT);
uasm_i_lui(&p, AT, ST0_CU0 >> 16);
uasm_i_or(&p, V0, V0, AT);
#ifdef CONFIG_64BIT
uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
#endif
uasm_i_mtc0(&p, V0, C0_STATUS);
uasm_i_ehb(&p);
/* Load up host GP */
UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
/* Need a stack before we can jump to "C" */
UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
/* Saved host state */
UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
/*
* XXXKYMA do we need to load the host ASID, maybe not because the
* kernel entries are marked GLOBAL, need to verify
*/
/* Restore host scratch registers, as we'll have clobbered them */
kvm_mips_build_restore_scratch(&p, K0, SP);
/* Restore RDHWR access */
UASM_i_LA_mostly(&p, K0, (long)&hwrena);
uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
uasm_i_mtc0(&p, K0, C0_HWRENA);
/* Jump to handler */
/*
* XXXKYMA: not sure if this is safe, how large is the stack??
* Now jump to the kvm_mips_handle_exit() to see if we can deal
* with this in the kernel
*/
uasm_i_move(&p, A0, S0);
UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
uasm_i_jalr(&p, RA, T9);
UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
uasm_resolve_relocs(relocs, labels);
p = kvm_mips_build_ret_from_exit(p);
return p;
}
/**
* kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
* @addr: Address to start writing code.
*
* Assemble the code to handle the return from kvm_mips_handle_exit(), either
* resuming the guest or returning to the host depending on the return value.
*
* Returns: Next address after end of written function.
*/
static void *kvm_mips_build_ret_from_exit(void *addr)
{
u32 *p = addr;
struct uasm_label labels[2];
struct uasm_reloc relocs[2];
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
/* Return from handler. Make sure interrupts are disabled */
uasm_i_di(&p, ZERO);
uasm_i_ehb(&p);
/*
* XXXKYMA: k0/k1 could have been blown away if we processed
* an exception while we were handling the exception from the
* guest, reload k1
*/
uasm_i_move(&p, K1, S0);
UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
/*
* Check the return value; it tells us whether we are returning to the
* host (to handle I/O etc.) or resuming the guest
*/
uasm_i_andi(&p, T0, V0, RESUME_HOST);
uasm_il_bnez(&p, &r, T0, label_return_to_host);
uasm_i_nop(&p);
p = kvm_mips_build_ret_to_guest(p);
uasm_l_return_to_host(&l, p);
p = kvm_mips_build_ret_to_host(p);
uasm_resolve_relocs(relocs, labels);
return p;
}
/**
* kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
* @addr: Address to start writing code.
*
* Assemble the code to handle return from the guest exit handler
* (kvm_mips_handle_exit()) back to the guest.
*
* Returns: Next address after end of written function.
*/
static void *kvm_mips_build_ret_to_guest(void *addr)
{
u32 *p = addr;
/* Put the saved pointer to vcpu (s0) back into the scratch register */
UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
/* Load up the Guest EBASE to minimize the window where BEV is set */
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
/* Switch EBASE back to the one used by KVM */
uasm_i_mfc0(&p, V1, C0_STATUS);
uasm_i_lui(&p, AT, ST0_BEV >> 16);
uasm_i_or(&p, K0, V1, AT);
uasm_i_mtc0(&p, K0, C0_STATUS);
uasm_i_ehb(&p);
build_set_exc_base(&p, T0);
/* Setup status register for running guest in UM */
uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
uasm_i_and(&p, V1, V1, AT);
uasm_i_mtc0(&p, V1, C0_STATUS);
uasm_i_ehb(&p);
p = kvm_mips_build_enter_guest(p);
return p;
}
/**
* kvm_mips_build_ret_to_host() - Assemble code to return to the host.
* @addr: Address to start writing code.
*
* Assemble the code to handle return from the guest exit handler
* (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
* function generated by kvm_mips_build_vcpu_run().
*
* Returns: Next address after end of written function.
*/
static void *kvm_mips_build_ret_to_host(void *addr)
{
u32 *p = addr;
unsigned int i;
/* EBASE is already pointing to Linux */
UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
/*
* r2/v0 is the return code, shift it down by 2 (arithmetic)
* to recover the err code
*/
uasm_i_sra(&p, K0, V0, 2);
uasm_i_move(&p, V0, K0);
/* Load context saved on the host stack */
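/* Restores s0-s7 and gp/sp/fp; t8/t9/k0/k1 are skipped, ra is reloaded below */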
for (i = 16; i < 31; ++i) {
if (i == 24)
i = 28;
UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
}
/* Restore RDHWR access */
UASM_i_LA_mostly(&p, K0, (long)&hwrena);
uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
uasm_i_mtc0(&p, K0, C0_HWRENA);
/* Restore RA, which is the address we will return to */
UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
uasm_i_jr(&p, RA);
uasm_i_nop(&p);
return p;
}
| linux-master | arch/mips/kvm/entry.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS MMU handling in the KVM module.
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <[email protected]>
*/
#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
/*
* KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
* for which pages need to be cached.
*/
#if defined(__PAGETABLE_PMD_FOLDED)
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
/**
* kvm_pgd_init() - Initialise KVM GPA page directory.
* @page: Pointer to page directory (PGD) for KVM GPA.
*
* Initialise a KVM GPA page directory with pointers to the invalid table, i.e.
* representing no mappings. This is similar to pgd_init(), however it
* initialises all the page directory pointers, not just the ones corresponding
* to the userland address space (since it is for the guest physical address
* space rather than a virtual address space).
*/
static void kvm_pgd_init(void *page)
{
unsigned long *p, *end;
unsigned long entry;
#ifdef __PAGETABLE_PMD_FOLDED
entry = (unsigned long)invalid_pte_table;
#else
entry = (unsigned long)invalid_pmd_table;
#endif
p = (unsigned long *)page;
end = p + PTRS_PER_PGD;
do {
p[0] = entry;
p[1] = entry;
p[2] = entry;
p[3] = entry;
p[4] = entry;
p += 8;
p[-3] = entry;
p[-2] = entry;
p[-1] = entry;
} while (p != end);
}
/**
* kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
*
* Allocate a blank KVM GPA page directory (PGD) for representing guest physical
* to host physical page mappings.
*
* Returns: Pointer to new KVM GPA page directory.
* NULL on allocation failure.
*/
pgd_t *kvm_pgd_alloc(void)
{
pgd_t *ret;
ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
if (ret)
kvm_pgd_init(ret);
return ret;
}
/**
* kvm_mips_walk_pgd() - Walk page table with optional allocation.
* @pgd: Page directory pointer.
* @addr: Address to index page table using.
* @cache: MMU page cache to allocate new page tables from, or NULL.
*
* Walk the page tables pointed to by @pgd to find the PTE corresponding to the
* address @addr. If page tables don't exist for @addr, they will be created
* from the MMU cache if @cache is not NULL.
*
* Returns: Pointer to pte_t corresponding to @addr.
* NULL if a page table doesn't exist for @addr and !@cache.
* NULL if a page table allocation failed.
*/
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
unsigned long addr)
{
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd += pgd_index(addr);
if (pgd_none(*pgd)) {
/* Not used on MIPS yet */
BUG();
return NULL;
}
p4d = p4d_offset(pgd, addr);
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
pmd_t *new_pmd;
if (!cache)
return NULL;
new_pmd = kvm_mmu_memory_cache_alloc(cache);
pmd_init(new_pmd);
pud_populate(NULL, pud, new_pmd);
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
pte_t *new_pte;
if (!cache)
return NULL;
new_pte = kvm_mmu_memory_cache_alloc(cache);
clear_page(new_pte);
pmd_populate_kernel(NULL, pmd, new_pte);
}
return pte_offset_kernel(pmd, addr);
}
/* Caller must hold kvm->mm_lock */
static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
struct kvm_mmu_memory_cache *cache,
unsigned long addr)
{
return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
}
/*
* kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
* Flush a range of guest physical address space from the VM's GPA page tables.
*/
static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
unsigned long end_gpa)
{
int i_min = pte_index(start_gpa);
int i_max = pte_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
int i;
for (i = i_min; i <= i_max; ++i) {
if (!pte_present(pte[i]))
continue;
set_pte(pte + i, __pte(0));
}
return safe_to_remove;
}
static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
unsigned long end_gpa)
{
pte_t *pte;
unsigned long end = ~0ul;
int i_min = pmd_index(start_gpa);
int i_max = pmd_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
int i;
for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
if (!pmd_present(pmd[i]))
continue;
pte = pte_offset_kernel(pmd + i, 0);
if (i == i_max)
end = end_gpa;
if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
pmd_clear(pmd + i);
pte_free_kernel(NULL, pte);
} else {
safe_to_remove = false;
}
}
return safe_to_remove;
}
static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
unsigned long end_gpa)
{
pmd_t *pmd;
unsigned long end = ~0ul;
int i_min = pud_index(start_gpa);
int i_max = pud_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
int i;
for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
if (!pud_present(pud[i]))
continue;
pmd = pmd_offset(pud + i, 0);
if (i == i_max)
end = end_gpa;
if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
pud_clear(pud + i);
pmd_free(NULL, pmd);
} else {
safe_to_remove = false;
}
}
return safe_to_remove;
}
static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
unsigned long end_gpa)
{
p4d_t *p4d;
pud_t *pud;
unsigned long end = ~0ul;
int i_min = pgd_index(start_gpa);
int i_max = pgd_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
int i;
for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
if (!pgd_present(pgd[i]))
continue;
p4d = p4d_offset(pgd, 0);
pud = pud_offset(p4d + i, 0);
if (i == i_max)
end = end_gpa;
if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
pgd_clear(pgd + i);
pud_free(NULL, pud);
} else {
safe_to_remove = false;
}
}
return safe_to_remove;
}
/**
* kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
* @kvm: KVM pointer.
* @start_gfn: Guest frame number of first page in GPA range to flush.
* @end_gfn: Guest frame number of last page in GPA range to flush.
*
* Flushes a range of GPA mappings from the GPA page tables.
*
* The caller must hold the @kvm->mmu_lock spinlock.
*
* Returns: Whether it's safe to remove the top level page directory because
* all lower levels have been removed.
*/
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
start_gfn << PAGE_SHIFT,
end_gfn << PAGE_SHIFT);
}
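/*
 * BUILD_PTE_RANGE_OP() generates kvm_mips_<name>_{pte,pmd,pud,pgd}() helpers
 * which walk a GPA range, apply @op to each present PTE, and return whether
 * any PTE was actually modified.
 */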
#define BUILD_PTE_RANGE_OP(name, op) \
static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
int i_min = pte_index(start); \
int i_max = pte_index(end); \
int i; \
pte_t old, new; \
\
for (i = i_min; i <= i_max; ++i) { \
if (!pte_present(pte[i])) \
continue; \
\
old = pte[i]; \
new = op(old); \
if (pte_val(new) == pte_val(old)) \
continue; \
set_pte(pte + i, new); \
ret = 1; \
} \
return ret; \
} \
\
/* returns true if anything was done */ \
static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
pte_t *pte; \
unsigned long cur_end = ~0ul; \
int i_min = pmd_index(start); \
int i_max = pmd_index(end); \
int i; \
\
for (i = i_min; i <= i_max; ++i, start = 0) { \
if (!pmd_present(pmd[i])) \
continue; \
\
pte = pte_offset_kernel(pmd + i, 0); \
if (i == i_max) \
cur_end = end; \
\
ret |= kvm_mips_##name##_pte(pte, start, cur_end); \
} \
return ret; \
} \
\
static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
pmd_t *pmd; \
unsigned long cur_end = ~0ul; \
int i_min = pud_index(start); \
int i_max = pud_index(end); \
int i; \
\
for (i = i_min; i <= i_max; ++i, start = 0) { \
if (!pud_present(pud[i])) \
continue; \
\
pmd = pmd_offset(pud + i, 0); \
if (i == i_max) \
cur_end = end; \
\
ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \
} \
return ret; \
} \
\
static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
p4d_t *p4d; \
pud_t *pud; \
unsigned long cur_end = ~0ul; \
int i_min = pgd_index(start); \
int i_max = pgd_index(end); \
int i; \
\
for (i = i_min; i <= i_max; ++i, start = 0) { \
if (!pgd_present(pgd[i])) \
continue; \
\
p4d = p4d_offset(pgd, 0); \
pud = pud_offset(p4d + i, 0); \
if (i == i_max) \
cur_end = end; \
\
ret |= kvm_mips_##name##_pud(pud, start, cur_end); \
} \
return ret; \
}
/*
* kvm_mips_mkclean_gpa_pt.
* Mark a range of guest physical address space clean (writes fault) in the VM's
* GPA page table to allow dirty page tracking.
*/
BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)
/**
* kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
* @kvm: KVM pointer.
* @start_gfn: Guest frame number of first page in GPA range to flush.
* @end_gfn: Guest frame number of last page in GPA range to flush.
*
* Make a range of GPA mappings clean so that guest writes will fault and
* trigger dirty page logging.
*
* The caller must hold the @kvm->mmu_lock spinlock.
*
* Returns: Whether any GPA mappings were modified, which would require
* derived mappings (GVA page tables & TLB entries) to be
* invalidated.
*/
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
start_gfn << PAGE_SHIFT,
end_gfn << PAGE_SHIFT);
}
/**
* kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
* @kvm: The KVM pointer
* @slot: The memory slot associated with mask
* @gfn_offset: The gfn offset in memory slot
* @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
* slot to be write protected
*
* Walks the bits set in @mask and write protects the associated PTEs. Caller must
* acquire @kvm->mmu_lock.
*/
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
gfn_t base_gfn = slot->base_gfn + gfn_offset;
gfn_t start = base_gfn + __ffs(mask);
gfn_t end = base_gfn + __fls(mask);
kvm_mips_mkclean_gpa_pt(kvm, start, end);
}
/*
* kvm_mips_mkold_gpa_pt.
* Mark a range of guest physical address space old (all accesses fault) in the
* VM's GPA page table to allow detection of commonly used pages.
*/
BUILD_PTE_RANGE_OP(mkold, pte_mkold)
static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
gfn_t end_gfn)
{
return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
start_gfn << PAGE_SHIFT,
end_gfn << PAGE_SHIFT);
}
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
kvm_mips_flush_gpa_pt(kvm, range->start, range->end);
return true;
}
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
gpa_t gpa = range->start << PAGE_SHIFT;
pte_t hva_pte = range->arg.pte;
pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
pte_t old_pte;
if (!gpa_pte)
return false;
/* Mapping may need adjusting depending on memslot flags */
old_pte = *gpa_pte;
if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
hva_pte = pte_mkclean(hva_pte);
else if (range->slot->flags & KVM_MEM_READONLY)
hva_pte = pte_wrprotect(hva_pte);
set_pte(gpa_pte, hva_pte);
/* Replacing an absent or old page doesn't need flushes */
if (!pte_present(old_pte) || !pte_young(old_pte))
return false;
/* Pages swapped, aged, moved, or cleaned require flushes */
return !pte_present(hva_pte) ||
!pte_young(hva_pte) ||
pte_pfn(old_pte) != pte_pfn(hva_pte) ||
(pte_dirty(old_pte) && !pte_dirty(hva_pte));
}
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
}
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
gpa_t gpa = range->start << PAGE_SHIFT;
pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
if (!gpa_pte)
return false;
return pte_young(*gpa_pte);
}
/**
* _kvm_mips_map_page_fast() - Fast path GPA fault handler.
* @vcpu: VCPU pointer.
* @gpa: Guest physical address of fault.
* @write_fault: Whether the fault was due to a write.
* @out_entry: New PTE for @gpa (written on success unless NULL).
* @out_buddy: New PTE for @gpa's buddy (written on success unless
* NULL).
*
* Perform fast path GPA fault handling, doing all that can be done without
* calling into KVM. This handles marking old pages young (for idle page
* tracking), and dirtying of clean pages (for dirty page logging).
*
* Returns: 0 on success, in which case we can update derived mappings and
* resume guest execution.
* -EFAULT on failure due to absent GPA mapping or write to
* read-only page, in which case KVM must be consulted.
*/
static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
bool write_fault,
pte_t *out_entry, pte_t *out_buddy)
{
struct kvm *kvm = vcpu->kvm;
gfn_t gfn = gpa >> PAGE_SHIFT;
pte_t *ptep;
kvm_pfn_t pfn = 0; /* silence bogus GCC warning */
bool pfn_valid = false;
int ret = 0;
spin_lock(&kvm->mmu_lock);
/* Fast path - just check GPA page table for an existing entry */
ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
if (!ptep || !pte_present(*ptep)) {
ret = -EFAULT;
goto out;
}
/* Track access to pages marked old */
if (!pte_young(*ptep)) {
set_pte(ptep, pte_mkyoung(*ptep));
pfn = pte_pfn(*ptep);
pfn_valid = true;
/* call kvm_set_pfn_accessed() after unlock */
}
if (write_fault && !pte_dirty(*ptep)) {
if (!pte_write(*ptep)) {
ret = -EFAULT;
goto out;
}
/* Track dirtying of writeable pages */
set_pte(ptep, pte_mkdirty(*ptep));
pfn = pte_pfn(*ptep);
mark_page_dirty(kvm, gfn);
kvm_set_pfn_dirty(pfn);
}
if (out_entry)
*out_entry = *ptep;
if (out_buddy)
*out_buddy = *ptep_buddy(ptep);
out:
spin_unlock(&kvm->mmu_lock);
if (pfn_valid)
kvm_set_pfn_accessed(pfn);
return ret;
}
/**
* kvm_mips_map_page() - Map a guest physical page.
* @vcpu: VCPU pointer.
* @gpa: Guest physical address of fault.
* @write_fault: Whether the fault was due to a write.
* @out_entry: New PTE for @gpa (written on success unless NULL).
* @out_buddy: New PTE for @gpa's buddy (written on success unless
* NULL).
*
* Handle GPA faults by creating a new GPA mapping (or updating an existing
* one).
*
* This takes care of marking pages young or dirty (idle/dirty page tracking),
* asking KVM for the corresponding PFN, and creating a mapping in the GPA page
* tables. Derived mappings (GVA page tables and TLBs) must be handled by the
* caller.
*
* Returns: 0 on success, in which case the caller may use the @out_entry
* and @out_buddy PTEs to update derived mappings and resume guest
* execution.
* -EFAULT if there is no memory region at @gpa or a write was
* attempted to a read-only memory region. This is usually handled
* as an MMIO access.
*/
static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
bool write_fault,
pte_t *out_entry, pte_t *out_buddy)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
gfn_t gfn = gpa >> PAGE_SHIFT;
int srcu_idx, err;
kvm_pfn_t pfn;
pte_t *ptep, entry, old_pte;
bool writeable;
unsigned long prot_bits;
unsigned long mmu_seq;
/* Try the fast path to handle old / clean pages */
srcu_idx = srcu_read_lock(&kvm->srcu);
err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
out_buddy);
if (!err)
goto out;
/* We need a minimum of cached pages ready for page table creation */
err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
if (err)
goto out;
retry:
/*
* Used to check for invalidations in progress, of the pfn that is
* returned by pfn_to_pfn_prot below.
*/
mmu_seq = kvm->mmu_invalidate_seq;
/*
* Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
* in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
* risk the page we get a reference to getting unmapped before we have a
* chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
*
* This smp_rmb() pairs with the effective smp_wmb() of the combination
* of the pte_unmap_unlock() after the PTE is zapped, and the
* spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
* mmu_invalidate_seq is incremented.
*/
smp_rmb();
/* Slow path - ask KVM core whether we can access this GPA */
pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
if (is_error_noslot_pfn(pfn)) {
err = -EFAULT;
goto out;
}
spin_lock(&kvm->mmu_lock);
/* Check if an invalidation has taken place since we got pfn */
if (mmu_invalidate_retry(kvm, mmu_seq)) {
/*
* This can happen when mappings are changed asynchronously, but
* also synchronously if a COW is triggered by
* gfn_to_pfn_prot().
*/
spin_unlock(&kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
goto retry;
}
/* Ensure page tables are allocated */
ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);
/* Set up the PTE */
prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default;
if (writeable) {
prot_bits |= _PAGE_WRITE;
if (write_fault) {
prot_bits |= __WRITEABLE;
mark_page_dirty(kvm, gfn);
kvm_set_pfn_dirty(pfn);
}
}
entry = pfn_pte(pfn, __pgprot(prot_bits));
/* Write the PTE */
old_pte = *ptep;
set_pte(ptep, entry);
err = 0;
if (out_entry)
*out_entry = *ptep;
if (out_buddy)
*out_buddy = *ptep_buddy(ptep);
spin_unlock(&kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
kvm_set_pfn_accessed(pfn);
out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
return err;
}
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu,
bool write_fault)
{
int ret;
ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
if (ret)
return ret;
/* Invalidate this entry in the TLB */
return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
/**
* kvm_mips_migrate_count() - Migrate timer.
* @vcpu: Virtual CPU.
*
* Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
* if it was running prior to being cancelled.
*
* Must be called when the VCPU is migrated to a different CPU to ensure that
* timer expiry during guest execution interrupts the guest and causes the
* interrupt to be delivered in a timely manner.
*/
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
hrtimer_restart(&vcpu->arch.comparecount_timer);
}
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
unsigned long flags;
kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
local_irq_save(flags);
vcpu->cpu = cpu;
if (vcpu->arch.last_sched_cpu != cpu) {
kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
/*
* Migrate the timer interrupt to the current CPU so that it
* always interrupts the guest and synchronously triggers a
* guest timer interrupt.
*/
kvm_mips_migrate_count(vcpu);
}
/* restore guest state to registers */
kvm_mips_callbacks->vcpu_load(vcpu, cpu);
local_irq_restore(flags);
}
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
unsigned long flags;
int cpu;
local_irq_save(flags);
cpu = smp_processor_id();
vcpu->arch.last_sched_cpu = cpu;
vcpu->cpu = -1;
/* save guest state in registers */
kvm_mips_callbacks->vcpu_put(vcpu, cpu);
local_irq_restore(flags);
}
| linux-master | arch/mips/kvm/mmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Joshua Henderson <[email protected]>
* Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
*/
#include <asm/io.h>
#include "early_pin.h"
#define PPS_BASE 0x1f800000
/* Input PPS Registers */
#define INT1R 0x1404
#define INT2R 0x1408
#define INT3R 0x140C
#define INT4R 0x1410
#define T2CKR 0x1418
#define T3CKR 0x141C
#define T4CKR 0x1420
#define T5CKR 0x1424
#define T6CKR 0x1428
#define T7CKR 0x142C
#define T8CKR 0x1430
#define T9CKR 0x1434
#define IC1R 0x1438
#define IC2R 0x143C
#define IC3R 0x1440
#define IC4R 0x1444
#define IC5R 0x1448
#define IC6R 0x144C
#define IC7R 0x1450
#define IC8R 0x1454
#define IC9R 0x1458
#define OCFAR 0x1460
#define U1RXR 0x1468
#define U1CTSR 0x146C
#define U2RXR 0x1470
#define U2CTSR 0x1474
#define U3RXR 0x1478
#define U3CTSR 0x147C
#define U4RXR 0x1480
#define U4CTSR 0x1484
#define U5RXR 0x1488
#define U5CTSR 0x148C
#define U6RXR 0x1490
#define U6CTSR 0x1494
#define SDI1R 0x149C
#define SS1R 0x14A0
#define SDI2R 0x14A8
#define SS2R 0x14AC
#define SDI3R 0x14B4
#define SS3R 0x14B8
#define SDI4R 0x14C0
#define SS4R 0x14C4
#define SDI5R 0x14CC
#define SS5R 0x14D0
#define SDI6R 0x14D8
#define SS6R 0x14DC
#define C1RXR 0x14E0
#define C2RXR 0x14E4
#define REFCLKI1R 0x14E8
#define REFCLKI3R 0x14F0
#define REFCLKI4R 0x14F4
static const struct
{
int function;
int reg;
} input_pin_reg[] = {
{ IN_FUNC_INT3, INT3R },
{ IN_FUNC_T2CK, T2CKR },
{ IN_FUNC_T6CK, T6CKR },
{ IN_FUNC_IC3, IC3R },
{ IN_FUNC_IC7, IC7R },
{ IN_FUNC_U1RX, U1RXR },
{ IN_FUNC_U2CTS, U2CTSR },
{ IN_FUNC_U5RX, U5RXR },
{ IN_FUNC_U6CTS, U6CTSR },
{ IN_FUNC_SDI1, SDI1R },
{ IN_FUNC_SDI3, SDI3R },
{ IN_FUNC_SDI5, SDI5R },
{ IN_FUNC_SS6, SS6R },
{ IN_FUNC_REFCLKI1, REFCLKI1R },
{ IN_FUNC_INT4, INT4R },
{ IN_FUNC_T5CK, T5CKR },
{ IN_FUNC_T7CK, T7CKR },
{ IN_FUNC_IC4, IC4R },
{ IN_FUNC_IC8, IC8R },
{ IN_FUNC_U3RX, U3RXR },
{ IN_FUNC_U4CTS, U4CTSR },
{ IN_FUNC_SDI2, SDI2R },
{ IN_FUNC_SDI4, SDI4R },
{ IN_FUNC_C1RX, C1RXR },
{ IN_FUNC_REFCLKI4, REFCLKI4R },
{ IN_FUNC_INT2, INT2R },
{ IN_FUNC_T3CK, T3CKR },
{ IN_FUNC_T8CK, T8CKR },
{ IN_FUNC_IC2, IC2R },
{ IN_FUNC_IC5, IC5R },
{ IN_FUNC_IC9, IC9R },
{ IN_FUNC_U1CTS, U1CTSR },
{ IN_FUNC_U2RX, U2RXR },
{ IN_FUNC_U5CTS, U5CTSR },
{ IN_FUNC_SS1, SS1R },
{ IN_FUNC_SS3, SS3R },
{ IN_FUNC_SS4, SS4R },
{ IN_FUNC_SS5, SS5R },
{ IN_FUNC_C2RX, C2RXR },
{ IN_FUNC_INT1, INT1R },
{ IN_FUNC_T4CK, T4CKR },
{ IN_FUNC_T9CK, T9CKR },
{ IN_FUNC_IC1, IC1R },
{ IN_FUNC_IC6, IC6R },
{ IN_FUNC_U3CTS, U3CTSR },
{ IN_FUNC_U4RX, U4RXR },
{ IN_FUNC_U6RX, U6RXR },
{ IN_FUNC_SS2, SS2R },
{ IN_FUNC_SDI6, SDI6R },
{ IN_FUNC_OCFA, OCFAR },
{ IN_FUNC_REFCLKI3, REFCLKI3R },
};
void pic32_pps_input(int function, int pin)
{
void __iomem *pps_base = ioremap(PPS_BASE, 0xF4);
int i;
for (i = 0; i < ARRAY_SIZE(input_pin_reg); i++) {
if (input_pin_reg[i].function == function) {
__raw_writel(pin, pps_base + input_pin_reg[i].reg);
break; /* fall through to iounmap() so the early mapping isn't leaked */
}
}
iounmap(pps_base);
}
/* Output PPS Registers */
#define RPA14R 0x1538
#define RPA15R 0x153C
#define RPB0R 0x1540
#define RPB1R 0x1544
#define RPB2R 0x1548
#define RPB3R 0x154C
#define RPB5R 0x1554
#define RPB6R 0x1558
#define RPB7R 0x155C
#define RPB8R 0x1560
#define RPB9R 0x1564
#define RPB10R 0x1568
#define RPB14R 0x1578
#define RPB15R 0x157C
#define RPC1R 0x1584
#define RPC2R 0x1588
#define RPC3R 0x158C
#define RPC4R 0x1590
#define RPC13R 0x15B4
#define RPC14R 0x15B8
#define RPD0R 0x15C0
#define RPD1R 0x15C4
#define RPD2R 0x15C8
#define RPD3R 0x15CC
#define RPD4R 0x15D0
#define RPD5R 0x15D4
#define RPD6R 0x15D8
#define RPD7R 0x15DC
#define RPD9R 0x15E4
#define RPD10R 0x15E8
#define RPD11R 0x15EC
#define RPD12R 0x15F0
#define RPD14R 0x15F8
#define RPD15R 0x15FC
#define RPE3R 0x160C
#define RPE5R 0x1614
#define RPE8R 0x1620
#define RPE9R 0x1624
#define RPF0R 0x1640
#define RPF1R 0x1644
#define RPF2R 0x1648
#define RPF3R 0x164C
#define RPF4R 0x1650
#define RPF5R 0x1654
#define RPF8R 0x1660
#define RPF12R 0x1670
#define RPF13R 0x1674
#define RPG0R 0x1680
#define RPG1R 0x1684
#define RPG6R 0x1698
#define RPG7R 0x169C
#define RPG8R 0x16A0
#define RPG9R 0x16A4
static const struct
{
int pin;
int reg;
} output_pin_reg[] = {
{ OUT_RPD2, RPD2R },
{ OUT_RPG8, RPG8R },
{ OUT_RPF4, RPF4R },
{ OUT_RPD10, RPD10R },
{ OUT_RPF1, RPF1R },
{ OUT_RPB9, RPB9R },
{ OUT_RPB10, RPB10R },
{ OUT_RPC14, RPC14R },
{ OUT_RPB5, RPB5R },
{ OUT_RPC1, RPC1R },
{ OUT_RPD14, RPD14R },
{ OUT_RPG1, RPG1R },
{ OUT_RPA14, RPA14R },
{ OUT_RPD6, RPD6R },
{ OUT_RPD3, RPD3R },
{ OUT_RPG7, RPG7R },
{ OUT_RPF5, RPF5R },
{ OUT_RPD11, RPD11R },
{ OUT_RPF0, RPF0R },
{ OUT_RPB1, RPB1R },
{ OUT_RPE5, RPE5R },
{ OUT_RPC13, RPC13R },
{ OUT_RPB3, RPB3R },
{ OUT_RPC4, RPC4R },
{ OUT_RPD15, RPD15R },
{ OUT_RPG0, RPG0R },
{ OUT_RPA15, RPA15R },
{ OUT_RPD7, RPD7R },
{ OUT_RPD9, RPD9R },
{ OUT_RPG6, RPG6R },
{ OUT_RPB8, RPB8R },
{ OUT_RPB15, RPB15R },
{ OUT_RPD4, RPD4R },
{ OUT_RPB0, RPB0R },
{ OUT_RPE3, RPE3R },
{ OUT_RPB7, RPB7R },
{ OUT_RPF12, RPF12R },
{ OUT_RPD12, RPD12R },
{ OUT_RPF8, RPF8R },
{ OUT_RPC3, RPC3R },
{ OUT_RPE9, RPE9R },
{ OUT_RPD1, RPD1R },
{ OUT_RPG9, RPG9R },
{ OUT_RPB14, RPB14R },
{ OUT_RPD0, RPD0R },
{ OUT_RPB6, RPB6R },
{ OUT_RPD5, RPD5R },
{ OUT_RPB2, RPB2R },
{ OUT_RPF3, RPF3R },
{ OUT_RPF13, RPF13R },
{ OUT_RPC2, RPC2R },
{ OUT_RPE8, RPE8R },
{ OUT_RPF2, RPF2R },
};
void pic32_pps_output(int function, int pin)
{
void __iomem *pps_base = ioremap(PPS_BASE, 0x170);
int i;
for (i = 0; i < ARRAY_SIZE(output_pin_reg); i++) {
if (output_pin_reg[i].pin == pin) {
__raw_writel(function,
pps_base + output_pin_reg[i].reg);
break; /* fall through to iounmap() so the early mapping isn't leaked */
}
}
iounmap(pps_base);
}
| linux-master | arch/mips/pic32/pic32mzda/early_pin.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Joshua Henderson <[email protected]>
* Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
*/
#include <asm/mach-pic32/pic32.h>
#include <asm/fw/fw.h>
#include <asm/setup.h>
#include "pic32mzda.h"
#include "early_pin.h"
/* Default early console parameters */
#define EARLY_CONSOLE_PORT 1
#define EARLY_CONSOLE_BAUDRATE 115200
#define UART_ENABLE BIT(15)
#define UART_ENABLE_RX BIT(12)
#define UART_ENABLE_TX BIT(10)
#define UART_TX_FULL BIT(9)
/* UART1(x == 0) - UART6(x == 5) */
#define UART_BASE(x) ((x) * 0x0200)
#define U_MODE(x) UART_BASE(x)
#define U_STA(x) (UART_BASE(x) + 0x10)
#define U_TXR(x) (UART_BASE(x) + 0x20)
#define U_BRG(x) (UART_BASE(x) + 0x40)
static void __iomem *uart_base;
static int console_port = -1;
static int __init configure_uart_pins(int port)
{
switch (port) {
case 1:
pic32_pps_input(IN_FUNC_U2RX, IN_RPB0);
pic32_pps_output(OUT_FUNC_U2TX, OUT_RPG9);
break;
case 5:
pic32_pps_input(IN_FUNC_U6RX, IN_RPD0);
pic32_pps_output(OUT_FUNC_U6TX, OUT_RPB8);
break;
default:
return -1;
}
return 0;
}
static void __init configure_uart(int port, int baud)
{
u32 pbclk;
pbclk = pic32_get_pbclk(2);
__raw_writel(0, uart_base + U_MODE(port));
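/* 16x oversampling baud generator: BRG = (PBCLK / baud) / 16 - 1 */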
__raw_writel(((pbclk / baud) / 16) - 1, uart_base + U_BRG(port));
__raw_writel(UART_ENABLE, uart_base + U_MODE(port));
__raw_writel(UART_ENABLE_TX | UART_ENABLE_RX,
uart_base + PIC32_SET(U_STA(port)));
}
static void __init setup_early_console(int port, int baud)
{
if (configure_uart_pins(port))
return;
console_port = port;
configure_uart(console_port, baud);
}
static char * __init pic32_getcmdline(void)
{
/*
* arch_mem_init() has not been called yet, so we don't have a real
* command line setup if using CONFIG_CMDLINE_BOOL.
*/
#ifdef CONFIG_CMDLINE_OVERRIDE
return CONFIG_CMDLINE;
#else
return fw_getcmdline();
#endif
}
static int __init get_port_from_cmdline(char *arch_cmdline)
{
char *s;
int port = -1;
if (!arch_cmdline || *arch_cmdline == '\0')
goto _out;
s = strstr(arch_cmdline, "earlyprintk=");
if (s) {
s = strstr(s, "ttyS");
if (s)
s += 4;
else
goto _out;
port = (*s) - '0';
}
_out:
return port;
}
static int __init get_baud_from_cmdline(char *arch_cmdline)
{
char *s;
int baud = -1;
if (!arch_cmdline || *arch_cmdline == '\0')
goto _out;
s = strstr(arch_cmdline, "earlyprintk=");
if (s) {
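/*
 * Expects "earlyprintk=ttyS<n>,<baud>": skip "ttyS", the single port
 * digit and the separator to reach the baud digits.
 */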
s = strstr(s, "ttyS");
if (s)
s += 6;
else
goto _out;
baud = 0;
while (*s >= '0' && *s <= '9')
baud = baud * 10 + *s++ - '0';
}
_out:
return baud;
}
void __init fw_init_early_console(void)
{
char *arch_cmdline = pic32_getcmdline();
int baud, port;
uart_base = ioremap(PIC32_BASE_UART, 0xc00);
baud = get_baud_from_cmdline(arch_cmdline);
port = get_port_from_cmdline(arch_cmdline);
if (port == -1)
port = EARLY_CONSOLE_PORT;
if (baud == -1)
baud = EARLY_CONSOLE_BAUDRATE;
setup_early_console(port, baud);
}
void prom_putchar(char c)
{
if (console_port >= 0) {
while (__raw_readl(
uart_base + U_STA(console_port)) & UART_TX_FULL)
;
__raw_writel(c, uart_base + U_TXR(console_port));
}
}
| linux-master | arch/mips/pic32/pic32mzda/early_console.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Joshua Henderson, [email protected]
* Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/platform_data/sdhci-pic32.h>
#include <asm/fw/fw.h>
#include <asm/mips-boards/generic.h>
#include <asm/prom.h>
#include "pic32mzda.h"
const char *get_system_type(void)
{
return "PIC32MZDA";
}
void __init plat_mem_setup(void)
{
void *dtb;
dtb = get_fdt();
if (!dtb) {
pr_err("pic32: no DTB found.\n");
return;
}
/*
* Load the builtin device tree. This causes the chosen node to be
* parsed, resulting in our memory regions being registered.
*/
__dt_setup_arch(dtb);
pr_info("Found following command lines\n");
pr_info(" boot_command_line: %s\n", boot_command_line);
pr_info(" arcs_cmdline : %s\n", arcs_cmdline);
#ifdef CONFIG_CMDLINE_BOOL
pr_info(" builtin_cmdline : %s\n", CONFIG_CMDLINE);
#endif
if (dtb != __dtb_start)
strscpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
#ifdef CONFIG_EARLY_PRINTK
fw_init_early_console();
#endif
pic32_config_init();
}
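/* Concatenate the bootloader-provided argv[] into arcs_cmdline, space separated */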
static __init void pic32_init_cmdline(int argc, char *argv[])
{
unsigned int count = COMMAND_LINE_SIZE - 1;
int i;
char *dst = &(arcs_cmdline[0]);
char *src;
for (i = 1; i < argc && count; ++i) {
src = argv[i];
while (*src && count) {
*dst++ = *src++;
--count;
}
*dst++ = ' ';
}
if (i > 1)
--dst;
*dst = 0;
}
void __init prom_init(void)
{
pic32_init_cmdline((int)fw_arg0, (char **)fw_arg1);
}
static struct pic32_sdhci_platform_data sdhci_data = {
.setup_dma = pic32_set_sdhci_adma_fifo_threshold,
};
static struct of_dev_auxdata pic32_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("microchip,pic32mzda-sdhci", 0, "sdhci", &sdhci_data),
{ /* sentinel */}
};
static int __init pic32_of_prepare_platform_data(struct of_dev_auxdata *lookup)
{
struct device_node *root, *np;
struct resource res;
root = of_find_node_by_path("/");
for (; lookup->compatible; lookup++) {
np = of_find_compatible_node(NULL, NULL, lookup->compatible);
if (np) {
lookup->name = (char *)np->name;
if (lookup->phys_addr) {
of_node_put(np);
continue;
}
if (!of_address_to_resource(np, 0, &res))
lookup->phys_addr = res.start;
of_node_put(np);
}
}
of_node_put(root);
return 0;
}
static int __init plat_of_setup(void)
{
if (!of_have_populated_dt())
panic("Device tree not present");
pic32_of_prepare_platform_data(pic32_auxdata_lookup);
if (of_platform_default_populate(NULL, pic32_auxdata_lookup, NULL))
panic("Failed to populate DT");
return 0;
}
arch_initcall(plat_of_setup);
| linux-master | arch/mips/pic32/pic32mzda/init.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Joshua Henderson <[email protected]>
* Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
*/
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_clk.h>
#include <linux/of_irq.h>
#include <asm/time.h>
#include "pic32mzda.h"
static const struct of_device_id pic32_infra_match[] = {
{ .compatible = "microchip,pic32mzda-infra", },
{ },
};
#define DEFAULT_CORE_TIMER_INTERRUPT 0
static unsigned int pic32_xlate_core_timer_irq(void)
{
struct device_node *node;
unsigned int irq;
node = of_find_matching_node(NULL, pic32_infra_match);
if (WARN_ON(!node))
goto default_map;
irq = irq_of_parse_and_map(node, 0);
of_node_put(node);
if (!irq)
goto default_map;
return irq;
default_map:
return irq_create_mapping(NULL, DEFAULT_CORE_TIMER_INTERRUPT);
}
unsigned int get_c0_compare_int(void)
{
return pic32_xlate_core_timer_irq();
}
void __init plat_time_init(void)
{
unsigned long rate = pic32_get_pbclk(7);
of_clk_init(NULL);
pr_info("CPU Clock: %ldMHz\n", rate / 1000000);
mips_hpt_frequency = rate / 2;
timer_probe();
}
| linux-master | arch/mips/pic32/pic32mzda/time.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Purna Chandra Mandal, [email protected]
* Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <asm/mach-pic32/pic32.h>
#include "pic32mzda.h"
#define PIC32_CFGCON 0x0000
#define PIC32_DEVID 0x0020
#define PIC32_SYSKEY 0x0030
#define PIC32_CFGEBIA 0x00c0
#define PIC32_CFGEBIC 0x00d0
#define PIC32_CFGCON2 0x00f0
#define PIC32_RCON 0x1240
static void __iomem *pic32_conf_base;
static DEFINE_SPINLOCK(config_lock);
static u32 pic32_reset_status;
static u32 pic32_conf_get_reg_field(u32 offset, u32 rshift, u32 mask)
{
u32 v;
v = readl(pic32_conf_base + offset);
v >>= rshift;
v &= mask;
return v;
}
static u32 pic32_conf_modify_atomic(u32 offset, u32 mask, u32 set)
{
u32 v;
unsigned long flags;
spin_lock_irqsave(&config_lock, flags);
v = readl(pic32_conf_base + offset);
v &= ~mask;
v |= (set & mask);
writel(v, pic32_conf_base + offset);
spin_unlock_irqrestore(&config_lock, flags);
return 0;
}
int pic32_enable_lcd(void)
{
return pic32_conf_modify_atomic(PIC32_CFGCON2, BIT(31), BIT(31));
}
int pic32_disable_lcd(void)
{
return pic32_conf_modify_atomic(PIC32_CFGCON2, BIT(31), 0);
}
int pic32_set_lcd_mode(int mode)
{
u32 mask = mode ? BIT(30) : 0;
return pic32_conf_modify_atomic(PIC32_CFGCON2, BIT(30), mask);
}
int pic32_set_sdhci_adma_fifo_threshold(u32 rthrsh, u32 wthrsh)
{
u32 clr, set;
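	/*
	 * CFGCON2 carries two 10-bit ADMA FIFO thresholds: the read
	 * threshold in bits 13:4 and the write threshold in bits 25:16.
	 * Clear both fields before programming the new values.
	 */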
clr = (0x3ff << 4) | (0x3ff << 16);
set = (rthrsh << 4) | (wthrsh << 16);
return pic32_conf_modify_atomic(PIC32_CFGCON2, clr, set);
}
void pic32_syskey_unlock_debug(const char *func, const ulong line)
{
void __iomem *syskey = pic32_conf_base + PIC32_SYSKEY;
pr_debug("%s: called from %s:%lu\n", __func__, func, line);
writel(0x00000000, syskey);
writel(0xAA996655, syskey);
writel(0x556699AA, syskey);
}
static u32 pic32_get_device_id(void)
{
return pic32_conf_get_reg_field(PIC32_DEVID, 0, 0x0fffffff);
}
static u32 pic32_get_device_version(void)
{
return pic32_conf_get_reg_field(PIC32_DEVID, 28, 0xf);
}
u32 pic32_get_boot_status(void)
{
return pic32_reset_status;
}
EXPORT_SYMBOL(pic32_get_boot_status);
void __init pic32_config_init(void)
{
pic32_conf_base = ioremap(PIC32_BASE_CONFIG, 0x110);
if (!pic32_conf_base)
panic("pic32: config base not mapped");
/* Boot Status */
pic32_reset_status = readl(pic32_conf_base + PIC32_RCON);
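	/* Clear the latched reset-cause bits via the CLR register alias. */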
writel(-1, PIC32_CLR(pic32_conf_base + PIC32_RCON));
/* Device Information */
pr_info("Device Id: 0x%08x, Device Ver: 0x%04x\n",
pic32_get_device_id(),
pic32_get_device_version());
}
| linux-master | arch/mips/pic32/pic32mzda/config.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Joshua Henderson <[email protected]>
* Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
*/
#include <asm/mach-pic32/pic32.h>
#include "pic32mzda.h"
/* Oscillators, PLL & clocks */
#define ICLK_MASK 0x00000080
#define PLLDIV_MASK 0x00000007
#define CUROSC_MASK 0x00000007
#define PLLMUL_MASK 0x0000007F
#define PB_MASK 0x00000007
#define FRC1 0
#define FRC2 7
#define SPLL 1
#define POSC 2
#define FRC_CLK 8000000
#define PIC32_POSC_FREQ 24000000
#define OSCCON 0x0000
#define SPLLCON 0x0020
#define PB1DIV 0x0140
u32 pic32_get_sysclk(void)
{
u32 osc_freq = 0;
u32 pllclk;
u32 frcdivn;
u32 osccon;
u32 spllcon;
int curr_osc;
u32 plliclk;
u32 pllidiv;
u32 pllodiv;
u32 pllmult;
u32 frcdiv;
void __iomem *osc_base = ioremap(PIC32_BASE_OSC, 0x200);
osccon = __raw_readl(osc_base + OSCCON);
spllcon = __raw_readl(osc_base + SPLLCON);
plliclk = (spllcon & ICLK_MASK);
pllidiv = ((spllcon >> 8) & PLLDIV_MASK) + 1;
pllodiv = ((spllcon >> 24) & PLLDIV_MASK);
pllmult = ((spllcon >> 16) & PLLMUL_MASK) + 1;
frcdiv = ((osccon >> 24) & PLLDIV_MASK);
pllclk = plliclk ? FRC_CLK : PIC32_POSC_FREQ;
frcdivn = ((1 << frcdiv) + 1) + (128 * (frcdiv == 7));
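	/*
	 * Decode the 3-bit PLL output divider: encodings 0-1 mean /2,
	 * 2-4 select 1 << pllodiv, and 5-7 mean /32.
	 */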
if (pllodiv < 2)
pllodiv = 2;
else if (pllodiv < 5)
pllodiv = (1 << pllodiv);
else
pllodiv = 32;
curr_osc = (int)((osccon >> 12) & CUROSC_MASK);
switch (curr_osc) {
case FRC1:
case FRC2:
osc_freq = FRC_CLK / frcdivn;
break;
case SPLL:
osc_freq = ((pllclk / pllidiv) * pllmult) / pllodiv;
break;
case POSC:
osc_freq = PIC32_POSC_FREQ;
break;
default:
break;
}
iounmap(osc_base);
return osc_freq;
}
u32 pic32_get_pbclk(int bus)
{
u32 clk_freq;
void __iomem *osc_base = ioremap(PIC32_BASE_OSC, 0x200);
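	/* Peripheral bus divider registers are spaced 0x10 apart, PB1DIV first. */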
u32 pbxdiv = PB1DIV + ((bus - 1) * 0x10);
u32 pbdiv = (__raw_readl(osc_base + pbxdiv) & PB_MASK) + 1;
iounmap(osc_base);
clk_freq = pic32_get_sysclk();
return clk_freq / pbdiv;
}
| linux-master | arch/mips/pic32/pic32mzda/early_clk.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Joshua Henderson <[email protected]>
* Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/irqchip.h>
#include <asm/irq.h>
void __init arch_init_irq(void)
{
irqchip_init();
}
| linux-master | arch/mips/pic32/common/irq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Joshua Henderson <[email protected]>
* Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/pm.h>
#include <asm/reboot.h>
#include <asm/mach-pic32/pic32.h>
#define PIC32_RSWRST 0x10
static void pic32_halt(void)
{
while (1) {
__asm__(".set push;\n"
".set arch=r4000;\n"
"wait;\n"
".set pop;\n"
);
}
}
static void pic32_machine_restart(char *command)
{
void __iomem *reg =
ioremap(PIC32_BASE_RESET + PIC32_RSWRST, sizeof(u32));
pic32_syskey_unlock();
/* magic write/read */
__raw_writel(1, reg);
(void)__raw_readl(reg);
pic32_halt();
}
static void pic32_machine_halt(void)
{
local_irq_disable();
pic32_halt();
}
static int __init mips_reboot_setup(void)
{
_machine_restart = pic32_machine_restart;
_machine_halt = pic32_machine_halt;
pm_power_off = pic32_machine_halt;
return 0;
}
arch_initcall(mips_reboot_setup);
| linux-master | arch/mips/pic32/common/reset.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Bus error event handling code for DECstation/DECsystem 3100
* and 2100 (KN01) systems equipped with parity error detection
* logic.
*
* Copyright (c) 2005 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/inst.h>
#include <asm/irq_regs.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/dec/kn01.h>
/* CP0 hazard avoidance. */
#define BARRIER \
__asm__ __volatile__( \
".set push\n\t" \
".set noreorder\n\t" \
"nop\n\t" \
".set pop\n\t")
/*
* Bits 7:0 of the Control Register are write-only -- the
* corresponding bits of the Status Register have a different
* meaning. Hence we use a cache. It speeds up things a bit
* as well.
*
* There is no default value -- it has to be initialized.
*/
u16 cached_kn01_csr;
static DEFINE_RAW_SPINLOCK(kn01_lock);
static inline void dec_kn01_be_ack(void)
{
volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
unsigned long flags;
raw_spin_lock_irqsave(&kn01_lock, flags);
*csr = cached_kn01_csr | KN01_CSR_MEMERR; /* Clear bus IRQ. */
iob();
raw_spin_unlock_irqrestore(&kn01_lock, flags);
}
static int dec_kn01_be_backend(struct pt_regs *regs, int is_fixup, int invoker)
{
volatile u32 *kn01_erraddr = (void *)CKSEG1ADDR(KN01_SLOT_BASE +
KN01_ERRADDR);
static const char excstr[] = "exception";
static const char intstr[] = "interrupt";
static const char cpustr[] = "CPU";
static const char mreadstr[] = "memory read";
static const char readstr[] = "read";
static const char writestr[] = "write";
static const char timestr[] = "timeout";
static const char paritystr[] = "parity error";
int data = regs->cp0_cause & 4;
unsigned int __user *pc = (unsigned int __user *)regs->cp0_epc +
((regs->cp0_cause & CAUSEF_BD) != 0);
union mips_instruction insn;
unsigned long entrylo, offset;
long asid, entryhi, vaddr;
const char *kind, *agent, *cycle, *event;
unsigned long address;
u32 erraddr = *kn01_erraddr;
int action = MIPS_BE_FATAL;
/* Ack ASAP, so that any subsequent errors get caught. */
dec_kn01_be_ack();
kind = invoker ? intstr : excstr;
agent = cpustr;
if (invoker)
address = erraddr;
else {
/* Bloody hardware doesn't record the address for reads... */
if (data) {
/* This never faults. */
__get_user(insn.word, pc);
vaddr = regs->regs[insn.i_format.rs] +
insn.i_format.simmediate;
} else
vaddr = (long)pc;
if (KSEGX(vaddr) == CKSEG0 || KSEGX(vaddr) == CKSEG1)
address = CPHYSADDR(vaddr);
else {
/* Peek at what physical address the CPU used. */
asid = read_c0_entryhi();
entryhi = asid & (PAGE_SIZE - 1);
entryhi |= vaddr & ~(PAGE_SIZE - 1);
write_c0_entryhi(entryhi);
BARRIER;
tlb_probe();
/* No need to check for presence. */
tlb_read();
entrylo = read_c0_entrylo0();
write_c0_entryhi(asid);
offset = vaddr & (PAGE_SIZE - 1);
address = (entrylo & ~(PAGE_SIZE - 1)) | offset;
}
}
/* Treat low 256MB as memory, high -- as I/O. */
if (address < 0x10000000) {
cycle = mreadstr;
event = paritystr;
} else {
cycle = invoker ? writestr : readstr;
event = timestr;
}
if (is_fixup)
action = MIPS_BE_FIXUP;
if (action != MIPS_BE_FIXUP)
printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n",
kind, agent, cycle, event, address);
return action;
}
int dec_kn01_be_handler(struct pt_regs *regs, int is_fixup)
{
return dec_kn01_be_backend(regs, is_fixup, 0);
}
irqreturn_t dec_kn01_be_interrupt(int irq, void *dev_id)
{
volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
struct pt_regs *regs = get_irq_regs();
int action;
if (!(*csr & KN01_CSR_MEMERR))
return IRQ_NONE; /* Must have been video. */
action = dec_kn01_be_backend(regs, 0, 1);
if (action == MIPS_BE_DISCARD)
return IRQ_HANDLED;
/*
* FIXME: Find the affected processes and kill them, otherwise
* we must die.
*
* The interrupt is asynchronously delivered thus EPC and RA
* may be irrelevant, but are printed for a reference.
*/
printk(KERN_ALERT "Fatal bus interrupt, epc == %08lx, ra == %08lx\n",
regs->cp0_epc, regs->regs[31]);
die("Unrecoverable bus error", regs);
}
void __init dec_kn01_be_init(void)
{
volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
unsigned long flags;
raw_spin_lock_irqsave(&kn01_lock, flags);
/* Preset write-only bits of the Control Register cache. */
cached_kn01_csr = *csr;
cached_kn01_csr &= KN01_CSR_STATUS | KN01_CSR_PARDIS | KN01_CSR_TXDIS;
cached_kn01_csr |= KN01_CSR_LEDS;
/* Enable parity error detection. */
cached_kn01_csr &= ~KN01_CSR_PARDIS;
*csr = cached_kn01_csr;
iob();
raw_spin_unlock_irqrestore(&kn01_lock, flags);
/* Clear any leftover errors from the firmware. */
dec_kn01_be_ack();
}
| linux-master | arch/mips/dec/kn01-berr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Bus error event handling code for 5000-series systems equipped
* with parity error detection logic, i.e. DECstation/DECsystem
* 5000/120, /125, /133 (KN02-BA), 5000/150 (KN04-BA) and Personal
* DECstation/DECsystem 5000/20, /25, /33 (KN02-CA), 5000/50
* (KN04-CA) systems.
*
* Copyright (c) 2005 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/addrspace.h>
#include <asm/cpu-type.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
#include <asm/dec/kn02ca.h>
#include <asm/dec/kn02xa.h>
#include <asm/dec/kn05.h>
static inline void dec_kn02xa_be_ack(void)
{
volatile u32 *mer = (void *)CKSEG1ADDR(KN02XA_MER);
volatile u32 *mem_intr = (void *)CKSEG1ADDR(KN02XA_MEM_INTR);
*mer = KN02CA_MER_INTR; /* Clear errors; keep the ARC IRQ. */
*mem_intr = 0; /* Any write clears the bus IRQ. */
iob();
}
static int dec_kn02xa_be_backend(struct pt_regs *regs, int is_fixup,
int invoker)
{
volatile u32 *kn02xa_mer = (void *)CKSEG1ADDR(KN02XA_MER);
volatile u32 *kn02xa_ear = (void *)CKSEG1ADDR(KN02XA_EAR);
static const char excstr[] = "exception";
static const char intstr[] = "interrupt";
static const char cpustr[] = "CPU";
static const char mreadstr[] = "memory read";
static const char readstr[] = "read";
static const char writestr[] = "write";
static const char timestr[] = "timeout";
static const char paritystr[] = "parity error";
static const char lanestat[][4] = { " OK", "BAD" };
const char *kind, *agent, *cycle, *event;
unsigned long address;
u32 mer = *kn02xa_mer;
u32 ear = *kn02xa_ear;
int action = MIPS_BE_FATAL;
/* Ack ASAP, so that any subsequent errors get caught. */
dec_kn02xa_be_ack();
kind = invoker ? intstr : excstr;
/* No DMA errors? */
agent = cpustr;
address = ear & KN02XA_EAR_ADDRESS;
/* Low 256MB is decoded as memory, high -- as TC. */
if (address < 0x10000000) {
cycle = mreadstr;
event = paritystr;
} else {
cycle = invoker ? writestr : readstr;
event = timestr;
}
if (is_fixup)
action = MIPS_BE_FIXUP;
if (action != MIPS_BE_FIXUP)
printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n",
kind, agent, cycle, event, address);
if (action != MIPS_BE_FIXUP && address < 0x10000000)
printk(KERN_ALERT " Byte lane status %#3x -- "
"#3: %s, #2: %s, #1: %s, #0: %s\n",
(mer & KN02XA_MER_BYTERR) >> 8,
lanestat[(mer & KN02XA_MER_BYTERR_3) != 0],
lanestat[(mer & KN02XA_MER_BYTERR_2) != 0],
lanestat[(mer & KN02XA_MER_BYTERR_1) != 0],
lanestat[(mer & KN02XA_MER_BYTERR_0) != 0]);
return action;
}
int dec_kn02xa_be_handler(struct pt_regs *regs, int is_fixup)
{
return dec_kn02xa_be_backend(regs, is_fixup, 0);
}
irqreturn_t dec_kn02xa_be_interrupt(int irq, void *dev_id)
{
struct pt_regs *regs = get_irq_regs();
int action = dec_kn02xa_be_backend(regs, 0, 1);
if (action == MIPS_BE_DISCARD)
return IRQ_HANDLED;
/*
* FIXME: Find the affected processes and kill them, otherwise
* we must die.
*
* The interrupt is asynchronously delivered thus EPC and RA
* may be irrelevant, but are printed for a reference.
*/
printk(KERN_ALERT "Fatal bus interrupt, epc == %08lx, ra == %08lx\n",
regs->cp0_epc, regs->regs[31]);
die("Unrecoverable bus error", regs);
}
void __init dec_kn02xa_be_init(void)
{
volatile u32 *mbcs = (void *)CKSEG1ADDR(KN4K_SLOT_BASE + KN4K_MB_CSR);
/* For KN04 we need to make sure EE (?) is enabled in the MB. */
if (current_cpu_type() == CPU_R4000SC)
*mbcs |= KN4K_MB_CSR_EE;
fast_iob();
/* Clear any leftover errors from the firmware. */
dec_kn02xa_be_ack();
}
| linux-master | arch/mips/dec/kn02xa-berr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DEC platform devices.
*
* Copyright (c) 2014 Maciej W. Rozycki
*/
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mc146818rtc.h>
#include <linux/platform_device.h>
static struct resource dec_rtc_resources[] = {
{
.name = "rtc",
.flags = IORESOURCE_MEM,
},
};
static struct cmos_rtc_board_info dec_rtc_info = {
.flags = CMOS_RTC_FLAGS_NOFREQ,
.address_space = 64,
};
static struct platform_device dec_rtc_device = {
.name = "rtc_cmos",
.id = PLATFORM_DEVID_NONE,
.dev.platform_data = &dec_rtc_info,
.resource = dec_rtc_resources,
.num_resources = ARRAY_SIZE(dec_rtc_resources),
};
static int __init dec_add_devices(void)
{
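	/*
	 * RTC_PORT() and the slot size are only known at run time, so
	 * the resource cannot be filled in statically.
	 */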
dec_rtc_resources[0].start = RTC_PORT(0);
dec_rtc_resources[0].end = RTC_PORT(0) + dec_kn_slot_size - 1;
return platform_device_register(&dec_rtc_device);
}
device_initcall(dec_add_devices);
| linux-master | arch/mips/dec/platform.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Reset a DECstation machine.
*
* Copyright (C) 199x the Anonymous
* Copyright (C) 2001, 2002, 2003 Maciej W. Rozycki
*/
#include <linux/interrupt.h>
#include <linux/linkage.h>
#include <asm/addrspace.h>
typedef void __noreturn (* noret_func_t)(void);
static inline void __noreturn back_to_prom(void)
{
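	/*
	 * 0x1fc00000 is the boot PROM entry point; calling it through
	 * KSEG1 (uncached) hands control back to the firmware.
	 */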
noret_func_t func = (void *)CKSEG1ADDR(0x1fc00000);
func();
}
void __noreturn dec_machine_restart(char *command)
{
back_to_prom();
}
void __noreturn dec_machine_halt(void)
{
back_to_prom();
}
void __noreturn dec_machine_power_off(void)
{
/* DECstations don't have a software power switch */
back_to_prom();
}
irqreturn_t dec_intr_halt(int irq, void *dev_id)
{
dec_machine_halt();
}
| linux-master | arch/mips/dec/reset.c |
/*
* TURBOchannel architecture calls.
*
* Copyright (c) Harald Koerfgen, 1998
* Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki
* Copyright (c) 2005 James Simmons
*
* This file is subject to the terms and conditions of the GNU
* General Public License. See the file "COPYING" in the main
* directory of this archive for more details.
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/tc.h>
#include <linux/types.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/paccess.h>
#include <asm/dec/interrupts.h>
#include <asm/dec/prom.h>
#include <asm/dec/system.h>
/*
* Protected read byte from TURBOchannel slot space.
*/
int tc_preadb(u8 *valp, void __iomem *addr)
{
return get_dbe(*valp, (u8 *)addr);
}
/*
* Get TURBOchannel bus information as specified by the spec, plus
* the slot space base address and the number of slots.
*/
int __init tc_bus_get_info(struct tc_bus *tbus)
{
if (!dec_tc_bus)
return -ENXIO;
memcpy(&tbus->info, rex_gettcinfo(), sizeof(tbus->info));
tbus->slot_base = CPHYSADDR((long)rex_slot_address(0));
switch (mips_machtype) {
case MACH_DS5000_200:
tbus->num_tcslots = 7;
break;
case MACH_DS5000_2X0:
case MACH_DS5900:
tbus->ext_slot_base = 0x20000000;
tbus->ext_slot_size = 0x20000000;
fallthrough;
case MACH_DS5000_1XX:
tbus->num_tcslots = 3;
break;
case MACH_DS5000_XX:
tbus->num_tcslots = 2;
default:
break;
}
return 0;
}
/*
* Get the IRQ for the specified slot.
*/
void __init tc_device_get_irq(struct tc_dev *tdev)
{
switch (tdev->slot) {
case 0:
tdev->interrupt = dec_interrupt[DEC_IRQ_TC0];
break;
case 1:
tdev->interrupt = dec_interrupt[DEC_IRQ_TC1];
break;
case 2:
tdev->interrupt = dec_interrupt[DEC_IRQ_TC2];
break;
/*
* Yuck! DS5000/200 onboard devices
*/
case 5:
tdev->interrupt = dec_interrupt[DEC_IRQ_TC5];
break;
case 6:
tdev->interrupt = dec_interrupt[DEC_IRQ_TC6];
break;
default:
tdev->interrupt = -1;
break;
}
}
| linux-master | arch/mips/dec/tc.c |
/*
* System-specific setup, especially interrupts.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Harald Koerfgen
* Copyright (C) 2000, 2001, 2002, 2003, 2005, 2020 Maciej W. Rozycki
*/
#include <linux/console.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/irqnr.h>
#include <linux/memblock.h>
#include <linux/param.h>
#include <linux/percpu-defs.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pm.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/irq.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/reboot.h>
#include <asm/sections.h>
#include <asm/time.h>
#include <asm/traps.h>
#include <asm/wbflush.h>
#include <asm/dec/interrupts.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/ioasic_ints.h>
#include <asm/dec/kn01.h>
#include <asm/dec/kn02.h>
#include <asm/dec/kn02ba.h>
#include <asm/dec/kn02ca.h>
#include <asm/dec/kn03.h>
#include <asm/dec/kn230.h>
#include <asm/dec/system.h>
extern void dec_machine_restart(char *command);
extern void dec_machine_halt(void);
extern void dec_machine_power_off(void);
extern irqreturn_t dec_intr_halt(int irq, void *dev_id);
unsigned long dec_kn_slot_base, dec_kn_slot_size;
EXPORT_SYMBOL(dec_kn_slot_base);
EXPORT_SYMBOL(dec_kn_slot_size);
int dec_tc_bus;
DEFINE_SPINLOCK(ioasic_ssr_lock);
EXPORT_SYMBOL(ioasic_ssr_lock);
volatile u32 *ioasic_base;
EXPORT_SYMBOL(ioasic_base);
/*
* IRQ routing and priority tables. Priorities are set as follows:
*
* KN01 KN230 KN02 KN02-BA KN02-CA KN03
*
* MEMORY CPU CPU CPU ASIC CPU CPU
* RTC CPU CPU CPU ASIC CPU CPU
* DMA - - - ASIC ASIC ASIC
* SERIAL0 CPU CPU CSR ASIC ASIC ASIC
* SERIAL1 - - - ASIC - ASIC
* SCSI CPU CPU CSR ASIC ASIC ASIC
* ETHERNET CPU * CSR ASIC ASIC ASIC
* other - - - ASIC - -
* TC2 - - CSR CPU ASIC ASIC
* TC1 - - CSR CPU ASIC ASIC
* TC0 - - CSR CPU ASIC ASIC
* other - CPU - CPU ASIC ASIC
* other - - - - CPU CPU
*
* * -- shared with SCSI
*/
int dec_interrupt[DEC_NR_INTS] = {
[0 ... DEC_NR_INTS - 1] = -1
};
EXPORT_SYMBOL(dec_interrupt);
int_ptr cpu_mask_nr_tbl[DEC_MAX_CPU_INTS][2] = {
{ { .i = ~0 }, { .p = dec_intr_unimplemented } },
};
int_ptr asic_mask_nr_tbl[DEC_MAX_ASIC_INTS][2] = {
{ { .i = ~0 }, { .p = asic_intr_unimplemented } },
};
int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_CPU_INR_FPU);
int *fpu_kstat_irq;
static irq_handler_t busirq_handler;
static unsigned int busirq_flags = IRQF_NO_THREAD;
/*
* Bus error (DBE/IBE exceptions and bus interrupts) handling setup.
*/
static void __init dec_be_init(void)
{
switch (mips_machtype) {
case MACH_DS23100: /* DS2100/DS3100 Pmin/Pmax */
mips_set_be_handler(dec_kn01_be_handler);
busirq_handler = dec_kn01_be_interrupt;
busirq_flags |= IRQF_SHARED;
dec_kn01_be_init();
break;
case MACH_DS5000_1XX: /* DS5000/1xx 3min */
case MACH_DS5000_XX: /* DS5000/xx Maxine */
mips_set_be_handler(dec_kn02xa_be_handler);
busirq_handler = dec_kn02xa_be_interrupt;
dec_kn02xa_be_init();
break;
case MACH_DS5000_200: /* DS5000/200 3max */
case MACH_DS5000_2X0: /* DS5000/240 3max+ */
case MACH_DS5900: /* DS5900 bigmax */
mips_set_be_handler(dec_ecc_be_handler);
busirq_handler = dec_ecc_be_interrupt;
dec_ecc_be_init();
break;
}
}
void __init plat_mem_setup(void)
{
board_be_init = dec_be_init;
wbflush_setup();
_machine_restart = dec_machine_restart;
_machine_halt = dec_machine_halt;
pm_power_off = dec_machine_power_off;
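	/* DECstations have no separate I/O port space, so mark the range empty. */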
ioport_resource.start = ~0UL;
ioport_resource.end = 0UL;
/* Stay away from the firmware working memory area for now. */
memblock_reserve(PHYS_OFFSET, __pa_symbol(&_text) - PHYS_OFFSET);
}
/*
* Machine-specific initialisation for KN01, aka DS2100 (aka Pmin)
* or DS3100 (aka Pmax).
*/
static int kn01_interrupt[DEC_NR_INTS] __initdata = {
[DEC_IRQ_CASCADE] = -1,
[DEC_IRQ_AB_RECV] = -1,
[DEC_IRQ_AB_XMIT] = -1,
[DEC_IRQ_DZ11] = DEC_CPU_IRQ_NR(KN01_CPU_INR_DZ11),
[DEC_IRQ_ASC] = -1,
[DEC_IRQ_FLOPPY] = -1,
[DEC_IRQ_FPU] = DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU),
[DEC_IRQ_HALT] = -1,
[DEC_IRQ_ISDN] = -1,
[DEC_IRQ_LANCE] = DEC_CPU_IRQ_NR(KN01_CPU_INR_LANCE),
[DEC_IRQ_BUS] = DEC_CPU_IRQ_NR(KN01_CPU_INR_BUS),
[DEC_IRQ_PSU] = -1,
[DEC_IRQ_RTC] = DEC_CPU_IRQ_NR(KN01_CPU_INR_RTC),
[DEC_IRQ_SCC0] = -1,
[DEC_IRQ_SCC1] = -1,
[DEC_IRQ_SII] = DEC_CPU_IRQ_NR(KN01_CPU_INR_SII),
[DEC_IRQ_TC0] = -1,
[DEC_IRQ_TC1] = -1,
[DEC_IRQ_TC2] = -1,
[DEC_IRQ_TIMER] = -1,
[DEC_IRQ_VIDEO] = DEC_CPU_IRQ_NR(KN01_CPU_INR_VIDEO),
[DEC_IRQ_ASC_MERR] = -1,
[DEC_IRQ_ASC_ERR] = -1,
[DEC_IRQ_ASC_DMA] = -1,
[DEC_IRQ_FLOPPY_ERR] = -1,
[DEC_IRQ_ISDN_ERR] = -1,
[DEC_IRQ_ISDN_RXDMA] = -1,
[DEC_IRQ_ISDN_TXDMA] = -1,
[DEC_IRQ_LANCE_MERR] = -1,
[DEC_IRQ_SCC0A_RXERR] = -1,
[DEC_IRQ_SCC0A_RXDMA] = -1,
[DEC_IRQ_SCC0A_TXERR] = -1,
[DEC_IRQ_SCC0A_TXDMA] = -1,
[DEC_IRQ_AB_RXERR] = -1,
[DEC_IRQ_AB_RXDMA] = -1,
[DEC_IRQ_AB_TXERR] = -1,
[DEC_IRQ_AB_TXDMA] = -1,
[DEC_IRQ_SCC1A_RXERR] = -1,
[DEC_IRQ_SCC1A_RXDMA] = -1,
[DEC_IRQ_SCC1A_TXERR] = -1,
[DEC_IRQ_SCC1A_TXDMA] = -1,
};
static int_ptr kn01_cpu_mask_nr_tbl[][2] __initdata = {
{ { .i = DEC_CPU_IRQ_MASK(KN01_CPU_INR_BUS) },
{ .i = DEC_CPU_IRQ_NR(KN01_CPU_INR_BUS) } },
{ { .i = DEC_CPU_IRQ_MASK(KN01_CPU_INR_RTC) },
{ .i = DEC_CPU_IRQ_NR(KN01_CPU_INR_RTC) } },
{ { .i = DEC_CPU_IRQ_MASK(KN01_CPU_INR_DZ11) },
{ .i = DEC_CPU_IRQ_NR(KN01_CPU_INR_DZ11) } },
{ { .i = DEC_CPU_IRQ_MASK(KN01_CPU_INR_SII) },
{ .i = DEC_CPU_IRQ_NR(KN01_CPU_INR_SII) } },
{ { .i = DEC_CPU_IRQ_MASK(KN01_CPU_INR_LANCE) },
{ .i = DEC_CPU_IRQ_NR(KN01_CPU_INR_LANCE) } },
{ { .i = DEC_CPU_IRQ_ALL },
{ .p = cpu_all_int } },
};
static void __init dec_init_kn01(void)
{
/* IRQ routing. */
memcpy(&dec_interrupt, &kn01_interrupt,
sizeof(kn01_interrupt));
/* CPU IRQ priorities. */
memcpy(&cpu_mask_nr_tbl, &kn01_cpu_mask_nr_tbl,
sizeof(kn01_cpu_mask_nr_tbl));
mips_cpu_irq_init();
} /* dec_init_kn01 */
/*
* Machine-specific initialisation for KN230, aka DS5100, aka MIPSmate.
*/
static int kn230_interrupt[DEC_NR_INTS] __initdata = {
[DEC_IRQ_CASCADE] = -1,
[DEC_IRQ_AB_RECV] = -1,
[DEC_IRQ_AB_XMIT] = -1,
[DEC_IRQ_DZ11] = DEC_CPU_IRQ_NR(KN230_CPU_INR_DZ11),
[DEC_IRQ_ASC] = -1,
[DEC_IRQ_FLOPPY] = -1,
[DEC_IRQ_FPU] = DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU),
[DEC_IRQ_HALT] = DEC_CPU_IRQ_NR(KN230_CPU_INR_HALT),
[DEC_IRQ_ISDN] = -1,
[DEC_IRQ_LANCE] = DEC_CPU_IRQ_NR(KN230_CPU_INR_LANCE),
[DEC_IRQ_BUS] = DEC_CPU_IRQ_NR(KN230_CPU_INR_BUS),
[DEC_IRQ_PSU] = -1,
[DEC_IRQ_RTC] = DEC_CPU_IRQ_NR(KN230_CPU_INR_RTC),
[DEC_IRQ_SCC0] = -1,
[DEC_IRQ_SCC1] = -1,
[DEC_IRQ_SII] = DEC_CPU_IRQ_NR(KN230_CPU_INR_SII),
[DEC_IRQ_TC0] = -1,
[DEC_IRQ_TC1] = -1,
[DEC_IRQ_TC2] = -1,
[DEC_IRQ_TIMER] = -1,
[DEC_IRQ_VIDEO] = -1,
[DEC_IRQ_ASC_MERR] = -1,
[DEC_IRQ_ASC_ERR] = -1,
[DEC_IRQ_ASC_DMA] = -1,
[DEC_IRQ_FLOPPY_ERR] = -1,
[DEC_IRQ_ISDN_ERR] = -1,
[DEC_IRQ_ISDN_RXDMA] = -1,
[DEC_IRQ_ISDN_TXDMA] = -1,
[DEC_IRQ_LANCE_MERR] = -1,
[DEC_IRQ_SCC0A_RXERR] = -1,
[DEC_IRQ_SCC0A_RXDMA] = -1,
[DEC_IRQ_SCC0A_TXERR] = -1,
[DEC_IRQ_SCC0A_TXDMA] = -1,
[DEC_IRQ_AB_RXERR] = -1,
[DEC_IRQ_AB_RXDMA] = -1,
[DEC_IRQ_AB_TXERR] = -1,
[DEC_IRQ_AB_TXDMA] = -1,
[DEC_IRQ_SCC1A_RXERR] = -1,
[DEC_IRQ_SCC1A_RXDMA] = -1,
[DEC_IRQ_SCC1A_TXERR] = -1,
[DEC_IRQ_SCC1A_TXDMA] = -1,
};
static int_ptr kn230_cpu_mask_nr_tbl[][2] __initdata = {
{ { .i = DEC_CPU_IRQ_MASK(KN230_CPU_INR_BUS) },
{ .i = DEC_CPU_IRQ_NR(KN230_CPU_INR_BUS) } },
{ { .i = DEC_CPU_IRQ_MASK(KN230_CPU_INR_RTC) },
{ .i = DEC_CPU_IRQ_NR(KN230_CPU_INR_RTC) } },
{ { .i = DEC_CPU_IRQ_MASK(KN230_CPU_INR_DZ11) },
{ .i = DEC_CPU_IRQ_NR(KN230_CPU_INR_DZ11) } },
{ { .i = DEC_CPU_IRQ_MASK(KN230_CPU_INR_SII) },
{ .i = DEC_CPU_IRQ_NR(KN230_CPU_INR_SII) } },
{ { .i = DEC_CPU_IRQ_ALL },
{ .p = cpu_all_int } },
};
static void __init dec_init_kn230(void)
{
/* IRQ routing. */
memcpy(&dec_interrupt, &kn230_interrupt,
sizeof(kn230_interrupt));
/* CPU IRQ priorities. */
memcpy(&cpu_mask_nr_tbl, &kn230_cpu_mask_nr_tbl,
sizeof(kn230_cpu_mask_nr_tbl));
mips_cpu_irq_init();
} /* dec_init_kn230 */
/*
* Machine-specific initialisation for KN02, aka DS5000/200, aka 3max.
*/
static int kn02_interrupt[DEC_NR_INTS] __initdata = {
[DEC_IRQ_CASCADE] = DEC_CPU_IRQ_NR(KN02_CPU_INR_CASCADE),
[DEC_IRQ_AB_RECV] = -1,
[DEC_IRQ_AB_XMIT] = -1,
[DEC_IRQ_DZ11] = KN02_IRQ_NR(KN02_CSR_INR_DZ11),
[DEC_IRQ_ASC] = KN02_IRQ_NR(KN02_CSR_INR_ASC),
[DEC_IRQ_FLOPPY] = -1,
[DEC_IRQ_FPU] = DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU),
[DEC_IRQ_HALT] = -1,
[DEC_IRQ_ISDN] = -1,
[DEC_IRQ_LANCE] = KN02_IRQ_NR(KN02_CSR_INR_LANCE),
[DEC_IRQ_BUS] = DEC_CPU_IRQ_NR(KN02_CPU_INR_BUS),
[DEC_IRQ_PSU] = -1,
[DEC_IRQ_RTC] = DEC_CPU_IRQ_NR(KN02_CPU_INR_RTC),
[DEC_IRQ_SCC0] = -1,
[DEC_IRQ_SCC1] = -1,
[DEC_IRQ_SII] = -1,
[DEC_IRQ_TC0] = KN02_IRQ_NR(KN02_CSR_INR_TC0),
[DEC_IRQ_TC1] = KN02_IRQ_NR(KN02_CSR_INR_TC1),
[DEC_IRQ_TC2] = KN02_IRQ_NR(KN02_CSR_INR_TC2),
[DEC_IRQ_TIMER] = -1,
[DEC_IRQ_VIDEO] = -1,
[DEC_IRQ_ASC_MERR] = -1,
[DEC_IRQ_ASC_ERR] = -1,
[DEC_IRQ_ASC_DMA] = -1,
[DEC_IRQ_FLOPPY_ERR] = -1,
[DEC_IRQ_ISDN_ERR] = -1,
[DEC_IRQ_ISDN_RXDMA] = -1,
[DEC_IRQ_ISDN_TXDMA] = -1,
[DEC_IRQ_LANCE_MERR] = -1,
[DEC_IRQ_SCC0A_RXERR] = -1,
[DEC_IRQ_SCC0A_RXDMA] = -1,
[DEC_IRQ_SCC0A_TXERR] = -1,
[DEC_IRQ_SCC0A_TXDMA] = -1,
[DEC_IRQ_AB_RXERR] = -1,
[DEC_IRQ_AB_RXDMA] = -1,
[DEC_IRQ_AB_TXERR] = -1,
[DEC_IRQ_AB_TXDMA] = -1,
[DEC_IRQ_SCC1A_RXERR] = -1,
[DEC_IRQ_SCC1A_RXDMA] = -1,
[DEC_IRQ_SCC1A_TXERR] = -1,
[DEC_IRQ_SCC1A_TXDMA] = -1,
};
static int_ptr kn02_cpu_mask_nr_tbl[][2] __initdata = {
{ { .i = DEC_CPU_IRQ_MASK(KN02_CPU_INR_BUS) },
{ .i = DEC_CPU_IRQ_NR(KN02_CPU_INR_BUS) } },
{ { .i = DEC_CPU_IRQ_MASK(KN02_CPU_INR_RTC) },
{ .i = DEC_CPU_IRQ_NR(KN02_CPU_INR_RTC) } },
{ { .i = DEC_CPU_IRQ_MASK(KN02_CPU_INR_CASCADE) },
{ .p = kn02_io_int } },
{ { .i = DEC_CPU_IRQ_ALL },
{ .p = cpu_all_int } },
};
static int_ptr kn02_asic_mask_nr_tbl[][2] __initdata = {
{ { .i = KN02_IRQ_MASK(KN02_CSR_INR_DZ11) },
{ .i = KN02_IRQ_NR(KN02_CSR_INR_DZ11) } },
{ { .i = KN02_IRQ_MASK(KN02_CSR_INR_ASC) },
{ .i = KN02_IRQ_NR(KN02_CSR_INR_ASC) } },
{ { .i = KN02_IRQ_MASK(KN02_CSR_INR_LANCE) },
{ .i = KN02_IRQ_NR(KN02_CSR_INR_LANCE) } },
{ { .i = KN02_IRQ_MASK(KN02_CSR_INR_TC2) },
{ .i = KN02_IRQ_NR(KN02_CSR_INR_TC2) } },
{ { .i = KN02_IRQ_MASK(KN02_CSR_INR_TC1) },
{ .i = KN02_IRQ_NR(KN02_CSR_INR_TC1) } },
{ { .i = KN02_IRQ_MASK(KN02_CSR_INR_TC0) },
{ .i = KN02_IRQ_NR(KN02_CSR_INR_TC0) } },
{ { .i = KN02_IRQ_ALL },
{ .p = kn02_all_int } },
};
static void __init dec_init_kn02(void)
{
/* IRQ routing. */
memcpy(&dec_interrupt, &kn02_interrupt,
sizeof(kn02_interrupt));
/* CPU IRQ priorities. */
memcpy(&cpu_mask_nr_tbl, &kn02_cpu_mask_nr_tbl,
sizeof(kn02_cpu_mask_nr_tbl));
/* KN02 CSR IRQ priorities. */
memcpy(&asic_mask_nr_tbl, &kn02_asic_mask_nr_tbl,
sizeof(kn02_asic_mask_nr_tbl));
mips_cpu_irq_init();
init_kn02_irqs(KN02_IRQ_BASE);
} /* dec_init_kn02 */
/*
* Machine-specific initialisation for KN02-BA, aka DS5000/1xx
* (xx = 20, 25, 33), aka 3min. Also applies to KN04(-BA), aka
* DS5000/150, aka 4min.
*/
static int kn02ba_interrupt[DEC_NR_INTS] __initdata = {
[DEC_IRQ_CASCADE] = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_CASCADE),
[DEC_IRQ_AB_RECV] = -1,
[DEC_IRQ_AB_XMIT] = -1,
[DEC_IRQ_DZ11] = -1,
[DEC_IRQ_ASC] = IO_IRQ_NR(KN02BA_IO_INR_ASC),
[DEC_IRQ_FLOPPY] = -1,
[DEC_IRQ_FPU] = DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU),
[DEC_IRQ_HALT] = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_HALT),
[DEC_IRQ_ISDN] = -1,
[DEC_IRQ_LANCE] = IO_IRQ_NR(KN02BA_IO_INR_LANCE),
[DEC_IRQ_BUS] = IO_IRQ_NR(KN02BA_IO_INR_BUS),
[DEC_IRQ_PSU] = IO_IRQ_NR(KN02BA_IO_INR_PSU),
[DEC_IRQ_RTC] = IO_IRQ_NR(KN02BA_IO_INR_RTC),
[DEC_IRQ_SCC0] = IO_IRQ_NR(KN02BA_IO_INR_SCC0),
[DEC_IRQ_SCC1] = IO_IRQ_NR(KN02BA_IO_INR_SCC1),
[DEC_IRQ_SII] = -1,
[DEC_IRQ_TC0] = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_TC0),
[DEC_IRQ_TC1] = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_TC1),
[DEC_IRQ_TC2] = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_TC2),
[DEC_IRQ_TIMER] = -1,
[DEC_IRQ_VIDEO] = -1,
[DEC_IRQ_ASC_MERR] = IO_IRQ_NR(IO_INR_ASC_MERR),
[DEC_IRQ_ASC_ERR] = IO_IRQ_NR(IO_INR_ASC_ERR),
[DEC_IRQ_ASC_DMA] = IO_IRQ_NR(IO_INR_ASC_DMA),
[DEC_IRQ_FLOPPY_ERR] = -1,
[DEC_IRQ_ISDN_ERR] = -1,
[DEC_IRQ_ISDN_RXDMA] = -1,
[DEC_IRQ_ISDN_TXDMA] = -1,
[DEC_IRQ_LANCE_MERR] = IO_IRQ_NR(IO_INR_LANCE_MERR),
[DEC_IRQ_SCC0A_RXERR] = IO_IRQ_NR(IO_INR_SCC0A_RXERR),
[DEC_IRQ_SCC0A_RXDMA] = IO_IRQ_NR(IO_INR_SCC0A_RXDMA),
[DEC_IRQ_SCC0A_TXERR] = IO_IRQ_NR(IO_INR_SCC0A_TXERR),
[DEC_IRQ_SCC0A_TXDMA] = IO_IRQ_NR(IO_INR_SCC0A_TXDMA),
[DEC_IRQ_AB_RXERR] = -1,
[DEC_IRQ_AB_RXDMA] = -1,
[DEC_IRQ_AB_TXERR] = -1,
[DEC_IRQ_AB_TXDMA] = -1,
[DEC_IRQ_SCC1A_RXERR] = IO_IRQ_NR(IO_INR_SCC1A_RXERR),
[DEC_IRQ_SCC1A_RXDMA] = IO_IRQ_NR(IO_INR_SCC1A_RXDMA),
[DEC_IRQ_SCC1A_TXERR] = IO_IRQ_NR(IO_INR_SCC1A_TXERR),
[DEC_IRQ_SCC1A_TXDMA] = IO_IRQ_NR(IO_INR_SCC1A_TXDMA),
};
static int_ptr kn02ba_cpu_mask_nr_tbl[][2] __initdata = {
{ { .i = DEC_CPU_IRQ_MASK(KN02BA_CPU_INR_CASCADE) },
{ .p = kn02xa_io_int } },
{ { .i = DEC_CPU_IRQ_MASK(KN02BA_CPU_INR_TC2) },
{ .i = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_TC2) } },
{ { .i = DEC_CPU_IRQ_MASK(KN02BA_CPU_INR_TC1) },
{ .i = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_TC1) } },
{ { .i = DEC_CPU_IRQ_MASK(KN02BA_CPU_INR_TC0) },
{ .i = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_TC0) } },
{ { .i = DEC_CPU_IRQ_ALL },
{ .p = cpu_all_int } },
};
static int_ptr kn02ba_asic_mask_nr_tbl[][2] __initdata = {
{ { .i = IO_IRQ_MASK(KN02BA_IO_INR_BUS) },
{ .i = IO_IRQ_NR(KN02BA_IO_INR_BUS) } },
{ { .i = IO_IRQ_MASK(KN02BA_IO_INR_RTC) },
{ .i = IO_IRQ_NR(KN02BA_IO_INR_RTC) } },
{ { .i = IO_IRQ_DMA },
{ .p = asic_dma_int } },
{ { .i = IO_IRQ_MASK(KN02BA_IO_INR_SCC0) },
{ .i = IO_IRQ_NR(KN02BA_IO_INR_SCC0) } },
{ { .i = IO_IRQ_MASK(KN02BA_IO_INR_SCC1) },
{ .i = IO_IRQ_NR(KN02BA_IO_INR_SCC1) } },
{ { .i = IO_IRQ_MASK(KN02BA_IO_INR_ASC) },
{ .i = IO_IRQ_NR(KN02BA_IO_INR_ASC) } },
{ { .i = IO_IRQ_MASK(KN02BA_IO_INR_LANCE) },
{ .i = IO_IRQ_NR(KN02BA_IO_INR_LANCE) } },
{ { .i = IO_IRQ_ALL },
{ .p = asic_all_int } },
};
static void __init dec_init_kn02ba(void)
{
/* IRQ routing. */
memcpy(&dec_interrupt, &kn02ba_interrupt,
sizeof(kn02ba_interrupt));
/* CPU IRQ priorities. */
memcpy(&cpu_mask_nr_tbl, &kn02ba_cpu_mask_nr_tbl,
sizeof(kn02ba_cpu_mask_nr_tbl));
/* I/O ASIC IRQ priorities. */
memcpy(&asic_mask_nr_tbl, &kn02ba_asic_mask_nr_tbl,
sizeof(kn02ba_asic_mask_nr_tbl));
mips_cpu_irq_init();
init_ioasic_irqs(IO_IRQ_BASE);
} /* dec_init_kn02ba */
/*
* Machine-specific initialisation for KN02-CA, aka DS5000/xx,
* (xx = 20, 25, 33), aka MAXine. Also applies to KN04(-CA), aka
* DS5000/50, aka 4MAXine.
*/
static int kn02ca_interrupt[DEC_NR_INTS] __initdata = {
[DEC_IRQ_CASCADE] = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_CASCADE),
[DEC_IRQ_AB_RECV] = IO_IRQ_NR(KN02CA_IO_INR_AB_RECV),
[DEC_IRQ_AB_XMIT] = IO_IRQ_NR(KN02CA_IO_INR_AB_XMIT),
[DEC_IRQ_DZ11] = -1,
[DEC_IRQ_ASC] = IO_IRQ_NR(KN02CA_IO_INR_ASC),
[DEC_IRQ_FLOPPY] = IO_IRQ_NR(KN02CA_IO_INR_FLOPPY),
[DEC_IRQ_FPU] = DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU),
[DEC_IRQ_HALT] = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_HALT),
[DEC_IRQ_ISDN] = IO_IRQ_NR(KN02CA_IO_INR_ISDN),
[DEC_IRQ_LANCE] = IO_IRQ_NR(KN02CA_IO_INR_LANCE),
[DEC_IRQ_BUS] = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_BUS),
[DEC_IRQ_PSU] = -1,
[DEC_IRQ_RTC] = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_RTC),
[DEC_IRQ_SCC0] = IO_IRQ_NR(KN02CA_IO_INR_SCC0),
[DEC_IRQ_SCC1] = -1,
[DEC_IRQ_SII] = -1,
[DEC_IRQ_TC0] = IO_IRQ_NR(KN02CA_IO_INR_TC0),
[DEC_IRQ_TC1] = IO_IRQ_NR(KN02CA_IO_INR_TC1),
[DEC_IRQ_TC2] = -1,
[DEC_IRQ_TIMER] = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_TIMER),
[DEC_IRQ_VIDEO] = IO_IRQ_NR(KN02CA_IO_INR_VIDEO),
[DEC_IRQ_ASC_MERR] = IO_IRQ_NR(IO_INR_ASC_MERR),
[DEC_IRQ_ASC_ERR] = IO_IRQ_NR(IO_INR_ASC_ERR),
[DEC_IRQ_ASC_DMA] = IO_IRQ_NR(IO_INR_ASC_DMA),
[DEC_IRQ_FLOPPY_ERR] = IO_IRQ_NR(IO_INR_FLOPPY_ERR),
[DEC_IRQ_ISDN_ERR] = IO_IRQ_NR(IO_INR_ISDN_ERR),
[DEC_IRQ_ISDN_RXDMA] = IO_IRQ_NR(IO_INR_ISDN_RXDMA),
[DEC_IRQ_ISDN_TXDMA] = IO_IRQ_NR(IO_INR_ISDN_TXDMA),
[DEC_IRQ_LANCE_MERR] = IO_IRQ_NR(IO_INR_LANCE_MERR),
[DEC_IRQ_SCC0A_RXERR] = IO_IRQ_NR(IO_INR_SCC0A_RXERR),
[DEC_IRQ_SCC0A_RXDMA] = IO_IRQ_NR(IO_INR_SCC0A_RXDMA),
[DEC_IRQ_SCC0A_TXERR] = IO_IRQ_NR(IO_INR_SCC0A_TXERR),
[DEC_IRQ_SCC0A_TXDMA] = IO_IRQ_NR(IO_INR_SCC0A_TXDMA),
[DEC_IRQ_AB_RXERR] = IO_IRQ_NR(IO_INR_AB_RXERR),
[DEC_IRQ_AB_RXDMA] = IO_IRQ_NR(IO_INR_AB_RXDMA),
[DEC_IRQ_AB_TXERR] = IO_IRQ_NR(IO_INR_AB_TXERR),
[DEC_IRQ_AB_TXDMA] = IO_IRQ_NR(IO_INR_AB_TXDMA),
[DEC_IRQ_SCC1A_RXERR] = -1,
[DEC_IRQ_SCC1A_RXDMA] = -1,
[DEC_IRQ_SCC1A_TXERR] = -1,
[DEC_IRQ_SCC1A_TXDMA] = -1,
};
static int_ptr kn02ca_cpu_mask_nr_tbl[][2] __initdata = {
{ { .i = DEC_CPU_IRQ_MASK(KN02CA_CPU_INR_BUS) },
{ .i = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_BUS) } },
{ { .i = DEC_CPU_IRQ_MASK(KN02CA_CPU_INR_RTC) },
{ .i = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_RTC) } },
{ { .i = DEC_CPU_IRQ_MASK(KN02CA_CPU_INR_CASCADE) },
{ .p = kn02xa_io_int } },
{ { .i = DEC_CPU_IRQ_ALL },
{ .p = cpu_all_int } },
};
static int_ptr kn02ca_asic_mask_nr_tbl[][2] __initdata = {
{ { .i = IO_IRQ_DMA },
{ .p = asic_dma_int } },
{ { .i = IO_IRQ_MASK(KN02CA_IO_INR_SCC0) },
{ .i = IO_IRQ_NR(KN02CA_IO_INR_SCC0) } },
{ { .i = IO_IRQ_MASK(KN02CA_IO_INR_ASC) },
{ .i = IO_IRQ_NR(KN02CA_IO_INR_ASC) } },
{ { .i = IO_IRQ_MASK(KN02CA_IO_INR_LANCE) },
{ .i = IO_IRQ_NR(KN02CA_IO_INR_LANCE) } },
{ { .i = IO_IRQ_MASK(KN02CA_IO_INR_TC1) },
{ .i = IO_IRQ_NR(KN02CA_IO_INR_TC1) } },
{ { .i = IO_IRQ_MASK(KN02CA_IO_INR_TC0) },
{ .i = IO_IRQ_NR(KN02CA_IO_INR_TC0) } },
{ { .i = IO_IRQ_ALL },
{ .p = asic_all_int } },
};
static void __init dec_init_kn02ca(void)
{
/* IRQ routing. */
memcpy(&dec_interrupt, &kn02ca_interrupt,
sizeof(kn02ca_interrupt));
/* CPU IRQ priorities. */
memcpy(&cpu_mask_nr_tbl, &kn02ca_cpu_mask_nr_tbl,
sizeof(kn02ca_cpu_mask_nr_tbl));
/* I/O ASIC IRQ priorities. */
memcpy(&asic_mask_nr_tbl, &kn02ca_asic_mask_nr_tbl,
sizeof(kn02ca_asic_mask_nr_tbl));
mips_cpu_irq_init();
init_ioasic_irqs(IO_IRQ_BASE);
} /* dec_init_kn02ca */
/*
* Machine-specific initialisation for KN03, aka DS5000/240,
* aka 3max+ and DS5900, aka BIGmax. Also applies to KN05, aka
* DS5000/260, aka 4max+ and DS5900/260.
*/
static int kn03_interrupt[DEC_NR_INTS] __initdata = {
[DEC_IRQ_CASCADE] = DEC_CPU_IRQ_NR(KN03_CPU_INR_CASCADE),
[DEC_IRQ_AB_RECV] = -1,
[DEC_IRQ_AB_XMIT] = -1,
[DEC_IRQ_DZ11] = -1,
[DEC_IRQ_ASC] = IO_IRQ_NR(KN03_IO_INR_ASC),
[DEC_IRQ_FLOPPY] = -1,
[DEC_IRQ_FPU] = DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU),
[DEC_IRQ_HALT] = DEC_CPU_IRQ_NR(KN03_CPU_INR_HALT),
[DEC_IRQ_ISDN] = -1,
[DEC_IRQ_LANCE] = IO_IRQ_NR(KN03_IO_INR_LANCE),
[DEC_IRQ_BUS] = DEC_CPU_IRQ_NR(KN03_CPU_INR_BUS),
[DEC_IRQ_PSU] = IO_IRQ_NR(KN03_IO_INR_PSU),
[DEC_IRQ_RTC] = DEC_CPU_IRQ_NR(KN03_CPU_INR_RTC),
[DEC_IRQ_SCC0] = IO_IRQ_NR(KN03_IO_INR_SCC0),
[DEC_IRQ_SCC1] = IO_IRQ_NR(KN03_IO_INR_SCC1),
[DEC_IRQ_SII] = -1,
[DEC_IRQ_TC0] = IO_IRQ_NR(KN03_IO_INR_TC0),
[DEC_IRQ_TC1] = IO_IRQ_NR(KN03_IO_INR_TC1),
[DEC_IRQ_TC2] = IO_IRQ_NR(KN03_IO_INR_TC2),
[DEC_IRQ_TIMER] = -1,
[DEC_IRQ_VIDEO] = -1,
[DEC_IRQ_ASC_MERR] = IO_IRQ_NR(IO_INR_ASC_MERR),
[DEC_IRQ_ASC_ERR] = IO_IRQ_NR(IO_INR_ASC_ERR),
[DEC_IRQ_ASC_DMA] = IO_IRQ_NR(IO_INR_ASC_DMA),
[DEC_IRQ_FLOPPY_ERR] = -1,
[DEC_IRQ_ISDN_ERR] = -1,
[DEC_IRQ_ISDN_RXDMA] = -1,
[DEC_IRQ_ISDN_TXDMA] = -1,
[DEC_IRQ_LANCE_MERR] = IO_IRQ_NR(IO_INR_LANCE_MERR),
[DEC_IRQ_SCC0A_RXERR] = IO_IRQ_NR(IO_INR_SCC0A_RXERR),
[DEC_IRQ_SCC0A_RXDMA] = IO_IRQ_NR(IO_INR_SCC0A_RXDMA),
[DEC_IRQ_SCC0A_TXERR] = IO_IRQ_NR(IO_INR_SCC0A_TXERR),
[DEC_IRQ_SCC0A_TXDMA] = IO_IRQ_NR(IO_INR_SCC0A_TXDMA),
[DEC_IRQ_AB_RXERR] = -1,
[DEC_IRQ_AB_RXDMA] = -1,
[DEC_IRQ_AB_TXERR] = -1,
[DEC_IRQ_AB_TXDMA] = -1,
[DEC_IRQ_SCC1A_RXERR] = IO_IRQ_NR(IO_INR_SCC1A_RXERR),
[DEC_IRQ_SCC1A_RXDMA] = IO_IRQ_NR(IO_INR_SCC1A_RXDMA),
[DEC_IRQ_SCC1A_TXERR] = IO_IRQ_NR(IO_INR_SCC1A_TXERR),
[DEC_IRQ_SCC1A_TXDMA] = IO_IRQ_NR(IO_INR_SCC1A_TXDMA),
};
static int_ptr kn03_cpu_mask_nr_tbl[][2] __initdata = {
{ { .i = DEC_CPU_IRQ_MASK(KN03_CPU_INR_BUS) },
{ .i = DEC_CPU_IRQ_NR(KN03_CPU_INR_BUS) } },
{ { .i = DEC_CPU_IRQ_MASK(KN03_CPU_INR_RTC) },
{ .i = DEC_CPU_IRQ_NR(KN03_CPU_INR_RTC) } },
{ { .i = DEC_CPU_IRQ_MASK(KN03_CPU_INR_CASCADE) },
{ .p = kn03_io_int } },
{ { .i = DEC_CPU_IRQ_ALL },
{ .p = cpu_all_int } },
};
static int_ptr kn03_asic_mask_nr_tbl[][2] __initdata = {
{ { .i = IO_IRQ_DMA },
{ .p = asic_dma_int } },
{ { .i = IO_IRQ_MASK(KN03_IO_INR_SCC0) },
{ .i = IO_IRQ_NR(KN03_IO_INR_SCC0) } },
{ { .i = IO_IRQ_MASK(KN03_IO_INR_SCC1) },
{ .i = IO_IRQ_NR(KN03_IO_INR_SCC1) } },
{ { .i = IO_IRQ_MASK(KN03_IO_INR_ASC) },
{ .i = IO_IRQ_NR(KN03_IO_INR_ASC) } },
{ { .i = IO_IRQ_MASK(KN03_IO_INR_LANCE) },
{ .i = IO_IRQ_NR(KN03_IO_INR_LANCE) } },
{ { .i = IO_IRQ_MASK(KN03_IO_INR_TC2) },
{ .i = IO_IRQ_NR(KN03_IO_INR_TC2) } },
{ { .i = IO_IRQ_MASK(KN03_IO_INR_TC1) },
{ .i = IO_IRQ_NR(KN03_IO_INR_TC1) } },
{ { .i = IO_IRQ_MASK(KN03_IO_INR_TC0) },
{ .i = IO_IRQ_NR(KN03_IO_INR_TC0) } },
{ { .i = IO_IRQ_ALL },
{ .p = asic_all_int } },
};
static void __init dec_init_kn03(void)
{
/* IRQ routing. */
memcpy(&dec_interrupt, &kn03_interrupt,
sizeof(kn03_interrupt));
/* CPU IRQ priorities. */
memcpy(&cpu_mask_nr_tbl, &kn03_cpu_mask_nr_tbl,
sizeof(kn03_cpu_mask_nr_tbl));
/* I/O ASIC IRQ priorities. */
memcpy(&asic_mask_nr_tbl, &kn03_asic_mask_nr_tbl,
sizeof(kn03_asic_mask_nr_tbl));
mips_cpu_irq_init();
init_ioasic_irqs(IO_IRQ_BASE);
} /* dec_init_kn03 */
void __init arch_init_irq(void)
{
switch (mips_machtype) {
case MACH_DS23100: /* DS2100/DS3100 Pmin/Pmax */
dec_init_kn01();
break;
case MACH_DS5100: /* DS5100 MIPSmate */
dec_init_kn230();
break;
case MACH_DS5000_200: /* DS5000/200 3max */
dec_init_kn02();
break;
case MACH_DS5000_1XX: /* DS5000/1xx 3min */
dec_init_kn02ba();
break;
case MACH_DS5000_2X0: /* DS5000/240 3max+ */
case MACH_DS5900: /* DS5900 bigmax */
dec_init_kn03();
break;
case MACH_DS5000_XX: /* Personal DS5000/xx */
dec_init_kn02ca();
break;
case MACH_DS5800: /* DS5800 Isis */
panic("Don't know how to set this up!");
break;
case MACH_DS5400: /* DS5400 MIPSfair */
panic("Don't know how to set this up!");
break;
case MACH_DS5500: /* DS5500 MIPSfair-2 */
panic("Don't know how to set this up!");
break;
}
/* Free the FPU interrupt if the exception is present. */
if (!cpu_has_nofpuex) {
cpu_fpu_mask = 0;
dec_interrupt[DEC_IRQ_FPU] = -1;
}
/* Free the halt interrupt unused on R4k systems. */
if (current_cpu_type() == CPU_R4000SC ||
current_cpu_type() == CPU_R4400SC)
dec_interrupt[DEC_IRQ_HALT] = -1;
/* Register board interrupts: FPU and cascade. */
if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT) &&
dec_interrupt[DEC_IRQ_FPU] >= 0 && cpu_has_fpu) {
struct irq_desc *desc_fpu;
int irq_fpu;
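		/*
		 * The FPU interrupt is handled outside the regular IRQ
		 * path; the dummy no_action handler mainly makes it show
		 * up in the interrupt statistics, with fpu_kstat_irq
		 * caching a pointer to its per-CPU counter.
		 */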
irq_fpu = dec_interrupt[DEC_IRQ_FPU];
if (request_irq(irq_fpu, no_action, IRQF_NO_THREAD, "fpu",
NULL))
pr_err("Failed to register fpu interrupt\n");
desc_fpu = irq_to_desc(irq_fpu);
fpu_kstat_irq = this_cpu_ptr(desc_fpu->kstat_irqs);
}
if (dec_interrupt[DEC_IRQ_CASCADE] >= 0) {
if (request_irq(dec_interrupt[DEC_IRQ_CASCADE], no_action,
IRQF_NO_THREAD, "cascade", NULL))
pr_err("Failed to register cascade interrupt\n");
}
/* Register the bus error interrupt. */
if (dec_interrupt[DEC_IRQ_BUS] >= 0 && busirq_handler) {
if (request_irq(dec_interrupt[DEC_IRQ_BUS], busirq_handler,
busirq_flags, "bus error", busirq_handler))
pr_err("Failed to register bus error interrupt\n");
}
/* Register the HALT interrupt. */
if (dec_interrupt[DEC_IRQ_HALT] >= 0) {
if (request_irq(dec_interrupt[DEC_IRQ_HALT], dec_intr_halt,
IRQF_NO_THREAD, "halt", NULL))
pr_err("Failed to register halt interrupt\n");
}
}
asmlinkage unsigned int dec_irq_dispatch(unsigned int irq)
{
do_IRQ(irq);
return 0;
}
| linux-master | arch/mips/dec/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DEC I/O ASIC interrupts.
*
* Copyright (c) 2002, 2003, 2013 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/ioasic_ints.h>
static int ioasic_irq_base;
static void unmask_ioasic_irq(struct irq_data *d)
{
u32 simr;
simr = ioasic_read(IO_REG_SIMR);
simr |= (1 << (d->irq - ioasic_irq_base));
ioasic_write(IO_REG_SIMR, simr);
}
static void mask_ioasic_irq(struct irq_data *d)
{
u32 simr;
simr = ioasic_read(IO_REG_SIMR);
simr &= ~(1 << (d->irq - ioasic_irq_base));
ioasic_write(IO_REG_SIMR, simr);
}
static void ack_ioasic_irq(struct irq_data *d)
{
mask_ioasic_irq(d);
fast_iob();
}
static struct irq_chip ioasic_irq_type = {
.name = "IO-ASIC",
.irq_ack = ack_ioasic_irq,
.irq_mask = mask_ioasic_irq,
.irq_mask_ack = ack_ioasic_irq,
.irq_unmask = unmask_ioasic_irq,
};
static void clear_ioasic_dma_irq(struct irq_data *d)
{
u32 sir;
sir = ~(1 << (d->irq - ioasic_irq_base));
ioasic_write(IO_REG_SIR, sir);
fast_iob();
}
static struct irq_chip ioasic_dma_irq_type = {
.name = "IO-ASIC-DMA",
.irq_ack = clear_ioasic_dma_irq,
.irq_mask = mask_ioasic_irq,
.irq_unmask = unmask_ioasic_irq,
.irq_eoi = clear_ioasic_dma_irq,
};
/*
* I/O ASIC implements two kinds of DMA interrupts, informational and
* error interrupts.
*
* The former do not stop DMA and should be cleared as soon as possible
* so that if they retrigger before the handler has completed, usually as
* a side effect of actions taken by the handler, then they are reissued.
* These use the `handle_edge_irq' handler that clears the request right
* away.
*
* The latter stop DMA and do not resume it until the interrupt has been
* cleared. This cannot be done until after a corrective action has been
* taken and this also means they will not retrigger. Therefore they use
* the `handle_fasteoi_irq' handler that only clears the request on the
* way out. Because MIPS processor interrupt inputs, one of which the I/O
* ASIC is cascaded to, are level-triggered it is recommended that error
* DMA interrupt action handlers are registered with the IRQF_ONESHOT flag
* set so that they are run with the interrupt line masked.
*
* This mask has `1' bits in the positions of informational interrupts.
*/
#define IO_IRQ_DMA_INFO \
(IO_IRQ_MASK(IO_INR_SCC0A_RXDMA) | \
IO_IRQ_MASK(IO_INR_SCC1A_RXDMA) | \
IO_IRQ_MASK(IO_INR_ISDN_TXDMA) | \
IO_IRQ_MASK(IO_INR_ISDN_RXDMA) | \
IO_IRQ_MASK(IO_INR_ASC_DMA))
void __init init_ioasic_irqs(int base)
{
int i;
/* Mask interrupts. */
ioasic_write(IO_REG_SIMR, 0);
fast_iob();
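	/*
	 * Ordinary I/O ASIC interrupts are level-triggered; DMA interrupts
	 * use edge or fasteoi handling depending on whether they are
	 * informational (see IO_IRQ_DMA_INFO above).
	 */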
for (i = base; i < base + IO_INR_DMA; i++)
irq_set_chip_and_handler(i, &ioasic_irq_type,
handle_level_irq);
for (; i < base + IO_IRQ_LINES; i++)
irq_set_chip_and_handler(i, &ioasic_dma_irq_type,
1 << (i - base) & IO_IRQ_DMA_INFO ?
handle_edge_irq : handle_fasteoi_irq);
ioasic_irq_base = base;
}
| linux-master | arch/mips/dec/ioasic-irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Copyright (C) 2000, 2003 Maciej W. Rozycki
*
* This file contains the time handling details for PC-style clocks as
* found in some MIPS systems.
*
*/
#include <linux/bcd.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/param.h>
#include <asm/cpu-features.h>
#include <asm/ds1287.h>
#include <asm/time.h>
#include <asm/dec/interrupts.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/machtype.h>
void read_persistent_clock64(struct timespec64 *ts)
{
unsigned int year, mon, day, hour, min, sec, real_year;
unsigned long flags;
spin_lock_irqsave(&rtc_lock, flags);
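	/*
	 * Re-read until the seconds register is stable, so the snapshot
	 * is not torn by an update in progress.
	 */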
do {
sec = CMOS_READ(RTC_SECONDS);
min = CMOS_READ(RTC_MINUTES);
hour = CMOS_READ(RTC_HOURS);
day = CMOS_READ(RTC_DAY_OF_MONTH);
mon = CMOS_READ(RTC_MONTH);
year = CMOS_READ(RTC_YEAR);
/*
* The PROM will reset the year to either '72 or '73.
* Therefore we store the real year separately, in one
* of unused BBU RAM locations.
*/
real_year = CMOS_READ(RTC_DEC_YEAR);
} while (sec != CMOS_READ(RTC_SECONDS));
spin_unlock_irqrestore(&rtc_lock, flags);
if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
sec = bcd2bin(sec);
min = bcd2bin(min);
hour = bcd2bin(hour);
day = bcd2bin(day);
mon = bcd2bin(mon);
year = bcd2bin(year);
}
year += real_year - 72 + 2000;
ts->tv_sec = mktime64(year, mon, day, hour, min, sec);
ts->tv_nsec = 0;
}
/*
* In order to set the CMOS clock precisely, update_persistent_clock64 has to
* be called 500 ms after the second nowtime has started, because when
* nowtime is written into the registers of the CMOS clock, it will
* jump to the next second precisely 500 ms later. Check the Dallas
* DS1287 data sheet for details.
*/
int update_persistent_clock64(struct timespec64 now)
{
time64_t nowtime = now.tv_sec;
int retval = 0;
int real_seconds, real_minutes, cmos_minutes;
unsigned char save_control, save_freq_select;
/* irq are locally disabled here */
spin_lock(&rtc_lock);
/* tell the clock it's being set */
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control | RTC_SET), RTC_CONTROL);
/* stop and reset prescaler */
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT);
cmos_minutes = CMOS_READ(RTC_MINUTES);
if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
cmos_minutes = bcd2bin(cmos_minutes);
/*
* since we're only adjusting minutes and seconds,
* don't interfere with hour overflow. This avoids
* messing with unknown time zones but requires your
* RTC not to be off by more than 15 minutes
*/
real_minutes = div_s64_rem(nowtime, 60, &real_seconds);
if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
real_minutes += 30; /* correct for half hour time zone */
real_minutes %= 60;
if (abs(real_minutes - cmos_minutes) < 30) {
if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
real_seconds = bin2bcd(real_seconds);
real_minutes = bin2bcd(real_minutes);
}
CMOS_WRITE(real_seconds, RTC_SECONDS);
CMOS_WRITE(real_minutes, RTC_MINUTES);
} else {
printk_once(KERN_NOTICE
"set_rtc_mmss: can't update from %d to %d\n",
cmos_minutes, real_minutes);
retval = -1;
}
/* The following flags have to be released exactly in this order,
* otherwise the DS1287 will not reset the oscillator and will not
* update precisely 500 ms later. You won't find this mentioned
* in the Dallas Semiconductor data sheets, but who believes data
* sheets anyway ... -- Markus Kuhn
*/
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
spin_unlock(&rtc_lock);
return retval;
}
void __init plat_time_init(void)
{
int ioasic_clock = 0;
u32 start, end;
int i = HZ / 8;
/* Set up the rate of periodic DS1287 interrupts. */
ds1287_set_base_clock(HZ);
/* On some I/O ASIC systems we have the I/O ASIC's counter. */
if (IOASIC)
ioasic_clock = dec_ioasic_clocksource_init() == 0;
if (cpu_has_counter) {
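		/*
		 * Calibrate the CP0 counter against the DS1287: count CP0
		 * cycles over HZ/8 periodic-interrupt periods and scale the
		 * result by 8 to get the frequency in Hz.
		 */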
ds1287_timer_state();
while (!ds1287_timer_state())
;
start = read_c0_count();
while (i--)
while (!ds1287_timer_state())
;
end = read_c0_count();
mips_hpt_frequency = (end - start) * 8;
printk(KERN_INFO "MIPS counter frequency %dHz\n",
mips_hpt_frequency);
/*
* All R4k DECstations suffer from the CP0 Count erratum,
		 * so we can't use the timer as both a clock source and a
		 * clock event at the same time.  An accurate wall clock is
		 * more important than a high-precision interval timer, so
		 * only use the timer as a clock source, and not as a clock
		 * event, if there's no I/O ASIC counter available to serve
		 * as a clock source.
*/
if (!ioasic_clock) {
init_r4k_clocksource();
mips_hpt_frequency = 0;
}
}
ds1287_clockevent_init(dec_interrupt[DEC_IRQ_RTC]);
}
| linux-master | arch/mips/dec/time.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Bus error event handling code for systems equipped with ECC
* handling logic, i.e. DECstation/DECsystem 5000/200 (KN02),
* 5000/240 (KN03), 5000/260 (KN05) and DECsystem 5900 (KN03),
* 5900/260 (KN05) systems.
*
* Copyright (c) 2003, 2005 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/irq_regs.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
#include <asm/dec/ecc.h>
#include <asm/dec/kn02.h>
#include <asm/dec/kn03.h>
#include <asm/dec/kn05.h>
static volatile u32 *kn0x_erraddr;
static volatile u32 *kn0x_chksyn;
static inline void dec_ecc_be_ack(void)
{
*kn0x_erraddr = 0; /* any write clears the IRQ */
iob();
}
static int dec_ecc_be_backend(struct pt_regs *regs, int is_fixup, int invoker)
{
static const char excstr[] = "exception";
static const char intstr[] = "interrupt";
static const char cpustr[] = "CPU";
static const char dmastr[] = "DMA";
static const char readstr[] = "read";
static const char mreadstr[] = "memory read";
static const char writestr[] = "write";
static const char mwritstr[] = "partial memory write";
static const char timestr[] = "timeout";
static const char overstr[] = "overrun";
static const char eccstr[] = "ECC error";
const char *kind, *agent, *cycle, *event;
const char *status = "", *xbit = "", *fmt = "";
unsigned long address;
u16 syn = 0, sngl;
int i = 0;
u32 erraddr = *kn0x_erraddr;
u32 chksyn = *kn0x_chksyn;
int action = MIPS_BE_FATAL;
/* For non-ECC ack ASAP, so that any subsequent errors get caught. */
if ((erraddr & (KN0X_EAR_VALID | KN0X_EAR_ECCERR)) == KN0X_EAR_VALID)
dec_ecc_be_ack();
kind = invoker ? intstr : excstr;
if (!(erraddr & KN0X_EAR_VALID)) {
/* No idea what happened. */
printk(KERN_ALERT "Unidentified bus error %s\n", kind);
return action;
}
agent = (erraddr & KN0X_EAR_CPU) ? cpustr : dmastr;
if (erraddr & KN0X_EAR_ECCERR) {
/* An ECC error on a CPU or DMA transaction. */
cycle = (erraddr & KN0X_EAR_WRITE) ? mwritstr : mreadstr;
event = eccstr;
} else {
/* A CPU timeout or a DMA overrun. */
cycle = (erraddr & KN0X_EAR_WRITE) ? writestr : readstr;
event = (erraddr & KN0X_EAR_CPU) ? timestr : overstr;
}
address = erraddr & KN0X_EAR_ADDRESS;
/* For ECC errors on reads adjust for MT pipelining. */
if ((erraddr & (KN0X_EAR_WRITE | KN0X_EAR_ECCERR)) == KN0X_EAR_ECCERR)
address = (address & ~0xfffLL) | ((address - 5) & 0xfffLL);
address <<= 2;
/* Only CPU errors are fixable. */
if (erraddr & KN0X_EAR_CPU && is_fixup)
action = MIPS_BE_FIXUP;
if (erraddr & KN0X_EAR_ECCERR) {
static const u8 data_sbit[32] = {
0x4f, 0x4a, 0x52, 0x54, 0x57, 0x58, 0x5b, 0x5d,
0x23, 0x25, 0x26, 0x29, 0x2a, 0x2c, 0x31, 0x34,
0x0e, 0x0b, 0x13, 0x15, 0x16, 0x19, 0x1a, 0x1c,
0x62, 0x64, 0x67, 0x68, 0x6b, 0x6d, 0x70, 0x75,
};
static const u8 data_mbit[25] = {
0x07, 0x0d, 0x1f,
0x2f, 0x32, 0x37, 0x38, 0x3b, 0x3d, 0x3e,
0x43, 0x45, 0x46, 0x49, 0x4c, 0x51, 0x5e,
0x61, 0x6e, 0x73, 0x76, 0x79, 0x7a, 0x7c, 0x7f,
};
static const char sbestr[] = "corrected single";
static const char dbestr[] = "uncorrectable double";
static const char mbestr[] = "uncorrectable multiple";
if (!(address & 0x4))
syn = chksyn; /* Low bank. */
else
syn = chksyn >> 16; /* High bank. */
if (!(syn & KN0X_ESR_VLDLO)) {
/* Ack now, no rewrite will happen. */
dec_ecc_be_ack();
fmt = KERN_ALERT "%s" "invalid\n";
} else {
sngl = syn & KN0X_ESR_SNGLO;
syn &= KN0X_ESR_SYNLO;
/*
* Multibit errors may be tagged incorrectly;
* check the syndrome explicitly.
*/
for (i = 0; i < 25; i++)
if (syn == data_mbit[i])
break;
if (i < 25) {
status = mbestr;
} else if (!sngl) {
status = dbestr;
} else {
volatile u32 *ptr =
(void *)CKSEG1ADDR(address);
*ptr = *ptr; /* Rewrite. */
iob();
status = sbestr;
action = MIPS_BE_DISCARD;
}
/* Ack now, now we've rewritten (or not). */
dec_ecc_be_ack();
if (syn && syn == (syn & -syn)) {
if (syn == 0x01) {
fmt = KERN_ALERT "%s"
"%#04x -- %s bit error "
"at check bit C%s\n";
xbit = "X";
} else {
fmt = KERN_ALERT "%s"
"%#04x -- %s bit error "
"at check bit C%s%u\n";
}
i = syn >> 2;
} else {
for (i = 0; i < 32; i++)
if (syn == data_sbit[i])
break;
if (i < 32)
fmt = KERN_ALERT "%s"
"%#04x -- %s bit error "
"at data bit D%s%u\n";
else
fmt = KERN_ALERT "%s"
"%#04x -- %s bit error\n";
}
}
}
if (action != MIPS_BE_FIXUP)
printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n",
kind, agent, cycle, event, address);
if (action != MIPS_BE_FIXUP && erraddr & KN0X_EAR_ECCERR)
printk(fmt, " ECC syndrome ", syn, status, xbit, i);
return action;
}
int dec_ecc_be_handler(struct pt_regs *regs, int is_fixup)
{
return dec_ecc_be_backend(regs, is_fixup, 0);
}
irqreturn_t dec_ecc_be_interrupt(int irq, void *dev_id)
{
struct pt_regs *regs = get_irq_regs();
int action = dec_ecc_be_backend(regs, 0, 1);
if (action == MIPS_BE_DISCARD)
return IRQ_HANDLED;
/*
* FIXME: Find the affected processes and kill them, otherwise
* we must die.
*
* The interrupt is asynchronously delivered thus EPC and RA
* may be irrelevant, but are printed for a reference.
*/
printk(KERN_ALERT "Fatal bus interrupt, epc == %08lx, ra == %08lx\n",
regs->cp0_epc, regs->regs[31]);
die("Unrecoverable bus error", regs);
}
/*
* Initialization differs a bit between KN02 and KN03/KN05, so we
* need two variants. Once set up, all systems can be handled the
* same way.
*/
static inline void dec_kn02_be_init(void)
{
volatile u32 *csr = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
kn0x_erraddr = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_ERRADDR);
kn0x_chksyn = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CHKSYN);
/* Preset write-only bits of the Control Register cache. */
cached_kn02_csr = *csr | KN02_CSR_LEDS;
/* Set normal ECC detection and generation. */
cached_kn02_csr &= ~(KN02_CSR_DIAGCHK | KN02_CSR_DIAGGEN);
/* Enable ECC correction. */
cached_kn02_csr |= KN02_CSR_CORRECT;
*csr = cached_kn02_csr;
iob();
}
static inline void dec_kn03_be_init(void)
{
volatile u32 *mcr = (void *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_MCR);
volatile u32 *mbcs = (void *)CKSEG1ADDR(KN4K_SLOT_BASE + KN4K_MB_CSR);
kn0x_erraddr = (void *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_ERRADDR);
kn0x_chksyn = (void *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_CHKSYN);
/*
* Set normal ECC detection and generation, enable ECC correction.
* For KN05 we also need to make sure EE (?) is enabled in the MB.
* Otherwise DBE/IBE exceptions would be masked but bus error
* interrupts would still arrive, resulting in an inevitable crash
* if get_dbe() triggers one.
*/
*mcr = (*mcr & ~(KN03_MCR_DIAGCHK | KN03_MCR_DIAGGEN)) |
KN03_MCR_CORRECT;
if (current_cpu_type() == CPU_R4400SC)
*mbcs |= KN4K_MB_CSR_EE;
fast_iob();
}
void __init dec_ecc_be_init(void)
{
if (mips_machtype == MACH_DS5000_200)
dec_kn02_be_init();
else
dec_kn03_be_init();
/* Clear any leftover errors from the firmware. */
dec_ecc_be_ack();
}
| linux-master | arch/mips/dec/ecc-berr.c |
/*
* Setup the right wbflush routine for the different DECstations.
*
* Created with information from:
* DECstation 3100 Desktop Workstation Functional Specification
* DECstation 5000/200 KN02 System Module Functional Specification
* mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Harald Koerfgen
* Copyright (C) 2002 Maciej W. Rozycki
*/
#include <linux/export.h>
#include <linux/init.h>
#include <asm/bootinfo.h>
#include <asm/wbflush.h>
#include <asm/barrier.h>
static void wbflush_kn01(void);
static void wbflush_kn210(void);
static void wbflush_mips(void);
void (*__wbflush) (void);
void __init wbflush_setup(void)
{
switch (mips_machtype) {
case MACH_DS23100:
case MACH_DS5000_200: /* DS5000 3max */
__wbflush = wbflush_kn01;
break;
case MACH_DS5100: /* DS5100 MIPSMATE */
__wbflush = wbflush_kn210;
break;
case MACH_DS5000_1XX: /* DS5000/100 3min */
case MACH_DS5000_XX: /* Personal DS5000/2x */
case MACH_DS5000_2X0: /* DS5000/240 3max+ */
case MACH_DS5900: /* DS5900 bigmax */
default:
__wbflush = wbflush_mips;
break;
}
}
/*
* For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions
* as part of Coprocessor 0.
*/
static void wbflush_kn01(void)
{
asm(".set\tpush\n\t"
".set\tnoreorder\n\t"
"1:\tbc0f\t1b\n\t"
"nop\n\t"
".set\tpop");
}
/*
* For the DS5100 the writeback buffer seems to be a part of Coprocessor 3.
* But CP3 has to be enabled first.
*/
static void wbflush_kn210(void)
{
asm(".set\tpush\n\t"
".set\tnoreorder\n\t"
"mfc0\t$2,$12\n\t"
"lui\t$3,0x8000\n\t"
"or\t$3,$2,$3\n\t"
"mtc0\t$3,$12\n\t"
"nop\n"
"1:\tbc3f\t1b\n\t"
"nop\n\t"
"mtc0\t$2,$12\n\t"
"nop\n\t"
".set\tpop"
: : : "$2", "$3");
}
/*
* I/O ASIC systems use a standard writeback buffer that gets flushed
* upon an uncached read.
*/
static void wbflush_mips(void)
{
__fast_iob();
}
EXPORT_SYMBOL(__wbflush);
| linux-master | arch/mips/dec/wbflush.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DECstation 5000/200 (KN02) Control and Status Register
* interrupts.
*
* Copyright (c) 2002, 2003, 2005 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <asm/dec/kn02.h>
/*
* Bits 7:0 of the Control Register are write-only -- the
* corresponding bits of the Status Register have a different
* meaning. Hence we use a cache. It speeds things up a bit
* as well.
*
* There is no default value -- it has to be initialized.
*/
u32 cached_kn02_csr;
static int kn02_irq_base;
static void unmask_kn02_irq(struct irq_data *d)
{
volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
KN02_CSR);
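/* Per-line I/O interrupt enable bits start at bit 16 of the CSR, hence the offset. */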
cached_kn02_csr |= (1 << (d->irq - kn02_irq_base + 16));
*csr = cached_kn02_csr;
}
static void mask_kn02_irq(struct irq_data *d)
{
volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
KN02_CSR);
cached_kn02_csr &= ~(1 << (d->irq - kn02_irq_base + 16));
*csr = cached_kn02_csr;
}
static void ack_kn02_irq(struct irq_data *d)
{
mask_kn02_irq(d);
iob();
}
static struct irq_chip kn02_irq_type = {
.name = "KN02-CSR",
.irq_ack = ack_kn02_irq,
.irq_mask = mask_kn02_irq,
.irq_mask_ack = ack_kn02_irq,
.irq_unmask = unmask_kn02_irq,
};
void __init init_kn02_irqs(int base)
{
volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
KN02_CSR);
int i;
/* Mask interrupts. */
cached_kn02_csr &= ~KN02_CSR_IOINTEN;
*csr = cached_kn02_csr;
iob();
for (i = base; i < base + KN02_IRQ_LINES; i++)
irq_set_chip_and_handler(i, &kn02_irq_type, handle_level_irq);
kn02_irq_base = base;
}
| linux-master | arch/mips/dec/kn02-irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* memory.c: memory initialisation code.
*
* Copyright (C) 1998 Harald Koerfgen, Frieder Streffer and Paul M. Antoine
* Copyright (C) 2000, 2002 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/types.h>
#include <asm/addrspace.h>
#include <asm/dec/machtype.h>
#include <asm/dec/prom.h>
#include <asm/page.h>
#include <asm/sections.h>
volatile unsigned long mem_err; /* So we know an error occurred */
/*
* Probe memory in 4MB chunks, waiting for an error to tell us we've fallen
* off the end of real memory. Only suitable for the 2100/3100s (PMAX).
*/
#define CHUNK_SIZE 0x400000
static __init void pmax_setup_memory_region(void)
{
volatile unsigned char *memory_page, dummy;
char old_handler[0x80];
extern char genexcept_early;
/* Install exception handler */
memcpy(&old_handler, (void *)(CKSEG0 + 0x80), 0x80);
memcpy((void *)(CKSEG0 + 0x80), &genexcept_early, 0x80);
/* read unmapped and uncached (KSEG1)
* DECstations have at least 4MB RAM
* Assume less than 480MB of RAM, as this is max for 5000/2xx
* FIXME this should be replaced by the first free page!
*/
for (memory_page = (unsigned char *)CKSEG1 + CHUNK_SIZE;
mem_err == 0 && memory_page < (unsigned char *)CKSEG1 + 0x1e00000;
memory_page += CHUNK_SIZE) {
dummy = *memory_page;
}
memcpy((void *)(CKSEG0 + 0x80), &old_handler, 0x80);
memblock_add(0, (unsigned long)memory_page - CKSEG1 - CHUNK_SIZE);
}
/*
* Use the REX prom calls to get hold of the memory bitmap, and thence
* determine memory size.
*/
static __init void rex_setup_memory_region(void)
{
int i, bitmap_size;
unsigned long mem_start = 0, mem_size = 0;
memmap *bm;
/* some free 64k */
bm = (memmap *)CKSEG0ADDR(0x28000);
bitmap_size = rex_getbitmap(bm);
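/*
 * Walk the bitmap in groups of 8 pages: fully populated groups extend
 * the current region, leading holes advance the start address, and any
 * later hole flushes the accumulated region to memblock.
 */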
for (i = 0; i < bitmap_size; i++) {
/* FIXME: very simplistically only add full sets of pages */
if (bm->bitmap[i] == 0xff)
mem_size += (8 * bm->pagesize);
else if (!mem_size)
mem_start += (8 * bm->pagesize);
else {
memblock_add(mem_start, mem_size);
mem_start += mem_size + (8 * bm->pagesize);
mem_size = 0;
}
}
if (mem_size)
memblock_add(mem_start, mem_size);
}
void __init prom_meminit(u32 magic)
{
if (!prom_is_rex(magic))
pmax_setup_memory_region();
else
rex_setup_memory_region();
}
void __init prom_free_prom_memory(void)
{
unsigned long end;
/*
* Free everything below the kernel itself but leave
* the first page reserved for the exception handlers.
*/
#if IS_ENABLED(CONFIG_DECLANCE)
/*
* Leave 128 KB reserved for Lance memory for
* IOASIC DECstations.
*
* XXX: save this address for use in dec_lance.c?
*/
if (IOASIC)
end = __pa(&_text) - 0x00020000;
else
#endif
end = __pa(&_text);
free_init_pages("unused PROM memory", PAGE_SIZE, end);
}
| linux-master | arch/mips/dec/prom/memory.c |
// SPDX-License-Identifier: GPL-2.0
/*
* identify.c: machine identification code.
*
* Copyright (C) 1998 Harald Koerfgen and Paul M. Antoine
* Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mc146818rtc.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/bootinfo.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/kn01.h>
#include <asm/dec/kn02.h>
#include <asm/dec/kn02ba.h>
#include <asm/dec/kn02ca.h>
#include <asm/dec/kn03.h>
#include <asm/dec/kn230.h>
#include <asm/dec/prom.h>
#include <asm/dec/system.h>
#include "dectypes.h"
static const char *dec_system_strings[] = {
[MACH_DSUNKNOWN] = "unknown DECstation",
[MACH_DS23100] = "DECstation 2100/3100",
[MACH_DS5100] = "DECsystem 5100",
[MACH_DS5000_200] = "DECstation 5000/200",
[MACH_DS5000_1XX] = "DECstation 5000/1xx",
[MACH_DS5000_XX] = "Personal DECstation 5000/xx",
[MACH_DS5000_2X0] = "DECstation 5000/2x0",
[MACH_DS5400] = "DECsystem 5400",
[MACH_DS5500] = "DECsystem 5500",
[MACH_DS5800] = "DECsystem 5800",
[MACH_DS5900] = "DECsystem 5900",
};
const char *get_system_type(void)
{
#define STR_BUF_LEN 64
static char system[STR_BUF_LEN];
static int called = 0;
if (called == 0) {
called = 1;
snprintf(system, STR_BUF_LEN, "Digital %s",
dec_system_strings[mips_machtype]);
}
return system;
}
/*
* Setup essential system-specific memory addresses. We need them
* early. Semantically the functions belong to prom/init.c, but they
* are compact enough we want them inlined. --macro
*/
volatile u8 *dec_rtc_base;
EXPORT_SYMBOL(dec_rtc_base);
static inline void prom_init_kn01(void)
{
dec_kn_slot_base = KN01_SLOT_BASE;
dec_kn_slot_size = KN01_SLOT_SIZE;
dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN01_RTC);
}
static inline void prom_init_kn230(void)
{
dec_kn_slot_base = KN01_SLOT_BASE;
dec_kn_slot_size = KN01_SLOT_SIZE;
dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN01_RTC);
}
static inline void prom_init_kn02(void)
{
dec_kn_slot_base = KN02_SLOT_BASE;
dec_kn_slot_size = KN02_SLOT_SIZE;
dec_tc_bus = 1;
dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN02_RTC);
}
static inline void prom_init_kn02xa(void)
{
dec_kn_slot_base = KN02XA_SLOT_BASE;
dec_kn_slot_size = IOASIC_SLOT_SIZE;
dec_tc_bus = 1;
ioasic_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_IOCTL);
dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_TOY);
}
static inline void prom_init_kn03(void)
{
dec_kn_slot_base = KN03_SLOT_BASE;
dec_kn_slot_size = IOASIC_SLOT_SIZE;
dec_tc_bus = 1;
ioasic_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_IOCTL);
dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_TOY);
}
void __init prom_identify_arch(u32 magic)
{
unsigned char dec_cpunum, dec_firmrev, dec_etc, dec_systype;
u32 dec_sysid;
if (!prom_is_rex(magic)) {
dec_sysid = simple_strtoul(prom_getenv("systype"),
(char **)0, 0);
} else {
dec_sysid = rex_getsysid();
if (dec_sysid == 0) {
printk("Zero sysid returned from PROM! "
"Assuming a PMAX-like machine.\n");
dec_sysid = 1;
}
}
dec_cpunum = (dec_sysid & 0xff000000) >> 24;
dec_systype = (dec_sysid & 0xff0000) >> 16;
dec_firmrev = (dec_sysid & 0xff00) >> 8;
dec_etc = dec_sysid & 0xff;
/*
* FIXME: This may not be an exhaustive list of DECStations/Servers!
* Put all model-specific initialisation calls here.
*/
switch (dec_systype) {
case DS2100_3100:
mips_machtype = MACH_DS23100;
prom_init_kn01();
break;
case DS5100: /* DS5100 MIPSMATE */
mips_machtype = MACH_DS5100;
prom_init_kn230();
break;
case DS5000_200: /* DS5000 3max */
mips_machtype = MACH_DS5000_200;
prom_init_kn02();
break;
case DS5000_1XX: /* DS5000/100 3min */
mips_machtype = MACH_DS5000_1XX;
prom_init_kn02xa();
break;
case DS5000_2X0: /* DS5000/240 3max+ or DS5900 bigmax */
mips_machtype = MACH_DS5000_2X0;
prom_init_kn03();
if (!(ioasic_read(IO_REG_SIR) & KN03_IO_INR_3MAXP))
mips_machtype = MACH_DS5900;
break;
case DS5000_XX: /* Personal DS5000/xx maxine */
mips_machtype = MACH_DS5000_XX;
prom_init_kn02xa();
break;
case DS5800: /* DS5800 Isis */
mips_machtype = MACH_DS5800;
break;
case DS5400: /* DS5400 MIPSfair */
mips_machtype = MACH_DS5400;
break;
case DS5500: /* DS5500 MIPSfair-2 */
mips_machtype = MACH_DS5500;
break;
default:
mips_machtype = MACH_DSUNKNOWN;
break;
}
if (mips_machtype == MACH_DSUNKNOWN)
printk("This is an %s, id is %x\n",
dec_system_strings[mips_machtype], dec_systype);
else
printk("This is a %s\n", dec_system_strings[mips_machtype]);
}
| linux-master | arch/mips/dec/prom/identify.c |
// SPDX-License-Identifier: GPL-2.0
/*
* init.c: PROM library initialisation code.
*
* Copyright (C) 1998 Harald Koerfgen
* Copyright (C) 2002, 2004 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/processor.h>
#include <asm/dec/prom.h>
int (*__rex_bootinit)(void);
int (*__rex_bootread)(void);
int (*__rex_getbitmap)(memmap *);
unsigned long *(*__rex_slot_address)(int);
void *(*__rex_gettcinfo)(void);
int (*__rex_getsysid)(void);
void (*__rex_clear_cache)(void);
int (*__prom_getchar)(void);
char *(*__prom_getenv)(char *);
int (*__prom_printf)(char *, ...);
int (*__pmax_open)(char*, int);
int (*__pmax_lseek)(int, long, int);
int (*__pmax_read)(int, void *, int);
int (*__pmax_close)(int);
/*
* Detect which PROM the DECSTATION has, and set the callback vectors
* appropriately.
*/
void __init which_prom(s32 magic, s32 *prom_vec)
{
/*
* No sign of the REX PROM's magic number means we assume a non-REX
* machine (i.e. we're on a DS2100/3100, DS5100 or DS5000/2xx)
*/
if (prom_is_rex(magic)) {
/*
* Set up prom abstraction structure with REX entry points.
*/
__rex_bootinit =
(void *)(long)*(prom_vec + REX_PROM_BOOTINIT);
__rex_bootread =
(void *)(long)*(prom_vec + REX_PROM_BOOTREAD);
__rex_getbitmap =
(void *)(long)*(prom_vec + REX_PROM_GETBITMAP);
__prom_getchar =
(void *)(long)*(prom_vec + REX_PROM_GETCHAR);
__prom_getenv =
(void *)(long)*(prom_vec + REX_PROM_GETENV);
__rex_getsysid =
(void *)(long)*(prom_vec + REX_PROM_GETSYSID);
__rex_gettcinfo =
(void *)(long)*(prom_vec + REX_PROM_GETTCINFO);
__prom_printf =
(void *)(long)*(prom_vec + REX_PROM_PRINTF);
__rex_slot_address =
(void *)(long)*(prom_vec + REX_PROM_SLOTADDR);
__rex_clear_cache =
(void *)(long)*(prom_vec + REX_PROM_CLEARCACHE);
} else {
/*
* Set up prom abstraction structure with non-REX entry points.
*/
__prom_getchar = (void *)PMAX_PROM_GETCHAR;
__prom_getenv = (void *)PMAX_PROM_GETENV;
__prom_printf = (void *)PMAX_PROM_PRINTF;
__pmax_open = (void *)PMAX_PROM_OPEN;
__pmax_lseek = (void *)PMAX_PROM_LSEEK;
__pmax_read = (void *)PMAX_PROM_READ;
__pmax_close = (void *)PMAX_PROM_CLOSE;
}
}
void __init prom_init(void)
{
extern void dec_machine_halt(void);
static const char cpu_msg[] __initconst =
"Sorry, this kernel is compiled for a wrong CPU type!\n";
s32 argc = fw_arg0;
s32 *argv = (void *)fw_arg1;
u32 magic = fw_arg2;
s32 *prom_vec = (void *)fw_arg3;
/*
* Determine which PROM we have
* (and therefore which machine we're on!)
*/
which_prom(magic, prom_vec);
if (prom_is_rex(magic))
rex_clear_cache();
/* Register the early console. */
register_prom_console();
/* Were we compiled with the right CPU option? */
#if defined(CONFIG_CPU_R3000)
if ((current_cpu_type() == CPU_R4000SC) ||
(current_cpu_type() == CPU_R4400SC)) {
static const char r4k_msg[] __initconst =
"Please recompile with \"CONFIG_CPU_R4X00 = y\".\n";
printk(cpu_msg);
printk(r4k_msg);
dec_machine_halt();
}
#endif
#if defined(CONFIG_CPU_R4X00)
if ((current_cpu_type() == CPU_R3000) ||
(current_cpu_type() == CPU_R3000A)) {
static const char r3k_msg[] __initconst =
"Please recompile with \"CONFIG_CPU_R3000 = y\".\n";
printk(cpu_msg);
printk(r3k_msg);
dec_machine_halt();
}
#endif
prom_meminit(magic);
prom_identify_arch(magic);
prom_init_cmdline(argc, argv, magic);
}
| linux-master | arch/mips/dec/prom/init.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DECstation PROM-based early console support.
*
* Copyright (C) 2004, 2007 Maciej W. Rozycki
*/
#include <linux/console.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/dec/prom.h>
static void __init prom_console_write(struct console *con, const char *s,
unsigned int c)
{
char buf[81];
unsigned int chunk = sizeof(buf) - 1;
while (c > 0) {
if (chunk > c)
chunk = c;
memcpy(buf, s, chunk);
buf[chunk] = '\0';
prom_printf("%s", buf);
s += chunk;
c -= chunk;
}
}
static struct console promcons __initdata = {
.name = "prom",
.write = prom_console_write,
.flags = CON_BOOT | CON_PRINTBUFFER,
.index = -1,
};
void __init register_prom_console(void)
{
register_console(&promcons);
}
| linux-master | arch/mips/dec/prom/console.c |
// SPDX-License-Identifier: GPL-2.0
/*
* cmdline.c: read the command line passed to us by the PROM.
*
* Copyright (C) 1998 Harald Koerfgen
* Copyright (C) 2002, 2004 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/bootinfo.h>
#include <asm/dec/prom.h>
#undef PROM_DEBUG
void __init prom_init_cmdline(s32 argc, s32 *argv, u32 magic)
{
char *arg;
int start_arg, i;
/*
* collect args and prepare cmd_line
*/
if (!prom_is_rex(magic))
start_arg = 1;
else
start_arg = 2;
for (i = start_arg; i < argc; i++) {
arg = (void *)(long)(argv[i]);
strcat(arcs_cmdline, arg);
if (i < (argc - 1))
strcat(arcs_cmdline, " ");
}
#ifdef PROM_DEBUG
printk("arcs_cmdline: %s\n", &(arcs_cmdline[0]));
#endif
}
| linux-master | arch/mips/dec/prom/cmdline.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Parts of this file are based on Ralink's 2.6.21 BSP
*
* Copyright (C) 2008-2011 Gabor Juhos <[email protected]>
* Copyright (C) 2008 Imre Kaloz <[email protected]>
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt305x.h>
#include "common.h"
static struct ralink_soc_info *soc_info_ptr;
static unsigned long rt5350_get_mem_size(void)
{
unsigned long ret;
u32 t;
t = __raw_readl(RT305X_SYSC_BASE + SYSC_REG_SYSTEM_CONFIG);
t = (t >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
RT5350_SYSCFG0_DRAM_SIZE_MASK;
switch (t) {
case RT5350_SYSCFG0_DRAM_SIZE_2M:
ret = 2;
break;
case RT5350_SYSCFG0_DRAM_SIZE_8M:
ret = 8;
break;
case RT5350_SYSCFG0_DRAM_SIZE_16M:
ret = 16;
break;
case RT5350_SYSCFG0_DRAM_SIZE_32M:
ret = 32;
break;
case RT5350_SYSCFG0_DRAM_SIZE_64M:
ret = 64;
break;
default:
panic("rt5350: invalid DRAM size: %u", t);
break;
}
return ret;
}
static unsigned int __init rt305x_get_soc_name0(void)
{
return __raw_readl(RT305X_SYSC_BASE + SYSC_REG_CHIP_NAME0);
}
static unsigned int __init rt305x_get_soc_name1(void)
{
return __raw_readl(RT305X_SYSC_BASE + SYSC_REG_CHIP_NAME1);
}
static bool __init rt3052_soc_valid(void)
{
if (rt305x_get_soc_name0() == RT3052_CHIP_NAME0 &&
rt305x_get_soc_name1() == RT3052_CHIP_NAME1)
return true;
else
return false;
}
static bool __init rt3350_soc_valid(void)
{
if (rt305x_get_soc_name0() == RT3350_CHIP_NAME0 &&
rt305x_get_soc_name1() == RT3350_CHIP_NAME1)
return true;
else
return false;
}
static bool __init rt3352_soc_valid(void)
{
if (rt305x_get_soc_name0() == RT3352_CHIP_NAME0 &&
rt305x_get_soc_name1() == RT3352_CHIP_NAME1)
return true;
else
return false;
}
static bool __init rt5350_soc_valid(void)
{
if (rt305x_get_soc_name0() == RT5350_CHIP_NAME0 &&
rt305x_get_soc_name1() == RT5350_CHIP_NAME1)
return true;
else
return false;
}
static const char __init *rt305x_get_soc_name(struct ralink_soc_info *soc_info)
{
if (rt3052_soc_valid()) {
unsigned long icache_sets;
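/*
 * RT3050 and RT3052 report the same chip name registers; tell them
 * apart by the I-cache sets-per-way field of Config1.
 */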
icache_sets = (read_c0_config1() >> 22) & 7;
if (icache_sets == 1) {
ralink_soc = RT305X_SOC_RT3050;
soc_info->compatible = "ralink,rt3050-soc";
return "RT3050";
} else {
ralink_soc = RT305X_SOC_RT3052;
soc_info->compatible = "ralink,rt3052-soc";
return "RT3052";
}
} else if (rt3350_soc_valid()) {
ralink_soc = RT305X_SOC_RT3350;
soc_info->compatible = "ralink,rt3350-soc";
return "RT3350";
} else if (rt3352_soc_valid()) {
ralink_soc = RT305X_SOC_RT3352;
soc_info->compatible = "ralink,rt3352-soc";
return "RT3352";
} else if (rt5350_soc_valid()) {
ralink_soc = RT305X_SOC_RT5350;
soc_info->compatible = "ralink,rt5350-soc";
return "RT5350";
} else {
panic("rt305x: unknown SoC, n0:%08x n1:%08x",
rt305x_get_soc_name0(), rt305x_get_soc_name1());
}
}
static unsigned int __init rt305x_get_soc_id(void)
{
return __raw_readl(RT305X_SYSC_BASE + SYSC_REG_CHIP_ID);
}
static unsigned int __init rt305x_get_soc_ver(void)
{
return (rt305x_get_soc_id() >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK;
}
static unsigned int __init rt305x_get_soc_rev(void)
{
return (rt305x_get_soc_id() & CHIP_ID_REV_MASK);
}
static const char __init *rt305x_get_soc_id_name(void)
{
if (soc_is_rt3050())
return "rt3050";
else if (soc_is_rt3052())
return "rt3052";
else if (soc_is_rt3350())
return "rt3350";
else if (soc_is_rt3352())
return "rt3352";
else if (soc_is_rt5350())
return "rt5350";
else
return "invalid";
}
static int __init rt305x_soc_dev_init(void)
{
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
soc_dev_attr->family = "Ralink";
soc_dev_attr->soc_id = rt305x_get_soc_id_name();
soc_dev_attr->data = soc_info_ptr;
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
return PTR_ERR(soc_dev);
}
return 0;
}
device_initcall(rt305x_soc_dev_init);
void __init prom_soc_init(struct ralink_soc_info *soc_info)
{
const char *name = rt305x_get_soc_name(soc_info);
snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
"Ralink %s id:%u rev:%u",
name,
rt305x_get_soc_ver(),
rt305x_get_soc_rev());
soc_info->mem_base = RT305X_SDRAM_BASE;
if (soc_is_rt5350()) {
soc_info->mem_size = rt5350_get_mem_size();
} else if (soc_is_rt305x() || soc_is_rt3350()) {
soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
} else if (soc_is_rt3352()) {
soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
}
soc_info_ptr = soc_info;
}
| linux-master | arch/mips/ralink/rt305x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2011-2012 Gabor Juhos <[email protected]>
*/
#include <linux/io.h>
#include <linux/serial_reg.h>
#include <asm/addrspace.h>
#include <asm/setup.h>
#ifdef CONFIG_SOC_RT288X
#define EARLY_UART_BASE 0x300c00
#define CHIPID_BASE 0x300004
#elif defined(CONFIG_SOC_MT7621)
#define EARLY_UART_BASE 0x1E000c00
#define CHIPID_BASE 0x1E000004
#else
#define EARLY_UART_BASE 0x10000c00
#define CHIPID_BASE 0x10000004
#endif
#define MT7628_CHIP_NAME1 0x20203832
#define UART_REG_TX 0x04
#define UART_REG_LCR 0x0c
#define UART_REG_LSR 0x14
#define UART_REG_LSR_RT2880 0x1c
static __iomem void *uart_membase = (__iomem void *) KSEG1ADDR(EARLY_UART_BASE);
static __iomem void *chipid_membase = (__iomem void *) KSEG1ADDR(CHIPID_BASE);
static int init_complete;
static inline void uart_w32(u32 val, unsigned reg)
{
__raw_writel(val, uart_membase + reg);
}
static inline u32 uart_r32(unsigned reg)
{
return __raw_readl(uart_membase + reg);
}
static inline int soc_is_mt7628(void)
{
return IS_ENABLED(CONFIG_SOC_MT7620) &&
(__raw_readl(chipid_membase) == MT7628_CHIP_NAME1);
}
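/*
 * On MT7628 the console UART may be any of the three blocks spaced
 * 0x100 apart; probe each block's LCR and use the first one that reads
 * back non-zero.
 */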
static void find_uart_base(void)
{
int i;
if (!soc_is_mt7628())
return;
for (i = 0; i < 3; i++) {
u32 reg = uart_r32(UART_REG_LCR + (0x100 * i));
if (!reg)
continue;
uart_membase = (__iomem void *) KSEG1ADDR(EARLY_UART_BASE +
(0x100 * i));
break;
}
}
void prom_putchar(char ch)
{
if (!init_complete) {
find_uart_base();
init_complete = 1;
}
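/*
 * MT7621 and MT7628 use a 16550-style layout (TX at 0x00, LSR at 0x14),
 * while older Ralink SoCs use the RT2880 offsets and poll the LSR both
 * before and after writing the character.
 */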
if (IS_ENABLED(CONFIG_SOC_MT7621) || soc_is_mt7628()) {
uart_w32((unsigned char)ch, UART_TX);
while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
;
} else {
while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
;
uart_w32((unsigned char)ch, UART_REG_TX);
while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
;
}
}
| linux-master | arch/mips/ralink/early_printk.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ralink RT2880 timer
* Author: John Crispin
*
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/of_gpio.h>
#include <linux/clk.h>
#include <asm/mach-ralink/ralink_regs.h>
#define TIMER_REG_TMRSTAT 0x00
#define TIMER_REG_TMR0LOAD 0x10
#define TIMER_REG_TMR0CTL 0x18
#define TMRSTAT_TMR0INT BIT(0)
#define TMR0CTL_ENABLE BIT(7)
#define TMR0CTL_MODE_PERIODIC BIT(4)
#define TMR0CTL_PRESCALER 1
#define TMR0CTL_PRESCALE_VAL (0xf - TMR0CTL_PRESCALER)
#define TMR0CTL_PRESCALE_DIV (65536 / BIT(TMR0CTL_PRESCALER))
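/*
 * The driver derives the timer tick rate by dividing the bus clock by
 * PRESCALE_DIV (65536 >> TMR0CTL_PRESCALER); PRESCALE_VAL is the
 * matching control register field encoding.
 */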
struct rt_timer {
struct device *dev;
void __iomem *membase;
int irq;
unsigned long timer_freq;
unsigned long timer_div;
};
static inline void rt_timer_w32(struct rt_timer *rt, u8 reg, u32 val)
{
__raw_writel(val, rt->membase + reg);
}
static inline u32 rt_timer_r32(struct rt_timer *rt, u8 reg)
{
return __raw_readl(rt->membase + reg);
}
static irqreturn_t rt_timer_irq(int irq, void *_rt)
{
struct rt_timer *rt = (struct rt_timer *) _rt;
rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div);
rt_timer_w32(rt, TIMER_REG_TMRSTAT, TMRSTAT_TMR0INT);
return IRQ_HANDLED;
}
static int rt_timer_request(struct rt_timer *rt)
{
int err = request_irq(rt->irq, rt_timer_irq, 0,
dev_name(rt->dev), rt);
if (err) {
dev_err(rt->dev, "failed to request irq\n");
} else {
u32 t = TMR0CTL_MODE_PERIODIC | TMR0CTL_PRESCALE_VAL;
rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
}
return err;
}
static int rt_timer_config(struct rt_timer *rt, unsigned long divisor)
{
if (rt->timer_freq < divisor)
rt->timer_div = rt->timer_freq;
else
rt->timer_div = divisor;
rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div);
return 0;
}
static int rt_timer_enable(struct rt_timer *rt)
{
u32 t;
rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div);
t = rt_timer_r32(rt, TIMER_REG_TMR0CTL);
t |= TMR0CTL_ENABLE;
rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
return 0;
}
static int rt_timer_probe(struct platform_device *pdev)
{
struct rt_timer *rt;
struct clk *clk;
rt = devm_kzalloc(&pdev->dev, sizeof(*rt), GFP_KERNEL);
if (!rt) {
dev_err(&pdev->dev, "failed to allocate memory\n");
return -ENOMEM;
}
rt->irq = platform_get_irq(pdev, 0);
if (rt->irq < 0)
return rt->irq;
rt->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(rt->membase))
return PTR_ERR(rt->membase);
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed get clock rate\n");
return PTR_ERR(clk);
}
rt->timer_freq = clk_get_rate(clk) / TMR0CTL_PRESCALE_DIV;
if (!rt->timer_freq)
return -EINVAL;
rt->dev = &pdev->dev;
platform_set_drvdata(pdev, rt);
rt_timer_request(rt);
rt_timer_config(rt, 2);
rt_timer_enable(rt);
dev_info(&pdev->dev, "maximum frequency is %luHz\n", rt->timer_freq);
return 0;
}
static const struct of_device_id rt_timer_match[] = {
{ .compatible = "ralink,rt2880-timer" },
{},
};
static struct platform_driver rt_timer_driver = {
.probe = rt_timer_probe,
.driver = {
.name = "rt-timer",
.of_match_table = rt_timer_match,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(rt_timer_driver);
| linux-master | arch/mips/ralink/timer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Parts of this file are based on Ralink's 2.6.21 BSP
*
* Copyright (C) 2008 Imre Kaloz <[email protected]>
* Copyright (C) 2008-2011 Gabor Juhos <[email protected]>
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt3883.h>
#include "common.h"
static struct ralink_soc_info *soc_info_ptr;
static unsigned int __init rt3883_get_soc_name0(void)
{
return __raw_readl(RT3883_SYSC_BASE + RT3883_SYSC_REG_CHIPID0_3);
}
static unsigned int __init rt3883_get_soc_name1(void)
{
return __raw_readl(RT3883_SYSC_BASE + RT3883_SYSC_REG_CHIPID4_7);
}
static bool __init rt3883_soc_valid(void)
{
if (rt3883_get_soc_name0() == RT3883_CHIP_NAME0 &&
rt3883_get_soc_name1() == RT3883_CHIP_NAME1)
return true;
else
return false;
}
static const char __init *rt3883_get_soc_name(void)
{
if (rt3883_soc_valid())
return "RT3883";
else
return "invalid";
}
static unsigned int __init rt3883_get_soc_id(void)
{
return __raw_readl(RT3883_SYSC_BASE + RT3883_SYSC_REG_REVID);
}
static unsigned int __init rt3883_get_soc_ver(void)
{
return (rt3883_get_soc_id() >> RT3883_REVID_VER_ID_SHIFT) & RT3883_REVID_VER_ID_MASK;
}
static unsigned int __init rt3883_get_soc_rev(void)
{
return (rt3883_get_soc_id() & RT3883_REVID_ECO_ID_MASK);
}
static int __init rt3883_soc_dev_init(void)
{
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
soc_dev_attr->family = "Ralink";
soc_dev_attr->soc_id = rt3883_get_soc_name();
soc_dev_attr->data = soc_info_ptr;
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
return PTR_ERR(soc_dev);
}
return 0;
}
device_initcall(rt3883_soc_dev_init);
void __init prom_soc_init(struct ralink_soc_info *soc_info)
{
if (rt3883_soc_valid())
soc_info->compatible = "ralink,rt3883-soc";
else
panic("rt3883: unknown SoC, n0:%08x n1:%08x",
rt3883_get_soc_name0(), rt3883_get_soc_name1());
snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
"Ralink %s ver:%u eco:%u",
rt3883_get_soc_name(),
rt3883_get_soc_ver(),
rt3883_get_soc_rev());
soc_info->mem_base = RT3883_SDRAM_BASE;
soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
ralink_soc = RT3883_SOC;
soc_info_ptr = soc_info;
}
| linux-master | arch/mips/ralink/rt3883.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#define BOOTROM_OFFSET 0x10118000
#define BOOTROM_SIZE 0x8000
static void __iomem *membase = (void __iomem *) KSEG1ADDR(BOOTROM_OFFSET);
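/* Dump the raw contents of the boot ROM window to the debugfs file. */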
static int bootrom_show(struct seq_file *s, void *unused)
{
seq_write(s, membase, BOOTROM_SIZE);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(bootrom);
static int __init bootrom_setup(void)
{
debugfs_create_file("bootrom", 0444, NULL, NULL, &bootrom_fops);
return 0;
}
postcore_initcall(bootrom_setup);
| linux-master | arch/mips/ralink/bootrom.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2009 Gabor Juhos <[email protected]>
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include "common.h"
#define INTC_INT_GLOBAL BIT(31)
#define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2)
#define RALINK_CPU_IRQ_PCI (MIPS_CPU_IRQ_BASE + 4)
#define RALINK_CPU_IRQ_FE (MIPS_CPU_IRQ_BASE + 5)
#define RALINK_CPU_IRQ_WIFI (MIPS_CPU_IRQ_BASE + 6)
#define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7)
/* we have a cascade of 8 irqs */
#define RALINK_INTC_IRQ_BASE 8
/* we have 32 SoC irqs */
#define RALINK_INTC_IRQ_COUNT 32
#define RALINK_INTC_IRQ_PERFC (RALINK_INTC_IRQ_BASE + 9)
enum rt_intc_regs_enum {
INTC_REG_STATUS0 = 0,
INTC_REG_STATUS1,
INTC_REG_TYPE,
INTC_REG_RAW_STATUS,
INTC_REG_ENABLE,
INTC_REG_DISABLE,
};
static u32 rt_intc_regs[] = {
[INTC_REG_STATUS0] = 0x00,
[INTC_REG_STATUS1] = 0x04,
[INTC_REG_TYPE] = 0x20,
[INTC_REG_RAW_STATUS] = 0x30,
[INTC_REG_ENABLE] = 0x34,
[INTC_REG_DISABLE] = 0x38,
};
static void __iomem *rt_intc_membase;
static int rt_perfcount_irq;
static inline void rt_intc_w32(u32 val, unsigned reg)
{
__raw_writel(val, rt_intc_membase + rt_intc_regs[reg]);
}
static inline u32 rt_intc_r32(unsigned reg)
{
return __raw_readl(rt_intc_membase + rt_intc_regs[reg]);
}
static void ralink_intc_irq_unmask(struct irq_data *d)
{
rt_intc_w32(BIT(d->hwirq), INTC_REG_ENABLE);
}
static void ralink_intc_irq_mask(struct irq_data *d)
{
rt_intc_w32(BIT(d->hwirq), INTC_REG_DISABLE);
}
static struct irq_chip ralink_intc_irq_chip = {
.name = "INTC",
.irq_unmask = ralink_intc_irq_unmask,
.irq_mask = ralink_intc_irq_mask,
.irq_mask_ack = ralink_intc_irq_mask,
};
int get_c0_perfcount_int(void)
{
return rt_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;
}
static void ralink_intc_irq_handler(struct irq_desc *desc)
{
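/*
 * Handle the lowest-numbered pending INTC interrupt; if nothing is
 * pending, the interrupt was spurious.
 */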
u32 pending = rt_intc_r32(INTC_REG_STATUS0);
if (pending) {
struct irq_domain *domain = irq_desc_get_handler_data(desc);
generic_handle_domain_irq(domain, __ffs(pending));
} else {
spurious_interrupt();
}
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned long pending;
pending = read_c0_status() & read_c0_cause() & ST0_IM;
if (pending & STATUSF_IP7)
do_IRQ(RALINK_CPU_IRQ_COUNTER);
else if (pending & STATUSF_IP5)
do_IRQ(RALINK_CPU_IRQ_FE);
else if (pending & STATUSF_IP6)
do_IRQ(RALINK_CPU_IRQ_WIFI);
else if (pending & STATUSF_IP4)
do_IRQ(RALINK_CPU_IRQ_PCI);
else if (pending & STATUSF_IP2)
do_IRQ(RALINK_CPU_IRQ_INTC);
else
spurious_interrupt();
}
static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
irq_set_chip_and_handler(irq, &ralink_intc_irq_chip, handle_level_irq);
return 0;
}
static const struct irq_domain_ops irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
.map = intc_map,
};
static int __init intc_of_init(struct device_node *node,
struct device_node *parent)
{
struct resource res;
struct irq_domain *domain;
int irq;
if (!of_property_read_u32_array(node, "ralink,intc-registers",
rt_intc_regs, 6))
pr_info("intc: using register map from devicetree\n");
irq = irq_of_parse_and_map(node, 0);
if (!irq)
panic("Failed to get INTC IRQ");
if (of_address_to_resource(node, 0, &res))
panic("Failed to get intc memory range");
if (!request_mem_region(res.start, resource_size(&res),
res.name))
pr_err("Failed to request intc memory");
rt_intc_membase = ioremap(res.start,
resource_size(&res));
if (!rt_intc_membase)
panic("Failed to remap intc memory");
/* disable all interrupts */
rt_intc_w32(~0, INTC_REG_DISABLE);
/* route all INTC interrupts to MIPS HW0 interrupt */
rt_intc_w32(0, INTC_REG_TYPE);
domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT,
RALINK_INTC_IRQ_BASE, 0, &irq_domain_ops, NULL);
if (!domain)
panic("Failed to add irqdomain");
rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE);
irq_set_chained_handler_and_data(irq, ralink_intc_irq_handler, domain);
/* tell the kernel which irq is used for performance monitoring */
rt_perfcount_irq = irq_create_mapping(domain, 9);
return 0;
}
static struct of_device_id __initdata of_irq_ids[] = {
{ .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_irq_of_init },
{ .compatible = "ralink,rt2880-intc", .data = intc_of_init },
{},
};
void __init arch_init_irq(void)
{
of_irq_init(of_irq_ids);
}
| linux-master | arch/mips/ralink/irq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2015 Nikolay Martynov <[email protected]>
* Copyright (C) 2015 John Crispin <[email protected]>
*/
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_clk.h>
#include <linux/clocksource.h>
#include "common.h"
void __init plat_time_init(void)
{
ralink_of_remap();
of_clk_init(NULL);
timer_probe();
}
| linux-master | arch/mips/ralink/timer-gic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Parts of this file are based on Ralink's 2.6.21 BSP
*
* Copyright (C) 2008-2011 Gabor Juhos <[email protected]>
* Copyright (C) 2008 Imre Kaloz <[email protected]>
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt288x.h>
#include "common.h"
static struct ralink_soc_info *soc_info_ptr;
static unsigned int __init rt2880_get_soc_name0(void)
{
return __raw_readl(RT2880_SYSC_BASE + SYSC_REG_CHIP_NAME0);
}
static unsigned int __init rt2880_get_soc_name1(void)
{
return __raw_readl(RT2880_SYSC_BASE + SYSC_REG_CHIP_NAME1);
}
static bool __init rt2880_soc_valid(void)
{
if (rt2880_get_soc_name0() == RT2880_CHIP_NAME0 &&
rt2880_get_soc_name1() == RT2880_CHIP_NAME1)
return true;
else
return false;
}
static const char __init *rt2880_get_soc_name(void)
{
if (rt2880_soc_valid())
return "RT2880";
else
return "invalid";
}
static unsigned int __init rt2880_get_soc_id(void)
{
return __raw_readl(RT2880_SYSC_BASE + SYSC_REG_CHIP_ID);
}
static unsigned int __init rt2880_get_soc_ver(void)
{
return (rt2880_get_soc_id() >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK;
}
static unsigned int __init rt2880_get_soc_rev(void)
{
return (rt2880_get_soc_id() & CHIP_ID_REV_MASK);
}
static int __init rt2880_soc_dev_init(void)
{
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
soc_dev_attr->family = "Ralink";
soc_dev_attr->soc_id = rt2880_get_soc_name();
soc_dev_attr->data = soc_info_ptr;
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
return PTR_ERR(soc_dev);
}
return 0;
}
device_initcall(rt2880_soc_dev_init);
void __init prom_soc_init(struct ralink_soc_info *soc_info)
{
if (rt2880_soc_valid())
soc_info->compatible = "ralink,r2880-soc";
else
panic("rt288x: unknown SoC, n0:%08x n1:%08x",
rt2880_get_soc_name0(), rt2880_get_soc_name1());
snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
"Ralink %s id:%u rev:%u",
rt2880_get_soc_name(),
rt2880_get_soc_ver(),
rt2880_get_soc_rev());
soc_info->mem_base = RT2880_SDRAM_BASE;
soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
ralink_soc = RT2880_SOC;
soc_info_ptr = soc_info;
}
| linux-master | arch/mips/ralink/rt288x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2015 Nikolay Martynov <[email protected]>
* Copyright (C) 2015 John Crispin <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/bug.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/smp-ops.h>
#include <asm/mips-cps.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/mt7621.h>
#include "common.h"
#define MT7621_MEM_TEST_PATTERN 0xaa5555aa
static u32 detect_magic __initdata;
static struct ralink_soc_info *soc_info_ptr;
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct resource_entry *entry;
resource_size_t mask;
entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
if (!entry) {
pr_err("Cannot get memory resource\n");
return -EINVAL;
}
if (mips_cps_numiocu(0)) {
/*
* Hardware doesn't accept mask values with 1s after
* 0s (e.g. 0xffef), so warn if that happens
*/
mask = ~(entry->res->end - entry->res->start) & CM_GCR_REGn_MASK_ADDRMASK;
WARN_ON(mask && BIT(ffz(~mask)) - 1 != ~mask);
write_gcr_reg1_base(entry->res->start);
write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0);
pr_info("PCI coherence region base: 0x%08llx, mask/settings: 0x%08llx\n",
(unsigned long long)read_gcr_reg1_base(),
(unsigned long long)read_gcr_reg1_mask());
}
return 0;
}
phys_addr_t mips_cpc_default_phys_base(void)
{
panic("Cannot detect cpc address");
}
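/*
 * Check whether the physical address space wraps around at 'size':
 * write a test pattern (and its complement) at a fixed location and
 * see whether it reads back at the same location plus 'size'. Sizes
 * that would probe beyond the lowmem window are accepted as-is.
 */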
static bool __init mt7621_addr_wraparound_test(phys_addr_t size)
{
void *dm = (void *)KSEG1ADDR(&detect_magic);
if (CPHYSADDR(dm + size) >= MT7621_LOWMEM_MAX_SIZE)
return true;
__raw_writel(MT7621_MEM_TEST_PATTERN, dm);
if (__raw_readl(dm) != __raw_readl(dm + size))
return false;
__raw_writel(~MT7621_MEM_TEST_PATTERN, dm);
return __raw_readl(dm) == __raw_readl(dm + size);
}
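/*
 * Probe power-of-two sizes from 32 MB up to 256 MB and register the
 * first size at which the address space is seen to wrap; otherwise
 * assume the full lowmem window plus the highmem region.
 */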
static void __init mt7621_memory_detect(void)
{
phys_addr_t size;
for (size = 32 * SZ_1M; size <= 256 * SZ_1M; size <<= 1) {
if (mt7621_addr_wraparound_test(size)) {
memblock_add(MT7621_LOWMEM_BASE, size);
return;
}
}
memblock_add(MT7621_LOWMEM_BASE, MT7621_LOWMEM_MAX_SIZE);
memblock_add(MT7621_HIGHMEM_BASE, MT7621_HIGHMEM_SIZE);
}
static unsigned int __init mt7621_get_soc_name0(void)
{
return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME0);
}
static unsigned int __init mt7621_get_soc_name1(void)
{
return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME1);
}
static bool __init mt7621_soc_valid(void)
{
if (mt7621_get_soc_name0() == MT7621_CHIP_NAME0 &&
mt7621_get_soc_name1() == MT7621_CHIP_NAME1)
return true;
else
return false;
}
static const char __init *mt7621_get_soc_id(void)
{
if (mt7621_soc_valid())
return "MT7621";
else
return "invalid";
}
static unsigned int __init mt7621_get_soc_rev(void)
{
return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_REV);
}
static unsigned int __init mt7621_get_soc_ver(void)
{
return (mt7621_get_soc_rev() >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK;
}
static unsigned int __init mt7621_get_soc_eco(void)
{
return (mt7621_get_soc_rev() & CHIP_REV_ECO_MASK);
}
static const char __init *mt7621_get_soc_revision(void)
{
if (mt7621_get_soc_rev() == 1 && mt7621_get_soc_eco() == 1)
return "E2";
else
return "E1";
}
static int __init mt7621_soc_dev_init(void)
{
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
soc_dev_attr->soc_id = "mt7621";
soc_dev_attr->family = "Ralink";
soc_dev_attr->revision = mt7621_get_soc_revision();
soc_dev_attr->data = soc_info_ptr;
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
return PTR_ERR(soc_dev);
}
return 0;
}
device_initcall(mt7621_soc_dev_init);
void __init prom_soc_init(struct ralink_soc_info *soc_info)
{
/* Early detection of CMP support */
mips_cm_probe();
mips_cpc_probe();
if (mips_cps_numiocu(0)) {
/*
* mips_cm_probe() wipes out bootloader
* config for CM regions and we have to configure them
* again. This SoC cannot talk to palmbus devices
* without a proper IOCU region set up.
*
* FIXME: it would be better to do this with values
* from DT, but we need this very early because
* without this we cannot talk to pretty much anything
* including serial.
*/
write_gcr_reg0_base(MT7621_PALMBUS_BASE);
write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE |
CM_GCR_REGn_MASK_CMTGT_IOCU0);
__sync();
}
if (mt7621_soc_valid())
soc_info->compatible = "mediatek,mt7621-soc";
else
panic("mt7621: unknown SoC, n0:%08x n1:%08x\n",
mt7621_get_soc_name0(),
mt7621_get_soc_name1());
ralink_soc = MT762X_SOC_MT7621AT;
snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
"MediaTek %s ver:%u eco:%u",
mt7621_get_soc_id(),
mt7621_get_soc_ver(),
mt7621_get_soc_eco());
soc_info->mem_detect = mt7621_memory_detect;
soc_info_ptr = soc_info;
if (!register_cps_smp_ops())
return;
if (!register_vsmp_smp_ops())
return;
}
| linux-master | arch/mips/ralink/mt7621.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2008-2009 Gabor Juhos <[email protected]>
* Copyright (C) 2008 Imre Kaloz <[email protected]>
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <asm/reboot.h>
#include <asm/mach-ralink/ralink_regs.h>
/* Reset Control */
#define SYSC_REG_RESET_CTRL 0x034
#define RSTCTL_RESET_PCI BIT(26)
#define RSTCTL_RESET_SYSTEM BIT(0)
static void ralink_restart(char *command)
{
if (IS_ENABLED(CONFIG_PCI)) {
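/* Assert the PCI reset first and give devices time to settle before the full system reset below. */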
rt_sysc_m32(0, RSTCTL_RESET_PCI, SYSC_REG_RESET_CTRL);
mdelay(50);
}
local_irq_disable();
rt_sysc_w32(RSTCTL_RESET_SYSTEM, SYSC_REG_RESET_CTRL);
unreachable();
}
static int __init mips_reboot_setup(void)
{
_machine_restart = ralink_restart;
return 0;
}
arch_initcall(mips_reboot_setup);
| linux-master | arch/mips/ralink/reset.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <asm/mach-ralink/ralink_regs.h>
#define REG_ILL_ACC_ADDR 0x10
#define REG_ILL_ACC_TYPE 0x14
#define ILL_INT_STATUS BIT(31)
#define ILL_ACC_WRITE BIT(30)
#define ILL_ACC_LEN_M 0xff
#define ILL_ACC_OFF_M 0xf
#define ILL_ACC_OFF_S 16
#define ILL_ACC_ID_M 0x7
#define ILL_ACC_ID_S 8
#define DRV_NAME "ill_acc"
static const char * const ill_acc_ids[] = {
"cpu", "dma", "ppe", "pdma rx", "pdma tx", "pci/e", "wmac", "usb",
};
static irqreturn_t ill_acc_irq_handler(int irq, void *_priv)
{
struct device *dev = (struct device *) _priv;
u32 addr = rt_memc_r32(REG_ILL_ACC_ADDR);
u32 type = rt_memc_r32(REG_ILL_ACC_TYPE);
dev_err(dev, "illegal %s access from %s - addr:0x%08x offset:%d len:%d\n",
(type & ILL_ACC_WRITE) ? ("write") : ("read"),
ill_acc_ids[(type >> ILL_ACC_ID_S) & ILL_ACC_ID_M],
addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M,
type & ILL_ACC_LEN_M);
rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
return IRQ_HANDLED;
}
static int __init ill_acc_of_setup(void)
{
struct platform_device *pdev;
struct device_node *np;
int irq;
/* somehow this driver breaks on RT5350 */
if (of_machine_is_compatible("ralink,rt5350-soc"))
return -EINVAL;
np = of_find_compatible_node(NULL, NULL, "ralink,rt3050-memc");
if (!np)
return -EINVAL;
pdev = of_find_device_by_node(np);
if (!pdev) {
pr_err("%pOFn: failed to lookup pdev\n", np);
of_node_put(np);
return -EINVAL;
}
irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
if (!irq) {
dev_err(&pdev->dev, "failed to get irq\n");
put_device(&pdev->dev);
return -EINVAL;
}
if (request_irq(irq, ill_acc_irq_handler, 0, "ill_acc", &pdev->dev)) {
dev_err(&pdev->dev, "failed to request irq\n");
put_device(&pdev->dev);
return -EINVAL;
}
rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
dev_info(&pdev->dev, "irq registered\n");
return 0;
}
arch_initcall(ill_acc_of_setup);
| linux-master | arch/mips/ralink/ill_acc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2011 Gabor Juhos <[email protected]>
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/time.h>
#include "common.h"
static const char *clk_cpu(int *idx)
{
switch (ralink_soc) {
case RT2880_SOC:
*idx = 0;
return "ralink,rt2880-sysc";
case RT3883_SOC:
*idx = 0;
return "ralink,rt3883-sysc";
case RT305X_SOC_RT3050:
*idx = 0;
return "ralink,rt3050-sysc";
case RT305X_SOC_RT3052:
*idx = 0;
return "ralink,rt3052-sysc";
case RT305X_SOC_RT3350:
*idx = 1;
return "ralink,rt3350-sysc";
case RT305X_SOC_RT3352:
*idx = 1;
return "ralink,rt3352-sysc";
case RT305X_SOC_RT5350:
*idx = 1;
return "ralink,rt5350-sysc";
case MT762X_SOC_MT7620A:
*idx = 2;
return "ralink,mt7620-sysc";
case MT762X_SOC_MT7620N:
*idx = 2;
return "ralink,mt7620-sysc";
case MT762X_SOC_MT7628AN:
*idx = 1;
return "ralink,mt7628-sysc";
case MT762X_SOC_MT7688:
*idx = 1;
return "ralink,mt7688-sysc";
default:
*idx = -1;
return "invalid";
}
}
void __init plat_time_init(void)
{
struct of_phandle_args clkspec;
const char *compatible;
struct clk *clk;
int cpu_clk_idx;
ralink_of_remap();
compatible = clk_cpu(&cpu_clk_idx);
if (cpu_clk_idx == -1)
panic("unable to get CPU clock index");
of_clk_init(NULL);
clkspec.np = of_find_compatible_node(NULL, NULL, compatible);
clkspec.args_count = 1;
clkspec.args[0] = cpu_clk_idx;
clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk))
panic("unable to get CPU clock, err=%ld", PTR_ERR(clk));
pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
mips_hpt_frequency = clk_get_rate(clk) / 2;
clk_put(clk);
timer_probe();
}
| linux-master | arch/mips/ralink/clk.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Parts of this file are based on Ralink's 2.6.21 BSP
*
* Copyright (C) 2008-2011 Gabor Juhos <[email protected]>
* Copyright (C) 2008 Imre Kaloz <[email protected]>
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/mt7620.h>
#include "common.h"
/* analog */
#define PMU0_CFG 0x88
#define PMU_SW_SET BIT(28)
#define A_DCDC_EN BIT(24)
#define A_SSC_PERI BIT(19)
#define A_SSC_GEN BIT(18)
#define A_SSC_M 0x3
#define A_SSC_S 16
#define A_DLY_M 0x7
#define A_DLY_S 8
#define A_VTUNE_M 0xff
/* digital */
#define PMU1_CFG 0x8C
#define DIG_SW_SEL BIT(25)
/* EFUSE bits */
#define EFUSE_MT7688 0x100000
/* DRAM type bit */
#define DRAM_TYPE_MT7628_MASK 0x1
/* does the board have SDRAM or DDR RAM */
static int dram_type;
static struct ralink_soc_info *soc_info_ptr;
static __init void
mt7620_dram_init(struct ralink_soc_info *soc_info)
{
switch (dram_type) {
case SYSCFG0_DRAM_TYPE_SDRAM:
pr_info("Board has SDRAM\n");
soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
break;
case SYSCFG0_DRAM_TYPE_DDR1:
pr_info("Board has DDR1\n");
soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
break;
case SYSCFG0_DRAM_TYPE_DDR2:
pr_info("Board has DDR2\n");
soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
break;
default:
BUG();
}
}
static __init void
mt7628_dram_init(struct ralink_soc_info *soc_info)
{
switch (dram_type) {
case SYSCFG0_DRAM_TYPE_DDR1_MT7628:
pr_info("Board has DDR1\n");
soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
break;
case SYSCFG0_DRAM_TYPE_DDR2_MT7628:
pr_info("Board has DDR2\n");
soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
break;
default:
BUG();
}
}
static unsigned int __init mt7620_get_soc_name0(void)
{
return __raw_readl(MT7620_SYSC_BASE + SYSC_REG_CHIP_NAME0);
}
static unsigned int __init mt7620_get_soc_name1(void)
{
return __raw_readl(MT7620_SYSC_BASE + SYSC_REG_CHIP_NAME1);
}
static bool __init mt7620_soc_valid(void)
{
if (mt7620_get_soc_name0() == MT7620_CHIP_NAME0 &&
mt7620_get_soc_name1() == MT7620_CHIP_NAME1)
return true;
else
return false;
}
static bool __init mt7628_soc_valid(void)
{
if (mt7620_get_soc_name0() == MT7620_CHIP_NAME0 &&
mt7620_get_soc_name1() == MT7628_CHIP_NAME1)
return true;
else
return false;
}
static unsigned int __init mt7620_get_rev(void)
{
return __raw_readl(MT7620_SYSC_BASE + SYSC_REG_CHIP_REV);
}
static unsigned int __init mt7620_get_bga(void)
{
return (mt7620_get_rev() >> CHIP_REV_PKG_SHIFT) & CHIP_REV_PKG_MASK;
}
static unsigned int __init mt7620_get_efuse(void)
{
return __raw_readl(MT7620_SYSC_BASE + SYSC_REG_EFUSE_CFG);
}
static unsigned int __init mt7620_get_soc_ver(void)
{
return (mt7620_get_rev() >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK;
}
static unsigned int __init mt7620_get_soc_eco(void)
{
return (mt7620_get_rev() & CHIP_REV_ECO_MASK);
}
static const char __init *mt7620_get_soc_name(struct ralink_soc_info *soc_info)
{
if (mt7620_soc_valid()) {
u32 bga = mt7620_get_bga();
if (bga) {
ralink_soc = MT762X_SOC_MT7620A;
soc_info->compatible = "ralink,mt7620a-soc";
return "MT7620A";
} else {
ralink_soc = MT762X_SOC_MT7620N;
soc_info->compatible = "ralink,mt7620n-soc";
return "MT7620N";
}
} else if (mt7628_soc_valid()) {
u32 efuse = mt7620_get_efuse();
unsigned char *name = NULL;
if (efuse & EFUSE_MT7688) {
ralink_soc = MT762X_SOC_MT7688;
name = "MT7688";
} else {
ralink_soc = MT762X_SOC_MT7628AN;
name = "MT7628AN";
}
soc_info->compatible = "ralink,mt7628an-soc";
return name;
} else {
panic("mt762x: unknown SoC, n0:%08x n1:%08x\n",
mt7620_get_soc_name0(), mt7620_get_soc_name1());
}
}
static const char __init *mt7620_get_soc_id_name(void)
{
if (ralink_soc == MT762X_SOC_MT7620A)
return "mt7620a";
else if (ralink_soc == MT762X_SOC_MT7620N)
return "mt7620n";
else if (ralink_soc == MT762X_SOC_MT7688)
return "mt7688";
else if (ralink_soc == MT762X_SOC_MT7628AN)
return "mt7628n";
else
return "invalid";
}
static int __init mt7620_soc_dev_init(void)
{
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
soc_dev_attr->family = "Ralink";
soc_dev_attr->soc_id = mt7620_get_soc_id_name();
soc_dev_attr->data = soc_info_ptr;
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
return PTR_ERR(soc_dev);
}
return 0;
}
device_initcall(mt7620_soc_dev_init);
void __init prom_soc_init(struct ralink_soc_info *soc_info)
{
const char *name = mt7620_get_soc_name(soc_info);
u32 cfg0;
u32 pmu0;
u32 pmu1;
snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
"MediaTek %s ver:%u eco:%u",
name, mt7620_get_soc_ver(), mt7620_get_soc_eco());
cfg0 = __raw_readl(MT7620_SYSC_BASE + SYSC_REG_SYSTEM_CONFIG0);
if (is_mt76x8()) {
dram_type = cfg0 & DRAM_TYPE_MT7628_MASK;
} else {
dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) &
SYSCFG0_DRAM_TYPE_MASK;
if (dram_type == SYSCFG0_DRAM_TYPE_UNKNOWN)
dram_type = SYSCFG0_DRAM_TYPE_SDRAM;
}
soc_info->mem_base = MT7620_DRAM_BASE;
if (is_mt76x8())
mt7628_dram_init(soc_info);
else
mt7620_dram_init(soc_info);
pmu0 = __raw_readl(MT7620_SYSC_BASE + PMU0_CFG);
pmu1 = __raw_readl(MT7620_SYSC_BASE + PMU1_CFG);
pr_info("Analog PMU set to %s control\n",
(pmu0 & PMU_SW_SET) ? ("sw") : ("hw"));
pr_info("Digital PMU set to %s control\n",
(pmu1 & DIG_SW_SEL) ? ("sw") : ("hw"));
soc_info_ptr = soc_info;
}
| linux-master | arch/mips/ralink/mt7620.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2009 Gabor Juhos <[email protected]>
* Copyright (C) 2010 Joonas Lahtinen <[email protected]>
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/string.h>
#include <asm/bootinfo.h>
#include <asm/addrspace.h>
#include <asm/mach-ralink/ralink_regs.h>
#include "common.h"
struct ralink_soc_info soc_info;
enum ralink_soc_type ralink_soc;
EXPORT_SYMBOL_GPL(ralink_soc);
const char *get_system_type(void)
{
return soc_info.sys_type;
}
static __init void prom_init_cmdline(void)
{
int argc;
char **argv;
int i;
pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n",
(unsigned int)fw_arg0, (unsigned int)fw_arg1,
(unsigned int)fw_arg2, (unsigned int)fw_arg3);
argc = fw_arg0;
argv = (char **) KSEG1ADDR(fw_arg1);
if (!argv) {
pr_debug("argv=%p is invalid, skipping\n",
argv);
return;
}
for (i = 0; i < argc; i++) {
char *p = (char *) KSEG1ADDR(argv[i]);
if (CPHYSADDR(p) && *p) {
pr_debug("argv[%d]: %s\n", i, p);
strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
}
}
}
void __init prom_init(void)
{
prom_soc_init(&soc_info);
pr_info("SoC Type: %s\n", get_system_type());
prom_init_cmdline();
}
| linux-master | arch/mips/ralink/prom.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2008 Imre Kaloz <[email protected]>
* Copyright (C) 2008-2009 Gabor Juhos <[email protected]>
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/reboot.h>
#include <asm/bootinfo.h>
#include <asm/addrspace.h>
#include <asm/prom.h>
#include <asm/mach-ralink/ralink_regs.h>
#include "common.h"
__iomem void *rt_sysc_membase;
__iomem void *rt_memc_membase;
EXPORT_SYMBOL_GPL(rt_sysc_membase);
static const struct of_device_id mtmips_memc_match[] = {
{ .compatible = "mediatek,mt7621-memc" },
{ .compatible = "ralink,mt7620a-memc" },
{ .compatible = "ralink,rt2880-memc" },
{ .compatible = "ralink,rt3050-memc" },
{ .compatible = "ralink,rt3883-memc" },
{}
};
static const struct of_device_id mtmips_sysc_match[] = {
{ .compatible = "mediatek,mt7621-sysc" },
{ .compatible = "ralink,mt7620-sysc" },
{ .compatible = "ralink,mt7628-sysc" },
{ .compatible = "ralink,mt7688-sysc" },
{ .compatible = "ralink,rt2880-sysc" },
{ .compatible = "ralink,rt3050-sysc" },
{ .compatible = "ralink,rt3052-sysc" },
{ .compatible = "ralink,rt3352-sysc" },
{ .compatible = "ralink,rt3883-sysc" },
{ .compatible = "ralink,rt5350-sysc" },
{}
};
static __iomem void *
mtmips_of_remap_node(const struct of_device_id *match, const char *type)
{
struct resource res;
struct device_node *np;
np = of_find_matching_node(NULL, match);
if (!np)
panic("Failed to find %s controller node", type);
if (of_address_to_resource(np, 0, &res))
panic("Failed to get resource for %s node", np->name);
if (!request_mem_region(res.start,
resource_size(&res),
res.name))
panic("Failed to request resources for %s node", np->name);
of_node_put(np);
return ioremap(res.start, resource_size(&res));
}
void __init ralink_of_remap(void)
{
rt_sysc_membase = mtmips_of_remap_node(mtmips_sysc_match, "system");
rt_memc_membase = mtmips_of_remap_node(mtmips_memc_match, "memory");
if (!rt_sysc_membase || !rt_memc_membase)
panic("Failed to remap core resources");
}
void __init plat_mem_setup(void)
{
void *dtb;
set_io_port_base(KSEG1);
/*
* Load the builtin devicetree. This causes the chosen node to be
* parsed resulting in our memory appearing.
*/
dtb = get_fdt();
__dt_setup_arch(dtb);
if (early_init_dt_scan_memory())
return;
if (soc_info.mem_detect)
soc_info.mem_detect();
else if (soc_info.mem_size)
memblock_add(soc_info.mem_base, soc_info.mem_size * SZ_1M);
else
detect_memory_region(soc_info.mem_base,
soc_info.mem_size_min * SZ_1M,
soc_info.mem_size_max * SZ_1M);
}
static int __init plat_of_setup(void)
{
__dt_register_buses(soc_info.compatible, "palmbus");
return 0;
}
arch_initcall(plat_of_setup);
| linux-master | arch/mips/ralink/of.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2015 Nikolay Martynov <[email protected]>
* Copyright (C) 2015 John Crispin <[email protected]>
*/
#include <linux/init.h>
#include <linux/of.h>
#include <linux/irqchip.h>
#include <asm/mips-cps.h>
int get_c0_perfcount_int(void)
{
return gic_get_c0_perfcount_int();
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
void __init arch_init_irq(void)
{
irqchip_init();
}
| linux-master | arch/mips/ralink/irq-gic.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 by John Crispin <[email protected]>
*/
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <asm/mach-ralink/ralink_regs.h>
#define SYSTICK_FREQ (50 * 1000)
#define SYSTICK_CONFIG 0x00
#define SYSTICK_COMPARE 0x04
#define SYSTICK_COUNT 0x08
/* route systick irq to mips irq 7 instead of the r4k-timer */
#define CFG_EXT_STK_EN 0x2
/* enable the counter */
#define CFG_CNT_EN 0x1
struct systick_device {
void __iomem *membase;
struct clock_event_device dev;
int irq_requested;
int freq_scale;
};
static int systick_set_oneshot(struct clock_event_device *evt);
static int systick_shutdown(struct clock_event_device *evt);
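/*
 * Program the compare register 'delta' ticks ahead of the current
 * count, wrapping the value modulo SYSTICK_FREQ.
 */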
static int systick_next_event(unsigned long delta,
struct clock_event_device *evt)
{
struct systick_device *sdev;
u32 count;
sdev = container_of(evt, struct systick_device, dev);
count = ioread32(sdev->membase + SYSTICK_COUNT);
count = (count + delta) % SYSTICK_FREQ;
iowrite32(count, sdev->membase + SYSTICK_COMPARE);
return 0;
}
static void systick_event_handler(struct clock_event_device *dev)
{
	/* nothing to do here */
}
static irqreturn_t systick_interrupt(int irq, void *dev_id)
{
struct clock_event_device *dev = (struct clock_event_device *) dev_id;
dev->event_handler(dev);
return IRQ_HANDLED;
}
static struct systick_device systick = {
.dev = {
/*
* cevt-r4k uses 300, make sure systick
* gets used if available
*/
.rating = 310,
.features = CLOCK_EVT_FEAT_ONESHOT,
.set_next_event = systick_next_event,
.set_state_shutdown = systick_shutdown,
.set_state_oneshot = systick_set_oneshot,
.event_handler = systick_event_handler,
},
};
static int systick_shutdown(struct clock_event_device *evt)
{
struct systick_device *sdev;
sdev = container_of(evt, struct systick_device, dev);
if (sdev->irq_requested)
free_irq(systick.dev.irq, &systick.dev);
sdev->irq_requested = 0;
iowrite32(0, systick.membase + SYSTICK_CONFIG);
return 0;
}
static int systick_set_oneshot(struct clock_event_device *evt)
{
const char *name = systick.dev.name;
struct systick_device *sdev;
int irq = systick.dev.irq;
sdev = container_of(evt, struct systick_device, dev);
if (!sdev->irq_requested) {
if (request_irq(irq, systick_interrupt,
IRQF_PERCPU | IRQF_TIMER, name, &systick.dev))
pr_err("Failed to request irq %d (%s)\n", irq, name);
}
sdev->irq_requested = 1;
iowrite32(CFG_EXT_STK_EN | CFG_CNT_EN,
systick.membase + SYSTICK_CONFIG);
return 0;
}
static int __init ralink_systick_init(struct device_node *np)
{
int ret;
systick.membase = of_iomap(np, 0);
if (!systick.membase)
return -ENXIO;
systick.dev.name = np->name;
clockevents_calc_mult_shift(&systick.dev, SYSTICK_FREQ, 60);
systick.dev.max_delta_ns = clockevent_delta2ns(0x7fff, &systick.dev);
systick.dev.max_delta_ticks = 0x7fff;
systick.dev.min_delta_ns = clockevent_delta2ns(0x3, &systick.dev);
systick.dev.min_delta_ticks = 0x3;
systick.dev.irq = irq_of_parse_and_map(np, 0);
if (!systick.dev.irq) {
pr_err("%pOFn: request_irq failed", np);
return -EINVAL;
}
ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
SYSTICK_FREQ, 301, 16,
clocksource_mmio_readl_up);
if (ret)
return ret;
clockevents_register_device(&systick.dev);
pr_info("%pOFn: running - mult: %d, shift: %d\n",
np, systick.dev.mult, systick.dev.shift);
return 0;
}
TIMER_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
| linux-master | arch/mips/ralink/cevt-rt3352.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include "libgcc.h"
/*
* GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
* that specific case only we implement that intrinsic here.
*
* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
*/
#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
/* multiply 64-bit values, low 64-bits returned */
static inline long long notrace dmulu(long long a, long long b)
{
long long res;
asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
return res;
}
/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */
static inline long long notrace dmuhu(long long a, long long b)
{
long long res;
asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
return res;
}
/* multiply 128-bit values, low 128-bits returned */
ti_type notrace __multi3(ti_type a, ti_type b)
{
TWunion res, aa, bb;
aa.ti = a;
bb.ti = b;
/*
* a * b = (a.lo * b.lo)
* + 2^64 * (a.hi * b.lo + a.lo * b.hi)
* [+ 2^128 * (a.hi * b.hi)]
*/
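	/*
	 * The bracketed 2^128 term lies above the low 128 bits returned
	 * here, so it is simply omitted.
	 */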
res.s.low = dmulu(aa.s.low, bb.s.low);
res.s.high = dmuhu(aa.s.low, bb.s.low);
res.s.high += dmulu(aa.s.high, bb.s.low);
res.s.high += dmulu(aa.s.low, bb.s.high);
return res.ti;
}
EXPORT_SYMBOL(__multi3);
#endif /* 64BIT && CPU_MIPSR6 && GCC7 */
| linux-master | arch/mips/lib/multi3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Dump R3000 TLB for debugging purposes.
*
* Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
* Copyright (C) 1999 by Silicon Graphics, Inc.
* Copyright (C) 1999 by Harald Koerfgen
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/tlbdebug.h>
void dump_tlb_regs(void)
{
pr_info("Index : %0x\n", read_c0_index());
pr_info("EntryHi : %0lx\n", read_c0_entryhi());
pr_info("EntryLo : %0lx\n", read_c0_entrylo0());
}
static void dump_tlb(int first, int last)
{
int i;
unsigned int asid;
unsigned long entryhi, entrylo0, asid_mask;
asid_mask = cpu_asid_mask(¤t_cpu_data);
asid = read_c0_entryhi() & asid_mask;
for (i = first; i <= last; i++) {
write_c0_index(i<<8);
__asm__ __volatile__(
".set\tnoreorder\n\t"
"tlbr\n\t"
"nop\n\t"
".set\treorder");
entryhi = read_c0_entryhi();
entrylo0 = read_c0_entrylo0();
/* Unused entries have a virtual address of KSEG0. */
if ((entryhi & PAGE_MASK) != KSEG0 &&
(entrylo0 & R3K_ENTRYLO_G ||
(entryhi & asid_mask) == asid)) {
/*
* Only print entries in use
*/
printk("Index: %2d ", i);
pr_cont("va=%08lx asid=%08lx"
" [pa=%06lx n=%d d=%d v=%d g=%d]",
entryhi & PAGE_MASK,
entryhi & asid_mask,
entrylo0 & PAGE_MASK,
(entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
(entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
(entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
(entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
}
}
printk("\n");
write_c0_entryhi(asid);
}
void dump_tlb_all(void)
{
dump_tlb(0, current_cpu_data.tlbsize - 1);
}
| linux-master | arch/mips/lib/r3k_dump_tlb.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/compiler.h>
#include <uapi/linux/swab.h>
/* To silence -Wmissing-prototypes. */
unsigned int __bswapsi2(unsigned int u);
unsigned int notrace __bswapsi2(unsigned int u)
{
return ___constant_swab32(u);
}
EXPORT_SYMBOL(__bswapsi2);
| linux-master | arch/mips/lib/bswapsi.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
* Copyright (C) 1996 by Paul M. Antoine
* Copyright (C) 1999 Silicon Graphics
* Copyright (C) 2000 MIPS Technologies, Inc.
*/
#include <asm/irqflags.h>
#include <asm/hazards.h>
#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/stringify.h>
#if !defined(CONFIG_CPU_HAS_DIEI)
/*
* For cli() we have to insert nops to make sure that the new value
* has actually arrived in the status register before the end of this
* macro.
* R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
* no nops at all.
*/
/*
 * For TX49, operating on only the IE bit is not enough.
*
* If mfc0 $12 follows store and the mfc0 is last instruction of a
* page and fetching the next instruction causes TLB miss, the result
* of the mfc0 might wrongly contain EXL bit.
*
* ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
*
* Workaround: mask EXL bit of the result or place a nop before mfc0.
*/
notrace void arch_local_irq_disable(void)
{
preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
" .set noat \n"
" mfc0 $1,$12 \n"
" ori $1,0x1f \n"
" xori $1,0x1f \n"
" .set noreorder \n"
" mtc0 $1,$12 \n"
" " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
: /* no outputs */
: /* no inputs */
: "memory");
preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_disable);
notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags;
preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
" .set reorder \n"
" .set noat \n"
" mfc0 %[flags], $12 \n"
" ori $1, %[flags], 0x1f \n"
" xori $1, 0x1f \n"
" .set noreorder \n"
" mtc0 $1, $12 \n"
" " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
: [flags] "=r" (flags)
: /* no inputs */
: "memory");
preempt_enable_notrace();
return flags;
}
EXPORT_SYMBOL(arch_local_irq_save);
notrace void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
preempt_disable_notrace();
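	/*
	 * Only the IE bit of the saved flags is restored: the ori/xori pair
	 * clears the low Status bits of the value read back from CP0, and
	 * the saved IE bit (isolated by the andi) is ORed in before the mtc0.
	 */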
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set noat \n"
" mfc0 $1, $12 \n"
" andi %[flags], 1 \n"
" ori $1, 0x1f \n"
" xori $1, 0x1f \n"
" or %[flags], $1 \n"
" mtc0 %[flags], $12 \n"
" " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
: [flags] "=r" (__tmp1)
: "0" (flags)
: "memory");
preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_restore);
#endif /* !CONFIG_CPU_HAS_DIEI */
| linux-master | arch/mips/lib/mips-atomic.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 by Waldorf Electronics
* Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2007, 2014 Maciej W. Rozycki
*/
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/param.h>
#include <linux/smp.h>
#include <linux/stringify.h>
#include <asm/asm.h>
#include <asm/compiler.h>
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define GCC_DADDI_IMM_ASM() "I"
#else
#define GCC_DADDI_IMM_ASM() "r"
#endif
#ifndef CONFIG_HAVE_PLAT_DELAY
void __delay(unsigned long loops)
{
__asm__ __volatile__ (
" .set noreorder \n"
" .align 3 \n"
"1: bnez %0, 1b \n"
" " __stringify(LONG_SUBU) " %0, %1 \n"
" .set reorder \n"
: "=r" (loops)
: GCC_DADDI_IMM_ASM() (1), "0" (loops));
}
EXPORT_SYMBOL(__delay);
/*
* Division by multiplication: you don't have to worry about
* loss of precision.
*
* Use only for very small delays ( < 1 msec). Should probably use a
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
 * first constant multiplication gets optimized away if the delay is
* a constant)
*/
void __udelay(unsigned long us)
{
unsigned int lpj = raw_current_cpu_data.udelay_val;
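	/*
	 * 0x000010c7 is 2^32 / 10^6 rounded up, so the expression below is
	 * effectively us * HZ * lpj / 10^6, computed with one multiply and a
	 * shift instead of a division.
	 */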
__delay((us * 0x000010c7ull * HZ * lpj) >> 32);
}
EXPORT_SYMBOL(__udelay);
void __ndelay(unsigned long ns)
{
unsigned int lpj = raw_current_cpu_data.udelay_val;
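	/* 0x00000005 is 2^32 / 10^9 rounded up, i.e. the nanosecond variant. */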
__delay((ns * 0x00000005ull * HZ * lpj) >> 32);
}
EXPORT_SYMBOL(__ndelay);
#endif
| linux-master | arch/mips/lib/delay.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/compiler.h>
#include <uapi/linux/swab.h>
/* To silence -Wmissing-prototypes. */
unsigned long long __bswapdi2(unsigned long long u);
unsigned long long notrace __bswapdi2(unsigned long long u)
{
return ___constant_swab64(u);
}
EXPORT_SYMBOL(__bswapdi2);
| linux-master | arch/mips/lib/bswapdi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Implement the default iomap interfaces
*
* (C) Copyright 2004 Linus Torvalds
* (C) Copyright 2006 Ralf Baechle <[email protected]>
* (C) Copyright 2007 MIPS Technologies, Inc.
* written by Ralf Baechle <[email protected]>
*/
#include <linux/pci.h>
#include <linux/export.h>
#include <asm/io.h>
#ifdef CONFIG_PCI_DRIVERS_LEGACY
void __iomem *__pci_ioport_map(struct pci_dev *dev,
unsigned long port, unsigned int nr)
{
struct pci_controller *ctrl = dev->bus->sysdata;
unsigned long base = ctrl->io_map_base;
/* This will eventually become a BUG_ON but for now be gentle */
if (unlikely(!ctrl->io_map_base)) {
struct pci_bus *bus = dev->bus;
char name[8];
while (bus->parent)
bus = bus->parent;
ctrl->io_map_base = base = mips_io_port_base;
sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number);
printk(KERN_WARNING "io_map_base of root PCI bus %s unset. "
"Trying to continue but you better\nfix this issue or "
"report it to [email protected] or your "
"vendor.\n", name);
#ifdef CONFIG_PCI_DOMAINS
panic("To avoid data corruption io_map_base MUST be set with "
"multiple PCI domains.");
#endif
}
return (void __iomem *) (ctrl->io_map_base + port);
}
#endif /* CONFIG_PCI_DRIVERS_LEGACY */
| linux-master | arch/mips/lib/iomap-pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Dump R4x00 TLB for debugging purposes.
*
* Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
* Copyright (C) 1999 by Silicon Graphics, Inc.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/hazards.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/tlbdebug.h>
void dump_tlb_regs(void)
{
const int field = 2 * sizeof(unsigned long);
pr_info("Index : %0x\n", read_c0_index());
pr_info("PageMask : %0x\n", read_c0_pagemask());
if (cpu_has_guestid)
pr_info("GuestCtl1: %0x\n", read_c0_guestctl1());
pr_info("EntryHi : %0*lx\n", field, read_c0_entryhi());
pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
pr_info("Wired : %0x\n", read_c0_wired());
switch (current_cpu_type()) {
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_R16000:
pr_info("FrameMask: %0x\n", read_c0_framemask());
break;
}
if (cpu_has_small_pages || cpu_has_rixi || cpu_has_xpa)
pr_info("PageGrain: %0x\n", read_c0_pagegrain());
if (cpu_has_htw) {
pr_info("PWField : %0*lx\n", field, read_c0_pwfield());
pr_info("PWSize : %0*lx\n", field, read_c0_pwsize());
pr_info("PWCtl : %0x\n", read_c0_pwctl());
}
}
static inline const char *msk2str(unsigned int mask)
{
switch (mask) {
case PM_4K: return "4kb";
case PM_16K: return "16kb";
case PM_64K: return "64kb";
case PM_256K: return "256kb";
#ifdef CONFIG_CPU_CAVIUM_OCTEON
case PM_8K: return "8kb";
case PM_32K: return "32kb";
case PM_128K: return "128kb";
case PM_512K: return "512kb";
case PM_2M: return "2Mb";
case PM_8M: return "8Mb";
case PM_32M: return "32Mb";
#endif
}
return "";
}
static void dump_tlb(int first, int last)
{
unsigned long s_entryhi, entryhi, asid, mmid;
unsigned long long entrylo0, entrylo1, pa;
unsigned int s_index, s_pagemask, s_guestctl1 = 0;
unsigned int pagemask, guestctl1 = 0, c0, c1, i;
unsigned long asidmask = cpu_asid_mask(¤t_cpu_data);
int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
unsigned long s_mmid;
#ifdef CONFIG_32BIT
bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
int pwidth = xpa ? 11 : 8;
int vwidth = 8;
#else
bool xpa = false;
int pwidth = 11;
int vwidth = 11;
#endif
s_pagemask = read_c0_pagemask();
s_entryhi = read_c0_entryhi();
s_index = read_c0_index();
if (cpu_has_mmid)
asid = s_mmid = read_c0_memorymapid();
else
asid = s_entryhi & asidmask;
if (cpu_has_guestid)
s_guestctl1 = read_c0_guestctl1();
for (i = first; i <= last; i++) {
write_c0_index(i);
mtc0_tlbr_hazard();
tlb_read();
tlb_read_hazard();
pagemask = read_c0_pagemask();
entryhi = read_c0_entryhi();
entrylo0 = read_c0_entrylo0();
entrylo1 = read_c0_entrylo1();
if (cpu_has_mmid)
mmid = read_c0_memorymapid();
else
mmid = entryhi & asidmask;
if (cpu_has_guestid)
guestctl1 = read_c0_guestctl1();
/* EHINV bit marks entire entry as invalid */
if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
continue;
/*
* Prior to tlbinv, unused entries have a virtual address of
* CKSEG0.
*/
if ((entryhi & ~0x1ffffUL) == CKSEG0)
continue;
/*
* ASID takes effect in absence of G (global) bit.
* We check both G bits, even though architecturally they should
* match one another, because some revisions of the SB1 core may
* leave only a single G bit set after a machine check exception
* due to duplicate TLB entry.
*/
if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid))
continue;
/*
* Only print entries in use
*/
printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));
c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
pr_cont("va=%0*lx asid=%0*lx",
vwidth, (entryhi & ~0x1fffUL),
asidwidth, mmid);
if (cpu_has_guestid)
pr_cont(" gid=%02lx",
(guestctl1 & MIPS_GCTL1_RID)
>> MIPS_GCTL1_RID_SHIFT);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo0() << 30;
pa = (pa << 6) & PAGE_MASK;
pr_cont("\n\t[");
if (cpu_has_rixi)
pr_cont("ri=%d xi=%d ",
(entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
(entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
pwidth, pa, c0,
(entrylo0 & ENTRYLO_D) ? 1 : 0,
(entrylo0 & ENTRYLO_V) ? 1 : 0,
(entrylo0 & ENTRYLO_G) ? 1 : 0);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo1() << 30;
pa = (pa << 6) & PAGE_MASK;
if (cpu_has_rixi)
pr_cont("ri=%d xi=%d ",
(entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
(entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
pwidth, pa, c1,
(entrylo1 & ENTRYLO_D) ? 1 : 0,
(entrylo1 & ENTRYLO_V) ? 1 : 0,
(entrylo1 & ENTRYLO_G) ? 1 : 0);
}
printk("\n");
write_c0_entryhi(s_entryhi);
write_c0_index(s_index);
write_c0_pagemask(s_pagemask);
if (cpu_has_guestid)
write_c0_guestctl1(s_guestctl1);
}
void dump_tlb_all(void)
{
dump_tlb(0, current_cpu_data.tlbsize - 1);
}
| linux-master | arch/mips/lib/dump_tlb.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Thiemo Seufer
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
* Author: Maciej W. Rozycki <[email protected]>
*/
#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>
#ifndef CKSEG2
#define CKSEG2 CKSSEG
#endif
#ifndef TO_PHYS_MASK
#define TO_PHYS_MASK -1
#endif
/*
* FUNC is executed in one of the uncached segments, depending on its
* original address as follows:
*
* 1. If the original address is in CKSEG0 or CKSEG1, then the uncached
* segment used is CKSEG1.
* 2. If the original address is in XKPHYS, then the uncached segment
* used is XKPHYS(2).
* 3. Otherwise it's a bug.
*
* The same remapping is done with the stack pointer. Stack handling
* works because we don't handle stack arguments or more complex return
* values, so we can avoid sharing the same stack area between a cached
* and the uncached mode.
*/
unsigned long run_uncached(void *func)
{
register long ret __asm__("$2");
long lfunc = (long)func, ufunc;
long usp;
long sp;
__asm__("move %0, $sp" : "=r" (sp));
if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
usp = CKSEG1ADDR(sp);
#ifdef CONFIG_64BIT
else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0, 0) &&
(long long)sp < (long long)PHYS_TO_XKPHYS(8, 0))
usp = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
XKPHYS_TO_PHYS((long long)sp));
#endif
else {
BUG();
usp = sp;
}
if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
ufunc = CKSEG1ADDR(lfunc);
#ifdef CONFIG_64BIT
else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0, 0) &&
(long long)lfunc < (long long)PHYS_TO_XKPHYS(8, 0))
ufunc = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
XKPHYS_TO_PHYS((long long)lfunc));
#endif
else {
BUG();
ufunc = lfunc;
}
__asm__ __volatile__ (
" move $16, $sp\n"
" move $sp, %1\n"
" jalr %2\n"
" move $sp, $16"
: "=r" (ret)
: "r" (usp), "r" (ufunc)
: "$16", "$31");
return ret;
}
| linux-master | arch/mips/lib/uncached.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle ([email protected])
* Copyright (c) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/irqflags.h>
#include <linux/export.h>
/**
* __mips_set_bit - Atomically set a bit in memory. This is called by
* set_bit() if it cannot find a faster solution.
* @nr: the bit to set
* @addr: the address to start counting from
*/
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = &addr[BIT_WORD(nr)];
unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a |= mask;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_set_bit);
/**
* __mips_clear_bit - Clears a bit in memory. This is called by clear_bit() if
* it cannot find a faster solution.
* @nr: Bit to clear
* @addr: Address to start counting from
*/
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = &addr[BIT_WORD(nr)];
unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a &= ~mask;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_clear_bit);
/**
* __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
* if it cannot find a faster solution.
* @nr: Bit to change
* @addr: Address to start counting from
*/
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = &addr[BIT_WORD(nr)];
unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a ^= mask;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_change_bit);
/**
* __mips_test_and_set_bit_lock - Set a bit and return its old value. This is
* called by test_and_set_bit_lock() if it cannot find a faster solution.
* @nr: Bit to set
* @addr: Address to count from
*/
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
volatile unsigned long *a = &addr[BIT_WORD(nr)];
unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
int res;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a) != 0;
*a |= mask;
raw_local_irq_restore(flags);
return res;
}
EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
/**
* __mips_test_and_clear_bit - Clear a bit and return its old value. This is
* called by test_and_clear_bit() if it cannot find a faster solution.
* @nr: Bit to clear
* @addr: Address to count from
*/
int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = &addr[BIT_WORD(nr)];
unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
int res;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a) != 0;
*a &= ~mask;
raw_local_irq_restore(flags);
return res;
}
EXPORT_SYMBOL(__mips_test_and_clear_bit);
/**
* __mips_test_and_change_bit - Change a bit and return its old value. This is
* called by test_and_change_bit() if it cannot find a faster solution.
* @nr: Bit to change
* @addr: Address to count from
*/
int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = &addr[BIT_WORD(nr)];
unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
int res;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a) != 0;
*a ^= mask;
raw_local_irq_restore(flags);
return res;
}
EXPORT_SYMBOL(__mips_test_and_change_bit);
| linux-master | arch/mips/lib/bitops.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/io.h>
/**
* __ioread64_copy - copy data from MMIO space, in 64-bit units
* @to: destination (must be 64-bit aligned)
* @from: source, in MMIO space (must be 64-bit aligned)
* @count: number of 64-bit quantities to copy
*
* Copy data from MMIO space to kernel space, in units of 32 or 64 bits at a
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
void __ioread64_copy(void *to, const void __iomem *from, size_t count)
{
#ifdef CONFIG_64BIT
u64 *dst = to;
const u64 __iomem *src = from;
const u64 __iomem *end = src + count;
while (src < end)
*dst++ = __raw_readq(src++);
#else
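	/* No 64-bit accessor available: do two 32-bit MMIO reads per quantity. */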
__ioread32_copy(to, from, count * 2);
#endif
}
EXPORT_SYMBOL_GPL(__ioread64_copy);
| linux-master | arch/mips/lib/iomap_copy.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* A small micro-assembler. It is intentionally kept simple, does only
* support a subset of instructions, and does not try to hide pipeline
* effects like branch delay slots.
*
* Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle ([email protected])
* Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
#include <asm/uasm.h>
#define RS_MASK 0x1f
#define RS_SH 16
#define RT_MASK 0x1f
#define RT_SH 21
#define SCIMM_MASK 0x3ff
#define SCIMM_SH 16
/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f) \
((a) << OP_SH \
| (b) << RT_SH \
| (c) << RS_SH \
| (d) << RD_SH \
| (e) << RE_SH \
| (f) << FUNC_SH)
#include "uasm.c"
static const struct insn insn_table_MM[insn_invalid] = {
[insn_addu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD},
[insn_addiu] = {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
[insn_and] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD},
[insn_andi] = {M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM},
[insn_beq] = {M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
[insn_beql] = {0, 0},
[insn_bgez] = {M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM},
[insn_bgezl] = {0, 0},
[insn_bltz] = {M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM},
[insn_bltzl] = {0, 0},
[insn_bne] = {M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM},
[insn_cache] = {M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM},
[insn_cfc1] = {M(mm_pool32f_op, 0, 0, 0, mm_cfc1_op, mm_32f_73_op), RT | RS},
[insn_cfcmsa] = {M(mm_pool32s_op, 0, msa_cfc_op, 0, 0, mm_32s_elm_op), RD | RE},
[insn_ctc1] = {M(mm_pool32f_op, 0, 0, 0, mm_ctc1_op, mm_32f_73_op), RT | RS},
[insn_ctcmsa] = {M(mm_pool32s_op, 0, msa_ctc_op, 0, 0, mm_32s_elm_op), RD | RE},
[insn_daddu] = {0, 0},
[insn_daddiu] = {0, 0},
[insn_di] = {M(mm_pool32a_op, 0, 0, 0, mm_di_op, mm_pool32axf_op), RS},
[insn_divu] = {M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS},
[insn_dmfc0] = {0, 0},
[insn_dmtc0] = {0, 0},
[insn_dsll] = {0, 0},
[insn_dsll32] = {0, 0},
[insn_dsra] = {0, 0},
[insn_dsrl] = {0, 0},
[insn_dsrl32] = {0, 0},
[insn_drotr] = {0, 0},
[insn_drotr32] = {0, 0},
[insn_dsubu] = {0, 0},
[insn_eret] = {M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0},
[insn_ins] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE},
[insn_ext] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE},
[insn_j] = {M(mm_j32_op, 0, 0, 0, 0, 0), JIMM},
[insn_jal] = {M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM},
[insn_jalr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS},
[insn_jr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS},
[insn_lb] = {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
[insn_ld] = {0, 0},
[insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
[insn_ll] = {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM},
[insn_lld] = {0, 0},
[insn_lui] = {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},
[insn_lw] = {M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
[insn_mfc0] = {M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD},
[insn_mfhi] = {M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS},
[insn_mflo] = {M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS},
[insn_mtc0] = {M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD},
[insn_mthi] = {M(mm_pool32a_op, 0, 0, 0, mm_mthi32_op, mm_pool32axf_op), RS},
[insn_mtlo] = {M(mm_pool32a_op, 0, 0, 0, mm_mtlo32_op, mm_pool32axf_op), RS},
[insn_mul] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD},
[insn_or] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD},
[insn_ori] = {M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM},
[insn_pref] = {M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM},
[insn_rfe] = {0, 0},
[insn_sc] = {M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM},
[insn_scd] = {0, 0},
[insn_sd] = {0, 0},
[insn_sll] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD},
[insn_sllv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD},
[insn_slt] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD},
[insn_sltiu] = {M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
[insn_sltu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD},
[insn_sra] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD},
[insn_srav] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srav_op), RT | RS | RD},
[insn_srl] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD},
[insn_srlv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD},
[insn_rotr] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD},
[insn_subu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD},
[insn_sw] = {M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
[insn_sync] = {M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS},
[insn_tlbp] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0},
[insn_tlbr] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0},
[insn_tlbwi] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0},
[insn_tlbwr] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0},
[insn_wait] = {M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM},
[insn_wsbh] = {M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS},
[insn_xor] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD},
[insn_xori] = {M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM},
[insn_dins] = {0, 0},
[insn_dinsm] = {0, 0},
[insn_syscall] = {M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
[insn_bbit0] = {0, 0},
[insn_bbit1] = {0, 0},
[insn_lwx] = {0, 0},
[insn_ldx] = {0, 0},
};
#undef M
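/*
 * microMIPS branch offsets are halfword aligned: the 17-bit signed byte
 * offset is encoded as a sign bit plus bits 15:1 of the offset.
 */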
static inline u32 build_bimm(s32 arg)
{
WARN(arg > 0xffff || arg < -0x10000,
KERN_WARNING "Micro-assembler field overflow\n");
WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
}
static inline u32 build_jimm(u32 arg)
{
WARN(arg & ~((JIMM_MASK << 2) | 1),
KERN_WARNING "Micro-assembler field overflow\n");
return (arg >> 1) & JIMM_MASK;
}
/*
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
*/
static void build_insn(u32 **buf, enum opcode opc, ...)
{
const struct insn *ip;
va_list ap;
u32 op;
if (opc < 0 || opc >= insn_invalid ||
(opc == insn_daddiu && r4k_daddiu_bug()) ||
(insn_table_MM[opc].match == 0 && insn_table_MM[opc].fields == 0))
panic("Unsupported Micro-assembler instruction %d", opc);
ip = &insn_table_MM[opc];
op = ip->match;
va_start(ap, opc);
if (ip->fields & RS) {
if (opc == insn_mfc0 || opc == insn_mtc0 ||
opc == insn_cfc1 || opc == insn_ctc1)
op |= build_rt(va_arg(ap, u32));
else
op |= build_rs(va_arg(ap, u32));
}
if (ip->fields & RT) {
if (opc == insn_mfc0 || opc == insn_mtc0 ||
opc == insn_cfc1 || opc == insn_ctc1)
op |= build_rs(va_arg(ap, u32));
else
op |= build_rt(va_arg(ap, u32));
}
if (ip->fields & RD)
op |= build_rd(va_arg(ap, u32));
if (ip->fields & RE)
op |= build_re(va_arg(ap, u32));
if (ip->fields & SIMM)
op |= build_simm(va_arg(ap, s32));
if (ip->fields & UIMM)
op |= build_uimm(va_arg(ap, u32));
if (ip->fields & BIMM)
op |= build_bimm(va_arg(ap, s32));
if (ip->fields & JIMM)
op |= build_jimm(va_arg(ap, u32));
if (ip->fields & FUNC)
op |= build_func(va_arg(ap, u32));
if (ip->fields & SET)
op |= build_set(va_arg(ap, u32));
if (ip->fields & SCIMM)
op |= build_scimm(va_arg(ap, u32));
va_end(ap);
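	/*
	 * microMIPS encodings are streams of 16-bit halfwords, so on a
	 * little-endian kernel the two halves of the 32-bit word are swapped
	 * before being stored.
	 */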
#ifdef CONFIG_CPU_LITTLE_ENDIAN
**buf = ((op & 0xffff) << 16) | (op >> 16);
#else
**buf = op;
#endif
(*buf)++;
}
static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
long laddr = (long)lab->addr;
long raddr = (long)rel->addr;
switch (rel->type) {
case R_MIPS_PC16:
#ifdef CONFIG_CPU_LITTLE_ENDIAN
*rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
#else
*rel->addr |= build_bimm(laddr - (raddr + 4));
#endif
break;
default:
panic("Unsupported Micro-assembler relocation %d",
rel->type);
}
}
| linux-master | arch/mips/mm/uasm-micromips.c |
/*
* MIPS Huge TLB Page Support for Kernel.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002, Rohit Seth <[email protected]>
* Copyright 2005, Embedded Alley Solutions, Inc.
* Matt Porter <[email protected]>
* Copyright (C) 2008, 2009 Cavium Networks, Inc.
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
p4d = p4d_alloc(mm, pgd, addr);
pud = pud_alloc(mm, p4d, addr);
if (pud)
pte = (pte_t *)pmd_alloc(mm, pud, addr);
return pte;
}
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd = NULL;
pgd = pgd_offset(mm, addr);
if (pgd_present(*pgd)) {
p4d = p4d_offset(pgd, addr);
if (p4d_present(*p4d)) {
pud = pud_offset(p4d, addr);
if (pud_present(*pud))
pmd = pmd_offset(pud, addr);
}
}
return (pte_t *) pmd;
}
int pmd_huge(pmd_t pmd)
{
return (pmd_val(pmd) & _PAGE_HUGE) != 0;
}
int pud_huge(pud_t pud)
{
return (pud_val(pud) & _PAGE_HUGE) != 0;
}
| linux-master | arch/mips/mm/hugetlbpage.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 by Ralf Baechle
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
void pgd_init(void *addr)
{
unsigned long *p = (unsigned long *)addr;
int i;
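	/* Point each entry at the shared invalid_pte_table, eight per iteration. */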
for (i = 0; i < USER_PTRS_PER_PGD; i+=8) {
p[i + 0] = (unsigned long) invalid_pte_table;
p[i + 1] = (unsigned long) invalid_pte_table;
p[i + 2] = (unsigned long) invalid_pte_table;
p[i + 3] = (unsigned long) invalid_pte_table;
p[i + 4] = (unsigned long) invalid_pte_table;
p[i + 5] = (unsigned long) invalid_pte_table;
p[i + 6] = (unsigned long) invalid_pte_table;
p[i + 7] = (unsigned long) invalid_pte_table;
}
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
pmd_t pmd;
pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
return pmd;
}
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
}
#endif /* defined(CONFIG_TRANSPARENT_HUGEPAGE) */
void __init pagetable_init(void)
{
unsigned long vaddr;
pgd_t *pgd_base;
#ifdef CONFIG_HIGHMEM
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
#endif
/* Initialize the entire pgd. */
pgd_init(swapper_pg_dir);
pgd_init(&swapper_pg_dir[USER_PTRS_PER_PGD]);
pgd_base = swapper_pg_dir;
/*
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
#ifdef CONFIG_HIGHMEM
/*
* Permanent kmaps:
*/
vaddr = PKMAP_BASE;
fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
pgd = swapper_pg_dir + pgd_index(vaddr);
p4d = p4d_offset(pgd, vaddr);
pud = pud_offset(p4d, vaddr);
pmd = pmd_offset(pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
pkmap_page_table = pte;
#endif
}
| linux-master | arch/mips/mm/pgtable-32.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* A small micro-assembler. It is intentionally kept simple, does only
* support a subset of instructions, and does not try to hide pipeline
* effects like branch delay slots.
*
* Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle ([email protected])
* Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
*/
enum fields {
RS = 0x001,
RT = 0x002,
RD = 0x004,
RE = 0x008,
SIMM = 0x010,
UIMM = 0x020,
BIMM = 0x040,
JIMM = 0x080,
FUNC = 0x100,
SET = 0x200,
SCIMM = 0x400,
SIMM9 = 0x800,
};
#define OP_MASK 0x3f
#define OP_SH 26
#define RD_MASK 0x1f
#define RD_SH 11
#define RE_MASK 0x1f
#define RE_SH 6
#define IMM_MASK 0xffff
#define IMM_SH 0
#define JIMM_MASK 0x3ffffff
#define JIMM_SH 0
#define FUNC_MASK 0x3f
#define FUNC_SH 0
#define SET_MASK 0x7
#define SET_SH 0
#define SIMM9_SH 7
#define SIMM9_MASK 0x1ff
enum opcode {
insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1,
insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bgtz, insn_blez,
insn_bltz, insn_bltzl, insn_bne, insn_break, insn_cache, insn_cfc1,
insn_cfcmsa, insn_ctc1, insn_ctcmsa, insn_daddiu, insn_daddu, insn_ddivu,
insn_ddivu_r6, insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu,
insn_divu_r6, insn_dmfc0, insn_dmodu, insn_dmtc0, insn_dmultu,
insn_dmulu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd, insn_dsll,
insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav, insn_dsrl,
insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext, insn_ins,
insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu, insn_ld,
insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld,
insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi,
insn_mflo, insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0,
insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_muhu, insn_nor,
insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc,
insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll,
insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra,
insn_srav, insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync,
insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait,
insn_wsbh, insn_xor, insn_xori, insn_yield,
insn_invalid /* insn_invalid must be last */
};
struct insn {
u32 match;
enum fields fields;
};
static inline u32 build_rs(u32 arg)
{
WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RS_MASK) << RS_SH;
}
static inline u32 build_rt(u32 arg)
{
WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RT_MASK) << RT_SH;
}
static inline u32 build_rd(u32 arg)
{
WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RD_MASK) << RD_SH;
}
static inline u32 build_re(u32 arg)
{
WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RE_MASK) << RE_SH;
}
static inline u32 build_simm(s32 arg)
{
WARN(arg > 0x7fff || arg < -0x8000,
KERN_WARNING "Micro-assembler field overflow\n");
return arg & 0xffff;
}
static inline u32 build_uimm(u32 arg)
{
WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & IMM_MASK;
}
static inline u32 build_scimm(u32 arg)
{
WARN(arg & ~SCIMM_MASK,
KERN_WARNING "Micro-assembler field overflow\n");
return (arg & SCIMM_MASK) << SCIMM_SH;
}
static inline u32 build_scimm9(s32 arg)
{
WARN((arg > 0xff || arg < -0x100),
KERN_WARNING "Micro-assembler field overflow\n");
return (arg & SIMM9_MASK) << SIMM9_SH;
}
static inline u32 build_func(u32 arg)
{
WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & FUNC_MASK;
}
static inline u32 build_set(u32 arg)
{
WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & SET_MASK;
}
static void build_insn(u32 **buf, enum opcode opc, ...);
#define I_u1u2u3(op) \
Ip_u1u2u3(op) \
{ \
build_insn(buf, insn##op, a, b, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_s3s1s2(op) \
Ip_s3s1s2(op) \
{ \
build_insn(buf, insn##op, b, c, a); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1u3(op) \
Ip_u2u1u3(op) \
{ \
build_insn(buf, insn##op, b, a, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u3u2u1(op) \
Ip_u3u2u1(op) \
{ \
build_insn(buf, insn##op, c, b, a); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u3u1u2(op) \
Ip_u3u1u2(op) \
{ \
build_insn(buf, insn##op, b, c, a); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u1u2s3(op) \
Ip_u1u2s3(op) \
{ \
build_insn(buf, insn##op, a, b, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2s3u1(op) \
Ip_u2s3u1(op) \
{ \
build_insn(buf, insn##op, c, a, b); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1s3(op) \
Ip_u2u1s3(op) \
{ \
build_insn(buf, insn##op, b, a, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1msbu3(op) \
Ip_u2u1msbu3(op) \
{ \
build_insn(buf, insn##op, b, a, c+d-1, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1msb32u3(op) \
Ip_u2u1msbu3(op) \
{ \
build_insn(buf, insn##op, b, a, c+d-33, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1msb32msb3(op) \
Ip_u2u1msbu3(op) \
{ \
build_insn(buf, insn##op, b, a, c+d-33, c-32); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1msbdu3(op) \
Ip_u2u1msbu3(op) \
{ \
build_insn(buf, insn##op, b, a, d-1, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u1u2(op) \
Ip_u1u2(op) \
{ \
build_insn(buf, insn##op, a, b); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1(op) \
Ip_u1u2(op) \
{ \
build_insn(buf, insn##op, b, a); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u1s2(op) \
Ip_u1s2(op) \
{ \
build_insn(buf, insn##op, a, b); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u1(op) \
Ip_u1(op) \
{ \
build_insn(buf, insn##op, a); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_0(op) \
Ip_0(op) \
{ \
build_insn(buf, insn##op); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
I_u2u1s3(_addiu)
I_u3u1u2(_addu)
I_u2u1u3(_andi)
I_u3u1u2(_and)
I_u1u2s3(_beq)
I_u1u2s3(_beql)
I_u1s2(_bgez)
I_u1s2(_bgezl)
I_u1s2(_bgtz)
I_u1s2(_blez)
I_u1s2(_bltz)
I_u1s2(_bltzl)
I_u1u2s3(_bne)
I_u1(_break)
I_u2s3u1(_cache)
I_u1u2(_cfc1)
I_u2u1(_cfcmsa)
I_u1u2(_ctc1)
I_u2u1(_ctcmsa)
I_u1u2(_ddivu)
I_u3u1u2(_ddivu_r6)
I_u1u2u3(_dmfc0)
I_u3u1u2(_dmodu)
I_u1u2u3(_dmtc0)
I_u1u2(_dmultu)
I_u3u1u2(_dmulu)
I_u2u1s3(_daddiu)
I_u3u1u2(_daddu)
I_u1(_di);
I_u1u2(_divu)
I_u3u1u2(_divu_r6)
I_u2u1(_dsbh);
I_u2u1(_dshd);
I_u2u1u3(_dsll)
I_u2u1u3(_dsll32)
I_u3u2u1(_dsllv)
I_u2u1u3(_dsra)
I_u2u1u3(_dsra32)
I_u3u2u1(_dsrav)
I_u2u1u3(_dsrl)
I_u2u1u3(_dsrl32)
I_u3u2u1(_dsrlv)
I_u2u1u3(_drotr)
I_u2u1u3(_drotr32)
I_u3u1u2(_dsubu)
I_0(_eret)
I_u2u1msbdu3(_ext)
I_u2u1msbu3(_ins)
I_u1(_j)
I_u1(_jal)
I_u2u1(_jalr)
I_u1(_jr)
I_u2s3u1(_lb)
I_u2s3u1(_lbu)
I_u2s3u1(_ld)
I_u2s3u1(_lh)
I_u2s3u1(_lhu)
I_u2s3u1(_ll)
I_u2s3u1(_lld)
I_u1s2(_lui)
I_u2s3u1(_lw)
I_u2s3u1(_lwu)
I_u1u2u3(_mfc0)
I_u1u2u3(_mfhc0)
I_u3u1u2(_modu)
I_u3u1u2(_movn)
I_u3u1u2(_movz)
I_u1(_mfhi)
I_u1(_mflo)
I_u1u2u3(_mtc0)
I_u1u2u3(_mthc0)
I_u1(_mthi)
I_u1(_mtlo)
I_u3u1u2(_mul)
I_u1u2(_multu)
I_u3u1u2(_mulu)
I_u3u1u2(_muhu)
I_u3u1u2(_nor)
I_u3u1u2(_or)
I_u2u1u3(_ori)
I_0(_rfe)
I_u2s3u1(_sb)
I_u2s3u1(_sc)
I_u2s3u1(_scd)
I_u2s3u1(_sd)
I_u3u1u2(_seleqz)
I_u3u1u2(_selnez)
I_u2s3u1(_sh)
I_u2u1u3(_sll)
I_u3u2u1(_sllv)
I_s3s1s2(_slt)
I_u2u1s3(_slti)
I_u2u1s3(_sltiu)
I_u3u1u2(_sltu)
I_u2u1u3(_sra)
I_u3u2u1(_srav)
I_u2u1u3(_srl)
I_u3u2u1(_srlv)
I_u2u1u3(_rotr)
I_u3u1u2(_subu)
I_u2s3u1(_sw)
I_u1(_sync)
I_0(_tlbp)
I_0(_tlbr)
I_0(_tlbwi)
I_0(_tlbwr)
I_u1(_wait);
I_u2u1(_wsbh)
I_u3u1u2(_xor)
I_u2u1u3(_xori)
I_u2u1(_yield)
I_u2u1msbu3(_dins);
I_u2u1msb32u3(_dinsm);
I_u2u1msb32msb3(_dinsu);
I_u1(_syscall);
I_u1u2s3(_bbit0);
I_u1u2s3(_bbit1);
I_u3u1u2(_lwx)
I_u3u1u2(_ldx)
I_u1u2(_ldpte)
I_u2u1u3(_lddir)
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h>
void uasm_i_pref(u32 **buf, unsigned int a, signed int b,
unsigned int c)
{
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && a <= 24 && a != 5)
/*
* As per erratum Core-14449, replace prefetches 0-4,
* 6-24 with 'pref 28'.
*/
build_insn(buf, insn_pref, c, 28, b);
else
build_insn(buf, insn_pref, c, a, b);
}
UASM_EXPORT_SYMBOL(uasm_i_pref);
#else
I_u2s3u1(_pref)
#endif
/* Handle labels. */
void uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{
(*lab)->addr = addr;
(*lab)->lab = lid;
(*lab)++;
}
UASM_EXPORT_SYMBOL(uasm_build_label);
int uasm_in_compat_space_p(long addr)
{
/* Is this address in 32bit compat space? */
return addr == (int)addr;
}
UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
static int uasm_rel_highest(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
#else
return 0;
#endif
}
static int uasm_rel_higher(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
#else
return 0;
#endif
}
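/*
 * uasm_rel_lo() sign-extends the low 16 bits of an address and uasm_rel_hi()
 * returns the matching upper half, adjusted so that (hi << 16) + lo
 * reconstructs the original value despite the sign extension of lo.
 */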
int uasm_rel_hi(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(uasm_rel_hi);
int uasm_rel_lo(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(uasm_rel_lo);
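/*
 * Load everything except the low 16 bits of a constant address into rs;
 * UASM_i_LA() below appends the final addiu/daddiu of the low half when
 * it is non-zero.
 */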
void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
if (!uasm_in_compat_space_p(addr)) {
uasm_i_lui(buf, rs, uasm_rel_highest(addr));
if (uasm_rel_higher(addr))
uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
if (uasm_rel_hi(addr)) {
uasm_i_dsll(buf, rs, rs, 16);
uasm_i_daddiu(buf, rs, rs,
uasm_rel_hi(addr));
uasm_i_dsll(buf, rs, rs, 16);
} else
uasm_i_dsll32(buf, rs, rs, 0);
} else
uasm_i_lui(buf, rs, uasm_rel_hi(addr));
}
UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
void UASM_i_LA(u32 **buf, unsigned int rs, long addr)
{
UASM_i_LA_mostly(buf, rs, addr);
if (uasm_rel_lo(addr)) {
if (!uasm_in_compat_space_p(addr))
uasm_i_daddiu(buf, rs, rs,
uasm_rel_lo(addr));
else
uasm_i_addiu(buf, rs, rs,
uasm_rel_lo(addr));
}
}
UASM_EXPORT_SYMBOL(UASM_i_LA);
/* Handle relocations. */
void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
{
(*rel)->addr = addr;
(*rel)->type = R_MIPS_PC16;
(*rel)->lab = lid;
(*rel)++;
}
UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
static inline void __resolve_relocs(struct uasm_reloc *rel,
struct uasm_label *lab);
void uasm_resolve_relocs(struct uasm_reloc *rel,
struct uasm_label *lab)
{
struct uasm_label *l;
for (; rel->lab != UASM_LABEL_INVALID; rel++)
for (l = lab; l->lab != UASM_LABEL_INVALID; l++)
if (rel->lab == l->lab)
__resolve_relocs(rel, l);
}
UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end,
long off)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++)
if (rel->addr >= first && rel->addr < end)
rel->addr += off;
}
UASM_EXPORT_SYMBOL(uasm_move_relocs);
void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end,
long off)
{
for (; lab->lab != UASM_LABEL_INVALID; lab++)
if (lab->addr >= first && lab->addr < end)
lab->addr += off;
}
UASM_EXPORT_SYMBOL(uasm_move_labels);
void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
u32 *first, u32 *end, u32 *target)
{
long off = (long)(target - first);
memcpy(target, first, (end - first) * sizeof(u32));
uasm_move_relocs(rel, first, end, off);
uasm_move_labels(lab, first, end, off);
}
UASM_EXPORT_SYMBOL(uasm_copy_handler);
int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr
&& (rel->type == R_MIPS_PC16
|| rel->type == R_MIPS_26))
return 1;
}
return 0;
}
UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
/* Convenience functions for labeled branches. */
void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bltz(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bltz);
void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_b(p, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_b);
void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
unsigned int r2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_beq(p, r1, r2, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_beq);
void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_beqz(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_beqz);
void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_beqzl(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_beqzl);
void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
unsigned int reg2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bne(p, reg1, reg2, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bne);
void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bnez(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bnez);
void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bgezl(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bgezl);
void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bgez(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bgez);
void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bbit0(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bbit0);
void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bbit1(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bbit1);
| linux-master | arch/mips/mm/uasm.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/uaccess.h>
#include <linux/kernel.h>
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
/* highest bit set means kernel space */
return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1);
}
| linux-master | arch/mips/mm/maccess.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#include <asm/bcache.h>
#include <asm/debug.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/init.h>
static ssize_t sc_prefetch_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
bool enabled = bc_prefetch_is_enabled();
char buf[3];
buf[0] = enabled ? 'Y' : 'N';
buf[1] = '\n';
buf[2] = 0;
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t sc_prefetch_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
bool enabled;
int err;
err = kstrtobool_from_user(user_buf, count, &enabled);
if (err)
return err;
if (enabled)
bc_prefetch_enable();
else
bc_prefetch_disable();
return count;
}
static const struct file_operations sc_prefetch_fops = {
.open = simple_open,
.llseek = default_llseek,
.read = sc_prefetch_read,
.write = sc_prefetch_write,
};
static int __init sc_debugfs_init(void)
{
struct dentry *dir;
dir = debugfs_create_dir("l2cache", mips_debugfs_dir);
debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, NULL,
&sc_prefetch_fops);
return 0;
}
late_initcall(sc_debugfs_init);
| linux-master | arch/mips/mm/sc-debugfs.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996 David S. Miller ([email protected])
* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle ([email protected])
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/mips-cps.h>
/*
* Bits describing what cache ops an SMP callback function may perform.
*
* R4K_HIT - Virtual user or kernel address based cache operations. The
* active_mm must be checked before using user addresses, falling
* back to kmap.
* R4K_INDEX - Index based cache operations.
*/
#define R4K_HIT BIT(0)
#define R4K_INDEX BIT(1)
/**
* r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core.
* @type: Type of cache operations (R4K_HIT or R4K_INDEX).
*
* Decides whether a cache op needs to be performed on every core in the system.
* This may change depending on the @type of cache operation, as well as the set
* of online CPUs, so preemption should be disabled by the caller to prevent CPU
* hotplug from changing the result.
*
* Returns: 1 if the cache operation @type should be done on every core in
* the system.
* 0 if the cache operation @type is globalized and only needs to
 *		be performed on a single CPU.
*/
static inline bool r4k_op_needs_ipi(unsigned int type)
{
/* The MIPS Coherence Manager (CM) globalizes address-based cache ops */
if (type == R4K_HIT && mips_cm_present())
return false;
/*
* Hardware doesn't globalize the required cache ops, so SMP calls may
* be needed, but only if there are foreign CPUs (non-siblings with
* separate caches).
*/
/* cpu_foreign_map[] undeclared when !CONFIG_SMP */
#ifdef CONFIG_SMP
return !cpumask_empty(&cpu_foreign_map[0]);
#else
return false;
#endif
}
/*
* Special Variant of smp_call_function for use by cache functions:
*
* o No return value
* o collapses to normal function call on UP kernels
* o collapses to normal function call on systems with a single shared
* primary cache.
* o doesn't disable interrupts on the local CPU
*/
static inline void r4k_on_each_cpu(unsigned int type,
void (*func)(void *info), void *info)
{
preempt_disable();
if (r4k_op_needs_ipi(type))
smp_call_function_many(&cpu_foreign_map[smp_processor_id()],
func, info, 1);
func(info);
preempt_enable();
}
/*
* Must die.
*/
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long vcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
#define R4600_HIT_CACHEOP_WAR_IMPL \
do { \
if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && \
cpu_is_r4600_v2_x()) \
*(volatile unsigned long *)CKSEG1; \
if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP)) \
__asm__ __volatile__("nop;nop;nop;nop"); \
} while (0)
static void (*r4k_blast_dcache_page)(unsigned long addr);
static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
R4600_HIT_CACHEOP_WAR_IMPL;
blast_dcache32_page(addr);
}
static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
blast_dcache64_page(addr);
}
static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
{
blast_dcache128_page(addr);
}
static void r4k_blast_dcache_page_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
switch (dc_lsize) {
case 0:
r4k_blast_dcache_page = (void *)cache_noop;
break;
case 16:
r4k_blast_dcache_page = blast_dcache16_page;
break;
case 32:
r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
break;
case 64:
r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
break;
case 128:
r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
break;
default:
break;
}
}
#ifndef CONFIG_EVA
#define r4k_blast_dcache_user_page r4k_blast_dcache_page
#else
static void (*r4k_blast_dcache_user_page)(unsigned long addr);
static void r4k_blast_dcache_user_page_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
if (dc_lsize == 0)
r4k_blast_dcache_user_page = (void *)cache_noop;
else if (dc_lsize == 16)
r4k_blast_dcache_user_page = blast_dcache16_user_page;
else if (dc_lsize == 32)
r4k_blast_dcache_user_page = blast_dcache32_user_page;
else if (dc_lsize == 64)
r4k_blast_dcache_user_page = blast_dcache64_user_page;
}
#endif
void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);
static void r4k_blast_dcache_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
if (dc_lsize == 0)
r4k_blast_dcache = (void *)cache_noop;
else if (dc_lsize == 16)
r4k_blast_dcache = blast_dcache16;
else if (dc_lsize == 32)
r4k_blast_dcache = blast_dcache32;
else if (dc_lsize == 64)
r4k_blast_dcache = blast_dcache64;
else if (dc_lsize == 128)
r4k_blast_dcache = blast_dcache128;
}
/* force code alignment (used for CONFIG_WAR_TX49XX_ICACHE_INDEX_INV) */
#define JUMP_TO_ALIGN(order) \
__asm__ __volatile__( \
"b\t1f\n\t" \
".align\t" #order "\n\t" \
"1:\n\t" \
)
#define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
static inline void blast_r4600_v1_icache32(void)
{
unsigned long flags;
local_irq_save(flags);
blast_icache32();
local_irq_restore(flags);
}
static inline void tx49_blast_icache32(void)
{
unsigned long start = INDEX_BASE;
unsigned long end = start + current_cpu_data.icache.waysize;
unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
unsigned long ws_end = current_cpu_data.icache.ways <<
current_cpu_data.icache.waybit;
unsigned long ws, addr;
CACHE32_UNROLL32_ALIGN2;
/* I'm in even chunk. blast odd chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
cache_unroll(32, kernel_cache, Index_Invalidate_I,
addr | ws, 32);
CACHE32_UNROLL32_ALIGN;
/* I'm in odd chunk. blast even chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start; addr < end; addr += 0x400 * 2)
cache_unroll(32, kernel_cache, Index_Invalidate_I,
addr | ws, 32);
}
static void (* r4k_blast_icache_page)(unsigned long addr);
static void r4k_blast_icache_page_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
if (ic_lsize == 0)
r4k_blast_icache_page = (void *)cache_noop;
else if (ic_lsize == 16)
r4k_blast_icache_page = blast_icache16_page;
else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2EF)
r4k_blast_icache_page = loongson2_blast_icache32_page;
else if (ic_lsize == 32)
r4k_blast_icache_page = blast_icache32_page;
else if (ic_lsize == 64)
r4k_blast_icache_page = blast_icache64_page;
else if (ic_lsize == 128)
r4k_blast_icache_page = blast_icache128_page;
}
#ifndef CONFIG_EVA
#define r4k_blast_icache_user_page r4k_blast_icache_page
#else
static void (*r4k_blast_icache_user_page)(unsigned long addr);
static void r4k_blast_icache_user_page_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
if (ic_lsize == 0)
r4k_blast_icache_user_page = (void *)cache_noop;
else if (ic_lsize == 16)
r4k_blast_icache_user_page = blast_icache16_user_page;
else if (ic_lsize == 32)
r4k_blast_icache_user_page = blast_icache32_user_page;
else if (ic_lsize == 64)
r4k_blast_icache_user_page = blast_icache64_user_page;
}
#endif
void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);
static void r4k_blast_icache_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
if (ic_lsize == 0)
r4k_blast_icache = (void *)cache_noop;
else if (ic_lsize == 16)
r4k_blast_icache = blast_icache16;
else if (ic_lsize == 32) {
if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) &&
cpu_is_r4600_v1_x())
r4k_blast_icache = blast_r4600_v1_icache32;
else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV))
r4k_blast_icache = tx49_blast_icache32;
else if (current_cpu_type() == CPU_LOONGSON2EF)
r4k_blast_icache = loongson2_blast_icache32;
else
r4k_blast_icache = blast_icache32;
} else if (ic_lsize == 64)
r4k_blast_icache = blast_icache64;
else if (ic_lsize == 128)
r4k_blast_icache = blast_icache128;
}
static void (* r4k_blast_scache_page)(unsigned long addr);
static void r4k_blast_scache_page_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
if (scache_size == 0)
r4k_blast_scache_page = (void *)cache_noop;
else if (sc_lsize == 16)
r4k_blast_scache_page = blast_scache16_page;
else if (sc_lsize == 32)
r4k_blast_scache_page = blast_scache32_page;
else if (sc_lsize == 64)
r4k_blast_scache_page = blast_scache64_page;
else if (sc_lsize == 128)
r4k_blast_scache_page = blast_scache128_page;
}
static void (* r4k_blast_scache)(void);
static void r4k_blast_scache_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
if (scache_size == 0)
r4k_blast_scache = (void *)cache_noop;
else if (sc_lsize == 16)
r4k_blast_scache = blast_scache16;
else if (sc_lsize == 32)
r4k_blast_scache = blast_scache32;
else if (sc_lsize == 64)
r4k_blast_scache = blast_scache64;
else if (sc_lsize == 128)
r4k_blast_scache = blast_scache128;
}
static void (*r4k_blast_scache_node)(long node);
static void r4k_blast_scache_node_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
if (current_cpu_type() != CPU_LOONGSON64)
r4k_blast_scache_node = (void *)cache_noop;
else if (sc_lsize == 16)
r4k_blast_scache_node = blast_scache16_node;
else if (sc_lsize == 32)
r4k_blast_scache_node = blast_scache32_node;
else if (sc_lsize == 64)
r4k_blast_scache_node = blast_scache64_node;
else if (sc_lsize == 128)
r4k_blast_scache_node = blast_scache128_node;
}
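/*
 * Flush everything on the local CPU.  Which caches need to be touched depends
 * on the CPU type: CPUs with an inclusive S-cache only blast the S-cache,
 * Loongson64 blasts the S-cache of the local node, BMIPS5000 blasts the
 * S-cache and then issues a SYNC, and everything else blasts both primary
 * caches.
 */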
static inline void local_r4k___flush_cache_all(void * args)
{
switch (current_cpu_type()) {
case CPU_LOONGSON2EF:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
case CPU_R4400MC:
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_R16000:
/*
* These caches are inclusive caches, that is, if something
* is not cached in the S-cache, we know it also won't be
* in one of the primary caches.
*/
r4k_blast_scache();
break;
case CPU_LOONGSON64:
/* Use get_ebase_cpunum() for both NUMA=y/n */
r4k_blast_scache_node(get_ebase_cpunum() >> 2);
break;
case CPU_BMIPS5000:
r4k_blast_scache();
__sync();
break;
default:
r4k_blast_dcache();
r4k_blast_icache();
break;
}
}
static void r4k___flush_cache_all(void)
{
r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
}
/**
* has_valid_asid() - Determine if an mm already has an ASID.
* @mm: Memory map.
* @type: R4K_HIT or R4K_INDEX, type of cache op.
*
* Determines whether @mm already has an ASID on any of the CPUs which cache ops
* of type @type within an r4k_on_each_cpu() call will affect. If
* r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the
* scope of the operation is confined to sibling CPUs, otherwise all online CPUs
* will need to be checked.
*
* Must be called in non-preemptive context.
*
* Returns: 1 if the CPUs affected by @type cache ops have an ASID for @mm.
* 0 otherwise.
*/
static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
{
unsigned int i;
const cpumask_t *mask = cpu_present_mask;
if (cpu_has_mmid)
return cpu_context(0, mm) != 0;
/* cpu_sibling_map[] undeclared when !CONFIG_SMP */
#ifdef CONFIG_SMP
/*
* If r4k_on_each_cpu does SMP calls, it does them to a single VPE in
* each foreign core, so we only need to worry about siblings.
* Otherwise we need to worry about all present CPUs.
*/
if (r4k_op_needs_ipi(type))
mask = &cpu_sibling_map[smp_processor_id()];
#endif
for_each_cpu(i, mask)
if (cpu_context(i, mm))
return 1;
return 0;
}
static void r4k__flush_cache_vmap(void)
{
r4k_blast_dcache();
}
static void r4k__flush_cache_vunmap(void)
{
r4k_blast_dcache();
}
/*
* Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes
* whole caches when vma is executable.
*/
static inline void local_r4k_flush_cache_range(void * args)
{
struct vm_area_struct *vma = args;
int exec = vma->vm_flags & VM_EXEC;
if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
return;
/*
* If dcache can alias, we must blast it since mapping is changing.
* If executable, we must ensure any dirty lines are written back far
* enough to be visible to icache.
*/
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
r4k_blast_dcache();
/* If executable, blast stale lines from icache */
if (exec)
r4k_blast_icache();
}
static void r4k_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
int exec = vma->vm_flags & VM_EXEC;
if (cpu_has_dc_aliases || exec)
r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
}
static inline void local_r4k_flush_cache_mm(void * args)
{
struct mm_struct *mm = args;
if (!has_valid_asid(mm, R4K_INDEX))
return;
	/*
	 * Kludge alert: for obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches, while R1x000 behaves sanely.
	 * R4000SC and R4400SC indexed S-cache ops also invalidate the primary
	 * caches, so we can bail out early.
	 */
if (current_cpu_type() == CPU_R4000SC ||
current_cpu_type() == CPU_R4000MC ||
current_cpu_type() == CPU_R4400SC ||
current_cpu_type() == CPU_R4400MC) {
r4k_blast_scache();
return;
}
r4k_blast_dcache();
}
static void r4k_flush_cache_mm(struct mm_struct *mm)
{
if (!cpu_has_dc_aliases)
return;
r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
}
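/*
 * SMP call callbacks take a single void *info argument, so the per-page flush
 * parameters are bundled into a small struct that is passed through
 * r4k_on_each_cpu() to local_r4k_flush_cache_page().
 */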
struct flush_cache_page_args {
struct vm_area_struct *vma;
unsigned long addr;
unsigned long pfn;
};
static inline void local_r4k_flush_cache_page(void *args)
{
struct flush_cache_page_args *fcp_args = args;
struct vm_area_struct *vma = fcp_args->vma;
unsigned long addr = fcp_args->addr;
struct page *page = pfn_to_page(fcp_args->pfn);
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
int map_coherent = 0;
pmd_t *pmdp;
pte_t *ptep;
void *vaddr;
/*
* If owns no valid ASID yet, cannot possibly have gotten
* this page into the cache.
*/
if (!has_valid_asid(mm, R4K_HIT))
return;
addr &= PAGE_MASK;
pmdp = pmd_off(mm, addr);
ptep = pte_offset_kernel(pmdp, addr);
/*
* If the page isn't marked valid, the page cannot possibly be
* in the cache.
*/
if (!(pte_present(*ptep)))
return;
if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
vaddr = NULL;
else {
struct folio *folio = page_folio(page);
/*
* Use kmap_coherent or kmap_atomic to do flushes for
* another ASID than the current one.
*/
map_coherent = (cpu_has_dc_aliases &&
folio_mapped(folio) &&
!folio_test_dcache_dirty(folio));
if (map_coherent)
vaddr = kmap_coherent(page, addr);
else
vaddr = kmap_atomic(page);
addr = (unsigned long)vaddr;
}
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
vaddr ? r4k_blast_dcache_page(addr) :
r4k_blast_dcache_user_page(addr);
if (exec && !cpu_icache_snoops_remote_store)
r4k_blast_scache_page(addr);
}
if (exec) {
if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
drop_mmu_context(mm);
} else
vaddr ? r4k_blast_icache_page(addr) :
r4k_blast_icache_user_page(addr);
}
if (vaddr) {
if (map_coherent)
kunmap_coherent();
else
kunmap_atomic(vaddr);
}
}
static void r4k_flush_cache_page(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn)
{
struct flush_cache_page_args args;
args.vma = vma;
args.addr = addr;
args.pfn = pfn;
r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
{
r4k_blast_dcache_page((unsigned long) addr);
}
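/*
 * r4k_on_each_cpu() may need to issue SMP calls, which is not safe from
 * atomic context, so in_atomic() callers get a purely local flush instead.
 */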
static void r4k_flush_data_cache_page(unsigned long addr)
{
if (in_atomic())
local_r4k_flush_data_cache_page((void *)addr);
else
r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page,
(void *) addr);
}
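/*
 * Arguments for the icache range flush, again bundled so they fit through the
 * single void *info pointer of the SMP call.  @type selects between
 * address-based (R4K_HIT) and indexed (R4K_INDEX) cache ops; @user selects
 * the "protected" blast variants that may safely touch user addresses.
 */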
struct flush_icache_range_args {
unsigned long start;
unsigned long end;
unsigned int type;
bool user;
};
static inline void __local_r4k_flush_icache_range(unsigned long start,
unsigned long end,
unsigned int type,
bool user)
{
if (!cpu_has_ic_fills_f_dc) {
if (type == R4K_INDEX ||
(type & R4K_INDEX && end - start >= dcache_size)) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
if (user)
protected_blast_dcache_range(start, end);
else
blast_dcache_range(start, end);
}
}
if (type == R4K_INDEX ||
(type & R4K_INDEX && end - start > icache_size))
r4k_blast_icache();
else {
switch (boot_cpu_type()) {
case CPU_LOONGSON2EF:
protected_loongson2_blast_icache_range(start, end);
break;
default:
if (user)
protected_blast_icache_range(start, end);
else
blast_icache_range(start, end);
break;
}
}
}
static inline void local_r4k_flush_icache_range(unsigned long start,
unsigned long end)
{
__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
}
static inline void local_r4k_flush_icache_user_range(unsigned long start,
unsigned long end)
{
__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
}
static inline void local_r4k_flush_icache_range_ipi(void *args)
{
struct flush_icache_range_args *fir_args = args;
unsigned long start = fir_args->start;
unsigned long end = fir_args->end;
unsigned int type = fir_args->type;
bool user = fir_args->user;
__local_r4k_flush_icache_range(start, end, type, user);
}
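/*
 * Decide between address-based and indexed ops for an icache range flush.  If
 * indexed ops would need an IPI but hit ops would not, prefer hit ops for
 * small ranges: the cost estimate is the range size for the icache, doubled
 * (with the dcache size added to the budget) when the dcache must be flushed
 * too.  E.g. with a 32kB I$ and 32kB D$ and no IC_F_DC, a 20kB range costs
 * 40kB of hit ops against a 64kB budget, so the indexed path and its IPI are
 * avoided.
 */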
static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
bool user)
{
struct flush_icache_range_args args;
unsigned long size, cache_size;
args.start = start;
args.end = end;
args.type = R4K_HIT | R4K_INDEX;
args.user = user;
/*
* Indexed cache ops require an SMP call.
* Consider if that can or should be avoided.
*/
preempt_disable();
if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) {
/*
* If address-based cache ops don't require an SMP call, then
* use them exclusively for small flushes.
*/
size = end - start;
cache_size = icache_size;
if (!cpu_has_ic_fills_f_dc) {
size *= 2;
cache_size += dcache_size;
}
if (size <= cache_size)
args.type &= ~R4K_INDEX;
}
r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args);
preempt_enable();
instruction_hazard();
}
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
return __r4k_flush_icache_range(start, end, false);
}
static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
{
return __r4k_flush_icache_range(start, end, true);
}
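/*
 * Cache maintenance for non-coherent DMA: writeback+invalidate before a
 * buffer is handed to a device, invalidate only when data comes back from the
 * device.  Both prefer the inclusive S-cache when present; otherwise they
 * fall back to primary-cache ops followed by the bus cache (bc_*) ops, with a
 * final SYNC either way.
 */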
#ifdef CONFIG_DMA_NONCOHERENT
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
if (WARN_ON(size == 0))
return;
preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size) {
if (current_cpu_type() != CPU_LOONGSON64)
r4k_blast_scache();
else
r4k_blast_scache_node(pa_to_nid(addr));
} else {
blast_scache_range(addr, addr + size);
}
preempt_enable();
__sync();
return;
}
/*
* Either no secondary cache or the available caches don't have the
* subset property so we have to flush the primary caches
* explicitly.
* If we would need IPI to perform an INDEX-type operation, then
* we have to use the HIT-type alternative as IPI cannot be used
* here due to interrupts possibly being disabled.
*/
if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
blast_dcache_range(addr, addr + size);
}
preempt_enable();
bc_wback_inv(addr, size);
__sync();
}
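/*
 * BMIPS5000 only: before an invalidating DMA, write back the S-cache lines at
 * the leading and trailing edges of the buffer, presumably so that data the
 * hardware prefetcher pulled into partially covered lines is not lost by the
 * following invalidate.
 */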
static void prefetch_cache_inv(unsigned long addr, unsigned long size)
{
unsigned int linesz = cpu_scache_line_size();
unsigned long addr0 = addr, addr1;
addr0 &= ~(linesz - 1);
addr1 = (addr0 + size - 1) & ~(linesz - 1);
protected_writeback_scache_line(addr0);
if (likely(addr1 != addr0))
protected_writeback_scache_line(addr1);
else
return;
addr0 += linesz;
if (likely(addr1 != addr0))
protected_writeback_scache_line(addr0);
else
return;
addr1 -= linesz;
if (likely(addr1 > addr0))
protected_writeback_scache_line(addr0);
}
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
if (WARN_ON(size == 0))
return;
preempt_disable();
if (current_cpu_type() == CPU_BMIPS5000)
prefetch_cache_inv(addr, size);
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size) {
if (current_cpu_type() != CPU_LOONGSON64)
r4k_blast_scache();
else
r4k_blast_scache_node(pa_to_nid(addr));
} else {
			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors, and
			 * some processors, among them the QED RM5200 and
			 * RM7000, will throw an address error for cache hit
			 * ops with insufficient alignment.  This is solved by
			 * aligning the address to the cache line size.
			 */
blast_inv_scache_range(addr, addr + size);
}
preempt_enable();
__sync();
return;
}
if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
blast_inv_dcache_range(addr, addr + size);
}
preempt_enable();
bc_inv(addr, size);
__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT */
static void r4k_flush_icache_all(void)
{
if (cpu_has_vtag_icache)
r4k_blast_icache();
}
struct flush_kernel_vmap_range_args {
unsigned long vaddr;
int size;
};
static inline void local_r4k_flush_kernel_vmap_range_index(void *args)
{
/*
* Aliases only affect the primary caches so don't bother with
* S-caches or T-caches.
*/
r4k_blast_dcache();
}
static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
struct flush_kernel_vmap_range_args *vmra = args;
unsigned long vaddr = vmra->vaddr;
int size = vmra->size;
/*
* Aliases only affect the primary caches so don't bother with
* S-caches or T-caches.
*/
R4600_HIT_CACHEOP_WAR_IMPL;
blast_dcache_range(vaddr, vaddr + size);
}
static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
struct flush_kernel_vmap_range_args args;
args.vaddr = (unsigned long) vaddr;
args.size = size;
if (size >= dcache_size)
r4k_on_each_cpu(R4K_INDEX,
local_r4k_flush_kernel_vmap_range_index, NULL);
else
r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range,
&args);
}
static inline void rm7k_erratum31(void)
{
const unsigned long ic_lsize = 32;
unsigned long addr;
/* RM7000 erratum #31. The icache is screwed at startup. */
write_c0_taglo(0);
write_c0_taghi(0);
for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
__asm__ __volatile__ (
".set push\n\t"
".set noreorder\n\t"
".set mips3\n\t"
"cache\t%1, 0(%0)\n\t"
"cache\t%1, 0x1000(%0)\n\t"
"cache\t%1, 0x2000(%0)\n\t"
"cache\t%1, 0x3000(%0)\n\t"
"cache\t%2, 0(%0)\n\t"
"cache\t%2, 0x1000(%0)\n\t"
"cache\t%2, 0x2000(%0)\n\t"
"cache\t%2, 0x3000(%0)\n\t"
"cache\t%1, 0(%0)\n\t"
"cache\t%1, 0x1000(%0)\n\t"
"cache\t%1, 0x2000(%0)\n\t"
"cache\t%1, 0x3000(%0)\n\t"
".set pop\n"
:
: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill_I));
}
}
static inline int alias_74k_erratum(struct cpuinfo_mips *c)
{
unsigned int imp = c->processor_id & PRID_IMP_MASK;
unsigned int rev = c->processor_id & PRID_REV_MASK;
int present = 0;
/*
* Early versions of the 74K do not update the cache tags on a
* vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
* aliases. In this case it is better to treat the cache as always
* having aliases. Also disable the synonym tag update feature
* where available. In this case no opportunistic tag update will
* happen where a load causes a virtual address miss but a physical
* address hit during a D-cache look-up.
*/
switch (imp) {
case PRID_IMP_74K:
if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
present = 1;
if (rev == PRID_REV_ENCODE_332(2, 4, 0))
write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
break;
case PRID_IMP_1074K:
if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
present = 1;
write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
}
break;
default:
BUG();
}
return present;
}
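/*
 * On BMIPS5000 the icache fills from the dcache, so most flush operations
 * reduce to draining the write buffers: two SYNCs followed by a run of NOPs
 * long enough to cover the hazard.  r4k_cache_init() below wires several
 * flush hooks straight to this helper on that CPU.
 */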
static void b5k_instruction_hazard(void)
{
__sync();
__sync();
__asm__ __volatile__(
" nop; nop; nop; nop; nop; nop; nop; nop\n"
" nop; nop; nop; nop; nop; nop; nop; nop\n"
" nop; nop; nop; nop; nop; nop; nop; nop\n"
" nop; nop; nop; nop; nop; nop; nop; nop\n"
: : : "memory");
}
static char *way_string[] = { NULL, "direct mapped", "2-way",
"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
"9-way", "10-way", "11-way", "12-way",
"13-way", "14-way", "15-way", "16-way",
};
static void probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
unsigned int prid = read_c0_prid();
int has_74k_erratum = 0;
unsigned long config1;
unsigned int lsize;
switch (current_cpu_type()) {
case CPU_R4600: /* QED style two way caches? */
case CPU_R4700:
case CPU_R5000:
case CPU_NEVADA:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 2;
c->icache.waybit = __ffs(icache_size/2);
dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
c->dcache.ways = 2;
c->dcache.waybit= __ffs(dcache_size/2);
c->options |= MIPS_CPU_CACHE_CDEX_P;
break;
case CPU_R5500:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 2;
c->icache.waybit= 0;
dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
c->dcache.ways = 2;
c->dcache.waybit = 0;
c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
break;
case CPU_TX49XX:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 4;
c->icache.waybit= 0;
dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
c->dcache.ways = 4;
c->dcache.waybit = 0;
c->options |= MIPS_CPU_CACHE_CDEX_P;
c->options |= MIPS_CPU_PREFETCH;
break;
case CPU_R4000PC:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400PC:
case CPU_R4400SC:
case CPU_R4400MC:
case CPU_R4300:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
c->icache.waybit = 0; /* doesn't matter */
dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
c->dcache.ways = 1;
c->dcache.waybit = 0; /* does not matter */
c->options |= MIPS_CPU_CACHE_CDEX_P;
break;
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_R16000:
icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
c->icache.linesz = 64;
c->icache.ways = 2;
c->icache.waybit = 0;
dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
c->dcache.linesz = 32;
c->dcache.ways = 2;
c->dcache.waybit = 0;
c->options |= MIPS_CPU_PREFETCH;
break;
case CPU_RM7000:
rm7k_erratum31();
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 4;
c->icache.waybit = __ffs(icache_size / c->icache.ways);
dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
c->dcache.ways = 4;
c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
c->options |= MIPS_CPU_CACHE_CDEX_P;
c->options |= MIPS_CPU_PREFETCH;
break;
case CPU_LOONGSON2EF:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
if (prid & 0x3)
c->icache.ways = 4;
else
c->icache.ways = 2;
c->icache.waybit = 0;
dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
if (prid & 0x3)
c->dcache.ways = 4;
else
c->dcache.ways = 2;
c->dcache.waybit = 0;
break;
case CPU_LOONGSON64:
config1 = read_c0_config1();
lsize = (config1 >> 19) & 7;
if (lsize)
c->icache.linesz = 2 << lsize;
else
c->icache.linesz = 0;
c->icache.sets = 64 << ((config1 >> 22) & 7);
c->icache.ways = 1 + ((config1 >> 16) & 7);
icache_size = c->icache.sets *
c->icache.ways *
c->icache.linesz;
c->icache.waybit = 0;
lsize = (config1 >> 10) & 7;
if (lsize)
c->dcache.linesz = 2 << lsize;
else
c->dcache.linesz = 0;
c->dcache.sets = 64 << ((config1 >> 13) & 7);
c->dcache.ways = 1 + ((config1 >> 7) & 7);
dcache_size = c->dcache.sets *
c->dcache.ways *
c->dcache.linesz;
c->dcache.waybit = 0;
if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
(PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
(c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
c->options |= MIPS_CPU_PREFETCH;
break;
case CPU_CAVIUM_OCTEON3:
/* For now lie about the number of ways. */
c->icache.linesz = 128;
c->icache.sets = 16;
c->icache.ways = 8;
c->icache.flags |= MIPS_CACHE_VTAG;
icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
c->dcache.linesz = 128;
c->dcache.ways = 8;
c->dcache.sets = 8;
dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
c->options |= MIPS_CPU_PREFETCH;
break;
default:
if (!(config & MIPS_CONF_M))
panic("Don't know how to probe P-caches on this cpu.");
		/*
		 * We seem to be a MIPS32 or MIPS64 CPU, so let's probe
		 * the I-cache ...
		 */
config1 = read_c0_config1();
lsize = (config1 >> 19) & 7;
/* IL == 7 is reserved */
if (lsize == 7)
panic("Invalid icache line size");
c->icache.linesz = lsize ? 2 << lsize : 0;
c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
c->icache.ways = 1 + ((config1 >> 16) & 7);
icache_size = c->icache.sets *
c->icache.ways *
c->icache.linesz;
c->icache.waybit = __ffs(icache_size/c->icache.ways);
if (config & MIPS_CONF_VI)
c->icache.flags |= MIPS_CACHE_VTAG;
/*
* Now probe the MIPS32 / MIPS64 data cache.
*/
c->dcache.flags = 0;
lsize = (config1 >> 10) & 7;
/* DL == 7 is reserved */
if (lsize == 7)
panic("Invalid dcache line size");
c->dcache.linesz = lsize ? 2 << lsize : 0;
c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
c->dcache.ways = 1 + ((config1 >> 7) & 7);
dcache_size = c->dcache.sets *
c->dcache.ways *
c->dcache.linesz;
c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
c->options |= MIPS_CPU_PREFETCH;
break;
}
/*
* Processor configuration sanity check for the R4000SC erratum
* #5. With page sizes larger than 32kB there is no possibility
* to get a VCE exception anymore so we don't care about this
* misconfiguration. The case is rather theoretical anyway;
* presumably no vendor is shipping his hardware in the "bad"
* configuration.
*/
if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
(prid & PRID_REV_MASK) < PRID_REV_R4400 &&
!(config & CONF_SC) && c->icache.linesz != 16 &&
PAGE_SIZE <= 0x8000)
panic("Improper R4000SC processor configuration detected");
/* compute a couple of other cache variables */
c->icache.waysize = icache_size / c->icache.ways;
c->dcache.waysize = dcache_size / c->dcache.ways;
c->icache.sets = c->icache.linesz ?
icache_size / (c->icache.linesz * c->icache.ways) : 0;
c->dcache.sets = c->dcache.linesz ?
dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
	/*
	 * R1x000 P-caches are odd in a positive way.  They're 32kB 2-way
	 * virtually indexed, so they would normally suffer from aliases, but
	 * magic in the hardware deals with that for us so we don't need to
	 * take care ourselves.
	 */
switch (current_cpu_type()) {
case CPU_20KC:
case CPU_25KF:
case CPU_I6400:
case CPU_I6500:
case CPU_SB1:
case CPU_SB1A:
c->dcache.flags |= MIPS_CACHE_PINDEX;
break;
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_R16000:
break;
case CPU_74K:
case CPU_1074K:
has_74k_erratum = alias_74k_erratum(c);
fallthrough;
case CPU_M14KC:
case CPU_M14KEC:
case CPU_24K:
case CPU_34K:
case CPU_1004K:
case CPU_INTERAPTIV:
case CPU_P5600:
case CPU_PROAPTIV:
case CPU_M5150:
case CPU_QEMU_GENERIC:
case CPU_P6600:
case CPU_M6250:
if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
(c->icache.waysize > PAGE_SIZE))
c->icache.flags |= MIPS_CACHE_ALIASES;
if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
/*
* Effectively physically indexed dcache,
* thus no virtual aliases.
*/
c->dcache.flags |= MIPS_CACHE_PINDEX;
break;
}
fallthrough;
default:
if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
c->dcache.flags |= MIPS_CACHE_ALIASES;
}
/* Physically indexed caches don't suffer from virtual aliasing */
if (c->dcache.flags & MIPS_CACHE_PINDEX)
c->dcache.flags &= ~MIPS_CACHE_ALIASES;
/*
* In systems with CM the icache fills from L2 or closer caches, and
* thus sees remote stores without needing to write them back any
* further than that.
*/
if (mips_cm_present())
c->icache.flags |= MIPS_IC_SNOOPS_REMOTE;
switch (current_cpu_type()) {
case CPU_20KC:
/*
		 * Some older 20Kc chips don't have the 'VI' bit in
* the config register.
*/
c->icache.flags |= MIPS_CACHE_VTAG;
break;
case CPU_ALCHEMY:
case CPU_I6400:
case CPU_I6500:
c->icache.flags |= MIPS_CACHE_IC_F_DC;
break;
case CPU_BMIPS5000:
c->icache.flags |= MIPS_CACHE_IC_F_DC;
/* Cache aliases are handled in hardware; allow HIGHMEM */
c->dcache.flags &= ~MIPS_CACHE_ALIASES;
break;
case CPU_LOONGSON2EF:
		/*
		 * LOONGSON2 has a 4-way icache, but when using an indexed
		 * cache op, one op will act on all 4 ways.
		 */
c->icache.ways = 1;
}
pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
icache_size >> 10,
c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
way_string[c->icache.ways], c->icache.linesz);
pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
dcache_size >> 10, way_string[c->dcache.ways],
(c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
(c->dcache.flags & MIPS_CACHE_ALIASES) ?
"cache aliases" : "no aliases",
c->dcache.linesz);
}
static void probe_vcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config2, lsize;
if (current_cpu_type() != CPU_LOONGSON64)
return;
config2 = read_c0_config2();
if ((lsize = ((config2 >> 20) & 15)))
c->vcache.linesz = 2 << lsize;
else
c->vcache.linesz = lsize;
c->vcache.sets = 64 << ((config2 >> 24) & 15);
c->vcache.ways = 1 + ((config2 >> 16) & 15);
vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
c->vcache.waybit = 0;
c->vcache.waysize = vcache_size / c->vcache.ways;
pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
}
/*
* If you even _breathe_ on this function, look at the gcc output and make sure
* it does not pop things on and off the stack for the cache sizing loop that
* executes in KSEG1 space or else you will crash and burn badly. You have
* been warned.
*/
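/*
 * Size the R4000SC/R4400SC external S-cache by experiment (run uncached):
 * touch one word at every power-of-two offset so each candidate index holds a
 * valid tag, store a zero tag at the base index, then walk the same
 * power-of-two strides with Index_Load_Tag_SD until the zero tag reads back.
 * The first stride that wraps around to the base index is the S-cache size.
 */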
static int probe_scache(void)
{
unsigned long flags, addr, begin, end, pow2;
unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
if (config & CONF_SC)
return 0;
begin = (unsigned long) &_stext;
begin &= ~((4 * 1024 * 1024) - 1);
end = begin + (4 * 1024 * 1024);
/*
* This is such a bitch, you'd think they would make it easy to do
* this. Away you daemons of stupidity!
*/
local_irq_save(flags);
/* Fill each size-multiple cache line with a valid tag. */
pow2 = (64 * 1024);
for (addr = begin; addr < end; addr = (begin + pow2)) {
unsigned long *p = (unsigned long *) addr;
__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
pow2 <<= 1;
}
/* Load first line with zero (therefore invalid) tag. */
write_c0_taglo(0);
write_c0_taghi(0);
__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
cache_op(Index_Store_Tag_I, begin);
cache_op(Index_Store_Tag_D, begin);
cache_op(Index_Store_Tag_SD, begin);
/* Now search for the wrap around point. */
pow2 = (128 * 1024);
for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
cache_op(Index_Load_Tag_SD, addr);
__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
if (!read_c0_taglo())
break;
pow2 <<= 1;
}
local_irq_restore(flags);
addr -= begin;
scache_size = addr;
c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
c->scache.ways = 1;
c->scache.waybit = 0; /* does not matter */
return 1;
}
static void loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
scache_size = 512*1024;
c->scache.linesz = 32;
c->scache.ways = 4;
c->scache.waybit = 0;
c->scache.waysize = scache_size / (c->scache.ways);
c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
static void loongson3_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config2, lsize;
config2 = read_c0_config2();
lsize = (config2 >> 4) & 15;
if (lsize)
c->scache.linesz = 2 << lsize;
else
c->scache.linesz = 0;
c->scache.sets = 64 << ((config2 >> 8) & 15);
c->scache.ways = 1 + (config2 & 15);
/* Loongson-3 has 4-Scache banks, while Loongson-2K have only 2 banks */
if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
c->scache.sets *= 2;
else
c->scache.sets *= 4;
scache_size = c->scache.sets * c->scache.ways * c->scache.linesz;
c->scache.waybit = 0;
c->scache.waysize = scache_size / c->scache.ways;
pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
if (scache_size)
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
return;
}
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
static void setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
int sc_present = 0;
/*
* Do the probing thing on R4000SC and R4400SC processors. Other
* processors don't have a S-cache that would be relevant to the
* Linux memory management.
*/
switch (current_cpu_type()) {
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
case CPU_R4400MC:
sc_present = run_uncached(probe_scache);
if (sc_present)
c->options |= MIPS_CPU_CACHE_CDEX_S;
break;
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_R16000:
scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
c->scache.linesz = 64 << ((config >> 13) & 1);
c->scache.ways = 2;
c->scache.waybit= 0;
sc_present = 1;
break;
case CPU_R5000:
case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
r5k_sc_init();
#endif
return;
case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
rm7k_sc_init();
#endif
return;
case CPU_LOONGSON2EF:
loongson2_sc_init();
return;
case CPU_LOONGSON64:
loongson3_sc_init();
return;
case CPU_CAVIUM_OCTEON3:
/* don't need to worry about L2, fully coherent */
return;
default:
if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
if (mips_sc_init ()) {
scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
scache_size >> 10,
way_string[c->scache.ways], c->scache.linesz);
if (current_cpu_type() == CPU_BMIPS5000)
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#else
if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
return;
}
sc_present = 0;
}
if (!sc_present)
return;
/* compute a couple of other cache variables */
c->scache.waysize = scache_size / c->scache.ways;
c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
void au1x00_fixup_config_od(void)
{
/*
* c0_config.od (bit 19) was write only (and read as 0)
* on the early revisions of Alchemy SOCs. It disables the bus
* transaction overlapping and needs to be set to fix various errata.
*/
switch (read_c0_prid()) {
case 0x00030100: /* Au1000 DA */
case 0x00030201: /* Au1000 HA */
case 0x00030202: /* Au1000 HB */
case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata actually say nothing about this bit, so we set it
	 * just in case for those revisions that required it to be set
	 * according to the (now gone) cpu table.
	 */
case 0x02030200: /* Au1100 AB */
case 0x02030201: /* Au1100 BA */
case 0x02030202: /* Au1100 BC */
set_c0_config(1 << 19);
break;
}
}
/* CP0 hazard avoidance. */
#define NXP_BARRIER() \
__asm__ __volatile__( \
".set noreorder\n\t" \
"nop; nop; nop; nop; nop; nop;\n\t" \
".set reorder\n\t")
static void nxp_pr4450_fixup_config(void)
{
unsigned long config0;
config0 = read_c0_config();
/* clear all three cache coherency fields */
config0 &= ~(0x7 | (7 << 25) | (7 << 28));
config0 |= (((_page_cachable_default >> _CACHE_SHIFT) << 0) |
((_page_cachable_default >> _CACHE_SHIFT) << 25) |
((_page_cachable_default >> _CACHE_SHIFT) << 28));
write_c0_config(config0);
NXP_BARRIER();
}
static int cca = -1;
static int __init cca_setup(char *str)
{
get_option(&str, &cca);
return 0;
}
early_param("cca", cca_setup);
static void coherency_setup(void)
{
if (cca < 0 || cca > 7)
cca = read_c0_config() & CONF_CM_CMASK;
_page_cachable_default = cca << _CACHE_SHIFT;
pr_debug("Using cache attribute %d\n", cca);
change_c0_config(CONF_CM_CMASK, cca);
/*
* c0_status.cu=0 specifies that updates by the sc instruction use
* the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others, like Toshiba, had the
	 * silly idea of putting something else there ...
*/
switch (current_cpu_type()) {
case CPU_R4000PC:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400PC:
case CPU_R4400SC:
case CPU_R4400MC:
clear_c0_config(CONF_CU);
break;
/*
* We need to catch the early Alchemy SOCs with
* the write-only co_config.od bit and set it back to one on:
* Au1000 rev DA, HA, HB; Au1100 AB, BA, BC, Au1500 AB
*/
case CPU_ALCHEMY:
au1x00_fixup_config_od();
break;
case PRID_IMP_PR4450:
nxp_pr4450_fixup_config();
break;
}
}
static void r4k_cache_error_setup(void)
{
extern char __weak except_vec2_generic;
extern char __weak except_vec2_sb1;
switch (current_cpu_type()) {
case CPU_SB1:
case CPU_SB1A:
set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
break;
default:
set_uncached_handler(0x100, &except_vec2_generic, 0x80);
break;
}
}
void r4k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;
probe_pcache();
probe_vcache();
setup_scache();
r4k_blast_dcache_page_setup();
r4k_blast_dcache_setup();
r4k_blast_icache_page_setup();
r4k_blast_icache_setup();
r4k_blast_scache_page_setup();
r4k_blast_scache_setup();
r4k_blast_scache_node_setup();
#ifdef CONFIG_EVA
r4k_blast_dcache_user_page_setup();
r4k_blast_icache_user_page_setup();
#endif
/*
* Some MIPS32 and MIPS64 processors have physically indexed caches.
* This code supports virtually indexed processors and will be
* unnecessarily inefficient on physically indexed processors.
*/
if (c->dcache.linesz && cpu_has_dc_aliases)
shm_align_mask = max_t( unsigned long,
c->dcache.sets * c->dcache.linesz - 1,
PAGE_SIZE - 1);
else
shm_align_mask = PAGE_SIZE-1;
__flush_cache_vmap = r4k__flush_cache_vmap;
__flush_cache_vunmap = r4k__flush_cache_vunmap;
flush_cache_all = cache_noop;
__flush_cache_all = r4k___flush_cache_all;
flush_cache_mm = r4k_flush_cache_mm;
flush_cache_page = r4k_flush_cache_page;
flush_cache_range = r4k_flush_cache_range;
__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
flush_icache_all = r4k_flush_icache_all;
flush_data_cache_page = r4k_flush_data_cache_page;
flush_icache_range = r4k_flush_icache_range;
local_flush_icache_range = local_r4k_flush_icache_range;
__flush_icache_user_range = r4k_flush_icache_user_range;
__local_flush_icache_user_range = local_r4k_flush_icache_user_range;
#ifdef CONFIG_DMA_NONCOHERENT
_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
_dma_cache_wback = r4k_dma_cache_wback_inv;
_dma_cache_inv = r4k_dma_cache_inv;
#endif /* CONFIG_DMA_NONCOHERENT */
build_clear_page();
build_copy_page();
/*
* We want to run CMP kernels on core with and without coherent
* caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
* or not to flush caches.
*/
local_r4k___flush_cache_all(NULL);
coherency_setup();
board_cache_error_setup = r4k_cache_error_setup;
/*
* Per-CPU overrides
*/
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
/* No IPI is needed because all CPUs share the same D$ */
flush_data_cache_page = r4k_blast_dcache_page;
break;
case CPU_BMIPS5000:
/* We lose our superpowers if L2 is disabled */
if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
break;
/* I$ fills from D$ just by emptying the write buffers */
flush_cache_page = (void *)b5k_instruction_hazard;
flush_cache_range = (void *)b5k_instruction_hazard;
flush_data_cache_page = (void *)b5k_instruction_hazard;
flush_icache_range = (void *)b5k_instruction_hazard;
local_flush_icache_range = (void *)b5k_instruction_hazard;
/* Optimization: an L2 flush implicitly flushes the L1 */
current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
break;
case CPU_LOONGSON64:
/* Loongson-3 maintains cache coherency by hardware */
__flush_cache_all = cache_noop;
__flush_cache_vmap = cache_noop;
__flush_cache_vunmap = cache_noop;
__flush_kernel_vmap_range = (void *)cache_noop;
flush_cache_mm = (void *)cache_noop;
flush_cache_page = (void *)cache_noop;
flush_cache_range = (void *)cache_noop;
flush_icache_all = (void *)cache_noop;
flush_data_cache_page = (void *)cache_noop;
break;
}
}
static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
void *v)
{
switch (cmd) {
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
coherency_setup();
break;
}
return NOTIFY_OK;
}
static struct notifier_block r4k_cache_pm_notifier_block = {
.notifier_call = r4k_cache_pm_notifier,
};
int __init r4k_cache_init_pm(void)
{
return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}
arch_initcall(r4k_cache_init_pm);
| linux-master | arch/mips/mm/c-r4k.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2011 Wind River Systems,
* written by Ralf Baechle <[email protected]>
*/
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + shm_align_mask) & ~shm_align_mask) + \
(((pgoff) << PAGE_SHIFT) & shm_align_mask))
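/*
 * COLOUR_ALIGN() rounds @addr up to a cache-colour boundary and then adds the
 * colour of the file offset, so shared mappings of the same page land on the
 * same dcache colour.  For example, with a 32kB alias span
 * (shm_align_mask = 0x7fff), 4kB pages and pgoff 3, an addr of 0x10000
 * becomes 0x10000 + 0x3000 = 0x13000.
 */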
enum mmap_allocation_direction {UP, DOWN};
static unsigned long arch_get_unmapped_area_common(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags, enum mmap_allocation_direction dir)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long addr = addr0;
int do_color_align;
struct vm_unmapped_area_info info;
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
if (flags & MAP_FIXED) {
/* Even MAP_FIXED mappings must reside within TASK_SIZE */
if (TASK_SIZE - len < addr)
return -EINVAL;
/*
* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
return -EINVAL;
return addr;
}
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
/* requesting a specific address */
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
info.length = len;
info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
if (dir == DOWN) {
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
return addr;
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
}
info.flags = 0;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
return vm_unmapped_area(&info);
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, UP);
}
/*
* There is no need to export this but sched.h declares the function as
* extern so making it static here results in an error.
*/
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, DOWN);
}
bool __virt_addr_valid(const volatile void *kaddr)
{
unsigned long vaddr = (unsigned long)kaddr;
if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
return false;
return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
| linux-master | arch/mips/mm/mmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 Ani Joshi <[email protected]>
* Copyright (C) 2000, 2001, 06 Ralf Baechle <[email protected]>
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
*/
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
/*
* The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
* fill random cachelines with stale data at any time, requiring an extra
* flush post-DMA.
*
* Warning on the terminology - Linux calls an uncached area coherent; MIPS
* terminology calls memory areas with hardware maintained coherency coherent.
*
 * Note that the R14000 and R16000 should also be checked for in this condition.
 * However, this function is only called on non-I/O-coherent systems, and of
 * those only the R10000 and R12000 are used, in the SGI IP28 Indigo² and the
 * SGI IP32 aka O2 respectively.
*/
static inline bool cpu_needs_post_dma_flush(void)
{
switch (boot_cpu_type()) {
case CPU_R10000:
case CPU_R12000:
case CPU_BMIPS5000:
case CPU_LOONGSON2EF:
case CPU_XBURST:
return true;
default:
/*
* Presence of MAARs suggests that the CPU supports
* speculatively prefetching data, and therefore requires
* the post-DMA flush/invalidate.
*/
return cpu_has_maar;
}
}
void arch_dma_prep_coherent(struct page *page, size_t size)
{
dma_cache_wback_inv((unsigned long)page_address(page), size);
}
void *arch_dma_set_uncached(void *addr, size_t size)
{
return (void *)(__pa(addr) + UNCAC_BASE);
}
static inline void dma_sync_virt_for_device(void *addr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
dma_cache_wback((unsigned long)addr, size);
break;
case DMA_FROM_DEVICE:
dma_cache_inv((unsigned long)addr, size);
break;
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv((unsigned long)addr, size);
break;
default:
BUG();
}
}
static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
break;
case DMA_FROM_DEVICE:
case DMA_BIDIRECTIONAL:
dma_cache_inv((unsigned long)addr, size);
break;
default:
BUG();
}
}
/*
* A single sg entry may refer to multiple physically contiguous pages. But
* we still need to process highmem pages individually. If highmem is not
* configured then the bulk of this loop gets optimized out.
*/
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
enum dma_data_direction dir, bool for_device)
{
struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
unsigned long offset = paddr & ~PAGE_MASK;
size_t left = size;
do {
size_t len = left;
void *addr;
if (PageHighMem(page)) {
if (offset + len > PAGE_SIZE)
len = PAGE_SIZE - offset;
}
addr = kmap_atomic(page);
if (for_device)
dma_sync_virt_for_device(addr + offset, len, dir);
else
dma_sync_virt_for_cpu(addr + offset, len, dir);
kunmap_atomic(addr);
offset = 0;
page++;
left -= len;
} while (left);
}
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
dma_sync_phys(paddr, size, dir, true);
}
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
if (cpu_needs_post_dma_flush())
dma_sync_phys(paddr, size, dir, false);
}
#endif
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
dev->dma_coherent = coherent;
}
#endif
| linux-master | arch/mips/mm/dma-noncoherent.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Kevin D. Kissell, [email protected] and Carsten Langgaard, [email protected]
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
/*
* We have up to 8 empty zeroed pages so we can map one of the right colour
* when needed. This is necessary only on R4000 / R4400 SC and MC versions
* where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
*/
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);
/*
* Not static inline because used by IP27 special magic initialization code
*/
void setup_zero_pages(void)
{
unsigned int order, i;
struct page *page;
if (cpu_has_vce)
order = 3;
else
order = 0;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
panic("Oh boy, that early out of memory?");
page = virt_to_page((void *)empty_zero_page);
split_page(page, order);
for (i = 0; i < (1 << order); i++, page++)
mark_page_reserved(page);
zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
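/*
 * Map @page at a fixmap virtual address chosen to share the cache colour of
 * @addr, using a hand-rolled wired TLB entry.  This lets kernel copies to and
 * from user pages avoid creating dcache aliases on VIPT caches; the entry is
 * torn down again by kunmap_coherent().
 */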
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
enum fixed_addresses idx;
unsigned int old_mmid;
unsigned long vaddr, flags, entrylo;
unsigned long old_ctx;
pte_t pte;
int tlbidx;
BUG_ON(folio_test_dcache_dirty(page_folio(page)));
preempt_disable();
pagefault_disable();
idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
idx += in_interrupt() ? FIX_N_COLOURS : 0;
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
entrylo = pte.pte_high;
#else
entrylo = pte_to_entrylo(pte_val(pte));
#endif
local_irq_save(flags);
old_ctx = read_c0_entryhi();
write_c0_entryhi(vaddr & (PAGE_MASK << 1));
write_c0_entrylo0(entrylo);
write_c0_entrylo1(entrylo);
if (cpu_has_mmid) {
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(MMID_KERNEL_WIRED);
}
#ifdef CONFIG_XPA
if (cpu_has_xpa) {
entrylo = (pte.pte_low & _PFNX_MASK);
writex_c0_entrylo0(entrylo);
writex_c0_entrylo1(entrylo);
}
#endif
tlbidx = num_wired_entries();
write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx);
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
if (cpu_has_mmid)
write_c0_memorymapid(old_mmid);
local_irq_restore(flags);
return (void*) vaddr;
}
void *kmap_coherent(struct page *page, unsigned long addr)
{
return __kmap_pgprot(page, addr, PAGE_KERNEL);
}
void *kmap_noncoherent(struct page *page, unsigned long addr)
{
return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}
void kunmap_coherent(void)
{
unsigned int wired;
unsigned long flags, old_ctx;
local_irq_save(flags);
old_ctx = read_c0_entryhi();
wired = num_wired_entries() - 1;
write_c0_wired(wired);
write_c0_index(wired);
write_c0_entryhi(UNIQUE_ENTRYHI(wired));
write_c0_entrylo0(0);
write_c0_entrylo1(0);
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
local_irq_restore(flags);
pagefault_enable();
preempt_enable();
}
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
struct folio *src = page_folio(from);
void *vfrom, *vto;
vto = kmap_atomic(to);
if (cpu_has_dc_aliases &&
folio_mapped(src) && !folio_test_dcache_dirty(src)) {
vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom);
kunmap_coherent();
} else {
vfrom = kmap_atomic(from);
copy_page(vto, vfrom);
kunmap_atomic(vfrom);
}
if ((!cpu_has_ic_fills_f_dc) ||
pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
flush_data_cache_page((unsigned long)vto);
kunmap_atomic(vto);
/* Make sure this page is cleared on other CPU's too before using it */
smp_wmb();
}
void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
struct folio *folio = page_folio(page);
if (cpu_has_dc_aliases &&
folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(vto, src, len);
kunmap_coherent();
} else {
memcpy(dst, src, len);
if (cpu_has_dc_aliases)
folio_set_dcache_dirty(folio);
}
if (vma->vm_flags & VM_EXEC)
flush_cache_page(vma, vaddr, page_to_pfn(page));
}
void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
struct folio *folio = page_folio(page);
if (cpu_has_dc_aliases &&
folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len);
kunmap_coherent();
} else {
memcpy(dst, src, len);
if (cpu_has_dc_aliases)
folio_set_dcache_dirty(folio);
}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);
void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int i, j, k;
unsigned long vaddr;
vaddr = start;
i = pgd_index(vaddr);
j = pud_index(vaddr);
k = pmd_index(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
pud = (pud_t *)pgd;
for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
pmd = (pmd_t *)pud;
for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE,
PAGE_SIZE);
set_pmd(pmd, __pmd((unsigned long)pte));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
vaddr += PMD_SIZE;
}
k = 0;
}
j = 0;
}
#endif
}
struct maar_walk_info {
struct maar_config cfg[16];
unsigned int num_cfg;
};
static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
void *data)
{
struct maar_walk_info *wi = data;
struct maar_config *cfg = &wi->cfg[wi->num_cfg];
unsigned int maar_align;
/* MAAR registers hold physical addresses right shifted by 4 bits */
maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);
/* Fill in the MAAR config entry */
cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
cfg->attrs = MIPS_MAAR_S;
/* Ensure we don't overflow the cfg array */
if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
wi->num_cfg++;
return 0;
}
unsigned __weak platform_maar_init(unsigned num_pairs)
{
unsigned int num_configured;
struct maar_walk_info wi;
wi.num_cfg = 0;
walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);
num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
if (num_configured < wi.num_cfg)
pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
num_pairs, wi.num_cfg);
return num_configured;
}
void maar_init(void)
{
unsigned num_maars, used, i;
phys_addr_t lower, upper, attr;
static struct {
struct maar_config cfgs[3];
unsigned used;
} recorded = { { { 0 } }, 0 };
if (!cpu_has_maar)
return;
/* Detect the number of MAARs */
write_c0_maari(~0);
back_to_back_c0_hazard();
num_maars = read_c0_maari() + 1;
/* MAARs should be in pairs */
WARN_ON(num_maars % 2);
/* Set MAARs using values we recorded already */
if (recorded.used) {
used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
BUG_ON(used != recorded.used);
} else {
/* Configure the required MAARs */
used = platform_maar_init(num_maars / 2);
}
/* Disable any further MAARs */
for (i = (used * 2); i < num_maars; i++) {
write_c0_maari(i);
back_to_back_c0_hazard();
write_c0_maar(0);
back_to_back_c0_hazard();
}
if (recorded.used)
return;
pr_info("MAAR configuration:\n");
for (i = 0; i < num_maars; i += 2) {
write_c0_maari(i);
back_to_back_c0_hazard();
upper = read_c0_maar();
#ifdef CONFIG_XPA
upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif
write_c0_maari(i + 1);
back_to_back_c0_hazard();
lower = read_c0_maar();
#ifdef CONFIG_XPA
lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif
attr = lower & upper;
lower = (lower & MIPS_MAAR_ADDR) << 4;
upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
pr_info(" [%d]: ", i / 2);
if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
pr_cont("disabled\n");
continue;
}
pr_cont("%pa-%pa", &lower, &upper);
if (attr & MIPS_MAAR_S)
pr_cont(" speculate");
pr_cont("\n");
/* Record the setup for use on secondary CPUs */
if (used <= ARRAY_SIZE(recorded.cfgs)) {
recorded.cfgs[recorded.used].lower = lower;
recorded.cfgs[recorded.used].upper = upper;
recorded.cfgs[recorded.used].attrs = attr;
recorded.used++;
}
}
}
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
pagetable_init();
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
printk(KERN_WARNING "This processor doesn't support highmem."
" %ldk highmem ignored\n",
(highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
}
#endif
free_area_init(max_zone_pfns);
}
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif
static inline void __init mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
if (cpu_has_dc_aliases)
return;
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
struct page *page = pfn_to_page(tmp);
if (!memblock_is_memory(PFN_PHYS(tmp)))
SetPageReserved(page);
else
free_highmem_page(page);
}
#endif
}
void __init mem_init(void)
{
/*
* When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
* bits to hold a full 32b physical address on MIPS32 systems.
*/
BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
#ifdef CONFIG_HIGHMEM
max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
max_mapnr = max_low_pfn;
#endif
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
maar_init();
memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
mem_init_free_highmem();
#ifdef CONFIG_64BIT
if ((unsigned long) &_text > (unsigned long) CKSEG0)
/* The -4 is a hack so that user tools don't have to handle
the overflow. */
kclist_add(&kcore_kseg0, (void *) CKSEG0,
0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NUMA */
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
unsigned long pfn;
for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
struct page *page = pfn_to_page(pfn);
void *addr = phys_to_virt(PFN_PHYS(pfn));
memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
free_reserved_page(page);
}
printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
void (*free_init_pages_eva)(void *begin, void *end) = NULL;
void __weak __init prom_free_prom_memory(void)
{
/* nothing to do */
}
void __ref free_initmem(void)
{
prom_free_prom_memory();
/*
* Let the platform define a specific function to free the
* init section since EVA may have used any possible mapping
* between virtual and physical addresses.
*/
if (free_init_pages_eva)
free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
else
free_initmem_default(POISON_FREE_INITMEM);
}
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
return node_distance(cpu_to_node(from), cpu_to_node(to));
}
static int __init pcpu_cpu_to_node(int cpu)
{
return cpu_to_node(cpu);
}
void __init setup_per_cpu_areas(void)
{
unsigned long delta;
unsigned int cpu;
int rc;
/*
* Always reserve area for module percpu variables. That's
* what the legacy allocator did.
*/
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
pcpu_cpu_distance,
pcpu_cpu_to_node);
if (rc < 0)
panic("Failed to initialize percpu areas.");
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu)
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif
/*
* Align swapper_pg_dir to 64K, which allows its address to be loaded
* with a single LUI instruction in the TLB handlers. If we used
* __aligned(64K), its size would get rounded up to the alignment
* size, and waste space. So we place it in its own section and align
* it in the linker script.
*/
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
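/*
 * Illustrative sketch only (the real placement is done by the MIPS linker
 * script, not shown here): the intent is roughly
 *
 *	. = ALIGN(0x10000);
 *	*(.bss..swapper_pg_dir)
 *
 * so the array starts on a 64K boundary without its size being padded.
 */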
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
| linux-master | arch/mips/mm/init.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1997, 99, 2001 - 2004 Ralf Baechle <[email protected]>
*/
#include <linux/extable.h>
#include <linux/spinlock.h>
#include <asm/branch.h>
#include <linux/uaccess.h>
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(exception_epc(regs));
if (fixup) {
regs->cp0_epc = fixup->nextinsn;
return 1;
}
return 0;
}
| linux-master | arch/mips/mm/extable.c |
// SPDX-License-Identifier: GPL-2.0
/*
* r2300.c: R2000 and R3000 specific mmu/cache code.
*
* Copyright (C) 1996 David S. Miller ([email protected])
*
* with a lot of changes to make this thing work for R3000s
* Tx39XX R4k style caches added. HK
* Copyright (C) 1998, 1999, 2000 Harald Koerfgen
* Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
* Copyright (C) 2002 Ralf Baechle
* Copyright (C) 2002 Maciej W. Rozycki
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/tlbmisc.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#undef DEBUG_TLB
extern void build_tlb_refill_handler(void);
/* CP0 hazard avoidance. */
#define BARRIER \
__asm__ __volatile__( \
".set push\n\t" \
".set noreorder\n\t" \
"nop\n\t" \
".set pop\n\t")
/* TLB operations. */
static void local_flush_tlb_from(int entry)
{
unsigned long old_ctx;
old_ctx = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
write_c0_entrylo0(0);
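/*
 * Note (added for clarity): on the R3000 the Index register holds the
 * entry number in bits 8..13, hence the << 8 below. Each invalidated
 * entry gets a distinct VPN in the unmapped KSEG range
 * ((entry | 0x80000) << 12), so the stale entries never alias a mapped
 * address and no two entries share a VPN.
 */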
while (entry < current_cpu_data.tlbsize) {
write_c0_index(entry << 8);
write_c0_entryhi((entry | 0x80000) << 12);
entry++; /* BARRIER */
tlb_write_indexed();
}
write_c0_entryhi(old_ctx);
}
void local_flush_tlb_all(void)
{
unsigned long flags;
#ifdef DEBUG_TLB
printk("[tlball]");
#endif
local_irq_save(flags);
local_flush_tlb_from(8);
local_irq_restore(flags);
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
struct mm_struct *mm = vma->vm_mm;
int cpu = smp_processor_id();
if (cpu_context(cpu, mm) != 0) {
unsigned long size, flags;
#ifdef DEBUG_TLB
printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
cpu_context(cpu, mm) & asid_mask, start, end);
#endif
local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (size <= current_cpu_data.tlbsize) {
int oldpid = read_c0_entryhi() & asid_mask;
int newpid = cpu_context(cpu, mm) & asid_mask;
start &= PAGE_MASK;
end += PAGE_SIZE - 1;
end &= PAGE_MASK;
while (start < end) {
int idx;
write_c0_entryhi(start | newpid);
start += PAGE_SIZE; /* BARRIER */
tlb_probe();
idx = read_c0_index();
write_c0_entrylo0(0);
write_c0_entryhi(KSEG0);
if (idx < 0) /* BARRIER */
continue;
tlb_write_indexed();
}
write_c0_entryhi(oldpid);
} else {
drop_mmu_context(mm);
}
local_irq_restore(flags);
}
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long size, flags;
#ifdef DEBUG_TLB
printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", start, end);
#endif
local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (size <= current_cpu_data.tlbsize) {
int pid = read_c0_entryhi();
start &= PAGE_MASK;
end += PAGE_SIZE - 1;
end &= PAGE_MASK;
while (start < end) {
int idx;
write_c0_entryhi(start);
start += PAGE_SIZE; /* BARRIER */
tlb_probe();
idx = read_c0_index();
write_c0_entrylo0(0);
write_c0_entryhi(KSEG0);
if (idx < 0) /* BARRIER */
continue;
tlb_write_indexed();
}
write_c0_entryhi(pid);
} else {
local_flush_tlb_all();
}
local_irq_restore(flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
int cpu = smp_processor_id();
if (cpu_context(cpu, vma->vm_mm) != 0) {
unsigned long flags;
int oldpid, newpid, idx;
#ifdef DEBUG_TLB
printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
page &= PAGE_MASK;
local_irq_save(flags);
oldpid = read_c0_entryhi() & asid_mask;
write_c0_entryhi(page | newpid);
BARRIER;
tlb_probe();
idx = read_c0_index();
write_c0_entrylo0(0);
write_c0_entryhi(KSEG0);
if (idx < 0) /* BARRIER */
goto finish;
tlb_write_indexed();
finish:
write_c0_entryhi(oldpid);
local_irq_restore(flags);
}
}
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
unsigned long flags;
int idx, pid;
/*
* Handle the debugger faulting in pages for the debuggee.
*/
if (current->active_mm != vma->vm_mm)
return;
pid = read_c0_entryhi() & asid_mask;
#ifdef DEBUG_TLB
if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
(cpu_context(cpu, vma->vm_mm)), pid);
}
#endif
local_irq_save(flags);
address &= PAGE_MASK;
write_c0_entryhi(address | pid);
BARRIER;
tlb_probe();
idx = read_c0_index();
write_c0_entrylo0(pte_val(pte));
write_c0_entryhi(address | pid);
if (idx < 0) { /* BARRIER */
tlb_write_random();
} else {
tlb_write_indexed();
}
write_c0_entryhi(pid);
local_irq_restore(flags);
}
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
unsigned long flags;
unsigned long old_ctx;
static unsigned long wired = 0;
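/*
 * Note (added for clarity): only the first 8 TLB entries are used for
 * wired mappings here, matching local_flush_tlb_all() above, which only
 * flushes from entry 8 onwards.
 */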
if (wired < 8) {
#ifdef DEBUG_TLB
printk("[tlbwired<entry lo0 %8x, hi %8x\n>]\n",
entrylo0, entryhi);
#endif
local_irq_save(flags);
old_ctx = read_c0_entryhi() & asid_mask;
write_c0_entrylo0(entrylo0);
write_c0_entryhi(entryhi);
write_c0_index(wired);
wired++; /* BARRIER */
tlb_write_indexed();
write_c0_entryhi(old_ctx);
local_flush_tlb_all();
local_irq_restore(flags);
}
}
void tlb_init(void)
{
local_flush_tlb_from(0);
build_tlb_refill_handler();
}
| linux-master | arch/mips/mm/tlb-r3k.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mmdebug.h>
#include <linux/mm.h>
#include <asm/addrspace.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/dma.h>
static inline bool __debug_virt_addr_valid(unsigned long x)
{
/*
* MAX_DMA_ADDRESS is a virtual address that may not correspond to an
* actual physical address. Enough code relies on
* virt_to_phys(MAX_DMA_ADDRESS) that we just need to work around it
* and always return true.
*/
if (x == MAX_DMA_ADDRESS)
return true;
return x >= PAGE_OFFSET && (KSEGX(x) < KSEG2 ||
IS_ENABLED(CONFIG_EVA) ||
!IS_ENABLED(CONFIG_HIGHMEM));
}
phys_addr_t __virt_to_phys(volatile const void *x)
{
WARN(!__debug_virt_addr_valid((unsigned long)x),
"virt_to_phys used for non-linear address: %pK (%pS)\n",
x, x);
return __virt_to_phys_nodebug(x);
}
EXPORT_SYMBOL(__virt_to_phys);
phys_addr_t __phys_addr_symbol(unsigned long x)
{
/* This is bounds checking against the kernel image only.
* __pa_symbol should only be used on kernel symbol addresses.
*/
VIRTUAL_BUG_ON(x < (unsigned long)_text ||
x > (unsigned long)_end);
return __pa_symbol_nodebug(x);
}
EXPORT_SYMBOL(__phys_addr_symbol);
| linux-master | arch/mips/mm/physaddr.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Synthesize TLB refill handlers at runtime.
*
* Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle ([email protected])
* Copyright (C) 2008, 2009 Cavium Networks, Inc.
* Copyright (C) 2011 MIPS Technologies, Inc.
*
* ... and the days got worse and worse and now you see
* I've gone completely out of my mind.
*
* They're coming to take me a away haha
* they're coming to take me a away hoho hihi haha
* to the funny farm where code is beautiful all the time ...
*
* (Condolences to Napoleon XIV)
*/
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/cache.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/cpu-type.h>
#include <asm/mmu_context.h>
#include <asm/uasm.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
static int mips_xpa_disabled;
static int __init xpa_disable(char *s)
{
mips_xpa_disabled = 1;
return 1;
}
__setup("noxpa", xpa_disable);
/*
* TLB load/store/modify handlers.
*
* Only the fastpath gets synthesized at runtime, the slowpath for
* do_page_fault remains normal asm.
*/
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);
struct work_registers {
int r1;
int r2;
int r3;
};
struct tlb_reg_save {
unsigned long a;
unsigned long b;
} ____cacheline_aligned_in_smp;
static struct tlb_reg_save handler_reg_save[NR_CPUS];
static inline int r45k_bvahwbug(void)
{
/* XXX: We should probe for the presence of this bug, but we don't. */
return 0;
}
static inline int r4k_250MHZhwbug(void)
{
/* XXX: We should probe for the presence of this bug, but we don't. */
return 0;
}
extern int sb1250_m3_workaround_needed(void);
static inline int __maybe_unused bcm1250_m3_war(void)
{
if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
return sb1250_m3_workaround_needed();
return 0;
}
static inline int __maybe_unused r10000_llsc_war(void)
{
return IS_ENABLED(CONFIG_WAR_R10000_LLSC);
}
static int use_bbit_insns(void)
{
switch (current_cpu_type()) {
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
case CPU_CAVIUM_OCTEON3:
return 1;
default:
return 0;
}
}
static int use_lwx_insns(void)
{
switch (current_cpu_type()) {
case CPU_CAVIUM_OCTEON2:
case CPU_CAVIUM_OCTEON3:
return 1;
default:
return 0;
}
}
#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
static bool scratchpad_available(void)
{
return true;
}
static int scratchpad_offset(int i)
{
/*
* CVMSEG starts at address -32768 and extends for
* CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
*/
i += 1; /* Kernel use starts at the top and works down. */
return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
}
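/*
 * Worked example (illustrative, with a hypothetical
 * CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE of 2): CVMSEG then spans
 * [-32768, -32512) and scratchpad_offset(0) evaluates to
 * 2 * 128 - 8 - 32768 == -32520, i.e. the topmost 8-byte slot.
 */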
#else
static bool scratchpad_available(void)
{
return false;
}
static int scratchpad_offset(int i)
{
BUG();
/* Really unreachable, but evidently some GCC versions want this. */
return 0;
}
#endif
/*
* Found by experiment: at least some revisions of the 4kc throw a
* machine check exception under some circumstances, triggered by invalid
* values in the index register. Delaying the tlbp instruction until
* after the next branch, plus adding an additional nop in front of
* tlbwi/tlbwr avoids the invalid index register values. Nobody knows
* why; it's not an issue caused by the core RTL.
*
*/
static int m4kc_tlbp_war(void)
{
return current_cpu_type() == CPU_4KC;
}
/* Handle labels (which must be positive integers). */
enum label_id {
label_second_part = 1,
label_leave,
label_vmalloc,
label_vmalloc_done,
label_tlbw_hazard_0,
label_split = label_tlbw_hazard_0 + 8,
label_tlbl_goaround1,
label_tlbl_goaround2,
label_nopage_tlbl,
label_nopage_tlbs,
label_nopage_tlbm,
label_smp_pgtable_change,
label_r3000_write_probe_fail,
label_large_segbits_fault,
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
label_tlb_huge_update,
#endif
};
UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
/* _tlbw_hazard_x is handled differently. */
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
UASM_L_LA(_tlb_huge_update)
#endif
static int hazard_instance;
static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
{
switch (instance) {
case 0 ... 7:
uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
return;
default:
BUG();
}
}
static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
{
switch (instance) {
case 0 ... 7:
uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
break;
default:
BUG();
}
}
/*
* pgtable bits are assigned dynamically depending on processor feature
* and statically based on kernel configuration. This spits out the actual
* values the kernel is using. Required to make sense of the disassembled
* TLB exception handlers.
*/
static void output_pgtable_bits_defines(void)
{
#define pr_define(fmt, ...) \
pr_debug("#define " fmt, ##__VA_ARGS__)
pr_debug("#include <asm/asm.h>\n");
pr_debug("#include <asm/regdef.h>\n");
pr_debug("\n");
pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
#endif
#ifdef _PAGE_NO_EXEC_SHIFT
if (cpu_has_rixi)
pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
#endif
pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
pr_debug("\n");
}
static inline void dump_handler(const char *symbol, const void *start, const void *end)
{
unsigned int count = (end - start) / sizeof(u32);
const u32 *handler = start;
int i;
pr_debug("LEAF(%s)\n", symbol);
pr_debug("\t.set push\n");
pr_debug("\t.set noreorder\n");
for (i = 0; i < count; i++)
pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
pr_debug("\t.set\tpop\n");
pr_debug("\tEND(%s)\n", symbol);
}
/* The only general purpose registers allowed in TLB handlers. */
#define K0 26
#define K1 27
/* Some CP0 registers */
#define C0_INDEX 0, 0
#define C0_ENTRYLO0 2, 0
#define C0_TCBIND 2, 2
#define C0_ENTRYLO1 3, 0
#define C0_CONTEXT 4, 0
#define C0_PAGEMASK 5, 0
#define C0_PWBASE 5, 5
#define C0_PWFIELD 5, 6
#define C0_PWSIZE 5, 7
#define C0_PWCTL 6, 6
#define C0_BADVADDR 8, 0
#define C0_PGD 9, 7
#define C0_ENTRYHI 10, 0
#define C0_EPC 14, 0
#define C0_XCONTEXT 20, 0
#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif
/* The worst case length of the handler is around 18 instructions for
* R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
* Maximum space available is 32 instructions for R3000 and 64
* instructions for R4000.
*
* We deliberately chose a buffer size of 128, so we won't scribble
* over anything important on overflow before we panic.
*/
static u32 tlb_handler[128];
/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128];
static struct uasm_reloc relocs[128];
static int check_for_high_segbits;
static bool fill_includes_sw_bits;
static unsigned int kscratch_used_mask;
static inline int __maybe_unused c0_kscratch(void)
{
return 31;
}
static int allocate_kscratch(void)
{
int r;
unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
r = ffs(a);
if (r == 0)
return -1;
r--; /* make it zero based */
kscratch_used_mask |= (1 << r);
return r;
}
static int scratch_reg;
int pgd_reg;
EXPORT_SYMBOL_GPL(pgd_reg);
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
static struct work_registers build_get_work_registers(u32 **p)
{
struct work_registers r;
if (scratch_reg >= 0) {
/* Save in CPU local C0_KScratch? */
UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
r.r1 = K0;
r.r2 = K1;
r.r3 = 1;
return r;
}
if (num_possible_cpus() > 1) {
/* Get smp_processor_id */
UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
/* handler_reg_save index in K0 */
UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
UASM_i_LA(p, K1, (long)&handler_reg_save);
UASM_i_ADDU(p, K0, K0, K1);
} else {
UASM_i_LA(p, K0, (long)&handler_reg_save);
}
/* K0 now points to save area, save $1 and $2 */
UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
r.r1 = K1;
r.r2 = 1;
r.r3 = 2;
return r;
}
static void build_restore_work_registers(u32 **p)
{
if (scratch_reg >= 0) {
uasm_i_ehb(p);
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
return;
}
/* K0 already points to save area, restore $1 and $2 */
UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
* CONFIG_MIPS_PGD_C0_CONTEXT implies a 64-bit kernel and no pgd_current,
* so we cannot build the R3000 handler under these circumstances.
*
* The R3000 TLB handler is simple.
*/
static void build_r3000_tlb_refill_handler(void)
{
long pgdc = (long)pgd_current;
u32 *p;
memset(tlb_handler, 0, sizeof(tlb_handler));
p = tlb_handler;
uasm_i_mfc0(&p, K0, C0_BADVADDR);
uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
uasm_i_srl(&p, K0, K0, 22); /* load delay */
uasm_i_sll(&p, K0, K0, 2);
uasm_i_addu(&p, K1, K1, K0);
uasm_i_mfc0(&p, K0, C0_CONTEXT);
uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
uasm_i_addu(&p, K1, K1, K0);
uasm_i_lw(&p, K0, 0, K1);
uasm_i_nop(&p); /* load delay */
uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
uasm_i_tlbwr(&p); /* cp0 delay */
uasm_i_jr(&p, K1);
uasm_i_rfe(&p); /* branch delay */
if (p > tlb_handler + 32)
panic("TLB refill handler space exceeded");
pr_debug("Wrote TLB refill handler (%u instructions).\n",
(unsigned int)(p - tlb_handler));
memcpy((void *)ebase, tlb_handler, 0x80);
local_flush_icache_range(ebase, ebase + 0x80);
dump_handler("r3000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x80));
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
/*
* The R4000 TLB handler is much more complicated. We have two
* consecutive handler areas with 32 instructions space each.
* Since they aren't used at the same time, we can overflow in the
* other one. To keep things simple, we first assume linear space,
* then we relocate it to the final handler layout as needed.
*/
static u32 final_handler[64];
/*
* Hazards
*
* From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
* 2. A timing hazard exists for the TLBP instruction.
*
* stalling_instruction
* TLBP
*
* The JTLB is being read for the TLBP throughout the stall generated by the
* previous instruction. This is not really correct as the stalling instruction
* can modify the address used to access the JTLB. The failure symptom is that
* the TLBP instruction will use an address created for the stalling instruction
* and not the address held in C0_ENHI and thus report the wrong results.
*
* The software work-around is to not allow the instruction preceding the TLBP
* to stall - make it an NOP or some other instruction guaranteed not to stall.
*
* Errata 2 will not be fixed. This errata is also on the R5000.
*
* As if we MIPS hackers wouldn't know how to nop pipelines happy ...
*/
static void __maybe_unused build_tlb_probe_entry(u32 **p)
{
switch (current_cpu_type()) {
/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
case CPU_R4600:
case CPU_R4700:
case CPU_R5000:
case CPU_NEVADA:
uasm_i_nop(p);
uasm_i_tlbp(p);
break;
default:
uasm_i_tlbp(p);
break;
}
}
void build_tlb_write_entry(u32 **p, struct uasm_label **l,
struct uasm_reloc **r,
enum tlb_write_entry wmode)
{
void(*tlbw)(u32 **) = NULL;
switch (wmode) {
case tlb_random: tlbw = uasm_i_tlbwr; break;
case tlb_indexed: tlbw = uasm_i_tlbwi; break;
}
if (cpu_has_mips_r2_r6) {
if (cpu_has_mips_r2_exec_hazard)
uasm_i_ehb(p);
tlbw(p);
return;
}
switch (current_cpu_type()) {
case CPU_R4000PC:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400PC:
case CPU_R4400SC:
case CPU_R4400MC:
/*
* This branch uses up a mtc0 hazard nop slot and saves
* two nops after the tlbw instruction.
*/
uasm_bgezl_hazard(p, r, hazard_instance);
tlbw(p);
uasm_bgezl_label(l, p, hazard_instance);
hazard_instance++;
uasm_i_nop(p);
break;
case CPU_R4600:
case CPU_R4700:
uasm_i_nop(p);
tlbw(p);
uasm_i_nop(p);
break;
case CPU_R5000:
case CPU_NEVADA:
uasm_i_nop(p); /* QED specifies 2 nops hazard */
uasm_i_nop(p); /* QED specifies 2 nops hazard */
tlbw(p);
break;
case CPU_R4300:
case CPU_5KC:
case CPU_TX49XX:
case CPU_PR4450:
uasm_i_nop(p);
tlbw(p);
break;
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_R16000:
case CPU_4KC:
case CPU_4KEC:
case CPU_M14KC:
case CPU_M14KEC:
case CPU_SB1:
case CPU_SB1A:
case CPU_4KSC:
case CPU_20KC:
case CPU_25KF:
case CPU_BMIPS32:
case CPU_BMIPS3300:
case CPU_BMIPS4350:
case CPU_BMIPS4380:
case CPU_BMIPS5000:
case CPU_LOONGSON2EF:
case CPU_LOONGSON64:
case CPU_R5500:
if (m4kc_tlbp_war())
uasm_i_nop(p);
fallthrough;
case CPU_ALCHEMY:
tlbw(p);
break;
case CPU_RM7000:
uasm_i_nop(p);
uasm_i_nop(p);
uasm_i_nop(p);
uasm_i_nop(p);
tlbw(p);
break;
case CPU_XBURST:
tlbw(p);
uasm_i_nop(p);
break;
default:
panic("No TLB refill handler yet (CPU type: %d)",
current_cpu_type());
break;
}
}
EXPORT_SYMBOL_GPL(build_tlb_write_entry);
static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
unsigned int reg)
{
if (_PAGE_GLOBAL_SHIFT == 0) {
/* pte_t is already in EntryLo format */
return;
}
if (cpu_has_rixi && _PAGE_NO_EXEC != 0) {
if (fill_includes_sw_bits) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
} else {
UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
UASM_i_ROTR(p, reg, reg,
ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
}
} else {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
#endif
}
}
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
unsigned int tmp, enum label_id lid,
int restore_scratch)
{
if (restore_scratch) {
/*
* Ensure the MFC0 below observes the value written to the
* KScratch register by the prior MTC0.
*/
if (scratch_reg >= 0)
uasm_i_ehb(p);
/* Reset default page size */
if (PM_DEFAULT_MASK >> 16) {
uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
uasm_i_mtc0(p, tmp, C0_PAGEMASK);
uasm_il_b(p, r, lid);
} else if (PM_DEFAULT_MASK) {
uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
uasm_i_mtc0(p, tmp, C0_PAGEMASK);
uasm_il_b(p, r, lid);
} else {
uasm_i_mtc0(p, 0, C0_PAGEMASK);
uasm_il_b(p, r, lid);
}
if (scratch_reg >= 0)
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
else
UASM_i_LW(p, 1, scratchpad_offset(0), 0);
} else {
/* Reset default page size */
if (PM_DEFAULT_MASK >> 16) {
uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
uasm_il_b(p, r, lid);
uasm_i_mtc0(p, tmp, C0_PAGEMASK);
} else if (PM_DEFAULT_MASK) {
uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
uasm_il_b(p, r, lid);
uasm_i_mtc0(p, tmp, C0_PAGEMASK);
} else {
uasm_il_b(p, r, lid);
uasm_i_mtc0(p, 0, C0_PAGEMASK);
}
}
}
static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
struct uasm_reloc **r,
unsigned int tmp,
enum tlb_write_entry wmode,
int restore_scratch)
{
/* Set huge page tlb entry size */
uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
uasm_i_mtc0(p, tmp, C0_PAGEMASK);
build_tlb_write_entry(p, l, r, wmode);
build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
}
/*
* Check if Huge PTE is present, if so then jump to LABEL.
*/
static void
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
unsigned int pmd, int lid)
{
UASM_i_LW(p, tmp, 0, pmd);
if (use_bbit_insns()) {
uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
} else {
uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
uasm_il_bnez(p, r, tmp, lid);
}
}
static void build_huge_update_entries(u32 **p, unsigned int pte,
unsigned int tmp)
{
int small_sequence;
/*
* A huge PTE describes an area the size of the
* configured huge page size. This is twice the size
* of the large TLB entry we intend to use.
* A TLB entry half the size of the configured
* huge page size is configured into entrylo0
* and entrylo1 to cover the contiguous huge PTE
* address space.
*/
small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
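/*
 * Worked example (illustrative, assuming a 2MB huge page and the usual
 * EntryLo encoding of physical address >> 6): each EntryLo covers half
 * the huge page, so the odd entry differs from the even one by
 * (HPAGE_SIZE / 2) >> 6 == HPAGE_SIZE >> 7. For 2MB pages that is
 * 0x4000, which fits in an immediate, so small_sequence is true.
 */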
/* We can clobber tmp. It isn't used after this.*/
if (!small_sequence)
uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
build_convert_pte_to_entrylo(p, pte);
UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
/* convert to entrylo1 */
if (small_sequence)
UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
else
UASM_i_ADDU(p, pte, pte, tmp);
UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}
static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
struct uasm_label **l,
unsigned int pte,
unsigned int ptr,
unsigned int flush)
{
#ifdef CONFIG_SMP
UASM_i_SC(p, pte, 0, ptr);
uasm_il_beqz(p, r, pte, label_tlb_huge_update);
UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
UASM_i_SW(p, pte, 0, ptr);
#endif
if (cpu_has_ftlb && flush) {
BUG_ON(!cpu_has_tlbinv);
UASM_i_MFC0(p, ptr, C0_ENTRYHI);
uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
UASM_i_MTC0(p, ptr, C0_ENTRYHI);
build_tlb_write_entry(p, l, r, tlb_indexed);
uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
UASM_i_MTC0(p, ptr, C0_ENTRYHI);
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
return;
}
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#ifdef CONFIG_64BIT
/*
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pmd entry.
*/
void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
long pgdc = (long)pgd_current;
#endif
/*
* The vmalloc handling is not in the hotpath.
*/
uasm_i_dmfc0(p, tmp, C0_BADVADDR);
if (check_for_high_segbits) {
/*
* The kernel currently implicitly assumes that the
* MIPS SEGBITS parameter for the processor is
* (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
* allocate virtual addresses outside the maximum
* range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
* that doesn't prevent user code from accessing the
* higher xuseg addresses. Here, we make sure that
* everything but the lower xuseg addresses goes down
* the module_alloc/vmalloc path.
*/
uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_bnez(p, r, ptr, label_vmalloc);
} else {
uasm_il_bltz(p, r, tmp, label_vmalloc);
}
/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
if (pgd_reg != -1) {
/* pgd is in pgd_reg */
if (cpu_has_ldpte)
UASM_i_MFC0(p, ptr, C0_PWBASE);
else
UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
} else {
#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
/*
* &pgd << 11 stored in CONTEXT [23..63].
*/
UASM_i_MFC0(p, ptr, C0_CONTEXT);
/* Clear lower 23 bits of context. */
uasm_i_dins(p, ptr, 0, 0, 23);
/* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
uasm_i_drotr(p, ptr, ptr, 11);
#elif defined(CONFIG_SMP)
UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
UASM_i_LA_mostly(p, tmp, pgdc);
uasm_i_daddu(p, ptr, ptr, tmp);
uasm_i_dmfc0(p, tmp, C0_BADVADDR);
uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
UASM_i_LA_mostly(p, ptr, pgdc);
uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif
}
uasm_l_vmalloc_done(l, *p);
/* get pgd offset in bytes */
uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PUD_FOLDED
uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */
uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */
#endif
#ifndef __PAGETABLE_PMD_FOLDED
uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}
EXPORT_SYMBOL_GPL(build_get_pmde64);
/*
* BVADDR is the faulting address, PTR is scratch.
* PTR will hold the pgd for vmalloc.
*/
static void
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int bvaddr, unsigned int ptr,
enum vmalloc64_mode mode)
{
long swpd = (long)swapper_pg_dir;
int single_insn_swpd;
int did_vmalloc_branch = 0;
single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
uasm_l_vmalloc(l, *p);
if (mode != not_refill && check_for_high_segbits) {
if (single_insn_swpd) {
uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
did_vmalloc_branch = 1;
/* fall through */
} else {
uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
}
}
if (!did_vmalloc_branch) {
if (single_insn_swpd) {
uasm_il_b(p, r, label_vmalloc_done);
uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
} else {
UASM_i_LA_mostly(p, ptr, swpd);
uasm_il_b(p, r, label_vmalloc_done);
if (uasm_in_compat_space_p(swpd))
uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
else
uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
}
}
if (mode != not_refill && check_for_high_segbits) {
uasm_l_large_segbits_fault(l, *p);
if (mode == refill_scratch && scratch_reg >= 0)
uasm_i_ehb(p);
/*
* We get here if we are an xsseg address, or if we are
* an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
*
* Ignoring xsseg (assumed disabled, so it would generate
* address errors), the only remaining possibility
* is the upper xuseg addresses. On processors with
* TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
* addresses would have taken an address error. We try
* to mimic that here by taking a load/istream page
* fault.
*/
if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
uasm_i_sync(p, 0);
UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
uasm_i_jr(p, ptr);
if (mode == refill_scratch) {
if (scratch_reg >= 0)
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
else
UASM_i_LW(p, 1, scratchpad_offset(0), 0);
} else {
uasm_i_nop(p);
}
}
}
#else /* !CONFIG_64BIT */
/*
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pgd entry.
*/
void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
if (pgd_reg != -1) {
/* pgd is in pgd_reg */
uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
} else {
long pgdc = (long)pgd_current;
/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
UASM_i_LA_mostly(p, tmp, pgdc);
uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
uasm_i_addu(p, ptr, tmp, ptr);
#else
UASM_i_LA_mostly(p, ptr, pgdc);
#endif
uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
}
uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}
EXPORT_SYMBOL_GPL(build_get_pgde32);
#endif /* !CONFIG_64BIT */
static void build_adjust_context(u32 **p, unsigned int ctx)
{
unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
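/*
 * Worked example (illustrative, assuming 4K pages and 32-bit PTEs, i.e.
 * PAGE_SHIFT == 12 and PTE_T_LOG2 == 2): shift evaluates to
 * 4 - 3 + 12 - 12 == 1, turning the BadVPN2 field of c0_context
 * ((VA >> 13) stored at bit 4) into the byte offset of an even/odd PTE
 * pair ((VA >> 13) * 8) once it is masked below.
 */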
if (shift)
UASM_i_SRL(p, ctx, ctx, shift);
uasm_i_andi(p, ctx, ctx, mask);
}
void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
/*
* Bug workaround for the Nevada. It seems as if under certain
* circumstances the move from cp0_context might produce a
* bogus result when the mfc0 instruction and its consumer are
* in a different cacheline or a load instruction, probably any
* memory reference, is between them.
*/
switch (current_cpu_type()) {
case CPU_NEVADA:
UASM_i_LW(p, ptr, 0, ptr);
GET_CONTEXT(p, tmp); /* get context reg */
break;
default:
GET_CONTEXT(p, tmp); /* get context reg */
UASM_i_LW(p, ptr, 0, ptr);
break;
}
build_adjust_context(p, tmp);
UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
EXPORT_SYMBOL_GPL(build_get_ptep);
void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
{
int pte_off_even = 0;
int pte_off_odd = sizeof(pte_t);
#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
/* The low 32 bits of EntryLo is stored in pte_high */
pte_off_even += offsetof(pte_t, pte_high);
pte_off_odd += offsetof(pte_t, pte_high);
#endif
if (IS_ENABLED(CONFIG_XPA)) {
uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
if (cpu_has_xpa && !mips_xpa_disabled) {
uasm_i_lw(p, tmp, 0, ptep);
uasm_i_ext(p, tmp, tmp, 0, 24);
uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
}
uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
if (cpu_has_xpa && !mips_xpa_disabled) {
uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
uasm_i_ext(p, tmp, tmp, 0, 24);
uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
}
return;
}
UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
if (r45k_bvahwbug())
build_tlb_probe_entry(p);
build_convert_pte_to_entrylo(p, tmp);
if (r4k_250MHZhwbug())
UASM_i_MTC0(p, 0, C0_ENTRYLO0);
UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
build_convert_pte_to_entrylo(p, ptep);
if (r45k_bvahwbug())
uasm_i_mfc0(p, tmp, C0_INDEX);
if (r4k_250MHZhwbug())
UASM_i_MTC0(p, 0, C0_ENTRYLO1);
UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
}
EXPORT_SYMBOL_GPL(build_update_entries);
struct mips_huge_tlb_info {
int huge_pte;
int restore_scratch;
bool need_reload_pte;
};
static struct mips_huge_tlb_info
build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp,
unsigned int ptr, int c0_scratch_reg)
{
struct mips_huge_tlb_info rv;
unsigned int even, odd;
int vmalloc_branch_delay_filled = 0;
const int scratch = 1; /* Our extra working register */
rv.huge_pte = scratch;
rv.restore_scratch = 0;
rv.need_reload_pte = false;
if (check_for_high_segbits) {
UASM_i_MFC0(p, tmp, C0_BADVADDR);
if (pgd_reg != -1)
UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
else
UASM_i_MFC0(p, ptr, C0_CONTEXT);
if (c0_scratch_reg >= 0)
UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
else
UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
uasm_i_dsrl_safe(p, scratch, tmp,
PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_bnez(p, r, scratch, label_vmalloc);
if (pgd_reg == -1) {
vmalloc_branch_delay_filled = 1;
/* Clear lower 23 bits of context. */
uasm_i_dins(p, ptr, 0, 0, 23);
}
} else {
if (pgd_reg != -1)
UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
else
UASM_i_MFC0(p, ptr, C0_CONTEXT);
UASM_i_MFC0(p, tmp, C0_BADVADDR);
if (c0_scratch_reg >= 0)
UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
else
UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
if (pgd_reg == -1)
/* Clear lower 23 bits of context. */
uasm_i_dins(p, ptr, 0, 0, 23);
uasm_il_bltz(p, r, tmp, label_vmalloc);
}
if (pgd_reg == -1) {
vmalloc_branch_delay_filled = 1;
/* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
uasm_i_drotr(p, ptr, ptr, 11);
}
#ifdef __PAGETABLE_PMD_FOLDED
#define LOC_PTEP scratch
#else
#define LOC_PTEP ptr
#endif
if (!vmalloc_branch_delay_filled)
/* get pgd offset in bytes */
uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
uasm_l_vmalloc_done(l, *p);
/*
* tmp ptr
* fall-through case = badvaddr *pgd_current
* vmalloc case = badvaddr swapper_pg_dir
*/
if (vmalloc_branch_delay_filled)
/* get pgd offset in bytes */
uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
#ifdef __PAGETABLE_PMD_FOLDED
GET_CONTEXT(p, tmp); /* get context reg */
#endif
uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
if (use_lwx_insns()) {
UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
} else {
uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
}
#ifndef __PAGETABLE_PUD_FOLDED
/* get pud offset in bytes */
uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3);
uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);
if (use_lwx_insns()) {
UASM_i_LWX(p, ptr, scratch, ptr);
} else {
uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
UASM_i_LW(p, ptr, 0, ptr);
}
/* ptr contains a pointer to PMD entry */
/* tmp contains the address */
#endif
#ifndef __PAGETABLE_PMD_FOLDED
/* get pmd offset in bytes */
uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
GET_CONTEXT(p, tmp); /* get context reg */
if (use_lwx_insns()) {
UASM_i_LWX(p, scratch, scratch, ptr);
} else {
uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
UASM_i_LW(p, scratch, 0, ptr);
}
#endif
/* Adjust the context during the load latency. */
build_adjust_context(p, tmp);
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
/*
* In the LWX case we don't want to do the load in the
* delay slot. It cannot issue in the same cycle and may be
* speculative and unneeded.
*/
if (use_lwx_insns())
uasm_i_nop(p);
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
/* build_update_entries */
if (use_lwx_insns()) {
even = ptr;
odd = tmp;
UASM_i_LWX(p, even, scratch, tmp);
UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
UASM_i_LWX(p, odd, scratch, tmp);
} else {
UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
even = tmp;
odd = ptr;
UASM_i_LW(p, even, 0, ptr); /* get even pte */
UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
}
if (cpu_has_rixi) {
uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
} else {
uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
}
UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
if (c0_scratch_reg >= 0) {
uasm_i_ehb(p);
UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
build_tlb_write_entry(p, l, r, tlb_random);
uasm_l_leave(l, *p);
rv.restore_scratch = 1;
} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
build_tlb_write_entry(p, l, r, tlb_random);
uasm_l_leave(l, *p);
UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
} else {
UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
build_tlb_write_entry(p, l, r, tlb_random);
uasm_l_leave(l, *p);
rv.restore_scratch = 1;
}
uasm_i_eret(p); /* return from trap */
return rv;
}
/*
* For a 64-bit kernel, we are using the 64-bit XTLB refill exception
* because EXL == 0. If we wrap, we can also use the 32 instruction
* slots before the XTLB refill exception handler which belong to the
* unused TLB refill exception.
*/
#define MIPS64_REFILL_INSNS 32
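/*
 * Layout note (added for orientation, not from the original source): on
 * MIPS64 the 32-bit TLB refill vector sits at ebase + 0x000 and the XTLB
 * refill vector at ebase + 0x080, each 32 instructions wide. Since only
 * the XTLB vector is taken here, the preceding 32 slots are free, which
 * is what the folding below exploits when it copies 0x100 bytes to ebase.
 */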
static void build_r4000_tlb_refill_handler(void)
{
u32 *p = tlb_handler;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
u32 *f;
unsigned int final_len;
struct mips_huge_tlb_info htlb_info __maybe_unused;
enum vmalloc64_mode vmalloc_mode __maybe_unused;
memset(tlb_handler, 0, sizeof(tlb_handler));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
memset(final_handler, 0, sizeof(final_handler));
if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
scratch_reg);
vmalloc_mode = refill_scratch;
} else {
htlb_info.huge_pte = K0;
htlb_info.restore_scratch = 0;
htlb_info.need_reload_pte = true;
vmalloc_mode = refill_noscratch;
/*
* create the plain linear handler
*/
if (bcm1250_m3_war()) {
unsigned int segbits = 44;
uasm_i_dmfc0(&p, K0, C0_BADVADDR);
uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
uasm_i_xor(&p, K0, K0, K1);
uasm_i_dsrl_safe(&p, K1, K0, 62);
uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
uasm_i_or(&p, K0, K0, K1);
uasm_il_bnez(&p, &r, K0, label_leave);
/* No need for uasm_i_nop */
}
#ifdef CONFIG_64BIT
build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif
build_get_ptep(&p, K0, K1);
build_update_entries(&p, K0, K1);
build_tlb_write_entry(&p, &l, &r, tlb_random);
uasm_l_leave(&l, p);
uasm_i_eret(&p); /* return from trap */
}
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_l_tlb_huge_update(&l, p);
if (htlb_info.need_reload_pte)
UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
build_huge_update_entries(&p, htlb_info.huge_pte, K1);
build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
htlb_info.restore_scratch);
#endif
#ifdef CONFIG_64BIT
build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
#endif
/*
* Overflow check: For the 64bit handler, we need at least one
* free instruction slot for the wrap-around branch. In worst
* case, if the intended insertion point is a delay slot, we
* need three, with the second nop'ed and the third being
* unused.
*/
switch (boot_cpu_type()) {
default:
if (sizeof(long) == 4) {
fallthrough;
case CPU_LOONGSON2EF:
/* Loongson2 ebase differs from r4k, so we have more space */
if ((p - tlb_handler) > 64)
panic("TLB refill handler space exceeded");
/*
* Now fold the handler in the TLB refill handler space.
*/
f = final_handler;
/* Simplest case, just copy the handler. */
uasm_copy_handler(relocs, labels, tlb_handler, p, f);
final_len = p - tlb_handler;
break;
} else {
if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
|| (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
&& uasm_insn_has_bdelay(relocs,
tlb_handler + MIPS64_REFILL_INSNS - 3)))
panic("TLB refill handler space exceeded");
/*
* Now fold the handler in the TLB refill handler space.
*/
f = final_handler + MIPS64_REFILL_INSNS;
if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
/* Just copy the handler. */
uasm_copy_handler(relocs, labels, tlb_handler, p, f);
final_len = p - tlb_handler;
} else {
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
const enum label_id ls = label_tlb_huge_update;
#else
const enum label_id ls = label_vmalloc;
#endif
u32 *split;
int ov = 0;
int i;
for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
;
BUG_ON(i == ARRAY_SIZE(labels));
split = labels[i].addr;
/*
* See if we have overflown one way or the other.
*/
if (split > tlb_handler + MIPS64_REFILL_INSNS ||
split < p - MIPS64_REFILL_INSNS)
ov = 1;
if (ov) {
/*
* Split two instructions before the end. One
* for the branch and one for the instruction
* in the delay slot.
*/
split = tlb_handler + MIPS64_REFILL_INSNS - 2;
/*
* If the branch would fall in a delay slot,
* we must back up an additional instruction
* so that it is no longer in a delay slot.
*/
if (uasm_insn_has_bdelay(relocs, split - 1))
split--;
}
/* Copy first part of the handler. */
uasm_copy_handler(relocs, labels, tlb_handler, split, f);
f += split - tlb_handler;
if (ov) {
/* Insert branch. */
uasm_l_split(&l, final_handler);
uasm_il_b(&f, &r, label_split);
if (uasm_insn_has_bdelay(relocs, split))
uasm_i_nop(&f);
else {
uasm_copy_handler(relocs, labels,
split, split + 1, f);
uasm_move_labels(labels, f, f + 1, -1);
f++;
split++;
}
}
/* Copy the rest of the handler. */
uasm_copy_handler(relocs, labels, split, p, final_handler);
final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
(p - split);
}
}
break;
}
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB refill handler (%u instructions).\n",
final_len);
memcpy((void *)ebase, final_handler, 0x100);
local_flush_icache_range(ebase, ebase + 0x100);
dump_handler("r4000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x100));
}
static void setup_pw(void)
{
unsigned int pwctl;
unsigned long pgd_i, pgd_w;
#ifndef __PAGETABLE_PMD_FOLDED
unsigned long pmd_i, pmd_w;
#endif
unsigned long pt_i, pt_w;
unsigned long pte_i, pte_w;
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
unsigned long psn;
psn = ilog2(_PAGE_HUGE); /* bit used to indicate huge page */
#endif
pgd_i = PGDIR_SHIFT; /* 1st level PGD */
#ifndef __PAGETABLE_PMD_FOLDED
pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_TABLE_ORDER;
pmd_i = PMD_SHIFT; /* 2nd level PMD */
pmd_w = PMD_SHIFT - PAGE_SHIFT;
#else
pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_TABLE_ORDER;
#endif
pt_i = PAGE_SHIFT; /* 3rd level PTE */
pt_w = PAGE_SHIFT - 3;
pte_i = ilog2(_PAGE_GLOBAL);
pte_w = 0;
pwctl = 1 << 30; /* Set PWDirExt */
#ifndef __PAGETABLE_PMD_FOLDED
write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w);
#else
write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i);
write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w);
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
pwctl |= (1 << 6 | psn);
#endif
write_c0_pwctl(pwctl);
write_c0_kpgd((long)swapper_pg_dir);
kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
}
static void build_loongson3_tlb_refill_handler(void)
{
u32 *p = tlb_handler;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
memset(tlb_handler, 0, sizeof(tlb_handler));
if (check_for_high_segbits) {
uasm_i_dmfc0(&p, K0, C0_BADVADDR);
uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_beqz(&p, &r, K1, label_vmalloc);
uasm_i_nop(&p);
uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
uasm_i_nop(&p);
uasm_l_vmalloc(&l, p);
}
uasm_i_dmfc0(&p, K1, C0_PGD);
uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
#endif
uasm_i_ldpte(&p, K1, 0); /* even */
uasm_i_ldpte(&p, K1, 1); /* odd */
uasm_i_tlbwr(&p);
/* restore page mask */
if (PM_DEFAULT_MASK >> 16) {
uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
uasm_i_mtc0(&p, K0, C0_PAGEMASK);
} else if (PM_DEFAULT_MASK) {
uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
uasm_i_mtc0(&p, K0, C0_PAGEMASK);
} else {
uasm_i_mtc0(&p, 0, C0_PAGEMASK);
}
uasm_i_eret(&p);
if (check_for_high_segbits) {
uasm_l_large_segbits_fault(&l, p);
UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
uasm_i_jr(&p, K1);
uasm_i_nop(&p);
}
uasm_resolve_relocs(relocs, labels);
memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
local_flush_icache_range(ebase + 0x80, ebase + 0x100);
dump_handler("loongson3_tlb_refill",
(u32 *)(ebase + 0x80), (u32 *)(ebase + 0x100));
}
static void build_setup_pgd(void)
{
const int a0 = 4;
const int __maybe_unused a1 = 5;
const int __maybe_unused a2 = 6;
u32 *p = (u32 *)msk_isa16_mode((ulong)tlbmiss_handler_setup_pgd);
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
long pgdc = (long)pgd_current;
#endif
memset(p, 0, tlbmiss_handler_setup_pgd_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
pgd_reg = allocate_kscratch();
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
if (pgd_reg == -1) {
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
/* PGD << 11 in c0_Context */
/*
* If it is a ckseg0 address, convert to a physical
* address. Shifting right by 29 and adding 4 will
* result in zero for these addresses.
*
*/
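/*
 * Worked example (added for clarity): a CKSEG0 address such as
 * 0xffffffff80000000 arithmetically shifted right by 29 yields -4, so
 * adding 4 gives 0 and the bnez below falls through, letting the dinsm
 * clear bits 29..63 and leave the physical address behind. Any other
 * address produces a non-zero result and skips the conversion.
 */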
UASM_i_SRA(&p, a1, a0, 29);
UASM_i_ADDIU(&p, a1, a1, 4);
uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
uasm_i_nop(&p);
uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
uasm_l_tlbl_goaround1(&l, p);
UASM_i_SLL(&p, a0, a0, 11);
UASM_i_MTC0(&p, a0, C0_CONTEXT);
uasm_i_jr(&p, 31);
uasm_i_ehb(&p);
} else {
/* PGD in c0_KScratch */
if (cpu_has_ldpte)
UASM_i_MTC0(&p, a0, C0_PWBASE);
else
UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
uasm_i_jr(&p, 31);
uasm_i_ehb(&p);
}
#else
#ifdef CONFIG_SMP
/* Save PGD to pgd_current[smp_processor_id()] */
UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
UASM_i_LA_mostly(&p, a2, pgdc);
UASM_i_ADDU(&p, a2, a2, a1);
UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
#else
UASM_i_LA_mostly(&p, a2, pgdc);
UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
#endif /* SMP */
/* if pgd_reg is allocated, save PGD also to scratch register */
if (pgd_reg != -1) {
UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
uasm_i_jr(&p, 31);
uasm_i_ehb(&p);
} else {
uasm_i_jr(&p, 31);
uasm_i_nop(&p);
}
#endif
if (p >= (u32 *)tlbmiss_handler_setup_pgd_end)
panic("tlbmiss_handler_setup_pgd space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
(unsigned int)(p - (u32 *)tlbmiss_handler_setup_pgd));
dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
tlbmiss_handler_setup_pgd_end);
}
static void
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
uasm_i_sync(p, 0);
# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_lld(p, pte, 0, ptr);
else
# endif
UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_ld(p, pte, 0, ptr);
else
# endif
UASM_i_LW(p, pte, 0, ptr);
#endif
}
static void
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
unsigned int mode, unsigned int scratch)
{
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
unsigned int swmode = mode & ~hwmode;
if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
uasm_i_lui(p, scratch, swmode >> 16);
uasm_i_or(p, pte, pte, scratch);
BUG_ON(swmode & 0xffff);
} else {
uasm_i_ori(p, pte, pte, mode);
}
#ifdef CONFIG_SMP
# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_scd(p, pte, 0, ptr);
else
# endif
UASM_i_SC(p, pte, 0, ptr);
if (r10000_llsc_war())
uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
else
uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!cpu_has_64bits) {
/* no uasm_i_nop needed */
uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
uasm_i_ori(p, pte, pte, hwmode);
BUG_ON(hwmode & ~0xffff);
uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
/* no uasm_i_nop needed */
uasm_i_lw(p, pte, 0, ptr);
} else
uasm_i_nop(p);
# else
uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_sd(p, pte, 0, ptr);
else
# endif
UASM_i_SW(p, pte, 0, ptr);
# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!cpu_has_64bits) {
uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
uasm_i_ori(p, pte, pte, hwmode);
BUG_ON(hwmode & ~0xffff);
uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
uasm_i_lw(p, pte, 0, ptr);
}
# endif
#endif
}
/*
* Check if PTE is present; if not, jump to LABEL. PTR points to
* the page table where this PTE is located; PTE will be reloaded
* with its original value.
*/
static void
build_pte_present(u32 **p, struct uasm_reloc **r,
int pte, int ptr, int scratch, enum label_id lid)
{
int t = scratch >= 0 ? scratch : pte;
int cur = pte;
if (cpu_has_rixi) {
if (use_bbit_insns()) {
uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
uasm_i_nop(p);
} else {
if (_PAGE_PRESENT_SHIFT) {
uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
cur = t;
}
uasm_i_andi(p, t, cur, 1);
uasm_il_beqz(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
iPTE_LW(p, pte, ptr);
}
} else {
if (_PAGE_PRESENT_SHIFT) {
uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
cur = t;
}
uasm_i_andi(p, t, cur,
(_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT);
uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
iPTE_LW(p, pte, ptr);
}
}
/* Make PTE valid, store result in PTR. */
static void
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr, unsigned int scratch)
{
unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
iPTE_SW(p, r, pte, ptr, mode, scratch);
}
/*
* Check if PTE can be written to; if not, branch to LABEL. Regardless,
* restore PTE with the value from PTR when done.
*/
static void
build_pte_writable(u32 **p, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, int scratch,
enum label_id lid)
{
int t = scratch >= 0 ? scratch : pte;
int cur = pte;
if (_PAGE_PRESENT_SHIFT) {
uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
cur = t;
}
uasm_i_andi(p, t, cur,
(_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
uasm_i_xori(p, t, t,
(_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
iPTE_LW(p, pte, ptr);
else
uasm_i_nop(p);
}
/* Make PTE writable, update software status bits as well, then store
* at PTR.
*/
static void
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr, unsigned int scratch)
{
unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
| _PAGE_DIRTY);
iPTE_SW(p, r, pte, ptr, mode, scratch);
}
/*
* Check if PTE can be modified; if not, branch to LABEL. Regardless,
* restore PTE with the value from PTR when done.
*/
static void
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, int scratch,
enum label_id lid)
{
if (use_bbit_insns()) {
uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
uasm_i_nop(p);
} else {
int t = scratch >= 0 ? scratch : pte;
uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
uasm_i_andi(p, t, t, 1);
uasm_il_beqz(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
iPTE_LW(p, pte, ptr);
}
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
* R3000 style TLB load/store/modify handlers.
*/
/*
* This places the pte into ENTRYLO0 and writes it with tlbwi.
* Then it returns.
*/
static void
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
uasm_i_tlbwi(p);
uasm_i_jr(p, tmp);
uasm_i_rfe(p); /* branch delay */
}
/*
* This places the pte into ENTRYLO0 and writes it with tlbwi
* or tlbwr as appropriate. This is because the index register
* may have the probe fail bit set as a result of a trap on a
* kseg2 access, i.e. without refill. Then it returns.
*/
static void
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int pte,
unsigned int tmp)
{
uasm_i_mfc0(p, tmp, C0_INDEX);
uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
uasm_i_tlbwi(p); /* cp0 delay */
uasm_i_jr(p, tmp);
uasm_i_rfe(p); /* branch delay */
uasm_l_r3000_write_probe_fail(l, *p);
uasm_i_tlbwr(p); /* cp0 delay */
uasm_i_jr(p, tmp);
uasm_i_rfe(p); /* branch delay */
}
static void
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
unsigned int ptr)
{
long pgdc = (long)pgd_current;
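	/*
	 * BadVAddr >> 22 indexes the page directory (the top 10 VA bits),
	 * while the BadVPN field of c0_context, masked with 0xffc, already
	 * holds the byte offset of the PTE within the page table.
	 */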
uasm_i_mfc0(p, pte, C0_BADVADDR);
uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
uasm_i_srl(p, pte, pte, 22); /* load delay */
uasm_i_sll(p, pte, pte, 2);
uasm_i_addu(p, ptr, ptr, pte);
uasm_i_mfc0(p, pte, C0_CONTEXT);
uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
uasm_i_addu(p, ptr, ptr, pte);
uasm_i_lw(p, pte, 0, ptr);
uasm_i_tlbp(p); /* load delay */
}
static void build_r3000_tlb_load_handler(void)
{
u32 *p = (u32 *)handle_tlbl;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
memset(p, 0, handle_tlbl_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
uasm_i_nop(&p); /* load delay */
build_make_valid(&p, &r, K0, K1, -1);
build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
uasm_l_nopage_tlbl(&l, p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
uasm_i_nop(&p);
if (p >= (u32 *)handle_tlbl_end)
panic("TLB load handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
(unsigned int)(p - (u32 *)handle_tlbl));
dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_end);
}
static void build_r3000_tlb_store_handler(void)
{
u32 *p = (u32 *)handle_tlbs;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
memset(p, 0, handle_tlbs_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
uasm_i_nop(&p); /* load delay */
build_make_write(&p, &r, K0, K1, -1);
build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
uasm_l_nopage_tlbs(&l, p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
if (p >= (u32 *)handle_tlbs_end)
panic("TLB store handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
(unsigned int)(p - (u32 *)handle_tlbs));
dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_end);
}
static void build_r3000_tlb_modify_handler(void)
{
u32 *p = (u32 *)handle_tlbm;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
memset(p, 0, handle_tlbm_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
uasm_i_nop(&p); /* load delay */
build_make_write(&p, &r, K0, K1, -1);
build_r3000_pte_reload_tlbwi(&p, K0, K1);
uasm_l_nopage_tlbm(&l, p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
if (p >= (u32 *)handle_tlbm_end)
panic("TLB modify handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
(unsigned int)(p - (u32 *)handle_tlbm));
dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_end);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
static bool cpu_has_tlbex_tlbp_race(void)
{
/*
* When a Hardware Table Walker is running it can replace TLB entries
* at any time, leading to a race between it & the CPU.
*/
if (cpu_has_htw)
return true;
/*
* If the CPU shares FTLB RAM with its siblings then our entry may be
* replaced at any time by a sibling performing a write to the FTLB.
*/
if (cpu_has_shared_ftlb_ram)
return true;
/* In all other cases there ought to be no race condition to handle */
return false;
}
/*
* R4000 style TLB load/store/modify handlers.
*/
static struct work_registers
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
struct uasm_reloc **r)
{
struct work_registers wr = build_get_work_registers(p);
#ifdef CONFIG_64BIT
build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
#else
build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* For huge tlb entries, pmd doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
#endif
UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
UASM_i_LW(p, wr.r2, 0, wr.r2);
UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT - PTE_T_LOG2);
uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
#ifdef CONFIG_SMP
uasm_l_smp_pgtable_change(l, *p);
#endif
iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
if (!m4kc_tlbp_war()) {
build_tlb_probe_entry(p);
if (cpu_has_tlbex_tlbp_race()) {
			/* If the race hit and the probe missed, just leave and retry */
uasm_i_ehb(p);
uasm_i_mfc0(p, wr.r3, C0_INDEX);
uasm_il_bltz(p, r, wr.r3, label_leave);
uasm_i_nop(p);
}
}
return wr;
}
static void
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp,
unsigned int ptr)
{
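	/*
	 * The ori/xori pair clears the sizeof(pte_t) bit of ptr, rounding it
	 * down to the even PTE of the even/odd pair so that both EntryLo
	 * registers can be loaded from consecutive PTEs.
	 */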
uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
build_update_entries(p, tmp, ptr);
build_tlb_write_entry(p, l, r, tlb_indexed);
uasm_l_leave(l, *p);
build_restore_work_registers(p);
uasm_i_eret(p); /* return from trap */
#ifdef CONFIG_64BIT
build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
#endif
}
static void build_r4000_tlb_load_handler(void)
{
u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
memset(p, 0, handle_tlbl_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
if (bcm1250_m3_war()) {
unsigned int segbits = 44;
uasm_i_dmfc0(&p, K0, C0_BADVADDR);
uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
uasm_i_xor(&p, K0, K0, K1);
uasm_i_dsrl_safe(&p, K1, K0, 62);
uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
uasm_i_or(&p, K0, K0, K1);
uasm_il_bnez(&p, &r, K0, label_leave);
/* No need for uasm_i_nop */
}
wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
if (cpu_has_rixi && !cpu_has_rixiex) {
/*
* If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
*/
if (use_bbit_insns()) {
uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
label_tlbl_goaround1);
} else {
uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
}
uasm_i_nop(&p);
/*
* Warn if something may race with us & replace the TLB entry
* before we read it here. Everything with such races should
* also have dedicated RiXi exception handlers, so this
* shouldn't be hit.
*/
WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
uasm_i_tlbr(&p);
if (cpu_has_mips_r2_exec_hazard)
uasm_i_ehb(&p);
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
} else {
uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
uasm_i_beqz(&p, wr.r3, 8);
}
/* load it in the delay slot*/
UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
/* load it if ptr is odd */
UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
/*
* If the entryLo (now in wr.r3) is valid (bit 1), RI or
* XI must have triggered it.
*/
if (use_bbit_insns()) {
uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
uasm_i_nop(&p);
uasm_l_tlbl_goaround1(&l, p);
} else {
uasm_i_andi(&p, wr.r3, wr.r3, 2);
uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
uasm_i_nop(&p);
}
uasm_l_tlbl_goaround1(&l, p);
}
build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* This is the entry point when build_r4000_tlbchange_handler_head
* spots a huge page.
*/
uasm_l_tlb_huge_update(&l, p);
iPTE_LW(&p, wr.r1, wr.r2);
build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
build_tlb_probe_entry(&p);
if (cpu_has_rixi && !cpu_has_rixiex) {
/*
* If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
*/
if (use_bbit_insns()) {
uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
label_tlbl_goaround2);
} else {
uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
}
uasm_i_nop(&p);
/*
* Warn if something may race with us & replace the TLB entry
* before we read it here. Everything with such races should
* also have dedicated RiXi exception handlers, so this
* shouldn't be hit.
*/
WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
uasm_i_tlbr(&p);
if (cpu_has_mips_r2_exec_hazard)
uasm_i_ehb(&p);
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
} else {
uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
uasm_i_beqz(&p, wr.r3, 8);
}
/* load it in the delay slot*/
UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
/* load it if ptr is odd */
UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
/*
* If the entryLo (now in wr.r3) is valid (bit 1), RI or
* XI must have triggered it.
*/
if (use_bbit_insns()) {
uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
} else {
uasm_i_andi(&p, wr.r3, wr.r3, 2);
uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
}
if (PM_DEFAULT_MASK == 0)
uasm_i_nop(&p);
/*
* We clobbered C0_PAGEMASK, restore it. On the other branch
* it is restored in build_huge_tlb_write_entry.
*/
build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
uasm_l_tlbl_goaround2(&l, p);
}
uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbl(&l, p);
if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
uasm_i_sync(&p, 0);
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_0 & 1) {
uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
uasm_i_jr(&p, K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
uasm_i_nop(&p);
if (p >= (u32 *)handle_tlbl_end)
panic("TLB load handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
(unsigned int)(p - (u32 *)handle_tlbl));
dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_end);
}
static void build_r4000_tlb_store_handler(void)
{
u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs);
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
memset(p, 0, handle_tlbs_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* This is the entry point when
* build_r4000_tlbchange_handler_head spots a huge page.
*/
uasm_l_tlb_huge_update(&l, p);
iPTE_LW(&p, wr.r1, wr.r2);
build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbs(&l, p);
if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
uasm_i_sync(&p, 0);
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
uasm_i_jr(&p, K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
if (p >= (u32 *)handle_tlbs_end)
panic("TLB store handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
(unsigned int)(p - (u32 *)handle_tlbs));
dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_end);
}
static void build_r4000_tlb_modify_handler(void)
{
u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm);
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
memset(p, 0, handle_tlbm_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
/* Present and writable bits set, set accessed and dirty bits. */
build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* This is the entry point when
* build_r4000_tlbchange_handler_head spots a huge page.
*/
uasm_l_tlb_huge_update(&l, p);
iPTE_LW(&p, wr.r1, wr.r2);
build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
#endif
uasm_l_nopage_tlbm(&l, p);
if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
uasm_i_sync(&p, 0);
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
uasm_i_jr(&p, K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
if (p >= (u32 *)handle_tlbm_end)
panic("TLB modify handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
(unsigned int)(p - (u32 *)handle_tlbm));
dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_end);
}
static void flush_tlb_handlers(void)
{
local_flush_icache_range((unsigned long)handle_tlbl,
(unsigned long)handle_tlbl_end);
local_flush_icache_range((unsigned long)handle_tlbs,
(unsigned long)handle_tlbs_end);
local_flush_icache_range((unsigned long)handle_tlbm,
(unsigned long)handle_tlbm_end);
local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
(unsigned long)tlbmiss_handler_setup_pgd_end);
}
static void print_htw_config(void)
{
unsigned long config;
unsigned int pwctl;
const int field = 2 * sizeof(unsigned long);
config = read_c0_pwfield();
pr_debug("PWField (0x%0*lx): GDI: 0x%02lx UDI: 0x%02lx MDI: 0x%02lx PTI: 0x%02lx PTEI: 0x%02lx\n",
field, config,
(config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT,
(config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT,
(config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT,
(config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT,
(config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
config = read_c0_pwsize();
pr_debug("PWSize (0x%0*lx): PS: 0x%lx GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
field, config,
(config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
(config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
(config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
(config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
(config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT,
(config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
pwctl = read_c0_pwctl();
pr_debug("PWCtl (0x%x): PWEn: 0x%x XK: 0x%x XS: 0x%x XU: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
pwctl,
(pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
(pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
(pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
(pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
(pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
(pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
(pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
}
static void config_htw_params(void)
{
unsigned long pwfield, pwsize, ptei;
unsigned int config;
/*
* We are using 2-level page tables, so we only need to
* setup GDW and PTW appropriately. UDW and MDW will remain 0.
* The default value of GDI/UDI/MDI/PTI is 0xc. It is illegal to
* write values less than 0xc in these fields because the entire
	 * write will be dropped.  As a result, we must preserve
* the original reset values and overwrite only what we really want.
*/
pwfield = read_c0_pwfield();
/* re-initialize the GDI field */
pwfield &= ~MIPS_PWFIELD_GDI_MASK;
pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
/* re-initialize the PTI field including the even/odd bit */
pwfield &= ~MIPS_PWFIELD_PTI_MASK;
pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
if (CONFIG_PGTABLE_LEVELS >= 3) {
pwfield &= ~MIPS_PWFIELD_MDI_MASK;
pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
}
/* Set the PTEI right shift */
ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
pwfield |= ptei;
write_c0_pwfield(pwfield);
/* Check whether the PTEI value is supported */
back_to_back_c0_hazard();
pwfield = read_c0_pwfield();
if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT)
!= ptei) {
pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled",
ptei);
/*
* Drop option to avoid HTW being enabled via another path
* (eg htw_reset())
*/
current_cpu_data.options &= ~MIPS_CPU_HTW;
return;
}
pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
if (CONFIG_PGTABLE_LEVELS >= 3)
pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
/* Set pointer size to size of directory pointers */
if (IS_ENABLED(CONFIG_64BIT))
pwsize |= MIPS_PWSIZE_PS_MASK;
/* PTEs may be multiple pointers long (e.g. with XPA) */
pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
& MIPS_PWSIZE_PTEW_MASK;
write_c0_pwsize(pwsize);
/* Make sure everything is set before we enable the HTW */
back_to_back_c0_hazard();
/*
* Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
* the pwctl fields.
*/
config = 1 << MIPS_PWCTL_PWEN_SHIFT;
if (IS_ENABLED(CONFIG_64BIT))
config |= MIPS_PWCTL_XU_MASK;
write_c0_pwctl(config);
pr_info("Hardware Page Table Walker enabled\n");
print_htw_config();
}
static void config_xpa_params(void)
{
#ifdef CONFIG_XPA
unsigned int pagegrain;
if (mips_xpa_disabled) {
pr_info("Extended Physical Addressing (XPA) disabled\n");
return;
}
pagegrain = read_c0_pagegrain();
write_c0_pagegrain(pagegrain | PG_ELPA);
back_to_back_c0_hazard();
pagegrain = read_c0_pagegrain();
if (pagegrain & PG_ELPA)
pr_info("Extended Physical Addressing (XPA) enabled\n");
else
panic("Extended Physical Addressing (XPA) disabled");
#endif
}
static void check_pabits(void)
{
unsigned long entry;
unsigned pabits, fillbits;
if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) {
/*
* We'll only be making use of the fact that we can rotate bits
* into the fill if the CPU supports RIXI, so don't bother
* probing this for CPUs which don't.
*/
return;
}
write_c0_entrylo0(~0ul);
back_to_back_c0_hazard();
entry = read_c0_entrylo0();
/* clear all non-PFN bits */
entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
/* find a lower bound on PABITS, and upper bound on fill bits */
pabits = fls_long(entry) + 6;
fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);
/* minus the RI & XI bits */
fillbits -= min_t(unsigned, fillbits, 2);
if (fillbits >= ilog2(_PAGE_NO_EXEC))
fill_includes_sw_bits = true;
pr_debug("Entry* registers contain %u fill bits\n", fillbits);
}
void build_tlb_refill_handler(void)
{
/*
	 * The refill handler is generated per CPU; multi-node systems
	 * may have local storage for it.  The other handlers are only
* needed once.
*/
static int run_once = 0;
if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
panic("Kernels supporting XPA currently require CPUs with RIXI");
output_pgtable_bits_defines();
check_pabits();
#ifdef CONFIG_64BIT
check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
#endif
if (cpu_has_3kex) {
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
if (!run_once) {
build_setup_pgd();
build_r3000_tlb_refill_handler();
build_r3000_tlb_load_handler();
build_r3000_tlb_store_handler();
build_r3000_tlb_modify_handler();
flush_tlb_handlers();
run_once++;
}
#else
panic("No R3000 TLB refill handler");
#endif
return;
}
if (cpu_has_ldpte)
setup_pw();
if (!run_once) {
scratch_reg = allocate_kscratch();
build_setup_pgd();
build_r4000_tlb_load_handler();
build_r4000_tlb_store_handler();
build_r4000_tlb_modify_handler();
if (cpu_has_ldpte)
build_loongson3_tlb_refill_handler();
else
build_r4000_tlb_refill_handler();
flush_tlb_handlers();
run_once++;
}
if (cpu_has_xpa)
config_xpa_params();
if (cpu_has_htw)
config_htw_params();
}
| linux-master | arch/mips/mm/tlbex.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2006 Chris Dearman ([email protected]),
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/cpu-type.h>
#include <asm/mipsregs.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/mips-cps.h>
#include <asm/bootinfo.h>
/*
* MIPS32/MIPS64 L2 cache handling
*/
/*
* Writeback and invalidate the secondary cache before DMA.
*/
static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
{
blast_scache_range(addr, addr + size);
}
/*
* Invalidate the secondary cache before DMA.
*/
static void mips_sc_inv(unsigned long addr, unsigned long size)
{
unsigned long lsize = cpu_scache_line_size();
unsigned long almask = ~(lsize - 1);
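	/*
	 * The first and last lines of the range may be shared with data
	 * outside the buffer, so write them back (and invalidate) before
	 * invalidating the rest of the range; a plain invalidate could
	 * otherwise discard unrelated dirty data.
	 */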
cache_op(Hit_Writeback_Inv_SD, addr & almask);
cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
blast_inv_scache_range(addr, addr + size);
}
static void mips_sc_enable(void)
{
/* L2 cache is permanently enabled */
}
static void mips_sc_disable(void)
{
/* L2 cache is permanently enabled */
}
static void mips_sc_prefetch_enable(void)
{
unsigned long pftctl;
if (mips_cm_revision() < CM_REV_CM2_5)
return;
/*
	 * If one or more L2 prefetch units are present then enable
* prefetching for both code & data, for all ports.
*/
pftctl = read_gcr_l2_pft_control();
if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT) {
pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK;
pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK;
pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN;
write_gcr_l2_pft_control(pftctl);
set_gcr_l2_pft_control_b(CM_GCR_L2_PFT_CONTROL_B_PORTID |
CM_GCR_L2_PFT_CONTROL_B_CEN);
}
}
static void mips_sc_prefetch_disable(void)
{
if (mips_cm_revision() < CM_REV_CM2_5)
return;
clear_gcr_l2_pft_control(CM_GCR_L2_PFT_CONTROL_PFTEN);
clear_gcr_l2_pft_control_b(CM_GCR_L2_PFT_CONTROL_B_PORTID |
CM_GCR_L2_PFT_CONTROL_B_CEN);
}
static bool mips_sc_prefetch_is_enabled(void)
{
unsigned long pftctl;
if (mips_cm_revision() < CM_REV_CM2_5)
return false;
pftctl = read_gcr_l2_pft_control();
if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT))
return false;
return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN);
}
static struct bcache_ops mips_sc_ops = {
.bc_enable = mips_sc_enable,
.bc_disable = mips_sc_disable,
.bc_wback_inv = mips_sc_wback_inv,
.bc_inv = mips_sc_inv,
.bc_prefetch_enable = mips_sc_prefetch_enable,
.bc_prefetch_disable = mips_sc_prefetch_disable,
.bc_prefetch_is_enabled = mips_sc_prefetch_is_enabled,
};
/*
* Check if the L2 cache controller is activated on a particular platform.
* MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
 * cores both use c0_config2's bit 12 as an "L2 Bypass" bit, i.e. the
 * cache being disabled.  However, there is no guarantee that this holds
 * on all platforms.  In an act of stupidity the spec defined bits
 * 12..15 as implementation defined, so the function below will
 * eventually have to be replaced by a platform-specific probe.
*/
static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
{
unsigned int config2 = read_c0_config2();
unsigned int tmp;
/* Check the bypass bit (L2B) */
switch (current_cpu_type()) {
case CPU_34K:
case CPU_74K:
case CPU_1004K:
case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
case CPU_P5600:
case CPU_BMIPS5000:
case CPU_QEMU_GENERIC:
case CPU_P6600:
if (config2 & (1 << 12))
return 0;
}
tmp = (config2 >> 4) & 0x0f;
if (0 < tmp && tmp <= 7)
c->scache.linesz = 2 << tmp;
else
return 0;
return 1;
}
static int mips_sc_probe_cm3(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
unsigned long cfg = read_gcr_l2_config();
unsigned long sets, line_sz, assoc;
if (cfg & CM_GCR_L2_CONFIG_BYPASS)
return 0;
sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE;
sets >>= __ffs(CM_GCR_L2_CONFIG_SET_SIZE);
if (sets)
c->scache.sets = 64 << sets;
line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE;
line_sz >>= __ffs(CM_GCR_L2_CONFIG_LINE_SIZE);
if (line_sz)
c->scache.linesz = 2 << line_sz;
assoc = cfg & CM_GCR_L2_CONFIG_ASSOC;
assoc >>= __ffs(CM_GCR_L2_CONFIG_ASSOC);
c->scache.ways = assoc + 1;
c->scache.waysize = c->scache.sets * c->scache.linesz;
c->scache.waybit = __ffs(c->scache.waysize);
if (c->scache.linesz) {
c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
return 1;
}
return 0;
}
static inline int mips_sc_probe(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config1, config2;
unsigned int tmp;
/* Mark as not present until probe completed */
c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
if (mips_cm_revision() >= CM_REV_CM3)
return mips_sc_probe_cm3();
/* Ignore anything but MIPSxx processors */
if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)))
return 0;
/* Does this MIPS32/MIPS64 CPU have a config2 register? */
config1 = read_c0_config1();
if (!(config1 & MIPS_CONF_M))
return 0;
config2 = read_c0_config2();
if (!mips_sc_is_activated(c))
return 0;
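	/*
	 * Decode Config2: SS (bits 11:8) encodes sets per way as 64 << SS and
	 * SA (bits 3:0) encodes associativity as SA + 1; the line size (SL,
	 * bits 7:4) was already decoded in mips_sc_is_activated().
	 */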
tmp = (config2 >> 8) & 0x0f;
if (tmp <= 7)
c->scache.sets = 64 << tmp;
else
return 0;
tmp = (config2 >> 0) & 0x0f;
if (tmp <= 7)
c->scache.ways = tmp + 1;
else
return 0;
if (current_cpu_type() == CPU_XBURST) {
switch (mips_machtype) {
/*
* According to config2 it would be 5-ways, but that is
* contradicted by all documentation.
*/
case MACH_INGENIC_JZ4770:
case MACH_INGENIC_JZ4775:
c->scache.ways = 4;
break;
/*
* According to config2 it would be 5-ways and 512-sets,
* but that is contradicted by all documentation.
*/
case MACH_INGENIC_X1000:
case MACH_INGENIC_X1000E:
c->scache.sets = 256;
c->scache.ways = 4;
break;
}
}
c->scache.waysize = c->scache.sets * c->scache.linesz;
c->scache.waybit = __ffs(c->scache.waysize);
c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
return 1;
}
int mips_sc_init(void)
{
int found = mips_sc_probe();
if (found) {
mips_sc_enable();
mips_sc_prefetch_enable();
bcops = &mips_sc_ops;
}
return found;
}
| linux-master | arch/mips/mm/sc-mips.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* A small micro-assembler. It is intentionally kept simple, does only
* support a subset of instructions, and does not try to hide pipeline
* effects like branch delay slots.
*
* Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle ([email protected])
* Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
#include <asm/uasm.h>
#define RS_MASK 0x1f
#define RS_SH 21
#define RT_MASK 0x1f
#define RT_SH 16
#define SCIMM_MASK 0xfffff
#define SCIMM_SH 6
/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f) \
((a) << OP_SH \
| (b) << RS_SH \
| (c) << RT_SH \
| (d) << RD_SH \
| (e) << RE_SH \
| (f) << FUNC_SH)
/* This macro sets the non-variable bits of an R6 instruction. */
#define M6(a, b, c, d, e) \
((a) << OP_SH \
| (b) << RS_SH \
| (c) << RT_SH \
| (d) << SIMM9_SH \
| (e) << FUNC_SH)
#include "uasm.c"
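/*
 * Each entry below pairs the fixed opcode bits of an instruction with a
 * mask of the operand fields build_insn() must fill in.  For example,
 * insn_addiu fixes only the major opcode (M(addiu_op, 0, 0, 0, 0, 0));
 * its RS, RT and SIMM fields are OR-ed in from the caller's arguments.
 */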
static const struct insn insn_table[insn_invalid] = {
[insn_addiu] = {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_addu] = {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD},
[insn_and] = {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD},
[insn_andi] = {M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM},
[insn_bbit0] = {M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
[insn_bbit1] = {M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
[insn_beq] = {M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
[insn_beql] = {M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
[insn_bgez] = {M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM},
[insn_bgezl] = {M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM},
[insn_bgtz] = {M(bgtz_op, 0, 0, 0, 0, 0), RS | BIMM},
[insn_blez] = {M(blez_op, 0, 0, 0, 0, 0), RS | BIMM},
[insn_bltz] = {M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM},
[insn_bltzl] = {M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM},
[insn_bne] = {M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
[insn_break] = {M(spec_op, 0, 0, 0, 0, break_op), SCIMM},
#ifndef CONFIG_CPU_MIPSR6
[insn_cache] = {M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
#else
[insn_cache] = {M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9},
#endif
[insn_cfc1] = {M(cop1_op, cfc_op, 0, 0, 0, 0), RT | RD},
[insn_cfcmsa] = {M(msa_op, 0, msa_cfc_op, 0, 0, msa_elm_op), RD | RE},
[insn_ctc1] = {M(cop1_op, ctc_op, 0, 0, 0, 0), RT | RD},
[insn_ctcmsa] = {M(msa_op, 0, msa_ctc_op, 0, 0, msa_elm_op), RD | RE},
[insn_daddiu] = {M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_daddu] = {M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD},
[insn_ddivu] = {M(spec_op, 0, 0, 0, 0, ddivu_op), RS | RT},
[insn_ddivu_r6] = {M(spec_op, 0, 0, 0, ddivu_ddivu6_op, ddivu_op),
RS | RT | RD},
[insn_di] = {M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT},
[insn_dins] = {M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE},
[insn_dinsm] = {M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE},
[insn_dinsu] = {M(spec3_op, 0, 0, 0, 0, dinsu_op), RS | RT | RD | RE},
[insn_divu] = {M(spec_op, 0, 0, 0, 0, divu_op), RS | RT},
[insn_divu_r6] = {M(spec_op, 0, 0, 0, divu_divu6_op, divu_op),
RS | RT | RD},
[insn_dmfc0] = {M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
[insn_dmodu] = {M(spec_op, 0, 0, 0, ddivu_dmodu_op, ddivu_op),
RS | RT | RD},
[insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
[insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT},
[insn_dmulu] = {M(spec_op, 0, 0, 0, dmultu_dmulu_op, dmultu_op),
RS | RT | RD},
[insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE},
[insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE},
[insn_dsbh] = {M(spec3_op, 0, 0, 0, dsbh_op, dbshfl_op), RT | RD},
[insn_dshd] = {M(spec3_op, 0, 0, 0, dshd_op, dbshfl_op), RT | RD},
[insn_dsll] = {M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE},
[insn_dsll32] = {M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE},
[insn_dsllv] = {M(spec_op, 0, 0, 0, 0, dsllv_op), RS | RT | RD},
[insn_dsra] = {M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE},
[insn_dsra32] = {M(spec_op, 0, 0, 0, 0, dsra32_op), RT | RD | RE},
[insn_dsrav] = {M(spec_op, 0, 0, 0, 0, dsrav_op), RS | RT | RD},
[insn_dsrl] = {M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE},
[insn_dsrl32] = {M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE},
[insn_dsrlv] = {M(spec_op, 0, 0, 0, 0, dsrlv_op), RS | RT | RD},
[insn_dsubu] = {M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD},
[insn_eret] = {M(cop0_op, cop_op, 0, 0, 0, eret_op), 0},
[insn_ext] = {M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE},
[insn_ins] = {M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE},
[insn_j] = {M(j_op, 0, 0, 0, 0, 0), JIMM},
[insn_jal] = {M(jal_op, 0, 0, 0, 0, 0), JIMM},
[insn_jalr] = {M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD},
#ifndef CONFIG_CPU_MIPSR6
[insn_jr] = {M(spec_op, 0, 0, 0, 0, jr_op), RS},
#else
[insn_jr] = {M(spec_op, 0, 0, 0, 0, jalr_op), RS},
#endif
[insn_lb] = {M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_lbu] = {M(lbu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_ld] = {M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_lddir] = {M(lwc2_op, 0, 0, 0, lddir_op, mult_op), RS | RT | RD},
[insn_ldpte] = {M(lwc2_op, 0, 0, 0, ldpte_op, mult_op), RS | RD},
[insn_ldx] = {M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD},
[insn_lh] = {M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_lhu] = {M(lhu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
#ifndef CONFIG_CPU_MIPSR6
[insn_ll] = {M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_lld] = {M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
#else
[insn_ll] = {M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9},
[insn_lld] = {M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9},
#endif
[insn_lui] = {M(lui_op, 0, 0, 0, 0, 0), RT | SIMM},
[insn_lw] = {M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_lwu] = {M(lwu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_lwx] = {M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD},
[insn_mfc0] = {M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
[insn_mfhc0] = {M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET},
[insn_mfhi] = {M(spec_op, 0, 0, 0, 0, mfhi_op), RD},
[insn_mflo] = {M(spec_op, 0, 0, 0, 0, mflo_op), RD},
[insn_modu] = {M(spec_op, 0, 0, 0, divu_modu_op, divu_op),
RS | RT | RD},
[insn_movn] = {M(spec_op, 0, 0, 0, 0, movn_op), RS | RT | RD},
[insn_movz] = {M(spec_op, 0, 0, 0, 0, movz_op), RS | RT | RD},
[insn_mtc0] = {M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
[insn_mthc0] = {M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET},
[insn_mthi] = {M(spec_op, 0, 0, 0, 0, mthi_op), RS},
[insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS},
[insn_mulu] = {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op),
RS | RT | RD},
[insn_muhu] = {M(spec_op, 0, 0, 0, multu_muhu_op, multu_op),
RS | RT | RD},
#ifndef CONFIG_CPU_MIPSR6
[insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
#else
[insn_mul] = {M(spec_op, 0, 0, 0, mult_mul_op, mult_op), RS | RT | RD},
#endif
[insn_multu] = {M(spec_op, 0, 0, 0, 0, multu_op), RS | RT},
[insn_nor] = {M(spec_op, 0, 0, 0, 0, nor_op), RS | RT | RD},
[insn_or] = {M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD},
[insn_ori] = {M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM},
#ifndef CONFIG_CPU_MIPSR6
[insn_pref] = {M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
#else
[insn_pref] = {M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9},
#endif
[insn_rfe] = {M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0},
[insn_rotr] = {M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE},
[insn_sb] = {M(sb_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
#ifndef CONFIG_CPU_MIPSR6
[insn_sc] = {M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_scd] = {M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
#else
[insn_sc] = {M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9},
[insn_scd] = {M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9},
#endif
[insn_sd] = {M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_seleqz] = {M(spec_op, 0, 0, 0, 0, seleqz_op), RS | RT | RD},
[insn_selnez] = {M(spec_op, 0, 0, 0, 0, selnez_op), RS | RT | RD},
[insn_sh] = {M(sh_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_sll] = {M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE},
[insn_sllv] = {M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD},
[insn_slt] = {M(spec_op, 0, 0, 0, 0, slt_op), RS | RT | RD},
[insn_slti] = {M(slti_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_sltiu] = {M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_sltu] = {M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD},
[insn_sra] = {M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE},
[insn_srav] = {M(spec_op, 0, 0, 0, 0, srav_op), RS | RT | RD},
[insn_srl] = {M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE},
[insn_srlv] = {M(spec_op, 0, 0, 0, 0, srlv_op), RS | RT | RD},
[insn_subu] = {M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD},
[insn_sw] = {M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_sync] = {M(spec_op, 0, 0, 0, 0, sync_op), RE},
[insn_syscall] = {M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
[insn_tlbp] = {M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0},
[insn_tlbr] = {M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0},
[insn_tlbwi] = {M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0},
[insn_tlbwr] = {M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0},
[insn_wait] = {M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM},
[insn_wsbh] = {M(spec3_op, 0, 0, 0, wsbh_op, bshfl_op), RT | RD},
[insn_xor] = {M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD},
[insn_xori] = {M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM},
[insn_yield] = {M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD},
};
#undef M
static inline u32 build_bimm(s32 arg)
{
WARN(arg > 0x1ffff || arg < -0x20000,
KERN_WARNING "Micro-assembler field overflow\n");
WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}
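/*
 * Example: a forward branch of 8 bytes (two instructions) encodes as
 * (8 >> 2) & 0x7fff == 0x0002, while a backward branch of -8 bytes sets
 * the sign bit and yields 0x8000 | 0x7ffe == 0xfffe, i.e. a field value
 * of -2 words.
 */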
static inline u32 build_jimm(u32 arg)
{
WARN(arg & ~(JIMM_MASK << 2),
KERN_WARNING "Micro-assembler field overflow\n");
return (arg >> 2) & JIMM_MASK;
}
/*
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
*/
static void build_insn(u32 **buf, enum opcode opc, ...)
{
const struct insn *ip;
va_list ap;
u32 op;
if (opc < 0 || opc >= insn_invalid ||
(opc == insn_daddiu && r4k_daddiu_bug()) ||
(insn_table[opc].match == 0 && insn_table[opc].fields == 0))
panic("Unsupported Micro-assembler instruction %d", opc);
ip = &insn_table[opc];
op = ip->match;
va_start(ap, opc);
if (ip->fields & RS)
op |= build_rs(va_arg(ap, u32));
if (ip->fields & RT)
op |= build_rt(va_arg(ap, u32));
if (ip->fields & RD)
op |= build_rd(va_arg(ap, u32));
if (ip->fields & RE)
op |= build_re(va_arg(ap, u32));
if (ip->fields & SIMM)
op |= build_simm(va_arg(ap, s32));
if (ip->fields & UIMM)
op |= build_uimm(va_arg(ap, u32));
if (ip->fields & BIMM)
op |= build_bimm(va_arg(ap, s32));
if (ip->fields & JIMM)
op |= build_jimm(va_arg(ap, u32));
if (ip->fields & FUNC)
op |= build_func(va_arg(ap, u32));
if (ip->fields & SET)
op |= build_set(va_arg(ap, u32));
if (ip->fields & SCIMM)
op |= build_scimm(va_arg(ap, u32));
if (ip->fields & SIMM9)
op |= build_scimm9(va_arg(ap, u32));
va_end(ap);
**buf = op;
(*buf)++;
}
static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
long laddr = (long)lab->addr;
long raddr = (long)rel->addr;
switch (rel->type) {
case R_MIPS_PC16:
*rel->addr |= build_bimm(laddr - (raddr + 4));
break;
default:
panic("Unsupported Micro-assembler relocation %d",
rel->type);
}
}
| linux-master | arch/mips/mm/uasm-mips.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001,2002,2003 Broadcom Corporation
*/
#include <linux/sched.h>
#include <asm/mipsregs.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#if !defined(CONFIG_SIBYTE_BUS_WATCHER) || defined(CONFIG_SIBYTE_BW_TRACE)
#include <asm/io.h>
#include <asm/sibyte/sb1250_scd.h>
#endif
/*
* We'd like to dump the L2_ECC_TAG register on errors, but errata make
* that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
*/
#undef DUMP_L2_ECC_TAG_ON_ERROR
/* SB1 definitions */
/* XXX should come from config1 XXX */
#define SB1_CACHE_INDEX_MASK 0x1fe0
#define CP0_ERRCTL_RECOVERABLE (1 << 31)
#define CP0_ERRCTL_DCACHE (1 << 30)
#define CP0_ERRCTL_ICACHE (1 << 29)
#define CP0_ERRCTL_MULTIBUS (1 << 23)
#define CP0_ERRCTL_MC_TLB (1 << 15)
#define CP0_ERRCTL_MC_TIMEOUT (1 << 14)
#define CP0_CERRI_TAG_PARITY (1 << 29)
#define CP0_CERRI_DATA_PARITY (1 << 28)
#define CP0_CERRI_EXTERNAL (1 << 26)
#define CP0_CERRI_IDX_VALID(c) (!((c) & CP0_CERRI_EXTERNAL))
#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
#define CP0_CERRD_MULTIPLE (1 << 31)
#define CP0_CERRD_TAG_STATE (1 << 30)
#define CP0_CERRD_TAG_ADDRESS (1 << 29)
#define CP0_CERRD_DATA_SBE (1 << 28)
#define CP0_CERRD_DATA_DBE (1 << 27)
#define CP0_CERRD_EXTERNAL (1 << 26)
#define CP0_CERRD_LOAD (1 << 25)
#define CP0_CERRD_STORE (1 << 24)
#define CP0_CERRD_FILLWB (1 << 23)
#define CP0_CERRD_COHERENCY (1 << 22)
#define CP0_CERRD_DUPTAG (1 << 21)
#define CP0_CERRD_DPA_VALID(c) (!((c) & CP0_CERRD_EXTERNAL))
#define CP0_CERRD_IDX_VALID(c) \
(((c) & (CP0_CERRD_LOAD | CP0_CERRD_STORE)) ? (!((c) & CP0_CERRD_EXTERNAL)) : 0)
#define CP0_CERRD_CAUSES \
(CP0_CERRD_LOAD | CP0_CERRD_STORE | CP0_CERRD_FILLWB | CP0_CERRD_COHERENCY | CP0_CERRD_DUPTAG)
#define CP0_CERRD_TYPES \
(CP0_CERRD_TAG_STATE | CP0_CERRD_TAG_ADDRESS | CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE | CP0_CERRD_EXTERNAL)
#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
static uint32_t extract_ic(unsigned short addr, int data);
static uint32_t extract_dc(unsigned short addr, int data);
static inline void breakout_errctl(unsigned int val)
{
if (val & CP0_ERRCTL_RECOVERABLE)
printk(" recoverable");
if (val & CP0_ERRCTL_DCACHE)
printk(" dcache");
if (val & CP0_ERRCTL_ICACHE)
printk(" icache");
if (val & CP0_ERRCTL_MULTIBUS)
printk(" multiple-buserr");
printk("\n");
}
static inline void breakout_cerri(unsigned int val)
{
if (val & CP0_CERRI_TAG_PARITY)
printk(" tag-parity");
if (val & CP0_CERRI_DATA_PARITY)
printk(" data-parity");
if (val & CP0_CERRI_EXTERNAL)
printk(" external");
printk("\n");
}
static inline void breakout_cerrd(unsigned int val)
{
switch (val & CP0_CERRD_CAUSES) {
case CP0_CERRD_LOAD:
printk(" load,");
break;
case CP0_CERRD_STORE:
printk(" store,");
break;
case CP0_CERRD_FILLWB:
printk(" fill/wb,");
break;
case CP0_CERRD_COHERENCY:
printk(" coherency,");
break;
case CP0_CERRD_DUPTAG:
printk(" duptags,");
break;
default:
printk(" NO CAUSE,");
break;
}
if (!(val & CP0_CERRD_TYPES))
printk(" NO TYPE");
else {
if (val & CP0_CERRD_MULTIPLE)
printk(" multi-err");
if (val & CP0_CERRD_TAG_STATE)
printk(" tag-state");
if (val & CP0_CERRD_TAG_ADDRESS)
printk(" tag-address");
if (val & CP0_CERRD_DATA_SBE)
printk(" data-SBE");
if (val & CP0_CERRD_DATA_DBE)
printk(" data-DBE");
if (val & CP0_CERRD_EXTERNAL)
printk(" external");
}
printk("\n");
}
#ifndef CONFIG_SIBYTE_BUS_WATCHER
static void check_bus_watcher(void)
{
uint32_t status, l2_err, memio_err;
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
uint64_t l2_tag;
#endif
/* Destructive read, clears register and interrupt */
status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
/* Bit 31 is always on, but there's no #define for that */
if (status & ~(1UL << 31)) {
l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS));
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
l2_tag = in64(IOADDR(A_L2_ECC_TAG));
#endif
memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
printk("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
printk("\nLast recorded signature:\n");
printk("Request %02x from %d, answered by %d with Dcode %d\n",
(unsigned int)(G_SCD_BERR_TID(status) & 0x3f),
(int)(G_SCD_BERR_TID(status) >> 6),
(int)G_SCD_BERR_RID(status),
(int)G_SCD_BERR_DCODE(status));
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
printk("Last L2 tag w/ bad ECC: %016llx\n", l2_tag);
#endif
} else {
printk("Bus watcher indicates no error\n");
}
}
#else
extern void check_bus_watcher(void);
#endif
asmlinkage void sb1_cache_error(void)
{
uint32_t errctl, cerr_i, cerr_d, dpalo, dpahi, eepc, res;
unsigned long long cerr_dpa;
#ifdef CONFIG_SIBYTE_BW_TRACE
/* Freeze the trace buffer now */
csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
printk("Trace buffer frozen\n");
#endif
printk("Cache error exception on CPU %x:\n",
(read_c0_prid() >> 25) & 0x7);
__asm__ __volatile__ (
" .set push\n\t"
" .set mips64\n\t"
" .set noat\n\t"
" mfc0 %0, $26\n\t"
" mfc0 %1, $27\n\t"
" mfc0 %2, $27, 1\n\t"
" dmfc0 $1, $27, 3\n\t"
" dsrl32 %3, $1, 0 \n\t"
" sll %4, $1, 0 \n\t"
" mfc0 %5, $30\n\t"
" .set pop"
: "=r" (errctl), "=r" (cerr_i), "=r" (cerr_d),
"=r" (dpahi), "=r" (dpalo), "=r" (eepc));
cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
printk(" c0_errorepc == %08x\n", eepc);
printk(" c0_errctl == %08x", errctl);
breakout_errctl(errctl);
if (errctl & CP0_ERRCTL_ICACHE) {
printk(" c0_cerr_i == %08x", cerr_i);
breakout_cerri(cerr_i);
if (CP0_CERRI_IDX_VALID(cerr_i)) {
/* Check index of EPC, allowing for delay slot */
if (((eepc & SB1_CACHE_INDEX_MASK) != (cerr_i & SB1_CACHE_INDEX_MASK)) &&
((eepc & SB1_CACHE_INDEX_MASK) != ((cerr_i & SB1_CACHE_INDEX_MASK) - 4)))
printk(" cerr_i idx doesn't match eepc\n");
else {
res = extract_ic(cerr_i & SB1_CACHE_INDEX_MASK,
(cerr_i & CP0_CERRI_DATA) != 0);
if (!(res & cerr_i))
printk("...didn't see indicated icache problem\n");
}
}
}
if (errctl & CP0_ERRCTL_DCACHE) {
printk(" c0_cerr_d == %08x", cerr_d);
breakout_cerrd(cerr_d);
if (CP0_CERRD_DPA_VALID(cerr_d)) {
printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
if (!CP0_CERRD_IDX_VALID(cerr_d)) {
res = extract_dc(cerr_dpa & SB1_CACHE_INDEX_MASK,
(cerr_d & CP0_CERRD_DATA) != 0);
if (!(res & cerr_d))
printk("...didn't see indicated dcache problem\n");
} else {
if ((cerr_dpa & SB1_CACHE_INDEX_MASK) != (cerr_d & SB1_CACHE_INDEX_MASK))
printk(" cerr_d idx doesn't match cerr_dpa\n");
else {
res = extract_dc(cerr_d & SB1_CACHE_INDEX_MASK,
(cerr_d & CP0_CERRD_DATA) != 0);
if (!(res & cerr_d))
printk("...didn't see indicated problem\n");
}
}
}
}
check_bus_watcher();
/*
* Calling panic() when a fatal cache error occurs scrambles the
* state of the system (and the cache), making it difficult to
* investigate after the fact. However, if you just stall the CPU,
* the other CPU may keep on running, which is typically very
* undesirable.
*/
#ifdef CONFIG_SB1_CERR_STALL
while (1)
;
#else
panic("unhandled cache error");
#endif
}
/* Parity lookup table. */
static const uint8_t parity[256] = {
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0
};
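/*
 * parity[x] is the even parity (popcount modulo 2) of the byte value x,
 * e.g. parity[0x03] == 0 and parity[0x07] == 1.
 */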
/* Masks to select bits for Hamming parity, mask_72_64[i] for bit[i] */
static const uint64_t mask_72_64[8] = {
0x0738C808099264FFULL,
0x38C808099264FF07ULL,
0xC808099264FF0738ULL,
0x08099264FF0738C8ULL,
0x099264FF0738C808ULL,
0x9264FF0738C80809ULL,
0x64FF0738C8080992ULL,
0xFF0738C808099264ULL
};
/* Calculate the parity on a range of bits */
static char range_parity(uint64_t dword, int max, int min)
{
char parity = 0;
int i;
dword >>= min;
for (i=max-min; i>=0; i--) {
if (dword & 0x1)
parity = !parity;
dword >>= 1;
}
return parity;
}
/* Calculate the 4-bit even byte-parity for an instruction */
static unsigned char inst_parity(uint32_t word)
{
int i, j;
char parity = 0;
for (j=0; j<4; j++) {
char byte_parity = 0;
for (i=0; i<8; i++) {
if (word & 0x80000000)
byte_parity = !byte_parity;
word <<= 1;
}
parity <<= 1;
parity |= byte_parity;
}
return parity;
}
static uint32_t extract_ic(unsigned short addr, int data)
{
unsigned short way;
int valid;
uint32_t taghi, taglolo, taglohi;
unsigned long long taglo, va;
uint64_t tlo_tmp;
uint8_t lru;
int res = 0;
printk("Icache index 0x%04x ", addr);
for (way = 0; way < 4; way++) {
/* Index-load-tag-I */
__asm__ __volatile__ (
" .set push \n\t"
" .set noreorder \n\t"
" .set mips64 \n\t"
" .set noat \n\t"
" cache 4, 0(%3) \n\t"
" mfc0 %0, $29 \n\t"
" dmfc0 $1, $28 \n\t"
" dsrl32 %1, $1, 0 \n\t"
" sll %2, $1, 0 \n\t"
" .set pop"
: "=r" (taghi), "=r" (taglohi), "=r" (taglolo)
: "r" ((way << 13) | addr));
taglo = ((unsigned long long)taglohi << 32) | taglolo;
if (way == 0) {
lru = (taghi >> 14) & 0xff;
printk("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
((addr >> 5) & 0x3), /* bank */
((addr >> 7) & 0x3f), /* index */
(lru & 0x3),
((lru >> 2) & 0x3),
((lru >> 4) & 0x3),
((lru >> 6) & 0x3));
}
va = (taglo & 0xC0000FFFFFFFE000ULL) | addr;
if ((taglo & (1 << 31)) && (((taglo >> 62) & 0x3) == 3))
va |= 0x3FFFF00000000000ULL;
valid = ((taghi >> 29) & 1);
if (valid) {
tlo_tmp = taglo & 0xfff3ff;
if (((taglo >> 10) & 1) ^ range_parity(tlo_tmp, 23, 0)) {
printk(" ** bad parity in VTag0/G/ASID\n");
res |= CP0_CERRI_TAG_PARITY;
}
if (((taglo >> 11) & 1) ^ range_parity(taglo, 63, 24)) {
printk(" ** bad parity in R/VTag1\n");
res |= CP0_CERRI_TAG_PARITY;
}
}
if (valid ^ ((taghi >> 27) & 1)) {
printk(" ** bad parity for valid bit\n");
res |= CP0_CERRI_TAG_PARITY;
}
printk(" %d [VA %016llx] [Vld? %d] raw tags: %08X-%016llX\n",
way, va, valid, taghi, taglo);
if (data) {
uint32_t datahi, insta, instb;
uint8_t predecode;
int offset;
/* (hit all banks and ways) */
for (offset = 0; offset < 4; offset++) {
/* Index-load-data-I */
__asm__ __volatile__ (
" .set push\n\t"
" .set noreorder\n\t"
" .set mips64\n\t"
" .set noat\n\t"
" cache 6, 0(%3) \n\t"
" mfc0 %0, $29, 1\n\t"
" dmfc0 $1, $28, 1\n\t"
" dsrl32 %1, $1, 0 \n\t"
" sll %2, $1, 0 \n\t"
" .set pop \n"
: "=r" (datahi), "=r" (insta), "=r" (instb)
: "r" ((way << 13) | addr | (offset << 3)));
predecode = (datahi >> 8) & 0xff;
if (((datahi >> 16) & 1) != (uint32_t)range_parity(predecode, 7, 0)) {
printk(" ** bad parity in predecode\n");
res |= CP0_CERRI_DATA_PARITY;
}
/* XXXKW should/could check predecode bits themselves */
if (((datahi >> 4) & 0xf) ^ inst_parity(insta)) {
printk(" ** bad parity in instruction a\n");
res |= CP0_CERRI_DATA_PARITY;
}
if ((datahi & 0xf) ^ inst_parity(instb)) {
printk(" ** bad parity in instruction b\n");
res |= CP0_CERRI_DATA_PARITY;
}
printk(" %05X-%08X%08X", datahi, insta, instb);
}
printk("\n");
}
}
return res;
}
/* Compute the ECC for a data doubleword */
static uint8_t dc_ecc(uint64_t dword)
{
uint64_t t;
uint32_t w;
uint8_t p;
int i;
p = 0;
for (i = 7; i >= 0; i--)
{
p <<= 1;
t = dword & mask_72_64[i];
w = (uint32_t)(t >> 32);
p ^= (parity[w>>24] ^ parity[(w>>16) & 0xFF]
^ parity[(w>>8) & 0xFF] ^ parity[w & 0xFF]);
w = (uint32_t)(t & 0xFFFFFFFF);
p ^= (parity[w>>24] ^ parity[(w>>16) & 0xFF]
^ parity[(w>>8) & 0xFF] ^ parity[w & 0xFF]);
}
return p;
}
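/*
 * extract_dc() below recomputes this ECC for each doubleword and XORs it
 * with the stored check byte: a single differing bit is reported as a
 * correctable single-bit error (SBE), anything more as an uncorrectable
 * double-bit error (DBE).
 */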
struct dc_state {
unsigned char val;
char *name;
};
static struct dc_state dc_states[] = {
{ 0x00, "INVALID" },
{ 0x0f, "COH-SHD" },
{ 0x13, "NCO-E-C" },
{ 0x19, "NCO-E-D" },
{ 0x16, "COH-E-C" },
{ 0x1c, "COH-E-D" },
{ 0xff, "*ERROR*" }
};
#define DC_TAG_VALID(state) \
(((state) == 0x0) || ((state) == 0xf) || ((state) == 0x13) || \
((state) == 0x19) || ((state) == 0x16) || ((state) == 0x1c))
static char *dc_state_str(unsigned char state)
{
struct dc_state *dsc = dc_states;
while (dsc->val != 0xff) {
if (dsc->val == state)
break;
dsc++;
}
return dsc->name;
}
static uint32_t extract_dc(unsigned short addr, int data)
{
int valid, way;
unsigned char state;
uint32_t taghi, taglolo, taglohi;
unsigned long long taglo, pa;
uint8_t ecc, lru;
int res = 0;
printk("Dcache index 0x%04x ", addr);
for (way = 0; way < 4; way++) {
__asm__ __volatile__ (
" .set push\n\t"
" .set noreorder\n\t"
" .set mips64\n\t"
" .set noat\n\t"
" cache 5, 0(%3)\n\t" /* Index-load-tag-D */
" mfc0 %0, $29, 2\n\t"
" dmfc0 $1, $28, 2\n\t"
" dsrl32 %1, $1, 0\n\t"
" sll %2, $1, 0\n\t"
" .set pop"
: "=r" (taghi), "=r" (taglohi), "=r" (taglolo)
: "r" ((way << 13) | addr));
taglo = ((unsigned long long)taglohi << 32) | taglolo;
pa = (taglo & 0xFFFFFFE000ULL) | addr;
if (way == 0) {
lru = (taghi >> 14) & 0xff;
printk("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
((addr >> 11) & 0x2) | ((addr >> 5) & 1), /* bank */
((addr >> 6) & 0x3f), /* index */
(lru & 0x3),
((lru >> 2) & 0x3),
((lru >> 4) & 0x3),
((lru >> 6) & 0x3));
}
state = (taghi >> 25) & 0x1f;
valid = DC_TAG_VALID(state);
printk(" %d [PA %010llx] [state %s (%02x)] raw tags: %08X-%016llX\n",
way, pa, dc_state_str(state), state, taghi, taglo);
if (valid) {
if (((taglo >> 11) & 1) ^ range_parity(taglo, 39, 26)) {
printk(" ** bad parity in PTag1\n");
res |= CP0_CERRD_TAG_ADDRESS;
}
if (((taglo >> 10) & 1) ^ range_parity(taglo, 25, 13)) {
printk(" ** bad parity in PTag0\n");
res |= CP0_CERRD_TAG_ADDRESS;
}
} else {
res |= CP0_CERRD_TAG_STATE;
}
if (data) {
uint32_t datalohi, datalolo, datahi;
unsigned long long datalo;
int offset;
char bad_ecc = 0;
for (offset = 0; offset < 4; offset++) {
/* Index-load-data-D */
__asm__ __volatile__ (
" .set push\n\t"
" .set noreorder\n\t"
" .set mips64\n\t"
" .set noat\n\t"
" cache 7, 0(%3)\n\t" /* Index-load-data-D */
" mfc0 %0, $29, 3\n\t"
" dmfc0 $1, $28, 3\n\t"
" dsrl32 %1, $1, 0 \n\t"
" sll %2, $1, 0 \n\t"
" .set pop"
: "=r" (datahi), "=r" (datalohi), "=r" (datalolo)
: "r" ((way << 13) | addr | (offset << 3)));
datalo = ((unsigned long long)datalohi << 32) | datalolo;
ecc = dc_ecc(datalo);
if (ecc != datahi) {
int bits;
bad_ecc |= 1 << (3-offset);
ecc ^= datahi;
bits = hweight8(ecc);
res |= (bits == 1) ? CP0_CERRD_DATA_SBE : CP0_CERRD_DATA_DBE;
}
printk(" %02X-%016llX", datahi, datalo);
}
printk("\n");
if (bad_ecc)
printk(" dwords w/ bad ECC: %d %d %d %d\n",
!!(bad_ecc & 8), !!(bad_ecc & 4),
!!(bad_ecc & 2), !!(bad_ecc & 1));
}
}
return res;
}
| linux-master | arch/mips/mm/cerr-sb1.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) Copyright 1995 1996 Linus Torvalds
* (C) Copyright 2001, 2002 Ralf Baechle
*/
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <ioremap.h>
#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
void *arg)
{
unsigned long i;
for (i = 0; i < nr_pages; i++) {
if (pfn_valid(start_pfn + i) &&
!PageReserved(pfn_to_page(start_pfn + i)))
return 1;
}
return 0;
}
/*
* ioremap_prot - map bus memory into CPU space
* @phys_addr: bus address of the memory
* @size: size of the resource to map
*
* ioremap_prot gives the caller control over cache coherency attributes (CCA)
*/
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
unsigned long prot_val)
{
unsigned long flags = prot_val & _CACHE_MASK;
unsigned long offset, pfn, last_pfn;
struct vm_struct *area;
phys_addr_t last_addr;
unsigned long vaddr;
void __iomem *cpu_addr;
cpu_addr = plat_ioremap(phys_addr, size, flags);
if (cpu_addr)
return cpu_addr;
phys_addr = fixup_bigphys_addr(phys_addr, size);
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;
/*
* Map uncached objects in the low 512mb of address space using KSEG1,
* otherwise map using page tables.
*/
if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
flags == _CACHE_UNCACHED)
return (void __iomem *) CKSEG1ADDR(phys_addr);
/*
* Don't allow anybody to remap RAM that may be allocated by the page
* allocator, since that could lead to races & data clobbering.
*/
pfn = PFN_DOWN(phys_addr);
last_pfn = PFN_DOWN(last_addr);
if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
__ioremap_check_ram) == 1) {
WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
&phys_addr, &last_addr);
return NULL;
}
/*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr + 1) - phys_addr;
/*
* Ok, go for it..
*/
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
vaddr = (unsigned long)area->addr;
flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
__pgprot(flags))) {
free_vm_area(area);
return NULL;
}
return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);
void iounmap(const volatile void __iomem *addr)
{
if (!plat_iounmap(addr) && !IS_KSEG1(addr))
vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
| linux-master | arch/mips/mm/ioremap.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003, 04, 05 Ralf Baechle ([email protected])
* Copyright (C) 2007 Maciej W. Rozycki
* Copyright (C) 2008 Thiemo Seufer
* Copyright (C) 2012 MIPS Technologies, Inc.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/cpu-type.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif
#include <asm/uasm.h>
/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31
/* Handle labels (which must be positive integers). */
enum label_id {
label_clear_nopref = 1,
label_clear_pref,
label_copy_nopref,
label_copy_pref_both,
label_copy_pref_store,
};
UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)
/* We need one branch and therefore one relocation per target label. */
static struct uasm_label labels[5];
static struct uasm_reloc relocs[5];
#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
/*
* MIPS R6 encodes the pref offset in a signed 9-bit field (-256..255).
* Skip emitting the prefetch if the offset does not fit.
*/
#define _uasm_i_pref(a, b, c, d) \
do { \
if (cpu_has_mips_r6) { \
if (c <= 0xff && c >= -0x100) \
uasm_i_pref(a, b, c, d);\
} else { \
uasm_i_pref(a, b, c, d); \
} \
} while(0)
static int pref_bias_clear_store;
static int pref_bias_copy_load;
static int pref_bias_copy_store;
static u32 pref_src_mode;
static u32 pref_dst_mode;
static int clear_word_size;
static int copy_word_size;
static int half_clear_loop_size;
static int half_copy_loop_size;
static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1)
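/*
* Emit "reg1 = reg2 + off" for an unsigned page offset. Offsets that do not
* fit in a signed 16-bit immediate are built in T9 with lui/addiu first;
* when the R4000 daddiu erratum workaround is in effect the offset is always
* materialised in T9 and added with daddu.
*/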
static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
if (cpu_has_64bit_gp_regs &&
IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) &&
r4k_daddiu_bug()) {
if (off > 0x7fff) {
uasm_i_lui(buf, T9, uasm_rel_hi(off));
uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
} else
uasm_i_addiu(buf, T9, ZERO, off);
uasm_i_daddu(buf, reg1, reg2, T9);
} else {
if (off > 0x7fff) {
uasm_i_lui(buf, T9, uasm_rel_hi(off));
uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
UASM_i_ADDU(buf, reg1, reg2, T9);
} else
UASM_i_ADDIU(buf, reg1, reg2, off);
}
}
static void set_prefetch_parameters(void)
{
if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
clear_word_size = 8;
else
clear_word_size = 4;
if (cpu_has_64bit_gp_regs)
copy_word_size = 8;
else
copy_word_size = 4;
/*
* The prefs used here use "streaming" hints, which cause the
* copied data to be kicked out of the cache sooner. A page copy often
* ends up copying a lot more data than is commonly used, so this seems
* to make sense in terms of reducing cache pollution, but I've no real
* performance data to back this up.
*/
if (cpu_has_prefetch) {
/*
* XXX: Most prefetch bias values in here are based on
* guesswork.
*/
cache_line_size = cpu_dcache_line_size();
switch (current_cpu_type()) {
case CPU_R5500:
case CPU_TX49XX:
/* These processors only support the Pref_Load hint. */
pref_bias_copy_load = 256;
break;
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_R16000:
/*
* Those values have been experimentally tuned for an
* Origin 200.
*/
pref_bias_clear_store = 512;
pref_bias_copy_load = 256;
pref_bias_copy_store = 256;
pref_src_mode = Pref_LoadStreamed;
pref_dst_mode = Pref_StoreStreamed;
break;
case CPU_SB1:
case CPU_SB1A:
pref_bias_clear_store = 128;
pref_bias_copy_load = 128;
pref_bias_copy_store = 128;
/*
* SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
* hints are broken.
*/
if (current_cpu_type() == CPU_SB1 &&
(current_cpu_data.processor_id & 0xff) < 0x02) {
pref_src_mode = Pref_Load;
pref_dst_mode = Pref_Store;
} else {
pref_src_mode = Pref_LoadStreamed;
pref_dst_mode = Pref_StoreStreamed;
}
break;
case CPU_LOONGSON64:
/* Loongson-3 only supports the Pref_Load/Pref_Store hints. */
pref_bias_clear_store = 128;
pref_bias_copy_load = 128;
pref_bias_copy_store = 128;
pref_src_mode = Pref_Load;
pref_dst_mode = Pref_Store;
break;
default:
pref_bias_clear_store = 128;
pref_bias_copy_load = 256;
pref_bias_copy_store = 128;
pref_src_mode = Pref_LoadStreamed;
if (cpu_has_mips_r6)
/*
* Bit 30 (Pref_PrepareForStore) has been
* removed from MIPS R6. Use bit 5
* (Pref_StoreStreamed).
*/
pref_dst_mode = Pref_StoreStreamed;
else
pref_dst_mode = Pref_PrepareForStore;
break;
}
} else {
if (cpu_has_cache_cdex_s)
cache_line_size = cpu_scache_line_size();
else if (cpu_has_cache_cdex_p)
cache_line_size = cpu_dcache_line_size();
}
/*
* Too much unrolling will overflow the available space in
* clear_space_array / copy_page_array.
*/
half_clear_loop_size = min(16 * clear_word_size,
max(cache_line_size >> 1,
4 * clear_word_size));
half_copy_loop_size = min(16 * copy_word_size,
max(cache_line_size >> 1,
4 * copy_word_size));
}
static void build_clear_store(u32 **buf, int off)
{
if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
uasm_i_sd(buf, ZERO, off, A0);
} else {
uasm_i_sw(buf, ZERO, off, A0);
}
}
static inline void build_clear_pref(u32 **buf, int off)
{
if (off & cache_line_mask())
return;
if (pref_bias_clear_store) {
_uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
A0);
} else if (cache_line_size == (half_clear_loop_size << 1)) {
if (cpu_has_cache_cdex_s) {
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
} else if (cpu_has_cache_cdex_p) {
if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
cpu_is_r4600_v1_x()) {
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
}
if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
cpu_is_r4600_v2_x())
uasm_i_lw(buf, ZERO, ZERO, AT);
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
}
}
}
extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;
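/*
* Synthesize the clear_page() routine into __clear_page_start at run time.
* The unrolled main loop stores zeroes while issuing prefetches (or
* Create_Dirty_Exclusive cache ops) ahead of the stores; when a store
* prefetch bias is in use, a trailing loop without prefetches covers the
* final pref_bias_clear_store bytes of the page.
*/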
void build_clear_page(void)
{
int off;
u32 *buf = &__clear_page_start;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
int i;
static atomic_t run_once = ATOMIC_INIT(0);
if (atomic_xchg(&run_once, 1)) {
return;
}
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
set_prefetch_parameters();
/*
* This algorithm makes the following assumptions:
* - The prefetch bias is a multiple of 2 words.
* - The prefetch bias is less than one page.
*/
BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
BUG_ON(PAGE_SIZE < pref_bias_clear_store);
off = PAGE_SIZE - pref_bias_clear_store;
if (off > 0xffff || !pref_bias_clear_store)
pg_addiu(&buf, A2, A0, off);
else
uasm_i_ori(&buf, A2, A0, off);
if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
* cache_line_size : 0;
while (off) {
build_clear_pref(&buf, -off);
off -= cache_line_size;
}
uasm_l_clear_pref(&l, buf);
do {
build_clear_pref(&buf, off);
build_clear_store(&buf, off);
off += clear_word_size;
} while (off < half_clear_loop_size);
pg_addiu(&buf, A0, A0, 2 * off);
off = -off;
do {
build_clear_pref(&buf, off);
if (off == -clear_word_size)
uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
build_clear_store(&buf, off);
off += clear_word_size;
} while (off < 0);
if (pref_bias_clear_store) {
pg_addiu(&buf, A2, A0, pref_bias_clear_store);
uasm_l_clear_nopref(&l, buf);
off = 0;
do {
build_clear_store(&buf, off);
off += clear_word_size;
} while (off < half_clear_loop_size);
pg_addiu(&buf, A0, A0, 2 * off);
off = -off;
do {
if (off == -clear_word_size)
uasm_il_bne(&buf, &r, A0, A2,
label_clear_nopref);
build_clear_store(&buf, off);
off += clear_word_size;
} while (off < 0);
}
uasm_i_jr(&buf, RA);
uasm_i_nop(&buf);
BUG_ON(buf > &__clear_page_end);
uasm_resolve_relocs(relocs, labels);
pr_debug("Synthesized clear page handler (%u instructions).\n",
(u32)(buf - &__clear_page_start));
pr_debug("\t.set push\n");
pr_debug("\t.set noreorder\n");
for (i = 0; i < (buf - &__clear_page_start); i++)
pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
pr_debug("\t.set pop\n");
}
static void build_copy_load(u32 **buf, int reg, int off)
{
if (cpu_has_64bit_gp_regs) {
uasm_i_ld(buf, reg, off, A1);
} else {
uasm_i_lw(buf, reg, off, A1);
}
}
static void build_copy_store(u32 **buf, int reg, int off)
{
if (cpu_has_64bit_gp_regs) {
uasm_i_sd(buf, reg, off, A0);
} else {
uasm_i_sw(buf, reg, off, A0);
}
}
static inline void build_copy_load_pref(u32 **buf, int off)
{
if (off & cache_line_mask())
return;
if (pref_bias_copy_load)
_uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}
static inline void build_copy_store_pref(u32 **buf, int off)
{
if (off & cache_line_mask())
return;
if (pref_bias_copy_store) {
_uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
A0);
} else if (cache_line_size == (half_copy_loop_size << 1)) {
if (cpu_has_cache_cdex_s) {
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
} else if (cpu_has_cache_cdex_p) {
if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
cpu_is_r4600_v1_x()) {
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
}
if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
cpu_is_r4600_v2_x())
uasm_i_lw(buf, ZERO, ZERO, AT);
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
}
}
}
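/*
* Synthesize the copy_page() routine into __copy_page_start. Up to three
* unrolled loops are emitted: one prefetching both source and destination,
* one prefetching only the destination once source prefetches would run
* past the end of the page, and a final loop with no prefetches at all.
*/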
void build_copy_page(void)
{
int off;
u32 *buf = &__copy_page_start;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
int i;
static atomic_t run_once = ATOMIC_INIT(0);
if (atomic_xchg(&run_once, 1)) {
return;
}
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
set_prefetch_parameters();
/*
* This algorithm makes the following assumptions:
* - All prefetch biases are multiples of 8 words.
* - The prefetch biases are less than one page.
* - The store prefetch bias isn't greater than the load
* prefetch bias.
*/
BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
BUG_ON(PAGE_SIZE < pref_bias_copy_load);
BUG_ON(pref_bias_copy_store > pref_bias_copy_load);
off = PAGE_SIZE - pref_bias_copy_load;
if (off > 0xffff || !pref_bias_copy_load)
pg_addiu(&buf, A2, A0, off);
else
uasm_i_ori(&buf, A2, A0, off);
if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
cache_line_size : 0;
while (off) {
build_copy_load_pref(&buf, -off);
off -= cache_line_size;
}
off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
cache_line_size : 0;
while (off) {
build_copy_store_pref(&buf, -off);
off -= cache_line_size;
}
uasm_l_copy_pref_both(&l, buf);
do {
build_copy_load_pref(&buf, off);
build_copy_load(&buf, T0, off);
build_copy_load_pref(&buf, off + copy_word_size);
build_copy_load(&buf, T1, off + copy_word_size);
build_copy_load_pref(&buf, off + 2 * copy_word_size);
build_copy_load(&buf, T2, off + 2 * copy_word_size);
build_copy_load_pref(&buf, off + 3 * copy_word_size);
build_copy_load(&buf, T3, off + 3 * copy_word_size);
build_copy_store_pref(&buf, off);
build_copy_store(&buf, T0, off);
build_copy_store_pref(&buf, off + copy_word_size);
build_copy_store(&buf, T1, off + copy_word_size);
build_copy_store_pref(&buf, off + 2 * copy_word_size);
build_copy_store(&buf, T2, off + 2 * copy_word_size);
build_copy_store_pref(&buf, off + 3 * copy_word_size);
build_copy_store(&buf, T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < half_copy_loop_size);
pg_addiu(&buf, A1, A1, 2 * off);
pg_addiu(&buf, A0, A0, 2 * off);
off = -off;
do {
build_copy_load_pref(&buf, off);
build_copy_load(&buf, T0, off);
build_copy_load_pref(&buf, off + copy_word_size);
build_copy_load(&buf, T1, off + copy_word_size);
build_copy_load_pref(&buf, off + 2 * copy_word_size);
build_copy_load(&buf, T2, off + 2 * copy_word_size);
build_copy_load_pref(&buf, off + 3 * copy_word_size);
build_copy_load(&buf, T3, off + 3 * copy_word_size);
build_copy_store_pref(&buf, off);
build_copy_store(&buf, T0, off);
build_copy_store_pref(&buf, off + copy_word_size);
build_copy_store(&buf, T1, off + copy_word_size);
build_copy_store_pref(&buf, off + 2 * copy_word_size);
build_copy_store(&buf, T2, off + 2 * copy_word_size);
build_copy_store_pref(&buf, off + 3 * copy_word_size);
if (off == -(4 * copy_word_size))
uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
build_copy_store(&buf, T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < 0);
if (pref_bias_copy_load - pref_bias_copy_store) {
pg_addiu(&buf, A2, A0,
pref_bias_copy_load - pref_bias_copy_store);
uasm_l_copy_pref_store(&l, buf);
off = 0;
do {
build_copy_load(&buf, T0, off);
build_copy_load(&buf, T1, off + copy_word_size);
build_copy_load(&buf, T2, off + 2 * copy_word_size);
build_copy_load(&buf, T3, off + 3 * copy_word_size);
build_copy_store_pref(&buf, off);
build_copy_store(&buf, T0, off);
build_copy_store_pref(&buf, off + copy_word_size);
build_copy_store(&buf, T1, off + copy_word_size);
build_copy_store_pref(&buf, off + 2 * copy_word_size);
build_copy_store(&buf, T2, off + 2 * copy_word_size);
build_copy_store_pref(&buf, off + 3 * copy_word_size);
build_copy_store(&buf, T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < half_copy_loop_size);
pg_addiu(&buf, A1, A1, 2 * off);
pg_addiu(&buf, A0, A0, 2 * off);
off = -off;
do {
build_copy_load(&buf, T0, off);
build_copy_load(&buf, T1, off + copy_word_size);
build_copy_load(&buf, T2, off + 2 * copy_word_size);
build_copy_load(&buf, T3, off + 3 * copy_word_size);
build_copy_store_pref(&buf, off);
build_copy_store(&buf, T0, off);
build_copy_store_pref(&buf, off + copy_word_size);
build_copy_store(&buf, T1, off + copy_word_size);
build_copy_store_pref(&buf, off + 2 * copy_word_size);
build_copy_store(&buf, T2, off + 2 * copy_word_size);
build_copy_store_pref(&buf, off + 3 * copy_word_size);
if (off == -(4 * copy_word_size))
uasm_il_bne(&buf, &r, A2, A0,
label_copy_pref_store);
build_copy_store(&buf, T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < 0);
}
if (pref_bias_copy_store) {
pg_addiu(&buf, A2, A0, pref_bias_copy_store);
uasm_l_copy_nopref(&l, buf);
off = 0;
do {
build_copy_load(&buf, T0, off);
build_copy_load(&buf, T1, off + copy_word_size);
build_copy_load(&buf, T2, off + 2 * copy_word_size);
build_copy_load(&buf, T3, off + 3 * copy_word_size);
build_copy_store(&buf, T0, off);
build_copy_store(&buf, T1, off + copy_word_size);
build_copy_store(&buf, T2, off + 2 * copy_word_size);
build_copy_store(&buf, T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < half_copy_loop_size);
pg_addiu(&buf, A1, A1, 2 * off);
pg_addiu(&buf, A0, A0, 2 * off);
off = -off;
do {
build_copy_load(&buf, T0, off);
build_copy_load(&buf, T1, off + copy_word_size);
build_copy_load(&buf, T2, off + 2 * copy_word_size);
build_copy_load(&buf, T3, off + 3 * copy_word_size);
build_copy_store(&buf, T0, off);
build_copy_store(&buf, T1, off + copy_word_size);
build_copy_store(&buf, T2, off + 2 * copy_word_size);
if (off == -(4 * copy_word_size))
uasm_il_bne(&buf, &r, A2, A0,
label_copy_nopref);
build_copy_store(&buf, T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < 0);
}
uasm_i_jr(&buf, RA);
uasm_i_nop(&buf);
BUG_ON(buf > &__copy_page_end);
uasm_resolve_relocs(relocs, labels);
pr_debug("Synthesized copy page handler (%u instructions).\n",
(u32)(buf - &__copy_page_start));
pr_debug("\t.set push\n");
pr_debug("\t.set noreorder\n");
for (i = 0; i < (buf - &__copy_page_start); i++)
pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
pr_debug("\t.set pop\n");
}
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);
/*
* Pad descriptors to cacheline, since each is exclusively owned by a
* particular CPU.
*/
struct dmadscr {
u64 dscr_a;
u64 dscr_b;
u64 pad_a;
u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
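/*
* clear_page()/copy_page() via the SB1250 data mover: fill in this CPU's
* descriptor, kick the channel by writing the descriptor count register,
* then poll the debug register until the interrupt status bit signals
* completion. Pages outside KSEG0 fall back to the CPU routines.
*/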
void clear_page(void *page)
{
u64 to_phys = CPHYSADDR((unsigned long)page);
unsigned int cpu = smp_processor_id();
/* if the page is not in KSEG0, use old way */
if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
return clear_page_cpu(page);
page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
/*
* Don't really want to do it this way, but there's no
* reliable way to delay completion detection.
*/
while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
& M_DM_DSCR_BASE_INTERRUPT))
;
__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}
EXPORT_SYMBOL(clear_page);
void copy_page(void *to, void *from)
{
u64 from_phys = CPHYSADDR((unsigned long)from);
u64 to_phys = CPHYSADDR((unsigned long)to);
unsigned int cpu = smp_processor_id();
/* if any page is not in KSEG0, use old way */
if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
|| (long)KSEGX((unsigned long)from) != (long)CKSEG0)
return copy_page_cpu(to, from);
page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
M_DM_DSCRA_INTERRUPT;
page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
/*
* Don't really want to do it this way, but there's no
* reliable way to delay completion detection.
*/
while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
& M_DM_DSCR_BASE_INTERRUPT))
;
__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}
EXPORT_SYMBOL(copy_page);
#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */
| linux-master | arch/mips/mm/page.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999, 2000 by Silicon Graphics
* Copyright (C) 2003 by Ralf Baechle
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
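/*
* Initialise a page directory by pointing every entry at the appropriate
* invalid lower-level table (pud, pmd or pte, depending on which levels are
* folded), eight entries per loop iteration.
*/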
void pgd_init(void *addr)
{
unsigned long *p, *end;
unsigned long entry;
#if !defined(__PAGETABLE_PUD_FOLDED)
entry = (unsigned long)invalid_pud_table;
#elif !defined(__PAGETABLE_PMD_FOLDED)
entry = (unsigned long)invalid_pmd_table;
#else
entry = (unsigned long)invalid_pte_table;
#endif
p = (unsigned long *) addr;
end = p + PTRS_PER_PGD;
do {
p[0] = entry;
p[1] = entry;
p[2] = entry;
p[3] = entry;
p[4] = entry;
p += 8;
p[-3] = entry;
p[-2] = entry;
p[-1] = entry;
} while (p != end);
}
#ifndef __PAGETABLE_PMD_FOLDED
void pmd_init(void *addr)
{
unsigned long *p, *end;
unsigned long pagetable = (unsigned long)invalid_pte_table;
p = (unsigned long *)addr;
end = p + PTRS_PER_PMD;
do {
p[0] = pagetable;
p[1] = pagetable;
p[2] = pagetable;
p[3] = pagetable;
p[4] = pagetable;
p += 8;
p[-3] = pagetable;
p[-2] = pagetable;
p[-1] = pagetable;
} while (p != end);
}
EXPORT_SYMBOL_GPL(pmd_init);
#endif
#ifndef __PAGETABLE_PUD_FOLDED
void pud_init(void *addr)
{
unsigned long *p, *end;
unsigned long pagetable = (unsigned long)invalid_pmd_table;
p = (unsigned long *)addr;
end = p + PTRS_PER_PUD;
do {
p[0] = pagetable;
p[1] = pagetable;
p[2] = pagetable;
p[3] = pagetable;
p[4] = pagetable;
p += 8;
p[-3] = pagetable;
p[-2] = pagetable;
p[-1] = pagetable;
} while (p != end);
}
#endif
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
pmd_t pmd;
pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
return pmd;
}
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
}
void __init pagetable_init(void)
{
unsigned long vaddr;
pgd_t *pgd_base;
/* Initialize the entire pgd. */
pgd_init(swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
pud_init(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_init(invalid_pmd_table);
#endif
pgd_base = swapper_pg_dir;
/*
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
}
| linux-master | arch/mips/mm/pgtable-64.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/pgalloc.h>
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *init, *ret = NULL;
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM,
PGD_TABLE_ORDER);
if (ptdesc) {
ret = ptdesc_address(ptdesc);
init = pgd_offset(&init_mm, 0UL);
pgd_init(ret);
memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return ret;
}
EXPORT_SYMBOL_GPL(pgd_alloc);
| linux-master | arch/mips/mm/pgtable.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996 David S. Miller ([email protected])
* Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle [email protected]
* Carsten Langgaard, [email protected]
* Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>
extern void build_tlb_refill_handler(void);
/*
* LOONGSON-2 has a 4-entry ITLB which is a subset of the JTLB; LOONGSON-3 has
* a 4-entry ITLB and a 4-entry DTLB which are subsets of the JTLB.
* Unfortunately, the ITLB/DTLB are not totally transparent to software.
*/
static inline void flush_micro_tlb(void)
{
switch (current_cpu_type()) {
case CPU_LOONGSON2EF:
write_c0_diag(LOONGSON_DIAG_ITLB);
break;
case CPU_LOONGSON64:
write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
break;
default:
break;
}
}
static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_EXEC)
flush_micro_tlb();
}
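/*
* Flush the entire local TLB. If the core supports tlbinvf and there are no
* wired entries, invalidate the VTLB and then each FTLB set directly;
* otherwise overwrite every non-wired entry with a unique, unmapped ENTRYHI
* value.
*/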
void local_flush_tlb_all(void)
{
unsigned long flags;
unsigned long old_ctx;
int entry, ftlbhighset;
local_irq_save(flags);
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
htw_stop();
write_c0_entrylo0(0);
write_c0_entrylo1(0);
entry = num_wired_entries();
/*
* Blast 'em all away.
* If there are any wired entries, fall back to iterating
*/
if (cpu_has_tlbinv && !entry) {
if (current_cpu_data.tlbsizevtlb) {
write_c0_index(0);
mtc0_tlbw_hazard();
tlbinvf(); /* invalidate VTLB */
}
ftlbhighset = current_cpu_data.tlbsizevtlb +
current_cpu_data.tlbsizeftlbsets;
for (entry = current_cpu_data.tlbsizevtlb;
entry < ftlbhighset;
entry++) {
write_c0_index(entry);
mtc0_tlbw_hazard();
tlbinvf(); /* invalidate one FTLB set */
}
} else {
while (entry < current_cpu_data.tlbsize) {
/* Make sure all entries differ. */
write_c0_entryhi(UNIQUE_ENTRYHI(entry));
write_c0_index(entry);
mtc0_tlbw_hazard();
tlb_write_indexed();
entry++;
}
}
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
htw_start();
flush_micro_tlb();
local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
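/*
* Flush the TLB entries covering [start, end) for vma's mm. Small ranges are
* handled by probing and invalidating each even/odd page pair individually;
* larger ranges simply drop the mm's context so it gets a fresh ASID on next
* use.
*/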
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
int cpu = smp_processor_id();
if (cpu_context(cpu, mm) != 0) {
unsigned long size, flags;
local_irq_save(flags);
start = round_down(start, PAGE_SIZE << 1);
end = round_up(end, PAGE_SIZE << 1);
size = (end - start) >> (PAGE_SHIFT + 1);
if (size <= (current_cpu_data.tlbsizeftlbsets ?
current_cpu_data.tlbsize / 8 :
current_cpu_data.tlbsize / 2)) {
unsigned long old_entryhi, old_mmid;
int newpid = cpu_asid(cpu, mm);
old_entryhi = read_c0_entryhi();
if (cpu_has_mmid) {
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(newpid);
}
htw_stop();
while (start < end) {
int idx;
if (cpu_has_mmid)
write_c0_entryhi(start);
else
write_c0_entryhi(start | newpid);
start += (PAGE_SIZE << 1);
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
idx = read_c0_index();
write_c0_entrylo0(0);
write_c0_entrylo1(0);
if (idx < 0)
continue;
/* Make sure all entries differ. */
write_c0_entryhi(UNIQUE_ENTRYHI(idx));
mtc0_tlbw_hazard();
tlb_write_indexed();
}
tlbw_use_hazard();
write_c0_entryhi(old_entryhi);
if (cpu_has_mmid)
write_c0_memorymapid(old_mmid);
htw_start();
} else {
drop_mmu_context(mm);
}
flush_micro_tlb();
local_irq_restore(flags);
}
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long size, flags;
local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1;
if (size <= (current_cpu_data.tlbsizeftlbsets ?
current_cpu_data.tlbsize / 8 :
current_cpu_data.tlbsize / 2)) {
int pid = read_c0_entryhi();
start &= (PAGE_MASK << 1);
end += ((PAGE_SIZE << 1) - 1);
end &= (PAGE_MASK << 1);
htw_stop();
while (start < end) {
int idx;
write_c0_entryhi(start);
start += (PAGE_SIZE << 1);
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
idx = read_c0_index();
write_c0_entrylo0(0);
write_c0_entrylo1(0);
if (idx < 0)
continue;
/* Make sure all entries differ. */
write_c0_entryhi(UNIQUE_ENTRYHI(idx));
mtc0_tlbw_hazard();
tlb_write_indexed();
}
tlbw_use_hazard();
write_c0_entryhi(pid);
htw_start();
} else {
local_flush_tlb_all();
}
flush_micro_tlb();
local_irq_restore(flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
int cpu = smp_processor_id();
if (cpu_context(cpu, vma->vm_mm) != 0) {
unsigned long old_mmid;
unsigned long flags, old_entryhi;
int idx;
page &= (PAGE_MASK << 1);
local_irq_save(flags);
old_entryhi = read_c0_entryhi();
htw_stop();
if (cpu_has_mmid) {
old_mmid = read_c0_memorymapid();
write_c0_entryhi(page);
write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
} else {
write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
}
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
idx = read_c0_index();
write_c0_entrylo0(0);
write_c0_entrylo1(0);
if (idx < 0)
goto finish;
/* Make sure all entries differ. */
write_c0_entryhi(UNIQUE_ENTRYHI(idx));
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
finish:
write_c0_entryhi(old_entryhi);
if (cpu_has_mmid)
write_c0_memorymapid(old_mmid);
htw_start();
flush_micro_tlb_vm(vma);
local_irq_restore(flags);
}
}
/*
* This one is only used for pages with the global bit set so we don't care
* much about the ASID.
*/
void local_flush_tlb_one(unsigned long page)
{
unsigned long flags;
int oldpid, idx;
local_irq_save(flags);
oldpid = read_c0_entryhi();
htw_stop();
page &= (PAGE_MASK << 1);
write_c0_entryhi(page);
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
idx = read_c0_index();
write_c0_entrylo0(0);
write_c0_entrylo1(0);
if (idx >= 0) {
/* Make sure all entries differ. */
write_c0_entryhi(UNIQUE_ENTRYHI(idx));
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
}
write_c0_entryhi(oldpid);
htw_start();
flush_micro_tlb();
local_irq_restore(flags);
}
/*
* We will need multiple versions of update_mmu_cache(), one that just
* updates the TLB with the new pte(s), and another which also checks
* for the R4k "end of page" hardware bug and works around it as needed.
*/
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
unsigned long flags;
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep, *ptemap = NULL;
int idx, pid;
/*
* Handle the debugger faulting in a page for the debuggee.
*/
if (current->active_mm != vma->vm_mm)
return;
local_irq_save(flags);
htw_stop();
address &= (PAGE_MASK << 1);
if (cpu_has_mmid) {
write_c0_entryhi(address);
} else {
pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
write_c0_entryhi(address | pid);
}
pgdp = pgd_offset(vma->vm_mm, address);
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
p4dp = p4d_offset(pgdp, address);
pudp = pud_offset(p4dp, address);
pmdp = pmd_offset(pudp, address);
idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* this could be a huge page */
if (pmd_huge(*pmdp)) {
unsigned long lo;
write_c0_pagemask(PM_HUGE_MASK);
ptep = (pte_t *)pmdp;
lo = pte_to_entrylo(pte_val(*ptep));
write_c0_entrylo0(lo);
write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
mtc0_tlbw_hazard();
if (idx < 0)
tlb_write_random();
else
tlb_write_indexed();
tlbw_use_hazard();
write_c0_pagemask(PM_DEFAULT_MASK);
} else
#endif
{
ptemap = ptep = pte_offset_map(pmdp, address);
/*
* update_mmu_cache() is called between pte_offset_map_lock()
* and pte_unmap_unlock(), so we can assume that ptep is not
* NULL here: and what should be done below if it were NULL?
*/
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
if (cpu_has_xpa)
writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
ptep++;
write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
if (cpu_has_xpa)
writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
write_c0_entrylo0(ptep->pte_high);
ptep++;
write_c0_entrylo1(ptep->pte_high);
#endif
#else
write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
mtc0_tlbw_hazard();
if (idx < 0)
tlb_write_random();
else
tlb_write_indexed();
}
tlbw_use_hazard();
htw_start();
flush_micro_tlb_vm(vma);
if (ptemap)
pte_unmap(ptemap);
local_irq_restore(flags);
}
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
panic("Broken for XPA kernels");
#else
unsigned int old_mmid;
unsigned long flags;
unsigned long wired;
unsigned long old_pagemask;
unsigned long old_ctx;
local_irq_save(flags);
if (cpu_has_mmid) {
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(MMID_KERNEL_WIRED);
}
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
htw_stop();
old_pagemask = read_c0_pagemask();
wired = num_wired_entries();
write_c0_wired(wired + 1);
write_c0_index(wired);
tlbw_use_hazard(); /* What is the hazard here? */
write_c0_pagemask(pagemask);
write_c0_entryhi(entryhi);
write_c0_entrylo0(entrylo0);
write_c0_entrylo1(entrylo1);
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
if (cpu_has_mmid)
write_c0_memorymapid(old_mmid);
tlbw_use_hazard(); /* What is the hazard here? */
htw_start();
write_c0_pagemask(old_pagemask);
local_flush_tlb_all();
local_irq_restore(flags);
#endif
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int has_transparent_hugepage(void)
{
static unsigned int mask = -1;
if (mask == -1) { /* first call comes during __init */
unsigned long flags;
local_irq_save(flags);
write_c0_pagemask(PM_HUGE_MASK);
back_to_back_c0_hazard();
mask = read_c0_pagemask();
write_c0_pagemask(PM_DEFAULT_MASK);
local_irq_restore(flags);
}
return mask == PM_HUGE_MASK;
}
EXPORT_SYMBOL(has_transparent_hugepage);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
* Used for loading TLB entries before trap_init() has started, when we
* don't actually want to add a wired entry which remains throughout the
* lifetime of the system
*/
int temp_tlb_entry;
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
int ret = 0;
unsigned long flags;
unsigned long wired;
unsigned long old_pagemask;
unsigned long old_ctx;
local_irq_save(flags);
/* Save old context and create impossible VPN2 value */
htw_stop();
old_ctx = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
wired = num_wired_entries();
if (--temp_tlb_entry < wired) {
printk(KERN_WARNING
"No TLB space left for add_temporary_entry\n");
ret = -ENOSPC;
goto out;
}
write_c0_index(temp_tlb_entry);
write_c0_pagemask(pagemask);
write_c0_entryhi(entryhi);
write_c0_entrylo0(entrylo0);
write_c0_entrylo1(entrylo1);
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
write_c0_pagemask(old_pagemask);
htw_start();
out:
local_irq_restore(flags);
return ret;
}
static int ntlb;
static int __init set_ntlb(char *str)
{
get_option(&str, &ntlb);
return 1;
}
__setup("ntlb=", set_ntlb);
/*
* Configure TLB (for init or after a CPU has been powered off).
*/
static void r4k_tlb_configure(void)
{
/*
* You should never change this register:
* - On R4600 1.7 the tlbp never hits for pages smaller than
* the value in the c0_pagemask register.
* - The entire mm handling assumes the c0_pagemask register to
* be set to fixed-size pages.
*/
write_c0_pagemask(PM_DEFAULT_MASK);
back_to_back_c0_hazard();
if (read_c0_pagemask() != PM_DEFAULT_MASK)
panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
write_c0_wired(0);
if (current_cpu_type() == CPU_R10000 ||
current_cpu_type() == CPU_R12000 ||
current_cpu_type() == CPU_R14000 ||
current_cpu_type() == CPU_R16000)
write_c0_framemask(0);
if (cpu_has_rixi) {
/*
* Enable the no read, no exec bits, and enable large physical
* address.
*/
#ifdef CONFIG_64BIT
set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
}
temp_tlb_entry = current_cpu_data.tlbsize - 1;
/* From this point on the ARC firmware is dead. */
local_flush_tlb_all();
/* Did I tell you that ARC SUCKS? */
}
void tlb_init(void)
{
r4k_tlb_configure();
if (ntlb) {
if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
int wired = current_cpu_data.tlbsize - ntlb;
write_c0_wired(wired);
write_c0_index(wired-1);
printk("Restricting TLB to %d entries\n", ntlb);
} else
printk("Ignoring invalid argument ntlb=%d\n", ntlb);
}
build_tlb_refill_handler();
}
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
void *v)
{
switch (cmd) {
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
r4k_tlb_configure();
break;
}
return NOTIFY_OK;
}
static struct notifier_block r4k_tlb_pm_notifier_block = {
.notifier_call = r4k_tlb_pm_notifier,
};
static int __init r4k_tlb_init_pm(void)
{
return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);
| linux-master | arch/mips/mm/tlb-r4k.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sc-rm7k.c: RM7000 cache management functions.
*
* Copyright (C) 1997, 2001, 2003, 2004 Ralf Baechle ([email protected])
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <asm/addrspace.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/cacheflush.h> /* for run_uncached() */
/* Primary cache parameters. */
#define sc_lsize 32
#define tc_pagesize (32*128)
/* Secondary cache parameters. */
#define scache_size (256*1024) /* Fixed to 256KiB on RM7000 */
/* Tertiary cache parameters */
#define tc_lsize 32
extern unsigned long icache_way_size, dcache_way_size;
static unsigned long tcache_size;
#include <asm/r4kcache.h>
static int rm7k_tcache_init;
/*
* Writeback and invalidate the primary cache dcache before DMA.
* (XXX These need to be fixed ...)
*/
static void rm7k_sc_wback_inv(unsigned long addr, unsigned long size)
{
unsigned long end, a;
pr_debug("rm7k_sc_wback_inv[%08lx,%08lx]", addr, size);
/* Catch bad driver code */
BUG_ON(size == 0);
blast_scache_range(addr, addr + size);
if (!rm7k_tcache_init)
return;
a = addr & ~(tc_pagesize - 1);
end = (addr + size - 1) & ~(tc_pagesize - 1);
while(1) {
invalidate_tcache_page(a); /* Page_Invalidate_T */
if (a == end)
break;
a += tc_pagesize;
}
}
static void rm7k_sc_inv(unsigned long addr, unsigned long size)
{
unsigned long end, a;
pr_debug("rm7k_sc_inv[%08lx,%08lx]", addr, size);
/* Catch bad driver code */
BUG_ON(size == 0);
blast_inv_scache_range(addr, addr + size);
if (!rm7k_tcache_init)
return;
a = addr & ~(tc_pagesize - 1);
end = (addr + size - 1) & ~(tc_pagesize - 1);
while(1) {
invalidate_tcache_page(a); /* Page_Invalidate_T */
if (a == end)
break;
a += tc_pagesize;
}
}
static void blast_rm7k_tcache(void)
{
unsigned long start = CKSEG0ADDR(0);
unsigned long end = start + tcache_size;
write_c0_taglo(0);
while (start < end) {
cache_op(Page_Invalidate_T, start);
start += tc_pagesize;
}
}
/*
* This function is executed in uncached address space.
*/
static void __rm7k_tc_enable(void)
{
int i;
set_c0_config(RM7K_CONF_TE);
write_c0_taglo(0);
write_c0_taghi(0);
for (i = 0; i < tcache_size; i += tc_lsize)
cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
}
static void rm7k_tc_enable(void)
{
if (read_c0_config() & RM7K_CONF_TE)
return;
BUG_ON(tcache_size == 0);
run_uncached(__rm7k_tc_enable);
}
/*
* This function is executed in uncached address space.
*/
static void __rm7k_sc_enable(void)
{
int i;
set_c0_config(RM7K_CONF_SE);
write_c0_taglo(0);
write_c0_taghi(0);
for (i = 0; i < scache_size; i += sc_lsize)
cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
}
static void rm7k_sc_enable(void)
{
if (read_c0_config() & RM7K_CONF_SE)
return;
pr_info("Enabling secondary cache...\n");
run_uncached(__rm7k_sc_enable);
if (rm7k_tcache_init)
rm7k_tc_enable();
}
static void rm7k_tc_disable(void)
{
unsigned long flags;
local_irq_save(flags);
blast_rm7k_tcache();
clear_c0_config(RM7K_CONF_TE);
local_irq_restore(flags);
}
static void rm7k_sc_disable(void)
{
clear_c0_config(RM7K_CONF_SE);
if (rm7k_tcache_init)
rm7k_tc_disable();
}
static struct bcache_ops rm7k_sc_ops = {
.bc_enable = rm7k_sc_enable,
.bc_disable = rm7k_sc_disable,
.bc_wback_inv = rm7k_sc_wback_inv,
.bc_inv = rm7k_sc_inv
};
/*
* This is a probing function like the one found in c-r4k.c; we look for the
* wrap-around point using different addresses.
*/
static void __probe_tcache(void)
{
unsigned long flags, addr, begin, end, pow2;
begin = (unsigned long) &_stext;
begin &= ~((8 * 1024 * 1024) - 1);
end = begin + (8 * 1024 * 1024);
local_irq_save(flags);
set_c0_config(RM7K_CONF_TE);
/* Fill size-multiple lines with a valid tag */
pow2 = (256 * 1024);
for (addr = begin; addr <= end; addr = (begin + pow2)) {
unsigned long *p = (unsigned long *) addr;
__asm__ __volatile__("nop" : : "r" (*p));
pow2 <<= 1;
}
/* Load first line with a 0 tag, to check after */
write_c0_taglo(0);
write_c0_taghi(0);
cache_op(Index_Store_Tag_T, begin);
/* Look for the wrap-around */
pow2 = (512 * 1024);
for (addr = begin + (512 * 1024); addr <= end; addr = begin + pow2) {
cache_op(Index_Load_Tag_T, addr);
if (!read_c0_taglo())
break;
pow2 <<= 1;
}
addr -= begin;
tcache_size = addr;
clear_c0_config(RM7K_CONF_TE);
local_irq_restore(flags);
}
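/*
* Set up the RM7000 secondary cache (fixed at 256 KiB, 4-way), bailing out
* early when RM7K_CONF_SC is set. If RM7K_CONF_TC is clear, the tertiary
* cache is sized by probing for its wrap-around point and then enabled.
*/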
void rm7k_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
if ((config & RM7K_CONF_SC))
return;
c->scache.linesz = sc_lsize;
c->scache.ways = 4;
c->scache.waybit= __ffs(scache_size / c->scache.ways);
c->scache.waysize = scache_size / c->scache.ways;
c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n",
(scache_size >> 10), sc_lsize);
if (!(config & RM7K_CONF_SE))
rm7k_sc_enable();
bcops = &rm7k_sc_ops;
/*
* While we're at it let's deal with the tertiary cache.
*/
rm7k_tcache_init = 0;
tcache_size = 0;
if (config & RM7K_CONF_TC)
return;
/*
* No efficient way to ask the hardware for the size of the tcache,
* so must probe for it.
*/
run_uncached(__probe_tcache);
rm7k_tc_enable();
rm7k_tcache_init = 1;
c->tcache.linesz = tc_lsize;
c->tcache.ways = 1;
pr_info("Tertiary cache size %ldK.\n", (tcache_size >> 10));
}
| linux-master | arch/mips/mm/sc-rm7k.c |
// SPDX-License-Identifier: GPL-2.0
/*
* r2300.c: R2000 and R3000 specific mmu/cache code.
*
* Copyright (C) 1996 David S. Miller ([email protected])
*
* with a lot of changes to make this thing work for R3000s
* Tx39XX R4k style caches added. HK
* Copyright (C) 1998, 1999, 2000 Harald Koerfgen
* Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
* Copyright (C) 2001, 2004, 2007 Maciej W. Rozycki
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
static unsigned long icache_size, dcache_size; /* Size in bytes */
static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */
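/*
* The R3000 caches are sized by isolating the cache (and swapping in the
* I-cache via ST0_SWC), writing a marker pattern at KSEG0 and detecting
* where accesses wrap around; the line size is derived from the cache-miss
* status bit in the same isolated mode.
*/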
unsigned long r3k_cache_size(unsigned long ca_flags)
{
unsigned long flags, status, dummy, size;
volatile unsigned long *p;
p = (volatile unsigned long *) KSEG0;
flags = read_c0_status();
/* isolate cache space */
write_c0_status((ca_flags|flags)&~ST0_IEC);
*p = 0xa5a55a5a;
dummy = *p;
status = read_c0_status();
if (dummy != 0xa5a55a5a || (status & ST0_CM)) {
size = 0;
} else {
for (size = 128; size <= 0x40000; size <<= 1)
*(p + size) = 0;
*p = -1;
for (size = 128;
(size <= 0x40000) && (*(p + size) == 0);
size <<= 1)
;
if (size > 0x40000)
size = 0;
}
write_c0_status(flags);
return size * sizeof(*p);
}
unsigned long r3k_cache_lsize(unsigned long ca_flags)
{
unsigned long flags, status, lsize, i;
volatile unsigned long *p;
p = (volatile unsigned long *) KSEG0;
flags = read_c0_status();
/* isolate cache space */
write_c0_status((ca_flags|flags)&~ST0_IEC);
for (i = 0; i < 128; i++)
*(p + i) = 0;
*(volatile unsigned char *)p = 0;
for (lsize = 1; lsize < 128; lsize <<= 1) {
*(p + lsize);
status = read_c0_status();
if (!(status & ST0_CM))
break;
}
for (i = 0; i < 128; i += lsize)
*(volatile unsigned char *)(p + i) = 0;
write_c0_status(flags);
return lsize * sizeof(*p);
}
static void r3k_probe_cache(void)
{
dcache_size = r3k_cache_size(ST0_ISC);
if (dcache_size)
dcache_lsize = r3k_cache_lsize(ST0_ISC);
icache_size = r3k_cache_size(ST0_ISC|ST0_SWC);
if (icache_size)
icache_lsize = r3k_cache_lsize(ST0_ISC|ST0_SWC);
}
static void r3k_flush_icache_range(unsigned long start, unsigned long end)
{
unsigned long size, i, flags;
volatile unsigned char *p;
size = end - start;
if (size > icache_size || KSEGX(start) != KSEG0) {
start = KSEG0;
size = icache_size;
}
p = (char *)start;
flags = read_c0_status();
/* isolate cache space */
write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
for (i = 0; i < size; i += 0x080) {
asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
"sb\t$0, 0x008(%0)\n\t"
"sb\t$0, 0x00c(%0)\n\t"
"sb\t$0, 0x010(%0)\n\t"
"sb\t$0, 0x014(%0)\n\t"
"sb\t$0, 0x018(%0)\n\t"
"sb\t$0, 0x01c(%0)\n\t"
"sb\t$0, 0x020(%0)\n\t"
"sb\t$0, 0x024(%0)\n\t"
"sb\t$0, 0x028(%0)\n\t"
"sb\t$0, 0x02c(%0)\n\t"
"sb\t$0, 0x030(%0)\n\t"
"sb\t$0, 0x034(%0)\n\t"
"sb\t$0, 0x038(%0)\n\t"
"sb\t$0, 0x03c(%0)\n\t"
"sb\t$0, 0x040(%0)\n\t"
"sb\t$0, 0x044(%0)\n\t"
"sb\t$0, 0x048(%0)\n\t"
"sb\t$0, 0x04c(%0)\n\t"
"sb\t$0, 0x050(%0)\n\t"
"sb\t$0, 0x054(%0)\n\t"
"sb\t$0, 0x058(%0)\n\t"
"sb\t$0, 0x05c(%0)\n\t"
"sb\t$0, 0x060(%0)\n\t"
"sb\t$0, 0x064(%0)\n\t"
"sb\t$0, 0x068(%0)\n\t"
"sb\t$0, 0x06c(%0)\n\t"
"sb\t$0, 0x070(%0)\n\t"
"sb\t$0, 0x074(%0)\n\t"
"sb\t$0, 0x078(%0)\n\t"
"sb\t$0, 0x07c(%0)\n\t"
: : "r" (p) );
p += 0x080;
}
write_c0_status(flags);
}
static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
{
unsigned long size, i, flags;
volatile unsigned char *p;
size = end - start;
if (size > dcache_size || KSEGX(start) != KSEG0) {
start = KSEG0;
size = dcache_size;
}
p = (char *)start;
flags = read_c0_status();
/* isolate cache space */
write_c0_status((ST0_ISC|flags)&~ST0_IEC);
for (i = 0; i < size; i += 0x080) {
asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
"sb\t$0, 0x008(%0)\n\t"
"sb\t$0, 0x00c(%0)\n\t"
"sb\t$0, 0x010(%0)\n\t"
"sb\t$0, 0x014(%0)\n\t"
"sb\t$0, 0x018(%0)\n\t"
"sb\t$0, 0x01c(%0)\n\t"
"sb\t$0, 0x020(%0)\n\t"
"sb\t$0, 0x024(%0)\n\t"
"sb\t$0, 0x028(%0)\n\t"
"sb\t$0, 0x02c(%0)\n\t"
"sb\t$0, 0x030(%0)\n\t"
"sb\t$0, 0x034(%0)\n\t"
"sb\t$0, 0x038(%0)\n\t"
"sb\t$0, 0x03c(%0)\n\t"
"sb\t$0, 0x040(%0)\n\t"
"sb\t$0, 0x044(%0)\n\t"
"sb\t$0, 0x048(%0)\n\t"
"sb\t$0, 0x04c(%0)\n\t"
"sb\t$0, 0x050(%0)\n\t"
"sb\t$0, 0x054(%0)\n\t"
"sb\t$0, 0x058(%0)\n\t"
"sb\t$0, 0x05c(%0)\n\t"
"sb\t$0, 0x060(%0)\n\t"
"sb\t$0, 0x064(%0)\n\t"
"sb\t$0, 0x068(%0)\n\t"
"sb\t$0, 0x06c(%0)\n\t"
"sb\t$0, 0x070(%0)\n\t"
"sb\t$0, 0x074(%0)\n\t"
"sb\t$0, 0x078(%0)\n\t"
"sb\t$0, 0x07c(%0)\n\t"
: : "r" (p) );
p += 0x080;
}
write_c0_status(flags);
}
static inline void r3k_flush_cache_all(void)
{
}
static inline void r3k___flush_cache_all(void)
{
r3k_flush_dcache_range(KSEG0, KSEG0 + dcache_size);
r3k_flush_icache_range(KSEG0, KSEG0 + icache_size);
}
static void r3k_flush_cache_mm(struct mm_struct *mm)
{
}
static void r3k_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
}
static void r3k_flush_cache_page(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn)
{
unsigned long kaddr = KSEG0ADDR(pfn << PAGE_SHIFT);
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
pmd_t *pmdp;
pte_t *ptep;
pr_debug("cpage[%08llx,%08lx]\n",
cpu_context(smp_processor_id(), mm), addr);
/* No ASID => no such page in the cache. */
if (cpu_context(smp_processor_id(), mm) == 0)
return;
pmdp = pmd_off(mm, addr);
ptep = pte_offset_kernel(pmdp, addr);
/* Invalid => no such page in the cache. */
if (!(pte_val(*ptep) & _PAGE_PRESENT))
return;
r3k_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
if (exec)
r3k_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}
static void r3k_flush_data_cache_page(unsigned long addr)
{
}
static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
BUG();
}
static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
{
/* Catch bad driver code */
BUG_ON(size == 0);
iob();
r3k_flush_dcache_range(start, start + size);
}
void r3k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
r3k_probe_cache();
flush_cache_all = r3k_flush_cache_all;
__flush_cache_all = r3k___flush_cache_all;
flush_cache_mm = r3k_flush_cache_mm;
flush_cache_range = r3k_flush_cache_range;
flush_cache_page = r3k_flush_cache_page;
flush_icache_range = r3k_flush_icache_range;
local_flush_icache_range = r3k_flush_icache_range;
__flush_icache_user_range = r3k_flush_icache_range;
__local_flush_icache_user_range = r3k_flush_icache_range;
__flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;
flush_data_cache_page = r3k_flush_data_cache_page;
_dma_cache_wback_inv = r3k_dma_cache_wback_inv;
_dma_cache_wback = r3k_dma_cache_wback_inv;
_dma_cache_inv = r3k_dma_cache_wback_inv;
pr_info("Primary instruction cache %ldkB, linesize %ld bytes.\n",
icache_size >> 10, icache_lsize);
pr_info("Primary data cache %ldkB, linesize %ld bytes.\n",
dcache_size >> 10, dcache_lsize);
build_clear_page();
build_copy_page();
}
| linux-master | arch/mips/mm/c-r3k.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle ([email protected])
* Copyright (C) 2007 MIPS Technologies, Inc.
*/
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/bcache.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);
void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);
void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
/* MIPS specific cache operations */
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);
/*
* Dummy cache handling routine
*/
void cache_noop(void) {}
#ifdef CONFIG_BOARD_SCACHE
static struct bcache_ops no_sc_ops = {
.bc_enable = (void *)cache_noop,
.bc_disable = (void *)cache_noop,
.bc_wback_inv = (void *)cache_noop,
.bc_inv = (void *)cache_noop
};
struct bcache_ops *bcops = &no_sc_ops;
#endif
#ifdef CONFIG_DMA_NONCOHERENT
/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);
#endif /* CONFIG_DMA_NONCOHERENT */
/*
* We could optimize the case where the cache argument is not BCACHE but
* that seems to be a very atypical use ...
*/
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
unsigned int, cache)
{
if (bytes == 0)
return 0;
if (!access_ok((void __user *) addr, bytes))
return -EFAULT;
__flush_icache_user_range(addr, addr + bytes);
return 0;
}
void __flush_dcache_pages(struct page *page, unsigned int nr)
{
struct folio *folio = page_folio(page);
struct address_space *mapping = folio_flush_mapping(folio);
unsigned long addr;
unsigned int i;
if (mapping && !mapping_mapped(mapping)) {
folio_set_dcache_dirty(folio);
return;
}
/*
* We could delay the flush for the !page_mapping case too. But that
* case is for exec env/arg pages and those are 99% certain to get
* faulted into the tlb (and thus flushed) anyway.
*/
for (i = 0; i < nr; i++) {
addr = (unsigned long)kmap_local_page(page + i);
flush_data_cache_page(addr);
kunmap_local((void *)addr);
}
}
EXPORT_SYMBOL(__flush_dcache_pages);
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
unsigned long addr = (unsigned long) page_address(page);
struct folio *folio = page_folio(page);
if (pages_do_alias(addr, vmaddr)) {
if (folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
void *kaddr;
kaddr = kmap_coherent(page, vmaddr);
flush_data_cache_page((unsigned long)kaddr);
kunmap_coherent();
} else
flush_data_cache_page(addr);
}
}
EXPORT_SYMBOL(__flush_anon_page);
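/*
* Called after a PTE has been installed. If the backing folio was marked
* dcache-dirty, flush each of its pages whose kernel alias conflicts with
* the user address (or all of them when the mapping is executable and the
* I-cache does not fill from the D-cache), then clear the dirty flag.
*/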
void __update_cache(unsigned long address, pte_t pte)
{
struct folio *folio;
unsigned long pfn, addr;
int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
unsigned int i;
pfn = pte_pfn(pte);
if (unlikely(!pfn_valid(pfn)))
return;
folio = page_folio(pfn_to_page(pfn));
address &= PAGE_MASK;
address -= offset_in_folio(folio, pfn << PAGE_SHIFT);
if (folio_test_dcache_dirty(folio)) {
for (i = 0; i < folio_nr_pages(folio); i++) {
addr = (unsigned long)kmap_local_folio(folio, i);
if (exec || pages_do_alias(addr, address))
flush_data_cache_page(addr);
kunmap_local((void *)addr);
address += PAGE_SIZE;
}
folio_clear_dcache_dirty(folio);
}
}
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
#define PM(p) __pgprot(_page_cachable_default | (p))
static pgprot_t protection_map[16] __ro_after_init;
DECLARE_VM_GET_PAGE_PROT
static inline void setup_protection_map(void)
{
protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
protection_map[4] = PM(_PAGE_PRESENT);
protection_map[5] = PM(_PAGE_PRESENT);
protection_map[6] = PM(_PAGE_PRESENT);
protection_map[7] = PM(_PAGE_PRESENT);
protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
_PAGE_NO_READ);
protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
protection_map[12] = PM(_PAGE_PRESENT);
protection_map[13] = PM(_PAGE_PRESENT);
protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}
#undef PM
void cpu_cache_init(void)
{
if (cpu_has_3k_cache) {
extern void __weak r3k_cache_init(void);
r3k_cache_init();
}
if (cpu_has_4k_cache) {
extern void __weak r4k_cache_init(void);
r4k_cache_init();
}
if (cpu_has_octeon_cache) {
extern void __weak octeon_cache_init(void);
octeon_cache_init();
}
setup_protection_map();
}
| linux-master | arch/mips/mm/cache.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);
static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;
static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;
static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
return ((a ^ b) & asid_version_mask(cpu)) == 0;
}
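/*
* ASID (non-MMID) path: advance this CPU's ASID and hand it to the mm. When
* the ASID field wraps, the I-cache is flushed on virtually tagged I-caches
* and the whole TLB is flushed to start a new ASID cycle.
*/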
void get_new_mmu_context(struct mm_struct *mm)
{
unsigned int cpu;
u64 asid;
/*
* This function is specific to ASIDs, and should not be called when
* MMIDs are in use.
*/
if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
return;
cpu = smp_processor_id();
asid = asid_cache(cpu);
if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
if (cpu_has_vtag_icache)
flush_icache_all();
local_flush_tlb_all(); /* start new asid cycle */
}
set_cpu_context(cpu, mm, asid);
asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);
void check_mmu_context(struct mm_struct *mm)
{
unsigned int cpu = smp_processor_id();
/*
* This function is specific to ASIDs, and should not be called when
* MMIDs are in use.
*/
if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
return;
/* Check if our ASID is of an older version and thus invalid */
if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);
static void flush_context(void)
{
u64 mmid;
int cpu;
/* Update the list of reserved MMIDs and the MMID bitmap */
bitmap_zero(mmid_map, num_mmids);
/* Reserve an MMID for kmap/wired entries */
__set_bit(MMID_KERNEL_WIRED, mmid_map);
for_each_possible_cpu(cpu) {
mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);
/*
* If this CPU has already been through a
* rollover, but hasn't run another task in
* the meantime, we must preserve its reserved
* MMID, as this is the only trace we have of
* the process it is still running.
*/
if (mmid == 0)
mmid = per_cpu(reserved_mmids, cpu);
__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
per_cpu(reserved_mmids, cpu) = mmid;
}
/*
* Queue a TLB invalidation for each CPU to perform on next
* context-switch
*/
cpumask_setall(&tlb_flush_pending);
}
static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
bool hit;
int cpu;
/*
* Iterate over the set of reserved MMIDs looking for a match.
* If we find one, then we can update our mm to use newmmid
* (i.e. the same MMID in the current generation) but we can't
* exit the loop early, since we need to ensure that all copies
* of the old MMID are updated to reflect the mm. Failure to do
* so could result in us missing the reserved MMID in a future
* generation.
*/
hit = false;
for_each_possible_cpu(cpu) {
if (per_cpu(reserved_mmids, cpu) == mmid) {
hit = true;
per_cpu(reserved_mmids, cpu) = newmmid;
}
}
return hit;
}
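/*
* Allocate an MMID for mm (called with cpu_mmid_lock held). Prefer to keep
* the MMID the mm already had, either because it was live across the last
* rollover or because it is still free in the bitmap; otherwise take the
* next free bit, and if the space is exhausted bump the global version,
* reserve the MMIDs currently running on each CPU and mark every TLB as
* needing a flush.
*/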
static u64 get_new_mmid(struct mm_struct *mm)
{
static u32 cur_idx = MMID_KERNEL_WIRED + 1;
u64 mmid, version, mmid_mask;
mmid = cpu_context(0, mm);
version = atomic64_read(&mmid_version);
mmid_mask = cpu_asid_mask(&boot_cpu_data);
if (!asid_versions_eq(0, mmid, 0)) {
u64 newmmid = version | (mmid & mmid_mask);
/*
* If our current MMID was active during a rollover, we
* can continue to use it and this was just a false alarm.
*/
if (check_update_reserved_mmid(mmid, newmmid)) {
mmid = newmmid;
goto set_context;
}
/*
* We had a valid MMID in a previous life, so try to re-use
* it if possible.
*/
if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
mmid = newmmid;
goto set_context;
}
}
/* Allocate a free MMID */
mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
if (mmid != num_mmids)
goto reserve_mmid;
/* We're out of MMIDs, so increment the global version */
version = atomic64_add_return_relaxed(asid_first_version(0),
&mmid_version);
/* Note currently active MMIDs & mark TLBs as requiring flushes */
flush_context();
/* We have more MMIDs than CPUs, so this will always succeed */
mmid = find_first_zero_bit(mmid_map, num_mmids);
reserve_mmid:
__set_bit(mmid, mmid_map);
cur_idx = mmid;
mmid |= version;
set_context:
set_cpu_context(0, mm, mmid);
return mmid;
}
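/*
* Worked example with hypothetical field widths: if the MMID field is
* 16 bits wide, cpu_asid_mask() is 0xffff and asid_first_version() is
* 0x10000, so a context value of 0x30042 means "MMID 0x42, generation
* 3". After a rollover the same MMID may be handed out again under
* generation 4 as 0x40042, provided bit 0x42 is still clear in mmid_map.
*/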
void check_switch_mmu_context(struct mm_struct *mm)
{
unsigned int cpu = smp_processor_id();
u64 ctx, old_active_mmid;
unsigned long flags;
if (!cpu_has_mmid) {
check_mmu_context(mm);
write_c0_entryhi(cpu_asid(cpu, mm));
goto setup_pgd;
}
/*
* MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
* unnecessary.
*
* The memory ordering here is subtle. If our active_mmids is non-zero
* and the MMID matches the current version, then we update the CPU's
* asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
* means that either:
*
* - We get a zero back from the cmpxchg and end up waiting on
* cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises
* with the rollover and so we are forced to see the updated
* generation.
*
* - We get a valid MMID back from the cmpxchg, which means the
* relaxed xchg in flush_context will treat us as reserved
* because atomic RmWs are totally ordered for a given location.
*/
ctx = cpu_context(cpu, mm);
old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
if (!old_active_mmid ||
!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
!cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
raw_spin_lock_irqsave(&cpu_mmid_lock, flags);
ctx = cpu_context(cpu, mm);
if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
ctx = get_new_mmid(mm);
WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
}
/*
* Invalidate the local TLB if needed. Note that we must only clear our
* bit in tlb_flush_pending after this is complete, so that the
* cpu_has_shared_ftlb_entries case below isn't misled.
*/
if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
if (cpu_has_vtag_icache)
flush_icache_all();
local_flush_tlb_all();
cpumask_clear_cpu(cpu, &tlb_flush_pending);
}
write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));
/*
* If this CPU shares FTLB entries with its siblings and one or more of
* those siblings hasn't yet invalidated its TLB following a version
* increase then we need to invalidate any TLB entries for our MMID
* that we might otherwise pick up from a sibling.
*
* We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
* CONFIG_SMP=n kernels.
*/
#ifdef CONFIG_SMP
if (cpu_has_shared_ftlb_entries &&
cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
/* Ensure we operate on the new MMID */
mtc0_tlbw_hazard();
/*
* Invalidate all TLB entries associated with the new
* MMID, and wait for the invalidation to complete.
*/
ginvt_mmid();
sync_ginv();
}
#endif
setup_pgd:
TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);
static int mmid_init(void)
{
if (!cpu_has_mmid)
return 0;
/*
* Expect allocation after rollover to fail if we don't have at least
* one more MMID than CPUs.
*/
num_mmids = asid_first_version(0);
WARN_ON(num_mmids <= num_possible_cpus());
atomic64_set(&mmid_version, asid_first_version(0));
mmid_map = bitmap_zalloc(num_mmids, GFP_KERNEL);
if (!mmid_map)
panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);
/* Reserve an MMID for kmap/wired entries */
__set_bit(MMID_KERNEL_WIRED, mmid_map);
pr_info("MMID allocator initialised with %u entries\n", num_mmids);
return 0;
}
early_initcall(mmid_init);
| linux-master | arch/mips/mm/context.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
unsigned long highstart_pfn, highend_pfn;
void kmap_flush_tlb(unsigned long addr)
{
flush_tlb_one(addr);
}
EXPORT_SYMBOL(kmap_flush_tlb);
| linux-master | arch/mips/mm/highmem.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995 - 2000 by Ralf Baechle
*/
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/highmem.h> /* For VMALLOC_END */
#include <linux/kdebug.h>
int show_unhandled_signals = 1;
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
static void __do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long address)
{
struct vm_area_struct * vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
const int field = sizeof(unsigned long) * 2;
int si_code;
vm_fault_t fault;
unsigned int flags = FAULT_FLAG_DEFAULT;
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
#if 0
printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
current->comm, current->pid, field, address, write,
field, regs->cp0_epc);
#endif
#ifdef CONFIG_KPROBES
/*
* This is to notify the fault handler of the kprobes.
*/
if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
return;
#endif
si_code = SEGV_MAPERR;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif
if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
if (unlikely(address >= MODULE_START && address < MODULE_END))
goto VMALLOC_FAULT_TARGET;
#endif
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (faulthandler_disabled() || !mm)
goto bad_area_nosemaphore;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
goto bad_area_nosemaphore;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
si_code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
flags |= FAULT_FLAG_WRITE;
} else {
if (cpu_has_rixi) {
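/*
* With the RIXI (Read Inhibit / Execute Inhibit) page protection
* bits, a "read" fault may really be an attempted instruction
* fetch from a no-exec mapping (checked first) or a data read
* from a no-read mapping.
*/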
if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
raw_smp_processor_id(),
current->comm, current->pid,
field, address, write,
field, regs->cp0_epc);
#endif
goto bad_area;
}
if (!(vma->vm_flags & VM_READ) &&
exception_epc(regs) != address) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
raw_smp_processor_id(),
current->comm, current->pid,
field, address, write,
field, regs->cp0_epc);
#endif
goto bad_area;
}
} else {
if (unlikely(!vma_is_accessible(vma)))
goto bad_area;
}
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(vma, address, flags, regs);
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
goto no_context;
return;
}
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED)
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
/*
* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
goto retry;
}
mmap_read_unlock(mm);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
tsk->thread.cp0_badvaddr = address;
tsk->thread.error_code = write;
if (show_unhandled_signals &&
unhandled_signal(tsk, SIGSEGV) &&
__ratelimit(&ratelimit_state)) {
pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address);
pr_info("epc = %0*lx in", field,
(unsigned long) regs->cp0_epc);
print_vma_addr(KERN_CONT " ", regs->cp0_epc);
pr_cont("\n");
pr_info("ra = %0*lx in", field,
(unsigned long) regs->regs[31]);
print_vma_addr(KERN_CONT " ", regs->regs[31]);
pr_cont("\n");
}
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
return;
}
no_context:
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = address;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
"virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
raw_smp_processor_id(), field, address, field, regs->cp0_epc,
field, regs->regs[31]);
die("Oops", regs);
out_of_memory:
/*
* We ran out of memory, call the OOM killer, and return to userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
#if 0
printk("do_page_fault() #3: sending SIGBUS to %s for "
"invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address,
field, (unsigned long) regs->cp0_epc,
field, (unsigned long) regs->regs[31]);
#endif
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
tsk->thread.cp0_badvaddr = address;
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
return;
#ifndef CONFIG_64BIT
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
pgd_k = init_mm.pgd + offset;
if (!pgd_present(*pgd_k))
goto no_context;
set_pgd(pgd, *pgd_k);
p4d = p4d_offset(pgd, address);
p4d_k = p4d_offset(pgd_k, address);
if (!p4d_present(*p4d_k))
goto no_context;
pud = pud_offset(p4d, address);
pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
goto no_context;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
goto no_context;
set_pmd(pmd, *pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
return;
}
#endif
}
NOKPROBE_SYMBOL(__do_page_fault);
asmlinkage void do_page_fault(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
enum ctx_state prev_state;
prev_state = exception_enter();
__do_page_fault(regs, write, address);
exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_page_fault);
| linux-master | arch/mips/mm/fault.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005-2007 Cavium Networks
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/octeon/octeon.h>
unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);
/*
* Octeon automatically flushes the dcache on tlb changes, so
* from Linux's viewpoint it acts much like a physically
* tagged cache. No flushing is needed
*
*/
static void octeon_flush_data_cache_page(unsigned long addr)
{
/* Nothing to do */
}
static inline void octeon_local_flush_icache(void)
{
asm volatile ("synci 0($0)");
}
/*
* Flush local I-cache for the specified range.
*/
static void local_octeon_flush_icache_range(unsigned long start,
unsigned long end)
{
octeon_local_flush_icache();
}
/**
* octeon_flush_icache_all_cores - Flush caches as necessary for all cores
* affected by a vma. If no vma is supplied, all cores are flushed.
*
* @vma: VMA to flush or NULL to flush all icaches.
*/
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
int cpu;
cpumask_t mask;
#endif
mb();
octeon_local_flush_icache();
#ifdef CONFIG_SMP
preempt_disable();
cpu = smp_processor_id();
/*
* If we have a vma structure, we only need to worry about
* cores it has been used on
*/
if (vma)
mask = *mm_cpumask(vma->vm_mm);
else
mask = *cpu_online_mask;
cpumask_clear_cpu(cpu, &mask);
#ifdef CONFIG_CAVIUM_OCTEON_SOC
for_each_cpu(cpu, &mask)
octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
#else
smp_call_function_many(&mask, (smp_call_func_t)octeon_local_flush_icache,
NULL, 1);
#endif
preempt_enable();
#endif
}
/*
* Called to flush the icache on all cores
*/
static void octeon_flush_icache_all(void)
{
octeon_flush_icache_all_cores(NULL);
}
/**
* octeon_flush_cache_mm - flush all memory associated with a memory context.
*
* @mm: Memory context to flush
*/
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
/*
* According to the R4K version of this file, CPUs without
* dcache aliases don't need to do anything here
*/
}
/*
* Flush a range of kernel addresses out of the icache
*
*/
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
octeon_flush_icache_all_cores(NULL);
}
/**
* octeon_flush_cache_range - Flush a range out of a vma
*
* @vma: VMA to flush
* @start: beginning address for flush
* @end: ending address for flush
*/
static void octeon_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (vma->vm_flags & VM_EXEC)
octeon_flush_icache_all_cores(vma);
}
/**
* octeon_flush_cache_page - Flush a specific page of a vma
*
* @vma: VMA to flush page for
* @page: Page to flush
* @pfn: Page frame number
*/
static void octeon_flush_cache_page(struct vm_area_struct *vma,
unsigned long page, unsigned long pfn)
{
if (vma->vm_flags & VM_EXEC)
octeon_flush_icache_all_cores(vma);
}
static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
BUG();
}
/*
* Probe Octeon's caches
*
*/
static void probe_octeon(void)
{
unsigned long icache_size;
unsigned long dcache_size;
unsigned int config1;
struct cpuinfo_mips *c = &current_cpu_data;
int cputype = current_cpu_type();
config1 = read_c0_config1();
switch (cputype) {
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
c->icache.linesz = 2 << ((config1 >> 19) & 7);
c->icache.sets = 64 << ((config1 >> 22) & 7);
c->icache.ways = 1 + ((config1 >> 16) & 7);
c->icache.flags |= MIPS_CACHE_VTAG;
icache_size =
c->icache.sets * c->icache.ways * c->icache.linesz;
c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
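/*
* The icache geometry above comes from the standard Config1
* IL/IS/IA fields. Hypothetical example: IL=5, IS=1, IA=3 decodes
* to 64-byte lines, 128 sets and 4 ways, i.e. a 32KB icache with
* waybit 13.
*/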
c->dcache.linesz = 128;
if (cputype == CPU_CAVIUM_OCTEON_PLUS)
c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
else
c->dcache.sets = 1; /* CN3XXX has one Dcache set */
c->dcache.ways = 64;
dcache_size =
c->dcache.sets * c->dcache.ways * c->dcache.linesz;
c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
c->options |= MIPS_CPU_PREFETCH;
break;
case CPU_CAVIUM_OCTEON2:
c->icache.linesz = 2 << ((config1 >> 19) & 7);
c->icache.sets = 8;
c->icache.ways = 37;
c->icache.flags |= MIPS_CACHE_VTAG;
icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
c->dcache.linesz = 128;
c->dcache.ways = 32;
c->dcache.sets = 8;
dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
c->options |= MIPS_CPU_PREFETCH;
break;
case CPU_CAVIUM_OCTEON3:
c->icache.linesz = 128;
c->icache.sets = 16;
c->icache.ways = 39;
c->icache.flags |= MIPS_CACHE_VTAG;
icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
c->dcache.linesz = 128;
c->dcache.ways = 32;
c->dcache.sets = 8;
dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
c->options |= MIPS_CPU_PREFETCH;
break;
default:
panic("Unsupported Cavium Networks CPU type");
break;
}
/* compute a couple of other cache variables */
c->icache.waysize = icache_size / c->icache.ways;
c->dcache.waysize = dcache_size / c->dcache.ways;
c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
if (smp_processor_id() == 0) {
pr_info("Primary instruction cache %ldkB, %s, %d way, "
"%d sets, linesize %d bytes.\n",
icache_size >> 10,
cpu_has_vtag_icache ?
"virtually tagged" : "physically tagged",
c->icache.ways, c->icache.sets, c->icache.linesz);
pr_info("Primary data cache %ldkB, %d-way, %d sets, "
"linesize %d bytes.\n",
dcache_size >> 10, c->dcache.ways,
c->dcache.sets, c->dcache.linesz);
}
}
static void octeon_cache_error_setup(void)
{
extern char except_vec2_octeon;
set_handler(0x100, &except_vec2_octeon, 0x80);
}
/*
* Setup the Octeon cache flush routines
*
*/
void octeon_cache_init(void)
{
probe_octeon();
shm_align_mask = PAGE_SIZE - 1;
flush_cache_all = octeon_flush_icache_all;
__flush_cache_all = octeon_flush_icache_all;
flush_cache_mm = octeon_flush_cache_mm;
flush_cache_page = octeon_flush_cache_page;
flush_cache_range = octeon_flush_cache_range;
flush_icache_all = octeon_flush_icache_all;
flush_data_cache_page = octeon_flush_data_cache_page;
flush_icache_range = octeon_flush_icache_range;
local_flush_icache_range = local_octeon_flush_icache_range;
__flush_icache_user_range = octeon_flush_icache_range;
__local_flush_icache_user_range = local_octeon_flush_icache_range;
__flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;
build_clear_page();
build_copy_page();
board_cache_error_setup = octeon_cache_error_setup;
}
/*
* Handle a cache error exception
*/
static RAW_NOTIFIER_HEAD(co_cache_error_chain);
int register_co_cache_error_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);
int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
static void co_cache_error_call_notifiers(unsigned long val)
{
int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
u64 dcache_err;
unsigned long coreid = cvmx_get_core_num();
u64 icache_err = read_octeon_c0_icacheerr();
if (val) {
dcache_err = cache_err_dcache[coreid];
cache_err_dcache[coreid] = 0;
} else {
dcache_err = read_octeon_c0_dcacheerr();
}
pr_err("Core%lu: Cache error exception:\n", coreid);
pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
if (icache_err & 1) {
pr_err("CacheErr (Icache) == %llx\n",
(unsigned long long)icache_err);
write_octeon_c0_icacheerr(0);
}
if (dcache_err & 1) {
pr_err("CacheErr (Dcache) == %llx\n",
(unsigned long long)dcache_err);
}
}
}
/*
* Called when the exception is recoverable
*/
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
co_cache_error_call_notifiers(0);
}
/*
* Called when the exception is not recoverable
*/
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
co_cache_error_call_notifiers(1);
panic("Can't handle cache error: nested exception");
}
| linux-master | arch/mips/mm/c-octeon.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/io.h>
#include <ioremap.h>
void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
unsigned long prot_val)
{
unsigned long flags = prot_val & _CACHE_MASK;
u64 base = (flags == _CACHE_UNCACHED ? IO_BASE : UNCAC_BASE);
void __iomem *addr;
addr = plat_ioremap(offset, size, flags);
if (!addr)
addr = (void __iomem *)(unsigned long)(base + offset);
return addr;
}
EXPORT_SYMBOL(ioremap_prot);
void iounmap(const volatile void __iomem *addr)
{
plat_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
| linux-master | arch/mips/mm/ioremap64.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1997, 2001 Ralf Baechle ([email protected]),
* derived from r4xx0.c by David S. Miller ([email protected]).
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/mipsregs.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
/* Secondary cache size in bytes, if present. */
static unsigned long scache_size;
#define SC_LINE 32
#define SC_PAGE (128*SC_LINE)
static inline void blast_r5000_scache(void)
{
unsigned long start = INDEX_BASE;
unsigned long end = start + scache_size;
while(start < end) {
cache_op(R5K_Page_Invalidate_S, start);
start += SC_PAGE;
}
}
static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
{
unsigned long end, a;
/* Catch bad driver code */
BUG_ON(size == 0);
if (size >= scache_size) {
blast_r5000_scache();
return;
}
/* On the R5000 secondary cache we cannot
* invalidate less than a page at a time.
* The secondary cache is physically indexed, write-through.
*/
a = addr & ~(SC_PAGE - 1);
end = (addr + size - 1) & ~(SC_PAGE - 1);
while (a <= end) {
cache_op(R5K_Page_Invalidate_S, a);
a += SC_PAGE;
}
}
static void r5k_sc_enable(void)
{
unsigned long flags;
local_irq_save(flags);
set_c0_config(R5K_CONF_SE);
blast_r5000_scache();
local_irq_restore(flags);
}
static void r5k_sc_disable(void)
{
unsigned long flags;
local_irq_save(flags);
blast_r5000_scache();
clear_c0_config(R5K_CONF_SE);
local_irq_restore(flags);
}
static inline int __init r5k_sc_probe(void)
{
unsigned long config = read_c0_config();
if (config & CONF_SC)
return 0;
scache_size = (512 * 1024) << ((config & R5K_CONF_SS) >> 20);
printk("R5000 SCACHE size %ldkB, linesize 32 bytes.\n",
scache_size >> 10);
return 1;
}
static struct bcache_ops r5k_sc_ops = {
.bc_enable = r5k_sc_enable,
.bc_disable = r5k_sc_disable,
.bc_wback_inv = r5k_dma_cache_inv_sc,
.bc_inv = r5k_dma_cache_inv_sc
};
void r5k_sc_init(void)
{
if (r5k_sc_probe()) {
r5k_sc_enable();
bcops = &r5k_sc_ops;
}
}
| linux-master | arch/mips/mm/sc-r5k.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sc-ip22.c: Indy cache management functions.
*
* Copyright (C) 1997, 2001 Ralf Baechle ([email protected]),
* derived from r4xx0.c by David S. Miller ([email protected]).
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/bcache.h>
#include <asm/page.h>
#include <asm/bootinfo.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/mc.h>
/* Secondary cache size in bytes, if present. */
static unsigned long scache_size;
#undef DEBUG_CACHE
#define SC_SIZE 0x00080000
#define SC_LINE 32
#define CI_MASK (SC_SIZE - SC_LINE)
#define SC_INDEX(n) ((n) & CI_MASK)
static inline void indy_sc_wipe(unsigned long first, unsigned long last)
{
unsigned long tmp;
__asm__ __volatile__(
" .set push # indy_sc_wipe \n"
" .set noreorder \n"
" .set mips3 \n"
" .set noat \n"
" mfc0 %2, $12 \n"
" li $1, 0x80 # Go 64 bit \n"
" mtc0 $1, $12 \n"
" \n"
" # \n"
" # Open code a dli $1, 0x9000000080000000 \n"
" # \n"
" # Required because binutils 2.25 will happily accept \n"
" # 64 bit instructions in .set mips3 mode but puke on \n"
" # 64 bit constants when generating 32 bit ELF \n"
" # \n"
" lui $1,0x9000 \n"
" dsll $1,$1,0x10 \n"
" ori $1,$1,0x8000 \n"
" dsll $1,$1,0x10 \n"
" \n"
" or %0, $1 # first line to flush \n"
" or %1, $1 # last line to flush \n"
" .set at \n"
" \n"
"1: sw $0, 0(%0) \n"
" bne %0, %1, 1b \n"
" daddu %0, 32 \n"
" \n"
" mtc0 %2, $12 # Back to 32 bit \n"
" nop # pipeline hazard \n"
" nop \n"
" nop \n"
" nop \n"
" .set pop \n"
: "=r" (first), "=r" (last), "=&r" (tmp)
: "0" (first), "1" (last));
}
static void indy_sc_wback_invalidate(unsigned long addr, unsigned long size)
{
unsigned long first_line, last_line;
unsigned long flags;
#ifdef DEBUG_CACHE
printk("indy_sc_wback_invalidate[%08lx,%08lx]", addr, size);
#endif
/* Catch bad driver code */
BUG_ON(size == 0);
/* Which lines to flush? */
first_line = SC_INDEX(addr);
last_line = SC_INDEX(addr + size - 1);
local_irq_save(flags);
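/*
* SC_INDEX() folds addresses into the 512KB index space, so the
* requested range may wrap past the top of the cache: the common case
* needs a single wipe, while a wrapped range needs one wipe up to the
* last line and another from line 0.
*/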
if (first_line <= last_line) {
indy_sc_wipe(first_line, last_line);
goto out;
}
indy_sc_wipe(first_line, SC_SIZE - SC_LINE);
indy_sc_wipe(0, last_line);
out:
local_irq_restore(flags);
}
static void indy_sc_enable(void)
{
unsigned long addr, tmp1, tmp2;
/* This is really cool... */
#ifdef DEBUG_CACHE
printk("Enabling R4600 SCACHE\n");
#endif
__asm__ __volatile__(
".set\tpush\n\t"
".set\tnoreorder\n\t"
".set\tmips3\n\t"
"mfc0\t%2, $12\n\t"
"nop; nop; nop; nop;\n\t"
"li\t%1, 0x80\n\t"
"mtc0\t%1, $12\n\t"
"nop; nop; nop; nop;\n\t"
"li\t%0, 0x1\n\t"
"dsll\t%0, 31\n\t"
"lui\t%1, 0x9000\n\t"
"dsll32\t%1, 0\n\t"
"or\t%0, %1, %0\n\t"
"sb\t$0, 0(%0)\n\t"
"mtc0\t$0, $12\n\t"
"nop; nop; nop; nop;\n\t"
"mtc0\t%2, $12\n\t"
"nop; nop; nop; nop;\n\t"
".set\tpop"
: "=r" (tmp1), "=r" (tmp2), "=r" (addr));
}
static void indy_sc_disable(void)
{
unsigned long tmp1, tmp2, tmp3;
#ifdef DEBUG_CACHE
printk("Disabling R4600 SCACHE\n");
#endif
__asm__ __volatile__(
".set\tpush\n\t"
".set\tnoreorder\n\t"
".set\tmips3\n\t"
"li\t%0, 0x1\n\t"
"dsll\t%0, 31\n\t"
"lui\t%1, 0x9000\n\t"
"dsll32\t%1, 0\n\t"
"or\t%0, %1, %0\n\t"
"mfc0\t%2, $12\n\t"
"nop; nop; nop; nop\n\t"
"li\t%1, 0x80\n\t"
"mtc0\t%1, $12\n\t"
"nop; nop; nop; nop\n\t"
"sh\t$0, 0(%0)\n\t"
"mtc0\t$0, $12\n\t"
"nop; nop; nop; nop\n\t"
"mtc0\t%2, $12\n\t"
"nop; nop; nop; nop\n\t"
".set\tpop"
: "=r" (tmp1), "=r" (tmp2), "=r" (tmp3));
}
static inline int __init indy_sc_probe(void)
{
unsigned int size = ip22_eeprom_read(&sgimc->eeprom, 17);
if (size == 0)
return 0;
size <<= PAGE_SHIFT;
printk(KERN_INFO "R4600/R5000 SCACHE size %dK, linesize 32 bytes.\n",
size >> 10);
scache_size = size;
return 1;
}
/* XXX Check with wje if the Indy caches can differentiate between
writeback + invalidate and just invalidate. */
static struct bcache_ops indy_sc_ops = {
.bc_enable = indy_sc_enable,
.bc_disable = indy_sc_disable,
.bc_wback_inv = indy_sc_wback_invalidate,
.bc_inv = indy_sc_wback_invalidate
};
void indy_sc_init(void)
{
if (indy_sc_probe()) {
indy_sc_enable();
bcops = &indy_sc_ops;
}
}
| linux-master | arch/mips/mm/sc-ip22.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Support code for virtual Ranchu board for MIPS.
*
* Author: Miodrag Dinic <[email protected]>
*/
#include <linux/of_address.h>
#include <linux/types.h>
#include <asm/machine.h>
#include <asm/mipsregs.h>
#include <asm/time.h>
#define GOLDFISH_TIMER_LOW 0x00
#define GOLDFISH_TIMER_HIGH 0x04
static __init u64 read_rtc_time(void __iomem *base)
{
u32 time_low;
u32 time_high;
/*
* Reading the low address latches the high value
* as well, so there is no fear that we may read an
* inaccurate high value.
*/
time_low = readl(base + GOLDFISH_TIMER_LOW);
time_high = readl(base + GOLDFISH_TIMER_HIGH);
return ((u64)time_high << 32) | time_low;
}
static __init unsigned int ranchu_measure_hpt_freq(void)
{
u64 rtc_start, rtc_current, rtc_delta;
unsigned int start, count;
struct device_node *np;
void __iomem *rtc_base;
np = of_find_compatible_node(NULL, NULL, "google,goldfish-rtc");
if (!np)
panic("%s(): Failed to find 'google,goldfish-rtc' dt node!",
__func__);
rtc_base = of_iomap(np, 0);
of_node_put(np);
if (!rtc_base)
panic("%s(): Failed to ioremap Goldfish RTC base!", __func__);
/*
* Poll the nanosecond resolution RTC for one
* second to calibrate the CPU frequency.
*/
rtc_start = read_rtc_time(rtc_base);
start = read_c0_count();
do {
rtc_current = read_rtc_time(rtc_base);
rtc_delta = rtc_current - rtc_start;
} while (rtc_delta < NSEC_PER_SEC);
count = read_c0_count() - start;
/*
* Make sure the frequency will be a round number.
* Without this correction, the returned value may vary
* between subsequent emulation executions.
*
* TODO: Set this value using device tree.
*/
count += 5000;
count -= count % 10000;
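/*
* The two lines above round to the nearest multiple of 10000; e.g. a
* measured delta of 99996875 counts is reported as 100000000, i.e.
* 100 MHz.
*/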
iounmap(rtc_base);
return count;
}
static const struct of_device_id ranchu_of_match[] __initconst = {
{
.compatible = "mti,ranchu",
},
{}
};
MIPS_MACHINE(ranchu) = {
.matches = ranchu_of_match,
.measure_hpt_freq = ranchu_measure_hpt_freq,
};
| linux-master | arch/mips/generic/board-ranchu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#define pr_fmt(fmt) "yamon-dt: " fmt
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/printk.h>
#include <asm/fw/fw.h>
#include <asm/yamon-dt.h>
#define MAX_MEM_ARRAY_ENTRIES 2
__init int yamon_dt_append_cmdline(void *fdt)
{
int err, chosen_off;
/* find or add chosen node */
chosen_off = fdt_path_offset(fdt, "/chosen");
if (chosen_off == -FDT_ERR_NOTFOUND)
chosen_off = fdt_add_subnode(fdt, 0, "chosen");
if (chosen_off < 0) {
pr_err("Unable to find or add DT chosen node: %d\n",
chosen_off);
return chosen_off;
}
err = fdt_setprop_string(fdt, chosen_off, "bootargs", fw_getcmdline());
if (err) {
pr_err("Unable to set bootargs property: %d\n", err);
return err;
}
return 0;
}
static unsigned int __init gen_fdt_mem_array(
const struct yamon_mem_region *regions,
__be32 *mem_array,
unsigned int max_entries,
unsigned long memsize)
{
const struct yamon_mem_region *mr;
unsigned long size;
unsigned int entries = 0;
for (mr = regions; mr->size && memsize; ++mr) {
if (entries >= max_entries) {
pr_warn("Number of regions exceeds max %u\n",
max_entries);
break;
}
/* How much of the remaining RAM fits in the next region? */
size = min_t(unsigned long, memsize, mr->size);
memsize -= size;
/* Emit a memory region */
*(mem_array++) = cpu_to_be32(mr->start);
*(mem_array++) = cpu_to_be32(size);
++entries;
/* Discard the next mr->discard bytes */
memsize -= min_t(unsigned long, memsize, mr->discard);
}
return entries;
}
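/*
* Hypothetical example: with a region table of
*   { { .start = 0, .size = SZ_256M, .discard = SZ_256M },
*     { .start = SZ_512M, .size = SZ_1G + SZ_512M }, {} }
* and a memsize of 1GB, this emits <0x0 0x10000000> <0x20000000 0x20000000>:
* 256MB below the I/O hole, 256MB discarded behind it, and the remaining
* 512MB placed at the 512MB mark.
*/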
__init int yamon_dt_append_memory(void *fdt,
const struct yamon_mem_region *regions)
{
unsigned long phys_memsize = 0, memsize;
__be32 mem_array[2 * MAX_MEM_ARRAY_ENTRIES];
unsigned int mem_entries;
int i, err, mem_off;
char *var, param_name[10], *var_names[] = {
"ememsize", "memsize",
};
/* find memory size from the bootloader environment */
for (i = 0; i < ARRAY_SIZE(var_names); i++) {
var = fw_getenv(var_names[i]);
if (!var)
continue;
err = kstrtoul(var, 0, &phys_memsize);
if (!err)
break;
pr_warn("Failed to read the '%s' env variable '%s'\n",
var_names[i], var);
}
if (!phys_memsize) {
pr_warn("The bootloader didn't provide memsize: defaulting to 32MB\n");
phys_memsize = 32 << 20;
}
/* default to using all available RAM */
memsize = phys_memsize;
/* allow the user to override the usable memory */
for (i = 0; i < ARRAY_SIZE(var_names); i++) {
snprintf(param_name, sizeof(param_name), "%s=", var_names[i]);
var = strstr(arcs_cmdline, param_name);
if (!var)
continue;
memsize = memparse(var + strlen(param_name), NULL);
}
/* if the user says there's more RAM than we thought, believe them */
phys_memsize = max_t(unsigned long, phys_memsize, memsize);
/* find or add a memory node */
mem_off = fdt_path_offset(fdt, "/memory");
if (mem_off == -FDT_ERR_NOTFOUND)
mem_off = fdt_add_subnode(fdt, 0, "memory");
if (mem_off < 0) {
pr_err("Unable to find or add memory DT node: %d\n", mem_off);
return mem_off;
}
err = fdt_setprop_string(fdt, mem_off, "device_type", "memory");
if (err) {
pr_err("Unable to set memory node device_type: %d\n", err);
return err;
}
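/*
* Note that "reg" below covers all RAM reported by the bootloader
* (phys_memsize), while "linux,usable-memory" may be a smaller subset
* if the user passed an (e)memsize= command line override.
*/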
mem_entries = gen_fdt_mem_array(regions, mem_array,
MAX_MEM_ARRAY_ENTRIES, phys_memsize);
err = fdt_setprop(fdt, mem_off, "reg",
mem_array, mem_entries * 2 * sizeof(mem_array[0]));
if (err) {
pr_err("Unable to set memory regs property: %d\n", err);
return err;
}
mem_entries = gen_fdt_mem_array(regions, mem_array,
MAX_MEM_ARRAY_ENTRIES, memsize);
err = fdt_setprop(fdt, mem_off, "linux,usable-memory",
mem_array, mem_entries * 2 * sizeof(mem_array[0]));
if (err) {
pr_err("Unable to set linux,usable-memory property: %d\n", err);
return err;
}
return 0;
}
__init int yamon_dt_serial_config(void *fdt)
{
const char *yamontty, *mode_var;
char mode_var_name[9], path[20], parity;
unsigned int uart, baud, stop_bits;
bool hw_flow;
int chosen_off, err;
yamontty = fw_getenv("yamontty");
if (!yamontty || !strcmp(yamontty, "tty0")) {
uart = 0;
} else if (!strcmp(yamontty, "tty1")) {
uart = 1;
} else {
pr_warn("yamontty environment variable '%s' invalid\n",
yamontty);
uart = 0;
}
baud = stop_bits = 0;
parity = 0;
hw_flow = false;
snprintf(mode_var_name, sizeof(mode_var_name), "modetty%u", uart);
mode_var = fw_getenv(mode_var_name);
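/*
* Judging from the parser below, modettyN is expected to look like
* "<baud>,<parity>,<stop bits>[,hw]", e.g. "38400,n,8,hw"; defaults of
* 38400 baud, no parity, 8 stop bits and no hardware flow control are
* applied further down for anything missing or unrecognised.
*/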
if (mode_var) {
while (mode_var[0] >= '0' && mode_var[0] <= '9') {
baud *= 10;
baud += mode_var[0] - '0';
mode_var++;
}
if (mode_var[0] == ',')
mode_var++;
if (mode_var[0])
parity = mode_var[0];
if (mode_var[0] == ',')
mode_var++;
if (mode_var[0])
stop_bits = mode_var[0] - '0';
if (mode_var[0] == ',')
mode_var++;
if (!strcmp(mode_var, "hw"))
hw_flow = true;
}
if (!baud)
baud = 38400;
if (parity != 'e' && parity != 'n' && parity != 'o')
parity = 'n';
if (stop_bits != 7 && stop_bits != 8)
stop_bits = 8;
WARN_ON(snprintf(path, sizeof(path), "serial%u:%u%c%u%s",
uart, baud, parity, stop_bits,
hw_flow ? "r" : "") >= sizeof(path));
/* find or add chosen node */
chosen_off = fdt_path_offset(fdt, "/chosen");
if (chosen_off == -FDT_ERR_NOTFOUND)
chosen_off = fdt_add_subnode(fdt, 0, "chosen");
if (chosen_off < 0) {
pr_err("Unable to find or add DT chosen node: %d\n",
chosen_off);
return chosen_off;
}
err = fdt_setprop_string(fdt, chosen_off, "stdout-path", path);
if (err) {
pr_err("Unable to set stdout-path property: %d\n", err);
return err;
}
return 0;
}
| linux-master | arch/mips/generic/yamon-dt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>
#include <asm/irq.h>
#include <asm/mips-cps.h>
#include <asm/time.h>
int get_c0_fdc_int(void)
{
int mips_cpu_fdc_irq;
if (mips_gic_present())
mips_cpu_fdc_irq = gic_get_c0_fdc_int();
else if (cpu_has_veic)
panic("Unimplemented!");
else if (cp0_fdc_irq >= 0)
mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
else
mips_cpu_fdc_irq = -1;
return mips_cpu_fdc_irq;
}
int get_c0_perfcount_int(void)
{
int mips_cpu_perf_irq;
if (mips_gic_present())
mips_cpu_perf_irq = gic_get_c0_perfcount_int();
else if (cpu_has_veic)
panic("Unimplemented!");
else if (cp0_perfcount_irq >= 0)
mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
else
mips_cpu_perf_irq = -1;
return mips_cpu_perf_irq;
}
unsigned int get_c0_compare_int(void)
{
int mips_cpu_timer_irq;
if (mips_gic_present())
mips_cpu_timer_irq = gic_get_c0_compare_int();
else if (cpu_has_veic)
panic("Unimplemented!");
else
mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
return mips_cpu_timer_irq;
}
| linux-master | arch/mips/generic/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/of_clk.h>
#include <linux/of_fdt.h>
#include <asm/bootinfo.h>
#include <asm/fw/fw.h>
#include <asm/irq_cpu.h>
#include <asm/machine.h>
#include <asm/mips-cps.h>
#include <asm/prom.h>
#include <asm/smp-ops.h>
#include <asm/time.h>
static __initconst const void *fdt;
static __initconst const struct mips_machine *mach;
static __initconst const void *mach_match_data;
void __init prom_init(void)
{
plat_get_fdt();
BUG_ON(!fdt);
}
void __init *plat_get_fdt(void)
{
const struct mips_machine *check_mach;
const struct of_device_id *match;
if (fdt)
/* Already set up */
return (void *)fdt;
fdt = (void *)get_fdt();
if (fdt && !fdt_check_header(fdt)) {
/*
* We have been provided with the appropriate device tree for
* the board. Make use of it & search for any machine struct
* based upon the root compatible string.
*/
for_each_mips_machine(check_mach) {
match = mips_machine_is_compatible(check_mach, fdt);
if (match) {
mach = check_mach;
mach_match_data = match->data;
break;
}
}
} else if (IS_ENABLED(CONFIG_LEGACY_BOARDS)) {
/*
* We weren't booted using the UHI boot protocol, but do
* support some number of boards with legacy boot protocols.
* Attempt to find the right one.
*/
for_each_mips_machine(check_mach) {
if (!check_mach->detect)
continue;
if (!check_mach->detect())
continue;
mach = check_mach;
}
/*
* If we don't recognise the machine then we can't continue, so
* die here.
*/
BUG_ON(!mach);
/* Retrieve the machine's FDT */
fdt = mach->fdt;
}
return (void *)fdt;
}
#ifdef CONFIG_RELOCATABLE
void __init plat_fdt_relocated(void *new_location)
{
/*
* reset fdt as the cached value would point to the location
* before relocations happened and update the location argument
* if it was passed using UHI
*/
fdt = NULL;
if (fw_arg0 == -2)
fw_arg1 = (unsigned long)new_location;
}
#endif /* CONFIG_RELOCATABLE */
void __init plat_mem_setup(void)
{
if (mach && mach->fixup_fdt)
fdt = mach->fixup_fdt(fdt, mach_match_data);
fw_init_cmdline();
__dt_setup_arch((void *)fdt);
}
void __init device_tree_init(void)
{
unflatten_and_copy_device_tree();
mips_cpc_probe();
if (!register_cps_smp_ops())
return;
if (!register_vsmp_smp_ops())
return;
register_up_smp_ops();
}
int __init apply_mips_fdt_fixups(void *fdt_out, size_t fdt_out_size,
const void *fdt_in,
const struct mips_fdt_fixup *fixups)
{
int err;
err = fdt_open_into(fdt_in, fdt_out, fdt_out_size);
if (err) {
pr_err("Failed to open FDT\n");
return err;
}
for (; fixups->apply; fixups++) {
err = fixups->apply(fdt_out);
if (err) {
pr_err("Failed to apply FDT fixup \"%s\"\n",
fixups->description);
return err;
}
}
err = fdt_pack(fdt_out);
if (err)
pr_err("Failed to pack FDT\n");
return err;
}
void __init plat_time_init(void)
{
struct device_node *np;
struct clk *clk;
of_clk_init(NULL);
if (!cpu_has_counter) {
mips_hpt_frequency = 0;
} else if (mach && mach->measure_hpt_freq) {
mips_hpt_frequency = mach->measure_hpt_freq();
} else {
np = of_get_cpu_node(0, NULL);
if (!np) {
pr_err("Failed to get CPU node\n");
return;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
return;
}
mips_hpt_frequency = clk_get_rate(clk);
clk_put(clk);
switch (boot_cpu_type()) {
case CPU_20KC:
case CPU_25KF:
/* The counter runs at the CPU clock rate */
break;
default:
/* The counter runs at half the CPU clock rate */
mips_hpt_frequency /= 2;
break;
}
}
timer_probe();
}
void __init arch_init_irq(void)
{
struct device_node *intc_node;
intc_node = of_find_compatible_node(NULL, NULL,
"mti,cpu-interrupt-controller");
if (!cpu_has_veic && !intc_node)
mips_cpu_irq_init();
of_node_put(intc_node);
irqchip_init();
}
| linux-master | arch/mips/generic/init.c |
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Microsemi MIPS SoC support
*
* Copyright (c) 2017 Microsemi Corporation
*/
#include <asm/machine.h>
#include <asm/prom.h>
#define DEVCPU_GCB_CHIP_REGS_CHIP_ID 0x71070000
#define CHIP_ID_PART_ID GENMASK(27, 12)
#define OCELOT_PART_ID (0x7514 << 12)
#define UART_UART 0x70100000
static __init bool ocelot_detect(void)
{
u32 rev;
int idx;
/* Look for the TLB entry set up by redboot before trying to use it */
write_c0_entryhi(DEVCPU_GCB_CHIP_REGS_CHIP_ID);
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
idx = read_c0_index();
if (idx < 0)
return false;
/* A TLB entry exists, lets assume its usable and check the CHIP ID */
rev = __raw_readl((void __iomem *)DEVCPU_GCB_CHIP_REGS_CHIP_ID);
if ((rev & CHIP_ID_PART_ID) != OCELOT_PART_ID)
return false;
/* Copy command line from bootloader early for Initrd detection */
if (fw_arg0 < 10 && (fw_arg1 & 0xFFF00000) == 0x80000000) {
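/*
* Heuristic: fw_arg0/fw_arg1 are treated as an argc/argv pair
* only if the count is small and the argv pointer falls within
* the first megabyte of KSEG0.
*/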
unsigned int prom_argc = fw_arg0;
const char **prom_argv = (const char **)fw_arg1;
if (prom_argc > 1 && strlen(prom_argv[1]) > 0)
/* ignore all built-in args if any f/w args given */
strcpy(arcs_cmdline, prom_argv[1]);
}
return true;
}
static void __init ocelot_earlyprintk_init(void)
{
void __iomem *uart_base;
uart_base = ioremap(UART_UART, 0x20);
setup_8250_early_printk_port((unsigned long)uart_base, 2, 50000);
}
static void __init ocelot_late_init(void)
{
ocelot_earlyprintk_init();
}
static __init const void *ocelot_fixup_fdt(const void *fdt,
const void *match_data)
{
/* This has to be done so late because ioremap needs to work */
late_time_init = ocelot_late_init;
return fdt;
}
extern char __dtb_ocelot_pcb123_begin[];
MIPS_MACHINE(ocelot) = {
.fdt = __dtb_ocelot_pcb123_begin,
.fixup_fdt = ocelot_fixup_fdt,
.detect = ocelot_detect,
};
| linux-master | arch/mips/generic/board-ocelot.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Support for Ingenic SoCs
*
* Copyright (C) 2009-2010, Lars-Peter Clausen <[email protected]>
* Copyright (C) 2011, Maarten ter Huurne <[email protected]>
* Copyright (C) 2020 Paul Cercueil <[email protected]>
*/
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/pm.h>
#include <linux/sizes.h>
#include <linux/suspend.h>
#include <linux/types.h>
#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/machine.h>
#include <asm/reboot.h>
static __init char *ingenic_get_system_type(unsigned long machtype)
{
switch (machtype) {
case MACH_INGENIC_X2100:
return "X2100";
case MACH_INGENIC_X2000H:
return "X2000H";
case MACH_INGENIC_X2000E:
return "X2000E";
case MACH_INGENIC_X2000:
return "X2000";
case MACH_INGENIC_X1830:
return "X1830";
case MACH_INGENIC_X1000E:
return "X1000E";
case MACH_INGENIC_X1000:
return "X1000";
case MACH_INGENIC_JZ4780:
return "JZ4780";
case MACH_INGENIC_JZ4775:
return "JZ4775";
case MACH_INGENIC_JZ4770:
return "JZ4770";
case MACH_INGENIC_JZ4760B:
return "JZ4760B";
case MACH_INGENIC_JZ4760:
return "JZ4760";
case MACH_INGENIC_JZ4755:
return "JZ4755";
case MACH_INGENIC_JZ4750:
return "JZ4750";
case MACH_INGENIC_JZ4725B:
return "JZ4725B";
case MACH_INGENIC_JZ4730:
return "JZ4730";
default:
return "JZ4740";
}
}
#define INGENIC_CGU_BASE 0x10000000
#define JZ4750_CGU_CPCCR_ECS BIT(30)
#define JZ4760_CGU_CPCCR_ECS BIT(31)
static __init void ingenic_force_12M_ext(const void *fdt, unsigned int mask)
{
const __be32 *prop;
unsigned int cpccr;
void __iomem *cgu;
bool use_div;
int offset;
offset = fdt_path_offset(fdt, "/ext");
if (offset < 0)
return;
prop = fdt_getprop(fdt, offset, "clock-frequency", NULL);
if (!prop)
return;
/*
* If the external oscillator is 24 MHz, enable the /2 divider to
* drive it down to 12 MHz, since this is what the hardware can work
* with.
* The 16 MHz cutoff value is arbitrary; setting it to 12 MHz would not
* work as the crystal frequency (as reported in the Device Tree) might
* be slightly above this value.
*/
use_div = be32_to_cpup(prop) >= 16000000;
cgu = ioremap(INGENIC_CGU_BASE, 0x4);
if (!cgu)
return;
cpccr = ioread32(cgu);
if (use_div)
cpccr |= mask;
else
cpccr &= ~mask;
iowrite32(cpccr, cgu);
iounmap(cgu);
}
static __init const void *ingenic_fixup_fdt(const void *fdt, const void *match_data)
{
/*
* Old devicetree files for the qi,lb60 board did not have a /memory
* node. Hardcode the memory info here.
*/
if (!fdt_node_check_compatible(fdt, 0, "qi,lb60") &&
fdt_path_offset(fdt, "/memory") < 0)
early_init_dt_add_memory_arch(0, SZ_32M);
mips_machtype = (unsigned long)match_data;
system_type = ingenic_get_system_type(mips_machtype);
switch (mips_machtype) {
case MACH_INGENIC_JZ4750:
case MACH_INGENIC_JZ4755:
ingenic_force_12M_ext(fdt, JZ4750_CGU_CPCCR_ECS);
break;
case MACH_INGENIC_JZ4760:
ingenic_force_12M_ext(fdt, JZ4760_CGU_CPCCR_ECS);
break;
default:
break;
}
return fdt;
}
static const struct of_device_id ingenic_of_match[] __initconst = {
{ .compatible = "ingenic,jz4730", .data = (void *)MACH_INGENIC_JZ4730 },
{ .compatible = "ingenic,jz4740", .data = (void *)MACH_INGENIC_JZ4740 },
{ .compatible = "ingenic,jz4725b", .data = (void *)MACH_INGENIC_JZ4725B },
{ .compatible = "ingenic,jz4750", .data = (void *)MACH_INGENIC_JZ4750 },
{ .compatible = "ingenic,jz4755", .data = (void *)MACH_INGENIC_JZ4755 },
{ .compatible = "ingenic,jz4760", .data = (void *)MACH_INGENIC_JZ4760 },
{ .compatible = "ingenic,jz4760b", .data = (void *)MACH_INGENIC_JZ4760B },
{ .compatible = "ingenic,jz4770", .data = (void *)MACH_INGENIC_JZ4770 },
{ .compatible = "ingenic,jz4775", .data = (void *)MACH_INGENIC_JZ4775 },
{ .compatible = "ingenic,jz4780", .data = (void *)MACH_INGENIC_JZ4780 },
{ .compatible = "ingenic,x1000", .data = (void *)MACH_INGENIC_X1000 },
{ .compatible = "ingenic,x1000e", .data = (void *)MACH_INGENIC_X1000E },
{ .compatible = "ingenic,x1830", .data = (void *)MACH_INGENIC_X1830 },
{ .compatible = "ingenic,x2000", .data = (void *)MACH_INGENIC_X2000 },
{ .compatible = "ingenic,x2000e", .data = (void *)MACH_INGENIC_X2000E },
{ .compatible = "ingenic,x2000h", .data = (void *)MACH_INGENIC_X2000H },
{ .compatible = "ingenic,x2100", .data = (void *)MACH_INGENIC_X2100 },
{}
};
MIPS_MACHINE(ingenic) = {
.matches = ingenic_of_match,
.fixup_fdt = ingenic_fixup_fdt,
};
static void ingenic_wait_instr(void)
{
__asm__(".set push;\n"
".set mips3;\n"
"wait;\n"
".set pop;\n"
);
}
static void ingenic_halt(void)
{
for (;;)
ingenic_wait_instr();
}
static int ingenic_pm_enter(suspend_state_t state)
{
ingenic_wait_instr();
return 0;
}
static const struct platform_suspend_ops ingenic_pm_ops = {
.valid = suspend_valid_only_mem,
.enter = ingenic_pm_enter,
};
static int __init ingenic_pm_init(void)
{
if (boot_cpu_type() == CPU_XBURST) {
if (IS_ENABLED(CONFIG_PM_SLEEP))
suspend_set_ops(&ingenic_pm_ops);
_machine_halt = ingenic_halt;
}
return 0;
}
late_initcall(ingenic_pm_init);
| linux-master | arch/mips/generic/board-ingenic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#include <linux/of.h>
#include <asm/bootinfo.h>
char *system_type;
const char *get_system_type(void)
{
const char *str;
int err;
if (system_type)
return system_type;
err = of_property_read_string(of_root, "model", &str);
if (!err)
return str;
err = of_property_read_string_index(of_root, "compatible", 0, &str);
if (!err)
return str;
return "Unknown";
}
| linux-master | arch/mips/generic/proc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#define pr_fmt(fmt) "sead3: " fmt
#include <linux/errno.h>
#include <linux/libfdt.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <asm/fw/fw.h>
#include <asm/io.h>
#include <asm/machine.h>
#include <asm/yamon-dt.h>
#define SEAD_CONFIG CKSEG1ADDR(0x1b100110)
#define SEAD_CONFIG_GIC_PRESENT BIT(1)
#define MIPS_REVISION CKSEG1ADDR(0x1fc00010)
#define MIPS_REVISION_MACHINE (0xf << 4)
#define MIPS_REVISION_MACHINE_SEAD3 (0x4 << 4)
/*
* Maximum 384MB RAM at physical address 0, preceding any I/O.
*/
static struct yamon_mem_region mem_regions[] __initdata = {
/* start size */
{ 0, SZ_256M + SZ_128M },
{}
};
static __init bool sead3_detect(void)
{
uint32_t rev;
rev = __raw_readl((void *)MIPS_REVISION);
return (rev & MIPS_REVISION_MACHINE) == MIPS_REVISION_MACHINE_SEAD3;
}
static __init int append_memory(void *fdt)
{
return yamon_dt_append_memory(fdt, mem_regions);
}
static __init int remove_gic(void *fdt)
{
const unsigned int cpu_ehci_int = 2;
const unsigned int cpu_uart_int = 4;
const unsigned int cpu_eth_int = 6;
int gic_off, cpu_off, uart_off, eth_off, ehci_off, err;
uint32_t cfg, cpu_phandle;
/* leave the GIC node intact if a GIC is present */
cfg = __raw_readl((uint32_t *)SEAD_CONFIG);
if (cfg & SEAD_CONFIG_GIC_PRESENT)
return 0;
gic_off = fdt_node_offset_by_compatible(fdt, -1, "mti,gic");
if (gic_off < 0) {
pr_err("unable to find DT GIC node: %d\n", gic_off);
return gic_off;
}
err = fdt_nop_node(fdt, gic_off);
if (err) {
pr_err("unable to nop GIC node\n");
return err;
}
cpu_off = fdt_node_offset_by_compatible(fdt, -1,
"mti,cpu-interrupt-controller");
if (cpu_off < 0) {
pr_err("unable to find CPU intc node: %d\n", cpu_off);
return cpu_off;
}
cpu_phandle = fdt_get_phandle(fdt, cpu_off);
if (!cpu_phandle) {
pr_err("unable to get CPU intc phandle\n");
return -EINVAL;
}
uart_off = fdt_node_offset_by_compatible(fdt, -1, "ns16550a");
while (uart_off >= 0) {
err = fdt_setprop_u32(fdt, uart_off, "interrupt-parent",
cpu_phandle);
if (err) {
pr_warn("unable to set UART interrupt-parent: %d\n",
err);
return err;
}
err = fdt_setprop_u32(fdt, uart_off, "interrupts",
cpu_uart_int);
if (err) {
pr_err("unable to set UART interrupts property: %d\n",
err);
return err;
}
uart_off = fdt_node_offset_by_compatible(fdt, uart_off,
"ns16550a");
}
if (uart_off != -FDT_ERR_NOTFOUND) {
pr_err("error searching for UART DT node: %d\n", uart_off);
return uart_off;
}
eth_off = fdt_node_offset_by_compatible(fdt, -1, "smsc,lan9115");
if (eth_off < 0) {
pr_err("unable to find ethernet DT node: %d\n", eth_off);
return eth_off;
}
err = fdt_setprop_u32(fdt, eth_off, "interrupt-parent", cpu_phandle);
if (err) {
pr_err("unable to set ethernet interrupt-parent: %d\n", err);
return err;
}
err = fdt_setprop_u32(fdt, eth_off, "interrupts", cpu_eth_int);
if (err) {
pr_err("unable to set ethernet interrupts property: %d\n", err);
return err;
}
ehci_off = fdt_node_offset_by_compatible(fdt, -1, "generic-ehci");
if (ehci_off < 0) {
pr_err("unable to find EHCI DT node: %d\n", ehci_off);
return ehci_off;
}
err = fdt_setprop_u32(fdt, ehci_off, "interrupt-parent", cpu_phandle);
if (err) {
pr_err("unable to set EHCI interrupt-parent: %d\n", err);
return err;
}
err = fdt_setprop_u32(fdt, ehci_off, "interrupts", cpu_ehci_int);
if (err) {
pr_err("unable to set EHCI interrupts property: %d\n", err);
return err;
}
return 0;
}
static const struct mips_fdt_fixup sead3_fdt_fixups[] __initconst = {
{ yamon_dt_append_cmdline, "append command line" },
{ append_memory, "append memory" },
{ remove_gic, "remove GIC when not present" },
{ yamon_dt_serial_config, "append serial configuration" },
{ },
};
static __init const void *sead3_fixup_fdt(const void *fdt,
const void *match_data)
{
static unsigned char fdt_buf[16 << 10] __initdata;
int err;
if (fdt_check_header(fdt))
panic("Corrupt DT");
/* if this isn't SEAD3, something went wrong */
BUG_ON(fdt_node_check_compatible(fdt, 0, "mti,sead-3"));
fw_init_cmdline();
err = apply_mips_fdt_fixups(fdt_buf, sizeof(fdt_buf),
fdt, sead3_fdt_fixups);
if (err)
panic("Unable to fixup FDT: %d", err);
return fdt_buf;
}
static __init unsigned int sead3_measure_hpt_freq(void)
{
void __iomem *status_reg = (void __iomem *)0xbf000410;
unsigned int freq, orig, tick = 0;
unsigned long flags;
local_irq_save(flags);
orig = readl(status_reg) & 0x2; /* get original sample */
/* wait for transition */
while ((readl(status_reg) & 0x2) == orig)
;
orig = orig ^ 0x2; /* flip the bit */
write_c0_count(0);
/* wait 1 second (the sampling clock transitions every 10ms) */
while (tick < 100) {
/* wait for transition */
while ((readl(status_reg) & 0x2) == orig)
;
orig = orig ^ 0x2; /* flip the bit */
tick++;
}
freq = read_c0_count();
local_irq_restore(flags);
return freq;
}
extern char __dtb_sead3_begin[];
MIPS_MACHINE(sead3) = {
.fdt = __dtb_sead3_begin,
.detect = sead3_detect,
.fixup_fdt = sead3_fixup_fdt,
.measure_hpt_freq = sead3_measure_hpt_freq,
};
| linux-master | arch/mips/generic/board-sead3.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/sizes.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/fw/fw.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/malta.h>
#include <asm/mips-cps.h>
#include <asm/page.h>
#define ROCIT_REG_BASE 0x1f403000
#define ROCIT_CONFIG_GEN1 (ROCIT_REG_BASE + 0x04)
#define ROCIT_CONFIG_GEN1_MEMMAP_SHIFT 8
#define ROCIT_CONFIG_GEN1_MEMMAP_MASK (0xf << 8)
static unsigned char fdt_buf[16 << 10] __initdata __aligned(8);
/* determined physical memory size, not overridden by command line args */
extern unsigned long physical_memsize;
enum mem_map {
MEM_MAP_V1 = 0,
MEM_MAP_V2,
};
#define MAX_MEM_ARRAY_ENTRIES 2
static __init int malta_scon(void)
{
int scon = MIPS_REVISION_SCONID;
if (scon != MIPS_REVISION_SCON_OTHER)
return scon;
switch (MIPS_REVISION_CORID) {
case MIPS_REVISION_CORID_QED_RM5261:
case MIPS_REVISION_CORID_CORE_LV:
case MIPS_REVISION_CORID_CORE_FPGA:
case MIPS_REVISION_CORID_CORE_FPGAR2:
return MIPS_REVISION_SCON_GT64120;
case MIPS_REVISION_CORID_CORE_EMUL_BON:
case MIPS_REVISION_CORID_BONITO64:
case MIPS_REVISION_CORID_CORE_20K:
return MIPS_REVISION_SCON_BONITO;
case MIPS_REVISION_CORID_CORE_MSC:
case MIPS_REVISION_CORID_CORE_FPGA2:
case MIPS_REVISION_CORID_CORE_24K:
return MIPS_REVISION_SCON_SOCIT;
case MIPS_REVISION_CORID_CORE_FPGA3:
case MIPS_REVISION_CORID_CORE_FPGA4:
case MIPS_REVISION_CORID_CORE_FPGA5:
case MIPS_REVISION_CORID_CORE_EMUL_MSC:
default:
return MIPS_REVISION_SCON_ROCIT;
}
}
static unsigned __init gen_fdt_mem_array(__be32 *mem_array, unsigned long size,
enum mem_map map)
{
unsigned long size_preio;
unsigned entries;
entries = 1;
mem_array[0] = cpu_to_be32(PHYS_OFFSET);
if (IS_ENABLED(CONFIG_EVA)) {
/*
* The current Malta EVA configuration is "special" in that it
* always makes use of addresses in the upper half of the 32 bit
* physical address map, which gives it a contiguous region of
* DDR but limits it to 2GB.
*/
mem_array[1] = cpu_to_be32(size);
goto done;
}
size_preio = min_t(unsigned long, size, SZ_256M);
mem_array[1] = cpu_to_be32(size_preio);
size -= size_preio;
if (!size)
goto done;
if (map == MEM_MAP_V2) {
/*
* We have a flat 32 bit physical memory map with DDR filling
* all 4GB of the memory map, apart from the I/O region which
* obscures 256MB from 0x10000000-0x1fffffff.
*
* Therefore we discard the 256MB behind the I/O region.
*/
if (size <= SZ_256M)
goto done;
size -= SZ_256M;
/* Make use of the memory following the I/O region */
entries++;
mem_array[2] = cpu_to_be32(PHYS_OFFSET + SZ_512M);
mem_array[3] = cpu_to_be32(size);
} else {
/*
* We have a 32 bit physical memory map with a 2GB DDR region
* aliased in the upper & lower halves of it. The I/O region
* obscures 256MB from 0x10000000-0x1fffffff in the low alias
* but the DDR it obscures is accessible via the high alias.
*
* Simply access everything beyond the lowest 256MB of DDR using
* the high alias.
*/
entries++;
mem_array[2] = cpu_to_be32(PHYS_OFFSET + SZ_2G + SZ_256M);
mem_array[3] = cpu_to_be32(size);
}
done:
BUG_ON(entries > MAX_MEM_ARRAY_ENTRIES);
return entries;
}
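/*
* Worked example (v1 memory map, 1GB DDR, no EVA): the array becomes
* <0x0 0x10000000> <0x90000000 0x30000000> - the first 256MB sits below
* the I/O region and the remaining 768MB is reached via the high alias.
*/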
static void __init append_memory(void *fdt, int root_off)
{
__be32 mem_array[2 * MAX_MEM_ARRAY_ENTRIES];
unsigned long memsize;
unsigned mem_entries;
int i, err, mem_off;
enum mem_map mem_map;
u32 config;
char *var, param_name[10], *var_names[] = {
"ememsize", "memsize",
};
/* if a memory node already exists, leave it alone */
mem_off = fdt_path_offset(fdt, "/memory");
if (mem_off >= 0)
return;
/* find memory size from the bootloader environment */
for (i = 0; i < ARRAY_SIZE(var_names); i++) {
var = fw_getenv(var_names[i]);
if (!var)
continue;
err = kstrtoul(var, 0, &physical_memsize);
if (!err)
break;
pr_warn("Failed to read the '%s' env variable '%s'\n",
var_names[i], var);
}
if (!physical_memsize) {
pr_warn("The bootloader didn't provide memsize: defaulting to 32MB\n");
physical_memsize = 32 << 20;
}
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
/*
* SOC-it swaps, or perhaps doesn't swap, when DMA'ing
* the last word of physical memory.
*/
physical_memsize -= PAGE_SIZE;
}
/* default to using all available RAM */
memsize = physical_memsize;
/* allow the user to override the usable memory */
for (i = 0; i < ARRAY_SIZE(var_names); i++) {
snprintf(param_name, sizeof(param_name), "%s=", var_names[i]);
var = strstr(arcs_cmdline, param_name);
if (!var)
continue;
memsize = memparse(var + strlen(param_name), NULL);
}
/* if the user says there's more RAM than we thought, believe them */
physical_memsize = max_t(unsigned long, physical_memsize, memsize);
/* detect the memory map in use */
if (malta_scon() == MIPS_REVISION_SCON_ROCIT) {
/* ROCit has a register indicating the memory map in use */
config = readl((void __iomem *)CKSEG1ADDR(ROCIT_CONFIG_GEN1));
mem_map = config & ROCIT_CONFIG_GEN1_MEMMAP_MASK;
mem_map >>= ROCIT_CONFIG_GEN1_MEMMAP_SHIFT;
} else {
/* if not using ROCit, presume the v1 memory map */
mem_map = MEM_MAP_V1;
}
if (mem_map > MEM_MAP_V2)
panic("Unsupported physical memory map v%u detected",
(unsigned int)mem_map);
/* append memory to the DT */
mem_off = fdt_add_subnode(fdt, root_off, "memory");
if (mem_off < 0)
panic("Unable to add memory node to DT: %d", mem_off);
err = fdt_setprop_string(fdt, mem_off, "device_type", "memory");
if (err)
panic("Unable to set memory node device_type: %d", err);
mem_entries = gen_fdt_mem_array(mem_array, physical_memsize, mem_map);
err = fdt_setprop(fdt, mem_off, "reg", mem_array,
mem_entries * 2 * sizeof(mem_array[0]));
if (err)
panic("Unable to set memory regs property: %d", err);
mem_entries = gen_fdt_mem_array(mem_array, memsize, mem_map);
err = fdt_setprop(fdt, mem_off, "linux,usable-memory", mem_array,
mem_entries * 2 * sizeof(mem_array[0]));
if (err)
panic("Unable to set linux,usable-memory property: %d", err);
}
static void __init remove_gic(void *fdt)
{
int err, gic_off, i8259_off, cpu_off;
void __iomem *biu_base;
uint32_t cpu_phandle, sc_cfg;
/* if we have a CM which reports a GIC is present, leave the DT alone */
err = mips_cm_probe();
if (!err && (read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX))
return;
if (malta_scon() == MIPS_REVISION_SCON_ROCIT) {
/*
* On systems using the RocIT system controller a GIC may be
* present without a CM. Detect whether that is the case.
*/
biu_base = ioremap(MSC01_BIU_REG_BASE,
MSC01_BIU_ADDRSPACE_SZ);
sc_cfg = __raw_readl(biu_base + MSC01_SC_CFG_OFS);
if (sc_cfg & MSC01_SC_CFG_GICPRES_MSK) {
/* enable the GIC at the system controller level */
sc_cfg |= BIT(MSC01_SC_CFG_GICENA_SHF);
__raw_writel(sc_cfg, biu_base + MSC01_SC_CFG_OFS);
return;
}
}
gic_off = fdt_node_offset_by_compatible(fdt, -1, "mti,gic");
if (gic_off < 0) {
pr_warn("malta-dtshim: unable to find DT GIC node: %d\n",
gic_off);
return;
}
err = fdt_nop_node(fdt, gic_off);
if (err)
pr_warn("malta-dtshim: unable to nop GIC node\n");
i8259_off = fdt_node_offset_by_compatible(fdt, -1, "intel,i8259");
if (i8259_off < 0) {
pr_warn("malta-dtshim: unable to find DT i8259 node: %d\n",
i8259_off);
return;
}
cpu_off = fdt_node_offset_by_compatible(fdt, -1,
"mti,cpu-interrupt-controller");
if (cpu_off < 0) {
pr_warn("malta-dtshim: unable to find CPU intc node: %d\n",
cpu_off);
return;
}
cpu_phandle = fdt_get_phandle(fdt, cpu_off);
if (!cpu_phandle) {
pr_warn("malta-dtshim: unable to get CPU intc phandle\n");
return;
}
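/*
 * Without a GIC the i8259 output is cascaded into CPU hardware interrupt
 * pin 2, so re-parent it onto the CPU interrupt controller accordingly.
 */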
err = fdt_setprop_u32(fdt, i8259_off, "interrupt-parent", cpu_phandle);
if (err) {
pr_warn("malta-dtshim: unable to set i8259 interrupt-parent: %d\n",
err);
return;
}
err = fdt_setprop_u32(fdt, i8259_off, "interrupts", 2);
if (err) {
pr_warn("malta-dtshim: unable to set i8259 interrupts: %d\n",
err);
return;
}
}
void __init *malta_dt_shim(void *fdt)
{
int root_off, len, err;
const char *compat;
if (fdt_check_header(fdt))
panic("Corrupt DT");
err = fdt_open_into(fdt, fdt_buf, sizeof(fdt_buf));
if (err)
panic("Unable to open FDT: %d", err);
root_off = fdt_path_offset(fdt_buf, "/");
if (root_off < 0)
panic("No / node in DT");
compat = fdt_getprop(fdt_buf, root_off, "compatible", &len);
if (!compat)
panic("No root compatible property in DT: %d", len);
/* if this isn't Malta, leave the DT alone */
if (strncmp(compat, "mti,malta", len))
return fdt;
append_memory(fdt_buf, root_off);
remove_gic(fdt_buf);
err = fdt_pack(fdt_buf);
if (err)
panic("Unable to pack FDT: %d\n", err);
return fdt_buf;
}
| linux-master | arch/mips/mti-malta/malta-dtshim.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Carsten Langgaard, [email protected]
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
*
* Setting up the clock on the MIPS boards.
*/
#include <linux/types.h>
#include <linux/i8253.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/libfdt.h>
#include <linux/math64.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/mc146818rtc.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/mc146818-time.h>
#include <asm/msc01_ic.h>
#include <asm/mips-cps.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/maltaint.h>
static int mips_cpu_timer_irq;
static int mips_cpu_perf_irq;
extern int cp0_perfcount_irq;
static unsigned int gic_frequency;
static void mips_timer_dispatch(void)
{
do_IRQ(mips_cpu_timer_irq);
}
static void mips_perf_dispatch(void)
{
do_IRQ(mips_cpu_perf_irq);
}
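/*
 * Round to the nearest multiple of 2 * amount; for example,
 * freqround(33332000, 5000) and freqround(33336000, 5000) give
 * 33330000 and 33340000 respectively.
 */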
static unsigned int freqround(unsigned int freq, unsigned int amount)
{
freq += amount;
freq -= freq % (amount*2);
return freq;
}
/*
* Estimate CPU and GIC frequencies.
*/
static void __init estimate_frequencies(void)
{
unsigned long flags;
unsigned int count, start;
unsigned char secs1, secs2, ctrl;
int secs;
u64 giccount = 0, gicstart = 0;
local_irq_save(flags);
if (mips_gic_present())
clear_gic_config(GIC_CONFIG_COUNTSTOP);
/*
* Read counters exactly on rising edge of update flag.
* This helps get an accurate reading under virtualisation.
*/
while (CMOS_READ(RTC_REG_A) & RTC_UIP);
while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
start = read_c0_count();
if (mips_gic_present())
gicstart = read_gic_counter();
/* Wait for falling edge before reading RTC. */
while (CMOS_READ(RTC_REG_A) & RTC_UIP);
secs1 = CMOS_READ(RTC_SECONDS);
/* Read counters again exactly on rising edge of update flag. */
while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
count = read_c0_count();
if (mips_gic_present())
giccount = read_gic_counter();
/* Wait for falling edge before reading RTC again. */
while (CMOS_READ(RTC_REG_A) & RTC_UIP);
secs2 = CMOS_READ(RTC_SECONDS);
ctrl = CMOS_READ(RTC_CONTROL);
local_irq_restore(flags);
if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
secs1 = bcd2bin(secs1);
secs2 = bcd2bin(secs2);
}
secs = secs2 - secs1;
if (secs < 1)
secs += 60;
count -= start;
count /= secs;
mips_hpt_frequency = count;
if (mips_gic_present()) {
giccount = div_u64(giccount - gicstart, secs);
gic_frequency = giccount;
}
}
void read_persistent_clock64(struct timespec64 *ts)
{
ts->tv_sec = mc146818_get_cmos_time();
ts->tv_nsec = 0;
}
int get_c0_fdc_int(void)
{
/*
* Some cores claim the FDC is routable through the GIC, but it doesn't
* actually seem to be connected for those Malta bitstreams.
*/
switch (current_cpu_type()) {
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
return -1;
}
if (cpu_has_veic)
return -1;
else if (mips_gic_present())
return gic_get_c0_fdc_int();
else if (cp0_fdc_irq >= 0)
return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
else
return -1;
}
int get_c0_perfcount_int(void)
{
if (cpu_has_veic) {
set_vi_handler(MSC01E_INT_PERFCTR, mips_perf_dispatch);
mips_cpu_perf_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
} else if (mips_gic_present()) {
mips_cpu_perf_irq = gic_get_c0_perfcount_int();
} else if (cp0_perfcount_irq >= 0) {
mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
} else {
mips_cpu_perf_irq = -1;
}
return mips_cpu_perf_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
if (cpu_has_veic) {
set_vi_handler(MSC01E_INT_CPUCTR, mips_timer_dispatch);
mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
} else if (mips_gic_present()) {
mips_cpu_timer_irq = gic_get_c0_compare_int();
} else {
mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}
return mips_cpu_timer_irq;
}
static void __init init_rtc(void)
{
unsigned char freq, ctrl;
/* Set 32KHz time base if not already set */
freq = CMOS_READ(RTC_FREQ_SELECT);
if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
/* Ensure SET bit is clear so RTC can run */
ctrl = CMOS_READ(RTC_CONTROL);
if (ctrl & RTC_SET)
CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
}
#ifdef CONFIG_CLKSRC_MIPS_GIC
static u32 gic_frequency_dt;
static struct property gic_frequency_prop = {
.name = "clock-frequency",
.length = sizeof(u32),
.value = &gic_frequency_dt,
};
static void update_gic_frequency_dt(void)
{
struct device_node *node;
gic_frequency_dt = cpu_to_be32(gic_frequency);
node = of_find_compatible_node(NULL, NULL, "mti,gic-timer");
if (!node) {
pr_err("mti,gic-timer device node not found\n");
return;
}
if (of_update_property(node, &gic_frequency_prop) < 0)
pr_err("error updating gic frequency property\n");
of_node_put(node);
}
#endif
void __init plat_time_init(void)
{
unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
unsigned int freq;
init_rtc();
estimate_frequencies();
freq = mips_hpt_frequency;
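/*
 * On most cores the Count register ticks at half the pipeline clock, so
 * double the measured rate; the 20Kc and 25Kf count every cycle and are
 * left as-is.
 */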
if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
(prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
freq *= 2;
freq = freqround(freq, 5000);
printk("CPU frequency %d.%02d MHz\n", freq/1000000,
(freq%1000000)*100/1000000);
#ifdef CONFIG_I8253
/* Only Malta has a PIT. */
setup_pit_timer();
#endif
if (mips_gic_present()) {
freq = freqround(gic_frequency, 5000);
printk("GIC frequency %d.%02d MHz\n", freq/1000000,
(freq%1000000)*100/1000000);
#ifdef CONFIG_CLKSRC_MIPS_GIC
update_gic_frequency_dt();
timer_probe();
#endif
}
}
| linux-master | arch/mips/mti-malta/malta-time.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Carsten Langgaard, [email protected]
* Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
* Copyright (C) 2001 Ralf Baechle
* Copyright (C) 2013 Imagination Technologies Ltd.
*
* Routines for generic manipulation of the interrupts found on the MIPS
* Malta board. The interrupt controller is located in the South Bridge,
* a PIIX4 device with two internal 82C59 interrupt controllers.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/kernel_stat.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <asm/traps.h>
#include <asm/i8259.h>
#include <asm/irq_cpu.h>
#include <asm/irq_regs.h>
#include <asm/mips-boards/malta.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/mips-cps.h>
#include <asm/gt64120.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/msc01_pci.h>
#include <asm/msc01_ic.h>
#include <asm/setup.h>
#include <asm/rtlx.h>
static inline int mips_pcibios_iack(void)
{
int irq;
/*
* Determine highest priority pending interrupt by performing
* a PCI Interrupt Acknowledge cycle.
*/
switch (mips_revision_sconid) {
case MIPS_REVISION_SCON_SOCIT:
case MIPS_REVISION_SCON_ROCIT:
case MIPS_REVISION_SCON_SOCITSC:
case MIPS_REVISION_SCON_SOCITSCP:
MSC_READ(MSC01_PCI_IACK, irq);
irq &= 0xff;
break;
case MIPS_REVISION_SCON_GT64120:
irq = GT_READ(GT_PCI0_IACK_OFS);
irq &= 0xff;
break;
case MIPS_REVISION_SCON_BONITO:
/* The following will generate a PCI IACK cycle on the
* Bonito controller. It's a little bit kludgy, but it
* was the easiest way to implement it in hardware at
* the given time.
*/
BONITO_PCIMAP_CFG = 0x20000;
/* Flush Bonito register block */
(void) BONITO_PCIMAP_CFG;
iob(); /* sync */
irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg);
iob(); /* sync */
irq &= 0xff;
BONITO_PCIMAP_CFG = 0;
break;
default:
pr_emerg("Unknown system controller.\n");
return -1;
}
return irq;
}
static void corehi_irqdispatch(void)
{
unsigned int intedge, intsteer, pcicmd, pcibadaddr;
unsigned int pcimstat, intisr, inten, intpol;
unsigned int intrcause, datalo, datahi;
struct pt_regs *regs = get_irq_regs();
pr_emerg("CoreHI interrupt, shouldn't happen, we die here!\n");
pr_emerg("epc : %08lx\nStatus: %08lx\n"
"Cause : %08lx\nbadVaddr : %08lx\n",
regs->cp0_epc, regs->cp0_status,
regs->cp0_cause, regs->cp0_badvaddr);
/*
 * Read all the registers first and only then print them, since
 * interspersing printk()s upsets the Bonito controller. Do the same
 * for the other system controllers too.
 */
switch (mips_revision_sconid) {
case MIPS_REVISION_SCON_SOCIT:
case MIPS_REVISION_SCON_ROCIT:
case MIPS_REVISION_SCON_SOCITSC:
case MIPS_REVISION_SCON_SOCITSCP:
ll_msc_irq();
break;
case MIPS_REVISION_SCON_GT64120:
intrcause = GT_READ(GT_INTRCAUSE_OFS);
datalo = GT_READ(GT_CPUERR_ADDRLO_OFS);
datahi = GT_READ(GT_CPUERR_ADDRHI_OFS);
pr_emerg("GT_INTRCAUSE = %08x\n", intrcause);
pr_emerg("GT_CPUERR_ADDR = %02x%08x\n",
datahi, datalo);
break;
case MIPS_REVISION_SCON_BONITO:
pcibadaddr = BONITO_PCIBADADDR;
pcimstat = BONITO_PCIMSTAT;
intisr = BONITO_INTISR;
inten = BONITO_INTEN;
intpol = BONITO_INTPOL;
intedge = BONITO_INTEDGE;
intsteer = BONITO_INTSTEER;
pcicmd = BONITO_PCICMD;
pr_emerg("BONITO_INTISR = %08x\n", intisr);
pr_emerg("BONITO_INTEN = %08x\n", inten);
pr_emerg("BONITO_INTPOL = %08x\n", intpol);
pr_emerg("BONITO_INTEDGE = %08x\n", intedge);
pr_emerg("BONITO_INTSTEER = %08x\n", intsteer);
pr_emerg("BONITO_PCICMD = %08x\n", pcicmd);
pr_emerg("BONITO_PCIBADADDR = %08x\n", pcibadaddr);
pr_emerg("BONITO_PCIMSTAT = %08x\n", pcimstat);
break;
}
die("CoreHi interrupt", regs);
}
static irqreturn_t corehi_handler(int irq, void *dev_id)
{
corehi_irqdispatch();
return IRQ_HANDLED;
}
static msc_irqmap_t msc_irqmap[] __initdata = {
{MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0},
{MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0},
};
static int msc_nr_irqs __initdata = ARRAY_SIZE(msc_irqmap);
static msc_irqmap_t msc_eicirqmap[] __initdata = {
{MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_SW1, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_I8259A, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_SMI, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_COREHI, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_CORELO, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_TMR, MSC01_IRQ_EDGE, 0},
{MSC01E_INT_PCI, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0}
};
static int msc_nr_eicirqs __initdata = ARRAY_SIZE(msc_eicirqmap);
void __init arch_init_irq(void)
{
int corehi_irq;
/*
* Preallocate the i8259's expected virqs here. Since irqchip_init()
* will probe the irqchips in hierarchical order, the i8259 is probed last.
* If anything allocates a virq before the i8259 is probed, it will
* be given one of the i8259's expected range and consequently setup
* of the i8259 will fail.
*/
WARN(irq_alloc_descs(I8259A_IRQ_BASE, I8259A_IRQ_BASE,
16, numa_node_id()) < 0,
"Cannot reserve i8259 virqs at IRQ%d\n", I8259A_IRQ_BASE);
i8259_set_poll(mips_pcibios_iack);
irqchip_init();
switch (mips_revision_sconid) {
case MIPS_REVISION_SCON_SOCIT:
case MIPS_REVISION_SCON_ROCIT:
if (cpu_has_veic)
init_msc_irqs(MIPS_MSC01_IC_REG_BASE,
MSC01E_INT_BASE, msc_eicirqmap,
msc_nr_eicirqs);
else
init_msc_irqs(MIPS_MSC01_IC_REG_BASE,
MSC01C_INT_BASE, msc_irqmap,
msc_nr_irqs);
break;
case MIPS_REVISION_SCON_SOCITSC:
case MIPS_REVISION_SCON_SOCITSCP:
if (cpu_has_veic)
init_msc_irqs(MIPS_SOCITSC_IC_REG_BASE,
MSC01E_INT_BASE, msc_eicirqmap,
msc_nr_eicirqs);
else
init_msc_irqs(MIPS_SOCITSC_IC_REG_BASE,
MSC01C_INT_BASE, msc_irqmap,
msc_nr_irqs);
}
if (mips_gic_present()) {
corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
} else if (cpu_has_veic) {
set_vi_handler(MSC01E_INT_COREHI, corehi_irqdispatch);
corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
} else {
corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
}
if (request_irq(corehi_irq, corehi_handler, IRQF_NO_THREAD, "CoreHi",
NULL))
pr_err("Failed to request irq %d (CoreHi)\n", corehi_irq);
}
| linux-master | arch/mips/mti-malta/malta-int.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* PROM library functions for acquiring/using memory descriptors given to
* us from the YAMON.
*
* Copyright (C) 1999,2000,2012 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <[email protected]>
* Steven J. Hill <[email protected]>
*/
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
#include <asm/cdmm.h>
#include <asm/maar.h>
#include <asm/sections.h>
#include <asm/fw/fw.h>
/* determined physical memory size, not overridden by command line args */
unsigned long physical_memsize = 0L;
static void free_init_pages_eva_malta(void *begin, void *end)
{
free_init_pages("unused kernel", __pa_symbol((unsigned long *)begin),
__pa_symbol((unsigned long *)end));
}
void __init fw_meminit(void)
{
bool eva = IS_ENABLED(CONFIG_EVA);
free_init_pages_eva = eva ? free_init_pages_eva_malta : NULL;
}
phys_addr_t mips_cdmm_phys_base(void)
{
/* This address is "typically unused" */
return 0x1fc10000;
}
| linux-master | arch/mips/mti-malta/malta-memory.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Carsten Langgaard, [email protected]
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2008 Dmitri Vorobiev
*/
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/of_fdt.h>
#include <linux/pci.h>
#include <linux/screen_info.h>
#include <linux/time.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <asm/fw/fw.h>
#include <asm/mips-cps.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/malta.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/dma.h>
#include <asm/prom.h>
#include <asm/traps.h>
#ifdef CONFIG_VT
#include <linux/console.h>
#endif
#define ROCIT_CONFIG_GEN0 0x1f403000
#define ROCIT_CONFIG_GEN0_PCI_IOCU BIT(7)
static struct resource standard_io_resources[] = {
{
.name = "dma1",
.start = 0x00,
.end = 0x1f,
.flags = IORESOURCE_IO | IORESOURCE_BUSY
},
{
.name = "timer",
.start = 0x40,
.end = 0x5f,
.flags = IORESOURCE_IO | IORESOURCE_BUSY
},
{
.name = "keyboard",
.start = 0x60,
.end = 0x6f,
.flags = IORESOURCE_IO | IORESOURCE_BUSY
},
{
.name = "dma page reg",
.start = 0x80,
.end = 0x8f,
.flags = IORESOURCE_IO | IORESOURCE_BUSY
},
{
.name = "dma2",
.start = 0xc0,
.end = 0xdf,
.flags = IORESOURCE_IO | IORESOURCE_BUSY
},
};
const char *get_system_type(void)
{
return "MIPS Malta";
}
#ifdef CONFIG_BLK_DEV_FD
static void __init fd_activate(void)
{
/*
* Activate the floppy controller in the SMSC FDC37M817 Super I/O
* controller. YAMON 2.00 onwards already does this itself.
*/
/* Entering config state. */
SMSC_WRITE(SMSC_CONFIG_ENTER, SMSC_CONFIG_REG);
/* Activate floppy controller. */
SMSC_WRITE(SMSC_CONFIG_DEVNUM, SMSC_CONFIG_REG);
SMSC_WRITE(SMSC_CONFIG_DEVNUM_FLOPPY, SMSC_DATA_REG);
SMSC_WRITE(SMSC_CONFIG_ACTIVATE, SMSC_CONFIG_REG);
SMSC_WRITE(SMSC_CONFIG_ACTIVATE_ENABLE, SMSC_DATA_REG);
/* Exit config state. */
SMSC_WRITE(SMSC_CONFIG_EXIT, SMSC_CONFIG_REG);
}
#endif
static void __init plat_setup_iocoherency(void)
{
u32 cfg;
if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
pr_info("Enabled Bonito CPU coherency\n");
dma_default_coherent = true;
}
if (strstr(fw_getcmdline(), "iobcuncached")) {
BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
pr_info("Disabled Bonito IOBC coherency\n");
} else {
BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
BONITO_PCIMEMBASECFG |=
(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
pr_info("Enabled Bonito IOBC coherency\n");
}
} else if (mips_cps_numiocu(0) != 0) {
/* Nothing special needs to be done to enable coherency */
pr_info("CMP IOCU detected\n");
cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0));
if (cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)
dma_default_coherent = true;
else
pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
}
if (dma_default_coherent)
pr_info("Hardware DMA cache coherency enabled\n");
else
pr_info("Software DMA cache coherency enabled\n");
}
static void __init pci_clock_check(void)
{
unsigned int __iomem *jmpr_p =
(unsigned int *) ioremap(MALTA_JMPRS_REG, sizeof(unsigned int));
int jmpr = (__raw_readl(jmpr_p) >> 2) & 0x07;
static const int pciclocks[] __initconst = {
33, 20, 25, 30, 12, 16, 37, 10
};
int pciclock = pciclocks[jmpr];
char *optptr, *argptr = fw_getcmdline();
/*
* If the user passed a pci_clock= option, don't tack on another one.
*/
optptr = strstr(argptr, "pci_clock=");
if (optptr && (optptr == argptr || optptr[-1] == ' '))
return;
if (pciclock != 33) {
pr_warn("WARNING: PCI clock is %dMHz, setting pci_clock\n",
pciclock);
argptr += strlen(argptr);
sprintf(argptr, " pci_clock=%d", pciclock);
if (pciclock < 20 || pciclock > 66)
pr_warn("WARNING: IDE timing calculations will be "
"incorrect\n");
}
}
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
static void __init screen_info_setup(void)
{
screen_info = (struct screen_info) {
.orig_x = 0,
.orig_y = 25,
.ext_mem_k = 0,
.orig_video_page = 0,
.orig_video_mode = 0,
.orig_video_cols = 80,
.unused2 = 0,
.orig_video_ega_bx = 0,
.unused3 = 0,
.orig_video_lines = 25,
.orig_video_isVGA = VIDEO_TYPE_VGAC,
.orig_video_points = 16
};
}
#endif
static void __init bonito_quirks_setup(void)
{
char *argptr;
argptr = fw_getcmdline();
if (strstr(argptr, "debug")) {
BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE;
pr_info("Enabled Bonito debug mode\n");
} else
BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
}
void __init *plat_get_fdt(void)
{
return (void *)__dtb_start;
}
void __init plat_mem_setup(void)
{
unsigned int i;
void *fdt = plat_get_fdt();
fdt = malta_dt_shim(fdt);
__dt_setup_arch(fdt);
if (IS_ENABLED(CONFIG_EVA))
/* EVA has already been configured in mach-malta/kernel-init.h */
pr_info("Enhanced Virtual Addressing (EVA) activated\n");
mips_pcibios_init();
/* Request I/O space for devices used on the Malta board. */
for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
request_resource(&ioport_resource, standard_io_resources+i);
/*
* Enable DMA channel 4 (cascade channel) in the PIIX4 south bridge.
*/
enable_dma(4);
if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
bonito_quirks_setup();
plat_setup_iocoherency();
pci_clock_check();
#ifdef CONFIG_BLK_DEV_FD
fd_activate();
#endif
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
screen_info_setup();
#endif
}
| linux-master | arch/mips/mti-malta/malta-setup.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* PROM library initialisation code.
*
* Copyright (C) 1999,2000,2004,2005,2012 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <[email protected]>
* Maciej W. Rozycki <[email protected]>
* Steven J. Hill <[email protected]>
*/
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci_regs.h>
#include <linux/serial_core.h>
#include <asm/cacheflush.h>
#include <asm/smp-ops.h>
#include <asm/traps.h>
#include <asm/fw/fw.h>
#include <asm/mips-cps.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/malta.h>
static int mips_revision_corid;
int mips_revision_sconid;
/* Bonito64 system controller register base. */
unsigned long _pcictrl_bonito;
unsigned long _pcictrl_bonito_pcicfg;
/* GT64120 system controller register base */
unsigned long _pcictrl_gt64120;
/* MIPS System controller register base */
unsigned long _pcictrl_msc;
#ifdef CONFIG_SERIAL_8250_CONSOLE
static void __init console_config(void)
{
char console_string[40];
int baud = 0;
char parity = '\0', bits = '\0', flow = '\0';
char *s;
s = fw_getenv("modetty0");
if (s) {
while (*s >= '0' && *s <= '9')
baud = baud*10 + *s++ - '0';
if (*s == ',')
s++;
if (*s)
parity = *s++;
if (*s == ',')
s++;
if (*s)
bits = *s++;
if (*s == ',')
s++;
if (*s == 'h')
flow = 'r';
}
if (baud == 0)
baud = 38400;
if (parity != 'n' && parity != 'o' && parity != 'e')
parity = 'n';
if (bits != '7' && bits != '8')
bits = '8';
if (flow == '\0')
flow = 'r';
if ((strstr(fw_getcmdline(), "earlycon=")) == NULL) {
sprintf(console_string, "uart8250,io,0x3f8,%d%c%c", baud,
parity, bits);
setup_earlycon(console_string);
}
if ((strstr(fw_getcmdline(), "console=")) == NULL) {
sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
parity, bits, flow);
strcat(fw_getcmdline(), console_string);
pr_info("Config serial console:%s\n", console_string);
}
}
#endif
static void __init mips_nmi_setup(void)
{
void *base;
base = cpu_has_veic ?
(void *)(CAC_BASE + 0xa80) :
(void *)(CAC_BASE + 0x380);
memcpy(base, except_vec_nmi, 0x80);
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
static void __init mips_ejtag_setup(void)
{
void *base;
extern char except_vec_ejtag_debug[];
base = cpu_has_veic ?
(void *)(CAC_BASE + 0xa00) :
(void *)(CAC_BASE + 0x300);
memcpy(base, except_vec_ejtag_debug, 0x80);
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
phys_addr_t mips_cpc_default_phys_base(void)
{
return CPC_BASE_ADDR;
}
void __init prom_init(void)
{
/*
* early setup of _pcictrl_bonito so that we can determine
* the system controller on a CORE_EMUL board
*/
_pcictrl_bonito = (unsigned long)ioremap(BONITO_REG_BASE, BONITO_REG_SIZE);
mips_revision_corid = MIPS_REVISION_CORID;
if (mips_revision_corid == MIPS_REVISION_CORID_CORE_EMUL) {
if (BONITO_PCIDID == 0x0001df53 ||
BONITO_PCIDID == 0x0003df53)
mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_BON;
else
mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_MSC;
}
mips_revision_sconid = MIPS_REVISION_SCONID;
if (mips_revision_sconid == MIPS_REVISION_SCON_OTHER) {
switch (mips_revision_corid) {
case MIPS_REVISION_CORID_QED_RM5261:
case MIPS_REVISION_CORID_CORE_LV:
case MIPS_REVISION_CORID_CORE_FPGA:
case MIPS_REVISION_CORID_CORE_FPGAR2:
mips_revision_sconid = MIPS_REVISION_SCON_GT64120;
break;
case MIPS_REVISION_CORID_CORE_EMUL_BON:
case MIPS_REVISION_CORID_BONITO64:
case MIPS_REVISION_CORID_CORE_20K:
mips_revision_sconid = MIPS_REVISION_SCON_BONITO;
break;
case MIPS_REVISION_CORID_CORE_MSC:
case MIPS_REVISION_CORID_CORE_FPGA2:
case MIPS_REVISION_CORID_CORE_24K:
/*
* SOCit/ROCit support is essentially identical,
* but make an attempt to distinguish them anyway.
*/
mips_revision_sconid = MIPS_REVISION_SCON_SOCIT;
break;
case MIPS_REVISION_CORID_CORE_FPGA3:
case MIPS_REVISION_CORID_CORE_FPGA4:
case MIPS_REVISION_CORID_CORE_FPGA5:
case MIPS_REVISION_CORID_CORE_EMUL_MSC:
default:
/* See above */
mips_revision_sconid = MIPS_REVISION_SCON_ROCIT;
break;
}
}
switch (mips_revision_sconid) {
u32 start, map, mask, data;
case MIPS_REVISION_SCON_GT64120:
/*
* Set up the North Bridge to do master byte-lane swapping
* when running big-endian.
*/
_pcictrl_gt64120 = (unsigned long)ioremap(MIPS_GT_BASE, 0x2000);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
GT_WRITE(GT_PCI0_CMD_OFS, GT_PCI0_CMD_MBYTESWAP_BIT |
GT_PCI0_CMD_SBYTESWAP_BIT);
#else
GT_WRITE(GT_PCI0_CMD_OFS, 0);
#endif
/* Fix up PCI I/O mapping if necessary (for Atlas). */
start = GT_READ(GT_PCI0IOLD_OFS);
map = GT_READ(GT_PCI0IOREMAP_OFS);
if ((start & map) != 0) {
map &= ~start;
GT_WRITE(GT_PCI0IOREMAP_OFS, map);
}
set_io_port_base(MALTA_GT_PORT_BASE);
break;
case MIPS_REVISION_SCON_BONITO:
_pcictrl_bonito_pcicfg = (unsigned long)ioremap(BONITO_PCICFG_BASE, BONITO_PCICFG_SIZE);
/*
* Disable Bonito IOBC.
*/
BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
/*
* Set up the North Bridge to do master byte-lane swapping
* when running big-endian.
*/
#ifdef CONFIG_CPU_LITTLE_ENDIAN
BONITO_BONGENCFG = BONITO_BONGENCFG &
~(BONITO_BONGENCFG_MSTRBYTESWAP |
BONITO_BONGENCFG_BYTESWAP);
#else
BONITO_BONGENCFG = BONITO_BONGENCFG |
BONITO_BONGENCFG_MSTRBYTESWAP |
BONITO_BONGENCFG_BYTESWAP;
#endif
set_io_port_base(MALTA_BONITO_PORT_BASE);
break;
case MIPS_REVISION_SCON_SOCIT:
case MIPS_REVISION_SCON_ROCIT:
_pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000);
mips_pci_controller:
mb();
MSC_READ(MSC01_PCI_CFG, data);
MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT);
wmb();
/* Fix up lane swapping. */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
MSC_WRITE(MSC01_PCI_SWAP, MSC01_PCI_SWAP_NOSWAP);
#else
MSC_WRITE(MSC01_PCI_SWAP,
MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_IO_SHF |
MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_MEM_SHF |
MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_BAR0_SHF);
#endif
/*
* Set up the Malta max (2GB) memory for PCI DMA in the host bridge
* in transparent addressing mode.
*/
mask = PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH;
MSC_WRITE(MSC01_PCI_BAR0, mask);
MSC_WRITE(MSC01_PCI_HEAD4, mask);
mask &= MSC01_PCI_BAR0_SIZE_MSK;
MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
/* Don't handle target retries indefinitely. */
if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
MSC01_PCI_CFG_MAXRTRY_MSK)
data = (data & ~(MSC01_PCI_CFG_MAXRTRY_MSK <<
MSC01_PCI_CFG_MAXRTRY_SHF)) |
((MSC01_PCI_CFG_MAXRTRY_MSK - 1) <<
MSC01_PCI_CFG_MAXRTRY_SHF);
wmb();
MSC_WRITE(MSC01_PCI_CFG, data);
mb();
set_io_port_base(MALTA_MSC_PORT_BASE);
break;
case MIPS_REVISION_SCON_SOCITSC:
case MIPS_REVISION_SCON_SOCITSCP:
_pcictrl_msc = (unsigned long)ioremap(MIPS_SOCITSC_PCI_REG_BASE, 0x2000);
goto mips_pci_controller;
default:
/* Unknown system controller */
while (1); /* We die here... */
}
board_nmi_handler_setup = mips_nmi_setup;
board_ejtag_handler_setup = mips_ejtag_setup;
fw_init_cmdline();
fw_meminit();
#ifdef CONFIG_SERIAL_8250_CONSOLE
console_config();
#endif
/* Early detection of CMP support */
mips_cpc_probe();
if (!register_cps_smp_ops())
return;
if (!register_vsmp_smp_ops())
return;
register_up_smp_ops();
}
| linux-master | arch/mips/mti-malta/malta-init.c |