Dataset columns: python_code (file contents), repo_name, file_path
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright IBM Corp. 2008 * * Authors: Hollis Blanchard <[email protected]> * Christian Ehrhardt <[email protected]> */ #include <linux/kvm_host.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/module.h> #include <asm/time.h> #include <asm-generic/div64.h> #include "timing.h" void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) { int i; /* Take a lock to avoid concurrent updates */ mutex_lock(&vcpu->arch.exit_timing_lock); vcpu->arch.last_exit_type = 0xDEAD; for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { vcpu->arch.timing_count_type[i] = 0; vcpu->arch.timing_max_duration[i] = 0; vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF; vcpu->arch.timing_sum_duration[i] = 0; vcpu->arch.timing_sum_quad_duration[i] = 0; } vcpu->arch.timing_last_exit = 0; vcpu->arch.timing_exit.tv64 = 0; vcpu->arch.timing_last_enter.tv64 = 0; mutex_unlock(&vcpu->arch.exit_timing_lock); } static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type) { u64 old; mutex_lock(&vcpu->arch.exit_timing_lock); vcpu->arch.timing_count_type[type]++; /* sum */ old = vcpu->arch.timing_sum_duration[type]; vcpu->arch.timing_sum_duration[type] += duration; if (unlikely(old > vcpu->arch.timing_sum_duration[type])) { printk(KERN_ERR"%s - wrap adding sum of durations" " old %lld new %lld type %d exit # of type %d\n", __func__, old, vcpu->arch.timing_sum_duration[type], type, vcpu->arch.timing_count_type[type]); } /* square sum */ old = vcpu->arch.timing_sum_quad_duration[type]; vcpu->arch.timing_sum_quad_duration[type] += (duration*duration); if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) { printk(KERN_ERR"%s - wrap adding sum of squared durations" " old %lld new %lld type %d exit # of type %d\n", __func__, old, vcpu->arch.timing_sum_quad_duration[type], type, vcpu->arch.timing_count_type[type]); } /* set min/max */ if (unlikely(duration < vcpu->arch.timing_min_duration[type])) vcpu->arch.timing_min_duration[type] = duration; if (unlikely(duration > vcpu->arch.timing_max_duration[type])) vcpu->arch.timing_max_duration[type] = duration; mutex_unlock(&vcpu->arch.exit_timing_lock); } void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) { u64 exit = vcpu->arch.timing_last_exit; u64 enter = vcpu->arch.timing_last_enter.tv64; /* save exit time, used next exit when the reenter time is known */ vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64; if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0)) return; /* skip incomplete cycle (e.g. 
after reset) */ /* update statistics for average and standard deviation */ add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type); /* enter -> timing_last_exit is time spent in guest - log this too */ add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter), TIMEINGUEST); } static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = { [MMIO_EXITS] = "MMIO", [SIGNAL_EXITS] = "SIGNAL", [ITLB_REAL_MISS_EXITS] = "ITLBREAL", [ITLB_VIRT_MISS_EXITS] = "ITLBVIRT", [DTLB_REAL_MISS_EXITS] = "DTLBREAL", [DTLB_VIRT_MISS_EXITS] = "DTLBVIRT", [SYSCALL_EXITS] = "SYSCALL", [ISI_EXITS] = "ISI", [DSI_EXITS] = "DSI", [EMULATED_INST_EXITS] = "EMULINST", [EMULATED_MTMSRWE_EXITS] = "EMUL_WAIT", [EMULATED_WRTEE_EXITS] = "EMUL_WRTEE", [EMULATED_MTSPR_EXITS] = "EMUL_MTSPR", [EMULATED_MFSPR_EXITS] = "EMUL_MFSPR", [EMULATED_MTMSR_EXITS] = "EMUL_MTMSR", [EMULATED_MFMSR_EXITS] = "EMUL_MFMSR", [EMULATED_TLBSX_EXITS] = "EMUL_TLBSX", [EMULATED_TLBWE_EXITS] = "EMUL_TLBWE", [EMULATED_RFI_EXITS] = "EMUL_RFI", [DEC_EXITS] = "DEC", [EXT_INTR_EXITS] = "EXTINT", [HALT_WAKEUP] = "HALT", [USR_PR_INST] = "USR_PR_INST", [FP_UNAVAIL] = "FP_UNAVAIL", [DEBUG_EXITS] = "DEBUG", [TIMEINGUEST] = "TIMEINGUEST" }; static int kvmppc_exit_timing_show(struct seq_file *m, void *private) { struct kvm_vcpu *vcpu = m->private; int i; u64 min, max, sum, sum_quad; seq_puts(m, "type count min max sum sum_squared\n"); for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { min = vcpu->arch.timing_min_duration[i]; do_div(min, tb_ticks_per_usec); max = vcpu->arch.timing_max_duration[i]; do_div(max, tb_ticks_per_usec); sum = vcpu->arch.timing_sum_duration[i]; do_div(sum, tb_ticks_per_usec); sum_quad = vcpu->arch.timing_sum_quad_duration[i]; do_div(sum_quad, tb_ticks_per_usec); seq_printf(m, "%12s %10d %10lld %10lld %20lld %20lld\n", kvm_exit_names[i], vcpu->arch.timing_count_type[i], min, max, sum, sum_quad); } return 0; } /* Write 'c' to clear the timing statistics. */ static ssize_t kvmppc_exit_timing_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { int err = -EINVAL; char c; if (count > 1) { goto done; } if (get_user(c, user_buf)) { err = -EFAULT; goto done; } if (c == 'c') { struct seq_file *seqf = file->private_data; struct kvm_vcpu *vcpu = seqf->private; /* Write does not affect our buffers previously generated with * show. seq_file is locked here to prevent races of init with * a show call */ mutex_lock(&seqf->lock); kvmppc_init_timing_stats(vcpu); mutex_unlock(&seqf->lock); err = count; } done: return err; } static int kvmppc_exit_timing_open(struct inode *inode, struct file *file) { return single_open(file, kvmppc_exit_timing_show, inode->i_private); } static const struct file_operations kvmppc_exit_timing_fops = { .owner = THIS_MODULE, .open = kvmppc_exit_timing_open, .read = seq_read, .write = kvmppc_exit_timing_write, .llseek = seq_lseek, .release = single_release, }; int kvmppc_create_vcpu_debugfs_e500(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) { debugfs_create_file("timing", 0666, debugfs_dentry, vcpu, &kvmppc_exit_timing_fops); return 0; }
linux-master
arch/powerpc/kvm/timing.c
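The timing.c code above accumulates, per exit type, a count, the sum of exit durations, the sum of squared durations, and the min/max, all in timebase ticks; the debugfs show handler scales them by tb_ticks_per_usec before printing. As a minimal illustration (not part of the kernel file), here is a hypothetical userspace helper that recovers the mean and standard deviation from the count/sum/sum_squared columns the "timing" debugfs file exports:

#include <math.h>
#include <stdint.h>

/*
 * Hypothetical userspace helper: derive mean and standard deviation for one
 * exit type from the columns exported by the "timing" debugfs file
 * (count, sum, sum_squared). Not part of the kernel source above.
 */
static void exit_timing_stats(uint64_t count, uint64_t sum, uint64_t sum_quad,
			      double *mean, double *stddev)
{
	if (!count) {
		*mean = *stddev = 0.0;
		return;
	}
	*mean = (double)sum / count;
	/* var = E[x^2] - E[x]^2, using the sum of squared durations */
	*stddev = sqrt((double)sum_quad / count - *mean * *mean);
}

Keeping the raw sum and sum-of-squares rather than a running variance is what lets the kernel side stay with simple integer additions under the exit-timing lock.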
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. * * Author: Yu Liu, <[email protected]> * * Description: * This file is derived from arch/powerpc/kvm/44x_emulate.c, * by Hollis Blanchard <[email protected]>. */ #include <asm/kvm_ppc.h> #include <asm/disassemble.h> #include <asm/dbell.h> #include <asm/reg_booke.h> #include "booke.h" #include "e500.h" #define XOP_DCBTLS 166 #define XOP_MSGSND 206 #define XOP_MSGCLR 238 #define XOP_MFTMR 366 #define XOP_TLBIVAX 786 #define XOP_TLBSX 914 #define XOP_TLBRE 946 #define XOP_TLBWE 978 #define XOP_TLBILX 18 #define XOP_EHPRIV 270 #ifdef CONFIG_KVM_E500MC static int dbell2prio(ulong param) { int msg = param & PPC_DBELL_TYPE_MASK; int prio = -1; switch (msg) { case PPC_DBELL_TYPE(PPC_DBELL): prio = BOOKE_IRQPRIO_DBELL; break; case PPC_DBELL_TYPE(PPC_DBELL_CRIT): prio = BOOKE_IRQPRIO_DBELL_CRIT; break; default: break; } return prio; } static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) { ulong param = vcpu->arch.regs.gpr[rb]; int prio = dbell2prio(param); if (prio < 0) return EMULATE_FAIL; clear_bit(prio, &vcpu->arch.pending_exceptions); return EMULATE_DONE; } static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) { ulong param = vcpu->arch.regs.gpr[rb]; int prio = dbell2prio(rb); int pir = param & PPC_DBELL_PIR_MASK; unsigned long i; struct kvm_vcpu *cvcpu; if (prio < 0) return EMULATE_FAIL; kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) { int cpir = cvcpu->arch.shared->pir; if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) { set_bit(prio, &cvcpu->arch.pending_exceptions); kvm_vcpu_kick(cvcpu); } } return EMULATE_DONE; } #endif static int kvmppc_e500_emul_ehpriv(struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { int emulated = EMULATE_DONE; switch (get_oc(inst)) { case EHPRIV_OC_DEBUG: vcpu->run->exit_reason = KVM_EXIT_DEBUG; vcpu->run->debug.arch.address = vcpu->arch.regs.nip; vcpu->run->debug.arch.status = 0; kvmppc_account_exit(vcpu, DEBUG_EXITS); emulated = EMULATE_EXIT_USER; *advance = 0; break; default: emulated = EMULATE_FAIL; } return emulated; } static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); /* Always fail to lock the cache */ vcpu_e500->l1csr0 |= L1CSR0_CUL; return EMULATE_DONE; } static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst, int rt) { /* Expose one thread per vcpu */ if (get_tmrn(inst) == TMRN_TMCFG0) { kvmppc_set_gpr(vcpu, rt, 1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT)); return EMULATE_DONE; } return EMULATE_FAIL; } int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { int emulated = EMULATE_DONE; int ra = get_ra(inst); int rb = get_rb(inst); int rt = get_rt(inst); gva_t ea; switch (get_op(inst)) { case 31: switch (get_xop(inst)) { case XOP_DCBTLS: emulated = kvmppc_e500_emul_dcbtls(vcpu); break; #ifdef CONFIG_KVM_E500MC case XOP_MSGSND: emulated = kvmppc_e500_emul_msgsnd(vcpu, rb); break; case XOP_MSGCLR: emulated = kvmppc_e500_emul_msgclr(vcpu, rb); break; #endif case XOP_TLBRE: emulated = kvmppc_e500_emul_tlbre(vcpu); break; case XOP_TLBWE: emulated = kvmppc_e500_emul_tlbwe(vcpu); break; case XOP_TLBSX: ea = kvmppc_get_ea_indexed(vcpu, ra, rb); emulated = kvmppc_e500_emul_tlbsx(vcpu, ea); break; case XOP_TLBILX: { int type = rt & 0x3; ea = kvmppc_get_ea_indexed(vcpu, ra, rb); emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea); break; } case XOP_TLBIVAX: ea = kvmppc_get_ea_indexed(vcpu, ra, rb); emulated = 
kvmppc_e500_emul_tlbivax(vcpu, ea); break; case XOP_MFTMR: emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt); break; case XOP_EHPRIV: emulated = kvmppc_e500_emul_ehpriv(vcpu, inst, advance); break; default: emulated = EMULATE_FAIL; } break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) emulated = kvmppc_booke_emulate_op(vcpu, inst, advance); return emulated; } int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int emulated = EMULATE_DONE; switch (sprn) { #ifndef CONFIG_KVM_BOOKE_HV case SPRN_PID: kvmppc_set_pid(vcpu, spr_val); break; case SPRN_PID1: if (spr_val != 0) return EMULATE_FAIL; vcpu_e500->pid[1] = spr_val; break; case SPRN_PID2: if (spr_val != 0) return EMULATE_FAIL; vcpu_e500->pid[2] = spr_val; break; case SPRN_MAS0: vcpu->arch.shared->mas0 = spr_val; break; case SPRN_MAS1: vcpu->arch.shared->mas1 = spr_val; break; case SPRN_MAS2: vcpu->arch.shared->mas2 = spr_val; break; case SPRN_MAS3: vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff; vcpu->arch.shared->mas7_3 |= spr_val; break; case SPRN_MAS4: vcpu->arch.shared->mas4 = spr_val; break; case SPRN_MAS6: vcpu->arch.shared->mas6 = spr_val; break; case SPRN_MAS7: vcpu->arch.shared->mas7_3 &= (u64)0xffffffff; vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32; break; #endif case SPRN_L1CSR0: vcpu_e500->l1csr0 = spr_val; vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); break; case SPRN_L1CSR1: vcpu_e500->l1csr1 = spr_val; vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR); break; case SPRN_HID0: vcpu_e500->hid0 = spr_val; break; case SPRN_HID1: vcpu_e500->hid1 = spr_val; break; case SPRN_MMUCSR0: emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, spr_val); break; case SPRN_PWRMGTCR0: /* * Guest relies on host power management configurations * Treat the request as a general store */ vcpu->arch.pwrmgtcr0 = spr_val; break; case SPRN_BUCSR: /* * If we are here, it means that we have already flushed the * branch predictor, so just return to guest. 
*/ break; /* extra exceptions */ #ifdef CONFIG_SPE_POSSIBLE case SPRN_IVOR32: vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val; break; case SPRN_IVOR33: vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val; break; case SPRN_IVOR34: vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val; break; #endif #ifdef CONFIG_ALTIVEC case SPRN_IVOR32: vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val; break; case SPRN_IVOR33: vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val; break; #endif case SPRN_IVOR35: vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val; break; #ifdef CONFIG_KVM_BOOKE_HV case SPRN_IVOR36: vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val; break; case SPRN_IVOR37: vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val; break; #endif default: emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val); } return emulated; } int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int emulated = EMULATE_DONE; switch (sprn) { #ifndef CONFIG_KVM_BOOKE_HV case SPRN_PID: *spr_val = vcpu_e500->pid[0]; break; case SPRN_PID1: *spr_val = vcpu_e500->pid[1]; break; case SPRN_PID2: *spr_val = vcpu_e500->pid[2]; break; case SPRN_MAS0: *spr_val = vcpu->arch.shared->mas0; break; case SPRN_MAS1: *spr_val = vcpu->arch.shared->mas1; break; case SPRN_MAS2: *spr_val = vcpu->arch.shared->mas2; break; case SPRN_MAS3: *spr_val = (u32)vcpu->arch.shared->mas7_3; break; case SPRN_MAS4: *spr_val = vcpu->arch.shared->mas4; break; case SPRN_MAS6: *spr_val = vcpu->arch.shared->mas6; break; case SPRN_MAS7: *spr_val = vcpu->arch.shared->mas7_3 >> 32; break; #endif case SPRN_DECAR: *spr_val = vcpu->arch.decar; break; case SPRN_TLB0CFG: *spr_val = vcpu->arch.tlbcfg[0]; break; case SPRN_TLB1CFG: *spr_val = vcpu->arch.tlbcfg[1]; break; case SPRN_TLB0PS: if (!has_feature(vcpu, VCPU_FTR_MMU_V2)) return EMULATE_FAIL; *spr_val = vcpu->arch.tlbps[0]; break; case SPRN_TLB1PS: if (!has_feature(vcpu, VCPU_FTR_MMU_V2)) return EMULATE_FAIL; *spr_val = vcpu->arch.tlbps[1]; break; case SPRN_L1CSR0: *spr_val = vcpu_e500->l1csr0; break; case SPRN_L1CSR1: *spr_val = vcpu_e500->l1csr1; break; case SPRN_HID0: *spr_val = vcpu_e500->hid0; break; case SPRN_HID1: *spr_val = vcpu_e500->hid1; break; case SPRN_SVR: *spr_val = vcpu_e500->svr; break; case SPRN_MMUCSR0: *spr_val = 0; break; case SPRN_MMUCFG: *spr_val = vcpu->arch.mmucfg; break; case SPRN_EPTCFG: if (!has_feature(vcpu, VCPU_FTR_MMU_V2)) return EMULATE_FAIL; /* * Legacy Linux guests access EPTCFG register even if the E.PT * category is disabled in the VM. Give them a chance to live. 
*/ *spr_val = vcpu->arch.eptcfg; break; case SPRN_PWRMGTCR0: *spr_val = vcpu->arch.pwrmgtcr0; break; /* extra exceptions */ #ifdef CONFIG_SPE_POSSIBLE case SPRN_IVOR32: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; break; case SPRN_IVOR33: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; break; case SPRN_IVOR34: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; break; #endif #ifdef CONFIG_ALTIVEC case SPRN_IVOR32: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL]; break; case SPRN_IVOR33: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST]; break; #endif case SPRN_IVOR35: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; break; #ifdef CONFIG_KVM_BOOKE_HV case SPRN_IVOR36: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; break; case SPRN_IVOR37: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; break; #endif default: emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val); } return emulated; }
linux-master
arch/powerpc/kvm/e500_emulate.c
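kvmppc_core_emulate_op_e500() above dispatches on get_op() (the primary opcode) and get_xop() (the extended opcode) of the trapped instruction. Below is an illustrative sketch of that decode, assuming the standard PowerPC X-form layout (primary opcode in instruction bits 0-5, extended opcode in bits 21-30, RT/RA/RB in bits 6-20); the kernel itself uses the helpers from asm/disassemble.h, and the struct and function names here are made up:

#include <stdint.h>

/* Illustrative decode of a 32-bit PowerPC X-form instruction word. */
struct xform {
	uint32_t op;	/* primary opcode, instruction bits 0-5    */
	uint32_t xop;	/* extended opcode, instruction bits 21-30 */
	uint32_t rt;	/* bits 6-10  */
	uint32_t ra;	/* bits 11-15 */
	uint32_t rb;	/* bits 16-20 */
};

static struct xform decode_xform(uint32_t inst)
{
	struct xform d = {
		.op  = inst >> 26,
		.xop = (inst >> 1) & 0x3ff,
		.rt  = (inst >> 21) & 0x1f,
		.ra  = (inst >> 16) & 0x1f,
		.rb  = (inst >> 11) & 0x1f,
	};
	return d;
}

For example, a guest tlbwe traps with op = 31 and xop = 978, which lands in the XOP_TLBWE case above.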
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright Novell Inc 2010 * * Authors: Alexander Graf <[email protected]> */ #include <asm/kvm.h> #include <asm/kvm_ppc.h> #include <asm/disassemble.h> #include <asm/kvm_book3s.h> #include <asm/kvm_fpu.h> #include <asm/reg.h> #include <asm/cacheflush.h> #include <asm/switch_to.h> #include <linux/vmalloc.h> /* #define DEBUG */ #ifdef DEBUG #define dprintk printk #else #define dprintk(...) do { } while(0); #endif #define OP_LFS 48 #define OP_LFSU 49 #define OP_LFD 50 #define OP_LFDU 51 #define OP_STFS 52 #define OP_STFSU 53 #define OP_STFD 54 #define OP_STFDU 55 #define OP_PSQ_L 56 #define OP_PSQ_LU 57 #define OP_PSQ_ST 60 #define OP_PSQ_STU 61 #define OP_31_LFSX 535 #define OP_31_LFSUX 567 #define OP_31_LFDX 599 #define OP_31_LFDUX 631 #define OP_31_STFSX 663 #define OP_31_STFSUX 695 #define OP_31_STFX 727 #define OP_31_STFUX 759 #define OP_31_LWIZX 887 #define OP_31_STFIWX 983 #define OP_59_FADDS 21 #define OP_59_FSUBS 20 #define OP_59_FSQRTS 22 #define OP_59_FDIVS 18 #define OP_59_FRES 24 #define OP_59_FMULS 25 #define OP_59_FRSQRTES 26 #define OP_59_FMSUBS 28 #define OP_59_FMADDS 29 #define OP_59_FNMSUBS 30 #define OP_59_FNMADDS 31 #define OP_63_FCMPU 0 #define OP_63_FCPSGN 8 #define OP_63_FRSP 12 #define OP_63_FCTIW 14 #define OP_63_FCTIWZ 15 #define OP_63_FDIV 18 #define OP_63_FADD 21 #define OP_63_FSQRT 22 #define OP_63_FSEL 23 #define OP_63_FRE 24 #define OP_63_FMUL 25 #define OP_63_FRSQRTE 26 #define OP_63_FMSUB 28 #define OP_63_FMADD 29 #define OP_63_FNMSUB 30 #define OP_63_FNMADD 31 #define OP_63_FCMPO 32 #define OP_63_MTFSB1 38 // XXX #define OP_63_FSUB 20 #define OP_63_FNEG 40 #define OP_63_MCRFS 64 #define OP_63_MTFSB0 70 #define OP_63_FMR 72 #define OP_63_MTFSFI 134 #define OP_63_FABS 264 #define OP_63_MFFS 583 #define OP_63_MTFSF 711 #define OP_4X_PS_CMPU0 0 #define OP_4X_PSQ_LX 6 #define OP_4XW_PSQ_STX 7 #define OP_4A_PS_SUM0 10 #define OP_4A_PS_SUM1 11 #define OP_4A_PS_MULS0 12 #define OP_4A_PS_MULS1 13 #define OP_4A_PS_MADDS0 14 #define OP_4A_PS_MADDS1 15 #define OP_4A_PS_DIV 18 #define OP_4A_PS_SUB 20 #define OP_4A_PS_ADD 21 #define OP_4A_PS_SEL 23 #define OP_4A_PS_RES 24 #define OP_4A_PS_MUL 25 #define OP_4A_PS_RSQRTE 26 #define OP_4A_PS_MSUB 28 #define OP_4A_PS_MADD 29 #define OP_4A_PS_NMSUB 30 #define OP_4A_PS_NMADD 31 #define OP_4X_PS_CMPO0 32 #define OP_4X_PSQ_LUX 38 #define OP_4XW_PSQ_STUX 39 #define OP_4X_PS_NEG 40 #define OP_4X_PS_CMPU1 64 #define OP_4X_PS_MR 72 #define OP_4X_PS_CMPO1 96 #define OP_4X_PS_NABS 136 #define OP_4X_PS_ABS 264 #define OP_4X_PS_MERGE00 528 #define OP_4X_PS_MERGE01 560 #define OP_4X_PS_MERGE10 592 #define OP_4X_PS_MERGE11 624 #define SCALAR_NONE 0 #define SCALAR_HIGH (1 << 0) #define SCALAR_LOW (1 << 1) #define SCALAR_NO_PS0 (1 << 2) #define SCALAR_NO_PS1 (1 << 3) #define GQR_ST_TYPE_MASK 0x00000007 #define GQR_ST_TYPE_SHIFT 0 #define GQR_ST_SCALE_MASK 0x00003f00 #define GQR_ST_SCALE_SHIFT 8 #define GQR_LD_TYPE_MASK 0x00070000 #define GQR_LD_TYPE_SHIFT 16 #define GQR_LD_SCALE_MASK 0x3f000000 #define GQR_LD_SCALE_SHIFT 24 #define GQR_QUANTIZE_FLOAT 0 #define GQR_QUANTIZE_U8 4 #define GQR_QUANTIZE_U16 5 #define GQR_QUANTIZE_S8 6 #define GQR_QUANTIZE_S16 7 #define FPU_LS_SINGLE 0 #define FPU_LS_DOUBLE 1 #define FPU_LS_SINGLE_LOW 2 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) { kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]); } static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) { u32 dsisr; u64 msr = kvmppc_get_msr(vcpu); msr = 
kvmppc_set_field(msr, 33, 36, 0); msr = kvmppc_set_field(msr, 42, 47, 0); kvmppc_set_msr(vcpu, msr); kvmppc_set_dar(vcpu, eaddr); /* Page Fault */ dsisr = kvmppc_set_field(0, 33, 33, 1); if (is_store) dsisr = kvmppc_set_field(dsisr, 38, 38, 1); kvmppc_set_dsisr(vcpu, dsisr); kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); } static int kvmppc_emulate_fpr_load(struct kvm_vcpu *vcpu, int rs, ulong addr, int ls_type) { int emulated = EMULATE_FAIL; int r; char tmp[8]; int len = sizeof(u32); if (ls_type == FPU_LS_DOUBLE) len = sizeof(u64); /* read from memory */ r = kvmppc_ld(vcpu, &addr, len, tmp, true); vcpu->arch.paddr_accessed = addr; if (r < 0) { kvmppc_inject_pf(vcpu, addr, false); goto done_load; } else if (r == EMULATE_DO_MMIO) { emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR | rs, len, 1); goto done_load; } emulated = EMULATE_DONE; /* put in registers */ switch (ls_type) { case FPU_LS_SINGLE: kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs)); vcpu->arch.qpr[rs] = *((u32*)tmp); break; case FPU_LS_DOUBLE: VCPU_FPR(vcpu, rs) = *((u64*)tmp); break; } dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp, addr, len); done_load: return emulated; } static int kvmppc_emulate_fpr_store(struct kvm_vcpu *vcpu, int rs, ulong addr, int ls_type) { int emulated = EMULATE_FAIL; int r; char tmp[8]; u64 val; int len; switch (ls_type) { case FPU_LS_SINGLE: kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp); val = *((u32*)tmp); len = sizeof(u32); break; case FPU_LS_SINGLE_LOW: *((u32*)tmp) = VCPU_FPR(vcpu, rs); val = VCPU_FPR(vcpu, rs) & 0xffffffff; len = sizeof(u32); break; case FPU_LS_DOUBLE: *((u64*)tmp) = VCPU_FPR(vcpu, rs); val = VCPU_FPR(vcpu, rs); len = sizeof(u64); break; default: val = 0; len = 0; } r = kvmppc_st(vcpu, &addr, len, tmp, true); vcpu->arch.paddr_accessed = addr; if (r < 0) { kvmppc_inject_pf(vcpu, addr, true); } else if (r == EMULATE_DO_MMIO) { emulated = kvmppc_handle_store(vcpu, val, len, 1); } else { emulated = EMULATE_DONE; } dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n", val, addr, len); return emulated; } static int kvmppc_emulate_psq_load(struct kvm_vcpu *vcpu, int rs, ulong addr, bool w, int i) { int emulated = EMULATE_FAIL; int r; float one = 1.0; u32 tmp[2]; /* read from memory */ if (w) { r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true); memcpy(&tmp[1], &one, sizeof(u32)); } else { r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true); } vcpu->arch.paddr_accessed = addr; if (r < 0) { kvmppc_inject_pf(vcpu, addr, false); goto done_load; } else if ((r == EMULATE_DO_MMIO) && w) { emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR | rs, 4, 1); vcpu->arch.qpr[rs] = tmp[1]; goto done_load; } else if (r == EMULATE_DO_MMIO) { emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FQPR | rs, 8, 1); goto done_load; } emulated = EMULATE_DONE; /* put in registers */ kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs)); vcpu->arch.qpr[rs] = tmp[1]; dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0], tmp[1], addr, w ? 4 : 8); done_load: return emulated; } static int kvmppc_emulate_psq_store(struct kvm_vcpu *vcpu, int rs, ulong addr, bool w, int i) { int emulated = EMULATE_FAIL; int r; u32 tmp[2]; int len = w ? 
sizeof(u32) : sizeof(u64); kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]); tmp[1] = vcpu->arch.qpr[rs]; r = kvmppc_st(vcpu, &addr, len, tmp, true); vcpu->arch.paddr_accessed = addr; if (r < 0) { kvmppc_inject_pf(vcpu, addr, true); } else if ((r == EMULATE_DO_MMIO) && w) { emulated = kvmppc_handle_store(vcpu, tmp[0], 4, 1); } else if (r == EMULATE_DO_MMIO) { u64 val = ((u64)tmp[0] << 32) | tmp[1]; emulated = kvmppc_handle_store(vcpu, val, 8, 1); } else { emulated = EMULATE_DONE; } dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0], tmp[1], addr, len); return emulated; } /* * Cuts out inst bits with ordering according to spec. * That means the leftmost bit is zero. All given bits are included. */ static inline u32 inst_get_field(u32 inst, int msb, int lsb) { return kvmppc_get_field(inst, msb + 32, lsb + 32); } static bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst) { if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)) return false; switch (get_op(inst)) { case OP_PSQ_L: case OP_PSQ_LU: case OP_PSQ_ST: case OP_PSQ_STU: case OP_LFS: case OP_LFSU: case OP_LFD: case OP_LFDU: case OP_STFS: case OP_STFSU: case OP_STFD: case OP_STFDU: return true; case 4: /* X form */ switch (inst_get_field(inst, 21, 30)) { case OP_4X_PS_CMPU0: case OP_4X_PSQ_LX: case OP_4X_PS_CMPO0: case OP_4X_PSQ_LUX: case OP_4X_PS_NEG: case OP_4X_PS_CMPU1: case OP_4X_PS_MR: case OP_4X_PS_CMPO1: case OP_4X_PS_NABS: case OP_4X_PS_ABS: case OP_4X_PS_MERGE00: case OP_4X_PS_MERGE01: case OP_4X_PS_MERGE10: case OP_4X_PS_MERGE11: return true; } /* XW form */ switch (inst_get_field(inst, 25, 30)) { case OP_4XW_PSQ_STX: case OP_4XW_PSQ_STUX: return true; } /* A form */ switch (inst_get_field(inst, 26, 30)) { case OP_4A_PS_SUM1: case OP_4A_PS_SUM0: case OP_4A_PS_MULS0: case OP_4A_PS_MULS1: case OP_4A_PS_MADDS0: case OP_4A_PS_MADDS1: case OP_4A_PS_DIV: case OP_4A_PS_SUB: case OP_4A_PS_ADD: case OP_4A_PS_SEL: case OP_4A_PS_RES: case OP_4A_PS_MUL: case OP_4A_PS_RSQRTE: case OP_4A_PS_MSUB: case OP_4A_PS_MADD: case OP_4A_PS_NMSUB: case OP_4A_PS_NMADD: return true; } break; case 59: switch (inst_get_field(inst, 21, 30)) { case OP_59_FADDS: case OP_59_FSUBS: case OP_59_FDIVS: case OP_59_FRES: case OP_59_FRSQRTES: return true; } switch (inst_get_field(inst, 26, 30)) { case OP_59_FMULS: case OP_59_FMSUBS: case OP_59_FMADDS: case OP_59_FNMSUBS: case OP_59_FNMADDS: return true; } break; case 63: switch (inst_get_field(inst, 21, 30)) { case OP_63_MTFSB0: case OP_63_MTFSB1: case OP_63_MTFSF: case OP_63_MTFSFI: case OP_63_MCRFS: case OP_63_MFFS: case OP_63_FCMPU: case OP_63_FCMPO: case OP_63_FNEG: case OP_63_FMR: case OP_63_FABS: case OP_63_FRSP: case OP_63_FDIV: case OP_63_FADD: case OP_63_FSUB: case OP_63_FCTIW: case OP_63_FCTIWZ: case OP_63_FRSQRTE: case OP_63_FCPSGN: return true; } switch (inst_get_field(inst, 26, 30)) { case OP_63_FMUL: case OP_63_FSEL: case OP_63_FMSUB: case OP_63_FMADD: case OP_63_FNMSUB: case OP_63_FNMADD: return true; } break; case 31: switch (inst_get_field(inst, 21, 30)) { case OP_31_LFSX: case OP_31_LFSUX: case OP_31_LFDX: case OP_31_LFDUX: case OP_31_STFSX: case OP_31_STFSUX: case OP_31_STFX: case OP_31_STFUX: case OP_31_STFIWX: return true; } break; } return false; } static int get_d_signext(u32 inst) { int d = inst & 0x8ff; if (d & 0x800) return -(d & 0x7ff); return (d & 0x7ff); } static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc, int reg_out, int reg_in1, int reg_in2, int reg_in3, int scalar, void (*func)(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2, u32 *src3)) { u32 *qpr = 
vcpu->arch.qpr; u32 ps0_out; u32 ps0_in1, ps0_in2, ps0_in3; u32 ps1_in1, ps1_in2, ps1_in3; /* RC */ WARN_ON(rc); /* PS0 */ kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1); kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2); kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3); if (scalar & SCALAR_LOW) ps0_in2 = qpr[reg_in2]; func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3); dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n", ps0_in1, ps0_in2, ps0_in3, ps0_out); if (!(scalar & SCALAR_NO_PS0)) kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out)); /* PS1 */ ps1_in1 = qpr[reg_in1]; ps1_in2 = qpr[reg_in2]; ps1_in3 = qpr[reg_in3]; if (scalar & SCALAR_HIGH) ps1_in2 = ps0_in2; if (!(scalar & SCALAR_NO_PS1)) func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3); dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n", ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]); return EMULATE_DONE; } static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc, int reg_out, int reg_in1, int reg_in2, int scalar, void (*func)(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2)) { u32 *qpr = vcpu->arch.qpr; u32 ps0_out; u32 ps0_in1, ps0_in2; u32 ps1_out; u32 ps1_in1, ps1_in2; /* RC */ WARN_ON(rc); /* PS0 */ kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1); if (scalar & SCALAR_LOW) ps0_in2 = qpr[reg_in2]; else kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2); func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2); if (!(scalar & SCALAR_NO_PS0)) { dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n", ps0_in1, ps0_in2, ps0_out); kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out)); } /* PS1 */ ps1_in1 = qpr[reg_in1]; ps1_in2 = qpr[reg_in2]; if (scalar & SCALAR_HIGH) ps1_in2 = ps0_in2; func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2); if (!(scalar & SCALAR_NO_PS1)) { qpr[reg_out] = ps1_out; dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n", ps1_in1, ps1_in2, qpr[reg_out]); } return EMULATE_DONE; } static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, int reg_out, int reg_in, void (*func)(u64 *t, u32 *dst, u32 *src1)) { u32 *qpr = vcpu->arch.qpr; u32 ps0_out, ps0_in; u32 ps1_in; /* RC */ WARN_ON(rc); /* PS0 */ kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in); func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in); dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n", ps0_in, ps0_out); kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out)); /* PS1 */ ps1_in = qpr[reg_in]; func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in); dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n", ps1_in, qpr[reg_out]); return EMULATE_DONE; } int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu) { u32 inst; ppc_inst_t pinst; enum emulation_result emulated = EMULATE_DONE; int ax_rd, ax_ra, ax_rb, ax_rc; short full_d; u64 *fpr_d, *fpr_a, *fpr_b, *fpr_c; bool rcomp; u32 cr; #ifdef DEBUG int i; #endif emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst); inst = ppc_inst_val(pinst); if (emulated != EMULATE_DONE) return emulated; ax_rd = inst_get_field(inst, 6, 10); ax_ra = inst_get_field(inst, 11, 15); ax_rb = inst_get_field(inst, 16, 20); ax_rc = inst_get_field(inst, 21, 25); full_d = inst_get_field(inst, 16, 31); fpr_d = &VCPU_FPR(vcpu, ax_rd); fpr_a = &VCPU_FPR(vcpu, ax_ra); fpr_b = &VCPU_FPR(vcpu, ax_rb); fpr_c = &VCPU_FPR(vcpu, ax_rc); rcomp = (inst & 1) ? 
true : false; cr = kvmppc_get_cr(vcpu); if (!kvmppc_inst_is_paired_single(vcpu, inst)) return EMULATE_FAIL; if (!(kvmppc_get_msr(vcpu) & MSR_FP)) { kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); return EMULATE_AGAIN; } kvmppc_giveup_ext(vcpu, MSR_FP); preempt_disable(); enable_kernel_fp(); /* Do we need to clear FE0 / FE1 here? Don't think so. */ #ifdef DEBUG for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) { u32 f; kvm_cvt_df(&VCPU_FPR(vcpu, i), &f); dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]); } #endif switch (get_op(inst)) { case OP_PSQ_L: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; bool w = inst_get_field(inst, 16, 16) ? true : false; int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i); break; } case OP_PSQ_LU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra); bool w = inst_get_field(inst, 16, 16) ? true : false; int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_PSQ_ST: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; bool w = inst_get_field(inst, 16, 16) ? true : false; int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i); break; } case OP_PSQ_STU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra); bool w = inst_get_field(inst, 16, 16) ? true : false; int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case 4: /* X form */ switch (inst_get_field(inst, 21, 30)) { case OP_4X_PS_CMPU0: /* XXX */ emulated = EMULATE_FAIL; break; case OP_4X_PSQ_LX: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; bool w = inst_get_field(inst, 21, 21) ? true : false; int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i); break; } case OP_4X_PS_CMPO0: /* XXX */ emulated = EMULATE_FAIL; break; case OP_4X_PSQ_LUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra); bool w = inst_get_field(inst, 21, 21) ? 
true : false; int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_4X_PS_NEG: VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb); VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; vcpu->arch.qpr[ax_rd] ^= 0x80000000; break; case OP_4X_PS_CMPU1: /* XXX */ emulated = EMULATE_FAIL; break; case OP_4X_PS_MR: WARN_ON(rcomp); VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb); vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; break; case OP_4X_PS_CMPO1: /* XXX */ emulated = EMULATE_FAIL; break; case OP_4X_PS_NABS: WARN_ON(rcomp); VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb); VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; vcpu->arch.qpr[ax_rd] |= 0x80000000; break; case OP_4X_PS_ABS: WARN_ON(rcomp); VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb); VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; vcpu->arch.qpr[ax_rd] &= ~0x80000000; break; case OP_4X_PS_MERGE00: WARN_ON(rcomp); VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra); /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */ kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb), &vcpu->arch.qpr[ax_rd]); break; case OP_4X_PS_MERGE01: WARN_ON(rcomp); VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra); vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; break; case OP_4X_PS_MERGE10: WARN_ON(rcomp); /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */ kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], &VCPU_FPR(vcpu, ax_rd)); /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */ kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb), &vcpu->arch.qpr[ax_rd]); break; case OP_4X_PS_MERGE11: WARN_ON(rcomp); /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */ kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], &VCPU_FPR(vcpu, ax_rd)); vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; break; } /* XW form */ switch (inst_get_field(inst, 25, 30)) { case OP_4XW_PSQ_STX: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; bool w = inst_get_field(inst, 21, 21) ? true : false; int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i); break; } case OP_4XW_PSQ_STUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra); bool w = inst_get_field(inst, 21, 21) ? 
true : false; int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } } /* A form */ switch (inst_get_field(inst, 26, 30)) { case OP_4A_PS_SUM1: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds); VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc); break; case OP_4A_PS_SUM0: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds); vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc]; break; case OP_4A_PS_MULS0: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls); break; case OP_4A_PS_MULS1: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, SCALAR_LOW, fps_fmuls); break; case OP_4A_PS_MADDS0: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds); break; case OP_4A_PS_MADDS1: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds); break; case OP_4A_PS_DIV: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rb, SCALAR_NONE, fps_fdivs); break; case OP_4A_PS_SUB: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rb, SCALAR_NONE, fps_fsubs); break; case OP_4A_PS_ADD: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rb, SCALAR_NONE, fps_fadds); break; case OP_4A_PS_SEL: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel); break; case OP_4A_PS_RES: emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd, ax_rb, fps_fres); break; case OP_4A_PS_MUL: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, SCALAR_NONE, fps_fmuls); break; case OP_4A_PS_RSQRTE: emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd, ax_rb, fps_frsqrte); break; case OP_4A_PS_MSUB: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs); break; case OP_4A_PS_MADD: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds); break; case OP_4A_PS_NMSUB: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs); break; case OP_4A_PS_NMADD: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds); break; } break; /* Real FPU operations */ case OP_LFS: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } case OP_LFSU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_LFD: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } case OP_LFDU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_STFS: { ulong addr = (ax_ra ? 
kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } case OP_STFSU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_STFD: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } case OP_STFDU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case 31: switch (inst_get_field(inst, 21, 30)) { case OP_31_LFSX: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } case OP_31_LFSUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_31_LFDX: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } case OP_31_LFDUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_31_STFSX: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } case OP_31_STFSUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_31_STFX: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } case OP_31_STFUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_31_STFIWX: { ulong addr = (ax_ra ? 
kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_SINGLE_LOW); break; } break; } break; case 59: switch (inst_get_field(inst, 21, 30)) { case OP_59_FADDS: fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FSUBS: fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FDIVS: fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FRES: fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FRSQRTES: fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; } switch (inst_get_field(inst, 26, 30)) { case OP_59_FMULS: fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FMSUBS: fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FMADDS: fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FNMSUBS: fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FNMADDS: fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; } break; case 63: switch (inst_get_field(inst, 21, 30)) { case OP_63_MTFSB0: case OP_63_MTFSB1: case OP_63_MCRFS: case OP_63_MTFSFI: /* XXX need to implement */ break; case OP_63_MFFS: /* XXX missing CR */ *fpr_d = vcpu->arch.fp.fpscr; break; case OP_63_MTFSF: /* XXX missing fm bits */ /* XXX missing CR */ vcpu->arch.fp.fpscr = *fpr_b; break; case OP_63_FCMPU: { u32 tmp_cr; u32 cr0_mask = 0xf0000000; u32 cr_shift = inst_get_field(inst, 6, 8) * 4; fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b); cr &= ~(cr0_mask >> cr_shift); cr |= (cr & cr0_mask) >> cr_shift; break; } case OP_63_FCMPO: { u32 tmp_cr; u32 cr0_mask = 0xf0000000; u32 cr_shift = inst_get_field(inst, 6, 8) * 4; fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b); cr &= ~(cr0_mask >> cr_shift); cr |= (cr & cr0_mask) >> cr_shift; break; } case OP_63_FNEG: fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FMR: *fpr_d = *fpr_b; break; case OP_63_FABS: fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FCPSGN: fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FDIV: fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FADD: fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FSUB: fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FCTIW: fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FCTIWZ: fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FRSP: fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_63_FRSQRTE: { double one = 1.0f; /* fD = sqrt(fB) */ fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); /* fD = 1.0f / fD */ fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d); break; } } switch (inst_get_field(inst, 26, 30)) { case OP_63_FMUL: fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c); break; case OP_63_FSEL: fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FMSUB: fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FMADD: 
fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FNMSUB: fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FNMADD: fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; } break; } #ifdef DEBUG for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) { u32 f; kvm_cvt_df(&VCPU_FPR(vcpu, i), &f); dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); } #endif if (rcomp) kvmppc_set_cr(vcpu, cr); disable_kernel_fp(); preempt_enable(); return emulated; }
linux-master
arch/powerpc/kvm/book3s_paired_singles.c
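Most of the decoding in book3s_paired_singles.c goes through inst_get_field(), which (per its comment) numbers bits IBM-style, with bit 0 as the most significant bit of the instruction word. The small equivalent below restates that in conventional shift-and-mask terms; it assumes kvmppc_get_field() behaves as a plain big-endian-numbered bit-field extract, and the helper name is hypothetical:

#include <stdint.h>

/*
 * Illustrative equivalent of inst_get_field(inst, msb, lsb) from the file
 * above: extract instruction bits msb..lsb (inclusive), numbered IBM-style
 * with bit 0 as the most significant bit of the 32-bit word.
 */
static uint32_t ibm_field(uint32_t inst, int msb, int lsb)
{
	int width = lsb - msb + 1;
	int shift = 31 - lsb;	/* distance of the field's LSB from bit 31 */

	return (inst >> shift) & ((1u << width) - 1);
}

With this reading, ax_rd = inst_get_field(inst, 6, 10) is (inst >> 21) & 0x1f, i.e. the usual RT/FRT field.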
// SPDX-License-Identifier: GPL-2.0-or-later /* * Hypervisor Maintenance Interrupt (HMI) handling. * * Copyright 2015 IBM Corporation * Author: Mahesh Salgaonkar <[email protected]> */ #undef DEBUG #include <linux/types.h> #include <linux/compiler.h> #include <asm/paca.h> #include <asm/hmi.h> #include <asm/processor.h> void wait_for_subcore_guest_exit(void) { int i; /* * NULL bitmap pointer indicates that KVM module hasn't * been loaded yet and hence no guests are running, or running * on POWER9 or newer CPU. * * If no KVM is in use, no need to co-ordinate among threads * as all of them will always be in host and no one is going * to modify TB other than the opal hmi handler. * * POWER9 and newer don't need this synchronisation. * * Hence, just return from here. */ if (!local_paca->sibling_subcore_state) return; for (i = 0; i < MAX_SUBCORE_PER_CORE; i++) while (local_paca->sibling_subcore_state->in_guest[i]) cpu_relax(); } void wait_for_tb_resync(void) { if (!local_paca->sibling_subcore_state) return; while (test_bit(CORE_TB_RESYNC_REQ_BIT, &local_paca->sibling_subcore_state->flags)) cpu_relax(); }
linux-master
arch/powerpc/kvm/book3s_hv_hmi.c
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <[email protected]> */ #include <linux/types.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/highmem.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/book3s/64/mmu-hash.h> /* #define DEBUG_MMU */ #ifdef DEBUG_MMU #define dprintk(X...) printk(KERN_INFO X) #else #define dprintk(X...) do { } while(0) #endif static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( struct kvm_vcpu *vcpu, gva_t eaddr) { int i; u64 esid = GET_ESID(eaddr); u64 esid_1t = GET_ESID_1T(eaddr); for (i = 0; i < vcpu->arch.slb_nr; i++) { u64 cmp_esid = esid; if (!vcpu->arch.slb[i].valid) continue; if (vcpu->arch.slb[i].tb) cmp_esid = esid_1t; if (vcpu->arch.slb[i].esid == cmp_esid) return &vcpu->arch.slb[i]; } dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n", eaddr, esid, esid_1t); for (i = 0; i < vcpu->arch.slb_nr; i++) { if (vcpu->arch.slb[i].vsid) dprintk(" %d: %c%c%c %llx %llx\n", i, vcpu->arch.slb[i].valid ? 'v' : ' ', vcpu->arch.slb[i].large ? 'l' : ' ', vcpu->arch.slb[i].tb ? 't' : ' ', vcpu->arch.slb[i].esid, vcpu->arch.slb[i].vsid); } return NULL; } static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe) { return slbe->tb ? SID_SHIFT_1T : SID_SHIFT; } static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe) { return (1ul << kvmppc_slb_sid_shift(slbe)) - 1; } static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr) { eaddr &= kvmppc_slb_offset_mask(slb); return (eaddr >> VPN_SHIFT) | ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT)); } static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, bool data) { struct kvmppc_slb *slb; slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); if (!slb) return 0; return kvmppc_slb_calc_vpn(slb, eaddr); } static int mmu_pagesize(int mmu_pg) { switch (mmu_pg) { case MMU_PAGE_64K: return 16; case MMU_PAGE_16M: return 24; } return 12; } static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe) { return mmu_pagesize(slbe->base_page_size); } static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr) { int p = kvmppc_mmu_book3s_64_get_pagesize(slbe); return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p); } static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu, struct kvmppc_slb *slbe, gva_t eaddr, bool second) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); u64 hash, pteg, htabsize; u32 ssize; hva_t r; u64 vpn; htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1); vpn = kvmppc_slb_calc_vpn(slbe, eaddr); ssize = slbe->tb ? 
MMU_SEGSIZE_1T : MMU_SEGSIZE_256M; hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize); if (second) hash = ~hash; hash &= ((1ULL << 39ULL) - 1ULL); hash &= htabsize; hash <<= 7ULL; pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL; pteg |= hash; dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n", page, vcpu_book3s->sdr1, pteg, slbe->vsid); /* When running a PAPR guest, SDR1 contains a HVA address instead of a GPA */ if (vcpu->arch.papr_enabled) r = pteg; else r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT); if (kvm_is_error_hva(r)) return r; return r | (pteg & ~PAGE_MASK); } static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr) { int p = kvmppc_mmu_book3s_64_get_pagesize(slbe); u64 avpn; avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr); avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p); if (p < 16) avpn >>= ((80 - p) - 56) - 8; /* 16 - p */ else avpn <<= p - 16; return avpn; } /* * Return page size encoded in the second word of a HPTE, or * -1 for an invalid encoding for the base page size indicated by * the SLB entry. This doesn't handle mixed pagesize segments yet. */ static int decode_pagesize(struct kvmppc_slb *slbe, u64 r) { switch (slbe->base_page_size) { case MMU_PAGE_64K: if ((r & 0xf000) == 0x1000) return MMU_PAGE_64K; break; case MMU_PAGE_16M: if ((r & 0xff000) == 0) return MMU_PAGE_16M; break; } return -1; } static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite) { struct kvmppc_slb *slbe; hva_t ptegp; u64 pteg[16]; u64 avpn = 0; u64 r; u64 v_val, v_mask; u64 eaddr_mask; int i; u8 pp, key = 0; bool found = false; bool second = false; int pgsize; ulong mp_ea = vcpu->arch.magic_page_ea; /* Magic page override */ if (unlikely(mp_ea) && unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && !(kvmppc_get_msr(vcpu) & MSR_PR)) { gpte->eaddr = eaddr; gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); gpte->raddr &= KVM_PAM; gpte->may_execute = true; gpte->may_read = true; gpte->may_write = true; gpte->page_size = MMU_PAGE_4K; gpte->wimg = HPTE_R_M; return 0; } slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); if (!slbe) goto no_seg_found; avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr); v_val = avpn & HPTE_V_AVPN; if (slbe->tb) v_val |= SLB_VSID_B_1T; if (slbe->large) v_val |= HPTE_V_LARGE; v_val |= HPTE_V_VALID; v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID | HPTE_V_SECONDARY; pgsize = slbe->large ? 
MMU_PAGE_16M : MMU_PAGE_4K; mutex_lock(&vcpu->kvm->arch.hpt_mutex); do_second: ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second); if (kvm_is_error_hva(ptegp)) goto no_page_found; if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) { printk_ratelimited(KERN_ERR "KVM: Can't copy data from 0x%lx!\n", ptegp); goto no_page_found; } if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp) key = 4; else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks) key = 4; for (i=0; i<16; i+=2) { u64 pte0 = be64_to_cpu(pteg[i]); u64 pte1 = be64_to_cpu(pteg[i + 1]); /* Check all relevant fields of 1st dword */ if ((pte0 & v_mask) == v_val) { /* If large page bit is set, check pgsize encoding */ if (slbe->large && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { pgsize = decode_pagesize(slbe, pte1); if (pgsize < 0) continue; } found = true; break; } } if (!found) { if (second) goto no_page_found; v_val |= HPTE_V_SECONDARY; second = true; goto do_second; } r = be64_to_cpu(pteg[i+1]); pp = (r & HPTE_R_PP) | key; if (r & HPTE_R_PP0) pp |= 8; gpte->eaddr = eaddr; gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1; gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask); gpte->page_size = pgsize; gpte->may_execute = ((r & HPTE_R_N) ? false : true); if (unlikely(vcpu->arch.disable_kernel_nx) && !(kvmppc_get_msr(vcpu) & MSR_PR)) gpte->may_execute = true; gpte->may_read = false; gpte->may_write = false; gpte->wimg = r & HPTE_R_WIMG; switch (pp) { case 0: case 1: case 2: case 6: gpte->may_write = true; fallthrough; case 3: case 5: case 7: case 10: gpte->may_read = true; break; } dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx " "-> 0x%lx\n", eaddr, avpn, gpte->vpage, gpte->raddr); /* Update PTE R and C bits, so the guest's swapper knows we used the * page */ if (gpte->may_read && !(r & HPTE_R_R)) { /* * Set the accessed flag. * We have to write this back with a single byte write * because another vcpu may be accessing this on * non-PAPR platforms such as mac99, and this is * what real hardware does. */ char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64)); r |= HPTE_R_R; put_user(r >> 8, addr + 6); } if (iswrite && gpte->may_write && !(r & HPTE_R_C)) { /* Set the dirty flag */ /* Use a single byte write */ char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64)); r |= HPTE_R_C; put_user(r, addr + 7); } mutex_unlock(&vcpu->kvm->arch.hpt_mutex); if (!gpte->may_read || (iswrite && !gpte->may_write)) return -EPERM; return 0; no_page_found: mutex_unlock(&vcpu->kvm->arch.hpt_mutex); return -ENOENT; no_seg_found: dprintk("KVM MMU: Trigger segment fault\n"); return -EINVAL; } static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb) { u64 esid, esid_1t; int slb_nr; struct kvmppc_slb *slbe; dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb); esid = GET_ESID(rb); esid_1t = GET_ESID_1T(rb); slb_nr = rb & 0xfff; if (slb_nr > vcpu->arch.slb_nr) return; slbe = &vcpu->arch.slb[slb_nr]; slbe->large = (rs & SLB_VSID_L) ? 1 : 0; slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0; slbe->esid = slbe->tb ? esid_1t : esid; slbe->vsid = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16); slbe->valid = (rb & SLB_ESID_V) ? 1 : 0; slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0; slbe->Kp = (rs & SLB_VSID_KP) ? 1 : 0; slbe->nx = (rs & SLB_VSID_N) ? 1 : 0; slbe->class = (rs & SLB_VSID_C) ? 
1 : 0; slbe->base_page_size = MMU_PAGE_4K; if (slbe->large) { if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) { switch (rs & SLB_VSID_LP) { case SLB_VSID_LP_00: slbe->base_page_size = MMU_PAGE_16M; break; case SLB_VSID_LP_01: slbe->base_page_size = MMU_PAGE_64K; break; } } else slbe->base_page_size = MMU_PAGE_16M; } slbe->orige = rb & (ESID_MASK | SLB_ESID_V); slbe->origv = rs; /* Map the new segment */ kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT); } static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb) { struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); if (slbe) { *ret_slb = slbe->origv; return 0; } *ret_slb = 0; return -ENOENT; } static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr) { struct kvmppc_slb *slbe; if (slb_nr > vcpu->arch.slb_nr) return 0; slbe = &vcpu->arch.slb[slb_nr]; return slbe->orige; } static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr) { struct kvmppc_slb *slbe; if (slb_nr > vcpu->arch.slb_nr) return 0; slbe = &vcpu->arch.slb[slb_nr]; return slbe->origv; } static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea) { struct kvmppc_slb *slbe; u64 seg_size; dprintk("KVM MMU: slbie(0x%llx)\n", ea); slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); if (!slbe) return; dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid); slbe->valid = false; slbe->orige = 0; slbe->origv = 0; seg_size = 1ull << kvmppc_slb_sid_shift(slbe); kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size); } static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) { int i; dprintk("KVM MMU: slbia()\n"); for (i = 1; i < vcpu->arch.slb_nr; i++) { vcpu->arch.slb[i].valid = false; vcpu->arch.slb[i].orige = 0; vcpu->arch.slb[i].origv = 0; } if (kvmppc_get_msr(vcpu) & MSR_IR) { kvmppc_mmu_flush_segments(vcpu); kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); } } static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, ulong value) { u64 rb = 0, rs = 0; /* * According to Book3 2.01 mtsrin is implemented as: * * The SLB entry specified by (RB)32:35 is loaded from register * RS, as follows. * * SLBE Bit Source SLB Field * * 0:31 0x0000_0000 ESID-0:31 * 32:35 (RB)32:35 ESID-32:35 * 36 0b1 V * 37:61 0x00_0000|| 0b0 VSID-0:24 * 62:88 (RS)37:63 VSID-25:51 * 89:91 (RS)33:35 Ks Kp N * 92 (RS)36 L ((RS)36 must be 0b0) * 93 0b0 C */ dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value); /* ESID = srnum */ rb |= (srnum & 0xf) << 28; /* Set the valid bit */ rb |= 1 << 27; /* Index = ESID */ rb |= srnum; /* VSID = VSID */ rs |= (value & 0xfffffff) << 12; /* flags = flags */ rs |= ((value >> 28) & 0x7) << 9; kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb); } static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va, bool large) { u64 mask = 0xFFFFFFFFFULL; unsigned long i; struct kvm_vcpu *v; dprintk("KVM MMU: tlbie(0x%lx)\n", va); /* * The tlbie instruction changed behaviour starting with * POWER6. POWER6 and later don't have the large page flag * in the instruction but in the RB value, along with bits * indicating page and segment sizes. */ if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) { /* POWER6 or later */ if (va & 1) { /* L bit */ if ((va & 0xf000) == 0x1000) mask = 0xFFFFFFFF0ULL; /* 64k page */ else mask = 0xFFFFFF000ULL; /* 16M page */ } } else { /* older processors, e.g. 
PPC970 */ if (large) mask = 0xFFFFFF000ULL; } /* flush this VA on all vcpus */ kvm_for_each_vcpu(i, v, vcpu->kvm) kvmppc_mmu_pte_vflush(v, va >> 12, mask); } #ifdef CONFIG_PPC_64K_PAGES static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid) { ulong mp_ea = vcpu->arch.magic_page_ea; return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) && (mp_ea >> SID_SHIFT) == esid; } #endif static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid) { ulong ea = esid << SID_SHIFT; struct kvmppc_slb *slb; u64 gvsid = esid; ulong mp_ea = vcpu->arch.magic_page_ea; int pagesize = MMU_PAGE_64K; u64 msr = kvmppc_get_msr(vcpu); if (msr & (MSR_DR|MSR_IR)) { slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); if (slb) { gvsid = slb->vsid; pagesize = slb->base_page_size; if (slb->tb) { gvsid <<= SID_SHIFT_1T - SID_SHIFT; gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1); gvsid |= VSID_1T; } } } switch (msr & (MSR_DR|MSR_IR)) { case 0: gvsid = VSID_REAL | esid; break; case MSR_IR: gvsid |= VSID_REAL_IR; break; case MSR_DR: gvsid |= VSID_REAL_DR; break; case MSR_DR|MSR_IR: if (!slb) goto no_slb; break; default: BUG(); break; } #ifdef CONFIG_PPC_64K_PAGES /* * Mark this as a 64k segment if the host is using * 64k pages, the host MMU supports 64k pages and * the guest segment page size is >= 64k, * but not if this segment contains the magic page. */ if (pagesize >= MMU_PAGE_64K && mmu_psize_defs[MMU_PAGE_64K].shift && !segment_contains_magic_page(vcpu, esid)) gvsid |= VSID_64K; #endif if (kvmppc_get_msr(vcpu) & MSR_PR) gvsid |= VSID_PR; *vsid = gvsid; return 0; no_slb: /* Catch magic page case */ if (unlikely(mp_ea) && unlikely(esid == (mp_ea >> SID_SHIFT)) && !(kvmppc_get_msr(vcpu) & MSR_PR)) { *vsid = VSID_REAL | esid; return 0; } return -EINVAL; } static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu) { return (to_book3s(vcpu)->hid[5] & 0x80); } void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu) { struct kvmppc_mmu *mmu = &vcpu->arch.mmu; mmu->mfsrin = NULL; mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin; mmu->slbmte = kvmppc_mmu_book3s_64_slbmte; mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee; mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev; mmu->slbfee = kvmppc_mmu_book3s_64_slbfee; mmu->slbie = kvmppc_mmu_book3s_64_slbie; mmu->slbia = kvmppc_mmu_book3s_64_slbia; mmu->xlate = kvmppc_mmu_book3s_64_xlate; mmu->tlbie = kvmppc_mmu_book3s_64_tlbie; mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid; mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp; mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32; vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; }
linux-master
arch/powerpc/kvm/book3s_64_mmu.c
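The mtsrin emulation in the file above documents (in its comment table) how a 32-bit segment-register write is re-encoded into the RB/RS operand pair that kvmppc_mmu_book3s_64_slbmte() expects. Below is a minimal standalone sketch of just that bit-shuffling, buildable outside the kernel; the function name srnum_value_to_slb, the fixed-width types and the printf driver are invented for illustration and are not part of book3s_64_mmu.c.

/*
 * Illustrative sketch only: mirrors the RB/RS construction performed by
 * kvmppc_mmu_book3s_64_mtsrin(), per the Book3 2.01 table in its comment.
 */
#include <stdint.h>
#include <stdio.h>

static void srnum_value_to_slb(uint32_t srnum, uint32_t value,
			       uint64_t *rb, uint64_t *rs)
{
	*rb = 0;
	*rs = 0;

	/* ESID-32:35 come from the segment register number */
	*rb |= (uint64_t)(srnum & 0xf) << 28;
	/* bit 27 is the valid (V) bit */
	*rb |= 1u << 27;
	/* SLB index = segment register number */
	*rb |= srnum;

	/* VSID-25:51 come from SR bits 37:63, shifted into the RS word */
	*rs |= (uint64_t)(value & 0xfffffff) << 12;
	/* Ks/Kp/N flags come from SR bits 33:35 */
	*rs |= (uint64_t)((value >> 28) & 0x7) << 9;
}

int main(void)
{
	uint64_t rb, rs;

	srnum_value_to_slb(0x3, 0x20001234, &rb, &rs);
	printf("rb=0x%llx rs=0x%llx\n",
	       (unsigned long long)rb, (unsigned long long)rs);
	return 0;
}

Only three flag bits of the segment register survive the translation because they land in the Ks/Kp/N positions of the VSID word; the large-page bit (RS)36 is required to be zero, which is why it never appears in the construction.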
/* * OpenPIC emulation * * Copyright (c) 2004 Jocelyn Mayer * 2011 Alexander Graf * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <linux/slab.h> #include <linux/mutex.h> #include <linux/kvm_host.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/anon_inodes.h> #include <linux/uaccess.h> #include <asm/mpic.h> #include <asm/kvm_para.h> #include <asm/kvm_ppc.h> #include <kvm/iodev.h> #define MAX_CPU 32 #define MAX_SRC 256 #define MAX_TMR 4 #define MAX_IPI 4 #define MAX_MSI 8 #define MAX_IRQ (MAX_SRC + MAX_IPI + MAX_TMR) #define VID 0x03 /* MPIC version ID */ /* OpenPIC capability flags */ #define OPENPIC_FLAG_IDR_CRIT (1 << 0) #define OPENPIC_FLAG_ILR (2 << 0) /* OpenPIC address map */ #define OPENPIC_REG_SIZE 0x40000 #define OPENPIC_GLB_REG_START 0x0 #define OPENPIC_GLB_REG_SIZE 0x10F0 #define OPENPIC_TMR_REG_START 0x10F0 #define OPENPIC_TMR_REG_SIZE 0x220 #define OPENPIC_MSI_REG_START 0x1600 #define OPENPIC_MSI_REG_SIZE 0x200 #define OPENPIC_SUMMARY_REG_START 0x3800 #define OPENPIC_SUMMARY_REG_SIZE 0x800 #define OPENPIC_SRC_REG_START 0x10000 #define OPENPIC_SRC_REG_SIZE (MAX_SRC * 0x20) #define OPENPIC_CPU_REG_START 0x20000 #define OPENPIC_CPU_REG_SIZE (0x100 + ((MAX_CPU - 1) * 0x1000)) struct fsl_mpic_info { int max_ext; }; static struct fsl_mpic_info fsl_mpic_20 = { .max_ext = 12, }; static struct fsl_mpic_info fsl_mpic_42 = { .max_ext = 12, }; #define FRR_NIRQ_SHIFT 16 #define FRR_NCPU_SHIFT 8 #define FRR_VID_SHIFT 0 #define VID_REVISION_1_2 2 #define VID_REVISION_1_3 3 #define VIR_GENERIC 0x00000000 /* Generic Vendor ID */ #define GCR_RESET 0x80000000 #define GCR_MODE_PASS 0x00000000 #define GCR_MODE_MIXED 0x20000000 #define GCR_MODE_PROXY 0x60000000 #define TBCR_CI 0x80000000 /* count inhibit */ #define TCCR_TOG 0x80000000 /* toggles when decrement to zero */ #define IDR_EP_SHIFT 31 #define IDR_EP_MASK (1 << IDR_EP_SHIFT) #define IDR_CI0_SHIFT 30 #define IDR_CI1_SHIFT 29 #define IDR_P1_SHIFT 1 #define IDR_P0_SHIFT 0 #define ILR_INTTGT_MASK 0x000000ff #define ILR_INTTGT_INT 0x00 #define ILR_INTTGT_CINT 0x01 /* critical */ #define ILR_INTTGT_MCP 0x02 /* machine check */ #define NUM_OUTPUTS 3 #define MSIIR_OFFSET 0x140 #define MSIIR_SRS_SHIFT 29 #define MSIIR_SRS_MASK (0x7 << MSIIR_SRS_SHIFT) #define MSIIR_IBS_SHIFT 24 #define MSIIR_IBS_MASK (0x1f << MSIIR_IBS_SHIFT) static int get_current_cpu(void) { #if defined(CONFIG_KVM) && defined(CONFIG_BOOKE) struct kvm_vcpu *vcpu = current->thread.kvm_vcpu; return vcpu ? 
vcpu->arch.irq_cpu_id : -1; #else /* XXX */ return -1; #endif } static int openpic_cpu_write_internal(void *opaque, gpa_t addr, u32 val, int idx); static int openpic_cpu_read_internal(void *opaque, gpa_t addr, u32 *ptr, int idx); static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ, uint32_t val); enum irq_type { IRQ_TYPE_NORMAL = 0, IRQ_TYPE_FSLINT, /* FSL internal interrupt -- level only */ IRQ_TYPE_FSLSPECIAL, /* FSL timer/IPI interrupt, edge, no polarity */ }; struct irq_queue { /* Round up to the nearest 64 IRQs so that the queue length * won't change when moving between 32 and 64 bit hosts. */ unsigned long queue[BITS_TO_LONGS((MAX_IRQ + 63) & ~63)]; int next; int priority; }; struct irq_source { uint32_t ivpr; /* IRQ vector/priority register */ uint32_t idr; /* IRQ destination register */ uint32_t destmask; /* bitmap of CPU destinations */ int last_cpu; int output; /* IRQ level, e.g. ILR_INTTGT_INT */ int pending; /* TRUE if IRQ is pending */ enum irq_type type; bool level:1; /* level-triggered */ bool nomask:1; /* critical interrupts ignore mask on some FSL MPICs */ }; #define IVPR_MASK_SHIFT 31 #define IVPR_MASK_MASK (1 << IVPR_MASK_SHIFT) #define IVPR_ACTIVITY_SHIFT 30 #define IVPR_ACTIVITY_MASK (1 << IVPR_ACTIVITY_SHIFT) #define IVPR_MODE_SHIFT 29 #define IVPR_MODE_MASK (1 << IVPR_MODE_SHIFT) #define IVPR_POLARITY_SHIFT 23 #define IVPR_POLARITY_MASK (1 << IVPR_POLARITY_SHIFT) #define IVPR_SENSE_SHIFT 22 #define IVPR_SENSE_MASK (1 << IVPR_SENSE_SHIFT) #define IVPR_PRIORITY_MASK (0xF << 16) #define IVPR_PRIORITY(_ivprr_) ((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16)) #define IVPR_VECTOR(opp, _ivprr_) ((_ivprr_) & (opp)->vector_mask) /* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */ #define IDR_EP 0x80000000 /* external pin */ #define IDR_CI 0x40000000 /* critical interrupt */ struct irq_dest { struct kvm_vcpu *vcpu; int32_t ctpr; /* CPU current task priority */ struct irq_queue raised; struct irq_queue servicing; /* Count of IRQ sources asserting on non-INT outputs */ uint32_t outputs_active[NUM_OUTPUTS]; }; #define MAX_MMIO_REGIONS 10 struct openpic { struct kvm *kvm; struct kvm_device *dev; struct kvm_io_device mmio; const struct mem_reg *mmio_regions[MAX_MMIO_REGIONS]; int num_mmio_regions; gpa_t reg_base; spinlock_t lock; /* Behavior control */ struct fsl_mpic_info *fsl; uint32_t model; uint32_t flags; uint32_t nb_irqs; uint32_t vid; uint32_t vir; /* Vendor identification register */ uint32_t vector_mask; uint32_t tfrr_reset; uint32_t ivpr_reset; uint32_t idr_reset; uint32_t brr1; uint32_t mpic_mode_mask; /* Global registers */ uint32_t frr; /* Feature reporting register */ uint32_t gcr; /* Global configuration register */ uint32_t pir; /* Processor initialization register */ uint32_t spve; /* Spurious vector register */ uint32_t tfrr; /* Timer frequency reporting register */ /* Source registers */ struct irq_source src[MAX_IRQ]; /* Local registers per output pin */ struct irq_dest dst[MAX_CPU]; uint32_t nb_cpus; /* Timer registers */ struct { uint32_t tccr; /* Global timer current count register */ uint32_t tbcr; /* Global timer base count register */ } timers[MAX_TMR]; /* Shared MSI registers */ struct { uint32_t msir; /* Shared Message Signaled Interrupt Register */ } msi[MAX_MSI]; uint32_t max_irq; uint32_t irq_ipi0; uint32_t irq_tim0; uint32_t irq_msi; }; static void mpic_irq_raise(struct openpic *opp, struct irq_dest *dst, int output) { struct kvm_interrupt irq = { .irq = KVM_INTERRUPT_SET_LEVEL, }; if (!dst->vcpu) { pr_debug("%s: destination cpu %d 
does not exist\n", __func__, (int)(dst - &opp->dst[0])); return; } pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id, output); if (output != ILR_INTTGT_INT) /* TODO */ return; kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq); } static void mpic_irq_lower(struct openpic *opp, struct irq_dest *dst, int output) { if (!dst->vcpu) { pr_debug("%s: destination cpu %d does not exist\n", __func__, (int)(dst - &opp->dst[0])); return; } pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id, output); if (output != ILR_INTTGT_INT) /* TODO */ return; kvmppc_core_dequeue_external(dst->vcpu); } static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ) { set_bit(n_IRQ, q->queue); } static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ) { clear_bit(n_IRQ, q->queue); } static void IRQ_check(struct openpic *opp, struct irq_queue *q) { int irq = -1; int next = -1; int priority = -1; for (;;) { irq = find_next_bit(q->queue, opp->max_irq, irq + 1); if (irq == opp->max_irq) break; pr_debug("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n", irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority); if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) { next = irq; priority = IVPR_PRIORITY(opp->src[irq].ivpr); } } q->next = next; q->priority = priority; } static int IRQ_get_next(struct openpic *opp, struct irq_queue *q) { /* XXX: optimize */ IRQ_check(opp, q); return q->next; } static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ, bool active, bool was_active) { struct irq_dest *dst; struct irq_source *src; int priority; dst = &opp->dst[n_CPU]; src = &opp->src[n_IRQ]; pr_debug("%s: IRQ %d active %d was %d\n", __func__, n_IRQ, active, was_active); if (src->output != ILR_INTTGT_INT) { pr_debug("%s: output %d irq %d active %d was %d count %d\n", __func__, src->output, n_IRQ, active, was_active, dst->outputs_active[src->output]); /* On Freescale MPIC, critical interrupts ignore priority, * IACK, EOI, etc. Before MPIC v4.1 they also ignore * masking. */ if (active) { if (!was_active && dst->outputs_active[src->output]++ == 0) { pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n", __func__, src->output, n_CPU, n_IRQ); mpic_irq_raise(opp, dst, src->output); } } else { if (was_active && --dst->outputs_active[src->output] == 0) { pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n", __func__, src->output, n_CPU, n_IRQ); mpic_irq_lower(opp, dst, src->output); } } return; } priority = IVPR_PRIORITY(src->ivpr); /* Even if the interrupt doesn't have enough priority, * it is still raised, in case ctpr is lowered later. 
*/ if (active) IRQ_setbit(&dst->raised, n_IRQ); else IRQ_resetbit(&dst->raised, n_IRQ); IRQ_check(opp, &dst->raised); if (active && priority <= dst->ctpr) { pr_debug("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n", __func__, n_IRQ, priority, dst->ctpr, n_CPU); active = 0; } if (active) { if (IRQ_get_next(opp, &dst->servicing) >= 0 && priority <= dst->servicing.priority) { pr_debug("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n", __func__, n_IRQ, dst->servicing.next, n_CPU); } else { pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n", __func__, n_CPU, n_IRQ, dst->raised.next); mpic_irq_raise(opp, dst, ILR_INTTGT_INT); } } else { IRQ_get_next(opp, &dst->servicing); if (dst->raised.priority > dst->ctpr && dst->raised.priority > dst->servicing.priority) { pr_debug("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n", __func__, n_IRQ, dst->raised.next, dst->raised.priority, dst->ctpr, dst->servicing.priority, n_CPU); /* IRQ line stays asserted */ } else { pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n", __func__, n_IRQ, dst->ctpr, dst->servicing.priority, n_CPU); mpic_irq_lower(opp, dst, ILR_INTTGT_INT); } } } /* update pic state because registers for n_IRQ have changed value */ static void openpic_update_irq(struct openpic *opp, int n_IRQ) { struct irq_source *src; bool active, was_active; int i; src = &opp->src[n_IRQ]; active = src->pending; if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) { /* Interrupt source is disabled */ pr_debug("%s: IRQ %d is disabled\n", __func__, n_IRQ); active = false; } was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK); /* * We don't have a similar check for already-active because * ctpr may have changed and we need to withdraw the interrupt. */ if (!active && !was_active) { pr_debug("%s: IRQ %d is already inactive\n", __func__, n_IRQ); return; } if (active) src->ivpr |= IVPR_ACTIVITY_MASK; else src->ivpr &= ~IVPR_ACTIVITY_MASK; if (src->destmask == 0) { /* No target */ pr_debug("%s: IRQ %d has no target\n", __func__, n_IRQ); return; } if (src->destmask == (1 << src->last_cpu)) { /* Only one CPU is allowed to receive this IRQ */ IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active); } else if (!(src->ivpr & IVPR_MODE_MASK)) { /* Directed delivery mode */ for (i = 0; i < opp->nb_cpus; i++) { if (src->destmask & (1 << i)) { IRQ_local_pipe(opp, i, n_IRQ, active, was_active); } } } else { /* Distributed delivery mode */ for (i = src->last_cpu + 1; i != src->last_cpu; i++) { if (i == opp->nb_cpus) i = 0; if (src->destmask & (1 << i)) { IRQ_local_pipe(opp, i, n_IRQ, active, was_active); src->last_cpu = i; break; } } } } static void openpic_set_irq(void *opaque, int n_IRQ, int level) { struct openpic *opp = opaque; struct irq_source *src; if (n_IRQ >= MAX_IRQ) { WARN_ONCE(1, "%s: IRQ %d out of range\n", __func__, n_IRQ); return; } src = &opp->src[n_IRQ]; pr_debug("openpic: set irq %d = %d ivpr=0x%08x\n", n_IRQ, level, src->ivpr); if (src->level) { /* level-sensitive irq */ src->pending = level; openpic_update_irq(opp, n_IRQ); } else { /* edge-sensitive irq */ if (level) { src->pending = 1; openpic_update_irq(opp, n_IRQ); } if (src->output != ILR_INTTGT_INT) { /* Edge-triggered interrupts shouldn't be used * with non-INT delivery, but just in case, * try to make it do something sane rather than * cause an interrupt storm. This is close to * what you'd probably see happen in real hardware. 
*/ src->pending = 0; openpic_update_irq(opp, n_IRQ); } } } static void openpic_reset(struct openpic *opp) { int i; opp->gcr = GCR_RESET; /* Initialise controller registers */ opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) | (opp->vid << FRR_VID_SHIFT); opp->pir = 0; opp->spve = -1 & opp->vector_mask; opp->tfrr = opp->tfrr_reset; /* Initialise IRQ sources */ for (i = 0; i < opp->max_irq; i++) { opp->src[i].ivpr = opp->ivpr_reset; switch (opp->src[i].type) { case IRQ_TYPE_NORMAL: opp->src[i].level = !!(opp->ivpr_reset & IVPR_SENSE_MASK); break; case IRQ_TYPE_FSLINT: opp->src[i].ivpr |= IVPR_POLARITY_MASK; break; case IRQ_TYPE_FSLSPECIAL: break; } write_IRQreg_idr(opp, i, opp->idr_reset); } /* Initialise IRQ destinations */ for (i = 0; i < MAX_CPU; i++) { opp->dst[i].ctpr = 15; memset(&opp->dst[i].raised, 0, sizeof(struct irq_queue)); opp->dst[i].raised.next = -1; memset(&opp->dst[i].servicing, 0, sizeof(struct irq_queue)); opp->dst[i].servicing.next = -1; } /* Initialise timers */ for (i = 0; i < MAX_TMR; i++) { opp->timers[i].tccr = 0; opp->timers[i].tbcr = TBCR_CI; } /* Go out of RESET state */ opp->gcr = 0; } static inline uint32_t read_IRQreg_idr(struct openpic *opp, int n_IRQ) { return opp->src[n_IRQ].idr; } static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ) { if (opp->flags & OPENPIC_FLAG_ILR) return opp->src[n_IRQ].output; return 0xffffffff; } static inline uint32_t read_IRQreg_ivpr(struct openpic *opp, int n_IRQ) { return opp->src[n_IRQ].ivpr; } static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ, uint32_t val) { struct irq_source *src = &opp->src[n_IRQ]; uint32_t normal_mask = (1UL << opp->nb_cpus) - 1; uint32_t crit_mask = 0; uint32_t mask = normal_mask; int crit_shift = IDR_EP_SHIFT - opp->nb_cpus; int i; if (opp->flags & OPENPIC_FLAG_IDR_CRIT) { crit_mask = mask << crit_shift; mask |= crit_mask | IDR_EP; } src->idr = val & mask; pr_debug("Set IDR %d to 0x%08x\n", n_IRQ, src->idr); if (opp->flags & OPENPIC_FLAG_IDR_CRIT) { if (src->idr & crit_mask) { if (src->idr & normal_mask) { pr_debug("%s: IRQ configured for multiple output types, using critical\n", __func__); } src->output = ILR_INTTGT_CINT; src->nomask = true; src->destmask = 0; for (i = 0; i < opp->nb_cpus; i++) { int n_ci = IDR_CI0_SHIFT - i; if (src->idr & (1UL << n_ci)) src->destmask |= 1UL << i; } } else { src->output = ILR_INTTGT_INT; src->nomask = false; src->destmask = src->idr & normal_mask; } } else { src->destmask = src->idr; } } static inline void write_IRQreg_ilr(struct openpic *opp, int n_IRQ, uint32_t val) { if (opp->flags & OPENPIC_FLAG_ILR) { struct irq_source *src = &opp->src[n_IRQ]; src->output = val & ILR_INTTGT_MASK; pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr, src->output); /* TODO: on MPIC v4.0 only, set nomask for non-INT */ } } static inline void write_IRQreg_ivpr(struct openpic *opp, int n_IRQ, uint32_t val) { uint32_t mask; /* NOTE when implementing newer FSL MPIC models: starting with v4.0, * the polarity bit is read-only on internal interrupts. */ mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK | IVPR_POLARITY_MASK | opp->vector_mask; /* ACTIVITY bit is read-only */ opp->src[n_IRQ].ivpr = (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask); /* For FSL internal interrupts, The sense bit is reserved and zero, * and the interrupt is always level-triggered. Timers and IPIs * have no sense or polarity bits, and are edge-triggered. 
*/ switch (opp->src[n_IRQ].type) { case IRQ_TYPE_NORMAL: opp->src[n_IRQ].level = !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK); break; case IRQ_TYPE_FSLINT: opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK; break; case IRQ_TYPE_FSLSPECIAL: opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK); break; } openpic_update_irq(opp, n_IRQ); pr_debug("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val, opp->src[n_IRQ].ivpr); } static void openpic_gcr_write(struct openpic *opp, uint64_t val) { if (val & GCR_RESET) { openpic_reset(opp); return; } opp->gcr &= ~opp->mpic_mode_mask; opp->gcr |= val & opp->mpic_mode_mask; } static int openpic_gbl_write(void *opaque, gpa_t addr, u32 val) { struct openpic *opp = opaque; int err = 0; pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val); if (addr & 0xF) return 0; switch (addr) { case 0x00: /* Block Revision Register1 (BRR1) is Readonly */ break; case 0x40: case 0x50: case 0x60: case 0x70: case 0x80: case 0x90: case 0xA0: case 0xB0: err = openpic_cpu_write_internal(opp, addr, val, get_current_cpu()); break; case 0x1000: /* FRR */ break; case 0x1020: /* GCR */ openpic_gcr_write(opp, val); break; case 0x1080: /* VIR */ break; case 0x1090: /* PIR */ /* * This register is used to reset a CPU core -- * let userspace handle it. */ err = -ENXIO; break; case 0x10A0: /* IPI_IVPR */ case 0x10B0: case 0x10C0: case 0x10D0: { int idx; idx = (addr - 0x10A0) >> 4; write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val); break; } case 0x10E0: /* SPVE */ opp->spve = val & opp->vector_mask; break; default: break; } return err; } static int openpic_gbl_read(void *opaque, gpa_t addr, u32 *ptr) { struct openpic *opp = opaque; u32 retval; int err = 0; pr_debug("%s: addr %#llx\n", __func__, addr); retval = 0xFFFFFFFF; if (addr & 0xF) goto out; switch (addr) { case 0x1000: /* FRR */ retval = opp->frr; retval |= (opp->nb_cpus - 1) << FRR_NCPU_SHIFT; break; case 0x1020: /* GCR */ retval = opp->gcr; break; case 0x1080: /* VIR */ retval = opp->vir; break; case 0x1090: /* PIR */ retval = 0x00000000; break; case 0x00: /* Block Revision Register1 (BRR1) */ retval = opp->brr1; break; case 0x40: case 0x50: case 0x60: case 0x70: case 0x80: case 0x90: case 0xA0: case 0xB0: err = openpic_cpu_read_internal(opp, addr, &retval, get_current_cpu()); break; case 0x10A0: /* IPI_IVPR */ case 0x10B0: case 0x10C0: case 0x10D0: { int idx; idx = (addr - 0x10A0) >> 4; retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx); } break; case 0x10E0: /* SPVE */ retval = opp->spve; break; default: break; } out: pr_debug("%s: => 0x%08x\n", __func__, retval); *ptr = retval; return err; } static int openpic_tmr_write(void *opaque, gpa_t addr, u32 val) { struct openpic *opp = opaque; int idx; addr += 0x10f0; pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val); if (addr & 0xF) return 0; if (addr == 0x10f0) { /* TFRR */ opp->tfrr = val; return 0; } idx = (addr >> 6) & 0x3; addr = addr & 0x30; switch (addr & 0x30) { case 0x00: /* TCCR */ break; case 0x10: /* TBCR */ if ((opp->timers[idx].tccr & TCCR_TOG) != 0 && (val & TBCR_CI) == 0 && (opp->timers[idx].tbcr & TBCR_CI) != 0) opp->timers[idx].tccr &= ~TCCR_TOG; opp->timers[idx].tbcr = val; break; case 0x20: /* TVPR */ write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val); break; case 0x30: /* TDR */ write_IRQreg_idr(opp, opp->irq_tim0 + idx, val); break; } return 0; } static int openpic_tmr_read(void *opaque, gpa_t addr, u32 *ptr) { struct openpic *opp = opaque; uint32_t retval = -1; int idx; pr_debug("%s: addr %#llx\n", __func__, addr); if (addr & 0xF) goto out; idx = (addr >> 
6) & 0x3; if (addr == 0x0) { /* TFRR */ retval = opp->tfrr; goto out; } switch (addr & 0x30) { case 0x00: /* TCCR */ retval = opp->timers[idx].tccr; break; case 0x10: /* TBCR */ retval = opp->timers[idx].tbcr; break; case 0x20: /* TIPV */ retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx); break; case 0x30: /* TIDE (TIDR) */ retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx); break; } out: pr_debug("%s: => 0x%08x\n", __func__, retval); *ptr = retval; return 0; } static int openpic_src_write(void *opaque, gpa_t addr, u32 val) { struct openpic *opp = opaque; int idx; pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val); addr = addr & 0xffff; idx = addr >> 5; switch (addr & 0x1f) { case 0x00: write_IRQreg_ivpr(opp, idx, val); break; case 0x10: write_IRQreg_idr(opp, idx, val); break; case 0x18: write_IRQreg_ilr(opp, idx, val); break; } return 0; } static int openpic_src_read(void *opaque, gpa_t addr, u32 *ptr) { struct openpic *opp = opaque; uint32_t retval; int idx; pr_debug("%s: addr %#llx\n", __func__, addr); retval = 0xFFFFFFFF; addr = addr & 0xffff; idx = addr >> 5; switch (addr & 0x1f) { case 0x00: retval = read_IRQreg_ivpr(opp, idx); break; case 0x10: retval = read_IRQreg_idr(opp, idx); break; case 0x18: retval = read_IRQreg_ilr(opp, idx); break; } pr_debug("%s: => 0x%08x\n", __func__, retval); *ptr = retval; return 0; } static int openpic_msi_write(void *opaque, gpa_t addr, u32 val) { struct openpic *opp = opaque; int idx = opp->irq_msi; int srs, ibs; pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val); if (addr & 0xF) return 0; switch (addr) { case MSIIR_OFFSET: srs = val >> MSIIR_SRS_SHIFT; idx += srs; ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT; opp->msi[srs].msir |= 1 << ibs; openpic_set_irq(opp, idx, 1); break; default: /* most registers are read-only, thus ignored */ break; } return 0; } static int openpic_msi_read(void *opaque, gpa_t addr, u32 *ptr) { struct openpic *opp = opaque; uint32_t r = 0; int i, srs; pr_debug("%s: addr %#llx\n", __func__, addr); if (addr & 0xF) return -ENXIO; srs = addr >> 4; switch (addr) { case 0x00: case 0x10: case 0x20: case 0x30: case 0x40: case 0x50: case 0x60: case 0x70: /* MSIRs */ r = opp->msi[srs].msir; /* Clear on read */ opp->msi[srs].msir = 0; openpic_set_irq(opp, opp->irq_msi + srs, 0); break; case 0x120: /* MSISR */ for (i = 0; i < MAX_MSI; i++) r |= (opp->msi[i].msir ? 1 : 0) << i; break; } pr_debug("%s: => 0x%08x\n", __func__, r); *ptr = r; return 0; } static int openpic_summary_read(void *opaque, gpa_t addr, u32 *ptr) { uint32_t r = 0; pr_debug("%s: addr %#llx\n", __func__, addr); /* TODO: EISR/EIMR */ *ptr = r; return 0; } static int openpic_summary_write(void *opaque, gpa_t addr, u32 val) { pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val); /* TODO: EISR/EIMR */ return 0; } static int openpic_cpu_write_internal(void *opaque, gpa_t addr, u32 val, int idx) { struct openpic *opp = opaque; struct irq_source *src; struct irq_dest *dst; int s_IRQ, n_IRQ; pr_debug("%s: cpu %d addr %#llx <= 0x%08x\n", __func__, idx, addr, val); if (idx < 0) return 0; if (addr & 0xF) return 0; dst = &opp->dst[idx]; addr &= 0xFF0; switch (addr) { case 0x40: /* IPIDR */ case 0x50: case 0x60: case 0x70: idx = (addr - 0x40) >> 4; /* we use IDE as mask which CPUs to deliver the IPI to still. 
*/ opp->src[opp->irq_ipi0 + idx].destmask |= val; openpic_set_irq(opp, opp->irq_ipi0 + idx, 1); openpic_set_irq(opp, opp->irq_ipi0 + idx, 0); break; case 0x80: /* CTPR */ dst->ctpr = val & 0x0000000F; pr_debug("%s: set CPU %d ctpr to %d, raised %d servicing %d\n", __func__, idx, dst->ctpr, dst->raised.priority, dst->servicing.priority); if (dst->raised.priority <= dst->ctpr) { pr_debug("%s: Lower OpenPIC INT output cpu %d due to ctpr\n", __func__, idx); mpic_irq_lower(opp, dst, ILR_INTTGT_INT); } else if (dst->raised.priority > dst->servicing.priority) { pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n", __func__, idx, dst->raised.next); mpic_irq_raise(opp, dst, ILR_INTTGT_INT); } break; case 0x90: /* WHOAMI */ /* Read-only register */ break; case 0xA0: /* IACK */ /* Read-only register */ break; case 0xB0: { /* EOI */ int notify_eoi; pr_debug("EOI\n"); s_IRQ = IRQ_get_next(opp, &dst->servicing); if (s_IRQ < 0) { pr_debug("%s: EOI with no interrupt in service\n", __func__); break; } IRQ_resetbit(&dst->servicing, s_IRQ); /* Notify listeners that the IRQ is over */ notify_eoi = s_IRQ; /* Set up next servicing IRQ */ s_IRQ = IRQ_get_next(opp, &dst->servicing); /* Check queued interrupts. */ n_IRQ = IRQ_get_next(opp, &dst->raised); src = &opp->src[n_IRQ]; if (n_IRQ != -1 && (s_IRQ == -1 || IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) { pr_debug("Raise OpenPIC INT output cpu %d irq %d\n", idx, n_IRQ); mpic_irq_raise(opp, dst, ILR_INTTGT_INT); } spin_unlock(&opp->lock); kvm_notify_acked_irq(opp->kvm, 0, notify_eoi); spin_lock(&opp->lock); break; } default: break; } return 0; } static int openpic_cpu_write(void *opaque, gpa_t addr, u32 val) { struct openpic *opp = opaque; return openpic_cpu_write_internal(opp, addr, val, (addr & 0x1f000) >> 12); } static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst, int cpu) { struct irq_source *src; int retval, irq; pr_debug("Lower OpenPIC INT output\n"); mpic_irq_lower(opp, dst, ILR_INTTGT_INT); irq = IRQ_get_next(opp, &dst->raised); pr_debug("IACK: irq=%d\n", irq); if (irq == -1) /* No more interrupt pending */ return opp->spve; src = &opp->src[irq]; if (!(src->ivpr & IVPR_ACTIVITY_MASK) || !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) { pr_err("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n", __func__, irq, dst->ctpr, src->ivpr); openpic_update_irq(opp, irq); retval = opp->spve; } else { /* IRQ enter servicing state */ IRQ_setbit(&dst->servicing, irq); retval = IVPR_VECTOR(opp, src->ivpr); } if (!src->level) { /* edge-sensitive IRQ */ src->ivpr &= ~IVPR_ACTIVITY_MASK; src->pending = 0; IRQ_resetbit(&dst->raised, irq); } if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) { src->destmask &= ~(1 << cpu); if (src->destmask && !src->level) { /* trigger on CPUs that didn't know about it yet */ openpic_set_irq(opp, irq, 1); openpic_set_irq(opp, irq, 0); /* if all CPUs knew about it, set active bit again */ src->ivpr |= IVPR_ACTIVITY_MASK; } } return retval; } void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu) { struct openpic *opp = vcpu->arch.mpic; int cpu = vcpu->arch.irq_cpu_id; unsigned long flags; spin_lock_irqsave(&opp->lock, flags); if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY) kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu)); spin_unlock_irqrestore(&opp->lock, flags); } static int openpic_cpu_read_internal(void *opaque, gpa_t addr, u32 *ptr, int idx) { struct openpic *opp = opaque; struct irq_dest *dst; uint32_t retval; pr_debug("%s: cpu %d addr %#llx\n", __func__, idx, addr); retval = 0xFFFFFFFF; if 
(idx < 0) goto out; if (addr & 0xF) goto out; dst = &opp->dst[idx]; addr &= 0xFF0; switch (addr) { case 0x80: /* CTPR */ retval = dst->ctpr; break; case 0x90: /* WHOAMI */ retval = idx; break; case 0xA0: /* IACK */ retval = openpic_iack(opp, dst, idx); break; case 0xB0: /* EOI */ retval = 0; break; default: break; } pr_debug("%s: => 0x%08x\n", __func__, retval); out: *ptr = retval; return 0; } static int openpic_cpu_read(void *opaque, gpa_t addr, u32 *ptr) { struct openpic *opp = opaque; return openpic_cpu_read_internal(opp, addr, ptr, (addr & 0x1f000) >> 12); } struct mem_reg { int (*read)(void *opaque, gpa_t addr, u32 *ptr); int (*write)(void *opaque, gpa_t addr, u32 val); gpa_t start_addr; int size; }; static const struct mem_reg openpic_gbl_mmio = { .write = openpic_gbl_write, .read = openpic_gbl_read, .start_addr = OPENPIC_GLB_REG_START, .size = OPENPIC_GLB_REG_SIZE, }; static const struct mem_reg openpic_tmr_mmio = { .write = openpic_tmr_write, .read = openpic_tmr_read, .start_addr = OPENPIC_TMR_REG_START, .size = OPENPIC_TMR_REG_SIZE, }; static const struct mem_reg openpic_cpu_mmio = { .write = openpic_cpu_write, .read = openpic_cpu_read, .start_addr = OPENPIC_CPU_REG_START, .size = OPENPIC_CPU_REG_SIZE, }; static const struct mem_reg openpic_src_mmio = { .write = openpic_src_write, .read = openpic_src_read, .start_addr = OPENPIC_SRC_REG_START, .size = OPENPIC_SRC_REG_SIZE, }; static const struct mem_reg openpic_msi_mmio = { .read = openpic_msi_read, .write = openpic_msi_write, .start_addr = OPENPIC_MSI_REG_START, .size = OPENPIC_MSI_REG_SIZE, }; static const struct mem_reg openpic_summary_mmio = { .read = openpic_summary_read, .write = openpic_summary_write, .start_addr = OPENPIC_SUMMARY_REG_START, .size = OPENPIC_SUMMARY_REG_SIZE, }; static void add_mmio_region(struct openpic *opp, const struct mem_reg *mr) { if (opp->num_mmio_regions >= MAX_MMIO_REGIONS) { WARN(1, "kvm mpic: too many mmio regions\n"); return; } opp->mmio_regions[opp->num_mmio_regions++] = mr; } static void fsl_common_init(struct openpic *opp) { int i; int virq = MAX_SRC; add_mmio_region(opp, &openpic_msi_mmio); add_mmio_region(opp, &openpic_summary_mmio); opp->vid = VID_REVISION_1_2; opp->vir = VIR_GENERIC; opp->vector_mask = 0xFFFF; opp->tfrr_reset = 0; opp->ivpr_reset = IVPR_MASK_MASK; opp->idr_reset = 1 << 0; opp->max_irq = MAX_IRQ; opp->irq_ipi0 = virq; virq += MAX_IPI; opp->irq_tim0 = virq; virq += MAX_TMR; BUG_ON(virq > MAX_IRQ); opp->irq_msi = 224; for (i = 0; i < opp->fsl->max_ext; i++) opp->src[i].level = false; /* Internal interrupts, including message and MSI */ for (i = 16; i < MAX_SRC; i++) { opp->src[i].type = IRQ_TYPE_FSLINT; opp->src[i].level = true; } /* timers and IPIs */ for (i = MAX_SRC; i < virq; i++) { opp->src[i].type = IRQ_TYPE_FSLSPECIAL; opp->src[i].level = false; } } static int kvm_mpic_read_internal(struct openpic *opp, gpa_t addr, u32 *ptr) { int i; for (i = 0; i < opp->num_mmio_regions; i++) { const struct mem_reg *mr = opp->mmio_regions[i]; if (mr->start_addr > addr || addr >= mr->start_addr + mr->size) continue; return mr->read(opp, addr - mr->start_addr, ptr); } return -ENXIO; } static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val) { int i; for (i = 0; i < opp->num_mmio_regions; i++) { const struct mem_reg *mr = opp->mmio_regions[i]; if (mr->start_addr > addr || addr >= mr->start_addr + mr->size) continue; return mr->write(opp, addr - mr->start_addr, val); } return -ENXIO; } static int kvm_mpic_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t 
addr, int len, void *ptr) { struct openpic *opp = container_of(this, struct openpic, mmio); int ret; union { u32 val; u8 bytes[4]; } u; if (addr & (len - 1)) { pr_debug("%s: bad alignment %llx/%d\n", __func__, addr, len); return -EINVAL; } spin_lock_irq(&opp->lock); ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val); spin_unlock_irq(&opp->lock); /* * Technically only 32-bit accesses are allowed, but be nice to * people dumping registers a byte at a time -- it works in real * hardware (reads only, not writes). */ if (len == 4) { *(u32 *)ptr = u.val; pr_debug("%s: addr %llx ret %d len 4 val %x\n", __func__, addr, ret, u.val); } else if (len == 1) { *(u8 *)ptr = u.bytes[addr & 3]; pr_debug("%s: addr %llx ret %d len 1 val %x\n", __func__, addr, ret, u.bytes[addr & 3]); } else { pr_debug("%s: bad length %d\n", __func__, len); return -EINVAL; } return ret; } static int kvm_mpic_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, int len, const void *ptr) { struct openpic *opp = container_of(this, struct openpic, mmio); int ret; if (len != 4) { pr_debug("%s: bad length %d\n", __func__, len); return -EOPNOTSUPP; } if (addr & 3) { pr_debug("%s: bad alignment %llx/%d\n", __func__, addr, len); return -EOPNOTSUPP; } spin_lock_irq(&opp->lock); ret = kvm_mpic_write_internal(opp, addr - opp->reg_base, *(const u32 *)ptr); spin_unlock_irq(&opp->lock); pr_debug("%s: addr %llx ret %d val %x\n", __func__, addr, ret, *(const u32 *)ptr); return ret; } static const struct kvm_io_device_ops mpic_mmio_ops = { .read = kvm_mpic_read, .write = kvm_mpic_write, }; static void map_mmio(struct openpic *opp) { kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops); kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS, opp->reg_base, OPENPIC_REG_SIZE, &opp->mmio); } static void unmap_mmio(struct openpic *opp) { kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio); } static int set_base_addr(struct openpic *opp, struct kvm_device_attr *attr) { u64 base; if (copy_from_user(&base, (u64 __user *)(long)attr->addr, sizeof(u64))) return -EFAULT; if (base & 0x3ffff) { pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx not aligned\n", __func__, base); return -EINVAL; } if (base == opp->reg_base) return 0; mutex_lock(&opp->kvm->slots_lock); unmap_mmio(opp); opp->reg_base = base; pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx\n", __func__, base); if (base == 0) goto out; map_mmio(opp); out: mutex_unlock(&opp->kvm->slots_lock); return 0; } #define ATTR_SET 0 #define ATTR_GET 1 static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type) { int ret; if (addr & 3) return -ENXIO; spin_lock_irq(&opp->lock); if (type == ATTR_SET) ret = kvm_mpic_write_internal(opp, addr, *val); else ret = kvm_mpic_read_internal(opp, addr, val); spin_unlock_irq(&opp->lock); pr_debug("%s: type %d addr %llx val %x\n", __func__, type, addr, *val); return ret; } static int mpic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { struct openpic *opp = dev->private; u32 attr32; switch (attr->group) { case KVM_DEV_MPIC_GRP_MISC: switch (attr->attr) { case KVM_DEV_MPIC_BASE_ADDR: return set_base_addr(opp, attr); } break; case KVM_DEV_MPIC_GRP_REGISTER: if (get_user(attr32, (u32 __user *)(long)attr->addr)) return -EFAULT; return access_reg(opp, attr->attr, &attr32, ATTR_SET); case KVM_DEV_MPIC_GRP_IRQ_ACTIVE: if (attr->attr > MAX_SRC) return -EINVAL; if (get_user(attr32, (u32 __user *)(long)attr->addr)) return -EFAULT; if (attr32 != 0 && attr32 != 1) return -EINVAL; spin_lock_irq(&opp->lock); 
openpic_set_irq(opp, attr->attr, attr32); spin_unlock_irq(&opp->lock); return 0; } return -ENXIO; } static int mpic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { struct openpic *opp = dev->private; u64 attr64; u32 attr32; int ret; switch (attr->group) { case KVM_DEV_MPIC_GRP_MISC: switch (attr->attr) { case KVM_DEV_MPIC_BASE_ADDR: mutex_lock(&opp->kvm->slots_lock); attr64 = opp->reg_base; mutex_unlock(&opp->kvm->slots_lock); if (copy_to_user((u64 __user *)(long)attr->addr, &attr64, sizeof(u64))) return -EFAULT; return 0; } break; case KVM_DEV_MPIC_GRP_REGISTER: ret = access_reg(opp, attr->attr, &attr32, ATTR_GET); if (ret) return ret; if (put_user(attr32, (u32 __user *)(long)attr->addr)) return -EFAULT; return 0; case KVM_DEV_MPIC_GRP_IRQ_ACTIVE: if (attr->attr > MAX_SRC) return -EINVAL; spin_lock_irq(&opp->lock); attr32 = opp->src[attr->attr].pending; spin_unlock_irq(&opp->lock); if (put_user(attr32, (u32 __user *)(long)attr->addr)) return -EFAULT; return 0; } return -ENXIO; } static int mpic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { switch (attr->group) { case KVM_DEV_MPIC_GRP_MISC: switch (attr->attr) { case KVM_DEV_MPIC_BASE_ADDR: return 0; } break; case KVM_DEV_MPIC_GRP_REGISTER: return 0; case KVM_DEV_MPIC_GRP_IRQ_ACTIVE: if (attr->attr > MAX_SRC) break; return 0; } return -ENXIO; } static void mpic_destroy(struct kvm_device *dev) { struct openpic *opp = dev->private; dev->kvm->arch.mpic = NULL; kfree(opp); kfree(dev); } static int mpic_set_default_irq_routing(struct openpic *opp) { struct kvm_irq_routing_entry *routing; /* Create a nop default map, so that dereferencing it still works */ routing = kzalloc((sizeof(*routing)), GFP_KERNEL); if (!routing) return -ENOMEM; kvm_set_irq_routing(opp->kvm, routing, 0, 0); kfree(routing); return 0; } static int mpic_create(struct kvm_device *dev, u32 type) { struct openpic *opp; int ret; /* We only support one MPIC at a time for now */ if (dev->kvm->arch.mpic) return -EINVAL; opp = kzalloc(sizeof(struct openpic), GFP_KERNEL); if (!opp) return -ENOMEM; dev->private = opp; opp->kvm = dev->kvm; opp->dev = dev; opp->model = type; spin_lock_init(&opp->lock); add_mmio_region(opp, &openpic_gbl_mmio); add_mmio_region(opp, &openpic_tmr_mmio); add_mmio_region(opp, &openpic_src_mmio); add_mmio_region(opp, &openpic_cpu_mmio); switch (opp->model) { case KVM_DEV_TYPE_FSL_MPIC_20: opp->fsl = &fsl_mpic_20; opp->brr1 = 0x00400200; opp->flags |= OPENPIC_FLAG_IDR_CRIT; opp->nb_irqs = 80; opp->mpic_mode_mask = GCR_MODE_MIXED; fsl_common_init(opp); break; case KVM_DEV_TYPE_FSL_MPIC_42: opp->fsl = &fsl_mpic_42; opp->brr1 = 0x00400402; opp->flags |= OPENPIC_FLAG_ILR; opp->nb_irqs = 196; opp->mpic_mode_mask = GCR_MODE_PROXY; fsl_common_init(opp); break; default: ret = -ENODEV; goto err; } ret = mpic_set_default_irq_routing(opp); if (ret) goto err; openpic_reset(opp); smp_wmb(); dev->kvm->arch.mpic = opp; return 0; err: kfree(opp); return ret; } struct kvm_device_ops kvm_mpic_ops = { .name = "kvm-mpic", .create = mpic_create, .destroy = mpic_destroy, .set_attr = mpic_set_attr, .get_attr = mpic_get_attr, .has_attr = mpic_has_attr, }; int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu, u32 cpu) { struct openpic *opp = dev->private; int ret = 0; if (dev->ops != &kvm_mpic_ops) return -EPERM; if (opp->kvm != vcpu->kvm) return -EPERM; if (cpu < 0 || cpu >= MAX_CPU) return -EPERM; spin_lock_irq(&opp->lock); if (opp->dst[cpu].vcpu) { ret = -EEXIST; goto out; } if (vcpu->arch.irq_type) { ret = -EBUSY; goto out; 
} opp->dst[cpu].vcpu = vcpu; opp->nb_cpus = max(opp->nb_cpus, cpu + 1); vcpu->arch.mpic = opp; vcpu->arch.irq_cpu_id = cpu; vcpu->arch.irq_type = KVMPPC_IRQ_MPIC; /* This might need to be changed if GCR gets extended */ if (opp->mpic_mode_mask == GCR_MODE_PROXY) vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL; out: spin_unlock_irq(&opp->lock); return ret; } /* * This should only happen immediately before the mpic is destroyed, * so we shouldn't need to worry about anything still trying to * access the vcpu pointer. */ void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu) { BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu); opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL; } /* * Return value: * < 0 Interrupt was ignored (masked or not delivered for other reasons) * = 0 Interrupt was coalesced (previous irq is still pending) * > 0 Number of CPUs interrupt was delivered to */ static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { u32 irq = e->irqchip.pin; struct openpic *opp = kvm->arch.mpic; unsigned long flags; spin_lock_irqsave(&opp->lock, flags); openpic_set_irq(opp, irq, level); spin_unlock_irqrestore(&opp->lock, flags); /* All code paths we care about don't check for the return value */ return 0; } int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { struct openpic *opp = kvm->arch.mpic; unsigned long flags; spin_lock_irqsave(&opp->lock, flags); /* * XXX We ignore the target address for now, as we only support * a single MSI bank. */ openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data); spin_unlock_irqrestore(&opp->lock, flags); /* All code paths we care about don't check for the return value */ return 0; } int kvm_set_routing_entry(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, const struct kvm_irq_routing_entry *ue) { int r = -EINVAL; switch (ue->type) { case KVM_IRQ_ROUTING_IRQCHIP: e->set = mpic_set_irq; e->irqchip.irqchip = ue->u.irqchip.irqchip; e->irqchip.pin = ue->u.irqchip.pin; if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) goto out; break; case KVM_IRQ_ROUTING_MSI: e->set = kvm_set_msi; e->msi.address_lo = ue->u.msi.address_lo; e->msi.address_hi = ue->u.msi.address_hi; e->msi.data = ue->u.msi.data; break; default: goto out; } r = 0; out: return r; }
linux-master
arch/powerpc/kvm/mpic.c
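openpic_msi_write() in the file above shows the one writable path through the shared-MSI block: a store to MSIIR selects a bank via the SRS field and a bit via the IBS field, latches that bit in the bank's MSIR, and then raises the corresponding MSI source. The following is a small user-space sketch of that decode, assuming only the MSIIR_* constants copied from mpic.c; the driver code around them is invented for the example and uses unsigned literals for the shifted masks.

/*
 * Illustrative sketch only: decodes an MSIIR write the same way
 * openpic_msi_write() does before it ORs the bit into msi[srs].msir.
 */
#include <stdint.h>
#include <stdio.h>

#define MSIIR_SRS_SHIFT	29
#define MSIIR_IBS_SHIFT	24
#define MSIIR_IBS_MASK	(0x1fu << MSIIR_IBS_SHIFT)

int main(void)
{
	/* example write: bank 2, interrupt bit 5 */
	uint32_t val = (2u << MSIIR_SRS_SHIFT) | (5u << MSIIR_IBS_SHIFT);
	uint32_t srs = val >> MSIIR_SRS_SHIFT;
	uint32_t ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
	uint32_t msir = 0;

	/* mpic.c then sets this bit and pulses irq_msi + srs */
	msir |= 1u << ibs;
	printf("srs=%u ibs=%u msir=0x%08x\n", srs, ibs, msir);
	return 0;
}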
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. * * Authors: * Alexander Graf <[email protected]> * Kevin Wolf <[email protected]> * Paul Mackerras <[email protected]> * * Description: * Functions relating to running KVM on Book 3S processors where * we don't have access to hypervisor mode, and we run the guest * in problem state (user mode). * * This file is derived from arch/powerpc/kvm/44x.c, * by Hollis Blanchard <[email protected]>. */ #include <linux/kvm_host.h> #include <linux/export.h> #include <linux/err.h> #include <linux/slab.h> #include <asm/reg.h> #include <asm/cputable.h> #include <asm/cacheflush.h> #include <linux/uaccess.h> #include <asm/interrupt.h> #include <asm/io.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/mmu_context.h> #include <asm/switch_to.h> #include <asm/firmware.h> #include <asm/setup.h> #include <linux/gfp.h> #include <linux/sched.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <asm/asm-prototypes.h> #include <asm/tm.h> #include "book3s.h" #define CREATE_TRACE_POINTS #include "trace_pr.h" /* #define EXIT_DEBUG */ /* #define DEBUG_EXT */ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, ulong msr); #ifdef CONFIG_PPC_BOOK3S_64 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac); #endif /* Some compatibility defines */ #ifdef CONFIG_PPC_BOOK3S_32 #define MSR_USER32 MSR_USER #define MSR_USER64 MSR_USER #define HW_PAGE_SIZE PAGE_SIZE #define HPTE_R_M _PAGE_COHERENT #endif static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) { ulong msr = kvmppc_get_msr(vcpu); return (msr & (MSR_IR|MSR_DR)) == MSR_DR; } static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu) { ulong msr = kvmppc_get_msr(vcpu); ulong pc = kvmppc_get_pc(vcpu); /* We are in DR only split real mode */ if ((msr & (MSR_IR|MSR_DR)) != MSR_DR) return; /* We have not fixed up the guest already */ if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) return; /* The code is in fixupable address space */ if (pc & SPLIT_HACK_MASK) return; vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); } static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) { if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { ulong pc = kvmppc_get_pc(vcpu); ulong lr = kvmppc_get_lr(vcpu); if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK); vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; } } static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) { unsigned long msr, pc, new_msr, new_pc; kvmppc_unfixup_split_real(vcpu); msr = kvmppc_get_msr(vcpu); pc = kvmppc_get_pc(vcpu); new_msr = vcpu->arch.intr_msr; new_pc = to_book3s(vcpu)->hior + vec; #ifdef CONFIG_PPC_BOOK3S_64 /* If transactional, change to suspend mode on IRQ delivery */ if (MSR_TM_TRANSACTIONAL(msr)) new_msr |= MSR_TS_S; else new_msr |= msr & MSR_TS_MASK; #endif kvmppc_set_srr0(vcpu, pc); kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); kvmppc_set_pc(vcpu, new_pc); kvmppc_set_msr(vcpu, new_msr); } static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) { #ifdef CONFIG_PPC_BOOK3S_64 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; 
svcpu->in_use = 0; svcpu_put(svcpu); /* Disable AIL if supported */ if (cpu_has_feature(CPU_FTR_HVMODE)) { if (cpu_has_feature(CPU_FTR_ARCH_207S)) mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL); if (cpu_has_feature(CPU_FTR_ARCH_300) && (current->thread.fscr & FSCR_SCV)) mtspr(SPRN_FSCR, mfspr(SPRN_FSCR) & ~FSCR_SCV); } #endif vcpu->cpu = smp_processor_id(); #ifdef CONFIG_PPC_BOOK3S_32 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; #endif if (kvmppc_is_split_real(vcpu)) kvmppc_fixup_split_real(vcpu); kvmppc_restore_tm_pr(vcpu); } static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) { #ifdef CONFIG_PPC_BOOK3S_64 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); if (svcpu->in_use) { kvmppc_copy_from_svcpu(vcpu); } memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; svcpu_put(svcpu); /* Enable AIL if supported */ if (cpu_has_feature(CPU_FTR_HVMODE)) { if (cpu_has_feature(CPU_FTR_ARCH_207S)) mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3); if (cpu_has_feature(CPU_FTR_ARCH_300) && (current->thread.fscr & FSCR_SCV)) mtspr(SPRN_FSCR, mfspr(SPRN_FSCR) | FSCR_SCV); } #endif if (kvmppc_is_split_real(vcpu)) kvmppc_unfixup_split_real(vcpu); kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); kvmppc_save_tm_pr(vcpu); vcpu->cpu = -1; } /* Copy data needed by real-mode code from vcpu to shadow vcpu */ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) { struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); svcpu->gpr[0] = vcpu->arch.regs.gpr[0]; svcpu->gpr[1] = vcpu->arch.regs.gpr[1]; svcpu->gpr[2] = vcpu->arch.regs.gpr[2]; svcpu->gpr[3] = vcpu->arch.regs.gpr[3]; svcpu->gpr[4] = vcpu->arch.regs.gpr[4]; svcpu->gpr[5] = vcpu->arch.regs.gpr[5]; svcpu->gpr[6] = vcpu->arch.regs.gpr[6]; svcpu->gpr[7] = vcpu->arch.regs.gpr[7]; svcpu->gpr[8] = vcpu->arch.regs.gpr[8]; svcpu->gpr[9] = vcpu->arch.regs.gpr[9]; svcpu->gpr[10] = vcpu->arch.regs.gpr[10]; svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; svcpu->cr = vcpu->arch.regs.ccr; svcpu->xer = vcpu->arch.regs.xer; svcpu->ctr = vcpu->arch.regs.ctr; svcpu->lr = vcpu->arch.regs.link; svcpu->pc = vcpu->arch.regs.nip; #ifdef CONFIG_PPC_BOOK3S_64 svcpu->shadow_fscr = vcpu->arch.shadow_fscr; #endif /* * Now also save the current time base value. We use this * to find the guest purr and spurr value. */ vcpu->arch.entry_tb = get_tb(); vcpu->arch.entry_vtb = get_vtb(); if (cpu_has_feature(CPU_FTR_ARCH_207S)) vcpu->arch.entry_ic = mfspr(SPRN_IC); svcpu->in_use = true; svcpu_put(svcpu); } static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) { ulong guest_msr = kvmppc_get_msr(vcpu); ulong smsr = guest_msr; /* Guest MSR values */ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE | MSR_TM | MSR_TS_MASK; #else smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; #endif /* Process MSR values */ smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; /* External providers the guest reserved */ smsr |= (guest_msr & vcpu->arch.guest_owned_ext); /* 64-bit Process MSR values */ #ifdef CONFIG_PPC_BOOK3S_64 smsr |= MSR_HV; #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * in guest privileged state, we want to fail all TM transactions. * So disable MSR TM bit so that all tbegin. will be able to be * trapped into host. 
*/ if (!(guest_msr & MSR_PR)) smsr &= ~MSR_TM; #endif vcpu->arch.shadow_msr = smsr; } /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) { struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM ulong old_msr; #endif /* * Maybe we were already preempted and synced the svcpu from * our preempt notifiers. Don't bother touching this svcpu then. */ if (!svcpu->in_use) goto out; vcpu->arch.regs.gpr[0] = svcpu->gpr[0]; vcpu->arch.regs.gpr[1] = svcpu->gpr[1]; vcpu->arch.regs.gpr[2] = svcpu->gpr[2]; vcpu->arch.regs.gpr[3] = svcpu->gpr[3]; vcpu->arch.regs.gpr[4] = svcpu->gpr[4]; vcpu->arch.regs.gpr[5] = svcpu->gpr[5]; vcpu->arch.regs.gpr[6] = svcpu->gpr[6]; vcpu->arch.regs.gpr[7] = svcpu->gpr[7]; vcpu->arch.regs.gpr[8] = svcpu->gpr[8]; vcpu->arch.regs.gpr[9] = svcpu->gpr[9]; vcpu->arch.regs.gpr[10] = svcpu->gpr[10]; vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; vcpu->arch.regs.ccr = svcpu->cr; vcpu->arch.regs.xer = svcpu->xer; vcpu->arch.regs.ctr = svcpu->ctr; vcpu->arch.regs.link = svcpu->lr; vcpu->arch.regs.nip = svcpu->pc; vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; vcpu->arch.fault_dar = svcpu->fault_dar; vcpu->arch.fault_dsisr = svcpu->fault_dsisr; vcpu->arch.last_inst = svcpu->last_inst; #ifdef CONFIG_PPC_BOOK3S_64 vcpu->arch.shadow_fscr = svcpu->shadow_fscr; #endif /* * Update purr and spurr using time base on exit. */ vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb; vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb; to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb; if (cpu_has_feature(CPU_FTR_ARCH_207S)) vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Unlike other MSR bits, MSR[TS]bits can be changed at guest without * notifying host: * modified by unprivileged instructions like "tbegin"/"tend"/ * "tresume"/"tsuspend" in PR KVM guest. * * It is necessary to sync here to calculate a correct shadow_msr. * * privileged guest's tbegin will be failed at present. So we * only take care of problem state guest. */ old_msr = kvmppc_get_msr(vcpu); if (unlikely((old_msr & MSR_PR) && (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) != (old_msr & (MSR_TS_MASK)))) { old_msr &= ~(MSR_TS_MASK); old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)); kvmppc_set_msr_fast(vcpu, old_msr); kvmppc_recalc_shadow_msr(vcpu); } #endif svcpu->in_use = false; out: svcpu_put(svcpu); } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) { tm_enable(); vcpu->arch.tfhar = mfspr(SPRN_TFHAR); vcpu->arch.texasr = mfspr(SPRN_TEXASR); vcpu->arch.tfiar = mfspr(SPRN_TFIAR); tm_disable(); } void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) { tm_enable(); mtspr(SPRN_TFHAR, vcpu->arch.tfhar); mtspr(SPRN_TEXASR, vcpu->arch.texasr); mtspr(SPRN_TFIAR, vcpu->arch.tfiar); tm_disable(); } /* loadup math bits which is enabled at kvmppc_get_msr() but not enabled at * hardware. 
*/ static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu) { ulong exit_nr; ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) & (MSR_FP | MSR_VEC | MSR_VSX); if (!ext_diff) return; if (ext_diff == MSR_FP) exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL; else if (ext_diff == MSR_VEC) exit_nr = BOOK3S_INTERRUPT_ALTIVEC; else exit_nr = BOOK3S_INTERRUPT_VSX; kvmppc_handle_ext(vcpu, exit_nr, ext_diff); } void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) { if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) { kvmppc_save_tm_sprs(vcpu); return; } kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); kvmppc_giveup_ext(vcpu, MSR_VSX); preempt_disable(); _kvmppc_save_tm_pr(vcpu, mfmsr()); preempt_enable(); } void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) { if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) { kvmppc_restore_tm_sprs(vcpu); if (kvmppc_get_msr(vcpu) & MSR_TM) { kvmppc_handle_lost_math_exts(vcpu); if (vcpu->arch.fscr & FSCR_TAR) kvmppc_handle_fac(vcpu, FSCR_TAR_LG); } return; } preempt_disable(); _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu)); preempt_enable(); if (kvmppc_get_msr(vcpu) & MSR_TM) { kvmppc_handle_lost_math_exts(vcpu); if (vcpu->arch.fscr & FSCR_TAR) kvmppc_handle_fac(vcpu, FSCR_TAR_LG); } } #endif static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) { int r = 1; /* Indicate we want to get back into the guest */ /* We misuse TLB_FLUSH to indicate that we want to clear all shadow cache entries */ if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvmppc_mmu_pte_flush(vcpu, 0, 0); return r; } /************* MMU Notifiers *************/ static bool do_kvm_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) kvmppc_mmu_pte_pflush(vcpu, range->start << PAGE_SHIFT, range->end << PAGE_SHIFT); return false; } static bool kvm_unmap_gfn_range_pr(struct kvm *kvm, struct kvm_gfn_range *range) { return do_kvm_unmap_gfn(kvm, range); } static bool kvm_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) { /* XXX could be more clever ;) */ return false; } static bool kvm_test_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) { /* XXX could be more clever ;) */ return false; } static bool kvm_set_spte_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) { /* The page will get remapped properly on its next fault */ return do_kvm_unmap_gfn(kvm, range); } /*****************************************/ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) { ulong old_msr; /* For PAPR guest, make sure MSR reflects guest mode */ if (vcpu->arch.papr_enabled) msr = (msr & ~MSR_HV) | MSR_ME; #ifdef EXIT_DEBUG printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* We should never target guest MSR to TS=10 && PR=0, * since we always fail transaction for guest privilege * state. 
*/ if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr)) kvmppc_emulate_tabort(vcpu, TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT); #endif old_msr = kvmppc_get_msr(vcpu); msr &= to_book3s(vcpu)->msr_mask; kvmppc_set_msr_fast(vcpu, msr); kvmppc_recalc_shadow_msr(vcpu); if (msr & MSR_POW) { if (!vcpu->arch.pending_exceptions) { kvm_vcpu_halt(vcpu); vcpu->stat.generic.halt_wakeup++; /* Unset POW bit after we woke up */ msr &= ~MSR_POW; kvmppc_set_msr_fast(vcpu, msr); } } if (kvmppc_is_split_real(vcpu)) kvmppc_fixup_split_real(vcpu); else kvmppc_unfixup_split_real(vcpu); if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { kvmppc_mmu_flush_segments(vcpu); kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); /* Preload magic page segment when in kernel mode */ if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) { struct kvm_vcpu_arch *a = &vcpu->arch; if (msr & MSR_DR) kvmppc_mmu_map_segment(vcpu, a->magic_page_ea); else kvmppc_mmu_map_segment(vcpu, a->magic_page_pa); } } /* * When switching from 32 to 64-bit, we may have a stale 32-bit * magic page around, we need to flush it. Typically 32-bit magic * page will be instantiated when calling into RTAS. Note: We * assume that such transition only happens while in kernel mode, * ie, we never transition from user 32-bit to kernel 64-bit with * a 32-bit magic page around. */ if (vcpu->arch.magic_page_pa && !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) { /* going from RTAS to normal kernel code */ kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa, ~0xFFFUL); } /* Preload FPU if it's enabled */ if (kvmppc_get_msr(vcpu) & MSR_FP) kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (kvmppc_get_msr(vcpu) & MSR_TM) kvmppc_handle_lost_math_exts(vcpu); #endif } static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) { u32 host_pvr; vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; vcpu->arch.pvr = pvr; #ifdef CONFIG_PPC_BOOK3S_64 if ((pvr >= 0x330000) && (pvr < 0x70330000)) { kvmppc_mmu_book3s_64_init(vcpu); if (!to_book3s(vcpu)->hior_explicit) to_book3s(vcpu)->hior = 0xfff00000; to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; vcpu->arch.cpu_type = KVM_CPU_3S_64; } else #endif { kvmppc_mmu_book3s_32_init(vcpu); if (!to_book3s(vcpu)->hior_explicit) to_book3s(vcpu)->hior = 0; to_book3s(vcpu)->msr_mask = 0xffffffffULL; vcpu->arch.cpu_type = KVM_CPU_3S_32; } kvmppc_sanity_check(vcpu); /* If we are in hypervisor level on 970, we can tell the CPU to * treat DCBZ as 32 bytes store */ vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32; if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && !strcmp(cur_cpu_spec->platform, "ppc970")) vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; /* Cell performs badly if MSR_FEx are set. So let's hope nobody really needs them in a VM on Cell and force disable them. */ if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be")) to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); /* * If they're asking for POWER6 or later, set the flag * indicating that we can do multiple large page sizes * and 1TB segments. * Also set the flag that indicates that tlbie has the large * page bit in the RB operand instead of the instruction. 
*/ switch (PVR_VER(pvr)) { case PVR_POWER6: case PVR_POWER7: case PVR_POWER7p: case PVR_POWER8: case PVR_POWER8E: case PVR_POWER8NVL: case PVR_POWER9: vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | BOOK3S_HFLAG_NEW_TLBIE; break; } #ifdef CONFIG_PPC_BOOK3S_32 /* 32 bit Book3S always has 32 byte dcbz */ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; #endif /* On some CPUs we can execute paired single operations natively */ asm ( "mfpvr %0" : "=r"(host_pvr)); switch (host_pvr) { case 0x00080200: /* lonestar 2.0 */ case 0x00088202: /* lonestar 2.2 */ case 0x70000100: /* gekko 1.0 */ case 0x00080100: /* gekko 2.0 */ case 0x00083203: /* gekko 2.3a */ case 0x00083213: /* gekko 2.3b */ case 0x00083204: /* gekko 2.4 */ case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */ case 0x00087200: /* broadway */ vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS; /* Enable HID2.PSE - in case we need it later */ mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29)); } } /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to * emulate 32 bytes dcbz length. * * The Book3s_64 inventors also realized this case and implemented a special bit * in the HID5 register, which is a hypervisor ressource. Thus we can't use it. * * My approach here is to patch the dcbz instruction on executing pages. */ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) { struct page *hpage; u64 hpage_offset; u32 *page; int i; hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); if (is_error_page(hpage)) return; hpage_offset = pte->raddr & ~PAGE_MASK; hpage_offset &= ~0xFFFULL; hpage_offset /= 4; get_page(hpage); page = kmap_atomic(hpage); /* patch dcbz into reserved instruction, so we trap */ for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++) if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ) page[i] &= cpu_to_be32(0xfffffff7); kunmap_atomic(page); put_page(hpage); } static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) { ulong mp_pa = vcpu->arch.magic_page_pa; if (!(kvmppc_get_msr(vcpu) & MSR_SF)) mp_pa = (uint32_t)mp_pa; gpa &= ~0xFFFULL; if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) { return true; } return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT); } static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu, ulong eaddr, int vec) { bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); bool iswrite = false; int r = RESUME_GUEST; int relocated; int page_found = 0; struct kvmppc_pte pte = { 0 }; bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false; bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false; u64 vsid; relocated = data ? 
dr : ir; if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE)) iswrite = true; /* Resolve real address if translation turned on */ if (relocated) { page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); } else { pte.may_execute = true; pte.may_read = true; pte.may_write = true; pte.raddr = eaddr & KVM_PAM; pte.eaddr = eaddr; pte.vpage = eaddr >> 12; pte.page_size = MMU_PAGE_64K; pte.wimg = HPTE_R_M; } switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { case 0: pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); break; case MSR_DR: if (!data && (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)) pte.raddr &= ~SPLIT_HACK_MASK; fallthrough; case MSR_IR: vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR) pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12)); else pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12)); pte.vpage |= vsid; if (vsid == -1) page_found = -EINVAL; break; } if (vcpu->arch.mmu.is_dcbz32(vcpu) && (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { /* * If we do the dcbz hack, we have to NX on every execution, * so we can patch the executing code. This renders our guest * NX-less. */ pte.may_execute = !data; } if (page_found == -ENOENT || page_found == -EPERM) { /* Page not found in guest PTE entries, or protection fault */ u64 flags; if (page_found == -EPERM) flags = DSISR_PROTFAULT; else flags = DSISR_NOHPTE; if (data) { flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE; kvmppc_core_queue_data_storage(vcpu, 0, eaddr, flags); } else { kvmppc_core_queue_inst_storage(vcpu, flags); } } else if (page_found == -EINVAL) { /* Page not found in guest SLB */ kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); } else if (kvmppc_visible_gpa(vcpu, pte.raddr)) { if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { /* * There is already a host HPTE there, presumably * a read-only one for a page the guest thinks * is writable, so get rid of it first. */ kvmppc_mmu_unmap_page(vcpu, &pte); } /* The guest's PTE is not mapped yet. Map on the host */ if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) { /* Exit KVM if mapping failed */ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return RESUME_HOST; } if (data) vcpu->stat.sp_storage++; else if (vcpu->arch.mmu.is_dcbz32(vcpu) && (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) kvmppc_patch_dcbz(vcpu, &pte); } else { /* MMIO */ vcpu->stat.mmio_exits++; vcpu->arch.paddr_accessed = pte.raddr; vcpu->arch.vaddr_accessed = pte.eaddr; r = kvmppc_emulate_mmio(vcpu); if ( r == RESUME_HOST_NV ) r = RESUME_HOST; } return r; } /* Give up external provider (FPU, Altivec, VSX) */ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) { struct thread_struct *t = &current->thread; /* * VSX instructions can access FP and vector registers, so if * we are giving up VSX, make sure we give up FP and VMX as well. */ if (msr & MSR_VSX) msr |= MSR_FP | MSR_VEC; msr &= vcpu->arch.guest_owned_ext; if (!msr) return; #ifdef DEBUG_EXT printk(KERN_INFO "Giving up ext 0x%lx\n", msr); #endif if (msr & MSR_FP) { /* * Note that on CPUs with VSX, giveup_fpu stores * both the traditional FP registers and the added VSX * registers into thread.fp_state.fpr[]. 
*/ if (t->regs->msr & MSR_FP) giveup_fpu(current); t->fp_save_area = NULL; } #ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { if (current->thread.regs->msr & MSR_VEC) giveup_altivec(current); t->vr_save_area = NULL; } #endif vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX); kvmppc_recalc_shadow_msr(vcpu); } /* Give up facility (TAR / EBB / DSCR) */ void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) { #ifdef CONFIG_PPC_BOOK3S_64 if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { /* Facility not available to the guest, ignore giveup request*/ return; } switch (fac) { case FSCR_TAR_LG: vcpu->arch.tar = mfspr(SPRN_TAR); mtspr(SPRN_TAR, current->thread.tar); vcpu->arch.shadow_fscr &= ~FSCR_TAR; break; } #endif } /* Handle external providers (FPU, Altivec, VSX) */ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, ulong msr) { struct thread_struct *t = &current->thread; /* When we have paired singles, we emulate in software */ if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) return RESUME_GUEST; if (!(kvmppc_get_msr(vcpu) & msr)) { kvmppc_book3s_queue_irqprio(vcpu, exit_nr); return RESUME_GUEST; } if (msr == MSR_VSX) { /* No VSX? Give an illegal instruction interrupt */ #ifdef CONFIG_VSX if (!cpu_has_feature(CPU_FTR_VSX)) #endif { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); return RESUME_GUEST; } /* * We have to load up all the FP and VMX registers before * we can let the guest use VSX instructions. */ msr = MSR_FP | MSR_VEC | MSR_VSX; } /* See if we already own all the ext(s) needed */ msr &= ~vcpu->arch.guest_owned_ext; if (!msr) return RESUME_GUEST; #ifdef DEBUG_EXT printk(KERN_INFO "Loading up ext 0x%lx\n", msr); #endif if (msr & MSR_FP) { preempt_disable(); enable_kernel_fp(); load_fp_state(&vcpu->arch.fp); disable_kernel_fp(); t->fp_save_area = &vcpu->arch.fp; preempt_enable(); } if (msr & MSR_VEC) { #ifdef CONFIG_ALTIVEC preempt_disable(); enable_kernel_altivec(); load_vr_state(&vcpu->arch.vr); disable_kernel_altivec(); t->vr_save_area = &vcpu->arch.vr; preempt_enable(); #endif } t->regs->msr |= msr; vcpu->arch.guest_owned_ext |= msr; kvmppc_recalc_shadow_msr(vcpu); return RESUME_GUEST; } /* * Kernel code using FP or VMX could have flushed guest state to * the thread_struct; if so, get it back now. 
*/ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) { unsigned long lost_ext; lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr; if (!lost_ext) return; if (lost_ext & MSR_FP) { preempt_disable(); enable_kernel_fp(); load_fp_state(&vcpu->arch.fp); disable_kernel_fp(); preempt_enable(); } #ifdef CONFIG_ALTIVEC if (lost_ext & MSR_VEC) { preempt_disable(); enable_kernel_altivec(); load_vr_state(&vcpu->arch.vr); disable_kernel_altivec(); preempt_enable(); } #endif current->thread.regs->msr |= lost_ext; } #ifdef CONFIG_PPC_BOOK3S_64 void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) { /* Inject the Interrupt Cause field and trigger a guest interrupt */ vcpu->arch.fscr &= ~(0xffULL << 56); vcpu->arch.fscr |= (fac << 56); kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); } static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac) { enum emulation_result er = EMULATE_FAIL; if (!(kvmppc_get_msr(vcpu) & MSR_PR)) er = kvmppc_emulate_instruction(vcpu); if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) { /* Couldn't emulate, trigger interrupt in guest */ kvmppc_trigger_fac_interrupt(vcpu, fac); } } /* Enable facilities (TAR, EBB, DSCR) for the guest */ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) { bool guest_fac_enabled; BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S)); /* * Not every facility is enabled by FSCR bits, check whether the * guest has this facility enabled at all. */ switch (fac) { case FSCR_TAR_LG: case FSCR_EBB_LG: guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac)); break; case FSCR_TM_LG: guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM; break; default: guest_fac_enabled = false; break; } if (!guest_fac_enabled) { /* Facility not enabled by the guest */ kvmppc_trigger_fac_interrupt(vcpu, fac); return RESUME_GUEST; } switch (fac) { case FSCR_TAR_LG: /* TAR switching isn't lazy in Linux yet */ current->thread.tar = mfspr(SPRN_TAR); mtspr(SPRN_TAR, vcpu->arch.tar); vcpu->arch.shadow_fscr |= FSCR_TAR; break; default: kvmppc_emulate_fac(vcpu, fac); break; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* Since we disabled MSR_TM at privilege state, the mfspr instruction * for TM spr can trigger TM fac unavailable. In this case, the * emulation is handled by kvmppc_emulate_fac(), which invokes * kvmppc_emulate_mfspr() finally. But note the mfspr can include * RT for NV registers. So it need to restore those NV reg to reflect * the update. 
*/ if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR)) return RESUME_GUEST_NV; #endif return RESUME_GUEST; } void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr) { if (fscr & FSCR_SCV) fscr &= ~FSCR_SCV; /* SCV must not be enabled */ /* Prohibit prefixed instructions for now */ fscr &= ~FSCR_PREFIX; if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { /* TAR got dropped, drop it in shadow too */ kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) { vcpu->arch.fscr = fscr; kvmppc_handle_fac(vcpu, FSCR_TAR_LG); return; } vcpu->arch.fscr = fscr; } #endif static void kvmppc_setup_debug(struct kvm_vcpu *vcpu) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { u64 msr = kvmppc_get_msr(vcpu); kvmppc_set_msr(vcpu, msr | MSR_SE); } } static void kvmppc_clear_debug(struct kvm_vcpu *vcpu) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { u64 msr = kvmppc_get_msr(vcpu); kvmppc_set_msr(vcpu, msr & ~MSR_SE); } } static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr) { enum emulation_result er; ulong flags; ppc_inst_t last_inst; int emul, r; /* * shadow_srr1 only contains valid flags if we came here via a program * exception. The other exceptions (emulation assist, FP unavailable, * etc.) do not provide flags in SRR1, so use an illegal-instruction * exception when injecting a program interrupt into the guest. */ if (exit_nr == BOOK3S_INTERRUPT_PROGRAM) flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; else flags = SRR1_PROGILL; emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); if (emul != EMULATE_DONE) return RESUME_GUEST; if (kvmppc_get_msr(vcpu) & MSR_PR) { #ifdef EXIT_DEBUG pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), ppc_inst_val(last_inst)); #endif if ((ppc_inst_val(last_inst) & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) { kvmppc_core_queue_program(vcpu, flags); return RESUME_GUEST; } } vcpu->stat.emulated_inst_exits++; er = kvmppc_emulate_instruction(vcpu); switch (er) { case EMULATE_DONE: r = RESUME_GUEST_NV; break; case EMULATE_AGAIN: r = RESUME_GUEST; break; case EMULATE_FAIL: pr_crit("%s: emulation at %lx failed (%08x)\n", __func__, kvmppc_get_pc(vcpu), ppc_inst_val(last_inst)); kvmppc_core_queue_program(vcpu, flags); r = RESUME_GUEST; break; case EMULATE_DO_MMIO: vcpu->run->exit_reason = KVM_EXIT_MMIO; r = RESUME_HOST_NV; break; case EMULATE_EXIT_USER: r = RESUME_HOST_NV; break; default: BUG(); } return r; } int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr) { struct kvm_run *run = vcpu->run; int r = RESUME_HOST; int s; vcpu->stat.sum_exits++; run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; /* We get here with MSR.EE=1 */ trace_kvm_exit(exit_nr, vcpu); guest_exit(); switch (exit_nr) { case BOOK3S_INTERRUPT_INST_STORAGE: { ulong shadow_srr1 = vcpu->arch.shadow_srr1; vcpu->stat.pf_instruc++; if (kvmppc_is_split_real(vcpu)) kvmppc_fixup_split_real(vcpu); #ifdef CONFIG_PPC_BOOK3S_32 /* We set segments as unused segments when invalidating them. So * treat the respective fault as segment fault. 
*/ { struct kvmppc_book3s_shadow_vcpu *svcpu; u32 sr; svcpu = svcpu_get(vcpu); sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]; svcpu_put(svcpu); if (sr == SR_INVALID) { kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); r = RESUME_GUEST; break; } } #endif /* only care about PTEG not found errors, but leave NX alone */ if (shadow_srr1 & 0x40000000) { int idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr); srcu_read_unlock(&vcpu->kvm->srcu, idx); vcpu->stat.sp_instruc++; } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { /* * XXX If we do the dcbz hack we use the NX bit to flush&patch the page, * so we can't use the NX bit inside the guest. Let's cross our fingers, * that no guest that needs the dcbz hack does NX. */ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); r = RESUME_GUEST; } else { kvmppc_core_queue_inst_storage(vcpu, shadow_srr1 & 0x58000000); r = RESUME_GUEST; } break; } case BOOK3S_INTERRUPT_DATA_STORAGE: { ulong dar = kvmppc_get_fault_dar(vcpu); u32 fault_dsisr = vcpu->arch.fault_dsisr; vcpu->stat.pf_storage++; #ifdef CONFIG_PPC_BOOK3S_32 /* We set segments as unused segments when invalidating them. So * treat the respective fault as segment fault. */ { struct kvmppc_book3s_shadow_vcpu *svcpu; u32 sr; svcpu = svcpu_get(vcpu); sr = svcpu->sr[dar >> SID_SHIFT]; svcpu_put(svcpu); if (sr == SR_INVALID) { kvmppc_mmu_map_segment(vcpu, dar); r = RESUME_GUEST; break; } } #endif /* * We need to handle missing shadow PTEs, and * protection faults due to us mapping a page read-only * when the guest thinks it is writable. */ if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) { int idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvmppc_handle_pagefault(vcpu, dar, exit_nr); srcu_read_unlock(&vcpu->kvm->srcu, idx); } else { kvmppc_core_queue_data_storage(vcpu, 0, dar, fault_dsisr); r = RESUME_GUEST; } break; } case BOOK3S_INTERRUPT_DATA_SEGMENT: if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_SEGMENT); } r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_INST_SEGMENT: if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) { kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_SEGMENT); } r = RESUME_GUEST; break; /* We're good on these - the host merely wanted to get our attention */ case BOOK3S_INTERRUPT_DECREMENTER: case BOOK3S_INTERRUPT_HV_DECREMENTER: case BOOK3S_INTERRUPT_DOORBELL: case BOOK3S_INTERRUPT_H_DOORBELL: vcpu->stat.dec_exits++; r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_EXTERNAL: case BOOK3S_INTERRUPT_EXTERNAL_HV: case BOOK3S_INTERRUPT_H_VIRT: vcpu->stat.ext_intr_exits++; r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_HMI: case BOOK3S_INTERRUPT_PERFMON: case BOOK3S_INTERRUPT_SYSTEM_RESET: r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_PROGRAM: case BOOK3S_INTERRUPT_H_EMUL_ASSIST: r = kvmppc_exit_pr_progint(vcpu, exit_nr); break; case BOOK3S_INTERRUPT_SYSCALL: { ppc_inst_t last_sc; int emul; /* Get last sc for papr */ if (vcpu->arch.papr_enabled) { /* The sc instruction points SRR0 to the next inst */ emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc); if (emul != EMULATE_DONE) { kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4); r = RESUME_GUEST; break; } } if (vcpu->arch.papr_enabled && (ppc_inst_val(last_sc) == 0x44000022) && !(kvmppc_get_msr(vcpu) & MSR_PR)) { /* SC 1 papr hypercalls */ ulong cmd = kvmppc_get_gpr(vcpu, 3); int i; #ifdef 
CONFIG_PPC_BOOK3S_64 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { r = RESUME_GUEST; break; } #endif run->papr_hcall.nr = cmd; for (i = 0; i < 9; ++i) { ulong gpr = kvmppc_get_gpr(vcpu, 4 + i); run->papr_hcall.args[i] = gpr; } run->exit_reason = KVM_EXIT_PAPR_HCALL; vcpu->arch.hcall_needed = 1; r = RESUME_HOST; } else if (vcpu->arch.osi_enabled && (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) && (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) { /* MOL hypercalls */ u64 *gprs = run->osi.gprs; int i; run->exit_reason = KVM_EXIT_OSI; for (i = 0; i < 32; i++) gprs[i] = kvmppc_get_gpr(vcpu, i); vcpu->arch.osi_needed = 1; r = RESUME_HOST_NV; } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { /* KVM PV hypercalls */ kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); r = RESUME_GUEST; } else { /* Guest syscalls */ vcpu->stat.syscall_exits++; kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; } break; } case BOOK3S_INTERRUPT_FP_UNAVAIL: case BOOK3S_INTERRUPT_ALTIVEC: case BOOK3S_INTERRUPT_VSX: { int ext_msr = 0; int emul; ppc_inst_t last_inst; if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { /* Do paired single instruction emulation */ emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); if (emul == EMULATE_DONE) r = kvmppc_exit_pr_progint(vcpu, exit_nr); else r = RESUME_GUEST; break; } /* Enable external provider */ switch (exit_nr) { case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break; case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break; case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break; } r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr); break; } case BOOK3S_INTERRUPT_ALIGNMENT: { ppc_inst_t last_inst; int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); if (emul == EMULATE_DONE) { u32 dsisr; u64 dar; dsisr = kvmppc_alignment_dsisr(vcpu, ppc_inst_val(last_inst)); dar = kvmppc_alignment_dar(vcpu, ppc_inst_val(last_inst)); kvmppc_set_dsisr(vcpu, dsisr); kvmppc_set_dar(vcpu, dar); kvmppc_book3s_queue_irqprio(vcpu, exit_nr); } r = RESUME_GUEST; break; } #ifdef CONFIG_PPC_BOOK3S_64 case BOOK3S_INTERRUPT_FAC_UNAVAIL: r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); break; #endif case BOOK3S_INTERRUPT_MACHINE_CHECK: kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_TRACE: if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { run->exit_reason = KVM_EXIT_DEBUG; r = RESUME_HOST; } else { kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; } break; default: { ulong shadow_srr1 = vcpu->arch.shadow_srr1; /* Ugh - bork here! What did we get? */ printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); r = RESUME_HOST; BUG(); break; } } if (!(r & RESUME_HOST)) { /* To avoid clobbering exit_reason, only check for signals if * we aren't already exiting to userspace for some other * reason. */ /* * Interrupts could be timers for the guest which we have to * inject again, so let's postpone them until we're in the guest * and if we really did time things so badly, then we just exit * again due to a host external interrupt. 
*/ s = kvmppc_prepare_to_enter(vcpu); if (s <= 0) r = s; else { /* interrupts now hard-disabled */ kvmppc_fix_ee_before_entry(); } kvmppc_handle_lost_ext(vcpu); } trace_kvm_book3s_reenter(r, vcpu); return r; } static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int i; sregs->pvr = vcpu->arch.pvr; sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { for (i = 0; i < 64; i++) { sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i; sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; } } else { for (i = 0; i < 16; i++) sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i); for (i = 0; i < 8; i++) { sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; } } return 0; } static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int i; kvmppc_set_pvr_pr(vcpu, sregs->pvr); vcpu3s->sdr1 = sregs->u.s.sdr1; #ifdef CONFIG_PPC_BOOK3S_64 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { /* Flush all SLB entries */ vcpu->arch.mmu.slbmte(vcpu, 0, 0); vcpu->arch.mmu.slbia(vcpu); for (i = 0; i < 64; i++) { u64 rb = sregs->u.s.ppc64.slb[i].slbe; u64 rs = sregs->u.s.ppc64.slb[i].slbv; if (rb & SLB_ESID_V) vcpu->arch.mmu.slbmte(vcpu, rs, rb); } } else #endif { for (i = 0; i < 16; i++) { vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); } for (i = 0; i < 8; i++) { kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, (u32)sregs->u.s.ppc32.ibat[i]); kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, (u32)(sregs->u.s.ppc32.ibat[i] >> 32)); kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, (u32)sregs->u.s.ppc32.dbat[i]); kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, (u32)(sregs->u.s.ppc32.dbat[i] >> 32)); } } /* Flush the MMU after messing with the segments */ kvmppc_mmu_pte_flush(vcpu, 0, 0); return 0; } static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; switch (id) { case KVM_REG_PPC_DEBUG_INST: *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT); break; case KVM_REG_PPC_HIOR: *val = get_reg_val(id, to_book3s(vcpu)->hior); break; case KVM_REG_PPC_VTB: *val = get_reg_val(id, to_book3s(vcpu)->vtb); break; case KVM_REG_PPC_LPCR: case KVM_REG_PPC_LPCR_64: /* * We are only interested in the LPCR_ILE bit */ if (vcpu->arch.intr_msr & MSR_LE) *val = get_reg_val(id, LPCR_ILE); else *val = get_reg_val(id, 0); break; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case KVM_REG_PPC_TFHAR: *val = get_reg_val(id, vcpu->arch.tfhar); break; case KVM_REG_PPC_TFIAR: *val = get_reg_val(id, vcpu->arch.tfiar); break; case KVM_REG_PPC_TEXASR: *val = get_reg_val(id, vcpu->arch.texasr); break; case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: *val = get_reg_val(id, vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]); break; case KVM_REG_PPC_TM_VSR0 ... 
KVM_REG_PPC_TM_VSR63: { int i, j; i = id - KVM_REG_PPC_TM_VSR0; if (i < 32) for (j = 0; j < TS_FPRWIDTH; j++) val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; else { if (cpu_has_feature(CPU_FTR_ALTIVEC)) val->vval = vcpu->arch.vr_tm.vr[i-32]; else r = -ENXIO; } break; } case KVM_REG_PPC_TM_CR: *val = get_reg_val(id, vcpu->arch.cr_tm); break; case KVM_REG_PPC_TM_XER: *val = get_reg_val(id, vcpu->arch.xer_tm); break; case KVM_REG_PPC_TM_LR: *val = get_reg_val(id, vcpu->arch.lr_tm); break; case KVM_REG_PPC_TM_CTR: *val = get_reg_val(id, vcpu->arch.ctr_tm); break; case KVM_REG_PPC_TM_FPSCR: *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); break; case KVM_REG_PPC_TM_AMR: *val = get_reg_val(id, vcpu->arch.amr_tm); break; case KVM_REG_PPC_TM_PPR: *val = get_reg_val(id, vcpu->arch.ppr_tm); break; case KVM_REG_PPC_TM_VRSAVE: *val = get_reg_val(id, vcpu->arch.vrsave_tm); break; case KVM_REG_PPC_TM_VSCR: if (cpu_has_feature(CPU_FTR_ALTIVEC)) *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); else r = -ENXIO; break; case KVM_REG_PPC_TM_DSCR: *val = get_reg_val(id, vcpu->arch.dscr_tm); break; case KVM_REG_PPC_TM_TAR: *val = get_reg_val(id, vcpu->arch.tar_tm); break; #endif default: r = -EINVAL; break; } return r; } static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr) { if (new_lpcr & LPCR_ILE) vcpu->arch.intr_msr |= MSR_LE; else vcpu->arch.intr_msr &= ~MSR_LE; } static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; switch (id) { case KVM_REG_PPC_HIOR: to_book3s(vcpu)->hior = set_reg_val(id, *val); to_book3s(vcpu)->hior_explicit = true; break; case KVM_REG_PPC_VTB: to_book3s(vcpu)->vtb = set_reg_val(id, *val); break; case KVM_REG_PPC_LPCR: case KVM_REG_PPC_LPCR_64: kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); break; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case KVM_REG_PPC_TFHAR: vcpu->arch.tfhar = set_reg_val(id, *val); break; case KVM_REG_PPC_TFIAR: vcpu->arch.tfiar = set_reg_val(id, *val); break; case KVM_REG_PPC_TEXASR: vcpu->arch.texasr = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_VSR0 ... 
KVM_REG_PPC_TM_VSR63: { int i, j; i = id - KVM_REG_PPC_TM_VSR0; if (i < 32) for (j = 0; j < TS_FPRWIDTH; j++) vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; else if (cpu_has_feature(CPU_FTR_ALTIVEC)) vcpu->arch.vr_tm.vr[i-32] = val->vval; else r = -ENXIO; break; } case KVM_REG_PPC_TM_CR: vcpu->arch.cr_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_XER: vcpu->arch.xer_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_LR: vcpu->arch.lr_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_CTR: vcpu->arch.ctr_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_FPSCR: vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_AMR: vcpu->arch.amr_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_PPR: vcpu->arch.ppr_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_VRSAVE: vcpu->arch.vrsave_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_VSCR: if (cpu_has_feature(CPU_FTR_ALTIVEC)) vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); else r = -ENXIO; break; case KVM_REG_PPC_TM_DSCR: vcpu->arch.dscr_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_TAR: vcpu->arch.tar_tm = set_reg_val(id, *val); break; #endif default: r = -EINVAL; break; } return r; } static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu_book3s; unsigned long p; int err; err = -ENOMEM; vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); if (!vcpu_book3s) goto out; vcpu->arch.book3s = vcpu_book3s; #ifdef CONFIG_KVM_BOOK3S_32_HANDLER vcpu->arch.shadow_vcpu = kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); if (!vcpu->arch.shadow_vcpu) goto free_vcpu3s; #endif p = __get_free_page(GFP_KERNEL|__GFP_ZERO); if (!p) goto free_shadow_vcpu; vcpu->arch.shared = (void *)p; #ifdef CONFIG_PPC_BOOK3S_64 /* Always start the shared struct in native endian mode */ #ifdef __BIG_ENDIAN__ vcpu->arch.shared_big_endian = true; #else vcpu->arch.shared_big_endian = false; #endif /* * Default to the same as the host if we're on sufficiently * recent machine that we have 1TB segments; * otherwise default to PPC970FX. */ vcpu->arch.pvr = 0x3C0301; if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) vcpu->arch.pvr = mfspr(SPRN_PVR); vcpu->arch.intr_msr = MSR_SF; #else /* default to book3s_32 (750) */ vcpu->arch.pvr = 0x84202; vcpu->arch.intr_msr = 0; #endif kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); vcpu->arch.slb_nr = 64; vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; err = kvmppc_mmu_init_pr(vcpu); if (err < 0) goto free_shared_page; return 0; free_shared_page: free_page((unsigned long)vcpu->arch.shared); free_shadow_vcpu: #ifdef CONFIG_KVM_BOOK3S_32_HANDLER kfree(vcpu->arch.shadow_vcpu); free_vcpu3s: #endif vfree(vcpu_book3s); out: return err; } static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); kvmppc_mmu_destroy_pr(vcpu); free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); #ifdef CONFIG_KVM_BOOK3S_32_HANDLER kfree(vcpu->arch.shadow_vcpu); #endif vfree(vcpu_book3s); } static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu) { int ret; /* Check if we can run the vcpu at all */ if (!vcpu->arch.sane) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = -EINVAL; goto out; } kvmppc_setup_debug(vcpu); /* * Interrupts could be timers for the guest which we have to inject * again, so let's postpone them until we're in the guest and if we * really did time things so badly, then we just exit again due to * a host external interrupt. 
*/ ret = kvmppc_prepare_to_enter(vcpu); if (ret <= 0) goto out; /* interrupts now hard-disabled */ /* Save FPU, Altivec and VSX state */ giveup_all(current); /* Preload FPU if it's enabled */ if (kvmppc_get_msr(vcpu) & MSR_FP) kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); kvmppc_fix_ee_before_entry(); ret = __kvmppc_vcpu_run(vcpu); kvmppc_clear_debug(vcpu); /* No need for guest_exit. It's done in handle_exit. We also get here with interrupts enabled. */ /* Make sure we save the guest FPU/Altivec/VSX state */ kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); /* Make sure we save the guest TAR/EBB/DSCR state */ kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); srr_regs_clobbered(); out: vcpu->mode = OUTSIDE_GUEST_MODE; return ret; } /* * Get (and clear) the dirty memory log for a memory slot. */ static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm, struct kvm_dirty_log *log) { struct kvm_memory_slot *memslot; struct kvm_vcpu *vcpu; ulong ga, ga_end; int is_dirty = 0; int r; unsigned long n; mutex_lock(&kvm->slots_lock); r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot); if (r) goto out; /* If nothing is dirty, don't bother messing with page tables. */ if (is_dirty) { ga = memslot->base_gfn << PAGE_SHIFT; ga_end = ga + (memslot->npages << PAGE_SHIFT); kvm_for_each_vcpu(n, vcpu, kvm) kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); } r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } static void kvmppc_core_flush_memslot_pr(struct kvm *kvm, struct kvm_memory_slot *memslot) { return; } static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) { return 0; } static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { return; } static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *slot) { return; } #ifdef CONFIG_PPC64 static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, struct kvm_ppc_smmu_info *info) { long int i; struct kvm_vcpu *vcpu; info->flags = 0; /* SLB is always 64 entries */ info->slb_size = 64; /* Standard 4k base page size segment */ info->sps[0].page_shift = 12; info->sps[0].slb_enc = 0; info->sps[0].enc[0].page_shift = 12; info->sps[0].enc[0].pte_enc = 0; /* * 64k large page size. * We only want to put this in if the CPUs we're emulating * support it, but unfortunately we don't have a vcpu easily * to hand here to test. Just pick the first vcpu, and if * that doesn't exist yet, report the minimum capability, * i.e., no 64k pages. * 1T segment support goes along with 64k pages. */ i = 1; vcpu = kvm_get_vcpu(kvm, 0); if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { info->flags = KVM_PPC_1T_SEGMENTS; info->sps[i].page_shift = 16; info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01; info->sps[i].enc[0].page_shift = 16; info->sps[i].enc[0].pte_enc = 1; ++i; } /* Standard 16M large page size segment */ info->sps[i].page_shift = 24; info->sps[i].slb_enc = SLB_VSID_L; info->sps[i].enc[0].page_shift = 24; info->sps[i].enc[0].pte_enc = 0; return 0; } static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) { if (!cpu_has_feature(CPU_FTR_ARCH_300)) return -ENODEV; /* Require flags and process table base and size to all be zero. 
*/ if (cfg->flags || cfg->process_table) return -EINVAL; return 0; } #else static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, struct kvm_ppc_smmu_info *info) { /* We should not get called */ BUG(); return 0; } #endif /* CONFIG_PPC64 */ static unsigned int kvm_global_user_count = 0; static DEFINE_SPINLOCK(kvm_global_user_count_lock); static int kvmppc_core_init_vm_pr(struct kvm *kvm) { mutex_init(&kvm->arch.hpt_mutex); #ifdef CONFIG_PPC_BOOK3S_64 /* Start out with the default set of hcalls enabled */ kvmppc_pr_init_default_hcalls(kvm); #endif if (firmware_has_feature(FW_FEATURE_SET_MODE)) { spin_lock(&kvm_global_user_count_lock); if (++kvm_global_user_count == 1) pseries_disable_reloc_on_exc(); spin_unlock(&kvm_global_user_count_lock); } return 0; } static void kvmppc_core_destroy_vm_pr(struct kvm *kvm) { #ifdef CONFIG_PPC64 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); #endif if (firmware_has_feature(FW_FEATURE_SET_MODE)) { spin_lock(&kvm_global_user_count_lock); BUG_ON(kvm_global_user_count == 0); if (--kvm_global_user_count == 0) pseries_enable_reloc_on_exc(); spin_unlock(&kvm_global_user_count_lock); } } static int kvmppc_core_check_processor_compat_pr(void) { /* * PR KVM can work on POWER9 inside a guest partition * running in HPT mode. It can't work if we are using * radix translation (because radix provides no way for * a process to have unique translations in quadrant 3). */ if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) return -EIO; return 0; } static int kvm_arch_vm_ioctl_pr(struct file *filp, unsigned int ioctl, unsigned long arg) { return -ENOTTY; } static struct kvmppc_ops kvm_ops_pr = { .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr, .get_one_reg = kvmppc_get_one_reg_pr, .set_one_reg = kvmppc_set_one_reg_pr, .vcpu_load = kvmppc_core_vcpu_load_pr, .vcpu_put = kvmppc_core_vcpu_put_pr, .inject_interrupt = kvmppc_inject_interrupt_pr, .set_msr = kvmppc_set_msr_pr, .vcpu_run = kvmppc_vcpu_run_pr, .vcpu_create = kvmppc_core_vcpu_create_pr, .vcpu_free = kvmppc_core_vcpu_free_pr, .check_requests = kvmppc_core_check_requests_pr, .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr, .flush_memslot = kvmppc_core_flush_memslot_pr, .prepare_memory_region = kvmppc_core_prepare_memory_region_pr, .commit_memory_region = kvmppc_core_commit_memory_region_pr, .unmap_gfn_range = kvm_unmap_gfn_range_pr, .age_gfn = kvm_age_gfn_pr, .test_age_gfn = kvm_test_age_gfn_pr, .set_spte_gfn = kvm_set_spte_gfn_pr, .free_memslot = kvmppc_core_free_memslot_pr, .init_vm = kvmppc_core_init_vm_pr, .destroy_vm = kvmppc_core_destroy_vm_pr, .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr, .emulate_op = kvmppc_core_emulate_op_pr, .emulate_mtspr = kvmppc_core_emulate_mtspr_pr, .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, .fast_vcpu_kick = kvm_vcpu_kick, .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, #ifdef CONFIG_PPC_BOOK3S_64 .hcall_implemented = kvmppc_hcall_impl_pr, .configure_mmu = kvm_configure_mmu_pr, #endif .giveup_ext = kvmppc_giveup_ext, }; int kvmppc_book3s_init_pr(void) { int r; r = kvmppc_core_check_processor_compat_pr(); if (r < 0) return r; kvm_ops_pr.owner = THIS_MODULE; kvmppc_pr_ops = &kvm_ops_pr; r = kvmppc_mmu_hpte_sysinit(); return r; } void kvmppc_book3s_exit_pr(void) { kvmppc_pr_ops = NULL; kvmppc_mmu_hpte_sysexit(); } /* * We only support separate modules for book3s 64 */ #ifdef CONFIG_PPC_BOOK3S_64 module_init(kvmppc_book3s_init_pr); module_exit(kvmppc_book3s_exit_pr); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(KVM_MINOR); 
MODULE_ALIAS("devname:kvm"); #endif
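/*
 * Illustrative sketch only -- not part of book3s_pr.c above. It shows the
 * mask-and-patch idea used by kvmppc_patch_dcbz(): scan 32-bit instruction
 * words, match dcbz by primary/extended opcode (the 0xff0007ff mask), and
 * clear one bit so the patched instruction traps instead of zeroing a full
 * cache line. The opcode constant and helper name are assumptions for the
 * sake of the example; the kernel additionally works on big-endian words
 * mapped from a guest page.
 */
#include <stdint.h>
#include <stddef.h>

#define INS_DCBZ_EXAMPLE 0x7c0007ecU	/* assumed encoding of dcbz */

static void patch_dcbz_words(uint32_t *words, size_t nwords)
{
	size_t i;

	for (i = 0; i < nwords; i++) {
		/* Compare opcode bits only, as the 0xff0007ff mask does above. */
		if ((words[i] & 0xff0007ff) == INS_DCBZ_EXAMPLE)
			words[i] &= 0xfffffff7U;	/* same bit cleared as in the kernel */
	}
}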
linux-master
arch/powerpc/kvm/book3s_pr.c
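/*
 * Illustrative sketch only -- not taken from book3s_pr.c above. It condenses
 * the ownership bookkeeping done by kvmppc_handle_ext() and
 * kvmppc_giveup_ext(): a bitmask records which facilities the guest currently
 * owns, loading only adds bits that are not owned yet, and giving up VSX
 * implies giving up FP and VEC as well. The EXT_* constants, helper names and
 * printf output are simplified stand-ins, not kernel symbols.
 */
#include <stdio.h>

#define EXT_FP  0x1u
#define EXT_VEC 0x2u
#define EXT_VSX 0x4u

static unsigned int guest_owned_ext;	/* stand-in for vcpu->arch.guest_owned_ext */

static void handle_ext(unsigned int msr)
{
	if (msr == EXT_VSX)			/* VSX needs FP and VEC loaded too */
		msr = EXT_FP | EXT_VEC | EXT_VSX;
	msr &= ~guest_owned_ext;		/* only load what is not owned yet */
	if (!msr)
		return;
	guest_owned_ext |= msr;
	printf("loaded 0x%x, now own 0x%x\n", msr, guest_owned_ext);
}

static void giveup_ext(unsigned int msr)
{
	if (msr & EXT_VSX)			/* dropping VSX drops FP and VEC too */
		msr |= EXT_FP | EXT_VEC;
	msr &= guest_owned_ext;			/* only flush what is actually owned */
	if (!msr)
		return;
	guest_owned_ext &= ~(msr | EXT_VSX);
	printf("flushed 0x%x, now own 0x%x\n", msr, guest_owned_ext);
}

int main(void)
{
	handle_ext(EXT_FP);			/* guest first touches FP */
	handle_ext(EXT_VSX);			/* VSX pulls in VEC as well */
	giveup_ext(EXT_FP | EXT_VEC | EXT_VSX);	/* host reclaims everything */
	return 0;
}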
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. * * Authors: * Alexander Graf <[email protected]> * Kevin Wolf <[email protected]> * * Description: * This file is derived from arch/powerpc/kvm/44x.c, * by Hollis Blanchard <[email protected]>. */ #include <linux/kvm_host.h> #include <linux/err.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/gfp.h> #include <linux/sched.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include <asm/reg.h> #include <asm/cputable.h> #include <asm/cacheflush.h> #include <linux/uaccess.h> #include <asm/io.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/mmu_context.h> #include <asm/page.h> #include <asm/xive.h> #include "book3s.h" #include "trace.h" /* #define EXIT_DEBUG */ const struct _kvm_stats_desc kvm_vm_stats_desc[] = { KVM_GENERIC_VM_STATS(), STATS_DESC_ICOUNTER(VM, num_2M_pages), STATS_DESC_ICOUNTER(VM, num_1G_pages) }; const struct kvm_stats_header kvm_vm_stats_header = { .name_size = KVM_STATS_NAME_SIZE, .num_desc = ARRAY_SIZE(kvm_vm_stats_desc), .id_offset = sizeof(struct kvm_stats_header), .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + sizeof(kvm_vm_stats_desc), }; const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { KVM_GENERIC_VCPU_STATS(), STATS_DESC_COUNTER(VCPU, sum_exits), STATS_DESC_COUNTER(VCPU, mmio_exits), STATS_DESC_COUNTER(VCPU, signal_exits), STATS_DESC_COUNTER(VCPU, light_exits), STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits), STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits), STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits), STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits), STATS_DESC_COUNTER(VCPU, syscall_exits), STATS_DESC_COUNTER(VCPU, isi_exits), STATS_DESC_COUNTER(VCPU, dsi_exits), STATS_DESC_COUNTER(VCPU, emulated_inst_exits), STATS_DESC_COUNTER(VCPU, dec_exits), STATS_DESC_COUNTER(VCPU, ext_intr_exits), STATS_DESC_COUNTER(VCPU, halt_successful_wait), STATS_DESC_COUNTER(VCPU, dbell_exits), STATS_DESC_COUNTER(VCPU, gdbell_exits), STATS_DESC_COUNTER(VCPU, ld), STATS_DESC_COUNTER(VCPU, st), STATS_DESC_COUNTER(VCPU, pf_storage), STATS_DESC_COUNTER(VCPU, pf_instruc), STATS_DESC_COUNTER(VCPU, sp_storage), STATS_DESC_COUNTER(VCPU, sp_instruc), STATS_DESC_COUNTER(VCPU, queue_intr), STATS_DESC_COUNTER(VCPU, ld_slow), STATS_DESC_COUNTER(VCPU, st_slow), STATS_DESC_COUNTER(VCPU, pthru_all), STATS_DESC_COUNTER(VCPU, pthru_host), STATS_DESC_COUNTER(VCPU, pthru_bad_aff) }; const struct kvm_stats_header kvm_vcpu_stats_header = { .name_size = KVM_STATS_NAME_SIZE, .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), .id_offset = sizeof(struct kvm_stats_header), .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + sizeof(kvm_vcpu_stats_desc), }; static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, unsigned long pending_now, unsigned long old_pending) { if (is_kvmppc_hv_enabled(vcpu->kvm)) return; if (pending_now) kvmppc_set_int_pending(vcpu, 1); else if (old_pending) kvmppc_set_int_pending(vcpu, 0); } static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) { ulong crit_raw; ulong crit_r1; bool crit; if (is_kvmppc_hv_enabled(vcpu->kvm)) return false; crit_raw = kvmppc_get_critical(vcpu); crit_r1 = kvmppc_get_gpr(vcpu, 1); /* Truncate crit indicators in 32 bit mode */ if 
(!(kvmppc_get_msr(vcpu) & MSR_SF)) { crit_raw &= 0xffffffff; crit_r1 &= 0xffffffff; } /* Critical section when crit == r1 */ crit = (crit_raw == crit_r1); /* ... and we're in supervisor mode */ crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR); return crit; } void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) { vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags); } static int kvmppc_book3s_vec2irqprio(unsigned int vec) { unsigned int prio; switch (vec) { case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break; case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break; case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break; case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break; case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break; case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break; case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break; case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break; case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break; case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break; case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break; case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break; case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break; case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break; case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break; case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL; break; default: prio = BOOK3S_IRQPRIO_MAX; break; } return prio; } void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) { unsigned long old_pending = vcpu->arch.pending_exceptions; clear_bit(kvmppc_book3s_vec2irqprio(vec), &vcpu->arch.pending_exceptions); kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions, old_pending); } void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) { vcpu->stat.queue_intr++; set_bit(kvmppc_book3s_vec2irqprio(vec), &vcpu->arch.pending_exceptions); #ifdef EXIT_DEBUG printk(KERN_INFO "Queueing interrupt %x\n", vec); #endif } EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio); void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong srr1_flags) { /* might as well deliver this straight away */ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, srr1_flags); } EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check); void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu) { kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0); } EXPORT_SYMBOL(kvmppc_core_queue_syscall); void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong srr1_flags) { /* might as well deliver this straight away */ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, srr1_flags); } EXPORT_SYMBOL_GPL(kvmppc_core_queue_program); void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags) { /* might as well deliver this straight away */ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, srr1_flags); } void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags) { /* might as well deliver this straight away */ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, srr1_flags); } void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags) { /* might as well deliver this straight away */ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, srr1_flags); } void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) { kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); } EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec); int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) { return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); } EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec); 
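/*
 * Illustrative sketch only -- not from book3s.c. The queue/dequeue helpers
 * above reduce to setting and clearing bits in a priority bitmap, and
 * delivery (see kvmppc_core_prepare_to_enter() further below) walks that
 * bitmap from the lowest set bit upwards with __ffs()/find_next_bit().
 * Here __builtin_ctzl() stands in for __ffs(); the priority numbers are
 * made up for the demo.
 */
#include <stdio.h>

static unsigned long pending;		/* stand-in for vcpu->arch.pending_exceptions */

static void queue_prio(unsigned int prio)   { pending |= 1UL << prio; }
static void dequeue_prio(unsigned int prio) { pending &= ~(1UL << prio); }

static int next_pending_prio(void)
{
	return pending ? (int)__builtin_ctzl(pending) : -1;
}

int main(void)
{
	queue_prio(9);			/* lower-priority exception */
	queue_prio(4);			/* higher priority = lower bit number */
	printf("deliver priority %d first\n", next_pending_prio());
	dequeue_prio(4);
	printf("then priority %d\n", next_pending_prio());
	return 0;
}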
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) { kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); } EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec); void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { /* * This case (KVM_INTERRUPT_SET) should never actually arise for * a pseries guest (because pseries guests expect their interrupt * controllers to continue asserting an external interrupt request * until it is acknowledged at the interrupt controller), but is * included to avoid ABI breakage and potentially for other * sorts of guest. * * There is a subtlety here: HV KVM does not test the * external_oneshot flag in the code that synthesizes * external interrupts for the guest just before entering * the guest. That is OK even if userspace did do a * KVM_INTERRUPT_SET on a pseries guest vcpu, because the * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick() * which ends up doing a smp_send_reschedule(), which will * pull the guest all the way out to the host, meaning that * we will call kvmppc_core_prepare_to_enter() before entering * the guest again, and that will handle the external_oneshot * flag correctly. */ if (irq->irq == KVM_INTERRUPT_SET) vcpu->arch.external_oneshot = 1; kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); } void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu) { kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); } void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags, ulong dar, ulong dsisr) { kvmppc_set_dar(vcpu, dar); kvmppc_set_dsisr(vcpu, dsisr); kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, srr1_flags); } EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage); void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong srr1_flags) { kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, srr1_flags); } EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage); static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) { int deliver = 1; int vec = 0; bool crit = kvmppc_critical_section(vcpu); switch (priority) { case BOOK3S_IRQPRIO_DECREMENTER: deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; vec = BOOK3S_INTERRUPT_DECREMENTER; break; case BOOK3S_IRQPRIO_EXTERNAL: deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; vec = BOOK3S_INTERRUPT_EXTERNAL; break; case BOOK3S_IRQPRIO_SYSTEM_RESET: vec = BOOK3S_INTERRUPT_SYSTEM_RESET; break; case BOOK3S_IRQPRIO_MACHINE_CHECK: vec = BOOK3S_INTERRUPT_MACHINE_CHECK; break; case BOOK3S_IRQPRIO_DATA_STORAGE: vec = BOOK3S_INTERRUPT_DATA_STORAGE; break; case BOOK3S_IRQPRIO_INST_STORAGE: vec = BOOK3S_INTERRUPT_INST_STORAGE; break; case BOOK3S_IRQPRIO_DATA_SEGMENT: vec = BOOK3S_INTERRUPT_DATA_SEGMENT; break; case BOOK3S_IRQPRIO_INST_SEGMENT: vec = BOOK3S_INTERRUPT_INST_SEGMENT; break; case BOOK3S_IRQPRIO_ALIGNMENT: vec = BOOK3S_INTERRUPT_ALIGNMENT; break; case BOOK3S_IRQPRIO_PROGRAM: vec = BOOK3S_INTERRUPT_PROGRAM; break; case BOOK3S_IRQPRIO_VSX: vec = BOOK3S_INTERRUPT_VSX; break; case BOOK3S_IRQPRIO_ALTIVEC: vec = BOOK3S_INTERRUPT_ALTIVEC; break; case BOOK3S_IRQPRIO_FP_UNAVAIL: vec = BOOK3S_INTERRUPT_FP_UNAVAIL; break; case BOOK3S_IRQPRIO_SYSCALL: vec = BOOK3S_INTERRUPT_SYSCALL; break; case BOOK3S_IRQPRIO_DEBUG: vec = BOOK3S_INTERRUPT_TRACE; break; case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR: vec = BOOK3S_INTERRUPT_PERFMON; break; case BOOK3S_IRQPRIO_FAC_UNAVAIL: vec = BOOK3S_INTERRUPT_FAC_UNAVAIL; break; default: deliver = 0; printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority); 
break; } #if 0 printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver); #endif if (deliver) kvmppc_inject_interrupt(vcpu, vec, 0); return deliver; } /* * This function determines if an irqprio should be cleared once issued. */ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority) { switch (priority) { case BOOK3S_IRQPRIO_DECREMENTER: /* DEC interrupts get cleared by mtdec */ return false; case BOOK3S_IRQPRIO_EXTERNAL: /* * External interrupts get cleared by userspace * except when set by the KVM_INTERRUPT ioctl with * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL). */ if (vcpu->arch.external_oneshot) { vcpu->arch.external_oneshot = 0; return true; } return false; } return true; } int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) { unsigned long *pending = &vcpu->arch.pending_exceptions; unsigned long old_pending = vcpu->arch.pending_exceptions; unsigned int priority; #ifdef EXIT_DEBUG if (vcpu->arch.pending_exceptions) printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions); #endif priority = __ffs(*pending); while (priority < BOOK3S_IRQPRIO_MAX) { if (kvmppc_book3s_irqprio_deliver(vcpu, priority) && clear_irqprio(vcpu, priority)) { clear_bit(priority, &vcpu->arch.pending_exceptions); break; } priority = find_next_bit(pending, BITS_PER_BYTE * sizeof(*pending), priority + 1); } /* Tell the guest about our interrupt status */ kvmppc_update_int_pending(vcpu, *pending, old_pending); return 0; } EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter); kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing, bool *writable) { ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM; gfn_t gfn = gpa >> PAGE_SHIFT; if (!(kvmppc_get_msr(vcpu) & MSR_SF)) mp_pa = (uint32_t)mp_pa; /* Magic page override */ gpa &= ~0xFFFULL; if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) { ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; kvm_pfn_t pfn; pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT; get_page(pfn_to_page(pfn)); if (writable) *writable = true; return pfn; } return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable); } EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn); int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid, enum xlate_readwrite xlrw, struct kvmppc_pte *pte) { bool data = (xlid == XLATE_DATA); bool iswrite = (xlrw == XLATE_WRITE); int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR)); int r; if (relocated) { r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite); } else { pte->eaddr = eaddr; pte->raddr = eaddr & KVM_PAM; pte->vpage = VSID_REAL | eaddr >> 12; pte->may_read = true; pte->may_write = true; pte->may_execute = true; r = 0; if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR && !data) { if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)) pte->raddr &= ~SPLIT_HACK_MASK; } } return r; } /* * Returns prefixed instructions with the prefix in the high 32 bits * of *inst and suffix in the low 32 bits. This is the same convention * as used in HEIR, vcpu->arch.last_inst and vcpu->arch.emul_inst. * Like vcpu->arch.last_inst but unlike vcpu->arch.emul_inst, each * half of the value needs byte-swapping if the guest endianness is * different from the host endianness. 
*/ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_fetch_type type, unsigned long *inst) { ulong pc = kvmppc_get_pc(vcpu); int r; u32 iw; if (type == INST_SC) pc -= 4; r = kvmppc_ld(vcpu, &pc, sizeof(u32), &iw, false); if (r != EMULATE_DONE) return EMULATE_AGAIN; /* * If [H]SRR1 indicates that the instruction that caused the * current interrupt is a prefixed instruction, get the suffix. */ if (kvmppc_get_msr(vcpu) & SRR1_PREFIXED) { u32 suffix; pc += 4; r = kvmppc_ld(vcpu, &pc, sizeof(u32), &suffix, false); if (r != EMULATE_DONE) return EMULATE_AGAIN; *inst = ((u64)iw << 32) | suffix; } else { *inst = iw; } return r; } EXPORT_SYMBOL_GPL(kvmppc_load_last_inst); int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu) { return 0; } void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu) { } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int ret; vcpu_load(vcpu); ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); vcpu_put(vcpu); return ret; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int ret; vcpu_load(vcpu); ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); vcpu_put(vcpu); return ret; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; regs->pc = kvmppc_get_pc(vcpu); regs->cr = kvmppc_get_cr(vcpu); regs->ctr = kvmppc_get_ctr(vcpu); regs->lr = kvmppc_get_lr(vcpu); regs->xer = kvmppc_get_xer(vcpu); regs->msr = kvmppc_get_msr(vcpu); regs->srr0 = kvmppc_get_srr0(vcpu); regs->srr1 = kvmppc_get_srr1(vcpu); regs->pid = vcpu->arch.pid; regs->sprg0 = kvmppc_get_sprg0(vcpu); regs->sprg1 = kvmppc_get_sprg1(vcpu); regs->sprg2 = kvmppc_get_sprg2(vcpu); regs->sprg3 = kvmppc_get_sprg3(vcpu); regs->sprg4 = kvmppc_get_sprg4(vcpu); regs->sprg5 = kvmppc_get_sprg5(vcpu); regs->sprg6 = kvmppc_get_sprg6(vcpu); regs->sprg7 = kvmppc_get_sprg7(vcpu); for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) regs->gpr[i] = kvmppc_get_gpr(vcpu, i); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; kvmppc_set_pc(vcpu, regs->pc); kvmppc_set_cr(vcpu, regs->cr); kvmppc_set_ctr(vcpu, regs->ctr); kvmppc_set_lr(vcpu, regs->lr); kvmppc_set_xer(vcpu, regs->xer); kvmppc_set_msr(vcpu, regs->msr); kvmppc_set_srr0(vcpu, regs->srr0); kvmppc_set_srr1(vcpu, regs->srr1); kvmppc_set_sprg0(vcpu, regs->sprg0); kvmppc_set_sprg1(vcpu, regs->sprg1); kvmppc_set_sprg2(vcpu, regs->sprg2); kvmppc_set_sprg3(vcpu, regs->sprg3); kvmppc_set_sprg4(vcpu, regs->sprg4); kvmppc_set_sprg5(vcpu, regs->sprg5); kvmppc_set_sprg6(vcpu, regs->sprg6); kvmppc_set_sprg7(vcpu, regs->sprg7); for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) kvmppc_set_gpr(vcpu, i, regs->gpr[i]); return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EOPNOTSUPP; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EOPNOTSUPP; } int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; long int i; r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val); if (r == -EINVAL) { r = 0; switch (id) { case KVM_REG_PPC_DAR: *val = get_reg_val(id, kvmppc_get_dar(vcpu)); break; case KVM_REG_PPC_DSISR: *val = get_reg_val(id, kvmppc_get_dsisr(vcpu)); break; case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: i = id - KVM_REG_PPC_FPR0; *val = get_reg_val(id, VCPU_FPR(vcpu, i)); break; case KVM_REG_PPC_FPSCR: *val = get_reg_val(id, vcpu->arch.fp.fpscr); break; #ifdef CONFIG_VSX case KVM_REG_PPC_VSR0 ... 
KVM_REG_PPC_VSR31: if (cpu_has_feature(CPU_FTR_VSX)) { i = id - KVM_REG_PPC_VSR0; val->vsxval[0] = vcpu->arch.fp.fpr[i][0]; val->vsxval[1] = vcpu->arch.fp.fpr[i][1]; } else { r = -ENXIO; } break; #endif /* CONFIG_VSX */ case KVM_REG_PPC_DEBUG_INST: *val = get_reg_val(id, INS_TW); break; #ifdef CONFIG_KVM_XICS case KVM_REG_PPC_ICP_STATE: if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) { r = -ENXIO; break; } if (xics_on_xive()) *val = get_reg_val(id, kvmppc_xive_get_icp(vcpu)); else *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu)); break; #endif /* CONFIG_KVM_XICS */ #ifdef CONFIG_KVM_XIVE case KVM_REG_PPC_VP_STATE: if (!vcpu->arch.xive_vcpu) { r = -ENXIO; break; } if (xive_enabled()) r = kvmppc_xive_native_get_vp(vcpu, val); else r = -ENXIO; break; #endif /* CONFIG_KVM_XIVE */ case KVM_REG_PPC_FSCR: *val = get_reg_val(id, vcpu->arch.fscr); break; case KVM_REG_PPC_TAR: *val = get_reg_val(id, vcpu->arch.tar); break; case KVM_REG_PPC_EBBHR: *val = get_reg_val(id, vcpu->arch.ebbhr); break; case KVM_REG_PPC_EBBRR: *val = get_reg_val(id, vcpu->arch.ebbrr); break; case KVM_REG_PPC_BESCR: *val = get_reg_val(id, vcpu->arch.bescr); break; case KVM_REG_PPC_IC: *val = get_reg_val(id, vcpu->arch.ic); break; default: r = -EINVAL; break; } } return r; } int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; long int i; r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val); if (r == -EINVAL) { r = 0; switch (id) { case KVM_REG_PPC_DAR: kvmppc_set_dar(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_DSISR: kvmppc_set_dsisr(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: i = id - KVM_REG_PPC_FPR0; VCPU_FPR(vcpu, i) = set_reg_val(id, *val); break; case KVM_REG_PPC_FPSCR: vcpu->arch.fp.fpscr = set_reg_val(id, *val); break; #ifdef CONFIG_VSX case KVM_REG_PPC_VSR0 ... 
KVM_REG_PPC_VSR31: if (cpu_has_feature(CPU_FTR_VSX)) { i = id - KVM_REG_PPC_VSR0; vcpu->arch.fp.fpr[i][0] = val->vsxval[0]; vcpu->arch.fp.fpr[i][1] = val->vsxval[1]; } else { r = -ENXIO; } break; #endif /* CONFIG_VSX */ #ifdef CONFIG_KVM_XICS case KVM_REG_PPC_ICP_STATE: if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) { r = -ENXIO; break; } if (xics_on_xive()) r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val)); else r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val)); break; #endif /* CONFIG_KVM_XICS */ #ifdef CONFIG_KVM_XIVE case KVM_REG_PPC_VP_STATE: if (!vcpu->arch.xive_vcpu) { r = -ENXIO; break; } if (xive_enabled()) r = kvmppc_xive_native_set_vp(vcpu, val); else r = -ENXIO; break; #endif /* CONFIG_KVM_XIVE */ case KVM_REG_PPC_FSCR: vcpu->arch.fscr = set_reg_val(id, *val); break; case KVM_REG_PPC_TAR: vcpu->arch.tar = set_reg_val(id, *val); break; case KVM_REG_PPC_EBBHR: vcpu->arch.ebbhr = set_reg_val(id, *val); break; case KVM_REG_PPC_EBBRR: vcpu->arch.ebbrr = set_reg_val(id, *val); break; case KVM_REG_PPC_BESCR: vcpu->arch.bescr = set_reg_val(id, *val); break; case KVM_REG_PPC_IC: vcpu->arch.ic = set_reg_val(id, *val); break; default: r = -EINVAL; break; } } return r; } void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu); } void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) { vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu); } void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) { vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr); } EXPORT_SYMBOL_GPL(kvmppc_set_msr); int kvmppc_vcpu_run(struct kvm_vcpu *vcpu) { return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu); } int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { return 0; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { vcpu_load(vcpu); vcpu->guest_debug = dbg->control; vcpu_put(vcpu); return 0; } void kvmppc_decrementer_func(struct kvm_vcpu *vcpu) { kvmppc_core_queue_dec(vcpu); kvm_vcpu_kick(vcpu); } int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu) { return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu); } void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) { vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); } int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) { return vcpu->kvm->arch.kvm_ops->check_requests(vcpu); } void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) { } int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { return kvm->arch.kvm_ops->get_dirty_log(kvm, log); } void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm->arch.kvm_ops->free_memslot(slot); } void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) { kvm->arch.kvm_ops->flush_memslot(kvm, memslot); } int kvmppc_core_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) { return kvm->arch.kvm_ops->prepare_memory_region(kvm, old, new, change); } void kvmppc_core_commit_memory_region(struct kvm *kvm, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { kvm->arch.kvm_ops->commit_memory_region(kvm, old, new, change); } bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range); } bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { return kvm->arch.kvm_ops->age_gfn(kvm, range); } bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { return 
kvm->arch.kvm_ops->test_age_gfn(kvm, range); } bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { return kvm->arch.kvm_ops->set_spte_gfn(kvm, range); } int kvmppc_core_init_vm(struct kvm *kvm) { #ifdef CONFIG_PPC64 INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables); INIT_LIST_HEAD(&kvm->arch.rtas_tokens); mutex_init(&kvm->arch.rtas_token_lock); #endif return kvm->arch.kvm_ops->init_vm(kvm); } void kvmppc_core_destroy_vm(struct kvm *kvm) { kvm->arch.kvm_ops->destroy_vm(kvm); #ifdef CONFIG_PPC64 kvmppc_rtas_tokens_free(kvm); WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); #endif #ifdef CONFIG_KVM_XICS /* * Free the XIVE and XICS devices which are not directly freed by the * device 'release' method */ kfree(kvm->arch.xive_devices.native); kvm->arch.xive_devices.native = NULL; kfree(kvm->arch.xive_devices.xics_on_xive); kvm->arch.xive_devices.xics_on_xive = NULL; kfree(kvm->arch.xics_device); kvm->arch.xics_device = NULL; #endif /* CONFIG_KVM_XICS */ } int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu) { unsigned long size = kvmppc_get_gpr(vcpu, 4); unsigned long addr = kvmppc_get_gpr(vcpu, 5); u64 buf; int srcu_idx; int ret; if (!is_power_of_2(size) || (size > sizeof(buf))) return H_TOO_HARD; srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf); srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); if (ret != 0) return H_TOO_HARD; switch (size) { case 1: kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf); break; case 2: kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf)); break; case 4: kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf)); break; case 8: kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf)); break; default: BUG(); } return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load); int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu) { unsigned long size = kvmppc_get_gpr(vcpu, 4); unsigned long addr = kvmppc_get_gpr(vcpu, 5); unsigned long val = kvmppc_get_gpr(vcpu, 6); u64 buf; int srcu_idx; int ret; switch (size) { case 1: *(u8 *)&buf = val; break; case 2: *(__be16 *)&buf = cpu_to_be16(val); break; case 4: *(__be32 *)&buf = cpu_to_be32(val); break; case 8: *(__be64 *)&buf = cpu_to_be64(val); break; default: return H_TOO_HARD; } srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf); srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); if (ret != 0) return H_TOO_HARD; return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store); int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall) { return kvm->arch.kvm_ops->hcall_implemented(hcall); } #ifdef CONFIG_KVM_XICS int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status) { if (xics_on_xive()) return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level, line_status); else return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level, line_status); } int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, int irq_source_id, int level, bool line_status) { return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi, level, line_status); } static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status); } int kvm_irq_map_gsi(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *entries, int gsi) { entries->gsi = gsi; entries->type = KVM_IRQ_ROUTING_IRQCHIP; entries->set = kvmppc_book3s_set_irq; entries->irqchip.irqchip 
= 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

#endif /* CONFIG_KVM_XICS */

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xics_on_xive()) {
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
		if (kvmppc_xive_native_supported())
			kvm_register_device_ops(&kvm_xive_native_ops,
						KVM_DEV_TYPE_XIVE);
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif
linux-master
arch/powerpc/kvm/book3s.c
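Editorial note on the book3s.c record above: kvmppc_h_logical_ci_load reads a cache-inhibited value off the MMIO bus into a fixed buffer and then byte-swaps it into the guest register according to the access size (1, 2, 4 or 8 bytes). The same size-switched big-endian decode can be exercised in plain userspace C. This is only an illustrative sketch, not kernel code; decode_be() and the sample buffer are invented for the example.

/*
 * Sketch only: mimics the size-switched big-endian decode that
 * kvmppc_h_logical_ci_load performs on the buffer filled in by
 * kvm_io_bus_read(). decode_be() is a hypothetical helper, not a
 * kernel API.
 */
#include <stdint.h>
#include <stdio.h>

/* Decode a big-endian value of 1, 2, 4 or 8 bytes from buf. */
static uint64_t decode_be(const uint8_t *buf, unsigned long size)
{
	uint64_t val = 0;
	unsigned long i;

	for (i = 0; i < size; i++)
		val = (val << 8) | buf[i];	/* most significant byte first */
	return val;
}

int main(void)
{
	/* Pretend this came back from an emulated MMIO read. */
	uint8_t buf[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };
	unsigned long sizes[] = { 1, 2, 4, 8 };
	unsigned long i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		/* size must be a power of two no larger than the buffer,
		 * mirroring the is_power_of_2()/sizeof(buf) check in the
		 * handler above */
		printf("size %lu -> 0x%llx\n", sizes[i],
		       (unsigned long long)decode_be(buf, sizes[i]));
	}
	return 0;
}

Keeping the bus data in a big-endian buffer and converting only at the register boundary is what lets the handler accept any power-of-two size up to 8 bytes with a single switch, and the store path simply applies the inverse conversion before kvm_io_bus_write().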
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright IBM Corp. 2007 * Copyright 2011 Freescale Semiconductor, Inc. * * Authors: Hollis Blanchard <[email protected]> */ #include <linux/jiffies.h> #include <linux/hrtimer.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kvm_host.h> #include <linux/clockchips.h> #include <asm/reg.h> #include <asm/time.h> #include <asm/byteorder.h> #include <asm/kvm_ppc.h> #include <asm/disassemble.h> #include <asm/ppc-opcode.h> #include <asm/sstep.h> #include "timing.h" #include "trace.h" #ifdef CONFIG_PPC_FPU static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu) { if (!(kvmppc_get_msr(vcpu) & MSR_FP)) { kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED); return true; } return false; } #endif /* CONFIG_PPC_FPU */ #ifdef CONFIG_VSX static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu) { if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) { kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED); return true; } return false; } #endif /* CONFIG_VSX */ #ifdef CONFIG_ALTIVEC static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu) { if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) { kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED); return true; } return false; } #endif /* CONFIG_ALTIVEC */ /* * XXX to do: * lfiwax, lfiwzx * vector loads and stores * * Instructions that trap when used on cache-inhibited mappings * are not emulated here: multiple and string instructions, * lq/stq, and the load-reserve/store-conditional instructions. */ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) { ppc_inst_t inst; enum emulation_result emulated = EMULATE_FAIL; struct instruction_op op; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst); if (emulated != EMULATE_DONE) return emulated; vcpu->arch.mmio_vsx_copy_nums = 0; vcpu->arch.mmio_vsx_offset = 0; vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE; vcpu->arch.mmio_sp64_extend = 0; vcpu->arch.mmio_sign_extend = 0; vcpu->arch.mmio_vmx_copy_nums = 0; vcpu->arch.mmio_vmx_offset = 0; vcpu->arch.mmio_host_swabbed = 0; emulated = EMULATE_FAIL; vcpu->arch.regs.msr = vcpu->arch.shared->msr; if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { int type = op.type & INSTR_TYPE_MASK; int size = GETSIZE(op.type); vcpu->mmio_is_write = OP_IS_STORE(type); switch (type) { case LOAD: { int instr_byte_swap = op.type & BYTEREV; if (op.type & SIGNEXT) emulated = kvmppc_handle_loads(vcpu, op.reg, size, !instr_byte_swap); else emulated = kvmppc_handle_load(vcpu, op.reg, size, !instr_byte_swap); if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) kvmppc_set_gpr(vcpu, op.update_reg, op.ea); break; } #ifdef CONFIG_PPC_FPU case LOAD_FP: if (kvmppc_check_fp_disabled(vcpu)) return EMULATE_DONE; if (op.type & FPCONV) vcpu->arch.mmio_sp64_extend = 1; if (op.type & SIGNEXT) emulated = kvmppc_handle_loads(vcpu, KVM_MMIO_REG_FPR|op.reg, size, 1); else emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR|op.reg, size, 1); if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) kvmppc_set_gpr(vcpu, op.update_reg, op.ea); break; #endif #ifdef CONFIG_ALTIVEC case LOAD_VMX: if (kvmppc_check_altivec_disabled(vcpu)) return EMULATE_DONE; /* Hardware enforces alignment of VMX accesses */ vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1); vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1); if (size == 16) { /* lvx */ vcpu->arch.mmio_copy_type = 
KVMPPC_VMX_COPY_DWORD; } else if (size == 4) { /* lvewx */ vcpu->arch.mmio_copy_type = KVMPPC_VMX_COPY_WORD; } else if (size == 2) { /* lvehx */ vcpu->arch.mmio_copy_type = KVMPPC_VMX_COPY_HWORD; } else if (size == 1) { /* lvebx */ vcpu->arch.mmio_copy_type = KVMPPC_VMX_COPY_BYTE; } else break; vcpu->arch.mmio_vmx_offset = (vcpu->arch.vaddr_accessed & 0xf)/size; if (size == 16) { vcpu->arch.mmio_vmx_copy_nums = 2; emulated = kvmppc_handle_vmx_load(vcpu, KVM_MMIO_REG_VMX|op.reg, 8, 1); } else { vcpu->arch.mmio_vmx_copy_nums = 1; emulated = kvmppc_handle_vmx_load(vcpu, KVM_MMIO_REG_VMX|op.reg, size, 1); } break; #endif #ifdef CONFIG_VSX case LOAD_VSX: { int io_size_each; if (op.vsx_flags & VSX_CHECK_VEC) { if (kvmppc_check_altivec_disabled(vcpu)) return EMULATE_DONE; } else { if (kvmppc_check_vsx_disabled(vcpu)) return EMULATE_DONE; } if (op.vsx_flags & VSX_FPCONV) vcpu->arch.mmio_sp64_extend = 1; if (op.element_size == 8) { if (op.vsx_flags & VSX_SPLAT) vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_DWORD_LOAD_DUMP; else vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_DWORD; } else if (op.element_size == 4) { if (op.vsx_flags & VSX_SPLAT) vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_WORD_LOAD_DUMP; else vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_WORD; } else break; if (size < op.element_size) { /* precision convert case: lxsspx, etc */ vcpu->arch.mmio_vsx_copy_nums = 1; io_size_each = size; } else { /* lxvw4x, lxvd2x, etc */ vcpu->arch.mmio_vsx_copy_nums = size/op.element_size; io_size_each = op.element_size; } emulated = kvmppc_handle_vsx_load(vcpu, KVM_MMIO_REG_VSX|op.reg, io_size_each, 1, op.type & SIGNEXT); break; } #endif case STORE: /* if need byte reverse, op.val has been reversed by * analyse_instr(). */ emulated = kvmppc_handle_store(vcpu, op.val, size, 1); if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) kvmppc_set_gpr(vcpu, op.update_reg, op.ea); break; #ifdef CONFIG_PPC_FPU case STORE_FP: if (kvmppc_check_fp_disabled(vcpu)) return EMULATE_DONE; /* The FP registers need to be flushed so that * kvmppc_handle_store() can read actual FP vals * from vcpu->arch. */ if (vcpu->kvm->arch.kvm_ops->giveup_ext) vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); if (op.type & FPCONV) vcpu->arch.mmio_sp64_extend = 1; emulated = kvmppc_handle_store(vcpu, VCPU_FPR(vcpu, op.reg), size, 1); if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) kvmppc_set_gpr(vcpu, op.update_reg, op.ea); break; #endif #ifdef CONFIG_ALTIVEC case STORE_VMX: if (kvmppc_check_altivec_disabled(vcpu)) return EMULATE_DONE; /* Hardware enforces alignment of VMX accesses. 
*/ vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1); vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1); if (vcpu->kvm->arch.kvm_ops->giveup_ext) vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); if (size == 16) { /* stvx */ vcpu->arch.mmio_copy_type = KVMPPC_VMX_COPY_DWORD; } else if (size == 4) { /* stvewx */ vcpu->arch.mmio_copy_type = KVMPPC_VMX_COPY_WORD; } else if (size == 2) { /* stvehx */ vcpu->arch.mmio_copy_type = KVMPPC_VMX_COPY_HWORD; } else if (size == 1) { /* stvebx */ vcpu->arch.mmio_copy_type = KVMPPC_VMX_COPY_BYTE; } else break; vcpu->arch.mmio_vmx_offset = (vcpu->arch.vaddr_accessed & 0xf)/size; if (size == 16) { vcpu->arch.mmio_vmx_copy_nums = 2; emulated = kvmppc_handle_vmx_store(vcpu, op.reg, 8, 1); } else { vcpu->arch.mmio_vmx_copy_nums = 1; emulated = kvmppc_handle_vmx_store(vcpu, op.reg, size, 1); } break; #endif #ifdef CONFIG_VSX case STORE_VSX: { int io_size_each; if (op.vsx_flags & VSX_CHECK_VEC) { if (kvmppc_check_altivec_disabled(vcpu)) return EMULATE_DONE; } else { if (kvmppc_check_vsx_disabled(vcpu)) return EMULATE_DONE; } if (vcpu->kvm->arch.kvm_ops->giveup_ext) vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); if (op.vsx_flags & VSX_FPCONV) vcpu->arch.mmio_sp64_extend = 1; if (op.element_size == 8) vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_DWORD; else if (op.element_size == 4) vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_WORD; else break; if (size < op.element_size) { /* precise conversion case, like stxsspx */ vcpu->arch.mmio_vsx_copy_nums = 1; io_size_each = size; } else { /* stxvw4x, stxvd2x, etc */ vcpu->arch.mmio_vsx_copy_nums = size/op.element_size; io_size_each = op.element_size; } emulated = kvmppc_handle_vsx_store(vcpu, op.reg, io_size_each, 1); break; } #endif case CACHEOP: /* Do nothing. The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as * normal writes, or the host kernel has handled dcache * coherence. */ emulated = EMULATE_DONE; break; default: break; } } trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated); /* Advance past emulated instruction. */ if (emulated != EMULATE_FAIL) kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst)); return emulated; }
linux-master
arch/powerpc/kvm/emulate_loadstore.c
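Editorial note on the emulate_loadstore.c record above: the LOAD_VMX/STORE_VMX paths force natural alignment by masking the low bits of the faulting address, derive which element of the 16-byte vector register is touched from the offset inside the quadword, and split full 16-byte accesses into two 8-byte MMIO copies. The stand-alone sketch below reproduces just that address arithmetic; vmx_mmio_params() and its field names are made up for illustration and are not kernel identifiers.

/*
 * Sketch of the address arithmetic used by the VMX MMIO emulation path:
 * align the access to its natural size, then compute which element of
 * the 16-byte vector register it selects and how many bus copies are
 * needed. vmx_mmio_params() is a hypothetical helper for this example.
 */
#include <assert.h>
#include <stdio.h>

struct vmx_access {
	unsigned long aligned_addr;	/* vaddr with low bits cleared      */
	unsigned long element_offset;	/* (aligned_addr & 0xf) / size      */
	unsigned long copy_nums;	/* 16-byte ops become 2 x 8 bytes   */
};

static struct vmx_access vmx_mmio_params(unsigned long vaddr, unsigned long size)
{
	struct vmx_access a;

	/* size is 1, 2, 4 or 16 for lvebx/lvehx/lvewx/lvx and the stores */
	assert(size == 1 || size == 2 || size == 4 || size == 16);

	a.aligned_addr = vaddr & ~(size - 1);	/* hardware-enforced alignment */
	a.element_offset = (a.aligned_addr & 0xf) / size;
	a.copy_nums = (size == 16) ? 2 : 1;
	return a;
}

int main(void)
{
	/* e.g. a 4-byte lvewx-style access landing at offset 6 in a page */
	struct vmx_access a = vmx_mmio_params(0x10000006UL, 4);

	printf("aligned 0x%lx, element %lu, copies %lu\n",
	       a.aligned_addr, a.element_offset, a.copy_nums);
	return 0;
}

The same pattern, mask to the access size and then index into the 16-byte register, is what lets one handler cover lvx, lvewx, lvehx and lvebx (and their store counterparts) while the generic MMIO machinery only ever moves 1 to 8 bytes per copy.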
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2011 Paul Mackerras, IBM Corp. <[email protected]> * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. * * Authors: * Paul Mackerras <[email protected]> * Alexander Graf <[email protected]> * Kevin Wolf <[email protected]> * * Description: KVM functions specific to running on Book 3S * processors in hypervisor mode (specifically POWER7 and later). * * This file is derived from arch/powerpc/kvm/book3s.c, * by Alexander Graf <[email protected]>. */ #include <linux/kvm_host.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/preempt.h> #include <linux/sched/signal.h> #include <linux/sched/stat.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/fs.h> #include <linux/anon_inodes.h> #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/spinlock.h> #include <linux/page-flags.h> #include <linux/srcu.h> #include <linux/miscdevice.h> #include <linux/debugfs.h> #include <linux/gfp.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include <linux/hugetlb.h> #include <linux/kvm_irqfd.h> #include <linux/irqbypass.h> #include <linux/module.h> #include <linux/compiler.h> #include <linux/of.h> #include <linux/irqdomain.h> #include <linux/smp.h> #include <asm/ftrace.h> #include <asm/reg.h> #include <asm/ppc-opcode.h> #include <asm/asm-prototypes.h> #include <asm/archrandom.h> #include <asm/debug.h> #include <asm/disassemble.h> #include <asm/cputable.h> #include <asm/cacheflush.h> #include <linux/uaccess.h> #include <asm/interrupt.h> #include <asm/io.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/mmu_context.h> #include <asm/lppaca.h> #include <asm/pmc.h> #include <asm/processor.h> #include <asm/cputhreads.h> #include <asm/page.h> #include <asm/hvcall.h> #include <asm/switch_to.h> #include <asm/smp.h> #include <asm/dbell.h> #include <asm/hmi.h> #include <asm/pnv-pci.h> #include <asm/mmu.h> #include <asm/opal.h> #include <asm/xics.h> #include <asm/xive.h> #include <asm/hw_breakpoint.h> #include <asm/kvm_book3s_uvmem.h> #include <asm/ultravisor.h> #include <asm/dtl.h> #include <asm/plpar_wrappers.h> #include <trace/events/ipi.h> #include "book3s.h" #include "book3s_hv.h" #define CREATE_TRACE_POINTS #include "trace_hv.h" /* #define EXIT_DEBUG */ /* #define EXIT_DEBUG_SIMPLE */ /* #define EXIT_DEBUG_INT */ /* Used to indicate that a guest page fault needs to be handled */ #define RESUME_PAGE_FAULT (RESUME_GUEST | RESUME_FLAG_ARCH1) /* Used to indicate that a guest passthrough interrupt needs to be handled */ #define RESUME_PASSTHROUGH (RESUME_GUEST | RESUME_FLAG_ARCH2) /* Used as a "null" value for timebase values */ #define TB_NIL (~(u64)0) static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); static int dynamic_mt_modes = 6; module_param(dynamic_mt_modes, int, 0644); MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)"); static int target_smt_mode; module_param(target_smt_mode, int, 0644); MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)"); static bool one_vm_per_core; module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires POWER8 or older)"); #ifdef CONFIG_KVM_XICS static const struct kernel_param_ops module_param_ops = { .set = param_set_int, .get = param_get_int, }; module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, 0644); 
MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization"); module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644); MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core"); #endif /* If set, guests are allowed to create and control nested guests */ static bool nested = true; module_param(nested, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)"); static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); /* * RWMR values for POWER8. These control the rate at which PURR * and SPURR count and should be set according to the number of * online threads in the vcore being run. */ #define RWMR_RPA_P8_1THREAD 0x164520C62609AECAUL #define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9UL #define RWMR_RPA_P8_3THREAD 0x164520C62609AECAUL #define RWMR_RPA_P8_4THREAD 0x199A421245058DA9UL #define RWMR_RPA_P8_5THREAD 0x164520C62609AECAUL #define RWMR_RPA_P8_6THREAD 0x164520C62609AECAUL #define RWMR_RPA_P8_7THREAD 0x164520C62609AECAUL #define RWMR_RPA_P8_8THREAD 0x164520C62609AECAUL static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = { RWMR_RPA_P8_1THREAD, RWMR_RPA_P8_1THREAD, RWMR_RPA_P8_2THREAD, RWMR_RPA_P8_3THREAD, RWMR_RPA_P8_4THREAD, RWMR_RPA_P8_5THREAD, RWMR_RPA_P8_6THREAD, RWMR_RPA_P8_7THREAD, RWMR_RPA_P8_8THREAD, }; static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc, int *ip) { int i = *ip; struct kvm_vcpu *vcpu; while (++i < MAX_SMT_THREADS) { vcpu = READ_ONCE(vc->runnable_threads[i]); if (vcpu) { *ip = i; return vcpu; } } return NULL; } /* Used to traverse the list of runnable threads for a given vcore */ #define for_each_runnable_thread(i, vcpu, vc) \ for (i = -1; (vcpu = next_runnable_thread(vc, &i)); ) static bool kvmppc_ipi_thread(int cpu) { unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); /* If we're a nested hypervisor, fall back to ordinary IPIs for now */ if (kvmhv_on_pseries()) return false; /* On POWER9 we can use msgsnd to IPI any cpu */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { msg |= get_hard_smp_processor_id(cpu); smp_mb(); __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); return true; } /* On POWER8 for IPIs to threads in the same core, use msgsnd */ if (cpu_has_feature(CPU_FTR_ARCH_207S)) { preempt_disable(); if (cpu_first_thread_sibling(cpu) == cpu_first_thread_sibling(smp_processor_id())) { msg |= cpu_thread_in_core(cpu); smp_mb(); __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); preempt_enable(); return true; } preempt_enable(); } #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) if (cpu >= 0 && cpu < nr_cpu_ids) { if (paca_ptrs[cpu]->kvm_hstate.xics_phys) { xics_wake_cpu(cpu); return true; } opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY); return true; } #endif return false; } static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) { int cpu; struct rcuwait *waitp; /* * rcuwait_wake_up contains smp_mb() which orders prior stores that * create pending work vs below loads of cpu fields. The other side * is the barrier in vcpu run that orders setting the cpu fields vs * testing for pending work. */ waitp = kvm_arch_vcpu_get_wait(vcpu); if (rcuwait_wake_up(waitp)) ++vcpu->stat.generic.halt_wakeup; cpu = READ_ONCE(vcpu->arch.thread_cpu); if (cpu >= 0 && kvmppc_ipi_thread(cpu)) return; /* CPU points to the first thread of the core */ cpu = vcpu->cpu; if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu)) smp_send_reschedule(cpu); } /* * We use the vcpu_load/put functions to measure stolen time. 
* * Stolen time is counted as time when either the vcpu is able to * run as part of a virtual core, but the task running the vcore * is preempted or sleeping, or when the vcpu needs something done * in the kernel by the task running the vcpu, but that task is * preempted or sleeping. Those two things have to be counted * separately, since one of the vcpu tasks will take on the job * of running the core, and the other vcpu tasks in the vcore will * sleep waiting for it to do that, but that sleep shouldn't count * as stolen time. * * Hence we accumulate stolen time when the vcpu can run as part of * a vcore using vc->stolen_tb, and the stolen time when the vcpu * needs its task to do other things in the kernel (for example, * service a page fault) in busy_stolen. We don't accumulate * stolen time for a vcore when it is inactive, or for a vcpu * when it is in state RUNNING or NOTREADY. NOTREADY is a bit of * a misnomer; it means that the vcpu task is not executing in * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in * the kernel. We don't have any way of dividing up that time * between time that the vcpu is genuinely stopped, time that * the task is actively working on behalf of the vcpu, and time * that the task is preempted, so we don't count any of it as * stolen. * * Updates to busy_stolen are protected by arch.tbacct_lock; * updates to vc->stolen_tb are protected by the vcore->stoltb_lock * lock. The stolen times are measured in units of timebase ticks. * (Note that the != TB_NIL checks below are purely defensive; * they should never fail.) * * The POWER9 path is simpler, one vcpu per virtual core so the * former case does not exist. If a vcpu is preempted when it is * BUSY_IN_HOST and not ceded or otherwise blocked, then accumulate * the stolen cycles in busy_stolen. RUNNING is not a preemptible * state in the P9 path. */ static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb) { unsigned long flags; WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); spin_lock_irqsave(&vc->stoltb_lock, flags); vc->preempt_tb = tb; spin_unlock_irqrestore(&vc->stoltb_lock, flags); } static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc, u64 tb) { unsigned long flags; WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); spin_lock_irqsave(&vc->stoltb_lock, flags); if (vc->preempt_tb != TB_NIL) { vc->stolen_tb += tb - vc->preempt_tb; vc->preempt_tb = TB_NIL; } spin_unlock_irqrestore(&vc->stoltb_lock, flags); } static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; unsigned long flags; u64 now; if (cpu_has_feature(CPU_FTR_ARCH_300)) { if (vcpu->arch.busy_preempt != TB_NIL) { WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST); vc->stolen_tb += mftb() - vcpu->arch.busy_preempt; vcpu->arch.busy_preempt = TB_NIL; } return; } now = mftb(); /* * We can test vc->runner without taking the vcore lock, * because only this task ever sets vc->runner to this * vcpu, and once it is set to this vcpu, only this task * ever sets it to NULL. 
*/ if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) kvmppc_core_end_stolen(vc, now); spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && vcpu->arch.busy_preempt != TB_NIL) { vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt; vcpu->arch.busy_preempt = TB_NIL; } spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); } static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; unsigned long flags; u64 now; if (cpu_has_feature(CPU_FTR_ARCH_300)) { /* * In the P9 path, RUNNABLE is not preemptible * (nor takes host interrupts) */ WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE); /* * Account stolen time when preempted while the vcpu task is * running in the kernel (but not in qemu, which is INACTIVE). */ if (task_is_running(current) && vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) vcpu->arch.busy_preempt = mftb(); return; } now = mftb(); if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) kvmppc_core_start_stolen(vc, now); spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) vcpu->arch.busy_preempt = now; spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); } static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) { vcpu->arch.pvr = pvr; } /* Dummy value used in computing PCR value below */ #define PCR_ARCH_31 (PCR_ARCH_300 << 1) static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) { unsigned long host_pcr_bit = 0, guest_pcr_bit = 0; struct kvmppc_vcore *vc = vcpu->arch.vcore; /* We can (emulate) our own architecture version and anything older */ if (cpu_has_feature(CPU_FTR_ARCH_31)) host_pcr_bit = PCR_ARCH_31; else if (cpu_has_feature(CPU_FTR_ARCH_300)) host_pcr_bit = PCR_ARCH_300; else if (cpu_has_feature(CPU_FTR_ARCH_207S)) host_pcr_bit = PCR_ARCH_207; else if (cpu_has_feature(CPU_FTR_ARCH_206)) host_pcr_bit = PCR_ARCH_206; else host_pcr_bit = PCR_ARCH_205; /* Determine lowest PCR bit needed to run guest in given PVR level */ guest_pcr_bit = host_pcr_bit; if (arch_compat) { switch (arch_compat) { case PVR_ARCH_205: guest_pcr_bit = PCR_ARCH_205; break; case PVR_ARCH_206: case PVR_ARCH_206p: guest_pcr_bit = PCR_ARCH_206; break; case PVR_ARCH_207: guest_pcr_bit = PCR_ARCH_207; break; case PVR_ARCH_300: guest_pcr_bit = PCR_ARCH_300; break; case PVR_ARCH_31: guest_pcr_bit = PCR_ARCH_31; break; default: return -EINVAL; } } /* Check requested PCR bits don't exceed our capabilities */ if (guest_pcr_bit > host_pcr_bit) return -EINVAL; spin_lock(&vc->lock); vc->arch_compat = arch_compat; /* * Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit * Also set all reserved PCR bits */ vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK; spin_unlock(&vc->lock); return 0; } static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) { int r; pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); pr_err("pc = %.16lx msr = %.16llx trap = %x\n", vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); for (r = 0; r < 16; ++r) pr_err("r%2d = %.16lx r%d = %.16lx\n", r, kvmppc_get_gpr(vcpu, r), r+16, kvmppc_get_gpr(vcpu, r+16)); pr_err("ctr = %.16lx lr = %.16lx\n", vcpu->arch.regs.ctr, vcpu->arch.regs.link); pr_err("srr0 = %.16llx srr1 = %.16llx\n", vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); pr_err("sprg0 = %.16llx sprg1 = %.16llx\n", vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); pr_err("cr = %.8lx xer = 
%.16lx dsisr = %.8x\n", vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); pr_err("fault dar = %.16lx dsisr = %.8x\n", vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); for (r = 0; r < vcpu->arch.slb_max; ++r) pr_err(" ESID = %.16llx VSID = %.16llx\n", vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.16lx\n", vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, vcpu->arch.last_inst); } static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id) { return kvm_get_vcpu_by_id(kvm, id); } static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) { vpa->__old_status |= LPPACA_OLD_SHARED_PROC; vpa->yield_count = cpu_to_be32(1); } static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v, unsigned long addr, unsigned long len) { /* check address is cacheline aligned */ if (addr & (L1_CACHE_BYTES - 1)) return -EINVAL; spin_lock(&vcpu->arch.vpa_update_lock); if (v->next_gpa != addr || v->len != len) { v->next_gpa = addr; v->len = addr ? len : 0; v->update_pending = 1; } spin_unlock(&vcpu->arch.vpa_update_lock); return 0; } /* Length for a per-processor buffer is passed in at offset 4 in the buffer */ struct reg_vpa { u32 dummy; union { __be16 hword; __be32 word; } length; }; static int vpa_is_registered(struct kvmppc_vpa *vpap) { if (vpap->update_pending) return vpap->next_gpa != 0; return vpap->pinned_addr != NULL; } static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long vcpuid, unsigned long vpa) { struct kvm *kvm = vcpu->kvm; unsigned long len, nb; void *va; struct kvm_vcpu *tvcpu; int err; int subfunc; struct kvmppc_vpa *vpap; tvcpu = kvmppc_find_vcpu(kvm, vcpuid); if (!tvcpu) return H_PARAMETER; subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK; if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL || subfunc == H_VPA_REG_SLB) { /* Registering new area - address must be cache-line aligned */ if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa) return H_PARAMETER; /* convert logical addr to kernel addr and read length */ va = kvmppc_pin_guest_page(kvm, vpa, &nb); if (va == NULL) return H_PARAMETER; if (subfunc == H_VPA_REG_VPA) len = be16_to_cpu(((struct reg_vpa *)va)->length.hword); else len = be32_to_cpu(((struct reg_vpa *)va)->length.word); kvmppc_unpin_guest_page(kvm, va, vpa, false); /* Check length */ if (len > nb || len < sizeof(struct reg_vpa)) return H_PARAMETER; } else { vpa = 0; len = 0; } err = H_PARAMETER; vpap = NULL; spin_lock(&tvcpu->arch.vpa_update_lock); switch (subfunc) { case H_VPA_REG_VPA: /* register VPA */ /* * The size of our lppaca is 1kB because of the way we align * it for the guest to avoid crossing a 4kB boundary. We only * use 640 bytes of the structure though, so we should accept * clients that set a size of 640. 
*/ BUILD_BUG_ON(sizeof(struct lppaca) != 640); if (len < sizeof(struct lppaca)) break; vpap = &tvcpu->arch.vpa; err = 0; break; case H_VPA_REG_DTL: /* register DTL */ if (len < sizeof(struct dtl_entry)) break; len -= len % sizeof(struct dtl_entry); /* Check that they have previously registered a VPA */ err = H_RESOURCE; if (!vpa_is_registered(&tvcpu->arch.vpa)) break; vpap = &tvcpu->arch.dtl; err = 0; break; case H_VPA_REG_SLB: /* register SLB shadow buffer */ /* Check that they have previously registered a VPA */ err = H_RESOURCE; if (!vpa_is_registered(&tvcpu->arch.vpa)) break; vpap = &tvcpu->arch.slb_shadow; err = 0; break; case H_VPA_DEREG_VPA: /* deregister VPA */ /* Check they don't still have a DTL or SLB buf registered */ err = H_RESOURCE; if (vpa_is_registered(&tvcpu->arch.dtl) || vpa_is_registered(&tvcpu->arch.slb_shadow)) break; vpap = &tvcpu->arch.vpa; err = 0; break; case H_VPA_DEREG_DTL: /* deregister DTL */ vpap = &tvcpu->arch.dtl; err = 0; break; case H_VPA_DEREG_SLB: /* deregister SLB shadow buffer */ vpap = &tvcpu->arch.slb_shadow; err = 0; break; } if (vpap) { vpap->next_gpa = vpa; vpap->len = len; vpap->update_pending = 1; } spin_unlock(&tvcpu->arch.vpa_update_lock); return err; } static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap) { struct kvm *kvm = vcpu->kvm; void *va; unsigned long nb; unsigned long gpa; /* * We need to pin the page pointed to by vpap->next_gpa, * but we can't call kvmppc_pin_guest_page under the lock * as it does get_user_pages() and down_read(). So we * have to drop the lock, pin the page, then get the lock * again and check that a new area didn't get registered * in the meantime. */ for (;;) { gpa = vpap->next_gpa; spin_unlock(&vcpu->arch.vpa_update_lock); va = NULL; nb = 0; if (gpa) va = kvmppc_pin_guest_page(kvm, gpa, &nb); spin_lock(&vcpu->arch.vpa_update_lock); if (gpa == vpap->next_gpa) break; /* sigh... unpin that one and try again */ if (va) kvmppc_unpin_guest_page(kvm, va, gpa, false); } vpap->update_pending = 0; if (va && nb < vpap->len) { /* * If it's now too short, it must be that userspace * has changed the mappings underlying guest memory, * so unregister the region. */ kvmppc_unpin_guest_page(kvm, va, gpa, false); va = NULL; } if (vpap->pinned_addr) kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa, vpap->dirty); vpap->gpa = gpa; vpap->pinned_addr = va; vpap->dirty = false; if (va) vpap->pinned_end = va + vpap->len; } static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.vpa.update_pending || vcpu->arch.slb_shadow.update_pending || vcpu->arch.dtl.update_pending)) return; spin_lock(&vcpu->arch.vpa_update_lock); if (vcpu->arch.vpa.update_pending) { kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); if (vcpu->arch.vpa.pinned_addr) init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); } if (vcpu->arch.dtl.update_pending) { kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; vcpu->arch.dtl_index = 0; } if (vcpu->arch.slb_shadow.update_pending) kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); spin_unlock(&vcpu->arch.vpa_update_lock); } /* * Return the accumulated stolen time for the vcore up until `now'. * The caller should hold the vcore lock. 
*/ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) { u64 p; unsigned long flags; WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); spin_lock_irqsave(&vc->stoltb_lock, flags); p = vc->stolen_tb; if (vc->vcore_state != VCORE_INACTIVE && vc->preempt_tb != TB_NIL) p += now - vc->preempt_tb; spin_unlock_irqrestore(&vc->stoltb_lock, flags); return p; } static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, struct lppaca *vpa, unsigned int pcpu, u64 now, unsigned long stolen) { struct dtl_entry *dt; dt = vcpu->arch.dtl_ptr; if (!dt) return; dt->dispatch_reason = 7; dt->preempt_reason = 0; dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid); dt->enqueue_to_dispatch_time = cpu_to_be32(stolen); dt->ready_to_enqueue_time = 0; dt->waiting_to_ready_time = 0; dt->timebase = cpu_to_be64(now); dt->fault_addr = 0; dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu)); dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); ++dt; if (dt == vcpu->arch.dtl.pinned_end) dt = vcpu->arch.dtl.pinned_addr; vcpu->arch.dtl_ptr = dt; /* order writing *dt vs. writing vpa->dtl_idx */ smp_wmb(); vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); /* vcpu->arch.dtl.dirty is set by the caller */ } static void kvmppc_update_vpa_dispatch(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) { struct lppaca *vpa; unsigned long stolen; unsigned long core_stolen; u64 now; unsigned long flags; vpa = vcpu->arch.vpa.pinned_addr; if (!vpa) return; now = mftb(); core_stolen = vcore_stolen_time(vc, now); stolen = core_stolen - vcpu->arch.stolen_logged; vcpu->arch.stolen_logged = core_stolen; spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); stolen += vcpu->arch.busy_stolen; vcpu->arch.busy_stolen = 0; spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen); __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + vc->tb_offset, stolen); vcpu->arch.vpa.dirty = true; } static void kvmppc_update_vpa_dispatch_p9(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc, u64 now) { struct lppaca *vpa; unsigned long stolen; unsigned long stolen_delta; vpa = vcpu->arch.vpa.pinned_addr; if (!vpa) return; stolen = vc->stolen_tb; stolen_delta = stolen - vcpu->arch.stolen_logged; vcpu->arch.stolen_logged = stolen; vpa->enqueue_dispatch_tb = cpu_to_be64(stolen); __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now, stolen_delta); vcpu->arch.vpa.dirty = true; } /* See if there is a doorbell interrupt pending for a vcpu */ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu) { int thr; struct kvmppc_vcore *vc; if (vcpu->arch.doorbell_request) return true; if (cpu_has_feature(CPU_FTR_ARCH_300)) return false; /* * Ensure that the read of vcore->dpdes comes after the read * of vcpu->doorbell_request. This barrier matches the * smp_wmb() in kvmppc_guest_entry_inject(). 
*/ smp_rmb(); vc = vcpu->arch.vcore; thr = vcpu->vcpu_id - vc->first_vcpuid; return !!(vc->dpdes & (1 << thr)); } static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu) { if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) return true; if ((!vcpu->arch.vcore->arch_compat) && cpu_has_feature(CPU_FTR_ARCH_207S)) return true; return false; } static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, unsigned long resource, unsigned long value1, unsigned long value2) { switch (resource) { case H_SET_MODE_RESOURCE_SET_CIABR: if (!kvmppc_power8_compatible(vcpu)) return H_P2; if (value2) return H_P4; if (mflags) return H_UNSUPPORTED_FLAG_START; /* Guests can't breakpoint the hypervisor */ if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER) return H_P3; vcpu->arch.ciabr = value1; return H_SUCCESS; case H_SET_MODE_RESOURCE_SET_DAWR0: if (!kvmppc_power8_compatible(vcpu)) return H_P2; if (!ppc_breakpoint_available()) return H_P2; if (mflags) return H_UNSUPPORTED_FLAG_START; if (value2 & DABRX_HYP) return H_P4; vcpu->arch.dawr0 = value1; vcpu->arch.dawrx0 = value2; return H_SUCCESS; case H_SET_MODE_RESOURCE_SET_DAWR1: if (!kvmppc_power8_compatible(vcpu)) return H_P2; if (!ppc_breakpoint_available()) return H_P2; if (!cpu_has_feature(CPU_FTR_DAWR1)) return H_P2; if (!vcpu->kvm->arch.dawr1_enabled) return H_FUNCTION; if (mflags) return H_UNSUPPORTED_FLAG_START; if (value2 & DABRX_HYP) return H_P4; vcpu->arch.dawr1 = value1; vcpu->arch.dawrx1 = value2; return H_SUCCESS; case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: /* * KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved. * Keep this in synch with kvmppc_filter_guest_lpcr_hv. */ if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) && kvmhv_vcpu_is_radix(vcpu) && mflags == 3) return H_UNSUPPORTED_FLAG_START; return H_TOO_HARD; default: return H_TOO_HARD; } } /* Copy guest memory in place - must reside within a single memslot */ static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from, unsigned long len) { struct kvm_memory_slot *to_memslot = NULL; struct kvm_memory_slot *from_memslot = NULL; unsigned long to_addr, from_addr; int r; /* Get HPA for from address */ from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT); if (!from_memslot) return -EFAULT; if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages) << PAGE_SHIFT)) return -EINVAL; from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT); if (kvm_is_error_hva(from_addr)) return -EFAULT; from_addr |= (from & (PAGE_SIZE - 1)); /* Get HPA for to address */ to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT); if (!to_memslot) return -EFAULT; if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages) << PAGE_SHIFT)) return -EINVAL; to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT); if (kvm_is_error_hva(to_addr)) return -EFAULT; to_addr |= (to & (PAGE_SIZE - 1)); /* Perform copy */ r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr, len); if (r) return -EFAULT; mark_page_dirty(kvm, to >> PAGE_SHIFT); return 0; } static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long dest, unsigned long src) { u64 pg_sz = SZ_4K; /* 4K page size */ u64 pg_mask = SZ_4K - 1; int ret; /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */ if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE | H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED)) return H_PARAMETER; /* dest (and src if copy_page flag set) must be page aligned */ if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask))) return 
H_PARAMETER; /* zero and/or copy the page as determined by the flags */ if (flags & H_COPY_PAGE) { ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz); if (ret < 0) return H_PARAMETER; } else if (flags & H_ZERO_PAGE) { ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz); if (ret < 0) return H_PARAMETER; } /* We can ignore the remaining flags */ return H_SUCCESS; } static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target) { struct kvmppc_vcore *vcore = target->arch.vcore; /* * We expect to have been called by the real mode handler * (kvmppc_rm_h_confer()) which would have directly returned * H_SUCCESS if the source vcore wasn't idle (e.g. if it may * have useful work to do and should not confer) so we don't * recheck that here. * * In the case of the P9 single vcpu per vcore case, the real * mode handler is not called but no other threads are in the * source vcore. */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) { spin_lock(&vcore->lock); if (target->arch.state == KVMPPC_VCPU_RUNNABLE && vcore->vcore_state != VCORE_INACTIVE && vcore->runner) target = vcore->runner; spin_unlock(&vcore->lock); } return kvm_vcpu_yield_to(target); } static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu) { int yield_count = 0; struct lppaca *lppaca; spin_lock(&vcpu->arch.vpa_update_lock); lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; if (lppaca) yield_count = be32_to_cpu(lppaca->yield_count); spin_unlock(&vcpu->arch.vpa_update_lock); return yield_count; } /* * H_RPT_INVALIDATE hcall handler for nested guests. * * Handles only nested process-scoped invalidation requests in L0. */ static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu) { unsigned long type = kvmppc_get_gpr(vcpu, 6); unsigned long pid, pg_sizes, start, end; /* * The partition-scoped invalidations aren't handled here in L0. */ if (type & H_RPTI_TYPE_NESTED) return RESUME_HOST; pid = kvmppc_get_gpr(vcpu, 4); pg_sizes = kvmppc_get_gpr(vcpu, 7); start = kvmppc_get_gpr(vcpu, 8); end = kvmppc_get_gpr(vcpu, 9); do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid, type, pg_sizes, start, end); kvmppc_set_gpr(vcpu, 3, H_SUCCESS); return RESUME_GUEST; } static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu, unsigned long id, unsigned long target, unsigned long type, unsigned long pg_sizes, unsigned long start, unsigned long end) { if (!kvm_is_radix(vcpu->kvm)) return H_UNSUPPORTED; if (end < start) return H_P5; /* * Partition-scoped invalidation for nested guests. */ if (type & H_RPTI_TYPE_NESTED) { if (!nesting_enabled(vcpu->kvm)) return H_FUNCTION; /* Support only cores as target */ if (target != H_RPTI_TARGET_CMMU) return H_P2; return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes, start, end); } /* * Process-scoped invalidation for L1 guests. 
*/ do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid, type, pg_sizes, start, end); return H_SUCCESS; } int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; unsigned long req = kvmppc_get_gpr(vcpu, 3); unsigned long target, ret = H_SUCCESS; int yield_count; struct kvm_vcpu *tvcpu; int idx, rc; if (req <= MAX_HCALL_OPCODE && !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) return RESUME_HOST; switch (req) { case H_REMOVE: ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_ENTER: ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6), kvmppc_get_gpr(vcpu, 7)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_READ: ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_CLEAR_MOD: ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_CLEAR_REF: ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_PROTECT: ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_BULK_REMOVE: ret = kvmppc_h_bulk_remove(vcpu); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_CEDE: break; case H_PROD: target = kvmppc_get_gpr(vcpu, 4); tvcpu = kvmppc_find_vcpu(kvm, target); if (!tvcpu) { ret = H_PARAMETER; break; } tvcpu->arch.prodded = 1; smp_mb(); /* This orders prodded store vs ceded load */ if (tvcpu->arch.ceded) kvmppc_fast_vcpu_kick_hv(tvcpu); break; case H_CONFER: target = kvmppc_get_gpr(vcpu, 4); if (target == -1) break; tvcpu = kvmppc_find_vcpu(kvm, target); if (!tvcpu) { ret = H_PARAMETER; break; } yield_count = kvmppc_get_gpr(vcpu, 5); if (kvmppc_get_yield_count(tvcpu) != yield_count) break; kvm_arch_vcpu_yield_to(tvcpu); break; case H_REGISTER_VPA: ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); break; case H_RTAS: if (list_empty(&kvm->arch.rtas_tokens)) return RESUME_HOST; idx = srcu_read_lock(&kvm->srcu); rc = kvmppc_rtas_hcall(vcpu); srcu_read_unlock(&kvm->srcu, idx); if (rc == -ENOENT) return RESUME_HOST; else if (rc == 0) break; /* Send the error out to userspace via KVM_RUN */ return rc; case H_LOGICAL_CI_LOAD: ret = kvmppc_h_logical_ci_load(vcpu); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_LOGICAL_CI_STORE: ret = kvmppc_h_logical_ci_store(vcpu); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_SET_MODE: ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6), kvmppc_get_gpr(vcpu, 7)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_XIRR: case H_CPPR: case H_EOI: case H_IPI: case H_IPOLL: case H_XIRR_X: if (kvmppc_xics_enabled(vcpu)) { if (xics_on_xive()) { ret = H_NOT_AVAILABLE; return RESUME_GUEST; } ret = kvmppc_xics_hcall(vcpu, req); break; } return RESUME_HOST; case H_SET_DABR: ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4)); break; case H_SET_XDABR: ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5)); break; #ifdef CONFIG_SPAPR_TCE_IOMMU case H_GET_TCE: ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5)); if (ret == H_TOO_HARD) return RESUME_HOST; 
break; case H_PUT_TCE: ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_PUT_TCE_INDIRECT: ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6), kvmppc_get_gpr(vcpu, 7)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_STUFF_TCE: ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6), kvmppc_get_gpr(vcpu, 7)); if (ret == H_TOO_HARD) return RESUME_HOST; break; #endif case H_RANDOM: if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1)) ret = H_HARDWARE; break; case H_RPT_INVALIDATE: ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6), kvmppc_get_gpr(vcpu, 7), kvmppc_get_gpr(vcpu, 8), kvmppc_get_gpr(vcpu, 9)); break; case H_SET_PARTITION_TABLE: ret = H_FUNCTION; if (nesting_enabled(kvm)) ret = kvmhv_set_partition_table(vcpu); break; case H_ENTER_NESTED: ret = H_FUNCTION; if (!nesting_enabled(kvm)) break; ret = kvmhv_enter_nested_guest(vcpu); if (ret == H_INTERRUPT) { kvmppc_set_gpr(vcpu, 3, 0); vcpu->arch.hcall_needed = 0; return -EINTR; } else if (ret == H_TOO_HARD) { kvmppc_set_gpr(vcpu, 3, 0); vcpu->arch.hcall_needed = 0; return RESUME_HOST; } break; case H_TLB_INVALIDATE: ret = H_FUNCTION; if (nesting_enabled(kvm)) ret = kvmhv_do_nested_tlbie(vcpu); break; case H_COPY_TOFROM_GUEST: ret = H_FUNCTION; if (nesting_enabled(kvm)) ret = kvmhv_copy_tofrom_guest_nested(vcpu); break; case H_PAGE_INIT: ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); break; case H_SVM_PAGE_IN: ret = H_UNSUPPORTED; if (kvmppc_get_srr1(vcpu) & MSR_S) ret = kvmppc_h_svm_page_in(kvm, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); break; case H_SVM_PAGE_OUT: ret = H_UNSUPPORTED; if (kvmppc_get_srr1(vcpu) & MSR_S) ret = kvmppc_h_svm_page_out(kvm, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); break; case H_SVM_INIT_START: ret = H_UNSUPPORTED; if (kvmppc_get_srr1(vcpu) & MSR_S) ret = kvmppc_h_svm_init_start(kvm); break; case H_SVM_INIT_DONE: ret = H_UNSUPPORTED; if (kvmppc_get_srr1(vcpu) & MSR_S) ret = kvmppc_h_svm_init_done(kvm); break; case H_SVM_INIT_ABORT: /* * Even if that call is made by the Ultravisor, the SSR1 value * is the guest context one, with the secure bit clear as it has * not yet been secured. So we can't check it here. * Instead the kvm->arch.secure_guest flag is checked inside * kvmppc_h_svm_init_abort(). */ ret = kvmppc_h_svm_init_abort(kvm); break; default: return RESUME_HOST; } WARN_ON_ONCE(ret == H_TOO_HARD); kvmppc_set_gpr(vcpu, 3, ret); vcpu->arch.hcall_needed = 0; return RESUME_GUEST; } /* * Handle H_CEDE in the P9 path where we don't call the real-mode hcall * handlers in book3s_hv_rmhandlers.S. * * This has to be done early, not in kvmppc_pseries_do_hcall(), so * that the cede logic in kvmppc_run_single_vcpu() works properly. 
*/ static void kvmppc_cede(struct kvm_vcpu *vcpu) { vcpu->arch.shregs.msr |= MSR_EE; vcpu->arch.ceded = 1; smp_mb(); if (vcpu->arch.prodded) { vcpu->arch.prodded = 0; smp_mb(); vcpu->arch.ceded = 0; } } static int kvmppc_hcall_impl_hv(unsigned long cmd) { switch (cmd) { case H_CEDE: case H_PROD: case H_CONFER: case H_REGISTER_VPA: case H_SET_MODE: #ifdef CONFIG_SPAPR_TCE_IOMMU case H_GET_TCE: case H_PUT_TCE: case H_PUT_TCE_INDIRECT: case H_STUFF_TCE: #endif case H_LOGICAL_CI_LOAD: case H_LOGICAL_CI_STORE: #ifdef CONFIG_KVM_XICS case H_XIRR: case H_CPPR: case H_EOI: case H_IPI: case H_IPOLL: case H_XIRR_X: #endif case H_PAGE_INIT: case H_RPT_INVALIDATE: return 1; } /* See if it's in the real-mode table */ return kvmppc_hcall_impl_hv_realmode(cmd); } static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu) { ppc_inst_t last_inst; if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != EMULATE_DONE) { /* * Fetch failed, so return to guest and * try executing it again. */ return RESUME_GUEST; } if (ppc_inst_val(last_inst) == KVMPPC_INST_SW_BREAKPOINT) { vcpu->run->exit_reason = KVM_EXIT_DEBUG; vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); return RESUME_HOST; } else { kvmppc_core_queue_program(vcpu, SRR1_PROGILL | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); return RESUME_GUEST; } } static void do_nothing(void *x) { } static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu) { int thr, cpu, pcpu, nthreads; struct kvm_vcpu *v; unsigned long dpdes; nthreads = vcpu->kvm->arch.emul_smt_mode; dpdes = 0; cpu = vcpu->vcpu_id & ~(nthreads - 1); for (thr = 0; thr < nthreads; ++thr, ++cpu) { v = kvmppc_find_vcpu(vcpu->kvm, cpu); if (!v) continue; /* * If the vcpu is currently running on a physical cpu thread, * interrupt it in order to pull it out of the guest briefly, * which will update its vcore->dpdes value. */ pcpu = READ_ONCE(v->cpu); if (pcpu >= 0) smp_call_function_single(pcpu, do_nothing, NULL, 1); if (kvmppc_doorbell_pending(v)) dpdes |= 1 << thr; } return dpdes; } /* * On POWER9, emulate doorbell-related instructions in order to * give the guest the illusion of running on a multi-threaded core. * The instructions emulated are msgsndp, msgclrp, mfspr TIR, * and mfspr DPDES. 
*/ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) { u32 inst, rb, thr; unsigned long arg; struct kvm *kvm = vcpu->kvm; struct kvm_vcpu *tvcpu; ppc_inst_t pinst; if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst) != EMULATE_DONE) return RESUME_GUEST; inst = ppc_inst_val(pinst); if (get_op(inst) != 31) return EMULATE_FAIL; rb = get_rb(inst); thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1); switch (get_xop(inst)) { case OP_31_XOP_MSGSNDP: arg = kvmppc_get_gpr(vcpu, rb); if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER) break; arg &= 0x7f; if (arg >= kvm->arch.emul_smt_mode) break; tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg); if (!tvcpu) break; if (!tvcpu->arch.doorbell_request) { tvcpu->arch.doorbell_request = 1; kvmppc_fast_vcpu_kick_hv(tvcpu); } break; case OP_31_XOP_MSGCLRP: arg = kvmppc_get_gpr(vcpu, rb); if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER) break; vcpu->arch.vcore->dpdes = 0; vcpu->arch.doorbell_request = 0; break; case OP_31_XOP_MFSPR: switch (get_sprn(inst)) { case SPRN_TIR: arg = thr; break; case SPRN_DPDES: arg = kvmppc_read_dpdes(vcpu); break; default: return EMULATE_FAIL; } kvmppc_set_gpr(vcpu, get_rt(inst), arg); break; default: return EMULATE_FAIL; } kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); return RESUME_GUEST; } /* * If the lppaca had pmcregs_in_use clear when we exited the guest, then * HFSCR_PM is cleared for next entry. If the guest then tries to access * the PMU SPRs, we get this facility unavailable interrupt. Putting HFSCR_PM * back in the guest HFSCR will cause the next entry to load the PMU SPRs and * allow the guest access to continue. */ static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.hfscr_permitted & HFSCR_PM)) return EMULATE_FAIL; vcpu->arch.hfscr |= HFSCR_PM; return RESUME_GUEST; } static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB)) return EMULATE_FAIL; vcpu->arch.hfscr |= HFSCR_EBB; return RESUME_GUEST; } static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.hfscr_permitted & HFSCR_TM)) return EMULATE_FAIL; vcpu->arch.hfscr |= HFSCR_TM; return RESUME_GUEST; } static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, struct task_struct *tsk) { struct kvm_run *run = vcpu->run; int r = RESUME_HOST; vcpu->stat.sum_exits++; /* * This can happen if an interrupt occurs in the last stages * of guest entry or the first stages of guest exit (i.e. after * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV * and before setting it to KVM_GUEST_MODE_HOST_HV). * That can happen due to a bug, or due to a machine check * occurring at just the wrong time. 
*/ if (vcpu->arch.shregs.msr & MSR_HV) { printk(KERN_EMERG "KVM trap in HV mode!\n"); printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", vcpu->arch.trap, kvmppc_get_pc(vcpu), vcpu->arch.shregs.msr); kvmppc_dump_regs(vcpu); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; run->hw.hardware_exit_reason = vcpu->arch.trap; return RESUME_HOST; } run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; switch (vcpu->arch.trap) { /* We're good on these - the host merely wanted to get our attention */ case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER: WARN_ON_ONCE(1); /* Should never happen */ vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; fallthrough; case BOOK3S_INTERRUPT_HV_DECREMENTER: vcpu->stat.dec_exits++; r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_EXTERNAL: case BOOK3S_INTERRUPT_H_DOORBELL: case BOOK3S_INTERRUPT_H_VIRT: vcpu->stat.ext_intr_exits++; r = RESUME_GUEST; break; /* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/ case BOOK3S_INTERRUPT_HMI: case BOOK3S_INTERRUPT_PERFMON: case BOOK3S_INTERRUPT_SYSTEM_RESET: r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_MACHINE_CHECK: { static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); /* * Print the MCE event to host console. Ratelimit so the guest * can't flood the host log. */ if (__ratelimit(&rs)) machine_check_print_event_info(&vcpu->arch.mce_evt,false, true); /* * If the guest can do FWNMI, exit to userspace so it can * deliver a FWNMI to the guest. * Otherwise we synthesize a machine check for the guest * so that it knows that the machine check occurred. */ if (!vcpu->kvm->arch.fwnmi_enabled) { ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED); kvmppc_core_queue_machine_check(vcpu, flags); r = RESUME_GUEST; break; } /* Exit to guest with KVM_EXIT_NMI as exit reason */ run->exit_reason = KVM_EXIT_NMI; run->hw.hardware_exit_reason = vcpu->arch.trap; /* Clear out the old NMI status from run->flags */ run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK; /* Now set the NMI status */ if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED) run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV; else run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV; r = RESUME_HOST; break; } case BOOK3S_INTERRUPT_PROGRAM: { ulong flags; /* * Normally program interrupts are delivered directly * to the guest by the hardware, but we can get here * as a result of a hypervisor emulation interrupt * (e40) getting turned into a 700 by BML RTAS. */ flags = (vcpu->arch.shregs.msr & 0x1f0000ull) | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED); kvmppc_core_queue_program(vcpu, flags); r = RESUME_GUEST; break; } case BOOK3S_INTERRUPT_SYSCALL: { int i; if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) { /* * Guest userspace executed sc 1. This can only be * reached by the P9 path because the old path * handles this case in realmode hcall handlers. */ if (!kvmhv_vcpu_is_radix(vcpu)) { /* * A guest could be running PR KVM, so this * may be a PR KVM hcall. It must be reflected * to the guest kernel as a sc interrupt. */ kvmppc_core_queue_syscall(vcpu); } else { /* * Radix guests can not run PR KVM or nested HV * hash guests which might run PR KVM, so this * is always a privilege fault. Send a program * check to guest kernel. */ kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); } r = RESUME_GUEST; break; } /* * hcall - gather args and set exit_reason. This will next be * handled by kvmppc_pseries_do_hcall which may be able to deal * with it and resume guest, or may punt to userspace. 
*/ run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); for (i = 0; i < 9; ++i) run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); run->exit_reason = KVM_EXIT_PAPR_HCALL; vcpu->arch.hcall_needed = 1; r = RESUME_HOST; break; } /* * We get these next two if the guest accesses a page which it thinks * it has mapped but which is not actually present, either because * it is for an emulated I/O device or because the corresonding * host page has been paged out. * * Any other HDSI/HISI interrupts have been handled already for P7/8 * guests. For POWER9 hash guests not using rmhandlers, basic hash * fault handling is done here. */ case BOOK3S_INTERRUPT_H_DATA_STORAGE: { unsigned long vsid; long err; if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) && unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) { r = RESUME_GUEST; /* Just retry if it's the canary */ break; } if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { /* * Radix doesn't require anything, and pre-ISAv3.0 hash * already attempted to handle this in rmhandlers. The * hash fault handling below is v3 only (it uses ASDR * via fault_gpa). */ r = RESUME_PAGE_FAULT; break; } if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) { kvmppc_core_queue_data_storage(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED, vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); r = RESUME_GUEST; break; } if (!(vcpu->arch.shregs.msr & MSR_DR)) vsid = vcpu->kvm->arch.vrma_slb_v; else vsid = vcpu->arch.fault_gpa; err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, vsid, vcpu->arch.fault_dsisr, true); if (err == 0) { r = RESUME_GUEST; } else if (err == -1 || err == -2) { r = RESUME_PAGE_FAULT; } else { kvmppc_core_queue_data_storage(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED, vcpu->arch.fault_dar, err); r = RESUME_GUEST; } break; } case BOOK3S_INTERRUPT_H_INST_STORAGE: { unsigned long vsid; long err; vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr & DSISR_SRR1_MATCH_64S; if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { /* * Radix doesn't require anything, and pre-ISAv3.0 hash * already attempted to handle this in rmhandlers. The * hash fault handling below is v3 only (it uses ASDR * via fault_gpa). */ if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) vcpu->arch.fault_dsisr |= DSISR_ISSTORE; r = RESUME_PAGE_FAULT; break; } if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) { kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_dsisr | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); r = RESUME_GUEST; break; } if (!(vcpu->arch.shregs.msr & MSR_IR)) vsid = vcpu->kvm->arch.vrma_slb_v; else vsid = vcpu->arch.fault_gpa; err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, vsid, vcpu->arch.fault_dsisr, false); if (err == 0) { r = RESUME_GUEST; } else if (err == -1) { r = RESUME_PAGE_FAULT; } else { kvmppc_core_queue_inst_storage(vcpu, err | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); r = RESUME_GUEST; } break; } /* * This occurs if the guest executes an illegal instruction. * If the guest debug is disabled, generate a program interrupt * to the guest. If guest debug is enabled, we need to check * whether the instruction is a software breakpoint instruction. * Accordingly return to Guest or Host. */ case BOOK3S_INTERRUPT_H_EMUL_ASSIST: if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? 
swab32(vcpu->arch.emul_inst) : vcpu->arch.emul_inst; if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { r = kvmppc_emulate_debug_inst(vcpu); } else { kvmppc_core_queue_program(vcpu, SRR1_PROGILL | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); r = RESUME_GUEST; } break; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case BOOK3S_INTERRUPT_HV_SOFTPATCH: /* * This occurs for various TM-related instructions that * we need to emulate on POWER9 DD2.2. We have already * handled the cases where the guest was in real-suspend * mode and was transitioning to transactional state. */ r = kvmhv_p9_tm_emulation(vcpu); if (r != -1) break; fallthrough; /* go to facility unavailable handler */ #endif /* * This occurs if the guest (kernel or userspace), does something that * is prohibited by HFSCR. * On POWER9, this could be a doorbell instruction that we need * to emulate. * Otherwise, we just generate a program interrupt to the guest. */ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: { u64 cause = vcpu->arch.hfscr >> 56; r = EMULATE_FAIL; if (cpu_has_feature(CPU_FTR_ARCH_300)) { if (cause == FSCR_MSGP_LG) r = kvmppc_emulate_doorbell_instr(vcpu); if (cause == FSCR_PM_LG) r = kvmppc_pmu_unavailable(vcpu); if (cause == FSCR_EBB_LG) r = kvmppc_ebb_unavailable(vcpu); if (cause == FSCR_TM_LG) r = kvmppc_tm_unavailable(vcpu); } if (r == EMULATE_FAIL) { kvmppc_core_queue_program(vcpu, SRR1_PROGILL | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); r = RESUME_GUEST; } break; } case BOOK3S_INTERRUPT_HV_RM_HARD: r = RESUME_PASSTHROUGH; break; default: kvmppc_dump_regs(vcpu); printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", vcpu->arch.trap, kvmppc_get_pc(vcpu), vcpu->arch.shregs.msr); run->hw.hardware_exit_reason = vcpu->arch.trap; r = RESUME_HOST; break; } return r; } static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) { int r; int srcu_idx; vcpu->stat.sum_exits++; /* * This can happen if an interrupt occurs in the last stages * of guest entry or the first stages of guest exit (i.e. after * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV * and before setting it to KVM_GUEST_MODE_HOST_HV). * That can happen due to a bug, or due to a machine check * occurring at just the wrong time. */ if (vcpu->arch.shregs.msr & MSR_HV) { pr_emerg("KVM trap in HV mode while nested!\n"); pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n", vcpu->arch.trap, kvmppc_get_pc(vcpu), vcpu->arch.shregs.msr); kvmppc_dump_regs(vcpu); return RESUME_HOST; } switch (vcpu->arch.trap) { /* We're good on these - the host merely wanted to get our attention */ case BOOK3S_INTERRUPT_HV_DECREMENTER: vcpu->stat.dec_exits++; r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_EXTERNAL: vcpu->stat.ext_intr_exits++; r = RESUME_HOST; break; case BOOK3S_INTERRUPT_H_DOORBELL: case BOOK3S_INTERRUPT_H_VIRT: vcpu->stat.ext_intr_exits++; r = RESUME_GUEST; break; /* These need to go to the nested HV */ case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER: vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; vcpu->stat.dec_exits++; r = RESUME_HOST; break; /* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/ case BOOK3S_INTERRUPT_HMI: case BOOK3S_INTERRUPT_PERFMON: case BOOK3S_INTERRUPT_SYSTEM_RESET: r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_MACHINE_CHECK: { static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); /* Pass the machine check to the L1 guest */ r = RESUME_HOST; /* Print the MCE event to host console. 
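		 * As in the non-nested exit path, this is ratelimited so a
		 * misbehaving guest cannot flood the host log.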
*/ if (__ratelimit(&rs)) machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); break; } /* * We get these next two if the guest accesses a page which it thinks * it has mapped but which is not actually present, either because * it is for an emulated I/O device or because the corresonding * host page has been paged out. */ case BOOK3S_INTERRUPT_H_DATA_STORAGE: srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvmhv_nested_page_fault(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); break; case BOOK3S_INTERRUPT_H_INST_STORAGE: vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & DSISR_SRR1_MATCH_64S; if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) vcpu->arch.fault_dsisr |= DSISR_ISSTORE; srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvmhv_nested_page_fault(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); break; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case BOOK3S_INTERRUPT_HV_SOFTPATCH: /* * This occurs for various TM-related instructions that * we need to emulate on POWER9 DD2.2. We have already * handled the cases where the guest was in real-suspend * mode and was transitioning to transactional state. */ r = kvmhv_p9_tm_emulation(vcpu); if (r != -1) break; fallthrough; /* go to facility unavailable handler */ #endif case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: { u64 cause = vcpu->arch.hfscr >> 56; /* * Only pass HFU interrupts to the L1 if the facility is * permitted but disabled by the L1's HFSCR, otherwise * the interrupt does not make sense to the L1 so turn * it into a HEAI. */ if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) || (vcpu->arch.nested_hfscr & (1UL << cause))) { ppc_inst_t pinst; vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST; /* * If the fetch failed, return to guest and * try executing it again. */ r = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst); vcpu->arch.emul_inst = ppc_inst_val(pinst); if (r != EMULATE_DONE) r = RESUME_GUEST; else r = RESUME_HOST; } else { r = RESUME_HOST; } break; } case BOOK3S_INTERRUPT_HV_RM_HARD: vcpu->arch.trap = 0; r = RESUME_GUEST; if (!xics_on_xive()) kvmppc_xics_rm_complete(vcpu, 0); break; case BOOK3S_INTERRUPT_SYSCALL: { unsigned long req = kvmppc_get_gpr(vcpu, 3); /* * The H_RPT_INVALIDATE hcalls issued by nested * guests for process-scoped invalidations when * GTSE=0, are handled here in L0. */ if (req == H_RPT_INVALIDATE) { r = kvmppc_nested_h_rpt_invalidate(vcpu); break; } r = RESUME_HOST; break; } default: r = RESUME_HOST; break; } return r; } static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int i; memset(sregs, 0, sizeof(struct kvm_sregs)); sregs->pvr = vcpu->arch.pvr; for (i = 0; i < vcpu->arch.slb_max; i++) { sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; } return 0; } static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int i, j; /* Only accept the same PVR as the host's, since we can't spoof it */ if (sregs->pvr != vcpu->arch.pvr) return -EINVAL; j = 0; for (i = 0; i < vcpu->arch.slb_nr; i++) { if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) { vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; ++j; } } vcpu->arch.slb_max = j; return 0; } /* * Enforce limits on guest LPCR values based on hardware availability, * guest configuration, and possibly hypervisor support and security * concerns. 
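 * The caller passes in the LPCR value it would like to use; the return
 * value is that LPCR with any bits that cannot be allowed in this
 * configuration cleared. kvmppc_set_lpcr() and verify_lpcr() below both
 * rely on this filter.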
*/ unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr) { /* LPCR_TC only applies to HPT guests */ if (kvm_is_radix(kvm)) lpcr &= ~LPCR_TC; /* On POWER8 and above, userspace can modify AIL */ if (!cpu_has_feature(CPU_FTR_ARCH_207S)) lpcr &= ~LPCR_AIL; if ((lpcr & LPCR_AIL) != LPCR_AIL_3) lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */ /* * On some POWER9s we force AIL off for radix guests to prevent * executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to * guest, which can result in Q0 translations with LPID=0 PID=PIDR to * be cached, which the host TLB management does not expect. */ if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) lpcr &= ~LPCR_AIL; /* * On POWER9, allow userspace to enable large decrementer for the * guest, whether or not the host has it enabled. */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) lpcr &= ~LPCR_LD; return lpcr; } static void verify_lpcr(struct kvm *kvm, unsigned long lpcr) { if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) { WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n", lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr)); } } static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, bool preserve_top32) { struct kvm *kvm = vcpu->kvm; struct kvmppc_vcore *vc = vcpu->arch.vcore; u64 mask; spin_lock(&vc->lock); /* * Userspace can only modify * DPFD (default prefetch depth), ILE (interrupt little-endian), * TC (translation control), AIL (alternate interrupt location), * LD (large decrementer). * These are subject to restrictions from kvmppc_filter_lcpr_hv(). */ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD; /* Broken 32-bit version of LPCR must not clear top bits */ if (preserve_top32) mask &= 0xFFFFFFFF; new_lpcr = kvmppc_filter_lpcr_hv(kvm, (vc->lpcr & ~mask) | (new_lpcr & mask)); /* * If ILE (interrupt little-endian) has changed, update the * MSR_LE bit in the intr_msr for each vcpu in this vcore. */ if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { struct kvm_vcpu *vcpu; unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->arch.vcore != vc) continue; if (new_lpcr & LPCR_ILE) vcpu->arch.intr_msr |= MSR_LE; else vcpu->arch.intr_msr &= ~MSR_LE; } } vc->lpcr = new_lpcr; spin_unlock(&vc->lock); } static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; long int i; switch (id) { case KVM_REG_PPC_DEBUG_INST: *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT); break; case KVM_REG_PPC_HIOR: *val = get_reg_val(id, 0); break; case KVM_REG_PPC_DABR: *val = get_reg_val(id, vcpu->arch.dabr); break; case KVM_REG_PPC_DABRX: *val = get_reg_val(id, vcpu->arch.dabrx); break; case KVM_REG_PPC_DSCR: *val = get_reg_val(id, vcpu->arch.dscr); break; case KVM_REG_PPC_PURR: *val = get_reg_val(id, vcpu->arch.purr); break; case KVM_REG_PPC_SPURR: *val = get_reg_val(id, vcpu->arch.spurr); break; case KVM_REG_PPC_AMR: *val = get_reg_val(id, vcpu->arch.amr); break; case KVM_REG_PPC_UAMOR: *val = get_reg_val(id, vcpu->arch.uamor); break; case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1: i = id - KVM_REG_PPC_MMCR0; *val = get_reg_val(id, vcpu->arch.mmcr[i]); break; case KVM_REG_PPC_MMCR2: *val = get_reg_val(id, vcpu->arch.mmcr[2]); break; case KVM_REG_PPC_MMCRA: *val = get_reg_val(id, vcpu->arch.mmcra); break; case KVM_REG_PPC_MMCRS: *val = get_reg_val(id, vcpu->arch.mmcrs); break; case KVM_REG_PPC_MMCR3: *val = get_reg_val(id, vcpu->arch.mmcr[3]); break; case KVM_REG_PPC_PMC1 ... 
KVM_REG_PPC_PMC8: i = id - KVM_REG_PPC_PMC1; *val = get_reg_val(id, vcpu->arch.pmc[i]); break; case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: i = id - KVM_REG_PPC_SPMC1; *val = get_reg_val(id, vcpu->arch.spmc[i]); break; case KVM_REG_PPC_SIAR: *val = get_reg_val(id, vcpu->arch.siar); break; case KVM_REG_PPC_SDAR: *val = get_reg_val(id, vcpu->arch.sdar); break; case KVM_REG_PPC_SIER: *val = get_reg_val(id, vcpu->arch.sier[0]); break; case KVM_REG_PPC_SIER2: *val = get_reg_val(id, vcpu->arch.sier[1]); break; case KVM_REG_PPC_SIER3: *val = get_reg_val(id, vcpu->arch.sier[2]); break; case KVM_REG_PPC_IAMR: *val = get_reg_val(id, vcpu->arch.iamr); break; case KVM_REG_PPC_PSPB: *val = get_reg_val(id, vcpu->arch.pspb); break; case KVM_REG_PPC_DPDES: /* * On POWER9, where we are emulating msgsndp etc., * we return 1 bit for each vcpu, which can come from * either vcore->dpdes or doorbell_request. * On POWER8, doorbell_request is 0. */ if (cpu_has_feature(CPU_FTR_ARCH_300)) *val = get_reg_val(id, vcpu->arch.doorbell_request); else *val = get_reg_val(id, vcpu->arch.vcore->dpdes); break; case KVM_REG_PPC_VTB: *val = get_reg_val(id, vcpu->arch.vcore->vtb); break; case KVM_REG_PPC_DAWR: *val = get_reg_val(id, vcpu->arch.dawr0); break; case KVM_REG_PPC_DAWRX: *val = get_reg_val(id, vcpu->arch.dawrx0); break; case KVM_REG_PPC_DAWR1: *val = get_reg_val(id, vcpu->arch.dawr1); break; case KVM_REG_PPC_DAWRX1: *val = get_reg_val(id, vcpu->arch.dawrx1); break; case KVM_REG_PPC_CIABR: *val = get_reg_val(id, vcpu->arch.ciabr); break; case KVM_REG_PPC_CSIGR: *val = get_reg_val(id, vcpu->arch.csigr); break; case KVM_REG_PPC_TACR: *val = get_reg_val(id, vcpu->arch.tacr); break; case KVM_REG_PPC_TCSCR: *val = get_reg_val(id, vcpu->arch.tcscr); break; case KVM_REG_PPC_PID: *val = get_reg_val(id, vcpu->arch.pid); break; case KVM_REG_PPC_ACOP: *val = get_reg_val(id, vcpu->arch.acop); break; case KVM_REG_PPC_WORT: *val = get_reg_val(id, vcpu->arch.wort); break; case KVM_REG_PPC_TIDR: *val = get_reg_val(id, vcpu->arch.tid); break; case KVM_REG_PPC_PSSCR: *val = get_reg_val(id, vcpu->arch.psscr); break; case KVM_REG_PPC_VPA_ADDR: spin_lock(&vcpu->arch.vpa_update_lock); *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); spin_unlock(&vcpu->arch.vpa_update_lock); break; case KVM_REG_PPC_VPA_SLB: spin_lock(&vcpu->arch.vpa_update_lock); val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; val->vpaval.length = vcpu->arch.slb_shadow.len; spin_unlock(&vcpu->arch.vpa_update_lock); break; case KVM_REG_PPC_VPA_DTL: spin_lock(&vcpu->arch.vpa_update_lock); val->vpaval.addr = vcpu->arch.dtl.next_gpa; val->vpaval.length = vcpu->arch.dtl.len; spin_unlock(&vcpu->arch.vpa_update_lock); break; case KVM_REG_PPC_TB_OFFSET: *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); break; case KVM_REG_PPC_LPCR: case KVM_REG_PPC_LPCR_64: *val = get_reg_val(id, vcpu->arch.vcore->lpcr); break; case KVM_REG_PPC_PPR: *val = get_reg_val(id, vcpu->arch.ppr); break; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case KVM_REG_PPC_TFHAR: *val = get_reg_val(id, vcpu->arch.tfhar); break; case KVM_REG_PPC_TFIAR: *val = get_reg_val(id, vcpu->arch.tfiar); break; case KVM_REG_PPC_TEXASR: *val = get_reg_val(id, vcpu->arch.texasr); break; case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: i = id - KVM_REG_PPC_TM_GPR0; *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); break; case KVM_REG_PPC_TM_VSR0 ... 
KVM_REG_PPC_TM_VSR63:
	{
		int j;
		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				val->vval = vcpu->arch.vr_tm.vr[i-32];
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		*val = get_reg_val(id, vcpu->arch.cr_tm);
		break;
	case KVM_REG_PPC_TM_XER:
		*val = get_reg_val(id, vcpu->arch.xer_tm);
		break;
	case KVM_REG_PPC_TM_LR:
		*val = get_reg_val(id, vcpu->arch.lr_tm);
		break;
	case KVM_REG_PPC_TM_CTR:
		*val = get_reg_val(id, vcpu->arch.ctr_tm);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
		break;
	case KVM_REG_PPC_TM_AMR:
		*val = get_reg_val(id, vcpu->arch.amr_tm);
		break;
	case KVM_REG_PPC_TM_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr_tm);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr_tm);
		break;
	case KVM_REG_PPC_TM_TAR:
		*val = get_reg_val(id, vcpu->arch.tar_tm);
		break;
#endif
	case KVM_REG_PPC_ARCH_COMPAT:
		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
		break;
	case KVM_REG_PPC_DEC_EXPIRY:
		*val = get_reg_val(id, vcpu->arch.dec_expires);
		break;
	case KVM_REG_PPC_ONLINE:
		*val = get_reg_val(id, vcpu->arch.online);
		break;
	case KVM_REG_PPC_PTCR:
		*val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;
	unsigned long addr, len;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		if (set_reg_val(id, *val))
			r = -EINVAL;
		break;
	case KVM_REG_PPC_DABR:
		vcpu->arch.dabr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DABRX:
		vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
		break;
	case KVM_REG_PPC_DSCR:
		vcpu->arch.dscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PURR:
		vcpu->arch.purr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPURR:
		vcpu->arch.spurr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_AMR:
		vcpu->arch.amr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_UAMOR:
		vcpu->arch.uamor = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
		i = id - KVM_REG_PPC_MMCR0;
		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR2:
		vcpu->arch.mmcr[2] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCRA:
		vcpu->arch.mmcra = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCRS:
		vcpu->arch.mmcrs = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR3:
		vcpu->arch.mmcr[3] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		vcpu->arch.pmc[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPMC1 ...
KVM_REG_PPC_SPMC2: i = id - KVM_REG_PPC_SPMC1; vcpu->arch.spmc[i] = set_reg_val(id, *val); break; case KVM_REG_PPC_SIAR: vcpu->arch.siar = set_reg_val(id, *val); break; case KVM_REG_PPC_SDAR: vcpu->arch.sdar = set_reg_val(id, *val); break; case KVM_REG_PPC_SIER: vcpu->arch.sier[0] = set_reg_val(id, *val); break; case KVM_REG_PPC_SIER2: vcpu->arch.sier[1] = set_reg_val(id, *val); break; case KVM_REG_PPC_SIER3: vcpu->arch.sier[2] = set_reg_val(id, *val); break; case KVM_REG_PPC_IAMR: vcpu->arch.iamr = set_reg_val(id, *val); break; case KVM_REG_PPC_PSPB: vcpu->arch.pspb = set_reg_val(id, *val); break; case KVM_REG_PPC_DPDES: if (cpu_has_feature(CPU_FTR_ARCH_300)) vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1; else vcpu->arch.vcore->dpdes = set_reg_val(id, *val); break; case KVM_REG_PPC_VTB: vcpu->arch.vcore->vtb = set_reg_val(id, *val); break; case KVM_REG_PPC_DAWR: vcpu->arch.dawr0 = set_reg_val(id, *val); break; case KVM_REG_PPC_DAWRX: vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP; break; case KVM_REG_PPC_DAWR1: vcpu->arch.dawr1 = set_reg_val(id, *val); break; case KVM_REG_PPC_DAWRX1: vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP; break; case KVM_REG_PPC_CIABR: vcpu->arch.ciabr = set_reg_val(id, *val); /* Don't allow setting breakpoints in hypervisor code */ if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ break; case KVM_REG_PPC_CSIGR: vcpu->arch.csigr = set_reg_val(id, *val); break; case KVM_REG_PPC_TACR: vcpu->arch.tacr = set_reg_val(id, *val); break; case KVM_REG_PPC_TCSCR: vcpu->arch.tcscr = set_reg_val(id, *val); break; case KVM_REG_PPC_PID: vcpu->arch.pid = set_reg_val(id, *val); break; case KVM_REG_PPC_ACOP: vcpu->arch.acop = set_reg_val(id, *val); break; case KVM_REG_PPC_WORT: vcpu->arch.wort = set_reg_val(id, *val); break; case KVM_REG_PPC_TIDR: vcpu->arch.tid = set_reg_val(id, *val); break; case KVM_REG_PPC_PSSCR: vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; break; case KVM_REG_PPC_VPA_ADDR: addr = set_reg_val(id, *val); r = -EINVAL; if (!addr && (vcpu->arch.slb_shadow.next_gpa || vcpu->arch.dtl.next_gpa)) break; r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); break; case KVM_REG_PPC_VPA_SLB: addr = val->vpaval.addr; len = val->vpaval.length; r = -EINVAL; if (addr && !vcpu->arch.vpa.next_gpa) break; r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); break; case KVM_REG_PPC_VPA_DTL: addr = val->vpaval.addr; len = val->vpaval.length; r = -EINVAL; if (addr && (len < sizeof(struct dtl_entry) || !vcpu->arch.vpa.next_gpa)) break; len -= len % sizeof(struct dtl_entry); r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); break; case KVM_REG_PPC_TB_OFFSET: { /* round up to multiple of 2^24 */ u64 tb_offset = ALIGN(set_reg_val(id, *val), 1UL << 24); /* * Now that we know the timebase offset, update the * decrementer expiry with a guest timebase value. If * the userspace does not set DEC_EXPIRY, this ensures * a migrated vcpu at least starts with an expired * decrementer, which is better than a large one that * causes a hang. 
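		 * (The seeding below only happens while dec_expires is still
		 * zero, so an explicitly restored KVM_REG_PPC_DEC_EXPIRY value
		 * is not overridden here.)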
*/ if (!vcpu->arch.dec_expires && tb_offset) vcpu->arch.dec_expires = get_tb() + tb_offset; vcpu->arch.vcore->tb_offset = tb_offset; break; } case KVM_REG_PPC_LPCR: kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true); break; case KVM_REG_PPC_LPCR_64: kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false); break; case KVM_REG_PPC_PPR: vcpu->arch.ppr = set_reg_val(id, *val); break; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case KVM_REG_PPC_TFHAR: vcpu->arch.tfhar = set_reg_val(id, *val); break; case KVM_REG_PPC_TFIAR: vcpu->arch.tfiar = set_reg_val(id, *val); break; case KVM_REG_PPC_TEXASR: vcpu->arch.texasr = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: i = id - KVM_REG_PPC_TM_GPR0; vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: { int j; i = id - KVM_REG_PPC_TM_VSR0; if (i < 32) for (j = 0; j < TS_FPRWIDTH; j++) vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; else if (cpu_has_feature(CPU_FTR_ALTIVEC)) vcpu->arch.vr_tm.vr[i-32] = val->vval; else r = -ENXIO; break; } case KVM_REG_PPC_TM_CR: vcpu->arch.cr_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_XER: vcpu->arch.xer_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_LR: vcpu->arch.lr_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_CTR: vcpu->arch.ctr_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_FPSCR: vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_AMR: vcpu->arch.amr_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_PPR: vcpu->arch.ppr_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_VRSAVE: vcpu->arch.vrsave_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_VSCR: if (cpu_has_feature(CPU_FTR_ALTIVEC)) vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); else r = - ENXIO; break; case KVM_REG_PPC_TM_DSCR: vcpu->arch.dscr_tm = set_reg_val(id, *val); break; case KVM_REG_PPC_TM_TAR: vcpu->arch.tar_tm = set_reg_val(id, *val); break; #endif case KVM_REG_PPC_ARCH_COMPAT: r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_DEC_EXPIRY: vcpu->arch.dec_expires = set_reg_val(id, *val); break; case KVM_REG_PPC_ONLINE: i = set_reg_val(id, *val); if (i && !vcpu->arch.online) atomic_inc(&vcpu->arch.vcore->online_count); else if (!i && vcpu->arch.online) atomic_dec(&vcpu->arch.vcore->online_count); vcpu->arch.online = i; break; case KVM_REG_PPC_PTCR: vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); break; default: r = -EINVAL; break; } return r; } /* * On POWER9, threads are independent and can be in different partitions. * Therefore we consider each thread to be a subcore. * There is a restriction that all threads have to be in the same * MMU mode (radix or HPT), unfortunately, but since we only support * HPT guests on a HPT host so far, that isn't an impediment yet. 
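 * Hence threads_per_vcore() below returns 1 on POWER9 and later
 * (CPU_FTR_ARCH_300) and threads_per_subcore on older processors.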
*/ static int threads_per_vcore(struct kvm *kvm) { if (cpu_has_feature(CPU_FTR_ARCH_300)) return 1; return threads_per_subcore; } static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id) { struct kvmppc_vcore *vcore; vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL); if (vcore == NULL) return NULL; spin_lock_init(&vcore->lock); spin_lock_init(&vcore->stoltb_lock); rcuwait_init(&vcore->wait); vcore->preempt_tb = TB_NIL; vcore->lpcr = kvm->arch.lpcr; vcore->first_vcpuid = id; vcore->kvm = kvm; INIT_LIST_HEAD(&vcore->preempt_list); return vcore; } #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING static struct debugfs_timings_element { const char *name; size_t offset; } timings[] = { #ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING {"vcpu_entry", offsetof(struct kvm_vcpu, arch.vcpu_entry)}, {"guest_entry", offsetof(struct kvm_vcpu, arch.guest_entry)}, {"in_guest", offsetof(struct kvm_vcpu, arch.in_guest)}, {"guest_exit", offsetof(struct kvm_vcpu, arch.guest_exit)}, {"vcpu_exit", offsetof(struct kvm_vcpu, arch.vcpu_exit)}, {"hypercall", offsetof(struct kvm_vcpu, arch.hcall)}, {"page_fault", offsetof(struct kvm_vcpu, arch.pg_fault)}, #else {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)}, {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)}, {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)}, {"guest", offsetof(struct kvm_vcpu, arch.guest_time)}, {"cede", offsetof(struct kvm_vcpu, arch.cede_time)}, #endif }; #define N_TIMINGS (ARRAY_SIZE(timings)) struct debugfs_timings_state { struct kvm_vcpu *vcpu; unsigned int buflen; char buf[N_TIMINGS * 100]; }; static int debugfs_timings_open(struct inode *inode, struct file *file) { struct kvm_vcpu *vcpu = inode->i_private; struct debugfs_timings_state *p; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; kvm_get_kvm(vcpu->kvm); p->vcpu = vcpu; file->private_data = p; return nonseekable_open(inode, file); } static int debugfs_timings_release(struct inode *inode, struct file *file) { struct debugfs_timings_state *p = file->private_data; kvm_put_kvm(p->vcpu->kvm); kfree(p); return 0; } static ssize_t debugfs_timings_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct debugfs_timings_state *p = file->private_data; struct kvm_vcpu *vcpu = p->vcpu; char *s, *buf_end; struct kvmhv_tb_accumulator tb; u64 count; loff_t pos; ssize_t n; int i, loops; bool ok; if (!p->buflen) { s = p->buf; buf_end = s + sizeof(p->buf); for (i = 0; i < N_TIMINGS; ++i) { struct kvmhv_tb_accumulator *acc; acc = (struct kvmhv_tb_accumulator *) ((unsigned long)vcpu + timings[i].offset); ok = false; for (loops = 0; loops < 1000; ++loops) { count = acc->seqcount; if (!(count & 1)) { smp_rmb(); tb = *acc; smp_rmb(); if (count == acc->seqcount) { ok = true; break; } } udelay(1); } if (!ok) snprintf(s, buf_end - s, "%s: stuck\n", timings[i].name); else snprintf(s, buf_end - s, "%s: %llu %llu %llu %llu\n", timings[i].name, count / 2, tb_to_ns(tb.tb_total), tb_to_ns(tb.tb_min), tb_to_ns(tb.tb_max)); s += strlen(s); } p->buflen = s - p->buf; } pos = *ppos; if (pos >= p->buflen) return 0; if (len > p->buflen - pos) len = p->buflen - pos; n = copy_to_user(buf, p->buf + pos, len); if (n) { if (n == len) return -EFAULT; len -= n; } *ppos = pos + len; return len; } static ssize_t debugfs_timings_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { return -EACCES; } static const struct file_operations debugfs_timings_ops = { .owner = THIS_MODULE, .open = debugfs_timings_open, .release = debugfs_timings_release, .read = 
debugfs_timings_read, .write = debugfs_timings_write, .llseek = generic_file_llseek, }; /* Create a debugfs directory for the vcpu */ static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) { if (cpu_has_feature(CPU_FTR_ARCH_300) == IS_ENABLED(CONFIG_KVM_BOOK3S_HV_P9_TIMING)) debugfs_create_file("timings", 0444, debugfs_dentry, vcpu, &debugfs_timings_ops); return 0; } #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) { return 0; } #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu) { int err; int core; struct kvmppc_vcore *vcore; struct kvm *kvm; unsigned int id; kvm = vcpu->kvm; id = vcpu->vcpu_id; vcpu->arch.shared = &vcpu->arch.shregs; #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE /* * The shared struct is never shared on HV, * so we can always use host endianness */ #ifdef __BIG_ENDIAN__ vcpu->arch.shared_big_endian = true; #else vcpu->arch.shared_big_endian = false; #endif #endif vcpu->arch.mmcr[0] = MMCR0_FC; if (cpu_has_feature(CPU_FTR_ARCH_31)) { vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT; vcpu->arch.mmcra = MMCRA_BHRB_DISABLE; } vcpu->arch.ctrl = CTRL_RUNLATCH; /* default to host PVR, since we can't spoof it */ kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR)); spin_lock_init(&vcpu->arch.vpa_update_lock); spin_lock_init(&vcpu->arch.tbacct_lock); vcpu->arch.busy_preempt = TB_NIL; vcpu->arch.shregs.msr = MSR_ME; vcpu->arch.intr_msr = MSR_SF | MSR_ME; /* * Set the default HFSCR for the guest from the host value. * This value is only used on POWER9 and later. * On >= POWER9, we want to virtualize the doorbell facility, so we * don't set the HFSCR_MSGP bit, and that causes those instructions * to trap and then we emulate them. */ vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP; /* On POWER10 and later, allow prefixed instructions */ if (cpu_has_feature(CPU_FTR_ARCH_31)) vcpu->arch.hfscr |= HFSCR_PREFIX; if (cpu_has_feature(CPU_FTR_HVMODE)) { vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) vcpu->arch.hfscr |= HFSCR_TM; #endif } if (cpu_has_feature(CPU_FTR_TM_COMP)) vcpu->arch.hfscr |= HFSCR_TM; vcpu->arch.hfscr_permitted = vcpu->arch.hfscr; /* * PM, EBB, TM are demand-faulted so start with it clear. */ vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM); kvmppc_mmu_book3s_hv_init(vcpu); vcpu->arch.state = KVMPPC_VCPU_NOTREADY; init_waitqueue_head(&vcpu->arch.cpu_run); mutex_lock(&kvm->lock); vcore = NULL; err = -EINVAL; if (cpu_has_feature(CPU_FTR_ARCH_300)) { if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) { pr_devel("KVM: VCPU ID too high\n"); core = KVM_MAX_VCORES; } else { BUG_ON(kvm->arch.smt_mode != 1); core = kvmppc_pack_vcpu_id(kvm, id); } } else { core = id / kvm->arch.smt_mode; } if (core < KVM_MAX_VCORES) { vcore = kvm->arch.vcores[core]; if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) { pr_devel("KVM: collision on id %u", id); vcore = NULL; } else if (!vcore) { /* * Take mmu_setup_lock for mutual exclusion * with kvmppc_update_lpcr(). 
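			 * The new vcore is published in kvm->arch.vcores[]
			 * and online_vcores is incremented while that lock
			 * is held.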
*/ err = -ENOMEM; vcore = kvmppc_vcore_create(kvm, id & ~(kvm->arch.smt_mode - 1)); mutex_lock(&kvm->arch.mmu_setup_lock); kvm->arch.vcores[core] = vcore; kvm->arch.online_vcores++; mutex_unlock(&kvm->arch.mmu_setup_lock); } } mutex_unlock(&kvm->lock); if (!vcore) return err; spin_lock(&vcore->lock); ++vcore->num_threads; spin_unlock(&vcore->lock); vcpu->arch.vcore = vcore; vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; vcpu->arch.thread_cpu = -1; vcpu->arch.prev_cpu = -1; vcpu->arch.cpu_type = KVM_CPU_3S_64; kvmppc_sanity_check(vcpu); return 0; } static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode, unsigned long flags) { int err; int esmt = 0; if (flags) return -EINVAL; if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode)) return -EINVAL; if (!cpu_has_feature(CPU_FTR_ARCH_300)) { /* * On POWER8 (or POWER7), the threading mode is "strict", * so we pack smt_mode vcpus per vcore. */ if (smt_mode > threads_per_subcore) return -EINVAL; } else { /* * On POWER9, the threading mode is "loose", * so each vcpu gets its own vcore. */ esmt = smt_mode; smt_mode = 1; } mutex_lock(&kvm->lock); err = -EBUSY; if (!kvm->arch.online_vcores) { kvm->arch.smt_mode = smt_mode; kvm->arch.emul_smt_mode = esmt; err = 0; } mutex_unlock(&kvm->lock); return err; } static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa) { if (vpa->pinned_addr) kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa, vpa->dirty); } static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) { spin_lock(&vcpu->arch.vpa_update_lock); unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); spin_unlock(&vcpu->arch.vpa_update_lock); } static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) { /* Indicate we want to get back into the guest */ return 1; } static void kvmppc_set_timer(struct kvm_vcpu *vcpu) { unsigned long dec_nsec, now; now = get_tb(); if (now > kvmppc_dec_expires_host_tb(vcpu)) { /* decrementer has already gone negative */ kvmppc_core_queue_dec(vcpu); kvmppc_core_prepare_to_enter(vcpu); return; } dec_nsec = tb_to_ns(kvmppc_dec_expires_host_tb(vcpu) - now); hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); vcpu->arch.timer_running = 1; } extern int __kvmppc_vcore_entry(void); static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, struct kvm_vcpu *vcpu, u64 tb) { u64 now; if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) return; spin_lock_irq(&vcpu->arch.tbacct_lock); now = tb; vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - vcpu->arch.stolen_logged; vcpu->arch.busy_preempt = now; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; spin_unlock_irq(&vcpu->arch.tbacct_lock); --vc->n_runnable; WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); } static int kvmppc_grab_hwthread(int cpu) { struct paca_struct *tpaca; long timeout = 10000; tpaca = paca_ptrs[cpu]; /* Ensure the thread won't go into the kernel if it wakes */ tpaca->kvm_hstate.kvm_vcpu = NULL; tpaca->kvm_hstate.kvm_vcore = NULL; tpaca->kvm_hstate.napping = 0; smp_wmb(); tpaca->kvm_hstate.hwthread_req = 1; /* * If the thread is already executing in the kernel (e.g. handling * a stray interrupt), wait for it to get back to nap mode. * The smp_mb() is to ensure that our setting of hwthread_req * is visible before we look at hwthread_state, so if this * races with the code at system_reset_pSeries and the thread * misses our setting of hwthread_req, we are sure to see its * setting of hwthread_state, and vice versa. 
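	 * If the thread does not reach nap state within roughly 10ms
	 * (10000 iterations of udelay(1) below), give up and return -EBUSY.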
*/ smp_mb(); while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) { if (--timeout <= 0) { pr_err("KVM: couldn't grab cpu %d\n", cpu); return -EBUSY; } udelay(1); } return 0; } static void kvmppc_release_hwthread(int cpu) { struct paca_struct *tpaca; tpaca = paca_ptrs[cpu]; tpaca->kvm_hstate.hwthread_req = 0; tpaca->kvm_hstate.kvm_vcpu = NULL; tpaca->kvm_hstate.kvm_vcore = NULL; tpaca->kvm_hstate.kvm_split_mode = NULL; } static DEFINE_PER_CPU(struct kvm *, cpu_in_guest); static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu) { struct kvm_nested_guest *nested = vcpu->arch.nested; cpumask_t *need_tlb_flush; int i; if (nested) need_tlb_flush = &nested->need_tlb_flush; else need_tlb_flush = &kvm->arch.need_tlb_flush; cpu = cpu_first_tlb_thread_sibling(cpu); for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu); i += cpu_tlb_thread_sibling_step()) cpumask_set_cpu(i, need_tlb_flush); /* * Make sure setting of bit in need_tlb_flush precedes testing of * cpu_in_guest. The matching barrier on the other side is hwsync * when switching to guest MMU mode, which happens between * cpu_in_guest being set to the guest kvm, and need_tlb_flush bit * being tested. */ smp_mb(); for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu); i += cpu_tlb_thread_sibling_step()) { struct kvm *running = *per_cpu_ptr(&cpu_in_guest, i); if (running == kvm) smp_call_function_single(i, do_nothing, NULL, 1); } } static void do_migrate_away_vcpu(void *arg) { struct kvm_vcpu *vcpu = arg; struct kvm *kvm = vcpu->kvm; /* * If the guest has GTSE, it may execute tlbie, so do a eieio; tlbsync; * ptesync sequence on the old CPU before migrating to a new one, in * case we interrupted the guest between a tlbie ; eieio ; * tlbsync; ptesync sequence. * * Otherwise, ptesync is sufficient for ordering tlbiel sequences. */ if (kvm->arch.lpcr & LPCR_GTSE) asm volatile("eieio; tlbsync; ptesync"); else asm volatile("ptesync"); } static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu) { struct kvm_nested_guest *nested = vcpu->arch.nested; struct kvm *kvm = vcpu->kvm; int prev_cpu; if (!cpu_has_feature(CPU_FTR_HVMODE)) return; if (nested) prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; else prev_cpu = vcpu->arch.prev_cpu; /* * With radix, the guest can do TLB invalidations itself, * and it could choose to use the local form (tlbiel) if * it is invalidating a translation that has only ever been * used on one vcpu. However, that doesn't mean it has * only ever been used on one physical cpu, since vcpus * can move around between pcpus. To cope with this, when * a vcpu moves from one pcpu to another, we need to tell * any vcpus running on the same core as this vcpu previously * ran to flush the TLB. 
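	 * radix_flush_cpu() (called below) sets the need_tlb_flush bit for
	 * every TLB-thread sibling of the previous physical CPU and kicks
	 * any sibling that is currently running this guest.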
*/ if (prev_cpu != pcpu) { if (prev_cpu >= 0) { if (cpu_first_tlb_thread_sibling(prev_cpu) != cpu_first_tlb_thread_sibling(pcpu)) radix_flush_cpu(kvm, prev_cpu, vcpu); smp_call_function_single(prev_cpu, do_migrate_away_vcpu, vcpu, 1); } if (nested) nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; else vcpu->arch.prev_cpu = pcpu; } } static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) { int cpu; struct paca_struct *tpaca; cpu = vc->pcpu; if (vcpu) { if (vcpu->arch.timer_running) { hrtimer_try_to_cancel(&vcpu->arch.dec_timer); vcpu->arch.timer_running = 0; } cpu += vcpu->arch.ptid; vcpu->cpu = vc->pcpu; vcpu->arch.thread_cpu = cpu; } tpaca = paca_ptrs[cpu]; tpaca->kvm_hstate.kvm_vcpu = vcpu; tpaca->kvm_hstate.ptid = cpu - vc->pcpu; tpaca->kvm_hstate.fake_suspend = 0; /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */ smp_wmb(); tpaca->kvm_hstate.kvm_vcore = vc; if (cpu != smp_processor_id()) kvmppc_ipi_thread(cpu); } static void kvmppc_wait_for_nap(int n_threads) { int cpu = smp_processor_id(); int i, loops; if (n_threads <= 1) return; for (loops = 0; loops < 1000000; ++loops) { /* * Check if all threads are finished. * We set the vcore pointer when starting a thread * and the thread clears it when finished, so we look * for any threads that still have a non-NULL vcore ptr. */ for (i = 1; i < n_threads; ++i) if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) break; if (i == n_threads) { HMT_medium(); return; } HMT_low(); } HMT_medium(); for (i = 1; i < n_threads; ++i) if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) pr_err("KVM: CPU %d seems to be stuck\n", cpu + i); } /* * Check that we are on thread 0 and that any other threads in * this core are off-line. Then grab the threads so they can't * enter the kernel. */ static int on_primary_thread(void) { int cpu = smp_processor_id(); int thr; /* Are we on a primary subcore? */ if (cpu_thread_in_subcore(cpu)) return 0; thr = 0; while (++thr < threads_per_subcore) if (cpu_online(cpu + thr)) return 0; /* Grab all hw threads so they can't go into the kernel */ for (thr = 1; thr < threads_per_subcore; ++thr) { if (kvmppc_grab_hwthread(cpu + thr)) { /* Couldn't grab one; let the others go */ do { kvmppc_release_hwthread(cpu + thr); } while (--thr > 0); return 0; } } return 1; } /* * A list of virtual cores for each physical CPU. * These are vcores that could run but their runner VCPU tasks are * (or may be) preempted. 
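 * Vcores are added in kvmppc_vcore_preempt() and removed either in
 * kvmppc_vcore_end_preempt() or when another runner picks them up as
 * piggybacks in collect_piggybacks().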
*/ struct preempted_vcore_list { struct list_head list; spinlock_t lock; }; static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores); static void init_vcore_lists(void) { int cpu; for_each_possible_cpu(cpu) { struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu); spin_lock_init(&lp->lock); INIT_LIST_HEAD(&lp->list); } } static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc) { struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); vc->vcore_state = VCORE_PREEMPT; vc->pcpu = smp_processor_id(); if (vc->num_threads < threads_per_vcore(vc->kvm)) { spin_lock(&lp->lock); list_add_tail(&vc->preempt_list, &lp->list); spin_unlock(&lp->lock); } /* Start accumulating stolen time */ kvmppc_core_start_stolen(vc, mftb()); } static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc) { struct preempted_vcore_list *lp; WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); kvmppc_core_end_stolen(vc, mftb()); if (!list_empty(&vc->preempt_list)) { lp = &per_cpu(preempted_vcores, vc->pcpu); spin_lock(&lp->lock); list_del_init(&vc->preempt_list); spin_unlock(&lp->lock); } vc->vcore_state = VCORE_INACTIVE; } /* * This stores information about the virtual cores currently * assigned to a physical core. */ struct core_info { int n_subcores; int max_subcore_threads; int total_threads; int subcore_threads[MAX_SUBCORES]; struct kvmppc_vcore *vc[MAX_SUBCORES]; }; /* * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7 * respectively in 2-way micro-threading (split-core) mode on POWER8. */ static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 }; static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc) { memset(cip, 0, sizeof(*cip)); cip->n_subcores = 1; cip->max_subcore_threads = vc->num_threads; cip->total_threads = vc->num_threads; cip->subcore_threads[0] = vc->num_threads; cip->vc[0] = vc; } static bool subcore_config_ok(int n_subcores, int n_threads) { /* * POWER9 "SMT4" cores are permanently in what is effectively a 4-way * split-core mode, with one thread per subcore. 
*/ if (cpu_has_feature(CPU_FTR_ARCH_300)) return n_subcores <= 4 && n_threads == 1; /* On POWER8, can only dynamically split if unsplit to begin with */ if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS) return false; if (n_subcores > MAX_SUBCORES) return false; if (n_subcores > 1) { if (!(dynamic_mt_modes & 2)) n_subcores = 4; if (n_subcores > 2 && !(dynamic_mt_modes & 4)) return false; } return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS; } static void init_vcore_to_run(struct kvmppc_vcore *vc) { vc->entry_exit_map = 0; vc->in_guest = 0; vc->napping_threads = 0; vc->conferring_threads = 0; vc->tb_offset_applied = 0; } static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) { int n_threads = vc->num_threads; int sub; if (!cpu_has_feature(CPU_FTR_ARCH_207S)) return false; /* In one_vm_per_core mode, require all vcores to be from the same vm */ if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm) return false; if (n_threads < cip->max_subcore_threads) n_threads = cip->max_subcore_threads; if (!subcore_config_ok(cip->n_subcores + 1, n_threads)) return false; cip->max_subcore_threads = n_threads; sub = cip->n_subcores; ++cip->n_subcores; cip->total_threads += vc->num_threads; cip->subcore_threads[sub] = vc->num_threads; cip->vc[sub] = vc; init_vcore_to_run(vc); list_del_init(&vc->preempt_list); return true; } /* * Work out whether it is possible to piggyback the execution of * vcore *pvc onto the execution of the other vcores described in *cip. */ static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip, int target_threads) { if (cip->total_threads + pvc->num_threads > target_threads) return false; return can_dynamic_split(pvc, cip); } static void prepare_threads(struct kvmppc_vcore *vc) { int i; struct kvm_vcpu *vcpu; for_each_runnable_thread(i, vcpu, vc) { if (signal_pending(vcpu->arch.run_task)) vcpu->arch.ret = -EINTR; else if (vcpu->arch.vpa.update_pending || vcpu->arch.slb_shadow.update_pending || vcpu->arch.dtl.update_pending) vcpu->arch.ret = RESUME_GUEST; else continue; kvmppc_remove_runnable(vc, vcpu, mftb()); wake_up(&vcpu->arch.cpu_run); } } static void collect_piggybacks(struct core_info *cip, int target_threads) { struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); struct kvmppc_vcore *pvc, *vcnext; spin_lock(&lp->lock); list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) { if (!spin_trylock(&pvc->lock)) continue; prepare_threads(pvc); if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) { list_del_init(&pvc->preempt_list); if (pvc->runner == NULL) { pvc->vcore_state = VCORE_INACTIVE; kvmppc_core_end_stolen(pvc, mftb()); } spin_unlock(&pvc->lock); continue; } if (!can_piggyback(pvc, cip, target_threads)) { spin_unlock(&pvc->lock); continue; } kvmppc_core_end_stolen(pvc, mftb()); pvc->vcore_state = VCORE_PIGGYBACK; if (cip->total_threads >= target_threads) break; } spin_unlock(&lp->lock); } static bool recheck_signals_and_mmu(struct core_info *cip) { int sub, i; struct kvm_vcpu *vcpu; struct kvmppc_vcore *vc; for (sub = 0; sub < cip->n_subcores; ++sub) { vc = cip->vc[sub]; if (!vc->kvm->arch.mmu_ready) return true; for_each_runnable_thread(i, vcpu, vc) if (signal_pending(vcpu->arch.run_task)) return true; } return false; } static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) { int still_running = 0, i; u64 now; long ret; struct kvm_vcpu *vcpu; spin_lock(&vc->lock); now = get_tb(); for_each_runnable_thread(i, vcpu, vc) { /* * It's safe to unlock the vcore in the loop here, 
because * for_each_runnable_thread() is safe against removal of * the vcpu, and the vcore state is VCORE_EXITING here, * so any vcpus becoming runnable will have their arch.trap * set to zero and can't actually run in the guest. */ spin_unlock(&vc->lock); /* cancel pending dec exception if dec is positive */ if (now < kvmppc_dec_expires_host_tb(vcpu) && kvmppc_core_pending_dec(vcpu)) kvmppc_core_dequeue_dec(vcpu); trace_kvm_guest_exit(vcpu); ret = RESUME_GUEST; if (vcpu->arch.trap) ret = kvmppc_handle_exit_hv(vcpu, vcpu->arch.run_task); vcpu->arch.ret = ret; vcpu->arch.trap = 0; spin_lock(&vc->lock); if (is_kvmppc_resume_guest(vcpu->arch.ret)) { if (vcpu->arch.pending_exceptions) kvmppc_core_prepare_to_enter(vcpu); if (vcpu->arch.ceded) kvmppc_set_timer(vcpu); else ++still_running; } else { kvmppc_remove_runnable(vc, vcpu, mftb()); wake_up(&vcpu->arch.cpu_run); } } if (!is_master) { if (still_running > 0) { kvmppc_vcore_preempt(vc); } else if (vc->runner) { vc->vcore_state = VCORE_PREEMPT; kvmppc_core_start_stolen(vc, mftb()); } else { vc->vcore_state = VCORE_INACTIVE; } if (vc->n_runnable > 0 && vc->runner == NULL) { /* make sure there's a candidate runner awake */ i = -1; vcpu = next_runnable_thread(vc, &i); wake_up(&vcpu->arch.cpu_run); } } spin_unlock(&vc->lock); } /* * Clear core from the list of active host cores as we are about to * enter the guest. Only do this if it is the primary thread of the * core (not if a subcore) that is entering the guest. */ static inline int kvmppc_clear_host_core(unsigned int cpu) { int core; if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu)) return 0; /* * Memory barrier can be omitted here as we will do a smp_wmb() * later in kvmppc_start_thread and we need ensure that state is * visible to other CPUs only after we enter guest. */ core = cpu >> threads_shift; kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0; return 0; } /* * Advertise this core as an active host core since we exited the guest * Only need to do this if it is the primary thread of the core that is * exiting. */ static inline int kvmppc_set_host_core(unsigned int cpu) { int core; if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu)) return 0; /* * Memory barrier can be omitted here because we do a spin_unlock * immediately after this which provides the memory barrier. */ core = cpu >> threads_shift; kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1; return 0; } static void set_irq_happened(int trap) { switch (trap) { case BOOK3S_INTERRUPT_EXTERNAL: local_paca->irq_happened |= PACA_IRQ_EE; break; case BOOK3S_INTERRUPT_H_DOORBELL: local_paca->irq_happened |= PACA_IRQ_DBELL; break; case BOOK3S_INTERRUPT_HMI: local_paca->irq_happened |= PACA_IRQ_HMI; break; case BOOK3S_INTERRUPT_SYSTEM_RESET: replay_system_reset(); break; } } /* * Run a set of guest threads on a physical core. * Called with vc->lock held. 
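 * Returns with vc->lock still held and the vcore marked VCORE_INACTIVE.
 * This path is only used on POWER8 and earlier; POWER9 and later use the
 * single-vcpu kvmhv_p9_guest_entry() path instead.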
*/ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) { struct kvm_vcpu *vcpu; int i; int srcu_idx; struct core_info core_info; struct kvmppc_vcore *pvc; struct kvm_split_mode split_info, *sip; int split, subcore_size, active; int sub; bool thr0_done; unsigned long cmd_bit, stat_bit; int pcpu, thr; int target_threads; int controlled_threads; int trap; bool is_power8; if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300))) return; /* * Remove from the list any threads that have a signal pending * or need a VPA update done */ prepare_threads(vc); /* if the runner is no longer runnable, let the caller pick a new one */ if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) return; /* * Initialize *vc. */ init_vcore_to_run(vc); vc->preempt_tb = TB_NIL; /* * Number of threads that we will be controlling: the same as * the number of threads per subcore, except on POWER9, * where it's 1 because the threads are (mostly) independent. */ controlled_threads = threads_per_vcore(vc->kvm); /* * Make sure we are running on primary threads, and that secondary * threads are offline. Also check if the number of threads in this * guest are greater than the current system threads per guest. */ if ((controlled_threads > 1) && ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { for_each_runnable_thread(i, vcpu, vc) { vcpu->arch.ret = -EBUSY; kvmppc_remove_runnable(vc, vcpu, mftb()); wake_up(&vcpu->arch.cpu_run); } goto out; } /* * See if we could run any other vcores on the physical core * along with this one. */ init_core_info(&core_info, vc); pcpu = smp_processor_id(); target_threads = controlled_threads; if (target_smt_mode && target_smt_mode < target_threads) target_threads = target_smt_mode; if (vc->num_threads < target_threads) collect_piggybacks(&core_info, target_threads); /* * Hard-disable interrupts, and check resched flag and signals. * If we need to reschedule or deliver a signal, clean up * and return without going into the guest(s). * If the mmu_ready flag has been cleared, don't go into the * guest because that means a HPT resize operation is in progress. 
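	 * In that case each piggybacked vcore is put back on the preempted
	 * list and any hardware threads we grabbed are released again.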
*/ local_irq_disable(); hard_irq_disable(); if (lazy_irq_pending() || need_resched() || recheck_signals_and_mmu(&core_info)) { local_irq_enable(); vc->vcore_state = VCORE_INACTIVE; /* Unlock all except the primary vcore */ for (sub = 1; sub < core_info.n_subcores; ++sub) { pvc = core_info.vc[sub]; /* Put back on to the preempted vcores list */ kvmppc_vcore_preempt(pvc); spin_unlock(&pvc->lock); } for (i = 0; i < controlled_threads; ++i) kvmppc_release_hwthread(pcpu + i); return; } kvmppc_clear_host_core(pcpu); /* Decide on micro-threading (split-core) mode */ subcore_size = threads_per_subcore; cmd_bit = stat_bit = 0; split = core_info.n_subcores; sip = NULL; is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S); if (split > 1) { sip = &split_info; memset(&split_info, 0, sizeof(split_info)); for (sub = 0; sub < core_info.n_subcores; ++sub) split_info.vc[sub] = core_info.vc[sub]; if (is_power8) { if (split == 2 && (dynamic_mt_modes & 2)) { cmd_bit = HID0_POWER8_1TO2LPAR; stat_bit = HID0_POWER8_2LPARMODE; } else { split = 4; cmd_bit = HID0_POWER8_1TO4LPAR; stat_bit = HID0_POWER8_4LPARMODE; } subcore_size = MAX_SMT_THREADS / split; split_info.rpr = mfspr(SPRN_RPR); split_info.pmmar = mfspr(SPRN_PMMAR); split_info.ldbar = mfspr(SPRN_LDBAR); split_info.subcore_size = subcore_size; } else { split_info.subcore_size = 1; } /* order writes to split_info before kvm_split_mode pointer */ smp_wmb(); } for (thr = 0; thr < controlled_threads; ++thr) { struct paca_struct *paca = paca_ptrs[pcpu + thr]; paca->kvm_hstate.napping = 0; paca->kvm_hstate.kvm_split_mode = sip; } /* Initiate micro-threading (split-core) on POWER8 if required */ if (cmd_bit) { unsigned long hid0 = mfspr(SPRN_HID0); hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS; mb(); mtspr(SPRN_HID0, hid0); isync(); for (;;) { hid0 = mfspr(SPRN_HID0); if (hid0 & stat_bit) break; cpu_relax(); } } /* * On POWER8, set RWMR register. * Since it only affects PURR and SPURR, it doesn't affect * the host, so we don't save/restore the host value. */ if (is_power8) { unsigned long rwmr_val = RWMR_RPA_P8_8THREAD; int n_online = atomic_read(&vc->online_count); /* * Use the 8-thread value if we're doing split-core * or if the vcore's online count looks bogus. */ if (split == 1 && threads_per_subcore == MAX_SMT_THREADS && n_online >= 1 && n_online <= MAX_SMT_THREADS) rwmr_val = p8_rwmr_values[n_online]; mtspr(SPRN_RWMR, rwmr_val); } /* Start all the threads */ active = 0; for (sub = 0; sub < core_info.n_subcores; ++sub) { thr = is_power8 ? subcore_thread_map[sub] : sub; thr0_done = false; active |= 1 << thr; pvc = core_info.vc[sub]; pvc->pcpu = pcpu + thr; for_each_runnable_thread(i, vcpu, pvc) { /* * XXX: is kvmppc_start_thread called too late here? * It updates vcpu->cpu and vcpu->arch.thread_cpu * which are used by kvmppc_fast_vcpu_kick_hv(), but * kick is called after new exceptions become available * and exceptions are checked earlier than here, by * kvmppc_core_prepare_to_enter. */ kvmppc_start_thread(vcpu, pvc); kvmppc_update_vpa_dispatch(vcpu, pvc); trace_kvm_guest_enter(vcpu); if (!vcpu->arch.ptid) thr0_done = true; active |= 1 << (thr + vcpu->arch.ptid); } /* * We need to start the first thread of each subcore * even if it doesn't have a vcpu. */ if (!thr0_done) kvmppc_start_thread(NULL, pvc); } /* * Ensure that split_info.do_nap is set after setting * the vcore pointer in the PACA of the secondaries. */ smp_mb(); /* * When doing micro-threading, poke the inactive threads as well. 
* This gets them to the nap instruction after kvm_do_nap, * which reduces the time taken to unsplit later. */ if (cmd_bit) { split_info.do_nap = 1; /* ask secondaries to nap when done */ for (thr = 1; thr < threads_per_subcore; ++thr) if (!(active & (1 << thr))) kvmppc_ipi_thread(pcpu + thr); } vc->vcore_state = VCORE_RUNNING; preempt_disable(); trace_kvmppc_run_core(vc, 0); for (sub = 0; sub < core_info.n_subcores; ++sub) spin_unlock(&core_info.vc[sub]->lock); guest_timing_enter_irqoff(); srcu_idx = srcu_read_lock(&vc->kvm->srcu); guest_state_enter_irqoff(); this_cpu_disable_ftrace(); trap = __kvmppc_vcore_entry(); this_cpu_enable_ftrace(); guest_state_exit_irqoff(); srcu_read_unlock(&vc->kvm->srcu, srcu_idx); set_irq_happened(trap); spin_lock(&vc->lock); /* prevent other vcpu threads from doing kvmppc_start_thread() now */ vc->vcore_state = VCORE_EXITING; /* wait for secondary threads to finish writing their state to memory */ kvmppc_wait_for_nap(controlled_threads); /* Return to whole-core mode if we split the core earlier */ if (cmd_bit) { unsigned long hid0 = mfspr(SPRN_HID0); unsigned long loops = 0; hid0 &= ~HID0_POWER8_DYNLPARDIS; stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE; mb(); mtspr(SPRN_HID0, hid0); isync(); for (;;) { hid0 = mfspr(SPRN_HID0); if (!(hid0 & stat_bit)) break; cpu_relax(); ++loops; } split_info.do_nap = 0; } kvmppc_set_host_core(pcpu); if (!vtime_accounting_enabled_this_cpu()) { local_irq_enable(); /* * Service IRQs here before guest_timing_exit_irqoff() so any * ticks that occurred while running the guest are accounted to * the guest. If vtime accounting is enabled, accounting uses * TB rather than ticks, so it can be done without enabling * interrupts here, which has the problem that it accounts * interrupt processing overhead to the host. */ local_irq_disable(); } guest_timing_exit_irqoff(); local_irq_enable(); /* Let secondaries go back to the offline loop */ for (i = 0; i < controlled_threads; ++i) { kvmppc_release_hwthread(pcpu + i); if (sip && sip->napped[i]) kvmppc_ipi_thread(pcpu + i); } spin_unlock(&vc->lock); /* make sure updates to secondary vcpu structs are visible now */ smp_mb(); preempt_enable(); for (sub = 0; sub < core_info.n_subcores; ++sub) { pvc = core_info.vc[sub]; post_guest_process(pvc, pvc == vc); } spin_lock(&vc->lock); out: vc->vcore_state = VCORE_INACTIVE; trace_kvmppc_run_core(vc, 1); } static inline bool hcall_is_xics(unsigned long req) { return req == H_EOI || req == H_CPPR || req == H_IPI || req == H_IPOLL || req == H_XIRR || req == H_XIRR_X; } static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu) { struct lppaca *lp = vcpu->arch.vpa.pinned_addr; if (lp) { u32 yield_count = be32_to_cpu(lp->yield_count) + 1; lp->yield_count = cpu_to_be32(yield_count); vcpu->arch.vpa.dirty = 1; } } /* call our hypervisor to load up HV regs and go */ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb) { struct kvmppc_vcore *vc = vcpu->arch.vcore; unsigned long host_psscr; unsigned long msr; struct hv_guest_state hvregs; struct p9_host_os_sprs host_os_sprs; s64 dec; int trap; msr = mfmsr(); save_p9_host_os_sprs(&host_os_sprs); /* * We need to save and restore the guest visible part of the * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor * doesn't do this for us. Note only required if pseries since * this is done in kvmhv_vcpu_entry_p9() below otherwise. 
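	 * The host value read here is restored after the H_ENTER_NESTED
	 * call returns, provided the guest PSSCR actually differs from it.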
*/ host_psscr = mfspr(SPRN_PSSCR_PR); kvmppc_msr_hard_disable_set_facilities(vcpu, msr); if (lazy_irq_pending()) return 0; if (unlikely(load_vcpu_state(vcpu, &host_os_sprs))) msr = mfmsr(); /* TM restore can update msr */ if (vcpu->arch.psscr != host_psscr) mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); kvmhv_save_hv_regs(vcpu, &hvregs); hvregs.lpcr = lpcr; hvregs.amor = ~0; vcpu->arch.regs.msr = vcpu->arch.shregs.msr; hvregs.version = HV_GUEST_STATE_VERSION; if (vcpu->arch.nested) { hvregs.lpid = vcpu->arch.nested->shadow_lpid; hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; } else { hvregs.lpid = vcpu->kvm->arch.lpid; hvregs.vcpu_token = vcpu->vcpu_id; } hvregs.hdec_expiry = time_limit; /* * When setting DEC, we must always deal with irq_work_raise * via NMI vs setting DEC. The problem occurs right as we * switch into guest mode if a NMI hits and sets pending work * and sets DEC, then that will apply to the guest and not * bring us back to the host. * * irq_work_raise could check a flag (or possibly LPCR[HDICE] * for example) and set HDEC to 1? That wouldn't solve the * nested hv case which needs to abort the hcall or zero the * time limit. * * XXX: Another day's problem. */ mtspr(SPRN_DEC, kvmppc_dec_expires_host_tb(vcpu) - *tb); mtspr(SPRN_DAR, vcpu->arch.shregs.dar); mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); switch_pmu_to_guest(vcpu, &host_os_sprs); accumulate_time(vcpu, &vcpu->arch.in_guest); trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs), __pa(&vcpu->arch.regs)); accumulate_time(vcpu, &vcpu->arch.guest_exit); kvmhv_restore_hv_return_state(vcpu, &hvregs); switch_pmu_to_host(vcpu, &host_os_sprs); vcpu->arch.shregs.msr = vcpu->arch.regs.msr; vcpu->arch.shregs.dar = mfspr(SPRN_DAR); vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); store_vcpu_state(vcpu); dec = mfspr(SPRN_DEC); if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */ dec = (s32) dec; *tb = mftb(); vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset); timer_rearm_host_dec(*tb); restore_p9_host_os_sprs(vcpu, &host_os_sprs); if (vcpu->arch.psscr != host_psscr) mtspr(SPRN_PSSCR_PR, host_psscr); return trap; } /* * Guest entry for POWER9 and later CPUs. */ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb) { struct kvm *kvm = vcpu->kvm; struct kvm_nested_guest *nested = vcpu->arch.nested; u64 next_timer; int trap; next_timer = timer_get_next_tb(); if (*tb >= next_timer) return BOOK3S_INTERRUPT_HV_DECREMENTER; if (next_timer < time_limit) time_limit = next_timer; else if (*tb >= time_limit) /* nested time limit */ return BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER; vcpu->arch.ceded = 0; vcpu_vpa_increment_dispatch(vcpu); if (kvmhv_on_pseries()) { trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb); /* H_CEDE has to be handled now, not later */ if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested && kvmppc_get_gpr(vcpu, 3) == H_CEDE) { kvmppc_cede(vcpu); kvmppc_set_gpr(vcpu, 3, 0); trap = 0; } } else if (nested) { __this_cpu_write(cpu_in_guest, kvm); trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); __this_cpu_write(cpu_in_guest, NULL); } else { kvmppc_xive_push_vcpu(vcpu); __this_cpu_write(cpu_in_guest, kvm); trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); __this_cpu_write(cpu_in_guest, NULL); if (trap == BOOK3S_INTERRUPT_SYSCALL && !(vcpu->arch.shregs.msr & MSR_PR)) { unsigned long req = kvmppc_get_gpr(vcpu, 3); /* * XIVE rearm and XICS hcalls must be handled * before xive context is pulled (is this * true?) 
*/ if (req == H_CEDE) { /* H_CEDE has to be handled now */ kvmppc_cede(vcpu); if (!kvmppc_xive_rearm_escalation(vcpu)) { /* * Pending escalation so abort * the cede. */ vcpu->arch.ceded = 0; } kvmppc_set_gpr(vcpu, 3, 0); trap = 0; } else if (req == H_ENTER_NESTED) { /* * L2 should not run with the L1 * context so rearm and pull it. */ if (!kvmppc_xive_rearm_escalation(vcpu)) { /* * Pending escalation so abort * H_ENTER_NESTED. */ kvmppc_set_gpr(vcpu, 3, 0); trap = 0; } } else if (hcall_is_xics(req)) { int ret; ret = kvmppc_xive_xics_hcall(vcpu, req); if (ret != H_TOO_HARD) { kvmppc_set_gpr(vcpu, 3, ret); trap = 0; } } } kvmppc_xive_pull_vcpu(vcpu); if (kvm_is_radix(kvm)) vcpu->arch.slb_max = 0; } vcpu_vpa_increment_dispatch(vcpu); return trap; } /* * Wait for some other vcpu thread to execute us, and * wake us up when we need to handle something in the host. */ static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc, struct kvm_vcpu *vcpu, int wait_state) { DEFINE_WAIT(wait); prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { spin_unlock(&vc->lock); schedule(); spin_lock(&vc->lock); } finish_wait(&vcpu->arch.cpu_run, &wait); } static void grow_halt_poll_ns(struct kvmppc_vcore *vc) { if (!halt_poll_ns_grow) return; vc->halt_poll_ns *= halt_poll_ns_grow; if (vc->halt_poll_ns < halt_poll_ns_grow_start) vc->halt_poll_ns = halt_poll_ns_grow_start; } static void shrink_halt_poll_ns(struct kvmppc_vcore *vc) { if (halt_poll_ns_shrink == 0) vc->halt_poll_ns = 0; else vc->halt_poll_ns /= halt_poll_ns_shrink; } #ifdef CONFIG_KVM_XICS static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu) { if (!xics_on_xive()) return false; return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < vcpu->arch.xive_saved_state.cppr; } #else static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu) { return false; } #endif /* CONFIG_KVM_XICS */ static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu) { if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || kvmppc_doorbell_pending(vcpu) || xive_interrupt_pending(vcpu)) return true; return false; } static bool kvmppc_vcpu_check_block(struct kvm_vcpu *vcpu) { if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) return true; return false; } /* * Check to see if any of the runnable vcpus on the vcore have pending * exceptions or are no longer ceded */ static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc) { struct kvm_vcpu *vcpu; int i; for_each_runnable_thread(i, vcpu, vc) { if (kvmppc_vcpu_check_block(vcpu)) return 1; } return 0; } /* * All the vcpus in this vcore are idle, so wait for a decrementer * or external interrupt to one of the vcpus. vc->lock is held. 
*/ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) { ktime_t cur, start_poll, start_wait; int do_sleep = 1; u64 block_ns; WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); /* Poll for pending exceptions and ceded state */ cur = start_poll = ktime_get(); if (vc->halt_poll_ns) { ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns); ++vc->runner->stat.generic.halt_attempted_poll; vc->vcore_state = VCORE_POLLING; spin_unlock(&vc->lock); do { if (kvmppc_vcore_check_block(vc)) { do_sleep = 0; break; } cur = ktime_get(); } while (kvm_vcpu_can_poll(cur, stop)); spin_lock(&vc->lock); vc->vcore_state = VCORE_INACTIVE; if (!do_sleep) { ++vc->runner->stat.generic.halt_successful_poll; goto out; } } prepare_to_rcuwait(&vc->wait); set_current_state(TASK_INTERRUPTIBLE); if (kvmppc_vcore_check_block(vc)) { finish_rcuwait(&vc->wait); do_sleep = 0; /* If we polled, count this as a successful poll */ if (vc->halt_poll_ns) ++vc->runner->stat.generic.halt_successful_poll; goto out; } start_wait = ktime_get(); vc->vcore_state = VCORE_SLEEPING; trace_kvmppc_vcore_blocked(vc->runner, 0); spin_unlock(&vc->lock); schedule(); finish_rcuwait(&vc->wait); spin_lock(&vc->lock); vc->vcore_state = VCORE_INACTIVE; trace_kvmppc_vcore_blocked(vc->runner, 1); ++vc->runner->stat.halt_successful_wait; cur = ktime_get(); out: block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll); /* Attribute wait time */ if (do_sleep) { vc->runner->stat.generic.halt_wait_ns += ktime_to_ns(cur) - ktime_to_ns(start_wait); KVM_STATS_LOG_HIST_UPDATE( vc->runner->stat.generic.halt_wait_hist, ktime_to_ns(cur) - ktime_to_ns(start_wait)); /* Attribute failed poll time */ if (vc->halt_poll_ns) { vc->runner->stat.generic.halt_poll_fail_ns += ktime_to_ns(start_wait) - ktime_to_ns(start_poll); KVM_STATS_LOG_HIST_UPDATE( vc->runner->stat.generic.halt_poll_fail_hist, ktime_to_ns(start_wait) - ktime_to_ns(start_poll)); } } else { /* Attribute successful poll time */ if (vc->halt_poll_ns) { vc->runner->stat.generic.halt_poll_success_ns += ktime_to_ns(cur) - ktime_to_ns(start_poll); KVM_STATS_LOG_HIST_UPDATE( vc->runner->stat.generic.halt_poll_success_hist, ktime_to_ns(cur) - ktime_to_ns(start_poll)); } } /* Adjust poll time */ if (halt_poll_ns) { if (block_ns <= vc->halt_poll_ns) ; /* We slept and blocked for longer than the max halt time */ else if (vc->halt_poll_ns && block_ns > halt_poll_ns) shrink_halt_poll_ns(vc); /* We slept and our poll time is too small */ else if (vc->halt_poll_ns < halt_poll_ns && block_ns < halt_poll_ns) grow_halt_poll_ns(vc); if (vc->halt_poll_ns > halt_poll_ns) vc->halt_poll_ns = halt_poll_ns; } else vc->halt_poll_ns = 0; trace_kvmppc_vcore_wakeup(do_sleep, block_ns); } /* * This never fails for a radix guest, as none of the operations it does * for a radix guest can fail or have a way to report failure. 
*/ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu) { int r = 0; struct kvm *kvm = vcpu->kvm; mutex_lock(&kvm->arch.mmu_setup_lock); if (!kvm->arch.mmu_ready) { if (!kvm_is_radix(kvm)) r = kvmppc_hv_setup_htab_rma(vcpu); if (!r) { if (cpu_has_feature(CPU_FTR_ARCH_300)) kvmppc_setup_partition_table(kvm); kvm->arch.mmu_ready = 1; } } mutex_unlock(&kvm->arch.mmu_setup_lock); return r; } static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; int n_ceded, i, r; struct kvmppc_vcore *vc; struct kvm_vcpu *v; trace_kvmppc_run_vcpu_enter(vcpu); run->exit_reason = 0; vcpu->arch.ret = RESUME_GUEST; vcpu->arch.trap = 0; kvmppc_update_vpas(vcpu); /* * Synchronize with other threads in this virtual core */ vc = vcpu->arch.vcore; spin_lock(&vc->lock); vcpu->arch.ceded = 0; vcpu->arch.run_task = current; vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; vcpu->arch.busy_preempt = TB_NIL; WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); ++vc->n_runnable; /* * This happens the first time this is called for a vcpu. * If the vcore is already running, we may be able to start * this thread straight away and have it join in. */ if (!signal_pending(current)) { if ((vc->vcore_state == VCORE_PIGGYBACK || vc->vcore_state == VCORE_RUNNING) && !VCORE_IS_EXITING(vc)) { kvmppc_update_vpa_dispatch(vcpu, vc); kvmppc_start_thread(vcpu, vc); trace_kvm_guest_enter(vcpu); } else if (vc->vcore_state == VCORE_SLEEPING) { rcuwait_wake_up(&vc->wait); } } while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && !signal_pending(current)) { /* See if the MMU is ready to go */ if (!vcpu->kvm->arch.mmu_ready) { spin_unlock(&vc->lock); r = kvmhv_setup_mmu(vcpu); spin_lock(&vc->lock); if (r) { run->exit_reason = KVM_EXIT_FAIL_ENTRY; run->fail_entry. 
hardware_entry_failure_reason = 0; vcpu->arch.ret = r; break; } } if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) kvmppc_vcore_end_preempt(vc); if (vc->vcore_state != VCORE_INACTIVE) { kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE); continue; } for_each_runnable_thread(i, v, vc) { kvmppc_core_prepare_to_enter(v); if (signal_pending(v->arch.run_task)) { kvmppc_remove_runnable(vc, v, mftb()); v->stat.signal_exits++; v->run->exit_reason = KVM_EXIT_INTR; v->arch.ret = -EINTR; wake_up(&v->arch.cpu_run); } } if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) break; n_ceded = 0; for_each_runnable_thread(i, v, vc) { if (!kvmppc_vcpu_woken(v)) n_ceded += v->arch.ceded; else v->arch.ceded = 0; } vc->runner = vcpu; if (n_ceded == vc->n_runnable) { kvmppc_vcore_blocked(vc); } else if (need_resched()) { kvmppc_vcore_preempt(vc); /* Let something else run */ cond_resched_lock(&vc->lock); if (vc->vcore_state == VCORE_PREEMPT) kvmppc_vcore_end_preempt(vc); } else { kvmppc_run_core(vc); } vc->runner = NULL; } while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && (vc->vcore_state == VCORE_RUNNING || vc->vcore_state == VCORE_EXITING || vc->vcore_state == VCORE_PIGGYBACK)) kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) kvmppc_vcore_end_preempt(vc); if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { kvmppc_remove_runnable(vc, vcpu, mftb()); vcpu->stat.signal_exits++; run->exit_reason = KVM_EXIT_INTR; vcpu->arch.ret = -EINTR; } if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { /* Wake up some vcpu to run the core */ i = -1; v = next_runnable_thread(vc, &i); wake_up(&v->arch.cpu_run); } trace_kvmppc_run_vcpu_exit(vcpu); spin_unlock(&vc->lock); return vcpu->arch.ret; } int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr) { struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); struct kvm_run *run = vcpu->run; int trap, r, pcpu; int srcu_idx; struct kvmppc_vcore *vc; struct kvm *kvm = vcpu->kvm; struct kvm_nested_guest *nested = vcpu->arch.nested; unsigned long flags; u64 tb; trace_kvmppc_run_vcpu_enter(vcpu); run->exit_reason = 0; vcpu->arch.ret = RESUME_GUEST; vcpu->arch.trap = 0; vc = vcpu->arch.vcore; vcpu->arch.ceded = 0; vcpu->arch.run_task = current; vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; /* See if the MMU is ready to go */ if (unlikely(!kvm->arch.mmu_ready)) { r = kvmhv_setup_mmu(vcpu); if (r) { run->exit_reason = KVM_EXIT_FAIL_ENTRY; run->fail_entry.hardware_entry_failure_reason = 0; vcpu->arch.ret = r; return r; } } if (need_resched()) cond_resched(); kvmppc_update_vpas(vcpu); preempt_disable(); pcpu = smp_processor_id(); if (kvm_is_radix(kvm)) kvmppc_prepare_radix_vcpu(vcpu, pcpu); /* flags save not required, but irq_pmu has no disable/enable API */ powerpc_local_irq_pmu_save(flags); vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; if (signal_pending(current)) goto sigpend; if (need_resched() || !kvm->arch.mmu_ready) goto out; vcpu->cpu = pcpu; vcpu->arch.thread_cpu = pcpu; vc->pcpu = pcpu; local_paca->kvm_hstate.kvm_vcpu = vcpu; local_paca->kvm_hstate.ptid = 0; local_paca->kvm_hstate.fake_suspend = 0; /* * Orders set cpu/thread_cpu vs testing for pending interrupts and * doorbells below. The other side is when these fields are set vs * kvmppc_fast_vcpu_kick_hv reading the cpu/thread_cpu fields to * kick a vCPU to notice the pending interrupt. 
*/ smp_mb(); if (!nested) { kvmppc_core_prepare_to_enter(vcpu); if (vcpu->arch.shregs.msr & MSR_EE) { if (xive_interrupt_pending(vcpu)) kvmppc_inject_interrupt_hv(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0); } else if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions)) { lpcr |= LPCR_MER; } } else if (vcpu->arch.pending_exceptions || vcpu->arch.doorbell_request || xive_interrupt_pending(vcpu)) { vcpu->arch.ret = RESUME_HOST; goto out; } if (vcpu->arch.timer_running) { hrtimer_try_to_cancel(&vcpu->arch.dec_timer); vcpu->arch.timer_running = 0; } tb = mftb(); kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + vc->tb_offset); trace_kvm_guest_enter(vcpu); guest_timing_enter_irqoff(); srcu_idx = srcu_read_lock(&kvm->srcu); guest_state_enter_irqoff(); this_cpu_disable_ftrace(); trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr, &tb); vcpu->arch.trap = trap; this_cpu_enable_ftrace(); guest_state_exit_irqoff(); srcu_read_unlock(&kvm->srcu, srcu_idx); set_irq_happened(trap); vcpu->cpu = -1; vcpu->arch.thread_cpu = -1; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; if (!vtime_accounting_enabled_this_cpu()) { powerpc_local_irq_pmu_restore(flags); /* * Service IRQs here before guest_timing_exit_irqoff() so any * ticks that occurred while running the guest are accounted to * the guest. If vtime accounting is enabled, accounting uses * TB rather than ticks, so it can be done without enabling * interrupts here, which has the problem that it accounts * interrupt processing overhead to the host. */ powerpc_local_irq_pmu_save(flags); } guest_timing_exit_irqoff(); powerpc_local_irq_pmu_restore(flags); preempt_enable(); /* * cancel pending decrementer exception if DEC is now positive, or if * entering a nested guest in which case the decrementer is now owned * by L2 and the L1 decrementer is provided in hdec_expires */ if (kvmppc_core_pending_dec(vcpu) && ((tb < kvmppc_dec_expires_host_tb(vcpu)) || (trap == BOOK3S_INTERRUPT_SYSCALL && kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED))) kvmppc_core_dequeue_dec(vcpu); trace_kvm_guest_exit(vcpu); r = RESUME_GUEST; if (trap) { if (!nested) r = kvmppc_handle_exit_hv(vcpu, current); else r = kvmppc_handle_nested_exit(vcpu); } vcpu->arch.ret = r; if (is_kvmppc_resume_guest(r) && !kvmppc_vcpu_check_block(vcpu)) { kvmppc_set_timer(vcpu); prepare_to_rcuwait(wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (signal_pending(current)) { vcpu->stat.signal_exits++; run->exit_reason = KVM_EXIT_INTR; vcpu->arch.ret = -EINTR; break; } if (kvmppc_vcpu_check_block(vcpu)) break; trace_kvmppc_vcore_blocked(vcpu, 0); schedule(); trace_kvmppc_vcore_blocked(vcpu, 1); } finish_rcuwait(wait); } vcpu->arch.ceded = 0; done: trace_kvmppc_run_vcpu_exit(vcpu); return vcpu->arch.ret; sigpend: vcpu->stat.signal_exits++; run->exit_reason = KVM_EXIT_INTR; vcpu->arch.ret = -EINTR; out: vcpu->cpu = -1; vcpu->arch.thread_cpu = -1; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; powerpc_local_irq_pmu_restore(flags); preempt_enable(); goto done; } static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; int r; int srcu_idx; struct kvm *kvm; unsigned long msr; start_timing(vcpu, &vcpu->arch.vcpu_entry); if (!vcpu->arch.sane) { run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return -EINVAL; } /* No need to go into the guest when all we'll do is come back out */ if (signal_pending(current)) { run->exit_reason = KVM_EXIT_INTR; return -EINTR; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Don't allow entry with a suspended transaction, because * the guest entry/exit code will lose 
it. */ if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && (current->thread.regs->msr & MSR_TM)) { if (MSR_TM_ACTIVE(current->thread.regs->msr)) { run->exit_reason = KVM_EXIT_FAIL_ENTRY; run->fail_entry.hardware_entry_failure_reason = 0; return -EINVAL; } } #endif /* * Force online to 1 for the sake of old userspace which doesn't * set it. */ if (!vcpu->arch.online) { atomic_inc(&vcpu->arch.vcore->online_count); vcpu->arch.online = 1; } kvmppc_core_prepare_to_enter(vcpu); kvm = vcpu->kvm; atomic_inc(&kvm->arch.vcpus_running); /* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */ smp_mb(); msr = 0; if (IS_ENABLED(CONFIG_PPC_FPU)) msr |= MSR_FP; if (cpu_has_feature(CPU_FTR_ALTIVEC)) msr |= MSR_VEC; if (cpu_has_feature(CPU_FTR_VSX)) msr |= MSR_VSX; if ((cpu_has_feature(CPU_FTR_TM) || cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) && (vcpu->arch.hfscr & HFSCR_TM)) msr |= MSR_TM; msr = msr_check_and_set(msr); kvmppc_save_user_regs(); kvmppc_save_current_sprs(); if (!cpu_has_feature(CPU_FTR_ARCH_300)) vcpu->arch.waitp = &vcpu->arch.vcore->wait; vcpu->arch.pgdir = kvm->mm->pgd; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; do { accumulate_time(vcpu, &vcpu->arch.guest_entry); if (cpu_has_feature(CPU_FTR_ARCH_300)) r = kvmhv_run_single_vcpu(vcpu, ~(u64)0, vcpu->arch.vcore->lpcr); else r = kvmppc_run_vcpu(vcpu); if (run->exit_reason == KVM_EXIT_PAPR_HCALL) { accumulate_time(vcpu, &vcpu->arch.hcall); if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) { /* * These should have been caught reflected * into the guest by now. Final sanity check: * don't allow userspace to execute hcalls in * the hypervisor. */ r = RESUME_GUEST; continue; } trace_kvm_hcall_enter(vcpu); r = kvmppc_pseries_do_hcall(vcpu); trace_kvm_hcall_exit(vcpu, r); kvmppc_core_prepare_to_enter(vcpu); } else if (r == RESUME_PAGE_FAULT) { accumulate_time(vcpu, &vcpu->arch.pg_fault); srcu_idx = srcu_read_lock(&kvm->srcu); r = kvmppc_book3s_hv_page_fault(vcpu, vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); srcu_read_unlock(&kvm->srcu, srcu_idx); } else if (r == RESUME_PASSTHROUGH) { if (WARN_ON(xics_on_xive())) r = H_SUCCESS; else r = kvmppc_xics_rm_complete(vcpu, 0); } } while (is_kvmppc_resume_guest(r)); accumulate_time(vcpu, &vcpu->arch.vcpu_exit); vcpu->arch.state = KVMPPC_VCPU_NOTREADY; atomic_dec(&kvm->arch.vcpus_running); srr_regs_clobbered(); end_timing(vcpu); return r; } static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, int shift, int sllp) { (*sps)->page_shift = shift; (*sps)->slb_enc = sllp; (*sps)->enc[0].page_shift = shift; (*sps)->enc[0].pte_enc = kvmppc_pgsize_lp_encoding(shift, shift); /* * Add 16MB MPSS support (may get filtered out by userspace) */ if (shift != 24) { int penc = kvmppc_pgsize_lp_encoding(shift, 24); if (penc != -1) { (*sps)->enc[1].page_shift = 24; (*sps)->enc[1].pte_enc = penc; } } (*sps)++; } static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, struct kvm_ppc_smmu_info *info) { struct kvm_ppc_one_seg_page_size *sps; /* * POWER7, POWER8 and POWER9 all support 32 storage keys for data. * POWER7 doesn't support keys for instruction accesses, * POWER8 and POWER9 do. */ info->data_keys = 32; info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 
32 : 0; /* POWER7, 8 and 9 all have 1T segments and 32-entry SLB */ info->flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS; info->slb_size = 32; /* We only support these sizes for now, and no muti-size segments */ sps = &info->sps[0]; kvmppc_add_seg_page_size(&sps, 12, 0); kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01); kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L); /* If running as a nested hypervisor, we don't support HPT guests */ if (kvmhv_on_pseries()) info->flags |= KVM_PPC_NO_HASH; return 0; } /* * Get (and clear) the dirty memory log for a memory slot. */ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm, struct kvm_dirty_log *log) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int r; unsigned long n, i; unsigned long *buf, *p; struct kvm_vcpu *vcpu; mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_USER_MEM_SLOTS) goto out; slots = kvm_memslots(kvm); memslot = id_to_memslot(slots, log->slot); r = -ENOENT; if (!memslot || !memslot->dirty_bitmap) goto out; /* * Use second half of bitmap area because both HPT and radix * accumulate bits in the first half. */ n = kvm_dirty_bitmap_bytes(memslot); buf = memslot->dirty_bitmap + n / sizeof(long); memset(buf, 0, n); if (kvm_is_radix(kvm)) r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf); else r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf); if (r) goto out; /* * We accumulate dirty bits in the first half of the * memslot's dirty_bitmap area, for when pages are paged * out or modified by the host directly. Pick up these * bits and add them to the map. */ p = memslot->dirty_bitmap; for (i = 0; i < n / sizeof(long); ++i) buf[i] |= xchg(&p[i], 0); /* Harvest dirty bits from VPA and DTL updates */ /* Note: we never modify the SLB shadow buffer areas */ kvm_for_each_vcpu(i, vcpu, kvm) { spin_lock(&vcpu->arch.vpa_update_lock); kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); spin_unlock(&vcpu->arch.vpa_update_lock); } r = -EFAULT; if (copy_to_user(log->dirty_bitmap, buf, n)) goto out; r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot) { vfree(slot->arch.rmap); slot->arch.rmap = NULL; } static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) { if (change == KVM_MR_CREATE) { unsigned long size = array_size(new->npages, sizeof(*new->arch.rmap)); if ((size >> PAGE_SHIFT) > totalram_pages()) return -ENOMEM; new->arch.rmap = vzalloc(size); if (!new->arch.rmap) return -ENOMEM; } else if (change != KVM_MR_DELETE) { new->arch.rmap = old->arch.rmap; } return 0; } static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { /* * If we are creating or modifying a memslot, it might make * some address that was previously cached as emulated * MMIO be no longer emulated MMIO, so invalidate * all the caches of emulated MMIO translations. */ if (change != KVM_MR_DELETE) atomic64_inc(&kvm->arch.mmio_update); /* * For change == KVM_MR_MOVE or KVM_MR_DELETE, higher levels * have already called kvm_arch_flush_shadow_memslot() to * flush shadow mappings. For KVM_MR_CREATE we have no * previous mappings. So the only case to handle is * KVM_MR_FLAGS_ONLY when the KVM_MEM_LOG_DIRTY_PAGES bit * has been changed. 
* For radix guests, we flush on setting KVM_MEM_LOG_DIRTY_PAGES * to get rid of any THP PTEs in the partition-scoped page tables * so we can track dirtiness at the page level; we flush when * clearing KVM_MEM_LOG_DIRTY_PAGES so that we can go back to * using THP PTEs. */ if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) && ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES)) kvmppc_radix_flush_memslot(kvm, old); /* * If UV hasn't yet called H_SVM_INIT_START, don't register memslots. */ if (!kvm->arch.secure_guest) return; switch (change) { case KVM_MR_CREATE: /* * @TODO kvmppc_uvmem_memslot_create() can fail and * return error. Fix this. */ kvmppc_uvmem_memslot_create(kvm, new); break; case KVM_MR_DELETE: kvmppc_uvmem_memslot_delete(kvm, old); break; default: /* TODO: Handle KVM_MR_MOVE */ break; } } /* * Update LPCR values in kvm->arch and in vcores. * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion * of kvm->arch.lpcr update). */ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) { long int i; u32 cores_done = 0; if ((kvm->arch.lpcr & mask) == lpcr) return; kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; for (i = 0; i < KVM_MAX_VCORES; ++i) { struct kvmppc_vcore *vc = kvm->arch.vcores[i]; if (!vc) continue; spin_lock(&vc->lock); vc->lpcr = (vc->lpcr & ~mask) | lpcr; verify_lpcr(kvm, vc->lpcr); spin_unlock(&vc->lock); if (++cores_done >= kvm->arch.online_vcores) break; } } void kvmppc_setup_partition_table(struct kvm *kvm) { unsigned long dw0, dw1; if (!kvm_is_radix(kvm)) { /* PS field - page size for VRMA */ dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); /* HTABSIZE and HTABORG fields */ dw0 |= kvm->arch.sdr1; /* Second dword as set by userspace */ dw1 = kvm->arch.process_table; } else { dw0 = PATB_HR | radix__get_tree_size() | __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; dw1 = PATB_GR | kvm->arch.process_table; } kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); } /* * Set up HPT (hashed page table) and RMA (real-mode area). * Must be called with kvm->arch.mmu_setup_lock held. */ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) { int err = 0; struct kvm *kvm = vcpu->kvm; unsigned long hva; struct kvm_memory_slot *memslot; struct vm_area_struct *vma; unsigned long lpcr = 0, senc; unsigned long psize, porder; int srcu_idx; /* Allocate hashed page table (if not done already) and reset it */ if (!kvm->arch.hpt.virt) { int order = KVM_DEFAULT_HPT_ORDER; struct kvm_hpt_info info; err = kvmppc_allocate_hpt(&info, order); /* If we get here, it means userspace didn't specify a * size explicitly. So, try successively smaller * sizes if the default failed. 
*/ while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER) err = kvmppc_allocate_hpt(&info, order); if (err < 0) { pr_err("KVM: Couldn't alloc HPT\n"); goto out; } kvmppc_set_hpt(kvm, &info); } /* Look up the memslot for guest physical address 0 */ srcu_idx = srcu_read_lock(&kvm->srcu); memslot = gfn_to_memslot(kvm, 0); /* We must have some memory at 0 by now */ err = -EINVAL; if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) goto out_srcu; /* Look up the VMA for the start of this memory slot */ hva = memslot->userspace_addr; mmap_read_lock(kvm->mm); vma = vma_lookup(kvm->mm, hva); if (!vma || (vma->vm_flags & VM_IO)) goto up_out; psize = vma_kernel_pagesize(vma); mmap_read_unlock(kvm->mm); /* We can handle 4k, 64k or 16M pages in the VRMA */ if (psize >= 0x1000000) psize = 0x1000000; else if (psize >= 0x10000) psize = 0x10000; else psize = 0x1000; porder = __ilog2(psize); senc = slb_pgsize_encoding(psize); kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | (VRMA_VSID << SLB_VSID_SHIFT_1T); /* Create HPTEs in the hash page table for the VRMA */ kvmppc_map_vrma(vcpu, memslot, porder); /* Update VRMASD field in the LPCR */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) { /* the -4 is to account for senc values starting at 0x10 */ lpcr = senc << (LPCR_VRMASD_SH - 4); kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); } /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ smp_wmb(); err = 0; out_srcu: srcu_read_unlock(&kvm->srcu, srcu_idx); out: return err; up_out: mmap_read_unlock(kvm->mm); goto out_srcu; } /* * Must be called with kvm->arch.mmu_setup_lock held and * mmu_ready = 0 and no vcpus running. */ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm) { unsigned long lpcr, lpcr_mask; if (nesting_enabled(kvm)) kvmhv_release_all_nested(kvm); kvmppc_rmap_reset(kvm); kvm->arch.process_table = 0; /* Mutual exclusion with kvm_unmap_gfn_range etc. */ spin_lock(&kvm->mmu_lock); kvm->arch.radix = 0; spin_unlock(&kvm->mmu_lock); kvmppc_free_radix(kvm); lpcr = LPCR_VPM1; lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR; if (cpu_has_feature(CPU_FTR_ARCH_31)) lpcr_mask |= LPCR_HAIL; kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); return 0; } /* * Must be called with kvm->arch.mmu_setup_lock held and * mmu_ready = 0 and no vcpus running. */ int kvmppc_switch_mmu_to_radix(struct kvm *kvm) { unsigned long lpcr, lpcr_mask; int err; err = kvmppc_init_vm_radix(kvm); if (err) return err; kvmppc_rmap_reset(kvm); /* Mutual exclusion with kvm_unmap_gfn_range etc. */ spin_lock(&kvm->mmu_lock); kvm->arch.radix = 1; spin_unlock(&kvm->mmu_lock); kvmppc_free_hpt(&kvm->arch.hpt); lpcr = LPCR_UPRT | LPCR_GTSE | LPCR_HR; lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR; if (cpu_has_feature(CPU_FTR_ARCH_31)) { lpcr_mask |= LPCR_HAIL; if (cpu_has_feature(CPU_FTR_HVMODE) && (kvm->arch.host_lpcr & LPCR_HAIL)) lpcr |= LPCR_HAIL; } kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); return 0; } #ifdef CONFIG_KVM_XICS /* * Allocate a per-core structure for managing state about which cores are * running in the host versus the guest and for exchanging data between * real mode KVM and CPU running in the host. * This is only done for the first VM. * The allocated structure stays even if all VMs have stopped. * It is only freed when the kvm-hv module is unloaded. * It's OK for this routine to fail, we just don't support host * core operations like redirecting H_IPI wakeups. 
*/ void kvmppc_alloc_host_rm_ops(void) { struct kvmppc_host_rm_ops *ops; unsigned long l_ops; int cpu, core; int size; if (cpu_has_feature(CPU_FTR_ARCH_300)) return; /* Not the first time here ? */ if (kvmppc_host_rm_ops_hv != NULL) return; ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL); if (!ops) return; size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core); ops->rm_core = kzalloc(size, GFP_KERNEL); if (!ops->rm_core) { kfree(ops); return; } cpus_read_lock(); for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) { if (!cpu_online(cpu)) continue; core = cpu >> threads_shift; ops->rm_core[core].rm_state.in_host = 1; } ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv; /* * Make the contents of the kvmppc_host_rm_ops structure visible * to other CPUs before we assign it to the global variable. * Do an atomic assignment (no locks used here), but if someone * beats us to it, just free our copy and return. */ smp_wmb(); l_ops = (unsigned long) ops; if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) { cpus_read_unlock(); kfree(ops->rm_core); kfree(ops); return; } cpuhp_setup_state_nocalls_cpuslocked(CPUHP_KVM_PPC_BOOK3S_PREPARE, "ppc/kvm_book3s:prepare", kvmppc_set_host_core, kvmppc_clear_host_core); cpus_read_unlock(); } void kvmppc_free_host_rm_ops(void) { if (kvmppc_host_rm_ops_hv) { cpuhp_remove_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE); kfree(kvmppc_host_rm_ops_hv->rm_core); kfree(kvmppc_host_rm_ops_hv); kvmppc_host_rm_ops_hv = NULL; } } #endif static int kvmppc_core_init_vm_hv(struct kvm *kvm) { unsigned long lpcr, lpid; int ret; mutex_init(&kvm->arch.uvmem_lock); INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); mutex_init(&kvm->arch.mmu_setup_lock); /* Allocate the guest's logical partition ID */ lpid = kvmppc_alloc_lpid(); if ((long)lpid < 0) return -ENOMEM; kvm->arch.lpid = lpid; kvmppc_alloc_host_rm_ops(); kvmhv_vm_nested_init(kvm); /* * Since we don't flush the TLB when tearing down a VM, * and this lpid might have previously been used, * make sure we flush on each core before running the new VM. * On POWER9, the tlbie in mmu_partition_table_set_entry() * does this flush for us. */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) cpumask_setall(&kvm->arch.need_tlb_flush); /* Start out with the default set of hcalls enabled */ memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, sizeof(kvm->arch.enabled_hcalls)); if (!cpu_has_feature(CPU_FTR_ARCH_300)) kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); /* Init LPCR for virtual RMA mode */ if (cpu_has_feature(CPU_FTR_HVMODE)) { kvm->arch.host_lpid = mfspr(SPRN_LPID); kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); lpcr &= LPCR_PECE | LPCR_LPES; } else { /* * The L2 LPES mode will be set by the L0 according to whether * or not it needs to take external interrupts in HV mode. */ lpcr = 0; } lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | LPCR_VPM0 | LPCR_VPM1; kvm->arch.vrma_slb_v = SLB_VSID_B_1T | (VRMA_VSID << SLB_VSID_SHIFT_1T); /* On POWER8 turn on online bit to enable PURR/SPURR */ if (cpu_has_feature(CPU_FTR_ARCH_207S)) lpcr |= LPCR_ONL; /* * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed) * Set HVICE bit to enable hypervisor virtualization interrupts. * Set HEIC to prevent OS interrupts to go to hypervisor (should * be unnecessary but better safe than sorry in case we re-enable * EE in HV mode with this LPCR still set) */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { lpcr &= ~LPCR_VPM0; lpcr |= LPCR_HVICE | LPCR_HEIC; /* * If xive is enabled, we route 0x500 interrupts directly * to the guest. 
*/ if (xics_on_xive()) lpcr |= LPCR_LPES; } /* * If the host uses radix, the guest starts out as radix. */ if (radix_enabled()) { kvm->arch.radix = 1; kvm->arch.mmu_ready = 1; lpcr &= ~LPCR_VPM1; lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR; if (cpu_has_feature(CPU_FTR_HVMODE) && cpu_has_feature(CPU_FTR_ARCH_31) && (kvm->arch.host_lpcr & LPCR_HAIL)) lpcr |= LPCR_HAIL; ret = kvmppc_init_vm_radix(kvm); if (ret) { kvmppc_free_lpid(kvm->arch.lpid); return ret; } kvmppc_setup_partition_table(kvm); } verify_lpcr(kvm, lpcr); kvm->arch.lpcr = lpcr; /* Initialization for future HPT resizes */ kvm->arch.resize_hpt = NULL; /* * Work out how many sets the TLB has, for the use of * the TLB invalidation loop in book3s_hv_rmhandlers.S. */ if (cpu_has_feature(CPU_FTR_ARCH_31)) { /* * P10 will flush all the congruence class with a single tlbiel */ kvm->arch.tlb_sets = 1; } else if (radix_enabled()) kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ else if (cpu_has_feature(CPU_FTR_ARCH_300)) kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ else if (cpu_has_feature(CPU_FTR_ARCH_207S)) kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ else kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ /* * Track that we now have a HV mode VM active. This blocks secondary * CPU threads from coming online. */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) kvm_hv_vm_activated(); /* * Initialize smt_mode depending on processor. * POWER8 and earlier have to use "strict" threading, where * all vCPUs in a vcore have to run on the same (sub)core, * whereas on POWER9 the threads can each run a different * guest. */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) kvm->arch.smt_mode = threads_per_subcore; else kvm->arch.smt_mode = 1; kvm->arch.emul_smt_mode = 1; return 0; } static int kvmppc_arch_create_vm_debugfs_hv(struct kvm *kvm) { kvmppc_mmu_debugfs_init(kvm); if (radix_enabled()) kvmhv_radix_debugfs_init(kvm); return 0; } static void kvmppc_free_vcores(struct kvm *kvm) { long int i; for (i = 0; i < KVM_MAX_VCORES; ++i) kfree(kvm->arch.vcores[i]); kvm->arch.online_vcores = 0; } static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) { if (!cpu_has_feature(CPU_FTR_ARCH_300)) kvm_hv_vm_deactivated(); kvmppc_free_vcores(kvm); if (kvm_is_radix(kvm)) kvmppc_free_radix(kvm); else kvmppc_free_hpt(&kvm->arch.hpt); /* Perform global invalidation and return lpid to the pool */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { if (nesting_enabled(kvm)) kvmhv_release_all_nested(kvm); kvm->arch.process_table = 0; if (kvm->arch.secure_guest) uv_svm_terminate(kvm->arch.lpid); kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); } kvmppc_free_lpid(kvm->arch.lpid); kvmppc_free_pimap(kvm); } /* We don't need to emulate any privileged instructions or dcbz */ static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { return EMULATE_FAIL; } static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) { return EMULATE_FAIL; } static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) { return EMULATE_FAIL; } static int kvmppc_core_check_processor_compat_hv(void) { if (cpu_has_feature(CPU_FTR_HVMODE) && cpu_has_feature(CPU_FTR_ARCH_206)) return 0; /* POWER9 in radix mode is capable of being a nested hypervisor. 
*/ if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) return 0; return -EIO; } #ifdef CONFIG_KVM_XICS void kvmppc_free_pimap(struct kvm *kvm) { kfree(kvm->arch.pimap); } static struct kvmppc_passthru_irqmap *kvmppc_alloc_pimap(void) { return kzalloc(sizeof(struct kvmppc_passthru_irqmap), GFP_KERNEL); } static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) { struct irq_desc *desc; struct kvmppc_irq_map *irq_map; struct kvmppc_passthru_irqmap *pimap; struct irq_chip *chip; int i, rc = 0; struct irq_data *host_data; if (!kvm_irq_bypass) return 1; desc = irq_to_desc(host_irq); if (!desc) return -EIO; mutex_lock(&kvm->lock); pimap = kvm->arch.pimap; if (pimap == NULL) { /* First call, allocate structure to hold IRQ map */ pimap = kvmppc_alloc_pimap(); if (pimap == NULL) { mutex_unlock(&kvm->lock); return -ENOMEM; } kvm->arch.pimap = pimap; } /* * For now, we only support interrupts for which the EOI operation * is an OPAL call followed by a write to XIRR, since that's * what our real-mode EOI code does, or a XIVE interrupt */ chip = irq_data_get_irq_chip(&desc->irq_data); if (!chip || !is_pnv_opal_msi(chip)) { pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n", host_irq, guest_gsi); mutex_unlock(&kvm->lock); return -ENOENT; } /* * See if we already have an entry for this guest IRQ number. * If it's mapped to a hardware IRQ number, that's an error, * otherwise re-use this entry. */ for (i = 0; i < pimap->n_mapped; i++) { if (guest_gsi == pimap->mapped[i].v_hwirq) { if (pimap->mapped[i].r_hwirq) { mutex_unlock(&kvm->lock); return -EINVAL; } break; } } if (i == KVMPPC_PIRQ_MAPPED) { mutex_unlock(&kvm->lock); return -EAGAIN; /* table is full */ } irq_map = &pimap->mapped[i]; irq_map->v_hwirq = guest_gsi; irq_map->desc = desc; /* * Order the above two stores before the next to serialize with * the KVM real mode handler. */ smp_wmb(); /* * The 'host_irq' number is mapped in the PCI-MSI domain but * the underlying calls, which will EOI the interrupt in real * mode, need an HW IRQ number mapped in the XICS IRQ domain. */ host_data = irq_domain_get_irq_data(irq_get_default_host(), host_irq); irq_map->r_hwirq = (unsigned int)irqd_to_hwirq(host_data); if (i == pimap->n_mapped) pimap->n_mapped++; if (xics_on_xive()) rc = kvmppc_xive_set_mapped(kvm, guest_gsi, host_irq); else kvmppc_xics_set_mapped(kvm, guest_gsi, irq_map->r_hwirq); if (rc) irq_map->r_hwirq = 0; mutex_unlock(&kvm->lock); return 0; } static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) { struct irq_desc *desc; struct kvmppc_passthru_irqmap *pimap; int i, rc = 0; if (!kvm_irq_bypass) return 0; desc = irq_to_desc(host_irq); if (!desc) return -EIO; mutex_lock(&kvm->lock); if (!kvm->arch.pimap) goto unlock; pimap = kvm->arch.pimap; for (i = 0; i < pimap->n_mapped; i++) { if (guest_gsi == pimap->mapped[i].v_hwirq) break; } if (i == pimap->n_mapped) { mutex_unlock(&kvm->lock); return -ENODEV; } if (xics_on_xive()) rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, host_irq); else kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq); /* invalidate the entry (what to do on error from the above ?) */ pimap->mapped[i].r_hwirq = 0; /* * We don't free this structure even when the count goes to * zero. The structure is freed when we destroy the VM. 
*/ unlock: mutex_unlock(&kvm->lock); return rc; } static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons, struct irq_bypass_producer *prod) { int ret = 0; struct kvm_kernel_irqfd *irqfd = container_of(cons, struct kvm_kernel_irqfd, consumer); irqfd->producer = prod; ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); if (ret) pr_info("kvmppc_set_passthru_irq (irq %d, gsi %d) fails: %d\n", prod->irq, irqfd->gsi, ret); return ret; } static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons, struct irq_bypass_producer *prod) { int ret; struct kvm_kernel_irqfd *irqfd = container_of(cons, struct kvm_kernel_irqfd, consumer); irqfd->producer = NULL; /* * When producer of consumer is unregistered, we change back to * default external interrupt handling mode - KVM real mode * will switch back to host. */ ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); if (ret) pr_warn("kvmppc_clr_passthru_irq (irq %d, gsi %d) fails: %d\n", prod->irq, irqfd->gsi, ret); } #endif static int kvm_arch_vm_ioctl_hv(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm __maybe_unused = filp->private_data; void __user *argp = (void __user *)arg; int r; switch (ioctl) { case KVM_PPC_ALLOCATE_HTAB: { u32 htab_order; /* If we're a nested hypervisor, we currently only support radix */ if (kvmhv_on_pseries()) { r = -EOPNOTSUPP; break; } r = -EFAULT; if (get_user(htab_order, (u32 __user *)argp)) break; r = kvmppc_alloc_reset_hpt(kvm, htab_order); if (r) break; r = 0; break; } case KVM_PPC_GET_HTAB_FD: { struct kvm_get_htab_fd ghf; r = -EFAULT; if (copy_from_user(&ghf, argp, sizeof(ghf))) break; r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); break; } case KVM_PPC_RESIZE_HPT_PREPARE: { struct kvm_ppc_resize_hpt rhpt; r = -EFAULT; if (copy_from_user(&rhpt, argp, sizeof(rhpt))) break; r = kvm_vm_ioctl_resize_hpt_prepare(kvm, &rhpt); break; } case KVM_PPC_RESIZE_HPT_COMMIT: { struct kvm_ppc_resize_hpt rhpt; r = -EFAULT; if (copy_from_user(&rhpt, argp, sizeof(rhpt))) break; r = kvm_vm_ioctl_resize_hpt_commit(kvm, &rhpt); break; } default: r = -ENOTTY; } return r; } /* * List of hcall numbers to enable by default. * For compatibility with old userspace, we enable by default * all hcalls that were implemented before the hcall-enabling * facility was added. Note this list should not include H_RTAS. */ static unsigned int default_hcall_list[] = { H_REMOVE, H_ENTER, H_READ, H_PROTECT, H_BULK_REMOVE, #ifdef CONFIG_SPAPR_TCE_IOMMU H_GET_TCE, H_PUT_TCE, #endif H_SET_DABR, H_SET_XDABR, H_CEDE, H_PROD, H_CONFER, H_REGISTER_VPA, #ifdef CONFIG_KVM_XICS H_EOI, H_CPPR, H_IPI, H_IPOLL, H_XIRR, H_XIRR_X, #endif 0 }; static void init_default_hcalls(void) { int i; unsigned int hcall; for (i = 0; default_hcall_list[i]; ++i) { hcall = default_hcall_list[i]; WARN_ON(!kvmppc_hcall_impl_hv(hcall)); __set_bit(hcall / 4, default_enabled_hcalls); } } static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) { unsigned long lpcr; int radix; int err; /* If not on a POWER9, reject it */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) return -ENODEV; /* If any unknown flags set, reject it */ if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE)) return -EINVAL; /* GR (guest radix) bit in process_table field must match */ radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX); if (!!(cfg->process_table & PATB_GR) != radix) return -EINVAL; /* Process table size field must be reasonable, i.e. 
<= 24 */ if ((cfg->process_table & PRTS_MASK) > 24) return -EINVAL; /* We can change a guest to/from radix now, if the host is radix */ if (radix && !radix_enabled()) return -EINVAL; /* If we're a nested hypervisor, we currently only support radix */ if (kvmhv_on_pseries() && !radix) return -EINVAL; mutex_lock(&kvm->arch.mmu_setup_lock); if (radix != kvm_is_radix(kvm)) { if (kvm->arch.mmu_ready) { kvm->arch.mmu_ready = 0; /* order mmu_ready vs. vcpus_running */ smp_mb(); if (atomic_read(&kvm->arch.vcpus_running)) { kvm->arch.mmu_ready = 1; err = -EBUSY; goto out_unlock; } } if (radix) err = kvmppc_switch_mmu_to_radix(kvm); else err = kvmppc_switch_mmu_to_hpt(kvm); if (err) goto out_unlock; } kvm->arch.process_table = cfg->process_table; kvmppc_setup_partition_table(kvm); lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0; kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE); err = 0; out_unlock: mutex_unlock(&kvm->arch.mmu_setup_lock); return err; } static int kvmhv_enable_nested(struct kvm *kvm) { if (!nested) return -EPERM; if (!cpu_has_feature(CPU_FTR_ARCH_300)) return -ENODEV; if (!radix_enabled()) return -ENODEV; /* kvm == NULL means the caller is testing if the capability exists */ if (kvm) kvm->arch.nested_enable = true; return 0; } static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, int size) { int rc = -EINVAL; if (kvmhv_vcpu_is_radix(vcpu)) { rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size); if (rc > 0) rc = -EINVAL; } /* For now quadrants are the only way to access nested guest memory */ if (rc && vcpu->arch.nested) rc = -EAGAIN; return rc; } static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, int size) { int rc = -EINVAL; if (kvmhv_vcpu_is_radix(vcpu)) { rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size); if (rc > 0) rc = -EINVAL; } /* For now quadrants are the only way to access nested guest memory */ if (rc && vcpu->arch.nested) rc = -EAGAIN; return rc; } static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa) { unpin_vpa(kvm, vpa); vpa->gpa = 0; vpa->pinned_addr = NULL; vpa->dirty = false; vpa->update_pending = 0; } /* * Enable a guest to become a secure VM, or test whether * that could be enabled. * Called when the KVM_CAP_PPC_SECURE_GUEST capability is * tested (kvm == NULL) or enabled (kvm != NULL). */ static int kvmhv_enable_svm(struct kvm *kvm) { if (!kvmppc_uvmem_available()) return -EINVAL; if (kvm) kvm->arch.svm_enabled = 1; return 0; } /* * IOCTL handler to turn off secure mode of guest * * - Release all device pages * - Issue ucall to terminate the guest on the UV side * - Unpin the VPA pages. * - Reinit the partition scoped page tables */ static int kvmhv_svm_off(struct kvm *kvm) { struct kvm_vcpu *vcpu; int mmu_was_ready; int srcu_idx; int ret = 0; unsigned long i; if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) return ret; mutex_lock(&kvm->arch.mmu_setup_lock); mmu_was_ready = kvm->arch.mmu_ready; if (kvm->arch.mmu_ready) { kvm->arch.mmu_ready = 0; /* order mmu_ready vs. 
vcpus_running */ smp_mb(); if (atomic_read(&kvm->arch.vcpus_running)) { kvm->arch.mmu_ready = 1; ret = -EBUSY; goto out; } } srcu_idx = srcu_read_lock(&kvm->srcu); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { struct kvm_memory_slot *memslot; struct kvm_memslots *slots = __kvm_memslots(kvm, i); int bkt; if (!slots) continue; kvm_for_each_memslot(memslot, bkt, slots) { kvmppc_uvmem_drop_pages(memslot, kvm, true); uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); } } srcu_read_unlock(&kvm->srcu, srcu_idx); ret = uv_svm_terminate(kvm->arch.lpid); if (ret != U_SUCCESS) { ret = -EINVAL; goto out; } /* * When secure guest is reset, all the guest pages are sent * to UV via UV_PAGE_IN before the non-boot vcpus get a * chance to run and unpin their VPA pages. Unpinning of all * VPA pages is done here explicitly so that VPA pages * can be migrated to the secure side. * * This is required to for the secure SMP guest to reboot * correctly. */ kvm_for_each_vcpu(i, vcpu, kvm) { spin_lock(&vcpu->arch.vpa_update_lock); unpin_vpa_reset(kvm, &vcpu->arch.dtl); unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); unpin_vpa_reset(kvm, &vcpu->arch.vpa); spin_unlock(&vcpu->arch.vpa_update_lock); } kvmppc_setup_partition_table(kvm); kvm->arch.secure_guest = 0; kvm->arch.mmu_ready = mmu_was_ready; out: mutex_unlock(&kvm->arch.mmu_setup_lock); return ret; } static int kvmhv_enable_dawr1(struct kvm *kvm) { if (!cpu_has_feature(CPU_FTR_DAWR1)) return -ENODEV; /* kvm == NULL means the caller is testing if the capability exists */ if (kvm) kvm->arch.dawr1_enabled = true; return 0; } static bool kvmppc_hash_v3_possible(void) { if (!cpu_has_feature(CPU_FTR_ARCH_300)) return false; if (!cpu_has_feature(CPU_FTR_HVMODE)) return false; /* * POWER9 chips before version 2.02 can't have some threads in * HPT mode and some in radix mode on the same core. 
*/ if (radix_enabled()) { unsigned int pvr = mfspr(SPRN_PVR); if ((pvr >> 16) == PVR_POWER9 && (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) || ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101))) return false; } return true; } static struct kvmppc_ops kvm_ops_hv = { .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, .get_one_reg = kvmppc_get_one_reg_hv, .set_one_reg = kvmppc_set_one_reg_hv, .vcpu_load = kvmppc_core_vcpu_load_hv, .vcpu_put = kvmppc_core_vcpu_put_hv, .inject_interrupt = kvmppc_inject_interrupt_hv, .set_msr = kvmppc_set_msr_hv, .vcpu_run = kvmppc_vcpu_run_hv, .vcpu_create = kvmppc_core_vcpu_create_hv, .vcpu_free = kvmppc_core_vcpu_free_hv, .check_requests = kvmppc_core_check_requests_hv, .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv, .flush_memslot = kvmppc_core_flush_memslot_hv, .prepare_memory_region = kvmppc_core_prepare_memory_region_hv, .commit_memory_region = kvmppc_core_commit_memory_region_hv, .unmap_gfn_range = kvm_unmap_gfn_range_hv, .age_gfn = kvm_age_gfn_hv, .test_age_gfn = kvm_test_age_gfn_hv, .set_spte_gfn = kvm_set_spte_gfn_hv, .free_memslot = kvmppc_core_free_memslot_hv, .init_vm = kvmppc_core_init_vm_hv, .destroy_vm = kvmppc_core_destroy_vm_hv, .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv, .emulate_op = kvmppc_core_emulate_op_hv, .emulate_mtspr = kvmppc_core_emulate_mtspr_hv, .emulate_mfspr = kvmppc_core_emulate_mfspr_hv, .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, .hcall_implemented = kvmppc_hcall_impl_hv, #ifdef CONFIG_KVM_XICS .irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv, .irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv, #endif .configure_mmu = kvmhv_configure_mmu, .get_rmmu_info = kvmhv_get_rmmu_info, .set_smt_mode = kvmhv_set_smt_mode, .enable_nested = kvmhv_enable_nested, .load_from_eaddr = kvmhv_load_from_eaddr, .store_to_eaddr = kvmhv_store_to_eaddr, .enable_svm = kvmhv_enable_svm, .svm_off = kvmhv_svm_off, .enable_dawr1 = kvmhv_enable_dawr1, .hash_v3_possible = kvmppc_hash_v3_possible, .create_vcpu_debugfs = kvmppc_arch_create_vcpu_debugfs_hv, .create_vm_debugfs = kvmppc_arch_create_vm_debugfs_hv, }; static int kvm_init_subcore_bitmap(void) { int i, j; int nr_cores = cpu_nr_cores(); struct sibling_subcore_state *sibling_subcore_state; for (i = 0; i < nr_cores; i++) { int first_cpu = i * threads_per_core; int node = cpu_to_node(first_cpu); /* Ignore if it is already allocated. */ if (paca_ptrs[first_cpu]->sibling_subcore_state) continue; sibling_subcore_state = kzalloc_node(sizeof(struct sibling_subcore_state), GFP_KERNEL, node); if (!sibling_subcore_state) return -ENOMEM; for (j = 0; j < threads_per_core; j++) { int cpu = first_cpu + j; paca_ptrs[cpu]->sibling_subcore_state = sibling_subcore_state; } } return 0; } static int kvmppc_radix_possible(void) { return cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled(); } static int kvmppc_book3s_init_hv(void) { int r; if (!tlbie_capable) { pr_err("KVM-HV: Host does not support TLBIE\n"); return -ENODEV; } /* * FIXME!! Do we need to check on all cpus ? */ r = kvmppc_core_check_processor_compat_hv(); if (r < 0) return -ENODEV; r = kvmhv_nested_init(); if (r) return r; if (!cpu_has_feature(CPU_FTR_ARCH_300)) { r = kvm_init_subcore_bitmap(); if (r) goto err; } /* * We need a way of accessing the XICS interrupt controller, * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or * indirectly, via OPAL. 
*/ #ifdef CONFIG_SMP if (!xics_on_xive() && !kvmhv_on_pseries() && !local_paca->kvm_hstate.xics_phys) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc"); if (!np) { pr_err("KVM-HV: Cannot determine method for accessing XICS\n"); r = -ENODEV; goto err; } /* presence of intc confirmed - node can be dropped again */ of_node_put(np); } #endif init_default_hcalls(); init_vcore_lists(); r = kvmppc_mmu_hv_init(); if (r) goto err; if (kvmppc_radix_possible()) { r = kvmppc_radix_init(); if (r) goto err; } r = kvmppc_uvmem_init(); if (r < 0) { pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r); return r; } kvm_ops_hv.owner = THIS_MODULE; kvmppc_hv_ops = &kvm_ops_hv; return 0; err: kvmhv_nested_exit(); kvmppc_radix_exit(); return r; } static void kvmppc_book3s_exit_hv(void) { kvmppc_uvmem_free(); kvmppc_free_host_rm_ops(); if (kvmppc_radix_possible()) kvmppc_radix_exit(); kvmppc_hv_ops = NULL; kvmhv_nested_exit(); } module_init(kvmppc_book3s_init_hv); module_exit(kvmppc_book3s_exit_hv); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(KVM_MINOR); MODULE_ALIAS("devname:kvm");
linux-master
arch/powerpc/kvm/book3s_hv.c
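The vcore halt-polling path in kvmppc_vcore_blocked() above grows or shrinks the per-vcore poll window according to how long the block actually lasted. The following is a minimal, self-contained userspace sketch of that sizing heuristic; the parameter names mirror the halt_poll_ns* module parameters, but the initial values and the adjust() wrapper are illustrative assumptions, not the kernel code.

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative model of the halt-poll window adjustment made at the end of
 * kvmppc_vcore_blocked(). The names mirror the module parameters; the
 * values below are assumptions for the example, not the kernel defaults.
 */
static uint64_t halt_poll_ns = 500000;		/* global cap */
static uint64_t halt_poll_ns_grow = 2;
static uint64_t halt_poll_ns_grow_start = 10000;
static uint64_t halt_poll_ns_shrink = 2;

static void grow(uint64_t *cur)
{
	if (!halt_poll_ns_grow)
		return;
	*cur *= halt_poll_ns_grow;
	if (*cur < halt_poll_ns_grow_start)
		*cur = halt_poll_ns_grow_start;
}

static void shrink(uint64_t *cur)
{
	if (halt_poll_ns_shrink == 0)
		*cur = 0;
	else
		*cur /= halt_poll_ns_shrink;
}

/* Same three-way decision the vcore code makes after each block. */
static void adjust(uint64_t *cur, uint64_t block_ns)
{
	if (!halt_poll_ns) {
		*cur = 0;
		return;
	}
	if (block_ns <= *cur)
		;			/* block fit inside the window: keep it */
	else if (*cur && block_ns > halt_poll_ns)
		shrink(cur);		/* slept far past the global cap */
	else if (*cur < halt_poll_ns && block_ns < halt_poll_ns)
		grow(cur);		/* window too small for this wakeup */
	if (*cur > halt_poll_ns)
		*cur = halt_poll_ns;
}

int main(void)
{
	uint64_t cur = 0;
	uint64_t samples[] = { 400000, 20000, 5000, 900000, 3000 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		adjust(&cur, samples[i]);
		printf("block %7lu ns -> poll window %7lu ns\n",
		       (unsigned long)samples[i], (unsigned long)cur);
	}
	return 0;
}

The asymmetric grow/shrink keeps the window near zero for vcores that routinely sleep past the cap, while quickly rewarding vcores whose wakeups tend to arrive within it.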
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2010
 *
 * Author: Anton Blanchard <[email protected]>
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/checksum.h>
#include <linux/uaccess.h>

__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
	__wsum csum;

	if (unlikely(!user_read_access_begin(src, len)))
		return 0;

	csum = csum_partial_copy_generic((void __force *)src, dst, len);

	user_read_access_end();
	return csum;
}

__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
	__wsum csum;

	if (unlikely(!user_write_access_begin(dst, len)))
		return 0;

	csum = csum_partial_copy_generic(src, (void __force *)dst, len);

	user_write_access_end();
	return csum;
}
linux-master
arch/powerpc/lib/checksum_wrappers.c
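csum_and_copy_from_user() and csum_and_copy_to_user() above only open a user-access window and delegate to csum_partial_copy_generic(), which is hand-optimized powerpc assembly elsewhere in arch/powerpc/lib. As a rough model of the arithmetic that routine performs, here is a portable sketch of an RFC 1071 ones'-complement sum computed while copying; it is not bit-for-bit identical to the kernel's __wsum convention (the byte pairing and partial-sum width here are simplifying assumptions).

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/*
 * Simplified model only: sum 16-bit words while copying, then fold the
 * carries back into the low 16 bits, as RFC 1071 describes.
 */
static uint32_t csum_and_copy(const uint8_t *src, uint8_t *dst, size_t len)
{
	uint64_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2) {
		dst[i] = src[i];
		dst[i + 1] = src[i + 1];
		sum += (uint32_t)(src[i] << 8 | src[i + 1]);
	}
	if (i < len) {			/* odd trailing byte */
		dst[i] = src[i];
		sum += (uint32_t)(src[i] << 8);
	}
	/* Fold carries back into the low 16 bits. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint32_t)sum;
}

int main(void)
{
	const uint8_t payload[] = { 0x45, 0x00, 0x00, 0x73, 0x00, 0x00,
				    0x40, 0x00, 0x40, 0x11 };
	uint8_t copy[sizeof(payload)];
	uint32_t sum = csum_and_copy(payload, copy, sizeof(payload));

	/* A transmitted checksum field would carry the ones' complement. */
	printf("partial sum 0x%04x, checksum field 0x%04x\n",
	       (unsigned)sum, (unsigned)(~sum & 0xffff));
	return 0;
}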
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2008 Michael Ellerman, IBM Corporation. */ #include <linux/vmalloc.h> #include <linux/init.h> #include <asm/code-patching.h> static int __init instr_is_branch_to_addr(const u32 *instr, unsigned long addr) { if (instr_is_branch_iform(ppc_inst_read(instr)) || instr_is_branch_bform(ppc_inst_read(instr))) return branch_target(instr) == addr; return 0; } static void __init test_trampoline(void) { asm ("nop;nop;\n"); } #define check(x) do { \ if (!(x)) \ pr_err("code-patching: test failed at line %d\n", __LINE__); \ } while (0) static void __init test_branch_iform(void) { int err; ppc_inst_t instr; u32 tmp[2]; u32 *iptr = tmp; unsigned long addr = (unsigned long)tmp; /* The simplest case, branch to self, no flags */ check(instr_is_branch_iform(ppc_inst(0x48000000))); /* All bits of target set, and flags */ check(instr_is_branch_iform(ppc_inst(0x4bffffff))); /* High bit of opcode set, which is wrong */ check(!instr_is_branch_iform(ppc_inst(0xcbffffff))); /* Middle bits of opcode set, which is wrong */ check(!instr_is_branch_iform(ppc_inst(0x7bffffff))); /* Simplest case, branch to self with link */ check(instr_is_branch_iform(ppc_inst(0x48000001))); /* All bits of targets set */ check(instr_is_branch_iform(ppc_inst(0x4bfffffd))); /* Some bits of targets set */ check(instr_is_branch_iform(ppc_inst(0x4bff00fd))); /* Must be a valid branch to start with */ check(!instr_is_branch_iform(ppc_inst(0x7bfffffd))); /* Absolute branch to 0x100 */ ppc_inst_write(iptr, ppc_inst(0x48000103)); check(instr_is_branch_to_addr(iptr, 0x100)); /* Absolute branch to 0x420fc */ ppc_inst_write(iptr, ppc_inst(0x480420ff)); check(instr_is_branch_to_addr(iptr, 0x420fc)); /* Maximum positive relative branch, + 20MB - 4B */ ppc_inst_write(iptr, ppc_inst(0x49fffffc)); check(instr_is_branch_to_addr(iptr, addr + 0x1FFFFFC)); /* Smallest negative relative branch, - 4B */ ppc_inst_write(iptr, ppc_inst(0x4bfffffc)); check(instr_is_branch_to_addr(iptr, addr - 4)); /* Largest negative relative branch, - 32 MB */ ppc_inst_write(iptr, ppc_inst(0x4a000000)); check(instr_is_branch_to_addr(iptr, addr - 0x2000000)); /* Branch to self, with link */ err = create_branch(&instr, iptr, addr, BRANCH_SET_LINK); ppc_inst_write(iptr, instr); check(instr_is_branch_to_addr(iptr, addr)); /* Branch to self - 0x100, with link */ err = create_branch(&instr, iptr, addr - 0x100, BRANCH_SET_LINK); ppc_inst_write(iptr, instr); check(instr_is_branch_to_addr(iptr, addr - 0x100)); /* Branch to self + 0x100, no link */ err = create_branch(&instr, iptr, addr + 0x100, 0); ppc_inst_write(iptr, instr); check(instr_is_branch_to_addr(iptr, addr + 0x100)); /* Maximum relative negative offset, - 32 MB */ err = create_branch(&instr, iptr, addr - 0x2000000, BRANCH_SET_LINK); ppc_inst_write(iptr, instr); check(instr_is_branch_to_addr(iptr, addr - 0x2000000)); /* Out of range relative negative offset, - 32 MB + 4*/ err = create_branch(&instr, iptr, addr - 0x2000004, BRANCH_SET_LINK); check(err); /* Out of range relative positive offset, + 32 MB */ err = create_branch(&instr, iptr, addr + 0x2000000, BRANCH_SET_LINK); check(err); /* Unaligned target */ err = create_branch(&instr, iptr, addr + 3, BRANCH_SET_LINK); check(err); /* Check flags are masked correctly */ err = create_branch(&instr, iptr, addr, 0xFFFFFFFC); ppc_inst_write(iptr, instr); check(instr_is_branch_to_addr(iptr, addr)); check(ppc_inst_equal(instr, ppc_inst(0x48000000))); } static void __init test_create_function_call(void) { u32 *iptr; unsigned long 
dest; ppc_inst_t instr; /* Check we can create a function call */ iptr = (u32 *)ppc_function_entry(test_trampoline); dest = ppc_function_entry(test_create_function_call); create_branch(&instr, iptr, dest, BRANCH_SET_LINK); patch_instruction(iptr, instr); check(instr_is_branch_to_addr(iptr, dest)); } static void __init test_branch_bform(void) { int err; unsigned long addr; ppc_inst_t instr; u32 tmp[2]; u32 *iptr = tmp; unsigned int flags; addr = (unsigned long)iptr; /* The simplest case, branch to self, no flags */ check(instr_is_branch_bform(ppc_inst(0x40000000))); /* All bits of target set, and flags */ check(instr_is_branch_bform(ppc_inst(0x43ffffff))); /* High bit of opcode set, which is wrong */ check(!instr_is_branch_bform(ppc_inst(0xc3ffffff))); /* Middle bits of opcode set, which is wrong */ check(!instr_is_branch_bform(ppc_inst(0x7bffffff))); /* Absolute conditional branch to 0x100 */ ppc_inst_write(iptr, ppc_inst(0x43ff0103)); check(instr_is_branch_to_addr(iptr, 0x100)); /* Absolute conditional branch to 0x20fc */ ppc_inst_write(iptr, ppc_inst(0x43ff20ff)); check(instr_is_branch_to_addr(iptr, 0x20fc)); /* Maximum positive relative conditional branch, + 32 KB - 4B */ ppc_inst_write(iptr, ppc_inst(0x43ff7ffc)); check(instr_is_branch_to_addr(iptr, addr + 0x7FFC)); /* Smallest negative relative conditional branch, - 4B */ ppc_inst_write(iptr, ppc_inst(0x43fffffc)); check(instr_is_branch_to_addr(iptr, addr - 4)); /* Largest negative relative conditional branch, - 32 KB */ ppc_inst_write(iptr, ppc_inst(0x43ff8000)); check(instr_is_branch_to_addr(iptr, addr - 0x8000)); /* All condition code bits set & link */ flags = 0x3ff000 | BRANCH_SET_LINK; /* Branch to self */ err = create_cond_branch(&instr, iptr, addr, flags); ppc_inst_write(iptr, instr); check(instr_is_branch_to_addr(iptr, addr)); /* Branch to self - 0x100 */ err = create_cond_branch(&instr, iptr, addr - 0x100, flags); ppc_inst_write(iptr, instr); check(instr_is_branch_to_addr(iptr, addr - 0x100)); /* Branch to self + 0x100 */ err = create_cond_branch(&instr, iptr, addr + 0x100, flags); ppc_inst_write(iptr, instr); check(instr_is_branch_to_addr(iptr, addr + 0x100)); /* Maximum relative negative offset, - 32 KB */ err = create_cond_branch(&instr, iptr, addr - 0x8000, flags); ppc_inst_write(iptr, instr); check(instr_is_branch_to_addr(iptr, addr - 0x8000)); /* Out of range relative negative offset, - 32 KB + 4*/ err = create_cond_branch(&instr, iptr, addr - 0x8004, flags); check(err); /* Out of range relative positive offset, + 32 KB */ err = create_cond_branch(&instr, iptr, addr + 0x8000, flags); check(err); /* Unaligned target */ err = create_cond_branch(&instr, iptr, addr + 3, flags); check(err); /* Check flags are masked correctly */ err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC); ppc_inst_write(iptr, instr); check(instr_is_branch_to_addr(iptr, addr)); check(ppc_inst_equal(instr, ppc_inst(0x43FF0000))); } static void __init test_translate_branch(void) { unsigned long addr; void *p, *q; ppc_inst_t instr; void *buf; buf = vmalloc(PAGE_ALIGN(0x2000000 + 1)); check(buf); if (!buf) return; /* Simple case, branch to self moved a little */ p = buf; addr = (unsigned long)p; create_branch(&instr, p, addr, 0); ppc_inst_write(p, instr); check(instr_is_branch_to_addr(p, addr)); q = p + 4; translate_branch(&instr, q, p); ppc_inst_write(q, instr); check(instr_is_branch_to_addr(q, addr)); /* Maximum negative case, move b . 
to addr + 32 MB */ p = buf; addr = (unsigned long)p; create_branch(&instr, p, addr, 0); ppc_inst_write(p, instr); q = buf + 0x2000000; translate_branch(&instr, q, p); ppc_inst_write(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x4a000000))); /* Maximum positive case, move x to x - 32 MB + 4 */ p = buf + 0x2000000; addr = (unsigned long)p; create_branch(&instr, p, addr, 0); ppc_inst_write(p, instr); q = buf + 4; translate_branch(&instr, q, p); ppc_inst_write(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x49fffffc))); /* Jump to x + 16 MB moved to x + 20 MB */ p = buf; addr = 0x1000000 + (unsigned long)buf; create_branch(&instr, p, addr, BRANCH_SET_LINK); ppc_inst_write(p, instr); q = buf + 0x1400000; translate_branch(&instr, q, p); ppc_inst_write(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); /* Jump to x + 16 MB moved to x - 16 MB + 4 */ p = buf + 0x1000000; addr = 0x2000000 + (unsigned long)buf; create_branch(&instr, p, addr, 0); ppc_inst_write(p, instr); q = buf + 4; translate_branch(&instr, q, p); ppc_inst_write(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); /* Conditional branch tests */ /* Simple case, branch to self moved a little */ p = buf; addr = (unsigned long)p; create_cond_branch(&instr, p, addr, 0); ppc_inst_write(p, instr); check(instr_is_branch_to_addr(p, addr)); q = buf + 4; translate_branch(&instr, q, p); ppc_inst_write(q, instr); check(instr_is_branch_to_addr(q, addr)); /* Maximum negative case, move b . to addr + 32 KB */ p = buf; addr = (unsigned long)p; create_cond_branch(&instr, p, addr, 0xFFFFFFFC); ppc_inst_write(p, instr); q = buf + 0x8000; translate_branch(&instr, q, p); ppc_inst_write(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff8000))); /* Maximum positive case, move x to x - 32 KB + 4 */ p = buf + 0x8000; addr = (unsigned long)p; create_cond_branch(&instr, p, addr, 0xFFFFFFFC); ppc_inst_write(p, instr); q = buf + 4; translate_branch(&instr, q, p); ppc_inst_write(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff7ffc))); /* Jump to x + 12 KB moved to x + 20 KB */ p = buf; addr = 0x3000 + (unsigned long)buf; create_cond_branch(&instr, p, addr, BRANCH_SET_LINK); ppc_inst_write(p, instr); q = buf + 0x5000; translate_branch(&instr, q, p); ppc_inst_write(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); /* Jump to x + 8 KB moved to x - 8 KB + 4 */ p = buf + 0x2000; addr = 0x4000 + (unsigned long)buf; create_cond_branch(&instr, p, addr, 0); ppc_inst_write(p, instr); q = buf + 4; translate_branch(&instr, q, p); ppc_inst_write(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); /* Free the buffer we were using */ vfree(buf); } static void __init test_prefixed_patching(void) { u32 *iptr = (u32 *)ppc_function_entry(test_trampoline); u32 expected[2] = {OP_PREFIX << 26, 0}; ppc_inst_t inst = ppc_inst_prefix(OP_PREFIX << 26, 0); if (!IS_ENABLED(CONFIG_PPC64)) return; patch_instruction(iptr, inst); check(!memcmp(iptr, expected, sizeof(expected))); } static int __init test_code_patching(void) { pr_info("Running code 
patching self-tests ...\n"); test_branch_iform(); test_branch_bform(); test_create_function_call(); test_translate_branch(); test_prefixed_patching(); return 0; } late_initcall(test_code_patching);
linux-master
arch/powerpc/lib/test-code-patching.c
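The branch checks in the self-test above all hinge on the PowerPC I-form encoding: primary opcode 18 in the top six bits, a 24-bit signed word offset (LI) in bits 6-29, and the AA and LK bits at the bottom. Below is a minimal userspace sketch of that decoding, kept deliberately close to the constants exercised above (0x48000103, 0x49fffffc, 0x4a000000); the function name and the sample address are illustrative, not kernel APIs.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Decode the target of a PowerPC I-form branch (opcode 18: b/ba/bl/bla).
 * 'addr' is the address the instruction word sits at. Illustrative only.
 */
static uint64_t iform_branch_target(uint32_t insn, uint64_t addr)
{
	int64_t imm = insn & 0x03fffffc;	/* LI << 2: signed byte offset */

	if (imm & 0x02000000)			/* sign bit of the 26-bit offset */
		imm -= 0x04000000;

	if (insn & 0x2)				/* AA set: absolute branch */
		return (uint64_t)imm;

	return addr + imm;			/* relative branch */
}

int main(void)
{
	uint64_t addr = 0x10000000;

	/* Mirrors the instruction words checked by the self-test above. */
	assert(iform_branch_target(0x48000103, addr) == 0x100);		/* absolute, link */
	assert(iform_branch_target(0x49fffffc, addr) == addr + 0x1fffffc);	/* +32 MB - 4 */
	assert(iform_branch_target(0x4bfffffc, addr) == addr - 4);		/* -4 */
	assert(iform_branch_target(0x4a000000, addr) == addr - 0x2000000);	/* -32 MB */

	printf("I-form branch decode checks passed\n");
	return 0;
}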
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 IBM Corporation. All rights reserved.
 */

#include <linux/string.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/libnvdimm.h>

#include <asm/cacheflush.h>

static inline void __clean_pmem_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		asm volatile(PPC_DCBSTPS(%0, %1): :"i"(0), "r"(addr): "memory");
}

static inline void __flush_pmem_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		asm volatile(PPC_DCBFPS(%0, %1): :"i"(0), "r"(addr): "memory");
}

static inline void clean_pmem_range(unsigned long start, unsigned long stop)
{
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return __clean_pmem_range(start, stop);
}

static inline void flush_pmem_range(unsigned long start, unsigned long stop)
{
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return __flush_pmem_range(start, stop);
}

/*
 * CONFIG_ARCH_HAS_PMEM_API symbols
 */

void arch_wb_cache_pmem(void *addr, size_t size)
{
	unsigned long start = (unsigned long) addr;

	clean_pmem_range(start, start + size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

void arch_invalidate_pmem(void *addr, size_t size)
{
	unsigned long start = (unsigned long) addr;

	flush_pmem_range(start, start + size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);

/*
 * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE symbols
 */

long __copy_from_user_flushcache(void *dest, const void __user *src, unsigned size)
{
	unsigned long copied, start = (unsigned long) dest;

	copied = __copy_from_user(dest, src, size);
	clean_pmem_range(start, start + size);

	return copied;
}

void memcpy_flushcache(void *dest, const void *src, size_t size)
{
	unsigned long start = (unsigned long) dest;

	memcpy(dest, src, size);
	clean_pmem_range(start, start + size);
}
EXPORT_SYMBOL(memcpy_flushcache);
linux-master
arch/powerpc/lib/pmem.c
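Both __clean_pmem_range() and __flush_pmem_range() above walk the range one data-cache block at a time: round the start down to a block boundary, pad the length, then shift to get the iteration count. The sketch below reproduces only that arithmetic in userspace, with a 64-byte block size assumed purely for illustration; the helper name is made up, and there is of course no userspace stand-in for dcbstps/dcbfps themselves.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel loops, with a fixed 64-byte block size
 * assumed only for the sake of the example. */
#define L1_SHIFT	6
#define L1_BYTES	(1UL << L1_SHIFT)

static unsigned long cachelines_covering(unsigned long start, unsigned long stop)
{
	unsigned long addr = start & ~(L1_BYTES - 1);		/* round start down */
	unsigned long size = stop - addr + (L1_BYTES - 1);	/* pad before shifting */

	return size >> L1_SHIFT;	/* one dcbstps/dcbfps per returned block */
}

int main(void)
{
	/* 1 byte entirely inside one block: one flush */
	assert(cachelines_covering(0x1001, 0x1002) == 1);
	/* 128 bytes starting mid-block: spills into a third block */
	assert(cachelines_covering(0x1020, 0x10a0) == 3);
	/* exactly two aligned blocks */
	assert(cachelines_covering(0x1000, 0x1080) == 2);

	printf("cache-block covering checks passed\n");
	return 0;
}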
// SPDX-License-Identifier: GPL-2.0-or-later /* * Simple sanity tests for instruction emulation infrastructure. * * Copyright IBM Corp. 2016 */ #define pr_fmt(fmt) "emulate_step_test: " fmt #include <linux/ptrace.h> #include <asm/cpu_has_feature.h> #include <asm/sstep.h> #include <asm/ppc-opcode.h> #include <asm/code-patching.h> #include <asm/inst.h> #define MAX_SUBTESTS 16 #define IGNORE_GPR(n) (0x1UL << (n)) #define IGNORE_XER (0x1UL << 32) #define IGNORE_CCR (0x1UL << 33) #define NEGATIVE_TEST (0x1UL << 63) #define TEST_PLD(r, base, i, pr) \ ppc_inst_prefix(PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_H(i), \ PPC_INST_PLD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define TEST_PLWZ(r, base, i, pr) \ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ PPC_RAW_LWZ(r, base, i)) #define TEST_PSTD(r, base, i, pr) \ ppc_inst_prefix(PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_H(i), \ PPC_INST_PSTD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define TEST_PLFS(r, base, i, pr) \ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ PPC_INST_LFS | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define TEST_PSTFS(r, base, i, pr) \ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ PPC_INST_STFS | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define TEST_PLFD(r, base, i, pr) \ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ PPC_INST_LFD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define TEST_PSTFD(r, base, i, pr) \ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ PPC_INST_STFD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define TEST_PADDI(t, a, i, pr) \ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ PPC_RAW_ADDI(t, a, i)) static void __init init_pt_regs(struct pt_regs *regs) { static unsigned long msr; static bool msr_cached; memset(regs, 0, sizeof(struct pt_regs)); if (likely(msr_cached)) { regs->msr = msr; return; } asm volatile("mfmsr %0" : "=r"(regs->msr)); regs->msr |= MSR_FP; regs->msr |= MSR_VEC; regs->msr |= MSR_VSX; msr = regs->msr; msr_cached = true; } static void __init show_result(char *mnemonic, char *result) { pr_info("%-14s : %s\n", mnemonic, result); } static void __init show_result_with_descr(char *mnemonic, char *descr, char *result) { pr_info("%-14s : %-50s %s\n", mnemonic, descr, result); } static void __init test_ld(void) { struct pt_regs regs; unsigned long a = 0x23; int stepped = -1; init_pt_regs(&regs); regs.gpr[3] = (unsigned long) &a; /* ld r5, 0(r3) */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LD(5, 3, 0))); if (stepped == 1 && regs.gpr[5] == a) show_result("ld", "PASS"); else show_result("ld", "FAIL"); } static void __init test_pld(void) { struct pt_regs regs; unsigned long a = 0x23; int stepped = -1; if (!cpu_has_feature(CPU_FTR_ARCH_31)) { show_result("pld", "SKIP (!CPU_FTR_ARCH_31)"); return; } init_pt_regs(&regs); regs.gpr[3] = (unsigned long)&a; /* pld r5, 0(r3), 0 */ stepped = emulate_step(&regs, TEST_PLD(5, 3, 0, 0)); if (stepped == 1 && regs.gpr[5] == a) show_result("pld", "PASS"); else show_result("pld", "FAIL"); } static void __init test_lwz(void) { struct pt_regs regs; unsigned int a = 0x4545; int stepped = -1; init_pt_regs(&regs); regs.gpr[3] = (unsigned long) &a; /* lwz r5, 0(r3) */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LWZ(5, 3, 0))); if (stepped == 1 && regs.gpr[5] == a) show_result("lwz", "PASS"); else show_result("lwz", "FAIL"); } static void __init test_plwz(void) { struct pt_regs regs; unsigned int a = 0x4545; int stepped = -1; if (!cpu_has_feature(CPU_FTR_ARCH_31)) 
{ show_result("plwz", "SKIP (!CPU_FTR_ARCH_31)"); return; } init_pt_regs(&regs); regs.gpr[3] = (unsigned long)&a; /* plwz r5, 0(r3), 0 */ stepped = emulate_step(&regs, TEST_PLWZ(5, 3, 0, 0)); if (stepped == 1 && regs.gpr[5] == a) show_result("plwz", "PASS"); else show_result("plwz", "FAIL"); } static void __init test_lwzx(void) { struct pt_regs regs; unsigned int a[3] = {0x0, 0x0, 0x1234}; int stepped = -1; init_pt_regs(&regs); regs.gpr[3] = (unsigned long) a; regs.gpr[4] = 8; regs.gpr[5] = 0x8765; /* lwzx r5, r3, r4 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LWZX(5, 3, 4))); if (stepped == 1 && regs.gpr[5] == a[2]) show_result("lwzx", "PASS"); else show_result("lwzx", "FAIL"); } static void __init test_std(void) { struct pt_regs regs; unsigned long a = 0x1234; int stepped = -1; init_pt_regs(&regs); regs.gpr[3] = (unsigned long) &a; regs.gpr[5] = 0x5678; /* std r5, 0(r3) */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STD(5, 3, 0))); if (stepped == 1 && regs.gpr[5] == a) show_result("std", "PASS"); else show_result("std", "FAIL"); } static void __init test_pstd(void) { struct pt_regs regs; unsigned long a = 0x1234; int stepped = -1; if (!cpu_has_feature(CPU_FTR_ARCH_31)) { show_result("pstd", "SKIP (!CPU_FTR_ARCH_31)"); return; } init_pt_regs(&regs); regs.gpr[3] = (unsigned long)&a; regs.gpr[5] = 0x5678; /* pstd r5, 0(r3), 0 */ stepped = emulate_step(&regs, TEST_PSTD(5, 3, 0, 0)); if (stepped == 1 || regs.gpr[5] == a) show_result("pstd", "PASS"); else show_result("pstd", "FAIL"); } static void __init test_ldarx_stdcx(void) { struct pt_regs regs; unsigned long a = 0x1234; int stepped = -1; unsigned long cr0_eq = 0x1 << 29; /* eq bit of CR0 */ init_pt_regs(&regs); asm volatile("mfcr %0" : "=r"(regs.ccr)); /*** ldarx ***/ regs.gpr[3] = (unsigned long) &a; regs.gpr[4] = 0; regs.gpr[5] = 0x5678; /* ldarx r5, r3, r4, 0 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LDARX(5, 3, 4, 0))); /* * Don't touch 'a' here. Touching 'a' can do Load/store * of 'a' which result in failure of subsequent stdcx. * Instead, use hardcoded value for comparison. */ if (stepped <= 0 || regs.gpr[5] != 0x1234) { show_result("ldarx / stdcx.", "FAIL (ldarx)"); return; } /*** stdcx. ***/ regs.gpr[5] = 0x9ABC; /* stdcx. r5, r3, r4 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STDCX(5, 3, 4))); /* * Two possible scenarios that indicates successful emulation * of stdcx. : * 1. Reservation is active and store is performed. In this * case cr0.eq bit will be set to 1. * 2. Reservation is not active and store is not performed. * In this case cr0.eq bit will be set to 0. 
*/ if (stepped == 1 && ((regs.gpr[5] == a && (regs.ccr & cr0_eq)) || (regs.gpr[5] != a && !(regs.ccr & cr0_eq)))) show_result("ldarx / stdcx.", "PASS"); else show_result("ldarx / stdcx.", "FAIL (stdcx.)"); } #ifdef CONFIG_PPC_FPU static void __init test_lfsx_stfsx(void) { struct pt_regs regs; union { float a; int b; } c; int cached_b; int stepped = -1; init_pt_regs(&regs); /*** lfsx ***/ c.a = 123.45; cached_b = c.b; regs.gpr[3] = (unsigned long) &c.a; regs.gpr[4] = 0; /* lfsx frt10, r3, r4 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LFSX(10, 3, 4))); if (stepped == 1) show_result("lfsx", "PASS"); else show_result("lfsx", "FAIL"); /*** stfsx ***/ c.a = 678.91; /* stfsx frs10, r3, r4 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STFSX(10, 3, 4))); if (stepped == 1 && c.b == cached_b) show_result("stfsx", "PASS"); else show_result("stfsx", "FAIL"); } static void __init test_plfs_pstfs(void) { struct pt_regs regs; union { float a; int b; } c; int cached_b; int stepped = -1; if (!cpu_has_feature(CPU_FTR_ARCH_31)) { show_result("pld", "SKIP (!CPU_FTR_ARCH_31)"); return; } init_pt_regs(&regs); /*** plfs ***/ c.a = 123.45; cached_b = c.b; regs.gpr[3] = (unsigned long)&c.a; /* plfs frt10, 0(r3), 0 */ stepped = emulate_step(&regs, TEST_PLFS(10, 3, 0, 0)); if (stepped == 1) show_result("plfs", "PASS"); else show_result("plfs", "FAIL"); /*** pstfs ***/ c.a = 678.91; /* pstfs frs10, 0(r3), 0 */ stepped = emulate_step(&regs, TEST_PSTFS(10, 3, 0, 0)); if (stepped == 1 && c.b == cached_b) show_result("pstfs", "PASS"); else show_result("pstfs", "FAIL"); } static void __init test_lfdx_stfdx(void) { struct pt_regs regs; union { double a; long b; } c; long cached_b; int stepped = -1; init_pt_regs(&regs); /*** lfdx ***/ c.a = 123456.78; cached_b = c.b; regs.gpr[3] = (unsigned long) &c.a; regs.gpr[4] = 0; /* lfdx frt10, r3, r4 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LFDX(10, 3, 4))); if (stepped == 1) show_result("lfdx", "PASS"); else show_result("lfdx", "FAIL"); /*** stfdx ***/ c.a = 987654.32; /* stfdx frs10, r3, r4 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STFDX(10, 3, 4))); if (stepped == 1 && c.b == cached_b) show_result("stfdx", "PASS"); else show_result("stfdx", "FAIL"); } static void __init test_plfd_pstfd(void) { struct pt_regs regs; union { double a; long b; } c; long cached_b; int stepped = -1; if (!cpu_has_feature(CPU_FTR_ARCH_31)) { show_result("pld", "SKIP (!CPU_FTR_ARCH_31)"); return; } init_pt_regs(&regs); /*** plfd ***/ c.a = 123456.78; cached_b = c.b; regs.gpr[3] = (unsigned long)&c.a; /* plfd frt10, 0(r3), 0 */ stepped = emulate_step(&regs, TEST_PLFD(10, 3, 0, 0)); if (stepped == 1) show_result("plfd", "PASS"); else show_result("plfd", "FAIL"); /*** pstfd ***/ c.a = 987654.32; /* pstfd frs10, 0(r3), 0 */ stepped = emulate_step(&regs, TEST_PSTFD(10, 3, 0, 0)); if (stepped == 1 && c.b == cached_b) show_result("pstfd", "PASS"); else show_result("pstfd", "FAIL"); } #else static void __init test_lfsx_stfsx(void) { show_result("lfsx", "SKIP (CONFIG_PPC_FPU is not set)"); show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)"); } static void __init test_plfs_pstfs(void) { show_result("plfs", "SKIP (CONFIG_PPC_FPU is not set)"); show_result("pstfs", "SKIP (CONFIG_PPC_FPU is not set)"); } static void __init test_lfdx_stfdx(void) { show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)"); show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)"); } static void __init test_plfd_pstfd(void) { show_result("plfd", "SKIP (CONFIG_PPC_FPU is not set)"); show_result("pstfd", "SKIP 
(CONFIG_PPC_FPU is not set)"); } #endif /* CONFIG_PPC_FPU */ #ifdef CONFIG_ALTIVEC static void __init test_lvx_stvx(void) { struct pt_regs regs; union { vector128 a; u32 b[4]; } c; u32 cached_b[4]; int stepped = -1; init_pt_regs(&regs); /*** lvx ***/ cached_b[0] = c.b[0] = 923745; cached_b[1] = c.b[1] = 2139478; cached_b[2] = c.b[2] = 9012; cached_b[3] = c.b[3] = 982134; regs.gpr[3] = (unsigned long) &c.a; regs.gpr[4] = 0; /* lvx vrt10, r3, r4 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LVX(10, 3, 4))); if (stepped == 1) show_result("lvx", "PASS"); else show_result("lvx", "FAIL"); /*** stvx ***/ c.b[0] = 4987513; c.b[1] = 84313948; c.b[2] = 71; c.b[3] = 498532; /* stvx vrs10, r3, r4 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STVX(10, 3, 4))); if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && cached_b[2] == c.b[2] && cached_b[3] == c.b[3]) show_result("stvx", "PASS"); else show_result("stvx", "FAIL"); } #else static void __init test_lvx_stvx(void) { show_result("lvx", "SKIP (CONFIG_ALTIVEC is not set)"); show_result("stvx", "SKIP (CONFIG_ALTIVEC is not set)"); } #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX static void __init test_lxvd2x_stxvd2x(void) { struct pt_regs regs; union { vector128 a; u32 b[4]; } c; u32 cached_b[4]; int stepped = -1; init_pt_regs(&regs); /*** lxvd2x ***/ cached_b[0] = c.b[0] = 18233; cached_b[1] = c.b[1] = 34863571; cached_b[2] = c.b[2] = 834; cached_b[3] = c.b[3] = 6138911; regs.gpr[3] = (unsigned long) &c.a; regs.gpr[4] = 0; /* lxvd2x vsr39, r3, r4 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LXVD2X(39, R3, R4))); if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) { show_result("lxvd2x", "PASS"); } else { if (!cpu_has_feature(CPU_FTR_VSX)) show_result("lxvd2x", "PASS (!CPU_FTR_VSX)"); else show_result("lxvd2x", "FAIL"); } /*** stxvd2x ***/ c.b[0] = 21379463; c.b[1] = 87; c.b[2] = 374234; c.b[3] = 4; /* stxvd2x vsr39, r3, r4 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STXVD2X(39, R3, R4))); if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && cached_b[2] == c.b[2] && cached_b[3] == c.b[3] && cpu_has_feature(CPU_FTR_VSX)) { show_result("stxvd2x", "PASS"); } else { if (!cpu_has_feature(CPU_FTR_VSX)) show_result("stxvd2x", "PASS (!CPU_FTR_VSX)"); else show_result("stxvd2x", "FAIL"); } } #else static void __init test_lxvd2x_stxvd2x(void) { show_result("lxvd2x", "SKIP (CONFIG_VSX is not set)"); show_result("stxvd2x", "SKIP (CONFIG_VSX is not set)"); } #endif /* CONFIG_VSX */ #ifdef CONFIG_VSX static void __init test_lxvp_stxvp(void) { struct pt_regs regs; union { vector128 a; u32 b[4]; } c[2]; u32 cached_b[8]; int stepped = -1; if (!cpu_has_feature(CPU_FTR_ARCH_31)) { show_result("lxvp", "SKIP (!CPU_FTR_ARCH_31)"); show_result("stxvp", "SKIP (!CPU_FTR_ARCH_31)"); return; } init_pt_regs(&regs); /*** lxvp ***/ cached_b[0] = c[0].b[0] = 18233; cached_b[1] = c[0].b[1] = 34863571; cached_b[2] = c[0].b[2] = 834; cached_b[3] = c[0].b[3] = 6138911; cached_b[4] = c[1].b[0] = 1234; cached_b[5] = c[1].b[1] = 5678; cached_b[6] = c[1].b[2] = 91011; cached_b[7] = c[1].b[3] = 121314; regs.gpr[4] = (unsigned long)&c[0].a; /* * lxvp XTp,DQ(RA) * XTp = 32xTX + 2xTp * let TX=1 Tp=1 RA=4 DQ=0 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LXVP(34, 4, 0))); if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) { show_result("lxvp", "PASS"); } else { if (!cpu_has_feature(CPU_FTR_VSX)) show_result("lxvp", "PASS (!CPU_FTR_VSX)"); else show_result("lxvp", "FAIL"); } /*** stxvp ***/ c[0].b[0] = 21379463; c[0].b[1] = 87; c[0].b[2] = 
374234; c[0].b[3] = 4; c[1].b[0] = 90; c[1].b[1] = 122; c[1].b[2] = 555; c[1].b[3] = 32144; /* * stxvp XSp,DQ(RA) * XSp = 32xSX + 2xSp * let SX=1 Sp=1 RA=4 DQ=0 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STXVP(34, 4, 0))); if (stepped == 1 && cached_b[0] == c[0].b[0] && cached_b[1] == c[0].b[1] && cached_b[2] == c[0].b[2] && cached_b[3] == c[0].b[3] && cached_b[4] == c[1].b[0] && cached_b[5] == c[1].b[1] && cached_b[6] == c[1].b[2] && cached_b[7] == c[1].b[3] && cpu_has_feature(CPU_FTR_VSX)) { show_result("stxvp", "PASS"); } else { if (!cpu_has_feature(CPU_FTR_VSX)) show_result("stxvp", "PASS (!CPU_FTR_VSX)"); else show_result("stxvp", "FAIL"); } } #else static void __init test_lxvp_stxvp(void) { show_result("lxvp", "SKIP (CONFIG_VSX is not set)"); show_result("stxvp", "SKIP (CONFIG_VSX is not set)"); } #endif /* CONFIG_VSX */ #ifdef CONFIG_VSX static void __init test_lxvpx_stxvpx(void) { struct pt_regs regs; union { vector128 a; u32 b[4]; } c[2]; u32 cached_b[8]; int stepped = -1; if (!cpu_has_feature(CPU_FTR_ARCH_31)) { show_result("lxvpx", "SKIP (!CPU_FTR_ARCH_31)"); show_result("stxvpx", "SKIP (!CPU_FTR_ARCH_31)"); return; } init_pt_regs(&regs); /*** lxvpx ***/ cached_b[0] = c[0].b[0] = 18233; cached_b[1] = c[0].b[1] = 34863571; cached_b[2] = c[0].b[2] = 834; cached_b[3] = c[0].b[3] = 6138911; cached_b[4] = c[1].b[0] = 1234; cached_b[5] = c[1].b[1] = 5678; cached_b[6] = c[1].b[2] = 91011; cached_b[7] = c[1].b[3] = 121314; regs.gpr[3] = (unsigned long)&c[0].a; regs.gpr[4] = 0; /* * lxvpx XTp,RA,RB * XTp = 32xTX + 2xTp * let TX=1 Tp=1 RA=3 RB=4 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LXVPX(34, 3, 4))); if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) { show_result("lxvpx", "PASS"); } else { if (!cpu_has_feature(CPU_FTR_VSX)) show_result("lxvpx", "PASS (!CPU_FTR_VSX)"); else show_result("lxvpx", "FAIL"); } /*** stxvpx ***/ c[0].b[0] = 21379463; c[0].b[1] = 87; c[0].b[2] = 374234; c[0].b[3] = 4; c[1].b[0] = 90; c[1].b[1] = 122; c[1].b[2] = 555; c[1].b[3] = 32144; /* * stxvpx XSp,RA,RB * XSp = 32xSX + 2xSp * let SX=1 Sp=1 RA=3 RB=4 */ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STXVPX(34, 3, 4))); if (stepped == 1 && cached_b[0] == c[0].b[0] && cached_b[1] == c[0].b[1] && cached_b[2] == c[0].b[2] && cached_b[3] == c[0].b[3] && cached_b[4] == c[1].b[0] && cached_b[5] == c[1].b[1] && cached_b[6] == c[1].b[2] && cached_b[7] == c[1].b[3] && cpu_has_feature(CPU_FTR_VSX)) { show_result("stxvpx", "PASS"); } else { if (!cpu_has_feature(CPU_FTR_VSX)) show_result("stxvpx", "PASS (!CPU_FTR_VSX)"); else show_result("stxvpx", "FAIL"); } } #else static void __init test_lxvpx_stxvpx(void) { show_result("lxvpx", "SKIP (CONFIG_VSX is not set)"); show_result("stxvpx", "SKIP (CONFIG_VSX is not set)"); } #endif /* CONFIG_VSX */ #ifdef CONFIG_VSX static void __init test_plxvp_pstxvp(void) { ppc_inst_t instr; struct pt_regs regs; union { vector128 a; u32 b[4]; } c[2]; u32 cached_b[8]; int stepped = -1; if (!cpu_has_feature(CPU_FTR_ARCH_31)) { show_result("plxvp", "SKIP (!CPU_FTR_ARCH_31)"); show_result("pstxvp", "SKIP (!CPU_FTR_ARCH_31)"); return; } /*** plxvp ***/ cached_b[0] = c[0].b[0] = 18233; cached_b[1] = c[0].b[1] = 34863571; cached_b[2] = c[0].b[2] = 834; cached_b[3] = c[0].b[3] = 6138911; cached_b[4] = c[1].b[0] = 1234; cached_b[5] = c[1].b[1] = 5678; cached_b[6] = c[1].b[2] = 91011; cached_b[7] = c[1].b[3] = 121314; init_pt_regs(&regs); regs.gpr[3] = (unsigned long)&c[0].a; /* * plxvp XTp,D(RA),R * XTp = 32xTX + 2xTp * let RA=3 R=0 D=d0||d1=0 R=0 Tp=1 TX=1 */ instr = 
ppc_inst_prefix(PPC_RAW_PLXVP_P(34, 0, 3, 0), PPC_RAW_PLXVP_S(34, 0, 3, 0)); stepped = emulate_step(&regs, instr); if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) { show_result("plxvp", "PASS"); } else { if (!cpu_has_feature(CPU_FTR_VSX)) show_result("plxvp", "PASS (!CPU_FTR_VSX)"); else show_result("plxvp", "FAIL"); } /*** pstxvp ***/ c[0].b[0] = 21379463; c[0].b[1] = 87; c[0].b[2] = 374234; c[0].b[3] = 4; c[1].b[0] = 90; c[1].b[1] = 122; c[1].b[2] = 555; c[1].b[3] = 32144; /* * pstxvp XSp,D(RA),R * XSp = 32xSX + 2xSp * let RA=3 D=d0||d1=0 R=0 Sp=1 SX=1 */ instr = ppc_inst_prefix(PPC_RAW_PSTXVP_P(34, 0, 3, 0), PPC_RAW_PSTXVP_S(34, 0, 3, 0)); stepped = emulate_step(&regs, instr); if (stepped == 1 && cached_b[0] == c[0].b[0] && cached_b[1] == c[0].b[1] && cached_b[2] == c[0].b[2] && cached_b[3] == c[0].b[3] && cached_b[4] == c[1].b[0] && cached_b[5] == c[1].b[1] && cached_b[6] == c[1].b[2] && cached_b[7] == c[1].b[3] && cpu_has_feature(CPU_FTR_VSX)) { show_result("pstxvp", "PASS"); } else { if (!cpu_has_feature(CPU_FTR_VSX)) show_result("pstxvp", "PASS (!CPU_FTR_VSX)"); else show_result("pstxvp", "FAIL"); } } #else static void __init test_plxvp_pstxvp(void) { show_result("plxvp", "SKIP (CONFIG_VSX is not set)"); show_result("pstxvp", "SKIP (CONFIG_VSX is not set)"); } #endif /* CONFIG_VSX */ static void __init run_tests_load_store(void) { test_ld(); test_pld(); test_lwz(); test_plwz(); test_lwzx(); test_std(); test_pstd(); test_ldarx_stdcx(); test_lfsx_stfsx(); test_plfs_pstfs(); test_lfdx_stfdx(); test_plfd_pstfd(); test_lvx_stvx(); test_lxvd2x_stxvd2x(); test_lxvp_stxvp(); test_lxvpx_stxvpx(); test_plxvp_pstxvp(); } struct compute_test { char *mnemonic; unsigned long cpu_feature; struct { char *descr; unsigned long flags; ppc_inst_t instr; struct pt_regs regs; } subtests[MAX_SUBTESTS + 1]; }; /* Extreme values for si0||si1 (the MLS:D-form 34 bit immediate field) */ #define SI_MIN BIT(33) #define SI_MAX (BIT(33) - 1) #define SI_UMAX (BIT(34) - 1) static struct compute_test compute_tests[] = { { .mnemonic = "nop", .subtests = { { .descr = "R0 = LONG_MAX", .instr = ppc_inst(PPC_RAW_NOP()), .regs = { .gpr[0] = LONG_MAX, } } } }, { .mnemonic = "setb", .cpu_feature = CPU_FTR_ARCH_300, .subtests = { { .descr = "BFA = 1, CR = GT", .instr = ppc_inst(PPC_RAW_SETB(20, 1)), .regs = { .ccr = 0x4000000, } }, { .descr = "BFA = 4, CR = LT", .instr = ppc_inst(PPC_RAW_SETB(20, 4)), .regs = { .ccr = 0x8000, } }, { .descr = "BFA = 5, CR = EQ", .instr = ppc_inst(PPC_RAW_SETB(20, 5)), .regs = { .ccr = 0x200, } } } }, { .mnemonic = "add", .subtests = { { .descr = "RA = LONG_MIN, RB = LONG_MIN", .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MIN, } }, { .descr = "RA = LONG_MIN, RB = LONG_MAX", .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MAX, } }, { .descr = "RA = LONG_MAX, RB = LONG_MAX", .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = LONG_MAX, .gpr[22] = LONG_MAX, } }, { .descr = "RA = ULONG_MAX, RB = ULONG_MAX", .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = ULONG_MAX, } }, { .descr = "RA = ULONG_MAX, RB = 0x1", .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = 0x1, } }, { .descr = "RA = INT_MIN, RB = INT_MIN", .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MIN, } }, { .descr = "RA = INT_MIN, RB = INT_MAX", .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, 
.gpr[22] = INT_MAX, } }, { .descr = "RA = INT_MAX, RB = INT_MAX", .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = INT_MAX, .gpr[22] = INT_MAX, } }, { .descr = "RA = UINT_MAX, RB = UINT_MAX", .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = UINT_MAX, } }, { .descr = "RA = UINT_MAX, RB = 0x1", .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = 0x1, } } } }, { .mnemonic = "add.", .subtests = { { .descr = "RA = LONG_MIN, RB = LONG_MIN", .flags = IGNORE_CCR, .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MIN, } }, { .descr = "RA = LONG_MIN, RB = LONG_MAX", .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MAX, } }, { .descr = "RA = LONG_MAX, RB = LONG_MAX", .flags = IGNORE_CCR, .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MAX, .gpr[22] = LONG_MAX, } }, { .descr = "RA = ULONG_MAX, RB = ULONG_MAX", .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = ULONG_MAX, } }, { .descr = "RA = ULONG_MAX, RB = 0x1", .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = 0x1, } }, { .descr = "RA = INT_MIN, RB = INT_MIN", .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MIN, } }, { .descr = "RA = INT_MIN, RB = INT_MAX", .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MAX, } }, { .descr = "RA = INT_MAX, RB = INT_MAX", .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = INT_MAX, .gpr[22] = INT_MAX, } }, { .descr = "RA = UINT_MAX, RB = UINT_MAX", .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = UINT_MAX, } }, { .descr = "RA = UINT_MAX, RB = 0x1", .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = 0x1, } } } }, { .mnemonic = "addc", .subtests = { { .descr = "RA = LONG_MIN, RB = LONG_MIN", .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MIN, } }, { .descr = "RA = LONG_MIN, RB = LONG_MAX", .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MAX, } }, { .descr = "RA = LONG_MAX, RB = LONG_MAX", .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = LONG_MAX, .gpr[22] = LONG_MAX, } }, { .descr = "RA = ULONG_MAX, RB = ULONG_MAX", .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = ULONG_MAX, } }, { .descr = "RA = ULONG_MAX, RB = 0x1", .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = 0x1, } }, { .descr = "RA = INT_MIN, RB = INT_MIN", .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MIN, } }, { .descr = "RA = INT_MIN, RB = INT_MAX", .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MAX, } }, { .descr = "RA = INT_MAX, RB = INT_MAX", .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = INT_MAX, .gpr[22] = INT_MAX, } }, { .descr = "RA = UINT_MAX, RB = UINT_MAX", .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = UINT_MAX, } }, { .descr = "RA = UINT_MAX, RB = 0x1", .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = 0x1, } }, { .descr = "RA = LONG_MIN | INT_MIN, RB = LONG_MIN | INT_MIN", .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), 
.regs = { .gpr[21] = LONG_MIN | (uint)INT_MIN, .gpr[22] = LONG_MIN | (uint)INT_MIN, } } } }, { .mnemonic = "addc.", .subtests = { { .descr = "RA = LONG_MIN, RB = LONG_MIN", .flags = IGNORE_CCR, .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MIN, } }, { .descr = "RA = LONG_MIN, RB = LONG_MAX", .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MAX, } }, { .descr = "RA = LONG_MAX, RB = LONG_MAX", .flags = IGNORE_CCR, .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MAX, .gpr[22] = LONG_MAX, } }, { .descr = "RA = ULONG_MAX, RB = ULONG_MAX", .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = ULONG_MAX, } }, { .descr = "RA = ULONG_MAX, RB = 0x1", .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = 0x1, } }, { .descr = "RA = INT_MIN, RB = INT_MIN", .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MIN, } }, { .descr = "RA = INT_MIN, RB = INT_MAX", .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MAX, } }, { .descr = "RA = INT_MAX, RB = INT_MAX", .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = INT_MAX, .gpr[22] = INT_MAX, } }, { .descr = "RA = UINT_MAX, RB = UINT_MAX", .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = UINT_MAX, } }, { .descr = "RA = UINT_MAX, RB = 0x1", .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = 0x1, } }, { .descr = "RA = LONG_MIN | INT_MIN, RB = LONG_MIN | INT_MIN", .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN | (uint)INT_MIN, .gpr[22] = LONG_MIN | (uint)INT_MIN, } } } }, { .mnemonic = "divde", .subtests = { { .descr = "RA = LONG_MIN, RB = LONG_MIN", .instr = ppc_inst(PPC_RAW_DIVDE(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MIN, } }, { .descr = "RA = 1L, RB = 0", .instr = ppc_inst(PPC_RAW_DIVDE(20, 21, 22)), .flags = IGNORE_GPR(20), .regs = { .gpr[21] = 1L, .gpr[22] = 0, } }, { .descr = "RA = LONG_MIN, RB = LONG_MAX", .instr = ppc_inst(PPC_RAW_DIVDE(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MAX, } } } }, { .mnemonic = "divde.", .subtests = { { .descr = "RA = LONG_MIN, RB = LONG_MIN", .instr = ppc_inst(PPC_RAW_DIVDE_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MIN, } }, { .descr = "RA = 1L, RB = 0", .instr = ppc_inst(PPC_RAW_DIVDE_DOT(20, 21, 22)), .flags = IGNORE_GPR(20), .regs = { .gpr[21] = 1L, .gpr[22] = 0, } }, { .descr = "RA = LONG_MIN, RB = LONG_MAX", .instr = ppc_inst(PPC_RAW_DIVDE_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MAX, } } } }, { .mnemonic = "divdeu", .subtests = { { .descr = "RA = LONG_MIN, RB = LONG_MIN", .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)), .flags = IGNORE_GPR(20), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MIN, } }, { .descr = "RA = 1L, RB = 0", .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)), .flags = IGNORE_GPR(20), .regs = { .gpr[21] = 1L, .gpr[22] = 0, } }, { .descr = "RA = LONG_MIN, RB = LONG_MAX", .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MAX, } }, { .descr = "RA = LONG_MAX - 1, RB = LONG_MAX", .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)), .regs = { .gpr[21] = LONG_MAX - 1, .gpr[22] = LONG_MAX, } }, { .descr = "RA = LONG_MIN + 1, RB = LONG_MIN", .instr = ppc_inst(PPC_RAW_DIVDEU(20, 
21, 22)), .flags = IGNORE_GPR(20), .regs = { .gpr[21] = LONG_MIN + 1, .gpr[22] = LONG_MIN, } } } }, { .mnemonic = "divdeu.", .subtests = { { .descr = "RA = LONG_MIN, RB = LONG_MIN", .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)), .flags = IGNORE_GPR(20), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MIN, } }, { .descr = "RA = 1L, RB = 0", .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)), .flags = IGNORE_GPR(20), .regs = { .gpr[21] = 1L, .gpr[22] = 0, } }, { .descr = "RA = LONG_MIN, RB = LONG_MAX", .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MAX, } }, { .descr = "RA = LONG_MAX - 1, RB = LONG_MAX", .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MAX - 1, .gpr[22] = LONG_MAX, } }, { .descr = "RA = LONG_MIN + 1, RB = LONG_MIN", .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)), .flags = IGNORE_GPR(20), .regs = { .gpr[21] = LONG_MIN + 1, .gpr[22] = LONG_MIN, } } } }, { .mnemonic = "paddi", .cpu_feature = CPU_FTR_ARCH_31, .subtests = { { .descr = "RA = LONG_MIN, SI = SI_MIN, R = 0", .instr = TEST_PADDI(21, 22, SI_MIN, 0), .regs = { .gpr[21] = 0, .gpr[22] = LONG_MIN, } }, { .descr = "RA = LONG_MIN, SI = SI_MAX, R = 0", .instr = TEST_PADDI(21, 22, SI_MAX, 0), .regs = { .gpr[21] = 0, .gpr[22] = LONG_MIN, } }, { .descr = "RA = LONG_MAX, SI = SI_MAX, R = 0", .instr = TEST_PADDI(21, 22, SI_MAX, 0), .regs = { .gpr[21] = 0, .gpr[22] = LONG_MAX, } }, { .descr = "RA = ULONG_MAX, SI = SI_UMAX, R = 0", .instr = TEST_PADDI(21, 22, SI_UMAX, 0), .regs = { .gpr[21] = 0, .gpr[22] = ULONG_MAX, } }, { .descr = "RA = ULONG_MAX, SI = 0x1, R = 0", .instr = TEST_PADDI(21, 22, 0x1, 0), .regs = { .gpr[21] = 0, .gpr[22] = ULONG_MAX, } }, { .descr = "RA = INT_MIN, SI = SI_MIN, R = 0", .instr = TEST_PADDI(21, 22, SI_MIN, 0), .regs = { .gpr[21] = 0, .gpr[22] = INT_MIN, } }, { .descr = "RA = INT_MIN, SI = SI_MAX, R = 0", .instr = TEST_PADDI(21, 22, SI_MAX, 0), .regs = { .gpr[21] = 0, .gpr[22] = INT_MIN, } }, { .descr = "RA = INT_MAX, SI = SI_MAX, R = 0", .instr = TEST_PADDI(21, 22, SI_MAX, 0), .regs = { .gpr[21] = 0, .gpr[22] = INT_MAX, } }, { .descr = "RA = UINT_MAX, SI = 0x1, R = 0", .instr = TEST_PADDI(21, 22, 0x1, 0), .regs = { .gpr[21] = 0, .gpr[22] = UINT_MAX, } }, { .descr = "RA = UINT_MAX, SI = SI_MAX, R = 0", .instr = TEST_PADDI(21, 22, SI_MAX, 0), .regs = { .gpr[21] = 0, .gpr[22] = UINT_MAX, } }, { .descr = "RA is r0, SI = SI_MIN, R = 0", .instr = TEST_PADDI(21, 0, SI_MIN, 0), .regs = { .gpr[21] = 0x0, } }, { .descr = "RA = 0, SI = SI_MIN, R = 0", .instr = TEST_PADDI(21, 22, SI_MIN, 0), .regs = { .gpr[21] = 0x0, .gpr[22] = 0x0, } }, { .descr = "RA is r0, SI = 0, R = 1", .instr = TEST_PADDI(21, 0, 0, 1), .regs = { .gpr[21] = 0, } }, { .descr = "RA is r0, SI = SI_MIN, R = 1", .instr = TEST_PADDI(21, 0, SI_MIN, 1), .regs = { .gpr[21] = 0, } }, /* Invalid instruction form with R = 1 and RA != 0 */ { .descr = "RA = R22(0), SI = 0, R = 1", .instr = TEST_PADDI(21, 22, 0, 1), .flags = NEGATIVE_TEST, .regs = { .gpr[21] = 0, .gpr[22] = 0, } } } } }; static int __init emulate_compute_instr(struct pt_regs *regs, ppc_inst_t instr, bool negative) { int analysed; struct instruction_op op; if (!regs || !ppc_inst_val(instr)) return -EINVAL; /* This is not a return frame regs */ regs->nip = patch_site_addr(&patch__exec_instr); analysed = analyse_instr(&op, regs, instr); if (analysed != 1 || GETTYPE(op.type) != COMPUTE) { if (negative) return -EFAULT; pr_info("emulation failed, instruction = %08lx\n", ppc_inst_as_ulong(instr)); return -EFAULT; } 
if (analysed == 1 && negative) pr_info("negative test failed, instruction = %08lx\n", ppc_inst_as_ulong(instr)); if (!negative) emulate_update_regs(regs, &op); return 0; } static int __init execute_compute_instr(struct pt_regs *regs, ppc_inst_t instr) { extern int exec_instr(struct pt_regs *regs); if (!regs || !ppc_inst_val(instr)) return -EINVAL; /* Patch the NOP with the actual instruction */ patch_instruction_site(&patch__exec_instr, instr); if (exec_instr(regs)) { pr_info("execution failed, instruction = %08lx\n", ppc_inst_as_ulong(instr)); return -EFAULT; } return 0; } #define gpr_mismatch(gprn, exp, got) \ pr_info("GPR%u mismatch, exp = 0x%016lx, got = 0x%016lx\n", \ gprn, exp, got) #define reg_mismatch(name, exp, got) \ pr_info("%s mismatch, exp = 0x%016lx, got = 0x%016lx\n", \ name, exp, got) static void __init run_tests_compute(void) { unsigned long flags; struct compute_test *test; struct pt_regs *regs, exp, got; unsigned int i, j, k; ppc_inst_t instr; bool ignore_gpr, ignore_xer, ignore_ccr, passed, rc, negative; for (i = 0; i < ARRAY_SIZE(compute_tests); i++) { test = &compute_tests[i]; if (test->cpu_feature && !early_cpu_has_feature(test->cpu_feature)) { show_result(test->mnemonic, "SKIP (!CPU_FTR)"); continue; } for (j = 0; j < MAX_SUBTESTS && test->subtests[j].descr; j++) { instr = test->subtests[j].instr; flags = test->subtests[j].flags; regs = &test->subtests[j].regs; negative = flags & NEGATIVE_TEST; ignore_xer = flags & IGNORE_XER; ignore_ccr = flags & IGNORE_CCR; passed = true; memcpy(&exp, regs, sizeof(struct pt_regs)); memcpy(&got, regs, sizeof(struct pt_regs)); /* * Set a compatible MSR value explicitly to ensure * that XER and CR bits are updated appropriately */ exp.msr = MSR_KERNEL; got.msr = MSR_KERNEL; rc = emulate_compute_instr(&got, instr, negative) != 0; if (negative) { /* skip executing instruction */ passed = rc; goto print; } else if (rc || execute_compute_instr(&exp, instr)) { passed = false; goto print; } /* Verify GPR values */ for (k = 0; k < 32; k++) { ignore_gpr = flags & IGNORE_GPR(k); if (!ignore_gpr && exp.gpr[k] != got.gpr[k]) { passed = false; gpr_mismatch(k, exp.gpr[k], got.gpr[k]); } } /* Verify LR value */ if (exp.link != got.link) { passed = false; reg_mismatch("LR", exp.link, got.link); } /* Verify XER value */ if (!ignore_xer && exp.xer != got.xer) { passed = false; reg_mismatch("XER", exp.xer, got.xer); } /* Verify CR value */ if (!ignore_ccr && exp.ccr != got.ccr) { passed = false; reg_mismatch("CR", exp.ccr, got.ccr); } print: show_result_with_descr(test->mnemonic, test->subtests[j].descr, passed ? "PASS" : "FAIL"); } } } static int __init test_emulate_step(void) { printk(KERN_INFO "Running instruction emulation self-tests ...\n"); run_tests_load_store(); run_tests_compute(); return 0; } late_initcall(test_emulate_step);
linux-master
arch/powerpc/lib/test_emulate_step.c
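The paddi subtests above probe the extremes of the 34-bit MLS:D-form immediate (SI_MIN, SI_MAX, SI_UMAX), which TEST_PADDI splits across the prefix word (high 18 bits via IMM_H) and the suffix word (low 16 bits via IMM_L). The userspace sketch below shows that split and the sign extension from bit 33; the helper names are illustrative, not the kernel macros.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Split a 34-bit MLS:D-form immediate the way the test macros do:
 * 18 high bits carried in the prefix word, 16 low bits in the suffix. */
static uint32_t imm_hi18(uint64_t si) { return (si >> 16) & 0x3ffff; }
static uint32_t imm_lo16(uint64_t si) { return si & 0xffff; }

/* Reassemble and sign-extend from bit 33, as paddi does. */
static int64_t reassemble_si(uint32_t hi18, uint32_t lo16)
{
	uint64_t si = ((uint64_t)hi18 << 16) | lo16;

	if (si & (1ULL << 33))
		si |= ~((1ULL << 34) - 1);	/* sign-extend the 34-bit value */

	return (int64_t)si;
}

int main(void)
{
	const uint64_t SI_MIN = 1ULL << 33;		/* most negative value  */
	const uint64_t SI_MAX = (1ULL << 33) - 1;	/* most positive value  */
	const uint64_t SI_UMAX = (1ULL << 34) - 1;	/* all 34 bits set (-1) */

	assert(reassemble_si(imm_hi18(SI_MIN), imm_lo16(SI_MIN)) == -(1LL << 33));
	assert(reassemble_si(imm_hi18(SI_MAX), imm_lo16(SI_MAX)) == (1LL << 33) - 1);
	assert(reassemble_si(imm_hi18(SI_UMAX), imm_lo16(SI_UMAX)) == -1);

	printf("34-bit prefixed-immediate checks passed\n");
	return 0;
}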
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2008 Michael Ellerman, IBM Corporation. */ #include <linux/kprobes.h> #include <linux/mmu_context.h> #include <linux/random.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/cpuhotplug.h> #include <linux/uaccess.h> #include <linux/jump_label.h> #include <asm/debug.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/page.h> #include <asm/code-patching.h> #include <asm/inst.h> static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr) { if (!ppc_inst_prefixed(instr)) { u32 val = ppc_inst_val(instr); __put_kernel_nofault(patch_addr, &val, u32, failed); } else { u64 val = ppc_inst_as_ulong(instr); __put_kernel_nofault(patch_addr, &val, u64, failed); } asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr), "r" (exec_addr)); return 0; failed: return -EPERM; } int raw_patch_instruction(u32 *addr, ppc_inst_t instr) { return __patch_instruction(addr, instr, addr); } struct patch_context { union { struct vm_struct *area; struct mm_struct *mm; }; unsigned long addr; pte_t *pte; }; static DEFINE_PER_CPU(struct patch_context, cpu_patching_context); static int map_patch_area(void *addr, unsigned long text_poke_addr); static void unmap_patch_area(unsigned long addr); static bool mm_patch_enabled(void) { return IS_ENABLED(CONFIG_SMP) && radix_enabled(); } /* * The following applies for Radix MMU. Hash MMU has different requirements, * and so is not supported. * * Changing mm requires context synchronising instructions on both sides of * the context switch, as well as a hwsync between the last instruction for * which the address of an associated storage access was translated using * the current context. * * switch_mm_irqs_off() performs an isync after the context switch. It is * the responsibility of the caller to perform the CSI and hwsync before * starting/stopping the temp mm. 
*/ static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm) { struct mm_struct *orig_mm = current->active_mm; lockdep_assert_irqs_disabled(); switch_mm_irqs_off(orig_mm, temp_mm, current); WARN_ON(!mm_is_thread_local(temp_mm)); suspend_breakpoints(); return orig_mm; } static void stop_using_temp_mm(struct mm_struct *temp_mm, struct mm_struct *orig_mm) { lockdep_assert_irqs_disabled(); switch_mm_irqs_off(temp_mm, orig_mm, current); restore_breakpoints(); } static int text_area_cpu_up(unsigned int cpu) { struct vm_struct *area; unsigned long addr; int err; area = get_vm_area(PAGE_SIZE, VM_ALLOC); if (!area) { WARN_ONCE(1, "Failed to create text area for cpu %d\n", cpu); return -1; } // Map/unmap the area to ensure all page tables are pre-allocated addr = (unsigned long)area->addr; err = map_patch_area(empty_zero_page, addr); if (err) return err; unmap_patch_area(addr); this_cpu_write(cpu_patching_context.area, area); this_cpu_write(cpu_patching_context.addr, addr); this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr)); return 0; } static int text_area_cpu_down(unsigned int cpu) { free_vm_area(this_cpu_read(cpu_patching_context.area)); this_cpu_write(cpu_patching_context.area, NULL); this_cpu_write(cpu_patching_context.addr, 0); this_cpu_write(cpu_patching_context.pte, NULL); return 0; } static void put_patching_mm(struct mm_struct *mm, unsigned long patching_addr) { struct mmu_gather tlb; tlb_gather_mmu(&tlb, mm); free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0); mmput(mm); } static int text_area_cpu_up_mm(unsigned int cpu) { struct mm_struct *mm; unsigned long addr; pte_t *pte; spinlock_t *ptl; mm = mm_alloc(); if (WARN_ON(!mm)) goto fail_no_mm; /* * Choose a random page-aligned address from the interval * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE]. * The lower address bound is PAGE_SIZE to avoid the zero-page. */ addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT; /* * PTE allocation uses GFP_KERNEL which means we need to * pre-allocate the PTE here because we cannot do the * allocation during patching when IRQs are disabled. * * Using get_locked_pte() to avoid open coding, the lock * is unnecessary. */ pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto fail_no_pte; pte_unmap_unlock(pte, ptl); this_cpu_write(cpu_patching_context.mm, mm); this_cpu_write(cpu_patching_context.addr, addr); return 0; fail_no_pte: put_patching_mm(mm, addr); fail_no_mm: return -ENOMEM; } static int text_area_cpu_down_mm(unsigned int cpu) { put_patching_mm(this_cpu_read(cpu_patching_context.mm), this_cpu_read(cpu_patching_context.addr)); this_cpu_write(cpu_patching_context.mm, NULL); this_cpu_write(cpu_patching_context.addr, 0); return 0; } static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done); void __init poking_init(void) { int ret; if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) return; if (mm_patch_enabled()) ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/text_poke_mm:online", text_area_cpu_up_mm, text_area_cpu_down_mm); else ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/text_poke:online", text_area_cpu_up, text_area_cpu_down); /* cpuhp_setup_state returns >= 0 on success */ if (WARN_ON(ret < 0)) return; static_branch_enable(&poking_init_done); } static unsigned long get_patch_pfn(void *addr) { if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr)) return vmalloc_to_pfn(addr); else return __pa_symbol(addr) >> PAGE_SHIFT; } /* * This can be called for kernel text or a module. 
*/ static int map_patch_area(void *addr, unsigned long text_poke_addr) { unsigned long pfn = get_patch_pfn(addr); return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL); } static void unmap_patch_area(unsigned long addr) { pte_t *ptep; pmd_t *pmdp; pud_t *pudp; p4d_t *p4dp; pgd_t *pgdp; pgdp = pgd_offset_k(addr); if (WARN_ON(pgd_none(*pgdp))) return; p4dp = p4d_offset(pgdp, addr); if (WARN_ON(p4d_none(*p4dp))) return; pudp = pud_offset(p4dp, addr); if (WARN_ON(pud_none(*pudp))) return; pmdp = pmd_offset(pudp, addr); if (WARN_ON(pmd_none(*pmdp))) return; ptep = pte_offset_kernel(pmdp, addr); if (WARN_ON(pte_none(*ptep))) return; /* * In hash, pte_clear flushes the tlb, in radix, we have to */ pte_clear(&init_mm, addr, ptep); flush_tlb_kernel_range(addr, addr + PAGE_SIZE); } static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr) { int err; u32 *patch_addr; unsigned long text_poke_addr; pte_t *pte; unsigned long pfn = get_patch_pfn(addr); struct mm_struct *patching_mm; struct mm_struct *orig_mm; spinlock_t *ptl; patching_mm = __this_cpu_read(cpu_patching_context.mm); text_poke_addr = __this_cpu_read(cpu_patching_context.addr); patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr)); pte = get_locked_pte(patching_mm, text_poke_addr, &ptl); if (!pte) return -ENOMEM; __set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0); /* order PTE update before use, also serves as the hwsync */ asm volatile("ptesync": : :"memory"); /* order context switch after arbitrary prior code */ isync(); orig_mm = start_using_temp_mm(patching_mm); err = __patch_instruction(addr, instr, patch_addr); /* hwsync performed by __patch_instruction (sync) if successful */ if (err) mb(); /* sync */ /* context synchronisation performed by __patch_instruction (isync or exception) */ stop_using_temp_mm(patching_mm, orig_mm); pte_clear(patching_mm, text_poke_addr, pte); /* * ptesync to order PTE update before TLB invalidation done * by radix__local_flush_tlb_page_psize (in _tlbiel_va) */ local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize); pte_unmap_unlock(pte, ptl); return err; } static int __do_patch_instruction(u32 *addr, ppc_inst_t instr) { int err; u32 *patch_addr; unsigned long text_poke_addr; pte_t *pte; unsigned long pfn = get_patch_pfn(addr); text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK; patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr)); pte = __this_cpu_read(cpu_patching_context.pte); __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0); /* See ptesync comment in radix__set_pte_at() */ if (radix_enabled()) asm volatile("ptesync": : :"memory"); err = __patch_instruction(addr, instr, patch_addr); pte_clear(&init_mm, text_poke_addr, pte); flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE); return err; } int patch_instruction(u32 *addr, ppc_inst_t instr) { int err; unsigned long flags; /* * During early early boot patch_instruction is called * when text_poke_area is not ready, but we still need * to allow patching. 
We just do the plain old patching */ if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) || !static_branch_likely(&poking_init_done)) return raw_patch_instruction(addr, instr); local_irq_save(flags); if (mm_patch_enabled()) err = __do_patch_instruction_mm(addr, instr); else err = __do_patch_instruction(addr, instr); local_irq_restore(flags); return err; } NOKPROBE_SYMBOL(patch_instruction); int patch_branch(u32 *addr, unsigned long target, int flags) { ppc_inst_t instr; if (create_branch(&instr, addr, target, flags)) return -ERANGE; return patch_instruction(addr, instr); } /* * Helper to check if a given instruction is a conditional branch * Derived from the conditional checks in analyse_instr() */ bool is_conditional_branch(ppc_inst_t instr) { unsigned int opcode = ppc_inst_primary_opcode(instr); if (opcode == 16) /* bc, bca, bcl, bcla */ return true; if (opcode == 19) { switch ((ppc_inst_val(instr) >> 1) & 0x3ff) { case 16: /* bclr, bclrl */ case 528: /* bcctr, bcctrl */ case 560: /* bctar, bctarl */ return true; } } return false; } NOKPROBE_SYMBOL(is_conditional_branch); int create_cond_branch(ppc_inst_t *instr, const u32 *addr, unsigned long target, int flags) { long offset; offset = target; if (! (flags & BRANCH_ABSOLUTE)) offset = offset - (unsigned long)addr; /* Check we can represent the target in the instruction format */ if (!is_offset_in_cond_branch_range(offset)) return 1; /* Mask out the flags and target, so they don't step on each other. */ *instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC)); return 0; } int instr_is_relative_branch(ppc_inst_t instr) { if (ppc_inst_val(instr) & BRANCH_ABSOLUTE) return 0; return instr_is_branch_iform(instr) || instr_is_branch_bform(instr); } int instr_is_relative_link_branch(ppc_inst_t instr) { return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK); } static unsigned long branch_iform_target(const u32 *instr) { signed long imm; imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC; /* If the top bit of the immediate value is set this is negative */ if (imm & 0x2000000) imm -= 0x4000000; if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0) imm += (unsigned long)instr; return (unsigned long)imm; } static unsigned long branch_bform_target(const u32 *instr) { signed long imm; imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC; /* If the top bit of the immediate value is set this is negative */ if (imm & 0x8000) imm -= 0x10000; if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0) imm += (unsigned long)instr; return (unsigned long)imm; } unsigned long branch_target(const u32 *instr) { if (instr_is_branch_iform(ppc_inst_read(instr))) return branch_iform_target(instr); else if (instr_is_branch_bform(ppc_inst_read(instr))) return branch_bform_target(instr); return 0; } int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src) { unsigned long target; target = branch_target(src); if (instr_is_branch_iform(ppc_inst_read(src))) return create_branch(instr, dest, target, ppc_inst_val(ppc_inst_read(src))); else if (instr_is_branch_bform(ppc_inst_read(src))) return create_cond_branch(instr, dest, target, ppc_inst_val(ppc_inst_read(src))); return 1; }
linux-master
arch/powerpc/lib/code-patching.c
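create_cond_branch() above assembles a B-form conditional branch by masking the caller's flags with 0x3FF0003 and the displacement with 0xFFFC, after rejecting targets outside the 16-bit signed, word-aligned range. Below is a self-contained userspace sketch of that encode step plus the matching decode (compare branch_bform_target() above); the function names and the sample address are illustrative only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Encode a B-form conditional branch at 'addr' targeting 'target'.
 * Returns 0 and stores the word on success, non-zero if out of range,
 * mirroring the create_cond_branch() convention above. */
static int encode_cond_branch(uint32_t *insn, uint64_t addr, uint64_t target,
			      uint32_t flags)
{
	int64_t offset = (int64_t)(target - addr);

	/* 16-bit signed, word-aligned displacement: -0x8000 .. 0x7ffc */
	if (offset < -0x8000 || offset > 0x7ffc || (offset & 0x3))
		return 1;

	*insn = 0x40000000 | (flags & 0x3FF0003) | ((uint32_t)offset & 0xFFFC);
	return 0;
}

/* Decode the relative target back, sign-extending the 16-bit BD field. */
static uint64_t decode_cond_branch_target(uint32_t insn, uint64_t addr)
{
	int64_t imm = insn & 0xFFFC;

	if (imm & 0x8000)
		imm -= 0x10000;

	return addr + imm;
}

int main(void)
{
	uint64_t addr = 0x10000000;
	uint32_t insn;

	/* Branch to self, reusing the flag bits from the self-test above. */
	assert(encode_cond_branch(&insn, addr, addr, 0x3ff000 | 0x1) == 0);
	assert(decode_cond_branch_target(insn, addr) == addr);

	/* Extremes of the range, then one word past each. */
	assert(encode_cond_branch(&insn, addr, addr - 0x8000, 0) == 0);
	assert(encode_cond_branch(&insn, addr, addr + 0x7ffc, 0) == 0);
	assert(encode_cond_branch(&insn, addr, addr - 0x8004, 0) != 0);
	assert(encode_cond_branch(&insn, addr, addr + 0x8000, 0) != 0);

	printf("B-form conditional branch checks passed\n");
	return 0;
}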
/* * A Remote Heap. Remote means that we don't touch the memory that the * heap points to. Normal heap implementations use the memory they manage * to place their list. We cannot do that because the memory we manage may * have special properties, for example it is uncachable or of different * endianess. * * Author: Pantelis Antoniou <[email protected]> * * 2004 (c) INTRACOM S.A. Greece. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/slab.h> #include <asm/rheap.h> /* * Fixup a list_head, needed when copying lists. If the pointers fall * between s and e, apply the delta. This assumes that * sizeof(struct list_head *) == sizeof(unsigned long *). */ static inline void fixup(unsigned long s, unsigned long e, int d, struct list_head *l) { unsigned long *pp; pp = (unsigned long *)&l->next; if (*pp >= s && *pp < e) *pp += d; pp = (unsigned long *)&l->prev; if (*pp >= s && *pp < e) *pp += d; } /* Grow the allocated blocks */ static int grow(rh_info_t * info, int max_blocks) { rh_block_t *block, *blk; int i, new_blocks; int delta; unsigned long blks, blke; if (max_blocks <= info->max_blocks) return -EINVAL; new_blocks = max_blocks - info->max_blocks; block = kmalloc_array(max_blocks, sizeof(rh_block_t), GFP_ATOMIC); if (block == NULL) return -ENOMEM; if (info->max_blocks > 0) { /* copy old block area */ memcpy(block, info->block, sizeof(rh_block_t) * info->max_blocks); delta = (char *)block - (char *)info->block; /* and fixup list pointers */ blks = (unsigned long)info->block; blke = (unsigned long)(info->block + info->max_blocks); for (i = 0, blk = block; i < info->max_blocks; i++, blk++) fixup(blks, blke, delta, &blk->list); fixup(blks, blke, delta, &info->empty_list); fixup(blks, blke, delta, &info->free_list); fixup(blks, blke, delta, &info->taken_list); /* free the old allocated memory */ if ((info->flags & RHIF_STATIC_BLOCK) == 0) kfree(info->block); } info->block = block; info->empty_slots += new_blocks; info->max_blocks = max_blocks; info->flags &= ~RHIF_STATIC_BLOCK; /* add all new blocks to the free list */ blk = block + info->max_blocks - new_blocks; for (i = 0; i < new_blocks; i++, blk++) list_add(&blk->list, &info->empty_list); return 0; } /* * Assure at least the required amount of empty slots. If this function * causes a grow in the block area then all pointers kept to the block * area are invalid! */ static int assure_empty(rh_info_t * info, int slots) { int max_blocks; /* This function is not meant to be used to grow uncontrollably */ if (slots >= 4) return -EINVAL; /* Enough space */ if (info->empty_slots >= slots) return 0; /* Next 16 sized block */ max_blocks = ((info->max_blocks + slots) + 15) & ~15; return grow(info, max_blocks); } static rh_block_t *get_slot(rh_info_t * info) { rh_block_t *blk; /* If no more free slots, and failure to extend. 
*/ /* XXX: You should have called assure_empty before */ if (info->empty_slots == 0) { printk(KERN_ERR "rh: out of slots; crash is imminent.\n"); return NULL; } /* Get empty slot to use */ blk = list_entry(info->empty_list.next, rh_block_t, list); list_del_init(&blk->list); info->empty_slots--; /* Initialize */ blk->start = 0; blk->size = 0; blk->owner = NULL; return blk; } static inline void release_slot(rh_info_t * info, rh_block_t * blk) { list_add(&blk->list, &info->empty_list); info->empty_slots++; } static void attach_free_block(rh_info_t * info, rh_block_t * blkn) { rh_block_t *blk; rh_block_t *before; rh_block_t *after; rh_block_t *next; int size; unsigned long s, e, bs, be; struct list_head *l; /* We assume that they are aligned properly */ size = blkn->size; s = blkn->start; e = s + size; /* Find the blocks immediately before and after the given one * (if any) */ before = NULL; after = NULL; next = NULL; list_for_each(l, &info->free_list) { blk = list_entry(l, rh_block_t, list); bs = blk->start; be = bs + blk->size; if (next == NULL && s >= bs) next = blk; if (be == s) before = blk; if (e == bs) after = blk; /* If both are not null, break now */ if (before != NULL && after != NULL) break; } /* Now check if they are really adjacent */ if (before && s != (before->start + before->size)) before = NULL; if (after && e != after->start) after = NULL; /* No coalescing; list insert and return */ if (before == NULL && after == NULL) { if (next != NULL) list_add(&blkn->list, &next->list); else list_add(&blkn->list, &info->free_list); return; } /* We don't need it anymore */ release_slot(info, blkn); /* Grow the before block */ if (before != NULL && after == NULL) { before->size += size; return; } /* Grow the after block backwards */ if (before == NULL && after != NULL) { after->start -= size; after->size += size; return; } /* Grow the before block, and release the after block */ before->size += size + after->size; list_del(&after->list); release_slot(info, after); } static void attach_taken_block(rh_info_t * info, rh_block_t * blkn) { rh_block_t *blk; struct list_head *l; /* Find the block immediately before the given one (if any) */ list_for_each(l, &info->taken_list) { blk = list_entry(l, rh_block_t, list); if (blk->start > blkn->start) { list_add_tail(&blkn->list, &blk->list); return; } } list_add_tail(&blkn->list, &info->taken_list); } /* * Create a remote heap dynamically. Note that no memory for the blocks * are allocated. It will upon the first allocation */ rh_info_t *rh_create(unsigned int alignment) { rh_info_t *info; /* Alignment must be a power of two */ if ((alignment & (alignment - 1)) != 0) return ERR_PTR(-EINVAL); info = kmalloc(sizeof(*info), GFP_ATOMIC); if (info == NULL) return ERR_PTR(-ENOMEM); info->alignment = alignment; /* Initially everything as empty */ info->block = NULL; info->max_blocks = 0; info->empty_slots = 0; info->flags = 0; INIT_LIST_HEAD(&info->empty_list); INIT_LIST_HEAD(&info->free_list); INIT_LIST_HEAD(&info->taken_list); return info; } EXPORT_SYMBOL_GPL(rh_create); /* * Destroy a dynamically created remote heap. Deallocate only if the areas * are not static */ void rh_destroy(rh_info_t * info) { if ((info->flags & RHIF_STATIC_BLOCK) == 0) kfree(info->block); if ((info->flags & RHIF_STATIC_INFO) == 0) kfree(info); } EXPORT_SYMBOL_GPL(rh_destroy); /* * Initialize in place a remote heap info block. This is needed to support * operation very early in the startup of the kernel, when it is not yet safe * to call kmalloc. 
*/ void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks, rh_block_t * block) { int i; rh_block_t *blk; /* Alignment must be a power of two */ if ((alignment & (alignment - 1)) != 0) return; info->alignment = alignment; /* Initially everything as empty */ info->block = block; info->max_blocks = max_blocks; info->empty_slots = max_blocks; info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK; INIT_LIST_HEAD(&info->empty_list); INIT_LIST_HEAD(&info->free_list); INIT_LIST_HEAD(&info->taken_list); /* Add all new blocks to the free list */ for (i = 0, blk = block; i < max_blocks; i++, blk++) list_add(&blk->list, &info->empty_list); } EXPORT_SYMBOL_GPL(rh_init); /* Attach a free memory region, coalesces regions if adjacent */ int rh_attach_region(rh_info_t * info, unsigned long start, int size) { rh_block_t *blk; unsigned long s, e, m; int r; /* The region must be aligned */ s = start; e = s + size; m = info->alignment - 1; /* Round start up */ s = (s + m) & ~m; /* Round end down */ e = e & ~m; if (IS_ERR_VALUE(e) || (e < s)) return -ERANGE; /* Take final values */ start = s; size = e - s; /* Grow the blocks, if needed */ r = assure_empty(info, 1); if (r < 0) return r; blk = get_slot(info); blk->start = start; blk->size = size; blk->owner = NULL; attach_free_block(info, blk); return 0; } EXPORT_SYMBOL_GPL(rh_attach_region); /* Detatch given address range, splits free block if needed. */ unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size) { struct list_head *l; rh_block_t *blk, *newblk; unsigned long s, e, m, bs, be; /* Validate size */ if (size <= 0) return (unsigned long) -EINVAL; /* The region must be aligned */ s = start; e = s + size; m = info->alignment - 1; /* Round start up */ s = (s + m) & ~m; /* Round end down */ e = e & ~m; if (assure_empty(info, 1) < 0) return (unsigned long) -ENOMEM; blk = NULL; list_for_each(l, &info->free_list) { blk = list_entry(l, rh_block_t, list); /* The range must lie entirely inside one free block */ bs = blk->start; be = blk->start + blk->size; if (s >= bs && e <= be) break; blk = NULL; } if (blk == NULL) return (unsigned long) -ENOMEM; /* Perfect fit */ if (bs == s && be == e) { /* Delete from free list, release slot */ list_del(&blk->list); release_slot(info, blk); return s; } /* blk still in free list, with updated start and/or size */ if (bs == s || be == e) { if (bs == s) blk->start += size; blk->size -= size; } else { /* The front free fragment */ blk->size = s - bs; /* the back free fragment */ newblk = get_slot(info); newblk->start = e; newblk->size = be - e; list_add(&newblk->list, &blk->list); } return s; } EXPORT_SYMBOL_GPL(rh_detach_region); /* Allocate a block of memory at the specified alignment. The value returned * is an offset into the buffer initialized by rh_init(), or a negative number * if there is an error. 
*/ unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner) { struct list_head *l; rh_block_t *blk; rh_block_t *newblk; unsigned long start, sp_size; /* Validate size, and alignment must be power of two */ if (size <= 0 || (alignment & (alignment - 1)) != 0) return (unsigned long) -EINVAL; /* Align to configured alignment */ size = (size + (info->alignment - 1)) & ~(info->alignment - 1); if (assure_empty(info, 2) < 0) return (unsigned long) -ENOMEM; blk = NULL; list_for_each(l, &info->free_list) { blk = list_entry(l, rh_block_t, list); if (size <= blk->size) { start = (blk->start + alignment - 1) & ~(alignment - 1); if (start + size <= blk->start + blk->size) break; } blk = NULL; } if (blk == NULL) return (unsigned long) -ENOMEM; /* Just fits */ if (blk->size == size) { /* Move from free list to taken list */ list_del(&blk->list); newblk = blk; } else { /* Fragment caused, split if needed */ /* Create block for fragment in the beginning */ sp_size = start - blk->start; if (sp_size) { rh_block_t *spblk; spblk = get_slot(info); spblk->start = blk->start; spblk->size = sp_size; /* add before the blk */ list_add(&spblk->list, blk->list.prev); } newblk = get_slot(info); newblk->start = start; newblk->size = size; /* blk still in free list, with updated start and size * for fragment in the end */ blk->start = start + size; blk->size -= sp_size + size; /* No fragment in the end, remove blk */ if (blk->size == 0) { list_del(&blk->list); release_slot(info, blk); } } newblk->owner = owner; attach_taken_block(info, newblk); return start; } EXPORT_SYMBOL_GPL(rh_alloc_align); /* Allocate a block of memory at the default alignment. The value returned is * an offset into the buffer initialized by rh_init(), or a negative number if * there is an error. */ unsigned long rh_alloc(rh_info_t * info, int size, const char *owner) { return rh_alloc_align(info, size, info->alignment, owner); } EXPORT_SYMBOL_GPL(rh_alloc); /* Allocate a block of memory at the given offset, rounded up to the default * alignment. The value returned is an offset into the buffer initialized by * rh_init(), or a negative number if there is an error. 
*/ unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner) { struct list_head *l; rh_block_t *blk, *newblk1, *newblk2; unsigned long s, e, m, bs = 0, be = 0; /* Validate size */ if (size <= 0) return (unsigned long) -EINVAL; /* The region must be aligned */ s = start; e = s + size; m = info->alignment - 1; /* Round start up */ s = (s + m) & ~m; /* Round end down */ e = e & ~m; if (assure_empty(info, 2) < 0) return (unsigned long) -ENOMEM; blk = NULL; list_for_each(l, &info->free_list) { blk = list_entry(l, rh_block_t, list); /* The range must lie entirely inside one free block */ bs = blk->start; be = blk->start + blk->size; if (s >= bs && e <= be) break; blk = NULL; } if (blk == NULL) return (unsigned long) -ENOMEM; /* Perfect fit */ if (bs == s && be == e) { /* Move from free list to taken list */ list_del(&blk->list); blk->owner = owner; start = blk->start; attach_taken_block(info, blk); return start; } /* blk still in free list, with updated start and/or size */ if (bs == s || be == e) { if (bs == s) blk->start += size; blk->size -= size; } else { /* The front free fragment */ blk->size = s - bs; /* The back free fragment */ newblk2 = get_slot(info); newblk2->start = e; newblk2->size = be - e; list_add(&newblk2->list, &blk->list); } newblk1 = get_slot(info); newblk1->start = s; newblk1->size = e - s; newblk1->owner = owner; start = newblk1->start; attach_taken_block(info, newblk1); return start; } EXPORT_SYMBOL_GPL(rh_alloc_fixed); /* Deallocate the memory previously allocated by one of the rh_alloc functions. * The return value is the size of the deallocated block, or a negative number * if there is an error. */ int rh_free(rh_info_t * info, unsigned long start) { rh_block_t *blk, *blk2; struct list_head *l; int size; /* Linear search for block */ blk = NULL; list_for_each(l, &info->taken_list) { blk2 = list_entry(l, rh_block_t, list); if (start < blk2->start) break; blk = blk2; } if (blk == NULL || start > (blk->start + blk->size)) return -EINVAL; /* Remove from taken list */ list_del(&blk->list); /* Get size of freed block */ size = blk->size; attach_free_block(info, blk); return size; } EXPORT_SYMBOL_GPL(rh_free); int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats) { rh_block_t *blk; struct list_head *l; struct list_head *h; int nr; switch (what) { case RHGS_FREE: h = &info->free_list; break; case RHGS_TAKEN: h = &info->taken_list; break; default: return -EINVAL; } /* Linear search for block */ nr = 0; list_for_each(l, h) { blk = list_entry(l, rh_block_t, list); if (stats != NULL && nr < max_stats) { stats->start = blk->start; stats->size = blk->size; stats->owner = blk->owner; stats++; } nr++; } return nr; } EXPORT_SYMBOL_GPL(rh_get_stats); int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner) { rh_block_t *blk, *blk2; struct list_head *l; int size; /* Linear search for block */ blk = NULL; list_for_each(l, &info->taken_list) { blk2 = list_entry(l, rh_block_t, list); if (start < blk2->start) break; blk = blk2; } if (blk == NULL || start > (blk->start + blk->size)) return -EINVAL; blk->owner = owner; size = blk->size; return size; } EXPORT_SYMBOL_GPL(rh_set_owner); void rh_dump(rh_info_t * info) { static rh_stats_t st[32]; /* XXX maximum 32 blocks */ int maxnr; int i, nr; maxnr = ARRAY_SIZE(st); printk(KERN_INFO "info @0x%p (%d slots empty / %d max)\n", info, info->empty_slots, info->max_blocks); printk(KERN_INFO " Free:\n"); nr = rh_get_stats(info, RHGS_FREE, maxnr, st); if (nr > maxnr) nr = 
maxnr; for (i = 0; i < nr; i++) printk(KERN_INFO " 0x%lx-0x%lx (%u)\n", st[i].start, st[i].start + st[i].size, st[i].size); printk(KERN_INFO "\n"); printk(KERN_INFO " Taken:\n"); nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st); if (nr > maxnr) nr = maxnr; for (i = 0; i < nr; i++) printk(KERN_INFO " 0x%lx-0x%lx (%u) %s\n", st[i].start, st[i].start + st[i].size, st[i].size, st[i].owner != NULL ? st[i].owner : ""); printk(KERN_INFO "\n"); } EXPORT_SYMBOL_GPL(rh_dump); void rh_dump_blk(rh_info_t * info, rh_block_t * blk) { printk(KERN_INFO "blk @0x%p: 0x%lx-0x%lx (%u)\n", blk, blk->start, blk->start + blk->size, blk->size); } EXPORT_SYMBOL_GPL(rh_dump_blk);
linux-master
arch/powerpc/lib/rheap.c
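The rheap API collected above keeps all of its bookkeeping in ordinary kernel memory and never dereferences the region it manages, so a typical caller creates an info block, attaches the special region by offset, and then works purely with offsets. The following is a minimal usage sketch, not code from the file: the 64-byte alignment, the 16 KiB region size, and the "example" owner string are illustrative assumptions.

/* Illustrative sketch of the rh_* API above; sizes and names are assumptions. */
#include <linux/err.h>
#include <linux/kernel.h>
#include <asm/rheap.h>

static int example_use_rheap(void)
{
        rh_info_t *rh;
        unsigned long off;

        rh = rh_create(64);                     /* alignment must be a power of two */
        if (IS_ERR(rh))
                return PTR_ERR(rh);

        /* Hand the heap 16 KiB of the remote region, addressed as offsets 0..0x3fff. */
        if (rh_attach_region(rh, 0, 0x4000) < 0) {
                rh_destroy(rh);
                return -ENOMEM;
        }

        off = rh_alloc(rh, 512, "example");     /* returns an offset, not a pointer */
        if (IS_ERR_VALUE(off)) {
                rh_destroy(rh);
                return (int)off;
        }

        rh_free(rh, off);                       /* coalesces with adjacent free blocks */
        rh_destroy(rh);
        return 0;
}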
// SPDX-License-Identifier: GPL-2.0-or-later /* * Altivec XOR operations * * Copyright 2017 IBM Corp. */ #include <linux/preempt.h> #include <linux/export.h> #include <linux/sched.h> #include <asm/switch_to.h> #include <asm/xor_altivec.h> #include "xor_vmx.h" void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2) { preempt_disable(); enable_kernel_altivec(); __xor_altivec_2(bytes, p1, p2); disable_kernel_altivec(); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_2); void xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2, const unsigned long * __restrict p3) { preempt_disable(); enable_kernel_altivec(); __xor_altivec_3(bytes, p1, p2, p3); disable_kernel_altivec(); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_3); void xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2, const unsigned long * __restrict p3, const unsigned long * __restrict p4) { preempt_disable(); enable_kernel_altivec(); __xor_altivec_4(bytes, p1, p2, p3, p4); disable_kernel_altivec(); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_4); void xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2, const unsigned long * __restrict p3, const unsigned long * __restrict p4, const unsigned long * __restrict p5) { preempt_disable(); enable_kernel_altivec(); __xor_altivec_5(bytes, p1, p2, p3, p4, p5); disable_kernel_altivec(); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_5);
linux-master
arch/powerpc/lib/xor_vmx_glue.c
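Each xor_altivec_N() glue routine above simply brackets the VMX implementation with preempt_disable() and enable_kernel_altivec(), XOR-ing the later buffers into p1. A hedged usage sketch follows; the 4 KiB length and the kmalloc'd buffers are assumptions for illustration, not taken from the file.

/* Sketch only: buffer length and allocation strategy are assumed. */
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/xor_altivec.h>

static int example_xor_two_buffers(void)
{
        const unsigned long bytes = 4096;       /* assumed size, multiple of the vector width */
        unsigned long *a, *b;

        a = kmalloc(bytes, GFP_KERNEL);
        b = kmalloc(bytes, GFP_KERNEL);
        if (!a || !b) {
                kfree(a);
                kfree(b);
                return -ENOMEM;
        }

        memset(a, 0xaa, bytes);
        memset(b, 0x55, bytes);

        /* a[i] ^= b[i] over the whole region, with AltiVec enabled around the call. */
        xor_altivec_2(bytes, a, b);

        kfree(a);
        kfree(b);
        return 0;
}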
// SPDX-License-Identifier: GPL-2.0-or-later /* * Spin and read/write lock operations. * * Copyright (C) 2001-2004 Paul Mackerras <[email protected]>, IBM * Copyright (C) 2001 Anton Blanchard <[email protected]>, IBM * Copyright (C) 2002 Dave Engebretsen <[email protected]>, IBM * Rework to support virtual processors */ #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/export.h> #include <linux/smp.h> /* waiting for a spinlock... */ #if defined(CONFIG_PPC_SPLPAR) #include <asm/hvcall.h> #include <asm/smp.h> void splpar_spin_yield(arch_spinlock_t *lock) { unsigned int lock_value, holder_cpu, yield_count; lock_value = lock->slock; if (lock_value == 0) return; holder_cpu = lock_value & 0xffff; BUG_ON(holder_cpu >= NR_CPUS); yield_count = yield_count_of(holder_cpu); if ((yield_count & 1) == 0) return; /* virtual cpu is currently running */ rmb(); if (lock->slock != lock_value) return; /* something has changed */ yield_to_preempted(holder_cpu, yield_count); } EXPORT_SYMBOL_GPL(splpar_spin_yield); /* * Waiting for a read lock or a write lock on a rwlock... * This turns out to be the same for read and write locks, since * we only know the holder if it is write-locked. */ void splpar_rw_yield(arch_rwlock_t *rw) { int lock_value; unsigned int holder_cpu, yield_count; lock_value = rw->lock; if (lock_value >= 0) return; /* no write lock at present */ holder_cpu = lock_value & 0xffff; BUG_ON(holder_cpu >= NR_CPUS); yield_count = yield_count_of(holder_cpu); if ((yield_count & 1) == 0) return; /* virtual cpu is currently running */ rmb(); if (rw->lock != lock_value) return; /* something has changed */ yield_to_preempted(holder_cpu, yield_count); } #endif
linux-master
arch/powerpc/lib/locks.c
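Two conventions carry the logic in splpar_spin_yield() and splpar_rw_yield() above: the holder CPU is read from the low 16 bits of the lock word, and the holder's yield count is odd while the hypervisor has it preempted (even while it is running). The standalone sketch below only replays that decoding with invented sample values; it is not kernel code.

/* Standalone illustration of the conventions used above; the values are invented. */
#include <stdio.h>

int main(void)
{
        unsigned int lock_value = 0x80000005;   /* sample slock word; only the low 16 bits (CPU 5) matter here */
        unsigned int yield_count = 7;           /* sample dispatch count for that vCPU */

        if (lock_value == 0) {
                printf("lock is free, nothing to yield to\n");
                return 0;
        }

        unsigned int holder_cpu = lock_value & 0xffff;  /* holder CPU lives in the low 16 bits */

        /* Odd yield count: the holder vCPU is preempted; even: it is running. */
        if (yield_count & 1)
                printf("CPU %u is preempted; would yield_to_preempted()\n", holder_cpu);
        else
                printf("CPU %u is running; keep spinning\n", holder_cpu);

        return 0;
}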
// SPDX-License-Identifier: GPL-2.0-or-later #include <linux/bug.h> #include <linux/compiler.h> #include <linux/export.h> #include <linux/percpu.h> #include <linux/processor.h> #include <linux/smp.h> #include <linux/topology.h> #include <linux/sched/clock.h> #include <asm/qspinlock.h> #include <asm/paravirt.h> #define MAX_NODES 4 struct qnode { struct qnode *next; struct qspinlock *lock; int cpu; int yield_cpu; u8 locked; /* 1 if lock acquired */ }; struct qnodes { int count; struct qnode nodes[MAX_NODES]; }; /* Tuning parameters */ static int steal_spins __read_mostly = (1 << 5); static int remote_steal_spins __read_mostly = (1 << 2); #if _Q_SPIN_TRY_LOCK_STEAL == 1 static const bool maybe_stealers = true; #else static bool maybe_stealers __read_mostly = true; #endif static int head_spins __read_mostly = (1 << 8); static bool pv_yield_owner __read_mostly = true; static bool pv_yield_allow_steal __read_mostly = false; static bool pv_spin_on_preempted_owner __read_mostly = false; static bool pv_sleepy_lock __read_mostly = true; static bool pv_sleepy_lock_sticky __read_mostly = false; static u64 pv_sleepy_lock_interval_ns __read_mostly = 0; static int pv_sleepy_lock_factor __read_mostly = 256; static bool pv_yield_prev __read_mostly = true; static bool pv_yield_propagate_owner __read_mostly = true; static bool pv_prod_head __read_mostly = false; static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes); static DEFINE_PER_CPU_ALIGNED(u64, sleepy_lock_seen_clock); #if _Q_SPIN_SPEC_BARRIER == 1 #define spec_barrier() do { asm volatile("ori 31,31,0" ::: "memory"); } while (0) #else #define spec_barrier() do { } while (0) #endif static __always_inline bool recently_sleepy(void) { /* pv_sleepy_lock is true when this is called */ if (pv_sleepy_lock_interval_ns) { u64 seen = this_cpu_read(sleepy_lock_seen_clock); if (seen) { u64 delta = sched_clock() - seen; if (delta < pv_sleepy_lock_interval_ns) return true; this_cpu_write(sleepy_lock_seen_clock, 0); } } return false; } static __always_inline int get_steal_spins(bool paravirt, bool sleepy) { if (paravirt && sleepy) return steal_spins * pv_sleepy_lock_factor; else return steal_spins; } static __always_inline int get_remote_steal_spins(bool paravirt, bool sleepy) { if (paravirt && sleepy) return remote_steal_spins * pv_sleepy_lock_factor; else return remote_steal_spins; } static __always_inline int get_head_spins(bool paravirt, bool sleepy) { if (paravirt && sleepy) return head_spins * pv_sleepy_lock_factor; else return head_spins; } static inline u32 encode_tail_cpu(int cpu) { return (cpu + 1) << _Q_TAIL_CPU_OFFSET; } static inline int decode_tail_cpu(u32 val) { return (val >> _Q_TAIL_CPU_OFFSET) - 1; } static inline int get_owner_cpu(u32 val) { return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET; } /* * Try to acquire the lock if it was not already locked. If the tail matches * mytail then clear it, otherwise leave it unchnaged. Return previous value. * * This is used by the head of the queue to acquire the lock and clean up * its tail if it was the last one queued. */ static __always_inline u32 trylock_clean_tail(struct qspinlock *lock, u32 tail) { u32 newval = queued_spin_encode_locked_val(); u32 prev, tmp; asm volatile( "1: lwarx %0,0,%2,%7 # trylock_clean_tail \n" /* This test is necessary if there could be stealers */ " andi. 
%1,%0,%5 \n" " bne 3f \n" /* Test whether the lock tail == mytail */ " and %1,%0,%6 \n" " cmpw 0,%1,%3 \n" /* Merge the new locked value */ " or %1,%1,%4 \n" " bne 2f \n" /* If the lock tail matched, then clear it, otherwise leave it. */ " andc %1,%1,%6 \n" "2: stwcx. %1,0,%2 \n" " bne- 1b \n" "\t" PPC_ACQUIRE_BARRIER " \n" "3: \n" : "=&r" (prev), "=&r" (tmp) : "r" (&lock->val), "r"(tail), "r" (newval), "i" (_Q_LOCKED_VAL), "r" (_Q_TAIL_CPU_MASK), "i" (_Q_SPIN_EH_HINT) : "cr0", "memory"); return prev; } /* * Publish our tail, replacing previous tail. Return previous value. * * This provides a release barrier for publishing node, this pairs with the * acquire barrier in get_tail_qnode() when the next CPU finds this tail * value. */ static __always_inline u32 publish_tail_cpu(struct qspinlock *lock, u32 tail) { u32 prev, tmp; kcsan_release(); asm volatile( "\t" PPC_RELEASE_BARRIER " \n" "1: lwarx %0,0,%2 # publish_tail_cpu \n" " andc %1,%0,%4 \n" " or %1,%1,%3 \n" " stwcx. %1,0,%2 \n" " bne- 1b \n" : "=&r" (prev), "=&r"(tmp) : "r" (&lock->val), "r" (tail), "r"(_Q_TAIL_CPU_MASK) : "cr0", "memory"); return prev; } static __always_inline u32 set_mustq(struct qspinlock *lock) { u32 prev; asm volatile( "1: lwarx %0,0,%1 # set_mustq \n" " or %0,%0,%2 \n" " stwcx. %0,0,%1 \n" " bne- 1b \n" : "=&r" (prev) : "r" (&lock->val), "r" (_Q_MUST_Q_VAL) : "cr0", "memory"); return prev; } static __always_inline u32 clear_mustq(struct qspinlock *lock) { u32 prev; asm volatile( "1: lwarx %0,0,%1 # clear_mustq \n" " andc %0,%0,%2 \n" " stwcx. %0,0,%1 \n" " bne- 1b \n" : "=&r" (prev) : "r" (&lock->val), "r" (_Q_MUST_Q_VAL) : "cr0", "memory"); return prev; } static __always_inline bool try_set_sleepy(struct qspinlock *lock, u32 old) { u32 prev; u32 new = old | _Q_SLEEPY_VAL; BUG_ON(!(old & _Q_LOCKED_VAL)); BUG_ON(old & _Q_SLEEPY_VAL); asm volatile( "1: lwarx %0,0,%1 # try_set_sleepy \n" " cmpw 0,%0,%2 \n" " bne- 2f \n" " stwcx. %3,0,%1 \n" " bne- 1b \n" "2: \n" : "=&r" (prev) : "r" (&lock->val), "r"(old), "r" (new) : "cr0", "memory"); return likely(prev == old); } static __always_inline void seen_sleepy_owner(struct qspinlock *lock, u32 val) { if (pv_sleepy_lock) { if (pv_sleepy_lock_interval_ns) this_cpu_write(sleepy_lock_seen_clock, sched_clock()); if (!(val & _Q_SLEEPY_VAL)) try_set_sleepy(lock, val); } } static __always_inline void seen_sleepy_lock(void) { if (pv_sleepy_lock && pv_sleepy_lock_interval_ns) this_cpu_write(sleepy_lock_seen_clock, sched_clock()); } static __always_inline void seen_sleepy_node(struct qspinlock *lock, u32 val) { if (pv_sleepy_lock) { if (pv_sleepy_lock_interval_ns) this_cpu_write(sleepy_lock_seen_clock, sched_clock()); if (val & _Q_LOCKED_VAL) { if (!(val & _Q_SLEEPY_VAL)) try_set_sleepy(lock, val); } } } static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val) { int cpu = decode_tail_cpu(val); struct qnodes *qnodesp = per_cpu_ptr(&qnodes, cpu); int idx; /* * After publishing the new tail and finding a previous tail in the * previous val (which is the control dependency), this barrier * orders the release barrier in publish_tail_cpu performed by the * last CPU, with subsequently looking at its qnode structures * after the barrier. */ smp_acquire__after_ctrl_dep(); for (idx = 0; idx < MAX_NODES; idx++) { struct qnode *qnode = &qnodesp->nodes[idx]; if (qnode->lock == lock) return qnode; } BUG(); } /* Called inside spin_begin(). Returns whether or not the vCPU was preempted. 
*/ static __always_inline bool __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq) { int owner; u32 yield_count; bool preempted = false; BUG_ON(!(val & _Q_LOCKED_VAL)); if (!paravirt) goto relax; if (!pv_yield_owner) goto relax; owner = get_owner_cpu(val); yield_count = yield_count_of(owner); if ((yield_count & 1) == 0) goto relax; /* owner vcpu is running */ spin_end(); seen_sleepy_owner(lock, val); preempted = true; /* * Read the lock word after sampling the yield count. On the other side * there may a wmb because the yield count update is done by the * hypervisor preemption and the value update by the OS, however this * ordering might reduce the chance of out of order accesses and * improve the heuristic. */ smp_rmb(); if (READ_ONCE(lock->val) == val) { if (mustq) clear_mustq(lock); yield_to_preempted(owner, yield_count); if (mustq) set_mustq(lock); spin_begin(); /* Don't relax if we yielded. Maybe we should? */ return preempted; } spin_begin(); relax: spin_cpu_relax(); return preempted; } /* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */ static __always_inline bool yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt) { return __yield_to_locked_owner(lock, val, paravirt, false); } /* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */ static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt) { bool mustq = false; if ((val & _Q_MUST_Q_VAL) && pv_yield_allow_steal) mustq = true; return __yield_to_locked_owner(lock, val, paravirt, mustq); } static __always_inline void propagate_yield_cpu(struct qnode *node, u32 val, int *set_yield_cpu, bool paravirt) { struct qnode *next; int owner; if (!paravirt) return; if (!pv_yield_propagate_owner) return; owner = get_owner_cpu(val); if (*set_yield_cpu == owner) return; next = READ_ONCE(node->next); if (!next) return; if (vcpu_is_preempted(owner)) { next->yield_cpu = owner; *set_yield_cpu = owner; } else if (*set_yield_cpu != -1) { next->yield_cpu = owner; *set_yield_cpu = owner; } } /* Called inside spin_begin() */ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, u32 val, bool paravirt) { int prev_cpu = decode_tail_cpu(val); u32 yield_count; int yield_cpu; bool preempted = false; if (!paravirt) goto relax; if (!pv_yield_propagate_owner) goto yield_prev; yield_cpu = READ_ONCE(node->yield_cpu); if (yield_cpu == -1) { /* Propagate back the -1 CPU */ if (node->next && node->next->yield_cpu != -1) node->next->yield_cpu = yield_cpu; goto yield_prev; } yield_count = yield_count_of(yield_cpu); if ((yield_count & 1) == 0) goto yield_prev; /* owner vcpu is running */ spin_end(); preempted = true; seen_sleepy_node(lock, val); smp_rmb(); if (yield_cpu == node->yield_cpu) { if (node->next && node->next->yield_cpu != yield_cpu) node->next->yield_cpu = yield_cpu; yield_to_preempted(yield_cpu, yield_count); spin_begin(); return preempted; } spin_begin(); yield_prev: if (!pv_yield_prev) goto relax; yield_count = yield_count_of(prev_cpu); if ((yield_count & 1) == 0) goto relax; /* owner vcpu is running */ spin_end(); preempted = true; seen_sleepy_node(lock, val); smp_rmb(); /* See __yield_to_locked_owner comment */ if (!READ_ONCE(node->locked)) { yield_to_preempted(prev_cpu, yield_count); spin_begin(); return preempted; } spin_begin(); relax: spin_cpu_relax(); return preempted; } static __always_inline bool steal_break(u32 val, int iters, bool paravirt, bool sleepy) { if (iters >= 
get_steal_spins(paravirt, sleepy)) return true; if (IS_ENABLED(CONFIG_NUMA) && (iters >= get_remote_steal_spins(paravirt, sleepy))) { int cpu = get_owner_cpu(val); if (numa_node_id() != cpu_to_node(cpu)) return true; } return false; } static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt) { bool seen_preempted = false; bool sleepy = false; int iters = 0; u32 val; if (!steal_spins) { /* XXX: should spin_on_preempted_owner do anything here? */ return false; } /* Attempt to steal the lock */ spin_begin(); do { bool preempted = false; val = READ_ONCE(lock->val); if (val & _Q_MUST_Q_VAL) break; spec_barrier(); if (unlikely(!(val & _Q_LOCKED_VAL))) { spin_end(); if (__queued_spin_trylock_steal(lock)) return true; spin_begin(); } else { preempted = yield_to_locked_owner(lock, val, paravirt); } if (paravirt && pv_sleepy_lock) { if (!sleepy) { if (val & _Q_SLEEPY_VAL) { seen_sleepy_lock(); sleepy = true; } else if (recently_sleepy()) { sleepy = true; } } if (pv_sleepy_lock_sticky && seen_preempted && !(val & _Q_SLEEPY_VAL)) { if (try_set_sleepy(lock, val)) val |= _Q_SLEEPY_VAL; } } if (preempted) { seen_preempted = true; sleepy = true; if (!pv_spin_on_preempted_owner) iters++; /* * pv_spin_on_preempted_owner don't increase iters * while the owner is preempted -- we won't interfere * with it by definition. This could introduce some * latency issue if we continually observe preempted * owners, but hopefully that's a rare corner case of * a badly oversubscribed system. */ } else { iters++; } } while (!steal_break(val, iters, paravirt, sleepy)); spin_end(); return false; } static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt) { struct qnodes *qnodesp; struct qnode *next, *node; u32 val, old, tail; bool seen_preempted = false; bool sleepy = false; bool mustq = false; int idx; int set_yield_cpu = -1; int iters = 0; BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); qnodesp = this_cpu_ptr(&qnodes); if (unlikely(qnodesp->count >= MAX_NODES)) { spec_barrier(); while (!queued_spin_trylock(lock)) cpu_relax(); return; } idx = qnodesp->count++; /* * Ensure that we increment the head node->count before initialising * the actual node. If the compiler is kind enough to reorder these * stores, then an IRQ could overwrite our assignments. */ barrier(); node = &qnodesp->nodes[idx]; node->next = NULL; node->lock = lock; node->cpu = smp_processor_id(); node->yield_cpu = -1; node->locked = 0; tail = encode_tail_cpu(node->cpu); /* * Assign all attributes of a node before it can be published. * Issues an lwsync, serving as a release barrier, as well as a * compiler barrier. */ old = publish_tail_cpu(lock, tail); /* * If there was a previous node; link it and wait until reaching the * head of the waitqueue. */ if (old & _Q_TAIL_CPU_MASK) { struct qnode *prev = get_tail_qnode(lock, old); /* Link @node into the waitqueue. */ WRITE_ONCE(prev->next, node); /* Wait for mcs node lock to be released */ spin_begin(); while (!READ_ONCE(node->locked)) { spec_barrier(); if (yield_to_prev(lock, node, old, paravirt)) seen_preempted = true; } spec_barrier(); spin_end(); /* Clear out stale propagated yield_cpu */ if (paravirt && pv_yield_propagate_owner && node->yield_cpu != -1) node->yield_cpu = -1; smp_rmb(); /* acquire barrier for the mcs lock */ /* * Generic qspinlocks have this prefetch here, but it seems * like it could cause additional line transitions because * the waiter will keep loading from it. 
*/ if (_Q_SPIN_PREFETCH_NEXT) { next = READ_ONCE(node->next); if (next) prefetchw(next); } } /* We're at the head of the waitqueue, wait for the lock. */ again: spin_begin(); for (;;) { bool preempted; val = READ_ONCE(lock->val); if (!(val & _Q_LOCKED_VAL)) break; spec_barrier(); if (paravirt && pv_sleepy_lock && maybe_stealers) { if (!sleepy) { if (val & _Q_SLEEPY_VAL) { seen_sleepy_lock(); sleepy = true; } else if (recently_sleepy()) { sleepy = true; } } if (pv_sleepy_lock_sticky && seen_preempted && !(val & _Q_SLEEPY_VAL)) { if (try_set_sleepy(lock, val)) val |= _Q_SLEEPY_VAL; } } propagate_yield_cpu(node, val, &set_yield_cpu, paravirt); preempted = yield_head_to_locked_owner(lock, val, paravirt); if (!maybe_stealers) continue; if (preempted) seen_preempted = true; if (paravirt && preempted) { sleepy = true; if (!pv_spin_on_preempted_owner) iters++; } else { iters++; } if (!mustq && iters >= get_head_spins(paravirt, sleepy)) { mustq = true; set_mustq(lock); val |= _Q_MUST_Q_VAL; } } spec_barrier(); spin_end(); /* If we're the last queued, must clean up the tail. */ old = trylock_clean_tail(lock, tail); if (unlikely(old & _Q_LOCKED_VAL)) { BUG_ON(!maybe_stealers); goto again; /* Can only be true if maybe_stealers. */ } if ((old & _Q_TAIL_CPU_MASK) == tail) goto release; /* We were the tail, no next. */ /* There is a next, must wait for node->next != NULL (MCS protocol) */ next = READ_ONCE(node->next); if (!next) { spin_begin(); while (!(next = READ_ONCE(node->next))) cpu_relax(); spin_end(); } spec_barrier(); /* * Unlock the next mcs waiter node. Release barrier is not required * here because the acquirer is only accessing the lock word, and * the acquire barrier we took the lock with orders that update vs * this store to locked. The corresponding barrier is the smp_rmb() * acquire barrier for mcs lock, above. */ if (paravirt && pv_prod_head) { int next_cpu = next->cpu; WRITE_ONCE(next->locked, 1); if (_Q_SPIN_MISO) asm volatile("miso" ::: "memory"); if (vcpu_is_preempted(next_cpu)) prod_cpu(next_cpu); } else { WRITE_ONCE(next->locked, 1); if (_Q_SPIN_MISO) asm volatile("miso" ::: "memory"); } release: qnodesp->count--; /* release the node */ } void queued_spin_lock_slowpath(struct qspinlock *lock) { /* * This looks funny, but it induces the compiler to inline both * sides of the branch rather than share code as when the condition * is passed as the paravirt argument to the functions. */ if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()) { if (try_to_steal_lock(lock, true)) { spec_barrier(); return; } queued_spin_lock_mcs_queue(lock, true); } else { if (try_to_steal_lock(lock, false)) { spec_barrier(); return; } queued_spin_lock_mcs_queue(lock, false); } } EXPORT_SYMBOL(queued_spin_lock_slowpath); #ifdef CONFIG_PARAVIRT_SPINLOCKS void pv_spinlocks_init(void) { } #endif #include <linux/debugfs.h> static int steal_spins_set(void *data, u64 val) { #if _Q_SPIN_TRY_LOCK_STEAL == 1 /* MAYBE_STEAL remains true */ steal_spins = val; #else static DEFINE_MUTEX(lock); /* * The lock slow path has a !maybe_stealers case that can assume * the head of queue will not see concurrent waiters. That waiter * is unsafe in the presence of stealers, so must keep them away * from one another. 
*/ mutex_lock(&lock); if (val && !steal_spins) { maybe_stealers = true; /* wait for queue head waiter to go away */ synchronize_rcu(); steal_spins = val; } else if (!val && steal_spins) { steal_spins = val; /* wait for all possible stealers to go away */ synchronize_rcu(); maybe_stealers = false; } else { steal_spins = val; } mutex_unlock(&lock); #endif return 0; } static int steal_spins_get(void *data, u64 *val) { *val = steal_spins; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_steal_spins, steal_spins_get, steal_spins_set, "%llu\n"); static int remote_steal_spins_set(void *data, u64 val) { remote_steal_spins = val; return 0; } static int remote_steal_spins_get(void *data, u64 *val) { *val = remote_steal_spins; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_remote_steal_spins, remote_steal_spins_get, remote_steal_spins_set, "%llu\n"); static int head_spins_set(void *data, u64 val) { head_spins = val; return 0; } static int head_spins_get(void *data, u64 *val) { *val = head_spins; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_head_spins, head_spins_get, head_spins_set, "%llu\n"); static int pv_yield_owner_set(void *data, u64 val) { pv_yield_owner = !!val; return 0; } static int pv_yield_owner_get(void *data, u64 *val) { *val = pv_yield_owner; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_owner, pv_yield_owner_get, pv_yield_owner_set, "%llu\n"); static int pv_yield_allow_steal_set(void *data, u64 val) { pv_yield_allow_steal = !!val; return 0; } static int pv_yield_allow_steal_get(void *data, u64 *val) { *val = pv_yield_allow_steal; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_allow_steal, pv_yield_allow_steal_get, pv_yield_allow_steal_set, "%llu\n"); static int pv_spin_on_preempted_owner_set(void *data, u64 val) { pv_spin_on_preempted_owner = !!val; return 0; } static int pv_spin_on_preempted_owner_get(void *data, u64 *val) { *val = pv_spin_on_preempted_owner; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_pv_spin_on_preempted_owner, pv_spin_on_preempted_owner_get, pv_spin_on_preempted_owner_set, "%llu\n"); static int pv_sleepy_lock_set(void *data, u64 val) { pv_sleepy_lock = !!val; return 0; } static int pv_sleepy_lock_get(void *data, u64 *val) { *val = pv_sleepy_lock; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock, pv_sleepy_lock_get, pv_sleepy_lock_set, "%llu\n"); static int pv_sleepy_lock_sticky_set(void *data, u64 val) { pv_sleepy_lock_sticky = !!val; return 0; } static int pv_sleepy_lock_sticky_get(void *data, u64 *val) { *val = pv_sleepy_lock_sticky; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_sticky, pv_sleepy_lock_sticky_get, pv_sleepy_lock_sticky_set, "%llu\n"); static int pv_sleepy_lock_interval_ns_set(void *data, u64 val) { pv_sleepy_lock_interval_ns = val; return 0; } static int pv_sleepy_lock_interval_ns_get(void *data, u64 *val) { *val = pv_sleepy_lock_interval_ns; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_interval_ns, pv_sleepy_lock_interval_ns_get, pv_sleepy_lock_interval_ns_set, "%llu\n"); static int pv_sleepy_lock_factor_set(void *data, u64 val) { pv_sleepy_lock_factor = val; return 0; } static int pv_sleepy_lock_factor_get(void *data, u64 *val) { *val = pv_sleepy_lock_factor; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_factor, pv_sleepy_lock_factor_get, pv_sleepy_lock_factor_set, "%llu\n"); static int pv_yield_prev_set(void *data, u64 val) { pv_yield_prev = !!val; return 0; } static int pv_yield_prev_get(void *data, u64 *val) { *val = pv_yield_prev; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_prev, pv_yield_prev_get, 
pv_yield_prev_set, "%llu\n"); static int pv_yield_propagate_owner_set(void *data, u64 val) { pv_yield_propagate_owner = !!val; return 0; } static int pv_yield_propagate_owner_get(void *data, u64 *val) { *val = pv_yield_propagate_owner; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_propagate_owner, pv_yield_propagate_owner_get, pv_yield_propagate_owner_set, "%llu\n"); static int pv_prod_head_set(void *data, u64 val) { pv_prod_head = !!val; return 0; } static int pv_prod_head_get(void *data, u64 *val) { *val = pv_prod_head; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_pv_prod_head, pv_prod_head_get, pv_prod_head_set, "%llu\n"); static __init int spinlock_debugfs_init(void) { debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins); debugfs_create_file("qspl_remote_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_remote_steal_spins); debugfs_create_file("qspl_head_spins", 0600, arch_debugfs_dir, NULL, &fops_head_spins); if (is_shared_processor()) { debugfs_create_file("qspl_pv_yield_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_owner); debugfs_create_file("qspl_pv_yield_allow_steal", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_allow_steal); debugfs_create_file("qspl_pv_spin_on_preempted_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_spin_on_preempted_owner); debugfs_create_file("qspl_pv_sleepy_lock", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock); debugfs_create_file("qspl_pv_sleepy_lock_sticky", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_sticky); debugfs_create_file("qspl_pv_sleepy_lock_interval_ns", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_interval_ns); debugfs_create_file("qspl_pv_sleepy_lock_factor", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_factor); debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev); debugfs_create_file("qspl_pv_yield_propagate_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_propagate_owner); debugfs_create_file("qspl_pv_prod_head", 0600, arch_debugfs_dir, NULL, &fops_pv_prod_head); } return 0; } device_initcall(spinlock_debugfs_init);
linux-master
arch/powerpc/lib/qspinlock.c
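One detail of the queueing code above that is easy to miss: encode_tail_cpu() stores cpu + 1, so a tail field of zero can unambiguously mean "no queued waiter", and decode_tail_cpu() reverses it. The standalone sketch below replays that round trip; the 16-bit shift is an assumed stand-in for _Q_TAIL_CPU_OFFSET, whose real value comes from the qspinlock headers rather than this file.

/* Standalone round-trip demo of the tail encoding used by encode/decode_tail_cpu().
 * TAIL_CPU_OFFSET is an assumed stand-in for the real _Q_TAIL_CPU_OFFSET. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAIL_CPU_OFFSET 16      /* assumption for illustration only */

static uint32_t encode_tail_cpu(int cpu)
{
        return (uint32_t)(cpu + 1) << TAIL_CPU_OFFSET;  /* +1 so tail == 0 means "no waiter" */
}

static int decode_tail_cpu(uint32_t val)
{
        return (int)(val >> TAIL_CPU_OFFSET) - 1;
}

int main(void)
{
        for (int cpu = 0; cpu < 8; cpu++) {
                uint32_t tail = encode_tail_cpu(cpu);

                assert(tail != 0);                      /* a queued CPU never encodes to 0 */
                assert(decode_tail_cpu(tail) == cpu);   /* the round trip recovers the CPU number */
                printf("cpu %d -> tail 0x%08x\n", cpu, (unsigned int)tail);
        }
        return 0;
}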
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2001 Ben. Herrenschmidt ([email protected]) * * Modifications for ppc64: * Copyright (C) 2003 Dave Engebretsen <[email protected]> * * Copyright 2008 Michael Ellerman, IBM Corporation. */ #include <linux/types.h> #include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/init.h> #include <linux/sched/mm.h> #include <linux/stop_machine.h> #include <asm/cputable.h> #include <asm/code-patching.h> #include <asm/interrupt.h> #include <asm/page.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/security_features.h> #include <asm/firmware.h> #include <asm/inst.h> struct fixup_entry { unsigned long mask; unsigned long value; long start_off; long end_off; long alt_start_off; long alt_end_off; }; static u32 *calc_addr(struct fixup_entry *fcur, long offset) { /* * We store the offset to the code as a negative offset from * the start of the alt_entry, to support the VDSO. This * routine converts that back into an actual address. */ return (u32 *)((unsigned long)fcur + offset); } static int patch_alt_instruction(u32 *src, u32 *dest, u32 *alt_start, u32 *alt_end) { int err; ppc_inst_t instr; instr = ppc_inst_read(src); if (instr_is_relative_branch(ppc_inst_read(src))) { u32 *target = (u32 *)branch_target(src); /* Branch within the section doesn't need translating */ if (target < alt_start || target > alt_end) { err = translate_branch(&instr, dest, src); if (err) return 1; } } raw_patch_instruction(dest, instr); return 0; } static int patch_feature_section_mask(unsigned long value, unsigned long mask, struct fixup_entry *fcur) { u32 *start, *end, *alt_start, *alt_end, *src, *dest; start = calc_addr(fcur, fcur->start_off); end = calc_addr(fcur, fcur->end_off); alt_start = calc_addr(fcur, fcur->alt_start_off); alt_end = calc_addr(fcur, fcur->alt_end_off); if ((alt_end - alt_start) > (end - start)) return 1; if ((value & fcur->mask & mask) == (fcur->value & mask)) return 0; src = alt_start; dest = start; for (; src < alt_end; src = ppc_inst_next(src, src), dest = ppc_inst_next(dest, dest)) { if (patch_alt_instruction(src, dest, alt_start, alt_end)) return 1; } for (; dest < end; dest++) raw_patch_instruction(dest, ppc_inst(PPC_RAW_NOP())); return 0; } static void do_feature_fixups_mask(unsigned long value, unsigned long mask, void *fixup_start, void *fixup_end) { struct fixup_entry *fcur, *fend; fcur = fixup_start; fend = fixup_end; for (; fcur < fend; fcur++) { if (patch_feature_section_mask(value, mask, fcur)) { WARN_ON(1); printk("Unable to patch feature section at %p - %p" \ " with %p - %p\n", calc_addr(fcur, fcur->start_off), calc_addr(fcur, fcur->end_off), calc_addr(fcur, fcur->alt_start_off), calc_addr(fcur, fcur->alt_end_off)); } } } void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) { do_feature_fixups_mask(value, ~0, fixup_start, fixup_end); } #ifdef CONFIG_PPC_BARRIER_NOSPEC static bool is_fixup_addr_valid(void *dest, size_t size) { return system_state < SYSTEM_FREEING_INITMEM || !init_section_contains(dest, size); } static int do_patch_fixups(long *start, long *end, unsigned int *instrs, int num) { int i; for (i = 0; start < end; start++, i++) { int j; unsigned int *dest = (void *)start + *start; if (!is_fixup_addr_valid(dest, sizeof(*instrs) * num)) continue; pr_devel("patching dest %lx\n", (unsigned long)dest); for (j = 0; j < num; j++) patch_instruction(dest + j, ppc_inst(instrs[j])); } return i; } #endif #ifdef CONFIG_PPC_BOOK3S_64 static int 
do_patch_entry_fixups(long *start, long *end, unsigned int *instrs, bool do_fallback, void *fallback) { int i; for (i = 0; start < end; start++, i++) { unsigned int *dest = (void *)start + *start; if (!is_fixup_addr_valid(dest, sizeof(*instrs) * 3)) continue; pr_devel("patching dest %lx\n", (unsigned long)dest); // See comment in do_entry_flush_fixups() RE order of patching if (do_fallback) { patch_instruction(dest, ppc_inst(instrs[0])); patch_instruction(dest + 2, ppc_inst(instrs[2])); patch_branch(dest + 1, (unsigned long)fallback, BRANCH_SET_LINK); } else { patch_instruction(dest + 1, ppc_inst(instrs[1])); patch_instruction(dest + 2, ppc_inst(instrs[2])); patch_instruction(dest, ppc_inst(instrs[0])); } } return i; } static void do_stf_entry_barrier_fixups(enum stf_barrier_type types) { unsigned int instrs[3]; long *start, *end; int i; start = PTRRELOC(&__start___stf_entry_barrier_fixup); end = PTRRELOC(&__stop___stf_entry_barrier_fixup); instrs[0] = PPC_RAW_NOP(); instrs[1] = PPC_RAW_NOP(); instrs[2] = PPC_RAW_NOP(); i = 0; if (types & STF_BARRIER_FALLBACK) { instrs[i++] = PPC_RAW_MFLR(_R10); instrs[i++] = PPC_RAW_NOP(); /* branch patched below */ instrs[i++] = PPC_RAW_MTLR(_R10); } else if (types & STF_BARRIER_EIEIO) { instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */ } else if (types & STF_BARRIER_SYNC_ORI) { instrs[i++] = PPC_RAW_SYNC(); instrs[i++] = PPC_RAW_LD(_R10, _R13, 0); instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */ } i = do_patch_entry_fixups(start, end, instrs, types & STF_BARRIER_FALLBACK, &stf_barrier_fallback); printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i, (types == STF_BARRIER_NONE) ? "no" : (types == STF_BARRIER_FALLBACK) ? "fallback" : (types == STF_BARRIER_EIEIO) ? "eieio" : (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync" : "unknown"); } static void do_stf_exit_barrier_fixups(enum stf_barrier_type types) { unsigned int instrs[6]; long *start, *end; int i; start = PTRRELOC(&__start___stf_exit_barrier_fixup); end = PTRRELOC(&__stop___stf_exit_barrier_fixup); instrs[0] = PPC_RAW_NOP(); instrs[1] = PPC_RAW_NOP(); instrs[2] = PPC_RAW_NOP(); instrs[3] = PPC_RAW_NOP(); instrs[4] = PPC_RAW_NOP(); instrs[5] = PPC_RAW_NOP(); i = 0; if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) { if (cpu_has_feature(CPU_FTR_HVMODE)) { instrs[i++] = PPC_RAW_MTSPR(SPRN_HSPRG1, _R13); instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG0); } else { instrs[i++] = PPC_RAW_MTSPR(SPRN_SPRG2, _R13); instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG1); } instrs[i++] = PPC_RAW_SYNC(); instrs[i++] = PPC_RAW_LD(_R13, _R13, 0); instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */ if (cpu_has_feature(CPU_FTR_HVMODE)) instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG1); else instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG2); } else if (types & STF_BARRIER_EIEIO) { instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */ } i = do_patch_fixups(start, end, instrs, ARRAY_SIZE(instrs)); printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i, (types == STF_BARRIER_NONE) ? "no" : (types == STF_BARRIER_FALLBACK) ? "fallback" : (types == STF_BARRIER_EIEIO) ? "eieio" : (types == (STF_BARRIER_SYNC_ORI)) ? 
"hwsync" : "unknown"); } static bool stf_exit_reentrant = false; static bool rfi_exit_reentrant = false; static DEFINE_MUTEX(exit_flush_lock); static int __do_stf_barrier_fixups(void *data) { enum stf_barrier_type *types = data; do_stf_entry_barrier_fixups(*types); do_stf_exit_barrier_fixups(*types); return 0; } void do_stf_barrier_fixups(enum stf_barrier_type types) { /* * The call to the fallback entry flush, and the fallback/sync-ori exit * flush can not be safely patched in/out while other CPUs are * executing them. So call __do_stf_barrier_fixups() on one CPU while * all other CPUs spin in the stop machine core with interrupts hard * disabled. * * The branch to mark interrupt exits non-reentrant is enabled first, * then stop_machine runs which will ensure all CPUs are out of the * low level interrupt exit code before patching. After the patching, * if allowed, then flip the branch to allow fast exits. */ // Prevent static key update races with do_rfi_flush_fixups() mutex_lock(&exit_flush_lock); static_branch_enable(&interrupt_exit_not_reentrant); stop_machine(__do_stf_barrier_fixups, &types, NULL); if ((types & STF_BARRIER_FALLBACK) || (types & STF_BARRIER_SYNC_ORI)) stf_exit_reentrant = false; else stf_exit_reentrant = true; if (stf_exit_reentrant && rfi_exit_reentrant) static_branch_disable(&interrupt_exit_not_reentrant); mutex_unlock(&exit_flush_lock); } void do_uaccess_flush_fixups(enum l1d_flush_type types) { unsigned int instrs[4]; long *start, *end; int i; start = PTRRELOC(&__start___uaccess_flush_fixup); end = PTRRELOC(&__stop___uaccess_flush_fixup); instrs[0] = PPC_RAW_NOP(); instrs[1] = PPC_RAW_NOP(); instrs[2] = PPC_RAW_NOP(); instrs[3] = PPC_RAW_BLR(); i = 0; if (types == L1D_FLUSH_FALLBACK) { instrs[3] = PPC_RAW_NOP(); /* fallthrough to fallback flush */ } if (types & L1D_FLUSH_ORI) { instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */ } if (types & L1D_FLUSH_MTTRIG) instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0); i = do_patch_fixups(start, end, instrs, ARRAY_SIZE(instrs)); printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i, (types == L1D_FLUSH_NONE) ? "no" : (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) ? "ori+mttrig type" : "ori type" : (types & L1D_FLUSH_MTTRIG) ? "mttrig type" : "unknown"); } static int __do_entry_flush_fixups(void *data) { enum l1d_flush_type types = *(enum l1d_flush_type *)data; unsigned int instrs[3]; long *start, *end; int i; instrs[0] = PPC_RAW_NOP(); instrs[1] = PPC_RAW_NOP(); instrs[2] = PPC_RAW_NOP(); i = 0; if (types == L1D_FLUSH_FALLBACK) { instrs[i++] = PPC_RAW_MFLR(_R10); instrs[i++] = PPC_RAW_NOP(); /* branch patched below */ instrs[i++] = PPC_RAW_MTLR(_R10); } if (types & L1D_FLUSH_ORI) { instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */ } if (types & L1D_FLUSH_MTTRIG) instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0); /* * If we're patching in or out the fallback flush we need to be careful about the * order in which we patch instructions. That's because it's possible we could * take a page fault after patching one instruction, so the sequence of * instructions must be safe even in a half patched state. 
* * To make that work, when patching in the fallback flush we patch in this order: * - the mflr (dest) * - the mtlr (dest + 2) * - the branch (dest + 1) * * That ensures the sequence is safe to execute at any point. In contrast if we * patch the mtlr last, it's possible we could return from the branch and not * restore LR, leading to a crash later. * * When patching out the fallback flush (either with nops or another flush type), * we patch in this order: * - the branch (dest + 1) * - the mtlr (dest + 2) * - the mflr (dest) * * Note we are protected by stop_machine() from other CPUs executing the code in a * semi-patched state. */ start = PTRRELOC(&__start___entry_flush_fixup); end = PTRRELOC(&__stop___entry_flush_fixup); i = do_patch_entry_fixups(start, end, instrs, types == L1D_FLUSH_FALLBACK, &entry_flush_fallback); start = PTRRELOC(&__start___scv_entry_flush_fixup); end = PTRRELOC(&__stop___scv_entry_flush_fixup); i += do_patch_entry_fixups(start, end, instrs, types == L1D_FLUSH_FALLBACK, &scv_entry_flush_fallback); printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i, (types == L1D_FLUSH_NONE) ? "no" : (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) ? "ori+mttrig type" : "ori type" : (types & L1D_FLUSH_MTTRIG) ? "mttrig type" : "unknown"); return 0; } void do_entry_flush_fixups(enum l1d_flush_type types) { /* * The call to the fallback flush can not be safely patched in/out while * other CPUs are executing it. So call __do_entry_flush_fixups() on one * CPU while all other CPUs spin in the stop machine core with interrupts * hard disabled. */ stop_machine(__do_entry_flush_fixups, &types, NULL); } static int __do_rfi_flush_fixups(void *data) { enum l1d_flush_type types = *(enum l1d_flush_type *)data; unsigned int instrs[3]; long *start, *end; int i; start = PTRRELOC(&__start___rfi_flush_fixup); end = PTRRELOC(&__stop___rfi_flush_fixup); instrs[0] = PPC_RAW_NOP(); instrs[1] = PPC_RAW_NOP(); instrs[2] = PPC_RAW_NOP(); if (types & L1D_FLUSH_FALLBACK) /* b .+16 to fallback flush */ instrs[0] = PPC_RAW_BRANCH(16); i = 0; if (types & L1D_FLUSH_ORI) { instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */ } if (types & L1D_FLUSH_MTTRIG) instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0); i = do_patch_fixups(start, end, instrs, ARRAY_SIZE(instrs)); printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i, (types == L1D_FLUSH_NONE) ? "no" : (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) ? "ori+mttrig type" : "ori type" : (types & L1D_FLUSH_MTTRIG) ? "mttrig type" : "unknown"); return 0; } void do_rfi_flush_fixups(enum l1d_flush_type types) { /* * stop_machine gets all CPUs out of the interrupt exit handler same * as do_stf_barrier_fixups. do_rfi_flush_fixups patching can run * without stop_machine, so this could be achieved with a broadcast * IPI instead, but this matches the stf sequence. 
*/ // Prevent static key update races with do_stf_barrier_fixups() mutex_lock(&exit_flush_lock); static_branch_enable(&interrupt_exit_not_reentrant); stop_machine(__do_rfi_flush_fixups, &types, NULL); if (types & L1D_FLUSH_FALLBACK) rfi_exit_reentrant = false; else rfi_exit_reentrant = true; if (stf_exit_reentrant && rfi_exit_reentrant) static_branch_disable(&interrupt_exit_not_reentrant); mutex_unlock(&exit_flush_lock); } void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) { unsigned int instr; long *start, *end; int i; start = fixup_start; end = fixup_end; instr = PPC_RAW_NOP(); if (enable) { pr_info("barrier-nospec: using ORI speculation barrier\n"); instr = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */ } i = do_patch_fixups(start, end, &instr, 1); printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); } #endif /* CONFIG_PPC_BOOK3S_64 */ #ifdef CONFIG_PPC_BARRIER_NOSPEC void do_barrier_nospec_fixups(bool enable) { void *start, *end; start = PTRRELOC(&__start___barrier_nospec_fixup); end = PTRRELOC(&__stop___barrier_nospec_fixup); do_barrier_nospec_fixups_range(enable, start, end); } #endif /* CONFIG_PPC_BARRIER_NOSPEC */ #ifdef CONFIG_PPC_E500 void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) { unsigned int instr[2]; long *start, *end; int i; start = fixup_start; end = fixup_end; instr[0] = PPC_RAW_NOP(); instr[1] = PPC_RAW_NOP(); if (enable) { pr_info("barrier-nospec: using isync; sync as speculation barrier\n"); instr[0] = PPC_RAW_ISYNC(); instr[1] = PPC_RAW_SYNC(); } i = do_patch_fixups(start, end, instr, ARRAY_SIZE(instr)); printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); } static void __init patch_btb_flush_section(long *curr) { unsigned int *start, *end; start = (void *)curr + *curr; end = (void *)curr + *(curr + 1); for (; start < end; start++) { pr_devel("patching dest %lx\n", (unsigned long)start); patch_instruction(start, ppc_inst(PPC_RAW_NOP())); } } void __init do_btb_flush_fixups(void) { long *start, *end; start = PTRRELOC(&__start__btb_flush_fixup); end = PTRRELOC(&__stop__btb_flush_fixup); for (; start < end; start += 2) patch_btb_flush_section(start); } #endif /* CONFIG_PPC_E500 */ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) { long *start, *end; u32 *dest; if (!(value & CPU_FTR_LWSYNC)) return ; start = fixup_start; end = fixup_end; for (; start < end; start++) { dest = (void *)start + *start; raw_patch_instruction(dest, ppc_inst(PPC_INST_LWSYNC)); } } static void __init do_final_fixups(void) { #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE) ppc_inst_t inst; u32 *src, *dest, *end; if (PHYSICAL_START == 0) return; src = (u32 *)(KERNELBASE + PHYSICAL_START); dest = (u32 *)KERNELBASE; end = (void *)src + (__end_interrupts - _stext); while (src < end) { inst = ppc_inst_read(src); raw_patch_instruction(dest, inst); src = ppc_inst_next(src, src); dest = ppc_inst_next(dest, dest); } #endif } static unsigned long __initdata saved_cpu_features; static unsigned int __initdata saved_mmu_features; #ifdef CONFIG_PPC64 static unsigned long __initdata saved_firmware_features; #endif void __init apply_feature_fixups(void) { struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec)); *PTRRELOC(&saved_cpu_features) = spec->cpu_features; *PTRRELOC(&saved_mmu_features) = spec->mmu_features; /* * Apply the CPU-specific and firmware specific fixups to kernel text * (nop out sections not relevant to this CPU or this firmware). 
*/ do_feature_fixups(spec->cpu_features, PTRRELOC(&__start___ftr_fixup), PTRRELOC(&__stop___ftr_fixup)); do_feature_fixups(spec->mmu_features, PTRRELOC(&__start___mmu_ftr_fixup), PTRRELOC(&__stop___mmu_ftr_fixup)); do_lwsync_fixups(spec->cpu_features, PTRRELOC(&__start___lwsync_fixup), PTRRELOC(&__stop___lwsync_fixup)); #ifdef CONFIG_PPC64 saved_firmware_features = powerpc_firmware_features; do_feature_fixups(powerpc_firmware_features, &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); #endif do_final_fixups(); } void __init update_mmu_feature_fixups(unsigned long mask) { saved_mmu_features &= ~mask; saved_mmu_features |= cur_cpu_spec->mmu_features & mask; do_feature_fixups_mask(cur_cpu_spec->mmu_features, mask, PTRRELOC(&__start___mmu_ftr_fixup), PTRRELOC(&__stop___mmu_ftr_fixup)); mmu_feature_keys_init(); } void __init setup_feature_keys(void) { /* * Initialise jump label. This causes all the cpu/mmu_has_feature() * checks to take on their correct polarity based on the current set of * CPU/MMU features. */ jump_label_init(); cpu_feature_keys_init(); mmu_feature_keys_init(); } static int __init check_features(void) { WARN(saved_cpu_features != cur_cpu_spec->cpu_features, "CPU features changed after feature patching!\n"); WARN(saved_mmu_features != cur_cpu_spec->mmu_features, "MMU features changed after feature patching!\n"); #ifdef CONFIG_PPC64 WARN(saved_firmware_features != powerpc_firmware_features, "Firmware features changed after feature patching!\n"); #endif return 0; } late_initcall(check_features); #ifdef CONFIG_FTR_FIXUP_SELFTEST #define check(x) \ if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__); static int patch_feature_section(unsigned long value, struct fixup_entry *fcur) { return patch_feature_section_mask(value, ~0, fcur); } /* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */ static struct fixup_entry fixup; static long __init calc_offset(struct fixup_entry *entry, unsigned int *p) { return (unsigned long)p - (unsigned long)entry; } static void __init test_basic_patching(void) { extern unsigned int ftr_fixup_test1[]; extern unsigned int end_ftr_fixup_test1[]; extern unsigned int ftr_fixup_test1_orig[]; extern unsigned int ftr_fixup_test1_expected[]; int size = 4 * (end_ftr_fixup_test1 - ftr_fixup_test1); fixup.value = fixup.mask = 8; fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1); fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2); fixup.alt_start_off = fixup.alt_end_off = 0; /* Sanity check */ check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0); /* Check we don't patch if the value matches */ patch_feature_section(8, &fixup); check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0); /* Check we do patch if the value doesn't match */ patch_feature_section(0, &fixup); check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0); /* Check we do patch if the mask doesn't match */ memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size); check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0); patch_feature_section(~8, &fixup); check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0); } static void __init test_alternative_patching(void) { extern unsigned int ftr_fixup_test2[]; extern unsigned int end_ftr_fixup_test2[]; extern unsigned int ftr_fixup_test2_orig[]; extern unsigned int ftr_fixup_test2_alt[]; extern unsigned int ftr_fixup_test2_expected[]; int size = 4 * (end_ftr_fixup_test2 - ftr_fixup_test2); fixup.value = fixup.mask = 0xF; fixup.start_off = 
calc_offset(&fixup, ftr_fixup_test2 + 1); fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2); fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt); fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1); /* Sanity check */ check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0); /* Check we don't patch if the value matches */ patch_feature_section(0xF, &fixup); check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0); /* Check we do patch if the value doesn't match */ patch_feature_section(0, &fixup); check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0); /* Check we do patch if the mask doesn't match */ memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size); check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0); patch_feature_section(~0xF, &fixup); check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0); } static void __init test_alternative_case_too_big(void) { extern unsigned int ftr_fixup_test3[]; extern unsigned int end_ftr_fixup_test3[]; extern unsigned int ftr_fixup_test3_orig[]; extern unsigned int ftr_fixup_test3_alt[]; int size = 4 * (end_ftr_fixup_test3 - ftr_fixup_test3); fixup.value = fixup.mask = 0xC; fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1); fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2); fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt); fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2); /* Sanity check */ check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); /* Expect nothing to be patched, and the error returned to us */ check(patch_feature_section(0xF, &fixup) == 1); check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); check(patch_feature_section(0, &fixup) == 1); check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); check(patch_feature_section(~0xF, &fixup) == 1); check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); } static void __init test_alternative_case_too_small(void) { extern unsigned int ftr_fixup_test4[]; extern unsigned int end_ftr_fixup_test4[]; extern unsigned int ftr_fixup_test4_orig[]; extern unsigned int ftr_fixup_test4_alt[]; extern unsigned int ftr_fixup_test4_expected[]; int size = 4 * (end_ftr_fixup_test4 - ftr_fixup_test4); unsigned long flag; /* Check a high-bit flag */ flag = 1UL << ((sizeof(unsigned long) - 1) * 8); fixup.value = fixup.mask = flag; fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1); fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5); fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt); fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2); /* Sanity check */ check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0); /* Check we don't patch if the value matches */ patch_feature_section(flag, &fixup); check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0); /* Check we do patch if the value doesn't match */ patch_feature_section(0, &fixup); check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0); /* Check we do patch if the mask doesn't match */ memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size); check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0); patch_feature_section(~flag, &fixup); check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0); } static void test_alternative_case_with_branch(void) { extern unsigned int ftr_fixup_test5[]; extern unsigned int end_ftr_fixup_test5[]; extern unsigned int ftr_fixup_test5_expected[]; int size = 4 * (end_ftr_fixup_test5 - 
ftr_fixup_test5); check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0); } static void __init test_alternative_case_with_external_branch(void) { extern unsigned int ftr_fixup_test6[]; extern unsigned int end_ftr_fixup_test6[]; extern unsigned int ftr_fixup_test6_expected[]; int size = 4 * (end_ftr_fixup_test6 - ftr_fixup_test6); check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0); } static void __init test_alternative_case_with_branch_to_end(void) { extern unsigned int ftr_fixup_test7[]; extern unsigned int end_ftr_fixup_test7[]; extern unsigned int ftr_fixup_test7_expected[]; int size = 4 * (end_ftr_fixup_test7 - ftr_fixup_test7); check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0); } static void __init test_cpu_macros(void) { extern u8 ftr_fixup_test_FTR_macros[]; extern u8 ftr_fixup_test_FTR_macros_expected[]; unsigned long size = ftr_fixup_test_FTR_macros_expected - ftr_fixup_test_FTR_macros; /* The fixups have already been done for us during boot */ check(memcmp(ftr_fixup_test_FTR_macros, ftr_fixup_test_FTR_macros_expected, size) == 0); } static void __init test_fw_macros(void) { #ifdef CONFIG_PPC64 extern u8 ftr_fixup_test_FW_FTR_macros[]; extern u8 ftr_fixup_test_FW_FTR_macros_expected[]; unsigned long size = ftr_fixup_test_FW_FTR_macros_expected - ftr_fixup_test_FW_FTR_macros; /* The fixups have already been done for us during boot */ check(memcmp(ftr_fixup_test_FW_FTR_macros, ftr_fixup_test_FW_FTR_macros_expected, size) == 0); #endif } static void __init test_lwsync_macros(void) { extern u8 lwsync_fixup_test[]; extern u8 end_lwsync_fixup_test[]; extern u8 lwsync_fixup_test_expected_LWSYNC[]; extern u8 lwsync_fixup_test_expected_SYNC[]; unsigned long size = end_lwsync_fixup_test - lwsync_fixup_test; /* The fixups have already been done for us during boot */ if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) { check(memcmp(lwsync_fixup_test, lwsync_fixup_test_expected_LWSYNC, size) == 0); } else { check(memcmp(lwsync_fixup_test, lwsync_fixup_test_expected_SYNC, size) == 0); } } #ifdef CONFIG_PPC64 static void __init test_prefix_patching(void) { extern unsigned int ftr_fixup_prefix1[]; extern unsigned int end_ftr_fixup_prefix1[]; extern unsigned int ftr_fixup_prefix1_orig[]; extern unsigned int ftr_fixup_prefix1_expected[]; int size = sizeof(unsigned int) * (end_ftr_fixup_prefix1 - ftr_fixup_prefix1); fixup.value = fixup.mask = 8; fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix1 + 1); fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix1 + 3); fixup.alt_start_off = fixup.alt_end_off = 0; /* Sanity check */ check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) == 0); patch_feature_section(0, &fixup); check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_expected, size) == 0); check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) != 0); } static void __init test_prefix_alt_patching(void) { extern unsigned int ftr_fixup_prefix2[]; extern unsigned int end_ftr_fixup_prefix2[]; extern unsigned int ftr_fixup_prefix2_orig[]; extern unsigned int ftr_fixup_prefix2_expected[]; extern unsigned int ftr_fixup_prefix2_alt[]; int size = sizeof(unsigned int) * (end_ftr_fixup_prefix2 - ftr_fixup_prefix2); fixup.value = fixup.mask = 8; fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix2 + 1); fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix2 + 3); fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix2_alt); fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix2_alt + 2); /* Sanity check */ check(memcmp(ftr_fixup_prefix2, 
ftr_fixup_prefix2_orig, size) == 0); patch_feature_section(0, &fixup); check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_expected, size) == 0); check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) != 0); } static void __init test_prefix_word_alt_patching(void) { extern unsigned int ftr_fixup_prefix3[]; extern unsigned int end_ftr_fixup_prefix3[]; extern unsigned int ftr_fixup_prefix3_orig[]; extern unsigned int ftr_fixup_prefix3_expected[]; extern unsigned int ftr_fixup_prefix3_alt[]; int size = sizeof(unsigned int) * (end_ftr_fixup_prefix3 - ftr_fixup_prefix3); fixup.value = fixup.mask = 8; fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix3 + 1); fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix3 + 4); fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix3_alt); fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix3_alt + 3); /* Sanity check */ check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) == 0); patch_feature_section(0, &fixup); check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_expected, size) == 0); patch_feature_section(0, &fixup); check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) != 0); } #else static inline void test_prefix_patching(void) {} static inline void test_prefix_alt_patching(void) {} static inline void test_prefix_word_alt_patching(void) {} #endif /* CONFIG_PPC64 */ static int __init test_feature_fixups(void) { printk(KERN_DEBUG "Running feature fixup self-tests ...\n"); test_basic_patching(); test_alternative_patching(); test_alternative_case_too_big(); test_alternative_case_too_small(); test_alternative_case_with_branch(); test_alternative_case_with_external_branch(); test_alternative_case_with_branch_to_end(); test_cpu_macros(); test_fw_macros(); test_lwsync_macros(); test_prefix_patching(); test_prefix_alt_patching(); test_prefix_word_alt_patching(); return 0; } late_initcall(test_feature_fixups); #endif /* CONFIG_FTR_FIXUP_SELFTEST */
linux-master
arch/powerpc/lib/feature-fixups.c
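Illustrative aside (not part of the kernel sources above): the CONFIG_FTR_FIXUP_SELFTEST cases in feature-fixups.c all exercise one core rule -- a fixup section is left untouched only when the masked feature bits equal the entry's expected value, and is otherwise overwritten with the alternative (or nops). A minimal userspace sketch of that decision, assuming a simplified demo_fixup struct that mirrors only the fields the tests set:

#include <stdbool.h>
#include <stdio.h>

struct demo_fixup {                     /* hypothetical stand-in for struct fixup_entry */
	unsigned long mask;
	unsigned long value;
};

/* Mirrors the check the selftests rely on: keep the original code only when
 * (features & mask) == value; otherwise the section gets patched. */
static bool needs_patching(unsigned long cpu_features, const struct demo_fixup *f)
{
	return (cpu_features & f->mask) != f->value;
}

int main(void)
{
	struct demo_fixup f = { .mask = 8, .value = 8 };   /* as in test_basic_patching() */

	printf("features = 8  -> patch? %d\n", needs_patching(8, &f));    /* 0: value matches */
	printf("features = 0  -> patch? %d\n", needs_patching(0, &f));    /* 1: value differs */
	printf("features = ~8 -> patch? %d\n", needs_patching(~8UL, &f)); /* 1: mask mismatch */
	return 0;
}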
// SPDX-License-Identifier: GPL-2.0+ #include <linux/error-injection.h> #include <linux/kprobes.h> #include <linux/uaccess.h> void override_function_with_return(struct pt_regs *regs) { /* * Emulate 'blr'. 'regs' represents the state on entry of a predefined * function in the kernel/module, captured on a kprobe. We don't need * to worry about 32-bit userspace on a 64-bit kernel. */ regs_set_return_ip(regs, regs->link); } NOKPROBE_SYMBOL(override_function_with_return);
linux-master
arch/powerpc/lib/error-inject.c
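Illustrative aside: override_function_with_return() is the powerpc hook used by the generic function error injection framework, and a function normally has to opt in before a kprobe/BPF-based injector may force an early return through it. A hedged sketch of that opt-in, using a made-up demo_prepare() function; the ALLOW_ERROR_INJECTION() macro comes from <linux/error-injection.h> and takes effect only with CONFIG_FUNCTION_ERROR_INJECTION enabled:

#include <linux/errno.h>
#include <linux/error-injection.h>

/* demo_prepare() is a hypothetical function used only for illustration. */
static noinline int demo_prepare(int arg)
{
	if (arg < 0)
		return -EINVAL;
	return 0;
}
/*
 * Whitelist the function for error injection: an injector can then place a
 * kprobe on its entry, set the return-value register, and call
 * override_function_with_return() so the function body is skipped and the
 * caller sees the injected errno.
 */
ALLOW_ERROR_INJECTION(demo_prepare, ERRNO);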
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (C) IBM Corporation, 2012 * * Author: Anton Blanchard <[email protected]> */ /* * Sparse (as at v0.5.0) gets very, very confused by this file. * Make it a bit simpler for it. */ #if !defined(__CHECKER__) #include <altivec.h> #else #define vec_xor(a, b) a ^ b #define vector __attribute__((vector_size(16))) #endif #include "xor_vmx.h" typedef vector signed char unative_t; #define DEFINE(V) \ unative_t *V = (unative_t *)V##_in; \ unative_t V##_0, V##_1, V##_2, V##_3 #define LOAD(V) \ do { \ V##_0 = V[0]; \ V##_1 = V[1]; \ V##_2 = V[2]; \ V##_3 = V[3]; \ } while (0) #define STORE(V) \ do { \ V[0] = V##_0; \ V[1] = V##_1; \ V[2] = V##_2; \ V[3] = V##_3; \ } while (0) #define XOR(V1, V2) \ do { \ V1##_0 = vec_xor(V1##_0, V2##_0); \ V1##_1 = vec_xor(V1##_1, V2##_1); \ V1##_2 = vec_xor(V1##_2, V2##_2); \ V1##_3 = vec_xor(V1##_3, V2##_3); \ } while (0) void __xor_altivec_2(unsigned long bytes, unsigned long * __restrict v1_in, const unsigned long * __restrict v2_in) { DEFINE(v1); DEFINE(v2); unsigned long lines = bytes / (sizeof(unative_t)) / 4; do { LOAD(v1); LOAD(v2); XOR(v1, v2); STORE(v1); v1 += 4; v2 += 4; } while (--lines > 0); } void __xor_altivec_3(unsigned long bytes, unsigned long * __restrict v1_in, const unsigned long * __restrict v2_in, const unsigned long * __restrict v3_in) { DEFINE(v1); DEFINE(v2); DEFINE(v3); unsigned long lines = bytes / (sizeof(unative_t)) / 4; do { LOAD(v1); LOAD(v2); LOAD(v3); XOR(v1, v2); XOR(v1, v3); STORE(v1); v1 += 4; v2 += 4; v3 += 4; } while (--lines > 0); } void __xor_altivec_4(unsigned long bytes, unsigned long * __restrict v1_in, const unsigned long * __restrict v2_in, const unsigned long * __restrict v3_in, const unsigned long * __restrict v4_in) { DEFINE(v1); DEFINE(v2); DEFINE(v3); DEFINE(v4); unsigned long lines = bytes / (sizeof(unative_t)) / 4; do { LOAD(v1); LOAD(v2); LOAD(v3); LOAD(v4); XOR(v1, v2); XOR(v3, v4); XOR(v1, v3); STORE(v1); v1 += 4; v2 += 4; v3 += 4; v4 += 4; } while (--lines > 0); } void __xor_altivec_5(unsigned long bytes, unsigned long * __restrict v1_in, const unsigned long * __restrict v2_in, const unsigned long * __restrict v3_in, const unsigned long * __restrict v4_in, const unsigned long * __restrict v5_in) { DEFINE(v1); DEFINE(v2); DEFINE(v3); DEFINE(v4); DEFINE(v5); unsigned long lines = bytes / (sizeof(unative_t)) / 4; do { LOAD(v1); LOAD(v2); LOAD(v3); LOAD(v4); LOAD(v5); XOR(v1, v2); XOR(v3, v4); XOR(v1, v5); XOR(v1, v3); STORE(v1); v1 += 4; v2 += 4; v3 += 4; v4 += 4; v5 += 4; } while (--lines > 0); }
linux-master
arch/powerpc/lib/xor_vmx.c
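Illustrative aside: the xor_vmx.c routines all follow the same unrolled load/xor/store pattern over 64-byte "lines". A plain-C sketch of the two-source case, using unsigned long words in place of the 16-byte AltiVec vectors; like the VMX code's do/while loop, it assumes bytes is a non-zero multiple of 4 * sizeof(unsigned long):

#include <stddef.h>

static void xor_2_scalar(size_t bytes, unsigned long *v1, const unsigned long *v2)
{
	size_t lines = bytes / (sizeof(unsigned long) * 4);

	do {
		/* one "line": four words loaded, xor'ed and stored back */
		v1[0] ^= v2[0];
		v1[1] ^= v2[1];
		v1[2] ^= v2[2];
		v1[3] ^= v2[3];
		v1 += 4;
		v2 += 4;
	} while (--lines > 0);
}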
// SPDX-License-Identifier: GPL-2.0-or-later /* * Single-step support. * * Copyright (C) 2004 Paul Mackerras <[email protected]>, IBM */ #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/prefetch.h> #include <asm/sstep.h> #include <asm/processor.h> #include <linux/uaccess.h> #include <asm/cpu_has_feature.h> #include <asm/cputable.h> #include <asm/disassemble.h> #ifdef CONFIG_PPC64 /* Bits in SRR1 that are copied from MSR */ #define MSR_MASK 0xffffffff87c0ffffUL #else #define MSR_MASK 0x87c0ffff #endif /* Bits in XER */ #define XER_SO 0x80000000U #define XER_OV 0x40000000U #define XER_CA 0x20000000U #define XER_OV32 0x00080000U #define XER_CA32 0x00040000U #ifdef CONFIG_VSX #define VSX_REGISTER_XTP(rd) ((((rd) & 1) << 5) | ((rd) & 0xfe)) #endif #ifdef CONFIG_PPC_FPU /* * Functions in ldstfp.S */ extern void get_fpr(int rn, double *p); extern void put_fpr(int rn, const double *p); extern void get_vr(int rn, __vector128 *p); extern void put_vr(int rn, __vector128 *p); extern void load_vsrn(int vsr, const void *p); extern void store_vsrn(int vsr, void *p); extern void conv_sp_to_dp(const float *sp, double *dp); extern void conv_dp_to_sp(const double *dp, float *sp); #endif #ifdef __powerpc64__ /* * Functions in quad.S */ extern int do_lq(unsigned long ea, unsigned long *regs); extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1); extern int do_lqarx(unsigned long ea, unsigned long *regs); extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1, unsigned int *crp); #endif #ifdef __LITTLE_ENDIAN__ #define IS_LE 1 #define IS_BE 0 #else #define IS_LE 0 #define IS_BE 1 #endif /* * Emulate the truncation of 64 bit values in 32-bit mode. */ static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr, unsigned long val) { if ((msr & MSR_64BIT) == 0) val &= 0xffffffffUL; return val; } /* * Determine whether a conditional branch instruction would branch. 
*/ static nokprobe_inline int branch_taken(unsigned int instr, const struct pt_regs *regs, struct instruction_op *op) { unsigned int bo = (instr >> 21) & 0x1f; unsigned int bi; if ((bo & 4) == 0) { /* decrement counter */ op->type |= DECCTR; if (((bo >> 1) & 1) ^ (regs->ctr == 1)) return 0; } if ((bo & 0x10) == 0) { /* check bit from CR */ bi = (instr >> 16) & 0x1f; if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1)) return 0; } return 1; } static nokprobe_inline long address_ok(struct pt_regs *regs, unsigned long ea, int nb) { if (!user_mode(regs)) return 1; if (access_ok((void __user *)ea, nb)) return 1; if (access_ok((void __user *)ea, 1)) /* Access overlaps the end of the user region */ regs->dar = TASK_SIZE_MAX - 1; else regs->dar = ea; return 0; } /* * Calculate effective address for a D-form instruction */ static nokprobe_inline unsigned long dform_ea(unsigned int instr, const struct pt_regs *regs) { int ra; unsigned long ea; ra = (instr >> 16) & 0x1f; ea = (signed short) instr; /* sign-extend */ if (ra) ea += regs->gpr[ra]; return ea; } #ifdef __powerpc64__ /* * Calculate effective address for a DS-form instruction */ static nokprobe_inline unsigned long dsform_ea(unsigned int instr, const struct pt_regs *regs) { int ra; unsigned long ea; ra = (instr >> 16) & 0x1f; ea = (signed short) (instr & ~3); /* sign-extend */ if (ra) ea += regs->gpr[ra]; return ea; } /* * Calculate effective address for a DQ-form instruction */ static nokprobe_inline unsigned long dqform_ea(unsigned int instr, const struct pt_regs *regs) { int ra; unsigned long ea; ra = (instr >> 16) & 0x1f; ea = (signed short) (instr & ~0xf); /* sign-extend */ if (ra) ea += regs->gpr[ra]; return ea; } #endif /* __powerpc64 */ /* * Calculate effective address for an X-form instruction */ static nokprobe_inline unsigned long xform_ea(unsigned int instr, const struct pt_regs *regs) { int ra, rb; unsigned long ea; ra = (instr >> 16) & 0x1f; rb = (instr >> 11) & 0x1f; ea = regs->gpr[rb]; if (ra) ea += regs->gpr[ra]; return ea; } /* * Calculate effective address for a MLS:D-form / 8LS:D-form * prefixed instruction */ static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr, unsigned int suffix, const struct pt_regs *regs) { int ra, prefix_r; unsigned int dd; unsigned long ea, d0, d1, d; prefix_r = GET_PREFIX_R(instr); ra = GET_PREFIX_RA(suffix); d0 = instr & 0x3ffff; d1 = suffix & 0xffff; d = (d0 << 16) | d1; /* * sign extend a 34 bit number */ dd = (unsigned int)(d >> 2); ea = (signed int)dd; ea = (ea << 2) | (d & 0x3); if (!prefix_r && ra) ea += regs->gpr[ra]; else if (!prefix_r && !ra) ; /* Leave ea as is */ else if (prefix_r) ea += regs->nip; /* * (prefix_r && ra) is an invalid form. Should already be * checked for by caller! */ return ea; } /* * Return the largest power of 2, not greater than sizeof(unsigned long), * such that x is a multiple of it. 
*/ static nokprobe_inline unsigned long max_align(unsigned long x) { x |= sizeof(unsigned long); return x & -x; /* isolates rightmost bit */ } static nokprobe_inline unsigned long byterev_2(unsigned long x) { return ((x >> 8) & 0xff) | ((x & 0xff) << 8); } static nokprobe_inline unsigned long byterev_4(unsigned long x) { return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) | ((x & 0xff00) << 8) | ((x & 0xff) << 24); } #ifdef __powerpc64__ static nokprobe_inline unsigned long byterev_8(unsigned long x) { return (byterev_4(x) << 32) | byterev_4(x >> 32); } #endif static nokprobe_inline void do_byte_reverse(void *ptr, int nb) { switch (nb) { case 2: *(u16 *)ptr = byterev_2(*(u16 *)ptr); break; case 4: *(u32 *)ptr = byterev_4(*(u32 *)ptr); break; #ifdef __powerpc64__ case 8: *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr); break; case 16: { unsigned long *up = (unsigned long *)ptr; unsigned long tmp; tmp = byterev_8(up[0]); up[0] = byterev_8(up[1]); up[1] = tmp; break; } case 32: { unsigned long *up = (unsigned long *)ptr; unsigned long tmp; tmp = byterev_8(up[0]); up[0] = byterev_8(up[3]); up[3] = tmp; tmp = byterev_8(up[2]); up[2] = byterev_8(up[1]); up[1] = tmp; break; } #endif default: WARN_ON_ONCE(1); } } static __always_inline int __read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) { unsigned long x = 0; switch (nb) { case 1: unsafe_get_user(x, (unsigned char __user *)ea, Efault); break; case 2: unsafe_get_user(x, (unsigned short __user *)ea, Efault); break; case 4: unsafe_get_user(x, (unsigned int __user *)ea, Efault); break; #ifdef __powerpc64__ case 8: unsafe_get_user(x, (unsigned long __user *)ea, Efault); break; #endif } *dest = x; return 0; Efault: regs->dar = ea; return -EFAULT; } static nokprobe_inline int read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) { int err; if (is_kernel_addr(ea)) return __read_mem_aligned(dest, ea, nb, regs); if (user_read_access_begin((void __user *)ea, nb)) { err = __read_mem_aligned(dest, ea, nb, regs); user_read_access_end(); } else { err = -EFAULT; regs->dar = ea; } return err; } /* * Copy from userspace to a buffer, using the largest possible * aligned accesses, up to sizeof(long). */ static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) { int c; for (; nb > 0; nb -= c) { c = max_align(ea); if (c > nb) c = max_align(nb); switch (c) { case 1: unsafe_get_user(*dest, (u8 __user *)ea, Efault); break; case 2: unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault); break; case 4: unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault); break; #ifdef __powerpc64__ case 8: unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault); break; #endif } dest += c; ea += c; } return 0; Efault: regs->dar = ea; return -EFAULT; } static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) { int err; if (is_kernel_addr(ea)) return __copy_mem_in(dest, ea, nb, regs); if (user_read_access_begin((void __user *)ea, nb)) { err = __copy_mem_in(dest, ea, nb, regs); user_read_access_end(); } else { err = -EFAULT; regs->dar = ea; } return err; } static nokprobe_inline int read_mem_unaligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) { union { unsigned long ul; u8 b[sizeof(unsigned long)]; } u; int i; int err; u.ul = 0; i = IS_BE ? 
sizeof(unsigned long) - nb : 0; err = copy_mem_in(&u.b[i], ea, nb, regs); if (!err) *dest = u.ul; return err; } /* * Read memory at address ea for nb bytes, return 0 for success * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8. * If nb < sizeof(long), the result is right-justified on BE systems. */ static int read_mem(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) { if (!address_ok(regs, ea, nb)) return -EFAULT; if ((ea & (nb - 1)) == 0) return read_mem_aligned(dest, ea, nb, regs); return read_mem_unaligned(dest, ea, nb, regs); } NOKPROBE_SYMBOL(read_mem); static __always_inline int __write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) { switch (nb) { case 1: unsafe_put_user(val, (unsigned char __user *)ea, Efault); break; case 2: unsafe_put_user(val, (unsigned short __user *)ea, Efault); break; case 4: unsafe_put_user(val, (unsigned int __user *)ea, Efault); break; #ifdef __powerpc64__ case 8: unsafe_put_user(val, (unsigned long __user *)ea, Efault); break; #endif } return 0; Efault: regs->dar = ea; return -EFAULT; } static nokprobe_inline int write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) { int err; if (is_kernel_addr(ea)) return __write_mem_aligned(val, ea, nb, regs); if (user_write_access_begin((void __user *)ea, nb)) { err = __write_mem_aligned(val, ea, nb, regs); user_write_access_end(); } else { err = -EFAULT; regs->dar = ea; } return err; } /* * Copy from a buffer to userspace, using the largest possible * aligned accesses, up to sizeof(long). */ static __always_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) { int c; for (; nb > 0; nb -= c) { c = max_align(ea); if (c > nb) c = max_align(nb); switch (c) { case 1: unsafe_put_user(*dest, (u8 __user *)ea, Efault); break; case 2: unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault); break; case 4: unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault); break; #ifdef __powerpc64__ case 8: unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault); break; #endif } dest += c; ea += c; } return 0; Efault: regs->dar = ea; return -EFAULT; } static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) { int err; if (is_kernel_addr(ea)) return __copy_mem_out(dest, ea, nb, regs); if (user_write_access_begin((void __user *)ea, nb)) { err = __copy_mem_out(dest, ea, nb, regs); user_write_access_end(); } else { err = -EFAULT; regs->dar = ea; } return err; } static nokprobe_inline int write_mem_unaligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) { union { unsigned long ul; u8 b[sizeof(unsigned long)]; } u; int i; u.ul = val; i = IS_BE ? sizeof(unsigned long) - nb : 0; return copy_mem_out(&u.b[i], ea, nb, regs); } /* * Write memory at address ea for nb bytes, return 0 for success * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8. */ static int write_mem(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) { if (!address_ok(regs, ea, nb)) return -EFAULT; if ((ea & (nb - 1)) == 0) return write_mem_aligned(val, ea, nb, regs); return write_mem_unaligned(val, ea, nb, regs); } NOKPROBE_SYMBOL(write_mem); #ifdef CONFIG_PPC_FPU /* * These access either the real FP register or the image in the * thread_struct, depending on regs->msr & MSR_FP. 
*/ static int do_fp_load(struct instruction_op *op, unsigned long ea, struct pt_regs *regs, bool cross_endian) { int err, rn, nb; union { int i; unsigned int u; float f; double d[2]; unsigned long l[2]; u8 b[2 * sizeof(double)]; } u; nb = GETSIZE(op->type); if (!address_ok(regs, ea, nb)) return -EFAULT; rn = op->reg; err = copy_mem_in(u.b, ea, nb, regs); if (err) return err; if (unlikely(cross_endian)) { do_byte_reverse(u.b, min(nb, 8)); if (nb == 16) do_byte_reverse(&u.b[8], 8); } preempt_disable(); if (nb == 4) { if (op->type & FPCONV) conv_sp_to_dp(&u.f, &u.d[0]); else if (op->type & SIGNEXT) u.l[0] = u.i; else u.l[0] = u.u; } if (regs->msr & MSR_FP) put_fpr(rn, &u.d[0]); else current->thread.TS_FPR(rn) = u.l[0]; if (nb == 16) { /* lfdp */ rn |= 1; if (regs->msr & MSR_FP) put_fpr(rn, &u.d[1]); else current->thread.TS_FPR(rn) = u.l[1]; } preempt_enable(); return 0; } NOKPROBE_SYMBOL(do_fp_load); static int do_fp_store(struct instruction_op *op, unsigned long ea, struct pt_regs *regs, bool cross_endian) { int rn, nb; union { unsigned int u; float f; double d[2]; unsigned long l[2]; u8 b[2 * sizeof(double)]; } u; nb = GETSIZE(op->type); if (!address_ok(regs, ea, nb)) return -EFAULT; rn = op->reg; preempt_disable(); if (regs->msr & MSR_FP) get_fpr(rn, &u.d[0]); else u.l[0] = current->thread.TS_FPR(rn); if (nb == 4) { if (op->type & FPCONV) conv_dp_to_sp(&u.d[0], &u.f); else u.u = u.l[0]; } if (nb == 16) { rn |= 1; if (regs->msr & MSR_FP) get_fpr(rn, &u.d[1]); else u.l[1] = current->thread.TS_FPR(rn); } preempt_enable(); if (unlikely(cross_endian)) { do_byte_reverse(u.b, min(nb, 8)); if (nb == 16) do_byte_reverse(&u.b[8], 8); } return copy_mem_out(u.b, ea, nb, regs); } NOKPROBE_SYMBOL(do_fp_store); #endif #ifdef CONFIG_ALTIVEC /* For Altivec/VMX, no need to worry about alignment */ static nokprobe_inline int do_vec_load(int rn, unsigned long ea, int size, struct pt_regs *regs, bool cross_endian) { int err; union { __vector128 v; u8 b[sizeof(__vector128)]; } u = {}; if (!address_ok(regs, ea & ~0xfUL, 16)) return -EFAULT; /* align to multiple of size */ ea &= ~(size - 1); err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs); if (err) return err; if (unlikely(cross_endian)) do_byte_reverse(&u.b[ea & 0xf], size); preempt_disable(); if (regs->msr & MSR_VEC) put_vr(rn, &u.v); else current->thread.vr_state.vr[rn] = u.v; preempt_enable(); return 0; } static nokprobe_inline int do_vec_store(int rn, unsigned long ea, int size, struct pt_regs *regs, bool cross_endian) { union { __vector128 v; u8 b[sizeof(__vector128)]; } u; if (!address_ok(regs, ea & ~0xfUL, 16)) return -EFAULT; /* align to multiple of size */ ea &= ~(size - 1); preempt_disable(); if (regs->msr & MSR_VEC) get_vr(rn, &u.v); else u.v = current->thread.vr_state.vr[rn]; preempt_enable(); if (unlikely(cross_endian)) do_byte_reverse(&u.b[ea & 0xf], size); return copy_mem_out(&u.b[ea & 0xf], ea, size, regs); } #endif /* CONFIG_ALTIVEC */ #ifdef __powerpc64__ static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea, int reg, bool cross_endian) { int err; if (!address_ok(regs, ea, 16)) return -EFAULT; /* if aligned, should be atomic */ if ((ea & 0xf) == 0) { err = do_lq(ea, &regs->gpr[reg]); } else { err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs); if (!err) err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs); } if (!err && unlikely(cross_endian)) do_byte_reverse(&regs->gpr[reg], 16); return err; } static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea, int reg, bool cross_endian) { int err; 
unsigned long vals[2]; if (!address_ok(regs, ea, 16)) return -EFAULT; vals[0] = regs->gpr[reg]; vals[1] = regs->gpr[reg + 1]; if (unlikely(cross_endian)) do_byte_reverse(vals, 16); /* if aligned, should be atomic */ if ((ea & 0xf) == 0) return do_stq(ea, vals[0], vals[1]); err = write_mem(vals[IS_LE], ea, 8, regs); if (!err) err = write_mem(vals[IS_BE], ea + 8, 8, regs); return err; } #endif /* __powerpc64 */ #ifdef CONFIG_VSX void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, const void *mem, bool rev) { int size, read_size; int i, j; const unsigned int *wp; const unsigned short *hp; const unsigned char *bp; size = GETSIZE(op->type); reg->d[0] = reg->d[1] = 0; switch (op->element_size) { case 32: /* [p]lxvp[x] */ case 16: /* whole vector; lxv[x] or lxvl[l] */ if (size == 0) break; memcpy(reg, mem, size); if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) rev = !rev; if (rev) do_byte_reverse(reg, size); break; case 8: /* scalar loads, lxvd2x, lxvdsx */ read_size = (size >= 8) ? 8 : size; i = IS_LE ? 8 : 8 - read_size; memcpy(&reg->b[i], mem, read_size); if (rev) do_byte_reverse(&reg->b[i], 8); if (size < 8) { if (op->type & SIGNEXT) { /* size == 4 is the only case here */ reg->d[IS_LE] = (signed int) reg->d[IS_LE]; } else if (op->vsx_flags & VSX_FPCONV) { preempt_disable(); conv_sp_to_dp(&reg->fp[1 + IS_LE], &reg->dp[IS_LE]); preempt_enable(); } } else { if (size == 16) { unsigned long v = *(unsigned long *)(mem + 8); reg->d[IS_BE] = !rev ? v : byterev_8(v); } else if (op->vsx_flags & VSX_SPLAT) reg->d[IS_BE] = reg->d[IS_LE]; } break; case 4: /* lxvw4x, lxvwsx */ wp = mem; for (j = 0; j < size / 4; ++j) { i = IS_LE ? 3 - j : j; reg->w[i] = !rev ? *wp++ : byterev_4(*wp++); } if (op->vsx_flags & VSX_SPLAT) { u32 val = reg->w[IS_LE ? 3 : 0]; for (; j < 4; ++j) { i = IS_LE ? 3 - j : j; reg->w[i] = val; } } break; case 2: /* lxvh8x */ hp = mem; for (j = 0; j < size / 2; ++j) { i = IS_LE ? 7 - j : j; reg->h[i] = !rev ? *hp++ : byterev_2(*hp++); } break; case 1: /* lxvb16x */ bp = mem; for (j = 0; j < size; ++j) { i = IS_LE ? 15 - j : j; reg->b[i] = *bp++; } break; } } EXPORT_SYMBOL_GPL(emulate_vsx_load); NOKPROBE_SYMBOL(emulate_vsx_load); void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, void *mem, bool rev) { int size, write_size; int i, j; union vsx_reg buf; unsigned int *wp; unsigned short *hp; unsigned char *bp; size = GETSIZE(op->type); switch (op->element_size) { case 32: /* [p]stxvp[x] */ if (size == 0) break; if (rev) { /* reverse 32 bytes */ union vsx_reg buf32[2]; buf32[0].d[0] = byterev_8(reg[1].d[1]); buf32[0].d[1] = byterev_8(reg[1].d[0]); buf32[1].d[0] = byterev_8(reg[0].d[1]); buf32[1].d[1] = byterev_8(reg[0].d[0]); memcpy(mem, buf32, size); } else { memcpy(mem, reg, size); } break; case 16: /* stxv, stxvx, stxvl, stxvll */ if (size == 0) break; if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) rev = !rev; if (rev) { /* reverse 16 bytes */ buf.d[0] = byterev_8(reg->d[1]); buf.d[1] = byterev_8(reg->d[0]); reg = &buf; } memcpy(mem, reg, size); break; case 8: /* scalar stores, stxvd2x */ write_size = (size >= 8) ? 8 : size; i = IS_LE ? 
8 : 8 - write_size; if (size < 8 && op->vsx_flags & VSX_FPCONV) { buf.d[0] = buf.d[1] = 0; preempt_disable(); conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]); preempt_enable(); reg = &buf; } memcpy(mem, &reg->b[i], write_size); if (size == 16) memcpy(mem + 8, &reg->d[IS_BE], 8); if (unlikely(rev)) { do_byte_reverse(mem, write_size); if (size == 16) do_byte_reverse(mem + 8, 8); } break; case 4: /* stxvw4x */ wp = mem; for (j = 0; j < size / 4; ++j) { i = IS_LE ? 3 - j : j; *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]); } break; case 2: /* stxvh8x */ hp = mem; for (j = 0; j < size / 2; ++j) { i = IS_LE ? 7 - j : j; *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]); } break; case 1: /* stvxb16x */ bp = mem; for (j = 0; j < size; ++j) { i = IS_LE ? 15 - j : j; *bp++ = reg->b[i]; } break; } } EXPORT_SYMBOL_GPL(emulate_vsx_store); NOKPROBE_SYMBOL(emulate_vsx_store); static nokprobe_inline int do_vsx_load(struct instruction_op *op, unsigned long ea, struct pt_regs *regs, bool cross_endian) { int reg = op->reg; int i, j, nr_vsx_regs; u8 mem[32]; union vsx_reg buf[2]; int size = GETSIZE(op->type); if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs)) return -EFAULT; nr_vsx_regs = max(1ul, size / sizeof(__vector128)); emulate_vsx_load(op, buf, mem, cross_endian); preempt_disable(); if (reg < 32) { /* FP regs + extensions */ if (regs->msr & MSR_FP) { for (i = 0; i < nr_vsx_regs; i++) { j = IS_LE ? nr_vsx_regs - i - 1 : i; load_vsrn(reg + i, &buf[j].v); } } else { for (i = 0; i < nr_vsx_regs; i++) { j = IS_LE ? nr_vsx_regs - i - 1 : i; current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0]; current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1]; } } } else { if (regs->msr & MSR_VEC) { for (i = 0; i < nr_vsx_regs; i++) { j = IS_LE ? nr_vsx_regs - i - 1 : i; load_vsrn(reg + i, &buf[j].v); } } else { for (i = 0; i < nr_vsx_regs; i++) { j = IS_LE ? nr_vsx_regs - i - 1 : i; current->thread.vr_state.vr[reg - 32 + i] = buf[j].v; } } } preempt_enable(); return 0; } static nokprobe_inline int do_vsx_store(struct instruction_op *op, unsigned long ea, struct pt_regs *regs, bool cross_endian) { int reg = op->reg; int i, j, nr_vsx_regs; u8 mem[32]; union vsx_reg buf[2]; int size = GETSIZE(op->type); if (!address_ok(regs, ea, size)) return -EFAULT; nr_vsx_regs = max(1ul, size / sizeof(__vector128)); preempt_disable(); if (reg < 32) { /* FP regs + extensions */ if (regs->msr & MSR_FP) { for (i = 0; i < nr_vsx_regs; i++) { j = IS_LE ? nr_vsx_regs - i - 1 : i; store_vsrn(reg + i, &buf[j].v); } } else { for (i = 0; i < nr_vsx_regs; i++) { j = IS_LE ? nr_vsx_regs - i - 1 : i; buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0]; buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1]; } } } else { if (regs->msr & MSR_VEC) { for (i = 0; i < nr_vsx_regs; i++) { j = IS_LE ? nr_vsx_regs - i - 1 : i; store_vsrn(reg + i, &buf[j].v); } } else { for (i = 0; i < nr_vsx_regs; i++) { j = IS_LE ? 
nr_vsx_regs - i - 1 : i; buf[j].v = current->thread.vr_state.vr[reg - 32 + i]; } } } preempt_enable(); emulate_vsx_store(op, buf, mem, cross_endian); return copy_mem_out(mem, ea, size, regs); } #endif /* CONFIG_VSX */ static __always_inline int __emulate_dcbz(unsigned long ea) { unsigned long i; unsigned long size = l1_dcache_bytes(); for (i = 0; i < size; i += sizeof(long)) unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault); return 0; Efault: return -EFAULT; } int emulate_dcbz(unsigned long ea, struct pt_regs *regs) { int err; unsigned long size = l1_dcache_bytes(); ea = truncate_if_32bit(regs->msr, ea); ea &= ~(size - 1); if (!address_ok(regs, ea, size)) return -EFAULT; if (is_kernel_addr(ea)) { err = __emulate_dcbz(ea); } else if (user_write_access_begin((void __user *)ea, size)) { err = __emulate_dcbz(ea); user_write_access_end(); } else { err = -EFAULT; } if (err) regs->dar = ea; return err; } NOKPROBE_SYMBOL(emulate_dcbz); #define __put_user_asmx(x, addr, err, op, cr) \ __asm__ __volatile__( \ ".machine push\n" \ ".machine power8\n" \ "1: " op " %2,0,%3\n" \ ".machine pop\n" \ " mfcr %1\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %0,%4\n" \ " b 2b\n" \ ".previous\n" \ EX_TABLE(1b, 3b) \ : "=r" (err), "=r" (cr) \ : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)) #define __get_user_asmx(x, addr, err, op) \ __asm__ __volatile__( \ ".machine push\n" \ ".machine power8\n" \ "1: "op" %1,0,%2\n" \ ".machine pop\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %0,%3\n" \ " b 2b\n" \ ".previous\n" \ EX_TABLE(1b, 3b) \ : "=r" (err), "=r" (x) \ : "r" (addr), "i" (-EFAULT), "0" (err)) #define __cacheop_user_asmx(addr, err, op) \ __asm__ __volatile__( \ "1: "op" 0,%1\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %0,%3\n" \ " b 2b\n" \ ".previous\n" \ EX_TABLE(1b, 3b) \ : "=r" (err) \ : "r" (addr), "i" (-EFAULT), "0" (err)) static nokprobe_inline void set_cr0(const struct pt_regs *regs, struct instruction_op *op) { long val = op->val; op->type |= SETCC; op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); if (!(regs->msr & MSR_64BIT)) val = (int) val; if (val < 0) op->ccval |= 0x80000000; else if (val > 0) op->ccval |= 0x40000000; else op->ccval |= 0x20000000; } static nokprobe_inline void set_ca32(struct instruction_op *op, bool val) { if (cpu_has_feature(CPU_FTR_ARCH_300)) { if (val) op->xerval |= XER_CA32; else op->xerval &= ~XER_CA32; } } static nokprobe_inline void add_with_carry(const struct pt_regs *regs, struct instruction_op *op, int rd, unsigned long val1, unsigned long val2, unsigned long carry_in) { unsigned long val = val1 + val2; if (carry_in) ++val; op->type = COMPUTE | SETREG | SETXER; op->reg = rd; op->val = val; val = truncate_if_32bit(regs->msr, val); val1 = truncate_if_32bit(regs->msr, val1); op->xerval = regs->xer; if (val < val1 || (carry_in && val == val1)) op->xerval |= XER_CA; else op->xerval &= ~XER_CA; set_ca32(op, (unsigned int)val < (unsigned int)val1 || (carry_in && (unsigned int)val == (unsigned int)val1)); } static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs, struct instruction_op *op, long v1, long v2, int crfld) { unsigned int crval, shift; op->type = COMPUTE | SETCC; crval = (regs->xer >> 31) & 1; /* get SO bit */ if (v1 < v2) crval |= 8; else if (v1 > v2) crval |= 4; else crval |= 2; shift = (7 - crfld) * 4; op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift); } static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs, struct instruction_op *op, unsigned long v1, unsigned long v2, 
int crfld) { unsigned int crval, shift; op->type = COMPUTE | SETCC; crval = (regs->xer >> 31) & 1; /* get SO bit */ if (v1 < v2) crval |= 8; else if (v1 > v2) crval |= 4; else crval |= 2; shift = (7 - crfld) * 4; op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift); } static nokprobe_inline void do_cmpb(const struct pt_regs *regs, struct instruction_op *op, unsigned long v1, unsigned long v2) { unsigned long long out_val, mask; int i; out_val = 0; for (i = 0; i < 8; i++) { mask = 0xffUL << (i * 8); if ((v1 & mask) == (v2 & mask)) out_val |= mask; } op->val = out_val; } /* * The size parameter is used to adjust the equivalent popcnt instruction. * popcntb = 8, popcntw = 32, popcntd = 64 */ static nokprobe_inline void do_popcnt(const struct pt_regs *regs, struct instruction_op *op, unsigned long v1, int size) { unsigned long long out = v1; out -= (out >> 1) & 0x5555555555555555ULL; out = (0x3333333333333333ULL & out) + (0x3333333333333333ULL & (out >> 2)); out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL; if (size == 8) { /* popcntb */ op->val = out; return; } out += out >> 8; out += out >> 16; if (size == 32) { /* popcntw */ op->val = out & 0x0000003f0000003fULL; return; } out = (out + (out >> 32)) & 0x7f; op->val = out; /* popcntd */ } #ifdef CONFIG_PPC64 static nokprobe_inline void do_bpermd(const struct pt_regs *regs, struct instruction_op *op, unsigned long v1, unsigned long v2) { unsigned char perm, idx; unsigned int i; perm = 0; for (i = 0; i < 8; i++) { idx = (v1 >> (i * 8)) & 0xff; if (idx < 64) if (v2 & PPC_BIT(idx)) perm |= 1 << i; } op->val = perm; } #endif /* CONFIG_PPC64 */ /* * The size parameter adjusts the equivalent prty instruction. * prtyw = 32, prtyd = 64 */ static nokprobe_inline void do_prty(const struct pt_regs *regs, struct instruction_op *op, unsigned long v, int size) { unsigned long long res = v ^ (v >> 8); res ^= res >> 16; if (size == 32) { /* prtyw */ op->val = res & 0x0000000100000001ULL; return; } res ^= res >> 32; op->val = res & 1; /*prtyd */ } static nokprobe_inline int trap_compare(long v1, long v2) { int ret = 0; if (v1 < v2) ret |= 0x10; else if (v1 > v2) ret |= 0x08; else ret |= 0x04; if ((unsigned long)v1 < (unsigned long)v2) ret |= 0x02; else if ((unsigned long)v1 > (unsigned long)v2) ret |= 0x01; return ret; } /* * Elements of 32-bit rotate and mask instructions. */ #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \ ((signed long)-0x80000000L >> (me)) + ((me) >= (mb))) #ifdef __powerpc64__ #define MASK64_L(mb) (~0UL >> (mb)) #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me)) #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb))) #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32)) #else #define DATA32(x) (x) #endif #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x)) /* * Decode an instruction, and return information about it in *op * without changing *regs. * Integer arithmetic and logical instructions, branches, and barrier * instructions can be emulated just using the information in *op. * * Return value is 1 if the instruction can be emulated just by * updating *regs with the information in *op, -1 if we need the * GPRs but *regs doesn't contain the full register set, or 0 * otherwise. 
*/ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, ppc_inst_t instr) { #ifdef CONFIG_PPC64 unsigned int suffixopcode, prefixtype, prefix_r; #endif unsigned int opcode, ra, rb, rc, rd, spr, u; unsigned long int imm; unsigned long int val, val2; unsigned int mb, me, sh; unsigned int word, suffix; long ival; word = ppc_inst_val(instr); suffix = ppc_inst_suffix(instr); op->type = COMPUTE; opcode = ppc_inst_primary_opcode(instr); switch (opcode) { case 16: /* bc */ op->type = BRANCH; imm = (signed short)(word & 0xfffc); if ((word & 2) == 0) imm += regs->nip; op->val = truncate_if_32bit(regs->msr, imm); if (word & 1) op->type |= SETLK; if (branch_taken(word, regs, op)) op->type |= BRTAKEN; return 1; case 17: /* sc */ if ((word & 0xfe2) == 2) op->type = SYSCALL; else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && (word & 0xfe3) == 1) { /* scv */ op->type = SYSCALL_VECTORED_0; if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; } else op->type = UNKNOWN; return 0; case 18: /* b */ op->type = BRANCH | BRTAKEN; imm = word & 0x03fffffc; if (imm & 0x02000000) imm -= 0x04000000; if ((word & 2) == 0) imm += regs->nip; op->val = truncate_if_32bit(regs->msr, imm); if (word & 1) op->type |= SETLK; return 1; case 19: switch ((word >> 1) & 0x3ff) { case 0: /* mcrf */ op->type = COMPUTE + SETCC; rd = 7 - ((word >> 23) & 0x7); ra = 7 - ((word >> 18) & 0x7); rd *= 4; ra *= 4; val = (regs->ccr >> ra) & 0xf; op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd); return 1; case 16: /* bclr */ case 528: /* bcctr */ op->type = BRANCH; imm = (word & 0x400)? regs->ctr: regs->link; op->val = truncate_if_32bit(regs->msr, imm); if (word & 1) op->type |= SETLK; if (branch_taken(word, regs, op)) op->type |= BRTAKEN; return 1; case 18: /* rfid, scary */ if (regs->msr & MSR_PR) goto priv; op->type = RFI; return 0; case 150: /* isync */ op->type = BARRIER | BARRIER_ISYNC; return 1; case 33: /* crnor */ case 129: /* crandc */ case 193: /* crxor */ case 225: /* crnand */ case 257: /* crand */ case 289: /* creqv */ case 417: /* crorc */ case 449: /* cror */ op->type = COMPUTE + SETCC; ra = (word >> 16) & 0x1f; rb = (word >> 11) & 0x1f; rd = (word >> 21) & 0x1f; ra = (regs->ccr >> (31 - ra)) & 1; rb = (regs->ccr >> (31 - rb)) & 1; val = (word >> (6 + ra * 2 + rb)) & 1; op->ccval = (regs->ccr & ~(1UL << (31 - rd))) | (val << (31 - rd)); return 1; } break; case 31: switch ((word >> 1) & 0x3ff) { case 598: /* sync */ op->type = BARRIER + BARRIER_SYNC; #ifdef __powerpc64__ switch ((word >> 21) & 3) { case 1: /* lwsync */ op->type = BARRIER + BARRIER_LWSYNC; break; case 2: /* ptesync */ op->type = BARRIER + BARRIER_PTESYNC; break; } #endif return 1; case 854: /* eieio */ op->type = BARRIER + BARRIER_EIEIO; return 1; } break; } rd = (word >> 21) & 0x1f; ra = (word >> 16) & 0x1f; rb = (word >> 11) & 0x1f; rc = (word >> 6) & 0x1f; switch (opcode) { #ifdef __powerpc64__ case 1: if (!cpu_has_feature(CPU_FTR_ARCH_31)) goto unknown_opcode; prefix_r = GET_PREFIX_R(word); ra = GET_PREFIX_RA(suffix); rd = (suffix >> 21) & 0x1f; op->reg = rd; op->val = regs->gpr[rd]; suffixopcode = get_op(suffix); prefixtype = (word >> 24) & 0x3; switch (prefixtype) { case 2: if (prefix_r && ra) return 0; switch (suffixopcode) { case 14: /* paddi */ op->type = COMPUTE | PREFIXED; op->val = mlsd_8lsd_ea(word, suffix, regs); goto compute_done; } } break; case 2: /* tdi */ if (rd & trap_compare(regs->gpr[ra], (short) word)) goto trap; return 1; #endif case 3: /* twi */ if (rd & trap_compare((int)regs->gpr[ra], (short) word)) goto trap; 
return 1; #ifdef __powerpc64__ case 4: /* * There are very many instructions with this primary opcode * introduced in the ISA as early as v2.03. However, the ones * we currently emulate were all introduced with ISA 3.0 */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; switch (word & 0x3f) { case 48: /* maddhd */ asm volatile(PPC_MADDHD(%0, %1, %2, %3) : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); goto compute_done; case 49: /* maddhdu */ asm volatile(PPC_MADDHDU(%0, %1, %2, %3) : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); goto compute_done; case 51: /* maddld */ asm volatile(PPC_MADDLD(%0, %1, %2, %3) : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); goto compute_done; } /* * There are other instructions from ISA 3.0 with the same * primary opcode which do not have emulation support yet. */ goto unknown_opcode; #endif case 7: /* mulli */ op->val = regs->gpr[ra] * (short) word; goto compute_done; case 8: /* subfic */ imm = (short) word; add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1); return 1; case 10: /* cmpli */ imm = (unsigned short) word; val = regs->gpr[ra]; #ifdef __powerpc64__ if ((rd & 1) == 0) val = (unsigned int) val; #endif do_cmp_unsigned(regs, op, val, imm, rd >> 2); return 1; case 11: /* cmpi */ imm = (short) word; val = regs->gpr[ra]; #ifdef __powerpc64__ if ((rd & 1) == 0) val = (int) val; #endif do_cmp_signed(regs, op, val, imm, rd >> 2); return 1; case 12: /* addic */ imm = (short) word; add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); return 1; case 13: /* addic. */ imm = (short) word; add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); set_cr0(regs, op); return 1; case 14: /* addi */ imm = (short) word; if (ra) imm += regs->gpr[ra]; op->val = imm; goto compute_done; case 15: /* addis */ imm = ((short) word) << 16; if (ra) imm += regs->gpr[ra]; op->val = imm; goto compute_done; case 19: if (((word >> 1) & 0x1f) == 2) { /* addpcis */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; imm = (short) (word & 0xffc1); /* d0 + d2 fields */ imm |= (word >> 15) & 0x3e; /* d1 field */ op->val = regs->nip + (imm << 16) + 4; goto compute_done; } op->type = UNKNOWN; return 0; case 20: /* rlwimi */ mb = (word >> 6) & 0x1f; me = (word >> 1) & 0x1f; val = DATA32(regs->gpr[rd]); imm = MASK32(mb, me); op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm); goto logical_done; case 21: /* rlwinm */ mb = (word >> 6) & 0x1f; me = (word >> 1) & 0x1f; val = DATA32(regs->gpr[rd]); op->val = ROTATE(val, rb) & MASK32(mb, me); goto logical_done; case 23: /* rlwnm */ mb = (word >> 6) & 0x1f; me = (word >> 1) & 0x1f; rb = regs->gpr[rb] & 0x1f; val = DATA32(regs->gpr[rd]); op->val = ROTATE(val, rb) & MASK32(mb, me); goto logical_done; case 24: /* ori */ op->val = regs->gpr[rd] | (unsigned short) word; goto logical_done_nocc; case 25: /* oris */ imm = (unsigned short) word; op->val = regs->gpr[rd] | (imm << 16); goto logical_done_nocc; case 26: /* xori */ op->val = regs->gpr[rd] ^ (unsigned short) word; goto logical_done_nocc; case 27: /* xoris */ imm = (unsigned short) word; op->val = regs->gpr[rd] ^ (imm << 16); goto logical_done_nocc; case 28: /* andi. */ op->val = regs->gpr[rd] & (unsigned short) word; set_cr0(regs, op); goto logical_done_nocc; case 29: /* andis. 
*/ imm = (unsigned short) word; op->val = regs->gpr[rd] & (imm << 16); set_cr0(regs, op); goto logical_done_nocc; #ifdef __powerpc64__ case 30: /* rld* */ mb = ((word >> 6) & 0x1f) | (word & 0x20); val = regs->gpr[rd]; if ((word & 0x10) == 0) { sh = rb | ((word & 2) << 4); val = ROTATE(val, sh); switch ((word >> 2) & 3) { case 0: /* rldicl */ val &= MASK64_L(mb); break; case 1: /* rldicr */ val &= MASK64_R(mb); break; case 2: /* rldic */ val &= MASK64(mb, 63 - sh); break; case 3: /* rldimi */ imm = MASK64(mb, 63 - sh); val = (regs->gpr[ra] & ~imm) | (val & imm); } op->val = val; goto logical_done; } else { sh = regs->gpr[rb] & 0x3f; val = ROTATE(val, sh); switch ((word >> 1) & 7) { case 0: /* rldcl */ op->val = val & MASK64_L(mb); goto logical_done; case 1: /* rldcr */ op->val = val & MASK64_R(mb); goto logical_done; } } #endif op->type = UNKNOWN; /* illegal instruction */ return 0; case 31: /* isel occupies 32 minor opcodes */ if (((word >> 1) & 0x1f) == 15) { mb = (word >> 6) & 0x1f; /* bc field */ val = (regs->ccr >> (31 - mb)) & 1; val2 = (ra) ? regs->gpr[ra] : 0; op->val = (val) ? val2 : regs->gpr[rb]; goto compute_done; } switch ((word >> 1) & 0x3ff) { case 4: /* tw */ if (rd == 0x1f || (rd & trap_compare((int)regs->gpr[ra], (int)regs->gpr[rb]))) goto trap; return 1; #ifdef __powerpc64__ case 68: /* td */ if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb])) goto trap; return 1; #endif case 83: /* mfmsr */ if (regs->msr & MSR_PR) goto priv; op->type = MFMSR; op->reg = rd; return 0; case 146: /* mtmsr */ if (regs->msr & MSR_PR) goto priv; op->type = MTMSR; op->reg = rd; op->val = 0xffffffff & ~(MSR_ME | MSR_LE); return 0; #ifdef CONFIG_PPC64 case 178: /* mtmsrd */ if (regs->msr & MSR_PR) goto priv; op->type = MTMSR; op->reg = rd; /* only MSR_EE and MSR_RI get changed if bit 15 set */ /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */ imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL; op->val = imm; return 0; #endif case 19: /* mfcr */ imm = 0xffffffffUL; if ((word >> 20) & 1) { imm = 0xf0000000UL; for (sh = 0; sh < 8; ++sh) { if (word & (0x80000 >> sh)) break; imm >>= 4; } } op->val = regs->ccr & imm; goto compute_done; case 128: /* setb */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; /* * 'ra' encodes the CR field number (bfa) in the top 3 bits. * Since each CR field is 4 bits, * we can simply mask off the bottom two bits (bfa * 4) * to yield the first bit in the CR field. 
*/ ra = ra & ~0x3; /* 'val' stores bits of the CR field (bfa) */ val = regs->ccr >> (CR0_SHIFT - ra); /* checks if the LT bit of CR field (bfa) is set */ if (val & 8) op->val = -1; /* checks if the GT bit of CR field (bfa) is set */ else if (val & 4) op->val = 1; else op->val = 0; goto compute_done; case 144: /* mtcrf */ op->type = COMPUTE + SETCC; imm = 0xf0000000UL; val = regs->gpr[rd]; op->ccval = regs->ccr; for (sh = 0; sh < 8; ++sh) { if (word & (0x80000 >> sh)) op->ccval = (op->ccval & ~imm) | (val & imm); imm >>= 4; } return 1; case 339: /* mfspr */ spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0); op->type = MFSPR; op->reg = rd; op->spr = spr; if (spr == SPRN_XER || spr == SPRN_LR || spr == SPRN_CTR) return 1; return 0; case 467: /* mtspr */ spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0); op->type = MTSPR; op->val = regs->gpr[rd]; op->spr = spr; if (spr == SPRN_XER || spr == SPRN_LR || spr == SPRN_CTR) return 1; return 0; /* * Compare instructions */ case 0: /* cmp */ val = regs->gpr[ra]; val2 = regs->gpr[rb]; #ifdef __powerpc64__ if ((rd & 1) == 0) { /* word (32-bit) compare */ val = (int) val; val2 = (int) val2; } #endif do_cmp_signed(regs, op, val, val2, rd >> 2); return 1; case 32: /* cmpl */ val = regs->gpr[ra]; val2 = regs->gpr[rb]; #ifdef __powerpc64__ if ((rd & 1) == 0) { /* word (32-bit) compare */ val = (unsigned int) val; val2 = (unsigned int) val2; } #endif do_cmp_unsigned(regs, op, val, val2, rd >> 2); return 1; case 508: /* cmpb */ do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]); goto logical_done_nocc; /* * Arithmetic instructions */ case 8: /* subfc */ add_with_carry(regs, op, rd, ~regs->gpr[ra], regs->gpr[rb], 1); goto arith_done; #ifdef __powerpc64__ case 9: /* mulhdu */ asm("mulhdu %0,%1,%2" : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; #endif case 10: /* addc */ add_with_carry(regs, op, rd, regs->gpr[ra], regs->gpr[rb], 0); goto arith_done; case 11: /* mulhwu */ asm("mulhwu %0,%1,%2" : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; case 40: /* subf */ op->val = regs->gpr[rb] - regs->gpr[ra]; goto arith_done; #ifdef __powerpc64__ case 73: /* mulhd */ asm("mulhd %0,%1,%2" : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; #endif case 75: /* mulhw */ asm("mulhw %0,%1,%2" : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; case 104: /* neg */ op->val = -regs->gpr[ra]; goto arith_done; case 136: /* subfe */ add_with_carry(regs, op, rd, ~regs->gpr[ra], regs->gpr[rb], regs->xer & XER_CA); goto arith_done; case 138: /* adde */ add_with_carry(regs, op, rd, regs->gpr[ra], regs->gpr[rb], regs->xer & XER_CA); goto arith_done; case 200: /* subfze */ add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L, regs->xer & XER_CA); goto arith_done; case 202: /* addze */ add_with_carry(regs, op, rd, regs->gpr[ra], 0L, regs->xer & XER_CA); goto arith_done; case 232: /* subfme */ add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L, regs->xer & XER_CA); goto arith_done; #ifdef __powerpc64__ case 233: /* mulld */ op->val = regs->gpr[ra] * regs->gpr[rb]; goto arith_done; #endif case 234: /* addme */ add_with_carry(regs, op, rd, regs->gpr[ra], -1L, regs->xer & XER_CA); goto arith_done; case 235: /* mullw */ op->val = (long)(int) regs->gpr[ra] * (int) regs->gpr[rb]; goto arith_done; #ifdef __powerpc64__ case 265: /* modud */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->val = regs->gpr[ra] % regs->gpr[rb]; goto compute_done; #endif case 266: /* add */ 
op->val = regs->gpr[ra] + regs->gpr[rb]; goto arith_done; case 267: /* moduw */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->val = (unsigned int) regs->gpr[ra] % (unsigned int) regs->gpr[rb]; goto compute_done; #ifdef __powerpc64__ case 457: /* divdu */ op->val = regs->gpr[ra] / regs->gpr[rb]; goto arith_done; #endif case 459: /* divwu */ op->val = (unsigned int) regs->gpr[ra] / (unsigned int) regs->gpr[rb]; goto arith_done; #ifdef __powerpc64__ case 489: /* divd */ op->val = (long int) regs->gpr[ra] / (long int) regs->gpr[rb]; goto arith_done; #endif case 491: /* divw */ op->val = (int) regs->gpr[ra] / (int) regs->gpr[rb]; goto arith_done; #ifdef __powerpc64__ case 425: /* divde[.] */ asm volatile(PPC_DIVDE(%0, %1, %2) : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; case 393: /* divdeu[.] */ asm volatile(PPC_DIVDEU(%0, %1, %2) : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; #endif case 755: /* darn */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; switch (ra & 0x3) { case 0: /* 32-bit conditioned */ asm volatile(PPC_DARN(%0, 0) : "=r" (op->val)); goto compute_done; case 1: /* 64-bit conditioned */ asm volatile(PPC_DARN(%0, 1) : "=r" (op->val)); goto compute_done; case 2: /* 64-bit raw */ asm volatile(PPC_DARN(%0, 2) : "=r" (op->val)); goto compute_done; } goto unknown_opcode; #ifdef __powerpc64__ case 777: /* modsd */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->val = (long int) regs->gpr[ra] % (long int) regs->gpr[rb]; goto compute_done; #endif case 779: /* modsw */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->val = (int) regs->gpr[ra] % (int) regs->gpr[rb]; goto compute_done; /* * Logical instructions */ case 26: /* cntlzw */ val = (unsigned int) regs->gpr[rd]; op->val = ( val ? __builtin_clz(val) : 32 ); goto logical_done; #ifdef __powerpc64__ case 58: /* cntlzd */ val = regs->gpr[rd]; op->val = ( val ? __builtin_clzl(val) : 64 ); goto logical_done; #endif case 28: /* and */ op->val = regs->gpr[rd] & regs->gpr[rb]; goto logical_done; case 60: /* andc */ op->val = regs->gpr[rd] & ~regs->gpr[rb]; goto logical_done; case 122: /* popcntb */ do_popcnt(regs, op, regs->gpr[rd], 8); goto logical_done_nocc; case 124: /* nor */ op->val = ~(regs->gpr[rd] | regs->gpr[rb]); goto logical_done; case 154: /* prtyw */ do_prty(regs, op, regs->gpr[rd], 32); goto logical_done_nocc; case 186: /* prtyd */ do_prty(regs, op, regs->gpr[rd], 64); goto logical_done_nocc; #ifdef CONFIG_PPC64 case 252: /* bpermd */ do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]); goto logical_done_nocc; #endif case 284: /* xor */ op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]); goto logical_done; case 316: /* xor */ op->val = regs->gpr[rd] ^ regs->gpr[rb]; goto logical_done; case 378: /* popcntw */ do_popcnt(regs, op, regs->gpr[rd], 32); goto logical_done_nocc; case 412: /* orc */ op->val = regs->gpr[rd] | ~regs->gpr[rb]; goto logical_done; case 444: /* or */ op->val = regs->gpr[rd] | regs->gpr[rb]; goto logical_done; case 476: /* nand */ op->val = ~(regs->gpr[rd] & regs->gpr[rb]); goto logical_done; #ifdef CONFIG_PPC64 case 506: /* popcntd */ do_popcnt(regs, op, regs->gpr[rd], 64); goto logical_done_nocc; #endif case 538: /* cnttzw */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; val = (unsigned int) regs->gpr[rd]; op->val = (val ? 
__builtin_ctz(val) : 32); goto logical_done; #ifdef __powerpc64__ case 570: /* cnttzd */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; val = regs->gpr[rd]; op->val = (val ? __builtin_ctzl(val) : 64); goto logical_done; #endif case 922: /* extsh */ op->val = (signed short) regs->gpr[rd]; goto logical_done; case 954: /* extsb */ op->val = (signed char) regs->gpr[rd]; goto logical_done; #ifdef __powerpc64__ case 986: /* extsw */ op->val = (signed int) regs->gpr[rd]; goto logical_done; #endif /* * Shift instructions */ case 24: /* slw */ sh = regs->gpr[rb] & 0x3f; if (sh < 32) op->val = (regs->gpr[rd] << sh) & 0xffffffffUL; else op->val = 0; goto logical_done; case 536: /* srw */ sh = regs->gpr[rb] & 0x3f; if (sh < 32) op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh; else op->val = 0; goto logical_done; case 792: /* sraw */ op->type = COMPUTE + SETREG + SETXER; sh = regs->gpr[rb] & 0x3f; ival = (signed int) regs->gpr[rd]; op->val = ival >> (sh < 32 ? sh : 31); op->xerval = regs->xer; if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0)) op->xerval |= XER_CA; else op->xerval &= ~XER_CA; set_ca32(op, op->xerval & XER_CA); goto logical_done; case 824: /* srawi */ op->type = COMPUTE + SETREG + SETXER; sh = rb; ival = (signed int) regs->gpr[rd]; op->val = ival >> sh; op->xerval = regs->xer; if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) op->xerval |= XER_CA; else op->xerval &= ~XER_CA; set_ca32(op, op->xerval & XER_CA); goto logical_done; #ifdef __powerpc64__ case 27: /* sld */ sh = regs->gpr[rb] & 0x7f; if (sh < 64) op->val = regs->gpr[rd] << sh; else op->val = 0; goto logical_done; case 539: /* srd */ sh = regs->gpr[rb] & 0x7f; if (sh < 64) op->val = regs->gpr[rd] >> sh; else op->val = 0; goto logical_done; case 794: /* srad */ op->type = COMPUTE + SETREG + SETXER; sh = regs->gpr[rb] & 0x7f; ival = (signed long int) regs->gpr[rd]; op->val = ival >> (sh < 64 ? sh : 63); op->xerval = regs->xer; if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0)) op->xerval |= XER_CA; else op->xerval &= ~XER_CA; set_ca32(op, op->xerval & XER_CA); goto logical_done; case 826: /* sradi with sh_5 = 0 */ case 827: /* sradi with sh_5 = 1 */ op->type = COMPUTE + SETREG + SETXER; sh = rb | ((word & 2) << 4); ival = (signed long int) regs->gpr[rd]; op->val = ival >> sh; op->xerval = regs->xer; if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) op->xerval |= XER_CA; else op->xerval &= ~XER_CA; set_ca32(op, op->xerval & XER_CA); goto logical_done; case 890: /* extswsli with sh_5 = 0 */ case 891: /* extswsli with sh_5 = 1 */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->type = COMPUTE + SETREG; sh = rb | ((word & 2) << 4); val = (signed int) regs->gpr[rd]; if (sh) op->val = ROTATE(val, sh) & MASK64(0, 63 - sh); else op->val = val; goto logical_done; #endif /* __powerpc64__ */ /* * Cache instructions */ case 54: /* dcbst */ op->type = MKOP(CACHEOP, DCBST, 0); op->ea = xform_ea(word, regs); return 0; case 86: /* dcbf */ op->type = MKOP(CACHEOP, DCBF, 0); op->ea = xform_ea(word, regs); return 0; case 246: /* dcbtst */ op->type = MKOP(CACHEOP, DCBTST, 0); op->ea = xform_ea(word, regs); op->reg = rd; return 0; case 278: /* dcbt */ op->type = MKOP(CACHEOP, DCBTST, 0); op->ea = xform_ea(word, regs); op->reg = rd; return 0; case 982: /* icbi */ op->type = MKOP(CACHEOP, ICBI, 0); op->ea = xform_ea(word, regs); return 0; case 1014: /* dcbz */ op->type = MKOP(CACHEOP, DCBZ, 0); op->ea = xform_ea(word, regs); return 0; } break; } /* * Loads and stores. 
*/ op->type = UNKNOWN; op->update_reg = ra; op->reg = rd; op->val = regs->gpr[rd]; u = (word >> 20) & UPDATE; op->vsx_flags = 0; switch (opcode) { case 31: u = word & UPDATE; op->ea = xform_ea(word, regs); switch ((word >> 1) & 0x3ff) { case 20: /* lwarx */ op->type = MKOP(LARX, 0, 4); break; case 150: /* stwcx. */ op->type = MKOP(STCX, 0, 4); break; #ifdef CONFIG_PPC_HAS_LBARX_LHARX case 52: /* lbarx */ op->type = MKOP(LARX, 0, 1); break; case 694: /* stbcx. */ op->type = MKOP(STCX, 0, 1); break; case 116: /* lharx */ op->type = MKOP(LARX, 0, 2); break; case 726: /* sthcx. */ op->type = MKOP(STCX, 0, 2); break; #endif #ifdef __powerpc64__ case 84: /* ldarx */ op->type = MKOP(LARX, 0, 8); break; case 214: /* stdcx. */ op->type = MKOP(STCX, 0, 8); break; case 276: /* lqarx */ if (!((rd & 1) || rd == ra || rd == rb)) op->type = MKOP(LARX, 0, 16); break; case 182: /* stqcx. */ if (!(rd & 1)) op->type = MKOP(STCX, 0, 16); break; #endif case 23: /* lwzx */ case 55: /* lwzux */ op->type = MKOP(LOAD, u, 4); break; case 87: /* lbzx */ case 119: /* lbzux */ op->type = MKOP(LOAD, u, 1); break; #ifdef CONFIG_ALTIVEC /* * Note: for the load/store vector element instructions, * bits of the EA say which field of the VMX register to use. */ case 7: /* lvebx */ op->type = MKOP(LOAD_VMX, 0, 1); op->element_size = 1; break; case 39: /* lvehx */ op->type = MKOP(LOAD_VMX, 0, 2); op->element_size = 2; break; case 71: /* lvewx */ op->type = MKOP(LOAD_VMX, 0, 4); op->element_size = 4; break; case 103: /* lvx */ case 359: /* lvxl */ op->type = MKOP(LOAD_VMX, 0, 16); op->element_size = 16; break; case 135: /* stvebx */ op->type = MKOP(STORE_VMX, 0, 1); op->element_size = 1; break; case 167: /* stvehx */ op->type = MKOP(STORE_VMX, 0, 2); op->element_size = 2; break; case 199: /* stvewx */ op->type = MKOP(STORE_VMX, 0, 4); op->element_size = 4; break; case 231: /* stvx */ case 487: /* stvxl */ op->type = MKOP(STORE_VMX, 0, 16); break; #endif /* CONFIG_ALTIVEC */ #ifdef __powerpc64__ case 21: /* ldx */ case 53: /* ldux */ op->type = MKOP(LOAD, u, 8); break; case 149: /* stdx */ case 181: /* stdux */ op->type = MKOP(STORE, u, 8); break; #endif case 151: /* stwx */ case 183: /* stwux */ op->type = MKOP(STORE, u, 4); break; case 215: /* stbx */ case 247: /* stbux */ op->type = MKOP(STORE, u, 1); break; case 279: /* lhzx */ case 311: /* lhzux */ op->type = MKOP(LOAD, u, 2); break; #ifdef __powerpc64__ case 341: /* lwax */ case 373: /* lwaux */ op->type = MKOP(LOAD, SIGNEXT | u, 4); break; #endif case 343: /* lhax */ case 375: /* lhaux */ op->type = MKOP(LOAD, SIGNEXT | u, 2); break; case 407: /* sthx */ case 439: /* sthux */ op->type = MKOP(STORE, u, 2); break; #ifdef __powerpc64__ case 532: /* ldbrx */ op->type = MKOP(LOAD, BYTEREV, 8); break; #endif case 533: /* lswx */ op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f); break; case 534: /* lwbrx */ op->type = MKOP(LOAD, BYTEREV, 4); break; case 597: /* lswi */ if (rb == 0) rb = 32; /* # bytes to load */ op->type = MKOP(LOAD_MULTI, 0, rb); op->ea = ra ? 
regs->gpr[ra] : 0; break; #ifdef CONFIG_PPC_FPU case 535: /* lfsx */ case 567: /* lfsux */ op->type = MKOP(LOAD_FP, u | FPCONV, 4); break; case 599: /* lfdx */ case 631: /* lfdux */ op->type = MKOP(LOAD_FP, u, 8); break; case 663: /* stfsx */ case 695: /* stfsux */ op->type = MKOP(STORE_FP, u | FPCONV, 4); break; case 727: /* stfdx */ case 759: /* stfdux */ op->type = MKOP(STORE_FP, u, 8); break; #ifdef __powerpc64__ case 791: /* lfdpx */ op->type = MKOP(LOAD_FP, 0, 16); break; case 855: /* lfiwax */ op->type = MKOP(LOAD_FP, SIGNEXT, 4); break; case 887: /* lfiwzx */ op->type = MKOP(LOAD_FP, 0, 4); break; case 919: /* stfdpx */ op->type = MKOP(STORE_FP, 0, 16); break; case 983: /* stfiwx */ op->type = MKOP(STORE_FP, 0, 4); break; #endif /* __powerpc64 */ #endif /* CONFIG_PPC_FPU */ #ifdef __powerpc64__ case 660: /* stdbrx */ op->type = MKOP(STORE, BYTEREV, 8); op->val = byterev_8(regs->gpr[rd]); break; #endif case 661: /* stswx */ op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f); break; case 662: /* stwbrx */ op->type = MKOP(STORE, BYTEREV, 4); op->val = byterev_4(regs->gpr[rd]); break; case 725: /* stswi */ if (rb == 0) rb = 32; /* # bytes to store */ op->type = MKOP(STORE_MULTI, 0, rb); op->ea = ra ? regs->gpr[ra] : 0; break; case 790: /* lhbrx */ op->type = MKOP(LOAD, BYTEREV, 2); break; case 918: /* sthbrx */ op->type = MKOP(STORE, BYTEREV, 2); op->val = byterev_2(regs->gpr[rd]); break; #ifdef CONFIG_VSX case 12: /* lxsiwzx */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 4); op->element_size = 8; break; case 76: /* lxsiwax */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, SIGNEXT, 4); op->element_size = 8; break; case 140: /* stxsiwx */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 4); op->element_size = 8; break; case 268: /* lxvx */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 16; op->vsx_flags = VSX_CHECK_VEC; break; case 269: /* lxvl */ case 301: { /* lxvll */ int nb; if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->ea = ra ? regs->gpr[ra] : 0; nb = regs->gpr[rb] & 0xff; if (nb > 16) nb = 16; op->type = MKOP(LOAD_VSX, 0, nb); op->element_size = 16; op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) | VSX_CHECK_VEC; break; } case 332: /* lxvdsx */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 8); op->element_size = 8; op->vsx_flags = VSX_SPLAT; break; case 333: /* lxvpx */ if (!cpu_has_feature(CPU_FTR_ARCH_31)) goto unknown_opcode; op->reg = VSX_REGISTER_XTP(rd); op->type = MKOP(LOAD_VSX, 0, 32); op->element_size = 32; break; case 364: /* lxvwsx */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 4); op->element_size = 4; op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC; break; case 396: /* stxvx */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 16; op->vsx_flags = VSX_CHECK_VEC; break; case 397: /* stxvl */ case 429: { /* stxvll */ int nb; if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->ea = ra ? regs->gpr[ra] : 0; nb = regs->gpr[rb] & 0xff; if (nb > 16) nb = 16; op->type = MKOP(STORE_VSX, 0, nb); op->element_size = 16; op->vsx_flags = ((word & 0x20) ? 
VSX_LDLEFT : 0) | VSX_CHECK_VEC; break; } case 461: /* stxvpx */ if (!cpu_has_feature(CPU_FTR_ARCH_31)) goto unknown_opcode; op->reg = VSX_REGISTER_XTP(rd); op->type = MKOP(STORE_VSX, 0, 32); op->element_size = 32; break; case 524: /* lxsspx */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 4); op->element_size = 8; op->vsx_flags = VSX_FPCONV; break; case 588: /* lxsdx */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 8); op->element_size = 8; break; case 652: /* stxsspx */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 4); op->element_size = 8; op->vsx_flags = VSX_FPCONV; break; case 716: /* stxsdx */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 8); op->element_size = 8; break; case 780: /* lxvw4x */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 4; break; case 781: /* lxsibzx */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 1); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 812: /* lxvh8x */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 2; op->vsx_flags = VSX_CHECK_VEC; break; case 813: /* lxsihzx */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 2); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 844: /* lxvd2x */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 8; break; case 876: /* lxvb16x */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 1; op->vsx_flags = VSX_CHECK_VEC; break; case 908: /* stxvw4x */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 4; break; case 909: /* stxsibx */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 1); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 940: /* stxvh8x */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 2; op->vsx_flags = VSX_CHECK_VEC; break; case 941: /* stxsihx */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 2); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 972: /* stxvd2x */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 8; break; case 1004: /* stxvb16x */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 1; op->vsx_flags = VSX_CHECK_VEC; break; #endif /* CONFIG_VSX */ } break; case 32: /* lwz */ case 33: /* lwzu */ op->type = MKOP(LOAD, u, 4); op->ea = dform_ea(word, regs); break; case 34: /* lbz */ case 35: /* lbzu */ op->type = MKOP(LOAD, u, 1); op->ea = dform_ea(word, regs); break; case 36: /* stw */ case 37: /* stwu */ op->type = MKOP(STORE, u, 4); op->ea = dform_ea(word, regs); break; case 38: /* stb */ case 39: /* stbu */ op->type = MKOP(STORE, u, 1); op->ea = dform_ea(word, regs); break; case 40: /* lhz */ case 41: /* lhzu */ op->type = MKOP(LOAD, u, 2); op->ea = dform_ea(word, regs); break; case 42: /* lha */ case 43: /* lhau */ 
op->type = MKOP(LOAD, SIGNEXT | u, 2); op->ea = dform_ea(word, regs); break; case 44: /* sth */ case 45: /* sthu */ op->type = MKOP(STORE, u, 2); op->ea = dform_ea(word, regs); break; case 46: /* lmw */ if (ra >= rd) break; /* invalid form, ra in range to load */ op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd)); op->ea = dform_ea(word, regs); break; case 47: /* stmw */ op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd)); op->ea = dform_ea(word, regs); break; #ifdef CONFIG_PPC_FPU case 48: /* lfs */ case 49: /* lfsu */ op->type = MKOP(LOAD_FP, u | FPCONV, 4); op->ea = dform_ea(word, regs); break; case 50: /* lfd */ case 51: /* lfdu */ op->type = MKOP(LOAD_FP, u, 8); op->ea = dform_ea(word, regs); break; case 52: /* stfs */ case 53: /* stfsu */ op->type = MKOP(STORE_FP, u | FPCONV, 4); op->ea = dform_ea(word, regs); break; case 54: /* stfd */ case 55: /* stfdu */ op->type = MKOP(STORE_FP, u, 8); op->ea = dform_ea(word, regs); break; #endif #ifdef __powerpc64__ case 56: /* lq */ if (!((rd & 1) || (rd == ra))) op->type = MKOP(LOAD, 0, 16); op->ea = dqform_ea(word, regs); break; #endif #ifdef CONFIG_VSX case 57: /* lfdp, lxsd, lxssp */ op->ea = dsform_ea(word, regs); switch (word & 3) { case 0: /* lfdp */ if (rd & 1) break; /* reg must be even */ op->type = MKOP(LOAD_FP, 0, 16); break; case 2: /* lxsd */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd + 32; op->type = MKOP(LOAD_VSX, 0, 8); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 3: /* lxssp */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->reg = rd + 32; op->type = MKOP(LOAD_VSX, 0, 4); op->element_size = 8; op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; break; } break; #endif /* CONFIG_VSX */ #ifdef __powerpc64__ case 58: /* ld[u], lwa */ op->ea = dsform_ea(word, regs); switch (word & 3) { case 0: /* ld */ op->type = MKOP(LOAD, 0, 8); break; case 1: /* ldu */ op->type = MKOP(LOAD, UPDATE, 8); break; case 2: /* lwa */ op->type = MKOP(LOAD, SIGNEXT, 4); break; } break; #endif #ifdef CONFIG_VSX case 6: if (!cpu_has_feature(CPU_FTR_ARCH_31)) goto unknown_opcode; op->ea = dqform_ea(word, regs); op->reg = VSX_REGISTER_XTP(rd); op->element_size = 32; switch (word & 0xf) { case 0: /* lxvp */ op->type = MKOP(LOAD_VSX, 0, 32); break; case 1: /* stxvp */ op->type = MKOP(STORE_VSX, 0, 32); break; } break; case 61: /* stfdp, lxv, stxsd, stxssp, stxv */ switch (word & 7) { case 0: /* stfdp with LSB of DS field = 0 */ case 4: /* stfdp with LSB of DS field = 1 */ op->ea = dsform_ea(word, regs); op->type = MKOP(STORE_FP, 0, 16); break; case 1: /* lxv */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->ea = dqform_ea(word, regs); if (word & 8) op->reg = rd + 32; op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 16; op->vsx_flags = VSX_CHECK_VEC; break; case 2: /* stxsd with LSB of DS field = 0 */ case 6: /* stxsd with LSB of DS field = 1 */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->ea = dsform_ea(word, regs); op->reg = rd + 32; op->type = MKOP(STORE_VSX, 0, 8); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 3: /* stxssp with LSB of DS field = 0 */ case 7: /* stxssp with LSB of DS field = 1 */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->ea = dsform_ea(word, regs); op->reg = rd + 32; op->type = MKOP(STORE_VSX, 0, 4); op->element_size = 8; op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; break; case 5: /* stxv */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) goto unknown_opcode; op->ea = dqform_ea(word, regs); if (word & 8) op->reg = rd + 
32; op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 16; op->vsx_flags = VSX_CHECK_VEC; break; } break; #endif /* CONFIG_VSX */ #ifdef __powerpc64__ case 62: /* std[u] */ op->ea = dsform_ea(word, regs); switch (word & 3) { case 0: /* std */ op->type = MKOP(STORE, 0, 8); break; case 1: /* stdu */ op->type = MKOP(STORE, UPDATE, 8); break; case 2: /* stq */ if (!(rd & 1)) op->type = MKOP(STORE, 0, 16); break; } break; case 1: /* Prefixed instructions */ if (!cpu_has_feature(CPU_FTR_ARCH_31)) goto unknown_opcode; prefix_r = GET_PREFIX_R(word); ra = GET_PREFIX_RA(suffix); op->update_reg = ra; rd = (suffix >> 21) & 0x1f; op->reg = rd; op->val = regs->gpr[rd]; suffixopcode = get_op(suffix); prefixtype = (word >> 24) & 0x3; switch (prefixtype) { case 0: /* Type 00 Eight-Byte Load/Store */ if (prefix_r && ra) break; op->ea = mlsd_8lsd_ea(word, suffix, regs); switch (suffixopcode) { case 41: /* plwa */ op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4); break; #ifdef CONFIG_VSX case 42: /* plxsd */ op->reg = rd + 32; op->type = MKOP(LOAD_VSX, PREFIXED, 8); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 43: /* plxssp */ op->reg = rd + 32; op->type = MKOP(LOAD_VSX, PREFIXED, 4); op->element_size = 8; op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; break; case 46: /* pstxsd */ op->reg = rd + 32; op->type = MKOP(STORE_VSX, PREFIXED, 8); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 47: /* pstxssp */ op->reg = rd + 32; op->type = MKOP(STORE_VSX, PREFIXED, 4); op->element_size = 8; op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; break; case 51: /* plxv1 */ op->reg += 32; fallthrough; case 50: /* plxv0 */ op->type = MKOP(LOAD_VSX, PREFIXED, 16); op->element_size = 16; op->vsx_flags = VSX_CHECK_VEC; break; case 55: /* pstxv1 */ op->reg = rd + 32; fallthrough; case 54: /* pstxv0 */ op->type = MKOP(STORE_VSX, PREFIXED, 16); op->element_size = 16; op->vsx_flags = VSX_CHECK_VEC; break; #endif /* CONFIG_VSX */ case 56: /* plq */ op->type = MKOP(LOAD, PREFIXED, 16); break; case 57: /* pld */ op->type = MKOP(LOAD, PREFIXED, 8); break; #ifdef CONFIG_VSX case 58: /* plxvp */ op->reg = VSX_REGISTER_XTP(rd); op->type = MKOP(LOAD_VSX, PREFIXED, 32); op->element_size = 32; break; #endif /* CONFIG_VSX */ case 60: /* pstq */ op->type = MKOP(STORE, PREFIXED, 16); break; case 61: /* pstd */ op->type = MKOP(STORE, PREFIXED, 8); break; #ifdef CONFIG_VSX case 62: /* pstxvp */ op->reg = VSX_REGISTER_XTP(rd); op->type = MKOP(STORE_VSX, PREFIXED, 32); op->element_size = 32; break; #endif /* CONFIG_VSX */ } break; case 1: /* Type 01 Eight-Byte Register-to-Register */ break; case 2: /* Type 10 Modified Load/Store */ if (prefix_r && ra) break; op->ea = mlsd_8lsd_ea(word, suffix, regs); switch (suffixopcode) { case 32: /* plwz */ op->type = MKOP(LOAD, PREFIXED, 4); break; case 34: /* plbz */ op->type = MKOP(LOAD, PREFIXED, 1); break; case 36: /* pstw */ op->type = MKOP(STORE, PREFIXED, 4); break; case 38: /* pstb */ op->type = MKOP(STORE, PREFIXED, 1); break; case 40: /* plhz */ op->type = MKOP(LOAD, PREFIXED, 2); break; case 42: /* plha */ op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2); break; case 44: /* psth */ op->type = MKOP(STORE, PREFIXED, 2); break; case 48: /* plfs */ op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4); break; case 50: /* plfd */ op->type = MKOP(LOAD_FP, PREFIXED, 8); break; case 52: /* pstfs */ op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4); break; case 54: /* pstfd */ op->type = MKOP(STORE_FP, PREFIXED, 8); break; } break; case 3: /* Type 11 Modified Register-to-Register */ break; } 
#endif /* __powerpc64__ */ } if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) { switch (GETTYPE(op->type)) { case LOAD: if (ra == rd) goto unknown_opcode; fallthrough; case STORE: case LOAD_FP: case STORE_FP: if (ra == 0) goto unknown_opcode; } } #ifdef CONFIG_VSX if ((GETTYPE(op->type) == LOAD_VSX || GETTYPE(op->type) == STORE_VSX) && !cpu_has_feature(CPU_FTR_VSX)) { return -1; } #endif /* CONFIG_VSX */ return 0; unknown_opcode: op->type = UNKNOWN; return 0; logical_done: if (word & 1) set_cr0(regs, op); logical_done_nocc: op->reg = ra; op->type |= SETREG; return 1; arith_done: if (word & 1) set_cr0(regs, op); compute_done: op->reg = rd; op->type |= SETREG; return 1; priv: op->type = INTERRUPT | 0x700; op->val = SRR1_PROGPRIV; return 0; trap: op->type = INTERRUPT | 0x700; op->val = SRR1_PROGTRAP; return 0; } EXPORT_SYMBOL_GPL(analyse_instr); NOKPROBE_SYMBOL(analyse_instr); /* * For PPC32 we always use stwu with r1 to change the stack pointer. * So this emulated store may corrupt the exception frame, now we * have to provide the exception frame trampoline, which is pushed * below the kprobed function stack. So we only update gpr[1] but * don't emulate the real store operation. We will do real store * operation safely in exception return code by checking this flag. */ static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs) { /* * Check if we already set since that means we'll * lose the previous value. */ WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE)); set_thread_flag(TIF_EMULATE_STACK_STORE); return 0; } static nokprobe_inline void do_signext(unsigned long *valp, int size) { switch (size) { case 2: *valp = (signed short) *valp; break; case 4: *valp = (signed int) *valp; break; } } static nokprobe_inline void do_byterev(unsigned long *valp, int size) { switch (size) { case 2: *valp = byterev_2(*valp); break; case 4: *valp = byterev_4(*valp); break; #ifdef __powerpc64__ case 8: *valp = byterev_8(*valp); break; #endif } } /* * Emulate an instruction that can be executed just by updating * fields in *regs. */ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) { unsigned long next_pc; next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type)); switch (GETTYPE(op->type)) { case COMPUTE: if (op->type & SETREG) regs->gpr[op->reg] = op->val; if (op->type & SETCC) regs->ccr = op->ccval; if (op->type & SETXER) regs->xer = op->xerval; break; case BRANCH: if (op->type & SETLK) regs->link = next_pc; if (op->type & BRTAKEN) next_pc = op->val; if (op->type & DECCTR) --regs->ctr; break; case BARRIER: switch (op->type & BARRIER_MASK) { case BARRIER_SYNC: mb(); break; case BARRIER_ISYNC: isync(); break; case BARRIER_EIEIO: eieio(); break; #ifdef CONFIG_PPC64 case BARRIER_LWSYNC: asm volatile("lwsync" : : : "memory"); break; case BARRIER_PTESYNC: asm volatile("ptesync" : : : "memory"); break; #endif } break; case MFSPR: switch (op->spr) { case SPRN_XER: regs->gpr[op->reg] = regs->xer & 0xffffffffUL; break; case SPRN_LR: regs->gpr[op->reg] = regs->link; break; case SPRN_CTR: regs->gpr[op->reg] = regs->ctr; break; default: WARN_ON_ONCE(1); } break; case MTSPR: switch (op->spr) { case SPRN_XER: regs->xer = op->val & 0xffffffffUL; break; case SPRN_LR: regs->link = op->val; break; case SPRN_CTR: regs->ctr = op->val; break; default: WARN_ON_ONCE(1); } break; default: WARN_ON_ONCE(1); } regs_set_return_ip(regs, next_pc); } NOKPROBE_SYMBOL(emulate_update_regs); /* * Emulate a previously-analysed load or store instruction. 
* Return values are: * 0 = instruction emulated successfully * -EFAULT = address out of range or access faulted (regs->dar * contains the faulting address) * -EACCES = misaligned access, instruction requires alignment * -EINVAL = unknown operation in *op */ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) { int err, size, type; int i, rd, nb; unsigned int cr; unsigned long val; unsigned long ea; bool cross_endian; err = 0; size = GETSIZE(op->type); type = GETTYPE(op->type); cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE); ea = truncate_if_32bit(regs->msr, op->ea); switch (type) { case LARX: if (ea & (size - 1)) return -EACCES; /* can't handle misaligned */ if (!address_ok(regs, ea, size)) return -EFAULT; err = 0; val = 0; switch (size) { #ifdef CONFIG_PPC_HAS_LBARX_LHARX case 1: __get_user_asmx(val, ea, err, "lbarx"); break; case 2: __get_user_asmx(val, ea, err, "lharx"); break; #endif case 4: __get_user_asmx(val, ea, err, "lwarx"); break; #ifdef __powerpc64__ case 8: __get_user_asmx(val, ea, err, "ldarx"); break; case 16: err = do_lqarx(ea, &regs->gpr[op->reg]); break; #endif default: return -EINVAL; } if (err) { regs->dar = ea; break; } if (size < 16) regs->gpr[op->reg] = val; break; case STCX: if (ea & (size - 1)) return -EACCES; /* can't handle misaligned */ if (!address_ok(regs, ea, size)) return -EFAULT; err = 0; switch (size) { #ifdef __powerpc64__ case 1: __put_user_asmx(op->val, ea, err, "stbcx.", cr); break; case 2: __put_user_asmx(op->val, ea, err, "sthcx.", cr); break; #endif case 4: __put_user_asmx(op->val, ea, err, "stwcx.", cr); break; #ifdef __powerpc64__ case 8: __put_user_asmx(op->val, ea, err, "stdcx.", cr); break; case 16: err = do_stqcx(ea, regs->gpr[op->reg], regs->gpr[op->reg + 1], &cr); break; #endif default: return -EINVAL; } if (!err) regs->ccr = (regs->ccr & 0x0fffffff) | (cr & 0xe0000000) | ((regs->xer >> 3) & 0x10000000); else regs->dar = ea; break; case LOAD: #ifdef __powerpc64__ if (size == 16) { err = emulate_lq(regs, ea, op->reg, cross_endian); break; } #endif err = read_mem(&regs->gpr[op->reg], ea, size, regs); if (!err) { if (op->type & SIGNEXT) do_signext(&regs->gpr[op->reg], size); if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV)) do_byterev(&regs->gpr[op->reg], size); } break; #ifdef CONFIG_PPC_FPU case LOAD_FP: /* * If the instruction is in userspace, we can emulate it even * if the VMX state is not live, because we have the state * stored in the thread_struct. If the instruction is in * the kernel, we must not touch the state in the thread_struct. */ if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) return 0; err = do_fp_load(op, ea, regs, cross_endian); break; #endif #ifdef CONFIG_ALTIVEC case LOAD_VMX: if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) return 0; err = do_vec_load(op->reg, ea, size, regs, cross_endian); break; #endif #ifdef CONFIG_VSX case LOAD_VSX: { unsigned long msrbit = MSR_VSX; /* * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX * when the target of the instruction is a vector register. 
*/ if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) msrbit = MSR_VEC; if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) return 0; err = do_vsx_load(op, ea, regs, cross_endian); break; } #endif case LOAD_MULTI: if (!address_ok(regs, ea, size)) return -EFAULT; rd = op->reg; for (i = 0; i < size; i += 4) { unsigned int v32 = 0; nb = size - i; if (nb > 4) nb = 4; err = copy_mem_in((u8 *) &v32, ea, nb, regs); if (err) break; if (unlikely(cross_endian)) v32 = byterev_4(v32); regs->gpr[rd] = v32; ea += 4; /* reg number wraps from 31 to 0 for lsw[ix] */ rd = (rd + 1) & 0x1f; } break; case STORE: #ifdef __powerpc64__ if (size == 16) { err = emulate_stq(regs, ea, op->reg, cross_endian); break; } #endif if ((op->type & UPDATE) && size == sizeof(long) && op->reg == 1 && op->update_reg == 1 && !(regs->msr & MSR_PR) && ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) { err = handle_stack_update(ea, regs); break; } if (unlikely(cross_endian)) do_byterev(&op->val, size); err = write_mem(op->val, ea, size, regs); break; #ifdef CONFIG_PPC_FPU case STORE_FP: if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) return 0; err = do_fp_store(op, ea, regs, cross_endian); break; #endif #ifdef CONFIG_ALTIVEC case STORE_VMX: if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) return 0; err = do_vec_store(op->reg, ea, size, regs, cross_endian); break; #endif #ifdef CONFIG_VSX case STORE_VSX: { unsigned long msrbit = MSR_VSX; /* * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX * when the target of the instruction is a vector register. */ if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) msrbit = MSR_VEC; if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) return 0; err = do_vsx_store(op, ea, regs, cross_endian); break; } #endif case STORE_MULTI: if (!address_ok(regs, ea, size)) return -EFAULT; rd = op->reg; for (i = 0; i < size; i += 4) { unsigned int v32 = regs->gpr[rd]; nb = size - i; if (nb > 4) nb = 4; if (unlikely(cross_endian)) v32 = byterev_4(v32); err = copy_mem_out((u8 *) &v32, ea, nb, regs); if (err) break; ea += 4; /* reg number wraps from 31 to 0 for stsw[ix] */ rd = (rd + 1) & 0x1f; } break; default: return -EINVAL; } if (err) return err; if (op->type & UPDATE) regs->gpr[op->update_reg] = op->ea; return 0; } NOKPROBE_SYMBOL(emulate_loadstore); /* * Emulate instructions that cause a transfer of control, * loads and stores, and a few other instructions. * Returns 1 if the step was emulated, 0 if not, * or -1 if the instruction is one that should not be stepped, * such as an rfid, or a mtmsrd that would clear MSR_RI. 
*/ int emulate_step(struct pt_regs *regs, ppc_inst_t instr) { struct instruction_op op; int r, err, type; unsigned long val; unsigned long ea; r = analyse_instr(&op, regs, instr); if (r < 0) return r; if (r > 0) { emulate_update_regs(regs, &op); return 1; } err = 0; type = GETTYPE(op.type); if (OP_IS_LOAD_STORE(type)) { err = emulate_loadstore(regs, &op); if (err) return 0; goto instr_done; } switch (type) { case CACHEOP: ea = truncate_if_32bit(regs->msr, op.ea); if (!address_ok(regs, ea, 8)) return 0; switch (op.type & CACHEOP_MASK) { case DCBST: __cacheop_user_asmx(ea, err, "dcbst"); break; case DCBF: __cacheop_user_asmx(ea, err, "dcbf"); break; case DCBTST: if (op.reg == 0) prefetchw((void *) ea); break; case DCBT: if (op.reg == 0) prefetch((void *) ea); break; case ICBI: __cacheop_user_asmx(ea, err, "icbi"); break; case DCBZ: err = emulate_dcbz(ea, regs); break; } if (err) { regs->dar = ea; return 0; } goto instr_done; case MFMSR: regs->gpr[op.reg] = regs->msr & MSR_MASK; goto instr_done; case MTMSR: val = regs->gpr[op.reg]; if ((val & MSR_RI) == 0) /* can't step mtmsr[d] that would clear MSR_RI */ return -1; /* here op.val is the mask of bits to change */ regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val)); goto instr_done; case SYSCALL: /* sc */ /* * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't * single step a system call instruction: * * Successful completion for an instruction means that the * instruction caused no other interrupt. Thus a Trace * interrupt never occurs for a System Call or System Call * Vectored instruction, or for a Trap instruction that * traps. */ return -1; case SYSCALL_VECTORED_0: /* scv 0 */ return -1; case RFI: return -1; } return 0; instr_done: regs_set_return_ip(regs, truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type))); return 1; } NOKPROBE_SYMBOL(emulate_step);
linux-master
arch/powerpc/lib/sstep.c
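/*
 * Illustrative sketch only (not part of sstep.c above): a minimal example of
 * how a caller such as a kprobe single-step path might drive emulate_step()
 * and interpret its return values.  try_emulate_one() is a hypothetical
 * helper invented here; emulate_step(), ppc_inst() and struct pt_regs are the
 * real interfaces used by the file above.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/inst.h>
#include <asm/sstep.h>

static int try_emulate_one(struct pt_regs *regs, u32 word)
{
	ppc_inst_t instr = ppc_inst(word);
	int ret = emulate_step(regs, instr);

	if (ret > 0)		/* emulated; regs->nip has already been advanced */
		return 0;
	if (ret < 0)		/* e.g. rfid, or mtmsrd clearing MSR_RI: must not step */
		return -EPERM;
	return -EAGAIN;		/* not emulated; caller must single-step in hardware */
}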
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <[email protected]>
 *          Anton Blanchard <[email protected]>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>

int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();
	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail which will cause a fallback to the normal non-vmx copy.
	 */
	pagefault_disable();
	enable_kernel_altivec();

	return 1;
}

/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable_no_resched();

	/*
	 * Must never explicitly call schedule (including preempt_enable())
	 * while in a kuap-unlocked user copy, because the AMR register will
	 * not be saved and restored across context switch. However preempt
	 * kernels need to be preempted as soon as possible if need_resched is
	 * set and we are preemptible. The hack here is to schedule a
	 * decrementer to fire here and reschedule for us if necessary.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT) && need_resched())
		set_dec(1);

	return 0;
}

int enter_vmx_ops(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();
	enable_kernel_altivec();

	return 1;
}

/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_ops(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();

	return dest;
}
linux-master
arch/powerpc/lib/vmx-helper.c
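/*
 * Illustrative sketch only (not part of vmx-helper.c above): how an optimised
 * copy routine conceptually brackets its VMX usage with the enter/exit
 * helpers defined above.  vmx_memcpy_sketch() is hypothetical and uses a
 * plain memcpy() where the real code runs an Altivec copy loop; the
 * prototypes below simply mirror the definitions above, since the real
 * callers are assembly routines.
 */
#include <linux/string.h>
#include <linux/types.h>

int enter_vmx_ops(void);
void *exit_vmx_ops(void *dest);

static void *vmx_memcpy_sketch(void *dest, const void *src, size_t n)
{
	if (!enter_vmx_ops())
		return memcpy(dest, src, n);	/* in interrupt: VMX unusable, fall back */

	memcpy(dest, src, n);			/* stand-in for the real VMX copy loop */

	return exit_vmx_ops(dest);		/* returns dest, as a memcpy must */
}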
#include <asm/interrupt.h>
#include <asm/kprobes.h>

struct soft_mask_table_entry {
	unsigned long start;
	unsigned long end;
};

struct restart_table_entry {
	unsigned long start;
	unsigned long end;
	unsigned long fixup;
};

extern struct soft_mask_table_entry __start___soft_mask_table[];
extern struct soft_mask_table_entry __stop___soft_mask_table[];

extern struct restart_table_entry __start___restart_table[];
extern struct restart_table_entry __stop___restart_table[];

/* Given an address, look for it in the soft mask table */
bool search_kernel_soft_mask_table(unsigned long addr)
{
	struct soft_mask_table_entry *smte = __start___soft_mask_table;

	while (smte < __stop___soft_mask_table) {
		unsigned long start = smte->start;
		unsigned long end = smte->end;

		if (addr >= start && addr < end)
			return true;
		smte++;
	}
	return false;
}
NOKPROBE_SYMBOL(search_kernel_soft_mask_table);

/* Given an address, look for it in the kernel restart table */
unsigned long search_kernel_restart_table(unsigned long addr)
{
	struct restart_table_entry *rte = __start___restart_table;

	while (rte < __stop___restart_table) {
		unsigned long start = rte->start;
		unsigned long end = rte->end;
		unsigned long fixup = rte->fixup;

		if (addr >= start && addr < end)
			return fixup;
		rte++;
	}
	return 0;
}
NOKPROBE_SYMBOL(search_kernel_restart_table);
linux-master
arch/powerpc/lib/restart_table.c
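/*
 * Illustrative sketch only (not part of restart_table.c above): how an
 * interrupt return path might consult the restart table to resume an
 * interrupted restartable sequence at its fixup address.
 * maybe_apply_restart_fixup() is a hypothetical helper, and the assumption
 * here is that search_kernel_restart_table() is declared via
 * asm/interrupt.h; regs_set_return_ip() is the real interface used elsewhere
 * in this document.
 */
#include <asm/interrupt.h>
#include <asm/ptrace.h>

static void maybe_apply_restart_fixup(struct pt_regs *regs)
{
	unsigned long fixup = search_kernel_restart_table(regs->nip);

	if (fixup)
		regs_set_return_ip(regs, fixup);	/* continue at the fixup entry */
}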
// SPDX-License-Identifier: GPL-2.0 /* * MMU-generic set_memory implementation for powerpc * * Copyright 2019-2021, IBM Corporation. */ #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/set_memory.h> #include <asm/mmu.h> #include <asm/page.h> #include <asm/pgtable.h> static pte_basic_t pte_update_delta(pte_t *ptep, unsigned long addr, unsigned long old, unsigned long new) { return pte_update(&init_mm, addr, ptep, old & ~new, new & ~old, 0); } /* * Updates the attributes of a page atomically. * * This sequence is safe against concurrent updates, and also allows updating the * attributes of a page currently being executed or accessed. */ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) { long action = (long)data; addr &= PAGE_MASK; /* modify the PTE bits as desired */ switch (action) { case SET_MEMORY_RO: /* Don't clear DIRTY bit */ pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_RO); break; case SET_MEMORY_RW: pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_RW); break; case SET_MEMORY_NX: pte_update_delta(ptep, addr, _PAGE_KERNEL_ROX, _PAGE_KERNEL_RO); break; case SET_MEMORY_X: pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_ROX); break; case SET_MEMORY_NP: pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0); break; case SET_MEMORY_P: pte_update(&init_mm, addr, ptep, 0, _PAGE_PRESENT, 0); break; default: WARN_ON_ONCE(1); break; } /* See ptesync comment in radix__set_pte_at() */ if (radix_enabled()) asm volatile("ptesync": : :"memory"); flush_tlb_kernel_range(addr, addr + PAGE_SIZE); return 0; } int change_memory_attr(unsigned long addr, int numpages, long action) { unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE); unsigned long size = numpages * PAGE_SIZE; if (!numpages) return 0; if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) && is_vm_area_hugepages((void *)addr))) return -EINVAL; #ifdef CONFIG_PPC_BOOK3S_64 /* * On hash, the linear mapping is not in the Linux page table so * apply_to_existing_page_range() will have no effect. If in the future * the set_memory_* functions are used on the linear map this will need * to be updated. */ if (!radix_enabled()) { int region = get_region_id(addr); if (WARN_ON_ONCE(region != VMALLOC_REGION_ID && region != IO_REGION_ID)) return -EINVAL; } #endif return apply_to_existing_page_range(&init_mm, start, size, change_page_attr, (void *)action); }
linux-master
arch/powerpc/mm/pageattr.c
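/*
 * Illustrative sketch only (not part of pageattr.c above): the generic
 * set_memory_*() helpers that callers use are thin wrappers around
 * change_memory_attr() above.  protect_one_page() is a hypothetical example
 * that makes a single kernel page read-only and non-executable.
 */
#include <linux/set_memory.h>

static int protect_one_page(void *p)
{
	unsigned long addr = (unsigned long)p;
	int err;

	err = set_memory_ro(addr, 1);	/* SET_MEMORY_RO: drop write permission */
	if (err)
		return err;

	return set_memory_nx(addr, 1);	/* SET_MEMORY_NX: drop execute permission */
}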
/* * PPC Huge TLB Page Support for Kernel. * * Copyright (C) 2003 David Gibson, IBM Corporation. * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor * * Based on the IA-32 version: * Copyright (C) 2002, Rohit Seth <[email protected]> */ #include <linux/mm.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/hugetlb.h> #include <linux/export.h> #include <linux/of_fdt.h> #include <linux/memblock.h> #include <linux/moduleparam.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/kmemleak.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/setup.h> #include <asm/hugetlb.h> #include <asm/pte-walk.h> #include <asm/firmware.h> bool hugetlb_disabled = false; #define hugepd_none(hpd) (hpd_val(hpd) == 0) #define PTE_T_ORDER (__builtin_ffs(sizeof(pte_basic_t)) - \ __builtin_ffs(sizeof(void *))) pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) { /* * Only called for hugetlbfs pages, hence can ignore THP and the * irq disabled walk. */ return __find_linux_pte(mm->pgd, addr, NULL, NULL); } static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, unsigned long address, unsigned int pdshift, unsigned int pshift, spinlock_t *ptl) { struct kmem_cache *cachep; pte_t *new; int i; int num_hugepd; if (pshift >= pdshift) { cachep = PGT_CACHE(PTE_T_ORDER); num_hugepd = 1 << (pshift - pdshift); } else { cachep = PGT_CACHE(pdshift - pshift); num_hugepd = 1; } if (!cachep) { WARN_ONCE(1, "No page table cache created for hugetlb tables"); return -ENOMEM; } new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL)); BUG_ON(pshift > HUGEPD_SHIFT_MASK); BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); if (!new) return -ENOMEM; /* * Make sure other cpus find the hugepd set only after a * properly initialized page table is visible to them. * For more details look for comment in __pte_alloc(). */ smp_wmb(); spin_lock(ptl); /* * We have multiple higher-level entries that point to the same * actual pte location. Fill in each as we go and backtrack on error. * We need all of these so the DTLB pgtable walk code can find the * right higher-level entry without knowing if it's a hugepage or not. */ for (i = 0; i < num_hugepd; i++, hpdp++) { if (unlikely(!hugepd_none(*hpdp))) break; hugepd_populate(hpdp, new, pshift); } /* If we bailed from the for loop early, an error occurred, clean up */ if (i < num_hugepd) { for (i = i - 1 ; i >= 0; i--, hpdp--) *hpdp = __hugepd(0); kmem_cache_free(cachep, new); } else { kmemleak_ignore(new); } spin_unlock(ptl); return 0; } /* * At this point we do the placement change only for BOOK3S 64. This would * possibly work on other subarchs. 
*/ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pg; p4d_t *p4; pud_t *pu; pmd_t *pm; hugepd_t *hpdp = NULL; unsigned pshift = __ffs(sz); unsigned pdshift = PGDIR_SHIFT; spinlock_t *ptl; addr &= ~(sz-1); pg = pgd_offset(mm, addr); p4 = p4d_offset(pg, addr); #ifdef CONFIG_PPC_BOOK3S_64 if (pshift == PGDIR_SHIFT) /* 16GB huge page */ return (pte_t *) p4; else if (pshift > PUD_SHIFT) { /* * We need to use hugepd table */ ptl = &mm->page_table_lock; hpdp = (hugepd_t *)p4; } else { pdshift = PUD_SHIFT; pu = pud_alloc(mm, p4, addr); if (!pu) return NULL; if (pshift == PUD_SHIFT) return (pte_t *)pu; else if (pshift > PMD_SHIFT) { ptl = pud_lockptr(mm, pu); hpdp = (hugepd_t *)pu; } else { pdshift = PMD_SHIFT; pm = pmd_alloc(mm, pu, addr); if (!pm) return NULL; if (pshift == PMD_SHIFT) /* 16MB hugepage */ return (pte_t *)pm; else { ptl = pmd_lockptr(mm, pm); hpdp = (hugepd_t *)pm; } } } #else if (pshift >= PGDIR_SHIFT) { ptl = &mm->page_table_lock; hpdp = (hugepd_t *)p4; } else { pdshift = PUD_SHIFT; pu = pud_alloc(mm, p4, addr); if (!pu) return NULL; if (pshift >= PUD_SHIFT) { ptl = pud_lockptr(mm, pu); hpdp = (hugepd_t *)pu; } else { pdshift = PMD_SHIFT; pm = pmd_alloc(mm, pu, addr); if (!pm) return NULL; ptl = pmd_lockptr(mm, pm); hpdp = (hugepd_t *)pm; } } #endif if (!hpdp) return NULL; if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT) return pte_alloc_huge(mm, (pmd_t *)hpdp, addr); BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp)); if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift, ptl)) return NULL; return hugepte_offset(*hpdp, addr, pdshift); } #ifdef CONFIG_PPC_BOOK3S_64 /* * Tracks gpages after the device tree is scanned and before the * huge_boot_pages list is ready on pseries. */ #define MAX_NUMBER_GPAGES 1024 __initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES]; __initdata static unsigned nr_gpages; /* * Build list of addresses of gigantic pages. This function is used in early * boot before the buddy allocator is setup. 
*/ void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) { if (!addr) return; while (number_of_pages > 0) { gpage_freearray[nr_gpages] = addr; nr_gpages++; number_of_pages--; addr += page_size; } } static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate) { struct huge_bootmem_page *m; if (nr_gpages == 0) return 0; m = phys_to_virt(gpage_freearray[--nr_gpages]); gpage_freearray[nr_gpages] = 0; list_add(&m->list, &huge_boot_pages); m->hstate = hstate; return 1; } bool __init hugetlb_node_alloc_supported(void) { return false; } #endif int __init alloc_bootmem_huge_page(struct hstate *h, int nid) { #ifdef CONFIG_PPC_BOOK3S_64 if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled()) return pseries_alloc_bootmem_huge_page(h); #endif return __alloc_bootmem_huge_page(h, nid); } #ifndef CONFIG_PPC_BOOK3S_64 #define HUGEPD_FREELIST_SIZE \ ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) struct hugepd_freelist { struct rcu_head rcu; unsigned int index; void *ptes[]; }; static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur); static void hugepd_free_rcu_callback(struct rcu_head *head) { struct hugepd_freelist *batch = container_of(head, struct hugepd_freelist, rcu); unsigned int i; for (i = 0; i < batch->index; i++) kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]); free_page((unsigned long)batch); } static void hugepd_free(struct mmu_gather *tlb, void *hugepte) { struct hugepd_freelist **batchp; batchp = &get_cpu_var(hugepd_freelist_cur); if (atomic_read(&tlb->mm->mm_users) < 2 || mm_is_thread_local(tlb->mm)) { kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte); put_cpu_var(hugepd_freelist_cur); return; } if (*batchp == NULL) { *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC); (*batchp)->index = 0; } (*batchp)->ptes[(*batchp)->index++] = hugepte; if ((*batchp)->index == HUGEPD_FREELIST_SIZE) { call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback); *batchp = NULL; } put_cpu_var(hugepd_freelist_cur); } #else static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {} #endif /* Return true when the entry to be freed maps more than the area being freed */ static bool range_is_outside_limits(unsigned long start, unsigned long end, unsigned long floor, unsigned long ceiling, unsigned long mask) { if ((start & mask) < floor) return true; if (ceiling) { ceiling &= mask; if (!ceiling) return true; } return end - 1 > ceiling - 1; } static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift, unsigned long start, unsigned long end, unsigned long floor, unsigned long ceiling) { pte_t *hugepte = hugepd_page(*hpdp); int i; unsigned long pdmask = ~((1UL << pdshift) - 1); unsigned int num_hugepd = 1; unsigned int shift = hugepd_shift(*hpdp); /* Note: On fsl the hpdp may be the first of several */ if (shift > pdshift) num_hugepd = 1 << (shift - pdshift); if (range_is_outside_limits(start, end, floor, ceiling, pdmask)) return; for (i = 0; i < num_hugepd; i++, hpdp++) *hpdp = __hugepd(0); if (shift >= pdshift) hugepd_free(tlb, hugepte); else pgtable_free_tlb(tlb, hugepte, get_hugepd_cache_index(pdshift - shift)); } static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pgtable_t token = pmd_pgtable(*pmd); if (range_is_outside_limits(addr, end, floor, ceiling, PMD_MASK)) return; pmd_clear(pmd); pte_free_tlb(tlb, token, addr); mm_dec_nr_ptes(tlb->mm); } static void 
hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pmd_t *pmd; unsigned long next; unsigned long start; start = addr; do { unsigned long more; pmd = pmd_offset(pud, addr); next = pmd_addr_end(addr, end); if (!is_hugepd(__hugepd(pmd_val(*pmd)))) { if (pmd_none_or_clear_bad(pmd)) continue; /* * if it is not hugepd pointer, we should already find * it cleared. */ WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx)); hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling); continue; } /* * Increment next by the size of the huge mapping since * there may be more than one entry at this level for a * single hugepage, but all of them point to * the same kmem cache that holds the hugepte. */ more = addr + (1UL << hugepd_shift(*(hugepd_t *)pmd)); if (more > next) next = more; free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, addr, next, floor, ceiling); } while (addr = next, addr != end); if (range_is_outside_limits(start, end, floor, ceiling, PUD_MASK)) return; pmd = pmd_offset(pud, start & PUD_MASK); pud_clear(pud); pmd_free_tlb(tlb, pmd, start & PUD_MASK); mm_dec_nr_pmds(tlb->mm); } static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pud_t *pud; unsigned long next; unsigned long start; start = addr; do { pud = pud_offset(p4d, addr); next = pud_addr_end(addr, end); if (!is_hugepd(__hugepd(pud_val(*pud)))) { if (pud_none_or_clear_bad(pud)) continue; hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling); } else { unsigned long more; /* * Increment next by the size of the huge mapping since * there may be more than one entry at this level for a * single hugepage, but all of them point to * the same kmem cache that holds the hugepte. */ more = addr + (1UL << hugepd_shift(*(hugepd_t *)pud)); if (more > next) next = more; free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, addr, next, floor, ceiling); } } while (addr = next, addr != end); if (range_is_outside_limits(start, end, floor, ceiling, PGDIR_MASK)) return; pud = pud_offset(p4d, start & PGDIR_MASK); p4d_clear(p4d); pud_free_tlb(tlb, pud, start & PGDIR_MASK); mm_dec_nr_puds(tlb->mm); } /* * This function frees user-level page tables of a process. */ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pgd_t *pgd; p4d_t *p4d; unsigned long next; /* * Because there are a number of different possible pagetable * layouts for hugepage ranges, we limit knowledge of how * things should be laid out to the allocation path * (huge_pte_alloc(), above). Everything else works out the * structure as it goes from information in the hugepd * pointers. That means that we can't here use the * optimization used in the normal page free_pgd_range(), of * checking whether we're actually covering a large enough * range to have to do anything at the top level of the walk * instead of at the bottom. * * To make sense of this, you should probably go read the big * block comment at the top of the normal free_pgd_range(), * too. 
*/ do { next = pgd_addr_end(addr, end); pgd = pgd_offset(tlb->mm, addr); p4d = p4d_offset(pgd, addr); if (!is_hugepd(__hugepd(pgd_val(*pgd)))) { if (p4d_none_or_clear_bad(p4d)) continue; hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling); } else { unsigned long more; /* * Increment next by the size of the huge mapping since * there may be more than one entry at the pgd level * for a single hugepage, but all of them point to the * same kmem cache that holds the hugepte. */ more = addr + (1UL << hugepd_shift(*(hugepd_t *)pgd)); if (more > next) next = more; free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT, addr, next, floor, ceiling); } } while (addr = next, addr != end); } bool __init arch_hugetlb_valid_size(unsigned long size) { int shift = __ffs(size); int mmu_psize; /* Check that it is a page size supported by the hardware and * that it fits within pagetable and slice limits. */ if (size <= PAGE_SIZE || !is_power_of_2(size)) return false; mmu_psize = check_and_get_huge_psize(shift); if (mmu_psize < 0) return false; BUG_ON(mmu_psize_defs[mmu_psize].shift != shift); return true; } static int __init add_huge_page_size(unsigned long long size) { int shift = __ffs(size); if (!arch_hugetlb_valid_size((unsigned long)size)) return -EINVAL; hugetlb_add_hstate(shift - PAGE_SHIFT); return 0; } static int __init hugetlbpage_init(void) { bool configured = false; int psize; if (hugetlb_disabled) { pr_info("HugeTLB support is disabled!\n"); return 0; } if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE)) return -ENODEV; for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { unsigned shift; unsigned pdshift; if (!mmu_psize_defs[psize].shift) continue; shift = mmu_psize_to_shift(psize); #ifdef CONFIG_PPC_BOOK3S_64 if (shift > PGDIR_SHIFT) continue; else if (shift > PUD_SHIFT) pdshift = PGDIR_SHIFT; else if (shift > PMD_SHIFT) pdshift = PUD_SHIFT; else pdshift = PMD_SHIFT; #else if (shift < PUD_SHIFT) pdshift = PMD_SHIFT; else if (shift < PGDIR_SHIFT) pdshift = PUD_SHIFT; else pdshift = PGDIR_SHIFT; #endif if (add_huge_page_size(1ULL << shift) < 0) continue; /* * if we have pdshift and shift value same, we don't * use pgt cache for hugepd. */ if (pdshift > shift) { if (!IS_ENABLED(CONFIG_PPC_8xx)) pgtable_cache_add(pdshift - shift); } else if (IS_ENABLED(CONFIG_PPC_E500) || IS_ENABLED(CONFIG_PPC_8xx)) { pgtable_cache_add(PTE_T_ORDER); } configured = true; } if (!configured) pr_info("Failed to initialize. Disabling HugeTLB"); return 0; } arch_initcall(hugetlbpage_init); void __init gigantic_hugetlb_cma_reserve(void) { unsigned long order = 0; if (radix_enabled()) order = PUD_SHIFT - PAGE_SHIFT; else if (!firmware_has_feature(FW_FEATURE_LPAR) && mmu_psize_defs[MMU_PAGE_16G].shift) /* * For pseries we do use ibm,expected#pages for reserving 16G pages. */ order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT; if (order) { VM_WARN_ON(order <= MAX_ORDER); hugetlb_cma_reserve(order); } }
linux-master
arch/powerpc/mm/hugetlbpage.c
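/*
 * Illustrative sketch only (not part of hugetlbpage.c above): a restatement
 * of the hugepd geometry arithmetic used by __hugepte_alloc() and
 * free_hugepd_range() above.  hugepd_entries_for() is a hypothetical helper:
 * when the huge page shift is at least the directory shift, the same hugepte
 * table is referenced from 1 << (pshift - pdshift) consecutive directory
 * entries, otherwise from exactly one.
 */
static unsigned int hugepd_entries_for(unsigned int pshift, unsigned int pdshift)
{
	return pshift >= pdshift ? 1u << (pshift - pdshift) : 1;
}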
// SPDX-License-Identifier: GPL-2.0-or-later /* * Common implementation of switch_mm_irqs_off * * Copyright IBM Corp. 2017 */ #include <linux/mm.h> #include <linux/cpu.h> #include <linux/sched/mm.h> #include <asm/mmu_context.h> #include <asm/pgalloc.h> #if defined(CONFIG_PPC32) static inline void switch_mm_pgdir(struct task_struct *tsk, struct mm_struct *mm) { /* 32-bit keeps track of the current PGDIR in the thread struct */ tsk->thread.pgdir = mm->pgd; #ifdef CONFIG_PPC_BOOK3S_32 tsk->thread.sr0 = mm->context.sr0; #endif #if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP) tsk->thread.pid = mm->context.id; #endif } #elif defined(CONFIG_PPC_BOOK3E_64) static inline void switch_mm_pgdir(struct task_struct *tsk, struct mm_struct *mm) { /* 64-bit Book3E keeps track of current PGD in the PACA */ get_paca()->pgd = mm->pgd; #ifdef CONFIG_PPC_KUAP tsk->thread.pid = mm->context.id; #endif } #else static inline void switch_mm_pgdir(struct task_struct *tsk, struct mm_struct *mm) { } #endif void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { int cpu = smp_processor_id(); bool new_on_cpu = false; /* Mark this context has been used on the new CPU */ if (!cpumask_test_cpu(cpu, mm_cpumask(next))) { VM_WARN_ON_ONCE(next == &init_mm); cpumask_set_cpu(cpu, mm_cpumask(next)); inc_mm_active_cpus(next); /* * This full barrier orders the store to the cpumask above vs * a subsequent load which allows this CPU/MMU to begin loading * translations for 'next' from page table PTEs into the TLB. * * When using the radix MMU, that operation is the load of the * MMU context id, which is then moved to SPRN_PID. * * For the hash MMU it is either the first load from slb_cache * in switch_slb() to preload the SLBs, or the load of * get_user_context which loads the context for the VSID hash * to insert a new SLB, in the SLB fault handler. * * On the other side, the barrier is in mm/tlb-radix.c for * radix which orders earlier stores to clear the PTEs before * the load of mm_cpumask to check which CPU TLBs should be * flushed. For hash, pte_xchg to clear the PTE includes the * barrier. * * This full barrier is also needed by membarrier when * switching between processes after store to rq->curr, before * user-space memory accesses. */ smp_mb(); new_on_cpu = true; } /* Some subarchs need to track the PGD elsewhere */ switch_mm_pgdir(tsk, next); /* Nothing else to do if we aren't actually switching */ if (prev == next) return; /* * We must stop all altivec streams before changing the HW * context */ if (cpu_has_feature(CPU_FTR_ALTIVEC)) asm volatile (PPC_DSSALL); if (!new_on_cpu) membarrier_arch_switch_mm(prev, next, tsk); /* * The actual HW switching method differs between the various * sub architectures. Out of line for now */ switch_mmu_context(prev, next, tsk); VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(prev))); } #ifndef CONFIG_PPC_BOOK3S_64 void arch_exit_mmap(struct mm_struct *mm) { void *frag = pte_frag_get(&mm->context); if (frag) pte_frag_destroy(frag); } #endif
linux-master
arch/powerpc/mm/mmu_context.c
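/*
 * Illustrative sketch only (not part of mmu_context.c above): a restatement
 * of the first-use ordering that switch_mm_irqs_off() depends on.  The store
 * into mm_cpumask(next) must be ordered before any later loads that bring in
 * translations for 'next'.  mark_mm_used_on_cpu() is hypothetical and mirrors
 * only the cpumask/barrier part of the real function (it omits
 * inc_mm_active_cpus() and the membarrier interaction).
 */
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <asm/barrier.h>

static bool mark_mm_used_on_cpu(struct mm_struct *next, int cpu)
{
	if (cpumask_test_cpu(cpu, mm_cpumask(next)))
		return false;			/* already active on this CPU */

	cpumask_set_cpu(cpu, mm_cpumask(next));
	smp_mb();				/* order vs. later PID/SLB context loads */
	return true;
}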
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/uaccess.h>
#include <linux/kernel.h>

#include <asm/disassemble.h>
#include <asm/inst.h>
#include <asm/ppc-opcode.h>

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
	return is_kernel_addr((unsigned long)unsafe_src);
}
linux-master
arch/powerpc/mm/maccess.c
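/*
 * Illustrative sketch only (not part of maccess.c above): the hook above is
 * what lets copy_from_kernel_nofault() refuse user addresses on powerpc.
 * peek_kernel_word() is a hypothetical probe that relies on that policy and
 * on the fault handling done by the generic helper.
 */
#include <linux/uaccess.h>
#include <linux/types.h>

static long peek_kernel_word(const void *addr, unsigned long *out)
{
	/* 0 on success; -ERANGE if the address is disallowed, -EFAULT on fault. */
	return copy_from_kernel_nofault(out, addr, sizeof(*out));
}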
// SPDX-License-Identifier: GPL-2.0-or-later /* * Dynamic reconfiguration memory support * * Copyright 2017 IBM Corporation */ #define pr_fmt(fmt) "drmem: " fmt #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/memblock.h> #include <linux/slab.h> #include <asm/drmem.h> static int n_root_addr_cells, n_root_size_cells; static struct drmem_lmb_info __drmem_info; struct drmem_lmb_info *drmem_info = &__drmem_info; static bool in_drmem_update; u64 drmem_lmb_memory_max(void) { struct drmem_lmb *last_lmb; last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1]; return last_lmb->base_addr + drmem_lmb_size(); } static u32 drmem_lmb_flags(struct drmem_lmb *lmb) { /* * Return the value of the lmb flags field minus the reserved * bit used internally for hotplug processing. */ return lmb->flags & ~DRMEM_LMB_RESERVED; } static struct property *clone_property(struct property *prop, u32 prop_sz) { struct property *new_prop; new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL); if (!new_prop) return NULL; new_prop->name = kstrdup(prop->name, GFP_KERNEL); new_prop->value = kzalloc(prop_sz, GFP_KERNEL); if (!new_prop->name || !new_prop->value) { kfree(new_prop->name); kfree(new_prop->value); kfree(new_prop); return NULL; } new_prop->length = prop_sz; #if defined(CONFIG_OF_DYNAMIC) of_property_set_flag(new_prop, OF_DYNAMIC); #endif return new_prop; } static int drmem_update_dt_v1(struct device_node *memory, struct property *prop) { struct property *new_prop; struct of_drconf_cell_v1 *dr_cell; struct drmem_lmb *lmb; u32 *p; new_prop = clone_property(prop, prop->length); if (!new_prop) return -1; p = new_prop->value; *p++ = cpu_to_be32(drmem_info->n_lmbs); dr_cell = (struct of_drconf_cell_v1 *)p; for_each_drmem_lmb(lmb) { dr_cell->base_addr = cpu_to_be64(lmb->base_addr); dr_cell->drc_index = cpu_to_be32(lmb->drc_index); dr_cell->aa_index = cpu_to_be32(lmb->aa_index); dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb)); dr_cell++; } of_update_property(memory, new_prop); return 0; } static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell, struct drmem_lmb *lmb) { dr_cell->base_addr = cpu_to_be64(lmb->base_addr); dr_cell->drc_index = cpu_to_be32(lmb->drc_index); dr_cell->aa_index = cpu_to_be32(lmb->aa_index); dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb)); } static int drmem_update_dt_v2(struct device_node *memory, struct property *prop) { struct property *new_prop; struct of_drconf_cell_v2 *dr_cell; struct drmem_lmb *lmb, *prev_lmb; u32 lmb_sets, prop_sz, seq_lmbs; u32 *p; /* First pass, determine how many LMB sets are needed. 
*/ lmb_sets = 0; prev_lmb = NULL; for_each_drmem_lmb(lmb) { if (!prev_lmb) { prev_lmb = lmb; lmb_sets++; continue; } if (prev_lmb->aa_index != lmb->aa_index || drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) lmb_sets++; prev_lmb = lmb; } prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32); new_prop = clone_property(prop, prop_sz); if (!new_prop) return -1; p = new_prop->value; *p++ = cpu_to_be32(lmb_sets); dr_cell = (struct of_drconf_cell_v2 *)p; /* Second pass, populate the LMB set data */ prev_lmb = NULL; seq_lmbs = 0; for_each_drmem_lmb(lmb) { if (prev_lmb == NULL) { /* Start of first LMB set */ prev_lmb = lmb; init_drconf_v2_cell(dr_cell, lmb); seq_lmbs++; continue; } if (prev_lmb->aa_index != lmb->aa_index || drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) { /* end of one set, start of another */ dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs); dr_cell++; init_drconf_v2_cell(dr_cell, lmb); seq_lmbs = 1; } else { seq_lmbs++; } prev_lmb = lmb; } /* close out last LMB set */ dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs); of_update_property(memory, new_prop); return 0; } int drmem_update_dt(void) { struct device_node *memory; struct property *prop; int rc = -1; memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (!memory) return -1; /* * Set in_drmem_update to prevent the notifier callback to process the * DT property back since the change is coming from the LMB tree. */ in_drmem_update = true; prop = of_find_property(memory, "ibm,dynamic-memory", NULL); if (prop) { rc = drmem_update_dt_v1(memory, prop); } else { prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL); if (prop) rc = drmem_update_dt_v2(memory, prop); } in_drmem_update = false; of_node_put(memory); return rc; } static void read_drconf_v1_cell(struct drmem_lmb *lmb, const __be32 **prop) { const __be32 *p = *prop; lmb->base_addr = of_read_number(p, n_root_addr_cells); p += n_root_addr_cells; lmb->drc_index = of_read_number(p++, 1); p++; /* skip reserved field */ lmb->aa_index = of_read_number(p++, 1); lmb->flags = of_read_number(p++, 1); *prop = p; } static int __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, void *data, int (*func)(struct drmem_lmb *, const __be32 **, void *)) { struct drmem_lmb lmb; u32 i, n_lmbs; int ret = 0; n_lmbs = of_read_number(prop++, 1); for (i = 0; i < n_lmbs; i++) { read_drconf_v1_cell(&lmb, &prop); ret = func(&lmb, &usm, data); if (ret) break; } return ret; } static void read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell, const __be32 **prop) { const __be32 *p = *prop; dr_cell->seq_lmbs = of_read_number(p++, 1); dr_cell->base_addr = of_read_number(p, n_root_addr_cells); p += n_root_addr_cells; dr_cell->drc_index = of_read_number(p++, 1); dr_cell->aa_index = of_read_number(p++, 1); dr_cell->flags = of_read_number(p++, 1); *prop = p; } static int __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, void *data, int (*func)(struct drmem_lmb *, const __be32 **, void *)) { struct of_drconf_cell_v2 dr_cell; struct drmem_lmb lmb; u32 i, j, lmb_sets; int ret = 0; lmb_sets = of_read_number(prop++, 1); for (i = 0; i < lmb_sets; i++) { read_drconf_v2_cell(&dr_cell, &prop); for (j = 0; j < dr_cell.seq_lmbs; j++) { lmb.base_addr = dr_cell.base_addr; dr_cell.base_addr += drmem_lmb_size(); lmb.drc_index = dr_cell.drc_index; dr_cell.drc_index++; lmb.aa_index = dr_cell.aa_index; lmb.flags = dr_cell.flags; ret = func(&lmb, &usm, data); if (ret) break; } } return ret; } #ifdef CONFIG_PPC_PSERIES int __init walk_drmem_lmbs_early(unsigned long node, void *data, int 
(*func)(struct drmem_lmb *, const __be32 **, void *)) { const __be32 *prop, *usm; int len, ret = -ENODEV; prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len); if (!prop || len < dt_root_size_cells * sizeof(__be32)) return ret; /* Get the address & size cells */ n_root_addr_cells = dt_root_addr_cells; n_root_size_cells = dt_root_size_cells; drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop); usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len); prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len); if (prop) { ret = __walk_drmem_v1_lmbs(prop, usm, data, func); } else { prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2", &len); if (prop) ret = __walk_drmem_v2_lmbs(prop, usm, data, func); } memblock_dump_all(); return ret; } /* * Update the LMB associativity index. */ static int update_lmb(struct drmem_lmb *updated_lmb, __maybe_unused const __be32 **usm, __maybe_unused void *data) { struct drmem_lmb *lmb; for_each_drmem_lmb(lmb) { if (lmb->drc_index != updated_lmb->drc_index) continue; lmb->aa_index = updated_lmb->aa_index; break; } return 0; } /* * Update the LMB associativity index. * * This needs to be called when the hypervisor is updating the * dynamic-reconfiguration-memory node property. */ void drmem_update_lmbs(struct property *prop) { /* * Don't update the LMBs if triggered by the update done in * drmem_update_dt(), the LMB values have been used to the update the DT * property in that case. */ if (in_drmem_update) return; if (!strcmp(prop->name, "ibm,dynamic-memory")) __walk_drmem_v1_lmbs(prop->value, NULL, NULL, update_lmb); else if (!strcmp(prop->name, "ibm,dynamic-memory-v2")) __walk_drmem_v2_lmbs(prop->value, NULL, NULL, update_lmb); } #endif static int init_drmem_lmb_size(struct device_node *dn) { const __be32 *prop; int len; if (drmem_info->lmb_size) return 0; prop = of_get_property(dn, "ibm,lmb-size", &len); if (!prop || len < n_root_size_cells * sizeof(__be32)) { pr_info("Could not determine LMB size\n"); return -1; } drmem_info->lmb_size = of_read_number(prop, n_root_size_cells); return 0; } /* * Returns the property linux,drconf-usable-memory if * it exists (the property exists only in kexec/kdump kernels, * added by kexec-tools) */ static const __be32 *of_get_usable_memory(struct device_node *dn) { const __be32 *prop; u32 len; prop = of_get_property(dn, "linux,drconf-usable-memory", &len); if (!prop || len < sizeof(unsigned int)) return NULL; return prop; } int walk_drmem_lmbs(struct device_node *dn, void *data, int (*func)(struct drmem_lmb *, const __be32 **, void *)) { const __be32 *prop, *usm; int ret = -ENODEV; if (!of_root) return ret; /* Get the address & size cells */ of_node_get(of_root); n_root_addr_cells = of_n_addr_cells(of_root); n_root_size_cells = of_n_size_cells(of_root); of_node_put(of_root); if (init_drmem_lmb_size(dn)) return ret; usm = of_get_usable_memory(dn); prop = of_get_property(dn, "ibm,dynamic-memory", NULL); if (prop) { ret = __walk_drmem_v1_lmbs(prop, usm, data, func); } else { prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL); if (prop) ret = __walk_drmem_v2_lmbs(prop, usm, data, func); } return ret; } static void __init init_drmem_v1_lmbs(const __be32 *prop) { struct drmem_lmb *lmb; drmem_info->n_lmbs = of_read_number(prop++, 1); if (drmem_info->n_lmbs == 0) return; drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb), GFP_KERNEL); if (!drmem_info->lmbs) return; for_each_drmem_lmb(lmb) read_drconf_v1_cell(lmb, &prop); } static void __init init_drmem_v2_lmbs(const __be32 *prop) { 
struct drmem_lmb *lmb; struct of_drconf_cell_v2 dr_cell; const __be32 *p; u32 i, j, lmb_sets; int lmb_index; lmb_sets = of_read_number(prop++, 1); if (lmb_sets == 0) return; /* first pass, calculate the number of LMBs */ p = prop; for (i = 0; i < lmb_sets; i++) { read_drconf_v2_cell(&dr_cell, &p); drmem_info->n_lmbs += dr_cell.seq_lmbs; } drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb), GFP_KERNEL); if (!drmem_info->lmbs) return; /* second pass, read in the LMB information */ lmb_index = 0; p = prop; for (i = 0; i < lmb_sets; i++) { read_drconf_v2_cell(&dr_cell, &p); for (j = 0; j < dr_cell.seq_lmbs; j++) { lmb = &drmem_info->lmbs[lmb_index++]; lmb->base_addr = dr_cell.base_addr; dr_cell.base_addr += drmem_info->lmb_size; lmb->drc_index = dr_cell.drc_index; dr_cell.drc_index++; lmb->aa_index = dr_cell.aa_index; lmb->flags = dr_cell.flags; } } } static int __init drmem_init(void) { struct device_node *dn; const __be32 *prop; dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (!dn) { pr_info("No dynamic reconfiguration memory found\n"); return 0; } if (init_drmem_lmb_size(dn)) { of_node_put(dn); return 0; } prop = of_get_property(dn, "ibm,dynamic-memory", NULL); if (prop) { init_drmem_v1_lmbs(prop); } else { prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL); if (prop) init_drmem_v2_lmbs(prop); } of_node_put(dn); return 0; } late_initcall(drmem_init);
linux-master
arch/powerpc/mm/drmem.c
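The ibm,dynamic-memory-v2 walk in the file above expands each LMB "set" (a run of LMBs sharing aa_index and flags) back into individual LMBs by stepping base_addr by the LMB size and incrementing drc_index. The following is a minimal user-space sketch of that inner loop, with a made-up set descriptor and an assumed 256 MB LMB size standing in for the kernel's drmem_info; it is illustrative only, not the kernel code.

/* Illustrative only: mirrors the expansion loop of __walk_drmem_v2_lmbs(). */
#include <stdio.h>

struct v2_set {                 /* analogous to struct of_drconf_cell_v2 */
	unsigned int seq_lmbs;
	unsigned long long base_addr;
	unsigned int drc_index;
	unsigned int aa_index;
	unsigned int flags;
};

int main(void)
{
	const unsigned long long lmb_size = 256ULL << 20;  /* assumed LMB size */
	struct v2_set set = { 4, 0x100000000ULL, 0x8000000a, 2, 0x8 };

	for (unsigned int j = 0; j < set.seq_lmbs; j++) {
		printf("LMB base=0x%llx drc=0x%x aa=%u flags=0x%x\n",
		       set.base_addr, set.drc_index, set.aa_index, set.flags);
		set.base_addr += lmb_size;   /* next LMB in the set */
		set.drc_index++;
	}
	return 0;
}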
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains the routines setting up the linux page tables. * -- paulus * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/memblock.h> #include <linux/slab.h> #include <linux/set_memory.h> #include <asm/pgalloc.h> #include <asm/fixmap.h> #include <asm/setup.h> #include <asm/sections.h> #include <asm/early_ioremap.h> #include <mm/mmu_decl.h> static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data; notrace void __init early_ioremap_init(void) { unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE); pte_t *ptep = (pte_t *)early_fixmap_pagetable; pmd_t *pmdp = pmd_off_k(addr); for (; (s32)(FIXADDR_TOP - addr) > 0; addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++) pmd_populate_kernel(&init_mm, pmdp, ptep); early_ioremap_setup(); } static void __init *early_alloc_pgtable(unsigned long size) { void *ptr = memblock_alloc(size, size); if (!ptr) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, size, size); return ptr; } pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va) { if (pmd_none(*pmdp)) { pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE); pmd_populate_kernel(&init_mm, pmdp, ptep); } return pte_offset_kernel(pmdp, va); } int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot) { pmd_t *pd; pte_t *pg; int err = -ENOMEM; /* Use upper 10 bits of VA to index the first level map */ pd = pmd_off_k(va); /* Use middle 10 bits of VA to index the second-level map */ if (likely(slab_is_available())) pg = pte_alloc_kernel(pd, va); else pg = early_pte_alloc_kernel(pd, va); if (pg) { err = 0; /* The PTE should never be already set nor present in the * hash table */ BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot)); set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot)); } smp_wmb(); return err; } /* * Map in a chunk of physical memory starting at start. */ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) { unsigned long v, s; phys_addr_t p; bool ktext; s = offset; v = PAGE_OFFSET + s; p = memstart_addr + s; for (; s < top; s += PAGE_SIZE) { ktext = core_kernel_text(v); map_kernel_page(v, p, ktext ? 
PAGE_KERNEL_TEXT : PAGE_KERNEL); v += PAGE_SIZE; p += PAGE_SIZE; } } void __init mapin_ram(void) { phys_addr_t base, end; u64 i; for_each_mem_range(i, &base, &end) { phys_addr_t top = min(end, total_lowmem); if (base >= top) continue; base = mmu_mapin_ram(base, top); __mapin_ram_chunk(base, top); } } void mark_initmem_nx(void) { unsigned long numpages = PFN_UP((unsigned long)_einittext) - PFN_DOWN((unsigned long)_sinittext); mmu_mark_initmem_nx(); if (!v_block_mapped((unsigned long)_sinittext)) { set_memory_nx((unsigned long)_sinittext, numpages); set_memory_rw((unsigned long)_sinittext, numpages); } } #ifdef CONFIG_STRICT_KERNEL_RWX void mark_rodata_ro(void) { unsigned long numpages; if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE)) pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n"); if (v_block_mapped((unsigned long)_stext + 1)) { mmu_mark_rodata_ro(); ptdump_check_wx(); return; } /* * mark text and rodata as read only. __end_rodata is set by * powerpc's linker script and includes tables and data * requiring relocation which are not put in RO_DATA. */ numpages = PFN_UP((unsigned long)__end_rodata) - PFN_DOWN((unsigned long)_stext); set_memory_ro((unsigned long)_stext, numpages); // mark_initmem_nx() should have already run by now ptdump_check_wx(); } #endif #if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC) void __kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long addr = (unsigned long)page_address(page); if (PageHighMem(page)) return; if (enable) set_memory_p(addr, numpages); else set_memory_np(addr, numpages); } #endif /* CONFIG_DEBUG_PAGEALLOC */
linux-master
arch/powerpc/mm/pgtable_32.c
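map_kernel_page() above notes that the upper 10 bits of the virtual address index the first-level table and the middle 10 bits index the second-level table. A small stand-alone sketch of that classic 10/10/12 split for 4 KB pages follows; the field widths are hard-coded here as an assumption rather than taken from the kernel's headers.

/* Illustrative only: split a 32-bit VA into first-level index, second-level
 * index and page offset, assuming 4 KB pages and 1024-entry tables. */
#include <stdio.h>

int main(void)
{
	unsigned int va = 0xc0123456;               /* example kernel VA */
	unsigned int pgd_idx = va >> 22;            /* upper 10 bits     */
	unsigned int pte_idx = (va >> 12) & 0x3ff;  /* middle 10 bits    */
	unsigned int offset  = va & 0xfff;          /* low 12 bits       */

	printf("va=0x%08x pgd=%u pte=%u off=0x%03x\n",
	       va, pgd_idx, pte_idx, offset);
	return 0;
}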
// SPDX-License-Identifier: GPL-2.0-only /* * PowerPC version derived from arch/arm/mm/consistent.c * Copyright (C) 2001 Dan Malek ([email protected]) * * Copyright (C) 2000 Russell King */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/highmem.h> #include <linux/dma-direct.h> #include <linux/dma-map-ops.h> #include <asm/tlbflush.h> #include <asm/dma.h> /* * make an area consistent. */ static void __dma_sync(void *vaddr, size_t size, int direction) { unsigned long start = (unsigned long)vaddr; unsigned long end = start + size; switch (direction) { case DMA_NONE: BUG(); case DMA_FROM_DEVICE: /* * invalidate only when cache-line aligned otherwise there is * the potential for discarding uncommitted data from the cache */ if ((start | end) & (L1_CACHE_BYTES - 1)) flush_dcache_range(start, end); else invalidate_dcache_range(start, end); break; case DMA_TO_DEVICE: /* writeback only */ clean_dcache_range(start, end); break; case DMA_BIDIRECTIONAL: /* writeback and invalidate */ flush_dcache_range(start, end); break; } } #ifdef CONFIG_HIGHMEM /* * __dma_sync_page() implementation for systems using highmem. * In this case, each page of a buffer must be kmapped/kunmapped * in order to have a virtual address for __dma_sync(). This must * not sleep so kmap_atomic()/kunmap_atomic() are used. * * Note: yes, it is possible and correct to have a buffer extend * beyond the first page. */ static inline void __dma_sync_page_highmem(struct page *page, unsigned long offset, size_t size, int direction) { size_t seg_size = min((size_t)(PAGE_SIZE - offset), size); size_t cur_size = seg_size; unsigned long flags, start, seg_offset = offset; int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE; int seg_nr = 0; local_irq_save(flags); do { start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset; /* Sync this buffer segment */ __dma_sync((void *)start, seg_size, direction); kunmap_atomic((void *)start); seg_nr++; /* Calculate next buffer segment size */ seg_size = min((size_t)PAGE_SIZE, size - cur_size); /* Add the segment size to our running total */ cur_size += seg_size; seg_offset = 0; } while (seg_nr < nr_segs); local_irq_restore(flags); } #endif /* CONFIG_HIGHMEM */ /* * __dma_sync_page makes memory consistent. identical to __dma_sync, but * takes a struct page instead of a virtual address */ static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir) { struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); unsigned offset = paddr & ~PAGE_MASK; #ifdef CONFIG_HIGHMEM __dma_sync_page_highmem(page, offset, size, dir); #else unsigned long start = (unsigned long)page_address(page) + offset; __dma_sync((void *)start, size, dir); #endif } void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { __dma_sync_page(paddr, size, dir); } void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { __dma_sync_page(paddr, size, dir); } void arch_dma_prep_coherent(struct page *page, size_t size) { unsigned long kaddr = (unsigned long)page_address(page); flush_dcache_range(kaddr, kaddr + size); }
linux-master
arch/powerpc/mm/dma-noncoherent.c
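__dma_sync() in the file above picks the cache-maintenance primitive from the DMA direction, and for DMA_FROM_DEVICE it only performs a pure invalidate when both ends of the range are cache-line aligned, falling back to a flush so partially owned lines are not discarded. Below is a user-space sketch of that decision with printf stubs in place of the real cache routines and an assumed 32-byte line size standing in for L1_CACHE_BYTES.

/* Illustrative only: the direction/alignment decision made by __dma_sync(). */
#include <stdio.h>

#define LINE 32UL   /* assumed cache line size */

enum dir { TO_DEVICE, FROM_DEVICE, BIDIRECTIONAL };

static void sync_range(unsigned long start, unsigned long end, enum dir d)
{
	switch (d) {
	case FROM_DEVICE:
		/* invalidate only when both ends are line aligned */
		if ((start | end) & (LINE - 1))
			printf("flush      0x%lx..0x%lx\n", start, end);
		else
			printf("invalidate 0x%lx..0x%lx\n", start, end);
		break;
	case TO_DEVICE:
		printf("clean      0x%lx..0x%lx\n", start, end);  /* writeback */
		break;
	case BIDIRECTIONAL:
		printf("flush      0x%lx..0x%lx\n", start, end);  /* wb + inv  */
		break;
	}
}

int main(void)
{
	sync_range(0x1000, 0x1400, FROM_DEVICE);  /* aligned: invalidate */
	sync_range(0x1004, 0x1400, FROM_DEVICE);  /* unaligned: flush    */
	sync_range(0x2000, 0x2400, TO_DEVICE);    /* clean (writeback)   */
	return 0;
}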
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * PPC44x/36-bit changes by Matt Porter ([email protected]) * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ #include <linux/memblock.h> #include <linux/highmem.h> #include <linux/suspend.h> #include <linux/dma-direct.h> #include <asm/swiotlb.h> #include <asm/machdep.h> #include <asm/rtas.h> #include <asm/kasan.h> #include <asm/svm.h> #include <asm/mmzone.h> #include <asm/ftrace.h> #include <asm/code-patching.h> #include <asm/setup.h> #include <mm/mmu_decl.h> unsigned long long memory_limit; unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { if (ppc_md.phys_mem_access_prot) return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot); if (!page_is_ram(pfn)) vma_prot = pgprot_noncached(vma_prot); return vma_prot; } EXPORT_SYMBOL(phys_mem_access_prot); #ifdef CONFIG_MEMORY_HOTPLUG static DEFINE_MUTEX(linear_mapping_mutex); #ifdef CONFIG_NUMA int memory_add_physaddr_to_nid(u64 start) { return hot_add_scn_to_nid(start); } EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif int __weak create_section_mapping(unsigned long start, unsigned long end, int nid, pgprot_t prot) { return -ENODEV; } int __weak remove_section_mapping(unsigned long start, unsigned long end) { return -ENODEV; } int __ref arch_create_linear_mapping(int nid, u64 start, u64 size, struct mhp_params *params) { int rc; start = (unsigned long)__va(start); mutex_lock(&linear_mapping_mutex); rc = create_section_mapping(start, start + size, nid, params->pgprot); mutex_unlock(&linear_mapping_mutex); if (rc) { pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n", start, start + size, rc); return -EFAULT; } return 0; } void __ref arch_remove_linear_mapping(u64 start, u64 size) { int ret; /* Remove htab bolted mappings for this section of memory */ start = (unsigned long)__va(start); mutex_lock(&linear_mapping_mutex); ret = remove_section_mapping(start, start + size); mutex_unlock(&linear_mapping_mutex); if (ret) pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n", start, start + size, ret); /* Ensure all vmalloc mappings are flushed in case they also * hit that section of memory */ vm_unmap_aliases(); } /* * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need * updating. 
*/ static void update_end_of_memory_vars(u64 start, u64 size) { unsigned long end_pfn = PFN_UP(start + size); if (end_pfn > max_pfn) { max_pfn = end_pfn; max_low_pfn = end_pfn; high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; } } int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, struct mhp_params *params) { int ret; ret = __add_pages(nid, start_pfn, nr_pages, params); if (ret) return ret; /* update max_pfn, max_low_pfn and high_memory */ update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); return ret; } int __ref arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; int rc; rc = arch_create_linear_mapping(nid, start, size, params); if (rc) return rc; rc = add_pages(nid, start_pfn, nr_pages, params); if (rc) arch_remove_linear_mapping(start, size); return rc; } void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; __remove_pages(start_pfn, nr_pages, altmap); arch_remove_linear_mapping(start, size); } #endif #ifndef CONFIG_NUMA void __init mem_topology_setup(void) { max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; min_low_pfn = MEMORY_START >> PAGE_SHIFT; #ifdef CONFIG_HIGHMEM max_low_pfn = lowmem_end_addr >> PAGE_SHIFT; #endif /* Place all memblock_regions in the same node and merge contiguous * memblock_regions */ memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); } void __init initmem_init(void) { sparse_init(); } /* mark pages that don't exist as nosave */ static int __init mark_nonram_nosave(void) { unsigned long spfn, epfn, prev = 0; int i; for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) { if (prev && prev < spfn) register_nosave_region(prev, spfn); prev = epfn; } return 0; } #else /* CONFIG_NUMA */ static int __init mark_nonram_nosave(void) { return 0; } #endif /* * Zones usage: * * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be * everything else. GFP_DMA32 page allocations automatically fall back to * ZONE_DMA. * * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the * generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by * ZONE_DMA. */ static unsigned long max_zone_pfns[MAX_NR_ZONES]; /* * paging_init() sets up the page tables - in fact we've already done this. */ void __init paging_init(void) { unsigned long long total_ram = memblock_phys_mem_size(); phys_addr_t top_of_ram = memblock_end_of_DRAM(); #ifdef CONFIG_HIGHMEM unsigned long v = __fix_to_virt(FIX_KMAP_END); unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN); for (; v < end; v += PAGE_SIZE) map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */ map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */ pkmap_page_table = virt_to_kpte(PKMAP_BASE); #endif /* CONFIG_HIGHMEM */ printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n", (unsigned long long)top_of_ram, total_ram); printk(KERN_DEBUG "Memory hole size: %ldMB\n", (long int)((top_of_ram - total_ram) >> 20)); /* * Allow 30-bit DMA for very limited Broadcom wifi chips on many * powerbooks. 
*/ if (IS_ENABLED(CONFIG_PPC32)) zone_dma_bits = 30; else zone_dma_bits = 31; #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 1UL << (zone_dma_bits - PAGE_SHIFT)); #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; #ifdef CONFIG_HIGHMEM max_zone_pfns[ZONE_HIGHMEM] = max_pfn; #endif free_area_init(max_zone_pfns); mark_nonram_nosave(); } void __init mem_init(void) { /* * book3s is limited to 16 page sizes due to encoding this in * a 4-bit field for slices. */ BUILD_BUG_ON(MMU_PAGE_COUNT > 16); #ifdef CONFIG_SWIOTLB /* * Some platforms (e.g. 85xx) limit DMA-able memory way below * 4G. We force memblock to bottom-up mode to ensure that the * memory allocated in swiotlb_init() is DMA-able. * As it's the last memblock allocation, no need to reset it * back to to-down. */ memblock_set_bottom_up(true); swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags); #endif high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); set_max_mapnr(max_pfn); kasan_late_init(); memblock_free_all(); #ifdef CONFIG_HIGHMEM { unsigned long pfn, highmem_mapnr; highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT; for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT; struct page *page = pfn_to_page(pfn); if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr)) free_highmem_page(page); } } #endif /* CONFIG_HIGHMEM */ #if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP) /* * If smp is enabled, next_tlbcam_idx is initialized in the cpu up * functions.... do it here for the non-smp case. */ per_cpu(next_tlbcam_idx, smp_processor_id()) = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; #endif #ifdef CONFIG_PPC32 pr_info("Kernel virtual memory layout:\n"); #ifdef CONFIG_KASAN pr_info(" * 0x%08lx..0x%08lx : kasan shadow mem\n", KASAN_SHADOW_START, KASAN_SHADOW_END); #endif pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); #ifdef CONFIG_HIGHMEM pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n", PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP)); #endif /* CONFIG_HIGHMEM */ if (ioremap_bot != IOREMAP_TOP) pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", ioremap_bot, IOREMAP_TOP); pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", VMALLOC_START, VMALLOC_END); #ifdef MODULES_VADDR pr_info(" * 0x%08lx..0x%08lx : modules\n", MODULES_VADDR, MODULES_END); #endif #endif /* CONFIG_PPC32 */ } void free_initmem(void) { ppc_md.progress = ppc_printk_progress; mark_initmem_nx(); free_initmem_default(POISON_FREE_INITMEM); ftrace_free_init_tramp(); } /* * System memory should not be in /proc/iomem but various tools expect it * (eg kdump). */ static int __init add_system_ram_resources(void) { phys_addr_t start, end; u64 i; for_each_mem_range(i, &start, &end) { struct resource *res; res = kzalloc(sizeof(struct resource), GFP_KERNEL); WARN_ON(!res); if (res) { res->name = "System RAM"; res->start = start; /* * In memblock, end points to the first byte after * the range while in resourses, end points to the * last byte in the range. */ res->end = end - 1; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; WARN_ON(request_resource(&iomem_resource, res) < 0); } } return 0; } subsys_initcall(add_system_ram_resources); #ifdef CONFIG_STRICT_DEVMEM /* * devmem_is_allowed(): check to see if /dev/mem access to a certain address * is valid. The argument is a physical page number. * * Access has to be given to non-kernel-ram areas as well, these contain the * PCI mmio resources as well as potential bios/acpi data regions. 
*/ int devmem_is_allowed(unsigned long pfn) { if (page_is_rtas_user_buf(pfn)) return 1; if (iomem_is_exclusive(PFN_PHYS(pfn))) return 0; if (!page_is_ram(pfn)) return 1; return 0; } #endif /* CONFIG_STRICT_DEVMEM */ /* * This is defined in kernel/resource.c but only powerpc needs to export it, for * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed. */ EXPORT_SYMBOL_GPL(walk_system_ram_range);
linux-master
arch/powerpc/mm/mem.c
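paging_init() above caps ZONE_DMA at min(max_low_pfn, 1 << (zone_dma_bits - PAGE_SHIFT)), with zone_dma_bits set to 30 on 32-bit and 31 otherwise. The short sketch below works through that arithmetic with an assumed 4 KB PAGE_SHIFT and an example max_low_pfn; the values are placeholders, not the kernel's globals.

/* Illustrative only: the ZONE_DMA pfn limit computed in paging_init(). */
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;          /* assumed 4 KB pages      */
	unsigned long max_low_pfn = 0x80000;   /* example: 2 GB of lowmem */

	for (unsigned int zone_dma_bits = 30; zone_dma_bits <= 31; zone_dma_bits++) {
		unsigned long dma_limit = 1UL << (zone_dma_bits - page_shift);
		unsigned long zone_dma_pfns =
			max_low_pfn < dma_limit ? max_low_pfn : dma_limit;

		printf("zone_dma_bits=%u -> ZONE_DMA up to pfn 0x%lx (%lu MB)\n",
		       zone_dma_bits, zone_dma_pfns,
		       zone_dma_pfns >> (20 - page_shift));
	}
	return 0;
}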
// SPDX-License-Identifier: GPL-2.0-or-later #include <linux/io.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <mm/mmu_decl.h> void __iomem *ioremap_wt(phys_addr_t addr, unsigned long size) { pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL); return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_wt); void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller) { unsigned long v; phys_addr_t p, offset; int err; /* * If the address lies within the first 16 MB, assume it's in ISA * memory space */ if (addr < SZ_16M) addr += _ISA_MEM_BASE; /* * Choose an address to map it to. * Once the vmalloc system is running, we use it. * Before then, we use space going down from IOREMAP_TOP * (ioremap_bot records where we're up to). */ p = addr & PAGE_MASK; offset = addr & ~PAGE_MASK; size = PAGE_ALIGN(addr + size) - p; #ifndef CONFIG_CRASH_DUMP /* * Don't allow anybody to remap normal RAM that we're using. * mem_init() sets high_memory so only do the check after that. */ if (slab_is_available() && p <= virt_to_phys(high_memory - 1) && page_is_ram(__phys_to_pfn(p))) { pr_warn("%s(): phys addr 0x%llx is RAM lr %ps\n", __func__, (unsigned long long)p, __builtin_return_address(0)); return NULL; } #endif if (size == 0) return NULL; /* * Is it already mapped? Perhaps overlapped by a previous * mapping. */ v = p_block_mapped(p); if (v) return (void __iomem *)v + offset; if (slab_is_available()) return generic_ioremap_prot(addr, size, prot); /* * Should check if it is a candidate for a BAT mapping */ pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller); err = early_ioremap_range(ioremap_bot - size - PAGE_SIZE, p, size, prot); if (err) return NULL; ioremap_bot -= size + PAGE_SIZE; return (void __iomem *)ioremap_bot + offset; } void iounmap(volatile void __iomem *addr) { /* * If mapped by BATs then there is nothing to do. * Calling vfree() generates a benign warning. */ if (v_block_mapped((unsigned long)addr)) return; generic_iounmap(addr); } EXPORT_SYMBOL(iounmap);
linux-master
arch/powerpc/mm/ioremap_32.c
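Before the vmalloc allocator is available, __ioremap_caller() above page-aligns the request (physical page base, in-page offset, rounded-up size) and hands out virtual space downwards from IOREMAP_TOP, leaving one unmapped guard page between mappings. The sketch below replays that bookkeeping in user space; IOREMAP_TOP and PAGE_SIZE are made-up constants and the "map" is just a print.

/* Illustrative only: page-align an ioremap request and carve its VA from a
 * bump-down allocator with a guard page, as early __ioremap_caller() does. */
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define IOREMAP_TOP   0xff000000UL   /* assumed ceiling */

static unsigned long ioremap_bot = IOREMAP_TOP;

static unsigned long early_ioremap_sketch(unsigned long addr, unsigned long size)
{
	unsigned long p = addr & PAGE_MASK;          /* physical page base */
	unsigned long offset = addr & ~PAGE_MASK;    /* offset inside page */

	size = PAGE_ALIGN(addr + size) - p;          /* whole pages to map */
	ioremap_bot -= size + PAGE_SIZE;             /* keep a guard page  */
	printf("map phys 0x%lx (+0x%lx) size 0x%lx at va 0x%lx\n",
	       p, offset, size, ioremap_bot);
	return ioremap_bot + offset;
}

int main(void)
{
	printf("-> va 0x%lx\n", early_ioremap_sketch(0x80001234, 0x100));
	printf("-> va 0x%lx\n", early_ioremap_sketch(0x90000000, 0x2000));
	return 0;
}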
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Dave Engebretsen <[email protected]> * Rework for PPC64 port. */ #undef DEBUG #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/stddef.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/highmem.h> #include <linux/idr.h> #include <linux/nodemask.h> #include <linux/module.h> #include <linux/poison.h> #include <linux/memblock.h> #include <linux/hugetlb.h> #include <linux/slab.h> #include <linux/of_fdt.h> #include <linux/libfdt.h> #include <linux/memremap.h> #include <linux/memory.h> #include <asm/pgalloc.h> #include <asm/page.h> #include <asm/prom.h> #include <asm/rtas.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/mmu.h> #include <linux/uaccess.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/tlb.h> #include <asm/eeh.h> #include <asm/processor.h> #include <asm/mmzone.h> #include <asm/cputable.h> #include <asm/sections.h> #include <asm/iommu.h> #include <asm/vdso.h> #include <asm/hugetlb.h> #include <mm/mmu_decl.h> #ifdef CONFIG_SPARSEMEM_VMEMMAP /* * Given an address within the vmemmap, determine the page that * represents the start of the subsection it is within. Note that we have to * do this by hand as the proffered address may not be correctly aligned. * Subtraction of non-aligned pointers produces undefined results. */ static struct page * __meminit vmemmap_subsection_start(unsigned long vmemmap_addr) { unsigned long start_pfn; unsigned long offset = vmemmap_addr - ((unsigned long)(vmemmap)); /* Return the pfn of the start of the section. */ start_pfn = (offset / sizeof(struct page)) & PAGE_SUBSECTION_MASK; return pfn_to_page(start_pfn); } /* * Since memory is added in sub-section chunks, before creating a new vmemmap * mapping, the kernel should check whether there is an existing memmap mapping * covering the new subsection added. This is needed because kernel can map * vmemmap area using 16MB pages which will cover a memory range of 16G. Such * a range covers multiple subsections (2M) * * If any subsection in the 16G range mapped by vmemmap is valid we consider the * vmemmap populated (There is a page table entry already present). We can't do * a page table lookup here because with the hash translation we don't keep * vmemmap details in linux page table. */ int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size) { struct page *start; unsigned long vmemmap_end = vmemmap_addr + vmemmap_map_size; start = vmemmap_subsection_start(vmemmap_addr); for (; (unsigned long)start < vmemmap_end; start += PAGES_PER_SUBSECTION) /* * pfn valid check here is intended to really check * whether we have any subsection already initialized * in this range. */ if (pfn_valid(page_to_pfn(start))) return 1; return 0; } /* * vmemmap virtual address space management does not have a traditional page * table to track which virtual struct pages are backed by physical mapping. * The virtual to physical mappings are tracked in a simple linked list * format. 
'vmemmap_list' maintains the entire vmemmap physical mapping at * all times where as the 'next' list maintains the available * vmemmap_backing structures which have been deleted from the * 'vmemmap_global' list during system runtime (memory hotplug remove * operation). The freed 'vmemmap_backing' structures are reused later when * new requests come in without allocating fresh memory. This pointer also * tracks the allocated 'vmemmap_backing' structures as we allocate one * full page memory at a time when we dont have any. */ struct vmemmap_backing *vmemmap_list; static struct vmemmap_backing *next; /* * The same pointer 'next' tracks individual chunks inside the allocated * full page during the boot time and again tracks the freed nodes during * runtime. It is racy but it does not happen as they are separated by the * boot process. Will create problem if some how we have memory hotplug * operation during boot !! */ static int num_left; static int num_freed; static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node) { struct vmemmap_backing *vmem_back; /* get from freed entries first */ if (num_freed) { num_freed--; vmem_back = next; next = next->list; return vmem_back; } /* allocate a page when required and hand out chunks */ if (!num_left) { next = vmemmap_alloc_block(PAGE_SIZE, node); if (unlikely(!next)) { WARN_ON(1); return NULL; } num_left = PAGE_SIZE / sizeof(struct vmemmap_backing); } num_left--; return next++; } static __meminit int vmemmap_list_populate(unsigned long phys, unsigned long start, int node) { struct vmemmap_backing *vmem_back; vmem_back = vmemmap_list_alloc(node); if (unlikely(!vmem_back)) { pr_debug("vmemap list allocation failed\n"); return -ENOMEM; } vmem_back->phys = phys; vmem_back->virt_addr = start; vmem_back->list = vmemmap_list; vmemmap_list = vmem_back; return 0; } bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start, unsigned long page_size) { unsigned long nr_pfn = page_size / sizeof(struct page); unsigned long start_pfn = page_to_pfn((struct page *)start); if ((start_pfn + nr_pfn - 1) > altmap->end_pfn) return true; if (start_pfn < altmap->base_pfn) return true; return false; } static int __meminit __vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { bool altmap_alloc; unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; /* Align to the page size of the linear mapping. */ start = ALIGN_DOWN(start, page_size); pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node); for (; start < end; start += page_size) { void *p = NULL; int rc; /* * This vmemmap range is backing different subsections. If any * of that subsection is marked valid, that means we already * have initialized a page table covering this range and hence * the vmemmap range is populated. */ if (vmemmap_populated(start, page_size)) continue; /* * Allocate from the altmap first if we have one. This may * fail due to alignment issues when using 16MB hugepages, so * fall back to system memory if the altmap allocation fail. 
*/ if (altmap && !altmap_cross_boundary(altmap, start, page_size)) { p = vmemmap_alloc_block_buf(page_size, node, altmap); if (!p) pr_debug("altmap block allocation failed, falling back to system memory"); else altmap_alloc = true; } if (!p) { p = vmemmap_alloc_block_buf(page_size, node, NULL); altmap_alloc = false; } if (!p) return -ENOMEM; if (vmemmap_list_populate(__pa(p), start, node)) { /* * If we don't populate vmemap list, we don't have * the ability to free the allocated vmemmap * pages in section_deactivate. Hence free them * here. */ int nr_pfns = page_size >> PAGE_SHIFT; unsigned long page_order = get_order(page_size); if (altmap_alloc) vmem_altmap_free(altmap, nr_pfns); else free_pages((unsigned long)p, page_order); return -ENOMEM; } pr_debug(" * %016lx..%016lx allocated at %p\n", start, start + page_size, p); rc = vmemmap_create_mapping(start, page_size, __pa(p)); if (rc < 0) { pr_warn("%s: Unable to create vmemmap mapping: %d\n", __func__, rc); return -EFAULT; } } return 0; } int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { #ifdef CONFIG_PPC_BOOK3S_64 if (radix_enabled()) return radix__vmemmap_populate(start, end, node, altmap); #endif return __vmemmap_populate(start, end, node, altmap); } #ifdef CONFIG_MEMORY_HOTPLUG static unsigned long vmemmap_list_free(unsigned long start) { struct vmemmap_backing *vmem_back, *vmem_back_prev; vmem_back_prev = vmem_back = vmemmap_list; /* look for it with prev pointer recorded */ for (; vmem_back; vmem_back = vmem_back->list) { if (vmem_back->virt_addr == start) break; vmem_back_prev = vmem_back; } if (unlikely(!vmem_back)) return 0; /* remove it from vmemmap_list */ if (vmem_back == vmemmap_list) /* remove head */ vmemmap_list = vmem_back->list; else vmem_back_prev->list = vmem_back->list; /* next point to this freed entry */ vmem_back->list = next; next = vmem_back; num_freed++; return vmem_back->phys; } static void __ref __vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) { unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; unsigned long page_order = get_order(page_size); unsigned long alt_start = ~0, alt_end = ~0; unsigned long base_pfn; start = ALIGN_DOWN(start, page_size); if (altmap) { alt_start = altmap->base_pfn; alt_end = altmap->base_pfn + altmap->reserve + altmap->free; } pr_debug("vmemmap_free %lx...%lx\n", start, end); for (; start < end; start += page_size) { unsigned long nr_pages, addr; struct page *page; /* * We have already marked the subsection we are trying to remove * invalid. So if we want to remove the vmemmap range, we * need to make sure there is no subsection marked valid * in this range. 
*/ if (vmemmap_populated(start, page_size)) continue; addr = vmemmap_list_free(start); if (!addr) continue; page = pfn_to_page(addr >> PAGE_SHIFT); nr_pages = 1 << page_order; base_pfn = PHYS_PFN(addr); if (base_pfn >= alt_start && base_pfn < alt_end) { vmem_altmap_free(altmap, nr_pages); } else if (PageReserved(page)) { /* allocated from bootmem */ if (page_size < PAGE_SIZE) { /* * this shouldn't happen, but if it is * the case, leave the memory there */ WARN_ON_ONCE(1); } else { while (nr_pages--) free_reserved_page(page++); } } else { free_pages((unsigned long)(__va(addr)), page_order); } vmemmap_remove_mapping(start, page_size); } } void __ref vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) { #ifdef CONFIG_PPC_BOOK3S_64 if (radix_enabled()) return radix__vmemmap_free(start, end, altmap); #endif return __vmemmap_free(start, end, altmap); } #endif void register_page_bootmem_memmap(unsigned long section_nr, struct page *start_page, unsigned long size) { } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ #ifdef CONFIG_PPC_BOOK3S_64 unsigned int mmu_lpid_bits; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE EXPORT_SYMBOL_GPL(mmu_lpid_bits); #endif unsigned int mmu_pid_bits; static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); static int __init parse_disable_radix(char *p) { bool val; if (!p) val = true; else if (kstrtobool(p, &val)) return -EINVAL; disable_radix = val; return 0; } early_param("disable_radix", parse_disable_radix); /* * If we're running under a hypervisor, we need to check the contents of * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do * radix. If not, we clear the radix feature bit so we fall back to hash. */ static void __init early_check_vec5(void) { unsigned long root, chosen; int size; const u8 *vec5; u8 mmu_supported; root = of_get_flat_dt_root(); chosen = of_get_flat_dt_subnode_by_name(root, "chosen"); if (chosen == -FDT_ERR_NOTFOUND) { cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; return; } vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size); if (!vec5) { cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; return; } if (size <= OV5_INDX(OV5_MMU_SUPPORT)) { cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; return; } /* Check for supported configuration */ mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] & OV5_FEAT(OV5_MMU_SUPPORT); if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) { /* Hypervisor only supports radix - check enabled && GTSE */ if (!early_radix_enabled()) { pr_warn("WARNING: Ignoring cmdline option disable_radix\n"); } if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] & OV5_FEAT(OV5_RADIX_GTSE))) { cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE; } else cur_cpu_spec->mmu_features |= MMU_FTR_GTSE; /* Do radix anyway - the hypervisor said we had to */ cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX; } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) { /* Hypervisor only supports hash - disable radix */ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE; } } static int __init dt_scan_mmu_pid_width(unsigned long node, const char *uname, int depth, void *data) { int size = 0; const __be32 *prop; const char *type = of_get_flat_dt_prop(node, "device_type", NULL); /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; /* Find MMU LPID, PID register size */ prop = of_get_flat_dt_prop(node, "ibm,mmu-lpid-bits", &size); if (prop && size == 4) mmu_lpid_bits = be32_to_cpup(prop); prop = of_get_flat_dt_prop(node, 
"ibm,mmu-pid-bits", &size); if (prop && size == 4) mmu_pid_bits = be32_to_cpup(prop); if (!mmu_pid_bits && !mmu_lpid_bits) return 0; return 1; } /* * Outside hotplug the kernel uses this value to map the kernel direct map * with radix. To be compatible with older kernels, let's keep this value * as 16M which is also SECTION_SIZE with SPARSEMEM. We can ideally map * things with 1GB size in the case where we don't support hotplug. */ #ifndef CONFIG_MEMORY_HOTPLUG #define DEFAULT_MEMORY_BLOCK_SIZE SZ_16M #else #define DEFAULT_MEMORY_BLOCK_SIZE MIN_MEMORY_BLOCK_SIZE #endif static void update_memory_block_size(unsigned long *block_size, unsigned long mem_size) { unsigned long min_memory_block_size = DEFAULT_MEMORY_BLOCK_SIZE; for (; *block_size > min_memory_block_size; *block_size >>= 2) { if ((mem_size & *block_size) == 0) break; } } static int __init probe_memory_block_size(unsigned long node, const char *uname, int depth, void *data) { const char *type; unsigned long *block_size = (unsigned long *)data; const __be32 *reg, *endp; int l; if (depth != 1) return 0; /* * If we have dynamic-reconfiguration-memory node, use the * lmb value. */ if (strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) { const __be32 *prop; prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &l); if (!prop || l < dt_root_size_cells * sizeof(__be32)) /* * Nothing in the device tree */ *block_size = DEFAULT_MEMORY_BLOCK_SIZE; else *block_size = of_read_number(prop, dt_root_size_cells); /* * We have found the final value. Don't probe further. */ return 1; } /* * Find all the device tree nodes of memory type and make sure * the area can be mapped using the memory block size value * we end up using. We start with 1G value and keep reducing * it such that we can map the entire area using memory_block_size. * This will be used on powernv and older pseries that don't * have ibm,lmb-size node. * For ex: with P5 we can end up with * memory@0 -> 128MB * memory@128M -> 64M * This will end up using 64MB memory block size value. */ type = of_get_flat_dt_prop(node, "device_type", NULL); if (type == NULL || strcmp(type, "memory") != 0) return 0; reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l); if (!reg) reg = of_get_flat_dt_prop(node, "reg", &l); if (!reg) return 0; endp = reg + (l / sizeof(__be32)); while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { const char *compatible; u64 size; dt_mem_next_cell(dt_root_addr_cells, &reg); size = dt_mem_next_cell(dt_root_size_cells, &reg); if (size) { update_memory_block_size(block_size, size); continue; } /* * ibm,coherent-device-memory with linux,usable-memory = 0 * Force 256MiB block size. Work around for GPUs on P9 PowerNV * linux,usable-memory == 0 implies driver managed memory and * we can't use large memory block size due to hotplug/unplug * limitations. */ compatible = of_get_flat_dt_prop(node, "compatible", NULL); if (compatible && !strcmp(compatible, "ibm,coherent-device-memory")) { if (*block_size > SZ_256M) *block_size = SZ_256M; /* * We keep 256M as the upper limit with GPU present. */ return 0; } } /* continue looking for other memory device types */ return 0; } /* * start with 1G memory block size. Early init will * fix this with correct value. */ unsigned long memory_block_size __ro_after_init = 1UL << 30; static void __init early_init_memory_block_size(void) { /* * We need to do memory_block_size probe early so that * radix__early_init_mmu() can use this as limit for * mapping page size. 
*/ of_scan_flat_dt(probe_memory_block_size, &memory_block_size); } void __init mmu_early_init_devtree(void) { bool hvmode = !!(mfmsr() & MSR_HV); /* Disable radix mode based on kernel command line. */ if (disable_radix) { if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU)) cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; else pr_warn("WARNING: Ignoring cmdline option disable_radix\n"); } of_scan_flat_dt(dt_scan_mmu_pid_width, NULL); if (hvmode && !mmu_lpid_bits) { if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) mmu_lpid_bits = 12; /* POWER8-10 */ else mmu_lpid_bits = 10; /* POWER7 */ } if (!mmu_pid_bits) { if (early_cpu_has_feature(CPU_FTR_ARCH_300)) mmu_pid_bits = 20; /* POWER9-10 */ } /* * Check /chosen/ibm,architecture-vec-5 if running as a guest. * When running bare-metal, we can use radix if we like * even though the ibm,architecture-vec-5 property created by * skiboot doesn't have the necessary bits set. */ if (!hvmode) early_check_vec5(); early_init_memory_block_size(); if (early_radix_enabled()) { radix__early_init_devtree(); /* * We have finalized the translation we are going to use by now. * Radix mode is not limited by RMA / VRMA addressing. * Hence don't limit memblock allocations. */ ppc64_rma_size = ULONG_MAX; memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); } else hash__early_init_devtree(); if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE)) hugetlbpage_init_defaultsize(); if (!(cur_cpu_spec->mmu_features & MMU_FTR_HPTE_TABLE) && !(cur_cpu_spec->mmu_features & MMU_FTR_TYPE_RADIX)) panic("kernel does not support any MMU type offered by platform"); } #endif /* CONFIG_PPC_BOOK3S_64 */
linux-master
arch/powerpc/mm/init_64.c
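vmemmap_list_alloc() in the file above reuses freed vmemmap_backing entries from a singly linked free list before carving new ones out of a page-sized chunk, sharing the single 'next' pointer between both roles. The stand-alone sketch below mimics that reuse-then-carve behaviour with malloc standing in for vmemmap_alloc_block() and an assumed chunk of 8 entries; it is an illustration of the pattern, not the kernel allocator.

/* Illustrative only: the reuse-then-carve allocator behind vmemmap_list_alloc(). */
#include <stdio.h>
#include <stdlib.h>

struct backing {                 /* analogous to struct vmemmap_backing */
	struct backing *list;
	unsigned long virt_addr;
};

#define CHUNK 8                  /* entries carved per allocation (assumed) */

static struct backing *next;     /* free-list head / next unused slot */
static int num_left, num_freed;

static struct backing *backing_alloc(void)
{
	if (num_freed) {             /* reuse a freed entry first */
		struct backing *b = next;
		next = next->list;
		num_freed--;
		return b;
	}
	if (!num_left) {             /* carve a fresh chunk */
		next = calloc(CHUNK, sizeof(*next));
		if (!next)
			return NULL;
		num_left = CHUNK;
	}
	num_left--;
	return next++;
}

static void backing_free(struct backing *b)
{
	b->list = next;              /* push onto the free list */
	next = b;
	num_freed++;
}

int main(void)
{
	struct backing *a = backing_alloc(), *b = backing_alloc();

	if (!a || !b)
		return 1;
	printf("a=%p b=%p\n", (void *)a, (void *)b);
	backing_free(a);
	printf("reused=%p (same pointer as a)\n", (void *)backing_alloc());
	return 0;
}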
// SPDX-License-Identifier: GPL-2.0-or-later #include <linux/highmem.h> #include <linux/kprobes.h> /** * flush_coherent_icache() - if a CPU has a coherent icache, flush it * Return true if the cache was flushed, false otherwise */ static inline bool flush_coherent_icache(void) { /* * For a snooping icache, we still need a dummy icbi to purge all the * prefetched instructions from the ifetch buffers. We also need a sync * before the icbi to order the actual stores to memory that might * have modified instructions with the icbi. */ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { mb(); /* sync */ icbi((void *)PAGE_OFFSET); mb(); /* sync */ isync(); return true; } return false; } /** * invalidate_icache_range() - Flush the icache by issuing icbi across an address range * @start: the start address * @stop: the stop address (exclusive) */ static void invalidate_icache_range(unsigned long start, unsigned long stop) { unsigned long shift = l1_icache_shift(); unsigned long bytes = l1_icache_bytes(); char *addr = (char *)(start & ~(bytes - 1)); unsigned long size = stop - (unsigned long)addr + (bytes - 1); unsigned long i; for (i = 0; i < size >> shift; i++, addr += bytes) icbi(addr); mb(); /* sync */ isync(); } /** * flush_icache_range: Write any modified data cache blocks out to memory * and invalidate the corresponding blocks in the instruction cache * * Generic code will call this after writing memory, before executing from it. * * @start: the start address * @stop: the stop address (exclusive) */ void flush_icache_range(unsigned long start, unsigned long stop) { if (flush_coherent_icache()) return; clean_dcache_range(start, stop); if (IS_ENABLED(CONFIG_44x)) { /* * Flash invalidate on 44x because we are passed kmapped * addresses and this doesn't work for userspace pages due to * the virtually tagged icache. */ iccci((void *)start); mb(); /* sync */ isync(); } else invalidate_icache_range(start, stop); } EXPORT_SYMBOL(flush_icache_range); #ifdef CONFIG_HIGHMEM /** * flush_dcache_icache_phys() - Flush a page by it's physical address * @physaddr: the physical address of the page */ static void flush_dcache_icache_phys(unsigned long physaddr) { unsigned long bytes = l1_dcache_bytes(); unsigned long nb = PAGE_SIZE / bytes; unsigned long addr = physaddr & PAGE_MASK; unsigned long msr, msr0; unsigned long loop1 = addr, loop2 = addr; msr0 = mfmsr(); msr = msr0 & ~MSR_DR; /* * This must remain as ASM to prevent potential memory accesses * while the data MMU is disabled */ asm volatile( " mtctr %2;\n" " mtmsr %3;\n" " isync;\n" "0: dcbst 0, %0;\n" " addi %0, %0, %4;\n" " bdnz 0b;\n" " sync;\n" " mtctr %2;\n" "1: icbi 0, %1;\n" " addi %1, %1, %4;\n" " bdnz 1b;\n" " sync;\n" " mtmsr %5;\n" " isync;\n" : "+&r" (loop1), "+&r" (loop2) : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0) : "ctr", "memory"); } NOKPROBE_SYMBOL(flush_dcache_icache_phys) #else static void flush_dcache_icache_phys(unsigned long physaddr) { } #endif /** * __flush_dcache_icache(): Flush a particular page from the data cache to RAM. * Note: this is necessary because the instruction cache does *not* * snoop from the data cache. * * @p: the address of the page to flush */ static void __flush_dcache_icache(void *p) { unsigned long addr = (unsigned long)p & PAGE_MASK; clean_dcache_range(addr, addr + PAGE_SIZE); /* * We don't flush the icache on 44x. Those have a virtual icache and we * don't have access to the virtual address here (it's not the page * vaddr but where it's mapped in user space). 
The flushing of the * icache on these is handled elsewhere, when a change in the address * space occurs, before returning to user space. */ if (mmu_has_feature(MMU_FTR_TYPE_44x)) return; invalidate_icache_range(addr, addr + PAGE_SIZE); } void flush_dcache_icache_folio(struct folio *folio) { unsigned int i, nr = folio_nr_pages(folio); if (flush_coherent_icache()) return; if (!folio_test_highmem(folio)) { void *addr = folio_address(folio); for (i = 0; i < nr; i++) __flush_dcache_icache(addr + i * PAGE_SIZE); } else if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) { for (i = 0; i < nr; i++) { void *start = kmap_local_folio(folio, i * PAGE_SIZE); __flush_dcache_icache(start); kunmap_local(start); } } else { unsigned long pfn = folio_pfn(folio); for (i = 0; i < nr; i++) flush_dcache_icache_phys((pfn + i) * PAGE_SIZE); } } EXPORT_SYMBOL(flush_dcache_icache_folio); void clear_user_page(void *page, unsigned long vaddr, struct page *pg) { clear_page(page); /* * We shouldn't have to do this, but some versions of glibc * require it (ld.so assumes zero filled pages are icache clean) * - Anton */ flush_dcache_page(pg); } EXPORT_SYMBOL(clear_user_page); void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, struct page *pg) { copy_page(vto, vfrom); /* * We should be able to use the following optimisation, however * there are two problems. * Firstly a bug in some versions of binutils meant PLT sections * were not marked executable. * Secondly the first word in the GOT section is blrl, used * to establish the GOT address. Until recently the GOT was * not marked executable. * - Anton */ #if 0 if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0)) return; #endif flush_dcache_page(pg); } void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len) { void *maddr; maddr = kmap_local_page(page) + (addr & ~PAGE_MASK); flush_icache_range((unsigned long)maddr, (unsigned long)maddr + len); kunmap_local(maddr); }
linux-master
arch/powerpc/mm/cacheflush.c
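invalidate_icache_range() above rounds the start address down to a cache-line boundary and then issues one icbi per line up to the exclusive stop address. The sketch below reproduces that line arithmetic with an assumed 128-byte instruction-cache line and a print in place of the icbi instruction.

/* Illustrative only: cache-line rounding used by invalidate_icache_range(). */
#include <stdio.h>

int main(void)
{
	unsigned long bytes = 128, shift = 7;        /* assumed I-cache line */
	unsigned long start = 0x10000024, stop = 0x10000210;

	unsigned long addr = start & ~(bytes - 1);   /* align start downward */
	unsigned long size = stop - addr + (bytes - 1);

	for (unsigned long i = 0; i < (size >> shift); i++, addr += bytes)
		printf("icbi 0x%lx\n", addr);        /* one op per line      */
	return 0;
}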
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Dave Engebretsen <[email protected]> * Rework for PPC64 port. */ #undef DEBUG #include <linux/string.h> #include <linux/pgtable.h> #include <asm/pgalloc.h> #include <asm/kup.h> #include <asm/smp.h> phys_addr_t memstart_addr __ro_after_init = (phys_addr_t)~0ull; EXPORT_SYMBOL_GPL(memstart_addr); phys_addr_t kernstart_addr __ro_after_init; EXPORT_SYMBOL_GPL(kernstart_addr); unsigned long kernstart_virt_addr __ro_after_init = KERNELBASE; EXPORT_SYMBOL_GPL(kernstart_virt_addr); bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP); bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP); static int __init parse_nosmep(char *p) { if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64)) return 0; disable_kuep = true; pr_warn("Disabling Kernel Userspace Execution Prevention\n"); return 0; } early_param("nosmep", parse_nosmep); static int __init parse_nosmap(char *p) { disable_kuap = true; pr_warn("Disabling Kernel Userspace Access Protection\n"); return 0; } early_param("nosmap", parse_nosmap); void __weak setup_kuep(bool disabled) { if (!IS_ENABLED(CONFIG_PPC_KUEP) || disabled) return; if (smp_processor_id() != boot_cpuid) return; pr_info("Activating Kernel Userspace Execution Prevention\n"); } void setup_kup(void) { setup_kuap(disable_kuap); setup_kuep(disable_kuep); } #define CTOR(shift) static void ctor_##shift(void *addr) \ { \ memset(addr, 0, sizeof(void *) << (shift)); \ } CTOR(0); CTOR(1); CTOR(2); CTOR(3); CTOR(4); CTOR(5); CTOR(6); CTOR(7); CTOR(8); CTOR(9); CTOR(10); CTOR(11); CTOR(12); CTOR(13); CTOR(14); CTOR(15); static inline void (*ctor(int shift))(void *) { BUILD_BUG_ON(MAX_PGTABLE_INDEX_SIZE != 15); switch (shift) { case 0: return ctor_0; case 1: return ctor_1; case 2: return ctor_2; case 3: return ctor_3; case 4: return ctor_4; case 5: return ctor_5; case 6: return ctor_6; case 7: return ctor_7; case 8: return ctor_8; case 9: return ctor_9; case 10: return ctor_10; case 11: return ctor_11; case 12: return ctor_12; case 13: return ctor_13; case 14: return ctor_14; case 15: return ctor_15; } return NULL; } struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE + 1]; EXPORT_SYMBOL_GPL(pgtable_cache); /* used by kvm_hv module */ /* * Create a kmem_cache() for pagetables. This is not used for PTE * pages - they're linked to struct page, come from the normal free * pages pool and have a different entry size (see real_pte_t) to * everything else. Caches created by this function are used for all * the higher level pagetables, and for hugepage pagetables. */ void pgtable_cache_add(unsigned int shift) { char *name; unsigned long table_size = sizeof(void *) << shift; unsigned long align = table_size; /* When batching pgtable pointers for RCU freeing, we store * the index size in the low bits. Table alignment must be * big enough to fit it. * * Likewise, hugeapge pagetable pointers contain a (different) * shift value in the low bits. All tables must be aligned so * as to leave enough 0 bits in the address to contain it. 
*/ unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1, HUGEPD_SHIFT_MASK + 1); struct kmem_cache *new; /* It would be nice if this was a BUILD_BUG_ON(), but at the * moment, gcc doesn't seem to recognize is_power_of_2 as a * constant expression, so so much for that. */ BUG_ON(!is_power_of_2(minalign)); BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); if (PGT_CACHE(shift)) return; /* Already have a cache of this size */ align = max_t(unsigned long, align, minalign); name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); new = kmem_cache_create(name, table_size, align, 0, ctor(shift)); if (!new) panic("Could not allocate pgtable cache for order %d", shift); kfree(name); pgtable_cache[shift] = new; pr_debug("Allocated pgtable cache for order %d\n", shift); } EXPORT_SYMBOL_GPL(pgtable_cache_add); /* used by kvm_hv module */ void pgtable_cache_init(void) { pgtable_cache_add(PGD_INDEX_SIZE); if (PMD_CACHE_INDEX) pgtable_cache_add(PMD_CACHE_INDEX); /* * In all current configs, when the PUD index exists it's the * same size as either the pgd or pmd index except with THP enabled * on book3s 64 */ if (PUD_CACHE_INDEX) pgtable_cache_add(PUD_CACHE_INDEX); }
linux-master
arch/powerpc/mm/init-common.c
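pgtable_cache_add() above enforces a minimum alignment (minalign) precisely so that the low bits of every table pointer stay zero and can later carry the table's index size when pointers are batched for RCU freeing. The following stand-alone sketch shows the underlying trick of stashing and recovering a small tag in the low bits of a sufficiently aligned pointer; the 4-bit tag width and aligned_alloc usage are assumptions for the demo, not the kernel's layout.

/* Illustrative only: a small index stored in the low bits of an aligned
 * pointer, the trick that motivates the minalign check in pgtable_cache_add(). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_BITS 4                      /* assumed: index fits in 4 bits */
#define TAG_MASK ((1UL << TAG_BITS) - 1)

int main(void)
{
	/* alignment of 1 << TAG_BITS guarantees the low TAG_BITS are zero */
	void *table = aligned_alloc(1UL << TAG_BITS, 64);
	unsigned long index = 9;            /* example index size */
	uintptr_t tagged;

	if (!table)
		return 1;
	tagged = (uintptr_t)table | index;
	printf("table=%p index=%lu -> tagged=0x%lx\n",
	       table, index, (unsigned long)tagged);
	printf("recovered table=%p index=%lu\n",
	       (void *)(tagged & ~TAG_MASK), (unsigned long)(tagged & TAG_MASK));

	free(table);
	return 0;
}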
// SPDX-License-Identifier: GPL-2.0-or-later #include <linux/io.h> #include <linux/slab.h> #include <linux/mmzone.h> #include <linux/vmalloc.h> #include <asm/io-workarounds.h> unsigned long ioremap_bot; EXPORT_SYMBOL(ioremap_bot); void __iomem *ioremap(phys_addr_t addr, unsigned long size) { pgprot_t prot = pgprot_noncached(PAGE_KERNEL); void *caller = __builtin_return_address(0); if (iowa_is_active()) return iowa_ioremap(addr, size, prot, caller); return __ioremap_caller(addr, size, prot, caller); } EXPORT_SYMBOL(ioremap); void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size) { pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL); void *caller = __builtin_return_address(0); if (iowa_is_active()) return iowa_ioremap(addr, size, prot, caller); return __ioremap_caller(addr, size, prot, caller); } EXPORT_SYMBOL(ioremap_wc); void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size) { pgprot_t prot = pgprot_cached(PAGE_KERNEL); void *caller = __builtin_return_address(0); if (iowa_is_active()) return iowa_ioremap(addr, size, prot, caller); return __ioremap_caller(addr, size, prot, caller); } void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags) { pte_t pte = __pte(flags); void *caller = __builtin_return_address(0); /* writeable implies dirty for kernel addresses */ if (pte_write(pte)) pte = pte_mkdirty(pte); /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */ pte = pte_exprotect(pte); pte = pte_mkprivileged(pte); if (iowa_is_active()) return iowa_ioremap(addr, size, pte_pgprot(pte), caller); return __ioremap_caller(addr, size, pte_pgprot(pte), caller); } EXPORT_SYMBOL(ioremap_prot); int early_ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot) { unsigned long i; for (i = 0; i < size; i += PAGE_SIZE) { int err = map_kernel_page(ea + i, pa + i, prot); if (WARN_ON_ONCE(err)) /* Should clean up */ return err; } return 0; }
linux-master
arch/powerpc/mm/ioremap.c
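ioremap_prot() above normalizes the caller-supplied flags before mapping: a writable mapping is also marked dirty, and the user and execute permissions are stripped so they cannot leak into a kernel I/O mapping. The sketch below shows that normalization pattern with invented flag bits; these constants are placeholders and not the powerpc PTE layout.

/* Illustrative only: flag sanitization in the spirit of ioremap_prot(),
 * using made-up bit values rather than the real powerpc PTE bits. */
#include <stdio.h>

#define F_WRITE 0x01UL
#define F_DIRTY 0x02UL
#define F_EXEC  0x04UL
#define F_USER  0x08UL

static unsigned long sanitize(unsigned long flags)
{
	if (flags & F_WRITE)          /* writeable implies dirty for kernel maps */
		flags |= F_DIRTY;
	flags &= ~(F_EXEC | F_USER);  /* never leak exec/user into an I/O map */
	return flags;
}

int main(void)
{
	unsigned long in = F_WRITE | F_EXEC | F_USER;

	printf("0x%lx -> 0x%lx\n", in, sanitize(in));
	return 0;
}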
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains common routines for dealing with free of page tables * Along with common page table handling code * * Derived from arch/powerpc/mm/tlb_64.c: * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Dave Engebretsen <[email protected]> * Rework for PPC64 port. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <linux/hugetlb.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/hugetlb.h> #include <asm/pte-walk.h> #ifdef CONFIG_PPC64 #define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD) #else #define PGD_ALIGN PAGE_SIZE #endif pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN); static inline int is_exec_fault(void) { return current->thread.regs && TRAP(current->thread.regs) == 0x400; } /* We only try to do i/d cache coherency on stuff that looks like * reasonably "normal" PTEs. We currently require a PTE to be present * and we avoid _PAGE_SPECIAL and cache inhibited pte. We also only do that * on userspace PTEs */ static inline int pte_looks_normal(pte_t pte) { if (pte_present(pte) && !pte_special(pte)) { if (pte_ci(pte)) return 0; if (pte_user(pte)) return 1; } return 0; } static struct folio *maybe_pte_to_folio(pte_t pte) { unsigned long pfn = pte_pfn(pte); struct page *page; if (unlikely(!pfn_valid(pfn))) return NULL; page = pfn_to_page(pfn); if (PageReserved(page)) return NULL; return page_folio(page); } #ifdef CONFIG_PPC_BOOK3S /* Server-style MMU handles coherency when hashing if HW exec permission * is supposed per page (currently 64-bit only). If not, then, we always * flush the cache for valid PTEs in set_pte. Embedded CPU without HW exec * support falls into the same category. */ static pte_t set_pte_filter_hash(pte_t pte) { pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) || cpu_has_feature(CPU_FTR_NOEXECUTE))) { struct folio *folio = maybe_pte_to_folio(pte); if (!folio) return pte; if (!test_bit(PG_dcache_clean, &folio->flags)) { flush_dcache_icache_folio(folio); set_bit(PG_dcache_clean, &folio->flags); } } return pte; } #else /* CONFIG_PPC_BOOK3S */ static pte_t set_pte_filter_hash(pte_t pte) { return pte; } #endif /* CONFIG_PPC_BOOK3S */ /* Embedded type MMU with HW exec support. This is a bit more complicated * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so * instead we "filter out" the exec permission for non clean pages. 
*/ static inline pte_t set_pte_filter(pte_t pte) { struct folio *folio; if (radix_enabled()) return pte; if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) return set_pte_filter_hash(pte); /* No exec permission in the first place, move on */ if (!pte_exec(pte) || !pte_looks_normal(pte)) return pte; /* If you set _PAGE_EXEC on weird pages you're on your own */ folio = maybe_pte_to_folio(pte); if (unlikely(!folio)) return pte; /* If the page clean, we move on */ if (test_bit(PG_dcache_clean, &folio->flags)) return pte; /* If it's an exec fault, we flush the cache and make it clean */ if (is_exec_fault()) { flush_dcache_icache_folio(folio); set_bit(PG_dcache_clean, &folio->flags); return pte; } /* Else, we filter out _PAGE_EXEC */ return pte_exprotect(pte); } static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, int dirty) { struct folio *folio; if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) return pte; if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) return pte; /* So here, we only care about exec faults, as we use them * to recover lost _PAGE_EXEC and perform I$/D$ coherency * if necessary. Also if _PAGE_EXEC is already set, same deal, * we just bail out */ if (dirty || pte_exec(pte) || !is_exec_fault()) return pte; #ifdef CONFIG_DEBUG_VM /* So this is an exec fault, _PAGE_EXEC is not set. If it was * an error we would have bailed out earlier in do_page_fault() * but let's make sure of it */ if (WARN_ON(!(vma->vm_flags & VM_EXEC))) return pte; #endif /* CONFIG_DEBUG_VM */ /* If you set _PAGE_EXEC on weird pages you're on your own */ folio = maybe_pte_to_folio(pte); if (unlikely(!folio)) goto bail; /* If the page is already clean, we move on */ if (test_bit(PG_dcache_clean, &folio->flags)) goto bail; /* Clean the page and set PG_dcache_clean */ flush_dcache_icache_folio(folio); set_bit(PG_dcache_clean, &folio->flags); bail: return pte_mkexec(pte); } /* * set_pte stores a linux PTE into the linux page table. */ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) { /* * Make sure hardware valid bit is not set. We don't do * tlb flush for this update. */ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep)); /* Note: mm->context.id might not yet have been assigned as * this context might not have been activated yet when this * is called. */ pte = set_pte_filter(pte); /* Perform the setting of the PTE */ arch_enter_lazy_mmu_mode(); for (;;) { __set_pte_at(mm, addr, ptep, pte, 0); if (--nr == 0) break; ptep++; pte = __pte(pte_val(pte) + (1UL << PTE_RPN_SHIFT)); addr += PAGE_SIZE; } arch_leave_lazy_mmu_mode(); } void unmap_kernel_page(unsigned long va) { pmd_t *pmdp = pmd_off_k(va); pte_t *ptep = pte_offset_kernel(pmdp, va); pte_clear(&init_mm, va, ptep); flush_tlb_kernel_range(va, va + PAGE_SIZE); } /* * This is called when relaxing access to a PTE. It's also called in the page * fault path when we don't hit any of the major fault cases, ie, a minor * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... 
The generic code will have * handled those two for us, we additionally deal with missing execute * permission here on some processors */ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty) { int changed; entry = set_access_flags_filter(entry, vma, dirty); changed = !pte_same(*(ptep), entry); if (changed) { assert_pte_locked(vma->vm_mm, address); __ptep_set_access_flags(vma, ptep, entry, address, mmu_virtual_psize); } return changed; } #ifdef CONFIG_HUGETLB_PAGE int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) { #ifdef HUGETLB_NEED_PRELOAD /* * The "return 1" forces a call of update_mmu_cache, which will write a * TLB entry. Without this, platforms that don't do a write of the TLB * entry in the TLB miss handler asm will fault ad infinitum. */ ptep_set_access_flags(vma, addr, ptep, pte, dirty); return 1; #else int changed, psize; pte = set_access_flags_filter(pte, vma, dirty); changed = !pte_same(*(ptep), pte); if (changed) { #ifdef CONFIG_PPC_BOOK3S_64 struct hstate *h = hstate_vma(vma); psize = hstate_get_psize(h); #ifdef CONFIG_DEBUG_VM assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep)); #endif #else /* * Not used on non book3s64 platforms. * 8xx compares it with mmu_virtual_psize to * know if it is a huge page or not. */ psize = MMU_PAGE_COUNT; #endif __ptep_set_access_flags(vma, ptep, pte, addr, psize); } return changed; #endif } #if defined(CONFIG_PPC_8xx) void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { pmd_t *pmd = pmd_off(mm, addr); pte_basic_t val; pte_basic_t *entry = (pte_basic_t *)ptep; int num, i; /* * Make sure hardware valid bit is not set. We don't do * tlb flush for this update. */ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep)); pte = set_pte_filter(pte); val = pte_val(pte); num = number_of_cells_per_pte(pmd, val, 1); for (i = 0; i < num; i++, entry++, val += SZ_4K) *entry = val; } #endif #endif /* CONFIG_HUGETLB_PAGE */ #ifdef CONFIG_DEBUG_VM void assert_pte_locked(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; if (mm == &init_mm) return; pgd = mm->pgd + pgd_index(addr); BUG_ON(pgd_none(*pgd)); p4d = p4d_offset(pgd, addr); BUG_ON(p4d_none(*p4d)); pud = pud_offset(p4d, addr); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, addr); /* * khugepaged to collapse normal pages to hugepage, first set * pmd to none to force page fault/gup to take mmap_lock. After * pmd is set to none, we do a pte_clear which does this assertion * so if we find pmd none, return. */ if (pmd_none(*pmd)) return; pte = pte_offset_map_nolock(mm, pmd, addr, &ptl); BUG_ON(!pte); assert_spin_locked(ptl); pte_unmap(pte); } #endif /* CONFIG_DEBUG_VM */ unsigned long vmalloc_to_phys(void *va) { unsigned long pfn = vmalloc_to_pfn(va); BUG_ON(!pfn); return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va); } EXPORT_SYMBOL_GPL(vmalloc_to_phys); /* * We have 4 cases for pgds and pmds: * (1) invalid (all zeroes) * (2) pointer to next table, as normal; bottom 6 bits == 0 * (3) leaf pte for huge page _PAGE_PTE set * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table * * So long as we atomically load page table pointers we are safe against teardown, * we can follow the address down to the page and take a ref on it. * This function need to be called with interrupts disabled. 
We use this variant * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED */ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, bool *is_thp, unsigned *hpage_shift) { pgd_t *pgdp; p4d_t p4d, *p4dp; pud_t pud, *pudp; pmd_t pmd, *pmdp; pte_t *ret_pte; hugepd_t *hpdp = NULL; unsigned pdshift; if (hpage_shift) *hpage_shift = 0; if (is_thp) *is_thp = false; /* * Always operate on the local stack value. This make sure the * value don't get updated by a parallel THP split/collapse, * page fault or a page unmap. The return pte_t * is still not * stable. So should be checked there for above conditions. * Top level is an exception because it is folded into p4d. */ pgdp = pgdir + pgd_index(ea); p4dp = p4d_offset(pgdp, ea); p4d = READ_ONCE(*p4dp); pdshift = P4D_SHIFT; if (p4d_none(p4d)) return NULL; if (p4d_is_leaf(p4d)) { ret_pte = (pte_t *)p4dp; goto out; } if (is_hugepd(__hugepd(p4d_val(p4d)))) { hpdp = (hugepd_t *)&p4d; goto out_huge; } /* * Even if we end up with an unmap, the pgtable will not * be freed, because we do an rcu free and here we are * irq disabled */ pdshift = PUD_SHIFT; pudp = pud_offset(&p4d, ea); pud = READ_ONCE(*pudp); if (pud_none(pud)) return NULL; if (pud_is_leaf(pud)) { ret_pte = (pte_t *)pudp; goto out; } if (is_hugepd(__hugepd(pud_val(pud)))) { hpdp = (hugepd_t *)&pud; goto out_huge; } pdshift = PMD_SHIFT; pmdp = pmd_offset(&pud, ea); pmd = READ_ONCE(*pmdp); /* * A hugepage collapse is captured by this condition, see * pmdp_collapse_flush. */ if (pmd_none(pmd)) return NULL; #ifdef CONFIG_PPC_BOOK3S_64 /* * A hugepage split is captured by this condition, see * pmdp_invalidate. * * Huge page modification can be caught here too. */ if (pmd_is_serializing(pmd)) return NULL; #endif if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) { if (is_thp) *is_thp = true; ret_pte = (pte_t *)pmdp; goto out; } if (pmd_is_leaf(pmd)) { ret_pte = (pte_t *)pmdp; goto out; } if (is_hugepd(__hugepd(pmd_val(pmd)))) { hpdp = (hugepd_t *)&pmd; goto out_huge; } return pte_offset_kernel(&pmd, ea); out_huge: if (!hpdp) return NULL; ret_pte = hugepte_offset(*hpdp, ea, pdshift); pdshift = hugepd_shift(*hpdp); out: if (hpage_shift) *hpage_shift = pdshift; return ret_pte; } EXPORT_SYMBOL_GPL(__find_linux_pte); /* Note due to the way vm flags are laid out, the bits are XWR */ const pgprot_t protection_map[16] = { [VM_NONE] = PAGE_NONE, [VM_READ] = PAGE_READONLY, [VM_WRITE] = PAGE_COPY, [VM_WRITE | VM_READ] = PAGE_COPY, [VM_EXEC] = PAGE_READONLY_X, [VM_EXEC | VM_READ] = PAGE_READONLY_X, [VM_EXEC | VM_WRITE] = PAGE_COPY_X, [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X, [VM_SHARED] = PAGE_NONE, [VM_SHARED | VM_READ] = PAGE_READONLY, [VM_SHARED | VM_WRITE] = PAGE_SHARED, [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, [VM_SHARED | VM_EXEC] = PAGE_READONLY_X, [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X, [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X, [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X }; #ifndef CONFIG_PPC_BOOK3S_64 DECLARE_VM_GET_PAGE_PROT #endif
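The exec filtering done by set_pte_filter() above reduces to a three-way decision: keep an already-coherent executable page as is, flush and mark the page clean when the access is an exec fault, or strip the exec permission and defer the flush otherwise. A minimal userspace sketch of that decision, with the kernel's pte/folio machinery replaced by plain booleans (all names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative mirror of the set_pte_filter() decision for embedded MMUs
 * with hardware exec permission; not kernel code. */
enum exec_filter_action {
    KEEP_EXEC,            /* page already I/D coherent, leave the PTE alone */
    FLUSH_THEN_KEEP_EXEC, /* exec fault: flush D$/I$ and mark the page clean */
    STRIP_EXEC,           /* not an exec fault: drop the exec permission for now */
};

static enum exec_filter_action filter_exec_pte(bool pte_exec,
                                               bool page_dcache_clean,
                                               bool is_exec_fault)
{
    if (!pte_exec)
        return KEEP_EXEC;            /* nothing to filter */
    if (page_dcache_clean)
        return KEEP_EXEC;            /* caches already coherent for this page */
    if (is_exec_fault)
        return FLUSH_THEN_KEEP_EXEC; /* pay for the flush now */
    return STRIP_EXEC;               /* a later exec fault will pay for it */
}

int main(void)
{
    printf("exec pte, dirty page, exec fault -> %d\n", filter_exec_pte(true, false, true));
    printf("exec pte, dirty page, data fault -> %d\n", filter_exec_pte(true, false, false));
    printf("exec pte, clean page             -> %d\n", filter_exec_pte(true, true, false));
    return 0;
}

Stripping the exec permission is recoverable: the next exec fault runs through set_access_flags_filter(), which performs the flush, sets PG_dcache_clean and re-adds execute permission via pte_mkexec().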
linux-master
arch/powerpc/mm/pgtable.c
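The protection_map[] table at the end of pgtable.c is indexed directly with the low four vm_flags bits (read/write/exec/shared), which is what the "bits are XWR" remark refers to. A small self-contained illustration of that indexing, with the real pgprot_t values replaced by strings and the VM_* constants written out as the generic Linux values (an assumption of the sketch, not taken from this file):

#include <stdio.h>

/* Generic Linux values for the low vm_flags bits (assumed for the sketch). */
#define VM_NONE   0x0UL
#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

/* Stand-in for the pgprot_t table: private writable mappings resolve to
 * copy-on-write protections, shared ones to really writable protections. */
static const char *prot_name[16] = {
    [VM_NONE]                                  = "PAGE_NONE",
    [VM_READ]                                  = "PAGE_READONLY",
    [VM_WRITE]                                 = "PAGE_COPY",
    [VM_WRITE | VM_READ]                       = "PAGE_COPY",
    [VM_EXEC]                                  = "PAGE_READONLY_X",
    [VM_EXEC | VM_READ]                        = "PAGE_READONLY_X",
    [VM_EXEC | VM_WRITE]                       = "PAGE_COPY_X",
    [VM_EXEC | VM_WRITE | VM_READ]             = "PAGE_COPY_X",
    [VM_SHARED]                                = "PAGE_NONE",
    [VM_SHARED | VM_READ]                      = "PAGE_READONLY",
    [VM_SHARED | VM_WRITE]                     = "PAGE_SHARED",
    [VM_SHARED | VM_WRITE | VM_READ]           = "PAGE_SHARED",
    [VM_SHARED | VM_EXEC]                      = "PAGE_READONLY_X",
    [VM_SHARED | VM_EXEC | VM_READ]            = "PAGE_READONLY_X",
    [VM_SHARED | VM_EXEC | VM_WRITE]           = "PAGE_SHARED_X",
    [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = "PAGE_SHARED_X",
};

int main(void)
{
    unsigned long private_rw = VM_READ | VM_WRITE;
    unsigned long shared_rw  = private_rw | VM_SHARED;

    /* The table index is simply the low four flag bits. */
    printf("private rw -> %s\n", prot_name[private_rw & 0xf]); /* copy-on-write */
    printf("shared  rw -> %s\n", prot_name[shared_rw & 0xf]);  /* truly writable */
    return 0;
}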
// SPDX-License-Identifier: GPL-2.0-or-later /* * pSeries NUMA support * * Copyright (C) 2002 Anton Blanchard <[email protected]>, IBM */ #define pr_fmt(fmt) "numa: " fmt #include <linux/threads.h> #include <linux/memblock.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/export.h> #include <linux/nodemask.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/pfn.h> #include <linux/cpuset.h> #include <linux/node.h> #include <linux/stop_machine.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <asm/cputhreads.h> #include <asm/sparsemem.h> #include <asm/smp.h> #include <asm/topology.h> #include <asm/firmware.h> #include <asm/paca.h> #include <asm/hvcall.h> #include <asm/setup.h> #include <asm/vdso.h> #include <asm/vphn.h> #include <asm/drmem.h> static int numa_enabled = 1; static char *cmdline __initdata; int numa_cpu_lookup_table[NR_CPUS]; cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; struct pglist_data *node_data[MAX_NUMNODES]; EXPORT_SYMBOL(numa_cpu_lookup_table); EXPORT_SYMBOL(node_to_cpumask_map); EXPORT_SYMBOL(node_data); static int primary_domain_index; static int n_mem_addr_cells, n_mem_size_cells; #define FORM0_AFFINITY 0 #define FORM1_AFFINITY 1 #define FORM2_AFFINITY 2 static int affinity_form; #define MAX_DISTANCE_REF_POINTS 4 static int distance_ref_points_depth; static const __be32 *distance_ref_points; static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS]; static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 } }; static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE }; /* * Allocate node_to_cpumask_map based on number of available nodes * Requires node_possible_map to be valid. * * Note: cpumask_of_node() is not valid until after this is done. */ static void __init setup_node_to_cpumask_map(void) { unsigned int node; /* setup nr_node_ids if not done yet */ if (nr_node_ids == MAX_NUMNODES) setup_nr_node_ids(); /* allocate the map */ for_each_node(node) alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); /* cpumask_of_node() will now work */ pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); } static int __init fake_numa_create_new_node(unsigned long end_pfn, unsigned int *nid) { unsigned long long mem; char *p = cmdline; static unsigned int fake_nid; static unsigned long long curr_boundary; /* * Modify node id, iff we started creating NUMA nodes * We want to continue from where we left of the last time */ if (fake_nid) *nid = fake_nid; /* * In case there are no more arguments to parse, the * node_id should be the same as the last fake node id * (we've handled this above). 
*/ if (!p) return 0; mem = memparse(p, &p); if (!mem) return 0; if (mem < curr_boundary) return 0; curr_boundary = mem; if ((end_pfn << PAGE_SHIFT) > mem) { /* * Skip commas and spaces */ while (*p == ',' || *p == ' ' || *p == '\t') p++; cmdline = p; fake_nid++; *nid = fake_nid; pr_debug("created new fake_node with id %d\n", fake_nid); return 1; } return 0; } static void __init reset_numa_cpu_lookup_table(void) { unsigned int cpu; for_each_possible_cpu(cpu) numa_cpu_lookup_table[cpu] = -1; } void map_cpu_to_node(int cpu, int node) { update_numa_cpu_lookup_table(cpu, node); if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) { pr_debug("adding cpu %d to node %d\n", cpu, node); cpumask_set_cpu(cpu, node_to_cpumask_map[node]); } } #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR) void unmap_cpu_from_node(unsigned long cpu) { int node = numa_cpu_lookup_table[cpu]; if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); pr_debug("removing cpu %lu from node %d\n", cpu, node); } else { pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node); } } #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */ static int __associativity_to_nid(const __be32 *associativity, int max_array_sz) { int nid; /* * primary_domain_index is 1 based array index. */ int index = primary_domain_index - 1; if (!numa_enabled || index >= max_array_sz) return NUMA_NO_NODE; nid = of_read_number(&associativity[index], 1); /* POWER4 LPAR uses 0xffff as invalid node */ if (nid == 0xffff || nid >= nr_node_ids) nid = NUMA_NO_NODE; return nid; } /* * Returns nid in the range [0..nr_node_ids], or -1 if no useful NUMA * info is found. */ static int associativity_to_nid(const __be32 *associativity) { int array_sz = of_read_number(associativity, 1); /* Skip the first element in the associativity array */ return __associativity_to_nid((associativity + 1), array_sz); } static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc) { int dist; int node1, node2; node1 = associativity_to_nid(cpu1_assoc); node2 = associativity_to_nid(cpu2_assoc); dist = numa_distance_table[node1][node2]; if (dist <= LOCAL_DISTANCE) return 0; else if (dist <= REMOTE_DISTANCE) return 1; else return 2; } static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc) { int dist = 0; int i, index; for (i = 0; i < distance_ref_points_depth; i++) { index = be32_to_cpu(distance_ref_points[i]); if (cpu1_assoc[index] == cpu2_assoc[index]) break; dist++; } return dist; } int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc) { /* We should not get called with FORM0 */ VM_WARN_ON(affinity_form == FORM0_AFFINITY); if (affinity_form == FORM1_AFFINITY) return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc); return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc); } /* must hold reference to node during call */ static const __be32 *of_get_associativity(struct device_node *dev) { return of_get_property(dev, "ibm,associativity", NULL); } int __node_distance(int a, int b) { int i; int distance = LOCAL_DISTANCE; if (affinity_form == FORM2_AFFINITY) return numa_distance_table[a][b]; else if (affinity_form == FORM0_AFFINITY) return ((a == b) ? 
LOCAL_DISTANCE : REMOTE_DISTANCE); for (i = 0; i < distance_ref_points_depth; i++) { if (distance_lookup_table[a][i] == distance_lookup_table[b][i]) break; /* Double the distance for each NUMA level */ distance *= 2; } return distance; } EXPORT_SYMBOL(__node_distance); /* Returns the nid associated with the given device tree node, * or -1 if not found. */ static int of_node_to_nid_single(struct device_node *device) { int nid = NUMA_NO_NODE; const __be32 *tmp; tmp = of_get_associativity(device); if (tmp) nid = associativity_to_nid(tmp); return nid; } /* Walk the device tree upwards, looking for an associativity id */ int of_node_to_nid(struct device_node *device) { int nid = NUMA_NO_NODE; of_node_get(device); while (device) { nid = of_node_to_nid_single(device); if (nid != -1) break; device = of_get_next_parent(device); } of_node_put(device); return nid; } EXPORT_SYMBOL(of_node_to_nid); static void __initialize_form1_numa_distance(const __be32 *associativity, int max_array_sz) { int i, nid; if (affinity_form != FORM1_AFFINITY) return; nid = __associativity_to_nid(associativity, max_array_sz); if (nid != NUMA_NO_NODE) { for (i = 0; i < distance_ref_points_depth; i++) { const __be32 *entry; int index = be32_to_cpu(distance_ref_points[i]) - 1; /* * broken hierarchy, return with broken distance table */ if (WARN(index >= max_array_sz, "Broken ibm,associativity property")) return; entry = &associativity[index]; distance_lookup_table[nid][i] = of_read_number(entry, 1); } } } static void initialize_form1_numa_distance(const __be32 *associativity) { int array_sz; array_sz = of_read_number(associativity, 1); /* Skip the first element in the associativity array */ __initialize_form1_numa_distance(associativity + 1, array_sz); } /* * Used to update distance information w.r.t newly added node. */ void update_numa_distance(struct device_node *node) { int nid; if (affinity_form == FORM0_AFFINITY) return; else if (affinity_form == FORM1_AFFINITY) { const __be32 *associativity; associativity = of_get_associativity(node); if (!associativity) return; initialize_form1_numa_distance(associativity); return; } /* FORM2 affinity */ nid = of_node_to_nid_single(node); if (nid == NUMA_NO_NODE) return; /* * With FORM2 we expect NUMA distance of all possible NUMA * nodes to be provided during boot. */ WARN(numa_distance_table[nid][nid] == -1, "NUMA distance details for node %d not provided\n", nid); } EXPORT_SYMBOL_GPL(update_numa_distance); /* * ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN} * ibm,numa-distance-table = { N, 1, 2, 4, 5, 1, 6, .... 
N elements} */ static void __init initialize_form2_numa_distance_lookup_table(void) { int i, j; struct device_node *root; const __u8 *form2_distances; const __be32 *numa_lookup_index; int form2_distances_length; int max_numa_index, distance_index; if (firmware_has_feature(FW_FEATURE_OPAL)) root = of_find_node_by_path("/ibm,opal"); else root = of_find_node_by_path("/rtas"); if (!root) root = of_find_node_by_path("/"); numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL); max_numa_index = of_read_number(&numa_lookup_index[0], 1); /* first element of the array is the size and is encode-int */ form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL); form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1); /* Skip the size which is encoded int */ form2_distances += sizeof(__be32); pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n", form2_distances_length, max_numa_index); for (i = 0; i < max_numa_index; i++) /* +1 skip the max_numa_index in the property */ numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1); if (form2_distances_length != max_numa_index * max_numa_index) { WARN(1, "Wrong NUMA distance information\n"); form2_distances = NULL; // don't use it } distance_index = 0; for (i = 0; i < max_numa_index; i++) { for (j = 0; j < max_numa_index; j++) { int nodeA = numa_id_index_table[i]; int nodeB = numa_id_index_table[j]; int dist; if (form2_distances) dist = form2_distances[distance_index++]; else if (nodeA == nodeB) dist = LOCAL_DISTANCE; else dist = REMOTE_DISTANCE; numa_distance_table[nodeA][nodeB] = dist; pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist); } } of_node_put(root); } static int __init find_primary_domain_index(void) { int index; struct device_node *root; /* * Check for which form of affinity. */ if (firmware_has_feature(FW_FEATURE_OPAL)) { affinity_form = FORM1_AFFINITY; } else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) { pr_debug("Using form 2 affinity\n"); affinity_form = FORM2_AFFINITY; } else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) { pr_debug("Using form 1 affinity\n"); affinity_form = FORM1_AFFINITY; } else affinity_form = FORM0_AFFINITY; if (firmware_has_feature(FW_FEATURE_OPAL)) root = of_find_node_by_path("/ibm,opal"); else root = of_find_node_by_path("/rtas"); if (!root) root = of_find_node_by_path("/"); /* * This property is a set of 32-bit integers, each representing * an index into the ibm,associativity nodes. * * With form 0 affinity the first integer is for an SMP configuration * (should be all 0's) and the second is for a normal NUMA * configuration. We have only one level of NUMA. * * With form 1 affinity the first integer is the most significant * NUMA boundary and the following are progressively less significant * boundaries. There can be more than one level of NUMA. */ distance_ref_points = of_get_property(root, "ibm,associativity-reference-points", &distance_ref_points_depth); if (!distance_ref_points) { pr_debug("ibm,associativity-reference-points not found.\n"); goto err; } distance_ref_points_depth /= sizeof(int); if (affinity_form == FORM0_AFFINITY) { if (distance_ref_points_depth < 2) { pr_warn("short ibm,associativity-reference-points\n"); goto err; } index = of_read_number(&distance_ref_points[1], 1); } else { /* * Both FORM1 and FORM2 affinity find the primary domain details * at the same offset. 
*/ index = of_read_number(distance_ref_points, 1); } /* * Warn and cap if the hardware supports more than * MAX_DISTANCE_REF_POINTS domains. */ if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) { pr_warn("distance array capped at %d entries\n", MAX_DISTANCE_REF_POINTS); distance_ref_points_depth = MAX_DISTANCE_REF_POINTS; } of_node_put(root); return index; err: of_node_put(root); return -1; } static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells) { struct device_node *memory = NULL; memory = of_find_node_by_type(memory, "memory"); if (!memory) panic("numa.c: No memory nodes found!"); *n_addr_cells = of_n_addr_cells(memory); *n_size_cells = of_n_size_cells(memory); of_node_put(memory); } static unsigned long read_n_cells(int n, const __be32 **buf) { unsigned long result = 0; while (n--) { result = (result << 32) | of_read_number(*buf, 1); (*buf)++; } return result; } struct assoc_arrays { u32 n_arrays; u32 array_sz; const __be32 *arrays; }; /* * Retrieve and validate the list of associativity arrays for drconf * memory from the ibm,associativity-lookup-arrays property of the * device tree.. * * The layout of the ibm,associativity-lookup-arrays property is a number N * indicating the number of associativity arrays, followed by a number M * indicating the size of each associativity array, followed by a list * of N associativity arrays. */ static int of_get_assoc_arrays(struct assoc_arrays *aa) { struct device_node *memory; const __be32 *prop; u32 len; memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (!memory) return -1; prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len); if (!prop || len < 2 * sizeof(unsigned int)) { of_node_put(memory); return -1; } aa->n_arrays = of_read_number(prop++, 1); aa->array_sz = of_read_number(prop++, 1); of_node_put(memory); /* Now that we know the number of arrays and size of each array, * revalidate the size of the property read in. */ if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int)) return -1; aa->arrays = prop; return 0; } static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb) { struct assoc_arrays aa = { .arrays = NULL }; int default_nid = NUMA_NO_NODE; int nid = default_nid; int rc, index; if ((primary_domain_index < 0) || !numa_enabled) return default_nid; rc = of_get_assoc_arrays(&aa); if (rc) return default_nid; if (primary_domain_index <= aa.array_sz && !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) { const __be32 *associativity; index = lmb->aa_index * aa.array_sz; associativity = &aa.arrays[index]; nid = __associativity_to_nid(associativity, aa.array_sz); if (nid > 0 && affinity_form == FORM1_AFFINITY) { /* * lookup array associativity entries have * no length of the array as the first element. */ __initialize_form1_numa_distance(associativity, aa.array_sz); } } return nid; } /* * This is like of_node_to_nid_single() for memory represented in the * ibm,dynamic-reconfiguration-memory node. 
*/ int of_drconf_to_nid_single(struct drmem_lmb *lmb) { struct assoc_arrays aa = { .arrays = NULL }; int default_nid = NUMA_NO_NODE; int nid = default_nid; int rc, index; if ((primary_domain_index < 0) || !numa_enabled) return default_nid; rc = of_get_assoc_arrays(&aa); if (rc) return default_nid; if (primary_domain_index <= aa.array_sz && !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) { const __be32 *associativity; index = lmb->aa_index * aa.array_sz; associativity = &aa.arrays[index]; nid = __associativity_to_nid(associativity, aa.array_sz); } return nid; } #ifdef CONFIG_PPC_SPLPAR static int __vphn_get_associativity(long lcpu, __be32 *associativity) { long rc, hwid; /* * On a shared lpar, device tree will not have node associativity. * At this time lppaca, or its __old_status field may not be * updated. Hence kernel cannot detect if its on a shared lpar. So * request an explicit associativity irrespective of whether the * lpar is shared or dedicated. Use the device tree property as a * fallback. cpu_to_phys_id is only valid between * smp_setup_cpu_maps() and smp_setup_pacas(). */ if (firmware_has_feature(FW_FEATURE_VPHN)) { if (cpu_to_phys_id) hwid = cpu_to_phys_id[lcpu]; else hwid = get_hard_smp_processor_id(lcpu); rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity); if (rc == H_SUCCESS) return 0; } return -1; } static int vphn_get_nid(long lcpu) { __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; if (!__vphn_get_associativity(lcpu, associativity)) return associativity_to_nid(associativity); return NUMA_NO_NODE; } #else static int __vphn_get_associativity(long lcpu, __be32 *associativity) { return -1; } static int vphn_get_nid(long unused) { return NUMA_NO_NODE; } #endif /* CONFIG_PPC_SPLPAR */ /* * Figure out to which domain a cpu belongs and stick it there. * Return the id of the domain used. */ static int numa_setup_cpu(unsigned long lcpu) { struct device_node *cpu; int fcpu = cpu_first_thread_sibling(lcpu); int nid = NUMA_NO_NODE; if (!cpu_present(lcpu)) { set_cpu_numa_node(lcpu, first_online_node); return first_online_node; } /* * If a valid cpu-to-node mapping is already available, use it * directly instead of querying the firmware, since it represents * the most recent mapping notified to us by the platform (eg: VPHN). * Since cpu_to_node binding remains the same for all threads in the * core. If a valid cpu-to-node mapping is already available, for * the first thread in the core, use it. */ nid = numa_cpu_lookup_table[fcpu]; if (nid >= 0) { map_cpu_to_node(lcpu, nid); return nid; } nid = vphn_get_nid(lcpu); if (nid != NUMA_NO_NODE) goto out_present; cpu = of_get_cpu_node(lcpu, NULL); if (!cpu) { WARN_ON(1); if (cpu_present(lcpu)) goto out_present; else goto out; } nid = of_node_to_nid_single(cpu); of_node_put(cpu); out_present: if (nid < 0 || !node_possible(nid)) nid = first_online_node; /* * Update for the first thread of the core. All threads of a core * have to be part of the same node. This not only avoids querying * for every other thread in the core, but always avoids a case * where virtual node associativity change causes subsequent threads * of a core to be associated with different nid. However if first * thread is already online, expect it to have a valid mapping. 
*/ if (fcpu != lcpu) { WARN_ON(cpu_online(fcpu)); map_cpu_to_node(fcpu, nid); } map_cpu_to_node(lcpu, nid); out: return nid; } static void verify_cpu_node_mapping(int cpu, int node) { int base, sibling, i; /* Verify that all the threads in the core belong to the same node */ base = cpu_first_thread_sibling(cpu); for (i = 0; i < threads_per_core; i++) { sibling = base + i; if (sibling == cpu || cpu_is_offline(sibling)) continue; if (cpu_to_node(sibling) != node) { WARN(1, "CPU thread siblings %d and %d don't belong" " to the same node!\n", cpu, sibling); break; } } } /* Must run before sched domains notifier. */ static int ppc_numa_cpu_prepare(unsigned int cpu) { int nid; nid = numa_setup_cpu(cpu); verify_cpu_node_mapping(cpu, nid); return 0; } static int ppc_numa_cpu_dead(unsigned int cpu) { return 0; } /* * Check and possibly modify a memory region to enforce the memory limit. * * Returns the size the region should have to enforce the memory limit. * This will either be the original value of size, a truncated value, * or zero. If the returned value of size is 0 the region should be * discarded as it lies wholly above the memory limit. */ static unsigned long __init numa_enforce_memory_limit(unsigned long start, unsigned long size) { /* * We use memblock_end_of_DRAM() in here instead of memory_limit because * we've already adjusted it for the limit and it takes care of * having memory holes below the limit. Also, in the case of * iommu_is_off, memory_limit is not set but is implicitly enforced. */ if (start + size <= memblock_end_of_DRAM()) return size; if (start >= memblock_end_of_DRAM()) return 0; return memblock_end_of_DRAM() - start; } /* * Reads the counter for a given entry in * linux,drconf-usable-memory property */ static inline int __init read_usm_ranges(const __be32 **usm) { /* * For each lmb in ibm,dynamic-memory a corresponding * entry in linux,drconf-usable-memory property contains * a counter followed by that many (base, size) duple. * read the counter from linux,drconf-usable-memory */ return read_n_cells(n_mem_size_cells, usm); } /* * Extract NUMA information from the ibm,dynamic-reconfiguration-memory * node. This assumes n_mem_{addr,size}_cells have been set. 
*/ static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb, const __be32 **usm, void *data) { unsigned int ranges, is_kexec_kdump = 0; unsigned long base, size, sz; int nid; /* * Skip this block if the reserved bit is set in flags (0x80) * or if the block is not assigned to this partition (0x8) */ if ((lmb->flags & DRCONF_MEM_RESERVED) || !(lmb->flags & DRCONF_MEM_ASSIGNED)) return 0; if (*usm) is_kexec_kdump = 1; base = lmb->base_addr; size = drmem_lmb_size(); ranges = 1; if (is_kexec_kdump) { ranges = read_usm_ranges(usm); if (!ranges) /* there are no (base, size) duple */ return 0; } do { if (is_kexec_kdump) { base = read_n_cells(n_mem_addr_cells, usm); size = read_n_cells(n_mem_size_cells, usm); } nid = get_nid_and_numa_distance(lmb); fake_numa_create_new_node(((base + size) >> PAGE_SHIFT), &nid); node_set_online(nid); sz = numa_enforce_memory_limit(base, size); if (sz) memblock_set_node(base, sz, &memblock.memory, nid); } while (--ranges); return 0; } static int __init parse_numa_properties(void) { struct device_node *memory; int default_nid = 0; unsigned long i; const __be32 *associativity; if (numa_enabled == 0) { pr_warn("disabled by user\n"); return -1; } primary_domain_index = find_primary_domain_index(); if (primary_domain_index < 0) { /* * if we fail to parse primary_domain_index from device tree * mark the numa disabled, boot with numa disabled. */ numa_enabled = false; return primary_domain_index; } pr_debug("associativity depth for CPU/Memory: %d\n", primary_domain_index); /* * If it is FORM2 initialize the distance table here. */ if (affinity_form == FORM2_AFFINITY) initialize_form2_numa_distance_lookup_table(); /* * Even though we connect cpus to numa domains later in SMP * init, we need to know the node ids now. This is because * each node to be onlined must have NODE_DATA etc backing it. */ for_each_present_cpu(i) { __be32 vphn_assoc[VPHN_ASSOC_BUFSIZE]; struct device_node *cpu; int nid = NUMA_NO_NODE; memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32)); if (__vphn_get_associativity(i, vphn_assoc) == 0) { nid = associativity_to_nid(vphn_assoc); initialize_form1_numa_distance(vphn_assoc); } else { /* * Don't fall back to default_nid yet -- we will plug * cpus into nodes once the memory scan has discovered * the topology. */ cpu = of_get_cpu_node(i, NULL); BUG_ON(!cpu); associativity = of_get_associativity(cpu); if (associativity) { nid = associativity_to_nid(associativity); initialize_form1_numa_distance(associativity); } of_node_put(cpu); } /* node_set_online() is an UB if 'nid' is negative */ if (likely(nid >= 0)) node_set_online(nid); } get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells); for_each_node_by_type(memory, "memory") { unsigned long start; unsigned long size; int nid; int ranges; const __be32 *memcell_buf; unsigned int len; memcell_buf = of_get_property(memory, "linux,usable-memory", &len); if (!memcell_buf || len <= 0) memcell_buf = of_get_property(memory, "reg", &len); if (!memcell_buf || len <= 0) continue; /* ranges in cell */ ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); new_range: /* these are order-sensitive, and modify the buffer pointer */ start = read_n_cells(n_mem_addr_cells, &memcell_buf); size = read_n_cells(n_mem_size_cells, &memcell_buf); /* * Assumption: either all memory nodes or none will * have associativity properties. If none, then * everything goes to default_nid. 
*/ associativity = of_get_associativity(memory); if (associativity) { nid = associativity_to_nid(associativity); initialize_form1_numa_distance(associativity); } else nid = default_nid; fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid); node_set_online(nid); size = numa_enforce_memory_limit(start, size); if (size) memblock_set_node(start, size, &memblock.memory, nid); if (--ranges) goto new_range; } /* * Now do the same thing for each MEMBLOCK listed in the * ibm,dynamic-memory property in the * ibm,dynamic-reconfiguration-memory node. */ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (memory) { walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb); of_node_put(memory); } return 0; } static void __init setup_nonnuma(void) { unsigned long top_of_ram = memblock_end_of_DRAM(); unsigned long total_ram = memblock_phys_mem_size(); unsigned long start_pfn, end_pfn; unsigned int nid = 0; int i; pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram); pr_debug("Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { fake_numa_create_new_node(end_pfn, &nid); memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), &memblock.memory, nid); node_set_online(nid); } } void __init dump_numa_cpu_topology(void) { unsigned int node; unsigned int cpu, count; if (!numa_enabled) return; for_each_online_node(node) { pr_info("Node %d CPUs:", node); count = 0; /* * If we used a CPU iterator here we would miss printing * the holes in the cpumap. */ for (cpu = 0; cpu < nr_cpu_ids; cpu++) { if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { if (count == 0) pr_cont(" %u", cpu); ++count; } else { if (count > 1) pr_cont("-%u", cpu - 1); count = 0; } } if (count > 1) pr_cont("-%u", nr_cpu_ids - 1); pr_cont("\n"); } } /* Initialize NODE_DATA for a node on the local memory */ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) { u64 spanned_pages = end_pfn - start_pfn; const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); u64 nd_pa; void *nd; int tnid; nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); if (!nd_pa) panic("Cannot allocate %zu bytes for node %d data\n", nd_size, nid); nd = __va(nd_pa); /* report and initialize */ pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n", nd_pa, nd_pa + nd_size - 1); tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); if (tnid != nid) pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid); node_data[nid] = nd; memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); NODE_DATA(nid)->node_id = nid; NODE_DATA(nid)->node_start_pfn = start_pfn; NODE_DATA(nid)->node_spanned_pages = spanned_pages; } static void __init find_possible_nodes(void) { struct device_node *rtas; const __be32 *domains = NULL; int prop_length, max_nodes; u32 i; if (!numa_enabled) return; rtas = of_find_node_by_path("/rtas"); if (!rtas) return; /* * ibm,current-associativity-domains is a fairly recent property. If * it doesn't exist, then fallback on ibm,max-associativity-domains. * Current denotes what the platform can support compared to max * which denotes what the Hypervisor can support. * * If the LPAR is migratable, new nodes might be activated after a LPM, * so we should consider the max number in that case. 
*/ if (!of_get_property(of_root, "ibm,migratable-partition", NULL)) domains = of_get_property(rtas, "ibm,current-associativity-domains", &prop_length); if (!domains) { domains = of_get_property(rtas, "ibm,max-associativity-domains", &prop_length); if (!domains) goto out; } max_nodes = of_read_number(&domains[primary_domain_index], 1); pr_info("Partition configured for %d NUMA nodes.\n", max_nodes); for (i = 0; i < max_nodes; i++) { if (!node_possible(i)) node_set(i, node_possible_map); } prop_length /= sizeof(int); if (prop_length > primary_domain_index + 2) coregroup_enabled = 1; out: of_node_put(rtas); } void __init mem_topology_setup(void) { int cpu; max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; min_low_pfn = MEMORY_START >> PAGE_SHIFT; /* * Linux/mm assumes node 0 to be online at boot. However this is not * true on PowerPC, where node 0 is similar to any other node, it * could be cpuless, memoryless node. So force node 0 to be offline * for now. This will prevent cpuless, memoryless node 0 showing up * unnecessarily as online. If a node has cpus or memory that need * to be online, then node will anyway be marked online. */ node_set_offline(0); if (parse_numa_properties()) setup_nonnuma(); /* * Modify the set of possible NUMA nodes to reflect information * available about the set of online nodes, and the set of nodes * that we expect to make use of for this platform's affinity * calculations. */ nodes_and(node_possible_map, node_possible_map, node_online_map); find_possible_nodes(); setup_node_to_cpumask_map(); reset_numa_cpu_lookup_table(); for_each_possible_cpu(cpu) { /* * Powerpc with CONFIG_NUMA always used to have a node 0, * even if it was memoryless or cpuless. For all cpus that * are possible but not present, cpu_to_node() would point * to node 0. To remove a cpuless, memoryless dummy node, * powerpc need to make sure all possible but not present * cpu_to_node are set to a proper node. */ numa_setup_cpu(cpu); } } void __init initmem_init(void) { int nid; memblock_dump_all(); for_each_online_node(nid) { unsigned long start_pfn, end_pfn; get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); setup_node_data(nid, start_pfn, end_pfn); } sparse_init(); /* * We need the numa_cpu_lookup_table to be accurate for all CPUs, * even before we online them, so that we can use cpu_to_{node,mem} * early in boot, cf. smp_prepare_cpus(). * _nocalls() + manual invocation is used because cpuhp is not yet * initialized for the boot CPU. */ cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare", ppc_numa_cpu_prepare, ppc_numa_cpu_dead); } static int __init early_numa(char *p) { if (!p) return 0; if (strstr(p, "off")) numa_enabled = 0; p = strstr(p, "fake="); if (p) cmdline = p + strlen("fake="); return 0; } early_param("numa", early_numa); #ifdef CONFIG_MEMORY_HOTPLUG /* * Find the node associated with a hot added memory section for * memory represented in the device tree by the property * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory. 
*/ static int hot_add_drconf_scn_to_nid(unsigned long scn_addr) { struct drmem_lmb *lmb; unsigned long lmb_size; int nid = NUMA_NO_NODE; lmb_size = drmem_lmb_size(); for_each_drmem_lmb(lmb) { /* skip this block if it is reserved or not assigned to * this partition */ if ((lmb->flags & DRCONF_MEM_RESERVED) || !(lmb->flags & DRCONF_MEM_ASSIGNED)) continue; if ((scn_addr < lmb->base_addr) || (scn_addr >= (lmb->base_addr + lmb_size))) continue; nid = of_drconf_to_nid_single(lmb); break; } return nid; } /* * Find the node associated with a hot added memory section for memory * represented in the device tree as a node (i.e. memory@XXXX) for * each memblock. */ static int hot_add_node_scn_to_nid(unsigned long scn_addr) { struct device_node *memory; int nid = NUMA_NO_NODE; for_each_node_by_type(memory, "memory") { int i = 0; while (1) { struct resource res; if (of_address_to_resource(memory, i++, &res)) break; if ((scn_addr < res.start) || (scn_addr > res.end)) continue; nid = of_node_to_nid_single(memory); break; } if (nid >= 0) break; } of_node_put(memory); return nid; } /* * Find the node associated with a hot added memory section. Section * corresponds to a SPARSEMEM section, not an MEMBLOCK. It is assumed that * sections are fully contained within a single MEMBLOCK. */ int hot_add_scn_to_nid(unsigned long scn_addr) { struct device_node *memory = NULL; int nid; if (!numa_enabled) return first_online_node; memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (memory) { nid = hot_add_drconf_scn_to_nid(scn_addr); of_node_put(memory); } else { nid = hot_add_node_scn_to_nid(scn_addr); } if (nid < 0 || !node_possible(nid)) nid = first_online_node; return nid; } static u64 hot_add_drconf_memory_max(void) { struct device_node *memory = NULL; struct device_node *dn = NULL; const __be64 *lrdr = NULL; dn = of_find_node_by_path("/rtas"); if (dn) { lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL); of_node_put(dn); if (lrdr) return be64_to_cpup(lrdr); } memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (memory) { of_node_put(memory); return drmem_lmb_memory_max(); } return 0; } /* * memory_hotplug_max - return max address of memory that may be added * * This is currently only used on systems that support drconfig memory * hotplug. */ u64 memory_hotplug_max(void) { return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM()); } #endif /* CONFIG_MEMORY_HOTPLUG */ /* Virtual Processor Home Node (VPHN) support */ #ifdef CONFIG_PPC_SPLPAR static int topology_inited; /* * Retrieve the new associativity information for a virtual processor's * home node. */ static long vphn_get_associativity(unsigned long cpu, __be32 *associativity) { long rc; rc = hcall_vphn(get_hard_smp_processor_id(cpu), VPHN_FLAG_VCPU, associativity); switch (rc) { case H_SUCCESS: pr_debug("VPHN hcall succeeded. Reset polling...\n"); goto out; case H_FUNCTION: pr_err_ratelimited("VPHN unsupported. Disabling polling...\n"); break; case H_HARDWARE: pr_err_ratelimited("hcall_vphn() experienced a hardware fault " "preventing VPHN. Disabling polling...\n"); break; case H_PARAMETER: pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. " "Disabling polling...\n"); break; default: pr_err_ratelimited("hcall_vphn() returned %ld. 
Disabling polling...\n" , rc); break; } out: return rc; } void find_and_update_cpu_nid(int cpu) { __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; int new_nid; /* Use associativity from first thread for all siblings */ if (vphn_get_associativity(cpu, associativity)) return; /* Do not have previous associativity, so find it now. */ new_nid = associativity_to_nid(associativity); if (new_nid < 0 || !node_possible(new_nid)) new_nid = first_online_node; else // Associate node <-> cpu, so cpu_up() calls // try_online_node() on the right node. set_cpu_numa_node(cpu, new_nid); pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid); } int cpu_to_coregroup_id(int cpu) { __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; int index; if (cpu < 0 || cpu > nr_cpu_ids) return -1; if (!coregroup_enabled) goto out; if (!firmware_has_feature(FW_FEATURE_VPHN)) goto out; if (vphn_get_associativity(cpu, associativity)) goto out; index = of_read_number(associativity, 1); if (index > primary_domain_index + 1) return of_read_number(&associativity[index - 1], 1); out: return cpu_to_core_id(cpu); } static int topology_update_init(void) { topology_inited = 1; return 0; } device_initcall(topology_update_init); #endif /* CONFIG_PPC_SPLPAR */
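For form 1 affinity, __node_distance() starts at LOCAL_DISTANCE and doubles it for every reference-point level at which the two nodes' associativity entries differ (form 0 simply returns LOCAL_DISTANCE or REMOTE_DISTANCE). A standalone sketch of that doubling with made-up lookup-table contents; in the kernel the table is filled from ibm,associativity properties by __initialize_form1_numa_distance():

#include <stdio.h>

#define LOCAL_DISTANCE  10  /* generic Linux defaults, assumed for the sketch */
#define REMOTE_DISTANCE 20
#define REF_POINTS      4   /* mirrors MAX_DISTANCE_REF_POINTS */

/* Pretend per-node associativity entries, one per reference point.
 * Node 1 differs from node 0 only at the first (most significant) level;
 * node 2 differs from node 0 at all four levels. */
static const int lookup[3][REF_POINTS] = {
    { 7, 3, 1, 0 },
    { 8, 3, 1, 0 },
    { 9, 8, 5, 6 },
};

static int form1_node_distance(int a, int b)
{
    int distance = LOCAL_DISTANCE;

    for (int i = 0; i < REF_POINTS; i++) {
        if (lookup[a][i] == lookup[b][i])
            break;
        distance *= 2;  /* one extra NUMA level between a and b */
    }
    return distance;
}

int main(void)
{
    printf("dist(0,0) = %d\n", form1_node_distance(0, 0)); /* 10 */
    printf("dist(0,1) = %d\n", form1_node_distance(0, 1)); /* 20: differs at one level */
    printf("dist(0,2) = %d\n", form1_node_distance(0, 2)); /* 160: differs at all four */
    return 0;
}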
linux-master
arch/powerpc/mm/numa.c
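Device-tree "reg"-style properties store addresses and sizes as sequences of 32-bit cells, and read_n_cells() in numa.c folds n of them into a single value while advancing the cursor. A self-contained sketch of the same folding on host-endian values (the kernel version additionally byte-swaps each cell through of_read_number()):

#include <stdio.h>
#include <stdint.h>

/* Fold 'n' 32-bit device-tree cells into one 64-bit value, advancing the
 * cursor - same shape as read_n_cells() in numa.c, minus the be32 swap. */
static uint64_t read_n_cells(int n, const uint32_t **buf)
{
    uint64_t result = 0;

    while (n--) {
        result = (result << 32) | **buf;
        (*buf)++;
    }
    return result;
}

int main(void)
{
    /* A 2-cell address followed by a 2-cell size, as a memory node's "reg"
     * property would carry them on a 64-bit platform. */
    const uint32_t cells[] = { 0x00000001, 0x00000000,   /* base 0x1_0000_0000 */
                               0x00000000, 0x40000000 }; /* size 1 GiB */
    const uint32_t *p = cells;

    uint64_t base = read_n_cells(2, &p);
    uint64_t size = read_n_cells(2, &p);

    printf("base=%#llx size=%#llx\n",
           (unsigned long long)base, (unsigned long long)size);
    return 0;
}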
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
			       pgprot_t prot, void *caller)
{
	phys_addr_t paligned, offset;
	void __iomem *ret;
	int err;

	/* We don't support the 4K PFN hack with ioremap */
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;

	/*
	 * Choose an address to map it to. Once the vmalloc system is running,
	 * we use it. Before that, we map using addresses going up from
	 * ioremap_bot. vmalloc will use the addresses from IOREMAP_BASE
	 * through ioremap_bot.
	 */
	paligned = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if (size == 0 || paligned == 0)
		return NULL;

	if (slab_is_available())
		return generic_ioremap_prot(addr, size, prot);

	pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller);

	err = early_ioremap_range(ioremap_bot, paligned, size, prot);
	if (err)
		return NULL;

	ret = (void __iomem *)ioremap_bot + offset;
	ioremap_bot += size + PAGE_SIZE;

	return ret;
}

/*
 * Unmap an IO region and remove it from vmalloc'd list.
 * Access to IO memory should be serialized by driver.
 */
void iounmap(volatile void __iomem *token)
{
	if (!slab_is_available())
		return;

	generic_iounmap(PCI_FIX_ADDR(token));
}
EXPORT_SYMBOL(iounmap);
linux-master
arch/powerpc/mm/ioremap_64.c
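Before the slab allocator is up, __ioremap_caller() splits the physical address into a page-aligned base plus an in-page offset, rounds the length up to whole pages, and then advances ioremap_bot by an extra PAGE_SIZE so each early mapping is followed by a guard page. The arithmetic in isolation, assuming 4 KiB pages purely for the sketch:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                      /* assumed 4 KiB pages */
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    uint64_t addr = 0xd0001234; /* unaligned MMIO address to map */
    uint64_t size = 0x2100;     /* requested length in bytes */

    uint64_t paligned = addr & PAGE_MASK;                    /* 0xd0001000 */
    uint64_t offset   = addr & ~PAGE_MASK;                   /* 0x234 */
    uint64_t maplen   = PAGE_ALIGN(addr + size) - paligned;  /* whole pages covering the range */

    printf("map %#llx bytes at %#llx, return mapping base + %#llx\n",
           (unsigned long long)maplen, (unsigned long long)paligned,
           (unsigned long long)offset);
    /* addr + size = 0xd0003334 -> aligned up to 0xd0004000 -> maplen 0x3000 (3 pages) */
    return 0;
}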
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Derived from "arch/i386/mm/fault.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Modified by Cort Dougan and Paul Mackerras. * * Modified for PPC64 by Dave Engebretsen ([email protected]) */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/pagemap.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/highmem.h> #include <linux/extable.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/perf_event.h> #include <linux/ratelimit.h> #include <linux/context_tracking.h> #include <linux/hugetlb.h> #include <linux/uaccess.h> #include <linux/kfence.h> #include <linux/pkeys.h> #include <asm/firmware.h> #include <asm/interrupt.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/mmu_context.h> #include <asm/siginfo.h> #include <asm/debug.h> #include <asm/kup.h> #include <asm/inst.h> /* * do_page_fault error handling helpers */ static int __bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code) { /* * If we are in kernel mode, bail out with a SEGV, this will * be caught by the assembly which will restore the non-volatile * registers before calling bad_page_fault() */ if (!user_mode(regs)) return SIGSEGV; _exception(SIGSEGV, regs, si_code, address); return 0; } static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address) { return __bad_area_nosemaphore(regs, address, SEGV_MAPERR); } static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code) { struct mm_struct *mm = current->mm; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ mmap_read_unlock(mm); return __bad_area_nosemaphore(regs, address, si_code); } static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, struct vm_area_struct *vma) { struct mm_struct *mm = current->mm; int pkey; /* * We don't try to fetch the pkey from page table because reading * page table without locking doesn't guarantee stable pte value. * Hence the pkey value that we return to userspace can be different * from the pkey that actually caused access error. * * It does *not* guarantee that the VMA we find here * was the one that we faulted on. * * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); * 2. T1 : set AMR to deny access to pkey=4, touches, page * 3. T1 : faults... * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); * 5. T1 : enters fault handler, takes mmap_lock, etc... * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really * faulted on a pte with its pkey=4. 
*/ pkey = vma_pkey(vma); mmap_read_unlock(mm); /* * If we are in kernel mode, bail out with a SEGV, this will * be caught by the assembly which will restore the non-volatile * registers before calling bad_page_fault() */ if (!user_mode(regs)) return SIGSEGV; _exception_pkey(regs, address, pkey); return 0; } static noinline int bad_access(struct pt_regs *regs, unsigned long address) { return __bad_area(regs, address, SEGV_ACCERR); } static int do_sigbus(struct pt_regs *regs, unsigned long address, vm_fault_t fault) { if (!user_mode(regs)) return SIGBUS; current->thread.trap_nr = BUS_ADRERR; #ifdef CONFIG_MEMORY_FAILURE if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { unsigned int lsb = 0; /* shutup gcc */ pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", current->comm, current->pid, address); if (fault & VM_FAULT_HWPOISON_LARGE) lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); if (fault & VM_FAULT_HWPOISON) lsb = PAGE_SHIFT; force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb); return 0; } #endif force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address); return 0; } static int mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) { /* * Kernel page fault interrupted by SIGKILL. We have no reason to * continue processing. */ if (fatal_signal_pending(current) && !user_mode(regs)) return SIGKILL; /* Out of memory */ if (fault & VM_FAULT_OOM) { /* * We ran out of memory, or some other thing happened to us that * made us unable to handle the page fault gracefully. */ if (!user_mode(regs)) return SIGSEGV; pagefault_out_of_memory(); } else { if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| VM_FAULT_HWPOISON_LARGE)) return do_sigbus(regs, addr, fault); else if (fault & VM_FAULT_SIGSEGV) return bad_area_nosemaphore(regs, addr); else BUG(); } return 0; } /* Is this a bad kernel fault ? */ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address, bool is_write) { int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE; if (is_exec) { pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n", address >= TASK_SIZE ? "exec-protected" : "user", address, from_kuid(&init_user_ns, current_uid())); // Kernel exec fault is always bad return true; } // Kernel fault on kernel address is bad if (address >= TASK_SIZE) return true; // Read/write fault blocked by KUAP is bad, it can never succeed. if (bad_kuap_fault(regs, address, is_write)) { pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n", is_write ? "write" : "read", address, from_kuid(&init_user_ns, current_uid())); // Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad if (!search_exception_tables(regs->nip)) return true; // Read/write fault in a valid region (the exception table search passed // above), but blocked by KUAP is bad, it can never succeed. return WARN(true, "Bug: %s fault blocked by KUAP!", is_write ? "Write" : "Read"); } // What's left? Kernel fault on user and allowed by KUAP in the faulting context. return false; } static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey, struct vm_area_struct *vma) { /* * Make sure to check the VMA so that we do not perform * faults just to hit a pkey fault as soon as we fill in a * page. 
Only called for current mm, hence foreign == 0 */ if (!arch_vma_access_permitted(vma, is_write, is_exec, 0)) return true; return false; } static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma) { /* * Allow execution from readable areas if the MMU does not * provide separate controls over reading and executing. * * Note: That code used to not be enabled for 4xx/BookE. * It is now as I/D cache coherency for these is done at * set_pte_at() time and I see no reason why the test * below wouldn't be valid on those processors. This -may- * break programs compiled with a really old ABI though. */ if (is_exec) { return !(vma->vm_flags & VM_EXEC) && (cpu_has_feature(CPU_FTR_NOEXECUTE) || !(vma->vm_flags & (VM_READ | VM_WRITE))); } if (is_write) { if (unlikely(!(vma->vm_flags & VM_WRITE))) return true; return false; } /* * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as * defined in protection_map[]. Read faults can only be caused by * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix. */ if (unlikely(!vma_is_accessible(vma))) return true; if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC))) return true; /* * We should ideally do the vma pkey access check here. But in the * fault path, handle_mm_fault() also does the same check. To avoid * these multiple checks, we skip it here and handle access error due * to pkeys later. */ return false; } #ifdef CONFIG_PPC_SMLPAR static inline void cmo_account_page_fault(void) { if (firmware_has_feature(FW_FEATURE_CMO)) { u32 page_ins; preempt_disable(); page_ins = be32_to_cpu(get_lppaca()->page_ins); page_ins += 1 << PAGE_FACTOR; get_lppaca()->page_ins = cpu_to_be32(page_ins); preempt_enable(); } } #else static inline void cmo_account_page_fault(void) { } #endif /* CONFIG_PPC_SMLPAR */ static void sanity_check_fault(bool is_write, bool is_user, unsigned long error_code, unsigned long address) { /* * Userspace trying to access kernel address, we get PROTFAULT for that. */ if (is_user && address >= TASK_SIZE) { if ((long)address == -1) return; pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n", current->comm, current->pid, address, from_kuid(&init_user_ns, current_uid())); return; } if (!IS_ENABLED(CONFIG_PPC_BOOK3S)) return; /* * For hash translation mode, we should never get a * PROTFAULT. Any update to pte to reduce access will result in us * removing the hash page table entry, thus resulting in a DSISR_NOHPTE * fault instead of DSISR_PROTFAULT. * * A pte update to relax the access will not result in a hash page table * entry invalidate and hence can result in DSISR_PROTFAULT. * ptep_set_access_flags() doesn't do a hpte flush. This is why we have * the special !is_write in the below conditional. * * For platforms that doesn't supports coherent icache and do support * per page noexec bit, we do setup things such that we do the * sync between D/I cache via fault. But that is handled via low level * hash fault code (hash_page_do_lazy_icache()) and we should not reach * here in such case. * * For wrong access that can result in PROTFAULT, the above vma->vm_flags * check should handle those and hence we should fall to the bad_area * handling correctly. * * For embedded with per page exec support that doesn't support coherent * icache we do get PROTFAULT and we handle that D/I cache sync in * set_pte_at while taking the noexec/prot fault. Hence this is WARN_ON * is conditional for server MMU. 
* * For radix, we can get prot fault for autonuma case, because radix * page table will have them marked noaccess for user. */ if (radix_enabled() || is_write) return; WARN_ON_ONCE(error_code & DSISR_PROTFAULT); } /* * Define the correct "is_write" bit in error_code based * on the processor family */ #if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) #define page_fault_is_write(__err) ((__err) & ESR_DST) #else #define page_fault_is_write(__err) ((__err) & DSISR_ISSTORE) #endif #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) #define page_fault_is_bad(__err) (0) #elif defined(CONFIG_PPC_8xx) #define page_fault_is_bad(__err) ((__err) & DSISR_NOEXEC_OR_G) #elif defined(CONFIG_PPC64) static int page_fault_is_bad(unsigned long err) { unsigned long flag = DSISR_BAD_FAULT_64S; /* * PAPR+ v2.11 § 14.15.3.4.1 (unreleased) * If byte 0, bit 3 of pi-attribute-specifier-type in * ibm,pi-features property is defined, ignore the DSI error * which is caused by the paste instruction on the * suspended NX window. */ if (mmu_has_feature(MMU_FTR_NX_DSI)) flag &= ~DSISR_BAD_COPYPASTE; return err & flag; } #else #define page_fault_is_bad(__err) ((__err) & DSISR_BAD_FAULT_32S) #endif /* * For 600- and 800-family processors, the error_code parameter is DSISR * for a data fault, SRR1 for an instruction fault. * For 400-family processors the error_code parameter is ESR for a data fault, * 0 for an instruction fault. * For 64-bit processors, the error_code parameter is DSISR for a data access * fault, SRR1 & 0x08000000 for an instruction access fault. * * The return value is 0 if the fault was handled, or the signal * number if this is a kernel fault that can't be handled here. */ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code) { struct vm_area_struct * vma; struct mm_struct *mm = current->mm; unsigned int flags = FAULT_FLAG_DEFAULT; int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE; int is_user = user_mode(regs); int is_write = page_fault_is_write(error_code); vm_fault_t fault, major = 0; bool kprobe_fault = kprobe_page_fault(regs, 11); if (unlikely(debugger_fault_handler(regs) || kprobe_fault)) return 0; if (unlikely(page_fault_is_bad(error_code))) { if (is_user) { _exception(SIGBUS, regs, BUS_OBJERR, address); return 0; } return SIGBUS; } /* Additional sanity check(s) */ sanity_check_fault(is_write, is_user, error_code, address); /* * The kernel should never take an execute fault nor should it * take a page fault to a kernel address or a page fault to a user * address outside of dedicated places */ if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) { if (kfence_handle_page_fault(address, is_write, regs)) return 0; return SIGSEGV; } /* * If we're in an interrupt, have no user context or are running * in a region with pagefaults disabled then we must not take the fault */ if (unlikely(faulthandler_disabled() || !mm)) { if (is_user) printk_ratelimited(KERN_ERR "Page fault in user mode" " with faulthandler_disabled()=%d" " mm=%p\n", faulthandler_disabled(), mm); return bad_area_nosemaphore(regs, address); } interrupt_cond_local_irq_enable(regs); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* * We want to do this outside mmap_lock, because reading code around nip * can result in fault, which will cause a deadlock when called with * mmap_lock held */ if (is_user) flags |= FAULT_FLAG_USER; if (is_write) flags |= FAULT_FLAG_WRITE; if (is_exec) flags |= FAULT_FLAG_INSTRUCTION; if (!(flags & FAULT_FLAG_USER)) goto lock_mmap; 
vma = lock_vma_under_rcu(mm, address); if (!vma) goto lock_mmap; if (unlikely(access_pkey_error(is_write, is_exec, (error_code & DSISR_KEYFAULT), vma))) { vma_end_read(vma); goto lock_mmap; } if (unlikely(access_error(is_write, is_exec, vma))) { vma_end_read(vma); goto lock_mmap; } fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) vma_end_read(vma); if (!(fault & VM_FAULT_RETRY)) { count_vm_vma_lock_event(VMA_LOCK_SUCCESS); goto done; } count_vm_vma_lock_event(VMA_LOCK_RETRY); if (fault_signal_pending(fault, regs)) return user_mode(regs) ? 0 : SIGBUS; lock_mmap: /* When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the * kernel and should generate an OOPS. Unfortunately, in the case of an * erroneous fault occurring in a code path which already holds mmap_lock * we will deadlock attempting to validate the fault against the * address space. Luckily the kernel only validly references user * space from well defined areas of code, which are listed in the * exceptions table. lock_mm_and_find_vma() handles that logic. */ retry: vma = lock_mm_and_find_vma(mm, address, regs); if (unlikely(!vma)) return bad_area_nosemaphore(regs, address); if (unlikely(access_pkey_error(is_write, is_exec, (error_code & DSISR_KEYFAULT), vma))) return bad_access_pkey(regs, address, vma); if (unlikely(access_error(is_write, is_exec, vma))) return bad_access(regs, address); /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(vma, address, flags, regs); major |= fault & VM_FAULT_MAJOR; if (fault_signal_pending(fault, regs)) return user_mode(regs) ? 0 : SIGBUS; /* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) goto out; /* * Handle the retry right now, the mmap_lock has been released in that * case. */ if (unlikely(fault & VM_FAULT_RETRY)) { flags |= FAULT_FLAG_TRIED; goto retry; } mmap_read_unlock(current->mm); done: if (unlikely(fault & VM_FAULT_ERROR)) return mm_fault_error(regs, address, fault); out: /* * Major/minor page fault accounting. */ if (major) cmo_account_page_fault(); return 0; } NOKPROBE_SYMBOL(___do_page_fault); static __always_inline void __do_page_fault(struct pt_regs *regs) { long err; err = ___do_page_fault(regs, regs->dar, regs->dsisr); if (unlikely(err)) bad_page_fault(regs, err); } DEFINE_INTERRUPT_HANDLER(do_page_fault) { __do_page_fault(regs); } #ifdef CONFIG_PPC_BOOK3S_64 /* Same as do_page_fault but interrupt entry has already run in do_hash_fault */ void hash__do_page_fault(struct pt_regs *regs) { __do_page_fault(regs); } NOKPROBE_SYMBOL(hash__do_page_fault); #endif /* * bad_page_fault is called when we have a bad access from the kernel. * It is called from the DSI and ISI handlers in head.S and from some * of the procedures in traps.c. */ static void __bad_page_fault(struct pt_regs *regs, int sig) { int is_write = page_fault_is_write(regs->dsisr); const char *msg; /* kernel has accessed a bad area */ if (regs->dar < PAGE_SIZE) msg = "Kernel NULL pointer dereference"; else msg = "Unable to handle kernel data access"; switch (TRAP(regs)) { case INTERRUPT_DATA_STORAGE: case INTERRUPT_H_DATA_STORAGE: pr_alert("BUG: %s on %s at 0x%08lx\n", msg, is_write ? 
"write" : "read", regs->dar); break; case INTERRUPT_DATA_SEGMENT: pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar); break; case INTERRUPT_INST_STORAGE: case INTERRUPT_INST_SEGMENT: pr_alert("BUG: Unable to handle kernel instruction fetch%s", regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n"); break; case INTERRUPT_ALIGNMENT: pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n", regs->dar); break; default: pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n", regs->dar); break; } printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n", regs->nip); if (task_stack_end_corrupted(current)) printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); die("Kernel access of bad area", regs, sig); } void bad_page_fault(struct pt_regs *regs, int sig) { const struct exception_table_entry *entry; /* Are we prepared to handle this fault? */ entry = search_exception_tables(instruction_pointer(regs)); if (entry) instruction_pointer_set(regs, extable_fixup(entry)); else __bad_page_fault(regs, sig); } #ifdef CONFIG_PPC_BOOK3S_64 DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv) { bad_page_fault(regs, SIGSEGV); } /* * In radix, segment interrupts indicate the EA is not addressable by the * page table geometry, so they are always sent here. * * In hash, this is called if do_slb_fault returns error. Typically it is * because the EA was outside the region allowed by software. */ DEFINE_INTERRUPT_HANDLER(do_bad_segment_interrupt) { int err = regs->result; if (err == -EFAULT) { if (user_mode(regs)) _exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar); else bad_page_fault(regs, SIGSEGV); } else if (err == -EINVAL) { unrecoverable_exception(regs); } else { BUG(); } } #endif
linux-master
arch/powerpc/mm/fault.c
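A note on the slow path above: ___do_page_fault() loops back to its retry label whenever handle_mm_fault() reports VM_FAULT_RETRY, adding FAULT_FLAG_TRIED for the second pass. The userspace mock below replays only that control flow with a stub handler; every MOCK_* name is invented for the sketch and nothing here calls real mm code.

#include <stdio.h>

enum mock_fault { MOCK_OK, MOCK_RETRY, MOCK_COMPLETED, MOCK_ERROR };

/* Stub fault handler: pretend the first attempt has to wait (RETRY) and the
 * second succeeds, so the retry path gets exercised exactly once. */
static enum mock_fault mock_handle_fault(int attempt)
{
        return attempt == 0 ? MOCK_RETRY : MOCK_OK;
}

int main(void)
{
        unsigned int flags = 0;                 /* FAULT_FLAG_DEFAULT stand-in */
        const unsigned int MOCK_FLAG_TRIED = 1; /* FAULT_FLAG_TRIED stand-in */
        int attempt = 0;
        enum mock_fault fault;

retry:
        fault = mock_handle_fault(attempt++);
        if (fault == MOCK_RETRY) {
                flags |= MOCK_FLAG_TRIED;       /* mirrors flags |= FAULT_FLAG_TRIED */
                goto retry;
        }
        printf("fault resolved after %d attempt(s), flags=%#x\n", attempt, flags);
        return 0;
}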
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * PPC44x/36-bit changes by Matt Porter ([email protected]) * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ #include <linux/module.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/initrd.h> #include <linux/pagemap.h> #include <linux/memblock.h> #include <linux/gfp.h> #include <linux/slab.h> #include <linux/hugetlb.h> #include <asm/io.h> #include <asm/mmu.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/btext.h> #include <asm/tlb.h> #include <asm/sections.h> #include <asm/hugetlb.h> #include <asm/kup.h> #include <asm/kasan.h> #include <mm/mmu_decl.h> #if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL) /* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */ #if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET)) #error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_KERNEL_START" #endif #endif #define MAX_LOW_MEM CONFIG_LOWMEM_SIZE phys_addr_t total_memory; phys_addr_t total_lowmem; #ifdef CONFIG_RELOCATABLE /* Used in __va()/__pa() */ long long virt_phys_offset; EXPORT_SYMBOL(virt_phys_offset); #endif phys_addr_t lowmem_end_addr; int boot_mapsize; #ifdef CONFIG_PPC_PMAC unsigned long agp_special_page; EXPORT_SYMBOL(agp_special_page); #endif void MMU_init(void); /* max amount of low RAM to map in */ unsigned long __max_low_memory = MAX_LOW_MEM; /* * MMU_init sets up the basic memory mappings for the kernel, * including both RAM and possibly some I/O regions, * and sets up the page tables and the MMU hardware ready to go. */ void __init MMU_init(void) { if (ppc_md.progress) ppc_md.progress("MMU:enter", 0x111); total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr; lowmem_end_addr = memstart_addr + total_lowmem; #ifdef CONFIG_PPC_85xx /* Freescale Book-E parts expect lowmem to be mapped by fixed TLB * entries, so we need to adjust lowmem to match the amount we can map * in the fixed entries */ adjust_total_lowmem(); #endif /* CONFIG_PPC_85xx */ if (total_lowmem > __max_low_memory) { total_lowmem = __max_low_memory; lowmem_end_addr = memstart_addr + total_lowmem; #ifndef CONFIG_HIGHMEM total_memory = total_lowmem; memblock_enforce_memory_limit(total_lowmem); #endif /* CONFIG_HIGHMEM */ } /* Initialize the MMU hardware */ if (ppc_md.progress) ppc_md.progress("MMU:hw init", 0x300); MMU_init_hw(); /* Map in all of RAM starting at KERNELBASE */ if (ppc_md.progress) ppc_md.progress("MMU:mapin", 0x301); mapin_ram(); /* Initialize early top-down ioremap allocator */ ioremap_bot = IOREMAP_TOP; if (ppc_md.progress) ppc_md.progress("MMU:exit", 0x211); /* From now on, btext is no longer BAT mapped if it was at all */ #ifdef CONFIG_BOOTX_TEXT btext_unmap(); #endif kasan_mmu_init(); setup_kup(); update_mmu_feature_fixups(MMU_FTR_KUAP); /* Shortly after that, the entire linear mapping will be available */ memblock_set_current_limit(lowmem_end_addr); }
linux-master
arch/powerpc/mm/init_32.c
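The lowmem clamping in MMU_init() above is plain arithmetic: total_lowmem is capped at __max_low_memory and lowmem_end_addr follows from memstart_addr. The fragment below recomputes both for a made-up layout (1 GB of RAM, 768 MB ceiling); the numbers are examples, not values from any real board.

#include <stdio.h>

int main(void)
{
        /* Example values only: 1 GB of RAM at physical 0, 768 MB lowmem cap. */
        unsigned long long memstart_addr  = 0x00000000ULL;
        unsigned long long total_memory   = 1024ULL << 20;
        unsigned long long max_low_memory = 768ULL << 20;

        unsigned long long total_lowmem = total_memory;
        if (total_lowmem > max_low_memory)
                total_lowmem = max_low_memory;

        unsigned long long lowmem_end_addr = memstart_addr + total_lowmem;

        printf("total_lowmem    = %llu MB\n", total_lowmem >> 20);
        printf("lowmem_end_addr = %#llx\n", lowmem_end_addr);
        return 0;
}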
// SPDX-License-Identifier: GPL-2.0-or-later /* * CoProcessor (SPU/AFU) mm fault handler * * (C) Copyright IBM Deutschland Entwicklung GmbH 2007 * * Author: Arnd Bergmann <[email protected]> * Author: Jeremy Kerr <[email protected]> */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/export.h> #include <asm/reg.h> #include <asm/copro.h> #include <asm/spu.h> #include <misc/cxl-base.h> /* * This ought to be kept in sync with the powerpc specific do_page_fault * function. Currently, there are a few corner cases that we haven't had * to handle fortunately. */ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, unsigned long dsisr, vm_fault_t *flt) { struct vm_area_struct *vma; unsigned long is_write; int ret; if (mm == NULL) return -EFAULT; if (mm->pgd == NULL) return -EFAULT; vma = lock_mm_and_find_vma(mm, ea, NULL); if (!vma) return -EFAULT; ret = -EFAULT; is_write = dsisr & DSISR_ISSTORE; if (is_write) { if (!(vma->vm_flags & VM_WRITE)) goto out_unlock; } else { if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto out_unlock; /* * PROT_NONE is covered by the VMA check above. * and hash should get a NOHPTE fault instead of * a PROTFAULT in case fixup is needed for things * like autonuma. */ if (!radix_enabled()) WARN_ON_ONCE(dsisr & DSISR_PROTFAULT); } ret = 0; *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL); /* The fault is fully completed (including releasing mmap lock) */ if (*flt & VM_FAULT_COMPLETED) return 0; if (unlikely(*flt & VM_FAULT_ERROR)) { if (*flt & VM_FAULT_OOM) { ret = -ENOMEM; goto out_unlock; } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { ret = -EFAULT; goto out_unlock; } BUG(); } out_unlock: mmap_read_unlock(mm); return ret; } EXPORT_SYMBOL_GPL(copro_handle_mm_fault); #ifdef CONFIG_PPC_64S_HASH_MMU int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) { u64 vsid, vsidkey; int psize, ssize; switch (get_region_id(ea)) { case USER_REGION_ID: pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea); if (mm == NULL) return 1; psize = get_slice_psize(mm, ea); ssize = user_segment_size(ea); vsid = get_user_vsid(&mm->context, ea, ssize); vsidkey = SLB_VSID_USER; break; case VMALLOC_REGION_ID: pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea); psize = mmu_vmalloc_psize; ssize = mmu_kernel_ssize; vsid = get_kernel_vsid(ea, mmu_kernel_ssize); vsidkey = SLB_VSID_KERNEL; break; case IO_REGION_ID: pr_devel("%s: 0x%llx -- IO_REGION_ID\n", __func__, ea); psize = mmu_io_psize; ssize = mmu_kernel_ssize; vsid = get_kernel_vsid(ea, mmu_kernel_ssize); vsidkey = SLB_VSID_KERNEL; break; case LINEAR_MAP_REGION_ID: pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea); psize = mmu_linear_psize; ssize = mmu_kernel_ssize; vsid = get_kernel_vsid(ea, mmu_kernel_ssize); vsidkey = SLB_VSID_KERNEL; break; default: pr_debug("%s: invalid region access at %016llx\n", __func__, ea); return 1; } /* Bad address */ if (!vsid) return 1; vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey; vsid |= mmu_psize_defs[psize].sllp | ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0); slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V; slb->vsid = vsid; return 0; } EXPORT_SYMBOL_GPL(copro_calculate_slb); void copro_flush_all_slbs(struct mm_struct *mm) { #ifdef CONFIG_SPU_BASE spu_flush_all_slbs(mm); #endif cxl_slbia(mm); } EXPORT_SYMBOL_GPL(copro_flush_all_slbs); #endif
linux-master
arch/powerpc/mm/copro_fault.c
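copro_handle_mm_fault() above applies the same permission rule as the CPU path: a write needs VM_WRITE, anything else needs VM_READ or VM_EXEC. The standalone check below mirrors that rule; the DEMO_VM_* flag values are placeholders chosen for the sketch, not taken from the kernel headers.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_VM_READ  0x1
#define DEMO_VM_WRITE 0x2
#define DEMO_VM_EXEC  0x4

/* Same shape of check as in copro_handle_mm_fault(): writes need VM_WRITE,
 * everything else needs VM_READ or VM_EXEC on the VMA. */
static bool demo_access_ok(unsigned long vm_flags, bool is_write)
{
        if (is_write)
                return vm_flags & DEMO_VM_WRITE;
        return vm_flags & (DEMO_VM_READ | DEMO_VM_EXEC);
}

int main(void)
{
        printf("write to r--: %d\n", demo_access_ok(DEMO_VM_READ, true));
        printf("read  of rw-: %d\n", demo_access_ok(DEMO_VM_READ | DEMO_VM_WRITE, false));
        printf("read  of --x: %d\n", demo_access_ok(DEMO_VM_EXEC, false));
        return 0;
}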
// SPDX-License-Identifier: GPL-2.0 /* * Handling Page Tables through page fragments * */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <linux/hugetlb.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/tlb.h> void pte_frag_destroy(void *pte_frag) { int count; struct ptdesc *ptdesc; ptdesc = virt_to_ptdesc(pte_frag); /* drop all the pending references */ count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT; /* We allow PTE_FRAG_NR fragments from a PTE page */ if (atomic_sub_and_test(PTE_FRAG_NR - count, &ptdesc->pt_frag_refcount)) { pagetable_pte_dtor(ptdesc); pagetable_free(ptdesc); } } static pte_t *get_pte_from_cache(struct mm_struct *mm) { void *pte_frag, *ret; if (PTE_FRAG_NR == 1) return NULL; spin_lock(&mm->page_table_lock); ret = pte_frag_get(&mm->context); if (ret) { pte_frag = ret + PTE_FRAG_SIZE; /* * If we have taken up all the fragments mark PTE page NULL */ if (((unsigned long)pte_frag & ~PAGE_MASK) == 0) pte_frag = NULL; pte_frag_set(&mm->context, pte_frag); } spin_unlock(&mm->page_table_lock); return (pte_t *)ret; } static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel) { void *ret = NULL; struct ptdesc *ptdesc; if (!kernel) { ptdesc = pagetable_alloc(PGALLOC_GFP | __GFP_ACCOUNT, 0); if (!ptdesc) return NULL; if (!pagetable_pte_ctor(ptdesc)) { pagetable_free(ptdesc); return NULL; } } else { ptdesc = pagetable_alloc(PGALLOC_GFP, 0); if (!ptdesc) return NULL; } atomic_set(&ptdesc->pt_frag_refcount, 1); ret = ptdesc_address(ptdesc); /* * if we support only one fragment just return the * allocated page. */ if (PTE_FRAG_NR == 1) return ret; spin_lock(&mm->page_table_lock); /* * If we find ptdesc_page set, we return * the allocated page with single fragment * count. */ if (likely(!pte_frag_get(&mm->context))) { atomic_set(&ptdesc->pt_frag_refcount, PTE_FRAG_NR); pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE); } spin_unlock(&mm->page_table_lock); return (pte_t *)ret; } pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel) { pte_t *pte; pte = get_pte_from_cache(mm); if (pte) return pte; return __alloc_for_ptecache(mm, kernel); } static void pte_free_now(struct rcu_head *head) { struct ptdesc *ptdesc; ptdesc = container_of(head, struct ptdesc, pt_rcu_head); pagetable_pte_dtor(ptdesc); pagetable_free(ptdesc); } void pte_fragment_free(unsigned long *table, int kernel) { struct ptdesc *ptdesc = virt_to_ptdesc(table); if (pagetable_is_reserved(ptdesc)) return free_reserved_ptdesc(ptdesc); BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0); if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) { if (kernel) pagetable_free(ptdesc); else if (folio_test_clear_active(ptdesc_folio(ptdesc))) call_rcu(&ptdesc->pt_rcu_head, pte_free_now); else pte_free_now(&ptdesc->pt_rcu_head); } } #ifdef CONFIG_TRANSPARENT_HUGEPAGE void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable) { struct page *page; page = virt_to_page(pgtable); SetPageActive(page); pte_fragment_free((unsigned long *)pgtable, 0); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
linux-master
arch/powerpc/mm/pgtable-frag.c
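The fragment allocator above carves one page into PTE_FRAG_NR pieces, hands them out from a cached cursor, and frees the backing page once every fragment reference has been dropped. Below is a simplified single-threaded userspace model of that scheme: posix_memalign() and a plain int stand in for pagetable_alloc() and pt_frag_refcount, and the page and fragment sizes are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096UL
#define DEMO_FRAG_NR   4
#define DEMO_FRAG_SIZE (DEMO_PAGE_SIZE / DEMO_FRAG_NR)

struct demo_page {
        unsigned char mem[DEMO_PAGE_SIZE];      /* must stay at offset 0 */
        int refcount;
};

static struct demo_page *cache_page;    /* page still being carved up */
static int cache_next;                  /* next unused fragment index */

static struct demo_page *frag_to_page(void *frag)
{
        /* Pages are DEMO_PAGE_SIZE aligned, so masking the fragment address
         * recovers the owning page, much like virt_to_ptdesc() does. */
        return (struct demo_page *)((uintptr_t)frag & ~(DEMO_PAGE_SIZE - 1));
}

static void page_put(struct demo_page *page, int refs)
{
        page->refcount -= refs;
        if (page->refcount == 0) {
                if (page == cache_page)
                        cache_page = NULL;
                free(page);
        }
}

static void *frag_alloc(void)
{
        if (!cache_page || cache_next == DEMO_FRAG_NR) {
                void *p;

                if (posix_memalign(&p, DEMO_PAGE_SIZE, sizeof(struct demo_page)))
                        return NULL;
                cache_page = p;
                /* One reference per fragment up front, as when the page
                 * becomes the per-mm cache in the kernel code. */
                cache_page->refcount = DEMO_FRAG_NR;
                cache_next = 0;
        }
        return cache_page->mem + DEMO_FRAG_SIZE * cache_next++;
}

static void frag_free(void *frag)
{
        page_put(frag_to_page(frag), 1);
}

/* Counterpart of pte_frag_destroy(): drop the references taken for
 * fragments that were never handed out from the cached page. */
static void frag_cache_destroy(void)
{
        if (cache_page)
                page_put(cache_page, DEMO_FRAG_NR - cache_next);
}

int main(void)
{
        void *f[DEMO_FRAG_NR + 1];

        for (int i = 0; i < DEMO_FRAG_NR + 1; i++)
                f[i] = frag_alloc();            /* forces a second page */
        for (int i = 0; i < DEMO_FRAG_NR + 1; i++)
                frag_free(f[i]);
        frag_cache_destroy();                   /* releases the second page */
        printf("all fragments and pages released\n");
        return 0;
}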
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains pgtable related functions for 64-bit machines. * * Derived from arch/ppc64/mm/init.c * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Dave Engebretsen <[email protected]> * Rework for PPC64 port. */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/export.h> #include <linux/types.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/stddef.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/hugetlb.h> #include <asm/page.h> #include <asm/mmu_context.h> #include <asm/mmu.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/tlb.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/sections.h> #include <asm/firmware.h> #include <asm/dma.h> #include <mm/mmu_decl.h> #ifdef CONFIG_PPC_BOOK3S_64 /* * partition table and process table for ISA 3.0 */ struct prtb_entry *process_tb; struct patb_entry *partition_tb; /* * page table size */ unsigned long __pte_index_size; EXPORT_SYMBOL(__pte_index_size); unsigned long __pmd_index_size; EXPORT_SYMBOL(__pmd_index_size); unsigned long __pud_index_size; EXPORT_SYMBOL(__pud_index_size); unsigned long __pgd_index_size; EXPORT_SYMBOL(__pgd_index_size); unsigned long __pud_cache_index; EXPORT_SYMBOL(__pud_cache_index); unsigned long __pte_table_size; EXPORT_SYMBOL(__pte_table_size); unsigned long __pmd_table_size; EXPORT_SYMBOL(__pmd_table_size); unsigned long __pud_table_size; EXPORT_SYMBOL(__pud_table_size); unsigned long __pgd_table_size; EXPORT_SYMBOL(__pgd_table_size); unsigned long __pmd_val_bits; EXPORT_SYMBOL(__pmd_val_bits); unsigned long __pud_val_bits; EXPORT_SYMBOL(__pud_val_bits); unsigned long __pgd_val_bits; EXPORT_SYMBOL(__pgd_val_bits); unsigned long __kernel_virt_start; EXPORT_SYMBOL(__kernel_virt_start); unsigned long __vmalloc_start; EXPORT_SYMBOL(__vmalloc_start); unsigned long __vmalloc_end; EXPORT_SYMBOL(__vmalloc_end); unsigned long __kernel_io_start; EXPORT_SYMBOL(__kernel_io_start); unsigned long __kernel_io_end; struct page *vmemmap; EXPORT_SYMBOL(vmemmap); unsigned long __pte_frag_nr; EXPORT_SYMBOL(__pte_frag_nr); unsigned long __pte_frag_size_shift; EXPORT_SYMBOL(__pte_frag_size_shift); #endif #ifndef __PAGETABLE_PUD_FOLDED /* 4 level page table */ struct page *p4d_page(p4d_t p4d) { if (p4d_is_leaf(p4d)) { if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) VM_WARN_ON(!p4d_huge(p4d)); return pte_page(p4d_pte(p4d)); } return virt_to_page(p4d_pgtable(p4d)); } #endif struct page *pud_page(pud_t pud) { if (pud_is_leaf(pud)) { if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) VM_WARN_ON(!pud_huge(pud)); return pte_page(pud_pte(pud)); } return virt_to_page(pud_pgtable(pud)); } /* * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address. */ struct page *pmd_page(pmd_t pmd) { if (pmd_is_leaf(pmd)) { /* * vmalloc_to_page may be called on any vmap address (not only * vmalloc), and it uses pmd_page() etc., when huge vmap is * enabled so these checks can't be used. 
*/ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd))); return pte_page(pmd_pte(pmd)); } return virt_to_page(pmd_page_vaddr(pmd)); } #ifdef CONFIG_STRICT_KERNEL_RWX void mark_rodata_ro(void) { if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) { pr_warn("Warning: Unable to mark rodata read only on this CPU.\n"); return; } if (radix_enabled()) radix__mark_rodata_ro(); else hash__mark_rodata_ro(); // mark_initmem_nx() should have already run by now ptdump_check_wx(); } void mark_initmem_nx(void) { if (radix_enabled()) radix__mark_initmem_nx(); else hash__mark_initmem_nx(); } #endif
linux-master
arch/powerpc/mm/pgtable_64.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains the routines for initializing kernel userspace protection */ #include <linux/export.h> #include <linux/init.h> #include <linux/printk.h> #include <linux/smp.h> #include <asm/kup.h> #include <asm/smp.h> #ifdef CONFIG_PPC_KUAP void setup_kuap(bool disabled) { if (disabled) { if (IS_ENABLED(CONFIG_40x)) disable_kuep = true; if (smp_processor_id() == boot_cpuid) cur_cpu_spec->mmu_features &= ~MMU_FTR_KUAP; return; } pr_info("Activating Kernel Userspace Access Protection\n"); prevent_user_access(KUAP_READ_WRITE); } #endif
linux-master
arch/powerpc/mm/nohash/kup.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains the routines for initializing the MMU * on the 8xx series of chips. * -- christophe * * Derived from arch/powerpc/mm/40x_mmu.c: */ #include <linux/memblock.h> #include <linux/hugetlb.h> #include <mm/mmu_decl.h> #define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT) static unsigned long block_mapped_ram; /* * Return PA for this VA if it is in an area mapped with LTLBs or fixmap. * Otherwise, returns 0 */ phys_addr_t v_block_mapped(unsigned long va) { unsigned long p = PHYS_IMMR_BASE; if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE) return p + va - VIRT_IMMR_BASE; if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram) return __pa(va); return 0; } /* * Return VA for a given PA mapped with LTLBs or fixmap * Return 0 if not mapped */ unsigned long p_block_mapped(phys_addr_t pa) { unsigned long p = PHYS_IMMR_BASE; if (pa >= p && pa < p + IMMR_SIZE) return VIRT_IMMR_BASE + pa - p; if (pa < block_mapped_ram) return (unsigned long)__va(pa); return 0; } static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va) { if (hpd_val(*pmdp) == 0) { pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K); if (!ptep) return NULL; hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M); hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M); } return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT); } static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa, pgprot_t prot, int psize, bool new) { pmd_t *pmdp = pmd_off_k(va); pte_t *ptep; if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M)) return -EINVAL; if (new) { if (WARN_ON(slab_is_available())) return -EINVAL; if (psize == MMU_PAGE_512K) ptep = early_pte_alloc_kernel(pmdp, va); else ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va); } else { if (psize == MMU_PAGE_512K) ptep = pte_offset_kernel(pmdp, va); else ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT); } if (WARN_ON(!ptep)) return -ENOMEM; /* The PTE should never be already present */ if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot))) return -EINVAL; set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot))); return 0; } /* * MMU_init_hw does the chip-specific initialization of the MMU hardware. 
*/ void __init MMU_init_hw(void) { } static bool immr_is_mapped __initdata; void __init mmu_mapin_immr(void) { if (immr_is_mapped) return; immr_is_mapped = true; __early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE, PAGE_KERNEL_NCG, MMU_PAGE_512K, true); } static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot, bool new) { unsigned long v = PAGE_OFFSET + offset; unsigned long p = offset; WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K)); for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K) __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M) __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new); for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K) __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); if (!new) flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top); } unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M); unsigned long sinittext = __pa(_sinittext); bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence(); unsigned long boundary = strict_boundary ? sinittext : etext8; unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M); WARN_ON(top < einittext8); mmu_mapin_immr(); mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true); if (debug_pagealloc_enabled_or_kfence()) { top = boundary; } else { mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true); mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true); } if (top > SZ_32M) memblock_set_current_limit(top); block_mapped_ram = top; return top; } void mmu_mark_initmem_nx(void) { unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M); unsigned long sinittext = __pa(_sinittext); unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8; unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M); if (!debug_pagealloc_enabled_or_kfence()) mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false); mmu_pin_tlb(block_mapped_ram, false); } #ifdef CONFIG_STRICT_KERNEL_RWX void mmu_mark_rodata_ro(void) { unsigned long sinittext = __pa(_sinittext); mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false); if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) mmu_pin_tlb(block_mapped_ram, true); } #endif void __init setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { /* We don't currently support the first MEMBLOCK not mapping 0 * physical on those processors */ BUG_ON(first_memblock_base != 0); /* 8xx can only access 32MB at the moment */ memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M)); } int pud_clear_huge(pud_t *pud) { return 0; } int pmd_clear_huge(pmd_t *pmd) { return 0; }
linux-master
arch/powerpc/mm/nohash/8xx.c
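mmu_mapin_ram_chunk() above picks page sizes purely from alignment: 512K steps up to the first 8M boundary, 8M steps through the middle, and 512K steps for the tail. The sketch below prints that decomposition for an arbitrary example range so the loop bounds are easy to check by eye; it performs no mapping of any kind.

#include <stdio.h>

#define SZ_512K (512UL * 1024)
#define SZ_8M   (8UL * 1024 * 1024)

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* Print the 512K/8M/512K decomposition used by mmu_mapin_ram_chunk(). */
static void map_chunk(unsigned long offset, unsigned long top)
{
        unsigned long p = offset;

        for (; p < ALIGN_UP(p, SZ_8M) && p < top; p += SZ_512K)
                printf("  512K page at %#010lx\n", p);
        for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M)
                printf("  8M   page at %#010lx\n", p);
        for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K)
                printf("  512K page at %#010lx\n", p);
}

int main(void)
{
        /* Arbitrary example: decompose [4M, 26M) of a pretend linear range. */
        map_chunk(4UL * 1024 * 1024, 26UL * 1024 * 1024);
        return 0;
}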
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains the routines for initializing the MMU * on the 4xx series of chips. * -- paulus * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/stddef.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/highmem.h> #include <linux/memblock.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/mmu.h> #include <linux/uaccess.h> #include <asm/smp.h> #include <asm/bootx.h> #include <asm/machdep.h> #include <asm/setup.h> #include <mm/mmu_decl.h> /* * MMU_init_hw does the chip-specific initialization of the MMU hardware. */ void __init MMU_init_hw(void) { /* * The Zone Protection Register (ZPR) defines how protection will * be applied to every page which is a member of a given zone. At * present, we utilize only two of the 4xx's zones. * The zone index bits (of ZSEL) in the PTE are used for software * indicators, except the LSB. For user access, zone 1 is used, * for kernel access, zone 0 is used. We set all but zone 1 * to zero, allowing only kernel access as indicated in the PTE. * For zone 1, we set a 01 binary (a value of 10 will not work) * to allow user access as indicated in the PTE. This also allows * kernel access as indicated in the PTE. */ mtspr(SPRN_ZPR, 0x10000000); flush_instruction_cache(); /* * Set up the real-mode cache parameters for the exception vector * handlers (which are run in real-mode). */ mtspr(SPRN_DCWR, 0x00000000); /* All caching is write-back */ /* * Cache instruction and data space where the exception * vectors and the kernel live in real-mode. */ mtspr(SPRN_DCCR, 0xFFFF0000); /* 2GByte of data space at 0x0. */ mtspr(SPRN_ICCR, 0xFFFF0000); /* 2GByte of instr. space at 0x0. */ } #define LARGE_PAGE_SIZE_16M (1<<24) #define LARGE_PAGE_SIZE_4M (1<<22) unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { unsigned long v, s, mapped; phys_addr_t p; v = KERNELBASE; p = 0; s = total_lowmem; if (IS_ENABLED(CONFIG_KFENCE)) return 0; if (debug_pagealloc_enabled()) return 0; if (strict_kernel_rwx_enabled()) return 0; while (s >= LARGE_PAGE_SIZE_16M) { pmd_t *pmdp; unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_RW; pmdp = pmd_off_k(v); *pmdp++ = __pmd(val); *pmdp++ = __pmd(val); *pmdp++ = __pmd(val); *pmdp++ = __pmd(val); v += LARGE_PAGE_SIZE_16M; p += LARGE_PAGE_SIZE_16M; s -= LARGE_PAGE_SIZE_16M; } while (s >= LARGE_PAGE_SIZE_4M) { pmd_t *pmdp; unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_RW; pmdp = pmd_off_k(v); *pmdp = __pmd(val); v += LARGE_PAGE_SIZE_4M; p += LARGE_PAGE_SIZE_4M; s -= LARGE_PAGE_SIZE_4M; } mapped = total_lowmem - s; /* If the size of RAM is not an exact power of two, we may not * have covered RAM in its entirety with 16 and 4 MiB * pages. 
Consequently, restrict the top end of RAM currently * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail" * coverage with normal-sized pages (or other reasons) do not * attempt to allocate outside the allowed range. */ memblock_set_current_limit(mapped); return mapped; } void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { /* We don't currently support the first MEMBLOCK not mapping 0 * physical on those processors */ BUG_ON(first_memblock_base != 0); /* 40x can only access 16MB at the moment (see head_40x.S) */ memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); }
linux-master
arch/powerpc/mm/nohash/40x.c
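mmu_mapin_ram() above covers lowmem greedily with as many 16 MB pages as fit, then 4 MB pages, leaving the residual to normal-sized pages. The small arithmetic sketch below replays that covering for a made-up 54 MB lowmem size.

#include <stdio.h>

#define LARGE_PAGE_SIZE_16M (1UL << 24)
#define LARGE_PAGE_SIZE_4M  (1UL << 22)

int main(void)
{
        unsigned long s = 54UL * 1024 * 1024;   /* example lowmem size */
        unsigned long mapped = 0;
        int n16 = 0, n4 = 0;

        while (s >= LARGE_PAGE_SIZE_16M) {
                s -= LARGE_PAGE_SIZE_16M;
                mapped += LARGE_PAGE_SIZE_16M;
                n16++;
        }
        while (s >= LARGE_PAGE_SIZE_4M) {
                s -= LARGE_PAGE_SIZE_4M;
                mapped += LARGE_PAGE_SIZE_4M;
                n4++;
        }
        printf("%d x 16M + %d x 4M pages, %lu MB mapped, %lu MB residual\n",
               n16, n4, mapped >> 20, s >> 20);
        return 0;
}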
// SPDX-License-Identifier: GPL-2.0-or-later /* * Modifications by Matt Porter ([email protected]) to support * PPC44x Book E processors. * * This file contains the routines for initializing the MMU * on the 4xx series of chips. * -- paulus * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ #include <linux/init.h> #include <linux/memblock.h> #include <asm/mmu.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/code-patching.h> #include <asm/smp.h> #include <mm/mmu_decl.h> /* Used by the 44x TLB replacement exception handler. * Just needed it declared someplace. */ unsigned int tlb_44x_index; /* = 0 */ unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS; int icache_44x_need_flush; unsigned long tlb_47x_boltmap[1024/8]; static void __init ppc44x_update_tlb_hwater(void) { /* The TLB miss handlers hard codes the watermark in a cmpli * instruction to improve performances rather than loading it * from the global variable. Thus, we patch the instructions * in the 2 TLB miss handlers when updating the value */ modify_instruction_site(&patch__tlb_44x_hwater_D, 0xffff, tlb_44x_hwater); modify_instruction_site(&patch__tlb_44x_hwater_I, 0xffff, tlb_44x_hwater); } /* * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU */ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) { unsigned int entry = tlb_44x_hwater--; ppc44x_update_tlb_hwater(); mtspr(SPRN_MMUCR, 0); __asm__ __volatile__( "tlbwe %2,%3,%4\n" "tlbwe %1,%3,%5\n" "tlbwe %0,%3,%6\n" : : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), "r" (phys), "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M), "r" (entry), "i" (PPC44x_TLB_PAGEID), "i" (PPC44x_TLB_XLAT), "i" (PPC44x_TLB_ATTRIB)); } static int __init ppc47x_find_free_bolted(void) { unsigned int mmube0 = mfspr(SPRN_MMUBE0); unsigned int mmube1 = mfspr(SPRN_MMUBE1); if (!(mmube0 & MMUBE0_VBE0)) return 0; if (!(mmube0 & MMUBE0_VBE1)) return 1; if (!(mmube0 & MMUBE0_VBE2)) return 2; if (!(mmube1 & MMUBE1_VBE3)) return 3; if (!(mmube1 & MMUBE1_VBE4)) return 4; if (!(mmube1 & MMUBE1_VBE5)) return 5; return -1; } static void __init ppc47x_update_boltmap(void) { unsigned int mmube0 = mfspr(SPRN_MMUBE0); unsigned int mmube1 = mfspr(SPRN_MMUBE1); if (mmube0 & MMUBE0_VBE0) __set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff, tlb_47x_boltmap); if (mmube0 & MMUBE0_VBE1) __set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff, tlb_47x_boltmap); if (mmube0 & MMUBE0_VBE2) __set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff, tlb_47x_boltmap); if (mmube1 & MMUBE1_VBE3) __set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff, tlb_47x_boltmap); if (mmube1 & MMUBE1_VBE4) __set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff, tlb_47x_boltmap); if (mmube1 & MMUBE1_VBE5) __set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff, tlb_47x_boltmap); } /* * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU */ static void __init ppc47x_pin_tlb(unsigned int virt, unsigned int phys) { unsigned int rA; int bolted; /* Base rA is HW way select, way 0, bolted bit set */ rA = 0x88000000; /* Look for a bolted entry slot */ bolted = ppc47x_find_free_bolted(); BUG_ON(bolted < 0); /* Insert bolted slot number */ rA |= bolted << 24; pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n", 
virt, phys, bolted); mtspr(SPRN_MMUCR, 0); __asm__ __volatile__( "tlbwe %2,%3,0\n" "tlbwe %1,%3,1\n" "tlbwe %0,%3,2\n" : : "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR | PPC47x_TLB2_SX #ifdef CONFIG_SMP | PPC47x_TLB2_M #endif ), "r" (phys), "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M), "r" (rA)); } void __init MMU_init_hw(void) { /* This is not useful on 47x but won't hurt either */ ppc44x_update_tlb_hwater(); flush_instruction_cache(); } unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { unsigned long addr; unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1); /* Pin in enough TLBs to cover any lowmem not covered by the * initial 256M mapping established in head_44x.S */ for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr; addr += PPC_PIN_SIZE) { if (mmu_has_feature(MMU_FTR_TYPE_47x)) ppc47x_pin_tlb(addr + PAGE_OFFSET, addr); else ppc44x_pin_tlb(addr + PAGE_OFFSET, addr); } if (mmu_has_feature(MMU_FTR_TYPE_47x)) { ppc47x_update_boltmap(); #ifdef DEBUG { int i; printk(KERN_DEBUG "bolted entries: "); for (i = 0; i < 255; i++) { if (test_bit(i, tlb_47x_boltmap)) printk("%d ", i); } printk("\n"); } #endif /* DEBUG */ } return total_lowmem; } void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { u64 size; #ifndef CONFIG_NONSTATIC_KERNEL /* We don't currently support the first MEMBLOCK not mapping 0 * physical on those processors */ BUG_ON(first_memblock_base != 0); #endif /* 44x has a 256M TLB entry pinned at boot */ size = (min_t(u64, first_memblock_size, PPC_PIN_SIZE)); memblock_set_current_limit(first_memblock_base + size); } #ifdef CONFIG_SMP void __init mmu_init_secondary(int cpu) { unsigned long addr; unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1); /* Pin in enough TLBs to cover any lowmem not covered by the * initial 256M mapping established in head_44x.S * * WARNING: This is called with only the first 256M of the * linear mapping in the TLB and we can't take faults yet * so beware of what this code uses. It runs off a temporary * stack. current (r2) isn't initialized, smp_processor_id() * will not work, current thread info isn't accessible, ... */ for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr; addr += PPC_PIN_SIZE) { if (mmu_has_feature(MMU_FTR_TYPE_47x)) ppc47x_pin_tlb(addr + PAGE_OFFSET, addr); else ppc44x_pin_tlb(addr + PAGE_OFFSET, addr); } } #endif /* CONFIG_SMP */
linux-master
arch/powerpc/mm/nohash/44x.c
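The pinning loop in mmu_mapin_ram() above walks lowmem in 256 MB strides, skipping the first stride that the early boot code already mapped. The sketch below reproduces only that stride arithmetic for an arbitrary 700 MB lowmem example; DEMO_PIN_SIZE is written out by hand rather than taken from the kernel headers.

#include <stdio.h>

#define DEMO_PIN_SIZE (256UL * 1024 * 1024)     /* stands in for PPC_PIN_SIZE */

int main(void)
{
        unsigned long memstart = 0;                     /* example */
        unsigned long lowmem_end = 700UL * 1024 * 1024; /* example */
        unsigned long addr;
        int pinned = 0;

        /* The first 256 MB is assumed already mapped by the early boot code. */
        for (addr = memstart + DEMO_PIN_SIZE; addr < lowmem_end;
             addr += DEMO_PIN_SIZE) {
                printf("pin 256M entry covering %#010lx\n", addr);
                pinned++;
        }
        printf("%d extra pinned entries for %lu MB of lowmem\n",
               pinned, lowmem_end >> 20);
        return 0;
}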
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains the routines for handling the MMU on those * PowerPC implementations where the MMU is not using the hash * table, such as 8xx, 4xx, BookE's etc... * * Copyright 2008 Ben Herrenschmidt <[email protected]> * IBM Corp. * * Derived from previous arch/powerpc/mm/mmu_context.c * and arch/powerpc/include/asm/mmu_context.h * * TODO: * * - The global context lock will not scale very well * - The maps should be dynamically allocated to allow for processors * that support more PID bits at runtime * - Implement flush_tlb_mm() by making the context stale and picking * a new one * - More aggressively clear stale map bits and maybe find some way to * also clear mm->cpu_vm_mask bits when processes are migrated */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/memblock.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/slab.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include <asm/smp.h> #include <asm/kup.h> #include <mm/mmu_decl.h> /* * Room for two PTE table pointers, usually the kernel and current user * pointer to their respective root page table (pgdir). */ void *abatron_pteptrs[2]; /* * The MPC8xx has only 16 contexts. We rotate through them on each task switch. * A better way would be to keep track of tasks that own contexts, and implement * an LRU usage. That way very active tasks don't always have to pay the TLB * reload overhead. The kernel pages are mapped shared, so the kernel can run on * behalf of any task that makes a kernel entry. Shared does not mean they are * not protected, just that the ASID comparison is not performed. -- Dan * * The IBM4xx has 256 contexts, so we can just rotate through these as a way of * "switching" contexts. If the TID of the TLB is zero, the PID/TID comparison * is disabled, so we can use a TID of zero to represent all kernel pages as * shared among all contexts. -- Dan * * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We should * normally never have to steal though the facility is present if needed. * -- BenH */ #define FIRST_CONTEXT 1 #if defined(CONFIG_PPC_8xx) #define LAST_CONTEXT 16 #elif defined(CONFIG_PPC_47x) #define LAST_CONTEXT 65535 #else #define LAST_CONTEXT 255 #endif static unsigned int next_context, nr_free_contexts; static unsigned long *context_map; static unsigned long *stale_map[NR_CPUS]; static struct mm_struct **context_mm; static DEFINE_RAW_SPINLOCK(context_lock); #define CTX_MAP_SIZE \ (sizeof(unsigned long) * (LAST_CONTEXT / BITS_PER_LONG + 1)) /* Steal a context from a task that has one at the moment. * * This is used when we are running out of available PID numbers * on the processors. * * This isn't an LRU system, it just frees up each context in * turn (sort-of pseudo-random replacement :). This would be the * place to implement an LRU scheme if anyone was motivated to do it. * -- paulus * * For context stealing, we use a slightly different approach for * SMP and UP. 
Basically, the UP one is simpler and doesn't use * the stale map as we can just flush the local CPU * -- benh */ static unsigned int steal_context_smp(unsigned int id) { struct mm_struct *mm; unsigned int cpu, max, i; max = LAST_CONTEXT - FIRST_CONTEXT; /* Attempt to free next_context first and then loop until we manage */ while (max--) { /* Pick up the victim mm */ mm = context_mm[id]; /* We have a candidate victim, check if it's active, on SMP * we cannot steal active contexts */ if (mm->context.active) { id++; if (id > LAST_CONTEXT) id = FIRST_CONTEXT; continue; } /* Mark this mm has having no context anymore */ mm->context.id = MMU_NO_CONTEXT; /* Mark it stale on all CPUs that used this mm. For threaded * implementations, we set it on all threads on each core * represented in the mask. A future implementation will use * a core map instead but this will do for now. */ for_each_cpu(cpu, mm_cpumask(mm)) { for (i = cpu_first_thread_sibling(cpu); i <= cpu_last_thread_sibling(cpu); i++) { if (stale_map[i]) __set_bit(id, stale_map[i]); } cpu = i - 1; } return id; } /* This will happen if you have more CPUs than available contexts, * all we can do here is wait a bit and try again */ raw_spin_unlock(&context_lock); cpu_relax(); raw_spin_lock(&context_lock); /* This will cause the caller to try again */ return MMU_NO_CONTEXT; } static unsigned int steal_all_contexts(void) { struct mm_struct *mm; int cpu = smp_processor_id(); unsigned int id; for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) { /* Pick up the victim mm */ mm = context_mm[id]; /* Mark this mm as having no context anymore */ mm->context.id = MMU_NO_CONTEXT; if (id != FIRST_CONTEXT) { context_mm[id] = NULL; __clear_bit(id, context_map); } if (IS_ENABLED(CONFIG_SMP)) __clear_bit(id, stale_map[cpu]); } /* Flush the TLB for all contexts (not to be used on SMP) */ _tlbil_all(); nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT; return FIRST_CONTEXT; } /* Note that this will also be called on SMP if all other CPUs are * offlined, which means that it may be called for cpu != 0. For * this to work, we somewhat assume that CPUs that are onlined * come up with a fully clean TLB (or are cleaned when offlined) */ static unsigned int steal_context_up(unsigned int id) { struct mm_struct *mm; int cpu = smp_processor_id(); /* Pick up the victim mm */ mm = context_mm[id]; /* Flush the TLB for that context */ local_flush_tlb_mm(mm); /* Mark this mm has having no context anymore */ mm->context.id = MMU_NO_CONTEXT; /* XXX This clear should ultimately be part of local_flush_tlb_mm */ if (IS_ENABLED(CONFIG_SMP)) __clear_bit(id, stale_map[cpu]); return id; } static void set_context(unsigned long id, pgd_t *pgd) { if (IS_ENABLED(CONFIG_PPC_8xx)) { s16 offset = (s16)(__pa(swapper_pg_dir)); /* * Register M_TWB will contain base address of level 1 table minus the * lower part of the kernel PGDIR base address, so that all accesses to * level 1 table are done relative to lower part of kernel PGDIR base * address. */ mtspr(SPRN_M_TWB, __pa(pgd) - offset); /* Update context */ mtspr(SPRN_M_CASID, id - 1); /* sync */ mb(); } else if (kuap_is_disabled()) { if (IS_ENABLED(CONFIG_40x)) mb(); /* sync */ mtspr(SPRN_PID, id); isync(); } } void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { unsigned int id; unsigned int i, cpu = smp_processor_id(); unsigned long *map; /* No lockless fast path .. 
yet */ raw_spin_lock(&context_lock); if (IS_ENABLED(CONFIG_SMP)) { /* Mark us active and the previous one not anymore */ next->context.active++; if (prev) { WARN_ON(prev->context.active < 1); prev->context.active--; } } again: /* If we already have a valid assigned context, skip all that */ id = next->context.id; if (likely(id != MMU_NO_CONTEXT)) goto ctxt_ok; /* We really don't have a context, let's try to acquire one */ id = next_context; if (id > LAST_CONTEXT) id = FIRST_CONTEXT; map = context_map; /* No more free contexts, let's try to steal one */ if (nr_free_contexts == 0) { if (num_online_cpus() > 1) { id = steal_context_smp(id); if (id == MMU_NO_CONTEXT) goto again; goto stolen; } if (IS_ENABLED(CONFIG_PPC_8xx)) id = steal_all_contexts(); else id = steal_context_up(id); goto stolen; } nr_free_contexts--; /* We know there's at least one free context, try to find it */ while (__test_and_set_bit(id, map)) { id = find_next_zero_bit(map, LAST_CONTEXT+1, id); if (id > LAST_CONTEXT) id = FIRST_CONTEXT; } stolen: next_context = id + 1; context_mm[id] = next; next->context.id = id; ctxt_ok: /* If that context got marked stale on this CPU, then flush the * local TLB for it and unmark it before we use it */ if (IS_ENABLED(CONFIG_SMP) && test_bit(id, stale_map[cpu])) { local_flush_tlb_mm(next); /* XXX This clear should ultimately be part of local_flush_tlb_mm */ for (i = cpu_first_thread_sibling(cpu); i <= cpu_last_thread_sibling(cpu); i++) { if (stale_map[i]) __clear_bit(id, stale_map[i]); } } /* Flick the MMU and release lock */ if (IS_ENABLED(CONFIG_BDI_SWITCH)) abatron_pteptrs[1] = next->pgd; set_context(id, next->pgd); #if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP) tsk->thread.pid = id; #endif raw_spin_unlock(&context_lock); } /* * Set up the context for a new address space. */ int init_new_context(struct task_struct *t, struct mm_struct *mm) { mm->context.id = MMU_NO_CONTEXT; mm->context.active = 0; pte_frag_set(&mm->context, NULL); return 0; } /* * We're finished using the context for an address space. */ void destroy_context(struct mm_struct *mm) { unsigned long flags; unsigned int id; if (mm->context.id == MMU_NO_CONTEXT) return; WARN_ON(mm->context.active != 0); raw_spin_lock_irqsave(&context_lock, flags); id = mm->context.id; if (id != MMU_NO_CONTEXT) { __clear_bit(id, context_map); mm->context.id = MMU_NO_CONTEXT; context_mm[id] = NULL; nr_free_contexts++; } raw_spin_unlock_irqrestore(&context_lock, flags); } static int mmu_ctx_cpu_prepare(unsigned int cpu) { /* We don't touch CPU 0 map, it's allocated at aboot and kept * around forever */ if (cpu == boot_cpuid) return 0; stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL); return 0; } static int mmu_ctx_cpu_dead(unsigned int cpu) { #ifdef CONFIG_HOTPLUG_CPU if (cpu == boot_cpuid) return 0; kfree(stale_map[cpu]); stale_map[cpu] = NULL; /* We also clear the cpu_vm_mask bits of CPUs going away */ clear_tasks_mm_cpumask(cpu); #endif return 0; } /* * Initialize the context management stuff. 
*/ void __init mmu_context_init(void) { /* Mark init_mm as being active on all possible CPUs since * we'll get called with prev == init_mm the first time * we schedule on a given CPU */ init_mm.context.active = NR_CPUS; /* * Allocate the maps used by context management */ context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); if (!context_map) panic("%s: Failed to allocate %zu bytes\n", __func__, CTX_MAP_SIZE); context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1), SMP_CACHE_BYTES); if (!context_mm) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(void *) * (LAST_CONTEXT + 1)); if (IS_ENABLED(CONFIG_SMP)) { stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); if (!stale_map[boot_cpuid]) panic("%s: Failed to allocate %zu bytes\n", __func__, CTX_MAP_SIZE); cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE, "powerpc/mmu/ctx:prepare", mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead); } printk(KERN_INFO "MMU: Allocated %zu bytes of context maps for %d contexts\n", 2 * CTX_MAP_SIZE + (sizeof(void *) * (LAST_CONTEXT + 1)), LAST_CONTEXT - FIRST_CONTEXT + 1); /* * Some processors have too few contexts to reserve one for * init_mm, and require using context 0 for a normal task. * Other processors reserve the use of context zero for the kernel. * This code assumes FIRST_CONTEXT < 32. */ context_map[0] = (1 << FIRST_CONTEXT) - 1; next_context = FIRST_CONTEXT; nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1; }
linux-master
arch/powerpc/mm/nohash/mmu_context.c
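switch_mmu_context() above boils down to: keep the context the mm already has, otherwise take the next free ID from the bitmap, and when nothing is free steal one round-robin from its current owner. The single-threaded sketch below models just that allocation policy over a tiny ID space; the active counts and stale maps that make the real code SMP-safe are deliberately left out, and all names are invented.

#include <stdio.h>

#define FIRST_CTX 1
#define LAST_CTX  15    /* deliberately tiny so stealing is easy to trigger */

static unsigned char ctx_used[LAST_CTX + 1];
static int ctx_owner[LAST_CTX + 1];     /* which task owns each context */
static int next_ctx = FIRST_CTX;
static int nr_free = LAST_CTX - FIRST_CTX + 1;

static int alloc_context(int task)
{
        int id = next_ctx;

        if (id > LAST_CTX)
                id = FIRST_CTX;

        if (nr_free == 0) {
                /* Steal whatever the cursor points at (round-robin, no LRU);
                 * the previous owner is left with no context. */
                printf("  stealing context %d from task %d\n", id, ctx_owner[id]);
                ctx_owner[id] = task;
                next_ctx = id + 1;
                return id;
        }

        while (ctx_used[id]) {
                id++;
                if (id > LAST_CTX)
                        id = FIRST_CTX;
        }
        ctx_used[id] = 1;
        ctx_owner[id] = task;
        nr_free--;
        next_ctx = id + 1;
        return id;
}

int main(void)
{
        for (int task = 0; task < 20; task++)
                printf("task %2d -> context %2d\n", task, alloc_context(task));
        return 0;
}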
// SPDX-License-Identifier: GPL-2.0-or-later /* * Modifications by Kumar Gala ([email protected]) to support * E500 Book E processors. * * Copyright 2004,2010 Freescale Semiconductor, Inc. * * This file contains the routines for initializing the MMU * on the 4xx series of chips. * -- paulus * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/stddef.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/highmem.h> #include <linux/memblock.h> #include <linux/of_fdt.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/mmu.h> #include <linux/uaccess.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/setup.h> #include <asm/paca.h> #include <mm/mmu_decl.h> unsigned int tlbcam_index; struct tlbcam TLBCAM[NUM_TLBCAMS]; static struct { unsigned long start; unsigned long limit; phys_addr_t phys; } tlbcam_addrs[NUM_TLBCAMS]; #ifdef CONFIG_PPC_85xx /* * Return PA for this VA if it is mapped by a CAM, or 0 */ phys_addr_t v_block_mapped(unsigned long va) { int b; for (b = 0; b < tlbcam_index; ++b) if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit) return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start); return 0; } /* * Return VA for a given PA or 0 if not mapped */ unsigned long p_block_mapped(phys_addr_t pa) { int b; for (b = 0; b < tlbcam_index; ++b) if (pa >= tlbcam_addrs[b].phys && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start) +tlbcam_addrs[b].phys) return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys); return 0; } #endif /* * Set up a variable-size TLB entry (tlbcam). The parameters are not checked; * in particular size must be a power of 4 between 4k and the max supported by * an implementation; max may further be limited by what can be represented in * an unsigned long (for example, 32-bit implementations cannot support a 4GB * size). */ static void settlbcam(int index, unsigned long virt, phys_addr_t phys, unsigned long size, unsigned long flags, unsigned int pid) { unsigned int tsize; tsize = __ilog2(size) - 10; #if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) if ((flags & _PAGE_NO_CACHE) == 0) flags |= _PAGE_COHERENT; #endif TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1); TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid); TLBCAM[index].MAS2 = virt & PAGE_MASK; TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0; TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0; TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0; TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0; TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0; TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR; TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0; if (mmu_has_feature(MMU_FTR_BIG_PHYS)) TLBCAM[index].MAS7 = (u64)phys >> 32; /* Below is unlikely -- only for large user pages or similar */ if (pte_user(__pte(flags))) { TLBCAM[index].MAS3 |= MAS3_UR; TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? 
MAS3_UX : 0; TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0; } else { TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0; } tlbcam_addrs[index].start = virt; tlbcam_addrs[index].limit = virt + size - 1; tlbcam_addrs[index].phys = phys; } static unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, phys_addr_t phys) { unsigned int camsize = __ilog2(ram); unsigned int align = __ffs(virt | phys); unsigned long max_cam; if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { /* Convert (4^max) kB to (2^max) bytes */ max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10; camsize &= ~1U; align &= ~1U; } else { /* Convert (2^max) kB to (2^max) bytes */ max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10; } if (camsize > align) camsize = align; if (camsize > max_cam) camsize = max_cam; return 1UL << camsize; } static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt, unsigned long ram, int max_cam_idx, bool dryrun, bool init) { int i; unsigned long amount_mapped = 0; unsigned long boundary; if (strict_kernel_rwx_enabled()) boundary = (unsigned long)(_sinittext - _stext); else boundary = ram; /* Calculate CAM values */ for (i = 0; boundary && i < max_cam_idx; i++) { unsigned long cam_sz; pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX; cam_sz = calc_cam_sz(boundary, virt, phys); if (!dryrun) settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0); boundary -= cam_sz; amount_mapped += cam_sz; virt += cam_sz; phys += cam_sz; } for (ram -= amount_mapped; ram && i < max_cam_idx; i++) { unsigned long cam_sz; pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL; cam_sz = calc_cam_sz(ram, virt, phys); if (!dryrun) settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0); ram -= cam_sz; amount_mapped += cam_sz; virt += cam_sz; phys += cam_sz; } if (dryrun) return amount_mapped; if (init) { loadcam_multi(0, i, max_cam_idx); tlbcam_index = i; } else { loadcam_multi(0, i, 0); WARN_ON(i > tlbcam_index); } #ifdef CONFIG_PPC64 get_paca()->tcd.esel_next = i; get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; get_paca()->tcd.esel_first = i; #endif return amount_mapped; } unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init) { unsigned long virt = PAGE_OFFSET; phys_addr_t phys = memstart_addr; return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init); } #ifdef CONFIG_PPC32 #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" #endif unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1; } void flush_instruction_cache(void) { unsigned long tmp; tmp = mfspr(SPRN_L1CSR1); tmp |= L1CSR1_ICFI | L1CSR1_ICLFR; mtspr(SPRN_L1CSR1, tmp); isync(); } /* * MMU_init_hw does the chip-specific initialization of the MMU hardware. 
*/ void __init MMU_init_hw(void) { flush_instruction_cache(); } static unsigned long __init tlbcam_sz(int idx) { return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1; } void __init adjust_total_lowmem(void) { unsigned long ram; int i; /* adjust lowmem size to __max_low_memory */ ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem); i = switch_to_as1(); __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true); restore_to_as0(i, 0, NULL, 1); pr_info("Memory CAM mapping: "); for (i = 0; i < tlbcam_index - 1; i++) pr_cont("%lu/", tlbcam_sz(i) >> 20); pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20, (unsigned int)((total_lowmem - __max_low_memory) >> 20)); memblock_set_current_limit(memstart_addr + __max_low_memory); } #ifdef CONFIG_STRICT_KERNEL_RWX void mmu_mark_rodata_ro(void) { unsigned long remapped; remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false); WARN_ON(__max_low_memory != remapped); } #endif void mmu_mark_initmem_nx(void) { /* Everything is done in mmu_mark_rodata_ro() */ } void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { phys_addr_t limit = first_memblock_base + first_memblock_size; /* 64M mapped initially according to head_fsl_booke.S */ memblock_set_current_limit(min_t(u64, limit, 0x04000000)); } #ifdef CONFIG_RELOCATABLE int __initdata is_second_reloc; notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start) { unsigned long base = kernstart_virt_addr; phys_addr_t size; kernstart_addr = start; if (is_second_reloc) { virt_phys_offset = PAGE_OFFSET - memstart_addr; kaslr_late_init(); return; } /* * Relocatable kernel support based on processing of dynamic * relocation entries. Before we get the real memstart_addr, * We will compute the virt_phys_offset like this: * virt_phys_offset = stext.run - kernstart_addr * * stext.run = (KERNELBASE & ~0x3ffffff) + * (kernstart_addr & 0x3ffffff) * When we relocate, we have : * * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff) * * hence: * virt_phys_offset = (KERNELBASE & ~0x3ffffff) - * (kernstart_addr & ~0x3ffffff) * */ start &= ~0x3ffffff; base &= ~0x3ffffff; virt_phys_offset = base - start; early_get_first_memblock_info(__va(dt_ptr), &size); /* * We now get the memstart_addr, then we should check if this * address is the same as what the PAGE_OFFSET map to now. If * not we have to change the map of PAGE_OFFSET to memstart_addr * and do a second relocation. */ if (start != memstart_addr) { int n; long offset = start - memstart_addr; is_second_reloc = 1; n = switch_to_as1(); /* map a 64M area for the second relocation */ if (memstart_addr > start) map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM, false, true); else map_mem_in_cams_addr(start, PAGE_OFFSET + offset, 0x4000000, CONFIG_LOWMEM_CAM_NUM, false, true); restore_to_as0(n, offset, __va(dt_ptr), 1); /* We should never reach here */ panic("Relocation error"); } kaslr_early_init(__va(dt_ptr), size); } #endif #endif
linux-master
arch/powerpc/mm/nohash/e500.c
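calc_cam_sz() above limits each CAM to the largest power-of-two size allowed by the remaining RAM, the virt/phys alignment, and a hardware cap, with version-1 MMUs additionally restricted to powers of four. The userspace sketch below walks an example layout through that sizing loop; the 256 MB cap and the addresses are made up, and the helpers are hand-rolled rather than the kernel's __ilog2()/__ffs().

#include <stdio.h>

static unsigned int demo_ilog2(unsigned long v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

static unsigned int demo_ffs(unsigned long v)
{
        unsigned int r = 0;

        if (!v)
                return 8 * sizeof(v);   /* defensive; never hit in this demo */
        while (!(v & 1)) {
                v >>= 1;
                r++;
        }
        return r;
}

static unsigned long demo_cam_sz(unsigned long ram, unsigned long virt,
                                 unsigned long phys, int mav1)
{
        unsigned int camsize = demo_ilog2(ram);
        unsigned int align = demo_ffs(virt | phys);
        unsigned int max_cam = 28;              /* pretend cap: 256 MB */

        if (mav1) {                             /* sizes must be powers of 4 */
                camsize &= ~1U;
                align &= ~1U;
        }
        if (camsize > align)
                camsize = align;
        if (camsize > max_cam)
                camsize = max_cam;
        return 1UL << camsize;
}

int main(void)
{
        unsigned long ram = 768UL << 20;        /* example: 768 MB to cover */
        unsigned long virt = 0xc0000000UL, phys = 0;
        int i = 0;

        while (ram) {
                unsigned long sz = demo_cam_sz(ram, virt, phys, 1);

                printf("CAM%d: %4lu MB at virt %#010lx\n", i++, sz >> 20, virt);
                ram -= sz;
                virt += sz;
                phys += sz;
        }
        return 0;
}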
// SPDX-License-Identifier: GPL-2.0 /* * PPC Huge TLB Page Support for Book3E MMU * * Copyright (C) 2009 David Gibson, IBM Corporation. * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor * */ #include <linux/mm.h> #include <linux/hugetlb.h> #include <asm/mmu.h> #ifdef CONFIG_PPC64 #include <asm/paca.h> static inline int tlb1_next(void) { struct paca_struct *paca = get_paca(); struct tlb_core_data *tcd; int this, next; tcd = paca->tcd_ptr; this = tcd->esel_next; next = this + 1; if (next >= tcd->esel_max) next = tcd->esel_first; tcd->esel_next = next; return this; } static inline void book3e_tlb_lock(void) { struct paca_struct *paca = get_paca(); unsigned long tmp; int token = smp_processor_id() + 1; /* * Besides being unnecessary in the absence of SMT, this * check prevents trying to do lbarx/stbcx. on e5500 which * doesn't implement either feature. */ if (!cpu_has_feature(CPU_FTR_SMT)) return; asm volatile(".machine push;" ".machine e6500;" "1: lbarx %0, 0, %1;" "cmpwi %0, 0;" "bne 2f;" "stbcx. %2, 0, %1;" "bne 1b;" "b 3f;" "2: lbzx %0, 0, %1;" "cmpwi %0, 0;" "bne 2b;" "b 1b;" "3:" ".machine pop;" : "=&r" (tmp) : "r" (&paca->tcd_ptr->lock), "r" (token) : "memory"); } static inline void book3e_tlb_unlock(void) { struct paca_struct *paca = get_paca(); if (!cpu_has_feature(CPU_FTR_SMT)) return; isync(); paca->tcd_ptr->lock = 0; } #else static inline int tlb1_next(void) { int index, ncams; ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; index = this_cpu_read(next_tlbcam_idx); /* Just round-robin the entries and wrap when we hit the end */ if (unlikely(index == ncams - 1)) __this_cpu_write(next_tlbcam_idx, tlbcam_index); else __this_cpu_inc(next_tlbcam_idx); return index; } static inline void book3e_tlb_lock(void) { } static inline void book3e_tlb_unlock(void) { } #endif static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid) { int found = 0; mtspr(SPRN_MAS6, pid << 16); asm volatile( "tlbsx 0,%1\n" "mfspr %0,0x271\n" "srwi %0,%0,31\n" : "=&r"(found) : "r"(ea)); return found; } static void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte) { unsigned long mas1, mas2; u64 mas7_3; unsigned long psize, tsize, shift; unsigned long flags; struct mm_struct *mm; int index; if (unlikely(is_kernel_addr(ea))) return; mm = vma->vm_mm; psize = vma_mmu_pagesize(vma); shift = __ilog2(psize); tsize = shift - 10; /* * We can't be interrupted while we're setting up the MAS * registers or after we've confirmed that no tlb exists. */ local_irq_save(flags); book3e_tlb_lock(); if (unlikely(book3e_tlb_exists(ea, mm->context.id))) { book3e_tlb_unlock(); local_irq_restore(flags); return; } /* We have to use the CAM(TLB1) on FSL parts for hugepages */ index = tlb1_next(); mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1)); mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize); mas2 = ea & ~((1UL << shift) - 1); mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT; mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK; if (!pte_dirty(pte)) mas7_3 &= ~(MAS3_SW|MAS3_UW); mtspr(SPRN_MAS1, mas1); mtspr(SPRN_MAS2, mas2); if (mmu_has_feature(MMU_FTR_BIG_PHYS)) mtspr(SPRN_MAS7, upper_32_bits(mas7_3)); mtspr(SPRN_MAS3, lower_32_bits(mas7_3)); asm volatile ("tlbwe"); book3e_tlb_unlock(); local_irq_restore(flags); } /* * This is called at the end of handling a user page fault, when the * fault has been handled by updating a PTE in the linux page tables. * * This must always be called with the pte lock held. 
*/ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int nr) { if (is_vm_hugetlb_page(vma)) book3e_hugetlb_preload(vma, address, *ptep); } void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { struct hstate *hstate = hstate_file(vma->vm_file); unsigned long tsize = huge_page_shift(hstate) - 10; __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0); }
linux-master
arch/powerpc/mm/nohash/e500_hugetlbpage.c
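tlb1_next() above is just a round-robin cursor over a window of TLB1 entries, wrapping from esel_max back to esel_first. The few lines below replay that wrap-around for an arbitrary example window (entries 4 through 7).

#include <stdio.h>

static int esel_first = 4, esel_max = 8, esel_next = 4; /* example window */

static int demo_tlb1_next(void)
{
        int this = esel_next;
        int next = this + 1;

        if (next >= esel_max)
                next = esel_first;
        esel_next = next;
        return this;
}

int main(void)
{
        for (int i = 0; i < 10; i++)
                printf("%d ", demo_tlb1_next());
        printf("\n");   /* prints 4 5 6 7 4 5 6 7 4 5 */
        return 0;
}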
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2005, Paul Mackerras, IBM Corporation. * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation. * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation. */ #include <linux/sched.h> #include <linux/memblock.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/dma.h> #include <asm/code-patching.h> #include <mm/mmu_decl.h> #ifdef CONFIG_SPARSEMEM_VMEMMAP /* * On Book3E CPUs, the vmemmap is currently mapped in the top half of * the vmalloc space using normal page tables, though the size of * pages encoded in the PTEs can be different */ int __meminit vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys) { /* Create a PTE encoding without page size */ unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW; /* PTEs only contain page size encodings up to 32M */ BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf); /* Encode the size in the PTE */ flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8; /* For each PTE for that area, map things. Note that we don't * increment phys because all PTEs are of the large size and * thus must have the low bits clear */ for (i = 0; i < page_size; i += PAGE_SIZE) BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags))); return 0; } #ifdef CONFIG_MEMORY_HOTPLUG void vmemmap_remove_mapping(unsigned long start, unsigned long page_size) { } #endif #endif /* CONFIG_SPARSEMEM_VMEMMAP */ static void __init *early_alloc_pgtable(unsigned long size) { void *ptr; ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT, __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE); if (!ptr) panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n", __func__, size, size, __pa(MAX_DMA_ADDRESS)); return ptr; } /* * map_kernel_page currently only called by __ioremap * map_kernel_page adds an entry to the ioremap page table * and adds an entry to the HPT, possibly bolting it */ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) { pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; BUILD_BUG_ON(TASK_SIZE_USER64 > PGTABLE_RANGE); if (slab_is_available()) { pgdp = pgd_offset_k(ea); p4dp = p4d_offset(pgdp, ea); pudp = pud_alloc(&init_mm, p4dp, ea); if (!pudp) return -ENOMEM; pmdp = pmd_alloc(&init_mm, pudp, ea); if (!pmdp) return -ENOMEM; ptep = pte_alloc_kernel(pmdp, ea); if (!ptep) return -ENOMEM; } else { pgdp = pgd_offset_k(ea); p4dp = p4d_offset(pgdp, ea); if (p4d_none(*p4dp)) { pudp = early_alloc_pgtable(PUD_TABLE_SIZE); p4d_populate(&init_mm, p4dp, pudp); } pudp = pud_offset(p4dp, ea); if (pud_none(*pudp)) { pmdp = early_alloc_pgtable(PMD_TABLE_SIZE); pud_populate(&init_mm, pudp, pmdp); } pmdp = pmd_offset(pudp, ea); if (!pmd_present(*pmdp)) { ptep = early_alloc_pgtable(PTE_TABLE_SIZE); pmd_populate_kernel(&init_mm, pmdp, ptep); } ptep = pte_offset_kernel(pmdp, ea); } set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); smp_wmb(); return 0; } void __patch_exception(int exc, unsigned long addr) { unsigned int *ibase = &interrupt_base_book3e; /* * Our exceptions vectors start with a NOP and -then- a branch * to deal with single stepping from userspace which stops on * the second instruction. Thus we need to patch the second * instruction of the exception, not the first one. */ patch_branch(ibase + (exc / 4) + 1, addr, 0); }
linux-master
arch/powerpc/mm/nohash/book3e_pgtable.c
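vmemmap_create_mapping() in the file above fills every PTE slot covering a large page with the same physical address and an explicit size encoding shifted into bits 8 and up, relying on the low address bits being clear because the mapping is larger than PAGE_SIZE. A standalone sketch of that addressing pattern, independent of the kernel page-table helpers; the virtual/physical addresses, the 16M size and the encoding value 0x7 are assumptions for illustration only.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long start = 0xf000000000000000UL;	/* assumed vmemmap va */
	unsigned long phys  = 0x20000000UL;		/* assumed 16M-aligned pa */
	unsigned long page_size = 16UL << 20;		/* one 16M "large" page */
	unsigned long enc = 0x7;			/* assumed encoding, <= 0xf */
	unsigned long flags = enc << 8;			/* size field lives at bits 8.. */
	unsigned long mapped = 0;

	/*
	 * Every 4K PTE slot covering the 16M area gets the same physical
	 * address and flags: the size encoding tells the MMU how much each
	 * entry really maps, so phys is never advanced, exactly as in the
	 * vmemmap_create_mapping() loop above.
	 */
	for (unsigned long i = 0; i < page_size; i += PAGE_SIZE) {
		if (i % (4UL << 20) == 0)
			printf("pte for va %#lx -> pa %#lx flags %#lx\n",
			       start + i, phys, flags);
		mapped++;
	}
	printf("%lu identical PTEs written\n", mapped);
	return 0;
}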
// SPDX-License-Identifier: GPL-2.0-only // // Copyright (C) 2019 Jason Yan <[email protected]> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/stddef.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/memblock.h> #include <linux/libfdt.h> #include <linux/crash_core.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <asm/cacheflush.h> #include <asm/kdump.h> #include <mm/mmu_decl.h> struct regions { unsigned long pa_start; unsigned long pa_end; unsigned long kernel_size; unsigned long dtb_start; unsigned long dtb_end; unsigned long initrd_start; unsigned long initrd_end; unsigned long crash_start; unsigned long crash_end; int reserved_mem; int reserved_mem_addr_cells; int reserved_mem_size_cells; }; struct regions __initdata regions; static __init void kaslr_get_cmdline(void *fdt) { early_init_dt_scan_chosen(boot_command_line); } static unsigned long __init rotate_xor(unsigned long hash, const void *area, size_t size) { size_t i; const unsigned long *ptr = area; for (i = 0; i < size / sizeof(hash); i++) { /* Rotate by odd number of bits and XOR. */ hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); hash ^= ptr[i]; } return hash; } /* Attempt to create a simple starting entropy. This can make it defferent for * every build but it is still not enough. Stronger entropy should * be added to make it change for every boot. */ static unsigned long __init get_boot_seed(void *fdt) { unsigned long hash = 0; /* build-specific string for starting entropy. */ hash = rotate_xor(hash, linux_banner, strlen(linux_banner)); hash = rotate_xor(hash, fdt, fdt_totalsize(fdt)); return hash; } static __init u64 get_kaslr_seed(void *fdt) { int node, len; fdt64_t *prop; u64 ret; node = fdt_path_offset(fdt, "/chosen"); if (node < 0) return 0; prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len); if (!prop || len != sizeof(u64)) return 0; ret = fdt64_to_cpu(*prop); *prop = 0; return ret; } static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2) { return e1 >= s2 && e2 >= s1; } static __init bool overlaps_reserved_region(const void *fdt, u32 start, u32 end) { int subnode, len, i; u64 base, size; /* check for overlap with /memreserve/ entries */ for (i = 0; i < fdt_num_mem_rsv(fdt); i++) { if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0) continue; if (regions_overlap(start, end, base, base + size)) return true; } if (regions.reserved_mem < 0) return false; /* check for overlap with static reservations in /reserved-memory */ for (subnode = fdt_first_subnode(fdt, regions.reserved_mem); subnode >= 0; subnode = fdt_next_subnode(fdt, subnode)) { const fdt32_t *reg; u64 rsv_end; len = 0; reg = fdt_getprop(fdt, subnode, "reg", &len); while (len >= (regions.reserved_mem_addr_cells + regions.reserved_mem_size_cells)) { base = fdt32_to_cpu(reg[0]); if (regions.reserved_mem_addr_cells == 2) base = (base << 32) | fdt32_to_cpu(reg[1]); reg += regions.reserved_mem_addr_cells; len -= 4 * regions.reserved_mem_addr_cells; size = fdt32_to_cpu(reg[0]); if (regions.reserved_mem_size_cells == 2) size = (size << 32) | fdt32_to_cpu(reg[1]); reg += regions.reserved_mem_size_cells; len -= 4 * regions.reserved_mem_size_cells; if (base >= regions.pa_end) continue; rsv_end = min(base + size, (u64)U32_MAX); if (regions_overlap(start, end, base, rsv_end)) return true; } } return false; } static __init bool overlaps_region(const void *fdt, u32 start, u32 end) { if (regions_overlap(start, end, 
__pa(_stext), __pa(_end))) return true; if (regions_overlap(start, end, regions.dtb_start, regions.dtb_end)) return true; if (regions_overlap(start, end, regions.initrd_start, regions.initrd_end)) return true; if (regions_overlap(start, end, regions.crash_start, regions.crash_end)) return true; return overlaps_reserved_region(fdt, start, end); } static void __init get_crash_kernel(void *fdt, unsigned long size) { #ifdef CONFIG_CRASH_CORE unsigned long long crash_size, crash_base; int ret; ret = parse_crashkernel(boot_command_line, size, &crash_size, &crash_base); if (ret != 0 || crash_size == 0) return; if (crash_base == 0) crash_base = KDUMP_KERNELBASE; regions.crash_start = (unsigned long)crash_base; regions.crash_end = (unsigned long)(crash_base + crash_size); pr_debug("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size); #endif } static void __init get_initrd_range(void *fdt) { u64 start, end; int node, len; const __be32 *prop; node = fdt_path_offset(fdt, "/chosen"); if (node < 0) return; prop = fdt_getprop(fdt, node, "linux,initrd-start", &len); if (!prop) return; start = of_read_number(prop, len / 4); prop = fdt_getprop(fdt, node, "linux,initrd-end", &len); if (!prop) return; end = of_read_number(prop, len / 4); regions.initrd_start = (unsigned long)start; regions.initrd_end = (unsigned long)end; pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end); } static __init unsigned long get_usable_address(const void *fdt, unsigned long start, unsigned long offset) { unsigned long pa; unsigned long pa_end; for (pa = offset; (long)pa > (long)start; pa -= SZ_16K) { pa_end = pa + regions.kernel_size; if (overlaps_region(fdt, pa, pa_end)) continue; return pa; } return 0; } static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells, int *size_cells) { const int *prop; int len; /* * Retrieve the #address-cells and #size-cells properties * from the 'node', or use the default if not provided. 
*/ *addr_cells = *size_cells = 1; prop = fdt_getprop(fdt, node, "#address-cells", &len); if (len == 4) *addr_cells = fdt32_to_cpu(*prop); prop = fdt_getprop(fdt, node, "#size-cells", &len); if (len == 4) *size_cells = fdt32_to_cpu(*prop); } static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index, unsigned long offset) { unsigned long koffset = 0; unsigned long start; while ((long)index >= 0) { offset = memstart_addr + index * SZ_64M + offset; start = memstart_addr + index * SZ_64M; koffset = get_usable_address(dt_ptr, start, offset); if (koffset) break; index--; } if (koffset != 0) koffset -= memstart_addr; return koffset; } static inline __init bool kaslr_disabled(void) { return strstr(boot_command_line, "nokaslr") != NULL; } static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size, unsigned long kernel_sz) { unsigned long offset, random; unsigned long ram, linear_sz; u64 seed; unsigned long index; kaslr_get_cmdline(dt_ptr); if (kaslr_disabled()) return 0; random = get_boot_seed(dt_ptr); seed = get_tb() << 32; seed ^= get_tb(); random = rotate_xor(random, &seed, sizeof(seed)); /* * Retrieve (and wipe) the seed from the FDT */ seed = get_kaslr_seed(dt_ptr); if (seed) random = rotate_xor(random, &seed, sizeof(seed)); else pr_warn("KASLR: No safe seed for randomizing the kernel base.\n"); ram = min_t(phys_addr_t, __max_low_memory, size); ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true); linear_sz = min_t(unsigned long, ram, SZ_512M); /* If the linear size is smaller than 64M, do not randomize */ if (linear_sz < SZ_64M) return 0; /* check for a reserved-memory node and record its cell sizes */ regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory"); if (regions.reserved_mem >= 0) get_cell_sizes(dt_ptr, regions.reserved_mem, &regions.reserved_mem_addr_cells, &regions.reserved_mem_size_cells); regions.pa_start = memstart_addr; regions.pa_end = memstart_addr + linear_sz; regions.dtb_start = __pa(dt_ptr); regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr); regions.kernel_size = kernel_sz; get_initrd_range(dt_ptr); get_crash_kernel(dt_ptr, ram); /* * Decide which 64M we want to start * Only use the low 8 bits of the random seed */ index = random & 0xFF; index %= linear_sz / SZ_64M; /* Decide offset inside 64M */ offset = random % (SZ_64M - kernel_sz); offset = round_down(offset, SZ_16K); return kaslr_legal_offset(dt_ptr, index, offset); } /* * To see if we need to relocate the kernel to a random offset * void *dt_ptr - address of the device tree * phys_addr_t size - size of the first memory block */ notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size) { unsigned long tlb_virt; phys_addr_t tlb_phys; unsigned long offset; unsigned long kernel_sz; kernel_sz = (unsigned long)_end - (unsigned long)_stext; offset = kaslr_choose_location(dt_ptr, size, kernel_sz); if (offset == 0) return; kernstart_virt_addr += offset; kernstart_addr += offset; is_second_reloc = 1; if (offset >= SZ_64M) { tlb_virt = round_down(kernstart_virt_addr, SZ_64M); tlb_phys = round_down(kernstart_addr, SZ_64M); /* Create kernel map to relocate in */ create_kaslr_tlb_entry(1, tlb_virt, tlb_phys); } /* Copy the kernel to it's new location and run */ memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz); flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz); reloc_kernel_entry(dt_ptr, kernstart_virt_addr); } void __init kaslr_late_init(void) { /* If randomized, clear the original kernel */ if (kernstart_virt_addr 
!= KERNELBASE) {
		unsigned long kernel_sz;

		kernel_sz = (unsigned long)_end - kernstart_virt_addr;

		memzero_explicit((void *)KERNELBASE, kernel_sz);
	}
}
linux-master
arch/powerpc/mm/nohash/kaslr_booke.c
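kaslr_choose_location() above mixes several weak entropy sources with rotate_xor() and then derives two numbers: which 64M block of the linear mapping to use (low 8 bits of the hash, modulo the number of blocks) and a 16K-aligned offset inside that block. A self-contained userspace sketch of the mixing and selection; the banner string, seed, linear size and kernel size are made-up stand-ins for the real inputs.

#include <stdio.h>
#include <string.h>

#define SZ_16K	(16UL << 10)
#define SZ_64M	(64UL << 20)

/* Rotate by an odd number of bits and XOR each word in, as in the kernel helper. */
static unsigned long rotate_xor(unsigned long hash, const void *area, size_t size)
{
	const unsigned long *ptr = area;

	for (size_t i = 0; i < size / sizeof(hash); i++) {
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}
	return hash;
}

int main(void)
{
	/* Assumed entropy inputs standing in for the banner, FDT and timebase. */
	const char banner[] = "Linux version x.y.z (illustrative)";
	unsigned long seed = 0x123456789abcdef0UL;
	unsigned long random = 0;
	unsigned long linear_sz = 512UL << 20;	/* assumed linear mapping size */
	unsigned long kernel_sz = 24UL << 20;	/* assumed kernel image size */

	random = rotate_xor(random, banner, strlen(banner));
	random = rotate_xor(random, &seed, sizeof(seed));

	/* Low 8 bits pick the 64M block, the rest the offset inside it. */
	unsigned long index = (random & 0xFF) % (linear_sz / SZ_64M);
	unsigned long offset = random % (SZ_64M - kernel_sz);

	offset &= ~(SZ_16K - 1);		/* round_down(offset, SZ_16K) */

	printf("index %lu, offset %#lx\n", index, offset);
	return 0;
}

The candidate address is then walked downwards in 16K steps by get_usable_address() until it no longer overlaps the kernel, DTB, initrd, crashkernel or reserved regions.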
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains the routines for TLB flushing. * On machines where the MMU does not use a hash table to store virtual to * physical translations (ie, SW loaded TLBs or Book3E compilant processors, * this does -not- include 603 however which shares the implementation with * hash based processors) * * -- BenH * * Copyright 2008,2009 Ben Herrenschmidt <[email protected]> * IBM Corp. * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/preempt.h> #include <linux/spinlock.h> #include <linux/memblock.h> #include <linux/of_fdt.h> #include <linux/hugetlb.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/code-patching.h> #include <asm/cputhreads.h> #include <asm/hugetlb.h> #include <asm/paca.h> #include <mm/mmu_decl.h> /* * This struct lists the sw-supported page sizes. The hardawre MMU may support * other sizes not listed here. The .ind field is only used on MMUs that have * indirect page table entries. */ #ifdef CONFIG_PPC_E500 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { .shift = 12, .enc = BOOK3E_PAGESZ_4K, }, [MMU_PAGE_2M] = { .shift = 21, .enc = BOOK3E_PAGESZ_2M, }, [MMU_PAGE_4M] = { .shift = 22, .enc = BOOK3E_PAGESZ_4M, }, [MMU_PAGE_16M] = { .shift = 24, .enc = BOOK3E_PAGESZ_16M, }, [MMU_PAGE_64M] = { .shift = 26, .enc = BOOK3E_PAGESZ_64M, }, [MMU_PAGE_256M] = { .shift = 28, .enc = BOOK3E_PAGESZ_256M, }, [MMU_PAGE_1G] = { .shift = 30, .enc = BOOK3E_PAGESZ_1GB, }, }; static inline int mmu_get_tsize(int psize) { return mmu_psize_defs[psize].enc; } #else static inline int mmu_get_tsize(int psize) { /* This isn't used on !Book3E for now */ return 0; } #endif #ifdef CONFIG_PPC_8xx struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { .shift = 12, }, [MMU_PAGE_16K] = { .shift = 14, }, [MMU_PAGE_512K] = { .shift = 19, }, [MMU_PAGE_8M] = { .shift = 23, }, }; #endif /* The variables below are currently only used on 64-bit Book3E * though this will probably be made common with other nohash * implementations at some point */ #ifdef CONFIG_PPC64 int mmu_pte_psize; /* Page size used for PTE pages */ int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */ unsigned long linear_map_top; /* Top of linear mapping */ /* * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug * exceptions. This is used for bolted and e6500 TLB miss handlers which * do not modify this SPRG in the TLB miss code; for other TLB miss handlers, * this is set to zero. 
*/ int extlb_level_exc; #endif /* CONFIG_PPC64 */ #ifdef CONFIG_PPC_E500 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */ DEFINE_PER_CPU(int, next_tlbcam_idx); EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx); #endif /* * Base TLB flushing operations: * * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes kernel pages * * - local_* variants of page and mm only apply to the current * processor */ #ifndef CONFIG_PPC_8xx /* * These are the base non-SMP variants of page and mm flushing */ void local_flush_tlb_mm(struct mm_struct *mm) { unsigned int pid; preempt_disable(); pid = mm->context.id; if (pid != MMU_NO_CONTEXT) _tlbil_pid(pid); preempt_enable(); } EXPORT_SYMBOL(local_flush_tlb_mm); void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, int tsize, int ind) { unsigned int pid; preempt_disable(); pid = mm ? mm->context.id : 0; if (pid != MMU_NO_CONTEXT) _tlbil_va(vmaddr, pid, tsize, ind); preempt_enable(); } void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, mmu_get_tsize(mmu_virtual_psize), 0); } EXPORT_SYMBOL(local_flush_tlb_page); void local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, int psize) { __local_flush_tlb_page(mm, vmaddr, mmu_get_tsize(psize), 0); } EXPORT_SYMBOL(local_flush_tlb_page_psize); #endif /* * And here are the SMP non-local implementations */ #ifdef CONFIG_SMP static DEFINE_RAW_SPINLOCK(tlbivax_lock); struct tlb_flush_param { unsigned long addr; unsigned int pid; unsigned int tsize; unsigned int ind; }; static void do_flush_tlb_mm_ipi(void *param) { struct tlb_flush_param *p = param; _tlbil_pid(p ? p->pid : 0); } static void do_flush_tlb_page_ipi(void *param) { struct tlb_flush_param *p = param; _tlbil_va(p->addr, p->pid, p->tsize, p->ind); } /* Note on invalidations and PID: * * We snapshot the PID with preempt disabled. At this point, it can still * change either because: * - our context is being stolen (PID -> NO_CONTEXT) on another CPU * - we are invaliating some target that isn't currently running here * and is concurrently acquiring a new PID on another CPU * - some other CPU is re-acquiring a lost PID for this mm * etc... * * However, this shouldn't be a problem as we only guarantee * invalidation of TLB entries present prior to this call, so we * don't care about the PID changing, and invalidating a stale PID * is generally harmless. */ void flush_tlb_mm(struct mm_struct *mm) { unsigned int pid; preempt_disable(); pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) goto no_context; if (!mm_is_core_local(mm)) { struct tlb_flush_param p = { .pid = pid }; /* Ignores smp_processor_id() even if set. */ smp_call_function_many(mm_cpumask(mm), do_flush_tlb_mm_ipi, &p, 1); } _tlbil_pid(pid); no_context: preempt_enable(); } EXPORT_SYMBOL(flush_tlb_mm); void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, int tsize, int ind) { struct cpumask *cpu_mask; unsigned int pid; /* * This function as well as __local_flush_tlb_page() must only be called * for user contexts. 
*/ if (WARN_ON(!mm)) return; preempt_disable(); pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) goto bail; cpu_mask = mm_cpumask(mm); if (!mm_is_core_local(mm)) { /* If broadcast tlbivax is supported, use it */ if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) { int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL); if (lock) raw_spin_lock(&tlbivax_lock); _tlbivax_bcast(vmaddr, pid, tsize, ind); if (lock) raw_spin_unlock(&tlbivax_lock); goto bail; } else { struct tlb_flush_param p = { .pid = pid, .addr = vmaddr, .tsize = tsize, .ind = ind, }; /* Ignores smp_processor_id() even if set in cpu_mask */ smp_call_function_many(cpu_mask, do_flush_tlb_page_ipi, &p, 1); } } _tlbil_va(vmaddr, pid, tsize, ind); bail: preempt_enable(); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { #ifdef CONFIG_HUGETLB_PAGE if (vma && is_vm_hugetlb_page(vma)) flush_hugetlb_page(vma, vmaddr); #endif __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, mmu_get_tsize(mmu_virtual_psize), 0); } EXPORT_SYMBOL(flush_tlb_page); #endif /* CONFIG_SMP */ /* * Flush kernel TLB entries in the given range */ #ifndef CONFIG_PPC_8xx void flush_tlb_kernel_range(unsigned long start, unsigned long end) { #ifdef CONFIG_SMP preempt_disable(); smp_call_function(do_flush_tlb_mm_ipi, NULL, 1); _tlbil_pid(0); preempt_enable(); #else _tlbil_pid(0); #endif } EXPORT_SYMBOL(flush_tlb_kernel_range); #endif /* * Currently, for range flushing, we just do a full mm flush. This should * be optimized based on a threshold on the size of the range, since * some implementation can stack multiple tlbivax before a tlbsync but * for now, we keep it that way */ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK)) flush_tlb_page(vma, start); else flush_tlb_mm(vma->vm_mm); } EXPORT_SYMBOL(flush_tlb_range); void tlb_flush(struct mmu_gather *tlb) { flush_tlb_mm(tlb->mm); } /* * Below are functions specific to the 64-bit variant of Book3E though that * may change in the future */ #ifdef CONFIG_PPC64 /* * Handling of virtual linear page tables or indirect TLB entries * flushing when PTE pages are freed */ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) { int tsize = mmu_psize_defs[mmu_pte_psize].enc; if (book3e_htw_mode != PPC_HTW_NONE) { unsigned long start = address & PMD_MASK; unsigned long end = address + PMD_SIZE; unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; /* This isn't the most optimal, ideally we would factor out the * while preempt & CPU mask mucking around, or even the IPI but * it will do for now */ while (start < end) { __flush_tlb_page(tlb->mm, start, tsize, 1); start += size; } } else { unsigned long rmask = 0xf000000000000000ul; unsigned long rid = (address & rmask) | 0x1000000000000000ul; unsigned long vpte = address & ~rmask; vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful; vpte |= rid; __flush_tlb_page(tlb->mm, vpte, tsize, 0); } } static void __init setup_page_sizes(void) { unsigned int tlb0cfg; unsigned int tlb0ps; unsigned int eptcfg; int i, psize; #ifdef CONFIG_PPC_E500 unsigned int mmucfg = mfspr(SPRN_MMUCFG); int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E); if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG); unsigned int min_pg, max_pg; min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT; for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { struct 
mmu_psize_def *def; unsigned int shift; def = &mmu_psize_defs[psize]; shift = def->shift; if (shift == 0 || shift & 1) continue; /* adjust to be in terms of 4^shift Kb */ shift = (shift - 10) >> 1; if ((shift >= min_pg) && (shift <= max_pg)) def->flags |= MMU_PAGE_SIZE_DIRECT; } goto out; } if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { u32 tlb1cfg, tlb1ps; tlb0cfg = mfspr(SPRN_TLB0CFG); tlb1cfg = mfspr(SPRN_TLB1CFG); tlb1ps = mfspr(SPRN_TLB1PS); eptcfg = mfspr(SPRN_EPTCFG); if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT)) book3e_htw_mode = PPC_HTW_E6500; /* * We expect 4K subpage size and unrestricted indirect size. * The lack of a restriction on indirect size is a Freescale * extension, indicated by PSn = 0 but SPSn != 0. */ if (eptcfg != 2) book3e_htw_mode = PPC_HTW_NONE; for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { struct mmu_psize_def *def = &mmu_psize_defs[psize]; if (!def->shift) continue; if (tlb1ps & (1U << (def->shift - 10))) { def->flags |= MMU_PAGE_SIZE_DIRECT; if (book3e_htw_mode && psize == MMU_PAGE_2M) def->flags |= MMU_PAGE_SIZE_INDIRECT; } } goto out; } #endif tlb0cfg = mfspr(SPRN_TLB0CFG); tlb0ps = mfspr(SPRN_TLB0PS); eptcfg = mfspr(SPRN_EPTCFG); /* Look for supported direct sizes */ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { struct mmu_psize_def *def = &mmu_psize_defs[psize]; if (tlb0ps & (1U << (def->shift - 10))) def->flags |= MMU_PAGE_SIZE_DIRECT; } /* Indirect page sizes supported ? */ if ((tlb0cfg & TLBnCFG_IND) == 0 || (tlb0cfg & TLBnCFG_PT) == 0) goto out; book3e_htw_mode = PPC_HTW_IBM; /* Now, we only deal with one IND page size for each * direct size. Hopefully all implementations today are * unambiguous, but we might want to be careful in the * future. */ for (i = 0; i < 3; i++) { unsigned int ps, sps; sps = eptcfg & 0x1f; eptcfg >>= 5; ps = eptcfg & 0x1f; eptcfg >>= 5; if (!ps || !sps) continue; for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { struct mmu_psize_def *def = &mmu_psize_defs[psize]; if (ps == (def->shift - 10)) def->flags |= MMU_PAGE_SIZE_INDIRECT; if (sps == (def->shift - 10)) def->ind = ps + 10; } } out: /* Cleanup array and print summary */ pr_info("MMU: Supported page sizes\n"); for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { struct mmu_psize_def *def = &mmu_psize_defs[psize]; const char *__page_type_names[] = { "unsupported", "direct", "indirect", "direct & indirect" }; if (def->flags == 0) { def->shift = 0; continue; } pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10), __page_type_names[def->flags & 0x3]); } } static void __init setup_mmu_htw(void) { /* * If we want to use HW tablewalk, enable it by patching the TLB miss * handlers to branch to the one dedicated to it. */ switch (book3e_htw_mode) { case PPC_HTW_IBM: patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e); patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e); break; #ifdef CONFIG_PPC_E500 case PPC_HTW_E6500: extlb_level_exc = EX_TLB_SIZE; patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e); break; #endif } pr_info("MMU: Book3E HW tablewalk %s\n", book3e_htw_mode != PPC_HTW_NONE ? 
"enabled" : "not supported"); } /* * Early initialization of the MMU TLB code */ static void early_init_this_mmu(void) { unsigned int mas4; /* Set MAS4 based on page table setting */ mas4 = 0x4 << MAS4_WIMGED_SHIFT; switch (book3e_htw_mode) { case PPC_HTW_E6500: mas4 |= MAS4_INDD; mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT; mas4 |= MAS4_TLBSELD(1); mmu_pte_psize = MMU_PAGE_2M; break; case PPC_HTW_IBM: mas4 |= MAS4_INDD; mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT; mmu_pte_psize = MMU_PAGE_1M; break; case PPC_HTW_NONE: mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; mmu_pte_psize = mmu_virtual_psize; break; } mtspr(SPRN_MAS4, mas4); #ifdef CONFIG_PPC_E500 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { unsigned int num_cams; bool map = true; /* use a quarter of the TLBCAM for bolted linear map */ num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; /* * Only do the mapping once per core, or else the * transient mapping would cause problems. */ #ifdef CONFIG_SMP if (hweight32(get_tensr()) > 1) map = false; #endif if (map) linear_map_top = map_mem_in_cams(linear_map_top, num_cams, false, true); } #endif /* A sync won't hurt us after mucking around with * the MMU configuration */ mb(); } static void __init early_init_mmu_global(void) { /* XXX This should be decided at runtime based on supported * page sizes in the TLB, but for now let's assume 16M is * always there and a good fit (which it probably is) * * Freescale booke only supports 4K pages in TLB0, so use that. */ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) mmu_vmemmap_psize = MMU_PAGE_4K; else mmu_vmemmap_psize = MMU_PAGE_16M; /* XXX This code only checks for TLB 0 capabilities and doesn't * check what page size combos are supported by the HW. It * also doesn't handle the case where a separate array holds * the IND entries from the array loaded by the PT. */ /* Look for supported page sizes */ setup_page_sizes(); /* Look for HW tablewalk support */ setup_mmu_htw(); #ifdef CONFIG_PPC_E500 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { if (book3e_htw_mode == PPC_HTW_NONE) { extlb_level_exc = EX_TLB_SIZE; patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e); } } #endif /* Set the global containing the top of the linear mapping * for use by the TLB miss code */ linear_map_top = memblock_end_of_DRAM(); ioremap_bot = IOREMAP_BASE; } static void __init early_mmu_set_memory_limit(void) { #ifdef CONFIG_PPC_E500 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { /* * Limit memory so we dont have linear faults. * Unlike memblock_set_current_limit, which limits * memory available during early boot, this permanently * reduces the memory available to Linux. We need to * do this because highmem is not supported on 64-bit. */ memblock_enforce_memory_limit(linear_map_top); } #endif memblock_set_current_limit(linear_map_top); } /* boot cpu only */ void __init early_init_mmu(void) { early_init_mmu_global(); early_init_this_mmu(); early_mmu_set_memory_limit(); } void early_init_mmu_secondary(void) { early_init_this_mmu(); } void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { /* On non-FSL Embedded 64-bit, we adjust the RMA size to match * the bolted TLB entry. We know for now that only 1G * entries are supported though that may eventually * change. 
* * on FSL Embedded 64-bit, usually all RAM is bolted, but with * unusual memory sizes it's possible for some RAM to not be mapped * (such RAM is not used at all by Linux, since we don't support * highmem on 64-bit). We limit ppc64_rma_size to what would be * mappable if this memblock is the only one. Additional memblocks * can only increase, not decrease, the amount that ends up getting * mapped. We still limit max to 1G even if we'll eventually map * more. This is due to what the early init code is set up to do. * * We crop it to the size of the first MEMBLOCK to * avoid going over total available memory just in case... */ #ifdef CONFIG_PPC_E500 if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { unsigned long linear_sz; unsigned int num_cams; /* use a quarter of the TLBCAM for bolted linear map */ num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; linear_sz = map_mem_in_cams(first_memblock_size, num_cams, true, true); ppc64_rma_size = min_t(u64, linear_sz, 0x40000000); } else #endif ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); /* Finally limit subsequent allocations */ memblock_set_current_limit(first_memblock_base + ppc64_rma_size); } #else /* ! CONFIG_PPC64 */ void __init early_init_mmu(void) { unsigned long root = of_get_flat_dt_root(); if (IS_ENABLED(CONFIG_PPC_47x) && IS_ENABLED(CONFIG_SMP) && of_get_flat_dt_prop(root, "cooperative-partition", NULL)) mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST); } #endif /* CONFIG_PPC64 */
linux-master
arch/powerpc/mm/nohash/tlb.c
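For MAV 1.0 Freescale MMUs, setup_page_sizes() above only accepts direct page sizes whose shift is even (powers of four), converting each shift into "4^n KB" terms before comparing it against the MINSIZE/MAXSIZE fields of TLB1CFG. A small sketch of just that conversion and check; the min/max values and the shift list are placeholders, not values read from hardware.

#include <stdio.h>

int main(void)
{
	/* Assumed TLB1CFG MINSIZE/MAXSIZE fields, in 4^n KB units. */
	unsigned int min_pg = 1;	/* 4^1 KB = 4 KB   */
	unsigned int max_pg = 10;	/* 4^10 KB = 1 GB  */
	unsigned int shifts[] = { 12, 14, 21, 22, 24, 26, 28, 30 };

	for (unsigned int i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++) {
		unsigned int shift = shifts[i];

		/* Odd shifts are not powers of four and cannot be direct sizes. */
		if (shift == 0 || (shift & 1))
			continue;

		/* Express the size as 4^quad KB, as the MMU configuration does. */
		unsigned int quad = (shift - 10) >> 1;

		if (quad >= min_pg && quad <= max_pg)
			printf("%u KB usable as a direct page size\n",
			       1U << (shift - 10));
	}
	return 0;
}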
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains the routines for handling the MMU on those * PowerPC implementations where the MMU substantially follows the * architecture specification. This includes the 6xx, 7xx, 7xxx, * and 8260 implementations but excludes the 8xx and 4xx. * -- paulus * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ #include <linux/mm.h> #include <linux/init.h> #include <linux/export.h> #include <asm/mmu_context.h> /* * Room for two PTE pointers, usually the kernel and current user pointers * to their respective root page table. */ void *abatron_pteptrs[2]; /* * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs * (virtual segment identifiers) for each context. Although the * hardware supports 24-bit VSIDs, and thus >1 million contexts, * we only use 32,768 of them. That is ample, since there can be * at most around 30,000 tasks in the system anyway, and it means * that we can use a bitmap to indicate which contexts are in use. * Using a bitmap means that we entirely avoid all of the problems * that we used to have when the context number overflowed, * particularly on SMP systems. * -- paulus. */ #define NO_CONTEXT ((unsigned long) -1) #define LAST_CONTEXT 32767 #define FIRST_CONTEXT 1 static unsigned long next_mmu_context; static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1]; unsigned long __init_new_context(void) { unsigned long ctx = next_mmu_context; while (test_and_set_bit(ctx, context_map)) { ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx); if (ctx > LAST_CONTEXT) ctx = 0; } next_mmu_context = (ctx + 1) & LAST_CONTEXT; return ctx; } EXPORT_SYMBOL_GPL(__init_new_context); /* * Set up the context for a new address space. */ int init_new_context(struct task_struct *t, struct mm_struct *mm) { mm->context.id = __init_new_context(); mm->context.sr0 = CTX_TO_VSID(mm->context.id, 0); if (IS_ENABLED(CONFIG_PPC_KUEP)) mm->context.sr0 |= SR_NX; if (!kuap_is_disabled()) mm->context.sr0 |= SR_KS; return 0; } /* * Free a context ID. Make sure to call this with preempt disabled! */ void __destroy_context(unsigned long ctx) { clear_bit(ctx, context_map); } EXPORT_SYMBOL_GPL(__destroy_context); /* * We're finished using the context for an address space. */ void destroy_context(struct mm_struct *mm) { preempt_disable(); if (mm->context.id != NO_CONTEXT) { __destroy_context(mm->context.id); mm->context.id = NO_CONTEXT; } preempt_enable(); } /* * Initialize the context management stuff. */ void __init mmu_context_init(void) { /* Reserve context 0 for kernel use */ context_map[0] = (1 << FIRST_CONTEXT) - 1; next_mmu_context = FIRST_CONTEXT; } void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { long id = next->context.id; if (id < 0) panic("mm_struct %p has no context ID", next); isync(); update_user_segments(next->context.sr0); if (IS_ENABLED(CONFIG_BDI_SWITCH)) abatron_pteptrs[1] = next->pgd; if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) mtspr(SPRN_SDR1, rol32(__pa(next->pgd), 4) & 0xffff01ff); mb(); /* sync */ isync(); } EXPORT_SYMBOL(switch_mmu_context);
linux-master
arch/powerpc/mm/book3s32/mmu_context.c
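__init_new_context() above hands out context IDs from a bitmap, scanning forward from the last allocation and wrapping when it runs past LAST_CONTEXT, with context 0 reserved for the kernel. A self-contained userspace sketch of the same bookkeeping using a plain array instead of the kernel bitmap helpers; it is not the kernel implementation, only the allocation scheme it describes, and it does not handle exhaustion.

#include <stdio.h>
#include <stdbool.h>

#define LAST_CONTEXT	32767
#define FIRST_CONTEXT	1

static bool context_map[LAST_CONTEXT + 1];
static unsigned long next_mmu_context = FIRST_CONTEXT;

static unsigned long alloc_context(void)
{
	unsigned long ctx = next_mmu_context;

	/* Claim the first free slot at or after the cursor, wrapping once
	 * past LAST_CONTEXT; slot 0 stays reserved for the kernel. */
	while (context_map[ctx]) {
		ctx++;
		if (ctx > LAST_CONTEXT)
			ctx = FIRST_CONTEXT;
	}
	context_map[ctx] = true;
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	return ctx;
}

static void free_context(unsigned long ctx)
{
	context_map[ctx] = false;
}

int main(void)
{
	context_map[0] = true;			/* reserve context 0 */

	unsigned long a = alloc_context();
	unsigned long b = alloc_context();

	printf("allocated %lu and %lu\n", a, b);
	free_context(a);
	printf("next allocation after freeing %lu: %lu\n", a, alloc_context());
	return 0;
}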
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains the routines for TLB flushing. * On machines where the MMU uses a hash table to store virtual to * physical translations, these routines flush entries from the * hash table also. * -- paulus * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/export.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <mm/mmu_decl.h> /* * TLB flushing: * * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes kernel pages * * since the hardware hash table functions as an extension of the * tlb as far as the linux tables are concerned, flush it too. * -- Cort */ /* * For each address in the range, find the pte for the address * and check _PAGE_HASHPTE bit; if it is set, find and destroy * the corresponding HPTE. */ void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end) { pmd_t *pmd; unsigned long pmd_end; int count; unsigned int ctx = mm->context.id; start &= PAGE_MASK; if (start >= end) return; end = (end - 1) | ~PAGE_MASK; pmd = pmd_off(mm, start); for (;;) { pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1; if (pmd_end > end) pmd_end = end; if (!pmd_none(*pmd)) { count = ((pmd_end - start) >> PAGE_SHIFT) + 1; flush_hash_pages(ctx, start, pmd_val(*pmd), count); } if (pmd_end == end) break; start = pmd_end + 1; ++pmd; } } EXPORT_SYMBOL(hash__flush_range); /* * Flush all the (user) entries for the address space described by mm. */ void hash__flush_tlb_mm(struct mm_struct *mm) { struct vm_area_struct *mp; VMA_ITERATOR(vmi, mm, 0); /* * It is safe to iterate the vmas when called from dup_mmap, * holding mmap_lock. It would also be safe from unmap_region * or exit_mmap, but not from vmtruncate on SMP - but it seems * dup_mmap is the only SMP case which gets here. */ for_each_vma(vmi, mp) hash__flush_range(mp->vm_mm, mp->vm_start, mp->vm_end); } EXPORT_SYMBOL(hash__flush_tlb_mm); void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { struct mm_struct *mm; pmd_t *pmd; mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; pmd = pmd_off(mm, vmaddr); if (!pmd_none(*pmd)) flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); } EXPORT_SYMBOL(hash__flush_tlb_page);
linux-master
arch/powerpc/mm/book3s32/tlb.c
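hash__flush_range() above walks the address range one PMD-covered chunk at a time, clamping each chunk end to the overall end and flushing count = pages-in-chunk hash entries per step. A sketch of just the chunk arithmetic so it runs in userspace; the 4K PAGE_SIZE and 4M PGDIR_SIZE are assumptions for illustration, as are the sample addresses.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define PGDIR_SIZE	(1UL << 22)		/* assumed: one pmd covers 4M */
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

int main(void)
{
	unsigned long start = 0x10003000UL;
	unsigned long end   = 0x10c02000UL;

	start &= PAGE_MASK;
	end = (end - 1) | ~PAGE_MASK;		/* inclusive last byte */

	for (;;) {
		unsigned long pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		unsigned long count;

		if (pmd_end > end)
			pmd_end = end;
		count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
		printf("flush %4lu pages starting at %#lx\n", count, start);
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
	}
	return 0;
}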
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains the routines for handling the MMU on those * PowerPC implementations where the MMU substantially follows the * architecture specification. This includes the 6xx, 7xx, 7xxx, * and 8260 implementations but excludes the 8xx and 4xx. * -- paulus * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/memblock.h> #include <asm/mmu.h> #include <asm/machdep.h> #include <asm/code-patching.h> #include <asm/sections.h> #include <mm/mmu_decl.h> u8 __initdata early_hash[SZ_256K] __aligned(SZ_256K) = {0}; static struct hash_pte __initdata *Hash = (struct hash_pte *)early_hash; static unsigned long __initdata Hash_size, Hash_mask; static unsigned int __initdata hash_mb, hash_mb2; unsigned long __initdata _SDR1; struct ppc_bat BATS[8][2]; /* 8 pairs of IBAT, DBAT */ static struct batrange { /* stores address ranges mapped by BATs */ unsigned long start; unsigned long limit; phys_addr_t phys; } bat_addrs[8]; #ifdef CONFIG_SMP unsigned long mmu_hash_lock; #endif /* * Return PA for this VA if it is mapped by a BAT, or 0 */ phys_addr_t v_block_mapped(unsigned long va) { int b; for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b) if (va >= bat_addrs[b].start && va < bat_addrs[b].limit) return bat_addrs[b].phys + (va - bat_addrs[b].start); return 0; } /* * Return VA for a given PA or 0 if not mapped */ unsigned long p_block_mapped(phys_addr_t pa) { int b; for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b) if (pa >= bat_addrs[b].phys && pa < (bat_addrs[b].limit-bat_addrs[b].start) +bat_addrs[b].phys) return bat_addrs[b].start+(pa-bat_addrs[b].phys); return 0; } int __init find_free_bat(void) { int b; int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; for (b = 0; b < n; b++) { struct ppc_bat *bat = BATS[b]; if (!(bat[1].batu & 3)) return b; } return -1; } /* * This function calculates the size of the larger block usable to map the * beginning of an area based on the start address and size of that area: * - max block size is 256 on 6xx. * - base address must be aligned to the block size. So the maximum block size * is identified by the lowest bit set to 1 in the base address (for instance * if base is 0x16000000, max size is 0x02000000). * - block size has to be a power of two. This is calculated by finding the * highest bit set to 1. */ unsigned int bat_block_size(unsigned long base, unsigned long top) { unsigned int max_size = SZ_256M; unsigned int base_shift = (ffs(base) - 1) & 31; unsigned int block_shift = (fls(top - base) - 1) & 31; return min3(max_size, 1U << base_shift, 1U << block_shift); } /* * Set up one of the IBAT (block address translation) register pairs. * The parameters are not checked; in particular size must be a power * of 2 between 128k and 256M. */ static void setibat(int index, unsigned long virt, phys_addr_t phys, unsigned int size, pgprot_t prot) { unsigned int bl = (size >> 17) - 1; int wimgxpp; struct ppc_bat *bat = BATS[index]; unsigned long flags = pgprot_val(prot); if (!cpu_has_feature(CPU_FTR_NEED_COHERENT)) flags &= ~_PAGE_COHERENT; wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? 
BPP_RX : BPP_XX); bat[0].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */ bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp; if (flags & _PAGE_USER) bat[0].batu |= 1; /* Vp = 1 */ } static void clearibat(int index) { struct ppc_bat *bat = BATS[index]; bat[0].batu = 0; bat[0].batl = 0; } static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top) { int idx; while ((idx = find_free_bat()) != -1 && base != top) { unsigned int size = bat_block_size(base, top); if (size < 128 << 10) break; setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X); base += size; } return base; } unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { unsigned long done; unsigned long border = (unsigned long)__srwx_boundary - PAGE_OFFSET; unsigned long size; size = roundup_pow_of_two((unsigned long)_einittext - PAGE_OFFSET); setibat(0, PAGE_OFFSET, 0, size, PAGE_KERNEL_X); if (debug_pagealloc_enabled_or_kfence()) { pr_debug_once("Read-Write memory mapped without BATs\n"); if (base >= border) return base; if (top >= border) top = border; } if (!strict_kernel_rwx_enabled() || base >= border || top <= border) return __mmu_mapin_ram(base, top); done = __mmu_mapin_ram(base, border); if (done != border) return done; return __mmu_mapin_ram(border, top); } static bool is_module_segment(unsigned long addr) { if (!IS_ENABLED(CONFIG_MODULES)) return false; if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M)) return false; if (addr > ALIGN(MODULES_END, SZ_256M) - 1) return false; return true; } void mmu_mark_initmem_nx(void) { int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; int i; unsigned long base = (unsigned long)_stext - PAGE_OFFSET; unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K); unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; unsigned long size; for (i = 0; i < nb - 1 && base < top;) { size = bat_block_size(base, top); setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); base += size; } if (base < top) { size = bat_block_size(base, top); if ((top - base) > size) { size <<= 1; if (strict_kernel_rwx_enabled() && base + size > border) pr_warn("Some RW data is getting mapped X. " "Adjust CONFIG_DATA_SHIFT to avoid that.\n"); } setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); base += size; } for (; i < nb; i++) clearibat(i); update_bats(); for (i = TASK_SIZE >> 28; i < 16; i++) { /* Do not set NX on VM space for modules */ if (is_module_segment(i << 28)) continue; mtsr(mfsr(i << 28) | 0x10000000, i << 28); } } void mmu_mark_rodata_ro(void) { int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; int i; for (i = 0; i < nb; i++) { struct ppc_bat *bat = BATS[i]; if (bat_addrs[i].start < (unsigned long)__end_rodata) bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX; } update_bats(); } /* * Set up one of the D BAT (block address translation) register pairs. * The parameters are not checked; in particular size must be a power * of 2 between 128k and 256M. 
*/ void __init setbat(int index, unsigned long virt, phys_addr_t phys, unsigned int size, pgprot_t prot) { unsigned int bl; int wimgxpp; struct ppc_bat *bat; unsigned long flags = pgprot_val(prot); if (index == -1) index = find_free_bat(); if (index == -1) { pr_err("%s: no BAT available for mapping 0x%llx\n", __func__, (unsigned long long)phys); return; } bat = BATS[index]; if ((flags & _PAGE_NO_CACHE) || (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0)) flags &= ~_PAGE_COHERENT; bl = (size >> 17) - 1; /* Do DBAT first */ wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT | _PAGE_GUARDED); wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX; bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */ bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp; if (flags & _PAGE_USER) bat[1].batu |= 1; /* Vp = 1 */ if (flags & _PAGE_GUARDED) { /* G bit must be zero in IBATs */ flags &= ~_PAGE_EXEC; } bat_addrs[index].start = virt; bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1; bat_addrs[index].phys = phys; } /* * Preload a translation in the hash table */ static void hash_preload(struct mm_struct *mm, unsigned long ea) { pmd_t *pmd; if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) return; pmd = pmd_off(mm, ea); if (!pmd_none(*pmd)) add_hash_page(mm->context.id, ea, pmd_val(*pmd)); } /* * This is called at the end of handling a user page fault, when the * fault has been handled by updating a PTE in the linux page tables. * We use it to preload an HPTE into the hash table corresponding to * the updated linux PTE. * * This must always be called with the pte lock held. */ void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { /* * We don't need to worry about _PAGE_PRESENT here because we are * called with either mm->page_table_lock held or ptl lock held */ /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ if (!pte_young(*ptep) || address >= TASK_SIZE) return; /* We have to test for regs NULL since init will get here first thing at boot */ if (!current->thread.regs) return; /* We also avoid filling the hash if not coming from a fault */ if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400) return; hash_preload(vma->vm_mm, address); } /* * Initialize the hash table and patch the instructions in hashtable.S. */ void __init MMU_init_hw(void) { unsigned int n_hpteg, lg_n_hpteg; if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) return; if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105); #define LG_HPTEG_SIZE 6 /* 64 bytes per HPTEG */ #define SDR1_LOW_BITS ((n_hpteg - 1) >> 10) #define MIN_N_HPTEG 1024 /* min 64kB hash table */ /* * Allow 1 HPTE (1/8 HPTEG) for each page of memory. * This is less than the recommended amount, but then * Linux ain't AIX. */ n_hpteg = total_memory / (PAGE_SIZE * 8); if (n_hpteg < MIN_N_HPTEG) n_hpteg = MIN_N_HPTEG; lg_n_hpteg = __ilog2(n_hpteg); if (n_hpteg & (n_hpteg - 1)) { ++lg_n_hpteg; /* round up if not power of 2 */ n_hpteg = 1 << lg_n_hpteg; } Hash_size = n_hpteg << LG_HPTEG_SIZE; /* * Find some memory for the hash table. 
*/ if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); Hash = memblock_alloc(Hash_size, Hash_size); if (!Hash) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, Hash_size, Hash_size); _SDR1 = __pa(Hash) | SDR1_LOW_BITS; pr_info("Total memory = %lldMB; using %ldkB for hash table\n", (unsigned long long)(total_memory >> 20), Hash_size >> 10); Hash_mask = n_hpteg - 1; hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg; if (lg_n_hpteg > 16) hash_mb2 = 16 - LG_HPTEG_SIZE; } void __init MMU_init_hw_patch(void) { unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE); unsigned int hash = (unsigned int)Hash - PAGE_OFFSET; if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) return; if (ppc_md.progress) ppc_md.progress("hash:patch", 0x345); if (ppc_md.progress) ppc_md.progress("hash:done", 0x205); /* WARNING: Make sure nothing can trigger a KASAN check past this point */ /* * Patch up the instructions in hashtable.S:create_hpte */ modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16); modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6); modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6); modify_instruction_site(&patch__hash_page_B, 0xffff, hmask); modify_instruction_site(&patch__hash_page_C, 0xffff, hmask); /* * Patch up the instructions in hashtable.S:flush_hash_page */ modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16); modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6); modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6); modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask); } void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { /* We don't currently support the first MEMBLOCK not mapping 0 * physical on those processors */ BUG_ON(first_memblock_base != 0); memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_256M)); } void __init print_system_hash_info(void) { pr_info("Hash_size = 0x%lx\n", Hash_size); if (Hash_mask) pr_info("Hash_mask = 0x%lx\n", Hash_mask); } void __init early_init_mmu(void) { }
linux-master
arch/powerpc/mm/book3s32/mmu.c
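bat_block_size() above picks the largest BAT-mappable block as the minimum of 256M, the alignment of the base address (its lowest set bit) and the largest power of two that fits in the remaining size (its highest set bit). A userspace sketch of the same computation with compiler builtins standing in for the kernel's ffs()/fls(); the sample ranges are arbitrary, and a base of zero is treated as maximally aligned just like in the kernel helper.

#include <stdio.h>

#define SZ_256M	(256U << 20)

static unsigned int my_ffs(unsigned int x) { return x ? __builtin_ctz(x) + 1 : 0; }
static unsigned int my_fls(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;
	return m < c ? m : c;
}

/* Largest block a BAT could map starting at 'base' without running past
 * 'top': capped at 256M, limited by the alignment of base and by the
 * largest power of two not exceeding (top - base). */
static unsigned int bat_block_size(unsigned int base, unsigned int top)
{
	unsigned int base_shift = (my_ffs(base) - 1) & 31;
	unsigned int block_shift = (my_fls(top - base) - 1) & 31;

	return min3u(SZ_256M, 1U << base_shift, 1U << block_shift);
}

int main(void)
{
	/* Arbitrary sample ranges. */
	printf("%#x\n", bat_block_size(0x16000000, 0x20000000)); /* 0x2000000, the comment's example */
	printf("%#x\n", bat_block_size(0x00000000, 0x18000000)); /* 0x10000000 */
	printf("%#x\n", bat_block_size(0x00800000, 0x01000000)); /* 0x800000 */
	return 0;
}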
// SPDX-License-Identifier: GPL-2.0-or-later

#include <asm/kup.h>
#include <asm/smp.h>

void setup_kuap(bool disabled)
{
	if (!disabled) {
		update_user_segments(mfsr(0) | SR_KS);
		isync();	/* Context sync required after mtsr() */
		init_mm.context.sr0 |= SR_KS;
		current->thread.sr0 |= SR_KS;
	}

	if (smp_processor_id() != boot_cpuid)
		return;

	if (disabled)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_KUAP;
	else
		pr_info("Activating Kernel Userspace Access Protection\n");
}
linux-master
arch/powerpc/mm/book3s32/kuap.c
// SPDX-License-Identifier: GPL-2.0 #define DISABLE_BRANCH_PROFILING #include <linux/kasan.h> #include <linux/memblock.h> #include <linux/hugetlb.h> static int __init kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block) { pmd_t *pmd = pmd_off_k(k_start); unsigned long k_cur, k_next; for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) { pte_basic_t *new; k_next = pgd_addr_end(k_cur, k_end); k_next = pgd_addr_end(k_next, k_end); if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte) continue; new = memblock_alloc(sizeof(pte_basic_t), SZ_4K); if (!new) return -ENOMEM; *new = pte_val(pte_mkhuge(pfn_pte(PHYS_PFN(__pa(block)), PAGE_KERNEL))); hugepd_populate_kernel((hugepd_t *)pmd, (pte_t *)new, PAGE_SHIFT_8M); hugepd_populate_kernel((hugepd_t *)pmd + 1, (pte_t *)new, PAGE_SHIFT_8M); } return 0; } int __init kasan_init_region(void *start, size_t size) { unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start); unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size); unsigned long k_cur; int ret; void *block; block = memblock_alloc(k_end - k_start, SZ_8M); if (!block) return -ENOMEM; if (IS_ALIGNED(k_start, SZ_8M)) { kasan_init_shadow_8M(k_start, ALIGN_DOWN(k_end, SZ_8M), block); k_cur = ALIGN_DOWN(k_end, SZ_8M); if (k_cur == k_end) goto finish; } else { k_cur = k_start; } ret = kasan_init_shadow_page_tables(k_start, k_end); if (ret) return ret; for (; k_cur < k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_off_k(k_cur); void *va = block + k_cur - k_start; pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL); if (k_cur < ALIGN_DOWN(k_end, SZ_512K)) pte = pte_mkhuge(pte); __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0); } finish: flush_tlb_kernel_range(k_start, k_end); return 0; }
linux-master
arch/powerpc/mm/kasan/8xx.c
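kasan_init_region() above allocates and maps the shadow for a memory block, and the shadow region is one eighth the size of the memory it covers, since generic KASAN tracks eight bytes per shadow byte. A sketch of the shadow-address arithmetic that kasan_mem_to_shadow() relies on; the scale shift of 3 is the usual generic-KASAN convention, and the shadow offset and region addresses used here are purely illustrative, not the real powerpc values.

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT	3		/* 1 shadow byte per 8 bytes */
#define KASAN_SHADOW_OFFSET		0xf8000000UL	/* assumed, not the real value */

static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	unsigned long start = 0xc0000000UL;	/* assumed lowmem region start */
	unsigned long size  = 128UL << 20;	/* 128M region */

	unsigned long k_start = mem_to_shadow(start);
	unsigned long k_end   = mem_to_shadow(start + size);

	printf("region %#lx..%#lx -> shadow %#lx..%#lx (%lu KB of shadow)\n",
	       start, start + size, k_start, k_end, (k_end - k_start) >> 10);
	return 0;
}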
// SPDX-License-Identifier: GPL-2.0 /* * KASAN for 64-bit Book3e powerpc * * Copyright 2022, Christophe Leroy, CS GROUP France */ #define DISABLE_BRANCH_PROFILING #include <linux/kasan.h> #include <linux/printk.h> #include <linux/memblock.h> #include <linux/set_memory.h> #include <asm/pgalloc.h> static inline bool kasan_pud_table(p4d_t p4d) { return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud)); } static inline bool kasan_pmd_table(pud_t pud) { return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd)); } static inline bool kasan_pte_table(pmd_t pmd) { return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte)); } static int __init kasan_map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) { pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; pgdp = pgd_offset_k(ea); p4dp = p4d_offset(pgdp, ea); if (kasan_pud_table(*p4dp)) { pudp = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE); memcpy(pudp, kasan_early_shadow_pud, PUD_TABLE_SIZE); p4d_populate(&init_mm, p4dp, pudp); } pudp = pud_offset(p4dp, ea); if (kasan_pmd_table(*pudp)) { pmdp = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE); memcpy(pmdp, kasan_early_shadow_pmd, PMD_TABLE_SIZE); pud_populate(&init_mm, pudp, pmdp); } pmdp = pmd_offset(pudp, ea); if (kasan_pte_table(*pmdp)) { ptep = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE); memcpy(ptep, kasan_early_shadow_pte, PTE_TABLE_SIZE); pmd_populate_kernel(&init_mm, pmdp, ptep); } ptep = pte_offset_kernel(pmdp, ea); __set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0); return 0; } static void __init kasan_init_phys_region(void *start, void *end) { unsigned long k_start, k_end, k_cur; void *va; if (start >= end) return; k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE); k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE); va = memblock_alloc(k_end - k_start, PAGE_SIZE); for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE) kasan_map_kernel_page(k_cur, __pa(va), PAGE_KERNEL); } void __init kasan_early_init(void) { int i; unsigned long addr; pgd_t *pgd = pgd_offset_k(KASAN_SHADOW_START); pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL); BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE)); BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE)); for (i = 0; i < PTRS_PER_PTE; i++) __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page, &kasan_early_shadow_pte[i], zero_pte, 0); for (i = 0; i < PTRS_PER_PMD; i++) pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i], kasan_early_shadow_pte); for (i = 0; i < PTRS_PER_PUD; i++) pud_populate(&init_mm, &kasan_early_shadow_pud[i], kasan_early_shadow_pmd); for (addr = KASAN_SHADOW_START; addr != KASAN_SHADOW_END; addr += PGDIR_SIZE) p4d_populate(&init_mm, p4d_offset(pgd++, addr), kasan_early_shadow_pud); } void __init kasan_init(void) { phys_addr_t start, end; u64 i; pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO); for_each_mem_range(i, &start, &end) kasan_init_phys_region((void *)start, (void *)end); if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) kasan_remove_zero_shadow((void *)VMALLOC_START, VMALLOC_SIZE); for (i = 0; i < PTRS_PER_PTE; i++) __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page, &kasan_early_shadow_pte[i], zero_pte, 0); flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END); memset(kasan_early_shadow_page, 0, PAGE_SIZE); /* Enable error messages */ init_task.kasan_depth = 0; pr_info("KASAN init done\n"); } void 
__init kasan_late_init(void) { }
linux-master
arch/powerpc/mm/kasan/init_book3e_64.c
// SPDX-License-Identifier: GPL-2.0 /* * KASAN for 64-bit Book3S powerpc * * Copyright 2019-2022, Daniel Axtens, IBM Corporation. */ /* * ppc64 turns on virtual memory late in boot, after calling into generic code * like the device-tree parser, so it uses this in conjunction with a hook in * outline mode to avoid invalid access early in boot. */ #define DISABLE_BRANCH_PROFILING #include <linux/kasan.h> #include <linux/printk.h> #include <linux/sched/task.h> #include <linux/memblock.h> #include <asm/pgalloc.h> DEFINE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key); static void __init kasan_init_phys_region(void *start, void *end) { unsigned long k_start, k_end, k_cur; void *va; if (start >= end) return; k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE); k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE); va = memblock_alloc(k_end - k_start, PAGE_SIZE); for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE) map_kernel_page(k_cur, __pa(va), PAGE_KERNEL); } void __init kasan_init(void) { /* * We want to do the following things: * 1) Map real memory into the shadow for all physical memblocks * This takes us from c000... to c008... * 2) Leave a hole over the shadow of vmalloc space. KASAN_VMALLOC * will manage this for us. * This takes us from c008... to c00a... * 3) Map the 'early shadow'/zero page over iomap and vmemmap space. * This takes us up to where we start at c00e... */ void *k_start = kasan_mem_to_shadow((void *)RADIX_VMALLOC_END); void *k_end = kasan_mem_to_shadow((void *)RADIX_VMEMMAP_END); phys_addr_t start, end; u64 i; pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL); if (!early_radix_enabled()) { pr_warn("KASAN not enabled as it requires radix!"); return; } for_each_mem_range(i, &start, &end) kasan_init_phys_region((void *)start, (void *)end); for (i = 0; i < PTRS_PER_PTE; i++) __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page, &kasan_early_shadow_pte[i], zero_pte, 0); for (i = 0; i < PTRS_PER_PMD; i++) pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i], kasan_early_shadow_pte); for (i = 0; i < PTRS_PER_PUD; i++) pud_populate(&init_mm, &kasan_early_shadow_pud[i], kasan_early_shadow_pmd); /* map the early shadow over the iomap and vmemmap space */ kasan_populate_early_shadow(k_start, k_end); /* mark early shadow region as RO and wipe it */ zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO); for (i = 0; i < PTRS_PER_PTE; i++) __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page, &kasan_early_shadow_pte[i], zero_pte, 0); /* * clear_page relies on some cache info that hasn't been set up yet. * It ends up looping ~forever and blows up other data. * Use memset instead. */ memset(kasan_early_shadow_page, 0, PAGE_SIZE); static_branch_inc(&powerpc_kasan_enabled_key); /* Enable error messages */ init_task.kasan_depth = 0; pr_info("KASAN init done\n"); } void __init kasan_early_init(void) { } void __init kasan_late_init(void) { }
linux-master
arch/powerpc/mm/kasan/init_book3s_64.c
// SPDX-License-Identifier: GPL-2.0 #define DISABLE_BRANCH_PROFILING #include <linux/kasan.h> #include <linux/printk.h> #include <linux/memblock.h> #include <linux/sched/task.h> #include <asm/pgalloc.h> #include <asm/code-patching.h> #include <mm/mmu_decl.h> static pgprot_t __init kasan_prot_ro(void) { if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) return PAGE_READONLY; return PAGE_KERNEL_RO; } static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot) { unsigned long va = (unsigned long)kasan_early_shadow_page; phys_addr_t pa = __pa(kasan_early_shadow_page); int i; for (i = 0; i < PTRS_PER_PTE; i++, ptep++) __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 1); } int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end) { pmd_t *pmd; unsigned long k_cur, k_next; pmd = pmd_off_k(k_start); for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) { pte_t *new; k_next = pgd_addr_end(k_cur, k_end); if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte) continue; new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE); if (!new) return -ENOMEM; kasan_populate_pte(new, PAGE_KERNEL); pmd_populate_kernel(&init_mm, pmd, new); } return 0; } int __init __weak kasan_init_region(void *start, size_t size) { unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start); unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size); unsigned long k_cur; int ret; void *block; ret = kasan_init_shadow_page_tables(k_start, k_end); if (ret) return ret; block = memblock_alloc(k_end - k_start, PAGE_SIZE); if (!block) return -ENOMEM; for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_off_k(k_cur); void *va = block + k_cur - k_start; pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL); __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0); } flush_tlb_kernel_range(k_start, k_end); return 0; } void __init kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte) { unsigned long k_cur; for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_off_k(k_cur); pte_t *ptep = pte_offset_kernel(pmd, k_cur); if (pte_page(*ptep) != virt_to_page(lm_alias(kasan_early_shadow_page))) continue; __set_pte_at(&init_mm, k_cur, ptep, pte, 0); } flush_tlb_kernel_range(k_start, k_end); } static void __init kasan_remap_early_shadow_ro(void) { pgprot_t prot = kasan_prot_ro(); phys_addr_t pa = __pa(kasan_early_shadow_page); kasan_populate_pte(kasan_early_shadow_pte, prot); kasan_update_early_region(KASAN_SHADOW_START, KASAN_SHADOW_END, pfn_pte(PHYS_PFN(pa), prot)); } static void __init kasan_unmap_early_shadow_vmalloc(void) { unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START); unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END); kasan_update_early_region(k_start, k_end, __pte(0)); #ifdef MODULES_VADDR k_start = (unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR); k_end = (unsigned long)kasan_mem_to_shadow((void *)MODULES_END); kasan_update_early_region(k_start, k_end, __pte(0)); #endif } void __init kasan_mmu_init(void) { int ret; if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) { ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END); if (ret) panic("kasan: kasan_init_shadow_page_tables() failed"); } } void __init kasan_init(void) { phys_addr_t base, end; u64 i; int ret; for_each_mem_range(i, &base, &end) { phys_addr_t top = min(end, total_lowmem); if (base >= top) continue; ret = 
kasan_init_region(__va(base), top - base); if (ret) panic("kasan: kasan_init_region() failed"); } if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END); if (ret) panic("kasan: kasan_init_shadow_page_tables() failed"); } kasan_remap_early_shadow_ro(); clear_page(kasan_early_shadow_page); /* At this point kasan is fully initialized. Enable error messages */ init_task.kasan_depth = 0; pr_info("KASAN init done\n"); } void __init kasan_late_init(void) { if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) kasan_unmap_early_shadow_vmalloc(); } void __init kasan_early_init(void) { unsigned long addr = KASAN_SHADOW_START; unsigned long end = KASAN_SHADOW_END; unsigned long next; pmd_t *pmd = pmd_off_k(addr); BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK); kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL); do { next = pgd_addr_end(addr, end); pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte); } while (pmd++, addr = next, addr != end); }
linux-master
arch/powerpc/mm/kasan/init_32.c
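The init_32.c record above builds KASAN shadow page tables, where every 8-byte granule of kernel address space is described by one shadow byte reached through kasan_mem_to_shadow(). The snippet below is only a userspace sketch of that address arithmetic; the shadow offset, the scale shift of 3 and the sample addresses are illustrative assumptions, not the values the kernel uses on any particular platform.

/* Minimal sketch of KASAN-style shadow address arithmetic (illustrative
 * constants only; the real offsets come from per-architecture Kconfig). */
#include <stdint.h>
#include <stdio.h>

#define SHADOW_SCALE_SHIFT 3            /* one shadow byte per 8 bytes */
#define SHADOW_OFFSET      0xe0000000UL /* hypothetical shadow base    */

static uintptr_t mem_to_shadow(uintptr_t addr)
{
	/* Each shadow byte covers a 2^SHADOW_SCALE_SHIFT-byte granule. */
	return SHADOW_OFFSET + (addr >> SHADOW_SCALE_SHIFT);
}

int main(void)
{
	uintptr_t samples[] = { 0xc0000000UL, 0xc0000008UL, 0xc0001000UL };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("addr %#lx -> shadow byte %#lx\n",
		       (unsigned long)samples[i],
		       (unsigned long)mem_to_shadow(samples[i]));
	return 0;
}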
// SPDX-License-Identifier: GPL-2.0 #define DISABLE_BRANCH_PROFILING #include <linux/kasan.h> #include <linux/memblock.h> #include <mm/mmu_decl.h> int __init kasan_init_region(void *start, size_t size) { unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start); unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size); unsigned long k_nobat = k_start; unsigned long k_cur; phys_addr_t phys; int ret; while (k_nobat < k_end) { unsigned int k_size = bat_block_size(k_nobat, k_end); int idx = find_free_bat(); if (idx == -1) break; if (k_size < SZ_128K) break; phys = memblock_phys_alloc_range(k_size, k_size, 0, MEMBLOCK_ALLOC_ANYWHERE); if (!phys) break; setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL); k_nobat += k_size; } if (k_nobat != k_start) update_bats(); if (k_nobat < k_end) { phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0, MEMBLOCK_ALLOC_ANYWHERE); if (!phys) return -ENOMEM; } ret = kasan_init_shadow_page_tables(k_start, k_end); if (ret) return ret; kasan_update_early_region(k_start, k_nobat, __pte(0)); for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_off_k(k_cur); pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL); __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0); } flush_tlb_kernel_range(k_start, k_end); memset(kasan_mem_to_shadow(start), 0, k_end - k_start); return 0; }
linux-master
arch/powerpc/mm/kasan/book3s_32.c
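kasan_init_region() in the book3s_32.c record covers as much of the shadow range as possible with BATs, which need naturally aligned power-of-two blocks, before falling back to page-sized PTE mappings. Below is a hedged sketch of that sizing decision: pick the largest power-of-two block that is aligned to the current address and still fits in the remaining range. The 256 MB cap and the helper name are assumptions for illustration; this is not the kernel's bat_block_size().

/* Sketch: choose the largest naturally aligned power-of-two block that fits
 * between addr and end, capped at an assumed 256 MB maximum block size. */
#include <stdio.h>

#define MAX_BLOCK (256UL * 1024 * 1024)	/* illustrative cap */

static unsigned long block_size(unsigned long addr, unsigned long end)
{
	unsigned long size = MAX_BLOCK;

	/* Shrink until the block is aligned to addr and fits before end. */
	while (size > 1 && ((addr & (size - 1)) || addr + size > end))
		size >>= 1;

	return size;
}

int main(void)
{
	unsigned long addr = 0xf1000000UL, end = 0xf1900000UL;

	while (addr < end) {
		unsigned long size = block_size(addr, end);

		printf("map %#lx + %#lx\n", addr, size);
		addr += size;
	}
	return 0;
}

A real implementation stops using blocks once they get too small to be worth a BAT entry and maps the remainder with ordinary pages, which is what the record above does below the 128 KB threshold.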
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file is for defining trace points and trace related helpers.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <trace/events/thp.h>
#endif
linux-master
arch/powerpc/mm/book3s64/trace.c
// SPDX-License-Identifier: GPL-2.0 /* * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later) * * Copyright (C) 2003 David Gibson, IBM Corporation. * * Based on the IA-32 version: * Copyright (C) 2002, Rohit Seth <[email protected]> */ #include <linux/mm.h> #include <linux/hugetlb.h> #include <asm/cacheflush.h> #include <asm/machdep.h> unsigned int hpage_shift; EXPORT_SYMBOL(hpage_shift); #ifdef CONFIG_PPC_64S_HASH_MMU int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, unsigned long flags, int ssize, unsigned int shift, unsigned int mmu_psize) { real_pte_t rpte; unsigned long vpn; unsigned long old_pte, new_pte; unsigned long rflags, pa; long slot, offset; BUG_ON(shift != mmu_psize_defs[mmu_psize].shift); /* Search the Linux page table for a match with va */ vpn = hpt_vpn(ea, vsid, ssize); /* * At this point, we have a pte (old_pte) which can be used to build * or update an HPTE. There are 2 cases: * * 1. There is a valid (present) pte with no associated HPTE (this is * the most common case) * 2. There is a valid (present) pte with an associated HPTE. The * current values of the pp bits in the HPTE prevent access * because we are doing software DIRTY bit management and the * page is currently not DIRTY. */ do { old_pte = pte_val(*ptep); /* If PTE busy, retry the access */ if (unlikely(old_pte & H_PAGE_BUSY)) return 0; /* If PTE permissions don't match, take page fault */ if (unlikely(!check_pte_access(access, old_pte))) return 1; /* * Try to lock the PTE, add ACCESSED and DIRTY if it was * a write access */ new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED; if (access & _PAGE_WRITE) new_pte |= _PAGE_DIRTY; } while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte))); /* Make sure this is a hugetlb entry */ if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)) return 0; rflags = htab_convert_pte_flags(new_pte, flags); if (unlikely(mmu_psize == MMU_PAGE_16G)) offset = PTRS_PER_PUD; else offset = PTRS_PER_PMD; rpte = __real_pte(__pte(old_pte), ptep, offset); if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) /* * No CPU has hugepages but lacks no execute, so we * don't need to worry about that case */ rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap); /* Check if pte already has an hpte (case 2) */ if (unlikely(old_pte & H_PAGE_HASHPTE)) { /* There MIGHT be an HPTE for this pte */ unsigned long gslot; gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte, 0); if (mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn, mmu_psize, mmu_psize, ssize, flags) == -1) old_pte &= ~_PAGE_HPTEFLAGS; } if (likely(!(old_pte & H_PAGE_HASHPTE))) { unsigned long hash = hpt_hash(vpn, shift, ssize); pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; /* clear HPTE slot informations in new PTE */ new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE; slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0, mmu_psize, ssize); /* * Hypervisor failure. Restore old pte and return -1 * similar to __hash_page_* */ if (unlikely(slot == -2)) { *ptep = __pte(old_pte); hash_failure_debug(ea, access, vsid, trap, ssize, mmu_psize, mmu_psize, old_pte); return -1; } new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset); } /* * No need to use ldarx/stdcx here */ *ptep = __pte(new_pte & ~H_PAGE_BUSY); return 0; } #endif pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { unsigned long pte_val; /* * Clear the _PAGE_PRESENT so that no hardware parallel update is * possible. 
Also keep the pte_present true so that we don't take * wrong fault. */ pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 1); return __pte(pte_val); } void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t pte) { if (radix_enabled()) return radix__huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte); set_huge_pte_at(vma->vm_mm, addr, ptep, pte); } void __init hugetlbpage_init_defaultsize(void) { /* Set default large page size. Currently, we pick 16M or 1M * depending on what is available */ if (mmu_psize_defs[MMU_PAGE_16M].shift) hpage_shift = mmu_psize_defs[MMU_PAGE_16M].shift; else if (mmu_psize_defs[MMU_PAGE_1M].shift) hpage_shift = mmu_psize_defs[MMU_PAGE_1M].shift; else if (mmu_psize_defs[MMU_PAGE_2M].shift) hpage_shift = mmu_psize_defs[MMU_PAGE_2M].shift; }
linux-master
arch/powerpc/mm/book3s64/hugetlbpage.c
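__hash_page_huge() in the record above marks the Linux PTE busy and dirty with a compare-and-exchange loop (pte_xchg), and simply bails out so the access is retried if another CPU already holds H_PAGE_BUSY. The fragment below is a userspace sketch of that pattern using C11 atomics; the bit values and the pte variable are invented for illustration and do not match the real powerpc PTE layout.

/* Sketch of the "lock the PTE with a busy bit via cmpxchg" pattern, using
 * C11 atomics in userspace. Bit positions are illustrative, not powerpc's. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_BUSY     (1UL << 0)   /* assumed busy/lock bit */
#define PTE_ACCESSED (1UL << 1)   /* assumed accessed bit  */
#define PTE_DIRTY    (1UL << 2)   /* assumed dirty bit     */

static _Atomic uint64_t pte = 0x1000;	/* some pre-existing PTE value */

/* Returns false if the PTE is already busy (caller would retry the access). */
static bool lock_and_mark(bool write)
{
	uint64_t old, new;

	do {
		old = atomic_load(&pte);
		if (old & PTE_BUSY)
			return false;		/* someone else owns it */
		new = old | PTE_BUSY | PTE_ACCESSED;
		if (write)
			new |= PTE_DIRTY;
	} while (!atomic_compare_exchange_weak(&pte, &old, new));

	return true;
}

int main(void)
{
	if (lock_and_mark(true))
		printf("locked pte, value now %#lx\n",
		       (unsigned long)atomic_load(&pte));

	/* A second attempt sees the busy bit and backs off, like the kernel
	 * returning 0 so the faulting access is simply retried later. */
	printf("second attempt %s\n",
	       lock_and_mark(false) ? "succeeded" : "saw busy bit, backing off");
	return 0;
}

Backing off instead of spinning keeps the fault path short: the hardware will refault and the PTE will usually be free by then.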
/* * Copyright IBM Corporation, 2015 * Author Aneesh Kumar K.V <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include <linux/mm.h> #include <asm/machdep.h> #include <asm/mmu.h> #include "internal.h" int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, unsigned long flags, int ssize, int subpg_prot) { real_pte_t rpte; unsigned long hpte_group; unsigned long rflags, pa; unsigned long old_pte, new_pte; unsigned long vpn, hash, slot; unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift; /* * atomically mark the linux large page PTE busy and dirty */ do { pte_t pte = READ_ONCE(*ptep); old_pte = pte_val(pte); /* If PTE busy, retry the access */ if (unlikely(old_pte & H_PAGE_BUSY)) return 0; /* If PTE permissions don't match, take page fault */ if (unlikely(!check_pte_access(access, old_pte))) return 1; /* * Try to lock the PTE, add ACCESSED and DIRTY if it was * a write access. Since this is 4K insert of 64K page size * also add H_PAGE_COMBO */ new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED; if (access & _PAGE_WRITE) new_pte |= _PAGE_DIRTY; } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte))); /* * PP bits. _PAGE_USER is already PP bit 0x2, so we only * need to add in 0x1 if it's a read-only user page */ rflags = htab_convert_pte_flags(new_pte, flags); rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE); if (cpu_has_feature(CPU_FTR_NOEXECUTE) && !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap); vpn = hpt_vpn(ea, vsid, ssize); if (unlikely(old_pte & H_PAGE_HASHPTE)) { /* * There MIGHT be an HPTE for this pte */ unsigned long gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte, 0); if (mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn, MMU_PAGE_4K, MMU_PAGE_4K, ssize, flags) == -1) old_pte &= ~_PAGE_HPTEFLAGS; } if (likely(!(old_pte & H_PAGE_HASHPTE))) { pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; hash = hpt_hash(vpn, shift, ssize); repeat: hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; /* Insert into the hash table, primary slot */ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0, MMU_PAGE_4K, MMU_PAGE_4K, ssize); /* * Primary is full, try the secondary */ if (unlikely(slot == -1)) { hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP; slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, HPTE_V_SECONDARY, MMU_PAGE_4K, MMU_PAGE_4K, ssize); if (slot == -1) { if (mftb() & 0x1) hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; mmu_hash_ops.hpte_remove(hpte_group); /* * FIXME!! Should be try the group from which we removed ? */ goto repeat; } } /* * Hypervisor failure. Restore old pte and return -1 * similar to __hash_page_* */ if (unlikely(slot == -2)) { *ptep = __pte(old_pte); hash_failure_debug(ea, access, vsid, trap, ssize, MMU_PAGE_4K, MMU_PAGE_4K, old_pte); return -1; } new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE; new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE); if (stress_hpt()) hpt_do_stress(ea, hpte_group); } *ptep = __pte(new_pte & ~H_PAGE_BUSY); return 0; }
linux-master
arch/powerpc/mm/book3s64/hash_4k.c
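__hash_page_4K() above inserts into a primary hash bucket and, if that group is full, retries in the secondary group derived from the complemented hash, finally evicting a pseudo-randomly chosen entry when both are full. The arithmetic sketch below only illustrates the primary/secondary group selection; HPTES_PER_GROUP and the hash mask are sample values and the hash function is a trivial stand-in, not hpt_hash().

/* Sketch of primary/secondary HPTE group selection. The hash function and
 * table geometry here are stand-ins, not the real hpt_hash()/htab values. */
#include <stdio.h>

#define HPTES_PER_GROUP 8
#define HTAB_HASH_MASK  0xffffUL	/* assumed 64K groups */

/* Trivial stand-in for hpt_hash(vpn, shift, ssize). */
static unsigned long toy_hash(unsigned long vpn)
{
	return vpn ^ (vpn >> 7) ^ (vpn >> 13);
}

int main(void)
{
	unsigned long vpn = 0x123456UL;
	unsigned long hash = toy_hash(vpn);

	unsigned long primary   = ( hash & HTAB_HASH_MASK) * HPTES_PER_GROUP;
	unsigned long secondary = (~hash & HTAB_HASH_MASK) * HPTES_PER_GROUP;

	/* An entry that cannot be inserted in the primary group is retried in
	 * the secondary group; if both are full, one slot is evicted. */
	printf("vpn %#lx -> primary slot base %lu, secondary slot base %lu\n",
	       vpn, primary, secondary);
	return 0;
}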
// SPDX-License-Identifier: GPL-2.0-or-later /* * MMU context allocation for 64-bit kernels. * * Copyright (C) 2004 Anton Blanchard, IBM Corp. <[email protected]> */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/pkeys.h> #include <linux/spinlock.h> #include <linux/idr.h> #include <linux/export.h> #include <linux/gfp.h> #include <linux/slab.h> #include <linux/cpu.h> #include <asm/mmu_context.h> #include <asm/pgalloc.h> #include "internal.h" static DEFINE_IDA(mmu_context_ida); static int alloc_context_id(int min_id, int max_id) { return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL); } #ifdef CONFIG_PPC_64S_HASH_MMU void __init hash__reserve_context_id(int id) { int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL); WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result); } int hash__alloc_context_id(void) { unsigned long max; if (mmu_has_feature(MMU_FTR_68_BIT_VA)) max = MAX_USER_CONTEXT; else max = MAX_USER_CONTEXT_65BIT_VA; return alloc_context_id(MIN_USER_CONTEXT, max); } EXPORT_SYMBOL_GPL(hash__alloc_context_id); #endif #ifdef CONFIG_PPC_64S_HASH_MMU static int realloc_context_ids(mm_context_t *ctx) { int i, id; /* * id 0 (aka. ctx->id) is special, we always allocate a new one, even if * there wasn't one allocated previously (which happens in the exec * case where ctx is newly allocated). * * We have to be a bit careful here. We must keep the existing ids in * the array, so that we can test if they're non-zero to decide if we * need to allocate a new one. However in case of error we must free the * ids we've allocated but *not* any of the existing ones (or risk a * UAF). That's why we decrement i at the start of the error handling * loop, to skip the id that we just tested but couldn't reallocate. */ for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) { if (i == 0 || ctx->extended_id[i]) { id = hash__alloc_context_id(); if (id < 0) goto error; ctx->extended_id[i] = id; } } /* The caller expects us to return id */ return ctx->id; error: for (i--; i >= 0; i--) { if (ctx->extended_id[i]) ida_free(&mmu_context_ida, ctx->extended_id[i]); } return id; } static int hash__init_new_context(struct mm_struct *mm) { int index; mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), GFP_KERNEL); if (!mm->context.hash_context) return -ENOMEM; /* * The old code would re-promote on fork, we don't do that when using * slices as it could cause problem promoting slices that have been * forced down to 4K. * * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check * explicitly against context.id == 0. This ensures that we properly * initialize context slice details for newly allocated mm's (which will * have id == 0) and don't alter context slice inherited via fork (which * will have id != 0). * * We should not be calling init_new_context() on init_mm. Hence a * check against 0 is OK. */ if (mm->context.id == 0) { memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context)); slice_init_new_context_exec(mm); } else { /* This is fork. Copy hash_context details from current->mm */ memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context)); #ifdef CONFIG_PPC_SUBPAGE_PROT /* inherit subpage prot details if we have one. 
*/ if (current->mm->context.hash_context->spt) { mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table), GFP_KERNEL); if (!mm->context.hash_context->spt) { kfree(mm->context.hash_context); return -ENOMEM; } } #endif } index = realloc_context_ids(&mm->context); if (index < 0) { #ifdef CONFIG_PPC_SUBPAGE_PROT kfree(mm->context.hash_context->spt); #endif kfree(mm->context.hash_context); return index; } pkey_mm_init(mm); return index; } void hash__setup_new_exec(void) { slice_setup_new_exec(); slb_setup_new_exec(); } #else static inline int hash__init_new_context(struct mm_struct *mm) { BUILD_BUG(); return 0; } #endif static int radix__init_new_context(struct mm_struct *mm) { unsigned long rts_field; int index, max_id; max_id = (1 << mmu_pid_bits) - 1; index = alloc_context_id(mmu_base_pid, max_id); if (index < 0) return index; /* * set the process table entry, */ rts_field = radix__get_tree_size(); process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE); /* * Order the above store with subsequent update of the PID * register (at which point HW can start loading/caching * the entry) and the corresponding load by the MMU from * the L2 cache. */ asm volatile("ptesync;isync" : : : "memory"); #ifdef CONFIG_PPC_64S_HASH_MMU mm->context.hash_context = NULL; #endif return index; } int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { int index; if (radix_enabled()) index = radix__init_new_context(mm); else index = hash__init_new_context(mm); if (index < 0) return index; mm->context.id = index; mm->context.pte_frag = NULL; mm->context.pmd_frag = NULL; #ifdef CONFIG_SPAPR_TCE_IOMMU mm_iommu_init(mm); #endif atomic_set(&mm->context.active_cpus, 0); atomic_set(&mm->context.copros, 0); return 0; } void __destroy_context(int context_id) { ida_free(&mmu_context_ida, context_id); } EXPORT_SYMBOL_GPL(__destroy_context); static void destroy_contexts(mm_context_t *ctx) { if (radix_enabled()) { ida_free(&mmu_context_ida, ctx->id); } else { #ifdef CONFIG_PPC_64S_HASH_MMU int index, context_id; for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) { context_id = ctx->extended_id[index]; if (context_id) ida_free(&mmu_context_ida, context_id); } kfree(ctx->hash_context); #else BUILD_BUG(); // radix_enabled() should be constant true #endif } } static void pmd_frag_destroy(void *pmd_frag) { int count; struct ptdesc *ptdesc; ptdesc = virt_to_ptdesc(pmd_frag); /* drop all the pending references */ count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT; /* We allow PTE_FRAG_NR fragments from a PTE page */ if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) { pagetable_pmd_dtor(ptdesc); pagetable_free(ptdesc); } } static void destroy_pagetable_cache(struct mm_struct *mm) { void *frag; frag = mm->context.pte_frag; if (frag) pte_frag_destroy(frag); frag = mm->context.pmd_frag; if (frag) pmd_frag_destroy(frag); return; } void destroy_context(struct mm_struct *mm) { #ifdef CONFIG_SPAPR_TCE_IOMMU WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list)); #endif /* * For tasks which were successfully initialized we end up calling * arch_exit_mmap() which clears the process table entry. And * arch_exit_mmap() is called before the required fullmm TLB flush * which does a RIC=2 flush. Hence for an initialized task, we do clear * any cached process table entries. * * The condition below handles the error case during task init. 
We have * set the process table entry early and if we fail a task * initialization, we need to ensure the process table entry is zeroed. * We need not worry about process table entry caches because the task * never ran with the PID value. */ if (radix_enabled()) process_tb[mm->context.id].prtb0 = 0; else subpage_prot_free(mm); destroy_contexts(&mm->context); mm->context.id = MMU_NO_CONTEXT; } void arch_exit_mmap(struct mm_struct *mm) { destroy_pagetable_cache(mm); if (radix_enabled()) { /* * Radix doesn't have a valid bit in the process table * entries. However we know that at least P9 implementation * will avoid caching an entry with an invalid RTS field, * and 0 is invalid. So this will do. * * This runs before the "fullmm" tlb flush in exit_mmap, * which does a RIC=2 tlbie to clear the process table * entry. See the "fullmm" comments in tlb-radix.c. * * No barrier required here after the store because * this process will do the invalidate, which starts with * ptesync. */ process_tb[mm->context.id].prtb0 = 0; } } #ifdef CONFIG_PPC_RADIX_MMU void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next) { mtspr(SPRN_PID, next->context.id); isync(); } #endif /** * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined) * * This clears the CPU from mm_cpumask for all processes, and then flushes the * local TLB to ensure TLB coherency in case the CPU is onlined again. * * KVM guest translations are not necessarily flushed here. If KVM started * using mm_cpumask or the Linux APIs which do, this would have to be resolved. */ #ifdef CONFIG_HOTPLUG_CPU void cleanup_cpu_mmu_context(void) { int cpu = smp_processor_id(); clear_tasks_mm_cpumask(cpu); tlbiel_all(); } #endif
linux-master
arch/powerpc/mm/book3s64/mmu_context.c
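The MMU context code above hands out context IDs from an IDA, allocating the lowest free ID in a bounded range and failing when the range is exhausted. The allocator below is a simplified userspace sketch of that behaviour built on a plain array; the range limits and error value are assumptions, and it has none of the IDA's locking or scalability properties.

/* Sketch of a bounded lowest-free-ID allocator, loosely mirroring what
 * ida_alloc_range() provides for MMU context IDs. Not thread safe. */
#include <stdio.h>

#define MIN_ID 1
#define MAX_ID 63		/* assumed small range for the demo */

static unsigned char used[MAX_ID + 1];

static int alloc_id(void)
{
	for (int id = MIN_ID; id <= MAX_ID; id++) {
		if (!used[id]) {
			used[id] = 1;
			return id;
		}
	}
	return -1;		/* range exhausted */
}

static void free_id(int id)
{
	if (id >= MIN_ID && id <= MAX_ID)
		used[id] = 0;
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();

	printf("allocated %d and %d\n", a, b);
	free_id(a);
	printf("after freeing %d, next allocation is %d\n", a, alloc_id());
	return 0;
}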
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2007-2008 Paul Mackerras, IBM Corp. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/types.h> #include <linux/pagewalk.h> #include <linux/hugetlb.h> #include <linux/syscalls.h> #include <linux/pgtable.h> #include <linux/uaccess.h> /* * Free all pages allocated for subpage protection maps and pointers. * Also makes sure that the subpage_prot_table structure is * reinitialized for the next user. */ void subpage_prot_free(struct mm_struct *mm) { struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context); unsigned long i, j, addr; u32 **p; if (!spt) return; for (i = 0; i < 4; ++i) { if (spt->low_prot[i]) { free_page((unsigned long)spt->low_prot[i]); spt->low_prot[i] = NULL; } } addr = 0; for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) { p = spt->protptrs[i]; if (!p) continue; spt->protptrs[i] = NULL; for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr; ++j, addr += PAGE_SIZE) if (p[j]) free_page((unsigned long)p[j]); free_page((unsigned long)p); } spt->maxaddr = 0; kfree(spt); } static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, int npages) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; pgd = pgd_offset(mm, addr); p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) return; pud = pud_offset(p4d, addr); if (pud_none(*pud)) return; pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return; pte = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!pte) return; arch_enter_lazy_mmu_mode(); for (; npages > 0; --npages) { pte_update(mm, addr, pte, 0, 0, 0); addr += PAGE_SIZE; ++pte; } arch_leave_lazy_mmu_mode(); pte_unmap_unlock(pte - 1, ptl); } /* * Clear the subpage protection map for an address range, allowing * all accesses that are allowed by the pte permissions. */ static void subpage_prot_clear(unsigned long addr, unsigned long len) { struct mm_struct *mm = current->mm; struct subpage_prot_table *spt; u32 **spm, *spp; unsigned long i; size_t nw; unsigned long next, limit; mmap_write_lock(mm); spt = mm_ctx_subpage_prot(&mm->context); if (!spt) goto err_out; limit = addr + len; if (limit > spt->maxaddr) limit = spt->maxaddr; for (; addr < limit; addr = next) { next = pmd_addr_end(addr, limit); if (addr < 0x100000000UL) { spm = spt->low_prot; } else { spm = spt->protptrs[addr >> SBP_L3_SHIFT]; if (!spm) continue; } spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)]; if (!spp) continue; spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1); i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); nw = PTRS_PER_PTE - i; if (addr + (nw << PAGE_SHIFT) > next) nw = (next - addr) >> PAGE_SHIFT; memset(spp, 0, nw * sizeof(u32)); /* now flush any existing HPTEs for the range */ hpte_flush_range(mm, addr, nw); } err_out: mmap_write_unlock(mm); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; split_huge_pmd(vma, pmd, addr); return 0; } static const struct mm_walk_ops subpage_walk_ops = { .pmd_entry = subpage_walk_pmd_entry, .walk_lock = PGWALK_WRLOCK_VERIFY, }; static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, unsigned long len) { struct vm_area_struct *vma; VMA_ITERATOR(vmi, mm, addr); /* * We don't try too hard, we just mark all the vma in that range * VM_NOHUGEPAGE and split them. 
*/ for_each_vma_range(vmi, vma, addr + len) { vm_flags_set(vma, VM_NOHUGEPAGE); walk_page_vma(vma, &subpage_walk_ops, NULL); } } #else static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, unsigned long len) { return; } #endif /* * Copy in a subpage protection map for an address range. * The map has 2 bits per 4k subpage, so 32 bits per 64k page. * Each 2-bit field is 0 to allow any access, 1 to prevent writes, * 2 or 3 to prevent all accesses. * Note that the normal page protections also apply; the subpage * protection mechanism is an additional constraint, so putting 0 * in a 2-bit field won't allow writes to a page that is otherwise * write-protected. */ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr, unsigned long, len, u32 __user *, map) { struct mm_struct *mm = current->mm; struct subpage_prot_table *spt; u32 **spm, *spp; unsigned long i; size_t nw; unsigned long next, limit; int err; if (radix_enabled()) return -ENOENT; /* Check parameters */ if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) || addr >= mm->task_size || len >= mm->task_size || addr + len > mm->task_size) return -EINVAL; if (is_hugepage_only_range(mm, addr, len)) return -EINVAL; if (!map) { /* Clear out the protection map for the address range */ subpage_prot_clear(addr, len); return 0; } if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32))) return -EFAULT; mmap_write_lock(mm); spt = mm_ctx_subpage_prot(&mm->context); if (!spt) { /* * Allocate subpage prot table if not already done. * Do this with mmap_lock held */ spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL); if (!spt) { err = -ENOMEM; goto out; } mm->context.hash_context->spt = spt; } subpage_mark_vma_nohuge(mm, addr, len); for (limit = addr + len; addr < limit; addr = next) { next = pmd_addr_end(addr, limit); err = -ENOMEM; if (addr < 0x100000000UL) { spm = spt->low_prot; } else { spm = spt->protptrs[addr >> SBP_L3_SHIFT]; if (!spm) { spm = (u32 **)get_zeroed_page(GFP_KERNEL); if (!spm) goto out; spt->protptrs[addr >> SBP_L3_SHIFT] = spm; } } spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1); spp = *spm; if (!spp) { spp = (u32 *)get_zeroed_page(GFP_KERNEL); if (!spp) goto out; *spm = spp; } spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1); local_irq_disable(); demote_segment_4k(mm, addr); local_irq_enable(); i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); nw = PTRS_PER_PTE - i; if (addr + (nw << PAGE_SHIFT) > next) nw = (next - addr) >> PAGE_SHIFT; mmap_write_unlock(mm); if (__copy_from_user(spp, map, nw * sizeof(u32))) return -EFAULT; map += nw; mmap_write_lock(mm); /* now flush any existing HPTEs for the range */ hpte_flush_range(mm, addr, nw); } if (limit > spt->maxaddr) spt->maxaddr = limit; err = 0; out: mmap_write_unlock(mm); return err; }
linux-master
arch/powerpc/mm/book3s64/subpage_prot.c
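The subpage_prot record above stores two protection bits per 4 KB subpage, so one 32-bit word describes all sixteen subpages of a 64 KB page (0 allows any access, 1 prevents writes, 2 or 3 prevent all accesses, as the comment in the code states). The helper below sketches how such a map can be packed and queried in userspace; the encoding values come from that comment, while the helper names, bit ordering and sample data are purely illustrative.

/* Sketch: pack/unpack 2-bit per-4K-subpage protection codes in a u32,
 * sixteen subpages per 64K page, as described in the subpage_prot comment. */
#include <stdint.h>
#include <stdio.h>

/* 0 = allow all, 1 = prevent writes, 2 or 3 = prevent all accesses. */
static uint32_t set_subpage_prot(uint32_t map, unsigned subpage, unsigned prot)
{
	unsigned shift = subpage * 2;

	map &= ~(3u << shift);
	return map | ((prot & 3u) << shift);
}

static unsigned get_subpage_prot(uint32_t map, unsigned subpage)
{
	return (map >> (subpage * 2)) & 3u;
}

int main(void)
{
	uint32_t map = 0;			/* all subpages fully accessible */

	map = set_subpage_prot(map, 3, 1);	/* subpage 3: read-only */
	map = set_subpage_prot(map, 7, 2);	/* subpage 7: no access */

	for (unsigned i = 0; i < 16; i++)
		printf("subpage %2u: prot %u\n", i, get_subpage_prot(map, i));
	return 0;
}

As the syscall comment notes, these bits only tighten the normal page protections; clearing a field back to 0 cannot grant access the PTE itself forbids.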
// SPDX-License-Identifier: GPL-2.0-or-later /* * native hashtable management. * * SMP scalability work: * Copyright (C) 2001 Anton Blanchard <[email protected]>, IBM */ #undef DEBUG_LOW #include <linux/spinlock.h> #include <linux/bitops.h> #include <linux/of.h> #include <linux/processor.h> #include <linux/threads.h> #include <linux/smp.h> #include <linux/pgtable.h> #include <asm/machdep.h> #include <asm/mmu.h> #include <asm/mmu_context.h> #include <asm/trace.h> #include <asm/tlb.h> #include <asm/cputable.h> #include <asm/udbg.h> #include <asm/kexec.h> #include <asm/ppc-opcode.h> #include <asm/feature-fixups.h> #include <misc/cxl-base.h> #ifdef DEBUG_LOW #define DBG_LOW(fmt...) udbg_printf(fmt) #else #define DBG_LOW(fmt...) #endif #ifdef __BIG_ENDIAN__ #define HPTE_LOCK_BIT 3 #else #define HPTE_LOCK_BIT (56+3) #endif static DEFINE_RAW_SPINLOCK(native_tlbie_lock); #ifdef CONFIG_LOCKDEP static struct lockdep_map hpte_lock_map = STATIC_LOCKDEP_MAP_INIT("hpte_lock", &hpte_lock_map); static void acquire_hpte_lock(void) { lock_map_acquire(&hpte_lock_map); } static void release_hpte_lock(void) { lock_map_release(&hpte_lock_map); } #else static void acquire_hpte_lock(void) { } static void release_hpte_lock(void) { } #endif static inline unsigned long ___tlbie(unsigned long vpn, int psize, int apsize, int ssize) { unsigned long va; unsigned int penc; unsigned long sllp; /* * We need 14 to 65 bits of va for a tlibe of 4K page * With vpn we ignore the lower VPN_SHIFT bits already. * And top two bits are already ignored because we can * only accomodate 76 bits in a 64 bit vpn with a VPN_SHIFT * of 12. */ va = vpn << VPN_SHIFT; /* * clear top 16 bits of 64bit va, non SLS segment * Older versions of the architecture (2.02 and earler) require the * masking of the top 16 bits. */ if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA)) va &= ~(0xffffULL << 48); switch (psize) { case MMU_PAGE_4K: /* clear out bits after (52) [0....52.....63] */ va &= ~((1ul << (64 - 52)) - 1); va |= ssize << 8; sllp = get_sllp_encoding(apsize); va |= sllp << 5; asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) : "memory"); break; default: /* We need 14 to 14 + i bits of va */ penc = mmu_psize_defs[psize].penc[apsize]; va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); va |= penc << 12; va |= ssize << 8; /* * AVAL bits: * We don't need all the bits, but rest of the bits * must be ignored by the processor. * vpn cover upto 65 bits of va. (0...65) and we need * 58..64 bits of va. 
*/ va |= (vpn & 0xfe); /* AVAL */ va |= 1; /* L */ asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2) : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) : "memory"); break; } return va; } static inline void fixup_tlbie_vpn(unsigned long vpn, int psize, int apsize, int ssize) { if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { /* Radix flush for a hash guest */ unsigned long rb,rs,prs,r,ric; rb = PPC_BIT(52); /* IS = 2 */ rs = 0; /* lpid = 0 */ prs = 0; /* partition scoped */ r = 1; /* radix format */ ric = 0; /* RIC_FLSUH_TLB */ /* * Need the extra ptesync to make sure we don't * re-order the tlbie */ asm volatile("ptesync": : :"memory"); asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); } if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { /* Need the extra ptesync to ensure we don't reorder tlbie*/ asm volatile("ptesync": : :"memory"); ___tlbie(vpn, psize, apsize, ssize); } } static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) { unsigned long rb; rb = ___tlbie(vpn, psize, apsize, ssize); trace_tlbie(0, 0, rb, 0, 0, 0, 0); } static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) { unsigned long va; unsigned int penc; unsigned long sllp; /* VPN_SHIFT can be atmost 12 */ va = vpn << VPN_SHIFT; /* * clear top 16 bits of 64 bit va, non SLS segment * Older versions of the architecture (2.02 and earler) require the * masking of the top 16 bits. */ if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA)) va &= ~(0xffffULL << 48); switch (psize) { case MMU_PAGE_4K: /* clear out bits after(52) [0....52.....63] */ va &= ~((1ul << (64 - 52)) - 1); va |= ssize << 8; sllp = get_sllp_encoding(apsize); va |= sllp << 5; asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 0), %1) : : "r" (va), "i" (CPU_FTR_ARCH_206) : "memory"); break; default: /* We need 14 to 14 + i bits of va */ penc = mmu_psize_defs[psize].penc[apsize]; va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); va |= penc << 12; va |= ssize << 8; /* * AVAL bits: * We don't need all the bits, but rest of the bits * must be ignored by the processor. * vpn cover upto 65 bits of va. (0...65) and we need * 58..64 bits of va. 
*/ va |= (vpn & 0xfe); va |= 1; /* L */ asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 1), %1) : : "r" (va), "i" (CPU_FTR_ARCH_206) : "memory"); break; } trace_tlbie(0, 1, va, 0, 0, 0, 0); } static inline void tlbie(unsigned long vpn, int psize, int apsize, int ssize, int local) { unsigned int use_local; int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use(); if (use_local) use_local = mmu_psize_defs[psize].tlbiel; if (lock_tlbie && !use_local) raw_spin_lock(&native_tlbie_lock); asm volatile("ptesync": : :"memory"); if (use_local) { __tlbiel(vpn, psize, apsize, ssize); ppc_after_tlbiel_barrier(); } else { __tlbie(vpn, psize, apsize, ssize); fixup_tlbie_vpn(vpn, psize, apsize, ssize); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } if (lock_tlbie && !use_local) raw_spin_unlock(&native_tlbie_lock); } static inline void native_lock_hpte(struct hash_pte *hptep) { unsigned long *word = (unsigned long *)&hptep->v; acquire_hpte_lock(); while (1) { if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word)) break; spin_begin(); while(test_bit(HPTE_LOCK_BIT, word)) spin_cpu_relax(); spin_end(); } } static inline void native_unlock_hpte(struct hash_pte *hptep) { unsigned long *word = (unsigned long *)&hptep->v; release_hpte_lock(); clear_bit_unlock(HPTE_LOCK_BIT, word); } static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn, unsigned long pa, unsigned long rflags, unsigned long vflags, int psize, int apsize, int ssize) { struct hash_pte *hptep = htab_address + hpte_group; unsigned long hpte_v, hpte_r; unsigned long flags; int i; local_irq_save(flags); if (!(vflags & HPTE_V_BOLTED)) { DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx," " rflags=%lx, vflags=%lx, psize=%d)\n", hpte_group, vpn, pa, rflags, vflags, psize); } for (i = 0; i < HPTES_PER_GROUP; i++) { if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) { /* retry with lock held */ native_lock_hpte(hptep); if (! 
(be64_to_cpu(hptep->v) & HPTE_V_VALID)) break; native_unlock_hpte(hptep); } hptep++; } if (i == HPTES_PER_GROUP) { local_irq_restore(flags); return -1; } hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID; hpte_r = hpte_encode_r(pa, psize, apsize) | rflags; if (!(vflags & HPTE_V_BOLTED)) { DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n", i, hpte_v, hpte_r); } if (cpu_has_feature(CPU_FTR_ARCH_300)) { hpte_r = hpte_old_to_new_r(hpte_v, hpte_r); hpte_v = hpte_old_to_new_v(hpte_v); } hptep->r = cpu_to_be64(hpte_r); /* Guarantee the second dword is visible before the valid bit */ eieio(); /* * Now set the first dword including the valid bit * NOTE: this also unlocks the hpte */ release_hpte_lock(); hptep->v = cpu_to_be64(hpte_v); __asm__ __volatile__ ("ptesync" : : : "memory"); local_irq_restore(flags); return i | (!!(vflags & HPTE_V_SECONDARY) << 3); } static long native_hpte_remove(unsigned long hpte_group) { unsigned long hpte_v, flags; struct hash_pte *hptep; int i; int slot_offset; local_irq_save(flags); DBG_LOW(" remove(group=%lx)\n", hpte_group); /* pick a random entry to start at */ slot_offset = mftb() & 0x7; for (i = 0; i < HPTES_PER_GROUP; i++) { hptep = htab_address + hpte_group + slot_offset; hpte_v = be64_to_cpu(hptep->v); if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) { /* retry with lock held */ native_lock_hpte(hptep); hpte_v = be64_to_cpu(hptep->v); if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) break; native_unlock_hpte(hptep); } slot_offset++; slot_offset &= 0x7; } if (i == HPTES_PER_GROUP) { i = -1; goto out; } /* Invalidate the hpte. NOTE: this also unlocks it */ release_hpte_lock(); hptep->v = 0; out: local_irq_restore(flags); return i; } static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, unsigned long vpn, int bpsize, int apsize, int ssize, unsigned long flags) { struct hash_pte *hptep = htab_address + slot; unsigned long hpte_v, want_v; int ret = 0, local = 0; unsigned long irqflags; local_irq_save(irqflags); want_v = hpte_encode_avpn(vpn, bpsize, ssize); DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)", vpn, want_v & HPTE_V_AVPN, slot, newpp); hpte_v = hpte_get_old_v(hptep); /* * We need to invalidate the TLB always because hpte_remove doesn't do * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less * random entry from it. When we do that we don't invalidate the TLB * (hpte_remove) because we assume the old translation is still * technically "valid". 
*/ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) { DBG_LOW(" -> miss\n"); ret = -1; } else { native_lock_hpte(hptep); /* recheck with locks held */ hpte_v = hpte_get_old_v(hptep); if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))) { ret = -1; } else { DBG_LOW(" -> hit\n"); /* Update the HPTE */ hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PPP | HPTE_R_N)) | (newpp & (HPTE_R_PPP | HPTE_R_N | HPTE_R_C))); } native_unlock_hpte(hptep); } if (flags & HPTE_LOCAL_UPDATE) local = 1; /* * Ensure it is out of the tlb too if it is not a nohpte fault */ if (!(flags & HPTE_NOHPTE_UPDATE)) tlbie(vpn, bpsize, apsize, ssize, local); local_irq_restore(irqflags); return ret; } static long __native_hpte_find(unsigned long want_v, unsigned long slot) { struct hash_pte *hptep; unsigned long hpte_v; unsigned long i; for (i = 0; i < HPTES_PER_GROUP; i++) { hptep = htab_address + slot; hpte_v = hpte_get_old_v(hptep); if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) /* HPTE matches */ return slot; ++slot; } return -1; } static long native_hpte_find(unsigned long vpn, int psize, int ssize) { unsigned long hpte_group; unsigned long want_v; unsigned long hash; long slot; hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); want_v = hpte_encode_avpn(vpn, psize, ssize); /* * We try to keep bolted entries always in primary hash * But in some case we can find them in secondary too. */ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = __native_hpte_find(want_v, hpte_group); if (slot < 0) { /* Try in secondary */ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP; slot = __native_hpte_find(want_v, hpte_group); if (slot < 0) return -1; } return slot; } /* * Update the page protection bits. Intended to be used to create * guard pages for kernel data structures on pages which are bolted * in the HPT. Assumes pages being operated on will not be stolen. * * No need to lock here because we should be the only user. */ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, int psize, int ssize) { unsigned long vpn; unsigned long vsid; long slot; struct hash_pte *hptep; unsigned long flags; local_irq_save(flags); vsid = get_kernel_vsid(ea, ssize); vpn = hpt_vpn(ea, vsid, ssize); slot = native_hpte_find(vpn, psize, ssize); if (slot == -1) panic("could not find page to bolt\n"); hptep = htab_address + slot; /* Update the HPTE */ hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PPP | HPTE_R_N)) | (newpp & (HPTE_R_PPP | HPTE_R_N))); /* * Ensure it is out of the tlb too. Bolted entries base and * actual page size will be same. */ tlbie(vpn, psize, psize, ssize, 0); local_irq_restore(flags); } /* * Remove a bolted kernel entry. Memory hotplug uses this. * * No need to lock here because we should be the only user. 
*/ static int native_hpte_removebolted(unsigned long ea, int psize, int ssize) { unsigned long vpn; unsigned long vsid; long slot; struct hash_pte *hptep; unsigned long flags; local_irq_save(flags); vsid = get_kernel_vsid(ea, ssize); vpn = hpt_vpn(ea, vsid, ssize); slot = native_hpte_find(vpn, psize, ssize); if (slot == -1) return -ENOENT; hptep = htab_address + slot; VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED)); /* Invalidate the hpte */ hptep->v = 0; /* Invalidate the TLB */ tlbie(vpn, psize, psize, ssize, 0); local_irq_restore(flags); return 0; } static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, int bpsize, int apsize, int ssize, int local) { struct hash_pte *hptep = htab_address + slot; unsigned long hpte_v; unsigned long want_v; unsigned long flags; local_irq_save(flags); DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot); want_v = hpte_encode_avpn(vpn, bpsize, ssize); hpte_v = hpte_get_old_v(hptep); if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { native_lock_hpte(hptep); /* recheck with locks held */ hpte_v = hpte_get_old_v(hptep); if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { /* Invalidate the hpte. NOTE: this also unlocks it */ release_hpte_lock(); hptep->v = 0; } else native_unlock_hpte(hptep); } /* * We need to invalidate the TLB always because hpte_remove doesn't do * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less * random entry from it. When we do that we don't invalidate the TLB * (hpte_remove) because we assume the old translation is still * technically "valid". */ tlbie(vpn, bpsize, apsize, ssize, local); local_irq_restore(flags); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void native_hugepage_invalidate(unsigned long vsid, unsigned long addr, unsigned char *hpte_slot_array, int psize, int ssize, int local) { int i; struct hash_pte *hptep; int actual_psize = MMU_PAGE_16M; unsigned int max_hpte_count, valid; unsigned long flags, s_addr = addr; unsigned long hpte_v, want_v, shift; unsigned long hidx, vpn = 0, hash, slot; shift = mmu_psize_defs[psize].shift; max_hpte_count = 1U << (PMD_SHIFT - shift); local_irq_save(flags); for (i = 0; i < max_hpte_count; i++) { valid = hpte_valid(hpte_slot_array, i); if (!valid) continue; hidx = hpte_hash_index(hpte_slot_array, i); /* get the vpn */ addr = s_addr + (i * (1ul << shift)); vpn = hpt_vpn(addr, vsid, ssize); hash = hpt_hash(vpn, shift, ssize); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; hptep = htab_address + slot; want_v = hpte_encode_avpn(vpn, psize, ssize); hpte_v = hpte_get_old_v(hptep); /* Even if we miss, we need to invalidate the TLB */ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { /* recheck with locks held */ native_lock_hpte(hptep); hpte_v = hpte_get_old_v(hptep); if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { /* Invalidate the hpte. 
NOTE: this also unlocks it */ release_hpte_lock(); hptep->v = 0; } else native_unlock_hpte(hptep); } /* * We need to do tlb invalidate for all the address, tlbie * instruction compares entry_VA in tlb with the VA specified * here */ tlbie(vpn, psize, actual_psize, ssize, local); } local_irq_restore(flags); } #else static void native_hugepage_invalidate(unsigned long vsid, unsigned long addr, unsigned char *hpte_slot_array, int psize, int ssize, int local) { WARN(1, "%s called without THP support\n", __func__); } #endif static void hpte_decode(struct hash_pte *hpte, unsigned long slot, int *psize, int *apsize, int *ssize, unsigned long *vpn) { unsigned long avpn, pteg, vpi; unsigned long hpte_v = be64_to_cpu(hpte->v); unsigned long hpte_r = be64_to_cpu(hpte->r); unsigned long vsid, seg_off; int size, a_size, shift; /* Look at the 8 bit LP value */ unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1); if (cpu_has_feature(CPU_FTR_ARCH_300)) { hpte_v = hpte_new_to_old_v(hpte_v, hpte_r); hpte_r = hpte_new_to_old_r(hpte_r); } if (!(hpte_v & HPTE_V_LARGE)) { size = MMU_PAGE_4K; a_size = MMU_PAGE_4K; } else { size = hpte_page_sizes[lp] & 0xf; a_size = hpte_page_sizes[lp] >> 4; } /* This works for all page sizes, and for 256M and 1T segments */ *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT; shift = mmu_psize_defs[size].shift; avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm); pteg = slot / HPTES_PER_GROUP; if (hpte_v & HPTE_V_SECONDARY) pteg = ~pteg; switch (*ssize) { case MMU_SEGSIZE_256M: /* We only have 28 - 23 bits of seg_off in avpn */ seg_off = (avpn & 0x1f) << 23; vsid = avpn >> 5; /* We can find more bits from the pteg value */ if (shift < 23) { vpi = (vsid ^ pteg) & htab_hash_mask; seg_off |= vpi << shift; } *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT; break; case MMU_SEGSIZE_1T: /* We only have 40 - 23 bits of seg_off in avpn */ seg_off = (avpn & 0x1ffff) << 23; vsid = avpn >> 17; if (shift < 23) { vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask; seg_off |= vpi << shift; } *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT; break; default: *vpn = size = 0; } *psize = size; *apsize = a_size; } /* * clear all mappings on kexec. All cpus are in real mode (or they will * be when they isi), and we are the only one left. We rely on our kernel * mapping being 0xC0's and the hardware ignoring those two real bits. * * This must be called with interrupts disabled. * * Taking the native_tlbie_lock is unsafe here due to the possibility of * lockdep being on. On pre POWER5 hardware, not taking the lock could * cause deadlock. POWER5 and newer not taking the lock is fine. This only * gets called during boot before secondary CPUs have come up and during * crashdump and all bets are off anyway. * * TODO: add batching support when enabled. remember, no dynamic memory here, * although there is the control page available... */ static notrace void native_hpte_clear(void) { unsigned long vpn = 0; unsigned long slot, slots; struct hash_pte *hptep = htab_address; unsigned long hpte_v; unsigned long pteg_count; int psize, apsize, ssize; pteg_count = htab_hash_mask + 1; slots = pteg_count * HPTES_PER_GROUP; for (slot = 0; slot < slots; slot++, hptep++) { /* * we could lock the pte here, but we are the only cpu * running, right? and for crash dump, we probably * don't want to wait for a maybe bad cpu. */ hpte_v = be64_to_cpu(hptep->v); /* * Call __tlbie() here rather than tlbie() since we can't take the * native_tlbie_lock. 
*/ if (hpte_v & HPTE_V_VALID) { hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn); hptep->v = 0; ___tlbie(vpn, psize, apsize, ssize); } } asm volatile("eieio; tlbsync; ptesync":::"memory"); } /* * Batched hash table flush, we batch the tlbie's to avoid taking/releasing * the lock all the time */ static void native_flush_hash_range(unsigned long number, int local) { unsigned long vpn = 0; unsigned long hash, index, hidx, shift, slot; struct hash_pte *hptep; unsigned long hpte_v; unsigned long want_v; unsigned long flags; real_pte_t pte; struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); unsigned long psize = batch->psize; int ssize = batch->ssize; int i; unsigned int use_local; use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use(); local_irq_save(flags); for (i = 0; i < number; i++) { vpn = batch->vpn[i]; pte = batch->pte[i]; pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { hash = hpt_hash(vpn, shift, ssize); hidx = __rpte_to_hidx(pte, index); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; hptep = htab_address + slot; want_v = hpte_encode_avpn(vpn, psize, ssize); hpte_v = hpte_get_old_v(hptep); if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) continue; /* lock and try again */ native_lock_hpte(hptep); hpte_v = hpte_get_old_v(hptep); if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) native_unlock_hpte(hptep); else { release_hpte_lock(); hptep->v = 0; } } pte_iterate_hashed_end(); } if (use_local) { asm volatile("ptesync":::"memory"); for (i = 0; i < number; i++) { vpn = batch->vpn[i]; pte = batch->pte[i]; pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { __tlbiel(vpn, psize, psize, ssize); } pte_iterate_hashed_end(); } ppc_after_tlbiel_barrier(); } else { int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); if (lock_tlbie) raw_spin_lock(&native_tlbie_lock); asm volatile("ptesync":::"memory"); for (i = 0; i < number; i++) { vpn = batch->vpn[i]; pte = batch->pte[i]; pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { __tlbie(vpn, psize, psize, ssize); } pte_iterate_hashed_end(); } /* * Just do one more with the last used values. */ fixup_tlbie_vpn(vpn, psize, psize, ssize); asm volatile("eieio; tlbsync; ptesync":::"memory"); if (lock_tlbie) raw_spin_unlock(&native_tlbie_lock); } local_irq_restore(flags); } void __init hpte_init_native(void) { mmu_hash_ops.hpte_invalidate = native_hpte_invalidate; mmu_hash_ops.hpte_updatepp = native_hpte_updatepp; mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp; mmu_hash_ops.hpte_removebolted = native_hpte_removebolted; mmu_hash_ops.hpte_insert = native_hpte_insert; mmu_hash_ops.hpte_remove = native_hpte_remove; mmu_hash_ops.hpte_clear_all = native_hpte_clear; mmu_hash_ops.flush_hash_range = native_flush_hash_range; mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate; }
linux-master
arch/powerpc/mm/book3s64/hash_native.c
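native_lock_hpte() in the record above takes a per-HPTE lock by test-and-setting a reserved bit in the entry's first doubleword and spinning until the bit clears. The fragment below is a hedged C11-atomics sketch of that bit-spinlock idea for userspace; the bit number and the fake HPTE word are assumptions, and the kernel additionally folds in lockdep tracking and spin_begin()/spin_cpu_relax() busy-wait hints.

/* Sketch of a bit spinlock on one bit of a 64-bit word, roughly in the
 * spirit of native_lock_hpte()/native_unlock_hpte(). Bit 3 is arbitrary. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LOCK_BIT 3UL

static _Atomic uint64_t hpte_v = 0x8000000000000001ULL;	/* fake HPTE word */

static void lock_hpte(void)
{
	/* Spin until we are the ones who flipped the lock bit from 0 to 1. */
	while (atomic_fetch_or(&hpte_v, 1UL << LOCK_BIT) & (1UL << LOCK_BIT))
		;	/* a real implementation would relax the CPU here */
}

static void unlock_hpte(void)
{
	atomic_fetch_and(&hpte_v, ~(1UL << LOCK_BIT));
}

int main(void)
{
	lock_hpte();
	printf("locked:   %#llx\n", (unsigned long long)atomic_load(&hpte_v));
	unlock_hpte();
	printf("unlocked: %#llx\n", (unsigned long long)atomic_load(&hpte_v));
	return 0;
}

Keeping the lock inside the HPTE itself avoids a separate lock array and means the unlock can be folded into the store that publishes the new valid bit, which is exactly what native_hpte_insert() does.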
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerPC64 port by Mike Corrigan and Dave Engebretsen * {mikejc|engebret}@us.ibm.com * * Copyright (c) 2000 Mike Corrigan <[email protected]> * * SMP scalability work: * Copyright (C) 2001 Anton Blanchard <[email protected]>, IBM * * Module name: htab.c * * Description: * PowerPC Hashed Page Table functions */ #undef DEBUG #undef DEBUG_LOW #define pr_fmt(fmt) "hash-mmu: " fmt #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/sched/mm.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/sysctl.h> #include <linux/export.h> #include <linux/ctype.h> #include <linux/cache.h> #include <linux/init.h> #include <linux/signal.h> #include <linux/memblock.h> #include <linux/context_tracking.h> #include <linux/libfdt.h> #include <linux/pkeys.h> #include <linux/hugetlb.h> #include <linux/cpu.h> #include <linux/pgtable.h> #include <linux/debugfs.h> #include <linux/random.h> #include <linux/elf-randomize.h> #include <linux/of_fdt.h> #include <asm/interrupt.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/mmu_context.h> #include <asm/page.h> #include <asm/types.h> #include <linux/uaccess.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/eeh.h> #include <asm/tlb.h> #include <asm/cacheflush.h> #include <asm/cputable.h> #include <asm/sections.h> #include <asm/copro.h> #include <asm/udbg.h> #include <asm/code-patching.h> #include <asm/fadump.h> #include <asm/firmware.h> #include <asm/tm.h> #include <asm/trace.h> #include <asm/ps3.h> #include <asm/pte-walk.h> #include <asm/asm-prototypes.h> #include <asm/ultravisor.h> #include <mm/mmu_decl.h> #include "internal.h" #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) #endif #ifdef DEBUG_LOW #define DBG_LOW(fmt...) udbg_printf(fmt) #else #define DBG_LOW(fmt...) #endif #define KB (1024) #define MB (1024*KB) #define GB (1024L*MB) /* * Note: pte --> Linux PTE * HPTE --> PowerPC Hashed Page Table Entry * * Execution context: * htab_initialize is called with the MMU off (of course), but * the kernel has been copied down to zero so it can directly * reference global data. At this point it is very difficult * to print debug info. * */ static unsigned long _SDR1; u8 hpte_page_sizes[1 << LP_BITS]; EXPORT_SYMBOL_GPL(hpte_page_sizes); struct hash_pte *htab_address; unsigned long htab_size_bytes; unsigned long htab_hash_mask; EXPORT_SYMBOL_GPL(htab_hash_mask); int mmu_linear_psize = MMU_PAGE_4K; EXPORT_SYMBOL_GPL(mmu_linear_psize); int mmu_virtual_psize = MMU_PAGE_4K; int mmu_vmalloc_psize = MMU_PAGE_4K; EXPORT_SYMBOL_GPL(mmu_vmalloc_psize); int mmu_io_psize = MMU_PAGE_4K; int mmu_kernel_ssize = MMU_SEGSIZE_256M; EXPORT_SYMBOL_GPL(mmu_kernel_ssize); int mmu_highuser_ssize = MMU_SEGSIZE_256M; u16 mmu_slb_size = 64; EXPORT_SYMBOL_GPL(mmu_slb_size); #ifdef CONFIG_PPC_64K_PAGES int mmu_ci_restrictions; #endif static u8 *linear_map_hash_slots; static unsigned long linear_map_hash_count; struct mmu_hash_ops mmu_hash_ops; EXPORT_SYMBOL(mmu_hash_ops); /* * These are definitions of page sizes arrays to be used when none * is provided by the firmware. */ /* * Fallback (4k pages only) */ static struct mmu_psize_def mmu_psize_defaults[] = { [MMU_PAGE_4K] = { .shift = 12, .sllp = 0, .penc = {[MMU_PAGE_4K] = 0, [1 ... 
MMU_PAGE_COUNT - 1] = -1}, .avpnm = 0, .tlbiel = 0, }, }; /* * POWER4, GPUL, POWER5 * * Support for 16Mb large pages */ static struct mmu_psize_def mmu_psize_defaults_gp[] = { [MMU_PAGE_4K] = { .shift = 12, .sllp = 0, .penc = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1}, .avpnm = 0, .tlbiel = 1, }, [MMU_PAGE_16M] = { .shift = 24, .sllp = SLB_VSID_L, .penc = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0, [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 }, .avpnm = 0x1UL, .tlbiel = 0, }, }; static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is) { unsigned long rb; rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53)); asm volatile("tlbiel %0" : : "r" (rb)); } /* * tlbiel instruction for hash, set invalidation * i.e., r=1 and is=01 or is=10 or is=11 */ static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is, unsigned int pid, unsigned int ric, unsigned int prs) { unsigned long rb; unsigned long rs; unsigned int r = 0; /* hash format */ rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53)); rs = ((unsigned long)pid << PPC_BITLSHIFT(31)); asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r) : "memory"); } static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is) { unsigned int set; asm volatile("ptesync": : :"memory"); for (set = 0; set < num_sets; set++) tlbiel_hash_set_isa206(set, is); ppc_after_tlbiel_barrier(); } static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) { unsigned int set; asm volatile("ptesync": : :"memory"); /* * Flush the partition table cache if this is HV mode. */ if (early_cpu_has_feature(CPU_FTR_HVMODE)) tlbiel_hash_set_isa300(0, is, 0, 2, 0); /* * Now invalidate the process table cache. UPRT=0 HPT modes (what * current hardware implements) do not use the process table, but * add the flushes anyway. * * From ISA v3.0B p. 1078: * The following forms are invalid. * * PRS=1, R=0, and RIC!=2 (The only process-scoped * HPT caching is of the Process Table.) */ tlbiel_hash_set_isa300(0, is, 0, 2, 1); /* * Then flush the sets of the TLB proper. Hash mode uses * partition scoped TLB translations, which may be flushed * in !HV mode. 
*/ for (set = 0; set < num_sets; set++) tlbiel_hash_set_isa300(set, is, 0, 0, 0); ppc_after_tlbiel_barrier(); asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory"); } void hash__tlbiel_all(unsigned int action) { unsigned int is; switch (action) { case TLB_INVAL_SCOPE_GLOBAL: is = 3; break; case TLB_INVAL_SCOPE_LPID: is = 2; break; default: BUG(); } if (early_cpu_has_feature(CPU_FTR_ARCH_300)) tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is); else if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) tlbiel_all_isa206(POWER8_TLB_SETS, is); else if (early_cpu_has_feature(CPU_FTR_ARCH_206)) tlbiel_all_isa206(POWER7_TLB_SETS, is); else WARN(1, "%s called on pre-POWER7 CPU\n", __func__); } /* * 'R' and 'C' update notes: * - Under pHyp or KVM, the updatepp path will not set C, thus it *will* * create writeable HPTEs without C set, because the hcall H_PROTECT * that we use in that case will not update C * - The above is however not a problem, because we also don't do that * fancy "no flush" variant of eviction and we use H_REMOVE which will * do the right thing and thus we don't have the race I described earlier * * - Under bare metal, we do have the race, so we need R and C set * - We make sure R is always set and never lost * - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping */ unsigned long htab_convert_pte_flags(unsigned long pteflags, unsigned long flags) { unsigned long rflags = 0; /* _PAGE_EXEC -> NOEXEC */ if ((pteflags & _PAGE_EXEC) == 0) rflags |= HPTE_R_N; /* * PPP bits: * Linux uses slb key 0 for kernel and 1 for user. * kernel RW areas are mapped with PPP=0b000 * User area is mapped with PPP=0b010 for read/write * or PPP=0b011 for read-only (including writeable but clean pages). */ if (pteflags & _PAGE_PRIVILEGED) { /* * Kernel read only mapped with ppp bits 0b110 */ if (!(pteflags & _PAGE_WRITE)) { if (mmu_has_feature(MMU_FTR_KERNEL_RO)) rflags |= (HPTE_R_PP0 | 0x2); else rflags |= 0x3; } } else { if (pteflags & _PAGE_RWX) rflags |= 0x2; if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY))) rflags |= 0x1; } /* * We can't allow hardware to update hpte bits. Hence always * set 'R' bit and set 'C' if it is a write fault */ rflags |= HPTE_R_R; if (pteflags & _PAGE_DIRTY) rflags |= HPTE_R_C; /* * Add in WIG bits */ if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) rflags |= HPTE_R_I; else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT) rflags |= (HPTE_R_I | HPTE_R_G); else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO) rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M); else /* * Add memory coherence if cache inhibited is not set */ rflags |= HPTE_R_M; rflags |= pte_to_hpte_pkey_bits(pteflags, flags); return rflags; } int htab_bolt_mapping(unsigned long vstart, unsigned long vend, unsigned long pstart, unsigned long prot, int psize, int ssize) { unsigned long vaddr, paddr; unsigned int step, shift; int ret = 0; shift = mmu_psize_defs[psize].shift; step = 1 << shift; prot = htab_convert_pte_flags(prot, HPTE_USE_KERNEL_KEY); DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n", vstart, vend, pstart, prot, psize, ssize); /* Carefully map only the possible range */ vaddr = ALIGN(vstart, step); paddr = ALIGN(pstart, step); vend = ALIGN_DOWN(vend, step); for (; vaddr < vend; vaddr += step, paddr += step) { unsigned long hash, hpteg; unsigned long vsid = get_kernel_vsid(vaddr, ssize); unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); unsigned long tprot = prot; bool secondary_hash = false; /* * If we hit a bad address return error. 
*/ if (!vsid) return -1; /* Make kernel text executable */ if (overlaps_kernel_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; /* * If relocatable, check if it overlaps interrupt vectors that * are copied down to real 0. For relocatable kernel * (e.g. kdump case) we copy interrupt vectors down to real * address 0. Mark that region as executable. This is * because on p8 system with relocation on exception feature * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence * in order to execute the interrupt handlers in virtual * mode the vector region need to be marked as executable. */ if ((PHYSICAL_START > MEMORY_START) && overlaps_interrupt_vector_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; hash = hpt_hash(vpn, shift, ssize); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); BUG_ON(!mmu_hash_ops.hpte_insert); repeat: ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot, HPTE_V_BOLTED, psize, psize, ssize); if (ret == -1) { /* * Try to keep bolted entries in primary. * Remove non bolted entries and try insert again */ ret = mmu_hash_ops.hpte_remove(hpteg); if (ret != -1) ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot, HPTE_V_BOLTED, psize, psize, ssize); if (ret == -1 && !secondary_hash) { secondary_hash = true; hpteg = ((~hash & htab_hash_mask) * HPTES_PER_GROUP); goto repeat; } } if (ret < 0) break; cond_resched(); if (debug_pagealloc_enabled_or_kfence() && (paddr >> PAGE_SHIFT) < linear_map_hash_count) linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80; } return ret < 0 ? ret : 0; } int htab_remove_mapping(unsigned long vstart, unsigned long vend, int psize, int ssize) { unsigned long vaddr, time_limit; unsigned int step, shift; int rc; int ret = 0; shift = mmu_psize_defs[psize].shift; step = 1 << shift; if (!mmu_hash_ops.hpte_removebolted) return -ENODEV; /* Unmap the full range specificied */ vaddr = ALIGN_DOWN(vstart, step); time_limit = jiffies + HZ; for (;vaddr < vend; vaddr += step) { rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize); /* * For large number of mappings introduce a cond_resched() * to prevent softlockup warnings. */ if (time_after(jiffies, time_limit)) { cond_resched(); time_limit = jiffies + HZ; } if (rc == -ENOENT) { ret = -ENOENT; continue; } if (rc < 0) return rc; } return ret; } static bool disable_1tb_segments __ro_after_init; static int __init parse_disable_1tb_segments(char *p) { disable_1tb_segments = true; return 0; } early_param("disable_1tb_segments", parse_disable_1tb_segments); bool stress_hpt_enabled __initdata; static int __init parse_stress_hpt(char *p) { stress_hpt_enabled = true; return 0; } early_param("stress_hpt", parse_stress_hpt); __ro_after_init DEFINE_STATIC_KEY_FALSE(stress_hpt_key); /* * per-CPU array allocated if we enable stress_hpt. */ #define STRESS_MAX_GROUPS 16 struct stress_hpt_struct { unsigned long last_group[STRESS_MAX_GROUPS]; }; static inline int stress_nr_groups(void) { /* * LPAR H_REMOVE flushes TLB, so need some number > 1 of entries * to allow practical forward progress. Bare metal returns 1, which * seems to help uncover more bugs. 
*/ if (firmware_has_feature(FW_FEATURE_LPAR)) return STRESS_MAX_GROUPS; else return 1; } static struct stress_hpt_struct *stress_hpt_struct; static int __init htab_dt_scan_seg_sizes(unsigned long node, const char *uname, int depth, void *data) { const char *type = of_get_flat_dt_prop(node, "device_type", NULL); const __be32 *prop; int size = 0; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size); if (prop == NULL) return 0; for (; size >= 4; size -= 4, ++prop) { if (be32_to_cpu(prop[0]) == 40) { DBG("1T segment support detected\n"); if (disable_1tb_segments) { DBG("1T segments disabled by command line\n"); break; } cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT; return 1; } } cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B; return 0; } static int __init get_idx_from_shift(unsigned int shift) { int idx = -1; switch (shift) { case 0xc: idx = MMU_PAGE_4K; break; case 0x10: idx = MMU_PAGE_64K; break; case 0x14: idx = MMU_PAGE_1M; break; case 0x18: idx = MMU_PAGE_16M; break; case 0x22: idx = MMU_PAGE_16G; break; } return idx; } static int __init htab_dt_scan_page_sizes(unsigned long node, const char *uname, int depth, void *data) { const char *type = of_get_flat_dt_prop(node, "device_type", NULL); const __be32 *prop; int size = 0; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size); if (!prop) return 0; pr_info("Page sizes from device-tree:\n"); size /= 4; cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE); while(size > 0) { unsigned int base_shift = be32_to_cpu(prop[0]); unsigned int slbenc = be32_to_cpu(prop[1]); unsigned int lpnum = be32_to_cpu(prop[2]); struct mmu_psize_def *def; int idx, base_idx; size -= 3; prop += 3; base_idx = get_idx_from_shift(base_shift); if (base_idx < 0) { /* skip the pte encoding also */ prop += lpnum * 2; size -= lpnum * 2; continue; } def = &mmu_psize_defs[base_idx]; if (base_idx == MMU_PAGE_16M) cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE; def->shift = base_shift; if (base_shift <= 23) def->avpnm = 0; else def->avpnm = (1 << (base_shift - 23)) - 1; def->sllp = slbenc; /* * We don't know for sure what's up with tlbiel, so * for now we only set it for 4K and 64K pages */ if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K) def->tlbiel = 1; else def->tlbiel = 0; while (size > 0 && lpnum) { unsigned int shift = be32_to_cpu(prop[0]); int penc = be32_to_cpu(prop[1]); prop += 2; size -= 2; lpnum--; idx = get_idx_from_shift(shift); if (idx < 0) continue; if (penc == -1) pr_err("Invalid penc for base_shift=%d " "shift=%d\n", base_shift, shift); def->penc[idx] = penc; pr_info("base_shift=%d: shift=%d, sllp=0x%04lx," " avpnm=0x%08lx, tlbiel=%d, penc=%d\n", base_shift, shift, def->sllp, def->avpnm, def->tlbiel, def->penc[idx]); } } return 1; } #ifdef CONFIG_HUGETLB_PAGE /* * Scan for 16G memory blocks that have been set aside for huge pages * and reserve those blocks for 16G huge pages. 
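	 * Such blocks are advertised as "memory" device-tree nodes carrying an
	 * "ibm,expected#pages" property, which is what is matched below.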
*/ static int __init htab_dt_scan_hugepage_blocks(unsigned long node, const char *uname, int depth, void *data) { const char *type = of_get_flat_dt_prop(node, "device_type", NULL); const __be64 *addr_prop; const __be32 *page_count_prop; unsigned int expected_pages; long unsigned int phys_addr; long unsigned int block_size; /* We are scanning "memory" nodes only */ if (type == NULL || strcmp(type, "memory") != 0) return 0; /* * This property is the log base 2 of the number of virtual pages that * will represent this memory block. */ page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL); if (page_count_prop == NULL) return 0; expected_pages = (1 << be32_to_cpu(page_count_prop[0])); addr_prop = of_get_flat_dt_prop(node, "reg", NULL); if (addr_prop == NULL) return 0; phys_addr = be64_to_cpu(addr_prop[0]); block_size = be64_to_cpu(addr_prop[1]); if (block_size != (16 * GB)) return 0; printk(KERN_INFO "Huge page(16GB) memory: " "addr = 0x%lX size = 0x%lX pages = %d\n", phys_addr, block_size, expected_pages); if (phys_addr + block_size * expected_pages <= memblock_end_of_DRAM()) { memblock_reserve(phys_addr, block_size * expected_pages); pseries_add_gpage(phys_addr, block_size, expected_pages); } return 0; } #endif /* CONFIG_HUGETLB_PAGE */ static void __init mmu_psize_set_default_penc(void) { int bpsize, apsize; for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++) mmu_psize_defs[bpsize].penc[apsize] = -1; } #ifdef CONFIG_PPC_64K_PAGES static bool __init might_have_hea(void) { /* * The HEA ethernet adapter requires awareness of the * GX bus. Without that awareness we can easily assume * we will never see an HEA ethernet device. */ #ifdef CONFIG_IBMEBUS return !cpu_has_feature(CPU_FTR_ARCH_207S) && firmware_has_feature(FW_FEATURE_SPLPAR); #else return false; #endif } #endif /* #ifdef CONFIG_PPC_64K_PAGES */ static void __init htab_scan_page_sizes(void) { int rc; /* se the invalid penc to -1 */ mmu_psize_set_default_penc(); /* Default to 4K pages only */ memcpy(mmu_psize_defs, mmu_psize_defaults, sizeof(mmu_psize_defaults)); /* * Try to find the available page sizes in the device-tree */ rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL); if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) { /* * Nothing in the device-tree, but the CPU supports 16M pages, * so let's fallback on a known size list for 16M capable CPUs. */ memcpy(mmu_psize_defs, mmu_psize_defaults_gp, sizeof(mmu_psize_defaults_gp)); } #ifdef CONFIG_HUGETLB_PAGE if (!hugetlb_disabled && !early_radix_enabled() ) { /* Reserve 16G huge page memory sections for huge pages */ of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL); } #endif /* CONFIG_HUGETLB_PAGE */ } /* * Fill in the hpte_page_sizes[] array. * We go through the mmu_psize_defs[] array looking for all the * supported base/actual page size combinations. Each combination * has a unique pagesize encoding (penc) value in the low bits of * the LP field of the HPTE. For actual page sizes less than 1MB, * some of the upper LP bits are used for RPN bits, meaning that * we need to fill in several entries in hpte_page_sizes[]. * * In diagrammatic form, with r = RPN bits and z = page size bits: * PTE LP actual page size * rrrr rrrz >=8KB * rrrr rrzz >=16KB * rrrr rzzz >=32KB * rrrr zzzz >=64KB * ... * * The zzzz bits are implementation-specific but are chosen so that * no encoding for a larger page size uses the same value in its * low-order N bits as the encoding for the 2^(12+N) byte page size * (if it exists). 
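	 * For example, a 64K actual page size leaves the upper bits of the LP
	 * field as rrrr bits, so init_hpte_page_sizes() below stores the same
	 * (ap, bp) pair once for every possible value of those rrrr bits.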
*/ static void __init init_hpte_page_sizes(void) { long int ap, bp; long int shift, penc; for (bp = 0; bp < MMU_PAGE_COUNT; ++bp) { if (!mmu_psize_defs[bp].shift) continue; /* not a supported page size */ for (ap = bp; ap < MMU_PAGE_COUNT; ++ap) { penc = mmu_psize_defs[bp].penc[ap]; if (penc == -1 || !mmu_psize_defs[ap].shift) continue; shift = mmu_psize_defs[ap].shift - LP_SHIFT; if (shift <= 0) continue; /* should never happen */ /* * For page sizes less than 1MB, this loop * replicates the entry for all possible values * of the rrrr bits. */ while (penc < (1 << LP_BITS)) { hpte_page_sizes[penc] = (ap << 4) | bp; penc += 1 << shift; } } } } static void __init htab_init_page_sizes(void) { bool aligned = true; init_hpte_page_sizes(); if (!debug_pagealloc_enabled_or_kfence()) { /* * Pick a size for the linear mapping. Currently, we only * support 16M, 1M and 4K which is the default */ if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && (unsigned long)_stext % 0x1000000) { if (mmu_psize_defs[MMU_PAGE_16M].shift) pr_warn("Kernel not 16M aligned, disabling 16M linear map alignment\n"); aligned = false; } if (mmu_psize_defs[MMU_PAGE_16M].shift && aligned) mmu_linear_psize = MMU_PAGE_16M; else if (mmu_psize_defs[MMU_PAGE_1M].shift) mmu_linear_psize = MMU_PAGE_1M; } #ifdef CONFIG_PPC_64K_PAGES /* * Pick a size for the ordinary pages. Default is 4K, we support * 64K for user mappings and vmalloc if supported by the processor. * We only use 64k for ioremap if the processor * (and firmware) support cache-inhibited large pages. * If not, we use 4k and set mmu_ci_restrictions so that * hash_page knows to switch processes that use cache-inhibited * mappings to 4k pages. */ if (mmu_psize_defs[MMU_PAGE_64K].shift) { mmu_virtual_psize = MMU_PAGE_64K; mmu_vmalloc_psize = MMU_PAGE_64K; if (mmu_linear_psize == MMU_PAGE_4K) mmu_linear_psize = MMU_PAGE_64K; if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) { /* * When running on pSeries using 64k pages for ioremap * would stop us accessing the HEA ethernet. So if we * have the chance of ever seeing one, stay at 4k. 
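	 * (might_have_hea() above can only return true on pre-POWER8 pSeries
	 * LPARs built with CONFIG_IBMEBUS)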
*/ if (!might_have_hea()) mmu_io_psize = MMU_PAGE_64K; } else mmu_ci_restrictions = 1; } #endif /* CONFIG_PPC_64K_PAGES */ #ifdef CONFIG_SPARSEMEM_VMEMMAP /* * We try to use 16M pages for vmemmap if that is supported * and we have at least 1G of RAM at boot */ if (mmu_psize_defs[MMU_PAGE_16M].shift && memblock_phys_mem_size() >= 0x40000000) mmu_vmemmap_psize = MMU_PAGE_16M; else mmu_vmemmap_psize = mmu_virtual_psize; #endif /* CONFIG_SPARSEMEM_VMEMMAP */ printk(KERN_DEBUG "Page orders: linear mapping = %d, " "virtual = %d, io = %d" #ifdef CONFIG_SPARSEMEM_VMEMMAP ", vmemmap = %d" #endif "\n", mmu_psize_defs[mmu_linear_psize].shift, mmu_psize_defs[mmu_virtual_psize].shift, mmu_psize_defs[mmu_io_psize].shift #ifdef CONFIG_SPARSEMEM_VMEMMAP ,mmu_psize_defs[mmu_vmemmap_psize].shift #endif ); } static int __init htab_dt_scan_pftsize(unsigned long node, const char *uname, int depth, void *data) { const char *type = of_get_flat_dt_prop(node, "device_type", NULL); const __be32 *prop; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL); if (prop != NULL) { /* pft_size[0] is the NUMA CEC cookie */ ppc64_pft_size = be32_to_cpu(prop[1]); return 1; } return 0; } unsigned htab_shift_for_mem_size(unsigned long mem_size) { unsigned memshift = __ilog2(mem_size); unsigned pshift = mmu_psize_defs[mmu_virtual_psize].shift; unsigned pteg_shift; /* round mem_size up to next power of 2 */ if ((1UL << memshift) < mem_size) memshift += 1; /* aim for 2 pages / pteg */ pteg_shift = memshift - (pshift + 1); /* * 2^11 PTEGS of 128 bytes each, ie. 2^18 bytes is the minimum htab * size permitted by the architecture. */ return max(pteg_shift + 7, 18U); } static unsigned long __init htab_get_table_size(void) { /* * If hash size isn't already provided by the platform, we try to * retrieve it from the device-tree. 
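	 * (the "ibm,pft-size" property, parsed by htab_dt_scan_pftsize() above)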
	 * If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	return 1UL << htab_shift_for_mem_size(memblock_phys_mem_size());
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int resize_hpt_for_hotplug(unsigned long new_mem_size)
{
	unsigned target_hpt_shift;

	if (!mmu_hash_ops.resize_hpt)
		return 0;

	target_hpt_shift = htab_shift_for_mem_size(new_mem_size);

	/*
	 * To avoid lots of HPT resizes if memory size is fluctuating
	 * across a boundary, we deliberately have some hysteresis
	 * here: we immediately increase the HPT size if the target
	 * shift exceeds the current shift, but we won't attempt to
	 * reduce unless the target shift is at least 2 below the
	 * current shift
	 */
	if (target_hpt_shift > ppc64_pft_size ||
	    target_hpt_shift < ppc64_pft_size - 1)
		return mmu_hash_ops.resize_hpt(target_hpt_shift);

	return 0;
}

int hash__create_section_mapping(unsigned long start, unsigned long end,
				 int nid, pgprot_t prot)
{
	int rc;

	if (end >= H_VMALLOC_START) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	rc = htab_bolt_mapping(start, end, __pa(start),
			       pgprot_val(prot), mmu_linear_psize,
			       mmu_kernel_ssize);

	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, end, mmu_linear_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

int hash__remove_section_mapping(unsigned long start, unsigned long end)
{
	int rc = htab_remove_mapping(start, end, mmu_linear_psize,
				     mmu_kernel_ssize);

	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
		pr_warn("Hash collision while resizing HPT\n");

	return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static void __init hash_init_partition_table(phys_addr_t hash_table,
					     unsigned long htab_size)
{
	mmu_partition_table_init();

	/*
	 * PS field (VRMA page size) is not used for LPID 0, hence set to 0.
	 * For now, UPRT is 0 and we have no segment table.
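	 * The second doubleword is therefore left as 0, and dw0 carries the
	 * HTAB base ORed with the encoded HTAB size (log2(size) - 18), as
	 * computed below.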
*/ htab_size = __ilog2(htab_size) - 18; mmu_partition_table_set_entry(0, hash_table | htab_size, 0, false); pr_info("Partition table %p\n", partition_tb); } void hpt_clear_stress(void); static struct timer_list stress_hpt_timer; static void stress_hpt_timer_fn(struct timer_list *timer) { int next_cpu; hpt_clear_stress(); if (!firmware_has_feature(FW_FEATURE_LPAR)) tlbiel_all(); next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); if (next_cpu >= nr_cpu_ids) next_cpu = cpumask_first(cpu_online_mask); stress_hpt_timer.expires = jiffies + msecs_to_jiffies(10); add_timer_on(&stress_hpt_timer, next_cpu); } static void __init htab_initialize(void) { unsigned long table; unsigned long pteg_count; unsigned long prot; phys_addr_t base = 0, size = 0, end; u64 i; DBG(" -> htab_initialize()\n"); if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) { mmu_kernel_ssize = MMU_SEGSIZE_1T; mmu_highuser_ssize = MMU_SEGSIZE_1T; printk(KERN_INFO "Using 1TB segments\n"); } if (stress_slb_enabled) static_branch_enable(&stress_slb_key); if (stress_hpt_enabled) { unsigned long tmp; static_branch_enable(&stress_hpt_key); // Too early to use nr_cpu_ids, so use NR_CPUS tmp = memblock_phys_alloc_range(sizeof(struct stress_hpt_struct) * NR_CPUS, __alignof__(struct stress_hpt_struct), 0, MEMBLOCK_ALLOC_ANYWHERE); memset((void *)tmp, 0xff, sizeof(struct stress_hpt_struct) * NR_CPUS); stress_hpt_struct = __va(tmp); timer_setup(&stress_hpt_timer, stress_hpt_timer_fn, 0); stress_hpt_timer.expires = jiffies + msecs_to_jiffies(10); add_timer(&stress_hpt_timer); } /* * Calculate the required size of the htab. We want the number of * PTEGs to equal one half the number of real pages. */ htab_size_bytes = htab_get_table_size(); pteg_count = htab_size_bytes >> 7; htab_hash_mask = pteg_count - 1; if (firmware_has_feature(FW_FEATURE_LPAR) || firmware_has_feature(FW_FEATURE_PS3_LV1)) { /* Using a hypervisor which owns the htab */ htab_address = NULL; _SDR1 = 0; #ifdef CONFIG_FA_DUMP /* * If firmware assisted dump is active firmware preserves * the contents of htab along with entire partition memory. * Clear the htab if firmware assisted dump is active so * that we dont end up using old mappings. 
*/ if (is_fadump_active() && mmu_hash_ops.hpte_clear_all) mmu_hash_ops.hpte_clear_all(); #endif } else { unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE; #ifdef CONFIG_PPC_CELL /* * Cell may require the hash table down low when using the * Axon IOMMU in order to fit the dynamic region over it, see * comments in cell/iommu.c */ if (fdt_subnode_offset(initial_boot_params, 0, "axon") > 0) { limit = 0x80000000; pr_info("Hash table forced below 2G for Axon IOMMU\n"); } #endif /* CONFIG_PPC_CELL */ table = memblock_phys_alloc_range(htab_size_bytes, htab_size_bytes, 0, limit); if (!table) panic("ERROR: Failed to allocate %pa bytes below %pa\n", &htab_size_bytes, &limit); DBG("Hash table allocated at %lx, size: %lx\n", table, htab_size_bytes); htab_address = __va(table); /* htab absolute addr + encoded htabsize */ _SDR1 = table + __ilog2(htab_size_bytes) - 18; /* Initialize the HPT with no entries */ memset((void *)table, 0, htab_size_bytes); if (!cpu_has_feature(CPU_FTR_ARCH_300)) /* Set SDR1 */ mtspr(SPRN_SDR1, _SDR1); else hash_init_partition_table(table, htab_size_bytes); } prot = pgprot_val(PAGE_KERNEL); if (debug_pagealloc_enabled_or_kfence()) { linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; linear_map_hash_slots = memblock_alloc_try_nid( linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT, ppc64_rma_size, NUMA_NO_NODE); if (!linear_map_hash_slots) panic("%s: Failed to allocate %lu bytes max_addr=%pa\n", __func__, linear_map_hash_count, &ppc64_rma_size); } /* create bolted the linear mapping in the hash table */ for_each_mem_range(i, &base, &end) { size = end - base; base = (unsigned long)__va(base); DBG("creating mapping for region: %lx..%lx (prot: %lx)\n", base, size, prot); if ((base + size) >= H_VMALLOC_START) { pr_warn("Outside the supported range\n"); continue; } BUG_ON(htab_bolt_mapping(base, base + size, __pa(base), prot, mmu_linear_psize, mmu_kernel_ssize)); } memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); /* * If we have a memory_limit and we've allocated TCEs then we need to * explicitly map the TCE area at the top of RAM. We also cope with the * case that the TCEs start below memory_limit. * tce_alloc_start/end are 16MB aligned so the mapping should work * for either 4K or 16MB pages. */ if (tce_alloc_start) { tce_alloc_start = (unsigned long)__va(tce_alloc_start); tce_alloc_end = (unsigned long)__va(tce_alloc_end); if (base + size >= tce_alloc_start) tce_alloc_start = base + size + 1; BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end, __pa(tce_alloc_start), prot, mmu_linear_psize, mmu_kernel_ssize)); } DBG(" <- htab_initialize()\n"); } #undef KB #undef MB void __init hash__early_init_devtree(void) { /* Initialize segment sizes */ of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL); /* Initialize page sizes */ htab_scan_page_sizes(); } static struct hash_mm_context init_hash_mm_context; void __init hash__early_init_mmu(void) { #ifndef CONFIG_PPC_64K_PAGES /* * We have code in __hash_page_4K() and elsewhere, which assumes it can * do the following: * new_pte |= (slot << H_PAGE_F_GIX_SHIFT) & (H_PAGE_F_SECOND | H_PAGE_F_GIX); * * Where the slot number is between 0-15, and values of 8-15 indicate * the secondary bucket. For that code to work H_PAGE_F_SECOND and * H_PAGE_F_GIX must occupy four contiguous bits in the PTE, and * H_PAGE_F_SECOND must be placed above H_PAGE_F_GIX. Assert that here * with a BUILD_BUG_ON(). 
*/ BUILD_BUG_ON(H_PAGE_F_SECOND != (1ul << (H_PAGE_F_GIX_SHIFT + 3))); #endif /* CONFIG_PPC_64K_PAGES */ htab_init_page_sizes(); /* * initialize page table size */ __pte_frag_nr = H_PTE_FRAG_NR; __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT; __pmd_frag_nr = H_PMD_FRAG_NR; __pmd_frag_size_shift = H_PMD_FRAG_SIZE_SHIFT; __pte_index_size = H_PTE_INDEX_SIZE; __pmd_index_size = H_PMD_INDEX_SIZE; __pud_index_size = H_PUD_INDEX_SIZE; __pgd_index_size = H_PGD_INDEX_SIZE; __pud_cache_index = H_PUD_CACHE_INDEX; __pte_table_size = H_PTE_TABLE_SIZE; __pmd_table_size = H_PMD_TABLE_SIZE; __pud_table_size = H_PUD_TABLE_SIZE; __pgd_table_size = H_PGD_TABLE_SIZE; /* * 4k use hugepd format, so for hash set then to * zero */ __pmd_val_bits = HASH_PMD_VAL_BITS; __pud_val_bits = HASH_PUD_VAL_BITS; __pgd_val_bits = HASH_PGD_VAL_BITS; __kernel_virt_start = H_KERN_VIRT_START; __vmalloc_start = H_VMALLOC_START; __vmalloc_end = H_VMALLOC_END; __kernel_io_start = H_KERN_IO_START; __kernel_io_end = H_KERN_IO_END; vmemmap = (struct page *)H_VMEMMAP_START; ioremap_bot = IOREMAP_BASE; #ifdef CONFIG_PCI pci_io_base = ISA_IO_BASE; #endif /* Select appropriate backend */ if (firmware_has_feature(FW_FEATURE_PS3_LV1)) ps3_early_mm_init(); else if (firmware_has_feature(FW_FEATURE_LPAR)) hpte_init_pseries(); else if (IS_ENABLED(CONFIG_PPC_HASH_MMU_NATIVE)) hpte_init_native(); if (!mmu_hash_ops.hpte_insert) panic("hash__early_init_mmu: No MMU hash ops defined!\n"); /* * Initialize the MMU Hash table and create the linear mapping * of memory. Has to be done before SLB initialization as this is * currently where the page size encoding is obtained. */ htab_initialize(); init_mm.context.hash_context = &init_hash_mm_context; mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT); pr_info("Initializing hash mmu with SLB\n"); /* Initialize SLB management */ slb_initialize(); if (cpu_has_feature(CPU_FTR_ARCH_206) && cpu_has_feature(CPU_FTR_HVMODE)) tlbiel_all(); } #ifdef CONFIG_SMP void hash__early_init_mmu_secondary(void) { /* Initialize hash table for that CPU */ if (!firmware_has_feature(FW_FEATURE_LPAR)) { if (!cpu_has_feature(CPU_FTR_ARCH_300)) mtspr(SPRN_SDR1, _SDR1); else set_ptcr_when_no_uv(__pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); } /* Initialize SLB */ slb_initialize(); if (cpu_has_feature(CPU_FTR_ARCH_206) && cpu_has_feature(CPU_FTR_HVMODE)) tlbiel_all(); #ifdef CONFIG_PPC_MEM_KEYS if (mmu_has_feature(MMU_FTR_PKEY)) mtspr(SPRN_UAMOR, default_uamor); #endif } #endif /* CONFIG_SMP */ /* * Called by asm hashtable.S for doing lazy icache flush */ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap) { struct folio *folio; if (!pfn_valid(pte_pfn(pte))) return pp; folio = page_folio(pte_page(pte)); /* page is dirty */ if (!test_bit(PG_dcache_clean, &folio->flags) && !folio_test_reserved(folio)) { if (trap == INTERRUPT_INST_STORAGE) { flush_dcache_icache_folio(folio); set_bit(PG_dcache_clean, &folio->flags); } else pp |= HPTE_R_N; } return pp; } static unsigned int get_paca_psize(unsigned long addr) { unsigned char *psizes; unsigned long index, mask_index; if (addr < SLICE_LOW_TOP) { psizes = get_paca()->mm_ctx_low_slices_psize; index = GET_LOW_SLICE_INDEX(addr); } else { psizes = get_paca()->mm_ctx_high_slices_psize; index = GET_HIGH_SLICE_INDEX(addr); } mask_index = index & 0x1; return (psizes[index >> 1] >> (mask_index * 4)) & 0xF; } /* * Demote a segment to using 4k pages. * For now this makes the whole process use 4k pages. 
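	 * Copro SLBs are flushed as well, and if the demotion affects the
	 * current task its PACA copy and bolted SLB entries are refreshed
	 * below.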
*/ #ifdef CONFIG_PPC_64K_PAGES void demote_segment_4k(struct mm_struct *mm, unsigned long addr) { if (get_slice_psize(mm, addr) == MMU_PAGE_4K) return; slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K); copro_flush_all_slbs(mm); if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) { copy_mm_to_paca(mm); slb_flush_and_restore_bolted(); } } #endif /* CONFIG_PPC_64K_PAGES */ #ifdef CONFIG_PPC_SUBPAGE_PROT /* * This looks up a 2-bit protection code for a 4k subpage of a 64k page. * Userspace sets the subpage permissions using the subpage_prot system call. * * Result is 0: full permissions, _PAGE_RW: read-only, * _PAGE_RWX: no access. */ static int subpage_protection(struct mm_struct *mm, unsigned long ea) { struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context); u32 spp = 0; u32 **sbpm, *sbpp; if (!spt) return 0; if (ea >= spt->maxaddr) return 0; if (ea < 0x100000000UL) { /* addresses below 4GB use spt->low_prot */ sbpm = spt->low_prot; } else { sbpm = spt->protptrs[ea >> SBP_L3_SHIFT]; if (!sbpm) return 0; } sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)]; if (!sbpp) return 0; spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)]; /* extract 2-bit bitfield for this 4k subpage */ spp >>= 30 - 2 * ((ea >> 12) & 0xf); /* * 0 -> full permission * 1 -> Read only * 2 -> no access. * We return the flag that need to be cleared. */ spp = ((spp & 2) ? _PAGE_RWX : 0) | ((spp & 1) ? _PAGE_WRITE : 0); return spp; } #else /* CONFIG_PPC_SUBPAGE_PROT */ static inline int subpage_protection(struct mm_struct *mm, unsigned long ea) { return 0; } #endif void hash_failure_debug(unsigned long ea, unsigned long access, unsigned long vsid, unsigned long trap, int ssize, int psize, int lpsize, unsigned long pte) { if (!printk_ratelimit()) return; pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n", ea, access, current->comm); pr_info(" trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n", trap, vsid, ssize, psize, lpsize, pte); } static void check_paca_psize(unsigned long ea, struct mm_struct *mm, int psize, bool user_region) { if (user_region) { if (psize != get_paca_psize(ea)) { copy_mm_to_paca(mm); slb_flush_and_restore_bolted(); } } else if (get_paca()->vmalloc_sllp != mmu_psize_defs[mmu_vmalloc_psize].sllp) { get_paca()->vmalloc_sllp = mmu_psize_defs[mmu_vmalloc_psize].sllp; slb_vmalloc_update(); } } /* * Result code is: * 0 - handled * 1 - normal page fault * -1 - critical hash insertion error * -2 - access not permitted by subpage protection mechanism */ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap, unsigned long flags) { bool is_thp; pgd_t *pgdir; unsigned long vsid; pte_t *ptep; unsigned hugeshift; int rc, user_region = 0; int psize, ssize; DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", ea, access, trap); trace_hash_fault(ea, access, trap); /* Get region & vsid */ switch (get_region_id(ea)) { case USER_REGION_ID: user_region = 1; if (! 
mm) { DBG_LOW(" user region with no mm !\n"); rc = 1; goto bail; } psize = get_slice_psize(mm, ea); ssize = user_segment_size(ea); vsid = get_user_vsid(&mm->context, ea, ssize); break; case VMALLOC_REGION_ID: vsid = get_kernel_vsid(ea, mmu_kernel_ssize); psize = mmu_vmalloc_psize; ssize = mmu_kernel_ssize; flags |= HPTE_USE_KERNEL_KEY; break; case IO_REGION_ID: vsid = get_kernel_vsid(ea, mmu_kernel_ssize); psize = mmu_io_psize; ssize = mmu_kernel_ssize; flags |= HPTE_USE_KERNEL_KEY; break; default: /* * Not a valid range * Send the problem up to do_page_fault() */ rc = 1; goto bail; } DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); /* Bad address. */ if (!vsid) { DBG_LOW("Bad address!\n"); rc = 1; goto bail; } /* Get pgdir */ pgdir = mm->pgd; if (pgdir == NULL) { rc = 1; goto bail; } /* Check CPU locality */ if (user_region && mm_is_thread_local(mm)) flags |= HPTE_LOCAL_UPDATE; #ifndef CONFIG_PPC_64K_PAGES /* * If we use 4K pages and our psize is not 4K, then we might * be hitting a special driver mapping, and need to align the * address before we fetch the PTE. * * It could also be a hugepage mapping, in which case this is * not necessary, but it's not harmful, either. */ if (psize != MMU_PAGE_4K) ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1); #endif /* CONFIG_PPC_64K_PAGES */ /* Get PTE and page size from page tables */ ptep = find_linux_pte(pgdir, ea, &is_thp, &hugeshift); if (ptep == NULL || !pte_present(*ptep)) { DBG_LOW(" no PTE !\n"); rc = 1; goto bail; } /* * Add _PAGE_PRESENT to the required access perm. If there are parallel * updates to the pte that can possibly clear _PAGE_PTE, catch that too. * * We can safely use the return pte address in rest of the function * because we do set H_PAGE_BUSY which prevents further updates to pte * from generic code. */ access |= _PAGE_PRESENT | _PAGE_PTE; /* * Pre-check access permissions (will be re-checked atomically * in __hash_page_XX but this pre-check is a fast path */ if (!check_pte_access(access, pte_val(*ptep))) { DBG_LOW(" no access !\n"); rc = 1; goto bail; } if (hugeshift) { if (is_thp) rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep, trap, flags, ssize, psize); #ifdef CONFIG_HUGETLB_PAGE else rc = __hash_page_huge(ea, access, vsid, ptep, trap, flags, ssize, hugeshift, psize); #else else { /* * if we have hugeshift, and is not transhuge with * hugetlb disabled, something is really wrong. 
*/ rc = 1; WARN_ON(1); } #endif if (current->mm == mm) check_paca_psize(ea, mm, psize, user_region); goto bail; } #ifndef CONFIG_PPC_64K_PAGES DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep)); #else DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep), pte_val(*(ptep + PTRS_PER_PTE))); #endif /* Do actual hashing */ #ifdef CONFIG_PPC_64K_PAGES /* If H_PAGE_4K_PFN is set, make sure this is a 4k segment */ if ((pte_val(*ptep) & H_PAGE_4K_PFN) && psize == MMU_PAGE_64K) { demote_segment_4k(mm, ea); psize = MMU_PAGE_4K; } /* * If this PTE is non-cacheable and we have restrictions on * using non cacheable large pages, then we switch to 4k */ if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) { if (user_region) { demote_segment_4k(mm, ea); psize = MMU_PAGE_4K; } else if (ea < VMALLOC_END) { /* * some driver did a non-cacheable mapping * in vmalloc space, so switch vmalloc * to 4k pages */ printk(KERN_ALERT "Reducing vmalloc segment " "to 4kB pages because of " "non-cacheable mapping\n"); psize = mmu_vmalloc_psize = MMU_PAGE_4K; copro_flush_all_slbs(mm); } } #endif /* CONFIG_PPC_64K_PAGES */ if (current->mm == mm) check_paca_psize(ea, mm, psize, user_region); #ifdef CONFIG_PPC_64K_PAGES if (psize == MMU_PAGE_64K) rc = __hash_page_64K(ea, access, vsid, ptep, trap, flags, ssize); else #endif /* CONFIG_PPC_64K_PAGES */ { int spp = subpage_protection(mm, ea); if (access & spp) rc = -2; else rc = __hash_page_4K(ea, access, vsid, ptep, trap, flags, ssize, spp); } /* * Dump some info in case of hash insertion failure, they should * never happen so it is really useful to know if/when they do */ if (rc == -1) hash_failure_debug(ea, access, vsid, trap, ssize, psize, psize, pte_val(*ptep)); #ifndef CONFIG_PPC_64K_PAGES DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep)); #else DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep), pte_val(*(ptep + PTRS_PER_PTE))); #endif DBG_LOW(" -> rc=%d\n", rc); bail: return rc; } EXPORT_SYMBOL_GPL(hash_page_mm); int hash_page(unsigned long ea, unsigned long access, unsigned long trap, unsigned long dsisr) { unsigned long flags = 0; struct mm_struct *mm = current->mm; if ((get_region_id(ea) == VMALLOC_REGION_ID) || (get_region_id(ea) == IO_REGION_ID)) mm = &init_mm; if (dsisr & DSISR_NOHPTE) flags |= HPTE_NOHPTE_UPDATE; return hash_page_mm(mm, ea, access, trap, flags); } EXPORT_SYMBOL_GPL(hash_page); DEFINE_INTERRUPT_HANDLER(do_hash_fault) { unsigned long ea = regs->dar; unsigned long dsisr = regs->dsisr; unsigned long access = _PAGE_PRESENT | _PAGE_READ; unsigned long flags = 0; struct mm_struct *mm; unsigned int region_id; long err; if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT))) { hash__do_page_fault(regs); return; } region_id = get_region_id(ea); if ((region_id == VMALLOC_REGION_ID) || (region_id == IO_REGION_ID)) mm = &init_mm; else mm = current->mm; if (dsisr & DSISR_NOHPTE) flags |= HPTE_NOHPTE_UPDATE; if (dsisr & DSISR_ISSTORE) access |= _PAGE_WRITE; /* * We set _PAGE_PRIVILEGED only when * kernel mode access kernel space. * * _PAGE_PRIVILEGED is NOT set * 1) when kernel mode access user space * 2) user space access kernel space. 
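	 * i.e. the bit is set unconditionally first and then cleared again
	 * below for any access made from user mode or touching the user
	 * region.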
*/ access |= _PAGE_PRIVILEGED; if (user_mode(regs) || (region_id == USER_REGION_ID)) access &= ~_PAGE_PRIVILEGED; if (TRAP(regs) == INTERRUPT_INST_STORAGE) access |= _PAGE_EXEC; err = hash_page_mm(mm, ea, access, TRAP(regs), flags); if (unlikely(err < 0)) { // failed to insert a hash PTE due to an hypervisor error if (user_mode(regs)) { if (IS_ENABLED(CONFIG_PPC_SUBPAGE_PROT) && err == -2) _exception(SIGSEGV, regs, SEGV_ACCERR, ea); else _exception(SIGBUS, regs, BUS_ADRERR, ea); } else { bad_page_fault(regs, SIGBUS); } err = 0; } else if (err) { hash__do_page_fault(regs); } } static bool should_hash_preload(struct mm_struct *mm, unsigned long ea) { int psize = get_slice_psize(mm, ea); /* We only prefault standard pages for now */ if (unlikely(psize != mm_ctx_user_psize(&mm->context))) return false; /* * Don't prefault if subpage protection is enabled for the EA. */ if (unlikely((psize == MMU_PAGE_4K) && subpage_protection(mm, ea))) return false; return true; } static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, bool is_exec, unsigned long trap) { unsigned long vsid; pgd_t *pgdir; int rc, ssize, update_flags = 0; unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0); unsigned long flags; BUG_ON(get_region_id(ea) != USER_REGION_ID); if (!should_hash_preload(mm, ea)) return; DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx," " trap=%lx\n", mm, mm->pgd, ea, access, trap); /* Get Linux PTE if available */ pgdir = mm->pgd; if (pgdir == NULL) return; /* Get VSID */ ssize = user_segment_size(ea); vsid = get_user_vsid(&mm->context, ea, ssize); if (!vsid) return; #ifdef CONFIG_PPC_64K_PAGES /* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on * a 64K kernel), then we don't preload, hash_page() will take * care of it once we actually try to access the page. * That way we don't have to duplicate all of the logic for segment * page size demotion here * Called with PTL held, hence can be sure the value won't change in * between. */ if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep)) return; #endif /* CONFIG_PPC_64K_PAGES */ /* * __hash_page_* must run with interrupts off, including PMI interrupts * off, as it sets the H_PAGE_BUSY bit. * * It's otherwise possible for perf interrupts to hit at any time and * may take a hash fault reading the user stack, which could take a * hash miss and deadlock on the same H_PAGE_BUSY bit. * * Interrupts must also be off for the duration of the * mm_is_thread_local test and update, to prevent preempt running the * mm on another CPU (XXX: this may be racy vs kthread_use_mm). */ powerpc_local_irq_pmu_save(flags); /* Is that local to this CPU ? */ if (mm_is_thread_local(mm)) update_flags |= HPTE_LOCAL_UPDATE; /* Hash it in */ #ifdef CONFIG_PPC_64K_PAGES if (mm_ctx_user_psize(&mm->context) == MMU_PAGE_64K) rc = __hash_page_64K(ea, access, vsid, ptep, trap, update_flags, ssize); else #endif /* CONFIG_PPC_64K_PAGES */ rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags, ssize, subpage_protection(mm, ea)); /* Dump some info in case of hash insertion failure, they should * never happen so it is really useful to know if/when they do */ if (rc == -1) hash_failure_debug(ea, access, vsid, trap, ssize, mm_ctx_user_psize(&mm->context), mm_ctx_user_psize(&mm->context), pte_val(*ptep)); powerpc_local_irq_pmu_restore(flags); } /* * This is called at the end of handling a user page fault, when the * fault has been handled by updating a PTE in the linux page tables. 
* We use it to preload an HPTE into the hash table corresponding to * the updated linux PTE. * * This must always be called with the pte lock held. */ void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { /* * We don't need to worry about _PAGE_PRESENT here because we are * called with either mm->page_table_lock held or ptl lock held */ unsigned long trap; bool is_exec; /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ if (!pte_young(*ptep) || address >= TASK_SIZE) return; /* * We try to figure out if we are coming from an instruction * access fault and pass that down to __hash_page so we avoid * double-faulting on execution of fresh text. We have to test * for regs NULL since init will get here first thing at boot. * * We also avoid filling the hash if not coming from a fault. */ trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL; switch (trap) { case 0x300: is_exec = false; break; case 0x400: is_exec = true; break; default: return; } hash_preload(vma->vm_mm, ptep, address, is_exec, trap); } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM static inline void tm_flush_hash_page(int local) { /* * Transactions are not aborted by tlbiel, only tlbie. Without, syncing a * page back to a block device w/PIO could pick up transactional data * (bad!) so we force an abort here. Before the sync the page will be * made read-only, which will flush_hash_page. BIG ISSUE here: if the * kernel uses a page from userspace without unmapping it first, it may * see the speculated version. */ if (local && cpu_has_feature(CPU_FTR_TM) && current->thread.regs && MSR_TM_ACTIVE(current->thread.regs->msr)) { tm_enable(); tm_abort(TM_CAUSE_TLBI); } } #else static inline void tm_flush_hash_page(int local) { } #endif /* * Return the global hash slot, corresponding to the given PTE, which contains * the HPTE. */ unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift, int ssize, real_pte_t rpte, unsigned int subpg_index) { unsigned long hash, gslot, hidx; hash = hpt_hash(vpn, shift, ssize); hidx = __rpte_to_hidx(rpte, subpg_index); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; gslot = (hash & htab_hash_mask) * HPTES_PER_GROUP; gslot += hidx & _PTEIDX_GROUP_IX; return gslot; } void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, unsigned long flags) { unsigned long index, shift, gslot; int local = flags & HPTE_LOCAL_UPDATE; DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn); pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { gslot = pte_get_hash_gslot(vpn, shift, ssize, pte, index); DBG_LOW(" sub %ld: gslot=%lx\n", index, gslot); /* * We use same base page size and actual psize, because we don't * use these functions for hugepage */ mmu_hash_ops.hpte_invalidate(gslot, vpn, psize, psize, ssize, local); } pte_iterate_hashed_end(); tm_flush_hash_page(local); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE void flush_hash_hugepage(unsigned long vsid, unsigned long addr, pmd_t *pmdp, unsigned int psize, int ssize, unsigned long flags) { int i, max_hpte_count, valid; unsigned long s_addr; unsigned char *hpte_slot_array; unsigned long hidx, shift, vpn, hash, slot; int local = flags & HPTE_LOCAL_UPDATE; s_addr = addr & HPAGE_PMD_MASK; hpte_slot_array = get_hpte_slot_array(pmdp); /* * IF we try to do a HUGE PTE update after a withdraw is done. * we will find the below NULL. 
This happens when we do * split_huge_pmd */ if (!hpte_slot_array) return; if (mmu_hash_ops.hugepage_invalidate) { mmu_hash_ops.hugepage_invalidate(vsid, s_addr, hpte_slot_array, psize, ssize, local); goto tm_abort; } /* * No bluk hpte removal support, invalidate each entry */ shift = mmu_psize_defs[psize].shift; max_hpte_count = HPAGE_PMD_SIZE >> shift; for (i = 0; i < max_hpte_count; i++) { /* * 8 bits per each hpte entries * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit] */ valid = hpte_valid(hpte_slot_array, i); if (!valid) continue; hidx = hpte_hash_index(hpte_slot_array, i); /* get the vpn */ addr = s_addr + (i * (1ul << shift)); vpn = hpt_vpn(addr, vsid, ssize); hash = hpt_hash(vpn, shift, ssize); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; mmu_hash_ops.hpte_invalidate(slot, vpn, psize, MMU_PAGE_16M, ssize, local); } tm_abort: tm_flush_hash_page(local); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ void flush_hash_range(unsigned long number, int local) { if (mmu_hash_ops.flush_hash_range) mmu_hash_ops.flush_hash_range(number, local); else { int i; struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); for (i = 0; i < number; i++) flush_hash_page(batch->vpn[i], batch->pte[i], batch->psize, batch->ssize, local); } } long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long pa, unsigned long rflags, unsigned long vflags, int psize, int ssize) { unsigned long hpte_group; long slot; repeat: hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; /* Insert into the hash table, primary slot */ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, vflags, psize, psize, ssize); /* Primary is full, try the secondary */ if (unlikely(slot == -1)) { hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP; slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, vflags | HPTE_V_SECONDARY, psize, psize, ssize); if (slot == -1) { if (mftb() & 0x1) hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; mmu_hash_ops.hpte_remove(hpte_group); goto repeat; } } return slot; } void hpt_clear_stress(void) { int cpu = raw_smp_processor_id(); int g; for (g = 0; g < stress_nr_groups(); g++) { unsigned long last_group; last_group = stress_hpt_struct[cpu].last_group[g]; if (last_group != -1UL) { int i; for (i = 0; i < HPTES_PER_GROUP; i++) { if (mmu_hash_ops.hpte_remove(last_group) == -1) break; } stress_hpt_struct[cpu].last_group[g] = -1; } } } void hpt_do_stress(unsigned long ea, unsigned long hpte_group) { unsigned long last_group; int cpu = raw_smp_processor_id(); last_group = stress_hpt_struct[cpu].last_group[stress_nr_groups() - 1]; if (hpte_group == last_group) return; if (last_group != -1UL) { int i; /* * Concurrent CPUs might be inserting into this group, so * give up after a number of iterations, to prevent a live * lock. */ for (i = 0; i < HPTES_PER_GROUP; i++) { if (mmu_hash_ops.hpte_remove(last_group) == -1) break; } stress_hpt_struct[cpu].last_group[stress_nr_groups() - 1] = -1; } if (ea >= PAGE_OFFSET) { /* * We would really like to prefetch to get the TLB loaded, then * remove the PTE before returning from fault interrupt, to * increase the hash fault rate. * * Unfortunately QEMU TCG does not model the TLB in a way that * makes this possible, and systemsim (mambo) emulator does not * bring in TLBs with prefetches (although loads/stores do * work for non-CI PTEs). * * So remember this PTE and clear it on the next hash fault. 
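	 * The group is pushed onto this CPU's last_group history below and is
	 * emptied later, either by a subsequent fault or by hpt_clear_stress()
	 * run from the stress_hpt_timer.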
*/ memmove(&stress_hpt_struct[cpu].last_group[1], &stress_hpt_struct[cpu].last_group[0], (stress_nr_groups() - 1) * sizeof(unsigned long)); stress_hpt_struct[cpu].last_group[0] = hpte_group; } } #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) static DEFINE_RAW_SPINLOCK(linear_map_hash_lock); static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) { unsigned long hash; unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY); long ret; hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); /* Don't create HPTE entries for bad address */ if (!vsid) return; if (linear_map_hash_slots[lmi] & 0x80) return; ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode, HPTE_V_BOLTED, mmu_linear_psize, mmu_kernel_ssize); BUG_ON (ret < 0); raw_spin_lock(&linear_map_hash_lock); BUG_ON(linear_map_hash_slots[lmi] & 0x80); linear_map_hash_slots[lmi] = ret | 0x80; raw_spin_unlock(&linear_map_hash_lock); } static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) { unsigned long hash, hidx, slot; unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); raw_spin_lock(&linear_map_hash_lock); if (!(linear_map_hash_slots[lmi] & 0x80)) { raw_spin_unlock(&linear_map_hash_lock); return; } hidx = linear_map_hash_slots[lmi] & 0x7f; linear_map_hash_slots[lmi] = 0; raw_spin_unlock(&linear_map_hash_lock); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize, mmu_kernel_ssize, 0); } void hash__kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long flags, vaddr, lmi; int i; local_irq_save(flags); for (i = 0; i < numpages; i++, page++) { vaddr = (unsigned long)page_address(page); lmi = __pa(vaddr) >> PAGE_SHIFT; if (lmi >= linear_map_hash_count) continue; if (enable) kernel_map_linear_page(vaddr, lmi); else kernel_unmap_linear_page(vaddr, lmi); } local_irq_restore(flags); } #endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { /* * We don't currently support the first MEMBLOCK not mapping 0 * physical on those processors */ BUG_ON(first_memblock_base != 0); /* * On virtualized systems the first entry is our RMA region aka VRMA, * non-virtualized 64-bit hash MMU systems don't have a limitation * on real mode access. * * For guests on platforms before POWER9, we clamp the it limit to 1G * to avoid some funky things such as RTAS bugs etc... * * On POWER9 we limit to 1TB in case the host erroneously told us that * the RMA was >1TB. Effective address bits 0:23 are treated as zero * (meaning the access is aliased to zero i.e. addr = addr % 1TB) * for virtual real mode addressing and so it doesn't make sense to * have an area larger than 1TB as it can't be addressed. 
*/ if (!early_cpu_has_feature(CPU_FTR_HVMODE)) { ppc64_rma_size = first_memblock_size; if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000); else ppc64_rma_size = min_t(u64, ppc64_rma_size, 1UL << SID_SHIFT_1T); /* Finally limit subsequent allocations */ memblock_set_current_limit(ppc64_rma_size); } else { ppc64_rma_size = ULONG_MAX; } } #ifdef CONFIG_DEBUG_FS static int hpt_order_get(void *data, u64 *val) { *val = ppc64_pft_size; return 0; } static int hpt_order_set(void *data, u64 val) { int ret; if (!mmu_hash_ops.resize_hpt) return -ENODEV; cpus_read_lock(); ret = mmu_hash_ops.resize_hpt(val); cpus_read_unlock(); return ret; } DEFINE_DEBUGFS_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n"); static int __init hash64_debugfs(void) { debugfs_create_file("hpt_order", 0600, arch_debugfs_dir, NULL, &fops_hpt_order); return 0; } machine_device_initcall(pseries, hash64_debugfs); #endif /* CONFIG_DEBUG_FS */ void __init print_system_hash_info(void) { pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); if (htab_hash_mask) pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask); } unsigned long arch_randomize_brk(struct mm_struct *mm) { /* * If we are using 1TB segments and we are allowed to randomise * the heap, we can put it above 1TB so it is backed by a 1TB * segment. Otherwise the heap will be in the bottom 1TB * which always uses 256MB segments and this may result in a * performance penalty. */ if (is_32bit_task()) return randomize_page(mm->brk, SZ_32M); else if (!radix_enabled() && mmu_highuser_ssize == MMU_SEGSIZE_1T) return randomize_page(max_t(unsigned long, mm->brk, SZ_1T), SZ_1G); else return randomize_page(mm->brk, SZ_1G); }
linux-master
arch/powerpc/mm/book3s64/hash_utils.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation. */ #include <linux/sched.h> #include <linux/mm_types.h> #include <linux/memblock.h> #include <linux/memremap.h> #include <linux/pkeys.h> #include <linux/debugfs.h> #include <linux/proc_fs.h> #include <misc/cxl-base.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/trace.h> #include <asm/powernv.h> #include <asm/firmware.h> #include <asm/ultravisor.h> #include <asm/kexec.h> #include <mm/mmu_decl.h> #include <trace/events/thp.h> #include "internal.h" struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; EXPORT_SYMBOL_GPL(mmu_psize_defs); #ifdef CONFIG_SPARSEMEM_VMEMMAP int mmu_vmemmap_psize = MMU_PAGE_4K; #endif unsigned long __pmd_frag_nr; EXPORT_SYMBOL(__pmd_frag_nr); unsigned long __pmd_frag_size_shift; EXPORT_SYMBOL(__pmd_frag_size_shift); #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * This is called when relaxing access to a hugepage. It's also called in the page * fault path when we don't hit any of the major fault cases, ie, a minor * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have * handled those two for us, we additionally deal with missing execute * permission here on some processors */ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty) { int changed; #ifdef CONFIG_DEBUG_VM WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp)); assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp)); #endif changed = !pmd_same(*(pmdp), entry); if (changed) { /* * We can use MMU_PAGE_2M here, because only radix * path look at the psize. */ __ptep_set_access_flags(vma, pmdp_ptep(pmdp), pmd_pte(entry), address, MMU_PAGE_2M); } return changed; } int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, pud_t entry, int dirty) { int changed; #ifdef CONFIG_DEBUG_VM WARN_ON(!pud_devmap(*pudp)); assert_spin_locked(pud_lockptr(vma->vm_mm, pudp)); #endif changed = !pud_same(*(pudp), entry); if (changed) { /* * We can use MMU_PAGE_1G here, because only radix * path look at the psize. */ __ptep_set_access_flags(vma, pudp_ptep(pudp), pud_pte(entry), address, MMU_PAGE_1G); } return changed; } int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp); } int pudp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pud_t *pudp) { return __pudp_test_and_clear_young(vma->vm_mm, address, pudp); } /* * set a new huge pmd. We should not be called for updating * an existing pmd entry. That should go via pmd_hugepage_update. */ void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { #ifdef CONFIG_DEBUG_VM /* * Make sure hardware valid bit is not set. We don't do * tlb flush for this update. */ WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp))); assert_spin_locked(pmd_lockptr(mm, pmdp)); WARN_ON(!(pmd_large(pmd))); #endif trace_hugepage_set_pmd(addr, pmd_val(pmd)); return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); } void set_pud_at(struct mm_struct *mm, unsigned long addr, pud_t *pudp, pud_t pud) { #ifdef CONFIG_DEBUG_VM /* * Make sure hardware valid bit is not set. We don't do * tlb flush for this update. 
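	 * Like set_pmd_at() above, this is only meant to establish a brand-new
	 * huge PUD entry; updating a live entry must go through the hugepage
	 * update helpers instead.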
*/ WARN_ON(pte_hw_valid(pud_pte(*pudp))); assert_spin_locked(pud_lockptr(mm, pudp)); WARN_ON(!(pud_large(pud))); #endif trace_hugepage_set_pud(addr, pud_val(pud)); return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud)); } static void do_serialize(void *arg) { /* We've taken the IPI, so try to trim the mask while here */ if (radix_enabled()) { struct mm_struct *mm = arg; exit_lazy_flush_tlb(mm, false); } } /* * Serialize against __find_linux_pte() which does lock-less * lookup in page tables with local interrupts disabled. For huge pages * it casts pmd_t to pte_t. Since format of pte_t is different from * pmd_t we want to prevent transit from pmd pointing to page table * to pmd pointing to huge page (and back) while interrupts are disabled. * We clear pmd to possibly replace it with page table pointer in * different code paths. So make sure we wait for the parallel * __find_linux_pte() to finish. */ void serialize_against_pte_lookup(struct mm_struct *mm) { smp_mb(); smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1); } /* * We use this to invalidate a pmdp entry before switching from a * hugepte to regular pmd entry. */ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { unsigned long old_pmd; old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID); flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return __pmd(old_pmd); } pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp, int full) { pmd_t pmd; VM_BUG_ON(addr & ~HPAGE_PMD_MASK); VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp)) || !pmd_present(*pmdp)); pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); /* * if it not a fullmm flush, then we can possibly end up converting * this PMD pte entry to a regular level 0 PTE by a parallel page fault. * Make sure we flush the tlb in this case. */ if (!full) flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE); return pmd; } pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma, unsigned long addr, pud_t *pudp, int full) { pud_t pud; VM_BUG_ON(addr & ~HPAGE_PMD_MASK); VM_BUG_ON((pud_present(*pudp) && !pud_devmap(*pudp)) || !pud_present(*pudp)); pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp); /* * if it not a fullmm flush, then we can possibly end up converting * this PMD pte entry to a regular level 0 PTE by a parallel page fault. * Make sure we flush the tlb in this case. 
*/ if (!full) flush_pud_tlb_range(vma, addr, addr + HPAGE_PUD_SIZE); return pud; } static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot) { return __pmd(pmd_val(pmd) | pgprot_val(pgprot)); } static pud_t pud_set_protbits(pud_t pud, pgprot_t pgprot) { return __pud(pud_val(pud) | pgprot_val(pgprot)); } /* * At some point we should be able to get rid of * pmd_mkhuge() and mk_huge_pmd() when we update all the * other archs to mark the pmd huge in pfn_pmd() */ pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot) { unsigned long pmdv; pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK; return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot)); } pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot) { unsigned long pudv; pudv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK; return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot)); } pmd_t mk_pmd(struct page *page, pgprot_t pgprot) { return pfn_pmd(page_to_pfn(page), pgprot); } pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) { unsigned long pmdv; pmdv = pmd_val(pmd); pmdv &= _HPAGE_CHG_MASK; return pmd_set_protbits(__pmd(pmdv), newprot); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* For use by kexec, called with MMU off */ notrace void mmu_cleanup_all(void) { if (radix_enabled()) radix__mmu_cleanup_all(); else if (mmu_hash_ops.hpte_clear_all) mmu_hash_ops.hpte_clear_all(); reset_sprs(); } #ifdef CONFIG_MEMORY_HOTPLUG int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid, pgprot_t prot) { if (radix_enabled()) return radix__create_section_mapping(start, end, nid, prot); return hash__create_section_mapping(start, end, nid, prot); } int __meminit remove_section_mapping(unsigned long start, unsigned long end) { if (radix_enabled()) return radix__remove_section_mapping(start, end); return hash__remove_section_mapping(start, end); } #endif /* CONFIG_MEMORY_HOTPLUG */ void __init mmu_partition_table_init(void) { unsigned long patb_size = 1UL << PATB_SIZE_SHIFT; unsigned long ptcr; /* Initialize the Partition Table with no entries */ partition_tb = memblock_alloc(patb_size, patb_size); if (!partition_tb) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, patb_size, patb_size); ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12); set_ptcr_when_no_uv(ptcr); powernv_set_nmmu_ptcr(ptcr); } static void flush_partition(unsigned int lpid, bool radix) { if (radix) { radix__flush_all_lpid(lpid); radix__flush_all_lpid_guest(lpid); } else { asm volatile("ptesync" : : : "memory"); asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); /* do we need fixup here ?*/ asm volatile("eieio; tlbsync; ptesync" : : : "memory"); trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0); } } void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0, unsigned long dw1, bool flush) { unsigned long old = be64_to_cpu(partition_tb[lpid].patb0); /* * When ultravisor is enabled, the partition table is stored in secure * memory and can only be accessed doing an ultravisor call. However, we * maintain a copy of the partition table in normal memory to allow Nest * MMU translations to occur (for normal VMs). * * Therefore, here we always update partition_tb, regardless of whether * we are running under an ultravisor or not. */ partition_tb[lpid].patb0 = cpu_to_be64(dw0); partition_tb[lpid].patb1 = cpu_to_be64(dw1); /* * If ultravisor is enabled, we do an ultravisor call to register the * partition table entry (PATE), which also do a global flush of TLBs * and partition table caches for the lpid. 
Otherwise, just do the * flush. The type of flush (hash or radix) depends on what the previous * use of the partition ID was, not the new use. */ if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) { uv_register_pate(lpid, dw0, dw1); pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n", dw0, dw1); } else if (flush) { /* * Boot does not need to flush, because MMU is off and each * CPU does a tlbiel_all() before switching them on, which * flushes everything. */ flush_partition(lpid, (old & PATB_HR)); } } EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry); static pmd_t *get_pmd_from_cache(struct mm_struct *mm) { void *pmd_frag, *ret; if (PMD_FRAG_NR == 1) return NULL; spin_lock(&mm->page_table_lock); ret = mm->context.pmd_frag; if (ret) { pmd_frag = ret + PMD_FRAG_SIZE; /* * If we have taken up all the fragments mark PTE page NULL */ if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0) pmd_frag = NULL; mm->context.pmd_frag = pmd_frag; } spin_unlock(&mm->page_table_lock); return (pmd_t *)ret; } static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm) { void *ret = NULL; struct ptdesc *ptdesc; gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO; if (mm == &init_mm) gfp &= ~__GFP_ACCOUNT; ptdesc = pagetable_alloc(gfp, 0); if (!ptdesc) return NULL; if (!pagetable_pmd_ctor(ptdesc)) { pagetable_free(ptdesc); return NULL; } atomic_set(&ptdesc->pt_frag_refcount, 1); ret = ptdesc_address(ptdesc); /* * if we support only one fragment just return the * allocated page. */ if (PMD_FRAG_NR == 1) return ret; spin_lock(&mm->page_table_lock); /* * If we find ptdesc_page set, we return * the allocated page with single fragment * count. */ if (likely(!mm->context.pmd_frag)) { atomic_set(&ptdesc->pt_frag_refcount, PMD_FRAG_NR); mm->context.pmd_frag = ret + PMD_FRAG_SIZE; } spin_unlock(&mm->page_table_lock); return (pmd_t *)ret; } pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr) { pmd_t *pmd; pmd = get_pmd_from_cache(mm); if (pmd) return pmd; return __alloc_for_pmdcache(mm); } void pmd_fragment_free(unsigned long *pmd) { struct ptdesc *ptdesc = virt_to_ptdesc(pmd); if (pagetable_is_reserved(ptdesc)) return free_reserved_ptdesc(ptdesc); BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0); if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) { pagetable_pmd_dtor(ptdesc); pagetable_free(ptdesc); } } static inline void pgtable_free(void *table, int index) { switch (index) { case PTE_INDEX: pte_fragment_free(table, 0); break; case PMD_INDEX: pmd_fragment_free(table); break; case PUD_INDEX: __pud_free(table); break; #if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE) /* 16M hugepd directory at pud level */ case HTLB_16M_INDEX: BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0); kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table); break; /* 16G hugepd directory at the pgd level */ case HTLB_16G_INDEX: BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0); kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table); break; #endif /* We don't free pgd table via RCU callback */ default: BUG(); } } void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index) { unsigned long pgf = (unsigned long)table; BUG_ON(index > MAX_PGTABLE_INDEX_SIZE); pgf |= index; tlb_remove_table(tlb, (void *)pgf); } void __tlb_remove_table(void *_table) { void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE); unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE; return pgtable_free(table, index); } #ifdef CONFIG_PROC_FS atomic_long_t direct_pages_count[MMU_PAGE_COUNT]; void arch_report_meminfo(struct seq_file *m) 
{ /* * Hash maps the memory with one size mmu_linear_psize. * So don't bother to print these on hash */ if (!radix_enabled()) return; seq_printf(m, "DirectMap4k: %8lu kB\n", atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2); seq_printf(m, "DirectMap64k: %8lu kB\n", atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6); seq_printf(m, "DirectMap2M: %8lu kB\n", atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11); seq_printf(m, "DirectMap1G: %8lu kB\n", atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20); } #endif /* CONFIG_PROC_FS */ pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { unsigned long pte_val; /* * Clear the _PAGE_PRESENT so that no hardware parallel update is * possible. Also keep the pte_present true so that we don't take * wrong fault. */ pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0); return __pte(pte_val); } void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t pte) { if (radix_enabled()) return radix__ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte); set_pte_at(vma->vm_mm, addr, ptep, pte); } /* * For hash translation mode, we use the deposited table to store hash slot * information and they are stored at PTRS_PER_PMD offset from related pmd * location. Hence a pmd move requires deposit and withdraw. * * For radix translation with split pmd ptl, we store the deposited table in the * pmd page. Hence if we have different pmd page we need to withdraw during pmd * move. * * With hash we use deposited table always irrespective of anon or not. * With radix we use deposited table only for anonymous mapping. */ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, struct spinlock *old_pmd_ptl, struct vm_area_struct *vma) { if (radix_enabled()) return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); return true; } /* * Does the CPU support tlbie? */ bool tlbie_capable __read_mostly = true; EXPORT_SYMBOL(tlbie_capable); /* * Should tlbie be used for management of CPU TLBs, for kernel and process * address spaces? tlbie may still be used for nMMU accelerators, and for KVM * guest address spaces. */ bool tlbie_enabled __read_mostly = true; static int __init setup_disable_tlbie(char *str) { if (!radix_enabled()) { pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n"); return 1; } tlbie_capable = false; tlbie_enabled = false; return 1; } __setup("disable_tlbie", setup_disable_tlbie); static int __init pgtable_debugfs_setup(void) { if (!tlbie_capable) return 0; /* * There is no locking vs tlb flushing when changing this value. * The tlb flushers will see one value or another, and use either * tlbie or tlbiel with IPIs. In both cases the TLBs will be * invalidated as expected. */ debugfs_create_bool("tlbie_enabled", 0600, arch_debugfs_dir, &tlbie_enabled); return 0; } arch_initcall(pgtable_debugfs_setup); #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN) /* * Override the generic version in mm/memremap.c. * * With hash translation, the direct-map range is mapped with just one * page size selected by htab_init_page_sizes(). Consult * mmu_psize_defs[] to determine the minimum page size alignment. 
*/ unsigned long memremap_compat_align(void) { if (!radix_enabled()) { unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift; return max(SUBSECTION_SIZE, 1UL << shift); } return SUBSECTION_SIZE; } EXPORT_SYMBOL_GPL(memremap_compat_align); #endif pgprot_t vm_get_page_prot(unsigned long vm_flags) { unsigned long prot; /* Radix supports execute-only, but protection_map maps X -> RX */ if (radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)) { prot = pgprot_val(PAGE_EXECONLY); } else { prot = pgprot_val(protection_map[vm_flags & (VM_ACCESS_FLAGS | VM_SHARED)]); } if (vm_flags & VM_SAO) prot |= _PAGE_SAO; #ifdef CONFIG_PPC_MEM_KEYS prot |= vmflag_to_pte_pkey_bits(vm_flags); #endif return __pgprot(prot); } EXPORT_SYMBOL(vm_get_page_prot);
linux-master
arch/powerpc/mm/book3s64/pgtable.c
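pgtable_free_tlb() in the file above stashes a small table-type index in the low bits of the page-table pointer before handing it to tlb_remove_table(), and __tlb_remove_table() masks it back out. What follows is a minimal userspace C sketch of that tag-in-low-bits technique, assuming only that the pointer is suitably aligned; INDEX_MASK, pack_table() and unpack_table() are made-up names for illustration, not kernel APIs.

/* Sketch of the tag-in-low-bits trick used by pgtable_free_tlb() and
 * __tlb_remove_table(): because page table pages are at least page
 * aligned, the low pointer bits are known to be zero and can carry a
 * small type index. INDEX_MASK stands in for MAX_PGTABLE_INDEX_SIZE. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define INDEX_MASK 0x7UL	/* illustrative stand-in */

static void *pack_table(void *table, unsigned int index)
{
	uintptr_t p = (uintptr_t)table;

	assert((p & INDEX_MASK) == 0);	/* alignment provides the free bits */
	assert(index <= INDEX_MASK);
	return (void *)(p | index);
}

static void *unpack_table(void *tagged, unsigned int *index)
{
	uintptr_t p = (uintptr_t)tagged;

	*index = p & INDEX_MASK;
	return (void *)(p & ~INDEX_MASK);
}

int main(void)
{
	void *table;
	unsigned int idx;

	/* aligned_alloc guarantees the low bits we want to reuse are clear */
	table = aligned_alloc(4096, 4096);
	if (!table)
		return 1;

	void *tagged = pack_table(table, 2 /* e.g. a PMD-type index */);
	void *orig = unpack_table(tagged, &idx);

	printf("index=%u, pointer restored: %s\n", idx,
	       orig == table ? "yes" : "no");
	free(table);
	return 0;
}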
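The DirectMap* lines printed by arch_report_meminfo() in the same file convert page counts to kB with fixed shifts (<< 2, << 6, << 11, << 20). A short illustration of that arithmetic, using a hypothetical pages_to_kb() helper that is not part of the kernel: a count of pages of size 2^shift bytes is simply shifted left by (shift - 10).

/* N pages of 2^page_shift bytes == N << (page_shift - 10) kB. */
#include <stdio.h>

static unsigned long pages_to_kb(unsigned long npages, unsigned int page_shift)
{
	return npages << (page_shift - 10);	/* 1 kB == 2^10 bytes */
}

int main(void)
{
	printf("DirectMap4k:  %8lu kB\n", pages_to_kb(100, 12)); /* 100 << 2  */
	printf("DirectMap64k: %8lu kB\n", pages_to_kb(100, 16)); /* 100 << 6  */
	printf("DirectMap2M:  %8lu kB\n", pages_to_kb(100, 21)); /* 100 << 11 */
	printf("DirectMap1G:  %8lu kB\n", pages_to_kb(100, 30)); /* 100 << 20 */
	return 0;
}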
/* * Copyright IBM Corporation, 2013 * Author Aneesh Kumar K.V <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2.1 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ /* * PPC64 THP Support for hash based MMUs */ #include <linux/mm.h> #include <asm/machdep.h> int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, pmd_t *pmdp, unsigned long trap, unsigned long flags, int ssize, unsigned int psize) { unsigned int index, valid; unsigned char *hpte_slot_array; unsigned long rflags, pa, hidx; unsigned long old_pmd, new_pmd; int ret, lpsize = MMU_PAGE_16M; unsigned long vpn, hash, shift, slot; /* * atomically mark the linux large page PMD busy and dirty */ do { pmd_t pmd = READ_ONCE(*pmdp); old_pmd = pmd_val(pmd); /* If PMD busy, retry the access */ if (unlikely(old_pmd & H_PAGE_BUSY)) return 0; /* If PMD permissions don't match, take page fault */ if (unlikely(!check_pte_access(access, old_pmd))) return 1; /* * Try to lock the PTE, add ACCESSED and DIRTY if it was * a write access */ new_pmd = old_pmd | H_PAGE_BUSY | _PAGE_ACCESSED; if (access & _PAGE_WRITE) new_pmd |= _PAGE_DIRTY; } while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd))); /* * Make sure this is thp or devmap entry */ if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))) return 0; rflags = htab_convert_pte_flags(new_pmd, flags); #if 0 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { /* * No CPU has hugepages but lacks no execute, so we * don't need to worry about that case */ rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap); } #endif /* * Find the slot index details for this ea, using base page size. */ shift = mmu_psize_defs[psize].shift; index = (ea & ~HPAGE_PMD_MASK) >> shift; BUG_ON(index >= PTE_FRAG_SIZE); vpn = hpt_vpn(ea, vsid, ssize); hpte_slot_array = get_hpte_slot_array(pmdp); if (psize == MMU_PAGE_4K) { /* * invalidate the old hpte entry if we have that mapped via 64K * base page size. This is because demote_segment won't flush * hash page table entries. */ if ((old_pmd & H_PAGE_HASHPTE) && !(old_pmd & H_PAGE_COMBO)) { flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K, ssize, flags); /* * With THP, we also clear the slot information with * respect to all the 64K hash pte mapping the 16MB * page. They are all invalid now. This make sure we * don't find the slot valid when we fault with 4k * base page size. * */ memset(hpte_slot_array, 0, PTE_FRAG_SIZE); } } valid = hpte_valid(hpte_slot_array, index); if (valid) { /* update the hpte bits */ hash = hpt_hash(vpn, shift, ssize); hidx = hpte_hash_index(hpte_slot_array, index); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, psize, lpsize, ssize, flags); /* * We failed to update, try to insert a new entry. */ if (ret == -1) { /* * large pte is marked busy, so we can be sure * nobody is looking at hpte_slot_array. hence we can * safely update this here. 
*/ valid = 0; hpte_slot_array[index] = 0; } } if (!valid) { unsigned long hpte_group; hash = hpt_hash(vpn, shift, ssize); /* insert new entry */ pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; new_pmd |= H_PAGE_HASHPTE; repeat: hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; /* Insert into the hash table, primary slot */ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0, psize, lpsize, ssize); /* * Primary is full, try the secondary */ if (unlikely(slot == -1)) { hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP; slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, HPTE_V_SECONDARY, psize, lpsize, ssize); if (slot == -1) { if (mftb() & 0x1) hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; mmu_hash_ops.hpte_remove(hpte_group); goto repeat; } } /* * Hypervisor failure. Restore old pmd and return -1 * similar to __hash_page_* */ if (unlikely(slot == -2)) { *pmdp = __pmd(old_pmd); hash_failure_debug(ea, access, vsid, trap, ssize, psize, lpsize, old_pmd); return -1; } /* * large pte is marked busy, so we can be sure * nobody is looking at hpte_slot_array. hence we can * safely update this here. */ mark_hpte_slot_valid(hpte_slot_array, index, slot); } /* * Mark the pte with H_PAGE_COMBO, if we are trying to hash it with * base page size 4k. */ if (psize == MMU_PAGE_4K) new_pmd |= H_PAGE_COMBO; /* * The hpte valid is stored in the pgtable whose address is in the * second half of the PMD. Order this against clearing of the busy bit in * huge pmd. */ smp_wmb(); *pmdp = __pmd(new_pmd & ~H_PAGE_BUSY); return 0; }
linux-master
arch/powerpc/mm/book3s64/hash_hugepage.c
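__hash_page_thp() above opens with a lock-free retry loop that marks the linux PMD busy and accessed (and dirty for writes) before touching the hash table. Below is a self-contained C11-atomics sketch of that pattern, assuming made-up bit names (PMD_BUSY, PMD_ACCESSED, ...) rather than the kernel's H_PAGE_* / _PAGE_* definitions, and a plain compare-and-exchange in place of pmd_xchg().

/* Read the entry, bail out if it is already busy or the access is not
 * permitted, otherwise try to install BUSY|ACCESSED (|DIRTY for writes)
 * with a compare-and-exchange, retrying if someone raced with us. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PMD_BUSY	(1UL << 0)
#define PMD_ACCESSED	(1UL << 1)
#define PMD_DIRTY	(1UL << 2)
#define PMD_WRITE_OK	(1UL << 3)

static bool lock_pmd_for_hash(_Atomic unsigned long *pmdp, bool write,
			      unsigned long *old_out)
{
	unsigned long old, new;

	do {
		old = atomic_load(pmdp);
		if (old & PMD_BUSY)		/* someone else is updating it */
			return false;
		if (write && !(old & PMD_WRITE_OK))	/* permission mismatch */
			return false;
		new = old | PMD_BUSY | PMD_ACCESSED;
		if (write)
			new |= PMD_DIRTY;
	} while (!atomic_compare_exchange_weak(pmdp, &old, new));

	*old_out = old;
	return true;
}

int main(void)
{
	_Atomic unsigned long pmd = PMD_WRITE_OK;
	unsigned long old;

	if (lock_pmd_for_hash(&pmd, true, &old))
		printf("locked: old=%#lx new=%#lx\n", old, (unsigned long)pmd);
	return 0;
}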
// SPDX-License-Identifier: GPL-2.0-or-later /* * Page table handling routines for radix page table. * * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation. */ #define pr_fmt(fmt) "radix-mmu: " fmt #include <linux/io.h> #include <linux/kernel.h> #include <linux/sched/mm.h> #include <linux/memblock.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/string_helpers.h> #include <linux/memory.h> #include <asm/pgalloc.h> #include <asm/mmu_context.h> #include <asm/dma.h> #include <asm/machdep.h> #include <asm/mmu.h> #include <asm/firmware.h> #include <asm/powernv.h> #include <asm/sections.h> #include <asm/smp.h> #include <asm/trace.h> #include <asm/uaccess.h> #include <asm/ultravisor.h> #include <asm/set_memory.h> #include <trace/events/thp.h> #include <mm/mmu_decl.h> unsigned int mmu_base_pid; static __ref void *early_alloc_pgtable(unsigned long size, int nid, unsigned long region_start, unsigned long region_end) { phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT; phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE; void *ptr; if (region_start) min_addr = region_start; if (region_end) max_addr = region_end; ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid); if (!ptr) panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n", __func__, size, size, nid, &min_addr, &max_addr); return ptr; } /* * When allocating pud or pmd pointers, we allocate a complete page * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This * is to ensure that the page obtained from the memblock allocator * can be completely used as page table page and can be freed * correctly when the page table entries are removed. */ static int early_map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t flags, unsigned int map_page_size, int nid, unsigned long region_start, unsigned long region_end) { unsigned long pfn = pa >> PAGE_SHIFT; pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; pgdp = pgd_offset_k(ea); p4dp = p4d_offset(pgdp, ea); if (p4d_none(*p4dp)) { pudp = early_alloc_pgtable(PAGE_SIZE, nid, region_start, region_end); p4d_populate(&init_mm, p4dp, pudp); } pudp = pud_offset(p4dp, ea); if (map_page_size == PUD_SIZE) { ptep = (pte_t *)pudp; goto set_the_pte; } if (pud_none(*pudp)) { pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start, region_end); pud_populate(&init_mm, pudp, pmdp); } pmdp = pmd_offset(pudp, ea); if (map_page_size == PMD_SIZE) { ptep = pmdp_ptep(pmdp); goto set_the_pte; } if (!pmd_present(*pmdp)) { ptep = early_alloc_pgtable(PAGE_SIZE, nid, region_start, region_end); pmd_populate_kernel(&init_mm, pmdp, ptep); } ptep = pte_offset_kernel(pmdp, ea); set_the_pte: set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); asm volatile("ptesync": : :"memory"); return 0; } /* * nid, region_start, and region_end are hints to try to place the page * table memory in the same node or region. 
*/ static int __map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t flags, unsigned int map_page_size, int nid, unsigned long region_start, unsigned long region_end) { unsigned long pfn = pa >> PAGE_SHIFT; pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; /* * Make sure task size is correct as per the max adddr */ BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE); #ifdef CONFIG_PPC_64K_PAGES BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT)); #endif if (unlikely(!slab_is_available())) return early_map_kernel_page(ea, pa, flags, map_page_size, nid, region_start, region_end); /* * Should make page table allocation functions be able to take a * node, so we can place kernel page tables on the right nodes after * boot. */ pgdp = pgd_offset_k(ea); p4dp = p4d_offset(pgdp, ea); pudp = pud_alloc(&init_mm, p4dp, ea); if (!pudp) return -ENOMEM; if (map_page_size == PUD_SIZE) { ptep = (pte_t *)pudp; goto set_the_pte; } pmdp = pmd_alloc(&init_mm, pudp, ea); if (!pmdp) return -ENOMEM; if (map_page_size == PMD_SIZE) { ptep = pmdp_ptep(pmdp); goto set_the_pte; } ptep = pte_alloc_kernel(pmdp, ea); if (!ptep) return -ENOMEM; set_the_pte: set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); asm volatile("ptesync": : :"memory"); return 0; } int radix__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t flags, unsigned int map_page_size) { return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0); } #ifdef CONFIG_STRICT_KERNEL_RWX static void radix__change_memory_range(unsigned long start, unsigned long end, unsigned long clear) { unsigned long idx; pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; start = ALIGN_DOWN(start, PAGE_SIZE); end = PAGE_ALIGN(end); // aligns up pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n", start, end, clear); for (idx = start; idx < end; idx += PAGE_SIZE) { pgdp = pgd_offset_k(idx); p4dp = p4d_offset(pgdp, idx); pudp = pud_alloc(&init_mm, p4dp, idx); if (!pudp) continue; if (pud_is_leaf(*pudp)) { ptep = (pte_t *)pudp; goto update_the_pte; } pmdp = pmd_alloc(&init_mm, pudp, idx); if (!pmdp) continue; if (pmd_is_leaf(*pmdp)) { ptep = pmdp_ptep(pmdp); goto update_the_pte; } ptep = pte_alloc_kernel(pmdp, idx); if (!ptep) continue; update_the_pte: radix__pte_update(&init_mm, idx, ptep, clear, 0, 0); } radix__flush_tlb_kernel_range(start, end); } void radix__mark_rodata_ro(void) { unsigned long start, end; start = (unsigned long)_stext; end = (unsigned long)__end_rodata; radix__change_memory_range(start, end, _PAGE_WRITE); for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) { end = start + PAGE_SIZE; if (overlaps_interrupt_vector_text(start, end)) radix__change_memory_range(start, end, _PAGE_WRITE); else break; } } void radix__mark_initmem_nx(void) { unsigned long start = (unsigned long)__init_begin; unsigned long end = (unsigned long)__init_end; radix__change_memory_range(start, end, _PAGE_EXEC); } #endif /* CONFIG_STRICT_KERNEL_RWX */ static inline void __meminit print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec) { char buf[10]; if (end <= start) return; string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf)); pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf, exec ? 
" (exec)" : ""); } static unsigned long next_boundary(unsigned long addr, unsigned long end) { #ifdef CONFIG_STRICT_KERNEL_RWX unsigned long stext_phys; stext_phys = __pa_symbol(_stext); // Relocatable kernel running at non-zero real address if (stext_phys != 0) { // The end of interrupts code at zero is a rodata boundary unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys; if (addr < end_intr) return end_intr; // Start of relocated kernel text is a rodata boundary if (addr < stext_phys) return stext_phys; } if (addr < __pa_symbol(__srwx_boundary)) return __pa_symbol(__srwx_boundary); #endif return end; } static int __meminit create_physical_mapping(unsigned long start, unsigned long end, int nid, pgprot_t _prot) { unsigned long vaddr, addr, mapping_size = 0; bool prev_exec, exec = false; pgprot_t prot; int psize; unsigned long max_mapping_size = memory_block_size; if (debug_pagealloc_enabled_or_kfence()) max_mapping_size = PAGE_SIZE; start = ALIGN(start, PAGE_SIZE); end = ALIGN_DOWN(end, PAGE_SIZE); for (addr = start; addr < end; addr += mapping_size) { unsigned long gap, previous_size; int rc; gap = next_boundary(addr, end) - addr; if (gap > max_mapping_size) gap = max_mapping_size; previous_size = mapping_size; prev_exec = exec; if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE && mmu_psize_defs[MMU_PAGE_1G].shift) { mapping_size = PUD_SIZE; psize = MMU_PAGE_1G; } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE && mmu_psize_defs[MMU_PAGE_2M].shift) { mapping_size = PMD_SIZE; psize = MMU_PAGE_2M; } else { mapping_size = PAGE_SIZE; psize = mmu_virtual_psize; } vaddr = (unsigned long)__va(addr); if (overlaps_kernel_text(vaddr, vaddr + mapping_size) || overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) { prot = PAGE_KERNEL_X; exec = true; } else { prot = _prot; exec = false; } if (mapping_size != previous_size || exec != prev_exec) { print_mapping(start, addr, previous_size, prev_exec); start = addr; } rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end); if (rc) return rc; update_page_count(psize, 1); } print_mapping(start, addr, mapping_size, exec); return 0; } static void __init radix_init_pgtable(void) { unsigned long rts_field; phys_addr_t start, end; u64 i; /* We don't support slb for radix */ slb_set_size(0); /* * Create the linear mapping */ for_each_mem_range(i, &start, &end) { /* * The memblock allocator is up at this point, so the * page tables will be allocated within the range. No * need or a node (which we don't have yet). */ if (end >= RADIX_VMALLOC_START) { pr_warn("Outside the supported range\n"); continue; } WARN_ON(create_physical_mapping(start, end, -1, PAGE_KERNEL)); } if (!cpu_has_feature(CPU_FTR_HVMODE) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) { /* * Older versions of KVM on these machines prefer if the * guest only uses the low 19 PID bits. */ mmu_pid_bits = 19; } mmu_base_pid = 1; /* * Allocate Partition table and process table for the * host. */ BUG_ON(PRTB_SIZE_SHIFT > 36); process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0); /* * Fill in the process table. */ rts_field = radix__get_tree_size(); process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE); /* * The init_mm context is given the first available (non-zero) PID, * which is the "guard PID" and contains no page table. PIDR should * never be set to zero because that duplicates the kernel address * space at the 0x0... offset (quadrant 0)! 
* * An arbitrary PID that may later be allocated by the PID allocator * for userspace processes must not be used either, because that * would cause stale user mappings for that PID on CPUs outside of * the TLB invalidation scheme (because it won't be in mm_cpumask). * * So permanently carve out one PID for the purpose of a guard PID. */ init_mm.context.id = mmu_base_pid; mmu_base_pid++; } static void __init radix_init_partition_table(void) { unsigned long rts_field, dw0, dw1; mmu_partition_table_init(); rts_field = radix__get_tree_size(); dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR; dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR; mmu_partition_table_set_entry(0, dw0, dw1, false); pr_info("Initializing Radix MMU\n"); } static int __init get_idx_from_shift(unsigned int shift) { int idx = -1; switch (shift) { case 0xc: idx = MMU_PAGE_4K; break; case 0x10: idx = MMU_PAGE_64K; break; case 0x15: idx = MMU_PAGE_2M; break; case 0x1e: idx = MMU_PAGE_1G; break; } return idx; } static int __init radix_dt_scan_page_sizes(unsigned long node, const char *uname, int depth, void *data) { int size = 0; int shift, idx; unsigned int ap; const __be32 *prop; const char *type = of_get_flat_dt_prop(node, "device_type", NULL); /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; /* Grab page size encodings */ prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size); if (!prop) return 0; pr_info("Page sizes from device-tree:\n"); for (; size >= 4; size -= 4, ++prop) { struct mmu_psize_def *def; /* top 3 bit is AP encoding */ shift = be32_to_cpu(prop[0]) & ~(0xe << 28); ap = be32_to_cpu(prop[0]) >> 29; pr_info("Page size shift = %d AP=0x%x\n", shift, ap); idx = get_idx_from_shift(shift); if (idx < 0) continue; def = &mmu_psize_defs[idx]; def->shift = shift; def->ap = ap; def->h_rpt_pgsize = psize_to_rpti_pgsize(idx); } /* needed ? */ cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B; return 1; } void __init radix__early_init_devtree(void) { int rc; /* * Try to find the available page sizes in the device-tree */ rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL); if (!rc) { /* * No page size details found in device tree. 
* Let's assume we have page 4k and 64k support */ mmu_psize_defs[MMU_PAGE_4K].shift = 12; mmu_psize_defs[MMU_PAGE_4K].ap = 0x0; mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize = psize_to_rpti_pgsize(MMU_PAGE_4K); mmu_psize_defs[MMU_PAGE_64K].shift = 16; mmu_psize_defs[MMU_PAGE_64K].ap = 0x5; mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize = psize_to_rpti_pgsize(MMU_PAGE_64K); } return; } void __init radix__early_init_mmu(void) { unsigned long lpcr; #ifdef CONFIG_PPC_64S_HASH_MMU #ifdef CONFIG_PPC_64K_PAGES /* PAGE_SIZE mappings */ mmu_virtual_psize = MMU_PAGE_64K; #else mmu_virtual_psize = MMU_PAGE_4K; #endif #endif /* * initialize page table size */ __pte_index_size = RADIX_PTE_INDEX_SIZE; __pmd_index_size = RADIX_PMD_INDEX_SIZE; __pud_index_size = RADIX_PUD_INDEX_SIZE; __pgd_index_size = RADIX_PGD_INDEX_SIZE; __pud_cache_index = RADIX_PUD_INDEX_SIZE; __pte_table_size = RADIX_PTE_TABLE_SIZE; __pmd_table_size = RADIX_PMD_TABLE_SIZE; __pud_table_size = RADIX_PUD_TABLE_SIZE; __pgd_table_size = RADIX_PGD_TABLE_SIZE; __pmd_val_bits = RADIX_PMD_VAL_BITS; __pud_val_bits = RADIX_PUD_VAL_BITS; __pgd_val_bits = RADIX_PGD_VAL_BITS; __kernel_virt_start = RADIX_KERN_VIRT_START; __vmalloc_start = RADIX_VMALLOC_START; __vmalloc_end = RADIX_VMALLOC_END; __kernel_io_start = RADIX_KERN_IO_START; __kernel_io_end = RADIX_KERN_IO_END; vmemmap = (struct page *)RADIX_VMEMMAP_START; ioremap_bot = IOREMAP_BASE; #ifdef CONFIG_PCI pci_io_base = ISA_IO_BASE; #endif __pte_frag_nr = RADIX_PTE_FRAG_NR; __pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT; __pmd_frag_nr = RADIX_PMD_FRAG_NR; __pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT; radix_init_pgtable(); if (!firmware_has_feature(FW_FEATURE_LPAR)) { lpcr = mfspr(SPRN_LPCR); mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR); radix_init_partition_table(); } else { radix_init_pseries(); } memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); /* Switch to the guard PID before turning on MMU */ radix__switch_mmu_context(NULL, &init_mm); tlbiel_all(); } void radix__early_init_mmu_secondary(void) { unsigned long lpcr; /* * update partition table control register and UPRT */ if (!firmware_has_feature(FW_FEATURE_LPAR)) { lpcr = mfspr(SPRN_LPCR); mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR); set_ptcr_when_no_uv(__pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); } radix__switch_mmu_context(NULL, &init_mm); tlbiel_all(); /* Make sure userspace can't change the AMR */ mtspr(SPRN_UAMOR, 0); } /* Called during kexec sequence with MMU off */ notrace void radix__mmu_cleanup_all(void) { unsigned long lpcr; if (!firmware_has_feature(FW_FEATURE_LPAR)) { lpcr = mfspr(SPRN_LPCR); mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT); set_ptcr_when_no_uv(0); powernv_set_nmmu_ptcr(0); radix__flush_tlb_all(); } } #ifdef CONFIG_MEMORY_HOTPLUG static void free_pte_table(pte_t *pte_start, pmd_t *pmd) { pte_t *pte; int i; for (i = 0; i < PTRS_PER_PTE; i++) { pte = pte_start + i; if (!pte_none(*pte)) return; } pte_free_kernel(&init_mm, pte_start); pmd_clear(pmd); } static void free_pmd_table(pmd_t *pmd_start, pud_t *pud) { pmd_t *pmd; int i; for (i = 0; i < PTRS_PER_PMD; i++) { pmd = pmd_start + i; if (!pmd_none(*pmd)) return; } pmd_free(&init_mm, pmd_start); pud_clear(pud); } static void free_pud_table(pud_t *pud_start, p4d_t *p4d) { pud_t *pud; int i; for (i = 0; i < PTRS_PER_PUD; i++) { pud = pud_start + i; if (!pud_none(*pud)) return; } pud_free(&init_mm, pud_start); p4d_clear(p4d); } #ifdef CONFIG_SPARSEMEM_VMEMMAP static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end) { unsigned long start = 
ALIGN_DOWN(addr, PMD_SIZE); return !vmemmap_populated(start, PMD_SIZE); } static bool __meminit vmemmap_page_is_unused(unsigned long addr, unsigned long end) { unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE); return !vmemmap_populated(start, PAGE_SIZE); } #endif static void __meminit free_vmemmap_pages(struct page *page, struct vmem_altmap *altmap, int order) { unsigned int nr_pages = 1 << order; if (altmap) { unsigned long alt_start, alt_end; unsigned long base_pfn = page_to_pfn(page); /* * with 2M vmemmap mmaping we can have things setup * such that even though atlmap is specified we never * used altmap. */ alt_start = altmap->base_pfn; alt_end = altmap->base_pfn + altmap->reserve + altmap->free; if (base_pfn >= alt_start && base_pfn < alt_end) { vmem_altmap_free(altmap, nr_pages); return; } } if (PageReserved(page)) { /* allocated from memblock */ while (nr_pages--) free_reserved_page(page++); } else free_pages((unsigned long)page_address(page), order); } static void __meminit remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, bool direct, struct vmem_altmap *altmap) { unsigned long next, pages = 0; pte_t *pte; pte = pte_start + pte_index(addr); for (; addr < end; addr = next, pte++) { next = (addr + PAGE_SIZE) & PAGE_MASK; if (next > end) next = end; if (!pte_present(*pte)) continue; if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) { if (!direct) free_vmemmap_pages(pte_page(*pte), altmap, 0); pte_clear(&init_mm, addr, pte); pages++; } #ifdef CONFIG_SPARSEMEM_VMEMMAP else if (!direct && vmemmap_page_is_unused(addr, next)) { free_vmemmap_pages(pte_page(*pte), altmap, 0); pte_clear(&init_mm, addr, pte); } #endif } if (direct) update_page_count(mmu_virtual_psize, -pages); } static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, bool direct, struct vmem_altmap *altmap) { unsigned long next, pages = 0; pte_t *pte_base; pmd_t *pmd; pmd = pmd_start + pmd_index(addr); for (; addr < end; addr = next, pmd++) { next = pmd_addr_end(addr, end); if (!pmd_present(*pmd)) continue; if (pmd_is_leaf(*pmd)) { if (IS_ALIGNED(addr, PMD_SIZE) && IS_ALIGNED(next, PMD_SIZE)) { if (!direct) free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE)); pte_clear(&init_mm, addr, (pte_t *)pmd); pages++; } #ifdef CONFIG_SPARSEMEM_VMEMMAP else if (!direct && vmemmap_pmd_is_unused(addr, next)) { free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE)); pte_clear(&init_mm, addr, (pte_t *)pmd); } #endif continue; } pte_base = (pte_t *)pmd_page_vaddr(*pmd); remove_pte_table(pte_base, addr, next, direct, altmap); free_pte_table(pte_base, pmd); } if (direct) update_page_count(MMU_PAGE_2M, -pages); } static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, bool direct, struct vmem_altmap *altmap) { unsigned long next, pages = 0; pmd_t *pmd_base; pud_t *pud; pud = pud_start + pud_index(addr); for (; addr < end; addr = next, pud++) { next = pud_addr_end(addr, end); if (!pud_present(*pud)) continue; if (pud_is_leaf(*pud)) { if (!IS_ALIGNED(addr, PUD_SIZE) || !IS_ALIGNED(next, PUD_SIZE)) { WARN_ONCE(1, "%s: unaligned range\n", __func__); continue; } pte_clear(&init_mm, addr, (pte_t *)pud); pages++; continue; } pmd_base = pud_pgtable(*pud); remove_pmd_table(pmd_base, addr, next, direct, altmap); free_pmd_table(pmd_base, pud); } if (direct) update_page_count(MMU_PAGE_1G, -pages); } static void __meminit remove_pagetable(unsigned long start, unsigned long end, bool direct, struct vmem_altmap *altmap) { unsigned long 
addr, next; pud_t *pud_base; pgd_t *pgd; p4d_t *p4d; spin_lock(&init_mm.page_table_lock); for (addr = start; addr < end; addr = next) { next = pgd_addr_end(addr, end); pgd = pgd_offset_k(addr); p4d = p4d_offset(pgd, addr); if (!p4d_present(*p4d)) continue; if (p4d_is_leaf(*p4d)) { if (!IS_ALIGNED(addr, P4D_SIZE) || !IS_ALIGNED(next, P4D_SIZE)) { WARN_ONCE(1, "%s: unaligned range\n", __func__); continue; } pte_clear(&init_mm, addr, (pte_t *)pgd); continue; } pud_base = p4d_pgtable(*p4d); remove_pud_table(pud_base, addr, next, direct, altmap); free_pud_table(pud_base, p4d); } spin_unlock(&init_mm.page_table_lock); radix__flush_tlb_kernel_range(start, end); } int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid, pgprot_t prot) { if (end >= RADIX_VMALLOC_START) { pr_warn("Outside the supported range\n"); return -1; } return create_physical_mapping(__pa(start), __pa(end), nid, prot); } int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end) { remove_pagetable(start, end, true, NULL); return 0; } #endif /* CONFIG_MEMORY_HOTPLUG */ #ifdef CONFIG_SPARSEMEM_VMEMMAP static int __map_kernel_page_nid(unsigned long ea, unsigned long pa, pgprot_t flags, unsigned int map_page_size, int nid) { return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0); } int __meminit radix__vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys) { /* Create a PTE encoding */ int nid = early_pfn_to_nid(phys >> PAGE_SHIFT); int ret; if ((start + page_size) >= RADIX_VMEMMAP_END) { pr_warn("Outside the supported range\n"); return -1; } ret = __map_kernel_page_nid(start, phys, PAGE_KERNEL, page_size, nid); BUG_ON(ret); return 0; } bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap) { if (radix_enabled()) return __vmemmap_can_optimize(altmap, pgmap); return false; } int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node, unsigned long addr, unsigned long next) { int large = pmd_large(*pmdp); if (large) vmemmap_verify(pmdp_ptep(pmdp), node, addr, next); return large; } void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node, unsigned long addr, unsigned long next) { pte_t entry; pte_t *ptep = pmdp_ptep(pmdp); VM_BUG_ON(!IS_ALIGNED(addr, PMD_SIZE)); entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); set_pte_at(&init_mm, addr, ptep, entry); asm volatile("ptesync": : :"memory"); vmemmap_verify(ptep, node, addr, next); } static pte_t * __meminit radix__vmemmap_pte_populate(pmd_t *pmdp, unsigned long addr, int node, struct vmem_altmap *altmap, struct page *reuse) { pte_t *pte = pte_offset_kernel(pmdp, addr); if (pte_none(*pte)) { pte_t entry; void *p; if (!reuse) { /* * make sure we don't create altmap mappings * covering things outside the device. */ if (altmap && altmap_cross_boundary(altmap, addr, PAGE_SIZE)) altmap = NULL; p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap); if (!p && altmap) p = vmemmap_alloc_block_buf(PAGE_SIZE, node, NULL); if (!p) return NULL; pr_debug("PAGE_SIZE vmemmap mapping\n"); } else { /* * When a PTE/PMD entry is freed from the init_mm * there's a free_pages() call to this page allocated * above. Thus this get_page() is paired with the * put_page_testzero() on the freeing path. * This can only called by certain ZONE_DEVICE path, * and through vmemmap_populate_compound_pages() when * slab is available. 
*/ get_page(reuse); p = page_to_virt(reuse); pr_debug("Tail page reuse vmemmap mapping\n"); } VM_BUG_ON(!PAGE_ALIGNED(addr)); entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); set_pte_at(&init_mm, addr, pte, entry); asm volatile("ptesync": : :"memory"); } return pte; } static inline pud_t *vmemmap_pud_alloc(p4d_t *p4dp, int node, unsigned long address) { pud_t *pud; /* All early vmemmap mapping to keep simple do it at PAGE_SIZE */ if (unlikely(p4d_none(*p4dp))) { if (unlikely(!slab_is_available())) { pud = early_alloc_pgtable(PAGE_SIZE, node, 0, 0); p4d_populate(&init_mm, p4dp, pud); /* go to the pud_offset */ } else return pud_alloc(&init_mm, p4dp, address); } return pud_offset(p4dp, address); } static inline pmd_t *vmemmap_pmd_alloc(pud_t *pudp, int node, unsigned long address) { pmd_t *pmd; /* All early vmemmap mapping to keep simple do it at PAGE_SIZE */ if (unlikely(pud_none(*pudp))) { if (unlikely(!slab_is_available())) { pmd = early_alloc_pgtable(PAGE_SIZE, node, 0, 0); pud_populate(&init_mm, pudp, pmd); } else return pmd_alloc(&init_mm, pudp, address); } return pmd_offset(pudp, address); } static inline pte_t *vmemmap_pte_alloc(pmd_t *pmdp, int node, unsigned long address) { pte_t *pte; /* All early vmemmap mapping to keep simple do it at PAGE_SIZE */ if (unlikely(pmd_none(*pmdp))) { if (unlikely(!slab_is_available())) { pte = early_alloc_pgtable(PAGE_SIZE, node, 0, 0); pmd_populate(&init_mm, pmdp, pte); } else return pte_alloc_kernel(pmdp, address); } return pte_offset_kernel(pmdp, address); } int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { unsigned long addr; unsigned long next; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; for (addr = start; addr < end; addr = next) { next = pmd_addr_end(addr, end); pgd = pgd_offset_k(addr); p4d = p4d_offset(pgd, addr); pud = vmemmap_pud_alloc(p4d, node, addr); if (!pud) return -ENOMEM; pmd = vmemmap_pmd_alloc(pud, node, addr); if (!pmd) return -ENOMEM; if (pmd_none(READ_ONCE(*pmd))) { void *p; /* * keep it simple by checking addr PMD_SIZE alignment * and verifying the device boundary condition. * For us to use a pmd mapping, both addr and pfn should * be aligned. We skip if addr is not aligned and for * pfn we hope we have extra area in the altmap that * can help to find an aligned block. This can result * in altmap block allocation failures, in which case * we fallback to RAM for vmemmap allocation. */ if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) || altmap_cross_boundary(altmap, addr, PMD_SIZE))) { /* * make sure we don't create altmap mappings * covering things outside the device. */ goto base_mapping; } p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap); if (p) { vmemmap_set_pmd(pmd, p, node, addr, next); pr_debug("PMD_SIZE vmemmap mapping\n"); continue; } else if (altmap) { /* * A vmemmap block allocation can fail due to * alignment requirements and we trying to align * things aggressively there by running out of * space. Try base mapping on failure. */ goto base_mapping; } } else if (vmemmap_check_pmd(pmd, node, addr, next)) { /* * If a huge mapping exist due to early call to * vmemmap_populate, let's try to use that. */ continue; } base_mapping: /* * Not able allocate higher order memory to back memmap * or we found a pointer to pte page. 
Allocate base page * size vmemmap */ pte = vmemmap_pte_alloc(pmd, node, addr); if (!pte) return -ENOMEM; pte = radix__vmemmap_pte_populate(pmd, addr, node, altmap, NULL); if (!pte) return -ENOMEM; vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); next = addr + PAGE_SIZE; } return 0; } static pte_t * __meminit radix__vmemmap_populate_address(unsigned long addr, int node, struct vmem_altmap *altmap, struct page *reuse) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; pgd = pgd_offset_k(addr); p4d = p4d_offset(pgd, addr); pud = vmemmap_pud_alloc(p4d, node, addr); if (!pud) return NULL; pmd = vmemmap_pmd_alloc(pud, node, addr); if (!pmd) return NULL; if (pmd_leaf(*pmd)) /* * The second page is mapped as a hugepage due to a nearby request. * Force our mapping to page size without deduplication */ return NULL; pte = vmemmap_pte_alloc(pmd, node, addr); if (!pte) return NULL; radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL); vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); return pte; } static pte_t * __meminit vmemmap_compound_tail_page(unsigned long addr, unsigned long pfn_offset, int node) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; unsigned long map_addr; /* the second vmemmap page which we use for duplication */ map_addr = addr - pfn_offset * sizeof(struct page) + PAGE_SIZE; pgd = pgd_offset_k(map_addr); p4d = p4d_offset(pgd, map_addr); pud = vmemmap_pud_alloc(p4d, node, map_addr); if (!pud) return NULL; pmd = vmemmap_pmd_alloc(pud, node, map_addr); if (!pmd) return NULL; if (pmd_leaf(*pmd)) /* * The second page is mapped as a hugepage due to a nearby request. * Force our mapping to page size without deduplication */ return NULL; pte = vmemmap_pte_alloc(pmd, node, map_addr); if (!pte) return NULL; /* * Check if there exist a mapping to the left */ if (pte_none(*pte)) { /* * Populate the head page vmemmap page. * It can fall in different pmd, hence * vmemmap_populate_address() */ pte = radix__vmemmap_populate_address(map_addr - PAGE_SIZE, node, NULL, NULL); if (!pte) return NULL; /* * Populate the tail pages vmemmap page */ pte = radix__vmemmap_pte_populate(pmd, map_addr, node, NULL, NULL); if (!pte) return NULL; vmemmap_verify(pte, node, map_addr, map_addr + PAGE_SIZE); return pte; } return pte; } int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, unsigned long start, unsigned long end, int node, struct dev_pagemap *pgmap) { /* * we want to map things as base page size mapping so that * we can save space in vmemmap. We could have huge mapping * covering out both edges. */ unsigned long addr; unsigned long addr_pfn = start_pfn; unsigned long next; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; for (addr = start; addr < end; addr = next) { pgd = pgd_offset_k(addr); p4d = p4d_offset(pgd, addr); pud = vmemmap_pud_alloc(p4d, node, addr); if (!pud) return -ENOMEM; pmd = vmemmap_pmd_alloc(pud, node, addr); if (!pmd) return -ENOMEM; if (pmd_leaf(READ_ONCE(*pmd))) { /* existing huge mapping. Skip the range */ addr_pfn += (PMD_SIZE >> PAGE_SHIFT); next = pmd_addr_end(addr, end); continue; } pte = vmemmap_pte_alloc(pmd, node, addr); if (!pte) return -ENOMEM; if (!pte_none(*pte)) { /* * This could be because we already have a compound * page whose VMEMMAP_RESERVE_NR pages were mapped and * this request fall in those pages. 
*/ addr_pfn += 1; next = addr + PAGE_SIZE; continue; } else { unsigned long nr_pages = pgmap_vmemmap_nr(pgmap); unsigned long pfn_offset = addr_pfn - ALIGN_DOWN(addr_pfn, nr_pages); pte_t *tail_page_pte; /* * if the address is aligned to huge page size it is the * head mapping. */ if (pfn_offset == 0) { /* Populate the head page vmemmap page */ pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL); if (!pte) return -ENOMEM; vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); /* * Populate the tail pages vmemmap page * It can fall in different pmd, hence * vmemmap_populate_address() */ pte = radix__vmemmap_populate_address(addr + PAGE_SIZE, node, NULL, NULL); if (!pte) return -ENOMEM; addr_pfn += 2; next = addr + 2 * PAGE_SIZE; continue; } /* * get the 2nd mapping details * Also create it if that doesn't exist */ tail_page_pte = vmemmap_compound_tail_page(addr, pfn_offset, node); if (!tail_page_pte) { pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL); if (!pte) return -ENOMEM; vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); addr_pfn += 1; next = addr + PAGE_SIZE; continue; } pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, pte_page(*tail_page_pte)); if (!pte) return -ENOMEM; vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); addr_pfn += 1; next = addr + PAGE_SIZE; continue; } } return 0; } #ifdef CONFIG_MEMORY_HOTPLUG void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size) { remove_pagetable(start, start + page_size, true, NULL); } void __ref radix__vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) { remove_pagetable(start, end, false, altmap); } #endif #endif #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) void radix__kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long addr; addr = (unsigned long)page_address(page); if (enable) set_memory_p(addr, numpages); else set_memory_np(addr, numpages); } #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, unsigned long clr, unsigned long set) { unsigned long old; #ifdef CONFIG_DEBUG_VM WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp)); assert_spin_locked(pmd_lockptr(mm, pmdp)); #endif old = radix__pte_update(mm, addr, pmdp_ptep(pmdp), clr, set, 1); trace_hugepage_update_pmd(addr, old, clr, set); return old; } unsigned long radix__pud_hugepage_update(struct mm_struct *mm, unsigned long addr, pud_t *pudp, unsigned long clr, unsigned long set) { unsigned long old; #ifdef CONFIG_DEBUG_VM WARN_ON(!pud_devmap(*pudp)); assert_spin_locked(pud_lockptr(mm, pudp)); #endif old = radix__pte_update(mm, addr, pudp_ptep(pudp), clr, set, 1); trace_hugepage_update_pud(addr, old, clr, set); return old; } pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { pmd_t pmd; VM_BUG_ON(address & ~HPAGE_PMD_MASK); VM_BUG_ON(radix__pmd_trans_huge(*pmdp)); VM_BUG_ON(pmd_devmap(*pmdp)); /* * khugepaged calls this for normal pmd */ pmd = *pmdp; pmd_clear(pmdp); radix__flush_tlb_collapsed_pmd(vma->vm_mm, address); return pmd; } /* * For us pgtable_t is pte_t *. Inorder to save the deposisted * page table, we consider the allocated page table as a list * head. On withdraw we need to make sure we zero out the used * list_head memory area. 
*/ void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable) { struct list_head *lh = (struct list_head *) pgtable; assert_spin_locked(pmd_lockptr(mm, pmdp)); /* FIFO */ if (!pmd_huge_pte(mm, pmdp)) INIT_LIST_HEAD(lh); else list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); pmd_huge_pte(mm, pmdp) = pgtable; } pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) { pte_t *ptep; pgtable_t pgtable; struct list_head *lh; assert_spin_locked(pmd_lockptr(mm, pmdp)); /* FIFO */ pgtable = pmd_huge_pte(mm, pmdp); lh = (struct list_head *) pgtable; if (list_empty(lh)) pmd_huge_pte(mm, pmdp) = NULL; else { pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; list_del(lh); } ptep = (pte_t *) pgtable; *ptep = __pte(0); ptep++; *ptep = __pte(0); return pgtable; } pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) { pmd_t old_pmd; unsigned long old; old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); old_pmd = __pmd(old); return old_pmd; } pud_t radix__pudp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, pud_t *pudp) { pud_t old_pud; unsigned long old; old = radix__pud_hugepage_update(mm, addr, pudp, ~0UL, 0); old_pud = __pud(old); return old_pud; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t entry, unsigned long address, int psize) { struct mm_struct *mm = vma->vm_mm; unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); unsigned long change = pte_val(entry) ^ pte_val(*ptep); /* * On POWER9, the NMMU is not able to relax PTE access permissions * for a translation with a TLB. The PTE must be invalidated, TLB * flushed before the new PTE is installed. * * This only needs to be done for radix, because hash translation does * flush when updating the linux pte (and we don't support NMMU * accelerators on HPT on POWER9 anyway XXX: do we?). * * POWER10 (and P9P) NMMU does behave as per ISA. */ if (!cpu_has_feature(CPU_FTR_ARCH_31) && (change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) { unsigned long old_pte, new_pte; old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID); new_pte = old_pte | set; radix__flush_tlb_page_psize(mm, address, psize); __radix_pte_update(ptep, _PAGE_INVALID, new_pte); } else { __radix_pte_update(ptep, 0, set); /* * Book3S does not require a TLB flush when relaxing access * restrictions when the address space (modulo the POWER9 nest * MMU issue above) because the MMU will reload the PTE after * taking an access fault, as defined by the architecture. See * "Setting a Reference or Change Bit or Upgrading Access * Authority (PTE Subject to Atomic Hardware Updates)" in * Power ISA Version 3.1B. */ } /* See ptesync comment in radix__set_pte_at */ } void radix__ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t pte) { struct mm_struct *mm = vma->vm_mm; /* * POWER9 NMMU must flush the TLB after clearing the PTE before * installing a PTE with more relaxed access permissions, see * radix__ptep_set_access_flags. 
*/ if (!cpu_has_feature(CPU_FTR_ARCH_31) && is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) && (atomic_read(&mm->context.copros) > 0)) radix__flush_tlb_page(vma, addr); set_pte_at(mm, addr, ptep, pte); } int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) { pte_t *ptep = (pte_t *)pud; pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot); if (!radix_enabled()) return 0; set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud); return 1; } int pud_clear_huge(pud_t *pud) { if (pud_is_leaf(*pud)) { pud_clear(pud); return 1; } return 0; } int pud_free_pmd_page(pud_t *pud, unsigned long addr) { pmd_t *pmd; int i; pmd = pud_pgtable(*pud); pud_clear(pud); flush_tlb_kernel_range(addr, addr + PUD_SIZE); for (i = 0; i < PTRS_PER_PMD; i++) { if (!pmd_none(pmd[i])) { pte_t *pte; pte = (pte_t *)pmd_page_vaddr(pmd[i]); pte_free_kernel(&init_mm, pte); } } pmd_free(&init_mm, pmd); return 1; } int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) { pte_t *ptep = (pte_t *)pmd; pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot); if (!radix_enabled()) return 0; set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd); return 1; } int pmd_clear_huge(pmd_t *pmd) { if (pmd_is_leaf(*pmd)) { pmd_clear(pmd); return 1; } return 0; } int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) { pte_t *pte; pte = (pte_t *)pmd_page_vaddr(*pmd); pmd_clear(pmd); flush_tlb_kernel_range(addr, addr + PMD_SIZE); pte_free_kernel(&init_mm, pte); return 1; }
linux-master
arch/powerpc/mm/book3s64/radix_pgtable.c
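create_physical_mapping() in radix_pgtable.c above picks, for each address, the largest page size whose alignment and remaining gap allow it (1G, then 2M, then base pages). A standalone sketch of that selection loop, with hard-coded sizes instead of mmu_psize_defs[] and a hypothetical pick_mapping_size() helper:

/* Walk a physical range, mapping it with the biggest size that both fits
 * the gap to the next boundary and matches the current alignment. */
#include <stdio.h>

#define SZ_4K	(1UL << 12)
#define SZ_2M	(1UL << 21)
#define SZ_1G	(1UL << 30)

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static unsigned long pick_mapping_size(unsigned long addr, unsigned long gap)
{
	if (IS_ALIGNED(addr, SZ_1G) && gap >= SZ_1G)
		return SZ_1G;
	if (IS_ALIGNED(addr, SZ_2M) && gap >= SZ_2M)
		return SZ_2M;
	return SZ_4K;
}

int main(void)
{
	unsigned long addr = 0;
	unsigned long end = (3UL << 30) + (5UL << 21) + SZ_4K;

	while (addr < end) {
		unsigned long size = pick_mapping_size(addr, end - addr);

		printf("map %#014lx with %#lx bytes\n", addr, size);
		addr += size;
	}
	return 0;
}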
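radix_dt_scan_page_sizes() in the same file decodes each cell of the ibm,processor-radix-AP-encodings property by taking the top 3 bits as the AP encoding and the remaining bits as the page-size shift. A small decoding sketch; the sample cells are constructed for illustration (the 4K/64K AP values match the fallback defaults set later in the file, the 2M/1G ones are assumed).

/* AP encoding in bits 31..29, page-size shift in bits 28..0. */
#include <stdint.h>
#include <stdio.h>

static void decode_ap_cell(uint32_t cell)
{
	unsigned int ap = cell >> 29;			/* top 3 bits */
	unsigned int shift = cell & ~(0xeu << 28);	/* low 29 bits */

	printf("page size shift = %u (%lu bytes), AP = 0x%x\n",
	       shift, 1UL << shift, ap);
}

int main(void)
{
	uint32_t cells[] = {
		(0x0u << 29) | 12,	/* 4K  */
		(0x5u << 29) | 16,	/* 64K */
		(0x1u << 29) | 21,	/* 2M, illustrative */
		(0x2u << 29) | 30,	/* 1G, illustrative */
	};

	for (unsigned int i = 0; i < sizeof(cells) / sizeof(cells[0]); i++)
		decode_ap_cell(cells[i]);
	return 0;
}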
// SPDX-License-Identifier: GPL-2.0 #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/security.h> #include <asm/cacheflush.h> #include <asm/machdep.h> #include <asm/mman.h> #include <asm/tlb.h> void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { int psize; struct hstate *hstate = hstate_file(vma->vm_file); psize = hstate_get_psize(hstate); radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); } void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { int psize; struct hstate *hstate = hstate_file(vma->vm_file); psize = hstate_get_psize(hstate); radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); } void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { int psize; struct hstate *hstate = hstate_file(vma->vm_file); psize = hstate_get_psize(hstate); /* * Flush PWC even if we get PUD_SIZE hugetlb invalidate to keep this simpler. */ if (end - start >= PUD_SIZE) radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize); else radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize); mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end); } void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t pte) { struct mm_struct *mm = vma->vm_mm; /* * POWER9 NMMU must flush the TLB after clearing the PTE before * installing a PTE with more relaxed access permissions, see * radix__ptep_set_access_flags. */ if (!cpu_has_feature(CPU_FTR_ARCH_31) && is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) && atomic_read(&mm->context.copros) > 0) radix__flush_hugetlb_page(vma, addr); set_huge_pte_at(vma->vm_mm, addr, ptep, pte); }
linux-master
arch/powerpc/mm/book3s64/radix_hugetlbpage.c
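radix__flush_hugetlb_tlb_range() above keeps things simple by also flushing the page-walk cache whenever the invalidated range is at least PUD_SIZE. A tiny sketch of that decision, with logging stand-ins for the radix flush routines and an illustrative 1G PUD_SIZE:

#include <stdio.h>

#define PUD_SIZE (1UL << 30)	/* illustrative value */

static void flush_pwc_range(unsigned long start, unsigned long end)
{
	printf("flush TLB + page walk cache for %#lx-%#lx\n", start, end);
}

static void flush_range(unsigned long start, unsigned long end)
{
	printf("flush TLB only for %#lx-%#lx\n", start, end);
}

static void flush_hugetlb_range(unsigned long start, unsigned long end)
{
	/* big ranges take the PWC-flushing path, small ones the plain flush */
	if (end - start >= PUD_SIZE)
		flush_pwc_range(start, end);
	else
		flush_range(start, end);
}

int main(void)
{
	flush_hugetlb_range(0, 2UL << 30);	/* >= PUD_SIZE: PWC too */
	flush_hugetlb_range(0, 16UL << 20);	/* smaller: TLB only    */
	return 0;
}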
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains the routines for flushing entries from the * TLB and MMU hash table. * * Derived from arch/ppc64/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Dave Engebretsen <[email protected]> * Rework for PPC64 port. */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/bug.h> #include <asm/pte-walk.h> #include <trace/events/thp.h> DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); /* * A linux PTE was changed and the corresponding hash table entry * neesd to be flushed. This function will either perform the flush * immediately or will batch it up if the current CPU has an active * batch on it. */ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long pte, int huge) { unsigned long vpn; struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); unsigned long vsid; unsigned int psize; int ssize; real_pte_t rpte; int i, offset; i = batch->index; /* * Get page size (maybe move back to caller). * * NOTE: when using special 64K mappings in 4K environment like * for SPEs, we obtain the page size from the slice, which thus * must still exist (and thus the VMA not reused) at the time * of this call */ if (huge) { #ifdef CONFIG_HUGETLB_PAGE psize = get_slice_psize(mm, addr); /* Mask the address for the correct page size */ addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1); if (unlikely(psize == MMU_PAGE_16G)) offset = PTRS_PER_PUD; else offset = PTRS_PER_PMD; #else BUG(); psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ #endif } else { psize = pte_pagesize_index(mm, addr, pte); /* * Mask the address for the standard page size. If we * have a 64k page kernel, but the hardware does not * support 64k pages, this might be different from the * hardware page size encoded in the slice table. */ addr &= PAGE_MASK; offset = PTRS_PER_PTE; } /* Build full vaddr */ if (!is_kernel_addr(addr)) { ssize = user_segment_size(addr); vsid = get_user_vsid(&mm->context, addr, ssize); } else { vsid = get_kernel_vsid(addr, mmu_kernel_ssize); ssize = mmu_kernel_ssize; } WARN_ON(vsid == 0); vpn = hpt_vpn(addr, vsid, ssize); rpte = __real_pte(__pte(pte), ptep, offset); /* * Check if we have an active batch on this CPU. If not, just * flush now and return. */ if (!batch->active) { flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm)); put_cpu_var(ppc64_tlb_batch); return; } /* * This can happen when we are in the middle of a TLB batch and * we encounter memory pressure (eg copy_page_range when it tries * to allocate a new pte). If we have to reclaim memory and end * up scanning and resetting referenced bits then our batch context * will change mid stream. 
* * We also need to ensure only one page size is present in a given * batch */ if (i != 0 && (mm != batch->mm || batch->psize != psize || batch->ssize != ssize)) { __flush_tlb_pending(batch); i = 0; } if (i == 0) { batch->mm = mm; batch->psize = psize; batch->ssize = ssize; } batch->pte[i] = rpte; batch->vpn[i] = vpn; batch->index = ++i; if (i >= PPC64_TLB_BATCH_NR) __flush_tlb_pending(batch); put_cpu_var(ppc64_tlb_batch); } /* * This function is called when terminating an mmu batch or when a batch * is full. It will perform the flush of all the entries currently stored * in a batch. * * Must be called from within some kind of spinlock/non-preempt region... */ void __flush_tlb_pending(struct ppc64_tlb_batch *batch) { int i, local; i = batch->index; local = mm_is_thread_local(batch->mm); if (i == 1) flush_hash_page(batch->vpn[0], batch->pte[0], batch->psize, batch->ssize, local); else flush_hash_range(i, local); batch->index = 0; } void hash__tlb_flush(struct mmu_gather *tlb) { struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch); /* * If there's a TLB batch pending, then we must flush it because the * pages are going to be freed and we really don't want to have a CPU * access a freed page because it has a stale TLB */ if (tlbbatch->index) __flush_tlb_pending(tlbbatch); put_cpu_var(ppc64_tlb_batch); } /** * __flush_hash_table_range - Flush all HPTEs for a given address range * from the hash table (and the TLB). But keeps * the linux PTEs intact. * * @start : starting address * @end : ending address (not included in the flush) * * This function is mostly to be used by some IO hotplug code in order * to remove all hash entries from a given address range used to map IO * space on a removed PCI-PCI bidge without tearing down the full mapping * since 64K pages may overlap with other bridges when using 64K pages * with 4K HW pages on IO space. * * Because of that usage pattern, it is implemented for small size rather * than speed. */ void __flush_hash_table_range(unsigned long start, unsigned long end) { int hugepage_shift; unsigned long flags; start = ALIGN_DOWN(start, PAGE_SIZE); end = ALIGN(end, PAGE_SIZE); /* * Note: Normally, we should only ever use a batch within a * PTE locked section. This violates the rule, but will work * since we don't actually modify the PTEs, we just flush the * hash while leaving the PTEs intact (including their reference * to being hashed). This is not the most performance oriented * way to do things but is fine for our needs here. */ local_irq_save(flags); arch_enter_lazy_mmu_mode(); for (; start < end; start += PAGE_SIZE) { pte_t *ptep = find_init_mm_pte(start, &hugepage_shift); unsigned long pte; if (ptep == NULL) continue; pte = pte_val(*ptep); if (!(pte & H_PAGE_HASHPTE)) continue; hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift); } arch_leave_lazy_mmu_mode(); local_irq_restore(flags); } void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr) { pte_t *pte; pte_t *start_pte; unsigned long flags; addr = ALIGN_DOWN(addr, PMD_SIZE); /* * Note: Normally, we should only ever use a batch within a * PTE locked section. This violates the rule, but will work * since we don't actually modify the PTEs, we just flush the * hash while leaving the PTEs intact (including their reference * to being hashed). This is not the most performance oriented * way to do things but is fine for our needs here. 
*/ local_irq_save(flags); arch_enter_lazy_mmu_mode(); start_pte = pte_offset_map(pmd, addr); if (!start_pte) goto out; for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) { unsigned long pteval = pte_val(*pte); if (pteval & H_PAGE_HASHPTE) hpte_need_flush(mm, addr, pte, pteval, 0); addr += PAGE_SIZE; } pte_unmap(start_pte); out: arch_leave_lazy_mmu_mode(); local_irq_restore(flags); }
linux-master
arch/powerpc/mm/book3s64/hash_tlb.c
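hpte_need_flush() and __flush_tlb_pending() above implement a per-CPU batching scheme: hash invalidations are queued until the batch fills or an incompatible entry (different mm, page size or segment size) arrives, at which point everything queued is flushed in one go. A userspace sketch of the same pattern, with a made-up tlb_batch structure and BATCH_NR standing in for PPC64_TLB_BATCH_NR:

#include <stdio.h>

#define BATCH_NR 8	/* illustrative batch capacity */

struct tlb_batch {
	int mm_id;	/* which address space the queued entries belong to */
	int psize;	/* page size of the queued entries */
	int index;	/* number of queued virtual addresses */
	unsigned long vpn[BATCH_NR];
};

static void flush_pending(struct tlb_batch *b)
{
	if (!b->index)
		return;
	printf("flushing %d entries for mm %d (psize %d)\n",
	       b->index, b->mm_id, b->psize);
	b->index = 0;
}

static void queue_flush(struct tlb_batch *b, int mm_id, int psize,
			unsigned long vpn)
{
	/* a batch may only hold entries of one mm and one page size */
	if (b->index && (b->mm_id != mm_id || b->psize != psize))
		flush_pending(b);

	if (!b->index) {
		b->mm_id = mm_id;
		b->psize = psize;
	}
	b->vpn[b->index++] = vpn;

	if (b->index >= BATCH_NR)
		flush_pending(b);
}

int main(void)
{
	struct tlb_batch batch = { .index = 0 };

	for (unsigned long i = 0; i < 10; i++)
		queue_flush(&batch, 1, 0, 0x1000 + i);
	queue_flush(&batch, 2, 0, 0x9000);	/* mm change forces a flush */
	flush_pending(&batch);			/* drain what is left */
	return 0;
}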
// SPDX-License-Identifier: GPL-2.0-or-later /* * address space "slices" (meta-segments) support * * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation. * * Based on hugetlb implementation * * Copyright (C) 2003 David Gibson, IBM Corporation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/export.h> #include <linux/hugetlb.h> #include <linux/sched/mm.h> #include <linux/security.h> #include <asm/mman.h> #include <asm/mmu.h> #include <asm/copro.h> #include <asm/hugetlb.h> #include <asm/mmu_context.h> static DEFINE_SPINLOCK(slice_convert_lock); #ifdef DEBUG int _slice_debug = 1; static void slice_print_mask(const char *label, const struct slice_mask *mask) { if (!_slice_debug) return; pr_devel("%s low_slice: %*pbl\n", label, (int)SLICE_NUM_LOW, &mask->low_slices); pr_devel("%s high_slice: %*pbl\n", label, (int)SLICE_NUM_HIGH, mask->high_slices); } #define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0) #else static void slice_print_mask(const char *label, const struct slice_mask *mask) {} #define slice_dbg(fmt...) #endif static inline notrace bool slice_addr_is_low(unsigned long addr) { u64 tmp = (u64)addr; return tmp < SLICE_LOW_TOP; } static void slice_range_to_mask(unsigned long start, unsigned long len, struct slice_mask *ret) { unsigned long end = start + len - 1; ret->low_slices = 0; if (SLICE_NUM_HIGH) bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); if (slice_addr_is_low(start)) { unsigned long mend = min(end, (unsigned long)(SLICE_LOW_TOP - 1)); ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1)) - (1u << GET_LOW_SLICE_INDEX(start)); } if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) { unsigned long start_index = GET_HIGH_SLICE_INDEX(start); unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT)); unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index; bitmap_set(ret->high_slices, start_index, count); } } static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, unsigned long len) { struct vm_area_struct *vma; if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr) return 0; vma = find_vma(mm, addr); return (!vma || (addr + len) <= vm_start_gap(vma)); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) { return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, 1ul << SLICE_LOW_SHIFT); } static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) { unsigned long start = slice << SLICE_HIGH_SHIFT; unsigned long end = start + (1ul << SLICE_HIGH_SHIFT); /* Hack, so that each addresses is controlled by exactly one * of the high or low area bitmaps, the first high area starts * at 4GB, not 0 */ if (start == 0) start = (unsigned long)SLICE_LOW_TOP; return !slice_area_is_free(mm, start, end - start); } static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret, unsigned long high_limit) { unsigned long i; ret->low_slices = 0; if (SLICE_NUM_HIGH) bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); for (i = 0; i < SLICE_NUM_LOW; i++) if (!slice_low_has_vma(mm, i)) ret->low_slices |= 1u << i; if (slice_addr_is_low(high_limit - 1)) return; for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) if (!slice_high_has_vma(mm, i)) __set_bit(i, ret->high_slices); } static bool slice_check_range_fits(struct mm_struct *mm, const struct slice_mask *available, unsigned long start, unsigned long len) { unsigned long end = start + len - 1; u64 low_slices = 0; if (slice_addr_is_low(start)) 
{ unsigned long mend = min(end, (unsigned long)(SLICE_LOW_TOP - 1)); low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1)) - (1u << GET_LOW_SLICE_INDEX(start)); } if ((low_slices & available->low_slices) != low_slices) return false; if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) { unsigned long start_index = GET_HIGH_SLICE_INDEX(start); unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT)); unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index; unsigned long i; for (i = start_index; i < start_index + count; i++) { if (!test_bit(i, available->high_slices)) return false; } } return true; } static void slice_flush_segments(void *parm) { #ifdef CONFIG_PPC64 struct mm_struct *mm = parm; unsigned long flags; if (mm != current->active_mm) return; copy_mm_to_paca(current->active_mm); local_irq_save(flags); slb_flush_and_restore_bolted(); local_irq_restore(flags); #endif } static void slice_convert(struct mm_struct *mm, const struct slice_mask *mask, int psize) { int index, mask_index; /* Write the new slice psize bits */ unsigned char *hpsizes, *lpsizes; struct slice_mask *psize_mask, *old_mask; unsigned long i, flags; int old_psize; slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize); slice_print_mask(" mask", mask); psize_mask = slice_mask_for_size(&mm->context, psize); /* We need to use a spinlock here to protect against * concurrent 64k -> 4k demotion ... */ spin_lock_irqsave(&slice_convert_lock, flags); lpsizes = mm_ctx_low_slices(&mm->context); for (i = 0; i < SLICE_NUM_LOW; i++) { if (!(mask->low_slices & (1u << i))) continue; mask_index = i & 0x1; index = i >> 1; /* Update the slice_mask */ old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf; old_mask = slice_mask_for_size(&mm->context, old_psize); old_mask->low_slices &= ~(1u << i); psize_mask->low_slices |= 1u << i; /* Update the sizes array */ lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) | (((unsigned long)psize) << (mask_index * 4)); } hpsizes = mm_ctx_high_slices(&mm->context); for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) { if (!test_bit(i, mask->high_slices)) continue; mask_index = i & 0x1; index = i >> 1; /* Update the slice_mask */ old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf; old_mask = slice_mask_for_size(&mm->context, old_psize); __clear_bit(i, old_mask->high_slices); __set_bit(i, psize_mask->high_slices); /* Update the sizes array */ hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) | (((unsigned long)psize) << (mask_index * 4)); } slice_dbg(" lsps=%lx, hsps=%lx\n", (unsigned long)mm_ctx_low_slices(&mm->context), (unsigned long)mm_ctx_high_slices(&mm->context)); spin_unlock_irqrestore(&slice_convert_lock, flags); copro_flush_all_slbs(mm); } /* * Compute which slice addr is part of; * set *boundary_addr to the start or end boundary of that slice * (depending on 'end' parameter); * return boolean indicating if the slice is marked as available in the * 'available' slice_mark. */ static bool slice_scan_available(unsigned long addr, const struct slice_mask *available, int end, unsigned long *boundary_addr) { unsigned long slice; if (slice_addr_is_low(addr)) { slice = GET_LOW_SLICE_INDEX(addr); *boundary_addr = (slice + end) << SLICE_LOW_SHIFT; return !!(available->low_slices & (1u << slice)); } else { slice = GET_HIGH_SLICE_INDEX(addr); *boundary_addr = (slice + end) ? 
((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP; return !!test_bit(slice, available->high_slices); } } static unsigned long slice_find_area_bottomup(struct mm_struct *mm, unsigned long addr, unsigned long len, const struct slice_mask *available, int psize, unsigned long high_limit) { int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); unsigned long found, next_end; struct vm_unmapped_area_info info; info.flags = 0; info.length = len; info.align_mask = PAGE_MASK & ((1ul << pshift) - 1); info.align_offset = 0; /* * Check till the allow max value for this mmap request */ while (addr < high_limit) { info.low_limit = addr; if (!slice_scan_available(addr, available, 1, &addr)) continue; next_slice: /* * At this point [info.low_limit; addr) covers * available slices only and ends at a slice boundary. * Check if we need to reduce the range, or if we can * extend it to cover the next available slice. */ if (addr >= high_limit) addr = high_limit; else if (slice_scan_available(addr, available, 1, &next_end)) { addr = next_end; goto next_slice; } info.high_limit = addr; found = vm_unmapped_area(&info); if (!(found & ~PAGE_MASK)) return found; } return -ENOMEM; } static unsigned long slice_find_area_topdown(struct mm_struct *mm, unsigned long addr, unsigned long len, const struct slice_mask *available, int psize, unsigned long high_limit) { int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); unsigned long found, prev; struct vm_unmapped_area_info info; unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr); info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.align_mask = PAGE_MASK & ((1ul << pshift) - 1); info.align_offset = 0; /* * If we are trying to allocate above DEFAULT_MAP_WINDOW * Add the different to the mmap_base. * Only for that request for which high_limit is above * DEFAULT_MAP_WINDOW we should apply this. */ if (high_limit > DEFAULT_MAP_WINDOW) addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW; while (addr > min_addr) { info.high_limit = addr; if (!slice_scan_available(addr - 1, available, 0, &addr)) continue; prev_slice: /* * At this point [addr; info.high_limit) covers * available slices only and starts at a slice boundary. * Check if we need to reduce the range, or if we can * extend it to cover the previous available slice. */ if (addr < min_addr) addr = min_addr; else if (slice_scan_available(addr - 1, available, 0, &prev)) { addr = prev; goto prev_slice; } info.low_limit = addr; found = vm_unmapped_area(&info); if (!(found & ~PAGE_MASK)) return found; } /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. 
*/ return slice_find_area_bottomup(mm, TASK_UNMAPPED_BASE, len, available, psize, high_limit); } static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, const struct slice_mask *mask, int psize, int topdown, unsigned long high_limit) { if (topdown) return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit); else return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit); } static inline void slice_copy_mask(struct slice_mask *dst, const struct slice_mask *src) { dst->low_slices = src->low_slices; if (!SLICE_NUM_HIGH) return; bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH); } static inline void slice_or_mask(struct slice_mask *dst, const struct slice_mask *src1, const struct slice_mask *src2) { dst->low_slices = src1->low_slices | src2->low_slices; if (!SLICE_NUM_HIGH) return; bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH); } static inline void slice_andnot_mask(struct slice_mask *dst, const struct slice_mask *src1, const struct slice_mask *src2) { dst->low_slices = src1->low_slices & ~src2->low_slices; if (!SLICE_NUM_HIGH) return; bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH); } #ifdef CONFIG_PPC_64K_PAGES #define MMU_PAGE_BASE MMU_PAGE_64K #else #define MMU_PAGE_BASE MMU_PAGE_4K #endif unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, unsigned long flags, unsigned int psize, int topdown) { struct slice_mask good_mask; struct slice_mask potential_mask; const struct slice_mask *maskp; const struct slice_mask *compat_maskp = NULL; int fixed = (flags & MAP_FIXED); int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); unsigned long page_size = 1UL << pshift; struct mm_struct *mm = current->mm; unsigned long newaddr; unsigned long high_limit; high_limit = DEFAULT_MAP_WINDOW; if (addr >= high_limit || (fixed && (addr + len > high_limit))) high_limit = TASK_SIZE; if (len > high_limit) return -ENOMEM; if (len & (page_size - 1)) return -EINVAL; if (fixed) { if (addr & (page_size - 1)) return -EINVAL; if (addr > high_limit - len) return -ENOMEM; } if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) { /* * Increasing the slb_addr_limit does not require * slice mask cache to be recalculated because it should * be already initialised beyond the old address limit. */ mm_ctx_set_slb_addr_limit(&mm->context, high_limit); on_each_cpu(slice_flush_segments, mm, 1); } /* Sanity checks */ BUG_ON(mm->task_size == 0); BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0); VM_BUG_ON(radix_enabled()); slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize); slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n", addr, len, flags, topdown); /* If hint, make sure it matches our alignment restrictions */ if (!fixed && addr) { addr = ALIGN(addr, page_size); slice_dbg(" aligned addr=%lx\n", addr); /* Ignore hint if it's too large or overlaps a VMA */ if (addr > high_limit - len || addr < mmap_min_addr || !slice_area_is_free(mm, addr, len)) addr = 0; } /* First make up a "good" mask of slices that have the right size * already */ maskp = slice_mask_for_size(&mm->context, psize); /* * Here "good" means slices that are already the right page size, * "compat" means slices that have a compatible page size (i.e. * 4k in a 64k pagesize kernel), and "free" means slices without * any VMAs. 
* * If MAP_FIXED: * check if fits in good | compat => OK * check if fits in good | compat | free => convert free * else bad * If have hint: * check if hint fits in good => OK * check if hint fits in good | free => convert free * Otherwise: * search in good, found => OK * search in good | free, found => convert free * search in good | compat | free, found => convert free. */ /* * If we support combo pages, we can allow 64k pages in 4k slices * The mask copies could be avoided in most cases here if we had * a pointer to good mask for the next code to use. */ if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) { compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K); if (fixed) slice_or_mask(&good_mask, maskp, compat_maskp); else slice_copy_mask(&good_mask, maskp); } else { slice_copy_mask(&good_mask, maskp); } slice_print_mask(" good_mask", &good_mask); if (compat_maskp) slice_print_mask(" compat_mask", compat_maskp); /* First check hint if it's valid or if we have MAP_FIXED */ if (addr != 0 || fixed) { /* Check if we fit in the good mask. If we do, we just return, * nothing else to do */ if (slice_check_range_fits(mm, &good_mask, addr, len)) { slice_dbg(" fits good !\n"); newaddr = addr; goto return_addr; } } else { /* Now let's see if we can find something in the existing * slices for that size */ newaddr = slice_find_area(mm, len, &good_mask, psize, topdown, high_limit); if (newaddr != -ENOMEM) { /* Found within the good mask, we don't have to setup, * we thus return directly */ slice_dbg(" found area at 0x%lx\n", newaddr); goto return_addr; } } /* * We don't fit in the good mask, check what other slices are * empty and thus can be converted */ slice_mask_for_free(mm, &potential_mask, high_limit); slice_or_mask(&potential_mask, &potential_mask, &good_mask); slice_print_mask(" potential", &potential_mask); if (addr != 0 || fixed) { if (slice_check_range_fits(mm, &potential_mask, addr, len)) { slice_dbg(" fits potential !\n"); newaddr = addr; goto convert; } } /* If we have MAP_FIXED and failed the above steps, then error out */ if (fixed) return -EBUSY; slice_dbg(" search...\n"); /* If we had a hint that didn't work out, see if we can fit * anywhere in the good area. */ if (addr) { newaddr = slice_find_area(mm, len, &good_mask, psize, topdown, high_limit); if (newaddr != -ENOMEM) { slice_dbg(" found area at 0x%lx\n", newaddr); goto return_addr; } } /* Now let's see if we can find something in the existing slices * for that size plus free slices */ newaddr = slice_find_area(mm, len, &potential_mask, psize, topdown, high_limit); if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM && psize == MMU_PAGE_64K) { /* retry the search with 4k-page slices included */ slice_or_mask(&potential_mask, &potential_mask, compat_maskp); newaddr = slice_find_area(mm, len, &potential_mask, psize, topdown, high_limit); } if (newaddr == -ENOMEM) return -ENOMEM; slice_range_to_mask(newaddr, len, &potential_mask); slice_dbg(" found potential area at 0x%lx\n", newaddr); slice_print_mask(" mask", &potential_mask); convert: /* * Try to allocate the context before we do slice convert * so that we handle the context allocation failure gracefully. 
*/ if (need_extra_context(mm, newaddr)) { if (alloc_extended_context(mm, newaddr) < 0) return -ENOMEM; } slice_andnot_mask(&potential_mask, &potential_mask, &good_mask); if (compat_maskp && !fixed) slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp); if (potential_mask.low_slices || (SLICE_NUM_HIGH && !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) { slice_convert(mm, &potential_mask, psize); if (psize > MMU_PAGE_BASE) on_each_cpu(slice_flush_segments, mm, 1); } return newaddr; return_addr: if (need_extra_context(mm, newaddr)) { if (alloc_extended_context(mm, newaddr) < 0) return -ENOMEM; } return newaddr; } EXPORT_SYMBOL_GPL(slice_get_unmapped_area); unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { if (radix_enabled()) return generic_get_unmapped_area(filp, addr, len, pgoff, flags); return slice_get_unmapped_area(addr, len, flags, mm_ctx_user_psize(&current->mm->context), 0); } unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { if (radix_enabled()) return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags); return slice_get_unmapped_area(addr0, len, flags, mm_ctx_user_psize(&current->mm->context), 1); } unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr) { unsigned char *psizes; int index, mask_index; VM_BUG_ON(radix_enabled()); if (slice_addr_is_low(addr)) { psizes = mm_ctx_low_slices(&mm->context); index = GET_LOW_SLICE_INDEX(addr); } else { psizes = mm_ctx_high_slices(&mm->context); index = GET_HIGH_SLICE_INDEX(addr); } mask_index = index & 0x1; return (psizes[index >> 1] >> (mask_index * 4)) & 0xf; } EXPORT_SYMBOL_GPL(get_slice_psize); void slice_init_new_context_exec(struct mm_struct *mm) { unsigned char *hpsizes, *lpsizes; struct slice_mask *mask; unsigned int psize = mmu_virtual_psize; slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm); /* * In the case of exec, use the default limit. In the * case of fork it is just inherited from the mm being * duplicated. */ mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT); mm_ctx_set_user_psize(&mm->context, psize); /* * Set all slice psizes to the default. */ lpsizes = mm_ctx_low_slices(&mm->context); memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1); hpsizes = mm_ctx_high_slices(&mm->context); memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1); /* * Slice mask cache starts zeroed, fill the default size cache. */ mask = slice_mask_for_size(&mm->context, psize); mask->low_slices = ~0UL; if (SLICE_NUM_HIGH) bitmap_fill(mask->high_slices, SLICE_NUM_HIGH); } void slice_setup_new_exec(void) { struct mm_struct *mm = current->mm; slice_dbg("slice_setup_new_exec(mm=%p)\n", mm); if (!is_32bit_task()) return; mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW); } void slice_set_range_psize(struct mm_struct *mm, unsigned long start, unsigned long len, unsigned int psize) { struct slice_mask mask; VM_BUG_ON(radix_enabled()); slice_range_to_mask(start, len, &mask); slice_convert(mm, &mask, psize); } #ifdef CONFIG_HUGETLB_PAGE /* * is_hugepage_only_range() is used by generic code to verify whether * a normal mmap mapping (non hugetlbfs) is valid on a given area. 
* * until the generic code provides a more generic hook and/or starts * calling arch get_unmapped_area for MAP_FIXED (which our implementation * here knows how to deal with), we hijack it to keep standard mappings * away from us. * * because of that generic code limitation, MAP_FIXED mapping cannot * "convert" back a slice with no VMAs to the standard page size, only * get_unmapped_area() can. It would be possible to fix it here but I * prefer working on fixing the generic code instead. * * WARNING: This will not work if hugetlbfs isn't enabled since the * generic code will redefine that function as 0 in that. This is ok * for now as we only use slices with hugetlbfs enabled. This should * be fixed as the generic code gets fixed. */ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { const struct slice_mask *maskp; unsigned int psize = mm_ctx_user_psize(&mm->context); VM_BUG_ON(radix_enabled()); maskp = slice_mask_for_size(&mm->context, psize); /* We need to account for 4k slices too */ if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) { const struct slice_mask *compat_maskp; struct slice_mask available; compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K); slice_or_mask(&available, maskp, compat_maskp); return !slice_check_range_fits(mm, &available, addr, len); } return !slice_check_range_fits(mm, maskp, addr, len); } unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) { /* With radix we don't use slice, so derive it from vma*/ if (radix_enabled()) return vma_kernel_pagesize(vma); return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start)); } static int file_to_psize(struct file *file) { struct hstate *hstate = hstate_file(file); return shift_to_mmu_psize(huge_page_shift(hstate)); } unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { if (radix_enabled()) return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags); return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1); } #endif
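The low/high slice page-size arrays read by get_slice_psize() and written by slice_convert() above pack two 4-bit MMU page-size indices into each byte, which is what the index >> 1 and (index & 0x1) * 4 arithmetic selects. Below is a minimal userspace sketch of just that packing, assuming a fixed 16-slice array; the demo_ names are illustrative stand-ins, not kernel definitions.

/*
 * Minimal standalone sketch (not kernel code) of the 4-bit page-size
 * packing used by the lpsizes/hpsizes arrays in slice.c above: two MMU
 * page-size indices per byte, selected by (index & 0x1) and index >> 1.
 * The demo_ names and the fixed 16-slice array are assumptions made
 * for illustration only.
 */
#include <stdio.h>

static unsigned int demo_get_psize(const unsigned char *psizes,
                                   unsigned long index)
{
        unsigned int mask_index = index & 0x1;          /* low or high nibble */

        return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}

static void demo_set_psize(unsigned char *psizes, unsigned long index,
                           unsigned int psize)
{
        unsigned int mask_index = index & 0x1;

        psizes[index >> 1] = (psizes[index >> 1] & ~(0xf << (mask_index * 4))) |
                             (psize << (mask_index * 4));
}

int main(void)
{
        unsigned char lpsizes[8] = { 0 };       /* 16 slices, two per byte */

        demo_set_psize(lpsizes, 3, 5);          /* pretend slice 3 uses psize 5 */
        demo_set_psize(lpsizes, 4, 2);          /* and slice 4 uses psize 2 */
        printf("slice 3 -> %u, slice 4 -> %u\n",
               demo_get_psize(lpsizes, 3), demo_get_psize(lpsizes, 4));
        return 0;
}

Packing four bits per slice is also what lets slice_init_new_context_exec() initialise every slice to the default size with a single memset of the byte pattern (psize << 4) | psize.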
linux-master
arch/powerpc/mm/book3s64/slice.c
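slice.c above tracks the address space as 256MB slices below 4GB (bits in low_slices) and much larger slices above (the high_slices bitmap); slice_range_to_mask() converts an address range into that representation, and the rest of the allocator (slice_check_range_fits(), slice_convert(), the find-area helpers) works on such masks. The following is a minimal userspace sketch of that conversion for page-aligned ranges, assuming 256MB low slices, 1TB high slices and a 64-bit high mask; the DEMO_ names and constants are illustrative, not the kernel's definitions.

/*
 * Minimal standalone sketch (not kernel code) of slice_range_to_mask()
 * from slice.c above, for page-aligned ranges.  The DEMO_ constants
 * (256MB low slices below a 4GB boundary, 1TB high slices) and the
 * 64-bit high mask are simplifying assumptions for illustration; the
 * kernel uses its own constants and a bitmap for the high slices.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SLICE_LOW_SHIFT    28                      /* 256MB low slices (assumed) */
#define DEMO_SLICE_HIGH_SHIFT   40                      /* 1TB high slices (assumed) */
#define DEMO_SLICE_LOW_TOP      (1ULL << 32)            /* low area ends at 4GB */
#define DEMO_LOW_INDEX(a)       ((a) >> DEMO_SLICE_LOW_SHIFT)
#define DEMO_HIGH_INDEX(a)      ((a) >> DEMO_SLICE_HIGH_SHIFT)

struct demo_slice_mask {
        uint16_t low_slices;    /* one bit per 256MB slice below 4GB */
        uint64_t high_slices;   /* one bit per 1TB slice (truncated for the demo) */
};

static void demo_range_to_mask(uint64_t start, uint64_t len,
                               struct demo_slice_mask *m)
{
        uint64_t end = start + len - 1;

        m->low_slices = 0;
        m->high_slices = 0;

        if (start < DEMO_SLICE_LOW_TOP) {
                uint64_t mend = end < DEMO_SLICE_LOW_TOP - 1 ?
                                        end : DEMO_SLICE_LOW_TOP - 1;

                m->low_slices = (1u << (DEMO_LOW_INDEX(mend) + 1)) -
                                (1u << DEMO_LOW_INDEX(start));
        }

        if (end >= DEMO_SLICE_LOW_TOP) {
                uint64_t i;

                for (i = DEMO_HIGH_INDEX(start); i <= DEMO_HIGH_INDEX(end); i++)
                        m->high_slices |= 1ULL << i;
        }
}

int main(void)
{
        struct demo_slice_mask m;

        /* A 512MB mapping at 768MB touches low (256MB) slices 3 and 4. */
        demo_range_to_mask(768ULL << 20, 512ULL << 20, &m);
        printf("low=%#x high=%#llx\n", (unsigned)m.low_slices,
               (unsigned long long)m.high_slices);

        /* A 2GB mapping at 1TB touches high (1TB) slice 1 only. */
        demo_range_to_mask(1ULL << 40, 2ULL << 30, &m);
        printf("low=%#x high=%#llx\n", (unsigned)m.low_slices,
               (unsigned long long)m.high_slices);
        return 0;
}

Running the sketch prints low=0x18 high=0 for the first range and low=0 high=0x2 for the second.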
// SPDX-License-Identifier: GPL-2.0-or-later /* * TLB flush routines for radix kernels. * * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation. */ #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/memblock.h> #include <linux/mmu_context.h> #include <linux/sched/mm.h> #include <linux/debugfs.h> #include <asm/ppc-opcode.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/trace.h> #include <asm/cputhreads.h> #include <asm/plpar_wrappers.h> #include "internal.h" /* * tlbiel instruction for radix, set invalidation * i.e., r=1 and is=01 or is=10 or is=11 */ static __always_inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is, unsigned int pid, unsigned int ric, unsigned int prs) { unsigned long rb; unsigned long rs; rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53)); rs = ((unsigned long)pid << PPC_BITLSHIFT(31)); asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1) : : "r"(rb), "r"(rs), "i"(ric), "i"(prs) : "memory"); } static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) { unsigned int set; asm volatile("ptesync": : :"memory"); /* * Flush the first set of the TLB, and the entire Page Walk Cache * and partition table entries. Then flush the remaining sets of the * TLB. */ if (early_cpu_has_feature(CPU_FTR_HVMODE)) { /* MSR[HV] should flush partition scope translations first. */ tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0); if (!early_cpu_has_feature(CPU_FTR_ARCH_31)) { for (set = 1; set < num_sets; set++) tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0); } } /* Flush process scoped entries. */ tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1); if (!early_cpu_has_feature(CPU_FTR_ARCH_31)) { for (set = 1; set < num_sets; set++) tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1); } ppc_after_tlbiel_barrier(); } void radix__tlbiel_all(unsigned int action) { unsigned int is; switch (action) { case TLB_INVAL_SCOPE_GLOBAL: is = 3; break; case TLB_INVAL_SCOPE_LPID: is = 2; break; default: BUG(); } if (early_cpu_has_feature(CPU_FTR_ARCH_300)) tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is); else WARN(1, "%s called on pre-POWER9 CPU\n", __func__); asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory"); } static __always_inline void __tlbiel_pid(unsigned long pid, int set, unsigned long ric) { unsigned long rb,rs,prs,r; rb = PPC_BIT(53); /* IS = 1 */ rb |= set << PPC_BITLSHIFT(51); rs = ((unsigned long)pid) << PPC_BITLSHIFT(31); prs = 1; /* process scoped */ r = 1; /* radix format */ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); trace_tlbie(0, 1, rb, rs, ric, prs, r); } static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric) { unsigned long rb,rs,prs,r; rb = PPC_BIT(53); /* IS = 1 */ rs = pid << PPC_BITLSHIFT(31); prs = 1; /* process scoped */ r = 1; /* radix format */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); trace_tlbie(0, 0, rb, rs, ric, prs, r); } static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric) { unsigned long rb,rs,prs,r; rb = PPC_BIT(52); /* IS = 2 */ rs = lpid; prs = 0; /* partition scoped */ r = 1; /* radix format */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); trace_tlbie(lpid, 0, rb, rs, ric, prs, r); } static __always_inline void __tlbie_lpid_guest(unsigned long lpid, unsigned long ric) { unsigned long rb,rs,prs,r; rb = PPC_BIT(52); /* IS = 2 */ rs = lpid; prs = 1; /* 
process scoped */ r = 1; /* radix format */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); trace_tlbie(lpid, 0, rb, rs, ric, prs, r); } static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid, unsigned long ap, unsigned long ric) { unsigned long rb,rs,prs,r; rb = va & ~(PPC_BITMASK(52, 63)); rb |= ap << PPC_BITLSHIFT(58); rs = pid << PPC_BITLSHIFT(31); prs = 1; /* process scoped */ r = 1; /* radix format */ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); trace_tlbie(0, 1, rb, rs, ric, prs, r); } static __always_inline void __tlbie_va(unsigned long va, unsigned long pid, unsigned long ap, unsigned long ric) { unsigned long rb,rs,prs,r; rb = va & ~(PPC_BITMASK(52, 63)); rb |= ap << PPC_BITLSHIFT(58); rs = pid << PPC_BITLSHIFT(31); prs = 1; /* process scoped */ r = 1; /* radix format */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); trace_tlbie(0, 0, rb, rs, ric, prs, r); } static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid, unsigned long ap, unsigned long ric) { unsigned long rb,rs,prs,r; rb = va & ~(PPC_BITMASK(52, 63)); rb |= ap << PPC_BITLSHIFT(58); rs = lpid; prs = 0; /* partition scoped */ r = 1; /* radix format */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); trace_tlbie(lpid, 0, rb, rs, ric, prs, r); } static inline void fixup_tlbie_va(unsigned long va, unsigned long pid, unsigned long ap) { if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { asm volatile("ptesync": : :"memory"); __tlbie_va(va, 0, ap, RIC_FLUSH_TLB); } if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync": : :"memory"); __tlbie_va(va, pid, ap, RIC_FLUSH_TLB); } } static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid, unsigned long ap) { if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { asm volatile("ptesync": : :"memory"); __tlbie_pid(0, RIC_FLUSH_TLB); } if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync": : :"memory"); __tlbie_va(va, pid, ap, RIC_FLUSH_TLB); } } static inline void fixup_tlbie_pid(unsigned long pid) { /* * We can use any address for the invalidation, pick one which is * probably unused as an optimisation. */ unsigned long va = ((1UL << 52) - 1); if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { asm volatile("ptesync": : :"memory"); __tlbie_pid(0, RIC_FLUSH_TLB); } if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync": : :"memory"); __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB); } } static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid, unsigned long ap) { if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { asm volatile("ptesync": : :"memory"); __tlbie_lpid_va(va, 0, ap, RIC_FLUSH_TLB); } if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync": : :"memory"); __tlbie_lpid_va(va, lpid, ap, RIC_FLUSH_TLB); } } static inline void fixup_tlbie_lpid(unsigned long lpid) { /* * We can use any address for the invalidation, pick one which is * probably unused as an optimisation. 
*/ unsigned long va = ((1UL << 52) - 1); if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { asm volatile("ptesync": : :"memory"); __tlbie_lpid(0, RIC_FLUSH_TLB); } if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync": : :"memory"); __tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB); } } /* * We use 128 set in radix mode and 256 set in hpt mode. */ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric) { int set; asm volatile("ptesync": : :"memory"); switch (ric) { case RIC_FLUSH_PWC: /* For PWC, only one flush is needed */ __tlbiel_pid(pid, 0, RIC_FLUSH_PWC); ppc_after_tlbiel_barrier(); return; case RIC_FLUSH_TLB: __tlbiel_pid(pid, 0, RIC_FLUSH_TLB); break; case RIC_FLUSH_ALL: default: /* * Flush the first set of the TLB, and if * we're doing a RIC_FLUSH_ALL, also flush * the entire Page Walk Cache. */ __tlbiel_pid(pid, 0, RIC_FLUSH_ALL); } if (!cpu_has_feature(CPU_FTR_ARCH_31)) { /* For the remaining sets, just flush the TLB */ for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++) __tlbiel_pid(pid, set, RIC_FLUSH_TLB); } ppc_after_tlbiel_barrier(); asm volatile(PPC_RADIX_INVALIDATE_ERAT_USER "; isync" : : :"memory"); } static inline void _tlbie_pid(unsigned long pid, unsigned long ric) { asm volatile("ptesync": : :"memory"); /* * Workaround the fact that the "ric" argument to __tlbie_pid * must be a compile-time constraint to match the "i" constraint * in the asm statement. */ switch (ric) { case RIC_FLUSH_TLB: __tlbie_pid(pid, RIC_FLUSH_TLB); fixup_tlbie_pid(pid); break; case RIC_FLUSH_PWC: __tlbie_pid(pid, RIC_FLUSH_PWC); break; case RIC_FLUSH_ALL: default: __tlbie_pid(pid, RIC_FLUSH_ALL); fixup_tlbie_pid(pid); } asm volatile("eieio; tlbsync; ptesync": : :"memory"); } struct tlbiel_pid { unsigned long pid; unsigned long ric; }; static void do_tlbiel_pid(void *info) { struct tlbiel_pid *t = info; if (t->ric == RIC_FLUSH_TLB) _tlbiel_pid(t->pid, RIC_FLUSH_TLB); else if (t->ric == RIC_FLUSH_PWC) _tlbiel_pid(t->pid, RIC_FLUSH_PWC); else _tlbiel_pid(t->pid, RIC_FLUSH_ALL); } static inline void _tlbiel_pid_multicast(struct mm_struct *mm, unsigned long pid, unsigned long ric) { struct cpumask *cpus = mm_cpumask(mm); struct tlbiel_pid t = { .pid = pid, .ric = ric }; on_each_cpu_mask(cpus, do_tlbiel_pid, &t, 1); /* * Always want the CPU translations to be invalidated with tlbiel in * these paths, so while coprocessors must use tlbie, we can not * optimise away the tlbiel component. */ if (atomic_read(&mm->context.copros) > 0) _tlbie_pid(pid, RIC_FLUSH_ALL); } static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric) { asm volatile("ptesync": : :"memory"); /* * Workaround the fact that the "ric" argument to __tlbie_pid * must be a compile-time contraint to match the "i" constraint * in the asm statement. */ switch (ric) { case RIC_FLUSH_TLB: __tlbie_lpid(lpid, RIC_FLUSH_TLB); fixup_tlbie_lpid(lpid); break; case RIC_FLUSH_PWC: __tlbie_lpid(lpid, RIC_FLUSH_PWC); break; case RIC_FLUSH_ALL: default: __tlbie_lpid(lpid, RIC_FLUSH_ALL); fixup_tlbie_lpid(lpid); } asm volatile("eieio; tlbsync; ptesync": : :"memory"); } static __always_inline void _tlbie_lpid_guest(unsigned long lpid, unsigned long ric) { /* * Workaround the fact that the "ric" argument to __tlbie_pid * must be a compile-time contraint to match the "i" constraint * in the asm statement. 
*/ switch (ric) { case RIC_FLUSH_TLB: __tlbie_lpid_guest(lpid, RIC_FLUSH_TLB); break; case RIC_FLUSH_PWC: __tlbie_lpid_guest(lpid, RIC_FLUSH_PWC); break; case RIC_FLUSH_ALL: default: __tlbie_lpid_guest(lpid, RIC_FLUSH_ALL); } fixup_tlbie_lpid(lpid); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } static inline void __tlbiel_va_range(unsigned long start, unsigned long end, unsigned long pid, unsigned long page_size, unsigned long psize) { unsigned long addr; unsigned long ap = mmu_get_ap(psize); for (addr = start; addr < end; addr += page_size) __tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB); } static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid, unsigned long psize, unsigned long ric) { unsigned long ap = mmu_get_ap(psize); asm volatile("ptesync": : :"memory"); __tlbiel_va(va, pid, ap, ric); ppc_after_tlbiel_barrier(); } static inline void _tlbiel_va_range(unsigned long start, unsigned long end, unsigned long pid, unsigned long page_size, unsigned long psize, bool also_pwc) { asm volatile("ptesync": : :"memory"); if (also_pwc) __tlbiel_pid(pid, 0, RIC_FLUSH_PWC); __tlbiel_va_range(start, end, pid, page_size, psize); ppc_after_tlbiel_barrier(); } static inline void __tlbie_va_range(unsigned long start, unsigned long end, unsigned long pid, unsigned long page_size, unsigned long psize) { unsigned long addr; unsigned long ap = mmu_get_ap(psize); for (addr = start; addr < end; addr += page_size) __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB); fixup_tlbie_va_range(addr - page_size, pid, ap); } static __always_inline void _tlbie_va(unsigned long va, unsigned long pid, unsigned long psize, unsigned long ric) { unsigned long ap = mmu_get_ap(psize); asm volatile("ptesync": : :"memory"); __tlbie_va(va, pid, ap, ric); fixup_tlbie_va(va, pid, ap); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } struct tlbiel_va { unsigned long pid; unsigned long va; unsigned long psize; unsigned long ric; }; static void do_tlbiel_va(void *info) { struct tlbiel_va *t = info; if (t->ric == RIC_FLUSH_TLB) _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_TLB); else if (t->ric == RIC_FLUSH_PWC) _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_PWC); else _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_ALL); } static inline void _tlbiel_va_multicast(struct mm_struct *mm, unsigned long va, unsigned long pid, unsigned long psize, unsigned long ric) { struct cpumask *cpus = mm_cpumask(mm); struct tlbiel_va t = { .va = va, .pid = pid, .psize = psize, .ric = ric }; on_each_cpu_mask(cpus, do_tlbiel_va, &t, 1); if (atomic_read(&mm->context.copros) > 0) _tlbie_va(va, pid, psize, RIC_FLUSH_TLB); } struct tlbiel_va_range { unsigned long pid; unsigned long start; unsigned long end; unsigned long page_size; unsigned long psize; bool also_pwc; }; static void do_tlbiel_va_range(void *info) { struct tlbiel_va_range *t = info; _tlbiel_va_range(t->start, t->end, t->pid, t->page_size, t->psize, t->also_pwc); } static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid, unsigned long psize, unsigned long ric) { unsigned long ap = mmu_get_ap(psize); asm volatile("ptesync": : :"memory"); __tlbie_lpid_va(va, lpid, ap, ric); fixup_tlbie_lpid_va(va, lpid, ap); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } static inline void _tlbie_va_range(unsigned long start, unsigned long end, unsigned long pid, unsigned long page_size, unsigned long psize, bool also_pwc) { asm volatile("ptesync": : :"memory"); if (also_pwc) __tlbie_pid(pid, RIC_FLUSH_PWC); __tlbie_va_range(start, end, pid, page_size, psize); asm 
volatile("eieio; tlbsync; ptesync": : :"memory"); } static inline void _tlbiel_va_range_multicast(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long pid, unsigned long page_size, unsigned long psize, bool also_pwc) { struct cpumask *cpus = mm_cpumask(mm); struct tlbiel_va_range t = { .start = start, .end = end, .pid = pid, .page_size = page_size, .psize = psize, .also_pwc = also_pwc }; on_each_cpu_mask(cpus, do_tlbiel_va_range, &t, 1); if (atomic_read(&mm->context.copros) > 0) _tlbie_va_range(start, end, pid, page_size, psize, also_pwc); } /* * Base TLB flushing operations: * * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes kernel pages * * - local_* variants of page and mm only apply to the current * processor */ void radix__local_flush_tlb_mm(struct mm_struct *mm) { unsigned long pid = mm->context.id; if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT)) return; preempt_disable(); _tlbiel_pid(pid, RIC_FLUSH_TLB); preempt_enable(); } EXPORT_SYMBOL(radix__local_flush_tlb_mm); #ifndef CONFIG_SMP void radix__local_flush_all_mm(struct mm_struct *mm) { unsigned long pid = mm->context.id; if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT)) return; preempt_disable(); _tlbiel_pid(pid, RIC_FLUSH_ALL); preempt_enable(); } EXPORT_SYMBOL(radix__local_flush_all_mm); static void __flush_all_mm(struct mm_struct *mm, bool fullmm) { radix__local_flush_all_mm(mm); } #endif /* CONFIG_SMP */ void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, int psize) { unsigned long pid = mm->context.id; if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT)) return; preempt_disable(); _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB); preempt_enable(); } void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { #ifdef CONFIG_HUGETLB_PAGE /* need the return fix for nohash.c */ if (is_vm_hugetlb_page(vma)) return radix__local_flush_hugetlb_page(vma, vmaddr); #endif radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize); } EXPORT_SYMBOL(radix__local_flush_tlb_page); static bool mm_needs_flush_escalation(struct mm_struct *mm) { /* * The P9 nest MMU has issues with the page walk cache caching PTEs * and not flushing them when RIC = 0 for a PID/LPID invalidate. * * This may have been fixed in shipping firmware (by disabling PWC * or preventing it from caching PTEs), but until that is confirmed, * this workaround is required - escalate all RIC=0 IS=1/2/3 flushes * to RIC=2. * * POWER10 (and P9P) does not have this problem. */ if (cpu_has_feature(CPU_FTR_ARCH_31)) return false; if (atomic_read(&mm->context.copros) > 0) return true; return false; } /* * If always_flush is true, then flush even if this CPU can't be removed * from mm_cpumask. */ void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush) { unsigned long pid = mm->context.id; int cpu = smp_processor_id(); /* * A kthread could have done a mmget_not_zero() after the flushing CPU * checked mm_cpumask, and be in the process of kthread_use_mm when * interrupted here. In that case, current->mm will be set to mm, * because kthread_use_mm() setting ->mm and switching to the mm is * done with interrupts off. */ if (current->mm == mm) goto out; if (current->active_mm == mm) { unsigned long flags; WARN_ON_ONCE(current->mm != NULL); /* * It is a kernel thread and is using mm as the lazy tlb, so * switch it to init_mm. 
This is not always called from IPI * (e.g., flush_type_needed), so must disable irqs. */ local_irq_save(flags); mmgrab_lazy_tlb(&init_mm); current->active_mm = &init_mm; switch_mm_irqs_off(mm, &init_mm, current); mmdrop_lazy_tlb(mm); local_irq_restore(flags); } /* * This IPI may be initiated from any source including those not * running the mm, so there may be a racing IPI that comes after * this one which finds the cpumask already clear. Check and avoid * underflowing the active_cpus count in that case. The race should * not otherwise be a problem, but the TLB must be flushed because * that's what the caller expects. */ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { dec_mm_active_cpus(mm); cpumask_clear_cpu(cpu, mm_cpumask(mm)); always_flush = true; } out: if (always_flush) _tlbiel_pid(pid, RIC_FLUSH_ALL); } #ifdef CONFIG_SMP static void do_exit_flush_lazy_tlb(void *arg) { struct mm_struct *mm = arg; exit_lazy_flush_tlb(mm, true); } static void exit_flush_lazy_tlbs(struct mm_struct *mm) { /* * Would be nice if this was async so it could be run in * parallel with our local flush, but generic code does not * give a good API for it. Could extend the generic code or * make a special powerpc IPI for flushing TLBs. * For now it's not too performance critical. */ smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb, (void *)mm, 1); } #else /* CONFIG_SMP */ static inline void exit_flush_lazy_tlbs(struct mm_struct *mm) { } #endif /* CONFIG_SMP */ static DEFINE_PER_CPU(unsigned int, mm_cpumask_trim_clock); /* * Interval between flushes at which we send out IPIs to check whether the * mm_cpumask can be trimmed for the case where it's not a single-threaded * process flushing its own mm. The intent is to reduce the cost of later * flushes. Don't want this to be so low that it adds noticable cost to TLB * flushing, or so high that it doesn't help reduce global TLBIEs. */ static unsigned long tlb_mm_cpumask_trim_timer = 1073; static bool tick_and_test_trim_clock(void) { if (__this_cpu_inc_return(mm_cpumask_trim_clock) == tlb_mm_cpumask_trim_timer) { __this_cpu_write(mm_cpumask_trim_clock, 0); return true; } return false; } enum tlb_flush_type { FLUSH_TYPE_NONE, FLUSH_TYPE_LOCAL, FLUSH_TYPE_GLOBAL, }; static enum tlb_flush_type flush_type_needed(struct mm_struct *mm, bool fullmm) { int active_cpus = atomic_read(&mm->context.active_cpus); int cpu = smp_processor_id(); if (active_cpus == 0) return FLUSH_TYPE_NONE; if (active_cpus == 1 && cpumask_test_cpu(cpu, mm_cpumask(mm))) { if (current->mm != mm) { /* * Asynchronous flush sources may trim down to nothing * if the process is not running, so occasionally try * to trim. */ if (tick_and_test_trim_clock()) { exit_lazy_flush_tlb(mm, true); return FLUSH_TYPE_NONE; } } return FLUSH_TYPE_LOCAL; } /* Coprocessors require TLBIE to invalidate nMMU. */ if (atomic_read(&mm->context.copros) > 0) return FLUSH_TYPE_GLOBAL; /* * In the fullmm case there's no point doing the exit_flush_lazy_tlbs * because the mm is being taken down anyway, and a TLBIE tends to * be faster than an IPI+TLBIEL. */ if (fullmm) return FLUSH_TYPE_GLOBAL; /* * If we are running the only thread of a single-threaded process, * then we should almost always be able to trim off the rest of the * CPU mask (except in the case of use_mm() races), so always try * trimming the mask. 
*/ if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm) { exit_flush_lazy_tlbs(mm); /* * use_mm() race could prevent IPIs from being able to clear * the cpumask here, however those users are established * after our first check (and so after the PTEs are removed), * and the TLB still gets flushed by the IPI, so this CPU * will only require a local flush. */ return FLUSH_TYPE_LOCAL; } /* * Occasionally try to trim down the cpumask. It's possible this can * bring the mask to zero, which results in no flush. */ if (tick_and_test_trim_clock()) { exit_flush_lazy_tlbs(mm); if (current->mm == mm) return FLUSH_TYPE_LOCAL; if (cpumask_test_cpu(cpu, mm_cpumask(mm))) exit_lazy_flush_tlb(mm, true); return FLUSH_TYPE_NONE; } return FLUSH_TYPE_GLOBAL; } #ifdef CONFIG_SMP void radix__flush_tlb_mm(struct mm_struct *mm) { unsigned long pid; enum tlb_flush_type type; pid = mm->context.id; if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT)) return; preempt_disable(); /* * Order loads of mm_cpumask (in flush_type_needed) vs previous * stores to clear ptes before the invalidate. See barrier in * switch_mm_irqs_off */ smp_mb(); type = flush_type_needed(mm, false); if (type == FLUSH_TYPE_LOCAL) { _tlbiel_pid(pid, RIC_FLUSH_TLB); } else if (type == FLUSH_TYPE_GLOBAL) { if (!mmu_has_feature(MMU_FTR_GTSE)) { unsigned long tgt = H_RPTI_TARGET_CMMU; if (atomic_read(&mm->context.copros) > 0) tgt |= H_RPTI_TARGET_NMMU; pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, H_RPTI_PAGE_ALL, 0, -1UL); } else if (cputlb_use_tlbie()) { if (mm_needs_flush_escalation(mm)) _tlbie_pid(pid, RIC_FLUSH_ALL); else _tlbie_pid(pid, RIC_FLUSH_TLB); } else { _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB); } } preempt_enable(); mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); } EXPORT_SYMBOL(radix__flush_tlb_mm); static void __flush_all_mm(struct mm_struct *mm, bool fullmm) { unsigned long pid; enum tlb_flush_type type; pid = mm->context.id; if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT)) return; preempt_disable(); smp_mb(); /* see radix__flush_tlb_mm */ type = flush_type_needed(mm, fullmm); if (type == FLUSH_TYPE_LOCAL) { _tlbiel_pid(pid, RIC_FLUSH_ALL); } else if (type == FLUSH_TYPE_GLOBAL) { if (!mmu_has_feature(MMU_FTR_GTSE)) { unsigned long tgt = H_RPTI_TARGET_CMMU; unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | H_RPTI_TYPE_PRT; if (atomic_read(&mm->context.copros) > 0) tgt |= H_RPTI_TARGET_NMMU; pseries_rpt_invalidate(pid, tgt, type, H_RPTI_PAGE_ALL, 0, -1UL); } else if (cputlb_use_tlbie()) _tlbie_pid(pid, RIC_FLUSH_ALL); else _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL); } preempt_enable(); mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); } void radix__flush_all_mm(struct mm_struct *mm) { __flush_all_mm(mm, false); } EXPORT_SYMBOL(radix__flush_all_mm); void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, int psize) { unsigned long pid; enum tlb_flush_type type; pid = mm->context.id; if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT)) return; preempt_disable(); smp_mb(); /* see radix__flush_tlb_mm */ type = flush_type_needed(mm, false); if (type == FLUSH_TYPE_LOCAL) { _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB); } else if (type == FLUSH_TYPE_GLOBAL) { if (!mmu_has_feature(MMU_FTR_GTSE)) { unsigned long tgt, pg_sizes, size; tgt = H_RPTI_TARGET_CMMU; pg_sizes = psize_to_rpti_pgsize(psize); size = 1UL << mmu_psize_to_shift(psize); if (atomic_read(&mm->context.copros) > 0) tgt |= H_RPTI_TARGET_NMMU; pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, pg_sizes, vmaddr, vmaddr + size); } else if 
(cputlb_use_tlbie()) _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB); else _tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB); } preempt_enable(); } void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { #ifdef CONFIG_HUGETLB_PAGE if (is_vm_hugetlb_page(vma)) return radix__flush_hugetlb_page(vma, vmaddr); #endif radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize); } EXPORT_SYMBOL(radix__flush_tlb_page); #endif /* CONFIG_SMP */ static void do_tlbiel_kernel(void *info) { _tlbiel_pid(0, RIC_FLUSH_ALL); } static inline void _tlbiel_kernel_broadcast(void) { on_each_cpu(do_tlbiel_kernel, NULL, 1); if (tlbie_capable) { /* * Coherent accelerators don't refcount kernel memory mappings, * so have to always issue a tlbie for them. This is quite a * slow path anyway. */ _tlbie_pid(0, RIC_FLUSH_ALL); } } /* * If kernel TLBIs ever become local rather than global, then * drivers/misc/ocxl/link.c:ocxl_link_add_pe will need some work, as it * assumes kernel TLBIs are global. */ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end) { if (!mmu_has_feature(MMU_FTR_GTSE)) { unsigned long tgt = H_RPTI_TARGET_CMMU | H_RPTI_TARGET_NMMU; unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | H_RPTI_TYPE_PRT; pseries_rpt_invalidate(0, tgt, type, H_RPTI_PAGE_ALL, start, end); } else if (cputlb_use_tlbie()) _tlbie_pid(0, RIC_FLUSH_ALL); else _tlbiel_kernel_broadcast(); } EXPORT_SYMBOL(radix__flush_tlb_kernel_range); /* * Doesn't appear to be used anywhere. Remove. */ #define TLB_FLUSH_ALL -1UL /* * Number of pages above which we invalidate the entire PID rather than * flush individual pages, for local and global flushes respectively. * * tlbie goes out to the interconnect and individual ops are more costly. * It also does not iterate over sets like the local tlbiel variant when * invalidating a full PID, so it has a far lower threshold to change from * individual page flushes to full-pid flushes. */ static u32 tlb_single_page_flush_ceiling __read_mostly = 33; static u32 tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2; static inline void __radix__flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end) { unsigned long pid; unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift; unsigned long page_size = 1UL << page_shift; unsigned long nr_pages = (end - start) >> page_shift; bool flush_pid, flush_pwc = false; enum tlb_flush_type type; pid = mm->context.id; if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT)) return; WARN_ON_ONCE(end == TLB_FLUSH_ALL); preempt_disable(); smp_mb(); /* see radix__flush_tlb_mm */ type = flush_type_needed(mm, false); if (type == FLUSH_TYPE_NONE) goto out; if (type == FLUSH_TYPE_GLOBAL) flush_pid = nr_pages > tlb_single_page_flush_ceiling; else flush_pid = nr_pages > tlb_local_single_page_flush_ceiling; /* * full pid flush already does the PWC flush. if it is not full pid * flush check the range is more than PMD and force a pwc flush * mremap() depends on this behaviour. 
*/ if (!flush_pid && (end - start) >= PMD_SIZE) flush_pwc = true; if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) { unsigned long type = H_RPTI_TYPE_TLB; unsigned long tgt = H_RPTI_TARGET_CMMU; unsigned long pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize); if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) pg_sizes |= psize_to_rpti_pgsize(MMU_PAGE_2M); if (atomic_read(&mm->context.copros) > 0) tgt |= H_RPTI_TARGET_NMMU; if (flush_pwc) type |= H_RPTI_TYPE_PWC; pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end); } else if (flush_pid) { /* * We are now flushing a range larger than PMD size force a RIC_FLUSH_ALL */ if (type == FLUSH_TYPE_LOCAL) { _tlbiel_pid(pid, RIC_FLUSH_ALL); } else { if (cputlb_use_tlbie()) { _tlbie_pid(pid, RIC_FLUSH_ALL); } else { _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL); } } } else { bool hflush; unsigned long hstart, hend; hstart = (start + PMD_SIZE - 1) & PMD_MASK; hend = end & PMD_MASK; hflush = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hstart < hend; if (type == FLUSH_TYPE_LOCAL) { asm volatile("ptesync": : :"memory"); if (flush_pwc) /* For PWC, only one flush is needed */ __tlbiel_pid(pid, 0, RIC_FLUSH_PWC); __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize); if (hflush) __tlbiel_va_range(hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M); ppc_after_tlbiel_barrier(); } else if (cputlb_use_tlbie()) { asm volatile("ptesync": : :"memory"); if (flush_pwc) __tlbie_pid(pid, RIC_FLUSH_PWC); __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize); if (hflush) __tlbie_va_range(hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } else { _tlbiel_va_range_multicast(mm, start, end, pid, page_size, mmu_virtual_psize, flush_pwc); if (hflush) _tlbiel_va_range_multicast(mm, hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, flush_pwc); } } out: preempt_enable(); mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end); } void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { #ifdef CONFIG_HUGETLB_PAGE if (is_vm_hugetlb_page(vma)) return radix__flush_hugetlb_tlb_range(vma, start, end); #endif __radix__flush_tlb_range(vma->vm_mm, start, end); } EXPORT_SYMBOL(radix__flush_tlb_range); static int radix_get_mmu_psize(int page_size) { int psize; if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift)) psize = mmu_virtual_psize; else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift)) psize = MMU_PAGE_2M; else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift)) psize = MMU_PAGE_1G; else return -1; return psize; } /* * Flush partition scoped LPID address translation for all CPUs. */ void radix__flush_tlb_lpid_page(unsigned int lpid, unsigned long addr, unsigned long page_size) { int psize = radix_get_mmu_psize(page_size); _tlbie_lpid_va(addr, lpid, psize, RIC_FLUSH_TLB); } EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid_page); /* * Flush partition scoped PWC from LPID for all CPUs. 
*/ void radix__flush_pwc_lpid(unsigned int lpid) { _tlbie_lpid(lpid, RIC_FLUSH_PWC); } EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid); /* * Flush partition scoped translations from LPID (=LPIDR) */ void radix__flush_all_lpid(unsigned int lpid) { _tlbie_lpid(lpid, RIC_FLUSH_ALL); } EXPORT_SYMBOL_GPL(radix__flush_all_lpid); /* * Flush process scoped translations from LPID (=LPIDR) */ void radix__flush_all_lpid_guest(unsigned int lpid) { _tlbie_lpid_guest(lpid, RIC_FLUSH_ALL); } void radix__tlb_flush(struct mmu_gather *tlb) { int psize = 0; struct mm_struct *mm = tlb->mm; int page_size = tlb->page_size; unsigned long start = tlb->start; unsigned long end = tlb->end; /* * if page size is not something we understand, do a full mm flush * * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush * that flushes the process table entry cache upon process teardown. * See the comment for radix in arch_exit_mmap(). */ if (tlb->fullmm) { if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) { /* * Shootdown based lazy tlb mm refcounting means we * have to IPI everyone in the mm_cpumask anyway soon * when the mm goes away, so might as well do it as * part of the final flush now. * * If lazy shootdown was improved to reduce IPIs (e.g., * by batching), then it may end up being better to use * tlbies here instead. */ preempt_disable(); smp_mb(); /* see radix__flush_tlb_mm */ exit_flush_lazy_tlbs(mm); _tlbiel_pid(mm->context.id, RIC_FLUSH_ALL); /* * It should not be possible to have coprocessors still * attached here. */ if (WARN_ON_ONCE(atomic_read(&mm->context.copros) > 0)) __flush_all_mm(mm, true); preempt_enable(); } else { __flush_all_mm(mm, true); } } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) { if (!tlb->freed_tables) radix__flush_tlb_mm(mm); else radix__flush_all_mm(mm); } else { if (!tlb->freed_tables) radix__flush_tlb_range_psize(mm, start, end, psize); else radix__flush_tlb_pwc_range_psize(mm, start, end, psize); } } static void __radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, unsigned long end, int psize, bool also_pwc) { unsigned long pid; unsigned int page_shift = mmu_psize_defs[psize].shift; unsigned long page_size = 1UL << page_shift; unsigned long nr_pages = (end - start) >> page_shift; bool flush_pid; enum tlb_flush_type type; pid = mm->context.id; if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT)) return; WARN_ON_ONCE(end == TLB_FLUSH_ALL); preempt_disable(); smp_mb(); /* see radix__flush_tlb_mm */ type = flush_type_needed(mm, false); if (type == FLUSH_TYPE_NONE) goto out; if (type == FLUSH_TYPE_GLOBAL) flush_pid = nr_pages > tlb_single_page_flush_ceiling; else flush_pid = nr_pages > tlb_local_single_page_flush_ceiling; if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) { unsigned long tgt = H_RPTI_TARGET_CMMU; unsigned long type = H_RPTI_TYPE_TLB; unsigned long pg_sizes = psize_to_rpti_pgsize(psize); if (also_pwc) type |= H_RPTI_TYPE_PWC; if (atomic_read(&mm->context.copros) > 0) tgt |= H_RPTI_TARGET_NMMU; pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end); } else if (flush_pid) { if (type == FLUSH_TYPE_LOCAL) { _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); } else { if (cputlb_use_tlbie()) { if (mm_needs_flush_escalation(mm)) also_pwc = true; _tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); } else { _tlbiel_pid_multicast(mm, pid, also_pwc ? 
RIC_FLUSH_ALL : RIC_FLUSH_TLB); } } } else { if (type == FLUSH_TYPE_LOCAL) _tlbiel_va_range(start, end, pid, page_size, psize, also_pwc); else if (cputlb_use_tlbie()) _tlbie_va_range(start, end, pid, page_size, psize, also_pwc); else _tlbiel_va_range_multicast(mm, start, end, pid, page_size, psize, also_pwc); } out: preempt_enable(); mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end); } void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, unsigned long end, int psize) { return __radix__flush_tlb_range_psize(mm, start, end, psize, false); } void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start, unsigned long end, int psize) { __radix__flush_tlb_range_psize(mm, start, end, psize, true); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr) { unsigned long pid, end; enum tlb_flush_type type; pid = mm->context.id; if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT)) return; /* 4k page size, just blow the world */ if (PAGE_SIZE == 0x1000) { radix__flush_all_mm(mm); return; } end = addr + HPAGE_PMD_SIZE; /* Otherwise first do the PWC, then iterate the pages. */ preempt_disable(); smp_mb(); /* see radix__flush_tlb_mm */ type = flush_type_needed(mm, false); if (type == FLUSH_TYPE_LOCAL) { _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); } else if (type == FLUSH_TYPE_GLOBAL) { if (!mmu_has_feature(MMU_FTR_GTSE)) { unsigned long tgt, type, pg_sizes; tgt = H_RPTI_TARGET_CMMU; type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | H_RPTI_TYPE_PRT; pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize); if (atomic_read(&mm->context.copros) > 0) tgt |= H_RPTI_TARGET_NMMU; pseries_rpt_invalidate(pid, tgt, type, pg_sizes, addr, end); } else if (cputlb_use_tlbie()) _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); else _tlbiel_va_range_multicast(mm, addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); } preempt_enable(); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ void radix__flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M); } EXPORT_SYMBOL(radix__flush_pmd_tlb_range); void radix__flush_pud_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_1G); } EXPORT_SYMBOL(radix__flush_pud_tlb_range); void radix__flush_tlb_all(void) { unsigned long rb,prs,r,rs; unsigned long ric = RIC_FLUSH_ALL; rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */ prs = 0; /* partition scoped */ r = 1; /* radix format */ rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */ asm volatile("ptesync": : :"memory"); /* * now flush guest entries by passing PRS = 1 and LPID != 0 */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory"); /* * now flush host entires by passing PRS = 0 and LPID == 0 */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory"); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE static __always_inline void __tlbie_pid_lpid(unsigned long pid, unsigned long lpid, unsigned long ric) { unsigned long rb, rs, prs, r; rb = PPC_BIT(53); /* IS = 1 */ rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31))); prs = 1; /* process scoped */ r = 1; /* radix format */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), 
"r"(rs) : "memory"); trace_tlbie(0, 0, rb, rs, ric, prs, r); } static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid, unsigned long lpid, unsigned long ap, unsigned long ric) { unsigned long rb, rs, prs, r; rb = va & ~(PPC_BITMASK(52, 63)); rb |= ap << PPC_BITLSHIFT(58); rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31))); prs = 1; /* process scoped */ r = 1; /* radix format */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); trace_tlbie(0, 0, rb, rs, ric, prs, r); } static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid) { /* * We can use any address for the invalidation, pick one which is * probably unused as an optimisation. */ unsigned long va = ((1UL << 52) - 1); if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { asm volatile("ptesync" : : : "memory"); __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB); } if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync" : : : "memory"); __tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB); } } static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid, unsigned long ric) { asm volatile("ptesync" : : : "memory"); /* * Workaround the fact that the "ric" argument to __tlbie_pid * must be a compile-time contraint to match the "i" constraint * in the asm statement. */ switch (ric) { case RIC_FLUSH_TLB: __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB); fixup_tlbie_pid_lpid(pid, lpid); break; case RIC_FLUSH_PWC: __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC); break; case RIC_FLUSH_ALL: default: __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL); fixup_tlbie_pid_lpid(pid, lpid); } asm volatile("eieio; tlbsync; ptesync" : : : "memory"); } static inline void fixup_tlbie_va_range_lpid(unsigned long va, unsigned long pid, unsigned long lpid, unsigned long ap) { if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { asm volatile("ptesync" : : : "memory"); __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB); } if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync" : : : "memory"); __tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB); } } static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end, unsigned long pid, unsigned long lpid, unsigned long page_size, unsigned long psize) { unsigned long addr; unsigned long ap = mmu_get_ap(psize); for (addr = start; addr < end; addr += page_size) __tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB); fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap); } static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end, unsigned long pid, unsigned long lpid, unsigned long page_size, unsigned long psize, bool also_pwc) { asm volatile("ptesync" : : : "memory"); if (also_pwc) __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC); __tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize); asm volatile("eieio; tlbsync; ptesync" : : : "memory"); } /* * Performs process-scoped invalidations for a given LPID * as part of H_RPT_INVALIDATE hcall. */ void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid, unsigned long type, unsigned long pg_sizes, unsigned long start, unsigned long end) { unsigned long psize, nr_pages; struct mmu_psize_def *def; bool flush_pid; /* * A H_RPTI_TYPE_ALL request implies RIC=3, hence * do a single IS=1 based flush. 
*/ if ((type & H_RPTI_TYPE_ALL) == H_RPTI_TYPE_ALL) { _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL); return; } if (type & H_RPTI_TYPE_PWC) _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC); /* Full PID flush */ if (start == 0 && end == -1) return _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB); /* Do range invalidation for all the valid page sizes */ for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { def = &mmu_psize_defs[psize]; if (!(pg_sizes & def->h_rpt_pgsize)) continue; nr_pages = (end - start) >> def->shift; flush_pid = nr_pages > tlb_single_page_flush_ceiling; /* * If the number of pages spanning the range is above * the ceiling, convert the request into a full PID flush. * And since PID flush takes out all the page sizes, there * is no need to consider remaining page sizes. */ if (flush_pid) { _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB); return; } _tlbie_va_range_lpid(start, end, pid, lpid, (1UL << def->shift), psize, false); } } EXPORT_SYMBOL_GPL(do_h_rpt_invalidate_prt); #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ static int __init create_tlb_single_page_flush_ceiling(void) { debugfs_create_u32("tlb_single_page_flush_ceiling", 0600, arch_debugfs_dir, &tlb_single_page_flush_ceiling); debugfs_create_u32("tlb_local_single_page_flush_ceiling", 0600, arch_debugfs_dir, &tlb_local_single_page_flush_ceiling); return 0; } late_initcall(create_tlb_single_page_flush_ceiling);
linux-master
arch/powerpc/mm/book3s64/radix_tlb.c
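The range-flush helpers above (__radix__flush_tlb_range_psize and do_h_rpt_invalidate_prt) share one heuristic: if a range covers more pages than the debugfs-tunable tlb_single_page_flush_ceiling, a single PID-wide invalidation is preferred over per-page tlbie. The following is a minimal userspace sketch of that decision only; the ceiling default of 33 and the 64K page shift are illustrative assumptions, not values taken from this file.

/*
 * Illustrative userspace model of the range-vs-PID flush decision used by
 * the radix range-flush paths: when a range spans more pages than the
 * tunable ceiling, one PID-wide invalidation is assumed cheaper than
 * per-page tlbie.  The ceiling value (33) and the 64K page shift below are
 * assumptions for the example.
 */
#include <stdio.h>
#include <stdbool.h>

static unsigned long tlb_single_page_flush_ceiling = 33; /* assumed default */

static bool use_full_pid_flush(unsigned long start, unsigned long end,
			       unsigned int page_shift)
{
	unsigned long nr_pages = (end - start) >> page_shift;

	return nr_pages > tlb_single_page_flush_ceiling;
}

int main(void)
{
	unsigned int shift = 16; /* assumed 64K pages */

	/* 1MB range: 16 pages -> per-page invalidation */
	printf("1MB  range: %s\n",
	       use_full_pid_flush(0x10000000UL, 0x10100000UL, shift) ?
	       "full PID flush" : "per-page flush");

	/* 16MB range: 256 pages -> full PID flush */
	printf("16MB range: %s\n",
	       use_full_pid_flush(0x10000000UL, 0x11000000UL, shift) ?
	       "full PID flush" : "per-page flush");
	return 0;
}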
// SPDX-License-Identifier: GPL-2.0-or-later /* * IOMMU helpers in MMU context. * * Copyright (C) 2015 IBM Corp. <[email protected]> */ #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/rculist.h> #include <linux/vmalloc.h> #include <linux/mutex.h> #include <linux/migrate.h> #include <linux/hugetlb.h> #include <linux/swap.h> #include <linux/sizes.h> #include <linux/mm.h> #include <asm/mmu_context.h> #include <asm/pte-walk.h> #include <linux/mm_inline.h> static DEFINE_MUTEX(mem_list_mutex); #define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY 0x1 #define MM_IOMMU_TABLE_GROUP_PAGE_MASK ~(SZ_4K - 1) struct mm_iommu_table_group_mem_t { struct list_head next; struct rcu_head rcu; unsigned long used; atomic64_t mapped; unsigned int pageshift; u64 ua; /* userspace address */ u64 entries; /* number of entries in hpas/hpages[] */ /* * in mm_iommu_get we temporarily use this to store * struct page address. * * We need to convert ua to hpa in real mode. Make it * simpler by storing physical address. */ union { struct page **hpages; /* vmalloc'ed */ phys_addr_t *hpas; }; #define MM_IOMMU_TABLE_INVALID_HPA ((uint64_t)-1) u64 dev_hpa; /* Device memory base address */ }; bool mm_iommu_preregistered(struct mm_struct *mm) { return !list_empty(&mm->context.iommu_group_mem_list); } EXPORT_SYMBOL_GPL(mm_iommu_preregistered); static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, unsigned long entries, unsigned long dev_hpa, struct mm_iommu_table_group_mem_t **pmem) { struct mm_iommu_table_group_mem_t *mem, *mem2; long i, ret, locked_entries = 0, pinned = 0; unsigned int pageshift; unsigned long entry, chunk; if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) { ret = account_locked_vm(mm, entries, true); if (ret) return ret; locked_entries = entries; } mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) { ret = -ENOMEM; goto unlock_exit; } if (dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) { mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT)); mem->dev_hpa = dev_hpa; goto good_exit; } mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA; /* * For a starting point for a maximum page size calculation * we use @ua and @entries natural alignment to allow IOMMU pages * smaller than huge pages but still bigger than PAGE_SIZE. */ mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); if (!mem->hpas) { kfree(mem); ret = -ENOMEM; goto unlock_exit; } mmap_read_lock(mm); chunk = (1UL << (PAGE_SHIFT + MAX_ORDER)) / sizeof(struct vm_area_struct *); chunk = min(chunk, entries); for (entry = 0; entry < entries; entry += chunk) { unsigned long n = min(entries - entry, chunk); ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n, FOLL_WRITE | FOLL_LONGTERM, mem->hpages + entry); if (ret == n) { pinned += n; continue; } if (ret > 0) pinned += ret; break; } mmap_read_unlock(mm); if (pinned != entries) { if (!ret) ret = -EFAULT; goto free_exit; } good_exit: atomic64_set(&mem->mapped, 1); mem->used = 1; mem->ua = ua; mem->entries = entries; mutex_lock(&mem_list_mutex); list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next, lockdep_is_held(&mem_list_mutex)) { /* Overlap? */ if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) && (ua < (mem2->ua + (mem2->entries << PAGE_SHIFT)))) { ret = -EINVAL; mutex_unlock(&mem_list_mutex); goto free_exit; } } if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) { /* * Allow to use larger than 64k IOMMU pages. Only do that * if we are backed by hugetlb. Skip device memory as it is not * backed with page structs. 
*/ pageshift = PAGE_SHIFT; for (i = 0; i < entries; ++i) { struct page *page = mem->hpages[i]; if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) pageshift = page_shift(compound_head(page)); mem->pageshift = min(mem->pageshift, pageshift); /* * We don't need struct page reference any more, switch * to physical address. */ mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; } } list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list); mutex_unlock(&mem_list_mutex); *pmem = mem; return 0; free_exit: /* free the references taken */ unpin_user_pages(mem->hpages, pinned); vfree(mem->hpas); kfree(mem); unlock_exit: account_locked_vm(mm, locked_entries, false); return ret; } long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries, struct mm_iommu_table_group_mem_t **pmem) { return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA, pmem); } EXPORT_SYMBOL_GPL(mm_iommu_new); long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua, unsigned long entries, unsigned long dev_hpa, struct mm_iommu_table_group_mem_t **pmem) { return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem); } EXPORT_SYMBOL_GPL(mm_iommu_newdev); static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem) { long i; struct page *page = NULL; if (!mem->hpas) return; for (i = 0; i < mem->entries; ++i) { if (!mem->hpas[i]) continue; page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT); if (!page) continue; if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY) SetPageDirty(page); unpin_user_page(page); mem->hpas[i] = 0; } } static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem) { mm_iommu_unpin(mem); vfree(mem->hpas); kfree(mem); } static void mm_iommu_free(struct rcu_head *head) { struct mm_iommu_table_group_mem_t *mem = container_of(head, struct mm_iommu_table_group_mem_t, rcu); mm_iommu_do_free(mem); } static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem) { list_del_rcu(&mem->next); call_rcu(&mem->rcu, mm_iommu_free); } long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem) { long ret = 0; unsigned long unlock_entries = 0; mutex_lock(&mem_list_mutex); if (mem->used == 0) { ret = -ENOENT; goto unlock_exit; } --mem->used; /* There are still users, exit */ if (mem->used) goto unlock_exit; /* Are there still mappings? 
*/ if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) { ++mem->used; ret = -EBUSY; goto unlock_exit; } if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) unlock_entries = mem->entries; /* @mapped became 0 so now mappings are disabled, release the region */ mm_iommu_release(mem); unlock_exit: mutex_unlock(&mem_list_mutex); account_locked_vm(mm, unlock_entries, false); return ret; } EXPORT_SYMBOL_GPL(mm_iommu_put); struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, unsigned long ua, unsigned long size) { struct mm_iommu_table_group_mem_t *mem, *ret = NULL; rcu_read_lock(); list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { if ((mem->ua <= ua) && (ua + size <= mem->ua + (mem->entries << PAGE_SHIFT))) { ret = mem; break; } } rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(mm_iommu_lookup); struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries) { struct mm_iommu_table_group_mem_t *mem, *ret = NULL; mutex_lock(&mem_list_mutex); list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next, lockdep_is_held(&mem_list_mutex)) { if ((mem->ua == ua) && (mem->entries == entries)) { ret = mem; ++mem->used; break; } } mutex_unlock(&mem_list_mutex); return ret; } EXPORT_SYMBOL_GPL(mm_iommu_get); long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, unsigned long ua, unsigned int pageshift, unsigned long *hpa) { const long entry = (ua - mem->ua) >> PAGE_SHIFT; u64 *va; if (entry >= mem->entries) return -EFAULT; if (pageshift > mem->pageshift) return -EFAULT; if (!mem->hpas) { *hpa = mem->dev_hpa + (ua - mem->ua); return 0; } va = &mem->hpas[entry]; *hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK); return 0; } EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa); bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa, unsigned int pageshift, unsigned long *size) { struct mm_iommu_table_group_mem_t *mem; unsigned long end; rcu_read_lock(); list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) continue; end = mem->dev_hpa + (mem->entries << PAGE_SHIFT); if ((mem->dev_hpa <= hpa) && (hpa < end)) { /* * Since the IOMMU page size might be bigger than * PAGE_SIZE, the amount of preregistered memory * starting from @hpa might be smaller than 1<<pageshift * and the caller needs to distinguish this situation. */ *size = min(1UL << pageshift, end - hpa); return true; } } rcu_read_unlock(); return false; } EXPORT_SYMBOL_GPL(mm_iommu_is_devmem); long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem) { if (atomic64_inc_not_zero(&mem->mapped)) return 0; /* Last mm_iommu_put() has been called, no more mappings allowed() */ return -ENXIO; } EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc); void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem) { atomic64_add_unless(&mem->mapped, -1, 1); } EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec); void mm_iommu_init(struct mm_struct *mm) { INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list); }
linux-master
arch/powerpc/mm/book3s64/iommu_api.c
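mm_iommu_do_alloc above seeds the registration's maximum IOMMU page size from the natural alignment of the userspace address and of the total size, via __ffs(ua | (entries << PAGE_SHIFT)), before clamping it per page. A small standalone sketch of that arithmetic follows; __builtin_ctzll stands in for the kernel's __ffs(), and PAGE_SHIFT = 16 (64K base pages) is an assumption for the example.

/*
 * Illustrative model of the starting pageshift calculation in
 * mm_iommu_do_alloc(): the largest usable IOMMU page size is bounded by the
 * natural alignment of the userspace address and of the registered size
 * (entries << PAGE_SHIFT).  __builtin_ctzll stands in for the kernel's
 * __ffs(); PAGE_SHIFT = 16 (64K pages) is assumed for the example.
 */
#include <stdio.h>

#define PAGE_SHIFT 16 /* assumed 64K base pages */

static unsigned int starting_pageshift(unsigned long long ua,
				       unsigned long long entries)
{
	/* lowest set bit of (ua | size) == coarsest common alignment */
	return __builtin_ctzll(ua | (entries << PAGE_SHIFT));
}

int main(void)
{
	/* 512MB-aligned VA, 16MB worth of entries -> 2^24 (16MB) IOMMU pages */
	printf("pageshift = %u\n", starting_pageshift(0x20000000ULL, 256));

	/* a merely 64K-aligned VA limits the registration to 64K IOMMU pages */
	printf("pageshift = %u\n", starting_pageshift(0x20010000ULL, 256));
	return 0;
}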
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerPC64 SLB support. * * Copyright (C) 2004 David Gibson <[email protected]>, IBM * Based on earlier code written by: * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com * Copyright (c) 2001 Dave Engebretsen * Copyright (C) 2002 Anton Blanchard <[email protected]>, IBM */ #include <asm/interrupt.h> #include <asm/mmu.h> #include <asm/mmu_context.h> #include <asm/paca.h> #include <asm/lppaca.h> #include <asm/ppc-opcode.h> #include <asm/cputable.h> #include <asm/cacheflush.h> #include <asm/smp.h> #include <linux/compiler.h> #include <linux/context_tracking.h> #include <linux/mm_types.h> #include <linux/pgtable.h> #include <asm/udbg.h> #include <asm/code-patching.h> #include "internal.h" static long slb_allocate_user(struct mm_struct *mm, unsigned long ea); bool stress_slb_enabled __initdata; static int __init parse_stress_slb(char *p) { stress_slb_enabled = true; return 0; } early_param("stress_slb", parse_stress_slb); __ro_after_init DEFINE_STATIC_KEY_FALSE(stress_slb_key); static void assert_slb_presence(bool present, unsigned long ea) { #ifdef CONFIG_DEBUG_VM unsigned long tmp; WARN_ON_ONCE(mfmsr() & MSR_EE); if (!cpu_has_feature(CPU_FTR_ARCH_206)) return; /* * slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware * ignores all other bits from 0-27, so just clear them all. */ ea &= ~((1UL << SID_SHIFT) - 1); asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0"); WARN_ON(present == (tmp == 0)); #endif } static inline void slb_shadow_update(unsigned long ea, int ssize, unsigned long flags, enum slb_index index) { struct slb_shadow *p = get_slb_shadow(); /* * Clear the ESID first so the entry is not valid while we are * updating it. No write barriers are needed here, provided * we only update the current CPU's SLB shadow buffer. */ WRITE_ONCE(p->save_area[index].esid, 0); WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags))); WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index))); } static inline void slb_shadow_clear(enum slb_index index) { WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index)); } static inline void create_shadowed_slbe(unsigned long ea, int ssize, unsigned long flags, enum slb_index index) { /* * Updating the shadow buffer before writing the SLB ensures * we don't get a stale entry here if we get preempted by PHYP * between these two statements. */ slb_shadow_update(ea, ssize, flags, index); assert_slb_presence(false, ea); asm volatile("slbmte %0,%1" : : "r" (mk_vsid_data(ea, ssize, flags)), "r" (mk_esid_data(ea, ssize, index)) : "memory" ); } /* * Insert bolted entries into SLB (which may not be empty, so don't clear * slb_cache_ptr). */ void __slb_restore_bolted_realmode(void) { struct slb_shadow *p = get_slb_shadow(); enum slb_index index; /* No isync needed because realmode. */ for (index = 0; index < SLB_NUM_BOLTED; index++) { asm volatile("slbmte %0,%1" : : "r" (be64_to_cpu(p->save_area[index].vsid)), "r" (be64_to_cpu(p->save_area[index].esid))); } assert_slb_presence(true, local_paca->kstack); } /* * Insert the bolted entries into an empty SLB. */ void slb_restore_bolted_realmode(void) { __slb_restore_bolted_realmode(); get_paca()->slb_cache_ptr = 0; get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; } /* * This flushes all SLB entries including 0, so it must be realmode. 
*/ void slb_flush_all_realmode(void) { asm volatile("slbmte %0,%0; slbia" : : "r" (0)); } static __always_inline void __slb_flush_and_restore_bolted(bool preserve_kernel_lookaside) { struct slb_shadow *p = get_slb_shadow(); unsigned long ksp_esid_data, ksp_vsid_data; u32 ih; /* * SLBIA IH=1 on ISA v2.05 and newer processors may preserve lookaside * information created with Class=0 entries, which we use for kernel * SLB entries (the SLB entries themselves are still invalidated). * * Older processors will ignore this optimisation. Over-invalidation * is fine because we never rely on lookaside information existing. */ if (preserve_kernel_lookaside) ih = 1; else ih = 0; ksp_esid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].esid); ksp_vsid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].vsid); asm volatile(PPC_SLBIA(%0)" \n" "slbmte %1, %2 \n" :: "i" (ih), "r" (ksp_vsid_data), "r" (ksp_esid_data) : "memory"); } /* * This flushes non-bolted entries, it can be run in virtual mode. Must * be called with interrupts disabled. */ void slb_flush_and_restore_bolted(void) { BUILD_BUG_ON(SLB_NUM_BOLTED != 2); WARN_ON(!irqs_disabled()); /* * We can't take a PMU exception in the following code, so hard * disable interrupts. */ hard_irq_disable(); isync(); __slb_flush_and_restore_bolted(false); isync(); assert_slb_presence(true, get_paca()->kstack); get_paca()->slb_cache_ptr = 0; get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; } void slb_save_contents(struct slb_entry *slb_ptr) { int i; unsigned long e, v; /* Save slb_cache_ptr value. */ get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr; if (!slb_ptr) return; for (i = 0; i < mmu_slb_size; i++) { asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i)); asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i)); slb_ptr->esid = e; slb_ptr->vsid = v; slb_ptr++; } } void slb_dump_contents(struct slb_entry *slb_ptr) { int i, n; unsigned long e, v; unsigned long llp; if (!slb_ptr) return; pr_err("SLB contents of cpu 0x%x\n", smp_processor_id()); for (i = 0; i < mmu_slb_size; i++) { e = slb_ptr->esid; v = slb_ptr->vsid; slb_ptr++; if (!e && !v) continue; pr_err("%02d %016lx %016lx %s\n", i, e, v, (e & SLB_ESID_V) ? "VALID" : "NOT VALID"); if (!(e & SLB_ESID_V)) continue; llp = v & SLB_VSID_LLP; if (v & SLB_VSID_B_1T) { pr_err(" 1T ESID=%9lx VSID=%13lx LLP:%3lx\n", GET_ESID_1T(e), (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp); } else { pr_err(" 256M ESID=%9lx VSID=%13lx LLP:%3lx\n", GET_ESID(e), (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp); } } if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) { /* RR is not so useful as it's often not used for allocation */ pr_err("SLB RR allocator index %d\n", get_paca()->stab_rr); /* Dump slb cache entires as well. */ pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr); pr_err("Valid SLB cache entries:\n"); n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES); for (i = 0; i < n; i++) pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); pr_err("Rest of SLB cache entries:\n"); for (i = n; i < SLB_CACHE_ENTRIES; i++) pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); } } void slb_vmalloc_update(void) { /* * vmalloc is not bolted, so just have to flush non-bolted. 
*/ slb_flush_and_restore_bolted(); } static bool preload_hit(struct thread_info *ti, unsigned long esid) { unsigned char i; for (i = 0; i < ti->slb_preload_nr; i++) { unsigned char idx; idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR; if (esid == ti->slb_preload_esid[idx]) return true; } return false; } static bool preload_add(struct thread_info *ti, unsigned long ea) { unsigned char idx; unsigned long esid; if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) { /* EAs are stored >> 28 so 256MB segments don't need clearing */ if (ea & ESID_MASK_1T) ea &= ESID_MASK_1T; } esid = ea >> SID_SHIFT; if (preload_hit(ti, esid)) return false; idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR; ti->slb_preload_esid[idx] = esid; if (ti->slb_preload_nr == SLB_PRELOAD_NR) ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR; else ti->slb_preload_nr++; return true; } static void preload_age(struct thread_info *ti) { if (!ti->slb_preload_nr) return; ti->slb_preload_nr--; ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR; } void slb_setup_new_exec(void) { struct thread_info *ti = current_thread_info(); struct mm_struct *mm = current->mm; unsigned long exec = 0x10000000; WARN_ON(irqs_disabled()); /* * preload cache can only be used to determine whether a SLB * entry exists if it does not start to overflow. */ if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR) return; hard_irq_disable(); /* * We have no good place to clear the slb preload cache on exec, * flush_thread is about the earliest arch hook but that happens * after we switch to the mm and have already preloaded the SLBEs. * * For the most part that's probably okay to use entries from the * previous exec, they will age out if unused. It may turn out to * be an advantage to clear the cache before switching to it, * however. */ /* * preload some userspace segments into the SLB. * Almost all 32 and 64bit PowerPC executables are linked at * 0x10000000 so it makes sense to preload this segment. */ if (!is_kernel_addr(exec)) { if (preload_add(ti, exec)) slb_allocate_user(mm, exec); } /* Libraries and mmaps. */ if (!is_kernel_addr(mm->mmap_base)) { if (preload_add(ti, mm->mmap_base)) slb_allocate_user(mm, mm->mmap_base); } /* see switch_slb */ asm volatile("isync" : : : "memory"); local_irq_enable(); } void preload_new_slb_context(unsigned long start, unsigned long sp) { struct thread_info *ti = current_thread_info(); struct mm_struct *mm = current->mm; unsigned long heap = mm->start_brk; WARN_ON(irqs_disabled()); /* see above */ if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR) return; hard_irq_disable(); /* Userspace entry address. */ if (!is_kernel_addr(start)) { if (preload_add(ti, start)) slb_allocate_user(mm, start); } /* Top of stack, grows down. */ if (!is_kernel_addr(sp)) { if (preload_add(ti, sp)) slb_allocate_user(mm, sp); } /* Bottom of heap, grows up. 
*/ if (heap && !is_kernel_addr(heap)) { if (preload_add(ti, heap)) slb_allocate_user(mm, heap); } /* see switch_slb */ asm volatile("isync" : : : "memory"); local_irq_enable(); } static void slb_cache_slbie_kernel(unsigned int index) { unsigned long slbie_data = get_paca()->slb_cache[index]; unsigned long ksp = get_paca()->kstack; slbie_data <<= SID_SHIFT; slbie_data |= 0xc000000000000000ULL; if ((ksp & slb_esid_mask(mmu_kernel_ssize)) == slbie_data) return; slbie_data |= mmu_kernel_ssize << SLBIE_SSIZE_SHIFT; asm volatile("slbie %0" : : "r" (slbie_data)); } static void slb_cache_slbie_user(unsigned int index) { unsigned long slbie_data = get_paca()->slb_cache[index]; slbie_data <<= SID_SHIFT; slbie_data |= user_segment_size(slbie_data) << SLBIE_SSIZE_SHIFT; slbie_data |= SLBIE_C; /* user slbs have C=1 */ asm volatile("slbie %0" : : "r" (slbie_data)); } /* Flush all user entries from the segment table of the current processor. */ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) { struct thread_info *ti = task_thread_info(tsk); unsigned char i; /* * We need interrupts hard-disabled here, not just soft-disabled, * so that a PMU interrupt can't occur, which might try to access * user memory (to get a stack trace) and possible cause an SLB miss * which would update the slb_cache/slb_cache_ptr fields in the PACA. */ hard_irq_disable(); isync(); if (stress_slb()) { __slb_flush_and_restore_bolted(false); isync(); get_paca()->slb_cache_ptr = 0; get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; } else if (cpu_has_feature(CPU_FTR_ARCH_300)) { /* * SLBIA IH=3 invalidates all Class=1 SLBEs and their * associated lookaside structures, which matches what * switch_slb wants. So ARCH_300 does not use the slb * cache. */ asm volatile(PPC_SLBIA(3)); } else { unsigned long offset = get_paca()->slb_cache_ptr; if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) && offset <= SLB_CACHE_ENTRIES) { /* * Could assert_slb_presence(true) here, but * hypervisor or machine check could have come * in and removed the entry at this point. */ for (i = 0; i < offset; i++) slb_cache_slbie_user(i); /* Workaround POWER5 < DD2.1 issue */ if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1) slb_cache_slbie_user(0); } else { /* Flush but retain kernel lookaside information */ __slb_flush_and_restore_bolted(true); isync(); get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; } get_paca()->slb_cache_ptr = 0; } get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; copy_mm_to_paca(mm); /* * We gradually age out SLBs after a number of context switches to * reduce reload overhead of unused entries (like we do with FP/VEC * reload). Each time we wrap 256 switches, take an entry out of the * SLB preload cache. */ tsk->thread.load_slb++; if (!tsk->thread.load_slb) { unsigned long pc = KSTK_EIP(tsk); preload_age(ti); preload_add(ti, pc); } for (i = 0; i < ti->slb_preload_nr; i++) { unsigned char idx; unsigned long ea; idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR; ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT; slb_allocate_user(mm, ea); } /* * Synchronize slbmte preloads with possible subsequent user memory * address accesses by the kernel (user mode won't happen until * rfid, which is safe). 
*/ isync(); } void slb_set_size(u16 size) { mmu_slb_size = size; } void slb_initialize(void) { unsigned long linear_llp, vmalloc_llp, io_llp; unsigned long lflags; static int slb_encoding_inited; #ifdef CONFIG_SPARSEMEM_VMEMMAP unsigned long vmemmap_llp; #endif /* Prepare our SLB miss handler based on our page size */ linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; io_llp = mmu_psize_defs[mmu_io_psize].sllp; vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp; #ifdef CONFIG_SPARSEMEM_VMEMMAP vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp; #endif if (!slb_encoding_inited) { slb_encoding_inited = 1; pr_devel("SLB: linear LLP = %04lx\n", linear_llp); pr_devel("SLB: io LLP = %04lx\n", io_llp); #ifdef CONFIG_SPARSEMEM_VMEMMAP pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp); #endif } get_paca()->stab_rr = SLB_NUM_BOLTED - 1; get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; lflags = SLB_VSID_KERNEL | linear_llp; /* Invalidate the entire SLB (even entry 0) & all the ERATS */ asm volatile("isync":::"memory"); asm volatile("slbmte %0,%0"::"r" (0) : "memory"); asm volatile("isync; slbia; isync":::"memory"); create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX); /* * For the boot cpu, we're running on the stack in init_thread_union, * which is in the first segment of the linear mapping, and also * get_paca()->kstack hasn't been initialized yet. * For secondary cpus, we need to bolt the kernel stack entry now. */ slb_shadow_clear(KSTACK_INDEX); if (raw_smp_processor_id() != boot_cpuid && (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET) create_shadowed_slbe(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX); asm volatile("isync":::"memory"); } static void slb_cache_update(unsigned long esid_data) { int slb_cache_index; if (cpu_has_feature(CPU_FTR_ARCH_300)) return; /* ISAv3.0B and later does not use slb_cache */ if (stress_slb()) return; /* * Now update slb cache entries */ slb_cache_index = local_paca->slb_cache_ptr; if (slb_cache_index < SLB_CACHE_ENTRIES) { /* * We have space in slb cache for optimized switch_slb(). * Top 36 bits from esid_data as per ISA */ local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT; local_paca->slb_cache_ptr++; } else { /* * Our cache is full and the current cache content strictly * doesn't indicate the active SLB contents. Bump the ptr * so that switch_slb() will ignore the cache. */ local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1; } } static enum slb_index alloc_slb_index(bool kernel) { enum slb_index index; /* * The allocation bitmaps can become out of synch with the SLB * when the _switch code does slbie when bolting a new stack * segment and it must not be anywhere else in the SLB. This leaves * a kernel allocated entry that is unused in the SLB. With very * large systems or small segment sizes, the bitmaps could slowly * fill with these entries. They will eventually be cleared out * by the round robin allocator in that case, so it's probably not * worth accounting for. */ /* * SLBs beyond 32 entries are allocated with stab_rr only * POWER7/8/9 have 32 SLB entries, this could be expanded if a * future CPU has more. 
*/ if (local_paca->slb_used_bitmap != U32_MAX) { index = ffz(local_paca->slb_used_bitmap); local_paca->slb_used_bitmap |= 1U << index; if (kernel) local_paca->slb_kern_bitmap |= 1U << index; } else { /* round-robin replacement of slb starting at SLB_NUM_BOLTED. */ index = local_paca->stab_rr; if (index < (mmu_slb_size - 1)) index++; else index = SLB_NUM_BOLTED; local_paca->stab_rr = index; if (index < 32) { if (kernel) local_paca->slb_kern_bitmap |= 1U << index; else local_paca->slb_kern_bitmap &= ~(1U << index); } } BUG_ON(index < SLB_NUM_BOLTED); return index; } static long slb_insert_entry(unsigned long ea, unsigned long context, unsigned long flags, int ssize, bool kernel) { unsigned long vsid; unsigned long vsid_data, esid_data; enum slb_index index; vsid = get_vsid(context, ea, ssize); if (!vsid) return -EFAULT; /* * There must not be a kernel SLB fault in alloc_slb_index or before * slbmte here or the allocation bitmaps could get out of whack with * the SLB. * * User SLB faults or preloads take this path which might get inlined * into the caller, so add compiler barriers here to ensure unsafe * memory accesses do not come between. */ barrier(); index = alloc_slb_index(kernel); vsid_data = __mk_vsid_data(vsid, ssize, flags); esid_data = mk_esid_data(ea, ssize, index); /* * No need for an isync before or after this slbmte. The exception * we enter with and the rfid we exit with are context synchronizing. * User preloads should add isync afterwards in case the kernel * accesses user memory before it returns to userspace with rfid. */ assert_slb_presence(false, ea); if (stress_slb()) { int slb_cache_index = local_paca->slb_cache_ptr; /* * stress_slb() does not use slb cache, repurpose as a * cache of inserted (non-bolted) kernel SLB entries. All * non-bolted kernel entries are flushed on any user fault, * or if there are already 3 non-boled kernel entries. */ BUILD_BUG_ON(SLB_CACHE_ENTRIES < 3); if (!kernel || slb_cache_index == 3) { int i; for (i = 0; i < slb_cache_index; i++) slb_cache_slbie_kernel(i); slb_cache_index = 0; } if (kernel) local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT; local_paca->slb_cache_ptr = slb_cache_index; } asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)); barrier(); if (!kernel) slb_cache_update(esid_data); return 0; } static long slb_allocate_kernel(unsigned long ea, unsigned long id) { unsigned long context; unsigned long flags; int ssize; if (id == LINEAR_MAP_REGION_ID) { /* We only support upto H_MAX_PHYSMEM_BITS */ if ((ea & EA_MASK) > (1UL << H_MAX_PHYSMEM_BITS)) return -EFAULT; flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp; #ifdef CONFIG_SPARSEMEM_VMEMMAP } else if (id == VMEMMAP_REGION_ID) { if (ea >= H_VMEMMAP_END) return -EFAULT; flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp; #endif } else if (id == VMALLOC_REGION_ID) { if (ea >= H_VMALLOC_END) return -EFAULT; flags = local_paca->vmalloc_sllp; } else if (id == IO_REGION_ID) { if (ea >= H_KERN_IO_END) return -EFAULT; flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp; } else { return -EFAULT; } ssize = MMU_SEGSIZE_1T; if (!mmu_has_feature(MMU_FTR_1T_SEGMENT)) ssize = MMU_SEGSIZE_256M; context = get_kernel_context(ea); return slb_insert_entry(ea, context, flags, ssize, true); } static long slb_allocate_user(struct mm_struct *mm, unsigned long ea) { unsigned long context; unsigned long flags; int bpsize; int ssize; /* * consider this as bad access if we take a SLB miss * on an address above addr limit. 
*/ if (ea >= mm_ctx_slb_addr_limit(&mm->context)) return -EFAULT; context = get_user_context(&mm->context, ea); if (!context) return -EFAULT; if (unlikely(ea >= H_PGTABLE_RANGE)) { WARN_ON(1); return -EFAULT; } ssize = user_segment_size(ea); bpsize = get_slice_psize(mm, ea); flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp; return slb_insert_entry(ea, context, flags, ssize, false); } DEFINE_INTERRUPT_HANDLER_RAW(do_slb_fault) { unsigned long ea = regs->dar; unsigned long id = get_region_id(ea); /* IRQs are not reconciled here, so can't check irqs_disabled */ VM_WARN_ON(mfmsr() & MSR_EE); if (regs_is_unrecoverable(regs)) return -EINVAL; /* * SLB kernel faults must be very careful not to touch anything that is * not bolted. E.g., PACA and global variables are okay, mm->context * stuff is not. SLB user faults may access all of memory (and induce * one recursive SLB kernel fault), so the kernel fault must not * trample on the user fault state at those points. */ /* * This is a raw interrupt handler, for performance, so that * fast_interrupt_return can be used. The handler must not touch local * irq state, or schedule. We could test for usermode and upgrade to a * normal process context (synchronous) interrupt for those, which * would make them first-class kernel code and able to be traced and * instrumented, although performance would suffer a bit, it would * probably be a good tradeoff. */ if (id >= LINEAR_MAP_REGION_ID) { long err; #ifdef CONFIG_DEBUG_VM /* Catch recursive kernel SLB faults. */ BUG_ON(local_paca->in_kernel_slb_handler); local_paca->in_kernel_slb_handler = 1; #endif err = slb_allocate_kernel(ea, id); #ifdef CONFIG_DEBUG_VM local_paca->in_kernel_slb_handler = 0; #endif return err; } else { struct mm_struct *mm = current->mm; long err; if (unlikely(!mm)) return -EFAULT; err = slb_allocate_user(mm, ea); if (!err) preload_add(current_thread_info(), ea); return err; } }
linux-master
arch/powerpc/mm/book3s64/slb.c
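The preload_hit/preload_add/preload_age helpers above keep a small FIFO of segment IDs (EA >> 28 for 256MB segments) in thread_info so likely-needed SLB entries can be re-inserted on context switch, and the cache is aged by one entry every 256 switches. Below is a userspace model of that ring buffer only; SLB_PRELOAD_NR = 16 is an assumed depth for illustration and the 1T-segment masking is omitted.

/*
 * Userspace model of the SLB preload FIFO kept in thread_info: a small ring
 * of ESIDs (EA >> 28) with duplicate suppression and oldest-entry
 * replacement.  SLB_PRELOAD_NR = 16 is an assumption for the example.
 */
#include <stdio.h>
#include <stdbool.h>

#define SID_SHIFT	28
#define SLB_PRELOAD_NR	16 /* assumed cache depth */

struct preload_cache {
	unsigned long esid[SLB_PRELOAD_NR];
	unsigned char tail, nr;
};

static bool preload_hit(struct preload_cache *c, unsigned long esid)
{
	for (unsigned char i = 0; i < c->nr; i++)
		if (c->esid[(c->tail + i) % SLB_PRELOAD_NR] == esid)
			return true;
	return false;
}

static bool preload_add(struct preload_cache *c, unsigned long ea)
{
	unsigned long esid = ea >> SID_SHIFT;

	if (preload_hit(c, esid))
		return false;

	c->esid[(c->tail + c->nr) % SLB_PRELOAD_NR] = esid;
	if (c->nr == SLB_PRELOAD_NR)
		c->tail = (c->tail + 1) % SLB_PRELOAD_NR; /* overwrite oldest */
	else
		c->nr++;
	return true;
}

int main(void)
{
	struct preload_cache c = { 0 };

	preload_add(&c, 0x10000000UL);		/* typical executable base */
	preload_add(&c, 0x7fffe0000000UL);	/* hypothetical stack address */

	printf("hit exec segment: %d\n",
	       preload_hit(&c, 0x10000000UL >> SID_SHIFT));
	printf("entries cached:   %u\n", (unsigned)c.nr);
	return 0;
}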
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2005, Paul Mackerras, IBM Corporation. * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation. * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation. */ #include <linux/sched.h> #include <linux/mm_types.h> #include <linux/mm.h> #include <linux/stop_machine.h> #include <asm/sections.h> #include <asm/mmu.h> #include <asm/tlb.h> #include <asm/firmware.h> #include <mm/mmu_decl.h> #include <trace/events/thp.h> #if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE)) #warning Limited user VSID range means pagetable space is wasted #endif #ifdef CONFIG_SPARSEMEM_VMEMMAP /* * vmemmap is the starting address of the virtual address space where * struct pages are allocated for all possible PFNs present on the system * including holes and bad memory (hence sparse). These virtual struct * pages are stored in sequence in this virtual address space irrespective * of the fact whether the corresponding PFN is valid or not. This achieves * constant relationship between address of struct page and its PFN. * * During boot or memory hotplug operation when a new memory section is * added, physical memory allocation (including hash table bolting) will * be performed for the set of struct pages which are part of the memory * section. This saves memory by not allocating struct pages for PFNs * which are not valid. * * ---------------------------------------------- * | PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES| * ---------------------------------------------- * * f000000000000000 c000000000000000 * vmemmap +--------------+ +--------------+ * + | page struct | +--------------> | page struct | * | +--------------+ +--------------+ * | | page struct | +--------------> | page struct | * | +--------------+ | +--------------+ * | | page struct | + +------> | page struct | * | +--------------+ | +--------------+ * | | page struct | | +--> | page struct | * | +--------------+ | | +--------------+ * | | page struct | | | * | +--------------+ | | * | | page struct | | | * | +--------------+ | | * | | page struct | | | * | +--------------+ | | * | | page struct | | | * | +--------------+ | | * | | page struct | +-------+ | * | +--------------+ | * | | page struct | +-----------+ * | +--------------+ * | | page struct | No mapping * | +--------------+ * | | page struct | No mapping * v +--------------+ * * ----------------------------------------- * | RELATION BETWEEN STRUCT PAGES AND PFNS| * ----------------------------------------- * * vmemmap +--------------+ +---------------+ * + | page struct | +-------------> | PFN | * | +--------------+ +---------------+ * | | page struct | +-------------> | PFN | * | +--------------+ +---------------+ * | | page struct | +-------------> | PFN | * | +--------------+ +---------------+ * | | page struct | +-------------> | PFN | * | +--------------+ +---------------+ * | | | * | +--------------+ * | | | * | +--------------+ * | | | * | +--------------+ +---------------+ * | | page struct | +-------------> | PFN | * | +--------------+ +---------------+ * | | | * | +--------------+ * | | | * | +--------------+ +---------------+ * | | page struct | +-------------> | PFN | * | +--------------+ +---------------+ * | | page struct | +-------------> | PFN | * v +--------------+ +---------------+ */ /* * On hash-based CPUs, the vmemmap is bolted in the hash table. 
* */ int __meminit hash__vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys) { int rc; if ((start + page_size) >= H_VMEMMAP_END) { pr_warn("Outside the supported range\n"); return -1; } rc = htab_bolt_mapping(start, start + page_size, phys, pgprot_val(PAGE_KERNEL), mmu_vmemmap_psize, mmu_kernel_ssize); if (rc < 0) { int rc2 = htab_remove_mapping(start, start + page_size, mmu_vmemmap_psize, mmu_kernel_ssize); BUG_ON(rc2 && (rc2 != -ENOENT)); } return rc; } #ifdef CONFIG_MEMORY_HOTPLUG void hash__vmemmap_remove_mapping(unsigned long start, unsigned long page_size) { int rc = htab_remove_mapping(start, start + page_size, mmu_vmemmap_psize, mmu_kernel_ssize); BUG_ON((rc < 0) && (rc != -ENOENT)); WARN_ON(rc == -ENOENT); } #endif #endif /* CONFIG_SPARSEMEM_VMEMMAP */ /* * map_kernel_page currently only called by __ioremap * map_kernel_page adds an entry to the ioremap page table * and adds an entry to the HPT, possibly bolting it */ int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) { pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE); if (slab_is_available()) { pgdp = pgd_offset_k(ea); p4dp = p4d_offset(pgdp, ea); pudp = pud_alloc(&init_mm, p4dp, ea); if (!pudp) return -ENOMEM; pmdp = pmd_alloc(&init_mm, pudp, ea); if (!pmdp) return -ENOMEM; ptep = pte_alloc_kernel(pmdp, ea); if (!ptep) return -ENOMEM; set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); } else { /* * If the mm subsystem is not fully up, we cannot create a * linux page table entry for this mapping. Simply bolt an * entry in the hardware page table. * */ if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot), mmu_io_psize, mmu_kernel_ssize)) { printk(KERN_ERR "Failed to do bolted mapping IO " "memory at %016lx !\n", pa); return -ENOMEM; } } smp_wmb(); return 0; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, unsigned long clr, unsigned long set) { __be64 old_be, tmp; unsigned long old; #ifdef CONFIG_DEBUG_VM WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp)); assert_spin_locked(pmd_lockptr(mm, pmdp)); #endif __asm__ __volatile__( "1: ldarx %0,0,%3\n\ and. %1,%0,%6\n\ bne- 1b \n\ andc %1,%0,%4 \n\ or %1,%1,%7\n\ stdcx. %1,0,%3 \n\ bne- 1b" : "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp) : "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp), "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set)) : "cc" ); old = be64_to_cpu(old_be); trace_hugepage_update_pmd(addr, old, clr, set); if (old & H_PAGE_HASHPTE) hpte_do_hugepage_flush(mm, addr, pmdp, old); return old; } pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { pmd_t pmd; VM_BUG_ON(address & ~HPAGE_PMD_MASK); VM_BUG_ON(pmd_trans_huge(*pmdp)); VM_BUG_ON(pmd_devmap(*pmdp)); pmd = *pmdp; pmd_clear(pmdp); /* * Wait for all pending hash_page to finish. This is needed * in case of subpage collapse. When we collapse normal pages * to hugepage, we first clear the pmd, then invalidate all * the PTE entries. The assumption here is that any low level * page fault will see a none pmd and take the slow path that * will wait on mmap_lock. But we could very well be in a * hash_page with local ptep pointer value. Such a hash page * can result in adding new HPTE entries for normal subpages. * That means we could be modifying the page content as we * copy them to a huge page. 
So wait for parallel hash_page * to finish before invalidating HPTE entries. We can do this * by sending an IPI to all the cpus and executing a dummy * function there. */ serialize_against_pte_lookup(vma->vm_mm); /* * Now invalidate the hpte entries in the range * covered by pmd. This make sure we take a * fault and will find the pmd as none, which will * result in a major fault which takes mmap_lock and * hence wait for collapse to complete. Without this * the __collapse_huge_page_copy can result in copying * the old content. */ flush_hash_table_pmd_range(vma->vm_mm, &pmd, address); return pmd; } /* * We want to put the pgtable in pmd and use pgtable for tracking * the base page size hptes */ void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable) { pgtable_t *pgtable_slot; assert_spin_locked(pmd_lockptr(mm, pmdp)); /* * we store the pgtable in the second half of PMD */ pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD; *pgtable_slot = pgtable; /* * expose the deposited pgtable to other cpus. * before we set the hugepage PTE at pmd level * hash fault code looks at the deposted pgtable * to store hash index values. */ smp_wmb(); } pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) { pgtable_t pgtable; pgtable_t *pgtable_slot; assert_spin_locked(pmd_lockptr(mm, pmdp)); pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD; pgtable = *pgtable_slot; /* * Once we withdraw, mark the entry NULL. */ *pgtable_slot = NULL; /* * We store HPTE information in the deposited PTE fragment. * zero out the content on withdraw. */ memset(pgtable, 0, PTE_FRAG_SIZE); return pgtable; } /* * A linux hugepage PMD was changed and the corresponding hash table entries * neesd to be flushed. */ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, unsigned long old_pmd) { int ssize; unsigned int psize; unsigned long vsid; unsigned long flags = 0; /* get the base page size,vsid and segment size */ #ifdef CONFIG_DEBUG_VM psize = get_slice_psize(mm, addr); BUG_ON(psize == MMU_PAGE_16M); #endif if (old_pmd & H_PAGE_COMBO) psize = MMU_PAGE_4K; else psize = MMU_PAGE_64K; if (!is_kernel_addr(addr)) { ssize = user_segment_size(addr); vsid = get_user_vsid(&mm->context, addr, ssize); WARN_ON(vsid == 0); } else { vsid = get_kernel_vsid(addr, mmu_kernel_ssize); ssize = mmu_kernel_ssize; } if (mm_is_thread_local(mm)) flags |= HPTE_LOCAL_UPDATE; return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags); } pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) { pmd_t old_pmd; pgtable_t pgtable; unsigned long old; pgtable_t *pgtable_slot; old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); old_pmd = __pmd(old); /* * We have pmd == none and we are holding page_table_lock. * So we can safely go and clear the pgtable hash * index info. */ pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD; pgtable = *pgtable_slot; /* * Let's zero out old valid and hash index details * hash fault look at them. */ memset(pgtable, 0, PTE_FRAG_SIZE); return old_pmd; } int hash__has_transparent_hugepage(void) { if (!mmu_has_feature(MMU_FTR_16M_PAGE)) return 0; /* * We support THP only if PMD_SIZE is 16MB. */ if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT) return 0; /* * We need to make sure that we support 16MB hugepage in a segment * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE * of 64K. 
*/ /* * If we have 64K HPTE, we will be using that by default */ if (mmu_psize_defs[MMU_PAGE_64K].shift && (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1)) return 0; /* * Ok we only have 4K HPTE */ if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1) return 0; return 1; } EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #ifdef CONFIG_STRICT_KERNEL_RWX struct change_memory_parms { unsigned long start, end, newpp; unsigned int step, nr_cpus; atomic_t master_cpu; atomic_t cpu_counter; }; // We'd rather this was on the stack but it has to be in the RMO static struct change_memory_parms chmem_parms; // And therefore we need a lock to protect it from concurrent use static DEFINE_MUTEX(chmem_lock); static void change_memory_range(unsigned long start, unsigned long end, unsigned int step, unsigned long newpp) { unsigned long idx; pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n", start, end, newpp, step); for (idx = start; idx < end; idx += step) /* Not sure if we can do much with the return value */ mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize, mmu_kernel_ssize); } static int notrace chmem_secondary_loop(struct change_memory_parms *parms) { unsigned long msr, tmp, flags; int *p; p = &parms->cpu_counter.counter; local_irq_save(flags); hard_irq_disable(); asm volatile ( // Switch to real mode and leave interrupts off "mfmsr %[msr] ;" "li %[tmp], %[MSR_IR_DR] ;" "andc %[tmp], %[msr], %[tmp] ;" "mtmsrd %[tmp] ;" // Tell the master we are in real mode "1: " "lwarx %[tmp], 0, %[p] ;" "addic %[tmp], %[tmp], -1 ;" "stwcx. %[tmp], 0, %[p] ;" "bne- 1b ;" // Spin until the counter goes to zero "2: ;" "lwz %[tmp], 0(%[p]) ;" "cmpwi %[tmp], 0 ;" "bne- 2b ;" // Switch back to virtual mode "mtmsrd %[msr] ;" : // outputs [msr] "=&r" (msr), [tmp] "=&b" (tmp), "+m" (*p) : // inputs [p] "b" (p), [MSR_IR_DR] "i" (MSR_IR | MSR_DR) : // clobbers "cc", "xer" ); local_irq_restore(flags); return 0; } static int change_memory_range_fn(void *data) { struct change_memory_parms *parms = data; // First CPU goes through, all others wait. 
if (atomic_xchg(&parms->master_cpu, 1) == 1) return chmem_secondary_loop(parms); // Wait for all but one CPU (this one) to call-in while (atomic_read(&parms->cpu_counter) > 1) barrier(); change_memory_range(parms->start, parms->end, parms->step, parms->newpp); mb(); // Signal the other CPUs that we're done atomic_dec(&parms->cpu_counter); return 0; } static bool hash__change_memory_range(unsigned long start, unsigned long end, unsigned long newpp) { unsigned int step, shift; shift = mmu_psize_defs[mmu_linear_psize].shift; step = 1 << shift; start = ALIGN_DOWN(start, step); end = ALIGN(end, step); // aligns up if (start >= end) return false; if (firmware_has_feature(FW_FEATURE_LPAR)) { mutex_lock(&chmem_lock); chmem_parms.start = start; chmem_parms.end = end; chmem_parms.step = step; chmem_parms.newpp = newpp; atomic_set(&chmem_parms.master_cpu, 0); cpus_read_lock(); atomic_set(&chmem_parms.cpu_counter, num_online_cpus()); // Ensure state is consistent before we call the other CPUs mb(); stop_machine_cpuslocked(change_memory_range_fn, &chmem_parms, cpu_online_mask); cpus_read_unlock(); mutex_unlock(&chmem_lock); } else change_memory_range(start, end, step, newpp); return true; } void hash__mark_rodata_ro(void) { unsigned long start, end, pp; start = (unsigned long)_stext; end = (unsigned long)__end_rodata; pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY); WARN_ON(!hash__change_memory_range(start, end, pp)); } void hash__mark_initmem_nx(void) { unsigned long start, end, pp; start = (unsigned long)__init_begin; end = (unsigned long)__init_end; pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY); WARN_ON(!hash__change_memory_range(start, end, pp)); } #endif
linux-master
arch/powerpc/mm/book3s64/hash_pgtable.c
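hash__change_memory_range above rounds the requested range out to the bolted linear-map page size before walking it one large page at a time. A tiny sketch of that rounding follows; the 16MB linear page size and the addresses are assumptions for illustration only.

/*
 * Illustrative model of the range rounding in hash__change_memory_range():
 * the bolted linear mapping uses large pages (assumed 16MB here), so the
 * start is rounded down and the end rounded up to that step before each
 * bolted HPTE is updated.  The addresses below are hypothetical.
 */
#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long shift = 24;			/* assumed 16MB step */
	unsigned long step = 1UL << shift;
	unsigned long start = 0xc000000000012345UL;	/* hypothetical _stext */
	unsigned long end = 0xc0000000012a0000UL;	/* hypothetical __end_rodata */

	start = ALIGN_DOWN(start, step);
	end = ALIGN_UP(end, step);

	printf("protect 0x%lx-0x%lx in %lu steps of 16MB\n",
	       start, end, (end - start) >> shift);
	return 0;
}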
/* * Copyright IBM Corporation, 2015 * Author Aneesh Kumar K.V <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include <linux/mm.h> #include <asm/machdep.h> #include <asm/mmu.h> #include "internal.h" /* * Return true, if the entry has a slot value which * the software considers as invalid. */ static inline bool hpte_soft_invalid(unsigned long hidx) { return ((hidx & 0xfUL) == 0xfUL); } /* * index from 0 - 15 */ bool __rpte_sub_valid(real_pte_t rpte, unsigned long index) { return !(hpte_soft_invalid(__rpte_to_hidx(rpte, index))); } int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, unsigned long flags, int ssize, int subpg_prot) { real_pte_t rpte; unsigned long hpte_group; unsigned int subpg_index; unsigned long rflags, pa; unsigned long old_pte, new_pte, subpg_pte; unsigned long vpn, hash, slot, gslot; unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift; /* * atomically mark the linux large page PTE busy and dirty */ do { pte_t pte = READ_ONCE(*ptep); old_pte = pte_val(pte); /* If PTE busy, retry the access */ if (unlikely(old_pte & H_PAGE_BUSY)) return 0; /* If PTE permissions don't match, take page fault */ if (unlikely(!check_pte_access(access, old_pte))) return 1; /* * Try to lock the PTE, add ACCESSED and DIRTY if it was * a write access. Since this is 4K insert of 64K page size * also add H_PAGE_COMBO */ new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED | H_PAGE_COMBO; if (access & _PAGE_WRITE) new_pte |= _PAGE_DIRTY; } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte))); /* * Handle the subpage protection bits */ subpg_pte = new_pte & ~subpg_prot; rflags = htab_convert_pte_flags(subpg_pte, flags); if (cpu_has_feature(CPU_FTR_NOEXECUTE) && !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { /* * No CPU has hugepages but lacks no execute, so we * don't need to worry about that case */ rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap); } subpg_index = (ea & (PAGE_SIZE - 1)) >> shift; vpn = hpt_vpn(ea, vsid, ssize); rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE); /* *None of the sub 4k page is hashed */ if (!(old_pte & H_PAGE_HASHPTE)) goto htab_insert_hpte; /* * Check if the pte was already inserted into the hash table * as a 64k HW page, and invalidate the 64k HPTE if so. */ if (!(old_pte & H_PAGE_COMBO)) { flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags); /* * clear the old slot details from the old and new pte. * On hash insert failure we use old pte value and we don't * want slot information there if we have a insert failure. */ old_pte &= ~H_PAGE_HASHPTE; new_pte &= ~H_PAGE_HASHPTE; goto htab_insert_hpte; } /* * Check for sub page valid and update */ if (__rpte_sub_valid(rpte, subpg_index)) { int ret; gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte, subpg_index); ret = mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn, MMU_PAGE_4K, MMU_PAGE_4K, ssize, flags); /* * If we failed because typically the HPTE wasn't really here * we try an insertion. 
*/ if (ret == -1) goto htab_insert_hpte; *ptep = __pte(new_pte & ~H_PAGE_BUSY); return 0; } htab_insert_hpte: /* * Initialize all hidx entries to invalid value, the first time * the PTE is about to allocate a 4K HPTE. */ if (!(old_pte & H_PAGE_COMBO)) rpte.hidx = INVALID_RPTE_HIDX; /* * handle H_PAGE_4K_PFN case */ if (old_pte & H_PAGE_4K_PFN) { /* * All the sub 4k page have the same * physical address. */ pa = pte_pfn(__pte(old_pte)) << HW_PAGE_SHIFT; } else { pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; pa += (subpg_index << shift); } hash = hpt_hash(vpn, shift, ssize); repeat: hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; /* Insert into the hash table, primary slot */ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0, MMU_PAGE_4K, MMU_PAGE_4K, ssize); /* * Primary is full, try the secondary */ if (unlikely(slot == -1)) { bool soft_invalid; hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP; slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, HPTE_V_SECONDARY, MMU_PAGE_4K, MMU_PAGE_4K, ssize); soft_invalid = hpte_soft_invalid(slot); if (unlikely(soft_invalid)) { /* * We got a valid slot from a hardware point of view. * but we cannot use it, because we use this special * value; as defined by hpte_soft_invalid(), to track * invalid slots. We cannot use it. So invalidate it. */ gslot = slot & _PTEIDX_GROUP_IX; mmu_hash_ops.hpte_invalidate(hpte_group + gslot, vpn, MMU_PAGE_4K, MMU_PAGE_4K, ssize, 0); } if (unlikely(slot == -1 || soft_invalid)) { /* * For soft invalid slot, let's ensure that we release a * slot from the primary, with the hope that we will * acquire that slot next time we try. This will ensure * that we do not get the same soft-invalid slot. */ if (soft_invalid || (mftb() & 0x1)) hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; mmu_hash_ops.hpte_remove(hpte_group); /* * FIXME!! Should be try the group from which we removed ? */ goto repeat; } } /* * Hypervisor failure. Restore old pte and return -1 * similar to __hash_page_* */ if (unlikely(slot == -2)) { *ptep = __pte(old_pte); hash_failure_debug(ea, access, vsid, trap, ssize, MMU_PAGE_4K, MMU_PAGE_4K, old_pte); return -1; } new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE); new_pte |= H_PAGE_HASHPTE; if (stress_hpt()) hpt_do_stress(ea, hpte_group); *ptep = __pte(new_pte & ~H_PAGE_BUSY); return 0; } int __hash_page_64K(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, unsigned long flags, int ssize) { real_pte_t rpte; unsigned long hpte_group; unsigned long rflags, pa; unsigned long old_pte, new_pte; unsigned long vpn, hash, slot; unsigned long shift = mmu_psize_defs[MMU_PAGE_64K].shift; /* * atomically mark the linux large page PTE busy and dirty */ do { pte_t pte = READ_ONCE(*ptep); old_pte = pte_val(pte); /* If PTE busy, retry the access */ if (unlikely(old_pte & H_PAGE_BUSY)) return 0; /* If PTE permissions don't match, take page fault */ if (unlikely(!check_pte_access(access, old_pte))) return 1; /* * Check if PTE has the cache-inhibit bit set * If so, bail out and refault as a 4k page */ if (!mmu_has_feature(MMU_FTR_CI_LARGE_PAGE) && unlikely(pte_ci(pte))) return 0; /* * Try to lock the PTE, add ACCESSED and DIRTY if it was * a write access. 
*/ new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED; if (access & _PAGE_WRITE) new_pte |= _PAGE_DIRTY; } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte))); rflags = htab_convert_pte_flags(new_pte, flags); rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE); if (cpu_has_feature(CPU_FTR_NOEXECUTE) && !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap); vpn = hpt_vpn(ea, vsid, ssize); if (unlikely(old_pte & H_PAGE_HASHPTE)) { unsigned long gslot; /* * There MIGHT be an HPTE for this pte */ gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte, 0); if (mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn, MMU_PAGE_64K, MMU_PAGE_64K, ssize, flags) == -1) old_pte &= ~_PAGE_HPTEFLAGS; } if (likely(!(old_pte & H_PAGE_HASHPTE))) { pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; hash = hpt_hash(vpn, shift, ssize); repeat: hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; /* Insert into the hash table, primary slot */ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0, MMU_PAGE_64K, MMU_PAGE_64K, ssize); /* * Primary is full, try the secondary */ if (unlikely(slot == -1)) { hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP; slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, HPTE_V_SECONDARY, MMU_PAGE_64K, MMU_PAGE_64K, ssize); if (slot == -1) { if (mftb() & 0x1) hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; mmu_hash_ops.hpte_remove(hpte_group); /* * FIXME!! Should be try the group from which we removed ? */ goto repeat; } } /* * Hypervisor failure. Restore old pte and return -1 * similar to __hash_page_* */ if (unlikely(slot == -2)) { *ptep = __pte(old_pte); hash_failure_debug(ea, access, vsid, trap, ssize, MMU_PAGE_64K, MMU_PAGE_64K, old_pte); return -1; } new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE; new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE); if (stress_hpt()) hpt_do_stress(ea, hpte_group); } *ptep = __pte(new_pte & ~H_PAGE_BUSY); return 0; }
linux-master
arch/powerpc/mm/book3s64/hash_64k.c
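__hash_page_4K above locates which 4K slice of a 64K base page faulted via (ea & (PAGE_SIZE - 1)) >> shift, and treats a per-slice hidx value of 0xf as a software-invalid slot. The standalone sketch below reproduces just that indexing and the soft-invalid test; the 64K/4K sizes are the usual book3s64 values, assumed here for the example.

/*
 * Illustrative model of the sub-page indexing in __hash_page_4K(): with a
 * 64K Linux PAGE_SIZE and 4K hash PTEs, each base page carries 16 per-4K
 * hidx slots, and 0xf is reserved as the software-invalid marker.  Sizes
 * and the faulting address are assumptions for the example.
 */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE	0x10000UL	/* assumed 64K base page */
#define SUBPG_SHIFT	12		/* 4K hash page size */

static unsigned int subpg_index(unsigned long ea)
{
	return (ea & (PAGE_SIZE - 1)) >> SUBPG_SHIFT;
}

static bool hpte_soft_invalid(unsigned long hidx)
{
	return (hidx & 0xfUL) == 0xfUL;
}

int main(void)
{
	unsigned long ea = 0x10007000UL;	/* hypothetical faulting address */

	printf("EA 0x%lx -> subpage %u of %lu\n",
	       ea, subpg_index(ea), PAGE_SIZE >> SUBPG_SHIFT);
	printf("hidx 0xf soft-invalid: %d\n", hpte_soft_invalid(0xfUL));
	return 0;
}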
// SPDX-License-Identifier: GPL-2.0+ /* * PowerPC Memory Protection Keys management * * Copyright 2017, Ram Pai, IBM Corporation. */ #include <asm/mman.h> #include <asm/mmu_context.h> #include <asm/mmu.h> #include <asm/setup.h> #include <asm/smp.h> #include <asm/firmware.h> #include <linux/pkeys.h> #include <linux/of_fdt.h> int num_pkey; /* Max number of pkeys supported */ /* * Keys marked in the reservation list cannot be allocated by userspace */ u32 reserved_allocation_mask __ro_after_init; /* Bits set for the initially allocated keys */ static u32 initial_allocation_mask __ro_after_init; /* * Even if we allocate keys with sys_pkey_alloc(), we need to make sure * other thread still find the access denied using the same keys. */ u64 default_amr __ro_after_init = ~0x0UL; u64 default_iamr __ro_after_init = 0x5555555555555555UL; u64 default_uamor __ro_after_init; EXPORT_SYMBOL(default_amr); /* * Key used to implement PROT_EXEC mmap. Denies READ/WRITE * We pick key 2 because 0 is special key and 1 is reserved as per ISA. */ static int execute_only_key = 2; static bool pkey_execute_disable_supported; #define AMR_BITS_PER_PKEY 2 #define AMR_RD_BIT 0x1UL #define AMR_WR_BIT 0x2UL #define IAMR_EX_BIT 0x1UL #define PKEY_REG_BITS (sizeof(u64) * 8) #define pkeyshift(pkey) (PKEY_REG_BITS - ((pkey+1) * AMR_BITS_PER_PKEY)) static int __init dt_scan_storage_keys(unsigned long node, const char *uname, int depth, void *data) { const char *type = of_get_flat_dt_prop(node, "device_type", NULL); const __be32 *prop; int *pkeys_total = (int *) data; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; prop = of_get_flat_dt_prop(node, "ibm,processor-storage-keys", NULL); if (!prop) return 0; *pkeys_total = be32_to_cpu(prop[0]); return 1; } static int __init scan_pkey_feature(void) { int ret; int pkeys_total = 0; /* * Pkey is not supported with Radix translation. */ if (early_radix_enabled()) return 0; ret = of_scan_flat_dt(dt_scan_storage_keys, &pkeys_total); if (ret == 0) { /* * Let's assume 32 pkeys on P8/P9 bare metal, if its not defined by device * tree. We make this exception since some version of skiboot forgot to * expose this property on power8/9. */ if (!firmware_has_feature(FW_FEATURE_LPAR)) { unsigned long pvr = mfspr(SPRN_PVR); if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E || PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9) pkeys_total = 32; } } #ifdef CONFIG_PPC_MEM_KEYS /* * Adjust the upper limit, based on the number of bits supported by * arch-neutral code. */ pkeys_total = min_t(int, pkeys_total, ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + 1)); #endif return pkeys_total; } void __init pkey_early_init_devtree(void) { int pkeys_total, i; #ifdef CONFIG_PPC_MEM_KEYS /* * We define PKEY_DISABLE_EXECUTE in addition to the arch-neutral * generic defines for PKEY_DISABLE_ACCESS and PKEY_DISABLE_WRITE. * Ensure that the bits a distinct. */ BUILD_BUG_ON(PKEY_DISABLE_EXECUTE & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)); /* * pkey_to_vmflag_bits() assumes that the pkey bits are contiguous * in the vmaflag. Make sure that is really the case. 
*/ BUILD_BUG_ON(__builtin_clzl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + __builtin_popcountl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) != (sizeof(u64) * BITS_PER_BYTE)); #endif /* * Only P7 and above supports SPRN_AMR update with MSR[PR] = 1 */ if (!early_cpu_has_feature(CPU_FTR_ARCH_206)) return; /* scan the device tree for pkey feature */ pkeys_total = scan_pkey_feature(); if (!pkeys_total) goto out; /* Allow all keys to be modified by default */ default_uamor = ~0x0UL; cur_cpu_spec->mmu_features |= MMU_FTR_PKEY; /* * The device tree cannot be relied to indicate support for * execute_disable support. Instead we use a PVR check. */ if (pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p)) pkey_execute_disable_supported = false; else pkey_execute_disable_supported = true; #ifdef CONFIG_PPC_4K_PAGES /* * The OS can manage only 8 pkeys due to its inability to represent them * in the Linux 4K PTE. Mark all other keys reserved. */ num_pkey = min(8, pkeys_total); #else num_pkey = pkeys_total; #endif if (unlikely(num_pkey <= execute_only_key) || !pkey_execute_disable_supported) { /* * Insufficient number of keys to support * execute only key. Mark it unavailable. */ execute_only_key = -1; } else { /* * Mark the execute_only_pkey as not available for * user allocation via pkey_alloc. */ reserved_allocation_mask |= (0x1 << execute_only_key); /* * Deny READ/WRITE for execute_only_key. * Allow execute in IAMR. */ default_amr |= (0x3ul << pkeyshift(execute_only_key)); default_iamr &= ~(0x1ul << pkeyshift(execute_only_key)); /* * Clear the uamor bits for this key. */ default_uamor &= ~(0x3ul << pkeyshift(execute_only_key)); } if (unlikely(num_pkey <= 3)) { /* * Insufficient number of keys to support * KUAP/KUEP feature. */ disable_kuep = true; disable_kuap = true; WARN(1, "Disabling kernel user protection due to low (%d) max supported keys\n", num_pkey); } else { /* handle key which is used by kernel for KAUP */ reserved_allocation_mask |= (0x1 << 3); /* * Mark access for kup_key in default amr so that * we continue to operate with that AMR in * copy_to/from_user(). */ default_amr &= ~(0x3ul << pkeyshift(3)); default_iamr &= ~(0x1ul << pkeyshift(3)); default_uamor &= ~(0x3ul << pkeyshift(3)); } /* * Allow access for only key 0. And prevent any other modification. */ default_amr &= ~(0x3ul << pkeyshift(0)); default_iamr &= ~(0x1ul << pkeyshift(0)); default_uamor &= ~(0x3ul << pkeyshift(0)); /* * key 0 is special in that we want to consider it an allocated * key which is preallocated. We don't allow changing AMR bits * w.r.t key 0. But one can pkey_free(key0) */ initial_allocation_mask |= (0x1 << 0); /* * key 1 is recommended not to be used. PowerISA(3.0) page 1015, * programming note. */ reserved_allocation_mask |= (0x1 << 1); default_uamor &= ~(0x3ul << pkeyshift(1)); /* * Prevent the usage of OS reserved keys. Update UAMOR * for those keys. Also mark the rest of the bits in the * 32 bit mask as reserved. */ for (i = num_pkey; i < 32 ; i++) { reserved_allocation_mask |= (0x1 << i); default_uamor &= ~(0x3ul << pkeyshift(i)); } /* * Prevent the allocation of reserved keys too. */ initial_allocation_mask |= reserved_allocation_mask; pr_info("Enabling pkeys with max key count %d\n", num_pkey); out: /* * Setup uamor on boot cpu */ mtspr(SPRN_UAMOR, default_uamor); return; } #ifdef CONFIG_PPC_KUEP void setup_kuep(bool disabled) { if (disabled) return; /* * On hash if PKEY feature is not enabled, disable KUAP too. 
*/ if (!early_radix_enabled() && !early_mmu_has_feature(MMU_FTR_PKEY)) return; if (smp_processor_id() == boot_cpuid) { pr_info("Activating Kernel Userspace Execution Prevention\n"); cur_cpu_spec->mmu_features |= MMU_FTR_BOOK3S_KUEP; } /* * Radix always uses key0 of the IAMR to determine if an access is * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction * fetch. */ mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED); isync(); } #endif #ifdef CONFIG_PPC_KUAP void setup_kuap(bool disabled) { if (disabled) return; /* * On hash if PKEY feature is not enabled, disable KUAP too. */ if (!early_radix_enabled() && !early_mmu_has_feature(MMU_FTR_PKEY)) return; if (smp_processor_id() == boot_cpuid) { pr_info("Activating Kernel Userspace Access Prevention\n"); cur_cpu_spec->mmu_features |= MMU_FTR_KUAP; } /* * Set the default kernel AMR values on all cpus. */ mtspr(SPRN_AMR, AMR_KUAP_BLOCKED); isync(); } #endif #ifdef CONFIG_PPC_MEM_KEYS void pkey_mm_init(struct mm_struct *mm) { if (!mmu_has_feature(MMU_FTR_PKEY)) return; mm_pkey_allocation_map(mm) = initial_allocation_mask; mm->context.execute_only_pkey = execute_only_key; } static inline void init_amr(int pkey, u8 init_bits) { u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey)); u64 old_amr = current_thread_amr() & ~((u64)(0x3ul) << pkeyshift(pkey)); current->thread.regs->amr = old_amr | new_amr_bits; } static inline void init_iamr(int pkey, u8 init_bits) { u64 new_iamr_bits = (((u64)init_bits & 0x1UL) << pkeyshift(pkey)); u64 old_iamr = current_thread_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey)); if (!likely(pkey_execute_disable_supported)) return; current->thread.regs->iamr = old_iamr | new_iamr_bits; } /* * Set the access rights in AMR IAMR and UAMOR registers for @pkey to that * specified in @init_val. */ int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val) { u64 new_amr_bits = 0x0ul; u64 new_iamr_bits = 0x0ul; u64 pkey_bits, uamor_pkey_bits; /* * Check whether the key is disabled by UAMOR. */ pkey_bits = 0x3ul << pkeyshift(pkey); uamor_pkey_bits = (default_uamor & pkey_bits); /* * Both the bits in UAMOR corresponding to the key should be set */ if (uamor_pkey_bits != pkey_bits) return -EINVAL; if (init_val & PKEY_DISABLE_EXECUTE) { if (!pkey_execute_disable_supported) return -EINVAL; new_iamr_bits |= IAMR_EX_BIT; } init_iamr(pkey, new_iamr_bits); /* Set the bits we need in AMR: */ if (init_val & PKEY_DISABLE_ACCESS) new_amr_bits |= AMR_RD_BIT | AMR_WR_BIT; else if (init_val & PKEY_DISABLE_WRITE) new_amr_bits |= AMR_WR_BIT; init_amr(pkey, new_amr_bits); return 0; } int execute_only_pkey(struct mm_struct *mm) { return mm->context.execute_only_pkey; } static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma) { /* Do this check first since the vm_flags should be hot */ if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC) return false; return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey); } /* * This should only be called for *plain* mprotect calls. */ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey) { /* * If the currently associated pkey is execute-only, but the requested * protection is not execute-only, move it back to the default pkey. */ if (vma_is_pkey_exec_only(vma) && (prot != PROT_EXEC)) return 0; /* * The requested protection is execute-only. Hence let's use an * execute-only pkey. */ if (prot == PROT_EXEC) { pkey = execute_only_pkey(vma->vm_mm); if (pkey > 0) return pkey; } /* Nothing to override. 
*/ return vma_pkey(vma); } static bool pkey_access_permitted(int pkey, bool write, bool execute) { int pkey_shift; u64 amr; pkey_shift = pkeyshift(pkey); if (execute) return !(current_thread_iamr() & (IAMR_EX_BIT << pkey_shift)); amr = current_thread_amr(); if (write) return !(amr & (AMR_WR_BIT << pkey_shift)); return !(amr & (AMR_RD_BIT << pkey_shift)); } bool arch_pte_access_permitted(u64 pte, bool write, bool execute) { if (!mmu_has_feature(MMU_FTR_PKEY)) return true; return pkey_access_permitted(pte_to_pkey_bits(pte), write, execute); } /* * We only want to enforce protection keys on the current thread because we * effectively have no access to AMR/IAMR for other threads or any way to tell * which AMR/IAMR in a threaded process we could use. * * So do not enforce things if the VMA is not from the current mm, or if we are * in a kernel thread. */ bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, bool execute, bool foreign) { if (!mmu_has_feature(MMU_FTR_PKEY)) return true; /* * Do not enforce our key-permissions on a foreign vma. */ if (foreign || vma_is_foreign(vma)) return true; return pkey_access_permitted(vma_pkey(vma), write, execute); } void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm) { if (!mmu_has_feature(MMU_FTR_PKEY)) return; /* Duplicate the oldmm pkey state in mm: */ mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm); mm->context.execute_only_pkey = oldmm->context.execute_only_pkey; } #endif /* CONFIG_PPC_MEM_KEYS */
linux-master
arch/powerpc/mm/book3s64/pkeys.c
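The pkey setup in pkeys.c above encodes two AMR bits and one IAMR bit per key, counted down from the top of the 64-bit register via pkeyshift(). A small standalone sketch of that arithmetic for the execute-only key (key 2), reusing the file's constants; the printed register values are illustrative only, not what a real system would hold:

/*
 * Worked example of the AMR/IAMR bit layout used by pkeys.c above.
 * PKEY_REG_BITS, AMR_BITS_PER_PKEY and the 0x3/0x1 masks mirror the
 * definitions in that file; the register contents printed here are only
 * an illustration of the shift arithmetic.
 */
#include <stdio.h>
#include <stdint.h>

#define PKEY_REG_BITS      (sizeof(uint64_t) * 8)
#define AMR_BITS_PER_PKEY  2
#define pkeyshift(pkey)    (PKEY_REG_BITS - (((pkey) + 1) * AMR_BITS_PER_PKEY))

int main(void)
{
	int execute_only_key = 2;                    /* key 0 is special, key 1 is reserved */
	uint64_t amr = 0;
	uint64_t iamr = 0x5555555555555555ULL;       /* default IAMR pattern from the file */

	/* Deny READ/WRITE for the execute-only key: set both AMR bits for that key. */
	amr |= 0x3ULL << pkeyshift(execute_only_key);
	/* Allow execution: clear the IAMR bit for that key. */
	iamr &= ~(0x1ULL << pkeyshift(execute_only_key));

	printf("pkeyshift(%d) = %d\n", execute_only_key, (int)pkeyshift(execute_only_key));
	printf("AMR  = 0x%016llx\n", (unsigned long long)amr);
	printf("IAMR = 0x%016llx\n", (unsigned long long)iamr);
	return 0;
}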
// SPDX-License-Identifier: GPL-2.0 /* * From split of dump_linuxpagetables.c * Copyright 2016, Rashmica Gupta, IBM Corp. * */ #include <linux/kernel.h> #include <linux/pgtable.h> #include "ptdump.h" static const struct flag_info flag_array[] = { { #ifdef CONFIG_PPC_16K_PAGES .mask = _PAGE_HUGE, .val = _PAGE_HUGE, #else .mask = _PAGE_SPS, .val = _PAGE_SPS, #endif .set = "huge", .clear = " ", }, { .mask = _PAGE_SH, .val = 0, .set = "user", .clear = " ", }, { .mask = _PAGE_RO | _PAGE_NA, .val = 0, .set = "rw", }, { .mask = _PAGE_RO | _PAGE_NA, .val = _PAGE_RO, .set = "r ", }, { .mask = _PAGE_RO | _PAGE_NA, .val = _PAGE_NA, .set = " ", }, { .mask = _PAGE_EXEC, .val = _PAGE_EXEC, .set = " X ", .clear = " ", }, { .mask = _PAGE_PRESENT, .val = _PAGE_PRESENT, .set = "present", .clear = " ", }, { .mask = _PAGE_GUARDED, .val = _PAGE_GUARDED, .set = "guarded", .clear = " ", }, { .mask = _PAGE_DIRTY, .val = _PAGE_DIRTY, .set = "dirty", .clear = " ", }, { .mask = _PAGE_ACCESSED, .val = _PAGE_ACCESSED, .set = "accessed", .clear = " ", }, { .mask = _PAGE_NO_CACHE, .val = _PAGE_NO_CACHE, .set = "no cache", .clear = " ", }, { .mask = _PAGE_SPECIAL, .val = _PAGE_SPECIAL, .set = "special", } }; struct pgtable_level pg_level[5] = { { /* pgd */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, { /* p4d */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, { /* pud */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, { /* pmd */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, { /* pte */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, };
linux-master
arch/powerpc/mm/ptdump/8xx.c
// SPDX-License-Identifier: GPL-2.0 /* * From split of dump_linuxpagetables.c * Copyright 2016, Rashmica Gupta, IBM Corp. * */ #include <linux/kernel.h> #include <linux/pgtable.h> #include "ptdump.h" static const struct flag_info flag_array[] = { { .mask = _PAGE_PRIVILEGED, .val = 0, .set = "user", .clear = " ", }, { .mask = _PAGE_READ, .val = _PAGE_READ, .set = "r", .clear = " ", }, { .mask = _PAGE_WRITE, .val = _PAGE_WRITE, .set = "w", .clear = " ", }, { .mask = _PAGE_EXEC, .val = _PAGE_EXEC, .set = " X ", .clear = " ", }, { .mask = _PAGE_PTE, .val = _PAGE_PTE, .set = "pte", .clear = " ", }, { .mask = _PAGE_PRESENT, .val = _PAGE_PRESENT, .set = "valid", .clear = " ", }, { .mask = _PAGE_PRESENT | _PAGE_INVALID, .val = 0, .set = " ", .clear = "present", }, { .mask = H_PAGE_HASHPTE, .val = H_PAGE_HASHPTE, .set = "hpte", .clear = " ", }, { .mask = _PAGE_DIRTY, .val = _PAGE_DIRTY, .set = "dirty", .clear = " ", }, { .mask = _PAGE_ACCESSED, .val = _PAGE_ACCESSED, .set = "accessed", .clear = " ", }, { .mask = _PAGE_NON_IDEMPOTENT, .val = _PAGE_NON_IDEMPOTENT, .set = "non-idempotent", .clear = " ", }, { .mask = _PAGE_TOLERANT, .val = _PAGE_TOLERANT, .set = "tolerant", .clear = " ", }, { .mask = H_PAGE_BUSY, .val = H_PAGE_BUSY, .set = "busy", }, { #ifdef CONFIG_PPC_64K_PAGES .mask = H_PAGE_COMBO, .val = H_PAGE_COMBO, .set = "combo", }, { .mask = H_PAGE_4K_PFN, .val = H_PAGE_4K_PFN, .set = "4K_pfn", }, { #else /* CONFIG_PPC_64K_PAGES */ .mask = H_PAGE_F_GIX, .val = H_PAGE_F_GIX, .set = "f_gix", .is_val = true, .shift = H_PAGE_F_GIX_SHIFT, }, { .mask = H_PAGE_F_SECOND, .val = H_PAGE_F_SECOND, .set = "f_second", }, { #endif /* CONFIG_PPC_64K_PAGES */ .mask = _PAGE_SPECIAL, .val = _PAGE_SPECIAL, .set = "special", } }; struct pgtable_level pg_level[5] = { { /* pgd */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, { /* p4d */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, { /* pud */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, { /* pmd */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, { /* pte */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, };
linux-master
arch/powerpc/mm/ptdump/book3s64.c
// SPDX-License-Identifier: GPL-2.0 /* * From split of dump_linuxpagetables.c * Copyright 2016, Rashmica Gupta, IBM Corp. * */ #include <linux/kernel.h> #include <linux/pgtable.h> #include "ptdump.h" static const struct flag_info flag_array[] = { { .mask = _PAGE_USER, .val = _PAGE_USER, .set = "user", .clear = " ", }, { .mask = _PAGE_RW, .val = 0, .set = "r ", .clear = "rw", }, { .mask = _PAGE_EXEC, .val = _PAGE_EXEC, .set = " X ", .clear = " ", }, { .mask = _PAGE_PRESENT, .val = _PAGE_PRESENT, .set = "present", .clear = " ", }, { .mask = _PAGE_COHERENT, .val = _PAGE_COHERENT, .set = "coherent", .clear = " ", }, { .mask = _PAGE_GUARDED, .val = _PAGE_GUARDED, .set = "guarded", .clear = " ", }, { .mask = _PAGE_DIRTY, .val = _PAGE_DIRTY, .set = "dirty", .clear = " ", }, { .mask = _PAGE_ACCESSED, .val = _PAGE_ACCESSED, .set = "accessed", .clear = " ", }, { .mask = _PAGE_WRITETHRU, .val = _PAGE_WRITETHRU, .set = "write through", .clear = " ", }, { .mask = _PAGE_NO_CACHE, .val = _PAGE_NO_CACHE, .set = "no cache", .clear = " ", }, { .mask = _PAGE_SPECIAL, .val = _PAGE_SPECIAL, .set = "special", } }; struct pgtable_level pg_level[5] = { { /* pgd */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, { /* p4d */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, { /* pud */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, { /* pmd */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, { /* pte */ .flag = flag_array, .num = ARRAY_SIZE(flag_array), }, };
linux-master
arch/powerpc/mm/ptdump/shared.c
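The flag tables in the three ptdump sources above (8xx, book3s64, shared) are all interpreted the same way: each entry's mask is ANDed with the PTE value and compared against val to choose between the set and clear strings. A toy standalone version of that matching loop, with invented bit positions rather than the real PTE layout:

/*
 * Minimal sketch of how a flag table like the ones above is applied to a
 * PTE value: mask the PTE, compare with .val, print .set or .clear.
 * The bit values and the example PTE are invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

struct flag_info {
	uint64_t mask;
	uint64_t val;
	const char *set;
	const char *clear;
};

#define EX_PRESENT  0x1    /* invented bit positions, not the real PTE layout */
#define EX_RW       0x2
#define EX_DIRTY    0x80

static const struct flag_info example_flags[] = {
	{ .mask = EX_PRESENT, .val = EX_PRESENT, .set = "present", .clear = "       " },
	{ .mask = EX_RW,      .val = EX_RW,      .set = "rw",      .clear = "r " },
	{ .mask = EX_DIRTY,   .val = EX_DIRTY,   .set = "dirty",   .clear = "     " },
};

int main(void)
{
	uint64_t pte = EX_PRESENT | EX_DIRTY;   /* example PTE value */
	size_t i;

	for (i = 0; i < sizeof(example_flags) / sizeof(example_flags[0]); i++) {
		const struct flag_info *f = &example_flags[i];
		const char *s = ((pte & f->mask) == f->val) ? f->set : f->clear;

		if (s)
			printf(" %s", s);
	}
	printf("\n");
	return 0;
}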
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018, Christophe Leroy CS S.I.
 * <[email protected]>
 *
 * This dumps the content of Segment Registers
 */

#include <linux/debugfs.h>

static void seg_show(struct seq_file *m, int i)
{
	u32 val = mfsr(i << 28);

	seq_printf(m, "0x%01x0000000-0x%01xfffffff ", i, i);
	seq_printf(m, "Kern key %d ", (val >> 30) & 1);
	seq_printf(m, "User key %d ", (val >> 29) & 1);
	if (val & 0x80000000) {
		seq_printf(m, "Device 0x%03x", (val >> 20) & 0x1ff);
		seq_printf(m, "-0x%05x", val & 0xfffff);
	} else {
		if (val & 0x10000000)
			seq_puts(m, "No Exec ");
		seq_printf(m, "VSID 0x%06x", val & 0xffffff);
	}
	seq_puts(m, "\n");
}

static int sr_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "---[ User Segments ]---\n");
	for (i = 0; i < TASK_SIZE >> 28; i++)
		seg_show(m, i);

	seq_puts(m, "\n---[ Kernel Segments ]---\n");
	for (; i < 16; i++)
		seg_show(m, i);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sr);

static int __init sr_init(void)
{
	debugfs_create_file("segment_registers", 0400, arch_debugfs_dir,
			    NULL, &sr_fops);
	return 0;
}

device_initcall(sr_init);
linux-master
arch/powerpc/mm/ptdump/segment_regs.c
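seg_show() above unpacks a 32-bit segment register into its key bits, the no-execute bit and the VSID. A userspace sketch of the same field decode, using an invented register value in place of mfsr():

/*
 * Userspace sketch of the segment-register field decode performed by
 * seg_show() above.  The example value is invented; on real hardware it
 * would come from mfsr().
 */
#include <stdio.h>
#include <stdint.h>

static void decode_sr(uint32_t val)
{
	printf("Kern key %u ", (val >> 30) & 1);
	printf("User key %u ", (val >> 29) & 1);
	if (val & 0x80000000) {                 /* T = 1: direct-store segment */
		printf("Device 0x%03x", (val >> 20) & 0x1ff);
		printf("-0x%05x", val & 0xfffff);
	} else {
		if (val & 0x10000000)           /* N bit: no execute */
			printf("No Exec ");
		printf("VSID 0x%06x", val & 0xffffff);
	}
	printf("\n");
}

int main(void)
{
	decode_sr(0x20c12345);   /* made-up value: user key set, VSID 0xc12345 */
	return 0;
}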
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2018, Christophe Leroy CS S.I. * <[email protected]> * * This dumps the content of BATS */ #include <linux/pgtable.h> #include <linux/debugfs.h> #include <asm/cpu_has_feature.h> #include "ptdump.h" static void bat_show_603(struct seq_file *m, int idx, u32 lower, u32 upper, bool is_d) { u32 bepi = upper & 0xfffe0000; u32 bl = (upper >> 2) & 0x7ff; u32 k = upper & 3; phys_addr_t brpn = PHYS_BAT_ADDR(lower); u32 size = (bl + 1) << 17; seq_printf(m, "%d: ", idx); if (k == 0) { seq_puts(m, " -\n"); return; } seq_printf(m, "0x%08x-0x%08x ", bepi, bepi + size - 1); #ifdef CONFIG_PHYS_64BIT seq_printf(m, "0x%016llx ", brpn); #else seq_printf(m, "0x%08x ", brpn); #endif pt_dump_size(m, size); if (k == 1) seq_puts(m, "User "); else if (k == 2) seq_puts(m, "Kernel "); else seq_puts(m, "Kernel/User "); if (lower & BPP_RX) seq_puts(m, is_d ? "r " : " x "); else if (lower & BPP_RW) seq_puts(m, is_d ? "rw " : " x "); else seq_puts(m, is_d ? " " : " "); seq_puts(m, lower & _PAGE_WRITETHRU ? "w " : " "); seq_puts(m, lower & _PAGE_NO_CACHE ? "i " : " "); seq_puts(m, lower & _PAGE_COHERENT ? "m " : " "); seq_puts(m, lower & _PAGE_GUARDED ? "g " : " "); seq_puts(m, "\n"); } #define BAT_SHOW_603(_m, _n, _l, _u, _d) bat_show_603(_m, _n, mfspr(_l), mfspr(_u), _d) static int bats_show(struct seq_file *m, void *v) { seq_puts(m, "---[ Instruction Block Address Translation ]---\n"); BAT_SHOW_603(m, 0, SPRN_IBAT0L, SPRN_IBAT0U, false); BAT_SHOW_603(m, 1, SPRN_IBAT1L, SPRN_IBAT1U, false); BAT_SHOW_603(m, 2, SPRN_IBAT2L, SPRN_IBAT2U, false); BAT_SHOW_603(m, 3, SPRN_IBAT3L, SPRN_IBAT3U, false); if (mmu_has_feature(MMU_FTR_USE_HIGH_BATS)) { BAT_SHOW_603(m, 4, SPRN_IBAT4L, SPRN_IBAT4U, false); BAT_SHOW_603(m, 5, SPRN_IBAT5L, SPRN_IBAT5U, false); BAT_SHOW_603(m, 6, SPRN_IBAT6L, SPRN_IBAT6U, false); BAT_SHOW_603(m, 7, SPRN_IBAT7L, SPRN_IBAT7U, false); } seq_puts(m, "\n---[ Data Block Address Translation ]---\n"); BAT_SHOW_603(m, 0, SPRN_DBAT0L, SPRN_DBAT0U, true); BAT_SHOW_603(m, 1, SPRN_DBAT1L, SPRN_DBAT1U, true); BAT_SHOW_603(m, 2, SPRN_DBAT2L, SPRN_DBAT2U, true); BAT_SHOW_603(m, 3, SPRN_DBAT3L, SPRN_DBAT3U, true); if (mmu_has_feature(MMU_FTR_USE_HIGH_BATS)) { BAT_SHOW_603(m, 4, SPRN_DBAT4L, SPRN_DBAT4U, true); BAT_SHOW_603(m, 5, SPRN_DBAT5L, SPRN_DBAT5U, true); BAT_SHOW_603(m, 6, SPRN_DBAT6L, SPRN_DBAT6U, true); BAT_SHOW_603(m, 7, SPRN_DBAT7L, SPRN_DBAT7U, true); } return 0; } DEFINE_SHOW_ATTRIBUTE(bats); static int __init bats_init(void) { debugfs_create_file("block_address_translation", 0400, arch_debugfs_dir, NULL, &bats_fops); return 0; } device_initcall(bats_init);
linux-master
arch/powerpc/mm/ptdump/bats.c
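bat_show_603() above derives the block's effective address, validity bits and size from the upper BAT word, with the size computed as (bl + 1) << 17. A short worked example of that decode on an invented register value:

/*
 * Worked example of the BAT upper-word decode used by bat_show_603()
 * above.  The register value is invented; on hardware it would come from
 * mfspr() on one of the IBAT/DBAT upper registers.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t upper = 0xc0001ffe;             /* made-up BAT upper word */
	uint32_t bepi  = upper & 0xfffe0000;     /* effective address of the block */
	uint32_t bl    = (upper >> 2) & 0x7ff;   /* block length mask */
	uint32_t k     = upper & 3;              /* Vs/Vp valid bits */
	uint32_t size  = (bl + 1) << 17;         /* 128KB granule: bl = 0x7ff -> 256MB */

	printf("bepi=0x%08x bl=0x%03x k=%u size=%u bytes\n", bepi, bl, k, size);
	return 0;
}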
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2016, Rashmica Gupta, IBM Corp. * * This traverses the kernel virtual memory and dumps the pages that are in * the hash pagetable, along with their flags to * /sys/kernel/debug/kernel_hash_pagetable. * * If radix is enabled then there is no hash page table and so no debugfs file * is generated. */ #include <linux/debugfs.h> #include <linux/fs.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/const.h> #include <asm/page.h> #include <asm/plpar_wrappers.h> #include <linux/memblock.h> #include <asm/firmware.h> #include <asm/pgalloc.h> struct pg_state { struct seq_file *seq; const struct addr_marker *marker; unsigned long start_address; unsigned int level; u64 current_flags; }; struct addr_marker { unsigned long start_address; const char *name; }; static struct addr_marker address_markers[] = { { 0, "Start of kernel VM" }, { 0, "vmalloc() Area" }, { 0, "vmalloc() End" }, { 0, "isa I/O start" }, { 0, "isa I/O end" }, { 0, "phb I/O start" }, { 0, "phb I/O end" }, { 0, "I/O remap start" }, { 0, "I/O remap end" }, { 0, "vmemmap start" }, { -1, NULL }, }; struct flag_info { u64 mask; u64 val; const char *set; const char *clear; bool is_val; int shift; }; static const struct flag_info v_flag_array[] = { { .mask = SLB_VSID_B, .val = SLB_VSID_B_256M, .set = "ssize: 256M", .clear = "ssize: 1T ", }, { .mask = HPTE_V_SECONDARY, .val = HPTE_V_SECONDARY, .set = "secondary", .clear = "primary ", }, { .mask = HPTE_V_VALID, .val = HPTE_V_VALID, .set = "valid ", .clear = "invalid", }, { .mask = HPTE_V_BOLTED, .val = HPTE_V_BOLTED, .set = "bolted", .clear = "", } }; static const struct flag_info r_flag_array[] = { { .mask = HPTE_R_PP0 | HPTE_R_PP, .val = PP_RWXX, .set = "prot:RW--", }, { .mask = HPTE_R_PP0 | HPTE_R_PP, .val = PP_RWRX, .set = "prot:RWR-", }, { .mask = HPTE_R_PP0 | HPTE_R_PP, .val = PP_RWRW, .set = "prot:RWRW", }, { .mask = HPTE_R_PP0 | HPTE_R_PP, .val = PP_RXRX, .set = "prot:R-R-", }, { .mask = HPTE_R_PP0 | HPTE_R_PP, .val = PP_RXXX, .set = "prot:R---", }, { .mask = HPTE_R_KEY_HI | HPTE_R_KEY_LO, .val = HPTE_R_KEY_HI | HPTE_R_KEY_LO, .set = "key", .clear = "", .is_val = true, }, { .mask = HPTE_R_R, .val = HPTE_R_R, .set = "ref", .clear = " ", }, { .mask = HPTE_R_C, .val = HPTE_R_C, .set = "changed", .clear = " ", }, { .mask = HPTE_R_N, .val = HPTE_R_N, .set = "no execute", }, { .mask = HPTE_R_WIMG, .val = HPTE_R_W, .set = "writethru", }, { .mask = HPTE_R_WIMG, .val = HPTE_R_I, .set = "no cache", }, { .mask = HPTE_R_WIMG, .val = HPTE_R_G, .set = "guarded", } }; static int calculate_pagesize(struct pg_state *st, int ps, char s[]) { static const char units[] = "BKMGTPE"; const char *unit = units; while (ps > 9 && unit[1]) { ps -= 10; unit++; } seq_printf(st->seq, " %s_ps: %i%c\t", s, 1<<ps, *unit); return ps; } static void dump_flag_info(struct pg_state *st, const struct flag_info *flag, u64 pte, int num) { unsigned int i; for (i = 0; i < num; i++, flag++) { const char *s = NULL; u64 val; /* flag not defined so don't check it */ if (flag->mask == 0) continue; /* Some 'flags' are actually values */ if (flag->is_val) { val = pte & flag->val; if (flag->shift) val = val >> flag->shift; seq_printf(st->seq, " %s:%llx", flag->set, val); } else { if ((pte & flag->mask) == flag->val) s = flag->set; else s = flag->clear; if (s) seq_printf(st->seq, " %s", s); } } } static void dump_hpte_info(struct pg_state *st, unsigned long ea, u64 v, u64 r, unsigned long rpn, int bps, int aps, unsigned 
long lp) { int aps_index; while (ea >= st->marker[1].start_address) { st->marker++; seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); } seq_printf(st->seq, "0x%lx:\t", ea); seq_printf(st->seq, "AVPN:%llx\t", HPTE_V_AVPN_VAL(v)); dump_flag_info(st, v_flag_array, v, ARRAY_SIZE(v_flag_array)); seq_printf(st->seq, " rpn: %lx\t", rpn); dump_flag_info(st, r_flag_array, r, ARRAY_SIZE(r_flag_array)); calculate_pagesize(st, bps, "base"); aps_index = calculate_pagesize(st, aps, "actual"); if (aps_index != 2) seq_printf(st->seq, "LP enc: %lx", lp); seq_putc(st->seq, '\n'); } static int native_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *r) { struct hash_pte *hptep; unsigned long hash, vsid, vpn, hpte_group, want_v, hpte_v; int i, ssize = mmu_kernel_ssize; unsigned long shift = mmu_psize_defs[psize].shift; /* calculate hash */ vsid = get_kernel_vsid(ea, ssize); vpn = hpt_vpn(ea, vsid, ssize); hash = hpt_hash(vpn, shift, ssize); want_v = hpte_encode_avpn(vpn, psize, ssize); /* to check in the secondary hash table, we invert the hash */ if (!primary) hash = ~hash; hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; for (i = 0; i < HPTES_PER_GROUP; i++) { hptep = htab_address + hpte_group; hpte_v = be64_to_cpu(hptep->v); if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { /* HPTE matches */ *v = be64_to_cpu(hptep->v); *r = be64_to_cpu(hptep->r); return 0; } ++hpte_group; } return -1; } static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *r) { struct { unsigned long v; unsigned long r; } ptes[4]; unsigned long vsid, vpn, hash, hpte_group, want_v; int i, j, ssize = mmu_kernel_ssize; long lpar_rc = 0; unsigned long shift = mmu_psize_defs[psize].shift; /* calculate hash */ vsid = get_kernel_vsid(ea, ssize); vpn = hpt_vpn(ea, vsid, ssize); hash = hpt_hash(vpn, shift, ssize); want_v = hpte_encode_avpn(vpn, psize, ssize); /* to check in the secondary hash table, we invert the hash */ if (!primary) hash = ~hash; hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; /* see if we can find an entry in the hpte with this hash */ for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) { lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes); if (lpar_rc) continue; for (j = 0; j < 4; j++) { if (HPTE_V_COMPARE(ptes[j].v, want_v) && (ptes[j].v & HPTE_V_VALID)) { /* HPTE matches */ *v = ptes[j].v; *r = ptes[j].r; return 0; } } } return -1; } static void decode_r(int bps, unsigned long r, unsigned long *rpn, int *aps, unsigned long *lp_bits) { struct mmu_psize_def entry; unsigned long arpn, mask, lp; int penc = -2, idx = 0, shift; /*. * The LP field has 8 bits. Depending on the actual page size, some of * these bits are concatenated with the APRN to get the RPN. The rest * of the bits in the LP field is the LP value and is an encoding for * the base page size and the actual page size. * * - find the mmu entry for our base page size * - go through all page encodings and use the associated mask to * find an encoding that matches our encoding in the LP field. 
*/ arpn = (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT; lp = arpn & 0xff; entry = mmu_psize_defs[bps]; while (idx < MMU_PAGE_COUNT) { penc = entry.penc[idx]; if ((penc != -1) && (mmu_psize_defs[idx].shift)) { shift = mmu_psize_defs[idx].shift - HPTE_R_RPN_SHIFT; mask = (0x1 << (shift)) - 1; if ((lp & mask) == penc) { *aps = mmu_psize_to_shift(idx); *lp_bits = lp & mask; *rpn = arpn >> shift; return; } } idx++; } } static int base_hpte_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *r) { if (IS_ENABLED(CONFIG_PPC_PSERIES) && firmware_has_feature(FW_FEATURE_LPAR)) return pseries_find(ea, psize, primary, v, r); return native_find(ea, psize, primary, v, r); } static unsigned long hpte_find(struct pg_state *st, unsigned long ea, int psize) { unsigned long slot; u64 v = 0, r = 0; unsigned long rpn, lp_bits; int base_psize = 0, actual_psize = 0; if (ea < PAGE_OFFSET) return -1; /* Look in primary table */ slot = base_hpte_find(ea, psize, true, &v, &r); /* Look in secondary table */ if (slot == -1) slot = base_hpte_find(ea, psize, false, &v, &r); /* No entry found */ if (slot == -1) return -1; /* * We found an entry in the hash page table: * - check that this has the same base page * - find the actual page size * - find the RPN */ base_psize = mmu_psize_to_shift(psize); if ((v & HPTE_V_LARGE) == HPTE_V_LARGE) { decode_r(psize, r, &rpn, &actual_psize, &lp_bits); } else { /* 4K actual page size */ actual_psize = 12; rpn = (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT; /* In this case there are no LP bits */ lp_bits = -1; } /* * We didn't find a matching encoding, so the PTE we found isn't for * this address. */ if (actual_psize == -1) return -1; dump_hpte_info(st, ea, v, r, rpn, base_psize, actual_psize, lp_bits); return 0; } static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) { pte_t *pte = pte_offset_kernel(pmd, 0); unsigned long addr, pteval, psize; int i, status; for (i = 0; i < PTRS_PER_PTE; i++, pte++) { addr = start + i * PAGE_SIZE; pteval = pte_val(*pte); if (addr < VMALLOC_END) psize = mmu_vmalloc_psize; else psize = mmu_io_psize; /* check for secret 4K mappings */ if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && ((pteval & H_PAGE_COMBO) == H_PAGE_COMBO || (pteval & H_PAGE_4K_PFN) == H_PAGE_4K_PFN)) psize = mmu_io_psize; /* check for hashpte */ status = hpte_find(st, addr, psize); if (((pteval & H_PAGE_HASHPTE) != H_PAGE_HASHPTE) && (status != -1)) { /* found a hpte that is not in the linux page tables */ seq_printf(st->seq, "page probably bolted before linux" " pagetables were set: addr:%lx, pteval:%lx\n", addr, pteval); } } } static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) { pmd_t *pmd = pmd_offset(pud, 0); unsigned long addr; unsigned int i; for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { addr = start + i * PMD_SIZE; if (!pmd_none(*pmd)) /* pmd exists */ walk_pte(st, pmd, addr); } } static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start) { pud_t *pud = pud_offset(p4d, 0); unsigned long addr; unsigned int i; for (i = 0; i < PTRS_PER_PUD; i++, pud++) { addr = start + i * PUD_SIZE; if (!pud_none(*pud)) /* pud exists */ walk_pmd(st, pud, addr); } } static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start) { p4d_t *p4d = p4d_offset(pgd, 0); unsigned long addr; unsigned int i; for (i = 0; i < PTRS_PER_P4D; i++, p4d++) { addr = start + i * P4D_SIZE; if (!p4d_none(*p4d)) /* p4d exists */ walk_pud(st, p4d, addr); } } static void walk_pagetables(struct pg_state *st) { pgd_t *pgd = pgd_offset_k(0UL); unsigned int i; unsigned long 
addr; /* * Traverse the linux pagetable structure and dump pages that are in * the hash pagetable. */ for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { addr = KERN_VIRT_START + i * PGDIR_SIZE; if (!pgd_none(*pgd)) /* pgd exists */ walk_p4d(st, pgd, addr); } } static void walk_linearmapping(struct pg_state *st) { unsigned long addr; /* * Traverse the linear mapping section of virtual memory and dump pages * that are in the hash pagetable. */ unsigned long psize = 1 << mmu_psize_defs[mmu_linear_psize].shift; for (addr = PAGE_OFFSET; addr < PAGE_OFFSET + memblock_end_of_DRAM(); addr += psize) hpte_find(st, addr, mmu_linear_psize); } static void walk_vmemmap(struct pg_state *st) { struct vmemmap_backing *ptr = vmemmap_list; if (!IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) return; /* * Traverse the vmemmaped memory and dump pages that are in the hash * pagetable. */ while (ptr->list) { hpte_find(st, ptr->virt_addr, mmu_vmemmap_psize); ptr = ptr->list; } seq_puts(st->seq, "---[ vmemmap end ]---\n"); } static void populate_markers(void) { address_markers[0].start_address = PAGE_OFFSET; address_markers[1].start_address = VMALLOC_START; address_markers[2].start_address = VMALLOC_END; address_markers[3].start_address = ISA_IO_BASE; address_markers[4].start_address = ISA_IO_END; address_markers[5].start_address = PHB_IO_BASE; address_markers[6].start_address = PHB_IO_END; address_markers[7].start_address = IOREMAP_BASE; address_markers[8].start_address = IOREMAP_END; address_markers[9].start_address = H_VMEMMAP_START; } static int ptdump_show(struct seq_file *m, void *v) { struct pg_state st = { .seq = m, .start_address = PAGE_OFFSET, .marker = address_markers, }; /* * Traverse the 0xc, 0xd and 0xf areas of the kernel virtual memory and * dump pages that are in the hash pagetable. */ walk_linearmapping(&st); walk_pagetables(&st); walk_vmemmap(&st); return 0; } DEFINE_SHOW_ATTRIBUTE(ptdump); static int ptdump_init(void) { if (!radix_enabled()) { populate_markers(); debugfs_create_file("kernel_hash_pagetable", 0400, NULL, NULL, &ptdump_fops); } return 0; } device_initcall(ptdump_init);
linux-master
arch/powerpc/mm/ptdump/hashpagetable.c
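calculate_pagesize() above turns a page-size shift into a human-readable figure by peeling off 10 bits per unit letter. A standalone version of that loop with a few example shifts:

/*
 * Standalone version of the page-size pretty-printer logic from
 * calculate_pagesize() above: the shift count is reduced in steps of 10
 * and the matching unit letter chosen.  The example shifts are arbitrary.
 */
#include <stdio.h>

static void show_pagesize(int ps, const char *label)
{
	static const char units[] = "BKMGTPE";
	const char *unit = units;

	while (ps > 9 && unit[1]) {
		ps -= 10;
		unit++;
	}
	printf("%s_ps: %d%c\n", label, 1 << ps, *unit);
}

int main(void)
{
	show_pagesize(12, "base");    /* prints 4K  */
	show_pagesize(16, "actual");  /* prints 64K */
	show_pagesize(24, "actual");  /* prints 16M */
	return 0;
}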
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2016, Rashmica Gupta, IBM Corp. * * This traverses the kernel pagetables and dumps the * information about the used sections of memory to * /sys/kernel/debug/kernel_pagetables. * * Derived from the arm64 implementation: * Copyright (c) 2014, The Linux Foundation, Laura Abbott. * (C) Copyright 2008 Intel Corporation, Arjan van de Ven. */ #include <linux/debugfs.h> #include <linux/fs.h> #include <linux/hugetlb.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/ptdump.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <asm/fixmap.h> #include <linux/const.h> #include <linux/kasan.h> #include <asm/page.h> #include <asm/hugetlb.h> #include <mm/mmu_decl.h> #include "ptdump.h" /* * To visualise what is happening, * * - PTRS_PER_P** = how many entries there are in the corresponding P** * - P**_SHIFT = how many bits of the address we use to index into the * corresponding P** * - P**_SIZE is how much memory we can access through the table - not the * size of the table itself. * P**={PGD, PUD, PMD, PTE} * * * Each entry of the PGD points to a PUD. Each entry of a PUD points to a * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to * a page. * * In the case where there are only 3 levels, the PUD is folded into the * PGD: every PUD has only one entry which points to the PMD. * * The page dumper groups page table entries of the same type into a single * description. It uses pg_state to track the range information while * iterating over the PTE entries. When the continuity is broken it then * dumps out a description of the range - ie PTEs that are virtually contiguous * with the same PTE flags are chunked together. This is to make it clear how * different areas of the kernel virtual memory are used. * */ struct pg_state { struct ptdump_state ptdump; struct seq_file *seq; const struct addr_marker *marker; unsigned long start_address; unsigned long start_pa; int level; u64 current_flags; bool check_wx; unsigned long wx_pages; }; struct addr_marker { unsigned long start_address; const char *name; }; static struct addr_marker address_markers[] = { { 0, "Start of kernel VM" }, #ifdef MODULES_VADDR { 0, "modules start" }, { 0, "modules end" }, #endif { 0, "vmalloc() Area" }, { 0, "vmalloc() End" }, #ifdef CONFIG_PPC64 { 0, "isa I/O start" }, { 0, "isa I/O end" }, { 0, "phb I/O start" }, { 0, "phb I/O end" }, { 0, "I/O remap start" }, { 0, "I/O remap end" }, { 0, "vmemmap start" }, #else { 0, "Early I/O remap start" }, { 0, "Early I/O remap end" }, #ifdef CONFIG_HIGHMEM { 0, "Highmem PTEs start" }, { 0, "Highmem PTEs end" }, #endif { 0, "Fixmap start" }, { 0, "Fixmap end" }, #endif #ifdef CONFIG_KASAN { 0, "kasan shadow mem start" }, { 0, "kasan shadow mem end" }, #endif { -1, NULL }, }; static struct ptdump_range ptdump_range[] __ro_after_init = { {TASK_SIZE_MAX, ~0UL}, {0, 0} }; #define pt_dump_seq_printf(m, fmt, args...) 
\ ({ \ if (m) \ seq_printf(m, fmt, ##args); \ }) #define pt_dump_seq_putc(m, c) \ ({ \ if (m) \ seq_putc(m, c); \ }) void pt_dump_size(struct seq_file *m, unsigned long size) { static const char units[] = " KMGTPE"; const char *unit = units; /* Work out what appropriate unit to use */ while (!(size & 1023) && unit[1]) { size >>= 10; unit++; } pt_dump_seq_printf(m, "%9lu%c ", size, *unit); } static void dump_flag_info(struct pg_state *st, const struct flag_info *flag, u64 pte, int num) { unsigned int i; for (i = 0; i < num; i++, flag++) { const char *s = NULL; u64 val; /* flag not defined so don't check it */ if (flag->mask == 0) continue; /* Some 'flags' are actually values */ if (flag->is_val) { val = pte & flag->val; if (flag->shift) val = val >> flag->shift; pt_dump_seq_printf(st->seq, " %s:%llx", flag->set, val); } else { if ((pte & flag->mask) == flag->val) s = flag->set; else s = flag->clear; if (s) pt_dump_seq_printf(st->seq, " %s", s); } st->current_flags &= ~flag->mask; } if (st->current_flags != 0) pt_dump_seq_printf(st->seq, " unknown flags:%llx", st->current_flags); } static void dump_addr(struct pg_state *st, unsigned long addr) { #ifdef CONFIG_PPC64 #define REG "0x%016lx" #else #define REG "0x%08lx" #endif pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1); pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa); pt_dump_size(st->seq, addr - st->start_address); } static void note_prot_wx(struct pg_state *st, unsigned long addr) { pte_t pte = __pte(st->current_flags); if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx) return; if (!pte_write(pte) || !pte_exec(pte)) return; WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n", (void *)st->start_address, (void *)st->start_address); st->wx_pages += (addr - st->start_address) / PAGE_SIZE; } static void note_page_update_state(struct pg_state *st, unsigned long addr, int level, u64 val) { u64 flag = level >= 0 ? val & pg_level[level].mask : 0; u64 pa = val & PTE_RPN_MASK; st->level = level; st->current_flags = flag; st->start_address = addr; st->start_pa = pa; while (addr >= st->marker[1].start_address) { st->marker++; pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); } } static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val) { u64 flag = level >= 0 ? val & pg_level[level].mask : 0; struct pg_state *st = container_of(pt_st, struct pg_state, ptdump); /* At first no level is set */ if (st->level == -1) { pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); note_page_update_state(st, addr, level, val); /* * Dump the section of virtual memory when: * - the PTE flags from one entry to the next differs. * - we change levels in the tree. * - the address is in a different section of memory and is thus * used for a different purpose, regardless of the flags. 
*/ } else if (flag != st->current_flags || level != st->level || addr >= st->marker[1].start_address) { /* Check the PTE flags */ if (st->current_flags) { note_prot_wx(st, addr); dump_addr(st, addr); /* Dump all the flags */ if (pg_level[st->level].flag) dump_flag_info(st, pg_level[st->level].flag, st->current_flags, pg_level[st->level].num); pt_dump_seq_putc(st->seq, '\n'); } /* * Address indicates we have passed the end of the * current section of virtual memory */ note_page_update_state(st, addr, level, val); } } static void populate_markers(void) { int i = 0; #ifdef CONFIG_PPC64 address_markers[i++].start_address = PAGE_OFFSET; #else address_markers[i++].start_address = TASK_SIZE; #endif #ifdef MODULES_VADDR address_markers[i++].start_address = MODULES_VADDR; address_markers[i++].start_address = MODULES_END; #endif address_markers[i++].start_address = VMALLOC_START; address_markers[i++].start_address = VMALLOC_END; #ifdef CONFIG_PPC64 address_markers[i++].start_address = ISA_IO_BASE; address_markers[i++].start_address = ISA_IO_END; address_markers[i++].start_address = PHB_IO_BASE; address_markers[i++].start_address = PHB_IO_END; address_markers[i++].start_address = IOREMAP_BASE; address_markers[i++].start_address = IOREMAP_END; /* What is the ifdef about? */ #ifdef CONFIG_PPC_BOOK3S_64 address_markers[i++].start_address = H_VMEMMAP_START; #else address_markers[i++].start_address = VMEMMAP_BASE; #endif #else /* !CONFIG_PPC64 */ address_markers[i++].start_address = ioremap_bot; address_markers[i++].start_address = IOREMAP_TOP; #ifdef CONFIG_HIGHMEM address_markers[i++].start_address = PKMAP_BASE; address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP); #endif address_markers[i++].start_address = FIXADDR_START; address_markers[i++].start_address = FIXADDR_TOP; #endif /* CONFIG_PPC64 */ #ifdef CONFIG_KASAN address_markers[i++].start_address = KASAN_SHADOW_START; address_markers[i++].start_address = KASAN_SHADOW_END; #endif } static int ptdump_show(struct seq_file *m, void *v) { struct pg_state st = { .seq = m, .marker = address_markers, .level = -1, .ptdump = { .note_page = note_page, .range = ptdump_range, } }; /* Traverse kernel page tables */ ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); return 0; } DEFINE_SHOW_ATTRIBUTE(ptdump); static void __init build_pgtable_complete_mask(void) { unsigned int i, j; for (i = 0; i < ARRAY_SIZE(pg_level); i++) if (pg_level[i].flag) for (j = 0; j < pg_level[i].num; j++) pg_level[i].mask |= pg_level[i].flag[j].mask; } #ifdef CONFIG_DEBUG_WX void ptdump_check_wx(void) { struct pg_state st = { .seq = NULL, .marker = (struct addr_marker[]) { { 0, NULL}, { -1, NULL}, }, .level = -1, .check_wx = true, .ptdump = { .note_page = note_page, .range = ptdump_range, } }; ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); if (st.wx_pages) pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n", st.wx_pages); else pr_info("Checked W+X mappings: passed, no W+X pages found\n"); } #endif static int __init ptdump_init(void) { #ifdef CONFIG_PPC64 if (!radix_enabled()) ptdump_range[0].start = KERN_VIRT_START; else ptdump_range[0].start = PAGE_OFFSET; ptdump_range[0].end = PAGE_OFFSET + (PGDIR_SIZE * PTRS_PER_PGD); #endif populate_markers(); build_pgtable_complete_mask(); if (IS_ENABLED(CONFIG_PTDUMP_DEBUGFS)) debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); return 0; } device_initcall(ptdump_init);
linux-master
arch/powerpc/mm/ptdump/ptdump.c
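note_page() above groups virtually contiguous entries with identical flags into a single output range and flushes the range whenever the flags, the level or the memory section changes. A toy illustration of just the flag-change grouping, with invented addresses and flags:

/*
 * Toy illustration of the range-grouping idea in note_page() above:
 * consecutive entries with identical flags are merged into one line, and
 * a new range starts whenever the flags change.  Addresses and flag
 * values are invented.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* pretend flags for 8 consecutive 4K pages */
	uint64_t flags[] = { 0x11, 0x11, 0x11, 0x3, 0x3, 0x11, 0x11, 0x11 };
	unsigned long page = 0x1000;
	unsigned long start = 0xc0000000, addr = start;
	uint64_t cur = flags[0];
	size_t i, n = sizeof(flags) / sizeof(flags[0]);

	for (i = 1; i <= n; i++) {
		uint64_t next = (i < n) ? flags[i] : ~cur;  /* force a flush at the end */

		addr += page;
		if (next != cur) {
			printf("0x%08lx-0x%08lx flags=0x%llx\n",
			       start, addr - 1, (unsigned long long)cur);
			start = addr;
			cur = next;
		}
	}
	return 0;
}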
// SPDX-License-Identifier: GPL-2.0-or-later /* * Performance counter callchain support - powerpc architecture code * * Copyright © 2009 Paul Mackerras, IBM Corporation. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/perf_event.h> #include <linux/percpu.h> #include <linux/uaccess.h> #include <linux/mm.h> #include <asm/ptrace.h> #include <asm/sigcontext.h> #include <asm/ucontext.h> #include <asm/vdso.h> #include <asm/pte-walk.h> #include "callchain.h" /* * Is sp valid as the address of the next kernel stack frame after prev_sp? * The next frame may be in a different stack area but should not go * back down in the same stack area. */ static int valid_next_sp(unsigned long sp, unsigned long prev_sp) { if (sp & 0xf) return 0; /* must be 16-byte aligned */ if (!validate_sp(sp, current)) return 0; if (sp >= prev_sp + STACK_FRAME_MIN_SIZE) return 1; /* * sp could decrease when we jump off an interrupt stack * back to the regular process stack. */ if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1))) return 1; return 0; } void __no_sanitize_address perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { unsigned long sp, next_sp; unsigned long next_ip; unsigned long lr; long level = 0; unsigned long *fp; lr = regs->link; sp = regs->gpr[1]; perf_callchain_store(entry, perf_instruction_pointer(regs)); if (!validate_sp(sp, current)) return; for (;;) { fp = (unsigned long *) sp; next_sp = fp[0]; if (next_sp == sp + STACK_INT_FRAME_SIZE && validate_sp_size(sp, current, STACK_INT_FRAME_SIZE) && fp[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) { /* * This looks like an interrupt frame for an * interrupt that occurred in the kernel */ regs = (struct pt_regs *)(sp + STACK_INT_FRAME_REGS); next_ip = regs->nip; lr = regs->link; level = 0; perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL); } else { if (level == 0) next_ip = lr; else next_ip = fp[STACK_FRAME_LR_SAVE]; /* * We can't tell which of the first two addresses * we get are valid, but we can filter out the * obviously bogus ones here. We replace them * with 0 rather than removing them entirely so * that userspace can tell which is which. */ if ((level == 1 && next_ip == lr) || (level <= 1 && !kernel_text_address(next_ip))) next_ip = 0; ++level; } perf_callchain_store(entry, next_ip); if (!valid_next_sp(next_sp, sp)) return; sp = next_sp; } } void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { if (!is_32bit_task()) perf_callchain_user_64(entry, regs); else perf_callchain_user_32(entry, regs); }
linux-master
arch/powerpc/perf/callchain.c
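valid_next_sp() above accepts a candidate stack pointer only if it is 16-byte aligned and either advances by at least a minimum frame size or jumps to a different stack area. A standalone sketch of those checks with stand-in constants; the kernel's validate_sp() call is omitted here:

/*
 * Standalone sketch of the stack-pointer sanity rules in valid_next_sp()
 * above: 16-byte alignment, a minimum forward step within the same stack,
 * or a jump to a different stack area.  THREAD_SIZE and
 * STACK_FRAME_MIN_SIZE are illustrative stand-ins, not the kernel's
 * definitions, and the validate_sp() check is left out.
 */
#include <stdio.h>

#define THREAD_SIZE           (16 * 1024)   /* assumed stack size for the example */
#define STACK_FRAME_MIN_SIZE  32            /* assumed minimum frame size */

static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
{
	if (sp & 0xf)
		return 0;                       /* must be 16-byte aligned */
	if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
		return 1;                       /* normal forward step */
	/* allowed: jumping off an interrupt stack back to the process stack */
	if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
		return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", valid_next_sp(0x10040, 0x10000));  /* 1: aligned, far enough */
	printf("%d\n", valid_next_sp(0x10008, 0x10000));  /* 0: not 16-byte aligned */
	printf("%d\n", valid_next_sp(0x10010, 0x10000));  /* 0: too small a step, same stack */
	return 0;
}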
// SPDX-License-Identifier: GPL-2.0-or-later /* * Hypervisor supplied "gpci" ("get performance counter info") performance * counter support * * Author: Cody P Schafer <[email protected]> * Copyright 2014 IBM Corporation. */ #define pr_fmt(fmt) "hv-gpci: " fmt #include <linux/init.h> #include <linux/perf_event.h> #include <asm/firmware.h> #include <asm/hvcall.h> #include <asm/io.h> #include "hv-gpci.h" #include "hv-common.h" /* * Example usage: * perf stat -e 'hv_gpci/counter_info_version=3,offset=0,length=8, * secondary_index=0,starting_index=0xffffffff,request=0x10/' ... */ /* u32 */ EVENT_DEFINE_RANGE_FORMAT(request, config, 0, 31); /* u32 */ /* * Note that starting_index, phys_processor_idx, sibling_part_id, * hw_chip_id, partition_id all refer to the same bit range. They * are basically aliases for the starting_index. The specific alias * used depends on the event. See REQUEST_IDX_KIND in hv-gpci-requests.h */ EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 32, 63); EVENT_DEFINE_RANGE_FORMAT_LITE(phys_processor_idx, config, 32, 63); EVENT_DEFINE_RANGE_FORMAT_LITE(sibling_part_id, config, 32, 63); EVENT_DEFINE_RANGE_FORMAT_LITE(hw_chip_id, config, 32, 63); EVENT_DEFINE_RANGE_FORMAT_LITE(partition_id, config, 32, 63); /* u16 */ EVENT_DEFINE_RANGE_FORMAT(secondary_index, config1, 0, 15); /* u8 */ EVENT_DEFINE_RANGE_FORMAT(counter_info_version, config1, 16, 23); /* u8, bytes of data (1-8) */ EVENT_DEFINE_RANGE_FORMAT(length, config1, 24, 31); /* u32, byte offset */ EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63); static cpumask_t hv_gpci_cpumask; static struct attribute *format_attrs[] = { &format_attr_request.attr, &format_attr_starting_index.attr, &format_attr_phys_processor_idx.attr, &format_attr_sibling_part_id.attr, &format_attr_hw_chip_id.attr, &format_attr_partition_id.attr, &format_attr_secondary_index.attr, &format_attr_counter_info_version.attr, &format_attr_offset.attr, &format_attr_length.attr, NULL, }; static const struct attribute_group format_group = { .name = "format", .attrs = format_attrs, }; static struct attribute_group event_group = { .name = "events", /* .attrs is set in init */ }; #define HV_CAPS_ATTR(_name, _format) \ static ssize_t _name##_show(struct device *dev, \ struct device_attribute *attr, \ char *page) \ { \ struct hv_perf_caps caps; \ unsigned long hret = hv_perf_caps_get(&caps); \ if (hret) \ return -EIO; \ \ return sprintf(page, _format, caps._name); \ } \ static struct device_attribute hv_caps_attr_##_name = __ATTR_RO(_name) static ssize_t kernel_version_show(struct device *dev, struct device_attribute *attr, char *page) { return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT); } static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) { return cpumap_print_to_pagebuf(true, buf, &hv_gpci_cpumask); } /* Interface attribute array index to store system information */ #define INTERFACE_PROCESSOR_BUS_TOPOLOGY_ATTR 6 #define INTERFACE_PROCESSOR_CONFIG_ATTR 7 #define INTERFACE_AFFINITY_DOMAIN_VIA_VP_ATTR 8 #define INTERFACE_AFFINITY_DOMAIN_VIA_DOM_ATTR 9 #define INTERFACE_AFFINITY_DOMAIN_VIA_PAR_ATTR 10 #define INTERFACE_NULL_ATTR 11 /* Counter request value to retrieve system information */ enum { PROCESSOR_BUS_TOPOLOGY, PROCESSOR_CONFIG, AFFINITY_DOMAIN_VIA_VP, /* affinity domain via virtual processor */ AFFINITY_DOMAIN_VIA_DOM, /* affinity domain via domain */ AFFINITY_DOMAIN_VIA_PAR, /* affinity domain via partition */ }; static int sysinfo_counter_request[] = { [PROCESSOR_BUS_TOPOLOGY] = 0xD0, [PROCESSOR_CONFIG] 
= 0x90, [AFFINITY_DOMAIN_VIA_VP] = 0xA0, [AFFINITY_DOMAIN_VIA_DOM] = 0xB0, [AFFINITY_DOMAIN_VIA_PAR] = 0xB1, }; static DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t)); static unsigned long systeminfo_gpci_request(u32 req, u32 starting_index, u16 secondary_index, char *buf, size_t *n, struct hv_gpci_request_buffer *arg) { unsigned long ret; size_t i, j; arg->params.counter_request = cpu_to_be32(req); arg->params.starting_index = cpu_to_be32(starting_index); arg->params.secondary_index = cpu_to_be16(secondary_index); ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); /* * ret value as 'H_PARAMETER' corresponds to 'GEN_BUF_TOO_SMALL', * which means that the current buffer size cannot accommodate * all the information and a partial buffer returned. * hcall fails incase of ret value other than H_SUCCESS or H_PARAMETER. * * ret value as H_AUTHORITY implies that partition is not permitted to retrieve * performance information, and required to set * "Enable Performance Information Collection" option. */ if (ret == H_AUTHORITY) return -EPERM; /* * hcall can fail with other possible ret value like H_PRIVILEGE/H_HARDWARE * because of invalid buffer-length/address or due to some hardware * error. */ if (ret && (ret != H_PARAMETER)) return -EIO; /* * hcall H_GET_PERF_COUNTER_INFO populates the 'returned_values' * to show the total number of counter_value array elements * returned via hcall. * hcall also populates 'cv_element_size' corresponds to individual * counter_value array element size. Below loop go through all * counter_value array elements as per their size and add it to * the output buffer. */ for (i = 0; i < be16_to_cpu(arg->params.returned_values); i++) { j = i * be16_to_cpu(arg->params.cv_element_size); for (; j < (i + 1) * be16_to_cpu(arg->params.cv_element_size); j++) *n += sprintf(buf + *n, "%02x", (u8)arg->bytes[j]); *n += sprintf(buf + *n, "\n"); } if (*n >= PAGE_SIZE) { pr_info("System information exceeds PAGE_SIZE\n"); return -EFBIG; } return ret; } static ssize_t processor_bus_topology_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hv_gpci_request_buffer *arg; unsigned long ret; size_t n = 0; arg = (void *)get_cpu_var(hv_gpci_reqb); memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); /* * Pass the counter request value 0xD0 corresponds to request * type 'Processor_bus_topology', to retrieve * the system topology information. * starting_index value implies the starting hardware * chip id. */ ret = systeminfo_gpci_request(sysinfo_counter_request[PROCESSOR_BUS_TOPOLOGY], 0, 0, buf, &n, arg); if (!ret) return n; if (ret != H_PARAMETER) goto out; /* * ret value as 'H_PARAMETER' corresponds to 'GEN_BUF_TOO_SMALL', which * implies that buffer can't accommodate all information, and a partial buffer * returned. To handle that, we need to make subsequent requests * with next starting index to retrieve additional (missing) data. * Below loop do subsequent hcalls with next starting index and add it * to buffer util we get all the information. */ while (ret == H_PARAMETER) { int returned_values = be16_to_cpu(arg->params.returned_values); int elementsize = be16_to_cpu(arg->params.cv_element_size); int last_element = (returned_values - 1) * elementsize; /* * Since the starting index value is part of counter_value * buffer elements, use the starting index value in the last * element and add 1 to make subsequent hcalls. 
*/ u32 starting_index = arg->bytes[last_element + 3] + (arg->bytes[last_element + 2] << 8) + (arg->bytes[last_element + 1] << 16) + (arg->bytes[last_element] << 24) + 1; memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); ret = systeminfo_gpci_request(sysinfo_counter_request[PROCESSOR_BUS_TOPOLOGY], starting_index, 0, buf, &n, arg); if (!ret) return n; if (ret != H_PARAMETER) goto out; } return n; out: put_cpu_var(hv_gpci_reqb); return ret; } static ssize_t processor_config_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hv_gpci_request_buffer *arg; unsigned long ret; size_t n = 0; arg = (void *)get_cpu_var(hv_gpci_reqb); memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); /* * Pass the counter request value 0x90 corresponds to request * type 'Processor_config', to retrieve * the system processor information. * starting_index value implies the starting hardware * processor index. */ ret = systeminfo_gpci_request(sysinfo_counter_request[PROCESSOR_CONFIG], 0, 0, buf, &n, arg); if (!ret) return n; if (ret != H_PARAMETER) goto out; /* * ret value as 'H_PARAMETER' corresponds to 'GEN_BUF_TOO_SMALL', which * implies that buffer can't accommodate all information, and a partial buffer * returned. To handle that, we need to take subsequent requests * with next starting index to retrieve additional (missing) data. * Below loop do subsequent hcalls with next starting index and add it * to buffer util we get all the information. */ while (ret == H_PARAMETER) { int returned_values = be16_to_cpu(arg->params.returned_values); int elementsize = be16_to_cpu(arg->params.cv_element_size); int last_element = (returned_values - 1) * elementsize; /* * Since the starting index is part of counter_value * buffer elements, use the starting index value in the last * element and add 1 to subsequent hcalls. */ u32 starting_index = arg->bytes[last_element + 3] + (arg->bytes[last_element + 2] << 8) + (arg->bytes[last_element + 1] << 16) + (arg->bytes[last_element] << 24) + 1; memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); ret = systeminfo_gpci_request(sysinfo_counter_request[PROCESSOR_CONFIG], starting_index, 0, buf, &n, arg); if (!ret) return n; if (ret != H_PARAMETER) goto out; } return n; out: put_cpu_var(hv_gpci_reqb); return ret; } static ssize_t affinity_domain_via_virtual_processor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hv_gpci_request_buffer *arg; unsigned long ret; size_t n = 0; arg = (void *)get_cpu_var(hv_gpci_reqb); memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); /* * Pass the counter request 0xA0 corresponds to request * type 'Affinity_domain_information_by_virutal_processor', * to retrieve the system affinity domain information. * starting_index value refers to the starting hardware * processor index. */ ret = systeminfo_gpci_request(sysinfo_counter_request[AFFINITY_DOMAIN_VIA_VP], 0, 0, buf, &n, arg); if (!ret) return n; if (ret != H_PARAMETER) goto out; /* * ret value as 'H_PARAMETER' corresponds to 'GEN_BUF_TOO_SMALL', which * implies that buffer can't accommodate all information, and a partial buffer * returned. To handle that, we need to take subsequent requests * with next secondary index to retrieve additional (missing) data. * Below loop do subsequent hcalls with next secondary index and add it * to buffer util we get all the information. 
*/ while (ret == H_PARAMETER) { int returned_values = be16_to_cpu(arg->params.returned_values); int elementsize = be16_to_cpu(arg->params.cv_element_size); int last_element = (returned_values - 1) * elementsize; /* * Since the starting index and secondary index type is part of the * counter_value buffer elements, use the starting index value in the * last array element as subsequent starting index, and use secondary index * value in the last array element plus 1 as subsequent secondary index. * For counter request '0xA0', starting index points to partition id * and secondary index points to corresponding virtual processor index. */ u32 starting_index = arg->bytes[last_element + 1] + (arg->bytes[last_element] << 8); u16 secondary_index = arg->bytes[last_element + 3] + (arg->bytes[last_element + 2] << 8) + 1; memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); ret = systeminfo_gpci_request(sysinfo_counter_request[AFFINITY_DOMAIN_VIA_VP], starting_index, secondary_index, buf, &n, arg); if (!ret) return n; if (ret != H_PARAMETER) goto out; } return n; out: put_cpu_var(hv_gpci_reqb); return ret; } static ssize_t affinity_domain_via_domain_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hv_gpci_request_buffer *arg; unsigned long ret; size_t n = 0; arg = (void *)get_cpu_var(hv_gpci_reqb); memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); /* * Pass the counter request 0xB0 corresponds to request * type 'Affinity_domain_information_by_domain', * to retrieve the system affinity domain information. * starting_index value refers to the starting hardware * processor index. */ ret = systeminfo_gpci_request(sysinfo_counter_request[AFFINITY_DOMAIN_VIA_DOM], 0, 0, buf, &n, arg); if (!ret) return n; if (ret != H_PARAMETER) goto out; /* * ret value as 'H_PARAMETER' corresponds to 'GEN_BUF_TOO_SMALL', which * implies that buffer can't accommodate all information, and a partial buffer * returned. To handle that, we need to take subsequent requests * with next starting index to retrieve additional (missing) data. * Below loop do subsequent hcalls with next starting index and add it * to buffer util we get all the information. */ while (ret == H_PARAMETER) { int returned_values = be16_to_cpu(arg->params.returned_values); int elementsize = be16_to_cpu(arg->params.cv_element_size); int last_element = (returned_values - 1) * elementsize; /* * Since the starting index value is part of counter_value * buffer elements, use the starting index value in the last * element and add 1 to make subsequent hcalls. */ u32 starting_index = arg->bytes[last_element + 1] + (arg->bytes[last_element] << 8) + 1; memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); ret = systeminfo_gpci_request(sysinfo_counter_request[AFFINITY_DOMAIN_VIA_DOM], starting_index, 0, buf, &n, arg); if (!ret) return n; if (ret != H_PARAMETER) goto out; } return n; out: put_cpu_var(hv_gpci_reqb); return ret; } static void affinity_domain_via_partition_result_parse(int returned_values, int element_size, char *buf, size_t *last_element, size_t *n, struct hv_gpci_request_buffer *arg) { size_t i = 0, j = 0; size_t k, l, m; uint16_t total_affinity_domain_ele, size_of_each_affinity_domain_ele; /* * hcall H_GET_PERF_COUNTER_INFO populates the 'returned_values' * to show the total number of counter_value array elements * returned via hcall. * Unlike other request types, the data structure returned by this * request is variable-size. 
For this counter request type, * hcall populates 'cv_element_size' corresponds to minimum size of * the structure returned i.e; the size of the structure with no domain * information. Below loop go through all counter_value array * to determine the number and size of each domain array element and * add it to the output buffer. */ while (i < returned_values) { k = j; for (; k < j + element_size; k++) *n += sprintf(buf + *n, "%02x", (u8)arg->bytes[k]); *n += sprintf(buf + *n, "\n"); total_affinity_domain_ele = (u8)arg->bytes[k - 2] << 8 | (u8)arg->bytes[k - 3]; size_of_each_affinity_domain_ele = (u8)arg->bytes[k] << 8 | (u8)arg->bytes[k - 1]; for (l = 0; l < total_affinity_domain_ele; l++) { for (m = 0; m < size_of_each_affinity_domain_ele; m++) { *n += sprintf(buf + *n, "%02x", (u8)arg->bytes[k]); k++; } *n += sprintf(buf + *n, "\n"); } *n += sprintf(buf + *n, "\n"); i++; j = k; } *last_element = k; } static ssize_t affinity_domain_via_partition_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hv_gpci_request_buffer *arg; unsigned long ret; size_t n = 0; size_t last_element = 0; u32 starting_index; arg = (void *)get_cpu_var(hv_gpci_reqb); memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); /* * Pass the counter request value 0xB1 corresponds to counter request * type 'Affinity_domain_information_by_partition', * to retrieve the system affinity domain by partition information. * starting_index value refers to the starting hardware * processor index. */ arg->params.counter_request = cpu_to_be32(sysinfo_counter_request[AFFINITY_DOMAIN_VIA_PAR]); arg->params.starting_index = cpu_to_be32(0); ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); if (!ret) goto parse_result; /* * ret value as 'H_PARAMETER' implies that the current buffer size * can't accommodate all the information, and a partial buffer * returned. To handle that, we need to make subsequent requests * with next starting index to retrieve additional (missing) data. * Below loop do subsequent hcalls with next starting index and add it * to buffer util we get all the information. */ while (ret == H_PARAMETER) { affinity_domain_via_partition_result_parse( be16_to_cpu(arg->params.returned_values) - 1, be16_to_cpu(arg->params.cv_element_size), buf, &last_element, &n, arg); if (n >= PAGE_SIZE) { put_cpu_var(hv_gpci_reqb); pr_debug("System information exceeds PAGE_SIZE\n"); return -EFBIG; } /* * Since the starting index value is part of counter_value * buffer elements, use the starting_index value in the last * element and add 1 to make subsequent hcalls. */ starting_index = (u8)arg->bytes[last_element] << 8 | (u8)arg->bytes[last_element + 1]; memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); arg->params.counter_request = cpu_to_be32( sysinfo_counter_request[AFFINITY_DOMAIN_VIA_PAR]); arg->params.starting_index = cpu_to_be32(starting_index); ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); if (ret && (ret != H_PARAMETER)) goto out; } parse_result: affinity_domain_via_partition_result_parse( be16_to_cpu(arg->params.returned_values), be16_to_cpu(arg->params.cv_element_size), buf, &last_element, &n, arg); put_cpu_var(hv_gpci_reqb); return n; out: put_cpu_var(hv_gpci_reqb); /* * ret value as 'H_PARAMETER' corresponds to 'GEN_BUF_TOO_SMALL', * which means that the current buffer size cannot accommodate * all the information and a partial buffer returned. * hcall fails incase of ret value other than H_SUCCESS or H_PARAMETER. 
* * ret value as H_AUTHORITY implies that partition is not permitted to retrieve * performance information, and required to set * "Enable Performance Information Collection" option. */ if (ret == H_AUTHORITY) return -EPERM; /* * hcall can fail with other possible ret value like H_PRIVILEGE/H_HARDWARE * because of invalid buffer-length/address or due to some hardware * error. */ return -EIO; } static DEVICE_ATTR_RO(kernel_version); static DEVICE_ATTR_RO(cpumask); HV_CAPS_ATTR(version, "0x%x\n"); HV_CAPS_ATTR(ga, "%d\n"); HV_CAPS_ATTR(expanded, "%d\n"); HV_CAPS_ATTR(lab, "%d\n"); HV_CAPS_ATTR(collect_privileged, "%d\n"); static struct attribute *interface_attrs[] = { &dev_attr_kernel_version.attr, &hv_caps_attr_version.attr, &hv_caps_attr_ga.attr, &hv_caps_attr_expanded.attr, &hv_caps_attr_lab.attr, &hv_caps_attr_collect_privileged.attr, /* * This NULL is a placeholder for the processor_bus_topology * attribute, set in init function if applicable. */ NULL, /* * This NULL is a placeholder for the processor_config * attribute, set in init function if applicable. */ NULL, /* * This NULL is a placeholder for the affinity_domain_via_virtual_processor * attribute, set in init function if applicable. */ NULL, /* * This NULL is a placeholder for the affinity_domain_via_domain * attribute, set in init function if applicable. */ NULL, /* * This NULL is a placeholder for the affinity_domain_via_partition * attribute, set in init function if applicable. */ NULL, NULL, }; static struct attribute *cpumask_attrs[] = { &dev_attr_cpumask.attr, NULL, }; static const struct attribute_group cpumask_attr_group = { .attrs = cpumask_attrs, }; static const struct attribute_group interface_group = { .name = "interface", .attrs = interface_attrs, }; static const struct attribute_group *attr_groups[] = { &format_group, &event_group, &interface_group, &cpumask_attr_group, NULL, }; static unsigned long single_gpci_request(u32 req, u32 starting_index, u16 secondary_index, u8 version_in, u32 offset, u8 length, u64 *value) { unsigned long ret; size_t i; u64 count; struct hv_gpci_request_buffer *arg; arg = (void *)get_cpu_var(hv_gpci_reqb); memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); arg->params.counter_request = cpu_to_be32(req); arg->params.starting_index = cpu_to_be32(starting_index); arg->params.secondary_index = cpu_to_be16(secondary_index); arg->params.counter_info_version_in = version_in; ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); if (ret) { pr_devel("hcall failed: 0x%lx\n", ret); goto out; } /* * we verify offset and length are within the zeroed buffer at event * init. 
*/ count = 0; for (i = offset; i < offset + length; i++) count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8); *value = count; out: put_cpu_var(hv_gpci_reqb); return ret; } static u64 h_gpci_get_value(struct perf_event *event) { u64 count; unsigned long ret = single_gpci_request(event_get_request(event), event_get_starting_index(event), event_get_secondary_index(event), event_get_counter_info_version(event), event_get_offset(event), event_get_length(event), &count); if (ret) return 0; return count; } static void h_gpci_event_update(struct perf_event *event) { s64 prev; u64 now = h_gpci_get_value(event); prev = local64_xchg(&event->hw.prev_count, now); local64_add(now - prev, &event->count); } static void h_gpci_event_start(struct perf_event *event, int flags) { local64_set(&event->hw.prev_count, h_gpci_get_value(event)); } static void h_gpci_event_stop(struct perf_event *event, int flags) { h_gpci_event_update(event); } static int h_gpci_event_add(struct perf_event *event, int flags) { if (flags & PERF_EF_START) h_gpci_event_start(event, flags); return 0; } static int h_gpci_event_init(struct perf_event *event) { u64 count; u8 length; /* Not our event */ if (event->attr.type != event->pmu->type) return -ENOENT; /* config2 is unused */ if (event->attr.config2) { pr_devel("config2 set when reserved\n"); return -EINVAL; } /* no branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP; length = event_get_length(event); if (length < 1 || length > 8) { pr_devel("length invalid\n"); return -EINVAL; } /* last byte within the buffer? */ if ((event_get_offset(event) + length) > HGPCI_MAX_DATA_BYTES) { pr_devel("request outside of buffer: %zu > %zu\n", (size_t)event_get_offset(event) + length, HGPCI_MAX_DATA_BYTES); return -EINVAL; } /* check if the request works... 
*/ if (single_gpci_request(event_get_request(event), event_get_starting_index(event), event_get_secondary_index(event), event_get_counter_info_version(event), event_get_offset(event), length, &count)) { pr_devel("gpci hcall failed\n"); return -EINVAL; } return 0; } static struct pmu h_gpci_pmu = { .task_ctx_nr = perf_invalid_context, .name = "hv_gpci", .attr_groups = attr_groups, .event_init = h_gpci_event_init, .add = h_gpci_event_add, .del = h_gpci_event_stop, .start = h_gpci_event_start, .stop = h_gpci_event_stop, .read = h_gpci_event_update, .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static int ppc_hv_gpci_cpu_online(unsigned int cpu) { if (cpumask_empty(&hv_gpci_cpumask)) cpumask_set_cpu(cpu, &hv_gpci_cpumask); return 0; } static int ppc_hv_gpci_cpu_offline(unsigned int cpu) { int target; /* Check if exiting cpu is used for collecting gpci events */ if (!cpumask_test_and_clear_cpu(cpu, &hv_gpci_cpumask)) return 0; /* Find a new cpu to collect gpci events */ target = cpumask_last(cpu_active_mask); if (target < 0 || target >= nr_cpu_ids) { pr_err("hv_gpci: CPU hotplug init failed\n"); return -1; } /* Migrate gpci events to the new target */ cpumask_set_cpu(target, &hv_gpci_cpumask); perf_pmu_migrate_context(&h_gpci_pmu, cpu, target); return 0; } static int hv_gpci_cpu_hotplug_init(void) { return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE, "perf/powerpc/hv_gcpi:online", ppc_hv_gpci_cpu_online, ppc_hv_gpci_cpu_offline); } static struct device_attribute *sysinfo_device_attr_create(int sysinfo_interface_group_index, u32 req) { struct device_attribute *attr = NULL; unsigned long ret; struct hv_gpci_request_buffer *arg; if (sysinfo_interface_group_index < INTERFACE_PROCESSOR_BUS_TOPOLOGY_ATTR || sysinfo_interface_group_index >= INTERFACE_NULL_ATTR) { pr_info("Wrong interface group index for system information\n"); return NULL; } /* Check for given counter request value support */ arg = (void *)get_cpu_var(hv_gpci_reqb); memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); arg->params.counter_request = cpu_to_be32(req); ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); put_cpu_var(hv_gpci_reqb); /* * Add given counter request value attribute in the interface_attrs * attribute array, only for valid return types. 
*/ if (!ret || ret == H_AUTHORITY || ret == H_PARAMETER) { attr = kzalloc(sizeof(*attr), GFP_KERNEL); if (!attr) return NULL; sysfs_attr_init(&attr->attr); attr->attr.mode = 0444; switch (sysinfo_interface_group_index) { case INTERFACE_PROCESSOR_BUS_TOPOLOGY_ATTR: attr->attr.name = "processor_bus_topology"; attr->show = processor_bus_topology_show; break; case INTERFACE_PROCESSOR_CONFIG_ATTR: attr->attr.name = "processor_config"; attr->show = processor_config_show; break; case INTERFACE_AFFINITY_DOMAIN_VIA_VP_ATTR: attr->attr.name = "affinity_domain_via_virtual_processor"; attr->show = affinity_domain_via_virtual_processor_show; break; case INTERFACE_AFFINITY_DOMAIN_VIA_DOM_ATTR: attr->attr.name = "affinity_domain_via_domain"; attr->show = affinity_domain_via_domain_show; break; case INTERFACE_AFFINITY_DOMAIN_VIA_PAR_ATTR: attr->attr.name = "affinity_domain_via_partition"; attr->show = affinity_domain_via_partition_show; break; } } else pr_devel("hcall failed, with error: 0x%lx\n", ret); return attr; } static void add_sysinfo_interface_files(void) { int sysfs_count; struct device_attribute *attr[INTERFACE_NULL_ATTR - INTERFACE_PROCESSOR_BUS_TOPOLOGY_ATTR]; int i; sysfs_count = INTERFACE_NULL_ATTR - INTERFACE_PROCESSOR_BUS_TOPOLOGY_ATTR; /* Get device attribute for a given counter request value */ for (i = 0; i < sysfs_count; i++) { attr[i] = sysinfo_device_attr_create(i + INTERFACE_PROCESSOR_BUS_TOPOLOGY_ATTR, sysinfo_counter_request[i]); if (!attr[i]) goto out; } /* Add sysinfo interface attributes in the interface_attrs attribute array */ for (i = 0; i < sysfs_count; i++) interface_attrs[i + INTERFACE_PROCESSOR_BUS_TOPOLOGY_ATTR] = &attr[i]->attr; return; out: /* * The sysinfo interface attributes will be added, only if hcall passed for * all the counter request values. Free the device attribute array incase * of any hcall failure. */ if (i > 0) { while (i >= 0) { kfree(attr[i]); i--; } } } static int hv_gpci_init(void) { int r; unsigned long hret; struct hv_perf_caps caps; struct hv_gpci_request_buffer *arg; hv_gpci_assert_offsets_correct(); if (!firmware_has_feature(FW_FEATURE_LPAR)) { pr_debug("not a virtualized system, not enabling\n"); return -ENODEV; } hret = hv_perf_caps_get(&caps); if (hret) { pr_debug("could not obtain capabilities, not enabling, rc=%ld\n", hret); return -ENODEV; } /* init cpuhotplug */ r = hv_gpci_cpu_hotplug_init(); if (r) return r; /* sampling not supported */ h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; arg = (void *)get_cpu_var(hv_gpci_reqb); memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); /* * hcall H_GET_PERF_COUNTER_INFO populates the output * counter_info_version value based on the system hypervisor. * Pass the counter request 0x10 corresponds to request type * 'Dispatch_timebase_by_processor', to get the supported * counter_info_version. */ arg->params.counter_request = cpu_to_be32(0x10); r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); if (r) { pr_devel("hcall failed, can't get supported counter_info_version: 0x%x\n", r); arg->params.counter_info_version_out = 0x8; } /* * Use counter_info_version_out value to assign * required hv-gpci event list. 
*/ if (arg->params.counter_info_version_out >= 0x8) event_group.attrs = hv_gpci_event_attrs; else event_group.attrs = hv_gpci_event_attrs_v6; put_cpu_var(hv_gpci_reqb); r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1); if (r) return r; /* sysinfo interface files are only available for power10 and above platforms */ if (PVR_VER(mfspr(SPRN_PVR)) >= PVR_POWER10) add_sysinfo_interface_files(); return 0; } device_initcall(hv_gpci_init);
linux-master
arch/powerpc/perf/hv-gpci.c
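/*
 * Standalone illustration (not part of the hv-gpci.c driver above): the
 * byte-assembly step that single_gpci_request() uses to turn an
 * (offset, length) slice of the H_GET_PERF_COUNTER_INFO output buffer into a
 * single big-endian counter value.  The event_init path already guarantees
 * offset + length stays within HGPCI_MAX_DATA_BYTES, so only the shift/or
 * logic is mirrored here.  Buffer contents are invented for demonstration.
 */
#include <stdint.h>
#include <stdio.h>

/* Assemble 'length' bytes starting at 'offset' into a u64, MSB first. */
static uint64_t assemble_counter(const uint8_t *bytes, uint32_t offset,
				 uint8_t length)
{
	uint64_t count = 0;
	uint32_t i;

	for (i = offset; i < offset + (uint32_t)length; i++)
		count |= (uint64_t)bytes[i] << ((length - 1 - (i - offset)) * 8);

	return count;
}

int main(void)
{
	/* Pretend these are the first bytes of counter_value[] from the hcall. */
	uint8_t buf[16] = { 0x00, 0x00, 0x00, 0x12, 0x34, 0x56, 0x78, 0x9a };

	/* An 8-byte counter at offset 0 decodes as a big-endian value. */
	printf("0x%llx\n",
	       (unsigned long long)assemble_counter(buf, 0, 8)); /* 0x123456789a */
	/* A 2-byte field at offset 3. */
	printf("0x%llx\n",
	       (unsigned long long)assemble_counter(buf, 3, 2)); /* 0x1234 */
	return 0;
}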
// SPDX-License-Identifier: GPL-2.0-or-later /* * Performance counter support for POWER5 (not POWER5++) processors. * * Copyright 2009 Paul Mackerras, IBM Corporation. */ #include <linux/kernel.h> #include <linux/perf_event.h> #include <linux/string.h> #include <asm/reg.h> #include <asm/cputable.h> #include "internal.h" /* * Bits in event code for POWER5 (not POWER5++) */ #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ #define PM_PMC_MSK 0xf #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) #define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */ #define PM_UNIT_MSK 0xf #define PM_BYTE_SH 12 /* Byte number of event bus to use */ #define PM_BYTE_MSK 7 #define PM_GRS_SH 8 /* Storage subsystem mux select */ #define PM_GRS_MSK 7 #define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */ #define PM_PMCSEL_MSK 0x7f /* Values in PM_UNIT field */ #define PM_FPU 0 #define PM_ISU0 1 #define PM_IFU 2 #define PM_ISU1 3 #define PM_IDU 4 #define PM_ISU0_ALT 6 #define PM_GRS 7 #define PM_LSU0 8 #define PM_LSU1 0xc #define PM_LASTUNIT 0xc /* * Bits in MMCR1 for POWER5 */ #define MMCR1_TTM0SEL_SH 62 #define MMCR1_TTM1SEL_SH 60 #define MMCR1_TTM2SEL_SH 58 #define MMCR1_TTM3SEL_SH 56 #define MMCR1_TTMSEL_MSK 3 #define MMCR1_TD_CP_DBG0SEL_SH 54 #define MMCR1_TD_CP_DBG1SEL_SH 52 #define MMCR1_TD_CP_DBG2SEL_SH 50 #define MMCR1_TD_CP_DBG3SEL_SH 48 #define MMCR1_GRS_L2SEL_SH 46 #define MMCR1_GRS_L2SEL_MSK 3 #define MMCR1_GRS_L3SEL_SH 44 #define MMCR1_GRS_L3SEL_MSK 3 #define MMCR1_GRS_MCSEL_SH 41 #define MMCR1_GRS_MCSEL_MSK 7 #define MMCR1_GRS_FABSEL_SH 39 #define MMCR1_GRS_FABSEL_MSK 3 #define MMCR1_PMC1_ADDER_SEL_SH 35 #define MMCR1_PMC2_ADDER_SEL_SH 34 #define MMCR1_PMC3_ADDER_SEL_SH 33 #define MMCR1_PMC4_ADDER_SEL_SH 32 #define MMCR1_PMC1SEL_SH 25 #define MMCR1_PMC2SEL_SH 17 #define MMCR1_PMC3SEL_SH 9 #define MMCR1_PMC4SEL_SH 1 #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) #define MMCR1_PMCSEL_MSK 0x7f /* * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 * 3210987654321098765432109876543210987654321098765432109876543210 * <><>[ ><><>< ><> [ >[ >[ >< >< >< >< ><><><><><><> * T0T1 NC G0G1G2 G3 UC PS1PS2 B0 B1 B2 B3 P6P5P4P3P2P1 * * T0 - TTM0 constraint * 54-55: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0xc0_0000_0000_0000 * * T1 - TTM1 constraint * 52-53: TTM1SEL value (0=IDU, 3=GRS) 0x30_0000_0000_0000 * * NC - number of counters * 51: NC error 0x0008_0000_0000_0000 * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000 * * G0..G3 - GRS mux constraints * 46-47: GRS_L2SEL value * 44-45: GRS_L3SEL value * 41-44: GRS_MCSEL value * 39-40: GRS_FABSEL value * Note that these match up with their bit positions in MMCR1 * * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS * 37: UC3 error 0x20_0000_0000 * 36: FPU|IFU|ISU1 events needed 0x10_0000_0000 * 35: ISU0 events needed 0x08_0000_0000 * 34: IDU|GRS events needed 0x04_0000_0000 * * PS1 * 33: PS1 error 0x2_0000_0000 * 31-32: count of events needing PMC1/2 0x1_8000_0000 * * PS2 * 30: PS2 error 0x4000_0000 * 28-29: count of events needing PMC3/4 0x3000_0000 * * B0 * 24-27: Byte 0 event source 0x0f00_0000 * Encoding as for the event code * * B1, B2, B3 * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources * * P1..P6 * 0-11: Count of events needing PMC1..PMC6 */ static const int grsel_shift[8] = { MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH }; /* 
Masks and values for using events from the various units */ static unsigned long unit_cons[PM_LASTUNIT+1][2] = { [PM_FPU] = { 0xc0002000000000ul, 0x00001000000000ul }, [PM_ISU0] = { 0x00002000000000ul, 0x00000800000000ul }, [PM_ISU1] = { 0xc0002000000000ul, 0xc0001000000000ul }, [PM_IFU] = { 0xc0002000000000ul, 0x80001000000000ul }, [PM_IDU] = { 0x30002000000000ul, 0x00000400000000ul }, [PM_GRS] = { 0x30002000000000ul, 0x30000400000000ul }, }; static int power5_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1 __maybe_unused) { int pmc, byte, unit, sh; int bit, fmask; unsigned long mask = 0, value = 0; int grp = -1; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { if (pmc > 6) return -1; sh = (pmc - 1) * 2; mask |= 2 << sh; value |= 1 << sh; if (pmc <= 4) grp = (pmc - 1) >> 1; else if (event != 0x500009 && event != 0x600005) return -1; } if (event & PM_BUSEVENT_MSK) { unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; if (unit > PM_LASTUNIT) return -1; if (unit == PM_ISU0_ALT) unit = PM_ISU0; mask |= unit_cons[unit][0]; value |= unit_cons[unit][1]; byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; if (byte >= 4) { if (unit != PM_LSU1) return -1; /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */ ++unit; byte &= 3; } if (unit == PM_GRS) { bit = event & 7; fmask = (bit == 6)? 7: 3; sh = grsel_shift[bit]; mask |= (unsigned long)fmask << sh; value |= (unsigned long)((event >> PM_GRS_SH) & fmask) << sh; } /* * Bus events on bytes 0 and 2 can be counted * on PMC1/2; bytes 1 and 3 on PMC3/4. */ if (!pmc) grp = byte & 1; /* Set byte lane select field */ mask |= 0xfUL << (24 - 4 * byte); value |= (unsigned long)unit << (24 - 4 * byte); } if (grp == 0) { /* increment PMC1/2 field */ mask |= 0x200000000ul; value |= 0x080000000ul; } else if (grp == 1) { /* increment PMC3/4 field */ mask |= 0x40000000ul; value |= 0x10000000ul; } if (pmc < 5) { /* need a counter from PMC1-4 set */ mask |= 0x8000000000000ul; value |= 0x1000000000000ul; } *maskp = mask; *valp = value; return 0; } #define MAX_ALT 3 /* at most 3 alternatives for any event */ static const unsigned int event_alternatives[][MAX_ALT] = { { 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */ { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */ { 0x100005, 0x600005 }, /* PM_RUN_CYC */ { 0x100009, 0x200009, 0x500009 }, /* PM_INST_CMPL */ { 0x300009, 0x400009 }, /* PM_INST_DISP */ }; /* * Scan the alternatives table for a match and return the * index into the alternatives table if found, else -1. */ static int find_alternative(u64 event) { int i, j; for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { if (event < event_alternatives[i][0]) break; for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) if (event == event_alternatives[i][j]) return i; } return -1; } static const unsigned char bytedecode_alternatives[4][4] = { /* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 }, /* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e }, /* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 }, /* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e } }; /* * Some direct events for decodes of event bus byte 3 have alternative * PMCSEL values on other counters. This returns the alternative * event code for those that do, or -1 otherwise. 
*/ static s64 find_alternative_bdecode(u64 event) { int pmc, altpmc, pp, j; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; if (pmc == 0 || pmc > 4) return -1; altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */ pp = event & PM_PMCSEL_MSK; for (j = 0; j < 4; ++j) { if (bytedecode_alternatives[pmc - 1][j] == pp) { return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | (altpmc << PM_PMC_SH) | bytedecode_alternatives[altpmc - 1][j]; } } return -1; } static int power5_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { int i, j, nalt = 1; s64 ae; alt[0] = event; nalt = 1; i = find_alternative(event); if (i >= 0) { for (j = 0; j < MAX_ALT; ++j) { ae = event_alternatives[i][j]; if (ae && ae != event) alt[nalt++] = ae; } } else { ae = find_alternative_bdecode(event); if (ae > 0) alt[nalt++] = ae; } return nalt; } /* * Map of which direct events on which PMCs are marked instruction events. * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event. * Bit 0 is set if it is marked for all PMCs. * The 0x80 bit indicates a byte decode PMCSEL value. */ static unsigned char direct_event_is_marked[0x28] = { 0, /* 00 */ 0x1f, /* 01 PM_IOPS_CMPL */ 0x2, /* 02 PM_MRK_GRP_DISP */ 0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */ 0, /* 04 */ 0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */ 0x80, /* 06 */ 0x80, /* 07 */ 0, 0, 0,/* 08 - 0a */ 0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */ 0, /* 0c */ 0x80, /* 0d */ 0x80, /* 0e */ 0, /* 0f */ 0, /* 10 */ 0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */ 0, /* 12 */ 0x10, /* 13 PM_MRK_GRP_CMPL */ 0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */ 0x2, /* 15 PM_MRK_GRP_ISSUED */ 0x80, /* 16 */ 0x80, /* 17 */ 0, 0, 0, 0, 0, 0x80, /* 1d */ 0x80, /* 1e */ 0, /* 1f */ 0x80, /* 20 */ 0x80, /* 21 */ 0x80, /* 22 */ 0x80, /* 23 */ 0x80, /* 24 */ 0x80, /* 25 */ 0x80, /* 26 */ 0x80, /* 27 */ }; /* * Returns 1 if event counts things relating to marked instructions * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. 
*/ static int power5_marked_instr_event(u64 event) { int pmc, psel; int bit, byte, unit; u32 mask; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; psel = event & PM_PMCSEL_MSK; if (pmc >= 5) return 0; bit = -1; if (psel < sizeof(direct_event_is_marked)) { if (direct_event_is_marked[psel] & (1 << pmc)) return 1; if (direct_event_is_marked[psel] & 0x80) bit = 4; else if (psel == 0x08) bit = pmc - 1; else if (psel == 0x10) bit = 4 - pmc; else if (psel == 0x1b && (pmc == 1 || pmc == 3)) bit = 4; } else if ((psel & 0x58) == 0x40) bit = psel & 7; if (!(event & PM_BUSEVENT_MSK)) return 0; byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; if (unit == PM_LSU0) { /* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */ mask = 0x5dff00; } else if (unit == PM_LSU1 && byte >= 4) { byte -= 4; /* byte 4 bits 1,3,5,7, byte 5 bits 6-7, byte 7 bits 0-4,6 */ mask = 0x5f00c0aa; } else return 0; return (mask >> (byte * 8 + bit)) & 1; } static int power5_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[], u32 flags __maybe_unused) { unsigned long mmcr1 = 0; unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; unsigned int pmc, unit, byte, psel; unsigned int ttm, grp; int i, isbus, bit, grsel; unsigned int pmc_inuse = 0; unsigned int pmc_grp_use[2]; unsigned char busbyte[4]; unsigned char unituse[16]; int ttmuse; if (n_ev > 6) return -1; /* First pass to count resource use */ pmc_grp_use[0] = pmc_grp_use[1] = 0; memset(busbyte, 0, sizeof(busbyte)); memset(unituse, 0, sizeof(unituse)); for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { if (pmc > 6) return -1; if (pmc_inuse & (1 << (pmc - 1))) return -1; pmc_inuse |= 1 << (pmc - 1); /* count 1/2 vs 3/4 use */ if (pmc <= 4) ++pmc_grp_use[(pmc - 1) >> 1]; } if (event[i] & PM_BUSEVENT_MSK) { unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; if (unit > PM_LASTUNIT) return -1; if (unit == PM_ISU0_ALT) unit = PM_ISU0; if (byte >= 4) { if (unit != PM_LSU1) return -1; ++unit; byte &= 3; } if (!pmc) ++pmc_grp_use[byte & 1]; if (busbyte[byte] && busbyte[byte] != unit) return -1; busbyte[byte] = unit; unituse[unit] = 1; } } if (pmc_grp_use[0] > 2 || pmc_grp_use[1] > 2) return -1; /* * Assign resources and set multiplexer selects. * * PM_ISU0 can go either on TTM0 or TTM1, but that's the only * choice we have to deal with. */ if (unituse[PM_ISU0] & (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) { unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */ unituse[PM_ISU0] = 0; } /* Set TTM[01]SEL fields. */ ttmuse = 0; for (i = PM_FPU; i <= PM_ISU1; ++i) { if (!unituse[i]) continue; if (ttmuse++) return -1; mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH; } ttmuse = 0; for (; i <= PM_GRS; ++i) { if (!unituse[i]) continue; if (ttmuse++) return -1; mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH; } if (ttmuse > 1) return -1; /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. 
*/ for (byte = 0; byte < 4; ++byte) { unit = busbyte[byte]; if (!unit) continue; if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) { /* get ISU0 through TTM1 rather than TTM0 */ unit = PM_ISU0_ALT; } else if (unit == PM_LSU1 + 1) { /* select lower word of LSU1 for this byte */ mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte); } ttm = unit >> 2; mmcr1 |= (unsigned long)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); } /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; psel = event[i] & PM_PMCSEL_MSK; isbus = event[i] & PM_BUSEVENT_MSK; if (!pmc) { /* Bus event or any-PMC direct event */ for (pmc = 0; pmc < 4; ++pmc) { if (pmc_inuse & (1 << pmc)) continue; grp = (pmc >> 1) & 1; if (isbus) { if (grp == (byte & 1)) break; } else if (pmc_grp_use[grp] < 2) { ++pmc_grp_use[grp]; break; } } pmc_inuse |= 1 << pmc; } else if (pmc <= 4) { /* Direct event */ --pmc; if ((psel == 8 || psel == 0x10) && isbus && (byte & 2)) /* add events on higher-numbered bus */ mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc); } else { /* Instructions or run cycles on PMC5/6 */ --pmc; } if (isbus && unit == PM_GRS) { bit = psel & 7; grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; mmcr1 |= (unsigned long)grsel << grsel_shift[bit]; } if (power5_marked_instr_event(event[i])) mmcra |= MMCRA_SAMPLE_ENABLE; if (pmc <= 3) mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); hwc[i] = pmc; } /* Return MMCRx values */ mmcr->mmcr0 = 0; if (pmc_inuse & 1) mmcr->mmcr0 = MMCR0_PMC1CE; if (pmc_inuse & 0x3e) mmcr->mmcr0 |= MMCR0_PMCjCE; mmcr->mmcr1 = mmcr1; mmcr->mmcra = mmcra; return 0; } static void power5_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr) { if (pmc <= 3) mmcr->mmcr1 &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); } static int power5_generic_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0xf, [PERF_COUNT_HW_INSTRUCTIONS] = 0x100009, [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4c1090, /* LD_REF_L1 */ [PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ [PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ }; #define C(x) PERF_COUNT_HW_CACHE_##x /* * Table of generalized cache-related events. * 0 means not supported, -1 means nonsensical, other values * are event codes. 
*/ static u64 power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x4c1090, 0x3c1088 }, [C(OP_WRITE)] = { 0x3c1090, 0xc10c3 }, [C(OP_PREFETCH)] = { 0xc70e7, 0 }, }, [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0, 0 }, }, [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x3c309b }, [C(OP_WRITE)] = { 0, 0 }, [C(OP_PREFETCH)] = { 0xc50c3, 0 }, }, [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x2c4090, 0x800c4 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x800c0 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x230e4, 0x230e5 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { -1, -1 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, }; static struct power_pmu power5_pmu = { .name = "POWER5", .n_counter = 6, .max_alternatives = MAX_ALT, .add_fields = 0x7000090000555ul, .test_adder = 0x3000490000000ul, .compute_mmcr = power5_compute_mmcr, .get_constraint = power5_get_constraint, .get_alternatives = power5_get_alternatives, .disable_pmc = power5_disable_pmc, .n_generic = ARRAY_SIZE(power5_generic_events), .generic_events = power5_generic_events, .cache_events = &power5_cache_events, .flags = PPMU_HAS_SSLOT, }; int __init init_power5_pmu(void) { unsigned int pvr = mfspr(SPRN_PVR); if (PVR_VER(pvr) != PVR_POWER5) return -ENODEV; return register_power_pmu(&power5_pmu); }
linux-master
arch/powerpc/perf/power5-pmu.c
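/*
 * Standalone sketch (not kernel code): the idea behind the constraint
 * mask/value words built by power5_get_constraint() above.  Each direct PMC
 * gets a 2-bit field at shift (pmc - 1) * 2: an event needing PMCn
 * contributes value 1 and mask 2 in that field, so summing the value words
 * of two events that both want the same PMC carries into the bit covered by
 * the mask.  The real scheduler check lives in core-book3s.c and also uses
 * the add_fields/test_adder values from struct power_pmu plus the other
 * fields documented in the layout comment; this only demonstrates the
 * per-PMC carry trick under that simplified model.
 */
#include <stdint.h>
#include <stdio.h>

struct constraint {
	uint64_t mask;
	uint64_t value;
};

/* Constraint contribution of one event that requires direct PMC 'pmc' (1..6). */
static struct constraint pmc_constraint(int pmc)
{
	int sh = (pmc - 1) * 2;

	return (struct constraint){ .mask = 2ull << sh, .value = 1ull << sh };
}

/* Return 0 if the events can coexist under this simplified model. */
static int check(const struct constraint *c, int n)
{
	uint64_t sum = 0, mask = 0;
	int i;

	for (i = 0; i < n; i++) {
		sum += c[i].value;
		mask |= c[i].mask;
	}
	return (sum & mask) ? -1 : 0;
}

int main(void)
{
	struct constraint ok[2]  = { pmc_constraint(1), pmc_constraint(2) };
	struct constraint bad[2] = { pmc_constraint(3), pmc_constraint(3) };

	printf("PMC1 + PMC2 -> %d\n", check(ok, 2));	/* 0: fits */
	printf("PMC3 + PMC3 -> %d\n", check(bad, 2));	/* -1: conflict */
	return 0;
}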
// SPDX-License-Identifier: GPL-2.0-or-later /* * Performance counter support for POWER7 processors. * * Copyright 2009 Paul Mackerras, IBM Corporation. */ #include <linux/kernel.h> #include <linux/perf_event.h> #include <linux/string.h> #include <asm/reg.h> #include <asm/cputable.h> #include "internal.h" /* * Bits in event code for POWER7 */ #define PM_PMC_SH 16 /* PMC number (1-based) for direct events */ #define PM_PMC_MSK 0xf #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) #define PM_UNIT_SH 12 /* TTMMUX number and setting - unit select */ #define PM_UNIT_MSK 0xf #define PM_COMBINE_SH 11 /* Combined event bit */ #define PM_COMBINE_MSK 1 #define PM_COMBINE_MSKS 0x800 #define PM_L2SEL_SH 8 /* L2 event select */ #define PM_L2SEL_MSK 7 #define PM_PMCSEL_MSK 0xff /* * Bits in MMCR1 for POWER7 */ #define MMCR1_TTM0SEL_SH 60 #define MMCR1_TTM1SEL_SH 56 #define MMCR1_TTM2SEL_SH 52 #define MMCR1_TTM3SEL_SH 48 #define MMCR1_TTMSEL_MSK 0xf #define MMCR1_L2SEL_SH 45 #define MMCR1_L2SEL_MSK 7 #define MMCR1_PMC1_COMBINE_SH 35 #define MMCR1_PMC2_COMBINE_SH 34 #define MMCR1_PMC3_COMBINE_SH 33 #define MMCR1_PMC4_COMBINE_SH 32 #define MMCR1_PMC1SEL_SH 24 #define MMCR1_PMC2SEL_SH 16 #define MMCR1_PMC3SEL_SH 8 #define MMCR1_PMC4SEL_SH 0 #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) #define MMCR1_PMCSEL_MSK 0xff /* * Power7 event codes. */ #define EVENT(_name, _code) \ _name = _code, enum { #include "power7-events-list.h" }; #undef EVENT /* * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 * 3210987654321098765432109876543210987654321098765432109876543210 * < >< ><><><><><><> * L2 NC P6P5P4P3P2P1 * * L2 - 16-18 - Required L2SEL value (select field) * * NC - number of counters * 15: NC error 0x8000 * 12-14: number of events needing PMC1-4 0x7000 * * P6 * 11: P6 error 0x800 * 10-11: Count of events needing PMC6 * * P1..P5 * 0-9: Count of events needing PMC1..PMC5 */ static int power7_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1 __maybe_unused) { int pmc, sh, unit; unsigned long mask = 0, value = 0; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { if (pmc > 6) return -1; sh = (pmc - 1) * 2; mask |= 2 << sh; value |= 1 << sh; if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4)) return -1; } if (pmc < 5) { /* need a counter from PMC1-4 set */ mask |= 0x8000; value |= 0x1000; } unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; if (unit == 6) { /* L2SEL must be identical across events */ int l2sel = (event >> PM_L2SEL_SH) & PM_L2SEL_MSK; mask |= 0x7 << 16; value |= l2sel << 16; } *maskp = mask; *valp = value; return 0; } #define MAX_ALT 2 /* at most 2 alternatives for any event */ static const unsigned int event_alternatives[][MAX_ALT] = { { 0x200f2, 0x300f2 }, /* PM_INST_DISP */ { 0x200f4, 0x600f4 }, /* PM_RUN_CYC */ { 0x400fa, 0x500fa }, /* PM_RUN_INST_CMPL */ }; /* * Scan the alternatives table for a match and return the * index into the alternatives table if found, else -1. 
*/ static int find_alternative(u64 event) { int i, j; for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { if (event < event_alternatives[i][0]) break; for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) if (event == event_alternatives[i][j]) return i; } return -1; } static s64 find_alternative_decode(u64 event) { int pmc, psel; /* this only handles the 4x decode events */ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; psel = event & PM_PMCSEL_MSK; if ((pmc == 2 || pmc == 4) && (psel & ~7) == 0x40) return event - (1 << PM_PMC_SH) + 8; if ((pmc == 1 || pmc == 3) && (psel & ~7) == 0x48) return event + (1 << PM_PMC_SH) - 8; return -1; } static int power7_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { int i, j, nalt = 1; s64 ae; alt[0] = event; nalt = 1; i = find_alternative(event); if (i >= 0) { for (j = 0; j < MAX_ALT; ++j) { ae = event_alternatives[i][j]; if (ae && ae != event) alt[nalt++] = ae; } } else { ae = find_alternative_decode(event); if (ae > 0) alt[nalt++] = ae; } if (flags & PPMU_ONLY_COUNT_RUN) { /* * We're only counting in RUN state, * so PM_CYC is equivalent to PM_RUN_CYC * and PM_INST_CMPL === PM_RUN_INST_CMPL. * This doesn't include alternatives that don't provide * any extra flexibility in assigning PMCs. */ j = nalt; for (i = 0; i < nalt; ++i) { switch (alt[i]) { case 0x1e: /* PM_CYC */ alt[j++] = 0x600f4; /* PM_RUN_CYC */ break; case 0x600f4: /* PM_RUN_CYC */ alt[j++] = 0x1e; break; case 0x2: /* PM_PPC_CMPL */ alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */ break; case 0x500fa: /* PM_RUN_INST_CMPL */ alt[j++] = 0x2; /* PM_PPC_CMPL */ break; } } nalt = j; } return nalt; } /* * Returns 1 if event counts things relating to marked instructions * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. */ static int power7_marked_instr_event(u64 event) { int pmc, psel; int unit; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; psel = event & PM_PMCSEL_MSK & ~1; /* trim off edge/level bit */ if (pmc >= 5) return 0; switch (psel >> 4) { case 2: return pmc == 2 || pmc == 4; case 3: if (psel == 0x3c) return pmc == 1; if (psel == 0x3e) return pmc != 2; return 1; case 4: case 5: return unit == 0xd; case 6: if (psel == 0x64) return pmc >= 3; break; case 8: return unit == 0xd; } return 0; } static int power7_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[], u32 flags __maybe_unused) { unsigned long mmcr1 = 0; unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; unsigned int pmc, unit, combine, l2sel, psel; unsigned int pmc_inuse = 0; int i; /* First pass to count resource use */ for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { if (pmc > 6) return -1; if (pmc_inuse & (1 << (pmc - 1))) return -1; pmc_inuse |= 1 << (pmc - 1); } } /* Second pass: assign PMCs, set all MMCR1 fields */ for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; combine = (event[i] >> PM_COMBINE_SH) & PM_COMBINE_MSK; l2sel = (event[i] >> PM_L2SEL_SH) & PM_L2SEL_MSK; psel = event[i] & PM_PMCSEL_MSK; if (!pmc) { /* Bus event or any-PMC direct event */ for (pmc = 0; pmc < 4; ++pmc) { if (!(pmc_inuse & (1 << pmc))) break; } if (pmc >= 4) return -1; pmc_inuse |= 1 << pmc; } else { /* Direct or decoded event */ --pmc; } if (pmc <= 3) { mmcr1 |= (unsigned long) unit << (MMCR1_TTM0SEL_SH - 4 * pmc); mmcr1 |= (unsigned long) combine << (MMCR1_PMC1_COMBINE_SH - pmc); mmcr1 |= psel << 
MMCR1_PMCSEL_SH(pmc); if (unit == 6) /* L2 events */ mmcr1 |= (unsigned long) l2sel << MMCR1_L2SEL_SH; } if (power7_marked_instr_event(event[i])) mmcra |= MMCRA_SAMPLE_ENABLE; hwc[i] = pmc; } /* Return MMCRx values */ mmcr->mmcr0 = 0; if (pmc_inuse & 1) mmcr->mmcr0 = MMCR0_PMC1CE; if (pmc_inuse & 0x3e) mmcr->mmcr0 |= MMCR0_PMCjCE; mmcr->mmcr1 = mmcr1; mmcr->mmcra = mmcra; return 0; } static void power7_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr) { if (pmc <= 3) mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); } static int power7_generic_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_GCT_NOSLOT_CYC, [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL, [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN, [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED, }; #define C(x) PERF_COUNT_HW_CACHE_##x /* * Table of generalized cache-related events. * 0 means not supported, -1 means nonsensical, other values * are event codes. */ static u64 power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0xc880, 0x400f0 }, [C(OP_WRITE)] = { 0, 0x300f0 }, [C(OP_PREFETCH)] = { 0xd8b8, 0 }, }, [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x200fc }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0x408a, 0 }, }, [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x16080, 0x26080 }, [C(OP_WRITE)] = { 0x16082, 0x26082 }, [C(OP_PREFETCH)] = { 0, 0 }, }, [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x300fc }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x400fc }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x10068, 0x400f6 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { -1, -1 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, }; GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC); GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC); GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL); GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL); GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1); GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1); GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN); GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED); #define EVENT(_name, _code) POWER_EVENT_ATTR(_name, _name); #include "power7-events-list.h" #undef EVENT #define EVENT(_name, _code) POWER_EVENT_PTR(_name), static struct attribute *power7_events_attr[] = { GENERIC_EVENT_PTR(PM_CYC), GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC), GENERIC_EVENT_PTR(PM_CMPLU_STALL), GENERIC_EVENT_PTR(PM_INST_CMPL), GENERIC_EVENT_PTR(PM_LD_REF_L1), GENERIC_EVENT_PTR(PM_LD_MISS_L1), GENERIC_EVENT_PTR(PM_BRU_FIN), GENERIC_EVENT_PTR(PM_BR_MPRED), #include "power7-events-list.h" #undef EVENT NULL }; static const struct attribute_group power7_pmu_events_group = { .name = "events", .attrs = power7_events_attr, }; PMU_FORMAT_ATTR(event, "config:0-19"); static struct attribute *power7_pmu_format_attr[] = { &format_attr_event.attr, NULL, }; static const struct attribute_group power7_pmu_format_group = { .name = "format", .attrs = power7_pmu_format_attr, }; static const struct attribute_group 
*power7_pmu_attr_groups[] = { &power7_pmu_format_group, &power7_pmu_events_group, NULL, }; static struct power_pmu power7_pmu = { .name = "POWER7", .n_counter = 6, .max_alternatives = MAX_ALT + 1, .add_fields = 0x1555ul, .test_adder = 0x3000ul, .compute_mmcr = power7_compute_mmcr, .get_constraint = power7_get_constraint, .get_alternatives = power7_get_alternatives, .disable_pmc = power7_disable_pmc, .flags = PPMU_ALT_SIPR, .attr_groups = power7_pmu_attr_groups, .n_generic = ARRAY_SIZE(power7_generic_events), .generic_events = power7_generic_events, .cache_events = &power7_cache_events, }; int __init init_power7_pmu(void) { unsigned int pvr = mfspr(SPRN_PVR); if (PVR_VER(pvr) != PVR_POWER7 && PVR_VER(pvr) != PVR_POWER7p) return -ENODEV; if (PVR_VER(pvr) == PVR_POWER7p) power7_pmu.flags |= PPMU_SIAR_VALID; return register_power_pmu(&power7_pmu); }
linux-master
arch/powerpc/perf/power7-pmu.c
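/*
 * Standalone sketch (not kernel code): how a POWER7 raw event code is
 * unpacked into the PMC / unit / combine / L2SEL / PMCSEL fields that
 * power7_compute_mmcr() above writes into MMCR1.  The shift and mask values
 * are copied from the #defines at the top of power7-pmu.c; the event codes
 * used below (0x600f4 PM_RUN_CYC, 0x1e PM_CYC) appear in that file's
 * alternatives handling.
 */
#include <stdint.h>
#include <stdio.h>

#define PM_PMC_SH	16
#define PM_PMC_MSK	0xf
#define PM_UNIT_SH	12
#define PM_UNIT_MSK	0xf
#define PM_COMBINE_SH	11
#define PM_COMBINE_MSK	1
#define PM_L2SEL_SH	8
#define PM_L2SEL_MSK	7
#define PM_PMCSEL_MSK	0xff

static void decode(uint64_t event)
{
	unsigned int pmc     = (event >> PM_PMC_SH) & PM_PMC_MSK;
	unsigned int unit    = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
	unsigned int combine = (event >> PM_COMBINE_SH) & PM_COMBINE_MSK;
	unsigned int l2sel   = (event >> PM_L2SEL_SH) & PM_L2SEL_MSK;
	unsigned int psel    = event & PM_PMCSEL_MSK;

	printf("event 0x%llx: pmc=%u unit=%u combine=%u l2sel=%u psel=0x%x\n",
	       (unsigned long long)event, pmc, unit, combine, l2sel, psel);
}

int main(void)
{
	decode(0x600f4);	/* PM_RUN_CYC: pmc=6, psel=0xf4 */
	decode(0x1e);		/* PM_CYC: pmc=0 (any PMC), psel=0x1e */
	return 0;
}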
// SPDX-License-Identifier: GPL-2.0-or-later /* * Performance event support - PPC 8xx * * Copyright 2016 Christophe Leroy, CS Systemes d'Information */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/perf_event.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <asm/pmc.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/ptrace.h> #include <asm/code-patching.h> #include <asm/inst.h> #define PERF_8xx_ID_CPU_CYCLES 1 #define PERF_8xx_ID_HW_INSTRUCTIONS 2 #define PERF_8xx_ID_ITLB_LOAD_MISS 3 #define PERF_8xx_ID_DTLB_LOAD_MISS 4 #define C(x) PERF_COUNT_HW_CACHE_##x #define DTLB_LOAD_MISS (C(DTLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)) #define ITLB_LOAD_MISS (C(ITLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)) extern unsigned long itlb_miss_counter, dtlb_miss_counter; extern atomic_t instruction_counter; static atomic_t insn_ctr_ref; static atomic_t itlb_miss_ref; static atomic_t dtlb_miss_ref; static s64 get_insn_ctr(void) { int ctr; unsigned long counta; do { ctr = atomic_read(&instruction_counter); counta = mfspr(SPRN_COUNTA); } while (ctr != atomic_read(&instruction_counter)); return ((s64)ctr << 16) | (counta >> 16); } static int event_type(struct perf_event *event) { switch (event->attr.type) { case PERF_TYPE_HARDWARE: if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) return PERF_8xx_ID_CPU_CYCLES; if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) return PERF_8xx_ID_HW_INSTRUCTIONS; break; case PERF_TYPE_HW_CACHE: if (event->attr.config == ITLB_LOAD_MISS) return PERF_8xx_ID_ITLB_LOAD_MISS; if (event->attr.config == DTLB_LOAD_MISS) return PERF_8xx_ID_DTLB_LOAD_MISS; break; case PERF_TYPE_RAW: break; default: return -ENOENT; } return -EOPNOTSUPP; } static int mpc8xx_pmu_event_init(struct perf_event *event) { int type = event_type(event); if (type < 0) return type; return 0; } static int mpc8xx_pmu_add(struct perf_event *event, int flags) { int type = event_type(event); s64 val = 0; if (type < 0) return type; switch (type) { case PERF_8xx_ID_CPU_CYCLES: val = get_tb(); break; case PERF_8xx_ID_HW_INSTRUCTIONS: if (atomic_inc_return(&insn_ctr_ref) == 1) mtspr(SPRN_ICTRL, 0xc0080007); val = get_insn_ctr(); break; case PERF_8xx_ID_ITLB_LOAD_MISS: if (atomic_inc_return(&itlb_miss_ref) == 1) { unsigned long target = patch_site_addr(&patch__itlbmiss_perf); patch_branch_site(&patch__itlbmiss_exit_1, target, 0); } val = itlb_miss_counter; break; case PERF_8xx_ID_DTLB_LOAD_MISS: if (atomic_inc_return(&dtlb_miss_ref) == 1) { unsigned long target = patch_site_addr(&patch__dtlbmiss_perf); patch_branch_site(&patch__dtlbmiss_exit_1, target, 0); } val = dtlb_miss_counter; break; } local64_set(&event->hw.prev_count, val); return 0; } static void mpc8xx_pmu_read(struct perf_event *event) { int type = event_type(event); s64 prev, val = 0, delta = 0; if (type < 0) return; do { prev = local64_read(&event->hw.prev_count); switch (type) { case PERF_8xx_ID_CPU_CYCLES: val = get_tb(); delta = 16 * (val - prev); break; case PERF_8xx_ID_HW_INSTRUCTIONS: val = get_insn_ctr(); delta = prev - val; if (delta < 0) delta += 0x1000000000000LL; break; case PERF_8xx_ID_ITLB_LOAD_MISS: val = itlb_miss_counter; delta = (s64)((s32)val - (s32)prev); break; case PERF_8xx_ID_DTLB_LOAD_MISS: val = dtlb_miss_counter; delta = (s64)((s32)val - (s32)prev); break; } } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); local64_add(delta, &event->count); } static void mpc8xx_pmu_del(struct perf_event *event, int flags) { ppc_inst_t insn = 
ppc_inst(PPC_RAW_MFSPR(10, SPRN_SPRG_SCRATCH2)); mpc8xx_pmu_read(event); /* If it was the last user, stop counting to avoid useless overhead */ switch (event_type(event)) { case PERF_8xx_ID_CPU_CYCLES: break; case PERF_8xx_ID_HW_INSTRUCTIONS: if (atomic_dec_return(&insn_ctr_ref) == 0) mtspr(SPRN_ICTRL, 7); break; case PERF_8xx_ID_ITLB_LOAD_MISS: if (atomic_dec_return(&itlb_miss_ref) == 0) patch_instruction_site(&patch__itlbmiss_exit_1, insn); break; case PERF_8xx_ID_DTLB_LOAD_MISS: if (atomic_dec_return(&dtlb_miss_ref) == 0) patch_instruction_site(&patch__dtlbmiss_exit_1, insn); break; } } static struct pmu mpc8xx_pmu = { .event_init = mpc8xx_pmu_event_init, .add = mpc8xx_pmu_add, .del = mpc8xx_pmu_del, .read = mpc8xx_pmu_read, .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_NMI, }; static int init_mpc8xx_pmu(void) { mtspr(SPRN_ICTRL, 7); mtspr(SPRN_CMPA, 0); mtspr(SPRN_COUNTA, 0xffff); return perf_pmu_register(&mpc8xx_pmu, "cpu", PERF_TYPE_RAW); } early_initcall(init_mpc8xx_pmu);
linux-master
arch/powerpc/perf/8xx-pmu.c
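/*
 * Standalone sketch (not kernel code): the wrap-safe delta computations used
 * by mpc8xx_pmu_read() above.  The TLB miss counters are 32-bit and may wrap
 * between two reads, so the kernel takes the delta as a signed 32-bit
 * difference; the instruction counter counts down over a 48-bit range, so a
 * negative raw delta is corrected by adding 2^48.  Sample values below are
 * invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

/* Same effect as the kernel's (s64)((s32)val - (s32)prev) for a 32-bit counter. */
static long long delta32(uint32_t prev, uint32_t val)
{
	return (long long)(int32_t)(val - prev);
}

/* Down-counting 48-bit instruction counter: prev - val, corrected for wrap. */
static long long delta_insn(long long prev, long long val)
{
	long long delta = prev - val;

	if (delta < 0)
		delta += 0x1000000000000LL;	/* 2^48 */
	return delta;
}

int main(void)
{
	/* Miss counter wrapped from 0xfffffff0 to 0x00000010: 0x20 new misses. */
	printf("%lld\n", delta32(0xfffffff0u, 0x00000010u));	/* 32 */
	/* Down counter moved from 100 to 40: 60 instructions retired. */
	printf("%lld\n", delta_insn(100, 40));			/* 60 */
	/* Down counter wrapped past zero during the interval. */
	printf("%lld\n", delta_insn(10, 0xFFFFFFFFFFF0LL));	/* 26 */
	return 0;
}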
// SPDX-License-Identifier: GPL-2.0-or-later /* * Hypervisor supplied "24x7" performance counter support * * Author: Cody P Schafer <[email protected]> * Copyright 2014 IBM Corporation. */ #define pr_fmt(fmt) "hv-24x7: " fmt #include <linux/perf_event.h> #include <linux/rbtree.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <asm/cputhreads.h> #include <asm/firmware.h> #include <asm/hvcall.h> #include <asm/io.h> #include <asm/papr-sysparm.h> #include <linux/byteorder/generic.h> #include <asm/rtas.h> #include "hv-24x7.h" #include "hv-24x7-catalog.h" #include "hv-common.h" /* Version of the 24x7 hypervisor API that we should use in this machine. */ static int interface_version; /* Whether we have to aggregate result data for some domains. */ static bool aggregate_result_elements; static cpumask_t hv_24x7_cpumask; static bool domain_is_valid(unsigned int domain) { switch (domain) { #define DOMAIN(n, v, x, c) \ case HV_PERF_DOMAIN_##n: \ /* fall through */ #include "hv-24x7-domains.h" #undef DOMAIN return true; default: return false; } } static bool is_physical_domain(unsigned int domain) { switch (domain) { #define DOMAIN(n, v, x, c) \ case HV_PERF_DOMAIN_##n: \ return c; #include "hv-24x7-domains.h" #undef DOMAIN default: return false; } } /* * The Processor Module Information system parameter allows transferring * of certain processor module information from the platform to the OS. * Refer PAPR+ document to get parameter token value as '43'. */ static u32 phys_sockets; /* Physical sockets */ static u32 phys_chipspersocket; /* Physical chips per socket*/ static u32 phys_coresperchip; /* Physical cores per chip */ /* * read_24x7_sys_info() * Retrieve the number of sockets and chips per socket and cores per * chip details through the get-system-parameter rtas call. */ void read_24x7_sys_info(void) { struct papr_sysparm_buf *buf; /* * Making system parameter: chips and sockets and cores per chip * default to 1. */ phys_sockets = 1; phys_chipspersocket = 1; phys_coresperchip = 1; buf = papr_sysparm_buf_alloc(); if (!buf) return; if (!papr_sysparm_get(PAPR_SYSPARM_PROC_MODULE_INFO, buf)) { int ntypes = be16_to_cpup((__be16 *)&buf->val[0]); int len = be16_to_cpu(buf->len); if (len >= 8 && ntypes != 0) { phys_sockets = be16_to_cpup((__be16 *)&buf->val[2]); phys_chipspersocket = be16_to_cpup((__be16 *)&buf->val[4]); phys_coresperchip = be16_to_cpup((__be16 *)&buf->val[6]); } } papr_sysparm_buf_free(buf); } /* Domains for which more than one result element are returned for each event. */ static bool domain_needs_aggregation(unsigned int domain) { return aggregate_result_elements && (domain == HV_PERF_DOMAIN_PHYS_CORE || (domain >= HV_PERF_DOMAIN_VCPU_HOME_CORE && domain <= HV_PERF_DOMAIN_VCPU_REMOTE_NODE)); } static const char *domain_name(unsigned int domain) { if (!domain_is_valid(domain)) return NULL; switch (domain) { case HV_PERF_DOMAIN_PHYS_CHIP: return "Physical Chip"; case HV_PERF_DOMAIN_PHYS_CORE: return "Physical Core"; case HV_PERF_DOMAIN_VCPU_HOME_CORE: return "VCPU Home Core"; case HV_PERF_DOMAIN_VCPU_HOME_CHIP: return "VCPU Home Chip"; case HV_PERF_DOMAIN_VCPU_HOME_NODE: return "VCPU Home Node"; case HV_PERF_DOMAIN_VCPU_REMOTE_NODE: return "VCPU Remote Node"; } WARN_ON_ONCE(domain); return NULL; } static bool catalog_entry_domain_is_valid(unsigned int domain) { /* POWER8 doesn't support virtual domains. 
*/ if (interface_version == 1) return is_physical_domain(domain); else return domain_is_valid(domain); } /* * TODO: Merging events: * - Think of the hcall as an interface to a 4d array of counters: * - x = domains * - y = indexes in the domain (core, chip, vcpu, node, etc) * - z = offset into the counter space * - w = lpars (guest vms, "logical partitions") * - A single request is: x,y,y_last,z,z_last,w,w_last * - this means we can retrieve a rectangle of counters in y,z for a single x. * * - Things to consider (ignoring w): * - input cost_per_request = 16 * - output cost_per_result(ys,zs) = 8 + 8 * ys + ys * zs * - limited number of requests per hcall (must fit into 4K bytes) * - 4k = 16 [buffer header] - 16 [request size] * request_count * - 255 requests per hcall * - sometimes it will be more efficient to read extra data and discard */ /* * Example usage: * perf stat -e 'hv_24x7/domain=2,offset=8,vcpu=0,lpar=0xffffffff/' */ /* u3 0-6, one of HV_24X7_PERF_DOMAIN */ EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3); /* u16 */ EVENT_DEFINE_RANGE_FORMAT(core, config, 16, 31); EVENT_DEFINE_RANGE_FORMAT(chip, config, 16, 31); EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31); /* u32, see "data_offset" */ EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63); /* u16 */ EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15); EVENT_DEFINE_RANGE(reserved1, config, 4, 15); EVENT_DEFINE_RANGE(reserved2, config1, 16, 63); EVENT_DEFINE_RANGE(reserved3, config2, 0, 63); static struct attribute *format_attrs[] = { &format_attr_domain.attr, &format_attr_offset.attr, &format_attr_core.attr, &format_attr_chip.attr, &format_attr_vcpu.attr, &format_attr_lpar.attr, NULL, }; static const struct attribute_group format_group = { .name = "format", .attrs = format_attrs, }; static struct attribute_group event_group = { .name = "events", /* .attrs is set in init */ }; static struct attribute_group event_desc_group = { .name = "event_descs", /* .attrs is set in init */ }; static struct attribute_group event_long_desc_group = { .name = "event_long_descs", /* .attrs is set in init */ }; static struct kmem_cache *hv_page_cache; static DEFINE_PER_CPU(int, hv_24x7_txn_flags); static DEFINE_PER_CPU(int, hv_24x7_txn_err); struct hv_24x7_hw { struct perf_event *events[255]; }; static DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw); /* * request_buffer and result_buffer are not required to be 4k aligned, * but are not allowed to cross any 4k boundary. Aligning them to 4k is * the simplest way to ensure that. 
*/ #define H24x7_DATA_BUFFER_SIZE 4096 static DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096); static DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096); static unsigned int max_num_requests(int interface_version) { return (H24x7_DATA_BUFFER_SIZE - sizeof(struct hv_24x7_request_buffer)) / H24x7_REQUEST_SIZE(interface_version); } static char *event_name(struct hv_24x7_event_data *ev, int *len) { *len = be16_to_cpu(ev->event_name_len) - 2; return (char *)ev->remainder; } static char *event_desc(struct hv_24x7_event_data *ev, int *len) { unsigned int nl = be16_to_cpu(ev->event_name_len); __be16 *desc_len = (__be16 *)(ev->remainder + nl - 2); *len = be16_to_cpu(*desc_len) - 2; return (char *)ev->remainder + nl; } static char *event_long_desc(struct hv_24x7_event_data *ev, int *len) { unsigned int nl = be16_to_cpu(ev->event_name_len); __be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2); unsigned int desc_len = be16_to_cpu(*desc_len_); __be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2); *len = be16_to_cpu(*long_desc_len) - 2; return (char *)ev->remainder + nl + desc_len; } static bool event_fixed_portion_is_within(struct hv_24x7_event_data *ev, void *end) { void *start = ev; return (start + offsetof(struct hv_24x7_event_data, remainder)) < end; } /* * Things we don't check: * - padding for desc, name, and long/detailed desc is required to be '\0' * bytes. * * Return NULL if we pass end, * Otherwise return the address of the byte just following the event. */ static void *event_end(struct hv_24x7_event_data *ev, void *end) { void *start = ev; __be16 *dl_, *ldl_; unsigned int dl, ldl; unsigned int nl = be16_to_cpu(ev->event_name_len); if (nl < 2) { pr_debug("%s: name length too short: %d", __func__, nl); return NULL; } if (start + nl > end) { pr_debug("%s: start=%p + nl=%u > end=%p", __func__, start, nl, end); return NULL; } dl_ = (__be16 *)(ev->remainder + nl - 2); if (!IS_ALIGNED((uintptr_t)dl_, 2)) pr_warn("desc len not aligned %p", dl_); dl = be16_to_cpu(*dl_); if (dl < 2) { pr_debug("%s: desc len too short: %d", __func__, dl); return NULL; } if (start + nl + dl > end) { pr_debug("%s: (start=%p + nl=%u + dl=%u)=%p > end=%p", __func__, start, nl, dl, start + nl + dl, end); return NULL; } ldl_ = (__be16 *)(ev->remainder + nl + dl - 2); if (!IS_ALIGNED((uintptr_t)ldl_, 2)) pr_warn("long desc len not aligned %p", ldl_); ldl = be16_to_cpu(*ldl_); if (ldl < 2) { pr_debug("%s: long desc len too short (ldl=%u)", __func__, ldl); return NULL; } if (start + nl + dl + ldl > end) { pr_debug("%s: start=%p + nl=%u + dl=%u + ldl=%u > end=%p", __func__, start, nl, dl, ldl, end); return NULL; } return start + nl + dl + ldl; } static long h_get_24x7_catalog_page_(unsigned long phys_4096, unsigned long version, unsigned long index) { pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)", phys_4096, version, index); WARN_ON(!IS_ALIGNED(phys_4096, 4096)); return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE, phys_4096, version, index); } static long h_get_24x7_catalog_page(char page[], u64 version, u32 index) { return h_get_24x7_catalog_page_(virt_to_phys(page), version, index); } /* * Each event we find in the catalog, will have a sysfs entry. Format the * data for this sysfs entry based on the event's domain. * * Events belonging to the Chip domain can only be monitored in that domain. * i.e the domain for these events is a fixed/knwon value. 
* * Events belonging to the Core domain can be monitored either in the physical * core or in one of the virtual CPU domains. So the domain value for these * events must be specified by the user (i.e is a required parameter). Format * the Core events with 'domain=?' so the perf-tool can error check required * parameters. * * NOTE: For the Core domain events, rather than making domain a required * parameter we could default it to PHYS_CORE and allowe users to * override the domain to one of the VCPU domains. * * However, this can make the interface a little inconsistent. * * If we set domain=2 (PHYS_CHIP) and allow user to override this field * the user may be tempted to also modify the "offset=x" field in which * can lead to confusing usage. Consider the HPM_PCYC (offset=0x18) and * HPM_INST (offset=0x20) events. With: * * perf stat -e hv_24x7/HPM_PCYC,offset=0x20/ * * we end up monitoring HPM_INST, while the command line has HPM_PCYC. * * By not assigning a default value to the domain for the Core events, * we can have simple guidelines: * * - Specifying values for parameters with "=?" is required. * * - Specifying (i.e overriding) values for other parameters * is undefined. */ static char *event_fmt(struct hv_24x7_event_data *event, unsigned int domain) { const char *sindex; const char *lpar; const char *domain_str; char buf[8]; switch (domain) { case HV_PERF_DOMAIN_PHYS_CHIP: snprintf(buf, sizeof(buf), "%d", domain); domain_str = buf; lpar = "0x0"; sindex = "chip"; break; case HV_PERF_DOMAIN_PHYS_CORE: domain_str = "?"; lpar = "0x0"; sindex = "core"; break; default: domain_str = "?"; lpar = "?"; sindex = "vcpu"; } return kasprintf(GFP_KERNEL, "domain=%s,offset=0x%x,%s=?,lpar=%s", domain_str, be16_to_cpu(event->event_counter_offs) + be16_to_cpu(event->event_group_record_offs), sindex, lpar); } /* Avoid trusting fw to NUL terminate strings */ static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp) { return kasprintf(gfp, "%.*s", max_len, maybe_str); } static ssize_t device_show_string(struct device *dev, struct device_attribute *attr, char *buf) { struct dev_ext_attribute *d; d = container_of(attr, struct dev_ext_attribute, attr); return sprintf(buf, "%s\n", (char *)d->var); } static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) { return cpumap_print_to_pagebuf(true, buf, &hv_24x7_cpumask); } static ssize_t sockets_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", phys_sockets); } static ssize_t chipspersocket_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", phys_chipspersocket); } static ssize_t coresperchip_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", phys_coresperchip); } static struct attribute *device_str_attr_create_(char *name, char *str) { struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL); if (!attr) return NULL; sysfs_attr_init(&attr->attr.attr); attr->var = str; attr->attr.attr.name = name; attr->attr.attr.mode = 0444; attr->attr.show = device_show_string; return &attr->attr.attr; } /* * Allocate and initialize strings representing event attributes. * * NOTE: The strings allocated here are never destroyed and continue to * exist till shutdown. This is to allow us to create as many events * from the catalog as possible, even if we encounter errors with some. * In case of changes to error paths in future, these may need to be * freed by the caller. 
*/ static struct attribute *device_str_attr_create(char *name, int name_max, int name_nonce, char *str, size_t str_max) { char *n; char *s = memdup_to_str(str, str_max, GFP_KERNEL); struct attribute *a; if (!s) return NULL; if (!name_nonce) n = kasprintf(GFP_KERNEL, "%.*s", name_max, name); else n = kasprintf(GFP_KERNEL, "%.*s__%d", name_max, name, name_nonce); if (!n) goto out_s; a = device_str_attr_create_(n, s); if (!a) goto out_n; return a; out_n: kfree(n); out_s: kfree(s); return NULL; } static struct attribute *event_to_attr(unsigned int ix, struct hv_24x7_event_data *event, unsigned int domain, int nonce) { int event_name_len; char *ev_name, *a_ev_name, *val; struct attribute *attr; if (!domain_is_valid(domain)) { pr_warn("catalog event %u has invalid domain %u\n", ix, domain); return NULL; } val = event_fmt(event, domain); if (!val) return NULL; ev_name = event_name(event, &event_name_len); if (!nonce) a_ev_name = kasprintf(GFP_KERNEL, "%.*s", (int)event_name_len, ev_name); else a_ev_name = kasprintf(GFP_KERNEL, "%.*s__%d", (int)event_name_len, ev_name, nonce); if (!a_ev_name) goto out_val; attr = device_str_attr_create_(a_ev_name, val); if (!attr) goto out_name; return attr; out_name: kfree(a_ev_name); out_val: kfree(val); return NULL; } static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event, int nonce) { int nl, dl; char *name = event_name(event, &nl); char *desc = event_desc(event, &dl); /* If there isn't a description, don't create the sysfs file */ if (!dl) return NULL; return device_str_attr_create(name, nl, nonce, desc, dl); } static struct attribute * event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce) { int nl, dl; char *name = event_name(event, &nl); char *desc = event_long_desc(event, &dl); /* If there isn't a description, don't create the sysfs file */ if (!dl) return NULL; return device_str_attr_create(name, nl, nonce, desc, dl); } static int event_data_to_attrs(unsigned int ix, struct attribute **attrs, struct hv_24x7_event_data *event, int nonce) { *attrs = event_to_attr(ix, event, event->domain, nonce); if (!*attrs) return -1; return 0; } /* */ struct event_uniq { struct rb_node node; const char *name; int nl; unsigned int ct; unsigned int domain; }; static int memord(const void *d1, size_t s1, const void *d2, size_t s2) { if (s1 < s2) return 1; if (s1 > s2) return -1; return memcmp(d1, d2, s1); } static int ev_uniq_ord(const void *v1, size_t s1, unsigned int d1, const void *v2, size_t s2, unsigned int d2) { int r = memord(v1, s1, v2, s2); if (r) return r; if (d1 > d2) return 1; if (d2 > d1) return -1; return 0; } static int event_uniq_add(struct rb_root *root, const char *name, int nl, unsigned int domain) { struct rb_node **new = &(root->rb_node), *parent = NULL; struct event_uniq *data; /* Figure out where to put new node */ while (*new) { struct event_uniq *it; int result; it = rb_entry(*new, struct event_uniq, node); result = ev_uniq_ord(name, nl, domain, it->name, it->nl, it->domain); parent = *new; if (result < 0) new = &((*new)->rb_left); else if (result > 0) new = &((*new)->rb_right); else { it->ct++; pr_info("found a duplicate event %.*s, ct=%u\n", nl, name, it->ct); return it->ct; } } data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; *data = (struct event_uniq) { .name = name, .nl = nl, .ct = 0, .domain = domain, }; /* Add new node and rebalance tree. 
*/ rb_link_node(&data->node, parent, new); rb_insert_color(&data->node, root); /* data->ct */ return 0; } static void event_uniq_destroy(struct rb_root *root) { /* * the strings we point to are in the giant block of memory filled by * the catalog, and are freed separately. */ struct event_uniq *pos, *n; rbtree_postorder_for_each_entry_safe(pos, n, root, node) kfree(pos); } /* * ensure the event structure's sizes are self consistent and don't cause us to * read outside of the event * * On success, return the event length in bytes. * Otherwise, return -1 (and print as appropriate). */ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event, size_t event_idx, size_t event_data_bytes, size_t event_entry_count, size_t offset, void *end) { ssize_t ev_len; void *ev_end, *calc_ev_end; if (offset >= event_data_bytes) return -1; if (event_idx >= event_entry_count) { pr_devel("catalog event data has %zu bytes of padding after last event\n", event_data_bytes - offset); return -1; } if (!event_fixed_portion_is_within(event, end)) { pr_warn("event %zu fixed portion is not within range\n", event_idx); return -1; } ev_len = be16_to_cpu(event->length); if (ev_len % 16) pr_info("event %zu has length %zu not divisible by 16: event=%pK\n", event_idx, ev_len, event); ev_end = (__u8 *)event + ev_len; if (ev_end > end) { pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n", event_idx, ev_len, ev_end, end, offset); return -1; } calc_ev_end = event_end(event, end); if (!calc_ev_end) { pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n", event_idx, event_data_bytes, event, end, offset); return -1; } if (calc_ev_end > ev_end) { pr_warn("event %zu exceeds its own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n", event_idx, event, ev_end, offset, calc_ev_end); return -1; } return ev_len; } /* * Return true incase of invalid or dummy events with names like RESERVED* */ static bool ignore_event(const char *name) { return strncmp(name, "RESERVED", 8) == 0; } #define MAX_4K (SIZE_MAX / 4096) static int create_events_from_catalog(struct attribute ***events_, struct attribute ***event_descs_, struct attribute ***event_long_descs_) { long hret; size_t catalog_len, catalog_page_len, event_entry_count, event_data_len, event_data_offs, event_data_bytes, junk_events, event_idx, event_attr_ct, i, attr_max, event_idx_last, desc_ct, long_desc_ct; ssize_t ct, ev_len; uint64_t catalog_version_num; struct attribute **events, **event_descs, **event_long_descs; struct hv_24x7_catalog_page_0 *page_0 = kmem_cache_alloc(hv_page_cache, GFP_KERNEL); void *page = page_0; void *event_data, *end; struct hv_24x7_event_data *event; struct rb_root ev_uniq = RB_ROOT; int ret = 0; if (!page) { ret = -ENOMEM; goto e_out; } hret = h_get_24x7_catalog_page(page, 0, 0); if (hret) { ret = -EIO; goto e_free; } catalog_version_num = be64_to_cpu(page_0->version); catalog_page_len = be32_to_cpu(page_0->length); if (MAX_4K < catalog_page_len) { pr_err("invalid page count: %zu\n", catalog_page_len); ret = -EIO; goto e_free; } catalog_len = catalog_page_len * 4096; event_entry_count = be16_to_cpu(page_0->event_entry_count); event_data_offs = be16_to_cpu(page_0->event_data_offs); event_data_len = be16_to_cpu(page_0->event_data_len); pr_devel("cv %llu cl %zu eec %zu edo %zu edl %zu\n", catalog_version_num, catalog_len, event_entry_count, event_data_offs, event_data_len); if ((MAX_4K < event_data_len) || (MAX_4K < event_data_offs) || 
(MAX_4K - event_data_offs < event_data_len)) { pr_err("invalid event data offs %zu and/or len %zu\n", event_data_offs, event_data_len); ret = -EIO; goto e_free; } if ((event_data_offs + event_data_len) > catalog_page_len) { pr_err("event data %zu-%zu does not fit inside catalog 0-%zu\n", event_data_offs, event_data_offs + event_data_len, catalog_page_len); ret = -EIO; goto e_free; } if (SIZE_MAX - 1 < event_entry_count) { pr_err("event_entry_count %zu is invalid\n", event_entry_count); ret = -EIO; goto e_free; } event_data_bytes = event_data_len * 4096; /* * event data can span several pages, events can cross between these * pages. Use vmalloc to make this easier. */ event_data = vmalloc(event_data_bytes); if (!event_data) { pr_err("could not allocate event data\n"); ret = -ENOMEM; goto e_free; } end = event_data + event_data_bytes; /* * using vmalloc_to_phys() like this only works if PAGE_SIZE is * divisible by 4096 */ BUILD_BUG_ON(PAGE_SIZE % 4096); for (i = 0; i < event_data_len; i++) { hret = h_get_24x7_catalog_page_( vmalloc_to_phys(event_data + i * 4096), catalog_version_num, i + event_data_offs); if (hret) { pr_err("Failed to get event data in page %zu: rc=%ld\n", i + event_data_offs, hret); ret = -EIO; goto e_event_data; } } /* * scan the catalog to determine the number of attributes we need, and * verify it at the same time. */ for (junk_events = 0, event = event_data, event_idx = 0, attr_max = 0; ; event_idx++, event = (void *)event + ev_len) { size_t offset = (void *)event - (void *)event_data; char *name; int nl; ev_len = catalog_event_len_validate(event, event_idx, event_data_bytes, event_entry_count, offset, end); if (ev_len < 0) break; name = event_name(event, &nl); if (ignore_event(name)) { junk_events++; continue; } if (event->event_group_record_len == 0) { pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n", event_idx, nl, name); junk_events++; continue; } if (!catalog_entry_domain_is_valid(event->domain)) { pr_info("event %zu (%.*s) has invalid domain %d\n", event_idx, nl, name, event->domain); junk_events++; continue; } attr_max++; } event_idx_last = event_idx; if (event_idx_last != event_entry_count) pr_warn("event buffer ended before listed # of events were parsed (got %zu, wanted %zu, junk %zu)\n", event_idx_last, event_entry_count, junk_events); events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL); if (!events) { ret = -ENOMEM; goto e_event_data; } event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs), GFP_KERNEL); if (!event_descs) { ret = -ENOMEM; goto e_event_attrs; } event_long_descs = kmalloc_array(event_idx + 1, sizeof(*event_long_descs), GFP_KERNEL); if (!event_long_descs) { ret = -ENOMEM; goto e_event_descs; } /* Iterate over the catalog filling in the attribute vector */ for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0, event = event_data, event_idx = 0; event_idx < event_idx_last; event_idx++, ev_len = be16_to_cpu(event->length), event = (void *)event + ev_len) { char *name; int nl; int nonce; /* * these are the only "bad" events that are intermixed and that * we can ignore without issue. 
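 * (zero event_group_record_len, an invalid domain, or a RESERVED* name,
 * the same three checks as in the counting pass above);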
make sure to skip them here */ if (event->event_group_record_len == 0) continue; if (!catalog_entry_domain_is_valid(event->domain)) continue; name = event_name(event, &nl); if (ignore_event(name)) continue; nonce = event_uniq_add(&ev_uniq, name, nl, event->domain); ct = event_data_to_attrs(event_idx, events + event_attr_ct, event, nonce); if (ct < 0) { pr_warn("event %zu (%.*s) creation failure, skipping\n", event_idx, nl, name); junk_events++; } else { event_attr_ct++; event_descs[desc_ct] = event_to_desc_attr(event, nonce); if (event_descs[desc_ct]) desc_ct++; event_long_descs[long_desc_ct] = event_to_long_desc_attr(event, nonce); if (event_long_descs[long_desc_ct]) long_desc_ct++; } } pr_info("read %zu catalog entries, created %zu event attrs (%zu failures), %zu descs\n", event_idx, event_attr_ct, junk_events, desc_ct); events[event_attr_ct] = NULL; event_descs[desc_ct] = NULL; event_long_descs[long_desc_ct] = NULL; event_uniq_destroy(&ev_uniq); vfree(event_data); kmem_cache_free(hv_page_cache, page); *events_ = events; *event_descs_ = event_descs; *event_long_descs_ = event_long_descs; return 0; e_event_descs: kfree(event_descs); e_event_attrs: kfree(events); e_event_data: vfree(event_data); e_free: kmem_cache_free(hv_page_cache, page); e_out: *events_ = NULL; *event_descs_ = NULL; *event_long_descs_ = NULL; return ret; } static ssize_t catalog_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t offset, size_t count) { long hret; ssize_t ret = 0; size_t catalog_len = 0, catalog_page_len = 0; loff_t page_offset = 0; loff_t offset_in_page; size_t copy_len; uint64_t catalog_version_num = 0; void *page = kmem_cache_alloc(hv_page_cache, GFP_USER); struct hv_24x7_catalog_page_0 *page_0 = page; if (!page) return -ENOMEM; hret = h_get_24x7_catalog_page(page, 0, 0); if (hret) { ret = -EIO; goto e_free; } catalog_version_num = be64_to_cpu(page_0->version); catalog_page_len = be32_to_cpu(page_0->length); catalog_len = catalog_page_len * 4096; page_offset = offset / 4096; offset_in_page = offset % 4096; if (page_offset >= catalog_page_len) goto e_free; if (page_offset != 0) { hret = h_get_24x7_catalog_page(page, catalog_version_num, page_offset); if (hret) { ret = -EIO; goto e_free; } } copy_len = 4096 - offset_in_page; if (copy_len > count) copy_len = count; memcpy(buf, page+offset_in_page, copy_len); ret = copy_len; e_free: if (hret) pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:" " rc=%ld\n", catalog_version_num, page_offset, hret); kmem_cache_free(hv_page_cache, page); pr_devel("catalog_read: offset=%lld(%lld) count=%zu " "catalog_len=%zu(%zu) => %zd\n", offset, page_offset, count, catalog_len, catalog_page_len, ret); return ret; } static ssize_t domains_show(struct device *dev, struct device_attribute *attr, char *page) { int d, n, count = 0; const char *str; for (d = 0; d < HV_PERF_DOMAIN_MAX; d++) { str = domain_name(d); if (!str) continue; n = sprintf(page, "%d: %s\n", d, str); if (n < 0) break; count += n; page += n; } return count; } #define PAGE_0_ATTR(_name, _fmt, _expr) \ static ssize_t _name##_show(struct device *dev, \ struct device_attribute *dev_attr, \ char *buf) \ { \ long hret; \ ssize_t ret = 0; \ void *page = kmem_cache_alloc(hv_page_cache, GFP_USER); \ struct hv_24x7_catalog_page_0 *page_0 = page; \ if (!page) \ return -ENOMEM; \ hret = h_get_24x7_catalog_page(page, 0, 0); \ if (hret) { \ ret = -EIO; \ goto e_free; \ } \ ret = sprintf(buf, _fmt, _expr); \ e_free: \ kmem_cache_free(hv_page_cache, page); \ return ret; \ 
} \ static DEVICE_ATTR_RO(_name) PAGE_0_ATTR(catalog_version, "%lld\n", (unsigned long long)be64_to_cpu(page_0->version)); PAGE_0_ATTR(catalog_len, "%lld\n", (unsigned long long)be32_to_cpu(page_0->length) * 4096); static BIN_ATTR_RO(catalog, 0/* real length varies */); static DEVICE_ATTR_RO(domains); static DEVICE_ATTR_RO(sockets); static DEVICE_ATTR_RO(chipspersocket); static DEVICE_ATTR_RO(coresperchip); static DEVICE_ATTR_RO(cpumask); static struct bin_attribute *if_bin_attrs[] = { &bin_attr_catalog, NULL, }; static struct attribute *cpumask_attrs[] = { &dev_attr_cpumask.attr, NULL, }; static const struct attribute_group cpumask_attr_group = { .attrs = cpumask_attrs, }; static struct attribute *if_attrs[] = { &dev_attr_catalog_len.attr, &dev_attr_catalog_version.attr, &dev_attr_domains.attr, &dev_attr_sockets.attr, &dev_attr_chipspersocket.attr, &dev_attr_coresperchip.attr, NULL, }; static const struct attribute_group if_group = { .name = "interface", .bin_attrs = if_bin_attrs, .attrs = if_attrs, }; static const struct attribute_group *attr_groups[] = { &format_group, &event_group, &event_desc_group, &event_long_desc_group, &if_group, &cpumask_attr_group, NULL, }; /* * Start the process for a new H_GET_24x7_DATA hcall. */ static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer, struct hv_24x7_data_result_buffer *result_buffer) { memset(request_buffer, 0, H24x7_DATA_BUFFER_SIZE); memset(result_buffer, 0, H24x7_DATA_BUFFER_SIZE); request_buffer->interface_version = interface_version; /* memset above set request_buffer->num_requests to 0 */ } /* * Commit (i.e perform) the H_GET_24x7_DATA hcall using the data collected * by 'init_24x7_request()' and 'add_event_to_24x7_request()'. */ static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer, struct hv_24x7_data_result_buffer *result_buffer) { long ret; /* * NOTE: Due to variable number of array elements in request and * result buffer(s), sizeof() is not reliable. Use the actual * allocated buffer size, H24x7_DATA_BUFFER_SIZE. */ ret = plpar_hcall_norets(H_GET_24X7_DATA, virt_to_phys(request_buffer), H24x7_DATA_BUFFER_SIZE, virt_to_phys(result_buffer), H24x7_DATA_BUFFER_SIZE); if (ret) { struct hv_24x7_request *req; req = request_buffer->requests; pr_notice_ratelimited("hcall failed: [%d %#x %#x %d] => ret 0x%lx (%ld) detail=0x%x failing ix=%x\n", req->performance_domain, req->data_offset, req->starting_ix, req->starting_lpar_ix, ret, ret, result_buffer->detailed_rc, result_buffer->failing_request_ix); return -EIO; } return 0; } /* * Add the given @event to the next slot in the 24x7 request_buffer. * * Note that H_GET_24X7_DATA hcall allows reading several counters' * values in a single HCALL. We expect the caller to add events to the * request buffer one by one, make the HCALL and process the results. 
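 *
 * Typical single-counter flow (see single_24x7_request() below):
 *
 *	init_24x7_request(request_buffer, result_buffer);
 *	add_event_to_24x7_request(event, request_buffer);
 *	make_24x7_request(request_buffer, result_buffer);
 *	get_count_from_result(event, result_buffer,
 *			      result_buffer->results, &count, NULL);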
*/ static int add_event_to_24x7_request(struct perf_event *event, struct hv_24x7_request_buffer *request_buffer) { u16 idx; int i; size_t req_size; struct hv_24x7_request *req; if (request_buffer->num_requests >= max_num_requests(request_buffer->interface_version)) { pr_devel("Too many requests for 24x7 HCALL %d\n", request_buffer->num_requests); return -EINVAL; } switch (event_get_domain(event)) { case HV_PERF_DOMAIN_PHYS_CHIP: idx = event_get_chip(event); break; case HV_PERF_DOMAIN_PHYS_CORE: idx = event_get_core(event); break; default: idx = event_get_vcpu(event); } req_size = H24x7_REQUEST_SIZE(request_buffer->interface_version); i = request_buffer->num_requests++; req = (void *) request_buffer->requests + i * req_size; req->performance_domain = event_get_domain(event); req->data_size = cpu_to_be16(8); req->data_offset = cpu_to_be32(event_get_offset(event)); req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event)); req->max_num_lpars = cpu_to_be16(1); req->starting_ix = cpu_to_be16(idx); req->max_ix = cpu_to_be16(1); if (request_buffer->interface_version > 1) { if (domain_needs_aggregation(req->performance_domain)) req->max_num_thread_groups = -1; else if (req->performance_domain != HV_PERF_DOMAIN_PHYS_CHIP) { req->starting_thread_group_ix = idx % 2; req->max_num_thread_groups = 1; } } return 0; } /** * get_count_from_result - get event count from all result elements in result * * If the event corresponding to this result needs aggregation of the result * element values, then this function does that. * * @event: Event associated with @res. * @resb: Result buffer containing @res. * @res: Result to work on. * @countp: Output variable containing the event count. * @next: Optional output variable pointing to the next result in @resb. */ static int get_count_from_result(struct perf_event *event, struct hv_24x7_data_result_buffer *resb, struct hv_24x7_result *res, u64 *countp, struct hv_24x7_result **next) { u16 num_elements = be16_to_cpu(res->num_elements_returned); u16 data_size = be16_to_cpu(res->result_element_data_size); unsigned int data_offset; void *element_data; int i; u64 count; /* * We can bail out early if the result is empty. */ if (!num_elements) { pr_debug("Result of request %hhu is empty, nothing to do\n", res->result_ix); if (next) *next = (struct hv_24x7_result *) res->elements; return -ENODATA; } /* * Since we always specify 1 as the maximum for the smallest resource * we're requesting, there should to be only one element per result. * Except when an event needs aggregation, in which case there are more. */ if (num_elements != 1 && !domain_needs_aggregation(event_get_domain(event))) { pr_err("Error: result of request %hhu has %hu elements\n", res->result_ix, num_elements); return -EIO; } if (data_size != sizeof(u64)) { pr_debug("Error: result of request %hhu has data of %hu bytes\n", res->result_ix, data_size); return -ENOTSUPP; } if (resb->interface_version == 1) data_offset = offsetof(struct hv_24x7_result_element_v1, element_data); else data_offset = offsetof(struct hv_24x7_result_element_v2, element_data); /* Go through the result elements in the result. */ for (i = count = 0, element_data = res->elements + data_offset; i < num_elements; i++, element_data += data_size + data_offset) count += be64_to_cpu(*((u64 *) element_data)); *countp = count; /* The next result is after the last result element. 
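 * element_data was advanced by (data_offset + data_size) for each element,
 * so stepping back by the final data_offset leaves it just past the last
 * element, i.e. at the start of the next hv_24x7_result.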
*/ if (next) *next = element_data - data_offset; return 0; } static int single_24x7_request(struct perf_event *event, u64 *count) { int ret; struct hv_24x7_request_buffer *request_buffer; struct hv_24x7_data_result_buffer *result_buffer; BUILD_BUG_ON(sizeof(*request_buffer) > 4096); BUILD_BUG_ON(sizeof(*result_buffer) > 4096); request_buffer = (void *)get_cpu_var(hv_24x7_reqb); result_buffer = (void *)get_cpu_var(hv_24x7_resb); init_24x7_request(request_buffer, result_buffer); ret = add_event_to_24x7_request(event, request_buffer); if (ret) goto out; ret = make_24x7_request(request_buffer, result_buffer); if (ret) goto out; /* process result from hcall */ ret = get_count_from_result(event, result_buffer, result_buffer->results, count, NULL); out: put_cpu_var(hv_24x7_reqb); put_cpu_var(hv_24x7_resb); return ret; } static int h_24x7_event_init(struct perf_event *event) { struct hv_perf_caps caps; unsigned int domain; unsigned long hret; u64 ct; /* Not our event */ if (event->attr.type != event->pmu->type) return -ENOENT; /* Unused areas must be 0 */ if (event_get_reserved1(event) || event_get_reserved2(event) || event_get_reserved3(event)) { pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n", event->attr.config, event_get_reserved1(event), event->attr.config1, event_get_reserved2(event), event->attr.config2, event_get_reserved3(event)); return -EINVAL; } /* no branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP; /* offset must be 8 byte aligned */ if (event_get_offset(event) % 8) { pr_devel("bad alignment\n"); return -EINVAL; } domain = event_get_domain(event); if (domain == 0 || domain >= HV_PERF_DOMAIN_MAX) { pr_devel("invalid domain %d\n", domain); return -EINVAL; } hret = hv_perf_caps_get(&caps); if (hret) { pr_devel("could not get capabilities: rc=%ld\n", hret); return -EIO; } /* Physical domains & other lpars require extra capabilities */ if (!caps.collect_privileged && (is_physical_domain(domain) || (event_get_lpar(event) != event_get_lpar_max()))) { pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n", is_physical_domain(domain), event_get_lpar(event)); return -EACCES; } /* Get the initial value of the counter for this event */ if (single_24x7_request(event, &ct)) { pr_devel("test hcall failed\n"); return -EIO; } (void)local64_xchg(&event->hw.prev_count, ct); return 0; } static u64 h_24x7_get_value(struct perf_event *event) { u64 ct; if (single_24x7_request(event, &ct)) /* We checked this in event init, shouldn't fail here... */ return 0; return ct; } static void update_event_count(struct perf_event *event, u64 now) { s64 prev; prev = local64_xchg(&event->hw.prev_count, now); local64_add(now - prev, &event->count); } static void h_24x7_event_read(struct perf_event *event) { u64 now; struct hv_24x7_request_buffer *request_buffer; struct hv_24x7_hw *h24x7hw; int txn_flags; txn_flags = __this_cpu_read(hv_24x7_txn_flags); /* * If in a READ transaction, add this counter to the list of * counters to read during the next HCALL (i.e commit_txn()). * If not in a READ transaction, go ahead and make the HCALL * to read this counter by itself. */ if (txn_flags & PERF_PMU_TXN_READ) { int i; int ret; if (__this_cpu_read(hv_24x7_txn_err)) return; request_buffer = (void *)get_cpu_var(hv_24x7_reqb); ret = add_event_to_24x7_request(event, request_buffer); if (ret) { __this_cpu_write(hv_24x7_txn_err, ret); } else { /* * Associate the event with the HCALL request index, * so ->commit_txn() can quickly find/update count. 
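 * The result for request i is expected to carry result_ix == i;
 * h_24x7_event_commit_txn() relies on this to map each result back to
 * its event via hv_24x7_hw.events[res->result_ix].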
*/ i = request_buffer->num_requests - 1; h24x7hw = &get_cpu_var(hv_24x7_hw); h24x7hw->events[i] = event; put_cpu_var(h24x7hw); } put_cpu_var(hv_24x7_reqb); } else { now = h_24x7_get_value(event); update_event_count(event, now); } } static void h_24x7_event_start(struct perf_event *event, int flags) { if (flags & PERF_EF_RELOAD) local64_set(&event->hw.prev_count, h_24x7_get_value(event)); } static void h_24x7_event_stop(struct perf_event *event, int flags) { h_24x7_event_read(event); } static int h_24x7_event_add(struct perf_event *event, int flags) { if (flags & PERF_EF_START) h_24x7_event_start(event, flags); return 0; } /* * 24x7 counters only support READ transactions. They are * always counting and dont need/support ADD transactions. * Cache the flags, but otherwise ignore transactions that * are not PERF_PMU_TXN_READ. */ static void h_24x7_event_start_txn(struct pmu *pmu, unsigned int flags) { struct hv_24x7_request_buffer *request_buffer; struct hv_24x7_data_result_buffer *result_buffer; /* We should not be called if we are already in a txn */ WARN_ON_ONCE(__this_cpu_read(hv_24x7_txn_flags)); __this_cpu_write(hv_24x7_txn_flags, flags); if (flags & ~PERF_PMU_TXN_READ) return; request_buffer = (void *)get_cpu_var(hv_24x7_reqb); result_buffer = (void *)get_cpu_var(hv_24x7_resb); init_24x7_request(request_buffer, result_buffer); put_cpu_var(hv_24x7_resb); put_cpu_var(hv_24x7_reqb); } /* * Clean up transaction state. * * NOTE: Ignore state of request and result buffers for now. * We will initialize them during the next read/txn. */ static void reset_txn(void) { __this_cpu_write(hv_24x7_txn_flags, 0); __this_cpu_write(hv_24x7_txn_err, 0); } /* * 24x7 counters only support READ transactions. They are always counting * and dont need/support ADD transactions. Clear ->txn_flags but otherwise * ignore transactions that are not of type PERF_PMU_TXN_READ. * * For READ transactions, submit all pending 24x7 requests (i.e requests * that were queued by h_24x7_event_read()), to the hypervisor and update * the event counts. */ static int h_24x7_event_commit_txn(struct pmu *pmu) { struct hv_24x7_request_buffer *request_buffer; struct hv_24x7_data_result_buffer *result_buffer; struct hv_24x7_result *res, *next_res; u64 count; int i, ret, txn_flags; struct hv_24x7_hw *h24x7hw; txn_flags = __this_cpu_read(hv_24x7_txn_flags); WARN_ON_ONCE(!txn_flags); ret = 0; if (txn_flags & ~PERF_PMU_TXN_READ) goto out; ret = __this_cpu_read(hv_24x7_txn_err); if (ret) goto out; request_buffer = (void *)get_cpu_var(hv_24x7_reqb); result_buffer = (void *)get_cpu_var(hv_24x7_resb); ret = make_24x7_request(request_buffer, result_buffer); if (ret) goto put_reqb; h24x7hw = &get_cpu_var(hv_24x7_hw); /* Go through results in the result buffer to update event counts. */ for (i = 0, res = result_buffer->results; i < result_buffer->num_results; i++, res = next_res) { struct perf_event *event = h24x7hw->events[res->result_ix]; ret = get_count_from_result(event, result_buffer, res, &count, &next_res); if (ret) break; update_event_count(event, count); } put_cpu_var(hv_24x7_hw); put_reqb: put_cpu_var(hv_24x7_resb); put_cpu_var(hv_24x7_reqb); out: reset_txn(); return ret; } /* * 24x7 counters only support READ transactions. They are always counting * and dont need/support ADD transactions. However, regardless of type * of transaction, all we need to do is cleanup, so we don't have to check * the type of transaction. 
*/ static void h_24x7_event_cancel_txn(struct pmu *pmu) { WARN_ON_ONCE(!__this_cpu_read(hv_24x7_txn_flags)); reset_txn(); } static struct pmu h_24x7_pmu = { .task_ctx_nr = perf_invalid_context, .name = "hv_24x7", .attr_groups = attr_groups, .event_init = h_24x7_event_init, .add = h_24x7_event_add, .del = h_24x7_event_stop, .start = h_24x7_event_start, .stop = h_24x7_event_stop, .read = h_24x7_event_read, .start_txn = h_24x7_event_start_txn, .commit_txn = h_24x7_event_commit_txn, .cancel_txn = h_24x7_event_cancel_txn, .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static int ppc_hv_24x7_cpu_online(unsigned int cpu) { if (cpumask_empty(&hv_24x7_cpumask)) cpumask_set_cpu(cpu, &hv_24x7_cpumask); return 0; } static int ppc_hv_24x7_cpu_offline(unsigned int cpu) { int target; /* Check if exiting cpu is used for collecting 24x7 events */ if (!cpumask_test_and_clear_cpu(cpu, &hv_24x7_cpumask)) return 0; /* Find a new cpu to collect 24x7 events */ target = cpumask_last(cpu_active_mask); if (target < 0 || target >= nr_cpu_ids) { pr_err("hv_24x7: CPU hotplug init failed\n"); return -1; } /* Migrate 24x7 events to the new target */ cpumask_set_cpu(target, &hv_24x7_cpumask); perf_pmu_migrate_context(&h_24x7_pmu, cpu, target); return 0; } static int hv_24x7_cpu_hotplug_init(void) { return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE, "perf/powerpc/hv_24x7:online", ppc_hv_24x7_cpu_online, ppc_hv_24x7_cpu_offline); } static int hv_24x7_init(void) { int r; unsigned long hret; unsigned int pvr = mfspr(SPRN_PVR); struct hv_perf_caps caps; if (!firmware_has_feature(FW_FEATURE_LPAR)) { pr_debug("not a virtualized system, not enabling\n"); return -ENODEV; } /* POWER8 only supports v1, while POWER9 only supports v2. */ if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E || PVR_VER(pvr) == PVR_POWER8NVL) interface_version = 1; else { interface_version = 2; /* SMT8 in POWER9 needs to aggregate result elements. */ if (threads_per_core == 8) aggregate_result_elements = true; } hret = hv_perf_caps_get(&caps); if (hret) { pr_debug("could not obtain capabilities, not enabling, rc=%ld\n", hret); return -ENODEV; } hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL); if (!hv_page_cache) return -ENOMEM; /* sampling not supported */ h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; r = create_events_from_catalog(&event_group.attrs, &event_desc_group.attrs, &event_long_desc_group.attrs); if (r) return r; /* init cpuhotplug */ r = hv_24x7_cpu_hotplug_init(); if (r) return r; r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1); if (r) return r; read_24x7_sys_info(); return 0; } device_initcall(hv_24x7_init);
linux-master
arch/powerpc/perf/hv-24x7.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Performance counter support for e500 family processors. * * Copyright 2008-2009 Paul Mackerras, IBM Corporation. * Copyright 2010 Freescale Semiconductor, Inc. */ #include <linux/string.h> #include <linux/perf_event.h> #include <asm/reg.h> #include <asm/cputable.h> /* * Map of generic hardware event types to hardware events * Zero if unsupported */ static int e500_generic_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = 1, [PERF_COUNT_HW_INSTRUCTIONS] = 2, [PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12, [PERF_COUNT_HW_BRANCH_MISSES] = 15, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 18, [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 19, }; #define C(x) PERF_COUNT_HW_CACHE_##x /* * Table of generalized cache-related events. * 0 means not supported, -1 means nonsensical, other values * are event codes. */ static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { /* * D-cache misses are not split into read/write/prefetch; * use raw event 41. */ [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 27, 0 }, [C(OP_WRITE)] = { 28, 0 }, [C(OP_PREFETCH)] = { 29, 0 }, }, [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 2, 60 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0, 0 }, }, /* * Assuming LL means L2, it's not a good match for this model. * It allocates only on L1 castout or explicit prefetch, and * does not have separate read/write events (but it does have * separate instruction/data events). */ [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0 }, [C(OP_WRITE)] = { 0, 0 }, [C(OP_PREFETCH)] = { 0, 0 }, }, /* * There are data/instruction MMU misses, but that's a miss on * the chip's internal level-one TLB which is probably not * what the user wants. Instead, unified level-two TLB misses * are reported here. */ [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 26, 66 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 12, 15 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { -1, -1 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, }; static int num_events = 128; /* Upper half of event id is PMLCb, for threshold events */ static u64 e500_xlate_event(u64 event_id) { u32 event_low = (u32)event_id; u64 ret; if (event_low >= num_events) return 0; ret = FSL_EMB_EVENT_VALID; if (event_low >= 76 && event_low <= 81) { ret |= FSL_EMB_EVENT_RESTRICTED; ret |= event_id & (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH); } else if (event_id & (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)) { /* Threshold requested on non-threshold event */ return 0; } return ret; } static struct fsl_emb_pmu e500_pmu = { .name = "e500 family", .n_counter = 4, .n_restricted = 2, .xlate_event = e500_xlate_event, .n_generic = ARRAY_SIZE(e500_generic_events), .generic_events = e500_generic_events, .cache_events = &e500_cache_events, }; static int init_e500_pmu(void) { unsigned int pvr = mfspr(SPRN_PVR); /* ec500mc */ if (PVR_VER(pvr) == PVR_VER_E500MC || PVR_VER(pvr) == PVR_VER_E5500) num_events = 256; /* e500 */ else if (PVR_VER(pvr) != PVR_VER_E500V1 && PVR_VER(pvr) != PVR_VER_E500V2) return -ENODEV; return register_fsl_emb_pmu(&e500_pmu); } early_initcall(init_e500_pmu);
linux-master
arch/powerpc/perf/e500-pmu.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Performance counter support for POWER6 processors. * * Copyright 2008-2009 Paul Mackerras, IBM Corporation. */ #include <linux/kernel.h> #include <linux/perf_event.h> #include <linux/string.h> #include <asm/reg.h> #include <asm/cputable.h> #include "internal.h" /* * Bits in event code for POWER6 */ #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ #define PM_PMC_MSK 0x7 #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) #define PM_UNIT_SH 16 /* Unit event comes (TTMxSEL encoding) */ #define PM_UNIT_MSK 0xf #define PM_UNIT_MSKS (PM_UNIT_MSK << PM_UNIT_SH) #define PM_LLAV 0x8000 /* Load lookahead match value */ #define PM_LLA 0x4000 /* Load lookahead match enable */ #define PM_BYTE_SH 12 /* Byte of event bus to use */ #define PM_BYTE_MSK 3 #define PM_SUBUNIT_SH 8 /* Subunit event comes from (NEST_SEL enc.) */ #define PM_SUBUNIT_MSK 7 #define PM_SUBUNIT_MSKS (PM_SUBUNIT_MSK << PM_SUBUNIT_SH) #define PM_PMCSEL_MSK 0xff /* PMCxSEL value */ #define PM_BUSEVENT_MSK 0xf3700 /* * Bits in MMCR1 for POWER6 */ #define MMCR1_TTM0SEL_SH 60 #define MMCR1_TTMSEL_SH(n) (MMCR1_TTM0SEL_SH - (n) * 4) #define MMCR1_TTMSEL_MSK 0xf #define MMCR1_TTMSEL(m, n) (((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK) #define MMCR1_NESTSEL_SH 45 #define MMCR1_NESTSEL_MSK 0x7 #define MMCR1_NESTSEL(m) (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK) #define MMCR1_PMC1_LLA (1ul << 44) #define MMCR1_PMC1_LLA_VALUE (1ul << 39) #define MMCR1_PMC1_ADDR_SEL (1ul << 35) #define MMCR1_PMC1SEL_SH 24 #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) #define MMCR1_PMCSEL_MSK 0xff /* * Map of which direct events on which PMCs are marked instruction events. * Indexed by PMCSEL value >> 1. * Bottom 4 bits are a map of which PMCs are interesting, * top 4 bits say what sort of event: * 0 = direct marked event, * 1 = byte decode event, * 4 = add/and event (PMC1 -> bits 0 & 4), * 5 = add/and event (PMC1 -> bits 1 & 5), * 6 = add/and event (PMC1 -> bits 2 & 6), * 7 = add/and event (PMC1 -> bits 3 & 7). */ static unsigned char direct_event_is_marked[0x60 >> 1] = { 0, /* 00 */ 0, /* 02 */ 0, /* 04 */ 0x07, /* 06 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */ 0x04, /* 08 PM_MRK_DFU_FIN */ 0x06, /* 0a PM_MRK_IFU_FIN, PM_MRK_INST_FIN */ 0, /* 0c */ 0, /* 0e */ 0x02, /* 10 PM_MRK_INST_DISP */ 0x08, /* 12 PM_MRK_LSU_DERAT_MISS */ 0, /* 14 */ 0, /* 16 */ 0x0c, /* 18 PM_THRESH_TIMEO, PM_MRK_INST_FIN */ 0x0f, /* 1a PM_MRK_INST_DISP, PM_MRK_{FXU,FPU,LSU}_FIN */ 0x01, /* 1c PM_MRK_INST_ISSUED */ 0, /* 1e */ 0, /* 20 */ 0, /* 22 */ 0, /* 24 */ 0, /* 26 */ 0x15, /* 28 PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L3MISS */ 0, /* 2a */ 0, /* 2c */ 0, /* 2e */ 0x4f, /* 30 */ 0x7f, /* 32 */ 0x4f, /* 34 */ 0x5f, /* 36 */ 0x6f, /* 38 */ 0x4f, /* 3a */ 0, /* 3c */ 0x08, /* 3e PM_MRK_INST_TIMEO */ 0x1f, /* 40 */ 0x1f, /* 42 */ 0x1f, /* 44 */ 0x1f, /* 46 */ 0x1f, /* 48 */ 0x1f, /* 4a */ 0x1f, /* 4c */ 0x1f, /* 4e */ 0, /* 50 */ 0x05, /* 52 PM_MRK_BR_TAKEN, PM_MRK_BR_MPRED */ 0x1c, /* 54 PM_MRK_PTEG_FROM_L3MISS, PM_MRK_PTEG_FROM_L2MISS */ 0x02, /* 56 PM_MRK_LD_MISS_L1 */ 0, /* 58 */ 0, /* 5a */ 0, /* 5c */ 0, /* 5e */ }; /* * Masks showing for each unit which bits are marked events. * These masks are in LE order, i.e. 0x00000001 is byte 0, bit 0. 
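 * power6_marked_instr_event() indexes this table with the unit field of the
 * event code and then tests bit (byte * 8 + bit) of the selected mask.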
*/ static u32 marked_bus_events[16] = { 0x01000000, /* direct events set 1: byte 3 bit 0 */ 0x00010000, /* direct events set 2: byte 2 bit 0 */ 0, 0, 0, 0, /* IDU, IFU, nest: nothing */ 0x00000088, /* VMX set 1: byte 0 bits 3, 7 */ 0x000000c0, /* VMX set 2: byte 0 bits 4-7 */ 0x04010000, /* LSU set 1: byte 2 bit 0, byte 3 bit 2 */ 0xff010000u, /* LSU set 2: byte 2 bit 0, all of byte 3 */ 0, /* LSU set 3 */ 0x00000010, /* VMX set 3: byte 0 bit 4 */ 0, /* BFP set 1 */ 0x00000022, /* BFP set 2: byte 0 bits 1, 5 */ 0, 0 }; /* * Returns 1 if event counts things relating to marked instructions * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. */ static int power6_marked_instr_event(u64 event) { int pmc, psel, ptype; int bit, byte, unit; u32 mask; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; psel = (event & PM_PMCSEL_MSK) >> 1; /* drop edge/level bit */ if (pmc >= 5) return 0; bit = -1; if (psel < sizeof(direct_event_is_marked)) { ptype = direct_event_is_marked[psel]; if (pmc == 0 || !(ptype & (1 << (pmc - 1)))) return 0; ptype >>= 4; if (ptype == 0) return 1; if (ptype == 1) bit = 0; else bit = ptype ^ (pmc - 1); } else if ((psel & 0x48) == 0x40) bit = psel & 7; if (!(event & PM_BUSEVENT_MSK) || bit == -1) return 0; byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; mask = marked_bus_events[unit]; return (mask >> (byte * 8 + bit)) & 1; } /* * Assign PMC numbers and compute MMCR1 value for a set of events */ static int p6_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[], u32 flags __maybe_unused) { unsigned long mmcr1 = 0; unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; int i; unsigned int pmc, ev, b, u, s, psel; unsigned int ttmset = 0; unsigned int pmc_inuse = 0; if (n_ev > 6) return -1; for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { if (pmc_inuse & (1 << (pmc - 1))) return -1; /* collision! 
*/ pmc_inuse |= 1 << (pmc - 1); } } for (i = 0; i < n_ev; ++i) { ev = event[i]; pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { --pmc; } else { /* can go on any PMC; find a free one */ for (pmc = 0; pmc < 4; ++pmc) if (!(pmc_inuse & (1 << pmc))) break; if (pmc >= 4) return -1; pmc_inuse |= 1 << pmc; } hwc[i] = pmc; psel = ev & PM_PMCSEL_MSK; if (ev & PM_BUSEVENT_MSK) { /* this event uses the event bus */ b = (ev >> PM_BYTE_SH) & PM_BYTE_MSK; u = (ev >> PM_UNIT_SH) & PM_UNIT_MSK; /* check for conflict on this byte of event bus */ if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u) return -1; mmcr1 |= (unsigned long)u << MMCR1_TTMSEL_SH(b); ttmset |= 1 << b; if (u == 5) { /* Nest events have a further mux */ s = (ev >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; if ((ttmset & 0x10) && MMCR1_NESTSEL(mmcr1) != s) return -1; ttmset |= 0x10; mmcr1 |= (unsigned long)s << MMCR1_NESTSEL_SH; } if (0x30 <= psel && psel <= 0x3d) { /* these need the PMCx_ADDR_SEL bits */ if (b >= 2) mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc; } /* bus select values are different for PMC3/4 */ if (pmc >= 2 && (psel & 0x90) == 0x80) psel ^= 0x20; } if (ev & PM_LLA) { mmcr1 |= MMCR1_PMC1_LLA >> pmc; if (ev & PM_LLAV) mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc; } if (power6_marked_instr_event(event[i])) mmcra |= MMCRA_SAMPLE_ENABLE; if (pmc < 4) mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc); } mmcr->mmcr0 = 0; if (pmc_inuse & 1) mmcr->mmcr0 = MMCR0_PMC1CE; if (pmc_inuse & 0xe) mmcr->mmcr0 |= MMCR0_PMCjCE; mmcr->mmcr1 = mmcr1; mmcr->mmcra = mmcra; return 0; } /* * Layout of constraint bits: * * 0-1 add field: number of uses of PMC1 (max 1) * 2-3, 4-5, 6-7, 8-9, 10-11: ditto for PMC2, 3, 4, 5, 6 * 12-15 add field: number of uses of PMC1-4 (max 4) * 16-19 select field: unit on byte 0 of event bus * 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3 * 32-34 select field: nest (subunit) event selector */ static int p6_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1 __maybe_unused) { int pmc, byte, sh, subunit; unsigned long mask = 0, value = 0; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { if (pmc > 4 && !(event == 0x500009 || event == 0x600005)) return -1; sh = (pmc - 1) * 2; mask |= 2 << sh; value |= 1 << sh; } if (event & PM_BUSEVENT_MSK) { byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; sh = byte * 4 + (16 - PM_UNIT_SH); mask |= PM_UNIT_MSKS << sh; value |= (unsigned long)(event & PM_UNIT_MSKS) << sh; if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) { subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; mask |= (unsigned long)PM_SUBUNIT_MSK << 32; value |= (unsigned long)subunit << 32; } } if (pmc <= 4) { mask |= 0x8000; /* add field for count of PMC1-4 uses */ value |= 0x1000; } *maskp = mask; *valp = value; return 0; } static int p6_limited_pmc_event(u64 event) { int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; return pmc == 5 || pmc == 6; } #define MAX_ALT 4 /* at most 4 alternatives for any event */ static const unsigned int event_alternatives[][MAX_ALT] = { { 0x0130e8, 0x2000f6, 0x3000fc }, /* PM_PTEG_RELOAD_VALID */ { 0x080080, 0x10000d, 0x30000c, 0x4000f0 }, /* PM_LD_MISS_L1 */ { 0x080088, 0x200054, 0x3000f0 }, /* PM_ST_MISS_L1 */ { 0x10000a, 0x2000f4, 0x600005 }, /* PM_RUN_CYC */ { 0x10000b, 0x2000f5 }, /* PM_RUN_COUNT */ { 0x10000e, 0x400010 }, /* PM_PURR */ { 0x100010, 0x4000f8 }, /* PM_FLUSH */ { 0x10001a, 0x200010 }, /* PM_MRK_INST_DISP */ { 0x100026, 0x3000f8 }, /* PM_TB_BIT_TRANS */ { 0x100054, 0x2000f0 }, /* PM_ST_FIN */ { 0x100056, 0x2000fc }, /* PM_L1_ICACHE_MISS */ { 0x1000f0, 0x40000a }, 
/* PM_INST_IMC_MATCH_CMPL */ { 0x1000f8, 0x200008 }, /* PM_GCT_EMPTY_CYC */ { 0x1000fc, 0x400006 }, /* PM_LSU_DERAT_MISS_CYC */ { 0x20000e, 0x400007 }, /* PM_LSU_DERAT_MISS */ { 0x200012, 0x300012 }, /* PM_INST_DISP */ { 0x2000f2, 0x3000f2 }, /* PM_INST_DISP */ { 0x2000f8, 0x300010 }, /* PM_EXT_INT */ { 0x2000fe, 0x300056 }, /* PM_DATA_FROM_L2MISS */ { 0x2d0030, 0x30001a }, /* PM_MRK_FPU_FIN */ { 0x30000a, 0x400018 }, /* PM_MRK_INST_FIN */ { 0x3000f6, 0x40000e }, /* PM_L1_DCACHE_RELOAD_VALID */ { 0x3000fe, 0x400056 }, /* PM_DATA_FROM_L3MISS */ }; /* * This could be made more efficient with a binary search on * a presorted list, if necessary */ static int find_alternatives_list(u64 event) { int i, j; unsigned int alt; for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { if (event < event_alternatives[i][0]) return -1; for (j = 0; j < MAX_ALT; ++j) { alt = event_alternatives[i][j]; if (!alt || event < alt) break; if (event == alt) return i; } } return -1; } static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { int i, j, nlim; unsigned int psel, pmc; unsigned int nalt = 1; u64 aevent; alt[0] = event; nlim = p6_limited_pmc_event(event); /* check the alternatives table */ i = find_alternatives_list(event); if (i >= 0) { /* copy out alternatives from list */ for (j = 0; j < MAX_ALT; ++j) { aevent = event_alternatives[i][j]; if (!aevent) break; if (aevent != event) alt[nalt++] = aevent; nlim += p6_limited_pmc_event(aevent); } } else { /* Check for alternative ways of computing sum events */ /* PMCSEL 0x32 counter N == PMCSEL 0x34 counter 5-N */ psel = event & (PM_PMCSEL_MSK & ~1); /* ignore edge bit */ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; if (pmc && (psel == 0x32 || psel == 0x34)) alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) | ((5 - pmc) << PM_PMC_SH); /* PMCSEL 0x38 counter N == PMCSEL 0x3a counter N+/-2 */ if (pmc && (psel == 0x38 || psel == 0x3a)) alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) | ((pmc > 2? pmc - 2: pmc + 2) << PM_PMC_SH); } if (flags & PPMU_ONLY_COUNT_RUN) { /* * We're only counting in RUN state, * so PM_CYC is equivalent to PM_RUN_CYC, * PM_INST_CMPL === PM_RUN_INST_CMPL, PM_PURR === PM_RUN_PURR. * This doesn't include alternatives that don't provide * any extra flexibility in assigning PMCs (e.g. * 0x10000a for PM_RUN_CYC vs. 0x1e for PM_CYC). * Note that even with these additional alternatives * we never end up with more than 4 alternatives for any event. 
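 * For example, starting from 0x1e (PM_CYC) we append 0x600005 (PM_RUN_CYC),
 * which decodes to PMC6 and therefore also bumps nlim, PMC5/6 being the
 * limited PMCs on POWER6.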
*/ j = nalt; for (i = 0; i < nalt; ++i) { switch (alt[i]) { case 0x1e: /* PM_CYC */ alt[j++] = 0x600005; /* PM_RUN_CYC */ ++nlim; break; case 0x10000a: /* PM_RUN_CYC */ alt[j++] = 0x1e; /* PM_CYC */ break; case 2: /* PM_INST_CMPL */ alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */ ++nlim; break; case 0x500009: /* PM_RUN_INST_CMPL */ alt[j++] = 2; /* PM_INST_CMPL */ break; case 0x10000e: /* PM_PURR */ alt[j++] = 0x4000f4; /* PM_RUN_PURR */ break; case 0x4000f4: /* PM_RUN_PURR */ alt[j++] = 0x10000e; /* PM_PURR */ break; } } nalt = j; } if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) { /* remove the limited PMC events */ j = 0; for (i = 0; i < nalt; ++i) { if (!p6_limited_pmc_event(alt[i])) { alt[j] = alt[i]; ++j; } } nalt = j; } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) { /* remove all but the limited PMC events */ j = 0; for (i = 0; i < nalt; ++i) { if (p6_limited_pmc_event(alt[i])) { alt[j] = alt[i]; ++j; } } nalt = j; } return nalt; } static void p6_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr) { /* Set PMCxSEL to 0 to disable PMCx */ if (pmc <= 3) mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); } static int power6_generic_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, [PERF_COUNT_HW_INSTRUCTIONS] = 2, [PERF_COUNT_HW_CACHE_REFERENCES] = 0x280030, /* LD_REF_L1 */ [PERF_COUNT_HW_CACHE_MISSES] = 0x30000c, /* LD_MISS_L1 */ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x410a0, /* BR_PRED */ [PERF_COUNT_HW_BRANCH_MISSES] = 0x400052, /* BR_MPRED */ }; #define C(x) PERF_COUNT_HW_CACHE_##x /* * Table of generalized cache-related events. * 0 means not supported, -1 means nonsensical, other values * are event codes. * The "DTLB" and "ITLB" events relate to the DERAT and IERAT. */ static u64 power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x280030, 0x80080 }, [C(OP_WRITE)] = { 0x180032, 0x80088 }, [C(OP_PREFETCH)] = { 0x810a4, 0 }, }, [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x100056 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0x4008c, 0 }, }, [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x150730, 0x250532 }, [C(OP_WRITE)] = { 0x250432, 0x150432 }, [C(OP_PREFETCH)] = { 0x810a6, 0 }, }, [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x20000e }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x420ce }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x430e6, 0x400052 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { -1, -1 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, }; static struct power_pmu power6_pmu = { .name = "POWER6", .n_counter = 6, .max_alternatives = MAX_ALT, .add_fields = 0x1555, .test_adder = 0x3000, .compute_mmcr = p6_compute_mmcr, .get_constraint = p6_get_constraint, .get_alternatives = p6_get_alternatives, .disable_pmc = p6_disable_pmc, .limited_pmc_event = p6_limited_pmc_event, .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR, .n_generic = ARRAY_SIZE(power6_generic_events), .generic_events = power6_generic_events, .cache_events = &power6_cache_events, }; int __init init_power6_pmu(void) { unsigned int pvr = mfspr(SPRN_PVR); if (PVR_VER(pvr) != PVR_POWER6) return -ENODEV; return register_power_pmu(&power6_pmu); }
linux-master
arch/powerpc/perf/power6-pmu.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Performance event support - Freescale Embedded Performance Monitor * * Copyright 2008-2009 Paul Mackerras, IBM Corporation. * Copyright 2010 Freescale Semiconductor, Inc. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/perf_event.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <asm/reg_fsl_emb.h> #include <asm/pmc.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/ptrace.h> struct cpu_hw_events { int n_events; int disabled; u8 pmcs_enabled; struct perf_event *event[MAX_HWEVENTS]; }; static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); static struct fsl_emb_pmu *ppmu; /* Number of perf_events counting hardware events */ static atomic_t num_events; /* Used to avoid races in calling reserve/release_pmc_hardware */ static DEFINE_MUTEX(pmc_reserve_mutex); static void perf_event_interrupt(struct pt_regs *regs); /* * Read one performance monitor counter (PMC). */ static unsigned long read_pmc(int idx) { unsigned long val; switch (idx) { case 0: val = mfpmr(PMRN_PMC0); break; case 1: val = mfpmr(PMRN_PMC1); break; case 2: val = mfpmr(PMRN_PMC2); break; case 3: val = mfpmr(PMRN_PMC3); break; case 4: val = mfpmr(PMRN_PMC4); break; case 5: val = mfpmr(PMRN_PMC5); break; default: printk(KERN_ERR "oops trying to read PMC%d\n", idx); val = 0; } return val; } /* * Write one PMC. */ static void write_pmc(int idx, unsigned long val) { switch (idx) { case 0: mtpmr(PMRN_PMC0, val); break; case 1: mtpmr(PMRN_PMC1, val); break; case 2: mtpmr(PMRN_PMC2, val); break; case 3: mtpmr(PMRN_PMC3, val); break; case 4: mtpmr(PMRN_PMC4, val); break; case 5: mtpmr(PMRN_PMC5, val); break; default: printk(KERN_ERR "oops trying to write PMC%d\n", idx); } isync(); } /* * Write one local control A register */ static void write_pmlca(int idx, unsigned long val) { switch (idx) { case 0: mtpmr(PMRN_PMLCA0, val); break; case 1: mtpmr(PMRN_PMLCA1, val); break; case 2: mtpmr(PMRN_PMLCA2, val); break; case 3: mtpmr(PMRN_PMLCA3, val); break; case 4: mtpmr(PMRN_PMLCA4, val); break; case 5: mtpmr(PMRN_PMLCA5, val); break; default: printk(KERN_ERR "oops trying to write PMLCA%d\n", idx); } isync(); } /* * Write one local control B register */ static void write_pmlcb(int idx, unsigned long val) { switch (idx) { case 0: mtpmr(PMRN_PMLCB0, val); break; case 1: mtpmr(PMRN_PMLCB1, val); break; case 2: mtpmr(PMRN_PMLCB2, val); break; case 3: mtpmr(PMRN_PMLCB3, val); break; case 4: mtpmr(PMRN_PMLCB4, val); break; case 5: mtpmr(PMRN_PMLCB5, val); break; default: printk(KERN_ERR "oops trying to write PMLCB%d\n", idx); } isync(); } static void fsl_emb_pmu_read(struct perf_event *event) { s64 val, delta, prev; if (event->hw.state & PERF_HES_STOPPED) return; /* * Performance monitor interrupts come even when interrupts * are soft-disabled, as long as interrupts are hard-enabled. * Therefore we treat them like NMIs. */ do { prev = local64_read(&event->hw.prev_count); barrier(); val = read_pmc(event->hw.idx); } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); /* The counters are only 32 bits wide */ delta = (val - prev) & 0xfffffffful; local64_add(delta, &event->count); local64_sub(delta, &event->hw.period_left); } /* * Disable all events to prevent PMU interrupts and to allow * events to be added or removed. 
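 * Freezing is done by setting PMGC0[FAC]; fsl_emb_pmu_enable() re-arms the
 * PMU by writing PMGC0_PMIE | PMGC0_FCECE once the new event set is in place.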
*/ static void fsl_emb_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; local_irq_save(flags); cpuhw = this_cpu_ptr(&cpu_hw_events); if (!cpuhw->disabled) { cpuhw->disabled = 1; /* * Check if we ever enabled the PMU on this cpu. */ if (!cpuhw->pmcs_enabled) { ppc_enable_pmcs(); cpuhw->pmcs_enabled = 1; } if (atomic_read(&num_events)) { /* * Set the 'freeze all counters' bit, and disable * interrupts. The barrier is to make sure the * mtpmr has been executed and the PMU has frozen * the events before we return. */ mtpmr(PMRN_PMGC0, PMGC0_FAC); isync(); } } local_irq_restore(flags); } /* * Re-enable all events if disable == 0. * If we were previously disabled and events were added, then * put the new config on the PMU. */ static void fsl_emb_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; local_irq_save(flags); cpuhw = this_cpu_ptr(&cpu_hw_events); if (!cpuhw->disabled) goto out; cpuhw->disabled = 0; ppc_set_pmu_inuse(cpuhw->n_events != 0); if (cpuhw->n_events > 0) { mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE); isync(); } out: local_irq_restore(flags); } static int collect_events(struct perf_event *group, int max_count, struct perf_event *ctrs[]) { int n = 0; struct perf_event *event; if (!is_software_event(group)) { if (n >= max_count) return -1; ctrs[n] = group; n++; } for_each_sibling_event(event, group) { if (!is_software_event(event) && event->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; ctrs[n] = event; n++; } } return n; } /* context locked on entry */ static int fsl_emb_pmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuhw; int ret = -EAGAIN; int num_counters = ppmu->n_counter; u64 val; int i; perf_pmu_disable(event->pmu); cpuhw = &get_cpu_var(cpu_hw_events); if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) num_counters = ppmu->n_restricted; /* * Allocate counters from top-down, so that restricted-capable * counters are kept free as long as possible. */ for (i = num_counters - 1; i >= 0; i--) { if (cpuhw->event[i]) continue; break; } if (i < 0) goto out; event->hw.idx = i; cpuhw->event[i] = event; ++cpuhw->n_events; val = 0; if (event->hw.sample_period) { s64 left = local64_read(&event->hw.period_left); if (left < 0x80000000L) val = 0x80000000L - left; } local64_set(&event->hw.prev_count, val); if (unlikely(!(flags & PERF_EF_START))) { event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; val = 0; } else { event->hw.state &= ~(PERF_HES_STOPPED | PERF_HES_UPTODATE); } write_pmc(i, val); perf_event_update_userpage(event); write_pmlcb(i, event->hw.config >> 32); write_pmlca(i, event->hw.config_base); ret = 0; out: put_cpu_var(cpu_hw_events); perf_pmu_enable(event->pmu); return ret; } /* context locked on entry */ static void fsl_emb_pmu_del(struct perf_event *event, int flags) { struct cpu_hw_events *cpuhw; int i = event->hw.idx; perf_pmu_disable(event->pmu); if (i < 0) goto out; fsl_emb_pmu_read(event); cpuhw = &get_cpu_var(cpu_hw_events); WARN_ON(event != cpuhw->event[event->hw.idx]); write_pmlca(i, 0); write_pmlcb(i, 0); write_pmc(i, 0); cpuhw->event[i] = NULL; event->hw.idx = -1; /* * TODO: if at least one restricted event exists, and we * just freed up a non-restricted-capable counter, and * there is a restricted-capable counter occupied by * a non-restricted event, migrate that event to the * vacated counter. 
*/ cpuhw->n_events--; out: perf_pmu_enable(event->pmu); put_cpu_var(cpu_hw_events); } static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags) { unsigned long flags; unsigned long val; s64 left; if (event->hw.idx < 0 || !event->hw.sample_period) return; if (!(event->hw.state & PERF_HES_STOPPED)) return; if (ef_flags & PERF_EF_RELOAD) WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); local_irq_save(flags); perf_pmu_disable(event->pmu); event->hw.state = 0; left = local64_read(&event->hw.period_left); val = 0; if (left < 0x80000000L) val = 0x80000000L - left; write_pmc(event->hw.idx, val); perf_event_update_userpage(event); perf_pmu_enable(event->pmu); local_irq_restore(flags); } static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags) { unsigned long flags; if (event->hw.idx < 0 || !event->hw.sample_period) return; if (event->hw.state & PERF_HES_STOPPED) return; local_irq_save(flags); perf_pmu_disable(event->pmu); fsl_emb_pmu_read(event); event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; write_pmc(event->hw.idx, 0); perf_event_update_userpage(event); perf_pmu_enable(event->pmu); local_irq_restore(flags); } /* * Release the PMU if this is the last perf_event. */ static void hw_perf_event_destroy(struct perf_event *event) { if (!atomic_add_unless(&num_events, -1, 1)) { mutex_lock(&pmc_reserve_mutex); if (atomic_dec_return(&num_events) == 0) release_pmc_hardware(); mutex_unlock(&pmc_reserve_mutex); } } /* * Translate a generic cache event_id config to a raw event_id code. */ static int hw_perf_cache_event(u64 config, u64 *eventp) { unsigned long type, op, result; int ev; if (!ppmu->cache_events) return -EINVAL; /* unpack config */ type = config & 0xff; op = (config >> 8) & 0xff; result = (config >> 16) & 0xff; if (type >= PERF_COUNT_HW_CACHE_MAX || op >= PERF_COUNT_HW_CACHE_OP_MAX || result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; ev = (*ppmu->cache_events)[type][op][result]; if (ev == 0) return -EOPNOTSUPP; if (ev == -1) return -EINVAL; *eventp = ev; return 0; } static int fsl_emb_pmu_event_init(struct perf_event *event) { u64 ev; struct perf_event *events[MAX_HWEVENTS]; int n; int err; int num_restricted; int i; if (ppmu->n_counter > MAX_HWEVENTS) { WARN(1, "No. of perf counters (%d) is higher than max array size(%d)\n", ppmu->n_counter, MAX_HWEVENTS); ppmu->n_counter = MAX_HWEVENTS; } switch (event->attr.type) { case PERF_TYPE_HARDWARE: ev = event->attr.config; if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) return -EOPNOTSUPP; ev = ppmu->generic_events[ev]; break; case PERF_TYPE_HW_CACHE: err = hw_perf_cache_event(event->attr.config, &ev); if (err) return err; break; case PERF_TYPE_RAW: ev = event->attr.config; break; default: return -ENOENT; } event->hw.config = ppmu->xlate_event(ev); if (!(event->hw.config & FSL_EMB_EVENT_VALID)) return -EINVAL; /* * If this is in a group, check if it can go on with all the * other hardware events in the group. We assume the event * hasn't been linked into its leader's sibling list at this point. 
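 * collect_events() therefore sees only the leader and its existing siblings,
 * and the ppmu->n_counter - 1 limit leaves one counter free for the event
 * being initialized here.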
*/ n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, ppmu->n_counter - 1, events); if (n < 0) return -EINVAL; } if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { num_restricted = 0; for (i = 0; i < n; i++) { if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED) num_restricted++; } if (num_restricted >= ppmu->n_restricted) return -EINVAL; } event->hw.idx = -1; event->hw.config_base = PMLCA_CE | PMLCA_FCM1 | (u32)((ev << 16) & PMLCA_EVENT_MASK); if (event->attr.exclude_user) event->hw.config_base |= PMLCA_FCU; if (event->attr.exclude_kernel) event->hw.config_base |= PMLCA_FCS; if (event->attr.exclude_idle) return -ENOTSUPP; event->hw.last_period = event->hw.sample_period; local64_set(&event->hw.period_left, event->hw.last_period); /* * See if we need to reserve the PMU. * If no events are currently in use, then we have to take a * mutex to ensure that we don't race with another task doing * reserve_pmc_hardware or release_pmc_hardware. */ err = 0; if (!atomic_inc_not_zero(&num_events)) { mutex_lock(&pmc_reserve_mutex); if (atomic_read(&num_events) == 0 && reserve_pmc_hardware(perf_event_interrupt)) err = -EBUSY; else atomic_inc(&num_events); mutex_unlock(&pmc_reserve_mutex); mtpmr(PMRN_PMGC0, PMGC0_FAC); isync(); } event->destroy = hw_perf_event_destroy; return err; } static struct pmu fsl_emb_pmu = { .pmu_enable = fsl_emb_pmu_enable, .pmu_disable = fsl_emb_pmu_disable, .event_init = fsl_emb_pmu_event_init, .add = fsl_emb_pmu_add, .del = fsl_emb_pmu_del, .start = fsl_emb_pmu_start, .stop = fsl_emb_pmu_stop, .read = fsl_emb_pmu_read, }; /* * A counter has overflowed; update its count and record * things if requested. Note that interrupts are hard-disabled * here so there is no possibility of being interrupted. */ static void record_and_restart(struct perf_event *event, unsigned long val, struct pt_regs *regs) { u64 period = event->hw.sample_period; s64 prev, delta, left; int record = 0; if (event->hw.state & PERF_HES_STOPPED) { write_pmc(event->hw.idx, 0); return; } /* we don't have to worry about interrupts here */ prev = local64_read(&event->hw.prev_count); delta = (val - prev) & 0xfffffffful; local64_add(delta, &event->count); /* * See if the total period for this event has expired, * and update for the next period. */ val = 0; left = local64_read(&event->hw.period_left) - delta; if (period) { if (left <= 0) { left += period; if (left <= 0) left = period; record = 1; event->hw.last_period = event->hw.sample_period; } if (left < 0x80000000LL) val = 0x80000000LL - left; } write_pmc(event->hw.idx, val); local64_set(&event->hw.prev_count, val); local64_set(&event->hw.period_left, left); perf_event_update_userpage(event); /* * Finally record data if requested. */ if (record) { struct perf_sample_data data; perf_sample_data_init(&data, 0, event->hw.last_period); if (perf_event_overflow(event, &data, regs)) fsl_emb_pmu_stop(event, 0); } } static void perf_event_interrupt(struct pt_regs *regs) { int i; struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); struct perf_event *event; unsigned long val; for (i = 0; i < ppmu->n_counter; ++i) { event = cpuhw->event[i]; val = read_pmc(i); if ((int)val < 0) { if (event) { /* event has overflowed */ record_and_restart(event, val, regs); } else { /* * Disabled counter is negative, * reset it just in case. */ write_pmc(i, 0); } } } /* PMM will keep counters frozen until we return from the interrupt. 
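 * Each counter's config_base includes PMLCA_FCM1, so setting MSR[PMM] here
 * keeps them frozen; the PMGC0 write below re-enables PMIE/FCECE, and the
 * interrupt return restores the previous MSR so counting resumes.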
*/ mtmsr(mfmsr() | MSR_PMM); mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE); isync(); } static int fsl_emb_pmu_prepare_cpu(unsigned int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); memset(cpuhw, 0, sizeof(*cpuhw)); return 0; } int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu) { if (ppmu) return -EBUSY; /* something's already registered */ ppmu = pmu; pr_info("%s performance monitor hardware support registered\n", pmu->name); perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW); cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare", fsl_emb_pmu_prepare_cpu, NULL); return 0; }
linux-master
arch/powerpc/perf/core-fsl-emb.c