Columns: python_code (string, lengths 0 to 1.8M), repo_name (string, 7 classes), file_path (string, lengths 5 to 99)
// SPDX-License-Identifier: GPL-2.0
/*
 * 16550 serial console support.
 *
 * Original copied from <file:arch/ppc/boot/common/ns16550.c>
 * (which had no copyright)
 * Modifications: 2006 (c) MontaVista Software, Inc.
 *
 * Modified by: Mark A. Greer <[email protected]>
 */
#include <stdarg.h>
#include <stddef.h>
#include "types.h"
#include "string.h"
#include "stdio.h"
#include "io.h"
#include "ops.h"
#include "of.h"

#define UART_DLL	0	/* Out: Divisor Latch Low */
#define UART_DLM	1	/* Out: Divisor Latch High */
#define UART_FCR	2	/* Out: FIFO Control Register */
#define UART_LCR	3	/* Out: Line Control Register */
#define UART_MCR	4	/* Out: Modem Control Register */
#define UART_LSR	5	/* In: Line Status Register */
#define UART_LSR_THRE	0x20	/* Transmit-hold-register empty */
#define UART_LSR_DR	0x01	/* Receiver data ready */
#define UART_MSR	6	/* In: Modem Status Register */
#define UART_SCR	7	/* I/O: Scratch Register */

static unsigned char *reg_base;
static u32 reg_shift;

static int ns16550_open(void)
{
	out_8(reg_base + (UART_FCR << reg_shift), 0x06);
	return 0;
}

static void ns16550_putc(unsigned char c)
{
	while ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_THRE) == 0);
	out_8(reg_base, c);
}

static unsigned char ns16550_getc(void)
{
	while ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_DR) == 0);
	return in_8(reg_base);
}

static u8 ns16550_tstc(void)
{
	return ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_DR) != 0);
}

int ns16550_console_init(void *devp, struct serial_console_data *scdp)
{
	int n;
	u32 reg_offset;

	if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1) {
		printf("virt reg parse fail...\r\n");
		return -1;
	}

	n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
	if (n == sizeof(reg_offset))
		reg_base += be32_to_cpu(reg_offset);

	n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
	if (n != sizeof(reg_shift))
		reg_shift = 0;
	else
		reg_shift = be32_to_cpu(reg_shift);

	scdp->open = ns16550_open;
	scdp->putc = ns16550_putc;
	scdp->getc = ns16550_getc;
	scdp->tstc = ns16550_tstc;
	scdp->close = NULL;

	return 0;
}
linux-master
arch/powerpc/boot/ns16550.c
// SPDX-License-Identifier: GPL-2.0 /* * Generic serial console support * * Author: Mark A. Greer <[email protected]> * * Code in serial_edit_cmdline() copied from <file:arch/ppc/boot/simple/misc.c> * and was written by Matt Porter <[email protected]>. * * 2001,2006 (c) MontaVista Software, Inc. */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "string.h" #include "stdio.h" #include "io.h" #include "ops.h" static int serial_open(void) { struct serial_console_data *scdp = console_ops.data; return scdp->open(); } static void serial_write(const char *buf, int len) { struct serial_console_data *scdp = console_ops.data; while (*buf != '\0') scdp->putc(*buf++); } static void serial_edit_cmdline(char *buf, int len, unsigned int timeout) { int timer = 0, count; char ch, *cp; struct serial_console_data *scdp = console_ops.data; cp = buf; count = strlen(buf); cp = &buf[count]; count++; do { if (scdp->tstc()) { while (((ch = scdp->getc()) != '\n') && (ch != '\r')) { /* Test for backspace/delete */ if ((ch == '\b') || (ch == '\177')) { if (cp != buf) { cp--; count--; printf("\b \b"); } /* Test for ^x/^u (and wipe the line) */ } else if ((ch == '\030') || (ch == '\025')) { while (cp != buf) { cp--; count--; printf("\b \b"); } } else if (count < len) { *cp++ = ch; count++; scdp->putc(ch); } } break; /* Exit 'timer' loop */ } udelay(1000); /* 1 msec */ } while (timer++ < timeout); *cp = 0; } static void serial_close(void) { struct serial_console_data *scdp = console_ops.data; if (scdp->close) scdp->close(); } static void *serial_get_stdout_devp(void) { void *devp; char devtype[MAX_PROP_LEN]; char path[MAX_PATH_LEN]; devp = finddevice("/chosen"); if (devp == NULL) goto err_out; if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0 || getprop(devp, "stdout-path", path, MAX_PATH_LEN) > 0) { devp = finddevice(path); if (devp == NULL) goto err_out; if ((getprop(devp, "device_type", devtype, sizeof(devtype)) > 0) && !strcmp(devtype, "serial")) return devp; } err_out: return NULL; } static struct serial_console_data serial_cd; /* Node's "compatible" property determines which serial driver to use */ int serial_console_init(void) { void *devp; int rc = -1; devp = serial_get_stdout_devp(); if (devp == NULL) goto err_out; if (dt_is_compatible(devp, "ns16550") || dt_is_compatible(devp, "pnpPNP,501")) rc = ns16550_console_init(devp, &serial_cd); #ifdef CONFIG_CPM else if (dt_is_compatible(devp, "fsl,cpm1-scc-uart") || dt_is_compatible(devp, "fsl,cpm1-smc-uart") || dt_is_compatible(devp, "fsl,cpm2-scc-uart") || dt_is_compatible(devp, "fsl,cpm2-smc-uart")) rc = cpm_console_init(devp, &serial_cd); #endif #ifdef CONFIG_PPC_MPC52xx else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart")) rc = mpc5200_psc_console_init(devp, &serial_cd); #endif #ifdef CONFIG_PPC_POWERNV else if (dt_is_compatible(devp, "ibm,opal-console-raw")) rc = opal_console_init(devp, &serial_cd); #endif /* Add other serial console driver calls here */ if (!rc) { console_ops.open = serial_open; console_ops.write = serial_write; console_ops.close = serial_close; console_ops.data = &serial_cd; if (serial_cd.getc) console_ops.edit_cmdline = serial_edit_cmdline; return 0; } err_out: return -1; }
linux-master
arch/powerpc/boot/serial.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Freescale SOC support functions
 *
 * Author: Scott Wood <[email protected]>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "ops.h"
#include "types.h"
#include "fsl-soc.h"
#include "stdio.h"

static u32 prop_buf[MAX_PROP_LEN / 4];

u32 *fsl_get_immr(void)
{
	void *soc;
	unsigned long ret = 0;

	soc = find_node_by_devtype(NULL, "soc");
	if (soc) {
		int size;
		u32 naddr;

		size = getprop(soc, "#address-cells", prop_buf, MAX_PROP_LEN);
		if (size == 4)
			naddr = prop_buf[0];
		else
			naddr = 2;

		if (naddr != 1 && naddr != 2)
			goto err;

		size = getprop(soc, "ranges", prop_buf, MAX_PROP_LEN);

		if (size < 12)
			goto err;
		if (prop_buf[0] != 0)
			goto err;
		if (naddr == 2 && prop_buf[1] != 0)
			goto err;

		if (!dt_xlate_addr(soc, prop_buf + naddr, 8, &ret))
			ret = 0;
	}

err:
	if (!ret)
		printf("fsl_get_immr: Failed to find immr base\r\n");

	return (u32 *)ret;
}
linux-master
arch/powerpc/boot/fsl-soc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for Ebony
 *
 * Author: David Gibson <[email protected]>
 *
 * Copyright 2007 David Gibson, IBM Corporatio.
 * Based on cuboot-83xx.c, which is:
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "ops.h"
#include "stdio.h"
#include "44x.h"
#include "cuboot.h"

#define TARGET_4xx
#define TARGET_44x
#include "ppcboot.h"

static bd_t bd;

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	ebony_init(&bd.bi_enetaddr, &bd.bi_enet1addr);
}
linux-master
arch/powerpc/boot/cuboot-ebony.c
// SPDX-License-Identifier: GPL-2.0-only /* * The simple platform -- for booting when firmware doesn't supply a device * tree or any platform configuration information. * All data is extracted from an embedded device tree * blob. * * Authors: Scott Wood <[email protected]> * Grant Likely <[email protected]> * * Copyright (c) 2007 Freescale Semiconductor, Inc. * Copyright (c) 2008 Secret Lab Technologies Ltd. */ #include "ops.h" #include "types.h" #include "io.h" #include "stdio.h" #include <libfdt.h> BSS_STACK(4*1024); extern int platform_specific_init(void) __attribute__((weak)); void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { const u32 *na, *ns, *reg, *timebase; u64 memsize64; int node, size, i; /* Make sure FDT blob is sane */ if (fdt_check_header(_dtb_start) != 0) fatal("Invalid device tree blob\n"); /* Find the #address-cells and #size-cells properties */ node = fdt_path_offset(_dtb_start, "/"); if (node < 0) fatal("Cannot find root node\n"); na = fdt_getprop(_dtb_start, node, "#address-cells", &size); if (!na || (size != 4)) fatal("Cannot find #address-cells property"); ns = fdt_getprop(_dtb_start, node, "#size-cells", &size); if (!ns || (size != 4)) fatal("Cannot find #size-cells property"); /* Find the memory range */ node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type", "memory", sizeof("memory")); if (node < 0) fatal("Cannot find memory node\n"); reg = fdt_getprop(_dtb_start, node, "reg", &size); if (size < (*na+*ns) * sizeof(u32)) fatal("cannot get memory range\n"); /* Only interested in memory based at 0 */ for (i = 0; i < *na; i++) if (*reg++ != 0) fatal("Memory range is not based at address 0\n"); /* get the memsize and truncate it to under 4G on 32 bit machines */ memsize64 = 0; for (i = 0; i < *ns; i++) memsize64 = (memsize64 << 32) | *reg++; if (sizeof(void *) == 4 && memsize64 >= 0x100000000ULL) memsize64 = 0xffffffff; /* finally, setup the timebase */ node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type", "cpu", sizeof("cpu")); if (!node) fatal("Cannot find cpu node\n"); timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size); if (timebase && (size == 4)) timebase_period_ns = 1000000000 / *timebase; /* Now we have the memory size; initialize the heap */ simple_alloc_init(_end, memsize64 - (unsigned long)_end, 32, 64); /* prepare the device tree and find the console */ fdt_init(_dtb_start); if (platform_specific_init) platform_specific_init(); serial_console_init(); }
linux-master
arch/powerpc/boot/simpleboot.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/powerpc/boot/wii.c * * Nintendo Wii bootwrapper support * Copyright (C) 2008-2009 The GameCube Linux Team * Copyright (C) 2008,2009 Albert Herranz */ #include <stddef.h> #include "stdio.h" #include "types.h" #include "io.h" #include "ops.h" #include "ugecon.h" BSS_STACK(8192); #define HW_REG(x) ((void *)(x)) #define EXI_CTRL HW_REG(0x0d800070) #define EXI_CTRL_ENABLE (1<<0) #define MEM2_TOP (0x10000000 + 64*1024*1024) #define FIRMWARE_DEFAULT_SIZE (12*1024*1024) struct mipc_infohdr { char magic[3]; u8 version; u32 mem2_boundary; u32 ipc_in; size_t ipc_in_size; u32 ipc_out; size_t ipc_out_size; }; static int mipc_check_address(u32 pa) { /* only MEM2 addresses */ if (pa < 0x10000000 || pa > 0x14000000) return -EINVAL; return 0; } static struct mipc_infohdr *mipc_get_infohdr(void) { struct mipc_infohdr **hdrp, *hdr; /* 'mini' header pointer is the last word of MEM2 memory */ hdrp = (struct mipc_infohdr **)0x13fffffc; if (mipc_check_address((u32)hdrp)) { printf("mini: invalid hdrp %08X\n", (u32)hdrp); hdr = NULL; goto out; } hdr = *hdrp; if (mipc_check_address((u32)hdr)) { printf("mini: invalid hdr %08X\n", (u32)hdr); hdr = NULL; goto out; } if (memcmp(hdr->magic, "IPC", 3)) { printf("mini: invalid magic\n"); hdr = NULL; goto out; } out: return hdr; } static int mipc_get_mem2_boundary(u32 *mem2_boundary) { struct mipc_infohdr *hdr; int error; hdr = mipc_get_infohdr(); if (!hdr) { error = -1; goto out; } if (mipc_check_address(hdr->mem2_boundary)) { printf("mini: invalid mem2_boundary %08X\n", hdr->mem2_boundary); error = -EINVAL; goto out; } *mem2_boundary = hdr->mem2_boundary; error = 0; out: return error; } static void platform_fixups(void) { void *mem; u32 reg[4]; u32 mem2_boundary; int len; int error; mem = finddevice("/memory"); if (!mem) fatal("Can't find memory node\n"); /* two ranges of (address, size) words */ len = getprop(mem, "reg", reg, sizeof(reg)); if (len != sizeof(reg)) { /* nothing to do */ goto out; } /* retrieve MEM2 boundary from 'mini' */ error = mipc_get_mem2_boundary(&mem2_boundary); if (error) { /* if that fails use a sane value */ mem2_boundary = MEM2_TOP - FIRMWARE_DEFAULT_SIZE; } if (mem2_boundary > reg[2] && mem2_boundary < reg[2] + reg[3]) { reg[3] = mem2_boundary - reg[2]; printf("top of MEM2 @ %08X\n", reg[2] + reg[3]); setprop(mem, "reg", reg, sizeof(reg)); } out: return; } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5) { u32 heapsize = 24*1024*1024 - (u32)_end; simple_alloc_init(_end, heapsize, 32, 64); fdt_init(_dtb_start); /* * 'mini' boots the Broadway processor with EXI disabled. * We need it enabled before probing for the USB Gecko. */ out_be32(EXI_CTRL, in_be32(EXI_CTRL) | EXI_CTRL_ENABLE); if (ug_probe()) console_ops.write = ug_console_write; platform_ops.fixups = platform_fixups; }
linux-master
arch/powerpc/boot/wii.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Wrapper around the kernel's pre-boot decompression library. * * Copyright (C) IBM Corporation 2016. */ #include "elf.h" #include "page.h" #include "string.h" #include "stdio.h" #include "ops.h" #include "reg.h" #include "types.h" /* * The decompressor_*.c files play #ifdef games so they can be used in both * pre-boot and regular kernel code. We need these definitions to make the * includes work. */ #define STATIC static #define INIT /* * The build process will copy the required zlib source files and headers * out of lib/ and "fix" the includes so they do not pull in other kernel * headers. */ #ifdef CONFIG_KERNEL_GZIP # include "decompress_inflate.c" #endif #ifdef CONFIG_KERNEL_XZ # include "xz_config.h" # include "../../../lib/decompress_unxz.c" #endif /* globals for tracking the state of the decompression */ static unsigned long decompressed_bytes; static unsigned long limit; static unsigned long skip; static char *output_buffer; /* * flush() is called by __decompress() when the decompressor's scratch buffer is * full. */ static long flush(void *v, unsigned long buffer_size) { unsigned long end = decompressed_bytes + buffer_size; unsigned long size = buffer_size; unsigned long offset = 0; char *in = v; char *out; /* * if we hit our decompression limit, we need to fake an error to abort * the in-progress decompression. */ if (decompressed_bytes >= limit) return -1; /* skip this entire block */ if (end <= skip) { decompressed_bytes += buffer_size; return buffer_size; } /* skip some data at the start, but keep the rest of the block */ if (decompressed_bytes < skip && end > skip) { offset = skip - decompressed_bytes; in += offset; size -= offset; decompressed_bytes += offset; } out = &output_buffer[decompressed_bytes - skip]; size = min(decompressed_bytes + size, limit) - decompressed_bytes; memcpy(out, in, size); decompressed_bytes += size; return buffer_size; } static void print_err(char *s) { /* suppress the "error" when we terminate the decompressor */ if (decompressed_bytes >= limit) return; printf("Decompression error: '%s'\n\r", s); } /** * partial_decompress - decompresses part or all of a compressed buffer * @inbuf: input buffer * @input_size: length of the input buffer * @outbuf: output buffer * @output_size: length of the output buffer * @skip number of output bytes to ignore * * This function takes compressed data from inbuf, decompresses and write it to * outbuf. Once output_size bytes are written to the output buffer, or the * stream is exhausted the function will return the number of bytes that were * decompressed. Otherwise it will return whatever error code the decompressor * reported (NB: This is specific to each decompressor type). * * The skip functionality is mainly there so the program and discover * the size of the compressed image so that it can ask firmware (if present) * for an appropriately sized buffer. */ long partial_decompress(void *inbuf, unsigned long input_size, void *outbuf, unsigned long output_size, unsigned long _skip) { int ret; /* * The skipped bytes needs to be included in the size of data we want * to decompress. */ output_size += _skip; decompressed_bytes = 0; output_buffer = outbuf; limit = output_size; skip = _skip; ret = __decompress(inbuf, input_size, NULL, flush, outbuf, output_size, NULL, print_err); /* * If decompression was aborted due to an actual error rather than * a fake error that we used to abort, then we should report it. 
*/ if (decompressed_bytes < limit) return ret; return decompressed_bytes - skip; }
linux-master
arch/powerpc/boot/decompress.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for Rainier
 *
 * Valentine Barshak <[email protected]>
 * Copyright 2007 MontaVista Software, Inc
 *
 * Based on Ebony code by David Gibson <[email protected]>
 * Copyright IBM Corporation, 2007
 *
 * Based on Bamboo code by Josh Boyer <[email protected]>
 * Copyright IBM Corporation, 2007
 */

#include <stdarg.h>
#include <stddef.h>
#include "types.h"
#include "elf.h"
#include "string.h"
#include "stdio.h"
#include "page.h"
#include "ops.h"
#include "dcr.h"
#include "4xx.h"
#include "44x.h"
#include "cuboot.h"

#define TARGET_4xx
#define TARGET_44x
#include "ppcboot.h"

static bd_t bd;

static void rainier_fixups(void)
{
	unsigned long sysclk = 33333333;

	ibm440ep_fixup_clocks(sysclk, 11059200, 50000000);
	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
	ibm4xx_denali_fixup_memsize();
	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
	dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	platform_ops.fixups = rainier_fixups;
	platform_ops.exit = ibm44x_dbcr_reset;
	fdt_init(_dtb_start);
	serial_console_init();
}
linux-master
arch/powerpc/boot/cuboot-rainier.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for Taishan
 *
 * Author: Hugh Blemings <[email protected]>
 *
 * Copyright 2007 Hugh Blemings, IBM Corporation.
 * Based on cuboot-ebony.c which is:
 * Copyright 2007 David Gibson, IBM Corporation.
 * Based on cuboot-83xx.c, which is:
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "ops.h"
#include "stdio.h"
#include "cuboot.h"
#include "reg.h"
#include "dcr.h"
#include "4xx.h"

#define TARGET_4xx
#define TARGET_44x
#define TARGET_440GX
#include "ppcboot.h"

static bd_t bd;

BSS_STACK(4096);

static void taishan_fixups(void)
{
	/* FIXME: sysclk should be derived by reading the FPGA registers */
	unsigned long sysclk = 33000000;

	ibm440gx_fixup_clocks(sysclk, 6 * 1843200, 25000000);
	ibm4xx_sdram_fixup_memsize();
	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
	dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	platform_ops.fixups = taishan_fixups;
	fdt_init(_dtb_start);
	serial_console_init();
}
linux-master
arch/powerpc/boot/cuboot-taishan.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for Katmai
 *
 * Author: Hugh Blemings <[email protected]>
 *
 * Copyright 2007 Hugh Blemings, IBM Corporation.
 * Based on cuboot-ebony.c which is:
 * Copyright 2007 David Gibson, IBM Corporation.
 * Based on cuboot-83xx.c, which is:
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "ops.h"
#include "stdio.h"
#include "reg.h"
#include "dcr.h"
#include "4xx.h"
#include "44x.h"
#include "cuboot.h"

#define TARGET_4xx
#define TARGET_44x
#include "ppcboot.h"

static bd_t bd;

BSS_STACK(4096);

static void katmai_fixups(void)
{
	unsigned long sysclk = 33333000;

	/* 440SP Clock logic is all but identical to 440GX
	 * so we just use that code for now at least
	 */
	ibm440spe_fixup_clocks(sysclk, 6 * 1843200, 0);

	ibm440spe_fixup_memsize();

	dt_fixup_mac_address(0, bd.bi_enetaddr);

	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	platform_ops.fixups = katmai_fixups;
	fdt_init(_dtb_start);
	serial_console_init();
}
linux-master
arch/powerpc/boot/cuboot-katmai.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for Yosemite
 *
 * Author: Josh Boyer <[email protected]>
 *
 * Copyright 2008 IBM Corporation
 */

#include "ops.h"
#include "stdio.h"
#include "4xx.h"
#include "44x.h"
#include "cuboot.h"

#define TARGET_4xx
#define TARGET_44x
#include "ppcboot.h"

static bd_t bd;

static void yosemite_fixups(void)
{
	unsigned long sysclk = 66666666;

	ibm440ep_fixup_clocks(sysclk, 11059200, 50000000);
	ibm4xx_sdram_fixup_memsize();
	ibm4xx_quiesce_eth((u32 *)0xef600e00, (u32 *)0xef600f00);
	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
	dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	platform_ops.fixups = yosemite_fixups;
	platform_ops.exit = ibm44x_dbcr_reset;
	fdt_init(_dtb_start);
	serial_console_init();
}
linux-master
arch/powerpc/boot/cuboot-yosemite.c
// SPDX-License-Identifier: GPL-2.0-only /* * RedBoot firmware support * * Author: Scott Wood <[email protected]> * * Copyright (c) 2007 Freescale Semiconductor, Inc. */ #include "ops.h" #include "stdio.h" #include "redboot.h" #include "fsl-soc.h" #include "io.h" static bd_t bd; BSS_STACK(4096); #define MHZ(x) ((x + 500000) / 1000000) static void platform_fixups(void) { void *node; dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); dt_fixup_mac_addresses(bd.bi_enetaddr); dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 16, bd.bi_busfreq); node = finddevice("/soc/cpm/brg"); if (node) { printf("BRG clock-frequency <- 0x%x (%dMHz)\r\n", bd.bi_busfreq, MHZ(bd.bi_busfreq)); setprop(node, "clock-frequency", &bd.bi_busfreq, 4); } } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { memcpy(&bd, (char *)r3, sizeof(bd)); if (bd.bi_tag != 0x42444944) return; simple_alloc_init(_end, bd.bi_memstart + bd.bi_memsize - (unsigned long)_end, 32, 64); fdt_init(_dtb_start); serial_console_init(); platform_ops.fixups = platform_fixups; loader_info.cmdline = (char *)bd.bi_cmdline; loader_info.cmdline_len = strlen((char *)bd.bi_cmdline); }
linux-master
arch/powerpc/boot/redboot-8xx.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright IBM Corporation, 2007 * Josh Boyer <[email protected]> * * Based on ebony wrapper: * Copyright 2007 David Gibson, IBM Corporation. * * Clocking code based on code by: * Stefan Roese <[email protected]> */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "dcr.h" #include "4xx.h" #include "44x.h" static u8 *bamboo_mac0, *bamboo_mac1; static void bamboo_fixups(void) { unsigned long sysclk = 33333333; ibm440ep_fixup_clocks(sysclk, 11059200, 25000000); ibm4xx_sdram_fixup_memsize(); ibm4xx_quiesce_eth((u32 *)0xef600e00, (u32 *)0xef600f00); dt_fixup_mac_address_by_alias("ethernet0", bamboo_mac0); dt_fixup_mac_address_by_alias("ethernet1", bamboo_mac1); } void bamboo_init(void *mac0, void *mac1) { platform_ops.fixups = bamboo_fixups; platform_ops.exit = ibm44x_dbcr_reset; bamboo_mac0 = mac0; bamboo_mac1 = mac1; fdt_init(_dtb_start); serial_console_init(); }
linux-master
arch/powerpc/boot/bamboo.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright IBM Corporation, 2007 * Josh Boyer <[email protected]> * * Based on ebony wrapper: * Copyright 2007 David Gibson, IBM Corporation. */ #include "ops.h" #include "stdio.h" #include "44x.h" #include "stdlib.h" BSS_STACK(4096); #define PIBS_MAC0 0xfffc0400 #define PIBS_MAC1 0xfffc0500 char pibs_mac0[6]; char pibs_mac1[6]; static void read_pibs_mac(void) { unsigned long long mac64; mac64 = strtoull((char *)PIBS_MAC0, 0, 16); memcpy(&pibs_mac0, (char *)&mac64+2, 6); mac64 = strtoull((char *)PIBS_MAC1, 0, 16); memcpy(&pibs_mac1, (char *)&mac64+2, 6); } void platform_init(void) { unsigned long end_of_ram = 0x8000000; unsigned long avail_ram = end_of_ram - (unsigned long)_end; simple_alloc_init(_end, avail_ram, 32, 64); read_pibs_mac(); bamboo_init((u8 *)&pibs_mac0, (u8 *)&pibs_mac1); }
linux-master
arch/powerpc/boot/treeboot-bamboo.c
// SPDX-License-Identifier: GPL-2.0 /* * Makes a tree bootable image for IBM Evaluation boards. * Basically, just take a zImage, skip the ELF header, and stuff * a 32 byte header on the front. * * We use htonl, which is a network macro, to make sure we're doing * The Right Thing on an LE machine. It's non-obvious, but it should * work on anything BSD'ish. */ #include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <unistd.h> #include <netinet/in.h> #ifdef __sun__ #include <inttypes.h> #else #include <stdint.h> #endif /* This gets tacked on the front of the image. There are also a few * bytes allocated after the _start label used by the boot rom (see * head.S for details). */ typedef struct boot_block { uint32_t bb_magic; /* 0x0052504F */ uint32_t bb_dest; /* Target address of the image */ uint32_t bb_num_512blocks; /* Size, rounded-up, in 512 byte blks */ uint32_t bb_debug_flag; /* Run debugger or image after load */ uint32_t bb_entry_point; /* The image address to start */ uint32_t bb_checksum; /* 32 bit checksum including header */ uint32_t reserved[2]; } boot_block_t; #define IMGBLK 512 unsigned int tmpbuf[IMGBLK / sizeof(unsigned int)]; int main(int argc, char *argv[]) { int in_fd, out_fd; int nblks, i; unsigned int cksum, *cp; struct stat st; boot_block_t bt; if (argc < 5) { fprintf(stderr, "usage: %s <zImage-file> <boot-image> <load address> <entry point>\n",argv[0]); exit(1); } if (stat(argv[1], &st) < 0) { perror("stat"); exit(2); } nblks = (st.st_size + IMGBLK) / IMGBLK; bt.bb_magic = htonl(0x0052504F); /* If we have the optional entry point parameter, use it */ bt.bb_dest = htonl(strtoul(argv[3], NULL, 0)); bt.bb_entry_point = htonl(strtoul(argv[4], NULL, 0)); /* We know these from the linker command. * ...and then move it up into memory a little more so the * relocation can happen. */ bt.bb_num_512blocks = htonl(nblks); bt.bb_debug_flag = 0; bt.bb_checksum = 0; /* To be neat and tidy :-). */ bt.reserved[0] = 0; bt.reserved[1] = 0; if ((in_fd = open(argv[1], O_RDONLY)) < 0) { perror("zImage open"); exit(3); } if ((out_fd = open(argv[2], (O_RDWR | O_CREAT | O_TRUNC), 0666)) < 0) { perror("bootfile open"); exit(3); } cksum = 0; cp = (void *)&bt; for (i = 0; i < sizeof(bt) / sizeof(unsigned int); i++) cksum += *cp++; /* Assume zImage is an ELF file, and skip the 64K header. */ if (read(in_fd, tmpbuf, sizeof(tmpbuf)) != sizeof(tmpbuf)) { fprintf(stderr, "%s is too small to be an ELF image\n", argv[1]); exit(4); } if (tmpbuf[0] != htonl(0x7f454c46)) { fprintf(stderr, "%s is not an ELF image\n", argv[1]); exit(4); } if (lseek(in_fd, (64 * 1024), SEEK_SET) < 0) { fprintf(stderr, "%s failed to seek in ELF image\n", argv[1]); exit(4); } nblks -= (64 * 1024) / IMGBLK; /* And away we go...... */ if (write(out_fd, &bt, sizeof(bt)) != sizeof(bt)) { perror("boot-image write"); exit(5); } while (nblks-- > 0) { if (read(in_fd, tmpbuf, sizeof(tmpbuf)) < 0) { perror("zImage read"); exit(5); } cp = tmpbuf; for (i = 0; i < sizeof(tmpbuf) / sizeof(unsigned int); i++) cksum += *cp++; if (write(out_fd, tmpbuf, sizeof(tmpbuf)) != sizeof(tmpbuf)) { perror("boot-image write"); exit(5); } } /* rewrite the header with the computed checksum. */ bt.bb_checksum = htonl(cksum); if (lseek(out_fd, 0, SEEK_SET) < 0) { perror("rewrite seek"); exit(1); } if (write(out_fd, &bt, sizeof(bt)) != sizeof(bt)) { perror("boot-image rewrite"); exit(1); } exit(0); }
linux-master
arch/powerpc/boot/mktree.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for 8xx
 *
 * Author: Scott Wood <[email protected]>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "ops.h"
#include "stdio.h"
#include "cuboot.h"

#define TARGET_8xx
#define TARGET_HAS_ETH1
#include "ppcboot.h"

static bd_t bd;

static void platform_fixups(void)
{
	void *node;

	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
	dt_fixup_mac_addresses(bd.bi_enetaddr, bd.bi_enet1addr);
	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 16, bd.bi_busfreq);

	node = finddevice("/soc/cpm");
	if (node)
		setprop(node, "clock-frequency", &bd.bi_busfreq, 4);

	node = finddevice("/soc/cpm/brg");
	if (node)
		setprop(node, "clock-frequency", &bd.bi_busfreq, 4);
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	fdt_init(_dtb_start);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
linux-master
arch/powerpc/boot/cuboot-8xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 IBM Corporation
 *
 * Stephen Winiecki <[email protected]>
 * Josh Boyer <[email protected]>
 *
 * Based on earlier code:
 * Copyright (C) Paul Mackerras 1997.
 */
#include <stdarg.h>
#include <stddef.h>
#include "types.h"
#include "elf.h"
#include "string.h"
#include "stdio.h"
#include "page.h"
#include "ops.h"
#include "io.h"

BSS_STACK(4096);

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
{
	u32 heapsize = 0x8000000 - (u32)_end; /* 128M */

	simple_alloc_init(_end, heapsize, 32, 64);
	fdt_init(_dtb_start);
	serial_console_init();
}
linux-master
arch/powerpc/boot/holly.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OF console routines
 *
 * Copyright (C) Paul Mackerras 1997.
 */
#include <stddef.h>
#include "types.h"
#include "elf.h"
#include "string.h"
#include "stdio.h"
#include "page.h"
#include "ops.h"

#include "of.h"

static unsigned int of_stdout_handle;

static int of_console_open(void)
{
	void *devp;

	if (((devp = of_finddevice("/chosen")) != NULL) &&
	    (of_getprop(devp, "stdout", &of_stdout_handle,
			sizeof(of_stdout_handle)) == sizeof(of_stdout_handle))) {
		of_stdout_handle = be32_to_cpu(of_stdout_handle);
		return 0;
	}

	return -1;
}

static void of_console_write(const char *buf, int len)
{
	of_call_prom("write", 3, 1, of_stdout_handle, buf, len);
}

void of_console_init(void)
{
	console_ops.open = of_console_open;
	console_ops.write = of_console_write;
}
linux-master
arch/powerpc/boot/ofconsole.c
// SPDX-License-Identifier: GPL-2.0-only /* * Old U-boot compatibility for 83xx * * Author: Scott Wood <[email protected]> * * Copyright (c) 2007 Freescale Semiconductor, Inc. */ #include "ops.h" #include "stdio.h" #include "cuboot.h" #define TARGET_83xx #include "ppcboot.h" static bd_t bd; static void platform_fixups(void) { void *soc; dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq); /* Unfortunately, the specific model number is encoded in the * soc node name in existing dts files -- once that is fixed, * this can do a simple path lookup. */ soc = find_node_by_devtype(NULL, "soc"); if (soc) { void *serial = NULL; setprop(soc, "bus-frequency", &bd.bi_busfreq, sizeof(bd.bi_busfreq)); while ((serial = find_node_by_devtype(serial, "serial"))) { if (get_parent(serial) != soc) continue; setprop(serial, "clock-frequency", &bd.bi_busfreq, sizeof(bd.bi_busfreq)); } } } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); fdt_init(_dtb_start); serial_console_init(); platform_ops.fixups = platform_fixups; }
linux-master
arch/powerpc/boot/cuboot-83xx.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) Paul Mackerras 1997. */ #include <stdarg.h> #include <stddef.h> #include "string.h" #include "stdio.h" #include "ops.h" size_t strnlen(const char * s, size_t count) { const char *sc; for (sc = s; count-- && *sc != '\0'; ++sc) /* nothing */; return sc - s; } char *strrchr(const char *s, int c) { const char *last = NULL; do { if (*s == (char)c) last = s; } while (*s++); return (char *)last; } #ifdef __powerpc64__ # define do_div(n, base) ({ \ unsigned int __base = (base); \ unsigned int __rem; \ __rem = ((unsigned long long)(n)) % __base; \ (n) = ((unsigned long long)(n)) / __base; \ __rem; \ }) #else extern unsigned int __div64_32(unsigned long long *dividend, unsigned int divisor); /* The unnecessary pointer compare is there * to check for type safety (n must be 64bit) */ # define do_div(n,base) ({ \ unsigned int __base = (base); \ unsigned int __rem; \ (void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \ if (((n) >> 32) == 0) { \ __rem = (unsigned int)(n) % __base; \ (n) = (unsigned int)(n) / __base; \ } else \ __rem = __div64_32(&(n), __base); \ __rem; \ }) #endif /* __powerpc64__ */ static int skip_atoi(const char **s) { int i, c; for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s) i = i*10 + c - '0'; return i; } #define ZEROPAD 1 /* pad with zero */ #define SIGN 2 /* unsigned/signed long */ #define PLUS 4 /* show plus */ #define SPACE 8 /* space if plus */ #define LEFT 16 /* left justified */ #define SPECIAL 32 /* 0x */ #define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ static char * number(char * str, unsigned long long num, int base, int size, int precision, int type) { char c,sign,tmp[66]; const char *digits="0123456789abcdefghijklmnopqrstuvwxyz"; int i; if (type & LARGE) digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; if (type & LEFT) type &= ~ZEROPAD; if (base < 2 || base > 36) return 0; c = (type & ZEROPAD) ? '0' : ' '; sign = 0; if (type & SIGN) { if ((signed long long)num < 0) { sign = '-'; num = - (signed long long)num; size--; } else if (type & PLUS) { sign = '+'; size--; } else if (type & SPACE) { sign = ' '; size--; } } if (type & SPECIAL) { if (base == 16) size -= 2; else if (base == 8) size--; } i = 0; if (num == 0) tmp[i++]='0'; else while (num != 0) { tmp[i++] = digits[do_div(num, base)]; } if (i > precision) precision = i; size -= precision; if (!(type&(ZEROPAD+LEFT))) while(size-->0) *str++ = ' '; if (sign) *str++ = sign; if (type & SPECIAL) { if (base==8) *str++ = '0'; else if (base==16) { *str++ = '0'; *str++ = digits[33]; } } if (!(type & LEFT)) while (size-- > 0) *str++ = c; while (i < precision--) *str++ = '0'; while (i-- > 0) *str++ = tmp[i]; while (size-- > 0) *str++ = ' '; return str; } int vsprintf(char *buf, const char *fmt, va_list args) { int len; unsigned long long num; int i, base; char * str; const char *s; int flags; /* flags to number() */ int field_width; /* width of output field */ int precision; /* min. # of digits for integers; max number of chars for from string */ int qualifier; /* 'h', 'l', or 'L' for integer fields */ /* 'z' support added 23/7/1999 S.H. 
*/ /* 'z' changed to 'Z' --davidm 1/25/99 */ for (str=buf ; *fmt ; ++fmt) { if (*fmt != '%') { *str++ = *fmt; continue; } /* process flags */ flags = 0; repeat: ++fmt; /* this also skips first '%' */ switch (*fmt) { case '-': flags |= LEFT; goto repeat; case '+': flags |= PLUS; goto repeat; case ' ': flags |= SPACE; goto repeat; case '#': flags |= SPECIAL; goto repeat; case '0': flags |= ZEROPAD; goto repeat; } /* get field width */ field_width = -1; if ('0' <= *fmt && *fmt <= '9') field_width = skip_atoi(&fmt); else if (*fmt == '*') { ++fmt; /* it's the next argument */ field_width = va_arg(args, int); if (field_width < 0) { field_width = -field_width; flags |= LEFT; } } /* get the precision */ precision = -1; if (*fmt == '.') { ++fmt; if ('0' <= *fmt && *fmt <= '9') precision = skip_atoi(&fmt); else if (*fmt == '*') { ++fmt; /* it's the next argument */ precision = va_arg(args, int); } if (precision < 0) precision = 0; } /* get the conversion qualifier */ qualifier = -1; if (*fmt == 'l' && *(fmt + 1) == 'l') { qualifier = 'q'; fmt += 2; } else if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt == 'Z') { qualifier = *fmt; ++fmt; } /* default base */ base = 10; switch (*fmt) { case 'c': if (!(flags & LEFT)) while (--field_width > 0) *str++ = ' '; *str++ = (unsigned char) va_arg(args, int); while (--field_width > 0) *str++ = ' '; continue; case 's': s = va_arg(args, char *); if (!s) s = "<NULL>"; len = strnlen(s, precision); if (!(flags & LEFT)) while (len < field_width--) *str++ = ' '; for (i = 0; i < len; ++i) *str++ = *s++; while (len < field_width--) *str++ = ' '; continue; case 'p': if (field_width == -1) { field_width = 2*sizeof(void *); flags |= ZEROPAD; } str = number(str, (unsigned long) va_arg(args, void *), 16, field_width, precision, flags); continue; case 'n': if (qualifier == 'l') { long * ip = va_arg(args, long *); *ip = (str - buf); } else if (qualifier == 'Z') { size_t * ip = va_arg(args, size_t *); *ip = (str - buf); } else { int * ip = va_arg(args, int *); *ip = (str - buf); } continue; case '%': *str++ = '%'; continue; /* integer number formats - set up the flags and "break" */ case 'o': base = 8; break; case 'X': flags |= LARGE; case 'x': base = 16; break; case 'd': case 'i': flags |= SIGN; case 'u': break; default: *str++ = '%'; if (*fmt) *str++ = *fmt; else --fmt; continue; } if (qualifier == 'l') { num = va_arg(args, unsigned long); if (flags & SIGN) num = (signed long) num; } else if (qualifier == 'q') { num = va_arg(args, unsigned long long); if (flags & SIGN) num = (signed long long) num; } else if (qualifier == 'Z') { num = va_arg(args, size_t); } else if (qualifier == 'h') { num = (unsigned short) va_arg(args, int); if (flags & SIGN) num = (signed short) num; } else { num = va_arg(args, unsigned int); if (flags & SIGN) num = (signed int) num; } str = number(str, num, base, field_width, precision, flags); } *str = '\0'; return str-buf; } int sprintf(char * buf, const char *fmt, ...) { va_list args; int i; va_start(args, fmt); i=vsprintf(buf,fmt,args); va_end(args); return i; } static char sprint_buf[1024]; int printf(const char *fmt, ...) { va_list args; int n; va_start(args, fmt); n = vsprintf(sprint_buf, fmt, args); va_end(args); if (console_ops.write) console_ops.write(sprint_buf, n); return n; }
linux-master
arch/powerpc/boot/stdio.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Program to hack in a PT_NOTE program header entry in an ELF file. * This is needed for OF on RS/6000s to load an image correctly. * Note that OF needs a program header entry for the note, not an * ELF section. * * Copyright 2000 Paul Mackerras. * * Adapted for 64 bit little endian images by Andrew Tauferner. * * Usage: addnote zImage */ #include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include <unistd.h> #include <string.h> /* CHRP note section */ static const char arch[] = "PowerPC"; #define N_DESCR 6 unsigned int descr[N_DESCR] = { 0xffffffff, /* real-mode = true */ 0x02000000, /* real-base, i.e. where we expect OF to be */ 0xffffffff, /* real-size */ 0xffffffff, /* virt-base */ 0xffffffff, /* virt-size */ 0x4000, /* load-base */ }; /* RPA note section */ static const char rpaname[] = "IBM,RPA-Client-Config"; /* * Note: setting ignore_my_client_config *should* mean that OF ignores * all the other fields, but there is a firmware bug which means that * it looks at the splpar field at least. So these values need to be * reasonable. */ #define N_RPA_DESCR 8 unsigned int rpanote[N_RPA_DESCR] = { 0, /* lparaffinity */ 64, /* min_rmo_size */ 0, /* min_rmo_percent */ 40, /* max_pft_size */ 1, /* splpar */ -1, /* min_load */ 0, /* new_mem_def */ 1, /* ignore_my_client_config */ }; #define ROUNDUP(len) (((len) + 3) & ~3) unsigned char buf[1024]; #define ELFDATA2LSB 1 #define ELFDATA2MSB 2 static int e_data = ELFDATA2MSB; #define ELFCLASS32 1 #define ELFCLASS64 2 static int e_class = ELFCLASS32; #define GET_16BE(off) ((buf[off] << 8) + (buf[(off)+1])) #define GET_32BE(off) ((GET_16BE(off) << 16U) + GET_16BE((off)+2U)) #define GET_64BE(off) ((((unsigned long long)GET_32BE(off)) << 32ULL) + \ ((unsigned long long)GET_32BE((off)+4ULL))) #define PUT_16BE(off, v)(buf[off] = ((v) >> 8) & 0xff, \ buf[(off) + 1] = (v) & 0xff) #define PUT_32BE(off, v)(PUT_16BE((off), (v) >> 16L), PUT_16BE((off) + 2, (v))) #define PUT_64BE(off, v)((PUT_32BE((off), (v) >> 32L), \ PUT_32BE((off) + 4, (v)))) #define GET_16LE(off) ((buf[off]) + (buf[(off)+1] << 8)) #define GET_32LE(off) (GET_16LE(off) + (GET_16LE((off)+2U) << 16U)) #define GET_64LE(off) ((unsigned long long)GET_32LE(off) + \ (((unsigned long long)GET_32LE((off)+4ULL)) << 32ULL)) #define PUT_16LE(off, v) (buf[off] = (v) & 0xff, \ buf[(off) + 1] = ((v) >> 8) & 0xff) #define PUT_32LE(off, v) (PUT_16LE((off), (v)), PUT_16LE((off) + 2, (v) >> 16L)) #define PUT_64LE(off, v) (PUT_32LE((off), (v)), PUT_32LE((off) + 4, (v) >> 32L)) #define GET_16(off) (e_data == ELFDATA2MSB ? GET_16BE(off) : GET_16LE(off)) #define GET_32(off) (e_data == ELFDATA2MSB ? GET_32BE(off) : GET_32LE(off)) #define GET_64(off) (e_data == ELFDATA2MSB ? GET_64BE(off) : GET_64LE(off)) #define PUT_16(off, v) (e_data == ELFDATA2MSB ? PUT_16BE(off, v) : \ PUT_16LE(off, v)) #define PUT_32(off, v) (e_data == ELFDATA2MSB ? PUT_32BE(off, v) : \ PUT_32LE(off, v)) #define PUT_64(off, v) (e_data == ELFDATA2MSB ? PUT_64BE(off, v) : \ PUT_64LE(off, v)) /* Structure of an ELF file */ #define E_IDENT 0 /* ELF header */ #define E_PHOFF (e_class == ELFCLASS32 ? 28 : 32) #define E_PHENTSIZE (e_class == ELFCLASS32 ? 42 : 54) #define E_PHNUM (e_class == ELFCLASS32 ? 44 : 56) #define E_HSIZE (e_class == ELFCLASS32 ? 52 : 64) #define EI_MAGIC 0 /* offsets in E_IDENT area */ #define EI_CLASS 4 #define EI_DATA 5 #define PH_TYPE 0 /* ELF program header */ #define PH_OFFSET (e_class == ELFCLASS32 ? 4 : 8) #define PH_FILESZ (e_class == ELFCLASS32 ? 
16 : 32) #define PH_HSIZE (e_class == ELFCLASS32 ? 32 : 56) #define PT_NOTE 4 /* Program header type = note */ unsigned char elf_magic[4] = { 0x7f, 'E', 'L', 'F' }; int main(int ac, char **av) { int fd, n, i; unsigned long ph, ps, np; long nnote, nnote2, ns; if (ac != 2) { fprintf(stderr, "Usage: %s elf-file\n", av[0]); exit(1); } fd = open(av[1], O_RDWR); if (fd < 0) { perror(av[1]); exit(1); } nnote = 12 + ROUNDUP(strlen(arch) + 1) + sizeof(descr); nnote2 = 12 + ROUNDUP(strlen(rpaname) + 1) + sizeof(rpanote); n = read(fd, buf, sizeof(buf)); if (n < 0) { perror("read"); exit(1); } if (memcmp(&buf[E_IDENT+EI_MAGIC], elf_magic, 4) != 0) goto notelf; e_class = buf[E_IDENT+EI_CLASS]; if (e_class != ELFCLASS32 && e_class != ELFCLASS64) goto notelf; e_data = buf[E_IDENT+EI_DATA]; if (e_data != ELFDATA2MSB && e_data != ELFDATA2LSB) goto notelf; if (n < E_HSIZE) goto notelf; ph = (e_class == ELFCLASS32 ? GET_32(E_PHOFF) : GET_64(E_PHOFF)); ps = GET_16(E_PHENTSIZE); np = GET_16(E_PHNUM); if (ph < E_HSIZE || ps < PH_HSIZE || np < 1) goto notelf; if (ph + (np + 2) * ps + nnote + nnote2 > n) goto nospace; for (i = 0; i < np; ++i) { if (GET_32(ph + PH_TYPE) == PT_NOTE) { fprintf(stderr, "%s already has a note entry\n", av[1]); exit(0); } ph += ps; } /* XXX check that the area we want to use is all zeroes */ for (i = 0; i < 2 * ps + nnote + nnote2; ++i) if (buf[ph + i] != 0) goto nospace; /* fill in the program header entry */ ns = ph + 2 * ps; PUT_32(ph + PH_TYPE, PT_NOTE); if (e_class == ELFCLASS32) PUT_32(ph + PH_OFFSET, ns); else PUT_64(ph + PH_OFFSET, ns); if (e_class == ELFCLASS32) PUT_32(ph + PH_FILESZ, nnote); else PUT_64(ph + PH_FILESZ, nnote); /* fill in the note area we point to */ /* XXX we should probably make this a proper section */ PUT_32(ns, strlen(arch) + 1); PUT_32(ns + 4, N_DESCR * 4); PUT_32(ns + 8, 0x1275); strcpy((char *) &buf[ns + 12], arch); ns += 12 + strlen(arch) + 1; for (i = 0; i < N_DESCR; ++i, ns += 4) PUT_32BE(ns, descr[i]); /* fill in the second program header entry and the RPA note area */ ph += ps; PUT_32(ph + PH_TYPE, PT_NOTE); if (e_class == ELFCLASS32) PUT_32(ph + PH_OFFSET, ns); else PUT_64(ph + PH_OFFSET, ns); if (e_class == ELFCLASS32) PUT_32(ph + PH_FILESZ, nnote); else PUT_64(ph + PH_FILESZ, nnote2); /* fill in the note area we point to */ PUT_32(ns, strlen(rpaname) + 1); PUT_32(ns + 4, sizeof(rpanote)); PUT_32(ns + 8, 0x12759999); strcpy((char *) &buf[ns + 12], rpaname); ns += 12 + ROUNDUP(strlen(rpaname) + 1); for (i = 0; i < N_RPA_DESCR; ++i, ns += 4) PUT_32BE(ns, rpanote[i]); /* Update the number of program headers */ PUT_16(E_PHNUM, np + 2); /* write back */ i = lseek(fd, (long) 0, SEEK_SET); if (i < 0) { perror("lseek"); exit(1); } i = write(fd, buf, n); if (i < 0) { perror("write"); exit(1); } if (i < n) { fprintf(stderr, "%s: write truncated\n", av[1]); exit(1); } exit(0); notelf: fprintf(stderr, "%s does not appear to be an ELF file\n", av[1]); exit(1); nospace: fprintf(stderr, "sorry, I can't find space in %s to put the note\n", av[1]); exit(1); }
linux-master
arch/powerpc/boot/addnote.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for Ebony
 *
 * Author: David Gibson <[email protected]>
 *
 * Copyright 2007 David Gibson, IBM Corporatio.
 * Based on cuboot-83xx.c, which is:
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "ops.h"
#include "stdio.h"
#include "44x.h"

BSS_STACK(4096);

#define OPENBIOS_MAC_BASE	0xfffffe0c
#define OPENBIOS_MAC_OFFSET	0xc

void platform_init(void)
{
	unsigned long end_of_ram = 0x8000000;
	unsigned long avail_ram = end_of_ram - (unsigned long)_end;

	simple_alloc_init(_end, avail_ram, 32, 64);
	ebony_init((u8 *)OPENBIOS_MAC_BASE,
		   (u8 *)(OPENBIOS_MAC_BASE + OPENBIOS_MAC_OFFSET));
}
linux-master
arch/powerpc/boot/treeboot-ebony.c
// SPDX-License-Identifier: GPL-2.0 /* * CPM serial console support. * * Copyright 2007 Freescale Semiconductor, Inc. * Author: Scott Wood <[email protected]> * * It is assumed that the firmware (or the platform file) has already set * up the port. */ #include "types.h" #include "io.h" #include "ops.h" #include "page.h" struct cpm_scc { u32 gsmrl; u32 gsmrh; u16 psmr; u8 res1[2]; u16 todr; u16 dsr; u16 scce; u8 res2[2]; u16 sccm; u8 res3; u8 sccs; u8 res4[8]; }; struct cpm_smc { u8 res1[2]; u16 smcmr; u8 res2[2]; u8 smce; u8 res3[3]; u8 smcm; u8 res4[5]; }; struct cpm_param { u16 rbase; u16 tbase; u8 rfcr; u8 tfcr; u16 mrblr; u32 rstate; u8 res1[4]; u16 rbptr; u8 res2[6]; u32 tstate; u8 res3[4]; u16 tbptr; u8 res4[6]; u16 maxidl; u16 idlc; u16 brkln; u16 brkec; u16 brkcr; u16 rmask; u8 res5[4]; }; struct cpm_bd { u16 sc; /* Status and Control */ u16 len; /* Data length in buffer */ u8 *addr; /* Buffer address in host memory */ }; static void *cpcr; static struct cpm_param *param; static struct cpm_smc *smc; static struct cpm_scc *scc; static struct cpm_bd *tbdf, *rbdf; static u32 cpm_cmd; static void *cbd_addr; static u32 cbd_offset; static void (*do_cmd)(int op); static void (*enable_port)(void); static void (*disable_port)(void); #define CPM_CMD_STOP_TX 4 #define CPM_CMD_RESTART_TX 6 #define CPM_CMD_INIT_RX_TX 0 static void cpm1_cmd(int op) { while (in_be16(cpcr) & 1) ; out_be16(cpcr, (op << 8) | cpm_cmd | 1); while (in_be16(cpcr) & 1) ; } static void cpm2_cmd(int op) { while (in_be32(cpcr) & 0x10000) ; out_be32(cpcr, op | cpm_cmd | 0x10000); while (in_be32(cpcr) & 0x10000) ; } static void smc_disable_port(void) { do_cmd(CPM_CMD_STOP_TX); out_be16(&smc->smcmr, in_be16(&smc->smcmr) & ~3); } static void scc_disable_port(void) { do_cmd(CPM_CMD_STOP_TX); out_be32(&scc->gsmrl, in_be32(&scc->gsmrl) & ~0x30); } static void smc_enable_port(void) { out_be16(&smc->smcmr, in_be16(&smc->smcmr) | 3); do_cmd(CPM_CMD_RESTART_TX); } static void scc_enable_port(void) { out_be32(&scc->gsmrl, in_be32(&scc->gsmrl) | 0x30); do_cmd(CPM_CMD_RESTART_TX); } static int cpm_serial_open(void) { disable_port(); out_8(&param->rfcr, 0x10); out_8(&param->tfcr, 0x10); out_be16(&param->mrblr, 1); out_be16(&param->maxidl, 0); out_be16(&param->brkec, 0); out_be16(&param->brkln, 0); out_be16(&param->brkcr, 0); rbdf = cbd_addr; rbdf->addr = (u8 *)rbdf - 1; rbdf->sc = 0xa000; rbdf->len = 1; tbdf = rbdf + 1; tbdf->addr = (u8 *)rbdf - 2; tbdf->sc = 0x2000; tbdf->len = 1; sync(); out_be16(&param->rbase, cbd_offset); out_be16(&param->tbase, cbd_offset + sizeof(struct cpm_bd)); do_cmd(CPM_CMD_INIT_RX_TX); enable_port(); return 0; } static void cpm_serial_putc(unsigned char c) { while (tbdf->sc & 0x8000) barrier(); sync(); tbdf->addr[0] = c; eieio(); tbdf->sc |= 0x8000; } static unsigned char cpm_serial_tstc(void) { barrier(); return !(rbdf->sc & 0x8000); } static unsigned char cpm_serial_getc(void) { unsigned char c; while (!cpm_serial_tstc()) ; sync(); c = rbdf->addr[0]; eieio(); rbdf->sc |= 0x8000; return c; } int cpm_console_init(void *devp, struct serial_console_data *scdp) { void *vreg[2]; u32 reg[2]; int is_smc = 0, is_cpm2 = 0; void *parent, *muram; void *muram_addr; unsigned long muram_offset, muram_size; if (dt_is_compatible(devp, "fsl,cpm1-smc-uart")) { is_smc = 1; } else if (dt_is_compatible(devp, "fsl,cpm2-scc-uart")) { is_cpm2 = 1; } else if (dt_is_compatible(devp, "fsl,cpm2-smc-uart")) { is_cpm2 = 1; is_smc = 1; } if (is_smc) { enable_port = smc_enable_port; disable_port = smc_disable_port; } else { enable_port = 
scc_enable_port; disable_port = scc_disable_port; } if (is_cpm2) do_cmd = cpm2_cmd; else do_cmd = cpm1_cmd; if (getprop(devp, "fsl,cpm-command", &cpm_cmd, 4) < 4) return -1; if (dt_get_virtual_reg(devp, vreg, 2) < 2) return -1; if (is_smc) smc = vreg[0]; else scc = vreg[0]; param = vreg[1]; parent = get_parent(devp); if (!parent) return -1; if (dt_get_virtual_reg(parent, &cpcr, 1) < 1) return -1; muram = finddevice("/soc/cpm/muram/data"); if (!muram) return -1; /* For bootwrapper-compatible device trees, we assume that the first * entry has at least 128 bytes, and that #address-cells/#data-cells * is one for both parent and child. */ if (dt_get_virtual_reg(muram, &muram_addr, 1) < 1) return -1; if (getprop(muram, "reg", reg, 8) < 8) return -1; muram_offset = reg[0]; muram_size = reg[1]; /* Store the buffer descriptors at the end of the first muram chunk. * For SMC ports on CPM2-based platforms, relocate the parameter RAM * just before the buffer descriptors. */ cbd_offset = muram_offset + muram_size - 2 * sizeof(struct cpm_bd); if (is_cpm2 && is_smc) { u16 *smc_base = (u16 *)param; u16 pram_offset; pram_offset = cbd_offset - 64; pram_offset = _ALIGN_DOWN(pram_offset, 64); disable_port(); out_be16(smc_base, pram_offset); param = muram_addr - muram_offset + pram_offset; } cbd_addr = muram_addr - muram_offset + cbd_offset; scdp->open = cpm_serial_open; scdp->putc = cpm_serial_putc; scdp->getc = cpm_serial_getc; scdp->tstc = cpm_serial_tstc; return 0; }
linux-master
arch/powerpc/boot/cpm-serial.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2016 IBM Corporation. */ #include "ops.h" #include "stdio.h" #include "io.h" #include <libfdt.h> #include "../include/asm/opal-api.h" /* Global OPAL struct used by opal-call.S */ struct opal { u64 base; u64 entry; } opal; static u32 opal_con_id; /* see opal-wrappers.S */ int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer); int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer); int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length); int64_t opal_console_flush(uint64_t term_number); int64_t opal_poll_events(uint64_t *outstanding_event_mask); void opal_kentry(unsigned long fdt_addr, void *vmlinux_addr); static int opal_con_open(void) { /* * When OPAL loads the boot kernel it stashes the OPAL base and entry * address in r8 and r9 so the kernel can use the OPAL console * before unflattening the devicetree. While executing the wrapper will * probably trash r8 and r9 so this kentry hook restores them before * entering the decompressed kernel. */ platform_ops.kentry = opal_kentry; return 0; } static void opal_con_putc(unsigned char c) { int64_t rc; uint64_t olen, len; do { rc = opal_console_write_buffer_space(opal_con_id, &olen); len = be64_to_cpu(olen); if (rc) return; opal_poll_events(NULL); } while (len < 1); olen = cpu_to_be64(1); opal_console_write(opal_con_id, &olen, &c); } static void opal_con_close(void) { opal_console_flush(opal_con_id); } static void opal_init(void) { void *opal_node; opal_node = finddevice("/ibm,opal"); if (!opal_node) return; if (getprop(opal_node, "opal-base-address", &opal.base, sizeof(u64)) < 0) return; opal.base = be64_to_cpu(opal.base); if (getprop(opal_node, "opal-entry-address", &opal.entry, sizeof(u64)) < 0) return; opal.entry = be64_to_cpu(opal.entry); } int opal_console_init(void *devp, struct serial_console_data *scdp) { opal_init(); if (devp) { int n = getprop(devp, "reg", &opal_con_id, sizeof(u32)); if (n != sizeof(u32)) return -1; opal_con_id = be32_to_cpu(opal_con_id); } else opal_con_id = 0; scdp->open = opal_con_open; scdp->putc = opal_con_putc; scdp->close = opal_con_close; return 0; }
linux-master
arch/powerpc/boot/opal.c
// SPDX-License-Identifier: GPL-2.0-only /* * PlanetCore configuration data support functions * * Author: Scott Wood <[email protected]> * * Copyright (c) 2007 Freescale Semiconductor, Inc. */ #include "stdio.h" #include "stdlib.h" #include "ops.h" #include "planetcore.h" #include "io.h" /* PlanetCore passes information to the OS in the form of * a table of key=value strings, separated by newlines. * * The list is terminated by an empty string (i.e. two * consecutive newlines). * * To make it easier to parse, we first convert all the * newlines into null bytes. */ void planetcore_prepare_table(char *table) { do { if (*table == '\n') *table = 0; table++; } while (*(table - 1) || *table != '\n'); *table = 0; } const char *planetcore_get_key(const char *table, const char *key) { int keylen = strlen(key); do { if (!strncmp(table, key, keylen) && table[keylen] == '=') return table + keylen + 1; table += strlen(table) + 1; } while (strlen(table) != 0); return NULL; } int planetcore_get_decimal(const char *table, const char *key, u64 *val) { const char *str = planetcore_get_key(table, key); if (!str) return 0; *val = strtoull(str, NULL, 10); return 1; } int planetcore_get_hex(const char *table, const char *key, u64 *val) { const char *str = planetcore_get_key(table, key); if (!str) return 0; *val = strtoull(str, NULL, 16); return 1; } static u64 mac_table[4] = { 0x000000000000, 0x000000800000, 0x000000400000, 0x000000c00000, }; void planetcore_set_mac_addrs(const char *table) { u8 addr[4][6]; u64 int_addr; u32 i; int j; if (!planetcore_get_hex(table, PLANETCORE_KEY_MAC_ADDR, &int_addr)) return; for (i = 0; i < 4; i++) { u64 this_dev_addr = (int_addr & ~0x000000c00000) | mac_table[i]; for (j = 5; j >= 0; j--) { addr[i][j] = this_dev_addr & 0xff; this_dev_addr >>= 8; } dt_fixup_mac_address(i, addr[i]); } } static char prop_buf[MAX_PROP_LEN]; void planetcore_set_stdout_path(const char *table) { char *path; const char *label; void *node, *chosen; label = planetcore_get_key(table, PLANETCORE_KEY_SERIAL_PORT); if (!label) return; node = find_node_by_prop_value_str(NULL, "linux,planetcore-label", label); if (!node) return; path = get_path(node, prop_buf, MAX_PROP_LEN); if (!path) return; chosen = finddevice("/chosen"); if (!chosen) chosen = create_node(NULL, "chosen"); if (!chosen) return; setprop_str(chosen, "linux,stdout-path", path); }
linux-master
arch/powerpc/boot/planetcore.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for PPC405EX. This image is already included
 * a dtb.
 *
 * Author: Tiejun Chen <[email protected]>
 *
 * Copyright (C) 2009 Wind River Systems, Inc.
 */

#include "ops.h"
#include "io.h"
#include "dcr.h"
#include "stdio.h"
#include "4xx.h"
#include "44x.h"
#include "cuboot.h"

#define TARGET_4xx
#define TARGET_44x
#include "ppcboot.h"

#define KILAUEA_SYS_EXT_SERIAL_CLOCK	11059200	/* ext. 11.059MHz clk */

static bd_t bd;

static void kilauea_fixups(void)
{
	unsigned long sysclk = 33333333;

	ibm405ex_fixup_clocks(sysclk, KILAUEA_SYS_EXT_SERIAL_CLOCK);
	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
	dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	platform_ops.fixups = kilauea_fixups;
	platform_ops.exit = ibm40x_dbcr_reset;
	fdt_init(_dtb_start);
	serial_console_init();
}
linux-master
arch/powerpc/boot/cuboot-kilauea.c
// SPDX-License-Identifier: GPL-2.0-only /* * Old U-boot compatibility for 85xx * * Author: Scott Wood <[email protected]> * * Copyright (c) 2007 Freescale Semiconductor, Inc. */ #include "ops.h" #include "stdio.h" #include "cuboot.h" #define TARGET_85xx #define TARGET_HAS_ETH3 #include "ppcboot.h" static bd_t bd; static void platform_fixups(void) { void *soc; dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); dt_fixup_mac_address_by_alias("ethernet2", bd.bi_enet2addr); dt_fixup_mac_address_by_alias("ethernet3", bd.bi_enet3addr); dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 8, bd.bi_busfreq); /* Unfortunately, the specific model number is encoded in the * soc node name in existing dts files -- once that is fixed, * this can do a simple path lookup. */ soc = find_node_by_devtype(NULL, "soc"); if (soc) { void *serial = NULL; setprop(soc, "bus-frequency", &bd.bi_busfreq, sizeof(bd.bi_busfreq)); while ((serial = find_node_by_devtype(serial, "serial"))) { if (get_parent(serial) != soc) continue; setprop(serial, "clock-frequency", &bd.bi_busfreq, sizeof(bd.bi_busfreq)); } } } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); fdt_init(_dtb_start); serial_console_init(); platform_ops.fixups = platform_fixups; }
linux-master
arch/powerpc/boot/cuboot-85xx.c
// SPDX-License-Identifier: GPL-2.0-only /* * Old U-boot compatibility for MPC5200 * * Author: Grant Likely <[email protected]> * * Copyright (c) 2007 Secret Lab Technologies Ltd. * Copyright (c) 2007 Freescale Semiconductor, Inc. */ #include "ops.h" #include "stdio.h" #include "io.h" #include "cuboot.h" #define TARGET_PPC_MPC52xx #include "ppcboot.h" static bd_t bd; static void platform_fixups(void) { void *soc, *reg; int div; u32 sysfreq; dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); dt_fixup_mac_addresses(bd.bi_enetaddr); dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq); /* Unfortunately, the specific model number is encoded in the * soc node name in existing dts files -- once that is fixed, * this can do a simple path lookup. */ soc = find_node_by_devtype(NULL, "soc"); if (!soc) soc = find_node_by_compatible(NULL, "fsl,mpc5200-immr"); if (!soc) soc = find_node_by_compatible(NULL, "fsl,mpc5200b-immr"); if (soc) { setprop(soc, "bus-frequency", &bd.bi_ipbfreq, sizeof(bd.bi_ipbfreq)); if (!dt_xlate_reg(soc, 0, (void*)&reg, NULL)) return; div = in_8(reg + 0x204) & 0x0020 ? 8 : 4; sysfreq = bd.bi_busfreq * div; setprop(soc, "system-frequency", &sysfreq, sizeof(sysfreq)); } } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); fdt_init(_dtb_start); serial_console_init(); platform_ops.fixups = platform_fixups; }
linux-master
arch/powerpc/boot/cuboot-52xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for Bamboo
 *
 * Author: Josh Boyer <[email protected]>
 *
 * Copyright 2007 IBM Corporation
 *
 * Based on cuboot-ebony.c
 */

#include "ops.h"
#include "stdio.h"
#include "44x.h"
#include "cuboot.h"

#define TARGET_4xx
#define TARGET_44x
#include "ppcboot.h"

static bd_t bd;

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	bamboo_init(&bd.bi_enetaddr, &bd.bi_enet1addr);
}
linux-master
arch/powerpc/boot/cuboot-bamboo.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * stdlib functions
 *
 * Author: Scott Wood <[email protected]>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "stdlib.h"

/* Not currently supported: leading whitespace, sign, 0x prefix, zero base */
unsigned long long int strtoull(const char *ptr, char **end, int base)
{
	unsigned long long ret = 0;

	if (base > 36)
		goto out;

	while (*ptr) {
		int digit;

		if (*ptr >= '0' && *ptr <= '9' && *ptr < '0' + base)
			digit = *ptr - '0';
		else if (*ptr >= 'A' && *ptr < 'A' + base - 10)
			digit = *ptr - 'A' + 10;
		else if (*ptr >= 'a' && *ptr < 'a' + base - 10)
			digit = *ptr - 'a' + 10;
		else
			break;

		ret *= base;
		ret += digit;
		ptr++;
	}

out:
	if (end)
		*end = (char *)ptr;

	return ret;
}
linux-master
arch/powerpc/boot/stdlib.c
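The strtoull() above deliberately skips leading whitespace, sign, 0x-prefix and zero-base handling. For plain digit strings its result matches the C99 strtoull() from <stdlib.h>, which the hypothetical host-side check below uses to illustrate the digit arithmetic and the "stop at the first non-digit" behaviour.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *end;

	/* Plain digit strings, so the host strtoull() and the
	 * bootwrapper's cut-down version agree. */
	printf("%llu\n", strtoull("33333333", NULL, 10)); /* 33333333 */
	printf("%llu\n", strtoull("1f", NULL, 16));       /* 31 */
	printf("%llu\n", strtoull("FF", NULL, 16));       /* 255 */

	/* Parsing stops at the first non-digit, so "12MB" with
	 * base 10 yields 12 and end points at "MB". */
	printf("%llu rest=\"%s\"\n", strtoull("12MB", &end, 10), end);
	return 0;
}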
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) Paul Mackerras 1997. * * Updates for PPC64 by Todd Inglett, Dave Engebretsen & Peter Bergner. */ #include <stdarg.h> #include <stddef.h> #include "elf.h" #include "page.h" #include "string.h" #include "stdio.h" int parse_elf64(void *hdr, struct elf_info *info) { Elf64_Ehdr *elf64 = hdr; Elf64_Phdr *elf64ph; unsigned int i; if (!(elf64->e_ident[EI_MAG0] == ELFMAG0 && elf64->e_ident[EI_MAG1] == ELFMAG1 && elf64->e_ident[EI_MAG2] == ELFMAG2 && elf64->e_ident[EI_MAG3] == ELFMAG3 && elf64->e_ident[EI_CLASS] == ELFCLASS64 && #ifdef __LITTLE_ENDIAN__ elf64->e_ident[EI_DATA] == ELFDATA2LSB && #else elf64->e_ident[EI_DATA] == ELFDATA2MSB && #endif (elf64->e_type == ET_EXEC || elf64->e_type == ET_DYN) && elf64->e_machine == EM_PPC64)) return 0; elf64ph = (Elf64_Phdr *)((unsigned long)elf64 + (unsigned long)elf64->e_phoff); for (i = 0; i < (unsigned int)elf64->e_phnum; i++, elf64ph++) if (elf64ph->p_type == PT_LOAD) break; if (i >= (unsigned int)elf64->e_phnum) return 0; info->loadsize = (unsigned long)elf64ph->p_filesz; info->memsize = (unsigned long)elf64ph->p_memsz; info->elfoffset = (unsigned long)elf64ph->p_offset; return 1; } int parse_elf32(void *hdr, struct elf_info *info) { Elf32_Ehdr *elf32 = hdr; Elf32_Phdr *elf32ph; unsigned int i; if (!(elf32->e_ident[EI_MAG0] == ELFMAG0 && elf32->e_ident[EI_MAG1] == ELFMAG1 && elf32->e_ident[EI_MAG2] == ELFMAG2 && elf32->e_ident[EI_MAG3] == ELFMAG3 && elf32->e_ident[EI_CLASS] == ELFCLASS32 && elf32->e_ident[EI_DATA] == ELFDATA2MSB && (elf32->e_type == ET_EXEC || elf32->e_type == ET_DYN) && elf32->e_machine == EM_PPC)) return 0; elf32ph = (Elf32_Phdr *) ((unsigned long)elf32 + elf32->e_phoff); for (i = 0; i < elf32->e_phnum; i++, elf32ph++) if (elf32ph->p_type == PT_LOAD) break; if (i >= elf32->e_phnum) return 0; info->loadsize = elf32ph->p_filesz; info->memsize = elf32ph->p_memsz; info->elfoffset = elf32ph->p_offset; return 1; }
linux-master
arch/powerpc/boot/elf_util.c
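parse_elf64() and parse_elf32() only inspect e_ident, e_type, e_machine and the program headers. As a rough host-side illustration of the e_ident part of that test (the big-endian 64-bit branch only), the sketch below uses the glibc <elf.h> constants; the command-line file argument is just an example input, not part of the bootwrapper.

#include <elf.h>
#include <stdio.h>

/* Returns 1 if buf starts with a 64-bit big-endian ELF ident,
 * mirroring the first half of parse_elf64()'s check. */
static int is_elf64_be(const unsigned char *buf)
{
	return buf[EI_MAG0] == ELFMAG0 && buf[EI_MAG1] == ELFMAG1 &&
	       buf[EI_MAG2] == ELFMAG2 && buf[EI_MAG3] == ELFMAG3 &&
	       buf[EI_CLASS] == ELFCLASS64 && buf[EI_DATA] == ELFDATA2MSB;
}

int main(int argc, char **argv)
{
	unsigned char ident[EI_NIDENT] = { 0 };
	FILE *f;

	if (argc < 2)
		return 1;
	f = fopen(argv[1], "rb");	/* e.g. a vmlinux image */
	if (!f)
		return 1;
	if (fread(ident, 1, sizeof(ident), f) != sizeof(ident)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("%s: %s\n", argv[1],
	       is_elf64_be(ident) ? "64-bit big-endian ELF" : "something else");
	return 0;
}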
// SPDX-License-Identifier: GPL-2.0-only /* * Old U-boot compatibility for Esteem 195E Hotfoot CPU Board * * Author: Solomon Peachy <[email protected]> */ #include "ops.h" #include "stdio.h" #include "reg.h" #include "dcr.h" #include "4xx.h" #include "cuboot.h" #define TARGET_4xx #define TARGET_HOTFOOT #include "ppcboot-hotfoot.h" static bd_t bd; #define NUM_REGS 3 static void hotfoot_fixups(void) { u32 uart = mfdcr(DCRN_CPC0_UCR) & 0x7f; dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); dt_fixup_cpu_clocks(bd.bi_procfreq, bd.bi_procfreq, 0); dt_fixup_clock("/plb", bd.bi_plb_busfreq); dt_fixup_clock("/plb/opb", bd.bi_opbfreq); dt_fixup_clock("/plb/ebc", bd.bi_pci_busfreq); dt_fixup_clock("/plb/opb/serial@ef600300", bd.bi_procfreq / uart); dt_fixup_clock("/plb/opb/serial@ef600400", bd.bi_procfreq / uart); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); /* Is this a single eth/serial board? */ if ((bd.bi_enet1addr[0] == 0) && (bd.bi_enet1addr[1] == 0) && (bd.bi_enet1addr[2] == 0) && (bd.bi_enet1addr[3] == 0) && (bd.bi_enet1addr[4] == 0) && (bd.bi_enet1addr[5] == 0)) { void *devp; printf("Trimming devtree for single serial/eth board\n"); devp = finddevice("/plb/opb/serial@ef600300"); if (!devp) fatal("Can't find node for /plb/opb/serial@ef600300"); del_node(devp); devp = finddevice("/plb/opb/ethernet@ef600900"); if (!devp) fatal("Can't find node for /plb/opb/ethernet@ef600900"); del_node(devp); } ibm4xx_quiesce_eth((u32 *)0xef600800, (u32 *)0xef600900); /* Fix up flash size in fdt for 4M boards. */ if (bd.bi_flashsize < 0x800000) { u32 regs[NUM_REGS]; void *devp = finddevice("/plb/ebc/nor_flash@0"); if (!devp) fatal("Can't find FDT node for nor_flash!??"); printf("Fixing devtree for 4M Flash\n"); /* First fix up the base address */ getprop(devp, "reg", regs, sizeof(regs)); regs[0] = 0; regs[1] = 0xffc00000; regs[2] = 0x00400000; setprop(devp, "reg", regs, sizeof(regs)); /* Then the offsets */ devp = finddevice("/plb/ebc/nor_flash@0/partition@0"); if (!devp) fatal("Can't find FDT node for partition@0"); getprop(devp, "reg", regs, 2*sizeof(u32)); regs[0] -= 0x400000; setprop(devp, "reg", regs, 2*sizeof(u32)); devp = finddevice("/plb/ebc/nor_flash@0/partition@1"); if (!devp) fatal("Can't find FDT node for partition@1"); getprop(devp, "reg", regs, 2*sizeof(u32)); regs[0] -= 0x400000; setprop(devp, "reg", regs, 2*sizeof(u32)); devp = finddevice("/plb/ebc/nor_flash@0/partition@2"); if (!devp) fatal("Can't find FDT node for partition@2"); getprop(devp, "reg", regs, 2*sizeof(u32)); regs[0] -= 0x400000; setprop(devp, "reg", regs, 2*sizeof(u32)); devp = finddevice("/plb/ebc/nor_flash@0/partition@3"); if (!devp) fatal("Can't find FDT node for partition@3"); getprop(devp, "reg", regs, 2*sizeof(u32)); regs[0] -= 0x400000; setprop(devp, "reg", regs, 2*sizeof(u32)); devp = finddevice("/plb/ebc/nor_flash@0/partition@4"); if (!devp) fatal("Can't find FDT node for partition@4"); getprop(devp, "reg", regs, 2*sizeof(u32)); regs[0] -= 0x400000; setprop(devp, "reg", regs, 2*sizeof(u32)); devp = finddevice("/plb/ebc/nor_flash@0/partition@6"); if (!devp) fatal("Can't find FDT node for partition@6"); getprop(devp, "reg", regs, 2*sizeof(u32)); regs[0] -= 0x400000; setprop(devp, "reg", regs, 2*sizeof(u32)); /* Delete the FeatFS node */ devp = finddevice("/plb/ebc/nor_flash@0/partition@5"); if (!devp) fatal("Can't find FDT node for partition@5"); del_node(devp); } } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned 
long r6, unsigned long r7) { CUBOOT_INIT(); platform_ops.fixups = hotfoot_fixups; platform_ops.exit = ibm40x_dbcr_reset; fdt_init(_dtb_start); serial_console_init(); }
linux-master
arch/powerpc/boot/cuboot-hotfoot.c
// SPDX-License-Identifier: GPL-2.0-only /* * Bootwrapper for ePAPR compliant firmwares * * Copyright 2010 David Gibson <[email protected]>, IBM Corporation. * * Based on earlier bootwrappers by: * (c) Benjamin Herrenschmidt <[email protected]>, IBM Corp,\ * and * Scott Wood <[email protected]> * Copyright (c) 2007 Freescale Semiconductor, Inc. */ #include "ops.h" #include "stdio.h" #include "io.h" #include <libfdt.h> BSS_STACK(4096); #define EPAPR_SMAGIC 0x65504150 #define EPAPR_EMAGIC 0x45504150 static unsigned epapr_magic; static unsigned long ima_size; static unsigned long fdt_addr; static void platform_fixups(void) { if ((epapr_magic != EPAPR_EMAGIC) && (epapr_magic != EPAPR_SMAGIC)) fatal("r6 contained 0x%08x instead of ePAPR magic number\n", epapr_magic); if (ima_size < (unsigned long)_end) printf("WARNING: Image loaded outside IMA!" " (_end=%p, ima_size=0x%lx)\n", _end, ima_size); if (ima_size < fdt_addr) printf("WARNING: Device tree address is outside IMA!" "(fdt_addr=0x%lx, ima_size=0x%lx)\n", fdt_addr, ima_size); if (ima_size < fdt_addr + fdt_totalsize((void *)fdt_addr)) printf("WARNING: Device tree extends outside IMA!" " (fdt_addr=0x%lx, size=0x%x, ima_size=0x%lx\n", fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size); } void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { epapr_magic = r6; ima_size = r7; fdt_addr = r3; /* FIXME: we should process reserve entries */ simple_alloc_init(_end, ima_size - (unsigned long)_end, 32, 64); fdt_init((void *)fdt_addr); serial_console_init(); platform_ops.fixups = platform_fixups; }
linux-master
arch/powerpc/boot/epapr.c
// SPDX-License-Identifier: GPL-2.0-only /* * MPC8xx support functions * * Author: Scott Wood <[email protected]> * * Copyright (c) 2007 Freescale Semiconductor, Inc. */ #include "ops.h" #include "types.h" #include "fsl-soc.h" #include "mpc8xx.h" #include "stdio.h" #include "io.h" #define MPC8XX_PLPRCR (0x284/4) /* PLL and Reset Control Register */ /* Return system clock from crystal frequency */ u32 mpc885_get_clock(u32 crystal) { u32 *immr; u32 plprcr; int mfi, mfn, mfd, pdf; u32 ret; immr = fsl_get_immr(); if (!immr) { printf("mpc885_get_clock: Couldn't get IMMR base.\r\n"); return 0; } plprcr = in_be32(&immr[MPC8XX_PLPRCR]); mfi = (plprcr >> 16) & 15; if (mfi < 5) { printf("Warning: PLPRCR[MFI] value of %d out-of-bounds\r\n", mfi); mfi = 5; } pdf = (plprcr >> 1) & 0xf; mfd = (plprcr >> 22) & 0x1f; mfn = (plprcr >> 27) & 0x1f; ret = crystal * mfi; if (mfn != 0) ret += crystal * mfn / (mfd + 1); return ret / (pdf + 1); } /* Set common device tree fields based on the given clock frequencies. */ void mpc8xx_set_clocks(u32 sysclk) { void *node; dt_fixup_cpu_clocks(sysclk, sysclk / 16, sysclk); node = finddevice("/soc/cpm"); if (node) setprop(node, "clock-frequency", &sysclk, 4); node = finddevice("/soc/cpm/brg"); if (node) setprop(node, "clock-frequency", &sysclk, 4); } int mpc885_fixup_clocks(u32 crystal) { u32 sysclk = mpc885_get_clock(crystal); if (!sysclk) return 0; mpc8xx_set_clocks(sysclk); return 1; }
linux-master
arch/powerpc/boot/mpc8xx.c
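mpc885_get_clock() decodes PLPRCR into MFI, MFN, MFD and PDF and computes sysclk = crystal * (MFI + MFN / (MFD + 1)) / (PDF + 1). A standalone sketch of just that arithmetic, using made-up field values instead of a real register read over the IMMR:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as mpc885_get_clock(), taking the already
 * extracted PLL fields as parameters. */
static uint32_t pll_output(uint32_t crystal, int mfi, int mfn, int mfd, int pdf)
{
	uint32_t ret = crystal * mfi;

	if (mfn != 0)
		ret += crystal * mfn / (mfd + 1);

	return ret / (pdf + 1);
}

int main(void)
{
	/* Hypothetical example: 10 MHz crystal, MFI=8, no fractional
	 * part, no post-divide -> 80 MHz system clock. */
	printf("%u\n", pll_output(10000000, 8, 0, 0, 0));

	/* With MFN=1, MFD=1 the multiplier becomes 8.5 -> 85 MHz. */
	printf("%u\n", pll_output(10000000, 8, 1, 1, 0));
	return 0;
}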
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright © 2013 Tony Breeds IBM Corporation * Copyright © 2013 Alistair Popple IBM Corporation * * Based on earlier code: * Copyright (C) Paul Mackerras 1997. * * Matt Porter <[email protected]> * Copyright 2002-2005 MontaVista Software Inc. * * Eugene Surovegin <[email protected]> or <[email protected]> * Copyright (c) 2003, 2004 Zultys Technologies * * Copyright 2007 David Gibson, IBM Corporation. * Copyright 2010 Ben. Herrenschmidt, IBM Corporation. * Copyright © 2011 David Kleikamp IBM Corporation */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdlib.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "reg.h" #include "io.h" #include "dcr.h" #include "4xx.h" #include "44x.h" #include "libfdt.h" BSS_STACK(4096); #define SPRN_PIR 0x11E /* Processor Identification Register */ #define USERDATA_LEN 256 /* Length of userdata passed in by PIBS */ #define MAX_RANKS 0x4 #define DDR3_MR0CF 0x80010011U #define CCTL0_MCO2 0x8000080FU #define CCTL0_MCO3 0x80000810U #define CCTL0_MCO4 0x80000811U #define CCTL0_MCO5 0x80000812U #define CCTL0_MCO6 0x80000813U static unsigned long long ibm_akebono_memsize; static long long unsigned mac_addr; static unsigned long long ibm_akebono_detect_memsize(void) { u32 reg; unsigned i; unsigned long long memsize = 0; for (i = 0; i < MAX_RANKS; i++) { reg = mfdcrx(DDR3_MR0CF + i); if (!(reg & 1)) continue; reg &= 0x0000f000; reg >>= 12; memsize += (0x800000ULL << reg); } return memsize; } static void ibm_akebono_fixups(void) { void *emac; u32 reg; dt_fixup_memory(0x0ULL, ibm_akebono_memsize); /* Fixup the SD timeout frequency */ mtdcrx(CCTL0_MCO4, 0x1); /* Disable SD high-speed mode (which seems to be broken) */ reg = mfdcrx(CCTL0_MCO2) & ~0x2; mtdcrx(CCTL0_MCO2, reg); /* Set the MAC address */ emac = finddevice("/plb/opb/ethernet"); if (emac > 0) { if (mac_addr) setprop(emac, "local-mac-address", ((u8 *) &mac_addr) + 2 , 6); } } void platform_init(char *userdata) { unsigned long end_of_ram, avail_ram; u32 pir_reg; int node, size; const u32 *timebase; int len, i, userdata_len; char *end; userdata[USERDATA_LEN - 1] = '\0'; userdata_len = strlen(userdata); for (i = 0; i < userdata_len - 15; i++) { if (strncmp(&userdata[i], "local-mac-addr=", 15) == 0) { if (i > 0 && userdata[i - 1] != ' ') { /* We've only found a substring ending * with local-mac-addr so this isn't * our mac address. */ continue; } mac_addr = strtoull(&userdata[i + 15], &end, 16); /* Remove the "local-mac-addr=<...>" from the kernel * command line, including the tailing space if * present. 
*/ if (*end == ' ') end++; len = ((int) end) - ((int) &userdata[i]); memmove(&userdata[i], end, userdata_len - (len + i) + 1); break; } } loader_info.cmdline = userdata; loader_info.cmdline_len = 256; ibm_akebono_memsize = ibm_akebono_detect_memsize(); if (ibm_akebono_memsize >> 32) end_of_ram = ~0UL; else end_of_ram = ibm_akebono_memsize; avail_ram = end_of_ram - (unsigned long)_end; simple_alloc_init(_end, avail_ram, 128, 64); platform_ops.fixups = ibm_akebono_fixups; platform_ops.exit = ibm44x_dbcr_reset; pir_reg = mfspr(SPRN_PIR); /* Make sure FDT blob is sane */ if (fdt_check_header(_dtb_start) != 0) fatal("Invalid device tree blob\n"); node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type", "cpu", sizeof("cpu")); if (!node) fatal("Cannot find cpu node\n"); timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size); if (timebase && (size == 4)) timebase_period_ns = 1000000000 / *timebase; fdt_set_boot_cpuid_phys(_dtb_start, pir_reg); fdt_init(_dtb_start); serial_console_init(); }
linux-master
arch/powerpc/boot/treeboot-akebono.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/powerpc/boot/ugecon.c * * USB Gecko bootwrapper console. * Copyright (C) 2008-2009 The GameCube Linux Team * Copyright (C) 2008,2009 Albert Herranz */ #include <stddef.h> #include "stdio.h" #include "types.h" #include "io.h" #include "ops.h" #define EXI_CLK_32MHZ 5 #define EXI_CSR 0x00 #define EXI_CSR_CLKMASK (0x7<<4) #define EXI_CSR_CLK_32MHZ (EXI_CLK_32MHZ<<4) #define EXI_CSR_CSMASK (0x7<<7) #define EXI_CSR_CS_0 (0x1<<7) /* Chip Select 001 */ #define EXI_CR 0x0c #define EXI_CR_TSTART (1<<0) #define EXI_CR_WRITE (1<<2) #define EXI_CR_READ_WRITE (2<<2) #define EXI_CR_TLEN(len) (((len)-1)<<4) #define EXI_DATA 0x10 /* virtual address base for input/output, retrieved from device tree */ static void *ug_io_base; static u32 ug_io_transaction(u32 in) { u32 *csr_reg = ug_io_base + EXI_CSR; u32 *data_reg = ug_io_base + EXI_DATA; u32 *cr_reg = ug_io_base + EXI_CR; u32 csr, data, cr; /* select */ csr = EXI_CSR_CLK_32MHZ | EXI_CSR_CS_0; out_be32(csr_reg, csr); /* read/write */ data = in; out_be32(data_reg, data); cr = EXI_CR_TLEN(2) | EXI_CR_READ_WRITE | EXI_CR_TSTART; out_be32(cr_reg, cr); while (in_be32(cr_reg) & EXI_CR_TSTART) barrier(); /* deselect */ out_be32(csr_reg, 0); data = in_be32(data_reg); return data; } static int ug_is_txfifo_ready(void) { return ug_io_transaction(0xc0000000) & 0x04000000; } static void ug_raw_putc(char ch) { ug_io_transaction(0xb0000000 | (ch << 20)); } static void ug_putc(char ch) { int count = 16; if (!ug_io_base) return; while (!ug_is_txfifo_ready() && count--) barrier(); if (count >= 0) ug_raw_putc(ch); } void ug_console_write(const char *buf, int len) { char *b = (char *)buf; while (len--) { if (*b == '\n') ug_putc('\r'); ug_putc(*b++); } } static int ug_is_adapter_present(void) { if (!ug_io_base) return 0; return ug_io_transaction(0x90000000) == 0x04700000; } static void *ug_grab_exi_io_base(void) { u32 v; void *devp; devp = find_node_by_compatible(NULL, "nintendo,flipper-exi"); if (devp == NULL) goto err_out; if (getprop(devp, "virtual-reg", &v, sizeof(v)) != sizeof(v)) goto err_out; return (void *)v; err_out: return NULL; } void *ug_probe(void) { void *exi_io_base; int i; exi_io_base = ug_grab_exi_io_base(); if (!exi_io_base) return NULL; /* look for a usbgecko on memcard slots A and B */ for (i = 0; i < 2; i++) { ug_io_base = exi_io_base + 0x14 * i; if (ug_is_adapter_present()) break; } if (i == 2) ug_io_base = NULL; return ug_io_base; }
linux-master
arch/powerpc/boot/ugecon.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) Paul Mackerras 1997. * * Updates for PPC64 by Todd Inglett, Dave Engebretsen & Peter Bergner. */ #include <stdarg.h> #include <stddef.h> #include "elf.h" #include "page.h" #include "string.h" #include "stdio.h" #include "ops.h" #include "reg.h" struct addr_range { void *addr; unsigned long size; }; #undef DEBUG static struct addr_range prep_kernel(void) { char elfheader[256]; unsigned char *vmlinuz_addr = (unsigned char *)_vmlinux_start; unsigned long vmlinuz_size = _vmlinux_end - _vmlinux_start; void *addr = 0; struct elf_info ei; long len; int uncompressed_image = 0; len = partial_decompress(vmlinuz_addr, vmlinuz_size, elfheader, sizeof(elfheader), 0); /* assume uncompressed data if -1 is returned */ if (len == -1) { uncompressed_image = 1; memcpy(elfheader, vmlinuz_addr, sizeof(elfheader)); printf("No valid compressed data found, assume uncompressed data\n\r"); } if (!parse_elf64(elfheader, &ei) && !parse_elf32(elfheader, &ei)) fatal("Error: not a valid PPC32 or PPC64 ELF file!\n\r"); if (platform_ops.image_hdr) platform_ops.image_hdr(elfheader); /* We need to alloc the memsize: gzip will expand the kernel * text/data, then possible rubbish we don't care about. But * the kernel bss must be claimed (it will be zero'd by the * kernel itself) */ printf("Allocating 0x%lx bytes for kernel...\n\r", ei.memsize); if (platform_ops.vmlinux_alloc) { addr = platform_ops.vmlinux_alloc(ei.memsize); } else { /* * Check if the kernel image (without bss) would overwrite the * bootwrapper. The device tree has been moved in fdt_init() * to an area allocated with malloc() (somewhere past _end). */ if ((unsigned long)_start < ei.loadsize) fatal("Insufficient memory for kernel at address 0!" " (_start=%p, uncompressed size=%08lx)\n\r", _start, ei.loadsize); if ((unsigned long)_end < ei.memsize) fatal("The final kernel image would overwrite the " "device tree\n\r"); } if (uncompressed_image) { memcpy(addr, vmlinuz_addr + ei.elfoffset, ei.loadsize); printf("0x%lx bytes of uncompressed data copied\n\r", ei.loadsize); goto out; } /* Finally, decompress the kernel */ printf("Decompressing (0x%p <- 0x%p:0x%p)...\n\r", addr, vmlinuz_addr, vmlinuz_addr+vmlinuz_size); len = partial_decompress(vmlinuz_addr, vmlinuz_size, addr, ei.loadsize, ei.elfoffset); if (len < 0) fatal("Decompression failed with error code %ld\n\r", len); if (len != ei.loadsize) fatal("Decompression error: got 0x%lx bytes, expected 0x%lx.\n\r", len, ei.loadsize); printf("Done! Decompressed 0x%lx bytes\n\r", len); out: flush_cache(addr, ei.loadsize); return (struct addr_range){addr, ei.memsize}; } static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen, unsigned long initrd_addr, unsigned long initrd_size) { /* If we have an image attached to us, it overrides anything * supplied by the loader. */ if (&_initrd_end > &_initrd_start) { printf("Attached initrd image at 0x%p-0x%p\n\r", _initrd_start, _initrd_end); initrd_addr = (unsigned long)_initrd_start; initrd_size = _initrd_end - _initrd_start; } else if (initrd_size > 0) { printf("Using loader supplied ramdisk at 0x%lx-0x%lx\n\r", initrd_addr, initrd_addr + initrd_size); } /* If there's no initrd at all, we're done */ if (! initrd_size) return (struct addr_range){0, 0}; /* * If the initrd is too low it will be clobbered when the * kernel relocates to its final location. In this case, * allocate a safer place and move it. 
*/ if (initrd_addr < vmlinux.size) { void *old_addr = (void *)initrd_addr; printf("Allocating 0x%lx bytes for initrd ...\n\r", initrd_size); initrd_addr = (unsigned long)malloc(initrd_size); if (! initrd_addr) fatal("Can't allocate memory for initial " "ramdisk !\n\r"); printf("Relocating initrd 0x%lx <- 0x%p (0x%lx bytes)\n\r", initrd_addr, old_addr, initrd_size); memmove((void *)initrd_addr, old_addr, initrd_size); } printf("initrd head: 0x%lx\n\r", *((unsigned long *)initrd_addr)); /* Tell the kernel initrd address via device tree */ setprop_val(chosen, "linux,initrd-start", (u32)(initrd_addr)); setprop_val(chosen, "linux,initrd-end", (u32)(initrd_addr+initrd_size)); return (struct addr_range){(void *)initrd_addr, initrd_size}; } #ifdef __powerpc64__ static void prep_esm_blob(struct addr_range vmlinux, void *chosen) { unsigned long esm_blob_addr, esm_blob_size; /* Do we have an ESM (Enter Secure Mode) blob? */ if (&_esm_blob_end <= &_esm_blob_start) return; printf("Attached ESM blob at 0x%p-0x%p\n\r", _esm_blob_start, _esm_blob_end); esm_blob_addr = (unsigned long)_esm_blob_start; esm_blob_size = _esm_blob_end - _esm_blob_start; /* * If the ESM blob is too low it will be clobbered when the * kernel relocates to its final location. In this case, * allocate a safer place and move it. */ if (esm_blob_addr < vmlinux.size) { void *old_addr = (void *)esm_blob_addr; printf("Allocating 0x%lx bytes for esm_blob ...\n\r", esm_blob_size); esm_blob_addr = (unsigned long)malloc(esm_blob_size); if (!esm_blob_addr) fatal("Can't allocate memory for ESM blob !\n\r"); printf("Relocating ESM blob 0x%lx <- 0x%p (0x%lx bytes)\n\r", esm_blob_addr, old_addr, esm_blob_size); memmove((void *)esm_blob_addr, old_addr, esm_blob_size); } /* Tell the kernel ESM blob address via device tree. */ setprop_val(chosen, "linux,esm-blob-start", (u32)(esm_blob_addr)); setprop_val(chosen, "linux,esm-blob-end", (u32)(esm_blob_addr + esm_blob_size)); } #else static inline void prep_esm_blob(struct addr_range vmlinux, void *chosen) { } #endif /* A buffer that may be edited by tools operating on a zImage binary so as to * edit the command line passed to vmlinux (by setting /chosen/bootargs). * The buffer is put in it's own section so that tools may locate it easier. */ static char cmdline[BOOT_COMMAND_LINE_SIZE] __attribute__((__section__("__builtin_cmdline"))); static void prep_cmdline(void *chosen) { unsigned int getline_timeout = 5000; int v; int n; /* Wait-for-input time */ n = getprop(chosen, "linux,cmdline-timeout", &v, sizeof(v)); if (n == sizeof(v)) getline_timeout = v; if (cmdline[0] == '\0') getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1); printf("\n\rLinux/PowerPC load: %s", cmdline); /* If possible, edit the command line */ if (console_ops.edit_cmdline && getline_timeout) console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE, getline_timeout); printf("\n\r"); /* Put the command line back into the devtree for the kernel */ setprop_str(chosen, "bootargs", cmdline); } struct platform_ops platform_ops; struct dt_ops dt_ops; struct console_ops console_ops; struct loader_info loader_info; void start(void) { struct addr_range vmlinux, initrd; kernel_entry_t kentry; unsigned long ft_addr = 0; void *chosen; /* Do this first, because malloc() could clobber the loader's * command line. 
Only use the loader command line if a * built-in command line wasn't set by an external tool */ if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0')) memmove(cmdline, loader_info.cmdline, min(loader_info.cmdline_len, BOOT_COMMAND_LINE_SIZE-1)); if (console_ops.open && (console_ops.open() < 0)) exit(); if (platform_ops.fixups) platform_ops.fixups(); printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", _start, get_sp()); /* Ensure that the device tree has a /chosen node */ chosen = finddevice("/chosen"); if (!chosen) chosen = create_node(NULL, "chosen"); vmlinux = prep_kernel(); initrd = prep_initrd(vmlinux, chosen, loader_info.initrd_addr, loader_info.initrd_size); prep_esm_blob(vmlinux, chosen); prep_cmdline(chosen); printf("Finalizing device tree..."); if (dt_ops.finalize) ft_addr = dt_ops.finalize(); if (ft_addr) printf(" flat tree at 0x%lx\n\r", ft_addr); else printf(" using OF tree (promptr=%p)\n\r", loader_info.promptr); if (console_ops.close) console_ops.close(); kentry = (kernel_entry_t) vmlinux.addr; if (ft_addr) { if(platform_ops.kentry) platform_ops.kentry(ft_addr, vmlinux.addr); else kentry(ft_addr, 0, NULL); } else kentry((unsigned long)initrd.addr, initrd.size, loader_info.promptr); /* console closed so printf in fatal below may not work */ fatal("Error: Linux kernel returned to zImage boot wrapper!\n\r"); }
linux-master
arch/powerpc/boot/main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for Sam440ep based off bamboo.c code
 * original copyrights below
 *
 * Author: Josh Boyer <[email protected]>
 *
 * Copyright 2007 IBM Corporation
 *
 * Based on cuboot-ebony.c
 *
 * Modified from cuboot-bamboo.c for sam440ep:
 * Copyright 2008 Giuseppe Coviello <[email protected]>
 */

#include "ops.h"
#include "stdio.h"
#include "44x.h"
#include "4xx.h"
#include "cuboot.h"

#define TARGET_4xx
#define TARGET_44x
#include "ppcboot.h"

static bd_t bd;

static void sam440ep_fixups(void)
{
	unsigned long sysclk = 66666666;

	ibm440ep_fixup_clocks(sysclk, 11059200, 25000000);
	ibm4xx_sdram_fixup_memsize();
	ibm4xx_quiesce_eth((u32 *)0xef600e00, (u32 *)0xef600f00);
	dt_fixup_mac_addresses(&bd.bi_enetaddr, &bd.bi_enet1addr);
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	platform_ops.fixups = sam440ep_fixups;
	platform_ops.exit = ibm44x_dbcr_reset;
	fdt_init(_dtb_start);
	serial_console_init();
}
linux-master
arch/powerpc/boot/cuboot-sam440ep.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Embedded Planet EP88xC with PlanetCore firmware
 *
 * Author: Scott Wood <[email protected]>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "ops.h"
#include "stdio.h"
#include "planetcore.h"
#include "mpc8xx.h"

static char *table;
static u64 mem_size;

static void platform_fixups(void)
{
	u64 val;

	dt_fixup_memory(0, mem_size);
	planetcore_set_mac_addrs(table);

	if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) {
		printf("No PlanetCore crystal frequency key.\r\n");
		return;
	}

	mpc885_fixup_clocks(val);
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
	table = (char *)r3;
	planetcore_prepare_table(table);

	if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
		return;

	mem_size *= 1024 * 1024;
	simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64);

	fdt_init(_dtb_start);

	planetcore_set_stdout_path(table);

	serial_console_init();

	platform_ops.fixups = platform_fixups;
}
linux-master
arch/powerpc/boot/ep88xc.c
// SPDX-License-Identifier: GPL-2.0-only /* * Embedded Planet EP8248E with PlanetCore firmware * * Author: Scott Wood <[email protected]> * * Copyright (c) 2007 Freescale Semiconductor, Inc. */ #include "ops.h" #include "stdio.h" #include "planetcore.h" #include "pq2.h" static char *table; static u64 mem_size; #include <io.h> static void platform_fixups(void) { u64 val; dt_fixup_memory(0, mem_size); planetcore_set_mac_addrs(table); if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) { printf("No PlanetCore crystal frequency key.\r\n"); return; } pq2_fixup_clocks(val); } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { table = (char *)r3; planetcore_prepare_table(table); if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size)) return; mem_size *= 1024 * 1024; simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64); fdt_init(_dtb_start); planetcore_set_stdout_path(table); serial_console_init(); platform_ops.fixups = platform_fixups; }
linux-master
arch/powerpc/boot/ep8248e.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 PIKA Technologies
 *   Sean MacLennan <[email protected]>
 */

#include "ops.h"
#include "4xx.h"
#include "cuboot.h"
#include "stdio.h"

#define TARGET_4xx
#define TARGET_44x
#include "ppcboot.h"

static bd_t bd;

static void warp_fixups(void)
{
	ibm440ep_fixup_clocks(66000000, 11059200, 50000000);
	ibm4xx_sdram_fixup_memsize();
	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();

	platform_ops.fixups = warp_fixups;
	platform_ops.exit = ibm44x_dbcr_reset;

	fdt_init(_dtb_start);
	serial_console_init();
}
linux-master
arch/powerpc/boot/cuboot-warp.c
// SPDX-License-Identifier: GPL-2.0-or-later

#include <stddef.h>
#include "stdio.h"
#include "types.h"
#include "io.h"
#include "ops.h"

BSS_STACK(8192);

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
{
	unsigned long heapsize = 16*1024*1024 - (unsigned long)_end;

	/*
	 * Disable interrupts and turn off MSR_RI, since we'll
	 * shortly be overwriting the interrupt vectors.
	 */
	__asm__ volatile("mtmsrd %0,1" : : "r" (0));

	simple_alloc_init(_end, heapsize, 32, 64);
	fdt_init(_dtb_start);
	serial_console_init();
}
linux-master
arch/powerpc/boot/microwatt.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/boot/gamecube.c
 *
 * Nintendo GameCube bootwrapper support
 * Copyright (C) 2004-2009 The GameCube Linux Team
 * Copyright (C) 2008,2009 Albert Herranz
 */

#include <stddef.h>
#include "stdio.h"
#include "types.h"
#include "io.h"
#include "ops.h"

#include "ugecon.h"

BSS_STACK(8192);

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
{
	u32 heapsize = 16*1024*1024 - (u32)_end;

	simple_alloc_init(_end, heapsize, 32, 64);
	fdt_init(_dtb_start);

	if (ug_probe())
		console_ops.write = ug_console_write;
}
linux-master
arch/powerpc/boot/gamecube.c
// SPDX-License-Identifier: GPL-2.0-only /* * Old U-boot compatibility for PowerQUICC II * (a.k.a. 82xx with CPM, not the 8240 family of chips) * * Author: Scott Wood <[email protected]> * * Copyright (c) 2007 Freescale Semiconductor, Inc. */ #include "ops.h" #include "stdio.h" #include "cuboot.h" #include "io.h" #include "fsl-soc.h" #define TARGET_CPM2 #define TARGET_HAS_ETH1 #include "ppcboot.h" static bd_t bd; struct cs_range { u32 csnum; u32 base; /* must be zero */ u32 addr; u32 size; }; struct pci_range { u32 flags; u32 pci_addr[2]; u32 phys_addr; u32 size[2]; }; struct cs_range cs_ranges_buf[MAX_PROP_LEN / sizeof(struct cs_range)]; struct pci_range pci_ranges_buf[MAX_PROP_LEN / sizeof(struct pci_range)]; /* Different versions of u-boot put the BCSR in different places, and * some don't set up the PCI PIC at all, so we assume the device tree is * sane and update the BRx registers appropriately. * * For any node defined as compatible with fsl,pq2-localbus, * #address/#size must be 2/1 for the localbus, and 1/1 for the parent bus. * Ranges must be for whole chip selects. */ static void update_cs_ranges(void) { void *bus_node, *parent_node; u32 *ctrl_addr; unsigned long ctrl_size; u32 naddr, nsize; int len; int i; bus_node = finddevice("/localbus"); if (!bus_node || !dt_is_compatible(bus_node, "fsl,pq2-localbus")) return; dt_get_reg_format(bus_node, &naddr, &nsize); if (naddr != 2 || nsize != 1) goto err; parent_node = get_parent(bus_node); if (!parent_node) goto err; dt_get_reg_format(parent_node, &naddr, &nsize); if (naddr != 1 || nsize != 1) goto err; if (!dt_xlate_reg(bus_node, 0, (unsigned long *)&ctrl_addr, &ctrl_size)) goto err; len = getprop(bus_node, "ranges", cs_ranges_buf, sizeof(cs_ranges_buf)); for (i = 0; i < len / sizeof(struct cs_range); i++) { u32 base, option; int cs = cs_ranges_buf[i].csnum; if (cs >= ctrl_size / 8) goto err; if (cs_ranges_buf[i].base != 0) goto err; base = in_be32(&ctrl_addr[cs * 2]); /* If CS is already valid, use the existing flags. * Otherwise, guess a sane default. */ if (base & 1) { base &= 0x7fff; option = in_be32(&ctrl_addr[cs * 2 + 1]) & 0x7fff; } else { base = 0x1801; option = 0x10; } out_be32(&ctrl_addr[cs * 2], 0); out_be32(&ctrl_addr[cs * 2 + 1], option | ~(cs_ranges_buf[i].size - 1)); out_be32(&ctrl_addr[cs * 2], base | cs_ranges_buf[i].addr); } return; err: printf("Bad /localbus node\r\n"); } /* Older u-boots don't set PCI up properly. Update the hardware to match * the device tree. The prefetch mem region and non-prefetch mem region * must be contiguous in the host bus. As required by the PCI binding, * PCI #addr/#size must be 3/2. The parent bus must be 1/1. Only * 32-bit PCI is supported. All three region types (prefetchable mem, * non-prefetchable mem, and I/O) must be present. 
*/ static void fixup_pci(void) { struct pci_range *mem = NULL, *mmio = NULL, *io = NULL, *mem_base = NULL; u32 *pci_regs[3]; u8 *soc_regs; int i, len; void *node, *parent_node; u32 naddr, nsize, mem_pow2, mem_mask; node = finddevice("/pci"); if (!node || !dt_is_compatible(node, "fsl,pq2-pci")) return; for (i = 0; i < 3; i++) if (!dt_xlate_reg(node, i, (unsigned long *)&pci_regs[i], NULL)) goto err; soc_regs = (u8 *)fsl_get_immr(); if (!soc_regs) goto unhandled; dt_get_reg_format(node, &naddr, &nsize); if (naddr != 3 || nsize != 2) goto err; parent_node = get_parent(node); if (!parent_node) goto err; dt_get_reg_format(parent_node, &naddr, &nsize); if (naddr != 1 || nsize != 1) goto unhandled; len = getprop(node, "ranges", pci_ranges_buf, sizeof(pci_ranges_buf)); for (i = 0; i < len / sizeof(struct pci_range); i++) { u32 flags = pci_ranges_buf[i].flags & 0x43000000; if (flags == 0x42000000) mem = &pci_ranges_buf[i]; else if (flags == 0x02000000) mmio = &pci_ranges_buf[i]; else if (flags == 0x01000000) io = &pci_ranges_buf[i]; } if (!mem || !mmio || !io) goto unhandled; if (mem->size[1] != mmio->size[1]) goto unhandled; if (mem->size[1] & (mem->size[1] - 1)) goto unhandled; if (io->size[1] & (io->size[1] - 1)) goto unhandled; if (mem->phys_addr + mem->size[1] == mmio->phys_addr) mem_base = mem; else if (mmio->phys_addr + mmio->size[1] == mem->phys_addr) mem_base = mmio; else goto unhandled; out_be32(&pci_regs[1][0], mem_base->phys_addr | 1); out_be32(&pci_regs[2][0], ~(mem->size[1] + mmio->size[1] - 1)); out_be32(&pci_regs[1][1], io->phys_addr | 1); out_be32(&pci_regs[2][1], ~(io->size[1] - 1)); out_le32(&pci_regs[0][0], mem->pci_addr[1] >> 12); out_le32(&pci_regs[0][2], mem->phys_addr >> 12); out_le32(&pci_regs[0][4], (~(mem->size[1] - 1) >> 12) | 0xa0000000); out_le32(&pci_regs[0][6], mmio->pci_addr[1] >> 12); out_le32(&pci_regs[0][8], mmio->phys_addr >> 12); out_le32(&pci_regs[0][10], (~(mmio->size[1] - 1) >> 12) | 0x80000000); out_le32(&pci_regs[0][12], io->pci_addr[1] >> 12); out_le32(&pci_regs[0][14], io->phys_addr >> 12); out_le32(&pci_regs[0][16], (~(io->size[1] - 1) >> 12) | 0xc0000000); /* Inbound translation */ out_le32(&pci_regs[0][58], 0); out_le32(&pci_regs[0][60], 0); mem_pow2 = 1 << (__ilog2_u32(bd.bi_memsize - 1) + 1); mem_mask = ~(mem_pow2 - 1) >> 12; out_le32(&pci_regs[0][62], 0xa0000000 | mem_mask); /* If PCI is disabled, drive RST high to enable. */ if (!(in_le32(&pci_regs[0][32]) & 1)) { /* Tpvrh (Power valid to RST# high) 100 ms */ udelay(100000); out_le32(&pci_regs[0][32], 1); /* Trhfa (RST# high to first cfg access) 2^25 clocks */ udelay(1020000); } /* Enable bus master and memory access */ out_le32(&pci_regs[0][64], 0x80000004); out_le32(&pci_regs[0][65], in_le32(&pci_regs[0][65]) | 6); /* Park the bus on PCI, and elevate PCI's arbitration priority, * as required by section 9.6 of the user's manual. 
*/ out_8(&soc_regs[0x10028], 3); out_be32((u32 *)&soc_regs[0x1002c], 0x01236745); return; err: printf("Bad PCI node -- using existing firmware setup.\r\n"); return; unhandled: printf("Unsupported PCI node -- using existing firmware setup.\r\n"); } static void pq2_platform_fixups(void) { void *node; dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); dt_fixup_mac_addresses(bd.bi_enetaddr, bd.bi_enet1addr); dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq); node = finddevice("/soc/cpm"); if (node) setprop(node, "clock-frequency", &bd.bi_cpmfreq, 4); node = finddevice("/soc/cpm/brg"); if (node) setprop(node, "clock-frequency", &bd.bi_brgfreq, 4); update_cs_ranges(); fixup_pci(); } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); fdt_init(_dtb_start); serial_console_init(); platform_ops.fixups = pq2_platform_fixups; }
linux-master
arch/powerpc/boot/cuboot-pq2.c
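fixup_pci() classifies each entry of the PCI "ranges" property by masking the flags cell with 0x43000000: 0x42000000 selects prefetchable memory, 0x02000000 non-prefetchable memory and 0x01000000 I/O space, while anything else (such as 64-bit memory) is left unhandled. A host-side sketch of that classification on hypothetical flag words:

#include <stdio.h>
#include <stdint.h>

/* Same masking as fixup_pci(): keep the prefetchable bit and the
 * space code, ignore the rest of the phys.hi cell. */
static const char *classify(uint32_t flags)
{
	switch (flags & 0x43000000) {
	case 0x42000000: return "prefetchable memory";
	case 0x02000000: return "memory";
	case 0x01000000: return "I/O";
	default:         return "unhandled";
	}
}

int main(void)
{
	/* Hypothetical phys.hi words as they might appear in a
	 * pq2 PCI node's ranges property. */
	uint32_t samples[] = { 0x42000000, 0x02000000, 0x81000000, 0x43000000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08x -> %s\n", samples[i], classify(samples[i]));
	return 0;
}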
// SPDX-License-Identifier: GPL-2.0-only /* * Old U-boot compatibility for 85xx * * Author: Scott Wood <[email protected]> * * Copyright (c) 2007 Freescale Semiconductor, Inc. */ #include "ops.h" #include "stdio.h" #include "cuboot.h" #define TARGET_85xx #define TARGET_CPM2 #include "ppcboot.h" static bd_t bd; static void platform_fixups(void) { void *devp; dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); dt_fixup_mac_address_by_alias("ethernet2", bd.bi_enet2addr); dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 8, bd.bi_busfreq); /* Unfortunately, the specific model number is encoded in the * soc node name in existing dts files -- once that is fixed, * this can do a simple path lookup. */ devp = find_node_by_devtype(NULL, "soc"); if (devp) { void *serial = NULL; setprop(devp, "bus-frequency", &bd.bi_busfreq, sizeof(bd.bi_busfreq)); while ((serial = find_node_by_devtype(serial, "serial"))) { if (get_parent(serial) != devp) continue; setprop(serial, "clock-frequency", &bd.bi_busfreq, sizeof(bd.bi_busfreq)); } } devp = find_node_by_compatible(NULL, "fsl,cpm2-brg"); if (devp) setprop(devp, "clock-frequency", &bd.bi_brgfreq, sizeof(bd.bi_brgfreq)); } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); fdt_init(_dtb_start); serial_console_init(); platform_ops.fixups = platform_fixups; }
linux-master
arch/powerpc/boot/cuboot-85xx-cpm2.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for 824x
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "ops.h"
#include "stdio.h"
#include "cuboot.h"

#define TARGET_824x
#include "ppcboot.h"

static bd_t bd;

static void platform_fixups(void)
{
	void *soc;

	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
	dt_fixup_mac_addresses(bd.bi_enetaddr);
	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq);

	soc = find_node_by_devtype(NULL, "soc");
	if (soc) {
		void *serial = NULL;

		setprop(soc, "bus-frequency", &bd.bi_busfreq,
			sizeof(bd.bi_busfreq));

		while ((serial = find_node_by_devtype(serial, "serial"))) {
			if (get_parent(serial) != soc)
				continue;

			setprop(serial, "clock-frequency", &bd.bi_busfreq,
				sizeof(bd.bi_busfreq));
		}
	}
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	fdt_init(_dtb_start);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
linux-master
arch/powerpc/boot/cuboot-824x.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) Paul Mackerras 1997. */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "of.h" /* Value picked to match that used by yaboot */ #define PROG_START 0x01400000 /* only used on 64-bit systems */ #define RAM_END (512<<20) /* Fixme: use OF */ #define ONE_MB 0x100000 static unsigned long claim_base; void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7); static void *of_try_claim(unsigned long size) { unsigned long addr = 0; if (claim_base == 0) claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB); for(; claim_base < RAM_END; claim_base += ONE_MB) { #ifdef DEBUG printf(" trying: 0x%08lx\n\r", claim_base); #endif addr = (unsigned long) of_claim(claim_base, size, 0); if (addr != PROM_ERROR) break; } if (addr == 0) return NULL; claim_base = PAGE_ALIGN(claim_base + size); return (void *)addr; } static void of_image_hdr(const void *hdr) { const Elf64_Ehdr *elf64 = hdr; if (elf64->e_ident[EI_CLASS] == ELFCLASS64) { /* * Maintain a "magic" minimum address. This keeps some older * firmware platforms running. */ if (claim_base < PROG_START) claim_base = PROG_START; } } static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr) { platform_ops.image_hdr = of_image_hdr; platform_ops.malloc = of_try_claim; platform_ops.exit = of_exit; platform_ops.vmlinux_alloc = of_vmlinux_alloc; dt_ops.finddevice = of_finddevice; dt_ops.getprop = of_getprop; dt_ops.setprop = of_setprop; of_console_init(); of_init(promptr); loader_info.promptr = promptr; if (a1 && a2 && a2 != 0xdeadbeef) { loader_info.initrd_addr = a1; loader_info.initrd_size = a2; } } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { /* Detect OF vs. ePAPR boot */ if (r5) of_platform_init(r3, r4, (void *)r5); else epapr_platform_init(r3, r4, r5, r6, r7); }
linux-master
arch/powerpc/boot/of.c
// SPDX-License-Identifier: GPL-2.0 /* * Implement primitive realloc(3) functionality. * * Author: Mark A. Greer <[email protected]> * * 2006 (c) MontaVista, Software, Inc. */ #include <stddef.h> #include "types.h" #include "page.h" #include "string.h" #include "ops.h" #define ENTRY_BEEN_USED 0x01 #define ENTRY_IN_USE 0x02 static struct alloc_info { unsigned long flags; unsigned long base; unsigned long size; } *alloc_tbl; static unsigned long tbl_entries; static unsigned long alloc_min; static unsigned long next_base; static unsigned long space_left; /* * First time an entry is used, its base and size are set. * An entry can be freed and re-malloc'd but its base & size don't change. * Should be smart enough for needs of bootwrapper. */ static void *simple_malloc(unsigned long size) { unsigned long i; struct alloc_info *p = alloc_tbl; if (size == 0) goto err_out; size = _ALIGN_UP(size, alloc_min); for (i=0; i<tbl_entries; i++, p++) if (!(p->flags & ENTRY_BEEN_USED)) { /* never been used */ if (size <= space_left) { p->base = next_base; p->size = size; p->flags = ENTRY_BEEN_USED | ENTRY_IN_USE; next_base += size; space_left -= size; return (void *)p->base; } goto err_out; /* not enough space left */ } /* reuse an entry keeping same base & size */ else if (!(p->flags & ENTRY_IN_USE) && (size <= p->size)) { p->flags |= ENTRY_IN_USE; return (void *)p->base; } err_out: return NULL; } static struct alloc_info *simple_find_entry(void *ptr) { unsigned long i; struct alloc_info *p = alloc_tbl; for (i=0; i<tbl_entries; i++,p++) { if (!(p->flags & ENTRY_BEEN_USED)) break; if ((p->flags & ENTRY_IN_USE) && (p->base == (unsigned long)ptr)) return p; } return NULL; } static void simple_free(void *ptr) { struct alloc_info *p = simple_find_entry(ptr); if (p != NULL) p->flags &= ~ENTRY_IN_USE; } /* * Change size of area pointed to by 'ptr' to 'size'. * If 'ptr' is NULL, then its a malloc(). If 'size' is 0, then its a free(). * 'ptr' must be NULL or a pointer to a non-freed area previously returned by * simple_realloc() or simple_malloc(). */ static void *simple_realloc(void *ptr, unsigned long size) { struct alloc_info *p; void *new; if (size == 0) { simple_free(ptr); return NULL; } if (ptr == NULL) return simple_malloc(size); p = simple_find_entry(ptr); if (p == NULL) /* ptr not from simple_malloc/simple_realloc */ return NULL; if (size <= p->size) /* fits in current block */ return ptr; new = simple_malloc(size); memcpy(new, ptr, p->size); simple_free(ptr); return new; } /* * Returns addr of first byte after heap so caller can see if it took * too much space. If so, change args & try again. */ void *simple_alloc_init(char *base, unsigned long heap_size, unsigned long granularity, unsigned long max_allocs) { unsigned long heap_base, tbl_size; heap_size = _ALIGN_UP(heap_size, granularity); alloc_min = granularity; tbl_entries = max_allocs; tbl_size = tbl_entries * sizeof(struct alloc_info); alloc_tbl = (struct alloc_info *)_ALIGN_UP((unsigned long)base, 8); memset(alloc_tbl, 0, tbl_size); heap_base = _ALIGN_UP((unsigned long)alloc_tbl + tbl_size, alloc_min); next_base = heap_base; space_left = heap_size; platform_ops.malloc = simple_malloc; platform_ops.free = simple_free; platform_ops.realloc = simple_realloc; return (void *)(heap_base + heap_size); }
linux-master
arch/powerpc/boot/simple_alloc.c
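simple_malloc() rounds every request up to the granularity passed to simple_alloc_init() via _ALIGN_UP(). Assuming the usual power-of-two rounding formula for that macro (its real definition lives in the wrapper's page.h, not shown here), the effect on request sizes looks like the sketch below; the request sizes are arbitrary examples.

#include <stdio.h>

/* Assumed definition of _ALIGN_UP: round x up to a multiple of the
 * power-of-two a. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	/* Platforms in this directory typically call
	 * simple_alloc_init(_end, heap_size, 32, 64), so alloc_min = 32. */
	unsigned long granularity = 32;
	unsigned long requests[] = { 1, 32, 33, 100, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++)
		printf("request %lu -> block of %lu bytes\n",
		       requests[i], ALIGN_UP(requests[i], granularity));
	return 0;
}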
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for AmigaOne
 *
 * Author: Gerhard Pircher ([email protected])
 *
 * Based on cuboot-83xx.c
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "ops.h"
#include "stdio.h"
#include "cuboot.h"

#include "ppcboot.h"

static bd_t bd;

static void platform_fixups(void)
{
	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq);
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	fdt_init(_dtb_start);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
linux-master
arch/powerpc/boot/cuboot-amigaone.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file does the necessary interface mapping between the bootwrapper * device tree operations and the interface provided by shared source * files flatdevicetree.[ch]. * * Copyright 2007 David Gibson, IBM Corporation. */ #include <stddef.h> #include <stdio.h> #include <page.h> #include <libfdt.h> #include "ops.h" #define DEBUG 0 #define BAD_ERROR(err) (((err) < 0) \ && ((err) != -FDT_ERR_NOTFOUND) \ && ((err) != -FDT_ERR_EXISTS)) #define check_err(err) \ ({ \ if (BAD_ERROR(err) || ((err < 0) && DEBUG)) \ printf("%s():%d %s\n\r", __func__, __LINE__, \ fdt_strerror(err)); \ if (BAD_ERROR(err)) \ exit(); \ (err < 0) ? -1 : 0; \ }) #define offset_devp(off) \ ({ \ unsigned long _offset = (off); \ check_err(_offset) ? NULL : (void *)(_offset+1); \ }) #define devp_offset_find(devp) (((unsigned long)(devp))-1) #define devp_offset(devp) (devp ? ((unsigned long)(devp))-1 : 0) static void *fdt; static void *buf; /* = NULL */ #define EXPAND_GRANULARITY 1024 static void expand_buf(int minexpand) { int size = fdt_totalsize(fdt); int rc; size = _ALIGN(size + minexpand, EXPAND_GRANULARITY); buf = platform_ops.realloc(buf, size); if (!buf) fatal("Couldn't find %d bytes to expand device tree\n\r", size); rc = fdt_open_into(fdt, buf, size); if (rc != 0) fatal("Couldn't expand fdt into new buffer: %s\n\r", fdt_strerror(rc)); fdt = buf; } static void *fdt_wrapper_finddevice(const char *path) { return offset_devp(fdt_path_offset(fdt, path)); } static int fdt_wrapper_getprop(const void *devp, const char *name, void *buf, const int buflen) { const void *p; int len; p = fdt_getprop(fdt, devp_offset(devp), name, &len); if (!p) return check_err(len); memcpy(buf, p, min(len, buflen)); return len; } static int fdt_wrapper_setprop(const void *devp, const char *name, const void *buf, const int len) { int rc; rc = fdt_setprop(fdt, devp_offset(devp), name, buf, len); if (rc == -FDT_ERR_NOSPACE) { expand_buf(len + 16); rc = fdt_setprop(fdt, devp_offset(devp), name, buf, len); } return check_err(rc); } static int fdt_wrapper_del_node(const void *devp) { return fdt_del_node(fdt, devp_offset(devp)); } static void *fdt_wrapper_get_parent(const void *devp) { return offset_devp(fdt_parent_offset(fdt, devp_offset(devp))); } static void *fdt_wrapper_create_node(const void *devp, const char *name) { int offset; offset = fdt_add_subnode(fdt, devp_offset(devp), name); if (offset == -FDT_ERR_NOSPACE) { expand_buf(strlen(name) + 16); offset = fdt_add_subnode(fdt, devp_offset(devp), name); } return offset_devp(offset); } static void *fdt_wrapper_find_node_by_prop_value(const void *prev, const char *name, const char *val, int len) { int offset = fdt_node_offset_by_prop_value(fdt, devp_offset_find(prev), name, val, len); return offset_devp(offset); } static void *fdt_wrapper_find_node_by_compatible(const void *prev, const char *val) { int offset = fdt_node_offset_by_compatible(fdt, devp_offset_find(prev), val); return offset_devp(offset); } static char *fdt_wrapper_get_path(const void *devp, char *buf, int len) { int rc; rc = fdt_get_path(fdt, devp_offset(devp), buf, len); if (check_err(rc)) return NULL; return buf; } static unsigned long fdt_wrapper_finalize(void) { int rc; rc = fdt_pack(fdt); if (rc != 0) fatal("Couldn't pack flat tree: %s\n\r", fdt_strerror(rc)); return (unsigned long)fdt; } void fdt_init(void *blob) { int err; int bufsize; dt_ops.finddevice = fdt_wrapper_finddevice; dt_ops.getprop = fdt_wrapper_getprop; dt_ops.setprop = fdt_wrapper_setprop; dt_ops.get_parent = 
fdt_wrapper_get_parent; dt_ops.create_node = fdt_wrapper_create_node; dt_ops.find_node_by_prop_value = fdt_wrapper_find_node_by_prop_value; dt_ops.find_node_by_compatible = fdt_wrapper_find_node_by_compatible; dt_ops.del_node = fdt_wrapper_del_node; dt_ops.get_path = fdt_wrapper_get_path; dt_ops.finalize = fdt_wrapper_finalize; /* Make sure the dt blob is the right version and so forth */ fdt = blob; bufsize = fdt_totalsize(fdt) + EXPAND_GRANULARITY; buf = malloc(bufsize); if(!buf) fatal("malloc failed. can't relocate the device tree\n\r"); err = fdt_open_into(fdt, buf, bufsize); if (err != 0) fatal("fdt_init(): %s\n\r", fdt_strerror(err)); fdt = buf; }
linux-master
arch/powerpc/boot/libfdt-wrapper.c
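The wrapper above hides libfdt node offsets behind opaque pointers by storing offset + 1, so that offset 0 (the root node) does not collide with NULL and NULL can keep meaning "not found". A minimal sketch of that round trip, with the error-checking machinery stripped out:

#include <stdio.h>

/* Encode a libfdt node offset as an opaque handle (offset + 1) and
 * decode it again, as offset_devp()/devp_offset() do. */
static void *offset_to_devp(long offset)
{
	return offset < 0 ? NULL : (void *)((unsigned long)offset + 1);
}

static long devp_to_offset(void *devp)
{
	return devp ? (long)((unsigned long)devp - 1) : 0;
}

int main(void)
{
	void *root = offset_to_devp(0);		/* root node: non-NULL handle */
	void *missing = offset_to_devp(-1);	/* e.g. -FDT_ERR_NOTFOUND */

	printf("root handle %p decodes to offset %ld\n",
	       root, devp_to_offset(root));
	printf("missing node handle: %p\n", missing);
	return 0;
}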
// SPDX-License-Identifier: GPL-2.0
extern void epapr_platform_init(unsigned long r3, unsigned long r4,
				unsigned long r5, unsigned long r6,
				unsigned long r7);

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	epapr_platform_init(r3, r4, r5, r6, r7);
}
linux-master
arch/powerpc/boot/epapr-wrapper.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Motload compatibility for the Emerson/Artesyn MVME7100 * * Copyright 2016 Elettra-Sincrotrone Trieste S.C.p.A. * * Author: Alessio Igor Bogani <[email protected]> */ #include "ops.h" #include "stdio.h" #include "cuboot.h" #define TARGET_86xx #define TARGET_HAS_ETH1 #define TARGET_HAS_ETH2 #define TARGET_HAS_ETH3 #include "ppcboot.h" static bd_t bd; BSS_STACK(16384); static void mvme7100_fixups(void) { void *devp; unsigned long busfreq = bd.bi_busfreq * 1000000; dt_fixup_cpu_clocks(bd.bi_intfreq * 1000000, busfreq / 4, busfreq); devp = finddevice("/soc@f1000000"); if (devp) setprop(devp, "bus-frequency", &busfreq, sizeof(busfreq)); devp = finddevice("/soc/serial@4500"); if (devp) setprop(devp, "clock-frequency", &busfreq, sizeof(busfreq)); dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); dt_fixup_mac_address_by_alias("ethernet2", bd.bi_enet2addr); dt_fixup_mac_address_by_alias("ethernet3", bd.bi_enet3addr); } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); fdt_init(_dtb_start); serial_console_init(); platform_ops.fixups = mvme7100_fixups; }
linux-master
arch/powerpc/boot/mvme7100.c
// SPDX-License-Identifier: GPL-2.0-only /* * Old U-boot compatibility for Sequoia * * Valentine Barshak <[email protected]> * Copyright 2007 MontaVista Software, Inc * * Based on Ebony code by David Gibson <[email protected]> * Copyright IBM Corporation, 2007 * * Based on Bamboo code by Josh Boyer <[email protected]> * Copyright IBM Corporation, 2007 */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "dcr.h" #include "4xx.h" #include "44x.h" #include "cuboot.h" #define TARGET_4xx #define TARGET_44x #include "ppcboot.h" static bd_t bd; static void sequoia_fixups(void) { unsigned long sysclk = 33333333; ibm440ep_fixup_clocks(sysclk, 11059200, 50000000); ibm4xx_fixup_ebc_ranges("/plb/opb/ebc"); ibm4xx_denali_fixup_memsize(); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); platform_ops.fixups = sequoia_fixups; platform_ops.exit = ibm44x_dbcr_reset; fdt_init(_dtb_start); serial_console_init(); }
linux-master
arch/powerpc/boot/cuboot-sequoia.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) Paul Mackerras 1997. */ #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "of.h" typedef u32 prom_arg_t; /* The following structure is used to communicate with open firmware. * All arguments in and out are in big endian format. */ struct prom_args { __be32 service; /* Address of service name string. */ __be32 nargs; /* Number of input arguments. */ __be32 nret; /* Number of output arguments. */ __be32 args[10]; /* Input/output arguments. */ }; #ifdef __powerpc64__ extern int prom(void *); #else static int (*prom) (void *); #endif void of_init(void *promptr) { #ifndef __powerpc64__ prom = (int (*)(void *))promptr; #endif } #define ADDR(x) (u32)(unsigned long)(x) int of_call_prom(const char *service, int nargs, int nret, ...) { int i; struct prom_args args; va_list list; args.service = cpu_to_be32(ADDR(service)); args.nargs = cpu_to_be32(nargs); args.nret = cpu_to_be32(nret); va_start(list, nret); for (i = 0; i < nargs; i++) args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); va_end(list); for (i = 0; i < nret; i++) args.args[nargs+i] = 0; if (prom(&args) < 0) return PROM_ERROR; return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; } static int of_call_prom_ret(const char *service, int nargs, int nret, prom_arg_t *rets, ...) { int i; struct prom_args args; va_list list; args.service = cpu_to_be32(ADDR(service)); args.nargs = cpu_to_be32(nargs); args.nret = cpu_to_be32(nret); va_start(list, rets); for (i = 0; i < nargs; i++) args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); va_end(list); for (i = 0; i < nret; i++) args.args[nargs+i] = 0; if (prom(&args) < 0) return PROM_ERROR; if (rets != NULL) for (i = 1; i < nret; ++i) rets[i-1] = be32_to_cpu(args.args[nargs+i]); return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; } /* returns true if s2 is a prefix of s1 */ static int string_match(const char *s1, const char *s2) { for (; *s2; ++s2) if (*s1++ != *s2) return 0; return 1; } /* * Older OF's require that when claiming a specific range of addresses, * we claim the physical space in the /memory node and the virtual * space in the chosen mmu node, and then do a map operation to * map virtual to physical. 
*/ static int need_map = -1; static ihandle chosen_mmu; static ihandle memory; static int check_of_version(void) { phandle oprom, chosen; char version[64]; oprom = of_finddevice("/openprom"); if (oprom == (phandle) -1) return 0; if (of_getprop(oprom, "model", version, sizeof(version)) <= 0) return 0; version[sizeof(version)-1] = 0; printf("OF version = '%s'\r\n", version); if (!string_match(version, "Open Firmware, 1.") && !string_match(version, "FirmWorks,3.")) return 0; chosen = of_finddevice("/chosen"); if (chosen == (phandle) -1) { chosen = of_finddevice("/chosen@0"); if (chosen == (phandle) -1) { printf("no chosen\n"); return 0; } } if (of_getprop(chosen, "mmu", &chosen_mmu, sizeof(chosen_mmu)) <= 0) { printf("no mmu\n"); return 0; } memory = of_call_prom("open", 1, 1, "/memory"); if (memory == PROM_ERROR) { memory = of_call_prom("open", 1, 1, "/memory@0"); if (memory == PROM_ERROR) { printf("no memory node\n"); return 0; } } printf("old OF detected\r\n"); return 1; } unsigned int of_claim(unsigned long virt, unsigned long size, unsigned long align) { int ret; prom_arg_t result; if (need_map < 0) need_map = check_of_version(); if (align || !need_map) return of_call_prom("claim", 3, 1, virt, size, align); ret = of_call_prom_ret("call-method", 5, 2, &result, "claim", memory, align, size, virt); if (ret != 0 || result == -1) return -1; ret = of_call_prom_ret("call-method", 5, 2, &result, "claim", chosen_mmu, align, size, virt); /* 0x12 == coherent + read/write */ ret = of_call_prom("call-method", 6, 1, "map", chosen_mmu, 0x12, size, virt, virt); return virt; } void *of_vmlinux_alloc(unsigned long size) { unsigned long start = (unsigned long)_start, end = (unsigned long)_end; unsigned long addr; void *p; /* With some older POWER4 firmware we need to claim the area the kernel * will reside in. Newer firmwares don't need this so we just ignore * the return value. */ addr = (unsigned long) of_claim(start, end - start, 0); printf("Trying to claim from 0x%lx to 0x%lx (0x%lx) got %lx\r\n", start, end, end - start, addr); p = malloc(size); if (!p) fatal("Can't allocate memory for kernel image!\n\r"); return p; } void of_exit(void) { of_call_prom("exit", 0, 0); } /* * OF device tree routines */ void *of_finddevice(const char *name) { return (void *) (unsigned long) of_call_prom("finddevice", 1, 1, name); } int of_getprop(const void *phandle, const char *name, void *buf, const int buflen) { return of_call_prom("getprop", 4, 1, phandle, name, buf, buflen); } int of_setprop(const void *phandle, const char *name, const void *buf, const int buflen) { return of_call_prom("setprop", 4, 1, phandle, name, buf, buflen); }
linux-master
arch/powerpc/boot/oflib.c
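of_call_prom() in oflib.c above marshals every client-interface call into a prom_args block whose fields are all big-endian 32-bit cells. The host-side sketch below only illustrates that marshalling for a hypothetical "getprop" request: be32() stands in for cpu_to_be32(), the phandle is invented, pointers are truncated to 32 bits purely for display, and the real prom() entry point is never called because it exists only when running under Open Firmware.

/*
 * Illustrative only: pack a "getprop" request the way of_call_prom() does.
 * The struct mirrors prom_args from oflib.c; nothing here talks to firmware.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct prom_args_demo {
	uint32_t service;	/* address of service name string (BE) */
	uint32_t nargs;		/* number of input arguments (BE) */
	uint32_t nret;		/* number of output arguments (BE) */
	uint32_t args[10];	/* input/output argument cells (BE) */
};

static uint32_t be32(uint32_t v)
{
	unsigned char b[4] = { v >> 24, v >> 16, v >> 8, v };
	uint32_t out;

	memcpy(&out, b, 4);
	return out;
}

int main(void)
{
	char buf[32];
	struct prom_args_demo pa = {
		/* pointers truncated to 32 bits for display purposes only */
		.service = be32((uint32_t)(uintptr_t)"getprop"),
		.nargs   = be32(4),	/* phandle, name, buf, buflen */
		.nret    = be32(1),	/* returned property length */
	};
	unsigned char *p = (unsigned char *)&pa;
	int i;

	pa.args[0] = be32(0x1234);			/* invented phandle */
	pa.args[1] = be32((uint32_t)(uintptr_t)"bootargs");
	pa.args[2] = be32((uint32_t)(uintptr_t)buf);
	pa.args[3] = be32(sizeof(buf));
	pa.args[4] = 0;					/* return cell */

	for (i = 0; i < 12; i++)	/* dump the service/nargs/nret cells */
		printf("%02x%s", p[i], (i % 4 == 3) ? " " : "");
	printf("\n");
	return 0;
}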
// SPDX-License-Identifier: GPL-2.0-only /* * Old U-boot compatibility for Acadia * * Author: Josh Boyer <[email protected]> * * Copyright 2008 IBM Corporation */ #include "ops.h" #include "io.h" #include "dcr.h" #include "stdio.h" #include "4xx.h" #include "44x.h" #include "cuboot.h" #define TARGET_4xx #include "ppcboot.h" static bd_t bd; #define CPR_PERD0_SPIDV_MASK 0x000F0000 /* SPI Clock Divider */ #define PLLC_SRC_MASK 0x20000000 /* PLL feedback source */ #define PLLD_FBDV_MASK 0x1F000000 /* PLL feedback divider value */ #define PLLD_FWDVA_MASK 0x000F0000 /* PLL forward divider A value */ #define PLLD_FWDVB_MASK 0x00000700 /* PLL forward divider B value */ #define PRIMAD_CPUDV_MASK 0x0F000000 /* CPU Clock Divisor Mask */ #define PRIMAD_PLBDV_MASK 0x000F0000 /* PLB Clock Divisor Mask */ #define PRIMAD_OPBDV_MASK 0x00000F00 /* OPB Clock Divisor Mask */ #define PRIMAD_EBCDV_MASK 0x0000000F /* EBC Clock Divisor Mask */ #define PERD0_PWMDV_MASK 0xFF000000 /* PWM Divider Mask */ #define PERD0_SPIDV_MASK 0x000F0000 /* SPI Divider Mask */ #define PERD0_U0DV_MASK 0x0000FF00 /* UART 0 Divider Mask */ #define PERD0_U1DV_MASK 0x000000FF /* UART 1 Divider Mask */ static void get_clocks(void) { unsigned long sysclk, cpr_plld, cpr_pllc, cpr_primad, plloutb, i; unsigned long pllFwdDiv, pllFwdDivB, pllFbkDiv, pllPlbDiv, pllExtBusDiv; unsigned long pllOpbDiv, freqEBC, freqUART, freqOPB; unsigned long div; /* total divisor udiv * bdiv */ unsigned long umin; /* minimum udiv */ unsigned short diff; /* smallest diff */ unsigned long udiv; /* best udiv */ unsigned short idiff; /* current diff */ unsigned short ibdiv; /* current bdiv */ unsigned long est; /* current estimate */ unsigned long baud; void *np; /* read the sysclk value from the CPLD */ sysclk = (in_8((unsigned char *)0x80000000) == 0xc) ? 66666666 : 33333000; /* * Read PLL Mode registers */ cpr_plld = CPR0_READ(DCRN_CPR0_PLLD); cpr_pllc = CPR0_READ(DCRN_CPR0_PLLC); /* * Determine forward divider A */ pllFwdDiv = ((cpr_plld & PLLD_FWDVA_MASK) >> 16); /* * Determine forward divider B */ pllFwdDivB = ((cpr_plld & PLLD_FWDVB_MASK) >> 8); if (pllFwdDivB == 0) pllFwdDivB = 8; /* * Determine FBK_DIV. */ pllFbkDiv = ((cpr_plld & PLLD_FBDV_MASK) >> 24); if (pllFbkDiv == 0) pllFbkDiv = 256; /* * Read CPR_PRIMAD register */ cpr_primad = CPR0_READ(DCRN_CPR0_PRIMAD); /* * Determine PLB_DIV. */ pllPlbDiv = ((cpr_primad & PRIMAD_PLBDV_MASK) >> 16); if (pllPlbDiv == 0) pllPlbDiv = 16; /* * Determine EXTBUS_DIV. */ pllExtBusDiv = (cpr_primad & PRIMAD_EBCDV_MASK); if (pllExtBusDiv == 0) pllExtBusDiv = 16; /* * Determine OPB_DIV. */ pllOpbDiv = ((cpr_primad & PRIMAD_OPBDV_MASK) >> 8); if (pllOpbDiv == 0) pllOpbDiv = 16; /* There is a bug in U-Boot that prevents us from using * bd.bi_opbfreq because U-Boot doesn't populate it for * 405EZ. We get to calculate it, yay! */ freqOPB = (sysclk *pllFbkDiv) /pllOpbDiv; freqEBC = (sysclk * pllFbkDiv) / pllExtBusDiv; plloutb = ((sysclk * ((cpr_pllc & PLLC_SRC_MASK) ? pllFwdDivB : pllFwdDiv) * pllFbkDiv) / pllFwdDivB); np = find_node_by_alias("serial0"); if (getprop(np, "current-speed", &baud, sizeof(baud)) != sizeof(baud)) fatal("no current-speed property\n\r"); udiv = 256; /* Assume lowest possible serial clk */ div = plloutb / (16 * baud); /* total divisor */ umin = (plloutb / freqOPB) << 1; /* 2 x OPB divisor */ diff = 256; /* highest possible */ /* i is the test udiv value -- start with the largest * possible (256) to minimize serial clock and constrain * search to umin. 
*/ for (i = 256; i > umin; i--) { ibdiv = div / i; est = i * ibdiv; idiff = (est > div) ? (est-div) : (div-est); if (idiff == 0) { udiv = i; break; /* can't do better */ } else if (idiff < diff) { udiv = i; /* best so far */ diff = idiff; /* update lowest diff*/ } } freqUART = plloutb / udiv; dt_fixup_cpu_clocks(bd.bi_procfreq, bd.bi_intfreq, bd.bi_plb_busfreq); dt_fixup_clock("/plb/ebc", freqEBC); dt_fixup_clock("/plb/opb", freqOPB); dt_fixup_clock("/plb/opb/serial@ef600300", freqUART); dt_fixup_clock("/plb/opb/serial@ef600400", freqUART); } static void acadia_fixups(void) { dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); get_clocks(); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); platform_ops.fixups = acadia_fixups; platform_ops.exit = ibm40x_dbcr_reset; fdt_init(_dtb_start); serial_console_init(); }
linux-master
arch/powerpc/boot/cuboot-acadia.c
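The divisor search in get_clocks() above picks the UART divider by walking udiv from 256 down to twice the PLLOUTB/OPB ratio and keeping the candidate whose udiv * bdiv product lands closest to plloutb / (16 * baud). The stand-alone program below reruns that loop with invented plloutb, OPB and baud figures, chosen only to exercise the search, not taken from a real Acadia board.

/* Host-side re-creation of the UART divisor search in get_clocks().
 * plloutb, freqOPB and baud are arbitrary example values. */
#include <stdio.h>

int main(void)
{
	unsigned long plloutb = 200000000;	/* example PLLOUTB, Hz */
	unsigned long freqOPB = 50000000;	/* example OPB clock, Hz */
	unsigned long baud = 115200;
	unsigned long div = plloutb / (16 * baud);	/* total divisor */
	unsigned long umin = (plloutb / freqOPB) << 1;	/* 2 x OPB divisor */
	unsigned short diff = 256;		/* worst possible diff */
	unsigned long udiv = 256;		/* assume lowest serial clk */
	unsigned long i;

	for (i = 256; i > umin; i--) {
		unsigned short ibdiv = div / i;
		unsigned long est = i * ibdiv;
		unsigned short idiff = (est > div) ? (est - div) : (div - est);

		if (idiff == 0) {
			udiv = i;
			break;			/* exact match */
		} else if (idiff < diff) {
			udiv = i;		/* best so far */
			diff = idiff;
		}
	}

	printf("udiv = %lu, uart clock = %lu Hz\n", udiv, plloutb / udiv);
	return 0;
}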
// SPDX-License-Identifier: GPL-2.0-only /* * PS3 bootwrapper support. * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corp. */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" extern int lv1_panic(u64 in_1); extern int lv1_get_logical_partition_id(u64 *out_1); extern int lv1_get_logical_ppe_id(u64 *out_1); extern int lv1_get_repository_node_value(u64 in_1, u64 in_2, u64 in_3, u64 in_4, u64 in_5, u64 *out_1, u64 *out_2); BSS_STACK(4096); /* A buffer that may be edited by tools operating on a zImage binary so as to * edit the command line passed to vmlinux (by setting /chosen/bootargs). * The buffer is put in it's own section so that tools may locate it easier. */ static char cmdline[BOOT_COMMAND_LINE_SIZE] __attribute__((__section__("__builtin_cmdline"))); static void prep_cmdline(void *chosen) { if (cmdline[0] == '\0') getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1); else setprop_str(chosen, "bootargs", cmdline); printf("cmdline: '%s'\n", cmdline); } static void ps3_console_write(const char *buf, int len) { } static void ps3_exit(void) { printf("ps3_exit\n"); /* lv1_panic will shutdown the lpar. */ lv1_panic(0); /* zero = do not reboot */ while (1); } static int ps3_repository_read_rm_size(u64 *rm_size) { int result; u64 lpar_id; u64 ppe_id; u64 v2; result = lv1_get_logical_partition_id(&lpar_id); if (result) return -1; result = lv1_get_logical_ppe_id(&ppe_id); if (result) return -1; /* * n1: 0000000062690000 : ....bi.. * n2: 7075000000000000 : pu...... * n3: 0000000000000001 : ........ * n4: 726d5f73697a6500 : rm_size. */ result = lv1_get_repository_node_value(lpar_id, 0x0000000062690000ULL, 0x7075000000000000ULL, ppe_id, 0x726d5f73697a6500ULL, rm_size, &v2); printf("%s:%d: ppe_id %lu \n", __func__, __LINE__, (unsigned long)ppe_id); printf("%s:%d: lpar_id %lu \n", __func__, __LINE__, (unsigned long)lpar_id); printf("%s:%d: rm_size %llxh \n", __func__, __LINE__, *rm_size); return result ? -1 : 0; } void ps3_copy_vectors(void) { extern char __system_reset_kernel[]; memcpy((void *)0x100, __system_reset_kernel, 512); flush_cache((void *)0x100, 512); } void platform_init(void) { const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */ void *chosen; unsigned long ft_addr; u64 rm_size; console_ops.write = ps3_console_write; platform_ops.exit = ps3_exit; printf("\n-- PS3 bootwrapper --\n"); simple_alloc_init(_end, heapsize, 32, 64); fdt_init(_dtb_start); chosen = finddevice("/chosen"); ps3_repository_read_rm_size(&rm_size); dt_fixup_memory(0, rm_size); if (&_initrd_end > &_initrd_start) { setprop_val(chosen, "linux,initrd-start", (u32)(_initrd_start)); setprop_val(chosen, "linux,initrd-end", (u32)(_initrd_end)); } prep_cmdline(chosen); ft_addr = dt_ops.finalize(); ps3_copy_vectors(); printf(" flat tree at 0x%lx\n\r", ft_addr); ((kernel_entry_t)0)(ft_addr, 0, NULL); ps3_exit(); }
linux-master
arch/powerpc/boot/ps3.c
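ps3_repository_read_rm_size() above addresses the LV1 repository with u64 keys that are ASCII tags packed into big-endian words, as the in-file dump comment ("pu......", "rm_size.") shows; the first key places "bi" in the low word of its u64. The small program below only reproduces that packing so the magic constants can be cross-checked on a host; it cannot, of course, issue lv1_get_repository_node_value() outside the hypervisor.

/*
 * Reproduce the LV1 repository key packing used in ps3.c: ASCII tags
 * left-justified in big-endian u64s, with the "bi" tag shifted into the
 * low word of its key as shown by the dump comment in the file.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint64_t make_key(const char *tag)
{
	uint64_t key = 0;
	size_t i, n = strlen(tag);

	for (i = 0; i < 8 && i < n; i++)
		key |= (uint64_t)(unsigned char)tag[i] << (56 - 8 * i);
	return key;
}

int main(void)
{
	printf("bi >> 32 -> 0x%016llx (file: 0x0000000062690000)\n",
	       (unsigned long long)(make_key("bi") >> 32));
	printf("pu       -> 0x%016llx (file: 0x7075000000000000)\n",
	       (unsigned long long)make_key("pu"));
	printf("rm_size  -> 0x%016llx (file: 0x726d5f73697a6500)\n",
	       (unsigned long long)make_key("rm_size"));
	return 0;
}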
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * hack-coff.c - hack the header of an xcoff file to fill in
 * a few fields needed by the Open Firmware xcoff loader on
 * Power Macs but not initialized by objcopy.
 *
 * Copyright (C) Paul Mackerras 1997.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>

#include "rs6000.h"

#define AOUT_MAGIC	0x010b

#define get_16be(x)	((((unsigned char *)(x))[0] << 8) \
			 + ((unsigned char *)(x))[1])
#define put_16be(x, v)	(((unsigned char *)(x))[0] = (v) >> 8, \
			 ((unsigned char *)(x))[1] = (v) & 0xff)
#define get_32be(x)	((((unsigned char *)(x))[0] << 24) \
			 + (((unsigned char *)(x))[1] << 16) \
			 + (((unsigned char *)(x))[2] << 8) \
			 + ((unsigned char *)(x))[3])

int main(int ac, char **av)
{
	int fd;
	int i, nsect;
	int aoutsz;
	struct external_filehdr fhdr;
	AOUTHDR aout;
	struct external_scnhdr shdr;

	if (ac != 2) {
		fprintf(stderr, "Usage: hack-coff coff-file\n");
		exit(1);
	}
	if ((fd = open(av[1], 2)) == -1) {
		perror(av[2]);
		exit(1);
	}
	if (read(fd, &fhdr, sizeof(fhdr)) != sizeof(fhdr))
		goto readerr;
	i = get_16be(fhdr.f_magic);
	if (i != U802TOCMAGIC && i != U802WRMAGIC && i != U802ROMAGIC) {
		fprintf(stderr, "%s: not an xcoff file\n", av[1]);
		exit(1);
	}
	aoutsz = get_16be(fhdr.f_opthdr);
	if (read(fd, &aout, aoutsz) != aoutsz)
		goto readerr;
	nsect = get_16be(fhdr.f_nscns);
	for (i = 0; i < nsect; ++i) {
		if (read(fd, &shdr, sizeof(shdr)) != sizeof(shdr))
			goto readerr;
		if (strcmp(shdr.s_name, ".text") == 0) {
			put_16be(aout.o_snentry, i+1);
			put_16be(aout.o_sntext, i+1);
		} else if (strcmp(shdr.s_name, ".data") == 0) {
			put_16be(aout.o_sndata, i+1);
		} else if (strcmp(shdr.s_name, ".bss") == 0) {
			put_16be(aout.o_snbss, i+1);
		}
	}
	put_16be(aout.magic, AOUT_MAGIC);
	if (lseek(fd, (long) sizeof(struct external_filehdr), 0) == -1
	    || write(fd, &aout, aoutsz) != aoutsz) {
		fprintf(stderr, "%s: write error\n", av[1]);
		exit(1);
	}
	close(fd);
	exit(0);

readerr:
	fprintf(stderr, "%s: read error or file too short\n", av[1]);
	exit(1);
}
linux-master
arch/powerpc/boot/hack-coff.c
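hack-coff patches the XCOFF auxiliary header purely through the byte-order macros defined at the top of the file, so it behaves identically on little- and big-endian build hosts. A minimal round-trip check of those macros, compiled on its own with no rs6000.h present, looks like this:

/* Stand-alone round-trip test of the get_16be/put_16be macros from
 * hack-coff.c; the buffer is just scratch memory, not a real header. */
#include <stdio.h>

#define get_16be(x)	((((unsigned char *)(x))[0] << 8) \
			 + ((unsigned char *)(x))[1])
#define put_16be(x, v)	(((unsigned char *)(x))[0] = (v) >> 8, \
			 ((unsigned char *)(x))[1] = (v) & 0xff)

int main(void)
{
	unsigned char field[2];

	put_16be(field, 0x010b);				/* AOUT_MAGIC */
	printf("bytes: %02x %02x\n", field[0], field[1]);	/* 01 0b */
	printf("read back: 0x%04x\n", get_16be(field));		/* 0x010b */
	return 0;
}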
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2010 Ben. Herrenschmidt, IBM Corporation.
 *
 * Based on earlier code:
 * Copyright (C) Paul Mackerras 1997.
 *
 * Matt Porter <[email protected]>
 * Copyright 2002-2005 MontaVista Software Inc.
 *
 * Eugene Surovegin <[email protected]> or <[email protected]>
 * Copyright (c) 2003, 2004 Zultys Technologies
 *
 * Copyright 2007 David Gibson, IBM Corporation.
 */
#include <stdarg.h>
#include <stddef.h>
#include "types.h"
#include "elf.h"
#include "string.h"
#include "stdio.h"
#include "page.h"
#include "ops.h"
#include "reg.h"
#include "io.h"
#include "dcr.h"
#include "4xx.h"
#include "44x.h"
#include "libfdt.h"

BSS_STACK(4096);

static u32 ibm4xx_memstart;

static void iss_4xx_fixups(void)
{
	void *memory;
	u32 reg[3];

	memory = finddevice("/memory");
	if (!memory)
		fatal("Can't find memory node\n");
	/* This assumes #address-cells = 2 and #size-cells = 1, so reg[1]
	 * holds the low word of the base address and reg[2] the size of
	 * the first memory range. */
	getprop(memory, "reg", reg, sizeof(reg));
	if (reg[2])
		/* If the device tree specifies the memory range, use it */
		ibm4xx_memstart = reg[1];
	else
		/* otherwise, read it from the SDRAM controller */
		ibm4xx_sdram_fixup_memsize();
}

static void *iss_4xx_vmlinux_alloc(unsigned long size)
{
	return (void *)ibm4xx_memstart;
}

#define SPRN_PIR	0x11E	/* Processor Identification Register */
void platform_init(void)
{
	unsigned long end_of_ram = 0x08000000;
	unsigned long avail_ram = end_of_ram - (unsigned long)_end;
	u32 pir_reg;

	simple_alloc_init(_end, avail_ram, 128, 64);
	platform_ops.fixups = iss_4xx_fixups;
	platform_ops.vmlinux_alloc = iss_4xx_vmlinux_alloc;
	platform_ops.exit = ibm44x_dbcr_reset;
	pir_reg = mfspr(SPRN_PIR);
	fdt_set_boot_cpuid_phys(_dtb_start, pir_reg);
	fdt_init(_dtb_start);
	serial_console_init();
}
linux-master
arch/powerpc/boot/treeboot-iss4xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * PowerQUICC II support functions
 *
 * Author: Scott Wood <[email protected]>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "ops.h"
#include "types.h"
#include "fsl-soc.h"
#include "pq2.h"
#include "stdio.h"
#include "io.h"

#define PQ2_SCCR	(0x10c80/4)	/* System Clock Configuration Register */
#define PQ2_SCMR	(0x10c88/4)	/* System Clock Mode Register */

static int pq2_corecnf_map[] = {
	3, 2, 2, 2, 4, 4, 5, 9, 6, 11, 8, 10, 3, 12, 7, -1,
	6, 5, 13, 2, 14, 4, 15, 9, 0, 11, 8, 10, 16, 12, 7, -1
};

/* Get various clocks from crystal frequency.
 * Returns zero on failure and non-zero on success.
 */
int pq2_get_clocks(u32 crystal, u32 *sysfreq, u32 *corefreq,
		   u32 *timebase, u32 *brgfreq)
{
	u32 *immr;
	u32 sccr, scmr, mainclk, busclk;
	int corecnf, busdf, plldf, pllmf, dfbrg;

	immr = fsl_get_immr();
	if (!immr) {
		printf("pq2_get_clocks: Couldn't get IMMR base.\r\n");
		return 0;
	}

	sccr = in_be32(&immr[PQ2_SCCR]);
	scmr = in_be32(&immr[PQ2_SCMR]);

	dfbrg = sccr & 3;
	corecnf = (scmr >> 24) & 0x1f;
	busdf = (scmr >> 20) & 0xf;
	plldf = (scmr >> 12) & 1;
	pllmf = scmr & 0xfff;

	mainclk = crystal * (pllmf + 1) / (plldf + 1);
	busclk = mainclk / (busdf + 1);

	if (sysfreq)
		*sysfreq = mainclk / 2;
	if (timebase)
		*timebase = busclk / 4;
	if (brgfreq)
		*brgfreq = mainclk / (1 << ((dfbrg + 1) * 2));

	if (corefreq) {
		int coremult = pq2_corecnf_map[corecnf];

		if (coremult < 0)
			*corefreq = mainclk / 2;
		else if (coremult == 0)
			return 0;
		else
			*corefreq = busclk * coremult / 2;
	}

	return 1;
}

/* Set common device tree fields based on the given clock frequencies. */
void pq2_set_clocks(u32 sysfreq, u32 corefreq, u32 timebase, u32 brgfreq)
{
	void *node;

	dt_fixup_cpu_clocks(corefreq, timebase, sysfreq);

	node = finddevice("/soc/cpm");
	if (node)
		setprop(node, "clock-frequency", &sysfreq, 4);

	node = finddevice("/soc/cpm/brg");
	if (node)
		setprop(node, "clock-frequency", &brgfreq, 4);
}

int pq2_fixup_clocks(u32 crystal)
{
	u32 sysfreq, corefreq, timebase, brgfreq;

	if (!pq2_get_clocks(crystal, &sysfreq, &corefreq, &timebase, &brgfreq))
		return 0;

	pq2_set_clocks(sysfreq, corefreq, timebase, brgfreq);
	return 1;
}
linux-master
arch/powerpc/boot/pq2.c
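pq2_get_clocks() above reduces the SCMR and SCCR bit-fields to the main, bus, timebase and BRG clocks with a handful of divisions. The worked example below plugs one assumed set of field values (invented for illustration, not read from real silicon) into the same formulas and prints the results.

/* Worked example of the PQ2 clock derivation in pq2_get_clocks().
 * The crystal frequency and register fields are assumed example values. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t crystal = 66000000;			/* 66 MHz input */
	int pllmf = 5, plldf = 0, busdf = 1, dfbrg = 1;	/* example fields */

	uint32_t mainclk = crystal * (pllmf + 1) / (plldf + 1);
	uint32_t busclk = mainclk / (busdf + 1);
	uint32_t sysfreq = mainclk / 2;
	uint32_t timebase = busclk / 4;
	uint32_t brgfreq = mainclk / (1 << ((dfbrg + 1) * 2));

	printf("mainclk  %u Hz\n", mainclk);	/* 396 MHz */
	printf("busclk   %u Hz\n", busclk);	/* 198 MHz */
	printf("sysfreq  %u Hz\n", sysfreq);	/* 198 MHz */
	printf("timebase %u Hz\n", timebase);	/* 49.5 MHz */
	printf("brgfreq  %u Hz\n", brgfreq);	/* 24.75 MHz */
	return 0;
}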
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2007 David Gibson, IBM Corporation. * * Based on earlier code: * Matt Porter <[email protected]> * Copyright 2002-2005 MontaVista Software Inc. * * Eugene Surovegin <[email protected]> or <[email protected]> * Copyright (c) 2003, 2004 Zultys Technologies * * Copyright (C) 2009 Wind River Systems, Inc. * Updated for supporting PPC405EX on Kilauea. * Tiejun Chen <[email protected]> */ #include <stddef.h> #include "types.h" #include "string.h" #include "stdio.h" #include "ops.h" #include "reg.h" #include "dcr.h" static unsigned long chip_11_errata(unsigned long memsize) { unsigned long pvr; pvr = mfpvr(); switch (pvr & 0xf0000ff0) { case 0x40000850: case 0x400008d0: case 0x200008d0: memsize -= 4096; break; default: break; } return memsize; } /* Read the 4xx SDRAM controller to get size of system memory. */ void ibm4xx_sdram_fixup_memsize(void) { int i; unsigned long memsize, bank_config; memsize = 0; for (i = 0; i < ARRAY_SIZE(sdram_bxcr); i++) { bank_config = SDRAM0_READ(sdram_bxcr[i]); if (bank_config & SDRAM_CONFIG_BANK_ENABLE) memsize += SDRAM_CONFIG_BANK_SIZE(bank_config); } memsize = chip_11_errata(memsize); dt_fixup_memory(0, memsize); } /* Read the 440SPe MQ controller to get size of system memory. */ #define DCRN_MQ0_B0BAS 0x40 #define DCRN_MQ0_B1BAS 0x41 #define DCRN_MQ0_B2BAS 0x42 #define DCRN_MQ0_B3BAS 0x43 static u64 ibm440spe_decode_bas(u32 bas) { u64 base = ((u64)(bas & 0xFFE00000u)) << 2; /* open coded because I'm paranoid about invalid values */ switch ((bas >> 4) & 0xFFF) { case 0: return 0; case 0xffc: return base + 0x000800000ull; case 0xff8: return base + 0x001000000ull; case 0xff0: return base + 0x002000000ull; case 0xfe0: return base + 0x004000000ull; case 0xfc0: return base + 0x008000000ull; case 0xf80: return base + 0x010000000ull; case 0xf00: return base + 0x020000000ull; case 0xe00: return base + 0x040000000ull; case 0xc00: return base + 0x080000000ull; case 0x800: return base + 0x100000000ull; } printf("Memory BAS value 0x%08x unsupported !\n", bas); return 0; } void ibm440spe_fixup_memsize(void) { u64 banktop, memsize = 0; /* Ultimately, we should directly construct the memory node * so we are able to handle holes in the memory address space */ banktop = ibm440spe_decode_bas(mfdcr(DCRN_MQ0_B0BAS)); if (banktop > memsize) memsize = banktop; banktop = ibm440spe_decode_bas(mfdcr(DCRN_MQ0_B1BAS)); if (banktop > memsize) memsize = banktop; banktop = ibm440spe_decode_bas(mfdcr(DCRN_MQ0_B2BAS)); if (banktop > memsize) memsize = banktop; banktop = ibm440spe_decode_bas(mfdcr(DCRN_MQ0_B3BAS)); if (banktop > memsize) memsize = banktop; dt_fixup_memory(0, memsize); } /* 4xx DDR1/2 Denali memory controller support */ /* DDR0 registers */ #define DDR0_02 2 #define DDR0_08 8 #define DDR0_10 10 #define DDR0_14 14 #define DDR0_42 42 #define DDR0_43 43 /* DDR0_02 */ #define DDR_START 0x1 #define DDR_START_SHIFT 0 #define DDR_MAX_CS_REG 0x3 #define DDR_MAX_CS_REG_SHIFT 24 #define DDR_MAX_COL_REG 0xf #define DDR_MAX_COL_REG_SHIFT 16 #define DDR_MAX_ROW_REG 0xf #define DDR_MAX_ROW_REG_SHIFT 8 /* DDR0_08 */ #define DDR_DDR2_MODE 0x1 #define DDR_DDR2_MODE_SHIFT 0 /* DDR0_10 */ #define DDR_CS_MAP 0x3 #define DDR_CS_MAP_SHIFT 8 /* DDR0_14 */ #define DDR_REDUC 0x1 #define DDR_REDUC_SHIFT 16 /* DDR0_42 */ #define DDR_APIN 0x7 #define DDR_APIN_SHIFT 24 /* DDR0_43 */ #define DDR_COL_SZ 0x7 #define DDR_COL_SZ_SHIFT 8 #define DDR_BANK8 0x1 #define DDR_BANK8_SHIFT 0 #define DDR_GET_VAL(val, mask, shift) (((val) >> (shift)) & (mask)) /* 
* Some U-Boot versions set the number of chipselects to two * for Sequoia/Rainier boards while they only have one chipselect * hardwired. Hardcode the number of chipselects to one * for sequioa/rainer board models or read the actual value * from the memory controller register DDR0_10 otherwise. */ static inline u32 ibm4xx_denali_get_cs(void) { void *devp; char model[64]; u32 val, cs; devp = finddevice("/"); if (!devp) goto read_cs; if (getprop(devp, "model", model, sizeof(model)) <= 0) goto read_cs; model[sizeof(model)-1] = 0; if (!strcmp(model, "amcc,sequoia") || !strcmp(model, "amcc,rainier")) return 1; read_cs: /* get CS value */ val = SDRAM0_READ(DDR0_10); val = DDR_GET_VAL(val, DDR_CS_MAP, DDR_CS_MAP_SHIFT); cs = 0; while (val) { if (val & 0x1) cs++; val = val >> 1; } return cs; } void ibm4xx_denali_fixup_memsize(void) { u32 val, max_cs, max_col, max_row; u32 cs, col, row, bank, dpath; unsigned long memsize; val = SDRAM0_READ(DDR0_02); if (!DDR_GET_VAL(val, DDR_START, DDR_START_SHIFT)) fatal("DDR controller is not initialized\n"); /* get maximum cs col and row values */ max_cs = DDR_GET_VAL(val, DDR_MAX_CS_REG, DDR_MAX_CS_REG_SHIFT); max_col = DDR_GET_VAL(val, DDR_MAX_COL_REG, DDR_MAX_COL_REG_SHIFT); max_row = DDR_GET_VAL(val, DDR_MAX_ROW_REG, DDR_MAX_ROW_REG_SHIFT); cs = ibm4xx_denali_get_cs(); if (!cs) fatal("No memory installed\n"); if (cs > max_cs) fatal("DDR wrong CS configuration\n"); /* get data path bytes */ val = SDRAM0_READ(DDR0_14); if (DDR_GET_VAL(val, DDR_REDUC, DDR_REDUC_SHIFT)) dpath = 4; /* 32 bits */ else dpath = 8; /* 64 bits */ /* get address pins (rows) */ val = SDRAM0_READ(DDR0_42); row = DDR_GET_VAL(val, DDR_APIN, DDR_APIN_SHIFT); if (row > max_row) fatal("DDR wrong APIN configuration\n"); row = max_row - row; /* get collomn size and banks */ val = SDRAM0_READ(DDR0_43); col = DDR_GET_VAL(val, DDR_COL_SZ, DDR_COL_SZ_SHIFT); if (col > max_col) fatal("DDR wrong COL configuration\n"); col = max_col - col; if (DDR_GET_VAL(val, DDR_BANK8, DDR_BANK8_SHIFT)) bank = 8; /* 8 banks */ else bank = 4; /* 4 banks */ memsize = cs * (1 << (col+row)) * bank * dpath; memsize = chip_11_errata(memsize); dt_fixup_memory(0, memsize); } #define SPRN_DBCR0_40X 0x3F2 #define SPRN_DBCR0_44X 0x134 #define DBCR0_RST_SYSTEM 0x30000000 void ibm44x_dbcr_reset(void) { unsigned long tmp; asm volatile ( "mfspr %0,%1\n" "oris %0,%0,%2@h\n" "mtspr %1,%0" : "=&r"(tmp) : "i"(SPRN_DBCR0_44X), "i"(DBCR0_RST_SYSTEM) ); } void ibm40x_dbcr_reset(void) { unsigned long tmp; asm volatile ( "mfspr %0,%1\n" "oris %0,%0,%2@h\n" "mtspr %1,%0" : "=&r"(tmp) : "i"(SPRN_DBCR0_40X), "i"(DBCR0_RST_SYSTEM) ); } #define EMAC_RESET 0x20000000 void ibm4xx_quiesce_eth(u32 *emac0, u32 *emac1) { /* Quiesce the MAL and EMAC(s) since PIBS/OpenBIOS don't * do this for us */ if (emac0) *emac0 = EMAC_RESET; if (emac1) *emac1 = EMAC_RESET; mtdcr(DCRN_MAL0_CFG, MAL_RESET); while (mfdcr(DCRN_MAL0_CFG) & MAL_RESET) ; /* loop until reset takes effect */ } /* Read 4xx EBC bus bridge registers to get mappings of the peripheral * banks into the OPB address space */ void ibm4xx_fixup_ebc_ranges(const char *ebc) { void *devp; u32 bxcr; u32 ranges[EBC_NUM_BANKS*4]; u32 *p = ranges; int i; for (i = 0; i < EBC_NUM_BANKS; i++) { mtdcr(DCRN_EBC0_CFGADDR, EBC_BXCR(i)); bxcr = mfdcr(DCRN_EBC0_CFGDATA); if ((bxcr & EBC_BXCR_BU) != EBC_BXCR_BU_OFF) { *p++ = i; *p++ = 0; *p++ = bxcr & EBC_BXCR_BAS; *p++ = EBC_BXCR_BANK_SIZE(bxcr); } } devp = finddevice(ebc); if (! 
devp) fatal("Couldn't locate EBC node %s\n\r", ebc); setprop(devp, "ranges", ranges, (p - ranges) * sizeof(u32)); } /* Calculate 440GP clocks */ void ibm440gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk) { u32 sys0 = mfdcr(DCRN_CPC0_SYS0); u32 cr0 = mfdcr(DCRN_CPC0_CR0); u32 cpu, plb, opb, ebc, tb, uart0, uart1, m; u32 opdv = CPC0_SYS0_OPDV(sys0); u32 epdv = CPC0_SYS0_EPDV(sys0); if (sys0 & CPC0_SYS0_BYPASS) { /* Bypass system PLL */ cpu = plb = sys_clk; } else { if (sys0 & CPC0_SYS0_EXTSL) /* PerClk */ m = CPC0_SYS0_FWDVB(sys0) * opdv * epdv; else /* CPU clock */ m = CPC0_SYS0_FBDV(sys0) * CPC0_SYS0_FWDVA(sys0); cpu = sys_clk * m / CPC0_SYS0_FWDVA(sys0); plb = sys_clk * m / CPC0_SYS0_FWDVB(sys0); } opb = plb / opdv; ebc = opb / epdv; /* FIXME: Check if this is for all 440GP, or just Ebony */ if ((mfpvr() & 0xf0000fff) == 0x40000440) /* Rev. B 440GP, use external system clock */ tb = sys_clk; else /* Rev. C 440GP, errata force us to use internal clock */ tb = cpu; if (cr0 & CPC0_CR0_U0EC) /* External UART clock */ uart0 = ser_clk; else /* Internal UART clock */ uart0 = plb / CPC0_CR0_UDIV(cr0); if (cr0 & CPC0_CR0_U1EC) /* External UART clock */ uart1 = ser_clk; else /* Internal UART clock */ uart1 = plb / CPC0_CR0_UDIV(cr0); printf("PPC440GP: SysClk = %dMHz (%x)\n\r", (sys_clk + 500000) / 1000000, sys_clk); dt_fixup_cpu_clocks(cpu, tb, 0); dt_fixup_clock("/plb", plb); dt_fixup_clock("/plb/opb", opb); dt_fixup_clock("/plb/opb/ebc", ebc); dt_fixup_clock("/plb/opb/serial@40000200", uart0); dt_fixup_clock("/plb/opb/serial@40000300", uart1); } #define SPRN_CCR1 0x378 static inline u32 __fix_zero(u32 v, u32 def) { return v ? v : def; } static unsigned int __ibm440eplike_fixup_clocks(unsigned int sys_clk, unsigned int tmr_clk, int per_clk_from_opb) { /* PLL config */ u32 pllc = CPR0_READ(DCRN_CPR0_PLLC); u32 plld = CPR0_READ(DCRN_CPR0_PLLD); /* Dividers */ u32 fbdv = __fix_zero((plld >> 24) & 0x1f, 32); u32 fwdva = __fix_zero((plld >> 16) & 0xf, 16); u32 fwdvb = __fix_zero((plld >> 8) & 7, 8); u32 lfbdv = __fix_zero(plld & 0x3f, 64); u32 pradv0 = __fix_zero((CPR0_READ(DCRN_CPR0_PRIMAD) >> 24) & 7, 8); u32 prbdv0 = __fix_zero((CPR0_READ(DCRN_CPR0_PRIMBD) >> 24) & 7, 8); u32 opbdv0 = __fix_zero((CPR0_READ(DCRN_CPR0_OPBD) >> 24) & 3, 4); u32 perdv0 = __fix_zero((CPR0_READ(DCRN_CPR0_PERD) >> 24) & 3, 4); /* Input clocks for primary dividers */ u32 clk_a, clk_b; /* Resulting clocks */ u32 cpu, plb, opb, ebc, vco; /* Timebase */ u32 ccr1, tb = tmr_clk; if (pllc & 0x40000000) { u32 m; /* Feedback path */ switch ((pllc >> 24) & 7) { case 0: /* PLLOUTx */ m = ((pllc & 0x20000000) ? fwdvb : fwdva) * lfbdv; break; case 1: /* CPU */ m = fwdva * pradv0; break; case 5: /* PERClk */ m = fwdvb * prbdv0 * opbdv0 * perdv0; break; default: printf("WARNING ! Invalid PLL feedback source !\n"); goto bypass; } m *= fbdv; vco = sys_clk * m; clk_a = vco / fwdva; clk_b = vco / fwdvb; } else { bypass: /* Bypass system PLL */ vco = 0; clk_a = clk_b = sys_clk; } cpu = clk_a / pradv0; plb = clk_b / prbdv0; opb = plb / opbdv0; ebc = (per_clk_from_opb ? opb : plb) / perdv0; /* Figure out timebase. 
Either CPU or default TmrClk */ ccr1 = mfspr(SPRN_CCR1); /* If passed a 0 tmr_clk, force CPU clock */ if (tb == 0) { ccr1 &= ~0x80u; mtspr(SPRN_CCR1, ccr1); } if ((ccr1 & 0x0080) == 0) tb = cpu; dt_fixup_cpu_clocks(cpu, tb, 0); dt_fixup_clock("/plb", plb); dt_fixup_clock("/plb/opb", opb); dt_fixup_clock("/plb/opb/ebc", ebc); return plb; } static void eplike_fixup_uart_clk(int index, const char *path, unsigned int ser_clk, unsigned int plb_clk) { unsigned int sdr; unsigned int clock; switch (index) { case 0: sdr = SDR0_READ(DCRN_SDR0_UART0); break; case 1: sdr = SDR0_READ(DCRN_SDR0_UART1); break; case 2: sdr = SDR0_READ(DCRN_SDR0_UART2); break; case 3: sdr = SDR0_READ(DCRN_SDR0_UART3); break; default: return; } if (sdr & 0x00800000u) clock = ser_clk; else clock = plb_clk / __fix_zero(sdr & 0xff, 256); dt_fixup_clock(path, clock); } void ibm440ep_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk, unsigned int tmr_clk) { unsigned int plb_clk = __ibm440eplike_fixup_clocks(sys_clk, tmr_clk, 0); /* serial clocks need fixup based on int/ext */ eplike_fixup_uart_clk(0, "/plb/opb/serial@ef600300", ser_clk, plb_clk); eplike_fixup_uart_clk(1, "/plb/opb/serial@ef600400", ser_clk, plb_clk); eplike_fixup_uart_clk(2, "/plb/opb/serial@ef600500", ser_clk, plb_clk); eplike_fixup_uart_clk(3, "/plb/opb/serial@ef600600", ser_clk, plb_clk); } void ibm440gx_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk, unsigned int tmr_clk) { unsigned int plb_clk = __ibm440eplike_fixup_clocks(sys_clk, tmr_clk, 1); /* serial clocks need fixup based on int/ext */ eplike_fixup_uart_clk(0, "/plb/opb/serial@40000200", ser_clk, plb_clk); eplike_fixup_uart_clk(1, "/plb/opb/serial@40000300", ser_clk, plb_clk); } void ibm440spe_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk, unsigned int tmr_clk) { unsigned int plb_clk = __ibm440eplike_fixup_clocks(sys_clk, tmr_clk, 1); /* serial clocks need fixup based on int/ext */ eplike_fixup_uart_clk(0, "/plb/opb/serial@f0000200", ser_clk, plb_clk); eplike_fixup_uart_clk(1, "/plb/opb/serial@f0000300", ser_clk, plb_clk); eplike_fixup_uart_clk(2, "/plb/opb/serial@f0000600", ser_clk, plb_clk); } void ibm405gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk) { u32 pllmr = mfdcr(DCRN_CPC0_PLLMR); u32 cpc0_cr0 = mfdcr(DCRN_405_CPC0_CR0); u32 cpc0_cr1 = mfdcr(DCRN_405_CPC0_CR1); u32 psr = mfdcr(DCRN_405_CPC0_PSR); u32 cpu, plb, opb, ebc, tb, uart0, uart1, m; u32 fwdv, fwdvb, fbdv, cbdv, opdv, epdv, ppdv, udiv; fwdv = (8 - ((pllmr & 0xe0000000) >> 29)); fbdv = (pllmr & 0x1e000000) >> 25; if (fbdv == 0) fbdv = 16; cbdv = ((pllmr & 0x00060000) >> 17) + 1; /* CPU:PLB */ opdv = ((pllmr & 0x00018000) >> 15) + 1; /* PLB:OPB */ ppdv = ((pllmr & 0x00006000) >> 13) + 1; /* PLB:PCI */ epdv = ((pllmr & 0x00001800) >> 11) + 2; /* PLB:EBC */ udiv = ((cpc0_cr0 & 0x3e) >> 1) + 1; /* check for 405GPr */ if ((mfpvr() & 0xfffffff0) == (0x50910951 & 0xfffffff0)) { fwdvb = 8 - (pllmr & 0x00000007); if (!(psr & 0x00001000)) /* PCI async mode enable == 0 */ if (psr & 0x00000020) /* New mode enable */ m = fwdvb * 2 * ppdv; else m = fwdvb * cbdv * ppdv; else if (psr & 0x00000020) /* New mode enable */ if (psr & 0x00000800) /* PerClk synch mode */ m = fwdvb * 2 * epdv; else m = fbdv * fwdv; else if (epdv == fbdv) m = fbdv * cbdv * epdv; else m = fbdv * fwdvb * cbdv; cpu = sys_clk * m / fwdv; plb = sys_clk * m / (fwdvb * cbdv); } else { m = fwdv * fbdv * cbdv; cpu = sys_clk * m / fwdv; plb = cpu / cbdv; } opb = plb / opdv; ebc = plb / epdv; if (cpc0_cr0 & 0x80) /* uart0 uses the external clock */ 
uart0 = ser_clk; else uart0 = cpu / udiv; if (cpc0_cr0 & 0x40) /* uart1 uses the external clock */ uart1 = ser_clk; else uart1 = cpu / udiv; /* setup the timebase clock to tick at the cpu frequency */ cpc0_cr1 = cpc0_cr1 & ~0x00800000; mtdcr(DCRN_405_CPC0_CR1, cpc0_cr1); tb = cpu; dt_fixup_cpu_clocks(cpu, tb, 0); dt_fixup_clock("/plb", plb); dt_fixup_clock("/plb/opb", opb); dt_fixup_clock("/plb/ebc", ebc); dt_fixup_clock("/plb/opb/serial@ef600300", uart0); dt_fixup_clock("/plb/opb/serial@ef600400", uart1); } void ibm405ep_fixup_clocks(unsigned int sys_clk) { u32 pllmr0 = mfdcr(DCRN_CPC0_PLLMR0); u32 pllmr1 = mfdcr(DCRN_CPC0_PLLMR1); u32 cpc0_ucr = mfdcr(DCRN_CPC0_UCR); u32 cpu, plb, opb, ebc, uart0, uart1; u32 fwdva, fwdvb, fbdv, cbdv, opdv, epdv; u32 pllmr0_ccdv, tb, m; fwdva = 8 - ((pllmr1 & 0x00070000) >> 16); fwdvb = 8 - ((pllmr1 & 0x00007000) >> 12); fbdv = (pllmr1 & 0x00f00000) >> 20; if (fbdv == 0) fbdv = 16; cbdv = ((pllmr0 & 0x00030000) >> 16) + 1; /* CPU:PLB */ epdv = ((pllmr0 & 0x00000300) >> 8) + 2; /* PLB:EBC */ opdv = ((pllmr0 & 0x00003000) >> 12) + 1; /* PLB:OPB */ m = fbdv * fwdvb; pllmr0_ccdv = ((pllmr0 & 0x00300000) >> 20) + 1; if (pllmr1 & 0x80000000) cpu = sys_clk * m / (fwdva * pllmr0_ccdv); else cpu = sys_clk / pllmr0_ccdv; plb = cpu / cbdv; opb = plb / opdv; ebc = plb / epdv; tb = cpu; uart0 = cpu / (cpc0_ucr & 0x0000007f); uart1 = cpu / ((cpc0_ucr & 0x00007f00) >> 8); dt_fixup_cpu_clocks(cpu, tb, 0); dt_fixup_clock("/plb", plb); dt_fixup_clock("/plb/opb", opb); dt_fixup_clock("/plb/ebc", ebc); dt_fixup_clock("/plb/opb/serial@ef600300", uart0); dt_fixup_clock("/plb/opb/serial@ef600400", uart1); } static u8 ibm405ex_fwdv_multi_bits[] = { /* values for: 1 - 16 */ 0x01, 0x02, 0x0e, 0x09, 0x04, 0x0b, 0x10, 0x0d, 0x0c, 0x05, 0x06, 0x0f, 0x0a, 0x07, 0x08, 0x03 }; u32 ibm405ex_get_fwdva(unsigned long cpr_fwdv) { u32 index; for (index = 0; index < ARRAY_SIZE(ibm405ex_fwdv_multi_bits); index++) if (cpr_fwdv == (u32)ibm405ex_fwdv_multi_bits[index]) return index + 1; return 0; } static u8 ibm405ex_fbdv_multi_bits[] = { /* values for: 1 - 100 */ 0x00, 0xff, 0x7e, 0xfd, 0x7a, 0xf5, 0x6a, 0xd5, 0x2a, 0xd4, 0x29, 0xd3, 0x26, 0xcc, 0x19, 0xb3, 0x67, 0xce, 0x1d, 0xbb, 0x77, 0xee, 0x5d, 0xba, 0x74, 0xe9, 0x52, 0xa5, 0x4b, 0x96, 0x2c, 0xd8, 0x31, 0xe3, 0x46, 0x8d, 0x1b, 0xb7, 0x6f, 0xde, 0x3d, 0xfb, 0x76, 0xed, 0x5a, 0xb5, 0x6b, 0xd6, 0x2d, 0xdb, 0x36, 0xec, 0x59, 0xb2, 0x64, 0xc9, 0x12, 0xa4, 0x48, 0x91, 0x23, 0xc7, 0x0e, 0x9c, 0x38, 0xf0, 0x61, 0xc2, 0x05, 0x8b, 0x17, 0xaf, 0x5f, 0xbe, 0x7c, 0xf9, 0x72, 0xe5, 0x4a, 0x95, 0x2b, 0xd7, 0x2e, 0xdc, 0x39, 0xf3, 0x66, 0xcd, 0x1a, 0xb4, 0x68, 0xd1, 0x22, 0xc4, 0x09, 0x93, 0x27, 0xcf, 0x1e, 0xbc, /* values for: 101 - 200 */ 0x78, 0xf1, 0x62, 0xc5, 0x0a, 0x94, 0x28, 0xd0, 0x21, 0xc3, 0x06, 0x8c, 0x18, 0xb0, 0x60, 0xc1, 0x02, 0x84, 0x08, 0x90, 0x20, 0xc0, 0x01, 0x83, 0x07, 0x8f, 0x1f, 0xbf, 0x7f, 0xfe, 0x7d, 0xfa, 0x75, 0xea, 0x55, 0xaa, 0x54, 0xa9, 0x53, 0xa6, 0x4c, 0x99, 0x33, 0xe7, 0x4e, 0x9d, 0x3b, 0xf7, 0x6e, 0xdd, 0x3a, 0xf4, 0x69, 0xd2, 0x25, 0xcb, 0x16, 0xac, 0x58, 0xb1, 0x63, 0xc6, 0x0d, 0x9b, 0x37, 0xef, 0x5e, 0xbd, 0x7b, 0xf6, 0x6d, 0xda, 0x35, 0xeb, 0x56, 0xad, 0x5b, 0xb6, 0x6c, 0xd9, 0x32, 0xe4, 0x49, 0x92, 0x24, 0xc8, 0x11, 0xa3, 0x47, 0x8e, 0x1c, 0xb8, 0x70, 0xe1, 0x42, 0x85, 0x0b, 0x97, 0x2f, 0xdf, /* values for: 201 - 255 */ 0x3e, 0xfc, 0x79, 0xf2, 0x65, 0xca, 0x15, 0xab, 0x57, 0xae, 0x5c, 0xb9, 0x73, 0xe6, 0x4d, 0x9a, 0x34, 0xe8, 0x51, 0xa2, 0x44, 0x89, 0x13, 0xa7, 0x4f, 0x9e, 0x3c, 0xf8, 0x71, 0xe2, 0x45, 0x8a, 0x14, 0xa8, 
0x50, 0xa1, 0x43, 0x86, 0x0c, 0x98, 0x30, 0xe0, 0x41, 0x82, 0x04, 0x88, 0x10, 0xa0, 0x40, 0x81, 0x03, 0x87, 0x0f, 0x9f, 0x3f /* END */ }; u32 ibm405ex_get_fbdv(unsigned long cpr_fbdv) { u32 index; for (index = 0; index < ARRAY_SIZE(ibm405ex_fbdv_multi_bits); index++) if (cpr_fbdv == (u32)ibm405ex_fbdv_multi_bits[index]) return index + 1; return 0; } void ibm405ex_fixup_clocks(unsigned int sys_clk, unsigned int uart_clk) { /* PLL config */ u32 pllc = CPR0_READ(DCRN_CPR0_PLLC); u32 plld = CPR0_READ(DCRN_CPR0_PLLD); u32 cpud = CPR0_READ(DCRN_CPR0_PRIMAD); u32 plbd = CPR0_READ(DCRN_CPR0_PRIMBD); u32 opbd = CPR0_READ(DCRN_CPR0_OPBD); u32 perd = CPR0_READ(DCRN_CPR0_PERD); /* Dividers */ u32 fbdv = ibm405ex_get_fbdv(__fix_zero((plld >> 24) & 0xff, 1)); u32 fwdva = ibm405ex_get_fwdva(__fix_zero((plld >> 16) & 0x0f, 1)); u32 cpudv0 = __fix_zero((cpud >> 24) & 7, 8); /* PLBDV0 is hardwared to 010. */ u32 plbdv0 = 2; u32 plb2xdv0 = __fix_zero((plbd >> 16) & 7, 8); u32 opbdv0 = __fix_zero((opbd >> 24) & 3, 4); u32 perdv0 = __fix_zero((perd >> 24) & 3, 4); /* Resulting clocks */ u32 cpu, plb, opb, ebc, vco, tb, uart0, uart1; /* PLL's VCO is the source for primary forward ? */ if (pllc & 0x40000000) { u32 m; /* Feedback path */ switch ((pllc >> 24) & 7) { case 0: /* PLLOUTx */ m = fbdv; break; case 1: /* CPU */ m = fbdv * fwdva * cpudv0; break; case 5: /* PERClk */ m = fbdv * fwdva * plb2xdv0 * plbdv0 * opbdv0 * perdv0; break; default: printf("WARNING ! Invalid PLL feedback source !\n"); goto bypass; } vco = (unsigned int)(sys_clk * m); } else { bypass: /* Bypass system PLL */ vco = 0; } /* CPU = VCO / ( FWDVA x CPUDV0) */ cpu = vco / (fwdva * cpudv0); /* PLB = VCO / ( FWDVA x PLB2XDV0 x PLBDV0) */ plb = vco / (fwdva * plb2xdv0 * plbdv0); /* OPB = PLB / OPBDV0 */ opb = plb / opbdv0; /* EBC = OPB / PERDV0 */ ebc = opb / perdv0; tb = cpu; uart0 = uart1 = uart_clk; dt_fixup_cpu_clocks(cpu, tb, 0); dt_fixup_clock("/plb", plb); dt_fixup_clock("/plb/opb", opb); dt_fixup_clock("/plb/opb/ebc", ebc); dt_fixup_clock("/plb/opb/serial@ef600200", uart0); dt_fixup_clock("/plb/opb/serial@ef600300", uart1); }
linux-master
arch/powerpc/boot/4xx.c
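Among the helpers in 4xx.c above, ibm4xx_denali_fixup_memsize() collapses the Denali controller's geometry into a single size: chip-selects times 2^(columns + rows) addresses, times banks, times the data-path width in bytes. The fragment below is only a worked instance of that final multiplication with plausible DDR2 geometry values; it reads no DCRs.

/* Worked example of the Denali size formula used by
 * ibm4xx_denali_fixup_memsize(): cs * 2^(col+row) * bank * dpath. */
#include <stdio.h>

int main(void)
{
	unsigned long cs = 1;			/* one chip select */
	unsigned long row = 13, col = 10;	/* example geometry */
	unsigned long bank = 4;			/* 4 internal banks */
	unsigned long dpath = 8;		/* 64-bit data path = 8 bytes */

	unsigned long memsize = cs * (1UL << (col + row)) * bank * dpath;

	printf("memsize = %lu bytes (%lu MiB)\n", memsize, memsize >> 20);
	return 0;
}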
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright © 2011 Tony Breeds IBM Corporation * * Based on earlier code: * Copyright (C) Paul Mackerras 1997. * * Matt Porter <[email protected]> * Copyright 2002-2005 MontaVista Software Inc. * * Eugene Surovegin <[email protected]> or <[email protected]> * Copyright (c) 2003, 2004 Zultys Technologies * * Copyright 2007 David Gibson, IBM Corporation. * Copyright 2010 Ben. Herrenschmidt, IBM Corporation. * Copyright © 2011 David Kleikamp IBM Corporation */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "reg.h" #include "io.h" #include "dcr.h" #include "4xx.h" #include "44x.h" #include "libfdt.h" BSS_STACK(4096); #define MAX_RANKS 0x4 #define DDR3_MR0CF 0x80010011U static unsigned long long ibm_currituck_memsize; static unsigned long long ibm_currituck_detect_memsize(void) { u32 reg; unsigned i; unsigned long long memsize = 0; for(i = 0; i < MAX_RANKS; i++){ reg = mfdcrx(DDR3_MR0CF + i); if (!(reg & 1)) continue; reg &= 0x0000f000; reg >>= 12; memsize += (0x800000ULL << reg); } return memsize; } static void ibm_currituck_fixups(void) { void *devp = finddevice("/"); u32 dma_ranges[7]; dt_fixup_memory(0x0ULL, ibm_currituck_memsize); while ((devp = find_node_by_devtype(devp, "pci"))) { if (getprop(devp, "dma-ranges", dma_ranges, sizeof(dma_ranges)) < 0) { printf("%s: Failed to get dma-ranges\r\n", __func__); continue; } dma_ranges[5] = ibm_currituck_memsize >> 32; dma_ranges[6] = ibm_currituck_memsize & 0xffffffffUL; setprop(devp, "dma-ranges", dma_ranges, sizeof(dma_ranges)); } } #define SPRN_PIR 0x11E /* Processor Identification Register */ void platform_init(void) { unsigned long end_of_ram, avail_ram; u32 pir_reg; int node, size; const u32 *timebase; ibm_currituck_memsize = ibm_currituck_detect_memsize(); if (ibm_currituck_memsize >> 32) end_of_ram = ~0UL; else end_of_ram = ibm_currituck_memsize; avail_ram = end_of_ram - (unsigned long)_end; simple_alloc_init(_end, avail_ram, 128, 64); platform_ops.fixups = ibm_currituck_fixups; platform_ops.exit = ibm44x_dbcr_reset; pir_reg = mfspr(SPRN_PIR); /* Make sure FDT blob is sane */ if (fdt_check_header(_dtb_start) != 0) fatal("Invalid device tree blob\n"); node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type", "cpu", sizeof("cpu")); if (!node) fatal("Cannot find cpu node\n"); timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size); if (timebase && (size == 4)) timebase_period_ns = 1000000000 / *timebase; fdt_set_boot_cpuid_phys(_dtb_start, pir_reg); fdt_init(_dtb_start); serial_console_init(); }
linux-master
arch/powerpc/boot/treeboot-currituck.c
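ibm_currituck_detect_memsize() above adds one term per populated rank: bit 0 of each MR0CF register flags the rank as present and bits 12-15 scale a base of 8 MiB. The host-side decode below applies the same masking to a few invented register readings (assumed values, purely for illustration).

/* Decode example DDR3_MR0CF-style values the way
 * ibm_currituck_detect_memsize() does; the readings are invented. */
#include <stdio.h>

int main(void)
{
	unsigned int mr0cf[4] = { 0x00006001, 0x00006001, 0, 0 }; /* examples */
	unsigned long long memsize = 0;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int reg = mr0cf[i];

		if (!(reg & 1))			/* rank not populated */
			continue;
		reg = (reg & 0x0000f000) >> 12;	/* size code */
		memsize += 0x800000ULL << reg;	/* 8 MiB << code */
	}

	printf("total = %llu MiB\n", memsize >> 20);
	return 0;
}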
// SPDX-License-Identifier: GPL-2.0-only
/*
 * RedBoot firmware support
 *
 * Author: Scott Wood <[email protected]>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 * Copyright (c) 2008 Codehermit
 */

#include "ops.h"
#include "stdio.h"
#include "redboot.h"
#include "fsl-soc.h"
#include "io.h"

static bd_t bd;
BSS_STACK(4096);

#define MHZ(x)	((x + 500000) / 1000000)

static void platform_fixups(void)
{
	void *node;

	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
	dt_fixup_mac_addresses(bd.bi_enetaddr);
	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 16, bd.bi_busfreq);

	node = finddevice("/soc/cpm/brg");
	if (node) {
		printf("BRG clock-frequency <- 0x%x (%dMHz)\r\n",
		       bd.bi_busfreq, MHZ(bd.bi_busfreq));
		setprop(node, "clock-frequency", &bd.bi_busfreq, 4);
	}
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	memcpy(&bd, (char *)r3, sizeof(bd));

	if (bd.bi_tag != 0x42444944)
		return;

	simple_alloc_init(_end,
	                  bd.bi_memstart + bd.bi_memsize - (unsigned long)_end,
	                  32, 64);

	fdt_init(_dtb_start);
	serial_console_init();

	platform_ops.fixups = platform_fixups;

	loader_info.cmdline = (char *)bd.bi_cmdline;
	loader_info.cmdline_len = strlen((char *)bd.bi_cmdline);
}
linux-master
arch/powerpc/boot/redboot-83xx.c
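platform_init() above only trusts the RedBoot board-info block when bd.bi_tag equals 0x42444944, which is simply the ASCII string "BDID" read as a big-endian word. A short host check confirms the correspondence:

/* Show that the RedBoot board-info magic 0x42444944 is ASCII "BDID". */
#include <stdio.h>

int main(void)
{
	const unsigned char t[4] = { 'B', 'D', 'I', 'D' };
	unsigned long tag = ((unsigned long)t[0] << 24) | (t[1] << 16) |
			    (t[2] << 8) | t[3];

	printf("tag = 0x%08lx (%s)\n", tag,
	       tag == 0x42444944UL ? "matches bi_tag check" : "mismatch");
	return 0;
}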
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 1996-2005 Paul Mackerras. */ #include <linux/string.h> #include <asm/udbg.h> #include <asm/time.h> #include "nonstdio.h" static bool paginating, paginate_skipping; static unsigned long paginate_lpp; /* Lines Per Page */ static unsigned long paginate_pos; void xmon_start_pagination(void) { paginating = true; paginate_skipping = false; paginate_pos = 0; } void xmon_end_pagination(void) { paginating = false; } void xmon_set_pagination_lpp(unsigned long lpp) { paginate_lpp = lpp; } static int xmon_readchar(void) { if (udbg_getc) return udbg_getc(); return -1; } static int xmon_write(const char *ptr, int nb) { int rv = 0; const char *p = ptr, *q; const char msg[] = "[Hit a key (a:all, q:truncate, any:next page)]"; if (nb <= 0) return rv; if (paginating && paginate_skipping) return nb; if (paginate_lpp) { while (paginating && (q = strchr(p, '\n'))) { rv += udbg_write(p, q - p + 1); p = q + 1; paginate_pos++; if (paginate_pos >= paginate_lpp) { udbg_write(msg, strlen(msg)); switch (xmon_readchar()) { case 'a': paginating = false; break; case 'q': paginate_skipping = true; break; default: /* nothing */ break; } paginate_pos = 0; udbg_write("\r\n", 2); if (paginate_skipping) return nb; } } } return rv + udbg_write(p, nb - (p - ptr)); } int xmon_putchar(int c) { char ch = c; if (c == '\n') xmon_putchar('\r'); return xmon_write(&ch, 1) == 1? c: -1; } static char line[256]; static char *lineptr; static int lineleft; static int xmon_getchar(void) { int c; if (lineleft == 0) { lineptr = line; for (;;) { c = xmon_readchar(); if (c == -1 || c == 4) break; if (c == '\r' || c == '\n') { *lineptr++ = '\n'; xmon_putchar('\n'); break; } switch (c) { case 0177: case '\b': if (lineptr > line) { xmon_putchar('\b'); xmon_putchar(' '); xmon_putchar('\b'); --lineptr; } break; case 'U' & 0x1F: while (lineptr > line) { xmon_putchar('\b'); xmon_putchar(' '); xmon_putchar('\b'); --lineptr; } break; default: if (lineptr >= &line[sizeof(line) - 1]) xmon_putchar('\a'); else { xmon_putchar(c); *lineptr++ = c; } } } lineleft = lineptr - line; lineptr = line; } if (lineleft == 0) return -1; --lineleft; return *lineptr++; } char *xmon_gets(char *str, int nb) { char *p; int c; for (p = str; p < str + nb - 1; ) { c = xmon_getchar(); if (c == -1) { if (p == str) return NULL; break; } *p++ = c; if (c == '\n') break; } *p = 0; return str; } void xmon_printf(const char *format, ...) { va_list args; static char xmon_outbuf[1024]; int rc, n; va_start(args, format); n = vsnprintf(xmon_outbuf, sizeof(xmon_outbuf), format, args); va_end(args); rc = xmon_write(xmon_outbuf, n); if (n && rc == 0) { /* No udbg hooks, fallback to printk() - dangerous */ pr_cont("%s", xmon_outbuf); } } void xmon_puts(const char *str) { xmon_write(str, strlen(str)); }
linux-master
arch/powerpc/xmon/nonstdio.c
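xmon_write() above paginates by scanning the output with strchr for '\n' and pausing once paginate_lpp lines have gone out. The sketch below mimics only that counting loop over an in-memory buffer, printing a marker where xmon would prompt for a key; the prompt handling and udbg hooks themselves are left out.

/* Mimic the newline-counting part of xmon_write() pagination: emit a
 * page-break marker every "lpp" lines of a buffer. No key handling here. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *buf = "one\ntwo\nthree\nfour\nfive\n";
	unsigned long lpp = 2, pos = 0;		/* lines per page, line count */
	const char *p = buf, *q;

	while ((q = strchr(p, '\n'))) {
		fwrite(p, 1, q - p + 1, stdout);	/* one full line */
		p = q + 1;
		if (++pos >= lpp) {
			printf("[-- page break --]\n");
			pos = 0;
		}
	}
	fwrite(p, 1, strlen(p), stdout);	/* any unterminated tail */
	return 0;
}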
// SPDX-License-Identifier: GPL-2.0-or-later /* ppc-dis.c -- Disassemble PowerPC instructions Copyright (C) 1994-2016 Free Software Foundation, Inc. Written by Ian Lance Taylor, Cygnus Support This file is part of GDB, GAS, and the GNU binutils. */ #include <asm/cputable.h> #include <asm/cpu_has_feature.h> #include "nonstdio.h" #include "ansidecl.h" #include "ppc.h" #include "dis-asm.h" /* This file provides several disassembler functions, all of which use the disassembler interface defined in dis-asm.h. Several functions are provided because this file handles disassembly for the PowerPC in both big and little endian mode and also for the POWER (RS/6000) chip. */ /* Extract the operand value from the PowerPC or POWER instruction. */ static long operand_value_powerpc (const struct powerpc_operand *operand, unsigned long insn, ppc_cpu_t dialect) { long value; int invalid; /* Extract the value from the instruction. */ if (operand->extract) value = (*operand->extract) (insn, dialect, &invalid); else { if (operand->shift >= 0) value = (insn >> operand->shift) & operand->bitm; else value = (insn << -operand->shift) & operand->bitm; if ((operand->flags & PPC_OPERAND_SIGNED) != 0) { /* BITM is always some number of zeros followed by some number of ones, followed by some number of zeros. */ unsigned long top = operand->bitm; /* top & -top gives the rightmost 1 bit, so this fills in any trailing zeros. */ top |= (top & -top) - 1; top &= ~(top >> 1); value = (value ^ top) - top; } } return value; } /* Determine whether the optional operand(s) should be printed. */ static int skip_optional_operands (const unsigned char *opindex, unsigned long insn, ppc_cpu_t dialect) { const struct powerpc_operand *operand; for (; *opindex != 0; opindex++) { operand = &powerpc_operands[*opindex]; if ((operand->flags & PPC_OPERAND_NEXT) != 0 || ((operand->flags & PPC_OPERAND_OPTIONAL) != 0 && operand_value_powerpc (operand, insn, dialect) != ppc_optional_operand_value (operand))) return 0; } return 1; } /* Find a match for INSN in the opcode table, given machine DIALECT. A DIALECT of -1 is special, matching all machine opcode variations. */ static const struct powerpc_opcode * lookup_powerpc (unsigned long insn, ppc_cpu_t dialect) { const struct powerpc_opcode *opcode; const struct powerpc_opcode *opcode_end; opcode_end = powerpc_opcodes + powerpc_num_opcodes; /* Find the first match in the opcode table for this major opcode. */ for (opcode = powerpc_opcodes; opcode < opcode_end; ++opcode) { const unsigned char *opindex; const struct powerpc_operand *operand; int invalid; if ((insn & opcode->mask) != opcode->opcode || (dialect != (ppc_cpu_t) -1 && ((opcode->flags & dialect) == 0 || (opcode->deprecated & dialect) != 0))) continue; /* Check validity of operands. */ invalid = 0; for (opindex = opcode->operands; *opindex != 0; opindex++) { operand = powerpc_operands + *opindex; if (operand->extract) (*operand->extract) (insn, dialect, &invalid); } if (invalid) continue; return opcode; } return NULL; } /* Print a PowerPC or POWER instruction. 
*/ int print_insn_powerpc (unsigned long insn, unsigned long memaddr) { const struct powerpc_opcode *opcode; bool insn_is_short; ppc_cpu_t dialect; dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON | PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC; if (cpu_has_feature(CPU_FTRS_POWER5)) dialect |= PPC_OPCODE_POWER5; if (cpu_has_feature(CPU_FTRS_CELL)) dialect |= (PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC); if (cpu_has_feature(CPU_FTRS_POWER6)) dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC); if (cpu_has_feature(CPU_FTRS_POWER7)) dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 | PPC_OPCODE_ALTIVEC | PPC_OPCODE_VSX); if (cpu_has_feature(CPU_FTRS_POWER8)) dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 | PPC_OPCODE_POWER8 | PPC_OPCODE_HTM | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 | PPC_OPCODE_VSX); if (cpu_has_feature(CPU_FTRS_POWER9)) dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 | PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 | PPC_OPCODE_VSX | PPC_OPCODE_VSX3); /* Get the major opcode of the insn. */ opcode = NULL; insn_is_short = false; if (opcode == NULL) opcode = lookup_powerpc (insn, dialect); if (opcode == NULL && (dialect & PPC_OPCODE_ANY) != 0) opcode = lookup_powerpc (insn, (ppc_cpu_t) -1); if (opcode != NULL) { const unsigned char *opindex; const struct powerpc_operand *operand; int need_comma; int need_paren; int skip_optional; if (opcode->operands[0] != 0) printf("%-7s ", opcode->name); else printf("%s", opcode->name); if (insn_is_short) /* The operands will be fetched out of the 16-bit instruction. */ insn >>= 16; /* Now extract and print the operands. */ need_comma = 0; need_paren = 0; skip_optional = -1; for (opindex = opcode->operands; *opindex != 0; opindex++) { long value; operand = powerpc_operands + *opindex; /* Operands that are marked FAKE are simply ignored. We already made sure that the extract function considered the instruction to be valid. */ if ((operand->flags & PPC_OPERAND_FAKE) != 0) continue; /* If all of the optional operands have the value zero, then don't print any of them. */ if ((operand->flags & PPC_OPERAND_OPTIONAL) != 0) { if (skip_optional < 0) skip_optional = skip_optional_operands (opindex, insn, dialect); if (skip_optional) continue; } value = operand_value_powerpc (operand, insn, dialect); if (need_comma) { printf(","); need_comma = 0; } /* Print the operand as directed by the flags. 
*/ if ((operand->flags & PPC_OPERAND_GPR) != 0 || ((operand->flags & PPC_OPERAND_GPR_0) != 0 && value != 0)) printf("r%ld", value); else if ((operand->flags & PPC_OPERAND_FPR) != 0) printf("f%ld", value); else if ((operand->flags & PPC_OPERAND_VR) != 0) printf("v%ld", value); else if ((operand->flags & PPC_OPERAND_VSR) != 0) printf("vs%ld", value); else if ((operand->flags & PPC_OPERAND_RELATIVE) != 0) print_address(memaddr + value); else if ((operand->flags & PPC_OPERAND_ABSOLUTE) != 0) print_address(value & 0xffffffff); else if ((operand->flags & PPC_OPERAND_FSL) != 0) printf("fsl%ld", value); else if ((operand->flags & PPC_OPERAND_FCR) != 0) printf("fcr%ld", value); else if ((operand->flags & PPC_OPERAND_UDI) != 0) printf("%ld", value); else if ((operand->flags & PPC_OPERAND_CR_REG) != 0 && (((dialect & PPC_OPCODE_PPC) != 0) || ((dialect & PPC_OPCODE_VLE) != 0))) printf("cr%ld", value); else if (((operand->flags & PPC_OPERAND_CR_BIT) != 0) && (((dialect & PPC_OPCODE_PPC) != 0) || ((dialect & PPC_OPCODE_VLE) != 0))) { static const char *cbnames[4] = { "lt", "gt", "eq", "so" }; int cr; int cc; cr = value >> 2; if (cr != 0) printf("4*cr%d+", cr); cc = value & 3; printf("%s", cbnames[cc]); } else printf("%d", (int) value); if (need_paren) { printf(")"); need_paren = 0; } if ((operand->flags & PPC_OPERAND_PARENS) == 0) need_comma = 1; else { printf("("); need_paren = 1; } } /* We have found and printed an instruction. If it was a short VLE instruction we have more to do. */ if (insn_is_short) { memaddr += 2; return 2; } else /* Otherwise, return. */ return 4; } /* We could not find a match. */ printf(".long 0x%lx", insn); return 4; }
linux-master
arch/powerpc/xmon/ppc-dis.c
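operand_value_powerpc() above sign-extends a field of unknown width by reconstructing its sign bit from the operand bit mask and then applying the (value ^ top) - top identity. The short program below replays the three mask manipulations on the 0xfffc branch-displacement mask with an example raw field value, so each step can be checked by hand.

/*
 * Replay the sign-extension trick from operand_value_powerpc() on the
 * 0xfffc branch-displacement mask with a raw field of 0xfffc, which
 * should decode to the signed displacement -4.
 */
#include <stdio.h>

int main(void)
{
	unsigned long bitm = 0xfffc;	/* example operand->bitm */
	unsigned long value = 0xfffc;	/* raw bits pulled from the insn */
	unsigned long top = bitm;

	top |= (top & -top) - 1;	/* fill in trailing zeros: 0xffff */
	top &= ~(top >> 1);		/* keep only the sign bit: 0x8000 */

	printf("sign bit mask = 0x%lx\n", top);
	printf("signed value  = %ld\n", (long)((value ^ top) - top));
	return 0;
}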
// SPDX-License-Identifier: GPL-2.0-or-later /* ppc-opc.c -- PowerPC opcode list Copyright (C) 1994-2016 Free Software Foundation, Inc. Written by Ian Lance Taylor, Cygnus Support This file is part of GDB, GAS, and the GNU binutils. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/bug.h> #include "nonstdio.h" #include "ppc.h" #define ATTRIBUTE_UNUSED #define _(x) x /* This file holds the PowerPC opcode table. The opcode table includes almost all of the extended instruction mnemonics. This permits the disassembler to use them, and simplifies the assembler logic, at the cost of increasing the table size. The table is strictly constant data, so the compiler should be able to put it in the .text section. This file also holds the operand table. All knowledge about inserting operands into instructions and vice-versa is kept in this file. */ /* Local insertion and extraction functions. */ static unsigned long insert_arx (unsigned long, long, ppc_cpu_t, const char **); static long extract_arx (unsigned long, ppc_cpu_t, int *); static unsigned long insert_ary (unsigned long, long, ppc_cpu_t, const char **); static long extract_ary (unsigned long, ppc_cpu_t, int *); static unsigned long insert_bat (unsigned long, long, ppc_cpu_t, const char **); static long extract_bat (unsigned long, ppc_cpu_t, int *); static unsigned long insert_bba (unsigned long, long, ppc_cpu_t, const char **); static long extract_bba (unsigned long, ppc_cpu_t, int *); static unsigned long insert_bdm (unsigned long, long, ppc_cpu_t, const char **); static long extract_bdm (unsigned long, ppc_cpu_t, int *); static unsigned long insert_bdp (unsigned long, long, ppc_cpu_t, const char **); static long extract_bdp (unsigned long, ppc_cpu_t, int *); static unsigned long insert_bo (unsigned long, long, ppc_cpu_t, const char **); static long extract_bo (unsigned long, ppc_cpu_t, int *); static unsigned long insert_boe (unsigned long, long, ppc_cpu_t, const char **); static long extract_boe (unsigned long, ppc_cpu_t, int *); static unsigned long insert_esync (unsigned long, long, ppc_cpu_t, const char **); static unsigned long insert_dcmxs (unsigned long, long, ppc_cpu_t, const char **); static long extract_dcmxs (unsigned long, ppc_cpu_t, int *); static unsigned long insert_dxd (unsigned long, long, ppc_cpu_t, const char **); static long extract_dxd (unsigned long, ppc_cpu_t, int *); static unsigned long insert_dxdn (unsigned long, long, ppc_cpu_t, const char **); static long extract_dxdn (unsigned long, ppc_cpu_t, int *); static unsigned long insert_fxm (unsigned long, long, ppc_cpu_t, const char **); static long extract_fxm (unsigned long, ppc_cpu_t, int *); static unsigned long insert_li20 (unsigned long, long, ppc_cpu_t, const char **); static long extract_li20 (unsigned long, ppc_cpu_t, int *); static unsigned long insert_ls (unsigned long, long, ppc_cpu_t, const char **); static unsigned long insert_mbe (unsigned long, long, ppc_cpu_t, const char **); static long extract_mbe (unsigned long, ppc_cpu_t, int *); static unsigned long insert_mb6 (unsigned long, long, ppc_cpu_t, const char **); static long extract_mb6 (unsigned long, ppc_cpu_t, int *); static long extract_nb (unsigned long, ppc_cpu_t, int *); static unsigned long insert_nbi (unsigned long, long, ppc_cpu_t, const char **); static unsigned long insert_nsi (unsigned long, long, ppc_cpu_t, const char **); static long extract_nsi (unsigned long, ppc_cpu_t, int *); static unsigned long insert_oimm (unsigned long, long, ppc_cpu_t, const char **); static 
long extract_oimm (unsigned long, ppc_cpu_t, int *); static unsigned long insert_ral (unsigned long, long, ppc_cpu_t, const char **); static unsigned long insert_ram (unsigned long, long, ppc_cpu_t, const char **); static unsigned long insert_raq (unsigned long, long, ppc_cpu_t, const char **); static unsigned long insert_ras (unsigned long, long, ppc_cpu_t, const char **); static unsigned long insert_rbs (unsigned long, long, ppc_cpu_t, const char **); static long extract_rbs (unsigned long, ppc_cpu_t, int *); static unsigned long insert_rbx (unsigned long, long, ppc_cpu_t, const char **); static unsigned long insert_rx (unsigned long, long, ppc_cpu_t, const char **); static long extract_rx (unsigned long, ppc_cpu_t, int *); static unsigned long insert_ry (unsigned long, long, ppc_cpu_t, const char **); static long extract_ry (unsigned long, ppc_cpu_t, int *); static unsigned long insert_sh6 (unsigned long, long, ppc_cpu_t, const char **); static long extract_sh6 (unsigned long, ppc_cpu_t, int *); static unsigned long insert_sci8 (unsigned long, long, ppc_cpu_t, const char **); static long extract_sci8 (unsigned long, ppc_cpu_t, int *); static unsigned long insert_sci8n (unsigned long, long, ppc_cpu_t, const char **); static long extract_sci8n (unsigned long, ppc_cpu_t, int *); static unsigned long insert_sd4h (unsigned long, long, ppc_cpu_t, const char **); static long extract_sd4h (unsigned long, ppc_cpu_t, int *); static unsigned long insert_sd4w (unsigned long, long, ppc_cpu_t, const char **); static long extract_sd4w (unsigned long, ppc_cpu_t, int *); static unsigned long insert_spr (unsigned long, long, ppc_cpu_t, const char **); static long extract_spr (unsigned long, ppc_cpu_t, int *); static unsigned long insert_sprg (unsigned long, long, ppc_cpu_t, const char **); static long extract_sprg (unsigned long, ppc_cpu_t, int *); static unsigned long insert_tbr (unsigned long, long, ppc_cpu_t, const char **); static long extract_tbr (unsigned long, ppc_cpu_t, int *); static unsigned long insert_xt6 (unsigned long, long, ppc_cpu_t, const char **); static long extract_xt6 (unsigned long, ppc_cpu_t, int *); static unsigned long insert_xtq6 (unsigned long, long, ppc_cpu_t, const char **); static long extract_xtq6 (unsigned long, ppc_cpu_t, int *); static unsigned long insert_xa6 (unsigned long, long, ppc_cpu_t, const char **); static long extract_xa6 (unsigned long, ppc_cpu_t, int *); static unsigned long insert_xb6 (unsigned long, long, ppc_cpu_t, const char **); static long extract_xb6 (unsigned long, ppc_cpu_t, int *); static unsigned long insert_xb6s (unsigned long, long, ppc_cpu_t, const char **); static long extract_xb6s (unsigned long, ppc_cpu_t, int *); static unsigned long insert_xc6 (unsigned long, long, ppc_cpu_t, const char **); static long extract_xc6 (unsigned long, ppc_cpu_t, int *); static unsigned long insert_dm (unsigned long, long, ppc_cpu_t, const char **); static long extract_dm (unsigned long, ppc_cpu_t, int *); static unsigned long insert_vlesi (unsigned long, long, ppc_cpu_t, const char **); static long extract_vlesi (unsigned long, ppc_cpu_t, int *); static unsigned long insert_vlensi (unsigned long, long, ppc_cpu_t, const char **); static long extract_vlensi (unsigned long, ppc_cpu_t, int *); static unsigned long insert_vleui (unsigned long, long, ppc_cpu_t, const char **); static long extract_vleui (unsigned long, ppc_cpu_t, int *); static unsigned long insert_vleil (unsigned long, long, ppc_cpu_t, const char **); static long extract_vleil (unsigned long, 
ppc_cpu_t, int *); /* The operands table. The fields are bitm, shift, insert, extract, flags. We used to put parens around the various additions, like the one for BA just below. However, that caused trouble with feeble compilers with a limit on depth of a parenthesized expression, like (reportedly) the compiler in Microsoft Developer Studio 5. So we omit the parens, since the macros are never used in a context where the addition will be ambiguous. */ const struct powerpc_operand powerpc_operands[] = { /* The zero index is used to indicate the end of the list of operands. */ #define UNUSED 0 { 0, 0, NULL, NULL, 0 }, /* The BA field in an XL form instruction. */ #define BA UNUSED + 1 /* The BI field in a B form or XL form instruction. */ #define BI BA #define BI_MASK (0x1f << 16) { 0x1f, 16, NULL, NULL, PPC_OPERAND_CR_BIT }, /* The BA field in an XL form instruction when it must be the same as the BT field in the same instruction. */ #define BAT BA + 1 { 0x1f, 16, insert_bat, extract_bat, PPC_OPERAND_FAKE }, /* The BB field in an XL form instruction. */ #define BB BAT + 1 #define BB_MASK (0x1f << 11) { 0x1f, 11, NULL, NULL, PPC_OPERAND_CR_BIT }, /* The BB field in an XL form instruction when it must be the same as the BA field in the same instruction. */ #define BBA BB + 1 /* The VB field in a VX form instruction when it must be the same as the VA field in the same instruction. */ #define VBA BBA { 0x1f, 11, insert_bba, extract_bba, PPC_OPERAND_FAKE }, /* The BD field in a B form instruction. The lower two bits are forced to zero. */ #define BD BBA + 1 { 0xfffc, 0, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, /* The BD field in a B form instruction when absolute addressing is used. */ #define BDA BD + 1 { 0xfffc, 0, NULL, NULL, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, /* The BD field in a B form instruction when the - modifier is used. This sets the y bit of the BO field appropriately. */ #define BDM BDA + 1 { 0xfffc, 0, insert_bdm, extract_bdm, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, /* The BD field in a B form instruction when the - modifier is used and absolute address is used. */ #define BDMA BDM + 1 { 0xfffc, 0, insert_bdm, extract_bdm, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, /* The BD field in a B form instruction when the + modifier is used. This sets the y bit of the BO field appropriately. */ #define BDP BDMA + 1 { 0xfffc, 0, insert_bdp, extract_bdp, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, /* The BD field in a B form instruction when the + modifier is used and absolute addressing is used. */ #define BDPA BDP + 1 { 0xfffc, 0, insert_bdp, extract_bdp, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, /* The BF field in an X or XL form instruction. */ #define BF BDPA + 1 /* The CRFD field in an X form instruction. */ #define CRFD BF /* The CRD field in an XL form instruction. */ #define CRD BF { 0x7, 23, NULL, NULL, PPC_OPERAND_CR_REG }, /* The BF field in an X or XL form instruction. */ #define BFF BF + 1 { 0x7, 23, NULL, NULL, 0 }, /* An optional BF field. This is used for comparison instructions, in which an omitted BF field is taken as zero. */ #define OBF BFF + 1 { 0x7, 23, NULL, NULL, PPC_OPERAND_CR_REG | PPC_OPERAND_OPTIONAL }, /* The BFA field in an X or XL form instruction. */ #define BFA OBF + 1 { 0x7, 18, NULL, NULL, PPC_OPERAND_CR_REG }, /* The BO field in a B form instruction. Certain values are illegal. 
*/ #define BO BFA + 1 #define BO_MASK (0x1f << 21) { 0x1f, 21, insert_bo, extract_bo, 0 }, /* The BO field in a B form instruction when the + or - modifier is used. This is like the BO field, but it must be even. */ #define BOE BO + 1 { 0x1e, 21, insert_boe, extract_boe, 0 }, /* The RM field in an X form instruction. */ #define RM BOE + 1 { 0x3, 11, NULL, NULL, 0 }, #define BH RM + 1 { 0x3, 11, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The BT field in an X or XL form instruction. */ #define BT BH + 1 { 0x1f, 21, NULL, NULL, PPC_OPERAND_CR_BIT }, /* The BI16 field in a BD8 form instruction. */ #define BI16 BT + 1 { 0x3, 8, NULL, NULL, PPC_OPERAND_CR_BIT }, /* The BI32 field in a BD15 form instruction. */ #define BI32 BI16 + 1 { 0xf, 16, NULL, NULL, PPC_OPERAND_CR_BIT }, /* The BO32 field in a BD15 form instruction. */ #define BO32 BI32 + 1 { 0x3, 20, NULL, NULL, 0 }, /* The B8 field in a BD8 form instruction. */ #define B8 BO32 + 1 { 0x1fe, -1, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, /* The B15 field in a BD15 form instruction. The lowest bit is forced to zero. */ #define B15 B8 + 1 { 0xfffe, 0, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, /* The B24 field in a BD24 form instruction. The lowest bit is forced to zero. */ #define B24 B15 + 1 { 0x1fffffe, 0, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, /* The condition register number portion of the BI field in a B form or XL form instruction. This is used for the extended conditional branch mnemonics, which set the lower two bits of the BI field. This field is optional. */ #define CR B24 + 1 { 0x7, 18, NULL, NULL, PPC_OPERAND_CR_REG | PPC_OPERAND_OPTIONAL }, /* The CRB field in an X form instruction. */ #define CRB CR + 1 /* The MB field in an M form instruction. */ #define MB CRB #define MB_MASK (0x1f << 6) { 0x1f, 6, NULL, NULL, 0 }, /* The CRD32 field in an XL form instruction. */ #define CRD32 CRB + 1 { 0x3, 21, NULL, NULL, PPC_OPERAND_CR_REG }, /* The CRFS field in an X form instruction. */ #define CRFS CRD32 + 1 { 0x7, 0, NULL, NULL, PPC_OPERAND_CR_REG }, #define CRS CRFS + 1 { 0x3, 18, NULL, NULL, PPC_OPERAND_CR_REG | PPC_OPERAND_OPTIONAL }, /* The CT field in an X form instruction. */ #define CT CRS + 1 /* The MO field in an mbar instruction. */ #define MO CT { 0x1f, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The D field in a D form instruction. This is a displacement off a register, and implies that the next operand is a register in parentheses. */ #define D CT + 1 { 0xffff, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, /* The D8 field in a D form instruction. This is a displacement off a register, and implies that the next operand is a register in parentheses. */ #define D8 D + 1 { 0xff, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, /* The DCMX field in an X form instruction. */ #define DCMX D8 + 1 { 0x7f, 16, NULL, NULL, 0 }, /* The split DCMX field in an X form instruction. */ #define DCMXS DCMX + 1 { 0x7f, PPC_OPSHIFT_INV, insert_dcmxs, extract_dcmxs, 0 }, /* The DQ field in a DQ form instruction. This is like D, but the lower four bits are forced to zero. */ #define DQ DCMXS + 1 { 0xfff0, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED | PPC_OPERAND_DQ }, /* The DS field in a DS form instruction. This is like D, but the lower two bits are forced to zero. 
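The mask is accordingly 0xfffc, so only displacements that are a multiple of four can be represented, as needed by DS form loads and stores such as ld and std.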
*/ #define DS DQ + 1 { 0xfffc, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED | PPC_OPERAND_DS }, /* The DUIS or BHRBE fields in a XFX form instruction, 10 bits unsigned imediate */ #define DUIS DS + 1 #define BHRBE DUIS { 0x3ff, 11, NULL, NULL, 0 }, /* The split D field in a DX form instruction. */ #define DXD DUIS + 1 { 0xffff, PPC_OPSHIFT_INV, insert_dxd, extract_dxd, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT}, /* The split ND field in a DX form instruction. This is the same as the DX field, only negated. */ #define NDXD DXD + 1 { 0xffff, PPC_OPSHIFT_INV, insert_dxdn, extract_dxdn, PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT}, /* The E field in a wrteei instruction. */ /* And the W bit in the pair singles instructions. */ /* And the ST field in a VX form instruction. */ #define E NDXD + 1 #define PSW E #define ST E { 0x1, 15, NULL, NULL, 0 }, /* The FL1 field in a POWER SC form instruction. */ #define FL1 E + 1 /* The U field in an X form instruction. */ #define U FL1 { 0xf, 12, NULL, NULL, 0 }, /* The FL2 field in a POWER SC form instruction. */ #define FL2 FL1 + 1 { 0x7, 2, NULL, NULL, 0 }, /* The FLM field in an XFL form instruction. */ #define FLM FL2 + 1 { 0xff, 17, NULL, NULL, 0 }, /* The FRA field in an X or A form instruction. */ #define FRA FLM + 1 #define FRA_MASK (0x1f << 16) { 0x1f, 16, NULL, NULL, PPC_OPERAND_FPR }, /* The FRAp field of DFP instructions. */ #define FRAp FRA + 1 { 0x1e, 16, NULL, NULL, PPC_OPERAND_FPR }, /* The FRB field in an X or A form instruction. */ #define FRB FRAp + 1 #define FRB_MASK (0x1f << 11) { 0x1f, 11, NULL, NULL, PPC_OPERAND_FPR }, /* The FRBp field of DFP instructions. */ #define FRBp FRB + 1 { 0x1e, 11, NULL, NULL, PPC_OPERAND_FPR }, /* The FRC field in an A form instruction. */ #define FRC FRBp + 1 #define FRC_MASK (0x1f << 6) { 0x1f, 6, NULL, NULL, PPC_OPERAND_FPR }, /* The FRS field in an X form instruction or the FRT field in a D, X or A form instruction. */ #define FRS FRC + 1 #define FRT FRS { 0x1f, 21, NULL, NULL, PPC_OPERAND_FPR }, /* The FRSp field of stfdp or the FRTp field of lfdp and DFP instructions. */ #define FRSp FRS + 1 #define FRTp FRSp { 0x1e, 21, NULL, NULL, PPC_OPERAND_FPR }, /* The FXM field in an XFX instruction. */ #define FXM FRSp + 1 { 0xff, 12, insert_fxm, extract_fxm, 0 }, /* Power4 version for mfcr. */ #define FXM4 FXM + 1 { 0xff, 12, insert_fxm, extract_fxm, PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL_VALUE}, /* If the FXM4 operand is omitted, use the sentinel value -1. */ { -1, -1, NULL, NULL, 0}, /* The IMM20 field in an LI instruction. */ #define IMM20 FXM4 + 2 { 0xfffff, PPC_OPSHIFT_INV, insert_li20, extract_li20, PPC_OPERAND_SIGNED}, /* The L field in a D or X form instruction. */ #define L IMM20 + 1 { 0x1, 21, NULL, NULL, 0 }, /* The optional L field in tlbie and tlbiel instructions. */ #define LOPT L + 1 /* The R field in a HTM X form instruction. */ #define HTM_R LOPT { 0x1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The optional (for 32-bit) L field in cmp[l][i] instructions. */ #define L32OPT LOPT + 1 { 0x1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL32 }, /* The L field in dcbf instruction. */ #define L2OPT L32OPT + 1 { 0x3, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The LEV field in a POWER SVC form instruction. */ #define SVC_LEV L2OPT + 1 { 0x7f, 5, NULL, NULL, 0 }, /* The LEV field in an SC form instruction. */ #define LEV SVC_LEV + 1 { 0x7f, 5, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The LI field in an I form instruction. 
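It is the displacement of the unconditional branches b and bl, stored under the mask 0x3fffffc as a signed byte offset, which gives the familiar +/- 32MB reach.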
The lower two bits are forced to zero. */ #define LI LEV + 1 { 0x3fffffc, 0, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, /* The LI field in an I form instruction when used as an absolute address. */ #define LIA LI + 1 { 0x3fffffc, 0, NULL, NULL, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, /* The LS or WC field in an X (sync or wait) form instruction. */ #define LS LIA + 1 #define WC LS { 0x3, 21, insert_ls, NULL, PPC_OPERAND_OPTIONAL }, /* The ME field in an M form instruction. */ #define ME LS + 1 #define ME_MASK (0x1f << 1) { 0x1f, 1, NULL, NULL, 0 }, /* The MB and ME fields in an M form instruction expressed a single operand which is a bitmask indicating which bits to select. This is a two operand form using PPC_OPERAND_NEXT. See the description in opcode/ppc.h for what this means. */ #define MBE ME + 1 { 0x1f, 6, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT }, { -1, 0, insert_mbe, extract_mbe, 0 }, /* The MB or ME field in an MD or MDS form instruction. The high bit is wrapped to the low end. */ #define MB6 MBE + 2 #define ME6 MB6 #define MB6_MASK (0x3f << 5) { 0x3f, 5, insert_mb6, extract_mb6, 0 }, /* The NB field in an X form instruction. The value 32 is stored as 0. */ #define NB MB6 + 1 { 0x1f, 11, NULL, extract_nb, PPC_OPERAND_PLUS1 }, /* The NBI field in an lswi instruction, which has special value restrictions. The value 32 is stored as 0. */ #define NBI NB + 1 { 0x1f, 11, insert_nbi, extract_nb, PPC_OPERAND_PLUS1 }, /* The NSI field in a D form instruction. This is the same as the SI field, only negated. */ #define NSI NBI + 1 { 0xffff, 0, insert_nsi, extract_nsi, PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED }, /* The NSI field in a D form instruction when we accept a wide range of positive values. */ #define NSISIGNOPT NSI + 1 { 0xffff, 0, insert_nsi, extract_nsi, PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT }, /* The RA field in an D, DS, DQ, X, XO, M, or MDS form instruction. */ #define RA NSISIGNOPT + 1 #define RA_MASK (0x1f << 16) { 0x1f, 16, NULL, NULL, PPC_OPERAND_GPR }, /* As above, but 0 in the RA field means zero, not r0. */ #define RA0 RA + 1 { 0x1f, 16, NULL, NULL, PPC_OPERAND_GPR_0 }, /* The RA field in the DQ form lq or an lswx instruction, which have special value restrictions. */ #define RAQ RA0 + 1 #define RAX RAQ { 0x1f, 16, insert_raq, NULL, PPC_OPERAND_GPR_0 }, /* The RA field in a D or X form instruction which is an updating load, which means that the RA field may not be zero and may not equal the RT field. */ #define RAL RAQ + 1 { 0x1f, 16, insert_ral, NULL, PPC_OPERAND_GPR_0 }, /* The RA field in an lmw instruction, which has special value restrictions. */ #define RAM RAL + 1 { 0x1f, 16, insert_ram, NULL, PPC_OPERAND_GPR_0 }, /* The RA field in a D or X form instruction which is an updating store or an updating floating point load, which means that the RA field may not be zero. */ #define RAS RAM + 1 { 0x1f, 16, insert_ras, NULL, PPC_OPERAND_GPR_0 }, /* The RA field of the tlbwe, dccci and iccci instructions, which are optional. */ #define RAOPT RAS + 1 { 0x1f, 16, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL }, /* The RB field in an X, XO, M, or MDS form instruction. */ #define RB RAOPT + 1 #define RB_MASK (0x1f << 11) { 0x1f, 11, NULL, NULL, PPC_OPERAND_GPR }, /* The RB field in an X form instruction when it must be the same as the RS field in the instruction. This is used for extended mnemonics like mr. 
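For example, mr rA,rS is really or rA,rS,rS: insert_rbs simply copies the RS value from bits 21..25 into bits 11..15, and extract_rbs marks the instruction invalid when the two fields differ so that the plain or form is printed instead.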
*/ #define RBS RB + 1 { 0x1f, 11, insert_rbs, extract_rbs, PPC_OPERAND_FAKE }, /* The RB field in an lswx instruction, which has special value restrictions. */ #define RBX RBS + 1 { 0x1f, 11, insert_rbx, NULL, PPC_OPERAND_GPR }, /* The RB field of the dccci and iccci instructions, which are optional. */ #define RBOPT RBX + 1 { 0x1f, 11, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL }, /* The RC register field in an maddld, maddhd or maddhdu instruction. */ #define RC RBOPT + 1 { 0x1f, 6, NULL, NULL, PPC_OPERAND_GPR }, /* The RS field in a D, DS, X, XFX, XS, M, MD or MDS form instruction or the RT field in a D, DS, X, XFX or XO form instruction. */ #define RS RC + 1 #define RT RS #define RT_MASK (0x1f << 21) #define RD RS { 0x1f, 21, NULL, NULL, PPC_OPERAND_GPR }, /* The RS and RT fields of the DS form stq and DQ form lq instructions, which have special value restrictions. */ #define RSQ RS + 1 #define RTQ RSQ { 0x1e, 21, NULL, NULL, PPC_OPERAND_GPR }, /* The RS field of the tlbwe instruction, which is optional. */ #define RSO RSQ + 1 #define RTO RSO { 0x1f, 21, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL }, /* The RX field of the SE_RR form instruction. */ #define RX RSO + 1 { 0x1f, PPC_OPSHIFT_INV, insert_rx, extract_rx, PPC_OPERAND_GPR }, /* The ARX field of the SE_RR form instruction. */ #define ARX RX + 1 { 0x1f, PPC_OPSHIFT_INV, insert_arx, extract_arx, PPC_OPERAND_GPR }, /* The RY field of the SE_RR form instruction. */ #define RY ARX + 1 #define RZ RY { 0x1f, PPC_OPSHIFT_INV, insert_ry, extract_ry, PPC_OPERAND_GPR }, /* The ARY field of the SE_RR form instruction. */ #define ARY RY + 1 { 0x1f, PPC_OPSHIFT_INV, insert_ary, extract_ary, PPC_OPERAND_GPR }, /* The SCLSCI8 field in a D form instruction. */ #define SCLSCI8 ARY + 1 { 0xffffffff, PPC_OPSHIFT_INV, insert_sci8, extract_sci8, 0 }, /* The SCLSCI8N field in a D form instruction. This is the same as the SCLSCI8 field, only negated. */ #define SCLSCI8N SCLSCI8 + 1 { 0xffffffff, PPC_OPSHIFT_INV, insert_sci8n, extract_sci8n, PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED }, /* The SD field of the SD4 form instruction. */ #define SE_SD SCLSCI8N + 1 { 0xf, 8, NULL, NULL, PPC_OPERAND_PARENS }, /* The SD field of the SD4 form instruction, for halfword. */ #define SE_SDH SE_SD + 1 { 0x1e, PPC_OPSHIFT_INV, insert_sd4h, extract_sd4h, PPC_OPERAND_PARENS }, /* The SD field of the SD4 form instruction, for word. */ #define SE_SDW SE_SDH + 1 { 0x3c, PPC_OPSHIFT_INV, insert_sd4w, extract_sd4w, PPC_OPERAND_PARENS }, /* The SH field in an X or M form instruction. */ #define SH SE_SDW + 1 #define SH_MASK (0x1f << 11) /* The other UIMM field in a EVX form instruction. */ #define EVUIMM SH /* The FC field in an atomic X form instruction. */ #define FC SH { 0x1f, 11, NULL, NULL, 0 }, /* The SI field in a HTM X form instruction. */ #define HTM_SI SH + 1 { 0x1f, 11, NULL, NULL, PPC_OPERAND_SIGNED }, /* The SH field in an MD form instruction. This is split. */ #define SH6 HTM_SI + 1 #define SH6_MASK ((0x1f << 11) | (1 << 1)) { 0x3f, PPC_OPSHIFT_INV, insert_sh6, extract_sh6, 0 }, /* The SH field of the tlbwe instruction, which is optional. */ #define SHO SH6 + 1 { 0x1f, 11, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The SI field in a D form instruction. */ #define SI SHO + 1 { 0xffff, 0, NULL, NULL, PPC_OPERAND_SIGNED }, /* The SI field in a D form instruction when we accept a wide range of positive values. */ #define SISIGNOPT SI + 1 { 0xffff, 0, NULL, NULL, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT }, /* The SI8 field in a D form instruction. 
*/ #define SI8 SISIGNOPT + 1 { 0xff, 0, NULL, NULL, PPC_OPERAND_SIGNED }, /* The SPR field in an XFX form instruction. This is flipped--the lower 5 bits are stored in the upper 5 and vice- versa. */ #define SPR SI8 + 1 #define PMR SPR #define TMR SPR #define SPR_MASK (0x3ff << 11) { 0x3ff, 11, insert_spr, extract_spr, 0 }, /* The BAT index number in an XFX form m[ft]ibat[lu] instruction. */ #define SPRBAT SPR + 1 #define SPRBAT_MASK (0x3 << 17) { 0x3, 17, NULL, NULL, 0 }, /* The SPRG register number in an XFX form m[ft]sprg instruction. */ #define SPRG SPRBAT + 1 { 0x1f, 16, insert_sprg, extract_sprg, 0 }, /* The SR field in an X form instruction. */ #define SR SPRG + 1 /* The 4-bit UIMM field in a VX form instruction. */ #define UIMM4 SR { 0xf, 16, NULL, NULL, 0 }, /* The STRM field in an X AltiVec form instruction. */ #define STRM SR + 1 /* The T field in a tlbilx form instruction. */ #define T STRM /* The L field in wclr instructions. */ #define L2 STRM { 0x3, 21, NULL, NULL, 0 }, /* The ESYNC field in an X (sync) form instruction. */ #define ESYNC STRM + 1 { 0xf, 16, insert_esync, NULL, PPC_OPERAND_OPTIONAL }, /* The SV field in a POWER SC form instruction. */ #define SV ESYNC + 1 { 0x3fff, 2, NULL, NULL, 0 }, /* The TBR field in an XFX form instruction. This is like the SPR field, but it is optional. */ #define TBR SV + 1 { 0x3ff, 11, insert_tbr, extract_tbr, PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL_VALUE}, /* If the TBR operand is ommitted, use the value 268. */ { -1, 268, NULL, NULL, 0}, /* The TO field in a D or X form instruction. */ #define TO TBR + 2 #define DUI TO #define TO_MASK (0x1f << 21) { 0x1f, 21, NULL, NULL, 0 }, /* The UI field in a D form instruction. */ #define UI TO + 1 { 0xffff, 0, NULL, NULL, 0 }, #define UISIGNOPT UI + 1 { 0xffff, 0, NULL, NULL, PPC_OPERAND_SIGNOPT }, /* The IMM field in an SE_IM5 instruction. */ #define UI5 UISIGNOPT + 1 { 0x1f, 4, NULL, NULL, 0 }, /* The OIMM field in an SE_OIM5 instruction. */ #define OIMM5 UI5 + 1 { 0x1f, PPC_OPSHIFT_INV, insert_oimm, extract_oimm, PPC_OPERAND_PLUS1 }, /* The UI7 field in an SE_LI instruction. */ #define UI7 OIMM5 + 1 { 0x7f, 4, NULL, NULL, 0 }, /* The VA field in a VA, VX or VXR form instruction. */ #define VA UI7 + 1 { 0x1f, 16, NULL, NULL, PPC_OPERAND_VR }, /* The VB field in a VA, VX or VXR form instruction. */ #define VB VA + 1 { 0x1f, 11, NULL, NULL, PPC_OPERAND_VR }, /* The VC field in a VA form instruction. */ #define VC VB + 1 { 0x1f, 6, NULL, NULL, PPC_OPERAND_VR }, /* The VD or VS field in a VA, VX, VXR or X form instruction. */ #define VD VC + 1 #define VS VD { 0x1f, 21, NULL, NULL, PPC_OPERAND_VR }, /* The SIMM field in a VX form instruction, and TE in Z form. */ #define SIMM VD + 1 #define TE SIMM { 0x1f, 16, NULL, NULL, PPC_OPERAND_SIGNED}, /* The UIMM field in a VX form instruction. */ #define UIMM SIMM + 1 #define DCTL UIMM { 0x1f, 16, NULL, NULL, 0 }, /* The 3-bit UIMM field in a VX form instruction. */ #define UIMM3 UIMM + 1 { 0x7, 16, NULL, NULL, 0 }, /* The 6-bit UIM field in a X form instruction. */ #define UIM6 UIMM3 + 1 { 0x3f, 16, NULL, NULL, 0 }, /* The SIX field in a VX form instruction. */ #define SIX UIM6 + 1 { 0xf, 11, NULL, NULL, 0 }, /* The PS field in a VX form instruction. */ #define PS SIX + 1 { 0x1, 9, NULL, NULL, 0 }, /* The SHB field in a VA form instruction. */ #define SHB PS + 1 { 0xf, 6, NULL, NULL, 0 }, /* The other UIMM field in a half word EVX form instruction. 
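The operand is written as a byte offset: the mask 0x3e with a shift of 10 means only even values up to 62 can be encoded, and the hardware field in bits 11..15 ends up holding the offset divided by two; the word and doubleword variants below, EVUIMM_4 and EVUIMM_8, use masks 0x7c and 0xf8 the same way.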
*/ #define EVUIMM_2 SHB + 1 { 0x3e, 10, NULL, NULL, PPC_OPERAND_PARENS }, /* The other UIMM field in a word EVX form instruction. */ #define EVUIMM_4 EVUIMM_2 + 1 { 0x7c, 9, NULL, NULL, PPC_OPERAND_PARENS }, /* The other UIMM field in a double EVX form instruction. */ #define EVUIMM_8 EVUIMM_4 + 1 { 0xf8, 8, NULL, NULL, PPC_OPERAND_PARENS }, /* The WS or DRM field in an X form instruction. */ #define WS EVUIMM_8 + 1 #define DRM WS { 0x7, 11, NULL, NULL, 0 }, /* PowerPC paired singles extensions. */ /* W bit in the pair singles instructions for x type instructions. */ #define PSWM WS + 1 /* The BO16 field in a BD8 form instruction. */ #define BO16 PSWM { 0x1, 10, 0, 0, 0 }, /* IDX bits for quantization in the pair singles instructions. */ #define PSQ PSWM + 1 { 0x7, 12, 0, 0, 0 }, /* IDX bits for quantization in the pair singles x-type instructions. */ #define PSQM PSQ + 1 { 0x7, 7, 0, 0, 0 }, /* Smaller D field for quantization in the pair singles instructions. */ #define PSD PSQM + 1 { 0xfff, 0, 0, 0, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, /* The L field in an mtmsrd or A form instruction or R or W in an X form. */ #define A_L PSD + 1 #define W A_L #define X_R A_L { 0x1, 16, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The RMC or CY field in a Z23 form instruction. */ #define RMC A_L + 1 #define CY RMC { 0x3, 9, NULL, NULL, 0 }, #define R RMC + 1 { 0x1, 16, NULL, NULL, 0 }, #define RIC R + 1 { 0x3, 18, NULL, NULL, PPC_OPERAND_OPTIONAL }, #define PRS RIC + 1 { 0x1, 17, NULL, NULL, PPC_OPERAND_OPTIONAL }, #define SP PRS + 1 { 0x3, 19, NULL, NULL, 0 }, #define S SP + 1 { 0x1, 20, NULL, NULL, 0 }, /* The S field in a XL form instruction. */ #define SXL S + 1 { 0x1, 11, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL_VALUE}, /* If the SXL operand is ommitted, use the value 1. */ { -1, 1, NULL, NULL, 0}, /* SH field starting at bit position 16. */ #define SH16 SXL + 2 /* The DCM and DGM fields in a Z form instruction. */ #define DCM SH16 #define DGM DCM { 0x3f, 10, NULL, NULL, 0 }, /* The EH field in larx instruction. */ #define EH SH16 + 1 { 0x1, 0, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The L field in an mtfsf or XFL form instruction. */ /* The A field in a HTM X form instruction. */ #define XFL_L EH + 1 #define HTM_A XFL_L { 0x1, 25, NULL, NULL, PPC_OPERAND_OPTIONAL}, /* Xilinx APU related masks and macros */ #define FCRT XFL_L + 1 #define FCRT_MASK (0x1f << 21) { 0x1f, 21, 0, 0, PPC_OPERAND_FCR }, /* Xilinx FSL related masks and macros */ #define FSL FCRT + 1 #define FSL_MASK (0x1f << 11) { 0x1f, 11, 0, 0, PPC_OPERAND_FSL }, /* Xilinx UDI related masks and macros */ #define URT FSL + 1 { 0x1f, 21, 0, 0, PPC_OPERAND_UDI }, #define URA URT + 1 { 0x1f, 16, 0, 0, PPC_OPERAND_UDI }, #define URB URA + 1 { 0x1f, 11, 0, 0, PPC_OPERAND_UDI }, #define URC URB + 1 { 0x1f, 6, 0, 0, PPC_OPERAND_UDI }, /* The VLESIMM field in a D form instruction. */ #define VLESIMM URC + 1 { 0xffff, PPC_OPSHIFT_INV, insert_vlesi, extract_vlesi, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT }, /* The VLENSIMM field in a D form instruction. */ #define VLENSIMM VLESIMM + 1 { 0xffff, PPC_OPSHIFT_INV, insert_vlensi, extract_vlensi, PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT }, /* The VLEUIMM field in a D form instruction. */ #define VLEUIMM VLENSIMM + 1 { 0xffff, PPC_OPSHIFT_INV, insert_vleui, extract_vleui, 0 }, /* The VLEUIMML field in a D form instruction. 
*/ #define VLEUIMML VLEUIMM + 1 { 0xffff, PPC_OPSHIFT_INV, insert_vleil, extract_vleil, 0 }, /* The XT and XS fields in an XX1 or XX3 form instruction. This is split. */ #define XS6 VLEUIMML + 1 #define XT6 XS6 { 0x3f, PPC_OPSHIFT_INV, insert_xt6, extract_xt6, PPC_OPERAND_VSR }, /* The XT and XS fields in an DQ form VSX instruction. This is split. */ #define XSQ6 XT6 + 1 #define XTQ6 XSQ6 { 0x3f, PPC_OPSHIFT_INV, insert_xtq6, extract_xtq6, PPC_OPERAND_VSR }, /* The XA field in an XX3 form instruction. This is split. */ #define XA6 XTQ6 + 1 { 0x3f, PPC_OPSHIFT_INV, insert_xa6, extract_xa6, PPC_OPERAND_VSR }, /* The XB field in an XX2 or XX3 form instruction. This is split. */ #define XB6 XA6 + 1 { 0x3f, PPC_OPSHIFT_INV, insert_xb6, extract_xb6, PPC_OPERAND_VSR }, /* The XB field in an XX3 form instruction when it must be the same as the XA field in the instruction. This is used in extended mnemonics like xvmovdp. This is split. */ #define XB6S XB6 + 1 { 0x3f, PPC_OPSHIFT_INV, insert_xb6s, extract_xb6s, PPC_OPERAND_FAKE }, /* The XC field in an XX4 form instruction. This is split. */ #define XC6 XB6S + 1 { 0x3f, PPC_OPSHIFT_INV, insert_xc6, extract_xc6, PPC_OPERAND_VSR }, /* The DM or SHW field in an XX3 form instruction. */ #define DM XC6 + 1 #define SHW DM { 0x3, 8, NULL, NULL, 0 }, /* The DM field in an extended mnemonic XX3 form instruction. */ #define DMEX DM + 1 { 0x3, 8, insert_dm, extract_dm, 0 }, /* The UIM field in an XX2 form instruction. */ #define UIM DMEX + 1 /* The 2-bit UIMM field in a VX form instruction. */ #define UIMM2 UIM /* The 2-bit L field in a darn instruction. */ #define LRAND UIM { 0x3, 16, NULL, NULL, 0 }, #define ERAT_T UIM + 1 { 0x7, 21, NULL, NULL, 0 }, #define IH ERAT_T + 1 { 0x7, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The 8-bit IMM8 field in a XX1 form instruction. */ #define IMM8 IH + 1 { 0xff, 11, NULL, NULL, PPC_OPERAND_SIGNOPT }, }; const unsigned int num_powerpc_operands = (sizeof (powerpc_operands) / sizeof (powerpc_operands[0])); /* The functions used to insert and extract complicated operands. */ /* The ARX, ARY, RX and RY operands are alternate encodings of GPRs. 
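The 16-bit SE_RR form has only four-bit register fields, so these operands reach just a subset of the GPRs: insert_rx below maps r0-r7 to encodings 0-7 and r24-r31 to 8-15, while insert_arx accepts only r8-r23 and stores the register number minus eight.  */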
*/ static unsigned long insert_arx (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { if (value >= 8 && value < 24) return insn | ((value - 8) & 0xf); else { *errmsg = _("invalid register"); return 0; } } static long extract_arx (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return (insn & 0xf) + 8; } static unsigned long insert_ary (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { if (value >= 8 && value < 24) return insn | (((value - 8) & 0xf) << 4); else { *errmsg = _("invalid register"); return 0; } } static long extract_ary (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn >> 4) & 0xf) + 8; } static unsigned long insert_rx (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { if (value >= 0 && value < 8) return insn | value; else if (value >= 24 && value <= 31) return insn | (value - 16); else { *errmsg = _("invalid register"); return 0; } } static long extract_rx (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { int value = insn & 0xf; if (value >= 0 && value < 8) return value; else return value + 16; } static unsigned long insert_ry (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { if (value >= 0 && value < 8) return insn | (value << 4); else if (value >= 24 && value <= 31) return insn | ((value - 16) << 4); else { *errmsg = _("invalid register"); return 0; } } static long extract_ry (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { int value = (insn >> 4) & 0xf; if (value >= 0 && value < 8) return value; else return value + 16; } /* The BA field in an XL form instruction when it must be the same as the BT field in the same instruction. This operand is marked FAKE. The insertion function just copies the BT field into the BA field, and the extraction function just checks that the fields are the same. */ static unsigned long insert_bat (unsigned long insn, long value ATTRIBUTE_UNUSED, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | (((insn >> 21) & 0x1f) << 16); } static long extract_bat (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { if (((insn >> 21) & 0x1f) != ((insn >> 16) & 0x1f)) *invalid = 1; return 0; } /* The BB field in an XL form instruction when it must be the same as the BA field in the same instruction. This operand is marked FAKE. The insertion function just copies the BA field into the BB field, and the extraction function just checks that the fields are the same. */ static unsigned long insert_bba (unsigned long insn, long value ATTRIBUTE_UNUSED, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | (((insn >> 16) & 0x1f) << 11); } static long extract_bba (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { if (((insn >> 16) & 0x1f) != ((insn >> 11) & 0x1f)) *invalid = 1; return 0; } /* The BD field in a B form instruction when the - modifier is used. This modifier means that the branch is not expected to be taken. For chips built to versions of the architecture prior to version 2 (ie. not Power4 compatible), we set the y bit of the BO field to 1 if the offset is negative. 
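A negative displacement is the classic backward loop branch, which the old static prediction rule assumes taken, so asking for a not-taken hint there means setting y to reverse the default.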
When extracting, we require that the y bit be 1 and that the offset be positive, since if the y bit is 0 we just want to print the normal form of the instruction. Power4 compatible targets use two bits, "a", and "t", instead of the "y" bit. "at" == 00 => no hint, "at" == 01 => unpredictable, "at" == 10 => not taken, "at" == 11 => taken. The "t" bit is 00001 in BO field, the "a" bit is 00010 for branch on CR(BI) and 01000 for branch on CTR. We only handle the taken/not-taken hint here. Note that we don't relax the conditions tested here when disassembling with -Many because insns using extract_bdm and extract_bdp always occur in pairs. One or the other will always be valid. */ #define ISA_V2 (PPC_OPCODE_POWER4 | PPC_OPCODE_E500MC | PPC_OPCODE_TITAN) static unsigned long insert_bdm (unsigned long insn, long value, ppc_cpu_t dialect, const char **errmsg ATTRIBUTE_UNUSED) { if ((dialect & ISA_V2) == 0) { if ((value & 0x8000) != 0) insn |= 1 << 21; } else { if ((insn & (0x14 << 21)) == (0x04 << 21)) insn |= 0x02 << 21; else if ((insn & (0x14 << 21)) == (0x10 << 21)) insn |= 0x08 << 21; } return insn | (value & 0xfffc); } static long extract_bdm (unsigned long insn, ppc_cpu_t dialect, int *invalid) { if ((dialect & ISA_V2) == 0) { if (((insn & (1 << 21)) == 0) != ((insn & (1 << 15)) == 0)) *invalid = 1; } else { if ((insn & (0x17 << 21)) != (0x06 << 21) && (insn & (0x1d << 21)) != (0x18 << 21)) *invalid = 1; } return ((insn & 0xfffc) ^ 0x8000) - 0x8000; } /* The BD field in a B form instruction when the + modifier is used. This is like BDM, above, except that the branch is expected to be taken. */ static unsigned long insert_bdp (unsigned long insn, long value, ppc_cpu_t dialect, const char **errmsg ATTRIBUTE_UNUSED) { if ((dialect & ISA_V2) == 0) { if ((value & 0x8000) == 0) insn |= 1 << 21; } else { if ((insn & (0x14 << 21)) == (0x04 << 21)) insn |= 0x03 << 21; else if ((insn & (0x14 << 21)) == (0x10 << 21)) insn |= 0x09 << 21; } return insn | (value & 0xfffc); } static long extract_bdp (unsigned long insn, ppc_cpu_t dialect, int *invalid) { if ((dialect & ISA_V2) == 0) { if (((insn & (1 << 21)) == 0) == ((insn & (1 << 15)) == 0)) *invalid = 1; } else { if ((insn & (0x17 << 21)) != (0x07 << 21) && (insn & (0x1d << 21)) != (0x19 << 21)) *invalid = 1; } return ((insn & 0xfffc) ^ 0x8000) - 0x8000; } static inline int valid_bo_pre_v2 (long value) { /* Certain encodings have bits that are required to be zero. These are (z must be zero, y may be anything): 0000y 0001y 001zy 0100y 0101y 011zy 1z00y 1z01y 1z1zz */ if ((value & 0x14) == 0) return 1; else if ((value & 0x14) == 0x4) return (value & 0x2) == 0; else if ((value & 0x14) == 0x10) return (value & 0x8) == 0; else return value == 0x14; } static inline int valid_bo_post_v2 (long value) { /* Certain encodings have bits that are required to be zero. These are (z must be zero, a & t may be anything): 0000z 0001z 001at 0100z 0101z 011at 1a00t 1a01t 1z1zz */ if ((value & 0x14) == 0) return (value & 0x1) == 0; else if ((value & 0x14) == 0x14) return value == 0x14; else return 1; } /* Check for legal values of a BO field. */ static int valid_bo (long value, ppc_cpu_t dialect, int extract) { int valid_y = valid_bo_pre_v2 (value); int valid_at = valid_bo_post_v2 (value); /* When disassembling with -Many, accept either encoding on the second pass through opcodes. */ if (extract && dialect == ~(ppc_cpu_t) PPC_OPCODE_ANY) return valid_y || valid_at; if ((dialect & ISA_V2) == 0) return valid_y; else return valid_at; } /* The BO field in a B form instruction. 
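Besides the valid_bo check, insert_bo refuses a BO value whose 0x4 bit is clear on the opcode 19 branch-to-CTR forms (the insn & 0x400 test), since such an encoding would ask a branch through the count register to also decrement it.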
Warn about attempts to set the field to an illegal value. */ static unsigned long insert_bo (unsigned long insn, long value, ppc_cpu_t dialect, const char **errmsg) { if (!valid_bo (value, dialect, 0)) *errmsg = _("invalid conditional option"); else if (PPC_OP (insn) == 19 && (insn & 0x400) && ! (value & 4)) *errmsg = _("invalid counter access"); return insn | ((value & 0x1f) << 21); } static long extract_bo (unsigned long insn, ppc_cpu_t dialect, int *invalid) { long value; value = (insn >> 21) & 0x1f; if (!valid_bo (value, dialect, 1)) *invalid = 1; return value; } /* The BO field in a B form instruction when the + or - modifier is used. This is like the BO field, but it must be even. When extracting it, we force it to be even. */ static unsigned long insert_boe (unsigned long insn, long value, ppc_cpu_t dialect, const char **errmsg) { if (!valid_bo (value, dialect, 0)) *errmsg = _("invalid conditional option"); else if (PPC_OP (insn) == 19 && (insn & 0x400) && ! (value & 4)) *errmsg = _("invalid counter access"); else if ((value & 1) != 0) *errmsg = _("attempt to set y bit when using + or - modifier"); return insn | ((value & 0x1f) << 21); } static long extract_boe (unsigned long insn, ppc_cpu_t dialect, int *invalid) { long value; value = (insn >> 21) & 0x1f; if (!valid_bo (value, dialect, 1)) *invalid = 1; return value & 0x1e; } /* The DCMX field in a X form instruction when the field is split into separate DC, DM and DX fields. */ static unsigned long insert_dcmxs (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0x1f) << 16) | ((value & 0x20) >> 3) | (value & 0x40); } static long extract_dcmxs (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return (insn & 0x40) | ((insn << 3) & 0x20) | ((insn >> 16) & 0x1f); } /* The D field in a DX form instruction when the field is split into separate D0, D1 and D2 fields. */ static unsigned long insert_dxd (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | (value & 0xffc1) | ((value & 0x3e) << 15); } static long extract_dxd (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { unsigned long dxd = (insn & 0xffc1) | ((insn >> 15) & 0x3e); return (dxd ^ 0x8000) - 0x8000; } static unsigned long insert_dxdn (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insert_dxd (insn, -value, dialect, errmsg); } static long extract_dxdn (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return -extract_dxd (insn, dialect, invalid); } /* FXM mask in mfcr and mtcrf instructions. */ static unsigned long insert_fxm (unsigned long insn, long value, ppc_cpu_t dialect, const char **errmsg) { /* If we're handling the mfocrf and mtocrf insns ensure that exactly one bit of the mask field is set. */ if ((insn & (1 << 20)) != 0) { if (value == 0 || (value & -value) != value) { *errmsg = _("invalid mask field"); value = 0; } } /* If only one bit of the FXM field is set, we can use the new form of the instruction, which is faster. Unlike the Power4 branch hint encoding, this is not backward compatible. Do not generate the new form unless -mpower4 has been given, or -many and the two operand form of mfcr was used. 
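For example, a mask of 0x80 names only cr0, so under -mpower4 mtcrf 0x80,rS can be emitted in the single-field mtocrf encoding by setting the bit at 1 << 20; the same test lets a single-field mfcr become mfocrf.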
*/ else if (value > 0 && (value & -value) == value && ((dialect & PPC_OPCODE_POWER4) != 0 || ((dialect & PPC_OPCODE_ANY) != 0 && (insn & (0x3ff << 1)) == 19 << 1))) insn |= 1 << 20; /* Any other value on mfcr is an error. */ else if ((insn & (0x3ff << 1)) == 19 << 1) { /* A value of -1 means we used the one operand form of mfcr which is valid. */ if (value != -1) *errmsg = _("invalid mfcr mask"); value = 0; } return insn | ((value & 0xff) << 12); } static long extract_fxm (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { long mask = (insn >> 12) & 0xff; /* Is this a Power4 insn? */ if ((insn & (1 << 20)) != 0) { /* Exactly one bit of MASK should be set. */ if (mask == 0 || (mask & -mask) != mask) *invalid = 1; } /* Check that non-power4 form of mfcr has a zero MASK. */ else if ((insn & (0x3ff << 1)) == 19 << 1) { if (mask != 0) *invalid = 1; else mask = -1; } return mask; } static unsigned long insert_li20 (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0xf0000) >> 5) | ((value & 0x0f800) << 5) | (value & 0x7ff); } static long extract_li20 (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { long ext = ((insn & 0x4000) == 0x4000) ? 0xfff00000 : 0x00000000; return ext | (((insn >> 11) & 0xf) << 16) | (((insn >> 17) & 0xf) << 12) | (((insn >> 16) & 0x1) << 11) | (insn & 0x7ff); } /* The 2-bit L field in a SYNC or WC field in a WAIT instruction. For SYNC, some L values are reserved: * Value 3 is reserved on newer server cpus. * Values 2 and 3 are reserved on all other cpus. */ static unsigned long insert_ls (unsigned long insn, long value, ppc_cpu_t dialect, const char **errmsg) { /* For SYNC, some L values are illegal. */ if (((insn >> 1) & 0x3ff) == 598) { long max_lvalue = (dialect & PPC_OPCODE_POWER4) ? 2 : 1; if (value > max_lvalue) { *errmsg = _("illegal L operand value"); return insn; } } return insn | ((value & 0x3) << 21); } /* The 4-bit E field in a sync instruction that accepts 2 operands. If ESYNC is non-zero, then the L field must be either 0 or 1 and the complement of ESYNC-bit2. */ static unsigned long insert_esync (unsigned long insn, long value, ppc_cpu_t dialect, const char **errmsg) { unsigned long ls = (insn >> 21) & 0x03; if (value == 0) { if (((dialect & PPC_OPCODE_E6500) != 0 && ls > 1) || ((dialect & PPC_OPCODE_POWER9) != 0 && ls > 2)) *errmsg = _("illegal L operand value"); return insn; } if ((ls & ~0x1) || (((value >> 1) & 0x1) ^ ls) == 0) *errmsg = _("incompatible L operand value"); return insn | ((value & 0xf) << 16); } /* The MB and ME fields in an M form instruction expressed as a single operand which is itself a bitmask. The extraction function always marks it as invalid, since we never want to recognize an instruction which uses a field of this type. 
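For example, the mask 0x00fff000 has a single run of ones from big-endian bit 8 through bit 19, so insert_mbe encodes MB as 8 and ME as 19; a value with more than one run of ones, such as 0x0ff00ff0, draws the illegal bitmask error.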
*/ static unsigned long insert_mbe (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { unsigned long uval, mask; int mb, me, mx, count, last; uval = value; if (uval == 0) { *errmsg = _("illegal bitmask"); return insn; } mb = 0; me = 32; if ((uval & 1) != 0) last = 1; else last = 0; count = 0; /* mb: location of last 0->1 transition */ /* me: location of last 1->0 transition */ /* count: # transitions */ for (mx = 0, mask = 1L << 31; mx < 32; ++mx, mask >>= 1) { if ((uval & mask) && !last) { ++count; mb = mx; last = 1; } else if (!(uval & mask) && last) { ++count; me = mx; last = 0; } } if (me == 0) me = 32; if (count != 2 && (count != 0 || ! last)) *errmsg = _("illegal bitmask"); return insn | (mb << 6) | ((me - 1) << 1); } static long extract_mbe (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { long ret; int mb, me; int i; *invalid = 1; mb = (insn >> 6) & 0x1f; me = (insn >> 1) & 0x1f; if (mb < me + 1) { ret = 0; for (i = mb; i <= me; i++) ret |= 1L << (31 - i); } else if (mb == me + 1) ret = ~0; else /* (mb > me + 1) */ { ret = ~0; for (i = me + 1; i < mb; i++) ret &= ~(1L << (31 - i)); } return ret; } /* The MB or ME field in an MD or MDS form instruction. The high bit is wrapped to the low end. */ static unsigned long insert_mb6 (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0x1f) << 6) | (value & 0x20); } static long extract_mb6 (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn >> 6) & 0x1f) | (insn & 0x20); } /* The NB field in an X form instruction. The value 32 is stored as 0. */ static long extract_nb (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { long ret; ret = (insn >> 11) & 0x1f; if (ret == 0) ret = 32; return ret; } /* The NB field in an lswi instruction, which has special value restrictions. The value 32 is stored as 0. */ static unsigned long insert_nbi (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { long rtvalue = (insn & RT_MASK) >> 21; long ravalue = (insn & RA_MASK) >> 16; if (value == 0) value = 32; if (rtvalue + (value + 3) / 4 > (rtvalue > ravalue ? ravalue + 32 : ravalue)) *errmsg = _("address register in load range"); return insn | ((value & 0x1f) << 11); } /* The NSI field in a D form instruction. This is the same as the SI field, only negated. The extraction function always marks it as invalid, since we never want to recognize an instruction which uses a field of this type. */ static unsigned long insert_nsi (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | (-value & 0xffff); } static long extract_nsi (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { *invalid = 1; return -(((insn & 0xffff) ^ 0x8000) - 0x8000); } /* The RA field in a D or X form instruction which is an updating load, which means that the RA field may not be zero and may not equal the RT field. */ static unsigned long insert_ral (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { if (value == 0 || (unsigned long) value == ((insn >> 21) & 0x1f)) *errmsg = "invalid register operand when updating"; return insn | ((value & 0x1f) << 16); } /* The RA field in an lmw instruction, which has special value restrictions. 
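lmw loads registers RT through r31, so an RA inside that range would be overwritten before the load completes; insert_ram therefore requires RA to be strictly less than RT.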
*/ static unsigned long insert_ram (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { if ((unsigned long) value >= ((insn >> 21) & 0x1f)) *errmsg = _("index register in load range"); return insn | ((value & 0x1f) << 16); } /* The RA field in the DQ form lq or an lswx instruction, which have special value restrictions. */ static unsigned long insert_raq (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { long rtvalue = (insn & RT_MASK) >> 21; if (value == rtvalue) *errmsg = _("source and target register operands must be different"); return insn | ((value & 0x1f) << 16); } /* The RA field in a D or X form instruction which is an updating store or an updating floating point load, which means that the RA field may not be zero. */ static unsigned long insert_ras (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { if (value == 0) *errmsg = _("invalid register operand when updating"); return insn | ((value & 0x1f) << 16); } /* The RB field in an X form instruction when it must be the same as the RS field in the instruction. This is used for extended mnemonics like mr. This operand is marked FAKE. The insertion function just copies the BT field into the BA field, and the extraction function just checks that the fields are the same. */ static unsigned long insert_rbs (unsigned long insn, long value ATTRIBUTE_UNUSED, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | (((insn >> 21) & 0x1f) << 11); } static long extract_rbs (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { if (((insn >> 21) & 0x1f) != ((insn >> 11) & 0x1f)) *invalid = 1; return 0; } /* The RB field in an lswx instruction, which has special value restrictions. */ static unsigned long insert_rbx (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { long rtvalue = (insn & RT_MASK) >> 21; if (value == rtvalue) *errmsg = _("source and target register operands must be different"); return insn | ((value & 0x1f) << 11); } /* The SCI8 field is made up of SCL and {U,N}I8 fields. 
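The encoded value is one significant byte plus a fill pattern: 0x00ab0000, for example, becomes UI8 0xab with SCL 2 and the F bit clear, while 0xffffff07 becomes UI8 0x07 with SCL 0 and the F bit (0x400) set so the remaining bytes read back as 0xff; anything that is not a single byte surrounded by all-zero or all-one bytes is rejected as an illegal immediate.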
*/ static unsigned long insert_sci8 (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { unsigned int fill_scale = 0; unsigned long ui8 = value; if ((ui8 & 0xffffff00) == 0) ; else if ((ui8 & 0xffffff00) == 0xffffff00) fill_scale = 0x400; else if ((ui8 & 0xffff00ff) == 0) { fill_scale = 1 << 8; ui8 >>= 8; } else if ((ui8 & 0xffff00ff) == 0xffff00ff) { fill_scale = 0x400 | (1 << 8); ui8 >>= 8; } else if ((ui8 & 0xff00ffff) == 0) { fill_scale = 2 << 8; ui8 >>= 16; } else if ((ui8 & 0xff00ffff) == 0xff00ffff) { fill_scale = 0x400 | (2 << 8); ui8 >>= 16; } else if ((ui8 & 0x00ffffff) == 0) { fill_scale = 3 << 8; ui8 >>= 24; } else if ((ui8 & 0x00ffffff) == 0x00ffffff) { fill_scale = 0x400 | (3 << 8); ui8 >>= 24; } else { *errmsg = _("illegal immediate value"); ui8 = 0; } return insn | fill_scale | (ui8 & 0xff); } static long extract_sci8 (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { int fill = insn & 0x400; int scale_factor = (insn & 0x300) >> 5; long value = (insn & 0xff) << scale_factor; if (fill != 0) value |= ~((long) 0xff << scale_factor); return value; } static unsigned long insert_sci8n (unsigned long insn, long value, ppc_cpu_t dialect, const char **errmsg) { return insert_sci8 (insn, -value, dialect, errmsg); } static long extract_sci8n (unsigned long insn, ppc_cpu_t dialect, int *invalid) { return -extract_sci8 (insn, dialect, invalid); } static unsigned long insert_sd4h (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0x1e) << 7); } static long extract_sd4h (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn >> 8) & 0xf) << 1; } static unsigned long insert_sd4w (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0x3c) << 6); } static long extract_sd4w (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn >> 8) & 0xf) << 2; } static unsigned long insert_oimm (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | (((value - 1) & 0x1f) << 4); } static long extract_oimm (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn >> 4) & 0x1f) + 1; } /* The SH field in an MD form instruction. This is split. */ static unsigned long insert_sh6 (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { /* SH6 operand in the rldixor instructions. */ if (PPC_OP (insn) == 4) return insn | ((value & 0x1f) << 6) | ((value & 0x20) >> 5); else return insn | ((value & 0x1f) << 11) | ((value & 0x20) >> 4); } static long extract_sh6 (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { /* SH6 operand in the rldixor instructions. */ if (PPC_OP (insn) == 4) return ((insn >> 6) & 0x1f) | ((insn << 5) & 0x20); else return ((insn >> 11) & 0x1f) | ((insn << 4) & 0x20); } /* The SPR field in an XFX form instruction. This is flipped--the lower 5 bits are stored in the upper 5 and vice- versa. 
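For example, SPR 268 (the time base, and the default supplied for the TBR operand above) is 0x10c: insert_spr places its low five bits, 0x0c, in bits 16..20 and its high five bits, 0x08, in bits 11..15, and extract_spr swaps them back.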
*/ static unsigned long insert_spr (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0x1f) << 16) | ((value & 0x3e0) << 6); } static long extract_spr (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0); } /* Some dialects have 8 SPRG registers instead of the standard 4. */ #define ALLOW8_SPRG (PPC_OPCODE_BOOKE | PPC_OPCODE_405) static unsigned long insert_sprg (unsigned long insn, long value, ppc_cpu_t dialect, const char **errmsg) { if (value > 7 || (value > 3 && (dialect & ALLOW8_SPRG) == 0)) *errmsg = _("invalid sprg number"); /* If this is mfsprg4..7 then use spr 260..263 which can be read in user mode. Anything else must use spr 272..279. */ if (value <= 3 || (insn & 0x100) != 0) value |= 0x10; return insn | ((value & 0x17) << 16); } static long extract_sprg (unsigned long insn, ppc_cpu_t dialect, int *invalid) { unsigned long val = (insn >> 16) & 0x1f; /* mfsprg can use 260..263 and 272..279. mtsprg only uses spr 272..279 If not BOOKE, 405 or VLE, then both use only 272..275. */ if ((val - 0x10 > 3 && (dialect & ALLOW8_SPRG) == 0) || (val - 0x10 > 7 && (insn & 0x100) != 0) || val <= 3 || (val & 8) != 0) *invalid = 1; return val & 7; } /* The TBR field in an XFX instruction. This is just like SPR, but it is optional. */ static unsigned long insert_tbr (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { if (value != 268 && value != 269) *errmsg = _("invalid tbr number"); return insn | ((value & 0x1f) << 16) | ((value & 0x3e0) << 6); } static long extract_tbr (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { long ret; ret = ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0); if (ret != 268 && ret != 269) *invalid = 1; return ret; } /* The XT and XS fields in an XX1 or XX3 form instruction. This is split. */ static unsigned long insert_xt6 (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0x1f) << 21) | ((value & 0x20) >> 5); } static long extract_xt6 (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn << 5) & 0x20) | ((insn >> 21) & 0x1f); } /* The XT and XS fields in an DQ form VSX instruction. This is split. */ static unsigned long insert_xtq6 (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0x1f) << 21) | ((value & 0x20) >> 2); } static long extract_xtq6 (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn << 2) & 0x20) | ((insn >> 21) & 0x1f); } /* The XA field in an XX3 form instruction. This is split. */ static unsigned long insert_xa6 (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0x1f) << 16) | ((value & 0x20) >> 3); } static long extract_xa6 (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn << 3) & 0x20) | ((insn >> 16) & 0x1f); } /* The XB field in an XX3 form instruction. This is split. 
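Split means the six-bit VSX register number is stored as five bits plus a separate extension bit: insert_xb6 below puts the low five bits of the value in the normal RB position at bit 11 and the high bit at bit 1, so vs40 ends up as register 8 with the extension bit set.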
*/ static unsigned long insert_xb6 (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0x1f) << 11) | ((value & 0x20) >> 4); } static long extract_xb6 (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn << 4) & 0x20) | ((insn >> 11) & 0x1f); } /* The XB field in an XX3 form instruction when it must be the same as the XA field in the instruction. This is used for extended mnemonics like xvmovdp. This operand is marked FAKE. The insertion function just copies the XA field into the XB field, and the extraction function just checks that the fields are the same. */ static unsigned long insert_xb6s (unsigned long insn, long value ATTRIBUTE_UNUSED, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | (((insn >> 16) & 0x1f) << 11) | (((insn >> 2) & 0x1) << 1); } static long extract_xb6s (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { if ((((insn >> 16) & 0x1f) != ((insn >> 11) & 0x1f)) || (((insn >> 2) & 0x1) != ((insn >> 1) & 0x1))) *invalid = 1; return 0; } /* The XC field in an XX4 form instruction. This is split. */ static unsigned long insert_xc6 (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0x1f) << 6) | ((value & 0x20) >> 2); } static long extract_xc6 (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn << 2) & 0x20) | ((insn >> 6) & 0x1f); } static unsigned long insert_dm (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { if (value != 0 && value != 1) *errmsg = _("invalid constant"); return insn | (((value) ? 3 : 0) << 8); } static long extract_dm (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { long value; value = (insn >> 8) & 3; if (value != 0 && value != 3) *invalid = 1; return (value) ? 1 : 0; } /* The VLESIMM field in an I16A form instruction. This is split. */ static unsigned long insert_vlesi (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0xf800) << 10) | (value & 0x7ff); } static long extract_vlesi (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { long value = ((insn >> 10) & 0xf800) | (insn & 0x7ff); value = (value ^ 0x8000) - 0x8000; return value; } static unsigned long insert_vlensi (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { value = -value; return insn | ((value & 0xf800) << 10) | (value & 0x7ff); } static long extract_vlensi (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { long value = ((insn >> 10) & 0xf800) | (insn & 0x7ff); value = (value ^ 0x8000) - 0x8000; /* Don't use for disassembly. */ *invalid = 1; return -value; } /* The VLEUIMM field in an I16A form instruction. This is split. */ static unsigned long insert_vleui (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0xf800) << 10) | (value & 0x7ff); } static long extract_vleui (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn >> 10) & 0xf800) | (insn & 0x7ff); } /* The VLEUIMML field in an I16L form instruction. This is split. 
*/ static unsigned long insert_vleil (unsigned long insn, long value, ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0xf800) << 5) | (value & 0x7ff); } static long extract_vleil (unsigned long insn, ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn >> 5) & 0xf800) | (insn & 0x7ff); } /* Macros used to form opcodes. */ /* The main opcode. */ #define OP(x) ((((unsigned long)(x)) & 0x3f) << 26) #define OP_MASK OP (0x3f) /* The main opcode combined with a trap code in the TO field of a D form instruction. Used for extended mnemonics for the trap instructions. */ #define OPTO(x,to) (OP (x) | ((((unsigned long)(to)) & 0x1f) << 21)) #define OPTO_MASK (OP_MASK | TO_MASK) /* The main opcode combined with a comparison size bit in the L field of a D form or X form instruction. Used for extended mnemonics for the comparison instructions. */ #define OPL(x,l) (OP (x) | ((((unsigned long)(l)) & 1) << 21)) #define OPL_MASK OPL (0x3f,1) /* The main opcode combined with an update code in D form instruction. Used for extended mnemonics for VLE memory instructions. */ #define OPVUP(x,vup) (OP (x) | ((((unsigned long)(vup)) & 0xff) << 8)) #define OPVUP_MASK OPVUP (0x3f, 0xff) /* The main opcode combined with an update code and the RT fields specified in D form instruction. Used for VLE volatile context save/restore instructions. */ #define OPVUPRT(x,vup,rt) (OPVUP (x, vup) | ((((unsigned long)(rt)) & 0x1f) << 21)) #define OPVUPRT_MASK OPVUPRT (0x3f, 0xff, 0x1f) /* An A form instruction. */ #define A(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1) | (((unsigned long)(rc)) & 1)) #define A_MASK A (0x3f, 0x1f, 1) /* An A_MASK with the FRB field fixed. */ #define AFRB_MASK (A_MASK | FRB_MASK) /* An A_MASK with the FRC field fixed. */ #define AFRC_MASK (A_MASK | FRC_MASK) /* An A_MASK with the FRA and FRC fields fixed. */ #define AFRAFRC_MASK (A_MASK | FRA_MASK | FRC_MASK) /* An AFRAFRC_MASK, but with L bit clear. */ #define AFRALFRC_MASK (AFRAFRC_MASK & ~((unsigned long) 1 << 16)) /* A B form instruction. */ #define B(op, aa, lk) (OP (op) | ((((unsigned long)(aa)) & 1) << 1) | ((lk) & 1)) #define B_MASK B (0x3f, 1, 1) /* A BD8 form instruction. This is a 16-bit instruction. */ #define BD8(op, aa, lk) (((((unsigned long)(op)) & 0x3f) << 10) | (((aa) & 1) << 9) | (((lk) & 1) << 8)) #define BD8_MASK BD8 (0x3f, 1, 1) /* Another BD8 form instruction. This is a 16-bit instruction. */ #define BD8IO(op) ((((unsigned long)(op)) & 0x1f) << 11) #define BD8IO_MASK BD8IO (0x1f) /* A BD8 form instruction for simplified mnemonics. */ #define EBD8IO(op, bo, bi) (BD8IO ((op)) | ((bo) << 10) | ((bi) << 8)) /* A mask that excludes BO32 and BI32. */ #define EBD8IO1_MASK 0xf800 /* A mask that includes BO32 and excludes BI32. */ #define EBD8IO2_MASK 0xfc00 /* A mask that include BO32 AND BI32. */ #define EBD8IO3_MASK 0xff00 /* A BD15 form instruction. */ #define BD15(op, aa, lk) (OP (op) | ((((unsigned long)(aa)) & 0xf) << 22) | ((lk) & 1)) #define BD15_MASK BD15 (0x3f, 0xf, 1) /* A BD15 form instruction for extended conditional branch mnemonics. */ #define EBD15(op, aa, bo, lk) (((op) & 0x3f) << 26) | (((aa) & 0xf) << 22) | (((bo) & 0x3) << 20) | ((lk) & 1) #define EBD15_MASK 0xfff00001 /* A BD15 form instruction for extended conditional branch mnemonics with BI. 
*/ #define EBD15BI(op, aa, bo, bi, lk) (((op) & 0x3f) << 26) \ | (((aa) & 0xf) << 22) \ | (((bo) & 0x3) << 20) \ | (((bi) & 0x3) << 16) \ | ((lk) & 1) #define EBD15BI_MASK 0xfff30001 /* A BD24 form instruction. */ #define BD24(op, aa, lk) (OP (op) | ((((unsigned long)(aa)) & 1) << 25) | ((lk) & 1)) #define BD24_MASK BD24 (0x3f, 1, 1) /* A B form instruction setting the BO field. */ #define BBO(op, bo, aa, lk) (B ((op), (aa), (lk)) | ((((unsigned long)(bo)) & 0x1f) << 21)) #define BBO_MASK BBO (0x3f, 0x1f, 1, 1) /* A BBO_MASK with the y bit of the BO field removed. This permits matching a conditional branch regardless of the setting of the y bit. Similarly for the 'at' bits used for power4 branch hints. */ #define Y_MASK (((unsigned long) 1) << 21) #define AT1_MASK (((unsigned long) 3) << 21) #define AT2_MASK (((unsigned long) 9) << 21) #define BBOY_MASK (BBO_MASK &~ Y_MASK) #define BBOAT_MASK (BBO_MASK &~ AT1_MASK) /* A B form instruction setting the BO field and the condition bits of the BI field. */ #define BBOCB(op, bo, cb, aa, lk) \ (BBO ((op), (bo), (aa), (lk)) | ((((unsigned long)(cb)) & 0x3) << 16)) #define BBOCB_MASK BBOCB (0x3f, 0x1f, 0x3, 1, 1) /* A BBOCB_MASK with the y bit of the BO field removed. */ #define BBOYCB_MASK (BBOCB_MASK &~ Y_MASK) #define BBOATCB_MASK (BBOCB_MASK &~ AT1_MASK) #define BBOAT2CB_MASK (BBOCB_MASK &~ AT2_MASK) /* A BBOYCB_MASK in which the BI field is fixed. */ #define BBOYBI_MASK (BBOYCB_MASK | BI_MASK) #define BBOATBI_MASK (BBOAT2CB_MASK | BI_MASK) /* A VLE C form instruction. */ #define C_LK(x, lk) (((((unsigned long)(x)) & 0x7fff) << 1) | ((lk) & 1)) #define C_LK_MASK C_LK(0x7fff, 1) #define C(x) ((((unsigned long)(x)) & 0xffff)) #define C_MASK C(0xffff) /* An Context form instruction. */ #define CTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7)) #define CTX_MASK CTX(0x3f, 0x7) /* A User Context form instruction. */ #define UCTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f)) #define UCTX_MASK UCTX(0x3f, 0x1f) /* The main opcode mask with the RA field clear. */ #define DRA_MASK (OP_MASK | RA_MASK) /* A DQ form VSX instruction. */ #define DQX(op, xop) (OP (op) | ((xop) & 0x7)) #define DQX_MASK DQX (0x3f, 7) /* A DS form instruction. */ #define DSO(op, xop) (OP (op) | ((xop) & 0x3)) #define DS_MASK DSO (0x3f, 3) /* An DX form instruction. */ #define DX(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1)) #define DX_MASK DX (0x3f, 0x1f) /* An EVSEL form instruction. */ #define EVSEL(op, xop) (OP (op) | (((unsigned long)(xop)) & 0xff) << 3) #define EVSEL_MASK EVSEL(0x3f, 0xff) /* An IA16 form instruction. */ #define IA16(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f) << 11) #define IA16_MASK IA16(0x3f, 0x1f) /* An I16A form instruction. */ #define I16A(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f) << 11) #define I16A_MASK I16A(0x3f, 0x1f) /* An I16L form instruction. */ #define I16L(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f) << 11) #define I16L_MASK I16L(0x3f, 0x1f) /* An IM7 form instruction. */ #define IM7(op) ((((unsigned long)(op)) & 0x1f) << 11) #define IM7_MASK IM7(0x1f) /* An M form instruction. */ #define M(op, rc) (OP (op) | ((rc) & 1)) #define M_MASK M (0x3f, 1) /* An LI20 form instruction. */ #define LI20(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1) << 15) #define LI20_MASK LI20(0x3f, 0x1) /* An M form instruction with the ME field specified. */ #define MME(op, me, rc) (M ((op), (rc)) | ((((unsigned long)(me)) & 0x1f) << 1)) /* An M_MASK with the MB and ME fields fixed. 
*/ #define MMBME_MASK (M_MASK | MB_MASK | ME_MASK) /* An M_MASK with the SH and ME fields fixed. */ #define MSHME_MASK (M_MASK | SH_MASK | ME_MASK) /* An MD form instruction. */ #define MD(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x7) << 2) | ((rc) & 1)) #define MD_MASK MD (0x3f, 0x7, 1) /* An MD_MASK with the MB field fixed. */ #define MDMB_MASK (MD_MASK | MB6_MASK) /* An MD_MASK with the SH field fixed. */ #define MDSH_MASK (MD_MASK | SH6_MASK) /* An MDS form instruction. */ #define MDS(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0xf) << 1) | ((rc) & 1)) #define MDS_MASK MDS (0x3f, 0xf, 1) /* An MDS_MASK with the MB field fixed. */ #define MDSMB_MASK (MDS_MASK | MB6_MASK) /* An SC form instruction. */ #define SC(op, sa, lk) (OP (op) | ((((unsigned long)(sa)) & 1) << 1) | ((lk) & 1)) #define SC_MASK (OP_MASK | (((unsigned long)0x3ff) << 16) | (((unsigned long)1) << 1) | 1) /* An SCI8 form instruction. */ #define SCI8(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 11)) #define SCI8_MASK SCI8(0x3f, 0x1f) /* An SCI8 form instruction. */ #define SCI8BF(op, fop, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 11) | (((fop) & 7) << 23)) #define SCI8BF_MASK SCI8BF(0x3f, 7, 0x1f) /* An SD4 form instruction. This is a 16-bit instruction. */ #define SD4(op) ((((unsigned long)(op)) & 0xf) << 12) #define SD4_MASK SD4(0xf) /* An SE_IM5 form instruction. This is a 16-bit instruction. */ #define SE_IM5(op, xop) (((((unsigned long)(op)) & 0x3f) << 10) | (((xop) & 0x1) << 9)) #define SE_IM5_MASK SE_IM5(0x3f, 1) /* An SE_R form instruction. This is a 16-bit instruction. */ #define SE_R(op, xop) (((((unsigned long)(op)) & 0x3f) << 10) | (((xop) & 0x3f) << 4)) #define SE_R_MASK SE_R(0x3f, 0x3f) /* An SE_RR form instruction. This is a 16-bit instruction. */ #define SE_RR(op, xop) (((((unsigned long)(op)) & 0x3f) << 10) | (((xop) & 0x3) << 8)) #define SE_RR_MASK SE_RR(0x3f, 3) /* A VX form instruction. */ #define VX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7ff)) /* The mask for an VX form instruction. */ #define VX_MASK VX(0x3f, 0x7ff) /* A VX_MASK with the VA field fixed. */ #define VXVA_MASK (VX_MASK | (0x1f << 16)) /* A VX_MASK with the VB field fixed. */ #define VXVB_MASK (VX_MASK | (0x1f << 11)) /* A VX_MASK with the VA and VB fields fixed. */ #define VXVAVB_MASK (VX_MASK | (0x1f << 16) | (0x1f << 11)) /* A VX_MASK with the VD and VA fields fixed. */ #define VXVDVA_MASK (VX_MASK | (0x1f << 21) | (0x1f << 16)) /* A VX_MASK with a UIMM4 field. */ #define VXUIMM4_MASK (VX_MASK | (0x1 << 20)) /* A VX_MASK with a UIMM3 field. */ #define VXUIMM3_MASK (VX_MASK | (0x3 << 19)) /* A VX_MASK with a UIMM2 field. */ #define VXUIMM2_MASK (VX_MASK | (0x7 << 18)) /* A VX_MASK with a PS field. */ #define VXPS_MASK (VX_MASK & ~(0x1 << 9)) /* A VX_MASK with the VA field fixed with a PS field. */ #define VXVAPS_MASK ((VX_MASK | (0x1f << 16)) & ~(0x1 << 9)) /* A VA form instruction. */ #define VXA(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x03f)) /* The mask for an VA form instruction. */ #define VXA_MASK VXA(0x3f, 0x3f) /* A VXA_MASK with a SHB field. */ #define VXASHB_MASK (VXA_MASK | (1 << 10)) /* A VXR form instruction. */ #define VXR(op, xop, rc) (OP (op) | (((rc) & 1) << 10) | (((unsigned long)(xop)) & 0x3ff)) /* The mask for a VXR form instruction. */ #define VXR_MASK VXR(0x3f, 0x3ff, 1) /* A VX form instruction with a VA tertiary opcode. 
*/ #define VXVA(op, xop, vaop) (VX(op,xop) | (((vaop) & 0x1f) << 16)) #define VXASH(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1)) #define VXASH_MASK VXASH (0x3f, 0x1f) /* An X form instruction. */ #define X(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1)) /* A X form instruction for Quad-Precision FP Instructions. */ #define XVA(op, xop, vaop) (X(op,xop) | (((vaop) & 0x1f) << 16)) /* An EX form instruction. */ #define EX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7ff)) /* The mask for an EX form instruction. */ #define EX_MASK EX (0x3f, 0x7ff) /* An XX2 form instruction. */ #define XX2(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 2)) /* A XX2 form instruction with the VA bits specified. */ #define XX2VA(op, xop, vaop) (XX2(op,xop) | (((vaop) & 0x1f) << 16)) /* An XX3 form instruction. */ #define XX3(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0xff) << 3)) /* An XX3 form instruction with the RC bit specified. */ #define XX3RC(op, xop, rc) (OP (op) | (((rc) & 1) << 10) | ((((unsigned long)(xop)) & 0x7f) << 3)) /* An XX4 form instruction. */ #define XX4(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3) << 4)) /* A Z form instruction. */ #define Z(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1)) /* An X form instruction with the RC bit specified. */ #define XRC(op, xop, rc) (X ((op), (xop)) | ((rc) & 1)) /* A X form instruction for Quad-Precision FP Instructions with RC bit. */ #define XVARC(op, xop, vaop, rc) (XVA ((op), (xop), (vaop)) | ((rc) & 1)) /* An X form instruction with the RA bits specified as two ops. */ #define XMMF(op, xop, mop0, mop1) (X ((op), (xop)) | ((mop0) & 3) << 19 | ((mop1) & 7) << 16) /* A Z form instruction with the RC bit specified. */ #define ZRC(op, xop, rc) (Z ((op), (xop)) | ((rc) & 1)) /* The mask for an X form instruction. */ #define X_MASK XRC (0x3f, 0x3ff, 1) /* The mask for an X form instruction with the BF bits specified. */ #define XBF_MASK (X_MASK | (3 << 21)) /* An X form wait instruction with everything filled in except the WC field. */ #define XWC_MASK (XRC (0x3f, 0x3ff, 1) | (7 << 23) | RA_MASK | RB_MASK) /* The mask for an XX1 form instruction. */ #define XX1_MASK X (0x3f, 0x3ff) /* An XX1_MASK with the RB field fixed. */ #define XX1RB_MASK (XX1_MASK | RB_MASK) /* The mask for an XX2 form instruction. */ #define XX2_MASK (XX2 (0x3f, 0x1ff) | (0x1f << 16)) /* The mask for an XX2 form instruction with the UIM bits specified. */ #define XX2UIM_MASK (XX2 (0x3f, 0x1ff) | (7 << 18)) /* The mask for an XX2 form instruction with the 4 UIM bits specified. */ #define XX2UIM4_MASK (XX2 (0x3f, 0x1ff) | (1 << 20)) /* The mask for an XX2 form instruction with the BF bits specified. */ #define XX2BF_MASK (XX2_MASK | (3 << 21) | (1)) /* The mask for an XX2 form instruction with the BF and DCMX bits specified. */ #define XX2BFD_MASK (XX2 (0x3f, 0x1ff) | 1) /* The mask for an XX2 form instruction with a split DCMX bits specified. */ #define XX2DCMXS_MASK XX2 (0x3f, 0x1ee) /* The mask for an XX3 form instruction. */ #define XX3_MASK XX3 (0x3f, 0xff) /* The mask for an XX3 form instruction with the BF bits specified. */ #define XX3BF_MASK (XX3 (0x3f, 0xff) | (3 << 21) | (1)) /* The mask for an XX3 form instruction with the DM or SHW bits specified. */ #define XX3DM_MASK (XX3 (0x3f, 0x1f) | (1 << 10)) #define XX3SHW_MASK XX3DM_MASK /* The mask for an XX4 form instruction. */ #define XX4_MASK XX4 (0x3f, 0x3) /* An X form wait instruction with everything filled in except the WC field. 
*/ #define XWC_MASK (XRC (0x3f, 0x3ff, 1) | (7 << 23) | RA_MASK | RB_MASK) /* The mask for an XMMF form instruction. */ #define XMMF_MASK (XMMF (0x3f, 0x3ff, 3, 7) | (1)) /* The mask for a Z form instruction. */ #define Z_MASK ZRC (0x3f, 0x1ff, 1) #define Z2_MASK ZRC (0x3f, 0xff, 1) /* An X_MASK with the RA/VA field fixed. */ #define XRA_MASK (X_MASK | RA_MASK) #define XVA_MASK XRA_MASK /* An XRA_MASK with the A_L/W field clear. */ #define XWRA_MASK (XRA_MASK & ~((unsigned long) 1 << 16)) #define XRLA_MASK XWRA_MASK /* An X_MASK with the RB field fixed. */ #define XRB_MASK (X_MASK | RB_MASK) /* An X_MASK with the RT field fixed. */ #define XRT_MASK (X_MASK | RT_MASK) /* An XRT_MASK mask with the L bits clear. */ #define XLRT_MASK (XRT_MASK & ~((unsigned long) 0x3 << 21)) /* An X_MASK with the RA and RB fields fixed. */ #define XRARB_MASK (X_MASK | RA_MASK | RB_MASK) /* An XBF_MASK with the RA and RB fields fixed. */ #define XBFRARB_MASK (XBF_MASK | RA_MASK | RB_MASK) /* An XRARB_MASK, but with the L bit clear. */ #define XRLARB_MASK (XRARB_MASK & ~((unsigned long) 1 << 16)) /* An XRARB_MASK, but with the L bits in a darn instruction clear. */ #define XLRAND_MASK (XRARB_MASK & ~((unsigned long) 3 << 16)) /* An X_MASK with the RT and RA fields fixed. */ #define XRTRA_MASK (X_MASK | RT_MASK | RA_MASK) /* An X_MASK with the RT and RB fields fixed. */ #define XRTRB_MASK (X_MASK | RT_MASK | RB_MASK) /* An XRTRA_MASK, but with L bit clear. */ #define XRTLRA_MASK (XRTRA_MASK & ~((unsigned long) 1 << 21)) /* An X_MASK with the RT, RA and RB fields fixed. */ #define XRTRARB_MASK (X_MASK | RT_MASK | RA_MASK | RB_MASK) /* An XRTRARB_MASK, but with L bit clear. */ #define XRTLRARB_MASK (XRTRARB_MASK & ~((unsigned long) 1 << 21)) /* An XRTRARB_MASK, but with A bit clear. */ #define XRTARARB_MASK (XRTRARB_MASK & ~((unsigned long) 1 << 25)) /* An XRTRARB_MASK, but with BF bits clear. */ #define XRTBFRARB_MASK (XRTRARB_MASK & ~((unsigned long) 7 << 23)) /* An X form instruction with the L bit specified. */ #define XOPL(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 1) << 21)) /* An X form instruction with the L bits specified. */ #define XOPL2(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 3) << 21)) /* An X form instruction with the L bit and RC bit specified. */ #define XRCL(op, xop, l, rc) (XRC ((op), (xop), (rc)) | ((((unsigned long)(l)) & 1) << 21)) /* An X form instruction with RT fields specified */ #define XRT(op, xop, rt) (X ((op), (xop)) \ | ((((unsigned long)(rt)) & 0x1f) << 21)) /* An X form instruction with RT and RA fields specified */ #define XRTRA(op, xop, rt, ra) (X ((op), (xop)) \ | ((((unsigned long)(rt)) & 0x1f) << 21) \ | ((((unsigned long)(ra)) & 0x1f) << 16)) /* The mask for an X form comparison instruction. */ #define XCMP_MASK (X_MASK | (((unsigned long)1) << 22)) /* The mask for an X form comparison instruction with the L field fixed. */ #define XCMPL_MASK (XCMP_MASK | (((unsigned long)1) << 21)) /* An X form trap instruction with the TO field specified. */ #define XTO(op, xop, to) (X ((op), (xop)) | ((((unsigned long)(to)) & 0x1f) << 21)) #define XTO_MASK (X_MASK | TO_MASK) /* An X form tlb instruction with the SH field specified. */ #define XTLB(op, xop, sh) (X ((op), (xop)) | ((((unsigned long)(sh)) & 0x1f) << 11)) #define XTLB_MASK (X_MASK | SH_MASK) /* An X form sync instruction. */ #define XSYNC(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 3) << 21)) /* An X form sync instruction with everything filled in except the LS field. 
*/ #define XSYNC_MASK (0xff9fffff) /* An X form sync instruction with everything filled in except the L and E fields. */ #define XSYNCLE_MASK (0xff90ffff) /* An X_MASK, but with the EH bit clear. */ #define XEH_MASK (X_MASK & ~((unsigned long )1)) /* An X form AltiVec dss instruction. */ #define XDSS(op, xop, a) (X ((op), (xop)) | ((((unsigned long)(a)) & 1) << 25)) #define XDSS_MASK XDSS(0x3f, 0x3ff, 1) /* An XFL form instruction. */ #define XFL(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1) | (((unsigned long)(rc)) & 1)) #define XFL_MASK XFL (0x3f, 0x3ff, 1) /* An X form isel instruction. */ #define XISEL(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1)) #define XISEL_MASK XISEL(0x3f, 0x1f) /* An XL form instruction with the LK field set to 0. */ #define XL(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1)) /* An XL form instruction which uses the LK field. */ #define XLLK(op, xop, lk) (XL ((op), (xop)) | ((lk) & 1)) /* The mask for an XL form instruction. */ #define XL_MASK XLLK (0x3f, 0x3ff, 1) /* An XL_MASK with the RT, RA and RB fields fixed, but S bit clear. */ #define XLS_MASK ((XL_MASK | RT_MASK | RA_MASK | RB_MASK) & ~(1 << 11)) /* An XL form instruction which explicitly sets the BO field. */ #define XLO(op, bo, xop, lk) \ (XLLK ((op), (xop), (lk)) | ((((unsigned long)(bo)) & 0x1f) << 21)) #define XLO_MASK (XL_MASK | BO_MASK) /* An XL form instruction which explicitly sets the y bit of the BO field. */ #define XLYLK(op, xop, y, lk) (XLLK ((op), (xop), (lk)) | ((((unsigned long)(y)) & 1) << 21)) #define XLYLK_MASK (XL_MASK | Y_MASK) /* An XL form instruction which sets the BO field and the condition bits of the BI field. */ #define XLOCB(op, bo, cb, xop, lk) \ (XLO ((op), (bo), (xop), (lk)) | ((((unsigned long)(cb)) & 3) << 16)) #define XLOCB_MASK XLOCB (0x3f, 0x1f, 0x3, 0x3ff, 1) /* An XL_MASK or XLYLK_MASK or XLOCB_MASK with the BB field fixed. */ #define XLBB_MASK (XL_MASK | BB_MASK) #define XLYBB_MASK (XLYLK_MASK | BB_MASK) #define XLBOCBBB_MASK (XLOCB_MASK | BB_MASK) /* A mask for branch instructions using the BH field. */ #define XLBH_MASK (XL_MASK | (0x1c << 11)) /* An XL_MASK with the BO and BB fields fixed. */ #define XLBOBB_MASK (XL_MASK | BO_MASK | BB_MASK) /* An XL_MASK with the BO, BI and BB fields fixed. */ #define XLBOBIBB_MASK (XL_MASK | BO_MASK | BI_MASK | BB_MASK) /* An X form mbar instruction with MO field. */ #define XMBAR(op, xop, mo) (X ((op), (xop)) | ((((unsigned long)(mo)) & 1) << 21)) /* An XO form instruction. */ #define XO(op, xop, oe, rc) \ (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1) | ((((unsigned long)(oe)) & 1) << 10) | (((unsigned long)(rc)) & 1)) #define XO_MASK XO (0x3f, 0x1ff, 1, 1) /* An XO_MASK with the RB field fixed. */ #define XORB_MASK (XO_MASK | RB_MASK) /* An XOPS form instruction for paired singles. */ #define XOPS(op, xop, rc) \ (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1) | (((unsigned long)(rc)) & 1)) #define XOPS_MASK XOPS (0x3f, 0x3ff, 1) /* An XS form instruction. */ #define XS(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 2) | (((unsigned long)(rc)) & 1)) #define XS_MASK XS (0x3f, 0x1ff, 1) /* A mask for the FXM version of an XFX form instruction. */ #define XFXFXM_MASK (X_MASK | (1 << 11) | (1 << 20)) /* An XFX form instruction with the FXM field filled in. */ #define XFXM(op, xop, fxm, p4) \ (X ((op), (xop)) | ((((unsigned long)(fxm)) & 0xff) << 12) \ | ((unsigned long)(p4) << 20)) /* An XFX form instruction with the SPR field filled in. 
*/ #define XSPR(op, xop, spr) \ (X ((op), (xop)) | ((((unsigned long)(spr)) & 0x1f) << 16) | ((((unsigned long)(spr)) & 0x3e0) << 6)) #define XSPR_MASK (X_MASK | SPR_MASK) /* An XFX form instruction with the SPR field filled in except for the SPRBAT field. */ #define XSPRBAT_MASK (XSPR_MASK &~ SPRBAT_MASK) /* An XFX form instruction with the SPR field filled in except for the SPRG field. */ #define XSPRG_MASK (XSPR_MASK & ~(0x1f << 16)) /* An X form instruction with everything filled in except the E field. */ #define XE_MASK (0xffff7fff) /* An X form user context instruction. */ #define XUC(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f)) #define XUC_MASK XUC(0x3f, 0x1f) /* An XW form instruction. */ #define XW(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x3f) << 1) | ((rc) & 1)) /* The mask for a G form instruction. rc not supported at present. */ #define XW_MASK XW (0x3f, 0x3f, 0) /* An APU form instruction. */ #define APU(op, xop, rc) (OP (op) | (((unsigned long)(xop)) & 0x3ff) << 1 | ((rc) & 1)) /* The mask for an APU form instruction. */ #define APU_MASK APU (0x3f, 0x3ff, 1) #define APU_RT_MASK (APU_MASK | RT_MASK) #define APU_RA_MASK (APU_MASK | RA_MASK) /* The BO encodings used in extended conditional branch mnemonics. */ #define BODNZF (0x0) #define BODNZFP (0x1) #define BODZF (0x2) #define BODZFP (0x3) #define BODNZT (0x8) #define BODNZTP (0x9) #define BODZT (0xa) #define BODZTP (0xb) #define BOF (0x4) #define BOFP (0x5) #define BOFM4 (0x6) #define BOFP4 (0x7) #define BOT (0xc) #define BOTP (0xd) #define BOTM4 (0xe) #define BOTP4 (0xf) #define BODNZ (0x10) #define BODNZP (0x11) #define BODZ (0x12) #define BODZP (0x13) #define BODNZM4 (0x18) #define BODNZP4 (0x19) #define BODZM4 (0x1a) #define BODZP4 (0x1b) #define BOU (0x14) /* The BO16 encodings used in extended VLE conditional branch mnemonics. */ #define BO16F (0x0) #define BO16T (0x1) /* The BO32 encodings used in extended VLE conditional branch mnemonics. */ #define BO32F (0x0) #define BO32T (0x1) #define BO32DNZ (0x2) #define BO32DZ (0x3) /* The BI condition bit encodings used in extended conditional branch mnemonics. */ #define CBLT (0) #define CBGT (1) #define CBEQ (2) #define CBSO (3) /* The TO encodings used in extended trap mnemonics. */ #define TOLGT (0x1) #define TOLLT (0x2) #define TOEQ (0x4) #define TOLGE (0x5) #define TOLNL (0x5) #define TOLLE (0x6) #define TOLNG (0x6) #define TOGT (0x8) #define TOGE (0xc) #define TONL (0xc) #define TOLT (0x10) #define TOLE (0x14) #define TONG (0x14) #define TONE (0x18) #define TOU (0x1f) /* Smaller names for the flags so each entry in the opcodes table will fit on a single line. 
*/ #undef PPC #define PPC PPC_OPCODE_PPC #define PPCCOM PPC_OPCODE_PPC | PPC_OPCODE_COMMON #define POWER4 PPC_OPCODE_POWER4 #define POWER5 PPC_OPCODE_POWER5 #define POWER6 PPC_OPCODE_POWER6 #define POWER7 PPC_OPCODE_POWER7 #define POWER8 PPC_OPCODE_POWER8 #define POWER9 PPC_OPCODE_POWER9 #define CELL PPC_OPCODE_CELL #define PPC64 PPC_OPCODE_64 | PPC_OPCODE_64_BRIDGE #define NON32 (PPC_OPCODE_64 | PPC_OPCODE_POWER4 \ | PPC_OPCODE_EFS | PPC_OPCODE_E500MC | PPC_OPCODE_TITAN) #define PPC403 PPC_OPCODE_403 #define PPC405 PPC_OPCODE_405 #define PPC440 PPC_OPCODE_440 #define PPC464 PPC440 #define PPC476 PPC_OPCODE_476 #define PPC750 PPC_OPCODE_750 #define PPC7450 PPC_OPCODE_7450 #define PPC860 PPC_OPCODE_860 #define PPCPS PPC_OPCODE_PPCPS #define PPCVEC PPC_OPCODE_ALTIVEC #define PPCVEC2 PPC_OPCODE_ALTIVEC2 #define PPCVEC3 PPC_OPCODE_ALTIVEC2 #define PPCVSX PPC_OPCODE_VSX #define PPCVSX2 PPC_OPCODE_VSX #define PPCVSX3 PPC_OPCODE_VSX3 #define POWER PPC_OPCODE_POWER #define POWER2 PPC_OPCODE_POWER | PPC_OPCODE_POWER2 #define PWR2COM PPC_OPCODE_POWER | PPC_OPCODE_POWER2 | PPC_OPCODE_COMMON #define PPCPWR2 PPC_OPCODE_PPC | PPC_OPCODE_POWER | PPC_OPCODE_POWER2 | PPC_OPCODE_COMMON #define COM PPC_OPCODE_POWER | PPC_OPCODE_PPC | PPC_OPCODE_COMMON #define M601 PPC_OPCODE_POWER | PPC_OPCODE_601 #define PWRCOM PPC_OPCODE_POWER | PPC_OPCODE_601 | PPC_OPCODE_COMMON #define MFDEC1 PPC_OPCODE_POWER #define MFDEC2 PPC_OPCODE_PPC | PPC_OPCODE_601 | PPC_OPCODE_BOOKE | PPC_OPCODE_TITAN #define BOOKE PPC_OPCODE_BOOKE #define NO371 PPC_OPCODE_BOOKE | PPC_OPCODE_PPCPS | PPC_OPCODE_EFS #define PPCE300 PPC_OPCODE_E300 #define PPCSPE PPC_OPCODE_SPE #define PPCISEL PPC_OPCODE_ISEL #define PPCEFS PPC_OPCODE_EFS #define PPCBRLK PPC_OPCODE_BRLOCK #define PPCPMR PPC_OPCODE_PMR #define PPCTMR PPC_OPCODE_TMR #define PPCCHLK PPC_OPCODE_CACHELCK #define PPCRFMCI PPC_OPCODE_RFMCI #define E500MC PPC_OPCODE_E500MC #define PPCA2 PPC_OPCODE_A2 #define TITAN PPC_OPCODE_TITAN #define MULHW PPC_OPCODE_405 | PPC_OPCODE_440 | TITAN #define E500 PPC_OPCODE_E500 #define E6500 PPC_OPCODE_E6500 #define PPCVLE PPC_OPCODE_VLE #define PPCHTM PPC_OPCODE_HTM #define E200Z4 PPC_OPCODE_E200Z4 /* The list of embedded processors that use the embedded operand ordering for the 3 operand dcbt and dcbtst instructions. */ #define DCBT_EO (PPC_OPCODE_E500 | PPC_OPCODE_E500MC | PPC_OPCODE_476 \ | PPC_OPCODE_A2) /* The opcode table. The format of the opcode table is: NAME OPCODE MASK FLAGS ANTI {OPERANDS} NAME is the name of the instruction. OPCODE is the instruction opcode. MASK is the opcode mask; this is used to tell the disassembler which bits in the actual opcode must match OPCODE. FLAGS are flags indicating which processors support the instruction. ANTI indicates which processors don't support the instruction. OPERANDS is the list of operands. The disassembler reads the table in order and prints the first instruction which matches, so this table is sorted to put more specific instructions before more general instructions. This table must be sorted by major opcode. Please try to keep it vaguely sorted within major opcode too, except of course where constrained otherwise by disassembler operation. 
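   As a worked example of how OPCODE and MASK are used, the "vaddfp"
   entry below has OPCODE VX (4, 10), which expands to 0x1000000a, and
   MASK VX_MASK, which expands to 0xfc0007ff.  An instruction word is
   reported as vaddfp when (insn & 0xfc0007ff) == 0x1000000a, provided
   the AltiVec dialect (PPCVEC) is enabled; the bits left open by the
   mask hold the VD, VA and VB operands.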
*/ const struct powerpc_opcode powerpc_opcodes[] = { {"attn", X(0,256), X_MASK, POWER4|PPCA2, PPC476|PPCVLE, {0}}, {"tdlgti", OPTO(2,TOLGT), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdllti", OPTO(2,TOLLT), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdeqi", OPTO(2,TOEQ), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdlgei", OPTO(2,TOLGE), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdlnli", OPTO(2,TOLNL), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdllei", OPTO(2,TOLLE), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdlngi", OPTO(2,TOLNG), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdgti", OPTO(2,TOGT), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdgei", OPTO(2,TOGE), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdnli", OPTO(2,TONL), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdlti", OPTO(2,TOLT), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdlei", OPTO(2,TOLE), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdngi", OPTO(2,TONG), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdnei", OPTO(2,TONE), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdui", OPTO(2,TOU), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, {"tdi", OP(2), OP_MASK, PPC64, PPCVLE, {TO, RA, SI}}, {"twlgti", OPTO(3,TOLGT), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tlgti", OPTO(3,TOLGT), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twllti", OPTO(3,TOLLT), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tllti", OPTO(3,TOLLT), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"tweqi", OPTO(3,TOEQ), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"teqi", OPTO(3,TOEQ), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twlgei", OPTO(3,TOLGE), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tlgei", OPTO(3,TOLGE), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twlnli", OPTO(3,TOLNL), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tlnli", OPTO(3,TOLNL), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twllei", OPTO(3,TOLLE), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tllei", OPTO(3,TOLLE), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twlngi", OPTO(3,TOLNG), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tlngi", OPTO(3,TOLNG), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twgti", OPTO(3,TOGT), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tgti", OPTO(3,TOGT), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twgei", OPTO(3,TOGE), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tgei", OPTO(3,TOGE), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twnli", OPTO(3,TONL), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tnli", OPTO(3,TONL), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twlti", OPTO(3,TOLT), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tlti", OPTO(3,TOLT), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twlei", OPTO(3,TOLE), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tlei", OPTO(3,TOLE), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twngi", OPTO(3,TONG), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tngi", OPTO(3,TONG), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twnei", OPTO(3,TONE), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tnei", OPTO(3,TONE), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twui", OPTO(3,TOU), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, {"tui", OPTO(3,TOU), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, {"twi", OP(3), OP_MASK, PPCCOM, PPCVLE, {TO, RA, SI}}, {"ti", OP(3), OP_MASK, PWRCOM, PPCVLE, {TO, RA, SI}}, {"ps_cmpu0", X (4, 0), XBF_MASK, PPCPS, 0, {BF, FRA, FRB}}, {"vaddubm", VX (4, 0), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vmul10cuq", VX (4, 1), VXVB_MASK, PPCVEC3, 0, {VD, VA}}, {"vmaxub", VX (4, 2), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vrlb", VX (4, 4), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpequb", VXR(4, 6,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpneb", VXR(4, 7,0), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vmuloub", VX (4, 8), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vaddfp", VX 
(4, 10), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"psq_lx", XW (4, 6,0), XW_MASK, PPCPS, 0, {FRT,RA,RB,PSWM,PSQM}}, {"vmrghb", VX (4, 12), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"psq_stx", XW (4, 7,0), XW_MASK, PPCPS, 0, {FRS,RA,RB,PSWM,PSQM}}, {"vpkuhum", VX (4, 14), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"mulhhwu", XRC(4, 8,0), X_MASK, MULHW, 0, {RT, RA, RB}}, {"mulhhwu.", XRC(4, 8,1), X_MASK, MULHW, 0, {RT, RA, RB}}, {"ps_sum0", A (4, 10,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"ps_sum0.", A (4, 10,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"ps_sum1", A (4, 11,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"ps_sum1.", A (4, 11,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"ps_muls0", A (4, 12,0), AFRB_MASK, PPCPS, 0, {FRT, FRA, FRC}}, {"machhwu", XO (4, 12,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"ps_muls0.", A (4, 12,1), AFRB_MASK, PPCPS, 0, {FRT, FRA, FRC}}, {"machhwu.", XO (4, 12,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"ps_muls1", A (4, 13,0), AFRB_MASK, PPCPS, 0, {FRT, FRA, FRC}}, {"ps_muls1.", A (4, 13,1), AFRB_MASK, PPCPS, 0, {FRT, FRA, FRC}}, {"ps_madds0", A (4, 14,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"ps_madds0.", A (4, 14,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"ps_madds1", A (4, 15,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"ps_madds1.", A (4, 15,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"vmhaddshs", VXA(4, 32), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, {"vmhraddshs", VXA(4, 33), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, {"vmladduhm", VXA(4, 34), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, {"vmsumudm", VXA(4, 35), VXA_MASK, PPCVEC3, 0, {VD, VA, VB, VC}}, {"ps_div", A (4, 18,0), AFRC_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"vmsumubm", VXA(4, 36), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, {"ps_div.", A (4, 18,1), AFRC_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"vmsummbm", VXA(4, 37), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, {"vmsumuhm", VXA(4, 38), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, {"vmsumuhs", VXA(4, 39), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, {"ps_sub", A (4, 20,0), AFRC_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"vmsumshm", VXA(4, 40), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, {"ps_sub.", A (4, 20,1), AFRC_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"vmsumshs", VXA(4, 41), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, {"ps_add", A (4, 21,0), AFRC_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"vsel", VXA(4, 42), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, {"ps_add.", A (4, 21,1), AFRC_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"vperm", VXA(4, 43), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, {"vsldoi", VXA(4, 44), VXASHB_MASK, PPCVEC, 0, {VD, VA, VB, SHB}}, {"vpermxor", VXA(4, 45), VXA_MASK, PPCVEC2, 0, {VD, VA, VB, VC}}, {"ps_sel", A (4, 23,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"vmaddfp", VXA(4, 46), VXA_MASK, PPCVEC, 0, {VD, VA, VC, VB}}, {"ps_sel.", A (4, 23,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"vnmsubfp", VXA(4, 47), VXA_MASK, PPCVEC, 0, {VD, VA, VC, VB}}, {"ps_res", A (4, 24,0), AFRAFRC_MASK, PPCPS, 0, {FRT, FRB}}, {"maddhd", VXA(4, 48), VXA_MASK, POWER9, 0, {RT, RA, RB, RC}}, {"ps_res.", A (4, 24,1), AFRAFRC_MASK, PPCPS, 0, {FRT, FRB}}, {"maddhdu", VXA(4, 49), VXA_MASK, POWER9, 0, {RT, RA, RB, RC}}, {"ps_mul", A (4, 25,0), AFRB_MASK, PPCPS, 0, {FRT, FRA, FRC}}, {"ps_mul.", A (4, 25,1), AFRB_MASK, PPCPS, 0, {FRT, FRA, FRC}}, {"maddld", VXA(4, 51), VXA_MASK, POWER9, 0, {RT, RA, RB, RC}}, {"ps_rsqrte", A (4, 26,0), AFRAFRC_MASK, PPCPS, 0, {FRT, FRB}}, {"ps_rsqrte.", A (4, 26,1), AFRAFRC_MASK, PPCPS, 0, {FRT, FRB}}, {"ps_msub", A (4, 28,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"ps_msub.", A (4, 28,1), 
A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"ps_madd", A (4, 29,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"ps_madd.", A (4, 29,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"vpermr", VXA(4, 59), VXA_MASK, PPCVEC3, 0, {VD, VA, VB, VC}}, {"ps_nmsub", A (4, 30,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"vaddeuqm", VXA(4, 60), VXA_MASK, PPCVEC2, 0, {VD, VA, VB, VC}}, {"ps_nmsub.", A (4, 30,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"vaddecuq", VXA(4, 61), VXA_MASK, PPCVEC2, 0, {VD, VA, VB, VC}}, {"ps_nmadd", A (4, 31,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"vsubeuqm", VXA(4, 62), VXA_MASK, PPCVEC2, 0, {VD, VA, VB, VC}}, {"ps_nmadd.", A (4, 31,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, {"vsubecuq", VXA(4, 63), VXA_MASK, PPCVEC2, 0, {VD, VA, VB, VC}}, {"ps_cmpo0", X (4, 32), XBF_MASK, PPCPS, 0, {BF, FRA, FRB}}, {"vadduhm", VX (4, 64), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vmul10ecuq", VX (4, 65), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vmaxuh", VX (4, 66), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vrlh", VX (4, 68), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpequh", VXR(4, 70,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpneh", VXR(4, 71,0), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vmulouh", VX (4, 72), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vsubfp", VX (4, 74), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"psq_lux", XW (4, 38,0), XW_MASK, PPCPS, 0, {FRT,RA,RB,PSWM,PSQM}}, {"vmrghh", VX (4, 76), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"psq_stux", XW (4, 39,0), XW_MASK, PPCPS, 0, {FRS,RA,RB,PSWM,PSQM}}, {"vpkuwum", VX (4, 78), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"ps_neg", XRC(4, 40,0), XRA_MASK, PPCPS, 0, {FRT, FRB}}, {"mulhhw", XRC(4, 40,0), X_MASK, MULHW, 0, {RT, RA, RB}}, {"ps_neg.", XRC(4, 40,1), XRA_MASK, PPCPS, 0, {FRT, FRB}}, {"mulhhw.", XRC(4, 40,1), X_MASK, MULHW, 0, {RT, RA, RB}}, {"machhw", XO (4, 44,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"machhw.", XO (4, 44,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmachhw", XO (4, 46,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmachhw.", XO (4, 46,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"ps_cmpu1", X (4, 64), XBF_MASK, PPCPS, 0, {BF, FRA, FRB}}, {"vadduwm", VX (4, 128), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vmaxuw", VX (4, 130), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vrlw", VX (4, 132), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vrlwmi", VX (4, 133), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vcmpequw", VXR(4, 134,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpnew", VXR(4, 135,0), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vmulouw", VX (4, 136), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vmuluwm", VX (4, 137), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vmrghw", VX (4, 140), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vpkuhus", VX (4, 142), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"ps_mr", XRC(4, 72,0), XRA_MASK, PPCPS, 0, {FRT, FRB}}, {"ps_mr.", XRC(4, 72,1), XRA_MASK, PPCPS, 0, {FRT, FRB}}, {"machhwsu", XO (4, 76,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"machhwsu.", XO (4, 76,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"ps_cmpo1", X (4, 96), XBF_MASK, PPCPS, 0, {BF, FRA, FRB}}, {"vaddudm", VX (4, 192), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vmaxud", VX (4, 194), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vrld", VX (4, 196), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vrldmi", VX (4, 197), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vcmpeqfp", VXR(4, 198,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpequd", VXR(4, 199,0), VXR_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vpkuwus", VX (4, 206), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"machhws", XO (4, 108,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"machhws.", XO (4, 108,0,1), XO_MASK, 
MULHW, 0, {RT, RA, RB}}, {"nmachhws", XO (4, 110,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmachhws.", XO (4, 110,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"vadduqm", VX (4, 256), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vmaxsb", VX (4, 258), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vslb", VX (4, 260), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpnezb", VXR(4, 263,0), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vmulosb", VX (4, 264), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vrefp", VX (4, 266), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"vmrglb", VX (4, 268), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vpkshus", VX (4, 270), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"ps_nabs", XRC(4, 136,0), XRA_MASK, PPCPS, 0, {FRT, FRB}}, {"mulchwu", XRC(4, 136,0), X_MASK, MULHW, 0, {RT, RA, RB}}, {"ps_nabs.", XRC(4, 136,1), XRA_MASK, PPCPS, 0, {FRT, FRB}}, {"mulchwu.", XRC(4, 136,1), X_MASK, MULHW, 0, {RT, RA, RB}}, {"macchwu", XO (4, 140,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"macchwu.", XO (4, 140,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"vaddcuq", VX (4, 320), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vmaxsh", VX (4, 322), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vslh", VX (4, 324), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpnezh", VXR(4, 327,0), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vmulosh", VX (4, 328), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vrsqrtefp", VX (4, 330), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"vmrglh", VX (4, 332), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vpkswus", VX (4, 334), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"mulchw", XRC(4, 168,0), X_MASK, MULHW, 0, {RT, RA, RB}}, {"mulchw.", XRC(4, 168,1), X_MASK, MULHW, 0, {RT, RA, RB}}, {"macchw", XO (4, 172,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"macchw.", XO (4, 172,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmacchw", XO (4, 174,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmacchw.", XO (4, 174,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"vaddcuw", VX (4, 384), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vmaxsw", VX (4, 386), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vslw", VX (4, 388), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vrlwnm", VX (4, 389), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vcmpnezw", VXR(4, 391,0), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vmulosw", VX (4, 392), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vexptefp", VX (4, 394), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"vmrglw", VX (4, 396), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vpkshss", VX (4, 398), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"macchwsu", XO (4, 204,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"macchwsu.", XO (4, 204,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"vmaxsd", VX (4, 450), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vsl", VX (4, 452), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vrldnm", VX (4, 453), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vcmpgefp", VXR(4, 454,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vlogefp", VX (4, 458), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"vpkswss", VX (4, 462), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"macchws", XO (4, 236,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"macchws.", XO (4, 236,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmacchws", XO (4, 238,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmacchws.", XO (4, 238,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"evaddw", VX (4, 512), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vaddubs", VX (4, 512), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vmul10uq", VX (4, 513), VXVB_MASK, PPCVEC3, 0, {VD, VA}}, {"evaddiw", VX (4, 514), VX_MASK, PPCSPE, 0, {RS, RB, UIMM}}, {"vminub", VX (4, 514), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evsubfw", VX (4, 516), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evsubw", VX (4, 516), VX_MASK, PPCSPE, 0, {RS, RB, RA}}, 
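/* Major opcode 4 is shared by the AltiVec (v...), SPE (ev...),
   embedded floating-point (efs.../efd...), paired-single (ps_...) and
   embedded multiply-accumulate encodings, so the same OPCODE value can
   legitimately appear in more than one entry.  For instance, "evaddw"
   and "vaddubs" above are both VX (4, 512) and are selected according
   to their PPCSPE and PPCVEC dialect flags.  */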
{"vsrb", VX (4, 516), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evsubifw", VX (4, 518), VX_MASK, PPCSPE, 0, {RS, UIMM, RB}}, {"evsubiw", VX (4, 518), VX_MASK, PPCSPE, 0, {RS, RB, UIMM}}, {"vcmpgtub", VXR(4, 518,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evabs", VX (4, 520), VX_MASK, PPCSPE, 0, {RS, RA}}, {"vmuleub", VX (4, 520), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evneg", VX (4, 521), VX_MASK, PPCSPE, 0, {RS, RA}}, {"evextsb", VX (4, 522), VX_MASK, PPCSPE, 0, {RS, RA}}, {"vrfin", VX (4, 522), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"evextsh", VX (4, 523), VX_MASK, PPCSPE, 0, {RS, RA}}, {"evrndw", VX (4, 524), VX_MASK, PPCSPE, 0, {RS, RA}}, {"vspltb", VX (4, 524), VXUIMM4_MASK, PPCVEC, 0, {VD, VB, UIMM4}}, {"vextractub", VX (4, 525), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, {"evcntlzw", VX (4, 525), VX_MASK, PPCSPE, 0, {RS, RA}}, {"evcntlsw", VX (4, 526), VX_MASK, PPCSPE, 0, {RS, RA}}, {"vupkhsb", VX (4, 526), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"brinc", VX (4, 527), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"ps_abs", XRC(4, 264,0), XRA_MASK, PPCPS, 0, {FRT, FRB}}, {"ps_abs.", XRC(4, 264,1), XRA_MASK, PPCPS, 0, {FRT, FRB}}, {"evand", VX (4, 529), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evandc", VX (4, 530), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evxor", VX (4, 534), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmr", VX (4, 535), VX_MASK, PPCSPE, 0, {RS, RA, BBA}}, {"evor", VX (4, 535), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evnor", VX (4, 536), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evnot", VX (4, 536), VX_MASK, PPCSPE, 0, {RS, RA, BBA}}, {"get", APU(4, 268,0), APU_RA_MASK, PPC405, 0, {RT, FSL}}, {"eveqv", VX (4, 537), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evorc", VX (4, 539), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evnand", VX (4, 542), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evsrwu", VX (4, 544), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evsrws", VX (4, 545), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evsrwiu", VX (4, 546), VX_MASK, PPCSPE, 0, {RS, RA, EVUIMM}}, {"evsrwis", VX (4, 547), VX_MASK, PPCSPE, 0, {RS, RA, EVUIMM}}, {"evslw", VX (4, 548), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evslwi", VX (4, 550), VX_MASK, PPCSPE, 0, {RS, RA, EVUIMM}}, {"evrlw", VX (4, 552), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evsplati", VX (4, 553), VX_MASK, PPCSPE, 0, {RS, SIMM}}, {"evrlwi", VX (4, 554), VX_MASK, PPCSPE, 0, {RS, RA, EVUIMM}}, {"evsplatfi", VX (4, 555), VX_MASK, PPCSPE, 0, {RS, SIMM}}, {"evmergehi", VX (4, 556), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmergelo", VX (4, 557), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmergehilo", VX (4, 558), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmergelohi", VX (4, 559), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evcmpgtu", VX (4, 560), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, {"evcmpgts", VX (4, 561), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, {"evcmpltu", VX (4, 562), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, {"evcmplts", VX (4, 563), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, {"evcmpeq", VX (4, 564), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, {"cget", APU(4, 284,0), APU_RA_MASK, PPC405, 0, {RT, FSL}}, {"vadduhs", VX (4, 576), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vmul10euq", VX (4, 577), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vminuh", VX (4, 578), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vsrh", VX (4, 580), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpgtuh", VXR(4, 582,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vmuleuh", VX (4, 584), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vrfiz", VX (4, 586), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"vsplth", VX (4, 588), VXUIMM3_MASK, PPCVEC, 0, {VD, VB, UIMM3}}, {"vextractuh", VX (4, 589), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, 
UIMM4}}, {"vupkhsh", VX (4, 590), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"nget", APU(4, 300,0), APU_RA_MASK, PPC405, 0, {RT, FSL}}, {"evsel", EVSEL(4,79), EVSEL_MASK, PPCSPE, 0, {RS, RA, RB, CRFS}}, {"ncget", APU(4, 316,0), APU_RA_MASK, PPC405, 0, {RT, FSL}}, {"evfsadd", VX (4, 640), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vadduws", VX (4, 640), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evfssub", VX (4, 641), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vminuw", VX (4, 642), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evfsabs", VX (4, 644), VX_MASK, PPCSPE, 0, {RS, RA}}, {"vsrw", VX (4, 644), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evfsnabs", VX (4, 645), VX_MASK, PPCSPE, 0, {RS, RA}}, {"evfsneg", VX (4, 646), VX_MASK, PPCSPE, 0, {RS, RA}}, {"vcmpgtuw", VXR(4, 646,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vmuleuw", VX (4, 648), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"evfsmul", VX (4, 648), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evfsdiv", VX (4, 649), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vrfip", VX (4, 650), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"evfscmpgt", VX (4, 652), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, {"vspltw", VX (4, 652), VXUIMM2_MASK, PPCVEC, 0, {VD, VB, UIMM2}}, {"vextractuw", VX (4, 653), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, {"evfscmplt", VX (4, 653), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, {"evfscmpeq", VX (4, 654), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, {"vupklsb", VX (4, 654), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"evfscfui", VX (4, 656), VX_MASK, PPCSPE, 0, {RS, RB}}, {"evfscfsi", VX (4, 657), VX_MASK, PPCSPE, 0, {RS, RB}}, {"evfscfuf", VX (4, 658), VX_MASK, PPCSPE, 0, {RS, RB}}, {"evfscfsf", VX (4, 659), VX_MASK, PPCSPE, 0, {RS, RB}}, {"evfsctui", VX (4, 660), VX_MASK, PPCSPE, 0, {RS, RB}}, {"evfsctsi", VX (4, 661), VX_MASK, PPCSPE, 0, {RS, RB}}, {"evfsctuf", VX (4, 662), VX_MASK, PPCSPE, 0, {RS, RB}}, {"evfsctsf", VX (4, 663), VX_MASK, PPCSPE, 0, {RS, RB}}, {"evfsctuiz", VX (4, 664), VX_MASK, PPCSPE, 0, {RS, RB}}, {"put", APU(4, 332,0), APU_RT_MASK, PPC405, 0, {RA, FSL}}, {"evfsctsiz", VX (4, 666), VX_MASK, PPCSPE, 0, {RS, RB}}, {"evfststgt", VX (4, 668), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, {"evfststlt", VX (4, 669), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, {"evfststeq", VX (4, 670), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, {"cput", APU(4, 348,0), APU_RT_MASK, PPC405, 0, {RA, FSL}}, {"efsadd", VX (4, 704), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, {"efssub", VX (4, 705), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, {"vminud", VX (4, 706), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"efsabs", VX (4, 708), VX_MASK, PPCEFS, 0, {RS, RA}}, {"vsr", VX (4, 708), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"efsnabs", VX (4, 709), VX_MASK, PPCEFS, 0, {RS, RA}}, {"efsneg", VX (4, 710), VX_MASK, PPCEFS, 0, {RS, RA}}, {"vcmpgtfp", VXR(4, 710,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpgtud", VXR(4, 711,0), VXR_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"efsmul", VX (4, 712), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, {"efsdiv", VX (4, 713), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, {"vrfim", VX (4, 714), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"efscmpgt", VX (4, 716), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, {"vextractd", VX (4, 717), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, {"efscmplt", VX (4, 717), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, {"efscmpeq", VX (4, 718), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, {"vupklsh", VX (4, 718), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"efscfd", VX (4, 719), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efscfui", VX (4, 720), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efscfsi", VX (4, 721), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efscfuf", VX (4, 722), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efscfsf", VX 
(4, 723), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efsctui", VX (4, 724), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efsctsi", VX (4, 725), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efsctuf", VX (4, 726), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efsctsf", VX (4, 727), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efsctuiz", VX (4, 728), VX_MASK, PPCEFS, 0, {RS, RB}}, {"nput", APU(4, 364,0), APU_RT_MASK, PPC405, 0, {RA, FSL}}, {"efsctsiz", VX (4, 730), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efststgt", VX (4, 732), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, {"efststlt", VX (4, 733), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, {"efststeq", VX (4, 734), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, {"efdadd", VX (4, 736), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, {"efdsub", VX (4, 737), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, {"efdcfuid", VX (4, 738), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdcfsid", VX (4, 739), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdabs", VX (4, 740), VX_MASK, PPCEFS, 0, {RS, RA}}, {"efdnabs", VX (4, 741), VX_MASK, PPCEFS, 0, {RS, RA}}, {"efdneg", VX (4, 742), VX_MASK, PPCEFS, 0, {RS, RA}}, {"efdmul", VX (4, 744), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, {"efddiv", VX (4, 745), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, {"efdctuidz", VX (4, 746), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdctsidz", VX (4, 747), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdcmpgt", VX (4, 748), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, {"efdcmplt", VX (4, 749), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, {"efdcmpeq", VX (4, 750), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, {"efdcfs", VX (4, 751), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdcfui", VX (4, 752), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdcfsi", VX (4, 753), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdcfuf", VX (4, 754), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdcfsf", VX (4, 755), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdctui", VX (4, 756), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdctsi", VX (4, 757), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdctuf", VX (4, 758), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdctsf", VX (4, 759), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdctuiz", VX (4, 760), VX_MASK, PPCEFS, 0, {RS, RB}}, {"ncput", APU(4, 380,0), APU_RT_MASK, PPC405, 0, {RA, FSL}}, {"efdctsiz", VX (4, 762), VX_MASK, PPCEFS, 0, {RS, RB}}, {"efdtstgt", VX (4, 764), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, {"efdtstlt", VX (4, 765), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, {"efdtsteq", VX (4, 766), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, {"evlddx", VX (4, 768), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vaddsbs", VX (4, 768), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evldd", VX (4, 769), VX_MASK, PPCSPE, 0, {RS, EVUIMM_8, RA}}, {"evldwx", VX (4, 770), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vminsb", VX (4, 770), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evldw", VX (4, 771), VX_MASK, PPCSPE, 0, {RS, EVUIMM_8, RA}}, {"evldhx", VX (4, 772), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vsrab", VX (4, 772), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evldh", VX (4, 773), VX_MASK, PPCSPE, 0, {RS, EVUIMM_8, RA}}, {"vcmpgtsb", VXR(4, 774,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evlhhesplatx",VX (4, 776), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vmulesb", VX (4, 776), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evlhhesplat", VX (4, 777), VX_MASK, PPCSPE, 0, {RS, EVUIMM_2, RA}}, {"vcfux", VX (4, 778), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, {"vcuxwfp", VX (4, 778), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, {"evlhhousplatx",VX(4, 780), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vspltisb", VX (4, 780), VXVB_MASK, PPCVEC, 0, {VD, SIMM}}, {"vinsertb", VX (4, 781), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, {"evlhhousplat",VX (4, 781), VX_MASK, PPCSPE, 0, {RS, EVUIMM_2, RA}}, {"evlhhossplatx",VX(4, 782), VX_MASK, PPCSPE, 
0, {RS, RA, RB}}, {"vpkpx", VX (4, 782), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evlhhossplat",VX (4, 783), VX_MASK, PPCSPE, 0, {RS, EVUIMM_2, RA}}, {"mullhwu", XRC(4, 392,0), X_MASK, MULHW, 0, {RT, RA, RB}}, {"evlwhex", VX (4, 784), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"mullhwu.", XRC(4, 392,1), X_MASK, MULHW, 0, {RT, RA, RB}}, {"evlwhe", VX (4, 785), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, {"evlwhoux", VX (4, 788), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evlwhou", VX (4, 789), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, {"evlwhosx", VX (4, 790), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evlwhos", VX (4, 791), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, {"maclhwu", XO (4, 396,0,0),XO_MASK, MULHW, 0, {RT, RA, RB}}, {"evlwwsplatx", VX (4, 792), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"maclhwu.", XO (4, 396,0,1),XO_MASK, MULHW, 0, {RT, RA, RB}}, {"evlwwsplat", VX (4, 793), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, {"evlwhsplatx", VX (4, 796), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evlwhsplat", VX (4, 797), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, {"evstddx", VX (4, 800), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evstdd", VX (4, 801), VX_MASK, PPCSPE, 0, {RS, EVUIMM_8, RA}}, {"evstdwx", VX (4, 802), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evstdw", VX (4, 803), VX_MASK, PPCSPE, 0, {RS, EVUIMM_8, RA}}, {"evstdhx", VX (4, 804), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evstdh", VX (4, 805), VX_MASK, PPCSPE, 0, {RS, EVUIMM_8, RA}}, {"evstwhex", VX (4, 816), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evstwhe", VX (4, 817), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, {"evstwhox", VX (4, 820), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evstwho", VX (4, 821), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, {"evstwwex", VX (4, 824), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evstwwe", VX (4, 825), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, {"evstwwox", VX (4, 828), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evstwwo", VX (4, 829), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, {"vaddshs", VX (4, 832), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"bcdcpsgn.", VX (4, 833), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vminsh", VX (4, 834), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vsrah", VX (4, 836), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpgtsh", VXR(4, 838,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vmulesh", VX (4, 840), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcfsx", VX (4, 842), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, {"vcsxwfp", VX (4, 842), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, {"vspltish", VX (4, 844), VXVB_MASK, PPCVEC, 0, {VD, SIMM}}, {"vinserth", VX (4, 845), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, {"vupkhpx", VX (4, 846), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"mullhw", XRC(4, 424,0), X_MASK, MULHW, 0, {RT, RA, RB}}, {"mullhw.", XRC(4, 424,1), X_MASK, MULHW, 0, {RT, RA, RB}}, {"maclhw", XO (4, 428,0,0),XO_MASK, MULHW, 0, {RT, RA, RB}}, {"maclhw.", XO (4, 428,0,1),XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmaclhw", XO (4, 430,0,0),XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmaclhw.", XO (4, 430,0,1),XO_MASK, MULHW, 0, {RT, RA, RB}}, {"vaddsws", VX (4, 896), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vminsw", VX (4, 898), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vsraw", VX (4, 900), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpgtsw", VXR(4, 902,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vmulesw", VX (4, 904), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vctuxs", VX (4, 906), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, {"vcfpuxws", VX (4, 906), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, {"vspltisw", VX (4, 908), VXVB_MASK, PPCVEC, 0, {VD, SIMM}}, {"vinsertw", VX (4, 909), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, {"maclhwsu", XO (4, 460,0,0), 
XO_MASK, MULHW, 0, {RT, RA, RB}}, {"maclhwsu.", XO (4, 460,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"vminsd", VX (4, 962), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vsrad", VX (4, 964), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vcmpbfp", VXR(4, 966,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpgtsd", VXR(4, 967,0), VXR_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vctsxs", VX (4, 970), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, {"vcfpsxws", VX (4, 970), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, {"vinsertd", VX (4, 973), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, {"vupklpx", VX (4, 974), VXVA_MASK, PPCVEC, 0, {VD, VB}}, {"maclhws", XO (4, 492,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"maclhws.", XO (4, 492,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmaclhws", XO (4, 494,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmaclhws.", XO (4, 494,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"vsububm", VX (4,1024), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"bcdadd.", VX (4,1025), VXPS_MASK, PPCVEC2, 0, {VD, VA, VB, PS}}, {"vavgub", VX (4,1026), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vabsdub", VX (4,1027), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"evmhessf", VX (4,1027), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vand", VX (4,1028), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpequb.", VXR(4, 6,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpneb.", VXR(4, 7,1), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"udi0fcm.", APU(4, 515,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"udi0fcm", APU(4, 515,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"evmhossf", VX (4,1031), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vpmsumb", VX (4,1032), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"evmheumi", VX (4,1032), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhesmi", VX (4,1033), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vmaxfp", VX (4,1034), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evmhesmf", VX (4,1035), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhoumi", VX (4,1036), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vslo", VX (4,1036), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evmhosmi", VX (4,1037), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhosmf", VX (4,1039), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"machhwuo", XO (4, 12,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"machhwuo.", XO (4, 12,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"ps_merge00", XOPS(4,528,0), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"ps_merge00.", XOPS(4,528,1), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"evmhessfa", VX (4,1059), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhossfa", VX (4,1063), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmheumia", VX (4,1064), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhesmia", VX (4,1065), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhesmfa", VX (4,1067), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhoumia", VX (4,1068), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhosmia", VX (4,1069), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhosmfa", VX (4,1071), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vsubuhm", VX (4,1088), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"bcdsub.", VX (4,1089), VXPS_MASK, PPCVEC2, 0, {VD, VA, VB, PS}}, {"vavguh", VX (4,1090), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vabsduh", VX (4,1091), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vandc", VX (4,1092), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpequh.", VXR(4, 70,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"udi1fcm.", APU(4, 547,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"udi1fcm", APU(4, 547,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"vcmpneh.", VXR(4, 71,1), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"evmwhssf", VX (4,1095), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vpmsumh", VX (4,1096), VX_MASK, 
PPCVEC2, 0, {VD, VA, VB}}, {"evmwlumi", VX (4,1096), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vminfp", VX (4,1098), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evmwhumi", VX (4,1100), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vsro", VX (4,1100), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evmwhsmi", VX (4,1101), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vpkudum", VX (4,1102), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"evmwhsmf", VX (4,1103), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwssf", VX (4,1107), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"machhwo", XO (4, 44,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"evmwumi", VX (4,1112), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"machhwo.", XO (4, 44,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"evmwsmi", VX (4,1113), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwsmf", VX (4,1115), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"nmachhwo", XO (4, 46,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmachhwo.", XO (4, 46,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"ps_merge01", XOPS(4,560,0), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"ps_merge01.", XOPS(4,560,1), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"evmwhssfa", VX (4,1127), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwlumia", VX (4,1128), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwhumia", VX (4,1132), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwhsmia", VX (4,1133), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwhsmfa", VX (4,1135), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwssfa", VX (4,1139), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwumia", VX (4,1144), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwsmia", VX (4,1145), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwsmfa", VX (4,1147), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vsubuwm", VX (4,1152), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"bcdus.", VX (4,1153), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vavguw", VX (4,1154), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vabsduw", VX (4,1155), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vmr", VX (4,1156), VX_MASK, PPCVEC, 0, {VD, VA, VBA}}, {"vor", VX (4,1156), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vcmpnew.", VXR(4, 135,1), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vpmsumw", VX (4,1160), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vcmpequw.", VXR(4, 134,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"udi2fcm.", APU(4, 579,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"udi2fcm", APU(4, 579,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"machhwsuo", XO (4, 76,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"machhwsuo.", XO (4, 76,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"ps_merge10", XOPS(4,592,0), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"ps_merge10.", XOPS(4,592,1), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"vsubudm", VX (4,1216), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"evaddusiaaw", VX (4,1216), VX_MASK, PPCSPE, 0, {RS, RA}}, {"bcds.", VX (4,1217), VXPS_MASK, PPCVEC3, 0, {VD, VA, VB, PS}}, {"evaddssiaaw", VX (4,1217), VX_MASK, PPCSPE, 0, {RS, RA}}, {"evsubfusiaaw",VX (4,1218), VX_MASK, PPCSPE, 0, {RS, RA}}, {"evsubfssiaaw",VX (4,1219), VX_MASK, PPCSPE, 0, {RS, RA}}, {"evmra", VX (4,1220), VX_MASK, PPCSPE, 0, {RS, RA}}, {"vxor", VX (4,1220), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evdivws", VX (4,1222), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vcmpeqfp.", VXR(4, 198,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"udi3fcm.", APU(4, 611,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"vcmpequd.", VXR(4, 199,1), VXR_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"udi3fcm", APU(4, 611,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"evdivwu", VX (4,1223), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vpmsumd", VX (4,1224), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, 
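/* The AltiVec comparison mnemonics ending in "." are the record forms:
   they reuse the extended opcode of the plain form with the rc argument
   of VXR set, e.g. "vcmpequw" is VXR (4, 134, 0) while "vcmpequw."
   above is VXR (4, 134, 1).  */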
{"evaddumiaaw", VX (4,1224), VX_MASK, PPCSPE, 0, {RS, RA}}, {"evaddsmiaaw", VX (4,1225), VX_MASK, PPCSPE, 0, {RS, RA}}, {"evsubfumiaaw",VX (4,1226), VX_MASK, PPCSPE, 0, {RS, RA}}, {"evsubfsmiaaw",VX (4,1227), VX_MASK, PPCSPE, 0, {RS, RA}}, {"vpkudus", VX (4,1230), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"machhwso", XO (4, 108,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"machhwso.", XO (4, 108,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmachhwso", XO (4, 110,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmachhwso.", XO (4, 110,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"ps_merge11", XOPS(4,624,0), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"ps_merge11.", XOPS(4,624,1), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, {"vsubuqm", VX (4,1280), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"evmheusiaaw", VX (4,1280), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"bcdtrunc.", VX (4,1281), VXPS_MASK, PPCVEC3, 0, {VD, VA, VB, PS}}, {"evmhessiaaw", VX (4,1281), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vavgsb", VX (4,1282), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evmhessfaaw", VX (4,1283), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhousiaaw", VX (4,1284), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vnot", VX (4,1284), VX_MASK, PPCVEC, 0, {VD, VA, VBA}}, {"vnor", VX (4,1284), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evmhossiaaw", VX (4,1285), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"udi4fcm.", APU(4, 643,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"udi4fcm", APU(4, 643,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"vcmpnezb.", VXR(4, 263,1), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"evmhossfaaw", VX (4,1287), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmheumiaaw", VX (4,1288), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vcipher", VX (4,1288), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vcipherlast", VX (4,1289), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"evmhesmiaaw", VX (4,1289), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhesmfaaw", VX (4,1291), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vgbbd", VX (4,1292), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, {"evmhoumiaaw", VX (4,1292), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhosmiaaw", VX (4,1293), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhosmfaaw", VX (4,1295), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"macchwuo", XO (4, 140,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"macchwuo.", XO (4, 140,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"evmhegumiaa", VX (4,1320), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhegsmiaa", VX (4,1321), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhegsmfaa", VX (4,1323), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhogumiaa", VX (4,1324), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhogsmiaa", VX (4,1325), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhogsmfaa", VX (4,1327), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vsubcuq", VX (4,1344), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"evmwlusiaaw", VX (4,1344), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"bcdutrunc.", VX (4,1345), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"evmwlssiaaw", VX (4,1345), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vavgsh", VX (4,1346), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vorc", VX (4,1348), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"udi5fcm.", APU(4, 675,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"udi5fcm", APU(4, 675,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"vcmpnezh.", VXR(4, 327,1), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vncipher", VX (4,1352), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"evmwlumiaaw", VX (4,1352), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vncipherlast",VX (4,1353), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"evmwlsmiaaw", VX (4,1353), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vbpermq", VX 
(4,1356), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vpksdus", VX (4,1358), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"evmwssfaa", VX (4,1363), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"macchwo", XO (4, 172,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"evmwumiaa", VX (4,1368), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"macchwo.", XO (4, 172,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"evmwsmiaa", VX (4,1369), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwsmfaa", VX (4,1371), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"nmacchwo", XO (4, 174,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmacchwo.", XO (4, 174,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"evmheusianw", VX (4,1408), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vsubcuw", VX (4,1408), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evmhessianw", VX (4,1409), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"bcdctsq.", VXVA(4,1409,0), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"bcdcfsq.", VXVA(4,1409,2), VXVAPS_MASK, PPCVEC3, 0, {VD, VB, PS}}, {"bcdctz.", VXVA(4,1409,4), VXVAPS_MASK, PPCVEC3, 0, {VD, VB, PS}}, {"bcdctn.", VXVA(4,1409,5), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"bcdcfz.", VXVA(4,1409,6), VXVAPS_MASK, PPCVEC3, 0, {VD, VB, PS}}, {"bcdcfn.", VXVA(4,1409,7), VXVAPS_MASK, PPCVEC3, 0, {VD, VB, PS}}, {"bcdsetsgn.", VXVA(4,1409,31), VXVAPS_MASK, PPCVEC3, 0, {VD, VB, PS}}, {"vavgsw", VX (4,1410), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"evmhessfanw", VX (4,1411), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vnand", VX (4,1412), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"evmhousianw", VX (4,1412), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhossianw", VX (4,1413), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"udi6fcm.", APU(4, 707,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"udi6fcm", APU(4, 707,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"vcmpnezw.", VXR(4, 391,1), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"evmhossfanw", VX (4,1415), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmheumianw", VX (4,1416), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhesmianw", VX (4,1417), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhesmfanw", VX (4,1419), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhoumianw", VX (4,1420), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhosmianw", VX (4,1421), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhosmfanw", VX (4,1423), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"macchwsuo", XO (4, 204,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"macchwsuo.", XO (4, 204,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"evmhegumian", VX (4,1448), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhegsmian", VX (4,1449), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhegsmfan", VX (4,1451), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhogumian", VX (4,1452), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhogsmian", VX (4,1453), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmhogsmfan", VX (4,1455), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwlusianw", VX (4,1472), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"bcdsr.", VX (4,1473), VXPS_MASK, PPCVEC3, 0, {VD, VA, VB, PS}}, {"evmwlssianw", VX (4,1473), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vsld", VX (4,1476), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vcmpgefp.", VXR(4, 454,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"udi7fcm.", APU(4, 739,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"udi7fcm", APU(4, 739,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, {"vsbox", VX (4,1480), VXVB_MASK, PPCVEC2, 0, {VD, VA}}, {"evmwlumianw", VX (4,1480), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwlsmianw", VX (4,1481), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"vbpermd", VX (4,1484), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vpksdss", VX (4,1486), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, 
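/* The primary opcode 4 group (VX-form vector, SPE evm* accumulate and
   XO-form embedded multiply-accumulate entries) continues below; it is
   followed by the D-form immediate arithmetic/compare opcodes (7-15),
   the conditional-branch opcode 16 extended mnemonics, sc/b (17-18),
   the XL-form opcode 19 condition-register and branch-to-LR/CTR
   entries, the rotate opcodes (20-23) and the logical immediates
   (24-29).  */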
{"evmwssfan", VX (4,1491), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"macchwso", XO (4, 236,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"evmwumian", VX (4,1496), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"macchwso.", XO (4, 236,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"evmwsmian", VX (4,1497), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"evmwsmfan", VX (4,1499), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, {"nmacchwso", XO (4, 238,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmacchwso.", XO (4, 238,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"vsububs", VX (4,1536), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vclzlsbb", VXVA(4,1538,0), VXVA_MASK, PPCVEC3, 0, {RT, VB}}, {"vctzlsbb", VXVA(4,1538,1), VXVA_MASK, PPCVEC3, 0, {RT, VB}}, {"vnegw", VXVA(4,1538,6), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vnegd", VXVA(4,1538,7), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vprtybw", VXVA(4,1538,8), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vprtybd", VXVA(4,1538,9), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vprtybq", VXVA(4,1538,10), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vextsb2w", VXVA(4,1538,16), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vextsh2w", VXVA(4,1538,17), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vextsb2d", VXVA(4,1538,24), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vextsh2d", VXVA(4,1538,25), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vextsw2d", VXVA(4,1538,26), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vctzb", VXVA(4,1538,28), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vctzh", VXVA(4,1538,29), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vctzw", VXVA(4,1538,30), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"vctzd", VXVA(4,1538,31), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, {"mfvscr", VX (4,1540), VXVAVB_MASK, PPCVEC, 0, {VD}}, {"vcmpgtub.", VXR(4, 518,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"udi8fcm.", APU(4, 771,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"udi8fcm", APU(4, 771,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"vsum4ubs", VX (4,1544), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vextublx", VX (4,1549), VX_MASK, PPCVEC3, 0, {RT, RA, VB}}, {"vsubuhs", VX (4,1600), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"mtvscr", VX (4,1604), VXVDVA_MASK, PPCVEC, 0, {VB}}, {"vcmpgtuh.", VXR(4, 582,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vsum4shs", VX (4,1608), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"udi9fcm.", APU(4, 804,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"udi9fcm", APU(4, 804,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"vextuhlx", VX (4,1613), VX_MASK, PPCVEC3, 0, {RT, RA, VB}}, {"vupkhsw", VX (4,1614), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, {"vsubuws", VX (4,1664), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vshasigmaw", VX (4,1666), VX_MASK, PPCVEC2, 0, {VD, VA, ST, SIX}}, {"veqv", VX (4,1668), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vcmpgtuw.", VXR(4, 646,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"udi10fcm.", APU(4, 835,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"udi10fcm", APU(4, 835,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"vsum2sws", VX (4,1672), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vmrgow", VX (4,1676), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vextuwlx", VX (4,1677), VX_MASK, PPCVEC3, 0, {RT, RA, VB}}, {"vshasigmad", VX (4,1730), VX_MASK, PPCVEC2, 0, {VD, VA, ST, SIX}}, {"vsrd", VX (4,1732), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vcmpgtfp.", VXR(4, 710,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"udi11fcm.", APU(4, 867,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"vcmpgtud.", VXR(4, 711,1), VXR_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"udi11fcm", APU(4, 867,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"vupklsw", VX (4,1742), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, {"vsubsbs", VX (4,1792), VX_MASK, PPCVEC, 0, {VD, VA, 
VB}}, {"vclzb", VX (4,1794), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, {"vpopcntb", VX (4,1795), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, {"vsrv", VX (4,1796), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vcmpgtsb.", VXR(4, 774,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"udi12fcm.", APU(4, 899,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"udi12fcm", APU(4, 899,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"vsum4sbs", VX (4,1800), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vextubrx", VX (4,1805), VX_MASK, PPCVEC3, 0, {RT, RA, VB}}, {"maclhwuo", XO (4, 396,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"maclhwuo.", XO (4, 396,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"vsubshs", VX (4,1856), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vclzh", VX (4,1858), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, {"vpopcnth", VX (4,1859), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, {"vslv", VX (4,1860), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, {"vcmpgtsh.", VXR(4, 838,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vextuhrx", VX (4,1869), VX_MASK, PPCVEC3, 0, {RT, RA, VB}}, {"udi13fcm.", APU(4, 931,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"udi13fcm", APU(4, 931,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"maclhwo", XO (4, 428,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"maclhwo.", XO (4, 428,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmaclhwo", XO (4, 430,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmaclhwo.", XO (4, 430,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"vsubsws", VX (4,1920), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vclzw", VX (4,1922), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, {"vpopcntw", VX (4,1923), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, {"vcmpgtsw.", VXR(4, 902,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"udi14fcm.", APU(4, 963,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"udi14fcm", APU(4, 963,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"vsumsws", VX (4,1928), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, {"vmrgew", VX (4,1932), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"vextuwrx", VX (4,1933), VX_MASK, PPCVEC3, 0, {RT, RA, VB}}, {"maclhwsuo", XO (4, 460,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"maclhwsuo.", XO (4, 460,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"vclzd", VX (4,1986), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, {"vpopcntd", VX (4,1987), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, {"vcmpbfp.", VXR(4, 966,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, {"udi15fcm.", APU(4, 995,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"vcmpgtsd.", VXR(4, 967,1), VXR_MASK, PPCVEC2, 0, {VD, VA, VB}}, {"udi15fcm", APU(4, 995,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, {"maclhwso", XO (4, 492,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"maclhwso.", XO (4, 492,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmaclhwso", XO (4, 494,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"nmaclhwso.", XO (4, 494,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, {"dcbz_l", X (4,1014), XRT_MASK, PPCPS, 0, {RA, RB}}, {"mulli", OP(7), OP_MASK, PPCCOM, PPCVLE, {RT, RA, SI}}, {"muli", OP(7), OP_MASK, PWRCOM, PPCVLE, {RT, RA, SI}}, {"subfic", OP(8), OP_MASK, PPCCOM, PPCVLE, {RT, RA, SI}}, {"sfi", OP(8), OP_MASK, PWRCOM, PPCVLE, {RT, RA, SI}}, {"dozi", OP(9), OP_MASK, M601, PPCVLE, {RT, RA, SI}}, {"cmplwi", OPL(10,0), OPL_MASK, PPCCOM, PPCVLE, {OBF, RA, UISIGNOPT}}, {"cmpldi", OPL(10,1), OPL_MASK, PPC64, PPCVLE, {OBF, RA, UISIGNOPT}}, {"cmpli", OP(10), OP_MASK, PPC, PPCVLE, {BF, L32OPT, RA, UISIGNOPT}}, {"cmpli", OP(10), OP_MASK, PWRCOM, PPC|PPCVLE, {BF, RA, UISIGNOPT}}, {"cmpwi", OPL(11,0), OPL_MASK, PPCCOM, PPCVLE, {OBF, RA, SI}}, {"cmpdi", OPL(11,1), OPL_MASK, PPC64, PPCVLE, {OBF, RA, SI}}, {"cmpi", OP(11), OP_MASK, PPC, PPCVLE, {BF, L32OPT, RA, 
SI}}, {"cmpi", OP(11), OP_MASK, PWRCOM, PPC|PPCVLE, {BF, RA, SI}}, {"addic", OP(12), OP_MASK, PPCCOM, PPCVLE, {RT, RA, SI}}, {"ai", OP(12), OP_MASK, PWRCOM, PPCVLE, {RT, RA, SI}}, {"subic", OP(12), OP_MASK, PPCCOM, PPCVLE, {RT, RA, NSI}}, {"addic.", OP(13), OP_MASK, PPCCOM, PPCVLE, {RT, RA, SI}}, {"ai.", OP(13), OP_MASK, PWRCOM, PPCVLE, {RT, RA, SI}}, {"subic.", OP(13), OP_MASK, PPCCOM, PPCVLE, {RT, RA, NSI}}, {"li", OP(14), DRA_MASK, PPCCOM, PPCVLE, {RT, SI}}, {"lil", OP(14), DRA_MASK, PWRCOM, PPCVLE, {RT, SI}}, {"addi", OP(14), OP_MASK, PPCCOM, PPCVLE, {RT, RA0, SI}}, {"cal", OP(14), OP_MASK, PWRCOM, PPCVLE, {RT, D, RA0}}, {"subi", OP(14), OP_MASK, PPCCOM, PPCVLE, {RT, RA0, NSI}}, {"la", OP(14), OP_MASK, PPCCOM, PPCVLE, {RT, D, RA0}}, {"lis", OP(15), DRA_MASK, PPCCOM, PPCVLE, {RT, SISIGNOPT}}, {"liu", OP(15), DRA_MASK, PWRCOM, PPCVLE, {RT, SISIGNOPT}}, {"addis", OP(15), OP_MASK, PPCCOM, PPCVLE, {RT, RA0, SISIGNOPT}}, {"cau", OP(15), OP_MASK, PWRCOM, PPCVLE, {RT, RA0, SISIGNOPT}}, {"subis", OP(15), OP_MASK, PPCCOM, PPCVLE, {RT, RA0, NSISIGNOPT}}, {"bdnz-", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDM}}, {"bdnz+", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDP}}, {"bdnz", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BD}}, {"bdn", BBO(16,BODNZ,0,0), BBOATBI_MASK, PWRCOM, PPCVLE, {BD}}, {"bdnzl-", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDM}}, {"bdnzl+", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDP}}, {"bdnzl", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BD}}, {"bdnl", BBO(16,BODNZ,0,1), BBOATBI_MASK, PWRCOM, PPCVLE, {BD}}, {"bdnza-", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDMA}}, {"bdnza+", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDPA}}, {"bdnza", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDA}}, {"bdna", BBO(16,BODNZ,1,0), BBOATBI_MASK, PWRCOM, PPCVLE, {BDA}}, {"bdnzla-", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDMA}}, {"bdnzla+", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDPA}}, {"bdnzla", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDA}}, {"bdnla", BBO(16,BODNZ,1,1), BBOATBI_MASK, PWRCOM, PPCVLE, {BDA}}, {"bdz-", BBO(16,BODZ,0,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDM}}, {"bdz+", BBO(16,BODZ,0,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDP}}, {"bdz", BBO(16,BODZ,0,0), BBOATBI_MASK, COM, PPCVLE, {BD}}, {"bdzl-", BBO(16,BODZ,0,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDM}}, {"bdzl+", BBO(16,BODZ,0,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDP}}, {"bdzl", BBO(16,BODZ,0,1), BBOATBI_MASK, COM, PPCVLE, {BD}}, {"bdza-", BBO(16,BODZ,1,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDMA}}, {"bdza+", BBO(16,BODZ,1,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDPA}}, {"bdza", BBO(16,BODZ,1,0), BBOATBI_MASK, COM, PPCVLE, {BDA}}, {"bdzla-", BBO(16,BODZ,1,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDMA}}, {"bdzla+", BBO(16,BODZ,1,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDPA}}, {"bdzla", BBO(16,BODZ,1,1), BBOATBI_MASK, COM, PPCVLE, {BDA}}, {"bge-", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bge+", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bge", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bnl-", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bnl+", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bnl", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bgel-", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bgel+", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bgel", BBOCB(16,BOF,CBLT,0,1), 
BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bnll-", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bnll+", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bnll", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bgea-", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bgea+", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bgea", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bnla-", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bnla+", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bnla", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bgela-", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bgela+", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bgela", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bnlla-", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bnlla+", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bnlla", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"ble-", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"ble+", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"ble", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bng-", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bng+", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bng", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"blel-", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"blel+", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"blel", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bngl-", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bngl+", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bngl", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"blea-", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"blea+", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"blea", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bnga-", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bnga+", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bnga", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"blela-", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"blela+", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"blela", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bngla-", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bngla+", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bngla", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bne-", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bne+", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bne", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bnel-", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bnel+", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bnel", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bnea-", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bnea+", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, 
{CR, BDPA}}, {"bnea", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bnela-", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bnela+", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bnela", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bns-", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bns+", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bns", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bnu-", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bnu+", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bnu", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BD}}, {"bnsl-", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bnsl+", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bnsl", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bnul-", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bnul+", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bnul", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BD}}, {"bnsa-", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bnsa+", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bnsa", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bnua-", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bnua+", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bnua", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDA}}, {"bnsla-", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bnsla+", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bnsla", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bnula-", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bnula+", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bnula", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDA}}, {"blt-", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"blt+", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"blt", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bltl-", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bltl+", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bltl", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"blta-", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"blta+", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"blta", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bltla-", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bltla+", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bltla", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bgt-", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bgt+", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bgt", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bgtl-", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bgtl+", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bgtl", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bgta-", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bgta+", 
BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bgta", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bgtla-", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bgtla+", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bgtla", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"beq-", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"beq+", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"beq", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"beql-", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"beql+", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"beql", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"beqa-", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"beqa+", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"beqa", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"beqla-", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"beqla+", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"beqla", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bso-", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bso+", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bso", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bun-", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bun+", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bun", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BD}}, {"bsol-", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bsol+", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bsol", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, {"bunl-", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, {"bunl+", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, {"bunl", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BD}}, {"bsoa-", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bsoa+", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bsoa", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"buna-", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"buna+", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"buna", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDA}}, {"bsola-", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bsola+", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bsola", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, {"bunla-", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, {"bunla+", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, {"bunla", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDA}}, {"bdnzf-", BBO(16,BODNZF,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, {"bdnzf+", BBO(16,BODNZF,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, {"bdnzf", BBO(16,BODNZF,0,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, {"bdnzfl-", BBO(16,BODNZF,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, {"bdnzfl+", BBO(16,BODNZF,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, {"bdnzfl", BBO(16,BODNZF,0,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, {"bdnzfa-", BBO(16,BODNZF,1,0), 
BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, {"bdnzfa+", BBO(16,BODNZF,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, {"bdnzfa", BBO(16,BODNZF,1,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, {"bdnzfla-", BBO(16,BODNZF,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, {"bdnzfla+", BBO(16,BODNZF,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, {"bdnzfla", BBO(16,BODNZF,1,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, {"bdzf-", BBO(16,BODZF,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, {"bdzf+", BBO(16,BODZF,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, {"bdzf", BBO(16,BODZF,0,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, {"bdzfl-", BBO(16,BODZF,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, {"bdzfl+", BBO(16,BODZF,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, {"bdzfl", BBO(16,BODZF,0,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, {"bdzfa-", BBO(16,BODZF,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, {"bdzfa+", BBO(16,BODZF,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, {"bdzfa", BBO(16,BODZF,1,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, {"bdzfla-", BBO(16,BODZF,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, {"bdzfla+", BBO(16,BODZF,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, {"bdzfla", BBO(16,BODZF,1,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, {"bf-", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDM}}, {"bf+", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDP}}, {"bf", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BD}}, {"bbf", BBO(16,BOF,0,0), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BD}}, {"bfl-", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDM}}, {"bfl+", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDP}}, {"bfl", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BD}}, {"bbfl", BBO(16,BOF,0,1), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BD}}, {"bfa-", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDMA}}, {"bfa+", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDPA}}, {"bfa", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDA}}, {"bbfa", BBO(16,BOF,1,0), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BDA}}, {"bfla-", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDMA}}, {"bfla+", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDPA}}, {"bfla", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDA}}, {"bbfla", BBO(16,BOF,1,1), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BDA}}, {"bdnzt-", BBO(16,BODNZT,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, {"bdnzt+", BBO(16,BODNZT,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, {"bdnzt", BBO(16,BODNZT,0,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, {"bdnztl-", BBO(16,BODNZT,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, {"bdnztl+", BBO(16,BODNZT,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, {"bdnztl", BBO(16,BODNZT,0,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, {"bdnzta-", BBO(16,BODNZT,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, {"bdnzta+", BBO(16,BODNZT,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, {"bdnzta", BBO(16,BODNZT,1,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, {"bdnztla-", BBO(16,BODNZT,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, {"bdnztla+", BBO(16,BODNZT,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, {"bdnztla", BBO(16,BODNZT,1,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, {"bdzt-", BBO(16,BODZT,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, {"bdzt+", BBO(16,BODZT,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, {"bdzt", BBO(16,BODZT,0,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, {"bdztl-", BBO(16,BODZT,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, 
BDM}}, {"bdztl+", BBO(16,BODZT,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, {"bdztl", BBO(16,BODZT,0,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, {"bdzta-", BBO(16,BODZT,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, {"bdzta+", BBO(16,BODZT,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, {"bdzta", BBO(16,BODZT,1,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, {"bdztla-", BBO(16,BODZT,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, {"bdztla+", BBO(16,BODZT,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, {"bdztla", BBO(16,BODZT,1,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, {"bt-", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDM}}, {"bt+", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDP}}, {"bt", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BD}}, {"bbt", BBO(16,BOT,0,0), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BD}}, {"btl-", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDM}}, {"btl+", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDP}}, {"btl", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BD}}, {"bbtl", BBO(16,BOT,0,1), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BD}}, {"bta-", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDMA}}, {"bta+", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDPA}}, {"bta", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDA}}, {"bbta", BBO(16,BOT,1,0), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BDA}}, {"btla-", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDMA}}, {"btla+", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDPA}}, {"btla", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDA}}, {"bbtla", BBO(16,BOT,1,1), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BDA}}, {"bc-", B(16,0,0), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDM}}, {"bc+", B(16,0,0), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDP}}, {"bc", B(16,0,0), B_MASK, COM, PPCVLE, {BO, BI, BD}}, {"bcl-", B(16,0,1), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDM}}, {"bcl+", B(16,0,1), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDP}}, {"bcl", B(16,0,1), B_MASK, COM, PPCVLE, {BO, BI, BD}}, {"bca-", B(16,1,0), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDMA}}, {"bca+", B(16,1,0), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDPA}}, {"bca", B(16,1,0), B_MASK, COM, PPCVLE, {BO, BI, BDA}}, {"bcla-", B(16,1,1), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDMA}}, {"bcla+", B(16,1,1), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDPA}}, {"bcla", B(16,1,1), B_MASK, COM, PPCVLE, {BO, BI, BDA}}, {"svc", SC(17,0,0), SC_MASK, POWER, PPCVLE, {SVC_LEV, FL1, FL2}}, {"svcl", SC(17,0,1), SC_MASK, POWER, PPCVLE, {SVC_LEV, FL1, FL2}}, {"sc", SC(17,1,0), SC_MASK, PPC, PPCVLE, {LEV}}, {"svca", SC(17,1,0), SC_MASK, PWRCOM, PPCVLE, {SV}}, {"svcla", SC(17,1,1), SC_MASK, POWER, PPCVLE, {SV}}, {"b", B(18,0,0), B_MASK, COM, PPCVLE, {LI}}, {"bl", B(18,0,1), B_MASK, COM, PPCVLE, {LI}}, {"ba", B(18,1,0), B_MASK, COM, PPCVLE, {LIA}}, {"bla", B(18,1,1), B_MASK, COM, PPCVLE, {LIA}}, {"mcrf", XL(19,0), XLBB_MASK|(3<<21)|(3<<16), COM, PPCVLE, {BF, BFA}}, {"addpcis", DX(19,2), DX_MASK, POWER9, PPCVLE, {RT, DXD}}, {"subpcis", DX(19,2), DX_MASK, POWER9, PPCVLE, {RT, NDXD}}, {"bdnzlr", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, PPCCOM, PPCVLE, {0}}, {"bdnzlr-", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, {"bdnzlrl", XLO(19,BODNZ,16,1), XLBOBIBB_MASK, PPCCOM, PPCVLE, {0}}, {"bdnzlrl-", XLO(19,BODNZ,16,1), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, {"bdnzlr+", XLO(19,BODNZP,16,0), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, {"bdnzlrl+", XLO(19,BODNZP,16,1), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, {"bdzlr", XLO(19,BODZ,16,0), XLBOBIBB_MASK, PPCCOM, PPCVLE, {0}}, {"bdzlr-", 
XLO(19,BODZ,16,0), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, {"bdzlrl", XLO(19,BODZ,16,1), XLBOBIBB_MASK, PPCCOM, PPCVLE, {0}}, {"bdzlrl-", XLO(19,BODZ,16,1), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, {"bdzlr+", XLO(19,BODZP,16,0), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, {"bdzlrl+", XLO(19,BODZP,16,1), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, {"blr", XLO(19,BOU,16,0), XLBOBIBB_MASK, PPCCOM, PPCVLE, {0}}, {"br", XLO(19,BOU,16,0), XLBOBIBB_MASK, PWRCOM, PPCVLE, {0}}, {"blrl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PPCCOM, PPCVLE, {0}}, {"brl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PWRCOM, PPCVLE, {0}}, {"bdnzlr-", XLO(19,BODNZM4,16,0), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, {"bdnzlrl-", XLO(19,BODNZM4,16,1), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, {"bdnzlr+", XLO(19,BODNZP4,16,0), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, {"bdnzlrl+", XLO(19,BODNZP4,16,1), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, {"bdzlr-", XLO(19,BODZM4,16,0), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, {"bdzlrl-", XLO(19,BODZM4,16,1), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, {"bdzlr+", XLO(19,BODZP4,16,0), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, {"bdzlrl+", XLO(19,BODZP4,16,1), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, {"bgelr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bgelr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bger", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bnllr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnllr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnlr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bgelrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bgelrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgerl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bnllrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnllrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnlrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"blelr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"blelr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bler", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bnglr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnglr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bngr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"blelrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"blelrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"blerl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bnglrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnglrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bngrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bnelr", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnelr-", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bner", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bnelrl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnelrl-", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnerl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bnslr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnslr-", 
XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnsr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bnulr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnulr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnslrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnslrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnsrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bnulrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnulrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgelr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnllr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgelrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnllrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"blelr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnglr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"blelrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnglrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnelr+", XLOCB(19,BOFP,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnelrl+", XLOCB(19,BOFP,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnslr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnulr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnslrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnulrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgelr-", XLOCB(19,BOFM4,CBLT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnllr-", XLOCB(19,BOFM4,CBLT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgelrl-", XLOCB(19,BOFM4,CBLT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnllrl-", XLOCB(19,BOFM4,CBLT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"blelr-", XLOCB(19,BOFM4,CBGT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnglr-", XLOCB(19,BOFM4,CBGT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"blelrl-", XLOCB(19,BOFM4,CBGT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnglrl-", XLOCB(19,BOFM4,CBGT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnelr-", XLOCB(19,BOFM4,CBEQ,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnelrl-", XLOCB(19,BOFM4,CBEQ,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnslr-", XLOCB(19,BOFM4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnulr-", XLOCB(19,BOFM4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnslrl-", XLOCB(19,BOFM4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnulrl-", XLOCB(19,BOFM4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgelr+", XLOCB(19,BOFP4,CBLT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnllr+", XLOCB(19,BOFP4,CBLT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgelrl+", XLOCB(19,BOFP4,CBLT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnllrl+", XLOCB(19,BOFP4,CBLT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"blelr+", XLOCB(19,BOFP4,CBGT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnglr+", XLOCB(19,BOFP4,CBGT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"blelrl+", XLOCB(19,BOFP4,CBGT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnglrl+", XLOCB(19,BOFP4,CBGT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnelr+", 
XLOCB(19,BOFP4,CBEQ,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnelrl+", XLOCB(19,BOFP4,CBEQ,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnslr+", XLOCB(19,BOFP4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnulr+", XLOCB(19,BOFP4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnslrl+", XLOCB(19,BOFP4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnulrl+", XLOCB(19,BOFP4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bltlr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bltlr-", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bltr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bltlrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bltlrl-", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bltrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bgtlr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bgtlr-", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgtr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bgtlrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bgtlrl-", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgtrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"beqlr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"beqlr-", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"beqr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"beqlrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"beqlrl-", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"beqrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bsolr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bsolr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bsor", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bunlr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bunlr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bsolrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bsolrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bsorl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, {"bunlrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bunlrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bltlr+", XLOCB(19,BOTP,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bltlrl+", XLOCB(19,BOTP,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgtlr+", XLOCB(19,BOTP,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgtlrl+", XLOCB(19,BOTP,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"beqlr+", XLOCB(19,BOTP,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"beqlrl+", XLOCB(19,BOTP,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bsolr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bunlr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bsolrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bunlrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bltlr-", XLOCB(19,BOTM4,CBLT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bltlrl-", XLOCB(19,BOTM4,CBLT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, 
{CR}}, {"bgtlr-", XLOCB(19,BOTM4,CBGT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgtlrl-", XLOCB(19,BOTM4,CBGT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"beqlr-", XLOCB(19,BOTM4,CBEQ,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"beqlrl-", XLOCB(19,BOTM4,CBEQ,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bsolr-", XLOCB(19,BOTM4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bunlr-", XLOCB(19,BOTM4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bsolrl-", XLOCB(19,BOTM4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bunlrl-", XLOCB(19,BOTM4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bltlr+", XLOCB(19,BOTP4,CBLT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bltlrl+", XLOCB(19,BOTP4,CBLT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgtlr+", XLOCB(19,BOTP4,CBGT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgtlrl+", XLOCB(19,BOTP4,CBGT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"beqlr+", XLOCB(19,BOTP4,CBEQ,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"beqlrl+", XLOCB(19,BOTP4,CBEQ,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bsolr+", XLOCB(19,BOTP4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bunlr+", XLOCB(19,BOTP4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bsolrl+", XLOCB(19,BOTP4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bunlrl+", XLOCB(19,BOTP4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bdnzflr", XLO(19,BODNZF,16,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"bdnzflr-", XLO(19,BODNZF,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdnzflrl", XLO(19,BODNZF,16,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"bdnzflrl-",XLO(19,BODNZF,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdnzflr+", XLO(19,BODNZFP,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdnzflrl+",XLO(19,BODNZFP,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdzflr", XLO(19,BODZF,16,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"bdzflr-", XLO(19,BODZF,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdzflrl", XLO(19,BODZF,16,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"bdzflrl-", XLO(19,BODZF,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdzflr+", XLO(19,BODZFP,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdzflrl+", XLO(19,BODZFP,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bflr", XLO(19,BOF,16,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"bflr-", XLO(19,BOF,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bbfr", XLO(19,BOF,16,0), XLBOBB_MASK, PWRCOM, PPCVLE, {BI}}, {"bflrl", XLO(19,BOF,16,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"bflrl-", XLO(19,BOF,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bbfrl", XLO(19,BOF,16,1), XLBOBB_MASK, PWRCOM, PPCVLE, {BI}}, {"bflr+", XLO(19,BOFP,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bflrl+", XLO(19,BOFP,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bflr-", XLO(19,BOFM4,16,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"bflrl-", XLO(19,BOFM4,16,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"bflr+", XLO(19,BOFP4,16,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"bflrl+", XLO(19,BOFP4,16,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"bdnztlr", XLO(19,BODNZT,16,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"bdnztlr-", XLO(19,BODNZT,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdnztlrl", XLO(19,BODNZT,16,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"bdnztlrl-", XLO(19,BODNZT,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdnztlr+", XLO(19,BODNZTP,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdnztlrl+", XLO(19,BODNZTP,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdztlr", 
XLO(19,BODZT,16,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"bdztlr-", XLO(19,BODZT,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdztlrl", XLO(19,BODZT,16,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"bdztlrl-", XLO(19,BODZT,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdztlr+", XLO(19,BODZTP,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bdztlrl+", XLO(19,BODZTP,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"btlr", XLO(19,BOT,16,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"btlr-", XLO(19,BOT,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bbtr", XLO(19,BOT,16,0), XLBOBB_MASK, PWRCOM, PPCVLE, {BI}}, {"btlrl", XLO(19,BOT,16,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"btlrl-", XLO(19,BOT,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bbtrl", XLO(19,BOT,16,1), XLBOBB_MASK, PWRCOM, PPCVLE, {BI}}, {"btlr+", XLO(19,BOTP,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"btlrl+", XLO(19,BOTP,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"btlr-", XLO(19,BOTM4,16,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"btlrl-", XLO(19,BOTM4,16,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"btlr+", XLO(19,BOTP4,16,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"btlrl+", XLO(19,BOTP4,16,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"bclr-", XLYLK(19,16,0,0), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, {"bclrl-", XLYLK(19,16,0,1), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, {"bclr+", XLYLK(19,16,1,0), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, {"bclrl+", XLYLK(19,16,1,1), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, {"bclr", XLLK(19,16,0), XLBH_MASK, PPCCOM, PPCVLE, {BO, BI, BH}}, {"bcr", XLLK(19,16,0), XLBB_MASK, PWRCOM, PPCVLE, {BO, BI}}, {"bclrl", XLLK(19,16,1), XLBH_MASK, PPCCOM, PPCVLE, {BO, BI, BH}}, {"bcrl", XLLK(19,16,1), XLBB_MASK, PWRCOM, PPCVLE, {BO, BI}}, {"rfid", XL(19,18), 0xffffffff, PPC64, PPCVLE, {0}}, {"crnot", XL(19,33), XL_MASK, PPCCOM, PPCVLE, {BT, BA, BBA}}, {"crnor", XL(19,33), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, {"rfmci", X(19,38), 0xffffffff, PPCRFMCI|PPCA2|PPC476, PPCVLE, {0}}, {"rfdi", XL(19,39), 0xffffffff, E500MC, PPCVLE, {0}}, {"rfi", XL(19,50), 0xffffffff, COM, PPCVLE, {0}}, {"rfci", XL(19,51), 0xffffffff, PPC403|BOOKE|PPCE300|PPCA2|PPC476, PPCVLE, {0}}, {"rfsvc", XL(19,82), 0xffffffff, POWER, PPCVLE, {0}}, {"rfgi", XL(19,102), 0xffffffff, E500MC|PPCA2, PPCVLE, {0}}, {"crandc", XL(19,129), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, {"rfebb", XL(19,146), XLS_MASK, POWER8, PPCVLE, {SXL}}, {"isync", XL(19,150), 0xffffffff, PPCCOM, PPCVLE, {0}}, {"ics", XL(19,150), 0xffffffff, PWRCOM, PPCVLE, {0}}, {"crclr", XL(19,193), XL_MASK, PPCCOM, PPCVLE, {BT, BAT, BBA}}, {"crxor", XL(19,193), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, {"dnh", X(19,198), X_MASK, E500MC, PPCVLE, {DUI, DUIS}}, {"crnand", XL(19,225), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, {"crand", XL(19,257), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, {"hrfid", XL(19,274), 0xffffffff, POWER5|CELL, PPC476|PPCVLE, {0}}, {"crset", XL(19,289), XL_MASK, PPCCOM, PPCVLE, {BT, BAT, BBA}}, {"creqv", XL(19,289), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, {"urfid", XL(19,306), 0xffffffff, POWER9, PPCVLE, {0}}, {"stop", XL(19,370), 0xffffffff, POWER9, PPCVLE, {0}}, {"doze", XL(19,402), 0xffffffff, POWER6, POWER9|PPCVLE, {0}}, {"crorc", XL(19,417), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, {"nap", XL(19,434), 0xffffffff, POWER6, POWER9|PPCVLE, {0}}, {"crmove", XL(19,449), XL_MASK, PPCCOM, PPCVLE, {BT, BA, BBA}}, {"cror", XL(19,449), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, {"sleep", XL(19,466), 0xffffffff, POWER6, POWER9|PPCVLE, {0}}, {"rvwinkle", XL(19,498), 0xffffffff, POWER6, 
POWER9|PPCVLE, {0}}, {"bctr", XLO(19,BOU,528,0), XLBOBIBB_MASK, COM, PPCVLE, {0}}, {"bctrl", XLO(19,BOU,528,1), XLBOBIBB_MASK, COM, PPCVLE, {0}}, {"bgectr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bgectr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnlctr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnlctr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgectrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bgectrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnlctrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnlctrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"blectr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"blectr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bngctr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bngctr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"blectrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"blectrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bngctrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bngctrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnectr", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnectr-", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnectrl", XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnectrl-",XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnsctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnsctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnuctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnuctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnsctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnsctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnuctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bnuctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgectr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnlctr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgectrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnlctrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"blectr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bngctr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"blectrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bngctrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnectr+", XLOCB(19,BOFP,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnectrl+",XLOCB(19,BOFP,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnsctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnuctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnsctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bnuctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgectr-", 
XLOCB(19,BOFM4,CBLT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnlctr-", XLOCB(19,BOFM4,CBLT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgectrl-",XLOCB(19,BOFM4,CBLT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnlctrl-",XLOCB(19,BOFM4,CBLT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"blectr-", XLOCB(19,BOFM4,CBGT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bngctr-", XLOCB(19,BOFM4,CBGT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"blectrl-",XLOCB(19,BOFM4,CBGT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bngctrl-",XLOCB(19,BOFM4,CBGT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnectr-", XLOCB(19,BOFM4,CBEQ,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnectrl-",XLOCB(19,BOFM4,CBEQ,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnsctr-", XLOCB(19,BOFM4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnuctr-", XLOCB(19,BOFM4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnsctrl-",XLOCB(19,BOFM4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnuctrl-",XLOCB(19,BOFM4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgectr+", XLOCB(19,BOFP4,CBLT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnlctr+", XLOCB(19,BOFP4,CBLT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgectrl+",XLOCB(19,BOFP4,CBLT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnlctrl+",XLOCB(19,BOFP4,CBLT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"blectr+", XLOCB(19,BOFP4,CBGT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bngctr+", XLOCB(19,BOFP4,CBGT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"blectrl+",XLOCB(19,BOFP4,CBGT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bngctrl+",XLOCB(19,BOFP4,CBGT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnectr+", XLOCB(19,BOFP4,CBEQ,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnectrl+",XLOCB(19,BOFP4,CBEQ,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnsctr+", XLOCB(19,BOFP4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnuctr+", XLOCB(19,BOFP4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnsctrl+",XLOCB(19,BOFP4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bnuctrl+",XLOCB(19,BOFP4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bltctr", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bltctr-", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bltctrl", XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bltctrl-",XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgtctr", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bgtctr-", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgtctrl", XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bgtctrl-",XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"beqctr", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"beqctr-", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"beqctrl", XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"beqctrl-",XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bsoctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bsoctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bunctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bunctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bsoctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bsoctrl-",XLOCB(19,BOT,CBSO,528,1), 
XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bunctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, {"bunctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bltctr+", XLOCB(19,BOTP,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bltctrl+",XLOCB(19,BOTP,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgtctr+", XLOCB(19,BOTP,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bgtctrl+",XLOCB(19,BOTP,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"beqctr+", XLOCB(19,BOTP,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"beqctrl+",XLOCB(19,BOTP,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bsoctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bunctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bsoctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bunctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, {"bltctr-", XLOCB(19,BOTM4,CBLT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bltctrl-",XLOCB(19,BOTM4,CBLT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgtctr-", XLOCB(19,BOTM4,CBGT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgtctrl-",XLOCB(19,BOTM4,CBGT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"beqctr-", XLOCB(19,BOTM4,CBEQ,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"beqctrl-",XLOCB(19,BOTM4,CBEQ,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bsoctr-", XLOCB(19,BOTM4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bunctr-", XLOCB(19,BOTM4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bsoctrl-",XLOCB(19,BOTM4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bunctrl-",XLOCB(19,BOTM4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bltctr+", XLOCB(19,BOTP4,CBLT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bltctrl+",XLOCB(19,BOTP4,CBLT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgtctr+", XLOCB(19,BOTP4,CBGT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bgtctrl+",XLOCB(19,BOTP4,CBGT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"beqctr+", XLOCB(19,BOTP4,CBEQ,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"beqctrl+",XLOCB(19,BOTP4,CBEQ,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bsoctr+", XLOCB(19,BOTP4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bunctr+", XLOCB(19,BOTP4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bsoctrl+",XLOCB(19,BOTP4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bunctrl+",XLOCB(19,BOTP4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, {"bfctr", XLO(19,BOF,528,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"bfctr-", XLO(19,BOF,528,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bfctrl", XLO(19,BOF,528,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"bfctrl-", XLO(19,BOF,528,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bfctr+", XLO(19,BOFP,528,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bfctrl+", XLO(19,BOFP,528,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"bfctr-", XLO(19,BOFM4,528,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"bfctrl-", XLO(19,BOFM4,528,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"bfctr+", XLO(19,BOFP4,528,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"bfctrl+", XLO(19,BOFP4,528,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"btctr", XLO(19,BOT,528,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"btctr-", XLO(19,BOT,528,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"btctrl", XLO(19,BOT,528,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, {"btctrl-", XLO(19,BOT,528,1), XLBOBB_MASK, 
PPCCOM, ISA_V2|PPCVLE, {BI}}, {"btctr+", XLO(19,BOTP,528,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"btctrl+", XLO(19,BOTP,528,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, {"btctr-", XLO(19,BOTM4,528,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"btctrl-", XLO(19,BOTM4,528,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"btctr+", XLO(19,BOTP4,528,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"btctrl+", XLO(19,BOTP4,528,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, {"bcctr-", XLYLK(19,528,0,0), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, {"bcctrl-", XLYLK(19,528,0,1), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, {"bcctr+", XLYLK(19,528,1,0), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, {"bcctrl+", XLYLK(19,528,1,1), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, {"bcctr", XLLK(19,528,0), XLBH_MASK, PPCCOM, PPCVLE, {BO, BI, BH}}, {"bcc", XLLK(19,528,0), XLBB_MASK, PWRCOM, PPCVLE, {BO, BI}}, {"bcctrl", XLLK(19,528,1), XLBH_MASK, PPCCOM, PPCVLE, {BO, BI, BH}}, {"bccl", XLLK(19,528,1), XLBB_MASK, PWRCOM, PPCVLE, {BO, BI}}, {"bctar-", XLYLK(19,560,0,0), XLYBB_MASK, POWER8, PPCVLE, {BOE, BI}}, {"bctarl-", XLYLK(19,560,0,1), XLYBB_MASK, POWER8, PPCVLE, {BOE, BI}}, {"bctar+", XLYLK(19,560,1,0), XLYBB_MASK, POWER8, PPCVLE, {BOE, BI}}, {"bctarl+", XLYLK(19,560,1,1), XLYBB_MASK, POWER8, PPCVLE, {BOE, BI}}, {"bctar", XLLK(19,560,0), XLBH_MASK, POWER8, PPCVLE, {BO, BI, BH}}, {"bctarl", XLLK(19,560,1), XLBH_MASK, POWER8, PPCVLE, {BO, BI, BH}}, {"rlwimi", M(20,0), M_MASK, PPCCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, {"rlimi", M(20,0), M_MASK, PWRCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, {"rlwimi.", M(20,1), M_MASK, PPCCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, {"rlimi.", M(20,1), M_MASK, PWRCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, {"rotlwi", MME(21,31,0), MMBME_MASK, PPCCOM, PPCVLE, {RA, RS, SH}}, {"clrlwi", MME(21,31,0), MSHME_MASK, PPCCOM, PPCVLE, {RA, RS, MB}}, {"rlwinm", M(21,0), M_MASK, PPCCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, {"rlinm", M(21,0), M_MASK, PWRCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, {"rotlwi.", MME(21,31,1), MMBME_MASK, PPCCOM, PPCVLE, {RA, RS, SH}}, {"clrlwi.", MME(21,31,1), MSHME_MASK, PPCCOM, PPCVLE, {RA, RS, MB}}, {"rlwinm.", M(21,1), M_MASK, PPCCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, {"rlinm.", M(21,1), M_MASK, PWRCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, {"rlmi", M(22,0), M_MASK, M601, PPCVLE, {RA, RS, RB, MBE, ME}}, {"rlmi.", M(22,1), M_MASK, M601, PPCVLE, {RA, RS, RB, MBE, ME}}, {"rotlw", MME(23,31,0), MMBME_MASK, PPCCOM, PPCVLE, {RA, RS, RB}}, {"rlwnm", M(23,0), M_MASK, PPCCOM, PPCVLE, {RA, RS, RB, MBE, ME}}, {"rlnm", M(23,0), M_MASK, PWRCOM, PPCVLE, {RA, RS, RB, MBE, ME}}, {"rotlw.", MME(23,31,1), MMBME_MASK, PPCCOM, PPCVLE, {RA, RS, RB}}, {"rlwnm.", M(23,1), M_MASK, PPCCOM, PPCVLE, {RA, RS, RB, MBE, ME}}, {"rlnm.", M(23,1), M_MASK, PWRCOM, PPCVLE, {RA, RS, RB, MBE, ME}}, {"nop", OP(24), 0xffffffff, PPCCOM, PPCVLE, {0}}, {"ori", OP(24), OP_MASK, PPCCOM, PPCVLE, {RA, RS, UI}}, {"oril", OP(24), OP_MASK, PWRCOM, PPCVLE, {RA, RS, UI}}, {"oris", OP(25), OP_MASK, PPCCOM, PPCVLE, {RA, RS, UI}}, {"oriu", OP(25), OP_MASK, PWRCOM, PPCVLE, {RA, RS, UI}}, {"xnop", OP(26), 0xffffffff, PPCCOM, PPCVLE, {0}}, {"xori", OP(26), OP_MASK, PPCCOM, PPCVLE, {RA, RS, UI}}, {"xoril", OP(26), OP_MASK, PWRCOM, PPCVLE, {RA, RS, UI}}, {"xoris", OP(27), OP_MASK, PPCCOM, PPCVLE, {RA, RS, UI}}, {"xoriu", OP(27), OP_MASK, PWRCOM, PPCVLE, {RA, RS, UI}}, {"andi.", OP(28), OP_MASK, PPCCOM, PPCVLE, {RA, RS, UI}}, {"andil.", OP(28), OP_MASK, PWRCOM, PPCVLE, {RA, RS, UI}}, {"andis.", OP(29), OP_MASK, PPCCOM, PPCVLE, {RA, RS, UI}}, {"andiu.", OP(29), OP_MASK, PWRCOM, PPCVLE, {RA, RS, 
UI}}, {"rotldi", MD(30,0,0), MDMB_MASK, PPC64, PPCVLE, {RA, RS, SH6}}, {"clrldi", MD(30,0,0), MDSH_MASK, PPC64, PPCVLE, {RA, RS, MB6}}, {"rldicl", MD(30,0,0), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, MB6}}, {"rotldi.", MD(30,0,1), MDMB_MASK, PPC64, PPCVLE, {RA, RS, SH6}}, {"clrldi.", MD(30,0,1), MDSH_MASK, PPC64, PPCVLE, {RA, RS, MB6}}, {"rldicl.", MD(30,0,1), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, MB6}}, {"rldicr", MD(30,1,0), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, ME6}}, {"rldicr.", MD(30,1,1), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, ME6}}, {"rldic", MD(30,2,0), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, MB6}}, {"rldic.", MD(30,2,1), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, MB6}}, {"rldimi", MD(30,3,0), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, MB6}}, {"rldimi.", MD(30,3,1), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, MB6}}, {"rotld", MDS(30,8,0), MDSMB_MASK, PPC64, PPCVLE, {RA, RS, RB}}, {"rldcl", MDS(30,8,0), MDS_MASK, PPC64, PPCVLE, {RA, RS, RB, MB6}}, {"rotld.", MDS(30,8,1), MDSMB_MASK, PPC64, PPCVLE, {RA, RS, RB}}, {"rldcl.", MDS(30,8,1), MDS_MASK, PPC64, PPCVLE, {RA, RS, RB, MB6}}, {"rldcr", MDS(30,9,0), MDS_MASK, PPC64, PPCVLE, {RA, RS, RB, ME6}}, {"rldcr.", MDS(30,9,1), MDS_MASK, PPC64, PPCVLE, {RA, RS, RB, ME6}}, {"cmpw", XOPL(31,0,0), XCMPL_MASK, PPCCOM, 0, {OBF, RA, RB}}, {"cmpd", XOPL(31,0,1), XCMPL_MASK, PPC64, 0, {OBF, RA, RB}}, {"cmp", X(31,0), XCMP_MASK, PPC, 0, {BF, L32OPT, RA, RB}}, {"cmp", X(31,0), XCMPL_MASK, PWRCOM, PPC, {BF, RA, RB}}, {"twlgt", XTO(31,4,TOLGT), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tlgt", XTO(31,4,TOLGT), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"twllt", XTO(31,4,TOLLT), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tllt", XTO(31,4,TOLLT), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"tweq", XTO(31,4,TOEQ), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"teq", XTO(31,4,TOEQ), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"twlge", XTO(31,4,TOLGE), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tlge", XTO(31,4,TOLGE), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"twlnl", XTO(31,4,TOLNL), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tlnl", XTO(31,4,TOLNL), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"twlle", XTO(31,4,TOLLE), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tlle", XTO(31,4,TOLLE), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"twlng", XTO(31,4,TOLNG), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tlng", XTO(31,4,TOLNG), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"twgt", XTO(31,4,TOGT), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tgt", XTO(31,4,TOGT), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"twge", XTO(31,4,TOGE), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tge", XTO(31,4,TOGE), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"twnl", XTO(31,4,TONL), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tnl", XTO(31,4,TONL), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"twlt", XTO(31,4,TOLT), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tlt", XTO(31,4,TOLT), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"twle", XTO(31,4,TOLE), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tle", XTO(31,4,TOLE), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"twng", XTO(31,4,TONG), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tng", XTO(31,4,TONG), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"twne", XTO(31,4,TONE), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tne", XTO(31,4,TONE), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"trap", XTO(31,4,TOU), 0xffffffff, PPCCOM, 0, {0}}, {"twu", XTO(31,4,TOU), XTO_MASK, PPCCOM, 0, {RA, RB}}, {"tu", XTO(31,4,TOU), XTO_MASK, PWRCOM, 0, {RA, RB}}, {"tw", X(31,4), X_MASK, PPCCOM, 0, {TO, RA, RB}}, {"t", X(31,4), X_MASK, PWRCOM, 0, {TO, RA, RB}}, {"lvsl", X(31,6), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, {"lvebx", X(31,7), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, {"lbfcmx", APU(31,7,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"subfc", XO(31,8,0,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, 
{"sf", XO(31,8,0,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"subc", XO(31,8,0,0), XO_MASK, PPCCOM, 0, {RT, RB, RA}}, {"subfc.", XO(31,8,0,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"sf.", XO(31,8,0,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"subc.", XO(31,8,0,1), XO_MASK, PPCCOM, 0, {RT, RB, RA}}, {"mulhdu", XO(31,9,0,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"mulhdu.", XO(31,9,0,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"addc", XO(31,10,0,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"a", XO(31,10,0,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"addc.", XO(31,10,0,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"a.", XO(31,10,0,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"mulhwu", XO(31,11,0,0), XO_MASK, PPC, 0, {RT, RA, RB}}, {"mulhwu.", XO(31,11,0,1), XO_MASK, PPC, 0, {RT, RA, RB}}, {"lxsiwzx", X(31,12), XX1_MASK, PPCVSX2, 0, {XT6, RA0, RB}}, {"isellt", X(31,15), X_MASK, PPCISEL, 0, {RT, RA0, RB}}, {"tlbilxlpid", XTO(31,18,0), XTO_MASK, E500MC|PPCA2, 0, {0}}, {"tlbilxpid", XTO(31,18,1), XTO_MASK, E500MC|PPCA2, 0, {0}}, {"tlbilxva", XTO(31,18,3), XTO_MASK, E500MC|PPCA2, 0, {RA0, RB}}, {"tlbilx", X(31,18), X_MASK, E500MC|PPCA2, 0, {T, RA0, RB}}, {"mfcr", XFXM(31,19,0,0), XFXFXM_MASK, COM, 0, {RT, FXM4}}, {"mfocrf", XFXM(31,19,0,1), XFXFXM_MASK, COM, 0, {RT, FXM}}, {"lwarx", X(31,20), XEH_MASK, PPC, 0, {RT, RA0, RB, EH}}, {"ldx", X(31,21), X_MASK, PPC64, 0, {RT, RA0, RB}}, {"icbt", X(31,22), X_MASK, BOOKE|PPCE300|PPCA2|PPC476, 0, {CT, RA0, RB}}, {"lwzx", X(31,23), X_MASK, PPCCOM, 0, {RT, RA0, RB}}, {"lx", X(31,23), X_MASK, PWRCOM, 0, {RT, RA, RB}}, {"slw", XRC(31,24,0), X_MASK, PPCCOM, 0, {RA, RS, RB}}, {"sl", XRC(31,24,0), X_MASK, PWRCOM, 0, {RA, RS, RB}}, {"slw.", XRC(31,24,1), X_MASK, PPCCOM, 0, {RA, RS, RB}}, {"sl.", XRC(31,24,1), X_MASK, PWRCOM, 0, {RA, RS, RB}}, {"cntlzw", XRC(31,26,0), XRB_MASK, PPCCOM, 0, {RA, RS}}, {"cntlz", XRC(31,26,0), XRB_MASK, PWRCOM, 0, {RA, RS}}, {"cntlzw.", XRC(31,26,1), XRB_MASK, PPCCOM, 0, {RA, RS}}, {"cntlz.", XRC(31,26,1), XRB_MASK, PWRCOM, 0, {RA, RS}}, {"sld", XRC(31,27,0), X_MASK, PPC64, 0, {RA, RS, RB}}, {"sld.", XRC(31,27,1), X_MASK, PPC64, 0, {RA, RS, RB}}, {"and", XRC(31,28,0), X_MASK, COM, 0, {RA, RS, RB}}, {"and.", XRC(31,28,1), X_MASK, COM, 0, {RA, RS, RB}}, {"maskg", XRC(31,29,0), X_MASK, M601, PPCA2, {RA, RS, RB}}, {"maskg.", XRC(31,29,1), X_MASK, M601, PPCA2, {RA, RS, RB}}, {"ldepx", X(31,29), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, {"waitasec", X(31,30), XRTRARB_MASK, POWER8, POWER9, {0}}, {"wait", X(31,30), XWC_MASK, POWER9, 0, {WC}}, {"lwepx", X(31,31), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, {"cmplw", XOPL(31,32,0), XCMPL_MASK, PPCCOM, 0, {OBF, RA, RB}}, {"cmpld", XOPL(31,32,1), XCMPL_MASK, PPC64, 0, {OBF, RA, RB}}, {"cmpl", X(31,32), XCMP_MASK, PPC, 0, {BF, L32OPT, RA, RB}}, {"cmpl", X(31,32), XCMPL_MASK, PWRCOM, PPC, {BF, RA, RB}}, {"lvsr", X(31,38), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, {"lvehx", X(31,39), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, {"lhfcmx", APU(31,39,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"mviwsplt", X(31,46), X_MASK, PPCVEC2, 0, {VD, RA, RB}}, {"iselgt", X(31,47), X_MASK, PPCISEL, 0, {RT, RA0, RB}}, {"lvewx", X(31,71), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, {"addg6s", XO(31,74,0,0), XO_MASK, POWER6, 0, {RT, RA, RB}}, {"lxsiwax", X(31,76), XX1_MASK, PPCVSX2, 0, {XT6, RA0, RB}}, {"iseleq", X(31,79), X_MASK, PPCISEL, 0, {RT, RA0, RB}}, {"isel", XISEL(31,15), XISEL_MASK, PPCISEL|TITAN, 0, {RT, RA0, RB, CRB}}, {"subf", XO(31,40,0,0), XO_MASK, PPC, 0, {RT, RA, RB}}, {"sub", XO(31,40,0,0), XO_MASK, PPC, 0, {RT, RB, RA}}, {"subf.", XO(31,40,0,1), XO_MASK, PPC, 0, {RT, 
RA, RB}}, {"sub.", XO(31,40,0,1), XO_MASK, PPC, 0, {RT, RB, RA}}, {"mfvsrd", X(31,51), XX1RB_MASK, PPCVSX2, 0, {RA, XS6}}, {"mffprd", X(31,51), XX1RB_MASK|1, PPCVSX2, 0, {RA, FRS}}, {"mfvrd", X(31,51)|1, XX1RB_MASK|1, PPCVSX2, 0, {RA, VS}}, {"eratilx", X(31,51), X_MASK, PPCA2, 0, {ERAT_T, RA, RB}}, {"lbarx", X(31,52), XEH_MASK, POWER8|E6500, 0, {RT, RA0, RB, EH}}, {"ldux", X(31,53), X_MASK, PPC64, 0, {RT, RAL, RB}}, {"dcbst", X(31,54), XRT_MASK, PPC, 0, {RA0, RB}}, {"lwzux", X(31,55), X_MASK, PPCCOM, 0, {RT, RAL, RB}}, {"lux", X(31,55), X_MASK, PWRCOM, 0, {RT, RA, RB}}, {"cntlzd", XRC(31,58,0), XRB_MASK, PPC64, 0, {RA, RS}}, {"cntlzd.", XRC(31,58,1), XRB_MASK, PPC64, 0, {RA, RS}}, {"andc", XRC(31,60,0), X_MASK, COM, 0, {RA, RS, RB}}, {"andc.", XRC(31,60,1), X_MASK, COM, 0, {RA, RS, RB}}, {"waitrsv", X(31,62)|(1<<21), 0xffffffff, E500MC|PPCA2, 0, {0}}, {"waitimpl", X(31,62)|(2<<21), 0xffffffff, E500MC|PPCA2, 0, {0}}, {"wait", X(31,62), XWC_MASK, E500MC|PPCA2, 0, {WC}}, {"dcbstep", XRT(31,63,0), XRT_MASK, E500MC|PPCA2, 0, {RA0, RB}}, {"tdlgt", XTO(31,68,TOLGT), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdllt", XTO(31,68,TOLLT), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdeq", XTO(31,68,TOEQ), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdlge", XTO(31,68,TOLGE), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdlnl", XTO(31,68,TOLNL), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdlle", XTO(31,68,TOLLE), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdlng", XTO(31,68,TOLNG), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdgt", XTO(31,68,TOGT), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdge", XTO(31,68,TOGE), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdnl", XTO(31,68,TONL), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdlt", XTO(31,68,TOLT), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdle", XTO(31,68,TOLE), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdng", XTO(31,68,TONG), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdne", XTO(31,68,TONE), XTO_MASK, PPC64, 0, {RA, RB}}, {"tdu", XTO(31,68,TOU), XTO_MASK, PPC64, 0, {RA, RB}}, {"td", X(31,68), X_MASK, PPC64, 0, {TO, RA, RB}}, {"lwfcmx", APU(31,71,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"mulhd", XO(31,73,0,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"mulhd.", XO(31,73,0,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"mulhw", XO(31,75,0,0), XO_MASK, PPC, 0, {RT, RA, RB}}, {"mulhw.", XO(31,75,0,1), XO_MASK, PPC, 0, {RT, RA, RB}}, {"dlmzb", XRC(31,78,0), X_MASK, PPC403|PPC440|TITAN, 0, {RA, RS, RB}}, {"dlmzb.", XRC(31,78,1), X_MASK, PPC403|PPC440|TITAN, 0, {RA, RS, RB}}, {"mtsrd", X(31,82), XRB_MASK|(1<<20), PPC64, 0, {SR, RS}}, {"mfmsr", X(31,83), XRARB_MASK, COM, 0, {RT}}, {"ldarx", X(31,84), XEH_MASK, PPC64, 0, {RT, RA0, RB, EH}}, {"dcbfl", XOPL(31,86,1), XRT_MASK, POWER5, PPC476, {RA0, RB}}, {"dcbf", X(31,86), XLRT_MASK, PPC, 0, {RA0, RB, L2OPT}}, {"lbzx", X(31,87), X_MASK, COM, 0, {RT, RA0, RB}}, {"lbepx", X(31,95), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, {"dni", XRC(31,97,1), XRB_MASK, E6500, 0, {DUI, DCTL}}, {"lvx", X(31,103), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, {"lqfcmx", APU(31,103,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"neg", XO(31,104,0,0), XORB_MASK, COM, 0, {RT, RA}}, {"neg.", XO(31,104,0,1), XORB_MASK, COM, 0, {RT, RA}}, {"mul", XO(31,107,0,0), XO_MASK, M601, 0, {RT, RA, RB}}, {"mul.", XO(31,107,0,1), XO_MASK, M601, 0, {RT, RA, RB}}, {"mvidsplt", X(31,110), X_MASK, PPCVEC2, 0, {VD, RA, RB}}, {"mtsrdin", X(31,114), XRA_MASK, PPC64, 0, {RS, RB}}, {"mffprwz", X(31,115), XX1RB_MASK|1, PPCVSX2, 0, {RA, FRS}}, {"mfvrwz", X(31,115)|1, XX1RB_MASK|1, PPCVSX2, 0, {RA, VS}}, {"mfvsrwz", X(31,115), XX1RB_MASK, PPCVSX2, 0, {RA, XS6}}, {"lharx", X(31,116), XEH_MASK, POWER8|E6500, 0, {RT, RA0, RB, 
EH}}, {"clf", X(31,118), XTO_MASK, POWER, 0, {RA, RB}}, {"lbzux", X(31,119), X_MASK, COM, 0, {RT, RAL, RB}}, {"popcntb", X(31,122), XRB_MASK, POWER5, 0, {RA, RS}}, {"not", XRC(31,124,0), X_MASK, COM, 0, {RA, RS, RBS}}, {"nor", XRC(31,124,0), X_MASK, COM, 0, {RA, RS, RB}}, {"not.", XRC(31,124,1), X_MASK, COM, 0, {RA, RS, RBS}}, {"nor.", XRC(31,124,1), X_MASK, COM, 0, {RA, RS, RB}}, {"dcbfep", XRT(31,127,0), XRT_MASK, E500MC|PPCA2, 0, {RA0, RB}}, {"setb", X(31,128), XRB_MASK|(3<<16), POWER9, 0, {RT, BFA}}, {"wrtee", X(31,131), XRARB_MASK, PPC403|BOOKE|PPCA2|PPC476, 0, {RS}}, {"dcbtstls", X(31,134), X_MASK, PPCCHLK|PPC476|TITAN, 0, {CT, RA0, RB}}, {"stvebx", X(31,135), X_MASK, PPCVEC, 0, {VS, RA0, RB}}, {"stbfcmx", APU(31,135,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"subfe", XO(31,136,0,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"sfe", XO(31,136,0,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"subfe.", XO(31,136,0,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"sfe.", XO(31,136,0,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"adde", XO(31,138,0,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"ae", XO(31,138,0,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"adde.", XO(31,138,0,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"ae.", XO(31,138,0,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"stxsiwx", X(31,140), XX1_MASK, PPCVSX2, 0, {XS6, RA0, RB}}, {"msgsndp", XRTRA(31,142,0,0), XRTRA_MASK, POWER8, 0, {RB}}, {"dcbtstlse", X(31,142), X_MASK, PPCCHLK, E500MC, {CT, RA0, RB}}, {"mtcr", XFXM(31,144,0xff,0), XRARB_MASK, COM, 0, {RS}}, {"mtcrf", XFXM(31,144,0,0), XFXFXM_MASK, COM, 0, {FXM, RS}}, {"mtocrf", XFXM(31,144,0,1), XFXFXM_MASK, COM, 0, {FXM, RS}}, {"mtmsr", X(31,146), XRLARB_MASK, COM, 0, {RS, A_L}}, {"mtsle", X(31,147), XRTLRARB_MASK, POWER8, 0, {L}}, {"eratsx", XRC(31,147,0), X_MASK, PPCA2, 0, {RT, RA0, RB}}, {"eratsx.", XRC(31,147,1), X_MASK, PPCA2, 0, {RT, RA0, RB}}, {"stdx", X(31,149), X_MASK, PPC64, 0, {RS, RA0, RB}}, {"stwcx.", XRC(31,150,1), X_MASK, PPC, 0, {RS, RA0, RB}}, {"stwx", X(31,151), X_MASK, PPCCOM, 0, {RS, RA0, RB}}, {"stx", X(31,151), X_MASK, PWRCOM, 0, {RS, RA, RB}}, {"slq", XRC(31,152,0), X_MASK, M601, 0, {RA, RS, RB}}, {"slq.", XRC(31,152,1), X_MASK, M601, 0, {RA, RS, RB}}, {"sle", XRC(31,153,0), X_MASK, M601, 0, {RA, RS, RB}}, {"sle.", XRC(31,153,1), X_MASK, M601, 0, {RA, RS, RB}}, {"prtyw", X(31,154), XRB_MASK, POWER6|PPCA2|PPC476, 0, {RA, RS}}, {"stdepx", X(31,157), X_MASK, E500MC|PPCA2, 0, {RS, RA0, RB}}, {"stwepx", X(31,159), X_MASK, E500MC|PPCA2, 0, {RS, RA0, RB}}, {"wrteei", X(31,163), XE_MASK, PPC403|BOOKE|PPCA2|PPC476, 0, {E}}, {"dcbtls", X(31,166), X_MASK, PPCCHLK|PPC476|TITAN, 0, {CT, RA0, RB}}, {"stvehx", X(31,167), X_MASK, PPCVEC, 0, {VS, RA0, RB}}, {"sthfcmx", APU(31,167,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"addex", ZRC(31,170,0), Z2_MASK, POWER9, 0, {RT, RA, RB, CY}}, {"msgclrp", XRTRA(31,174,0,0), XRTRA_MASK, POWER8, 0, {RB}}, {"dcbtlse", X(31,174), X_MASK, PPCCHLK, E500MC, {CT, RA0, RB}}, {"mtmsrd", X(31,178), XRLARB_MASK, PPC64, 0, {RS, A_L}}, {"mtvsrd", X(31,179), XX1RB_MASK, PPCVSX2, 0, {XT6, RA}}, {"mtfprd", X(31,179), XX1RB_MASK|1, PPCVSX2, 0, {FRT, RA}}, {"mtvrd", X(31,179)|1, XX1RB_MASK|1, PPCVSX2, 0, {VD, RA}}, {"eratre", X(31,179), X_MASK, PPCA2, 0, {RT, RA, WS}}, {"stdux", X(31,181), X_MASK, PPC64, 0, {RS, RAS, RB}}, {"stqcx.", XRC(31,182,1), X_MASK, POWER8, 0, {RSQ, RA0, RB}}, {"wchkall", X(31,182), X_MASK, PPCA2, 0, {OBF}}, {"stwux", X(31,183), X_MASK, PPCCOM, 0, {RS, RAS, RB}}, {"stux", X(31,183), X_MASK, PWRCOM, 0, {RS, RA0, RB}}, {"sliq", XRC(31,184,0), X_MASK, M601, 0, {RA, RS, 
SH}}, {"sliq.", XRC(31,184,1), X_MASK, M601, 0, {RA, RS, SH}}, {"prtyd", X(31,186), XRB_MASK, POWER6|PPCA2, 0, {RA, RS}}, {"cmprb", X(31,192), XCMP_MASK, POWER9, 0, {BF, L, RA, RB}}, {"icblq.", XRC(31,198,1), X_MASK, E6500, 0, {CT, RA0, RB}}, {"stvewx", X(31,199), X_MASK, PPCVEC, 0, {VS, RA0, RB}}, {"stwfcmx", APU(31,199,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"subfze", XO(31,200,0,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"sfze", XO(31,200,0,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"subfze.", XO(31,200,0,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"sfze.", XO(31,200,0,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"addze", XO(31,202,0,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"aze", XO(31,202,0,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"addze.", XO(31,202,0,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"aze.", XO(31,202,0,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"msgsnd", XRTRA(31,206,0,0), XRTRA_MASK, E500MC|PPCA2|POWER8, 0, {RB}}, {"mtsr", X(31,210), XRB_MASK|(1<<20), COM, NON32, {SR, RS}}, {"mtfprwa", X(31,211), XX1RB_MASK|1, PPCVSX2, 0, {FRT, RA}}, {"mtvrwa", X(31,211)|1, XX1RB_MASK|1, PPCVSX2, 0, {VD, RA}}, {"mtvsrwa", X(31,211), XX1RB_MASK, PPCVSX2, 0, {XT6, RA}}, {"eratwe", X(31,211), X_MASK, PPCA2, 0, {RS, RA, WS}}, {"ldawx.", XRC(31,212,1), X_MASK, PPCA2, 0, {RT, RA0, RB}}, {"stdcx.", XRC(31,214,1), X_MASK, PPC64, 0, {RS, RA0, RB}}, {"stbx", X(31,215), X_MASK, COM, 0, {RS, RA0, RB}}, {"sllq", XRC(31,216,0), X_MASK, M601, 0, {RA, RS, RB}}, {"sllq.", XRC(31,216,1), X_MASK, M601, 0, {RA, RS, RB}}, {"sleq", XRC(31,217,0), X_MASK, M601, 0, {RA, RS, RB}}, {"sleq.", XRC(31,217,1), X_MASK, M601, 0, {RA, RS, RB}}, {"stbepx", X(31,223), X_MASK, E500MC|PPCA2, 0, {RS, RA0, RB}}, {"cmpeqb", X(31,224), XCMPL_MASK, POWER9, 0, {BF, RA, RB}}, {"icblc", X(31,230), X_MASK, PPCCHLK|PPC476|TITAN, 0, {CT, RA0, RB}}, {"stvx", X(31,231), X_MASK, PPCVEC, 0, {VS, RA0, RB}}, {"stqfcmx", APU(31,231,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"subfme", XO(31,232,0,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"sfme", XO(31,232,0,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"subfme.", XO(31,232,0,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"sfme.", XO(31,232,0,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"mulld", XO(31,233,0,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"mulld.", XO(31,233,0,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"addme", XO(31,234,0,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"ame", XO(31,234,0,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"addme.", XO(31,234,0,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"ame.", XO(31,234,0,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"mullw", XO(31,235,0,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"muls", XO(31,235,0,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"mullw.", XO(31,235,0,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"muls.", XO(31,235,0,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"icblce", X(31,238), X_MASK, PPCCHLK, E500MC|PPCA2, {CT, RA, RB}}, {"msgclr", XRTRA(31,238,0,0), XRTRA_MASK, E500MC|PPCA2|POWER8, 0, {RB}}, {"mtsrin", X(31,242), XRA_MASK, PPC, NON32, {RS, RB}}, {"mtsri", X(31,242), XRA_MASK, POWER, NON32, {RS, RB}}, {"mtfprwz", X(31,243), XX1RB_MASK|1, PPCVSX2, 0, {FRT, RA}}, {"mtvrwz", X(31,243)|1, XX1RB_MASK|1, PPCVSX2, 0, {VD, RA}}, {"mtvsrwz", X(31,243), XX1RB_MASK, PPCVSX2, 0, {XT6, RA}}, {"dcbtstt", XRT(31,246,0x10), XRT_MASK, POWER7, 0, {RA0, RB}}, {"dcbtst", X(31,246), X_MASK, POWER4, DCBT_EO, {RA0, RB, CT}}, {"dcbtst", X(31,246), X_MASK, DCBT_EO, 0, {CT, RA0, RB}}, {"dcbtst", X(31,246), X_MASK, PPC, POWER4|DCBT_EO, {RA0, RB}}, {"stbux", X(31,247), X_MASK, COM, 0, {RS, RAS, RB}}, {"slliq", XRC(31,248,0), X_MASK, M601, 0, {RA, RS, SH}}, 
{"slliq.", XRC(31,248,1), X_MASK, M601, 0, {RA, RS, SH}}, {"bpermd", X(31,252), X_MASK, POWER7|PPCA2, 0, {RA, RS, RB}}, {"dcbtstep", XRT(31,255,0), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, {"mfdcrx", X(31,259), X_MASK, BOOKE|PPCA2|PPC476, TITAN, {RS, RA}}, {"mfdcrx.", XRC(31,259,1), X_MASK, PPCA2, 0, {RS, RA}}, {"lvexbx", X(31,261), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, {"icbt", X(31,262), XRT_MASK, PPC403, 0, {RA, RB}}, {"lvepxl", X(31,263), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, {"ldfcmx", APU(31,263,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"doz", XO(31,264,0,0), XO_MASK, M601, 0, {RT, RA, RB}}, {"doz.", XO(31,264,0,1), XO_MASK, M601, 0, {RT, RA, RB}}, {"modud", X(31,265), X_MASK, POWER9, 0, {RT, RA, RB}}, {"add", XO(31,266,0,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"cax", XO(31,266,0,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"add.", XO(31,266,0,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"cax.", XO(31,266,0,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"moduw", X(31,267), X_MASK, POWER9, 0, {RT, RA, RB}}, {"lxvx", X(31,268), XX1_MASK|1<<6, PPCVSX3, 0, {XT6, RA0, RB}}, {"lxvl", X(31,269), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, {"ehpriv", X(31,270), 0xffffffff, E500MC|PPCA2, 0, {0}}, {"tlbiel", X(31,274), X_MASK|1<<20,POWER9, PPC476, {RB, RSO, RIC, PRS, X_R}}, {"tlbiel", X(31,274), XRTLRA_MASK, POWER4, POWER9|PPC476, {RB, LOPT}}, {"mfapidi", X(31,275), X_MASK, BOOKE, E500|TITAN, {RT, RA}}, {"lqarx", X(31,276), XEH_MASK, POWER8, 0, {RTQ, RAX, RBX, EH}}, {"lscbx", XRC(31,277,0), X_MASK, M601, 0, {RT, RA, RB}}, {"lscbx.", XRC(31,277,1), X_MASK, M601, 0, {RT, RA, RB}}, {"dcbtt", XRT(31,278,0x10), XRT_MASK, POWER7, 0, {RA0, RB}}, {"dcbt", X(31,278), X_MASK, POWER4, DCBT_EO, {RA0, RB, CT}}, {"dcbt", X(31,278), X_MASK, DCBT_EO, 0, {CT, RA0, RB}}, {"dcbt", X(31,278), X_MASK, PPC, POWER4|DCBT_EO, {RA0, RB}}, {"lhzx", X(31,279), X_MASK, COM, 0, {RT, RA0, RB}}, {"cdtbcd", X(31,282), XRB_MASK, POWER6, 0, {RA, RS}}, {"eqv", XRC(31,284,0), X_MASK, COM, 0, {RA, RS, RB}}, {"eqv.", XRC(31,284,1), X_MASK, COM, 0, {RA, RS, RB}}, {"lhepx", X(31,287), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, {"mfdcrux", X(31,291), X_MASK, PPC464, 0, {RS, RA}}, {"lvexhx", X(31,293), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, {"lvepx", X(31,295), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, {"lxvll", X(31,301), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, {"mfbhrbe", X(31,302), X_MASK, POWER8, 0, {RT, BHRBE}}, {"tlbie", X(31,306), X_MASK|1<<20,POWER9, TITAN, {RB, RS, RIC, PRS, X_R}}, {"tlbie", X(31,306), XRA_MASK, POWER7, POWER9|TITAN, {RB, RS}}, {"tlbie", X(31,306), XRTLRA_MASK, PPC, E500|POWER7|TITAN, {RB, LOPT}}, {"tlbi", X(31,306), XRT_MASK, POWER, 0, {RA0, RB}}, {"mfvsrld", X(31,307), XX1RB_MASK, PPCVSX3, 0, {RA, XS6}}, {"ldmx", X(31,309), X_MASK, POWER9, 0, {RT, RA0, RB}}, {"eciwx", X(31,310), X_MASK, PPC, E500|TITAN, {RT, RA0, RB}}, {"lhzux", X(31,311), X_MASK, COM, 0, {RT, RAL, RB}}, {"cbcdtd", X(31,314), XRB_MASK, POWER6, 0, {RA, RS}}, {"xor", XRC(31,316,0), X_MASK, COM, 0, {RA, RS, RB}}, {"xor.", XRC(31,316,1), X_MASK, COM, 0, {RA, RS, RB}}, {"dcbtep", XRT(31,319,0), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, {"mfexisr", XSPR(31,323, 64), XSPR_MASK, PPC403, 0, {RT}}, {"mfexier", XSPR(31,323, 66), XSPR_MASK, PPC403, 0, {RT}}, {"mfbr0", XSPR(31,323,128), XSPR_MASK, PPC403, 0, {RT}}, {"mfbr1", XSPR(31,323,129), XSPR_MASK, PPC403, 0, {RT}}, {"mfbr2", XSPR(31,323,130), XSPR_MASK, PPC403, 0, {RT}}, {"mfbr3", XSPR(31,323,131), XSPR_MASK, PPC403, 0, {RT}}, {"mfbr4", XSPR(31,323,132), XSPR_MASK, PPC403, 0, {RT}}, {"mfbr5", XSPR(31,323,133), XSPR_MASK, PPC403, 0, 
{RT}}, {"mfbr6", XSPR(31,323,134), XSPR_MASK, PPC403, 0, {RT}}, {"mfbr7", XSPR(31,323,135), XSPR_MASK, PPC403, 0, {RT}}, {"mfbear", XSPR(31,323,144), XSPR_MASK, PPC403, 0, {RT}}, {"mfbesr", XSPR(31,323,145), XSPR_MASK, PPC403, 0, {RT}}, {"mfiocr", XSPR(31,323,160), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmacr0", XSPR(31,323,192), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmact0", XSPR(31,323,193), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmada0", XSPR(31,323,194), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmasa0", XSPR(31,323,195), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmacc0", XSPR(31,323,196), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmacr1", XSPR(31,323,200), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmact1", XSPR(31,323,201), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmada1", XSPR(31,323,202), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmasa1", XSPR(31,323,203), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmacc1", XSPR(31,323,204), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmacr2", XSPR(31,323,208), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmact2", XSPR(31,323,209), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmada2", XSPR(31,323,210), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmasa2", XSPR(31,323,211), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmacc2", XSPR(31,323,212), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmacr3", XSPR(31,323,216), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmact3", XSPR(31,323,217), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmada3", XSPR(31,323,218), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmasa3", XSPR(31,323,219), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmacc3", XSPR(31,323,220), XSPR_MASK, PPC403, 0, {RT}}, {"mfdmasr", XSPR(31,323,224), XSPR_MASK, PPC403, 0, {RT}}, {"mfdcr", X(31,323), X_MASK, PPC403|BOOKE|PPCA2|PPC476, E500|TITAN, {RT, SPR}}, {"mfdcr.", XRC(31,323,1), X_MASK, PPCA2, 0, {RT, SPR}}, {"lvexwx", X(31,325), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, {"dcread", X(31,326), X_MASK, PPC476|TITAN, 0, {RT, RA0, RB}}, {"div", XO(31,331,0,0), XO_MASK, M601, 0, {RT, RA, RB}}, {"div.", XO(31,331,0,1), XO_MASK, M601, 0, {RT, RA, RB}}, {"lxvdsx", X(31,332), XX1_MASK, PPCVSX, 0, {XT6, RA0, RB}}, {"mfpmr", X(31,334), X_MASK, PPCPMR|PPCE300, 0, {RT, PMR}}, {"mftmr", X(31,366), X_MASK, PPCTMR|E6500, 0, {RT, TMR}}, {"slbsync", X(31,338), 0xffffffff, POWER9, 0, {0}}, {"mfmq", XSPR(31,339, 0), XSPR_MASK, M601, 0, {RT}}, {"mfxer", XSPR(31,339, 1), XSPR_MASK, COM, 0, {RT}}, {"mfrtcu", XSPR(31,339, 4), XSPR_MASK, COM, TITAN, {RT}}, {"mfrtcl", XSPR(31,339, 5), XSPR_MASK, COM, TITAN, {RT}}, {"mfdec", XSPR(31,339, 6), XSPR_MASK, MFDEC1, 0, {RT}}, {"mflr", XSPR(31,339, 8), XSPR_MASK, COM, 0, {RT}}, {"mfctr", XSPR(31,339, 9), XSPR_MASK, COM, 0, {RT}}, {"mfdscr", XSPR(31,339, 17), XSPR_MASK, POWER6, 0, {RT}}, {"mftid", XSPR(31,339, 17), XSPR_MASK, POWER, 0, {RT}}, {"mfdsisr", XSPR(31,339, 18), XSPR_MASK, COM, TITAN, {RT}}, {"mfdar", XSPR(31,339, 19), XSPR_MASK, COM, TITAN, {RT}}, {"mfdec", XSPR(31,339, 22), XSPR_MASK, MFDEC2, MFDEC1, {RT}}, {"mfsdr0", XSPR(31,339, 24), XSPR_MASK, POWER, 0, {RT}}, {"mfsdr1", XSPR(31,339, 25), XSPR_MASK, COM, TITAN, {RT}}, {"mfsrr0", XSPR(31,339, 26), XSPR_MASK, COM, 0, {RT}}, {"mfsrr1", XSPR(31,339, 27), XSPR_MASK, COM, 0, {RT}}, {"mfcfar", XSPR(31,339, 28), XSPR_MASK, POWER6, 0, {RT}}, {"mfpid", XSPR(31,339, 48), XSPR_MASK, BOOKE, 0, {RT}}, {"mfcsrr0", XSPR(31,339, 58), XSPR_MASK, BOOKE, 0, {RT}}, {"mfcsrr1", XSPR(31,339, 59), XSPR_MASK, BOOKE, 0, {RT}}, {"mfdear", XSPR(31,339, 61), XSPR_MASK, BOOKE, 0, {RT}}, {"mfesr", XSPR(31,339, 62), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivpr", XSPR(31,339, 63), XSPR_MASK, BOOKE, 0, {RT}}, {"mfctrl", XSPR(31,339,136), XSPR_MASK, POWER4, 0, {RT}}, {"mfcmpa", XSPR(31,339,144), 
XSPR_MASK, PPC860, 0, {RT}}, {"mfcmpb", XSPR(31,339,145), XSPR_MASK, PPC860, 0, {RT}}, {"mfcmpc", XSPR(31,339,146), XSPR_MASK, PPC860, 0, {RT}}, {"mfcmpd", XSPR(31,339,147), XSPR_MASK, PPC860, 0, {RT}}, {"mficr", XSPR(31,339,148), XSPR_MASK, PPC860, 0, {RT}}, {"mfder", XSPR(31,339,149), XSPR_MASK, PPC860, 0, {RT}}, {"mfcounta", XSPR(31,339,150), XSPR_MASK, PPC860, 0, {RT}}, {"mfcountb", XSPR(31,339,151), XSPR_MASK, PPC860, 0, {RT}}, {"mfcmpe", XSPR(31,339,152), XSPR_MASK, PPC860, 0, {RT}}, {"mfcmpf", XSPR(31,339,153), XSPR_MASK, PPC860, 0, {RT}}, {"mfcmpg", XSPR(31,339,154), XSPR_MASK, PPC860, 0, {RT}}, {"mfcmph", XSPR(31,339,155), XSPR_MASK, PPC860, 0, {RT}}, {"mflctrl1", XSPR(31,339,156), XSPR_MASK, PPC860, 0, {RT}}, {"mflctrl2", XSPR(31,339,157), XSPR_MASK, PPC860, 0, {RT}}, {"mfictrl", XSPR(31,339,158), XSPR_MASK, PPC860, 0, {RT}}, {"mfbar", XSPR(31,339,159), XSPR_MASK, PPC860, 0, {RT}}, {"mfvrsave", XSPR(31,339,256), XSPR_MASK, PPCVEC, 0, {RT}}, {"mfusprg0", XSPR(31,339,256), XSPR_MASK, BOOKE, 0, {RT}}, {"mfsprg", XSPR(31,339,256), XSPRG_MASK, PPC, 0, {RT, SPRG}}, {"mfsprg4", XSPR(31,339,260), XSPR_MASK, PPC405|BOOKE, 0, {RT}}, {"mfsprg5", XSPR(31,339,261), XSPR_MASK, PPC405|BOOKE, 0, {RT}}, {"mfsprg6", XSPR(31,339,262), XSPR_MASK, PPC405|BOOKE, 0, {RT}}, {"mfsprg7", XSPR(31,339,263), XSPR_MASK, PPC405|BOOKE, 0, {RT}}, {"mftbu", XSPR(31,339,269), XSPR_MASK, POWER4|BOOKE, 0, {RT}}, {"mftb", X(31,339), X_MASK, POWER4|BOOKE, 0, {RT, TBR}}, {"mftbl", XSPR(31,339,268), XSPR_MASK, POWER4|BOOKE, 0, {RT}}, {"mfsprg0", XSPR(31,339,272), XSPR_MASK, PPC, 0, {RT}}, {"mfsprg1", XSPR(31,339,273), XSPR_MASK, PPC, 0, {RT}}, {"mfsprg2", XSPR(31,339,274), XSPR_MASK, PPC, 0, {RT}}, {"mfsprg3", XSPR(31,339,275), XSPR_MASK, PPC, 0, {RT}}, {"mfasr", XSPR(31,339,280), XSPR_MASK, PPC64, 0, {RT}}, {"mfear", XSPR(31,339,282), XSPR_MASK, PPC, TITAN, {RT}}, {"mfpir", XSPR(31,339,286), XSPR_MASK, BOOKE, 0, {RT}}, {"mfpvr", XSPR(31,339,287), XSPR_MASK, PPC, 0, {RT}}, {"mfdbsr", XSPR(31,339,304), XSPR_MASK, BOOKE, 0, {RT}}, {"mfdbcr0", XSPR(31,339,308), XSPR_MASK, BOOKE, 0, {RT}}, {"mfdbcr1", XSPR(31,339,309), XSPR_MASK, BOOKE, 0, {RT}}, {"mfdbcr2", XSPR(31,339,310), XSPR_MASK, BOOKE, 0, {RT}}, {"mfiac1", XSPR(31,339,312), XSPR_MASK, BOOKE, 0, {RT}}, {"mfiac2", XSPR(31,339,313), XSPR_MASK, BOOKE, 0, {RT}}, {"mfiac3", XSPR(31,339,314), XSPR_MASK, BOOKE, 0, {RT}}, {"mfiac4", XSPR(31,339,315), XSPR_MASK, BOOKE, 0, {RT}}, {"mfdac1", XSPR(31,339,316), XSPR_MASK, BOOKE, 0, {RT}}, {"mfdac2", XSPR(31,339,317), XSPR_MASK, BOOKE, 0, {RT}}, {"mfdvc1", XSPR(31,339,318), XSPR_MASK, BOOKE, 0, {RT}}, {"mfdvc2", XSPR(31,339,319), XSPR_MASK, BOOKE, 0, {RT}}, {"mftsr", XSPR(31,339,336), XSPR_MASK, BOOKE, 0, {RT}}, {"mftcr", XSPR(31,339,340), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor0", XSPR(31,339,400), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor1", XSPR(31,339,401), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor2", XSPR(31,339,402), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor3", XSPR(31,339,403), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor4", XSPR(31,339,404), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor5", XSPR(31,339,405), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor6", XSPR(31,339,406), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor7", XSPR(31,339,407), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor8", XSPR(31,339,408), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor9", XSPR(31,339,409), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor10", XSPR(31,339,410), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor11", XSPR(31,339,411), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor12", XSPR(31,339,412), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor13", 
XSPR(31,339,413), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor14", XSPR(31,339,414), XSPR_MASK, BOOKE, 0, {RT}}, {"mfivor15", XSPR(31,339,415), XSPR_MASK, BOOKE, 0, {RT}}, {"mfspefscr", XSPR(31,339,512), XSPR_MASK, PPCSPE, 0, {RT}}, {"mfbbear", XSPR(31,339,513), XSPR_MASK, PPCBRLK, 0, {RT}}, {"mfbbtar", XSPR(31,339,514), XSPR_MASK, PPCBRLK, 0, {RT}}, {"mfivor32", XSPR(31,339,528), XSPR_MASK, PPCSPE, 0, {RT}}, {"mfibatu", XSPR(31,339,528), XSPRBAT_MASK, PPC, TITAN, {RT, SPRBAT}}, {"mfivor33", XSPR(31,339,529), XSPR_MASK, PPCSPE, 0, {RT}}, {"mfibatl", XSPR(31,339,529), XSPRBAT_MASK, PPC, TITAN, {RT, SPRBAT}}, {"mfivor34", XSPR(31,339,530), XSPR_MASK, PPCSPE, 0, {RT}}, {"mfivor35", XSPR(31,339,531), XSPR_MASK, PPCPMR, 0, {RT}}, {"mfdbatu", XSPR(31,339,536), XSPRBAT_MASK, PPC, TITAN, {RT, SPRBAT}}, {"mfdbatl", XSPR(31,339,537), XSPRBAT_MASK, PPC, TITAN, {RT, SPRBAT}}, {"mfic_cst", XSPR(31,339,560), XSPR_MASK, PPC860, 0, {RT}}, {"mfic_adr", XSPR(31,339,561), XSPR_MASK, PPC860, 0, {RT}}, {"mfic_dat", XSPR(31,339,562), XSPR_MASK, PPC860, 0, {RT}}, {"mfdc_cst", XSPR(31,339,568), XSPR_MASK, PPC860, 0, {RT}}, {"mfdc_adr", XSPR(31,339,569), XSPR_MASK, PPC860, 0, {RT}}, {"mfdc_dat", XSPR(31,339,570), XSPR_MASK, PPC860, 0, {RT}}, {"mfmcsrr0", XSPR(31,339,570), XSPR_MASK, PPCRFMCI, 0, {RT}}, {"mfmcsrr1", XSPR(31,339,571), XSPR_MASK, PPCRFMCI, 0, {RT}}, {"mfmcsr", XSPR(31,339,572), XSPR_MASK, PPCRFMCI, 0, {RT}}, {"mfmcar", XSPR(31,339,573), XSPR_MASK, PPCRFMCI, TITAN, {RT}}, {"mfdpdr", XSPR(31,339,630), XSPR_MASK, PPC860, 0, {RT}}, {"mfdpir", XSPR(31,339,631), XSPR_MASK, PPC860, 0, {RT}}, {"mfimmr", XSPR(31,339,638), XSPR_MASK, PPC860, 0, {RT}}, {"mfmi_ctr", XSPR(31,339,784), XSPR_MASK, PPC860, 0, {RT}}, {"mfmi_ap", XSPR(31,339,786), XSPR_MASK, PPC860, 0, {RT}}, {"mfmi_epn", XSPR(31,339,787), XSPR_MASK, PPC860, 0, {RT}}, {"mfmi_twc", XSPR(31,339,789), XSPR_MASK, PPC860, 0, {RT}}, {"mfmi_rpn", XSPR(31,339,790), XSPR_MASK, PPC860, 0, {RT}}, {"mfmd_ctr", XSPR(31,339,792), XSPR_MASK, PPC860, 0, {RT}}, {"mfm_casid", XSPR(31,339,793), XSPR_MASK, PPC860, 0, {RT}}, {"mfmd_ap", XSPR(31,339,794), XSPR_MASK, PPC860, 0, {RT}}, {"mfmd_epn", XSPR(31,339,795), XSPR_MASK, PPC860, 0, {RT}}, {"mfmd_twb", XSPR(31,339,796), XSPR_MASK, PPC860, 0, {RT}}, {"mfmd_twc", XSPR(31,339,797), XSPR_MASK, PPC860, 0, {RT}}, {"mfmd_rpn", XSPR(31,339,798), XSPR_MASK, PPC860, 0, {RT}}, {"mfm_tw", XSPR(31,339,799), XSPR_MASK, PPC860, 0, {RT}}, {"mfmi_dbcam", XSPR(31,339,816), XSPR_MASK, PPC860, 0, {RT}}, {"mfmi_dbram0", XSPR(31,339,817), XSPR_MASK, PPC860, 0, {RT}}, {"mfmi_dbram1", XSPR(31,339,818), XSPR_MASK, PPC860, 0, {RT}}, {"mfmd_dbcam", XSPR(31,339,824), XSPR_MASK, PPC860, 0, {RT}}, {"mfmd_dbram0", XSPR(31,339,825), XSPR_MASK, PPC860, 0, {RT}}, {"mfmd_dbram1", XSPR(31,339,826), XSPR_MASK, PPC860, 0, {RT}}, {"mfivndx", XSPR(31,339,880), XSPR_MASK, TITAN, 0, {RT}}, {"mfdvndx", XSPR(31,339,881), XSPR_MASK, TITAN, 0, {RT}}, {"mfivlim", XSPR(31,339,882), XSPR_MASK, TITAN, 0, {RT}}, {"mfdvlim", XSPR(31,339,883), XSPR_MASK, TITAN, 0, {RT}}, {"mfclcsr", XSPR(31,339,884), XSPR_MASK, TITAN, 0, {RT}}, {"mfccr1", XSPR(31,339,888), XSPR_MASK, TITAN, 0, {RT}}, {"mfppr", XSPR(31,339,896), XSPR_MASK, POWER7, 0, {RT}}, {"mfppr32", XSPR(31,339,898), XSPR_MASK, POWER7, 0, {RT}}, {"mfrstcfg", XSPR(31,339,923), XSPR_MASK, TITAN, 0, {RT}}, {"mfdcdbtrl", XSPR(31,339,924), XSPR_MASK, TITAN, 0, {RT}}, {"mfdcdbtrh", XSPR(31,339,925), XSPR_MASK, TITAN, 0, {RT}}, {"mficdbtr", XSPR(31,339,927), XSPR_MASK, TITAN, 0, {RT}}, {"mfummcr0", XSPR(31,339,936), XSPR_MASK, PPC750, 
0, {RT}}, {"mfupmc1", XSPR(31,339,937), XSPR_MASK, PPC750, 0, {RT}}, {"mfupmc2", XSPR(31,339,938), XSPR_MASK, PPC750, 0, {RT}}, {"mfusia", XSPR(31,339,939), XSPR_MASK, PPC750, 0, {RT}}, {"mfummcr1", XSPR(31,339,940), XSPR_MASK, PPC750, 0, {RT}}, {"mfupmc3", XSPR(31,339,941), XSPR_MASK, PPC750, 0, {RT}}, {"mfupmc4", XSPR(31,339,942), XSPR_MASK, PPC750, 0, {RT}}, {"mfzpr", XSPR(31,339,944), XSPR_MASK, PPC403, 0, {RT}}, {"mfpid", XSPR(31,339,945), XSPR_MASK, PPC403, 0, {RT}}, {"mfmmucr", XSPR(31,339,946), XSPR_MASK, TITAN, 0, {RT}}, {"mfccr0", XSPR(31,339,947), XSPR_MASK, PPC405|TITAN, 0, {RT}}, {"mfiac3", XSPR(31,339,948), XSPR_MASK, PPC405, 0, {RT}}, {"mfiac4", XSPR(31,339,949), XSPR_MASK, PPC405, 0, {RT}}, {"mfdvc1", XSPR(31,339,950), XSPR_MASK, PPC405, 0, {RT}}, {"mfdvc2", XSPR(31,339,951), XSPR_MASK, PPC405, 0, {RT}}, {"mfmmcr0", XSPR(31,339,952), XSPR_MASK, PPC750, 0, {RT}}, {"mfpmc1", XSPR(31,339,953), XSPR_MASK, PPC750, 0, {RT}}, {"mfsgr", XSPR(31,339,953), XSPR_MASK, PPC403, 0, {RT}}, {"mfdcwr", XSPR(31,339,954), XSPR_MASK, PPC403, 0, {RT}}, {"mfpmc2", XSPR(31,339,954), XSPR_MASK, PPC750, 0, {RT}}, {"mfsia", XSPR(31,339,955), XSPR_MASK, PPC750, 0, {RT}}, {"mfsler", XSPR(31,339,955), XSPR_MASK, PPC405, 0, {RT}}, {"mfmmcr1", XSPR(31,339,956), XSPR_MASK, PPC750, 0, {RT}}, {"mfsu0r", XSPR(31,339,956), XSPR_MASK, PPC405, 0, {RT}}, {"mfdbcr1", XSPR(31,339,957), XSPR_MASK, PPC405, 0, {RT}}, {"mfpmc3", XSPR(31,339,957), XSPR_MASK, PPC750, 0, {RT}}, {"mfpmc4", XSPR(31,339,958), XSPR_MASK, PPC750, 0, {RT}}, {"mficdbdr", XSPR(31,339,979), XSPR_MASK, PPC403|TITAN, 0, {RT}}, {"mfesr", XSPR(31,339,980), XSPR_MASK, PPC403, 0, {RT}}, {"mfdear", XSPR(31,339,981), XSPR_MASK, PPC403, 0, {RT}}, {"mfevpr", XSPR(31,339,982), XSPR_MASK, PPC403, 0, {RT}}, {"mfcdbcr", XSPR(31,339,983), XSPR_MASK, PPC403, 0, {RT}}, {"mftsr", XSPR(31,339,984), XSPR_MASK, PPC403, 0, {RT}}, {"mftcr", XSPR(31,339,986), XSPR_MASK, PPC403, 0, {RT}}, {"mfpit", XSPR(31,339,987), XSPR_MASK, PPC403, 0, {RT}}, {"mftbhi", XSPR(31,339,988), XSPR_MASK, PPC403, 0, {RT}}, {"mftblo", XSPR(31,339,989), XSPR_MASK, PPC403, 0, {RT}}, {"mfsrr2", XSPR(31,339,990), XSPR_MASK, PPC403, 0, {RT}}, {"mfsrr3", XSPR(31,339,991), XSPR_MASK, PPC403, 0, {RT}}, {"mfdbsr", XSPR(31,339,1008), XSPR_MASK, PPC403, 0, {RT}}, {"mfdbcr0", XSPR(31,339,1010), XSPR_MASK, PPC405, 0, {RT}}, {"mfdbdr", XSPR(31,339,1011), XSPR_MASK, TITAN, 0, {RS}}, {"mfiac1", XSPR(31,339,1012), XSPR_MASK, PPC403, 0, {RT}}, {"mfiac2", XSPR(31,339,1013), XSPR_MASK, PPC403, 0, {RT}}, {"mfdac1", XSPR(31,339,1014), XSPR_MASK, PPC403, 0, {RT}}, {"mfdac2", XSPR(31,339,1015), XSPR_MASK, PPC403, 0, {RT}}, {"mfl2cr", XSPR(31,339,1017), XSPR_MASK, PPC750, 0, {RT}}, {"mfdccr", XSPR(31,339,1018), XSPR_MASK, PPC403, 0, {RT}}, {"mficcr", XSPR(31,339,1019), XSPR_MASK, PPC403, 0, {RT}}, {"mfictc", XSPR(31,339,1019), XSPR_MASK, PPC750, 0, {RT}}, {"mfpbl1", XSPR(31,339,1020), XSPR_MASK, PPC403, 0, {RT}}, {"mfthrm1", XSPR(31,339,1020), XSPR_MASK, PPC750, 0, {RT}}, {"mfpbu1", XSPR(31,339,1021), XSPR_MASK, PPC403, 0, {RT}}, {"mfthrm2", XSPR(31,339,1021), XSPR_MASK, PPC750, 0, {RT}}, {"mfpbl2", XSPR(31,339,1022), XSPR_MASK, PPC403, 0, {RT}}, {"mfthrm3", XSPR(31,339,1022), XSPR_MASK, PPC750, 0, {RT}}, {"mfpbu2", XSPR(31,339,1023), XSPR_MASK, PPC403, 0, {RT}}, {"mfspr", X(31,339), X_MASK, COM, 0, {RT, SPR}}, {"lwax", X(31,341), X_MASK, PPC64, 0, {RT, RA0, RB}}, {"dst", XDSS(31,342,0), XDSS_MASK, PPCVEC, 0, {RA, RB, STRM}}, {"lhax", X(31,343), X_MASK, COM, 0, {RT, RA0, RB}}, {"lvxl", X(31,359), X_MASK, PPCVEC, 0, 
{VD, RA0, RB}}, {"abs", XO(31,360,0,0), XORB_MASK, M601, 0, {RT, RA}}, {"abs.", XO(31,360,0,1), XORB_MASK, M601, 0, {RT, RA}}, {"divs", XO(31,363,0,0), XO_MASK, M601, 0, {RT, RA, RB}}, {"divs.", XO(31,363,0,1), XO_MASK, M601, 0, {RT, RA, RB}}, {"lxvwsx", X(31,364), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, {"tlbia", X(31,370), 0xffffffff, PPC, E500|TITAN, {0}}, {"mftbu", XSPR(31,371,269), XSPR_MASK, PPC, NO371|POWER4, {RT}}, {"mftb", X(31,371), X_MASK, PPC, NO371|POWER4, {RT, TBR}}, {"mftbl", XSPR(31,371,268), XSPR_MASK, PPC, NO371|POWER4, {RT}}, {"lwaux", X(31,373), X_MASK, PPC64, 0, {RT, RAL, RB}}, {"dstst", XDSS(31,374,0), XDSS_MASK, PPCVEC, 0, {RA, RB, STRM}}, {"lhaux", X(31,375), X_MASK, COM, 0, {RT, RAL, RB}}, {"popcntw", X(31,378), XRB_MASK, POWER7|PPCA2, 0, {RA, RS}}, {"mtdcrx", X(31,387), X_MASK, BOOKE|PPCA2|PPC476, TITAN, {RA, RS}}, {"mtdcrx.", XRC(31,387,1), X_MASK, PPCA2, 0, {RA, RS}}, {"stvexbx", X(31,389), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, {"dcblc", X(31,390), X_MASK, PPCCHLK|PPC476|TITAN, 0, {CT, RA0, RB}}, {"stdfcmx", APU(31,391,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"divdeu", XO(31,393,0,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"divdeu.", XO(31,393,0,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"divweu", XO(31,395,0,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"divweu.", XO(31,395,0,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"stxvx", X(31,396), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, {"stxvl", X(31,397), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, {"dcblce", X(31,398), X_MASK, PPCCHLK, E500MC, {CT, RA, RB}}, {"slbmte", X(31,402), XRA_MASK, PPC64, 0, {RS, RB}}, {"mtvsrws", X(31,403), XX1RB_MASK, PPCVSX3, 0, {XT6, RA}}, {"pbt.", XRC(31,404,1), X_MASK, POWER8, 0, {RS, RA0, RB}}, {"icswx", XRC(31,406,0), X_MASK, POWER7|PPCA2, 0, {RS, RA, RB}}, {"icswx.", XRC(31,406,1), X_MASK, POWER7|PPCA2, 0, {RS, RA, RB}}, {"sthx", X(31,407), X_MASK, COM, 0, {RS, RA0, RB}}, {"orc", XRC(31,412,0), X_MASK, COM, 0, {RA, RS, RB}}, {"orc.", XRC(31,412,1), X_MASK, COM, 0, {RA, RS, RB}}, {"sthepx", X(31,415), X_MASK, E500MC|PPCA2, 0, {RS, RA0, RB}}, {"mtdcrux", X(31,419), X_MASK, PPC464, 0, {RA, RS}}, {"stvexhx", X(31,421), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, {"dcblq.", XRC(31,422,1), X_MASK, E6500, 0, {CT, RA0, RB}}, {"divde", XO(31,425,0,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"divde.", XO(31,425,0,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"divwe", XO(31,427,0,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"divwe.", XO(31,427,0,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"stxvll", X(31,429), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, {"clrbhrb", X(31,430), 0xffffffff, POWER8, 0, {0}}, {"slbie", X(31,434), XRTRA_MASK, PPC64, 0, {RB}}, {"mtvsrdd", X(31,435), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, {"ecowx", X(31,438), X_MASK, PPC, E500|TITAN, {RT, RA0, RB}}, {"sthux", X(31,439), X_MASK, COM, 0, {RS, RAS, RB}}, {"mdors", 0x7f9ce378, 0xffffffff, E500MC, 0, {0}}, {"miso", 0x7f5ad378, 0xffffffff, E6500, 0, {0}}, /* The "yield", "mdoio" and "mdoom" instructions are extended mnemonics for "or rX,rX,rX", with rX being r27, r29 and r30 respectively. 
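   For example, the "yield" encoding 0x7f7bdb78 decodes as primary opcode 31,
   RS = RA = RB = 27, extended opcode 444 and Rc = 0, i.e. "or r27,r27,r27";
   listing these entries before the generic "or"/"mr" forms presumably lets
   the disassembler prefer the hint mnemonics.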
*/ {"yield", 0x7f7bdb78, 0xffffffff, POWER7, 0, {0}}, {"mdoio", 0x7fbdeb78, 0xffffffff, POWER7, 0, {0}}, {"mdoom", 0x7fdef378, 0xffffffff, POWER7, 0, {0}}, {"mr", XRC(31,444,0), X_MASK, COM, 0, {RA, RS, RBS}}, {"or", XRC(31,444,0), X_MASK, COM, 0, {RA, RS, RB}}, {"mr.", XRC(31,444,1), X_MASK, COM, 0, {RA, RS, RBS}}, {"or.", XRC(31,444,1), X_MASK, COM, 0, {RA, RS, RB}}, {"mtexisr", XSPR(31,451, 64), XSPR_MASK, PPC403, 0, {RS}}, {"mtexier", XSPR(31,451, 66), XSPR_MASK, PPC403, 0, {RS}}, {"mtbr0", XSPR(31,451,128), XSPR_MASK, PPC403, 0, {RS}}, {"mtbr1", XSPR(31,451,129), XSPR_MASK, PPC403, 0, {RS}}, {"mtbr2", XSPR(31,451,130), XSPR_MASK, PPC403, 0, {RS}}, {"mtbr3", XSPR(31,451,131), XSPR_MASK, PPC403, 0, {RS}}, {"mtbr4", XSPR(31,451,132), XSPR_MASK, PPC403, 0, {RS}}, {"mtbr5", XSPR(31,451,133), XSPR_MASK, PPC403, 0, {RS}}, {"mtbr6", XSPR(31,451,134), XSPR_MASK, PPC403, 0, {RS}}, {"mtbr7", XSPR(31,451,135), XSPR_MASK, PPC403, 0, {RS}}, {"mtbear", XSPR(31,451,144), XSPR_MASK, PPC403, 0, {RS}}, {"mtbesr", XSPR(31,451,145), XSPR_MASK, PPC403, 0, {RS}}, {"mtiocr", XSPR(31,451,160), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmacr0", XSPR(31,451,192), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmact0", XSPR(31,451,193), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmada0", XSPR(31,451,194), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmasa0", XSPR(31,451,195), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmacc0", XSPR(31,451,196), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmacr1", XSPR(31,451,200), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmact1", XSPR(31,451,201), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmada1", XSPR(31,451,202), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmasa1", XSPR(31,451,203), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmacc1", XSPR(31,451,204), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmacr2", XSPR(31,451,208), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmact2", XSPR(31,451,209), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmada2", XSPR(31,451,210), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmasa2", XSPR(31,451,211), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmacc2", XSPR(31,451,212), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmacr3", XSPR(31,451,216), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmact3", XSPR(31,451,217), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmada3", XSPR(31,451,218), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmasa3", XSPR(31,451,219), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmacc3", XSPR(31,451,220), XSPR_MASK, PPC403, 0, {RS}}, {"mtdmasr", XSPR(31,451,224), XSPR_MASK, PPC403, 0, {RS}}, {"mtdcr", X(31,451), X_MASK, PPC403|BOOKE|PPCA2|PPC476, E500|TITAN, {SPR, RS}}, {"mtdcr.", XRC(31,451,1), X_MASK, PPCA2, 0, {SPR, RS}}, {"stvexwx", X(31,453), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, {"dccci", X(31,454), XRT_MASK, PPC403|PPC440|TITAN|PPCA2, 0, {RAOPT, RBOPT}}, {"dci", X(31,454), XRARB_MASK, PPCA2|PPC476, 0, {CT}}, {"divdu", XO(31,457,0,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"divdu.", XO(31,457,0,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"divwu", XO(31,459,0,0), XO_MASK, PPC, 0, {RT, RA, RB}}, {"divwu.", XO(31,459,0,1), XO_MASK, PPC, 0, {RT, RA, RB}}, {"mtpmr", X(31,462), X_MASK, PPCPMR|PPCE300, 0, {PMR, RS}}, {"mttmr", X(31,494), X_MASK, PPCTMR|E6500, 0, {TMR, RS}}, {"slbieg", X(31,466), XRA_MASK, POWER9, 0, {RS, RB}}, {"mtmq", XSPR(31,467, 0), XSPR_MASK, M601, 0, {RS}}, {"mtxer", XSPR(31,467, 1), XSPR_MASK, COM, 0, {RS}}, {"mtlr", XSPR(31,467, 8), XSPR_MASK, COM, 0, {RS}}, {"mtctr", XSPR(31,467, 9), XSPR_MASK, COM, 0, {RS}}, {"mtdscr", XSPR(31,467, 17), XSPR_MASK, POWER6, 0, {RS}}, {"mttid", XSPR(31,467, 17), XSPR_MASK, POWER, 0, {RS}}, {"mtdsisr", XSPR(31,467, 18), XSPR_MASK, COM, TITAN, {RS}}, {"mtdar", XSPR(31,467, 19), XSPR_MASK, COM, TITAN, 
{RS}}, {"mtrtcu", XSPR(31,467, 20), XSPR_MASK, COM, TITAN, {RS}}, {"mtrtcl", XSPR(31,467, 21), XSPR_MASK, COM, TITAN, {RS}}, {"mtdec", XSPR(31,467, 22), XSPR_MASK, COM, 0, {RS}}, {"mtsdr0", XSPR(31,467, 24), XSPR_MASK, POWER, 0, {RS}}, {"mtsdr1", XSPR(31,467, 25), XSPR_MASK, COM, TITAN, {RS}}, {"mtsrr0", XSPR(31,467, 26), XSPR_MASK, COM, 0, {RS}}, {"mtsrr1", XSPR(31,467, 27), XSPR_MASK, COM, 0, {RS}}, {"mtcfar", XSPR(31,467, 28), XSPR_MASK, POWER6, 0, {RS}}, {"mtpid", XSPR(31,467, 48), XSPR_MASK, BOOKE, 0, {RS}}, {"mtdecar", XSPR(31,467, 54), XSPR_MASK, BOOKE, 0, {RS}}, {"mtcsrr0", XSPR(31,467, 58), XSPR_MASK, BOOKE, 0, {RS}}, {"mtcsrr1", XSPR(31,467, 59), XSPR_MASK, BOOKE, 0, {RS}}, {"mtdear", XSPR(31,467, 61), XSPR_MASK, BOOKE, 0, {RS}}, {"mtesr", XSPR(31,467, 62), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivpr", XSPR(31,467, 63), XSPR_MASK, BOOKE, 0, {RS}}, {"mtcmpa", XSPR(31,467,144), XSPR_MASK, PPC860, 0, {RS}}, {"mtcmpb", XSPR(31,467,145), XSPR_MASK, PPC860, 0, {RS}}, {"mtcmpc", XSPR(31,467,146), XSPR_MASK, PPC860, 0, {RS}}, {"mtcmpd", XSPR(31,467,147), XSPR_MASK, PPC860, 0, {RS}}, {"mticr", XSPR(31,467,148), XSPR_MASK, PPC860, 0, {RS}}, {"mtder", XSPR(31,467,149), XSPR_MASK, PPC860, 0, {RS}}, {"mtcounta", XSPR(31,467,150), XSPR_MASK, PPC860, 0, {RS}}, {"mtcountb", XSPR(31,467,151), XSPR_MASK, PPC860, 0, {RS}}, {"mtctrl", XSPR(31,467,152), XSPR_MASK, POWER4, 0, {RS}}, {"mtcmpe", XSPR(31,467,152), XSPR_MASK, PPC860, 0, {RS}}, {"mtcmpf", XSPR(31,467,153), XSPR_MASK, PPC860, 0, {RS}}, {"mtcmpg", XSPR(31,467,154), XSPR_MASK, PPC860, 0, {RS}}, {"mtcmph", XSPR(31,467,155), XSPR_MASK, PPC860, 0, {RS}}, {"mtlctrl1", XSPR(31,467,156), XSPR_MASK, PPC860, 0, {RS}}, {"mtlctrl2", XSPR(31,467,157), XSPR_MASK, PPC860, 0, {RS}}, {"mtictrl", XSPR(31,467,158), XSPR_MASK, PPC860, 0, {RS}}, {"mtbar", XSPR(31,467,159), XSPR_MASK, PPC860, 0, {RS}}, {"mtvrsave", XSPR(31,467,256), XSPR_MASK, PPCVEC, 0, {RS}}, {"mtusprg0", XSPR(31,467,256), XSPR_MASK, BOOKE, 0, {RS}}, {"mtsprg", XSPR(31,467,256), XSPRG_MASK, PPC, 0, {SPRG, RS}}, {"mtsprg0", XSPR(31,467,272), XSPR_MASK, PPC, 0, {RS}}, {"mtsprg1", XSPR(31,467,273), XSPR_MASK, PPC, 0, {RS}}, {"mtsprg2", XSPR(31,467,274), XSPR_MASK, PPC, 0, {RS}}, {"mtsprg3", XSPR(31,467,275), XSPR_MASK, PPC, 0, {RS}}, {"mtsprg4", XSPR(31,467,276), XSPR_MASK, PPC405|BOOKE, 0, {RS}}, {"mtsprg5", XSPR(31,467,277), XSPR_MASK, PPC405|BOOKE, 0, {RS}}, {"mtsprg6", XSPR(31,467,278), XSPR_MASK, PPC405|BOOKE, 0, {RS}}, {"mtsprg7", XSPR(31,467,279), XSPR_MASK, PPC405|BOOKE, 0, {RS}}, {"mtasr", XSPR(31,467,280), XSPR_MASK, PPC64, 0, {RS}}, {"mtear", XSPR(31,467,282), XSPR_MASK, PPC, TITAN, {RS}}, {"mttbl", XSPR(31,467,284), XSPR_MASK, PPC, 0, {RS}}, {"mttbu", XSPR(31,467,285), XSPR_MASK, PPC, 0, {RS}}, {"mtdbsr", XSPR(31,467,304), XSPR_MASK, BOOKE, 0, {RS}}, {"mtdbcr0", XSPR(31,467,308), XSPR_MASK, BOOKE, 0, {RS}}, {"mtdbcr1", XSPR(31,467,309), XSPR_MASK, BOOKE, 0, {RS}}, {"mtdbcr2", XSPR(31,467,310), XSPR_MASK, BOOKE, 0, {RS}}, {"mtiac1", XSPR(31,467,312), XSPR_MASK, BOOKE, 0, {RS}}, {"mtiac2", XSPR(31,467,313), XSPR_MASK, BOOKE, 0, {RS}}, {"mtiac3", XSPR(31,467,314), XSPR_MASK, BOOKE, 0, {RS}}, {"mtiac4", XSPR(31,467,315), XSPR_MASK, BOOKE, 0, {RS}}, {"mtdac1", XSPR(31,467,316), XSPR_MASK, BOOKE, 0, {RS}}, {"mtdac2", XSPR(31,467,317), XSPR_MASK, BOOKE, 0, {RS}}, {"mtdvc1", XSPR(31,467,318), XSPR_MASK, BOOKE, 0, {RS}}, {"mtdvc2", XSPR(31,467,319), XSPR_MASK, BOOKE, 0, {RS}}, {"mttsr", XSPR(31,467,336), XSPR_MASK, BOOKE, 0, {RS}}, {"mttcr", XSPR(31,467,340), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor0", 
XSPR(31,467,400), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor1", XSPR(31,467,401), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor2", XSPR(31,467,402), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor3", XSPR(31,467,403), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor4", XSPR(31,467,404), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor5", XSPR(31,467,405), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor6", XSPR(31,467,406), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor7", XSPR(31,467,407), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor8", XSPR(31,467,408), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor9", XSPR(31,467,409), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor10", XSPR(31,467,410), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor11", XSPR(31,467,411), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor12", XSPR(31,467,412), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor13", XSPR(31,467,413), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor14", XSPR(31,467,414), XSPR_MASK, BOOKE, 0, {RS}}, {"mtivor15", XSPR(31,467,415), XSPR_MASK, BOOKE, 0, {RS}}, {"mtspefscr", XSPR(31,467,512), XSPR_MASK, PPCSPE, 0, {RS}}, {"mtbbear", XSPR(31,467,513), XSPR_MASK, PPCBRLK, 0, {RS}}, {"mtbbtar", XSPR(31,467,514), XSPR_MASK, PPCBRLK, 0, {RS}}, {"mtivor32", XSPR(31,467,528), XSPR_MASK, PPCSPE, 0, {RS}}, {"mtibatu", XSPR(31,467,528), XSPRBAT_MASK, PPC, TITAN, {SPRBAT, RS}}, {"mtivor33", XSPR(31,467,529), XSPR_MASK, PPCSPE, 0, {RS}}, {"mtibatl", XSPR(31,467,529), XSPRBAT_MASK, PPC, TITAN, {SPRBAT, RS}}, {"mtivor34", XSPR(31,467,530), XSPR_MASK, PPCSPE, 0, {RS}}, {"mtivor35", XSPR(31,467,531), XSPR_MASK, PPCPMR, 0, {RS}}, {"mtdbatu", XSPR(31,467,536), XSPRBAT_MASK, PPC, TITAN, {SPRBAT, RS}}, {"mtdbatl", XSPR(31,467,537), XSPRBAT_MASK, PPC, TITAN, {SPRBAT, RS}}, {"mtmcsrr0", XSPR(31,467,570), XSPR_MASK, PPCRFMCI, 0, {RS}}, {"mtmcsrr1", XSPR(31,467,571), XSPR_MASK, PPCRFMCI, 0, {RS}}, {"mtmcsr", XSPR(31,467,572), XSPR_MASK, PPCRFMCI, 0, {RS}}, {"mtivndx", XSPR(31,467,880), XSPR_MASK, TITAN, 0, {RS}}, {"mtdvndx", XSPR(31,467,881), XSPR_MASK, TITAN, 0, {RS}}, {"mtivlim", XSPR(31,467,882), XSPR_MASK, TITAN, 0, {RS}}, {"mtdvlim", XSPR(31,467,883), XSPR_MASK, TITAN, 0, {RS}}, {"mtclcsr", XSPR(31,467,884), XSPR_MASK, TITAN, 0, {RS}}, {"mtccr1", XSPR(31,467,888), XSPR_MASK, TITAN, 0, {RS}}, {"mtppr", XSPR(31,467,896), XSPR_MASK, POWER7, 0, {RS}}, {"mtppr32", XSPR(31,467,898), XSPR_MASK, POWER7, 0, {RS}}, {"mtummcr0", XSPR(31,467,936), XSPR_MASK, PPC750, 0, {RS}}, {"mtupmc1", XSPR(31,467,937), XSPR_MASK, PPC750, 0, {RS}}, {"mtupmc2", XSPR(31,467,938), XSPR_MASK, PPC750, 0, {RS}}, {"mtusia", XSPR(31,467,939), XSPR_MASK, PPC750, 0, {RS}}, {"mtummcr1", XSPR(31,467,940), XSPR_MASK, PPC750, 0, {RS}}, {"mtupmc3", XSPR(31,467,941), XSPR_MASK, PPC750, 0, {RS}}, {"mtupmc4", XSPR(31,467,942), XSPR_MASK, PPC750, 0, {RS}}, {"mtzpr", XSPR(31,467,944), XSPR_MASK, PPC403, 0, {RS}}, {"mtpid", XSPR(31,467,945), XSPR_MASK, PPC403, 0, {RS}}, {"mtrmmucr", XSPR(31,467,946), XSPR_MASK, TITAN, 0, {RS}}, {"mtccr0", XSPR(31,467,947), XSPR_MASK, PPC405|TITAN, 0, {RS}}, {"mtiac3", XSPR(31,467,948), XSPR_MASK, PPC405, 0, {RS}}, {"mtiac4", XSPR(31,467,949), XSPR_MASK, PPC405, 0, {RS}}, {"mtdvc1", XSPR(31,467,950), XSPR_MASK, PPC405, 0, {RS}}, {"mtdvc2", XSPR(31,467,951), XSPR_MASK, PPC405, 0, {RS}}, {"mtmmcr0", XSPR(31,467,952), XSPR_MASK, PPC750, 0, {RS}}, {"mtpmc1", XSPR(31,467,953), XSPR_MASK, PPC750, 0, {RS}}, {"mtsgr", XSPR(31,467,953), XSPR_MASK, PPC403, 0, {RS}}, {"mtdcwr", XSPR(31,467,954), XSPR_MASK, PPC403, 0, {RS}}, {"mtpmc2", XSPR(31,467,954), XSPR_MASK, PPC750, 0, {RS}}, {"mtsia", XSPR(31,467,955), XSPR_MASK, PPC750, 0, {RS}}, {"mtsler", XSPR(31,467,955), XSPR_MASK, PPC405, 0, 
{RS}}, {"mtmmcr1", XSPR(31,467,956), XSPR_MASK, PPC750, 0, {RS}}, {"mtsu0r", XSPR(31,467,956), XSPR_MASK, PPC405, 0, {RS}}, {"mtdbcr1", XSPR(31,467,957), XSPR_MASK, PPC405, 0, {RS}}, {"mtpmc3", XSPR(31,467,957), XSPR_MASK, PPC750, 0, {RS}}, {"mtpmc4", XSPR(31,467,958), XSPR_MASK, PPC750, 0, {RS}}, {"mticdbdr", XSPR(31,467,979), XSPR_MASK, PPC403, 0, {RS}}, {"mtesr", XSPR(31,467,980), XSPR_MASK, PPC403, 0, {RS}}, {"mtdear", XSPR(31,467,981), XSPR_MASK, PPC403, 0, {RS}}, {"mtevpr", XSPR(31,467,982), XSPR_MASK, PPC403, 0, {RS}}, {"mtcdbcr", XSPR(31,467,983), XSPR_MASK, PPC403, 0, {RS}}, {"mttsr", XSPR(31,467,984), XSPR_MASK, PPC403, 0, {RS}}, {"mttcr", XSPR(31,467,986), XSPR_MASK, PPC403, 0, {RS}}, {"mtpit", XSPR(31,467,987), XSPR_MASK, PPC403, 0, {RS}}, {"mttbhi", XSPR(31,467,988), XSPR_MASK, PPC403, 0, {RS}}, {"mttblo", XSPR(31,467,989), XSPR_MASK, PPC403, 0, {RS}}, {"mtsrr2", XSPR(31,467,990), XSPR_MASK, PPC403, 0, {RS}}, {"mtsrr3", XSPR(31,467,991), XSPR_MASK, PPC403, 0, {RS}}, {"mtdbsr", XSPR(31,467,1008), XSPR_MASK, PPC403, 0, {RS}}, {"mtdbdr", XSPR(31,467,1011), XSPR_MASK, TITAN, 0, {RS}}, {"mtdbcr0", XSPR(31,467,1010), XSPR_MASK, PPC405, 0, {RS}}, {"mtiac1", XSPR(31,467,1012), XSPR_MASK, PPC403, 0, {RS}}, {"mtiac2", XSPR(31,467,1013), XSPR_MASK, PPC403, 0, {RS}}, {"mtdac1", XSPR(31,467,1014), XSPR_MASK, PPC403, 0, {RS}}, {"mtdac2", XSPR(31,467,1015), XSPR_MASK, PPC403, 0, {RS}}, {"mtl2cr", XSPR(31,467,1017), XSPR_MASK, PPC750, 0, {RS}}, {"mtdccr", XSPR(31,467,1018), XSPR_MASK, PPC403, 0, {RS}}, {"mticcr", XSPR(31,467,1019), XSPR_MASK, PPC403, 0, {RS}}, {"mtictc", XSPR(31,467,1019), XSPR_MASK, PPC750, 0, {RS}}, {"mtpbl1", XSPR(31,467,1020), XSPR_MASK, PPC403, 0, {RS}}, {"mtthrm1", XSPR(31,467,1020), XSPR_MASK, PPC750, 0, {RS}}, {"mtpbu1", XSPR(31,467,1021), XSPR_MASK, PPC403, 0, {RS}}, {"mtthrm2", XSPR(31,467,1021), XSPR_MASK, PPC750, 0, {RS}}, {"mtpbl2", XSPR(31,467,1022), XSPR_MASK, PPC403, 0, {RS}}, {"mtthrm3", XSPR(31,467,1022), XSPR_MASK, PPC750, 0, {RS}}, {"mtpbu2", XSPR(31,467,1023), XSPR_MASK, PPC403, 0, {RS}}, {"mtspr", X(31,467), X_MASK, COM, 0, {SPR, RS}}, {"dcbi", X(31,470), XRT_MASK, PPC, 0, {RA0, RB}}, {"nand", XRC(31,476,0), X_MASK, COM, 0, {RA, RS, RB}}, {"nand.", XRC(31,476,1), X_MASK, COM, 0, {RA, RS, RB}}, {"dsn", X(31,483), XRT_MASK, E500MC, 0, {RA, RB}}, {"dcread", X(31,486), X_MASK, PPC403|PPC440, PPCA2|PPC476, {RT, RA0, RB}}, {"icbtls", X(31,486), X_MASK, PPCCHLK|PPC476|TITAN, 0, {CT, RA0, RB}}, {"stvxl", X(31,487), X_MASK, PPCVEC, 0, {VS, RA0, RB}}, {"nabs", XO(31,488,0,0), XORB_MASK, M601, 0, {RT, RA}}, {"nabs.", XO(31,488,0,1), XORB_MASK, M601, 0, {RT, RA}}, {"divd", XO(31,489,0,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"divd.", XO(31,489,0,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"divw", XO(31,491,0,0), XO_MASK, PPC, 0, {RT, RA, RB}}, {"divw.", XO(31,491,0,1), XO_MASK, PPC, 0, {RT, RA, RB}}, {"icbtlse", X(31,494), X_MASK, PPCCHLK, E500MC, {CT, RA, RB}}, {"slbia", X(31,498), 0xff1fffff, POWER6, 0, {IH}}, {"slbia", X(31,498), 0xffffffff, PPC64, POWER6, {0}}, {"cli", X(31,502), XRB_MASK, POWER, 0, {RT, RA}}, {"popcntd", X(31,506), XRB_MASK, POWER7|PPCA2, 0, {RA, RS}}, {"cmpb", X(31,508), X_MASK, POWER6|PPCA2|PPC476, 0, {RA, RS, RB}}, {"mcrxr", X(31,512), XBFRARB_MASK, COM, POWER7, {BF}}, {"lbdcbx", X(31,514), X_MASK, E200Z4, 0, {RT, RA, RB}}, {"lbdx", X(31,515), X_MASK, E500MC, 0, {RT, RA, RB}}, {"bblels", X(31,518), X_MASK, PPCBRLK, 0, {0}}, {"lvlx", X(31,519), X_MASK, CELL, 0, {VD, RA0, RB}}, {"lbfcmux", APU(31,519,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, 
{"subfco", XO(31,8,1,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"sfo", XO(31,8,1,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"subco", XO(31,8,1,0), XO_MASK, PPCCOM, 0, {RT, RB, RA}}, {"subfco.", XO(31,8,1,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"sfo.", XO(31,8,1,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"subco.", XO(31,8,1,1), XO_MASK, PPCCOM, 0, {RT, RB, RA}}, {"addco", XO(31,10,1,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"ao", XO(31,10,1,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"addco.", XO(31,10,1,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"ao.", XO(31,10,1,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"lxsspx", X(31,524), XX1_MASK, PPCVSX2, 0, {XT6, RA0, RB}}, {"clcs", X(31,531), XRB_MASK, M601, 0, {RT, RA}}, {"ldbrx", X(31,532), X_MASK, CELL|POWER7|PPCA2, 0, {RT, RA0, RB}}, {"lswx", X(31,533), X_MASK, PPCCOM, E500|E500MC, {RT, RAX, RBX}}, {"lsx", X(31,533), X_MASK, PWRCOM, 0, {RT, RA, RB}}, {"lwbrx", X(31,534), X_MASK, PPCCOM, 0, {RT, RA0, RB}}, {"lbrx", X(31,534), X_MASK, PWRCOM, 0, {RT, RA, RB}}, {"lfsx", X(31,535), X_MASK, COM, PPCEFS, {FRT, RA0, RB}}, {"srw", XRC(31,536,0), X_MASK, PPCCOM, 0, {RA, RS, RB}}, {"sr", XRC(31,536,0), X_MASK, PWRCOM, 0, {RA, RS, RB}}, {"srw.", XRC(31,536,1), X_MASK, PPCCOM, 0, {RA, RS, RB}}, {"sr.", XRC(31,536,1), X_MASK, PWRCOM, 0, {RA, RS, RB}}, {"rrib", XRC(31,537,0), X_MASK, M601, 0, {RA, RS, RB}}, {"rrib.", XRC(31,537,1), X_MASK, M601, 0, {RA, RS, RB}}, {"cnttzw", XRC(31,538,0), XRB_MASK, POWER9, 0, {RA, RS}}, {"cnttzw.", XRC(31,538,1), XRB_MASK, POWER9, 0, {RA, RS}}, {"srd", XRC(31,539,0), X_MASK, PPC64, 0, {RA, RS, RB}}, {"srd.", XRC(31,539,1), X_MASK, PPC64, 0, {RA, RS, RB}}, {"maskir", XRC(31,541,0), X_MASK, M601, 0, {RA, RS, RB}}, {"maskir.", XRC(31,541,1), X_MASK, M601, 0, {RA, RS, RB}}, {"lhdcbx", X(31,546), X_MASK, E200Z4, 0, {RT, RA, RB}}, {"lhdx", X(31,547), X_MASK, E500MC, 0, {RT, RA, RB}}, {"lvtrx", X(31,549), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, {"bbelr", X(31,550), X_MASK, PPCBRLK, 0, {0}}, {"lvrx", X(31,551), X_MASK, CELL, 0, {VD, RA0, RB}}, {"lhfcmux", APU(31,551,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"subfo", XO(31,40,1,0), XO_MASK, PPC, 0, {RT, RA, RB}}, {"subo", XO(31,40,1,0), XO_MASK, PPC, 0, {RT, RB, RA}}, {"subfo.", XO(31,40,1,1), XO_MASK, PPC, 0, {RT, RA, RB}}, {"subo.", XO(31,40,1,1), XO_MASK, PPC, 0, {RT, RB, RA}}, {"tlbsync", X(31,566), 0xffffffff, PPC, 0, {0}}, {"lfsux", X(31,567), X_MASK, COM, PPCEFS, {FRT, RAS, RB}}, {"cnttzd", XRC(31,570,0), XRB_MASK, POWER9, 0, {RA, RS}}, {"cnttzd.", XRC(31,570,1), XRB_MASK, POWER9, 0, {RA, RS}}, {"mcrxrx", X(31,576), XBFRARB_MASK, POWER9, 0, {BF}}, {"lwdcbx", X(31,578), X_MASK, E200Z4, 0, {RT, RA, RB}}, {"lwdx", X(31,579), X_MASK, E500MC, 0, {RT, RA, RB}}, {"lvtlx", X(31,581), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, {"lwat", X(31,582), X_MASK, POWER9, 0, {RT, RA0, FC}}, {"lwfcmux", APU(31,583,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"lxsdx", X(31,588), XX1_MASK, PPCVSX, 0, {XT6, RA0, RB}}, {"mfsr", X(31,595), XRB_MASK|(1<<20), COM, NON32, {RT, SR}}, {"lswi", X(31,597), X_MASK, PPCCOM, E500|E500MC, {RT, RAX, NBI}}, {"lsi", X(31,597), X_MASK, PWRCOM, 0, {RT, RA0, NB}}, {"hwsync", XSYNC(31,598,0), 0xffffffff, POWER4, BOOKE|PPC476, {0}}, {"lwsync", XSYNC(31,598,1), 0xffffffff, PPC, E500, {0}}, {"ptesync", XSYNC(31,598,2), 0xffffffff, PPC64, 0, {0}}, {"sync", X(31,598), XSYNCLE_MASK, E6500, 0, {LS, ESYNC}}, {"sync", X(31,598), XSYNC_MASK, PPCCOM, BOOKE|PPC476, {LS}}, {"msync", X(31,598), 0xffffffff, BOOKE|PPCA2|PPC476, 0, {0}}, {"sync", X(31,598), 0xffffffff, BOOKE|PPC476, E6500, {0}}, {"lwsync", X(31,598), 
0xffffffff, E500, 0, {0}}, {"dcs", X(31,598), 0xffffffff, PWRCOM, 0, {0}}, {"lfdx", X(31,599), X_MASK, COM, PPCEFS, {FRT, RA0, RB}}, {"mffgpr", XRC(31,607,0), XRA_MASK, POWER6, POWER7, {FRT, RB}}, {"lfdepx", X(31,607), X_MASK, E500MC|PPCA2, 0, {FRT, RA0, RB}}, {"lddx", X(31,611), X_MASK, E500MC, 0, {RT, RA, RB}}, {"lvswx", X(31,613), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, {"ldat", X(31,614), X_MASK, POWER9, 0, {RT, RA0, FC}}, {"lqfcmux", APU(31,615,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"nego", XO(31,104,1,0), XORB_MASK, COM, 0, {RT, RA}}, {"nego.", XO(31,104,1,1), XORB_MASK, COM, 0, {RT, RA}}, {"mulo", XO(31,107,1,0), XO_MASK, M601, 0, {RT, RA, RB}}, {"mulo.", XO(31,107,1,1), XO_MASK, M601, 0, {RT, RA, RB}}, {"mfsri", X(31,627), X_MASK, M601, 0, {RT, RA, RB}}, {"dclst", X(31,630), XRB_MASK, M601, 0, {RS, RA}}, {"lfdux", X(31,631), X_MASK, COM, PPCEFS, {FRT, RAS, RB}}, {"stbdcbx", X(31,642), X_MASK, E200Z4, 0, {RS, RA, RB}}, {"stbdx", X(31,643), X_MASK, E500MC, 0, {RS, RA, RB}}, {"stvlx", X(31,647), X_MASK, CELL, 0, {VS, RA0, RB}}, {"stbfcmux", APU(31,647,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"stxsspx", X(31,652), XX1_MASK, PPCVSX2, 0, {XS6, RA0, RB}}, {"tbegin.", XRC(31,654,1), XRTLRARB_MASK, PPCHTM, 0, {HTM_R}}, {"subfeo", XO(31,136,1,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"sfeo", XO(31,136,1,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"subfeo.", XO(31,136,1,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"sfeo.", XO(31,136,1,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"addeo", XO(31,138,1,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"aeo", XO(31,138,1,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"addeo.", XO(31,138,1,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"aeo.", XO(31,138,1,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"mfsrin", X(31,659), XRA_MASK, PPC, NON32, {RT, RB}}, {"stdbrx", X(31,660), X_MASK, CELL|POWER7|PPCA2, 0, {RS, RA0, RB}}, {"stswx", X(31,661), X_MASK, PPCCOM, E500|E500MC, {RS, RA0, RB}}, {"stsx", X(31,661), X_MASK, PWRCOM, 0, {RS, RA0, RB}}, {"stwbrx", X(31,662), X_MASK, PPCCOM, 0, {RS, RA0, RB}}, {"stbrx", X(31,662), X_MASK, PWRCOM, 0, {RS, RA0, RB}}, {"stfsx", X(31,663), X_MASK, COM, PPCEFS, {FRS, RA0, RB}}, {"srq", XRC(31,664,0), X_MASK, M601, 0, {RA, RS, RB}}, {"srq.", XRC(31,664,1), X_MASK, M601, 0, {RA, RS, RB}}, {"sre", XRC(31,665,0), X_MASK, M601, 0, {RA, RS, RB}}, {"sre.", XRC(31,665,1), X_MASK, M601, 0, {RA, RS, RB}}, {"sthdcbx", X(31,674), X_MASK, E200Z4, 0, {RS, RA, RB}}, {"sthdx", X(31,675), X_MASK, E500MC, 0, {RS, RA, RB}}, {"stvfrx", X(31,677), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, {"stvrx", X(31,679), X_MASK, CELL, 0, {VS, RA0, RB}}, {"sthfcmux", APU(31,679,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"tendall.", XRC(31,686,1)|(1<<25), XRTRARB_MASK, PPCHTM, 0, {0}}, {"tend.", XRC(31,686,1), XRTARARB_MASK, PPCHTM, 0, {HTM_A}}, {"stbcx.", XRC(31,694,1), X_MASK, POWER8|E6500, 0, {RS, RA0, RB}}, {"stfsux", X(31,695), X_MASK, COM, PPCEFS, {FRS, RAS, RB}}, {"sriq", XRC(31,696,0), X_MASK, M601, 0, {RA, RS, SH}}, {"sriq.", XRC(31,696,1), X_MASK, M601, 0, {RA, RS, SH}}, {"stwdcbx", X(31,706), X_MASK, E200Z4, 0, {RS, RA, RB}}, {"stwdx", X(31,707), X_MASK, E500MC, 0, {RS, RA, RB}}, {"stvflx", X(31,709), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, {"stwat", X(31,710), X_MASK, POWER9, 0, {RS, RA0, FC}}, {"stwfcmux", APU(31,711,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"stxsdx", X(31,716), XX1_MASK, PPCVSX, 0, {XS6, RA0, RB}}, {"tcheck", X(31,718), XRTBFRARB_MASK, PPCHTM, 0, {BF}}, {"subfzeo", XO(31,200,1,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"sfzeo", XO(31,200,1,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, 
{"subfzeo.", XO(31,200,1,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"sfzeo.", XO(31,200,1,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"addzeo", XO(31,202,1,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"azeo", XO(31,202,1,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"addzeo.", XO(31,202,1,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"azeo.", XO(31,202,1,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"stswi", X(31,725), X_MASK, PPCCOM, E500|E500MC, {RS, RA0, NB}}, {"stsi", X(31,725), X_MASK, PWRCOM, 0, {RS, RA0, NB}}, {"sthcx.", XRC(31,726,1), X_MASK, POWER8|E6500, 0, {RS, RA0, RB}}, {"stfdx", X(31,727), X_MASK, COM, PPCEFS, {FRS, RA0, RB}}, {"srlq", XRC(31,728,0), X_MASK, M601, 0, {RA, RS, RB}}, {"srlq.", XRC(31,728,1), X_MASK, M601, 0, {RA, RS, RB}}, {"sreq", XRC(31,729,0), X_MASK, M601, 0, {RA, RS, RB}}, {"sreq.", XRC(31,729,1), X_MASK, M601, 0, {RA, RS, RB}}, {"mftgpr", XRC(31,735,0), XRA_MASK, POWER6, POWER7, {RT, FRB}}, {"stfdepx", X(31,735), X_MASK, E500MC|PPCA2, 0, {FRS, RA0, RB}}, {"stddx", X(31,739), X_MASK, E500MC, 0, {RS, RA, RB}}, {"stvswx", X(31,741), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, {"stdat", X(31,742), X_MASK, POWER9, 0, {RS, RA0, FC}}, {"stqfcmux", APU(31,743,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"subfmeo", XO(31,232,1,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"sfmeo", XO(31,232,1,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"subfmeo.", XO(31,232,1,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"sfmeo.", XO(31,232,1,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"mulldo", XO(31,233,1,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"mulldo.", XO(31,233,1,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"addmeo", XO(31,234,1,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"ameo", XO(31,234,1,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"addmeo.", XO(31,234,1,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, {"ameo.", XO(31,234,1,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, {"mullwo", XO(31,235,1,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"mulso", XO(31,235,1,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"mullwo.", XO(31,235,1,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"mulso.", XO(31,235,1,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"tsuspend.", XRCL(31,750,0,1), XRTRARB_MASK,PPCHTM, 0, {0}}, {"tresume.", XRCL(31,750,1,1), XRTRARB_MASK,PPCHTM, 0, {0}}, {"tsr.", XRC(31,750,1), XRTLRARB_MASK,PPCHTM, 0, {L}}, {"darn", X(31,755), XLRAND_MASK, POWER9, 0, {RT, LRAND}}, {"dcba", X(31,758), XRT_MASK, PPC405|PPC7450|BOOKE|PPCA2|PPC476, 0, {RA0, RB}}, {"dcbal", XOPL(31,758,1), XRT_MASK, E500MC, 0, {RA0, RB}}, {"stfdux", X(31,759), X_MASK, COM, PPCEFS, {FRS, RAS, RB}}, {"srliq", XRC(31,760,0), X_MASK, M601, 0, {RA, RS, SH}}, {"srliq.", XRC(31,760,1), X_MASK, M601, 0, {RA, RS, SH}}, {"lvsm", X(31,773), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, {"copy", XOPL(31,774,1), XRT_MASK, POWER9, 0, {RA0, RB}}, {"stvepxl", X(31,775), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, {"lvlxl", X(31,775), X_MASK, CELL, 0, {VD, RA0, RB}}, {"ldfcmux", APU(31,775,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"dozo", XO(31,264,1,0), XO_MASK, M601, 0, {RT, RA, RB}}, {"dozo.", XO(31,264,1,1), XO_MASK, M601, 0, {RT, RA, RB}}, {"addo", XO(31,266,1,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"caxo", XO(31,266,1,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"addo.", XO(31,266,1,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, {"caxo.", XO(31,266,1,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, {"modsd", X(31,777), X_MASK, POWER9, 0, {RT, RA, RB}}, {"modsw", X(31,779), X_MASK, POWER9, 0, {RT, RA, RB}}, {"lxvw4x", X(31,780), XX1_MASK, PPCVSX, 0, {XT6, RA0, RB}}, {"lxsibzx", X(31,781), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, {"tabortwc.", XRC(31,782,1), X_MASK, PPCHTM, 0, {TO, RA, RB}}, 
{"tlbivax", X(31,786), XRT_MASK, BOOKE|PPCA2|PPC476, 0, {RA0, RB}}, {"lwzcix", X(31,789), X_MASK, POWER6, 0, {RT, RA0, RB}}, {"lhbrx", X(31,790), X_MASK, COM, 0, {RT, RA0, RB}}, {"lfdpx", X(31,791), X_MASK, POWER6, POWER7, {FRTp, RA0, RB}}, {"lfqx", X(31,791), X_MASK, POWER2, 0, {FRT, RA, RB}}, {"sraw", XRC(31,792,0), X_MASK, PPCCOM, 0, {RA, RS, RB}}, {"sra", XRC(31,792,0), X_MASK, PWRCOM, 0, {RA, RS, RB}}, {"sraw.", XRC(31,792,1), X_MASK, PPCCOM, 0, {RA, RS, RB}}, {"sra.", XRC(31,792,1), X_MASK, PWRCOM, 0, {RA, RS, RB}}, {"srad", XRC(31,794,0), X_MASK, PPC64, 0, {RA, RS, RB}}, {"srad.", XRC(31,794,1), X_MASK, PPC64, 0, {RA, RS, RB}}, {"lfddx", X(31,803), X_MASK, E500MC, 0, {FRT, RA, RB}}, {"lvtrxl", X(31,805), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, {"stvepx", X(31,807), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, {"lvrxl", X(31,807), X_MASK, CELL, 0, {VD, RA0, RB}}, {"lxvh8x", X(31,812), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, {"lxsihzx", X(31,813), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, {"tabortdc.", XRC(31,814,1), X_MASK, PPCHTM, 0, {TO, RA, RB}}, {"rac", X(31,818), X_MASK, M601, 0, {RT, RA, RB}}, {"erativax", X(31,819), X_MASK, PPCA2, 0, {RS, RA0, RB}}, {"lhzcix", X(31,821), X_MASK, POWER6, 0, {RT, RA0, RB}}, {"dss", XDSS(31,822,0), XDSS_MASK, PPCVEC, 0, {STRM}}, {"lfqux", X(31,823), X_MASK, POWER2, 0, {FRT, RA, RB}}, {"srawi", XRC(31,824,0), X_MASK, PPCCOM, 0, {RA, RS, SH}}, {"srai", XRC(31,824,0), X_MASK, PWRCOM, 0, {RA, RS, SH}}, {"srawi.", XRC(31,824,1), X_MASK, PPCCOM, 0, {RA, RS, SH}}, {"srai.", XRC(31,824,1), X_MASK, PWRCOM, 0, {RA, RS, SH}}, {"sradi", XS(31,413,0), XS_MASK, PPC64, 0, {RA, RS, SH6}}, {"sradi.", XS(31,413,1), XS_MASK, PPC64, 0, {RA, RS, SH6}}, {"lvtlxl", X(31,837), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, {"cpabort", X(31,838), XRTRARB_MASK,POWER9, 0, {0}}, {"divo", XO(31,331,1,0), XO_MASK, M601, 0, {RT, RA, RB}}, {"divo.", XO(31,331,1,1), XO_MASK, M601, 0, {RT, RA, RB}}, {"lxvd2x", X(31,844), XX1_MASK, PPCVSX, 0, {XT6, RA0, RB}}, {"lxvx", X(31,844), XX1_MASK, POWER8, POWER9|PPCVSX3, {XT6, RA0, RB}}, {"tabortwci.", XRC(31,846,1), X_MASK, PPCHTM, 0, {TO, RA, HTM_SI}}, {"tlbsrx.", XRC(31,850,1), XRT_MASK, PPCA2, 0, {RA0, RB}}, {"slbiag", X(31,850), XRARB_MASK, POWER9, 0, {RS}}, {"slbmfev", X(31,851), XRLA_MASK, POWER9, 0, {RT, RB, A_L}}, {"slbmfev", X(31,851), XRA_MASK, PPC64, POWER9, {RT, RB}}, {"lbzcix", X(31,853), X_MASK, POWER6, 0, {RT, RA0, RB}}, {"eieio", X(31,854), 0xffffffff, PPC, BOOKE|PPCA2|PPC476, {0}}, {"mbar", X(31,854), X_MASK, BOOKE|PPCA2|PPC476, 0, {MO}}, {"eieio", XMBAR(31,854,1),0xffffffff, E500, 0, {0}}, {"eieio", X(31,854), 0xffffffff, PPCA2|PPC476, 0, {0}}, {"lfiwax", X(31,855), X_MASK, POWER6|PPCA2|PPC476, 0, {FRT, RA0, RB}}, {"lvswxl", X(31,869), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, {"abso", XO(31,360,1,0), XORB_MASK, M601, 0, {RT, RA}}, {"abso.", XO(31,360,1,1), XORB_MASK, M601, 0, {RT, RA}}, {"divso", XO(31,363,1,0), XO_MASK, M601, 0, {RT, RA, RB}}, {"divso.", XO(31,363,1,1), XO_MASK, M601, 0, {RT, RA, RB}}, {"lxvb16x", X(31,876), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, {"tabortdci.", XRC(31,878,1), X_MASK, PPCHTM, 0, {TO, RA, HTM_SI}}, {"rmieg", X(31,882), XRTRA_MASK, POWER9, 0, {RB}}, {"ldcix", X(31,885), X_MASK, POWER6, 0, {RT, RA0, RB}}, {"msgsync", X(31,886), 0xffffffff, POWER9, 0, {0}}, {"lfiwzx", X(31,887), X_MASK, POWER7|PPCA2, 0, {FRT, RA0, RB}}, {"extswsli", XS(31,445,0), XS_MASK, POWER9, 0, {RA, RS, SH6}}, {"extswsli.", XS(31,445,1), XS_MASK, POWER9, 0, {RA, RS, SH6}}, {"paste.", XRCL(31,902,1,1),XRT_MASK, POWER9, 0, {RA0, RB}}, {"stvlxl", 
X(31,903), X_MASK, CELL, 0, {VS, RA0, RB}}, {"stdfcmux", APU(31,903,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, {"divdeuo", XO(31,393,1,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"divdeuo.", XO(31,393,1,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"divweuo", XO(31,395,1,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"divweuo.", XO(31,395,1,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"stxvw4x", X(31,908), XX1_MASK, PPCVSX, 0, {XS6, RA0, RB}}, {"stxsibx", X(31,909), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, {"tabort.", XRC(31,910,1), XRTRB_MASK, PPCHTM, 0, {RA}}, {"tlbsx", XRC(31,914,0), X_MASK, PPC403|BOOKE|PPCA2|PPC476, 0, {RTO, RA0, RB}}, {"tlbsx.", XRC(31,914,1), X_MASK, PPC403|BOOKE|PPCA2|PPC476, 0, {RTO, RA0, RB}}, {"slbmfee", X(31,915), XRLA_MASK, POWER9, 0, {RT, RB, A_L}}, {"slbmfee", X(31,915), XRA_MASK, PPC64, POWER9, {RT, RB}}, {"stwcix", X(31,917), X_MASK, POWER6, 0, {RS, RA0, RB}}, {"sthbrx", X(31,918), X_MASK, COM, 0, {RS, RA0, RB}}, {"stfdpx", X(31,919), X_MASK, POWER6, POWER7, {FRSp, RA0, RB}}, {"stfqx", X(31,919), X_MASK, POWER2, 0, {FRS, RA0, RB}}, {"sraq", XRC(31,920,0), X_MASK, M601, 0, {RA, RS, RB}}, {"sraq.", XRC(31,920,1), X_MASK, M601, 0, {RA, RS, RB}}, {"srea", XRC(31,921,0), X_MASK, M601, 0, {RA, RS, RB}}, {"srea.", XRC(31,921,1), X_MASK, M601, 0, {RA, RS, RB}}, {"extsh", XRC(31,922,0), XRB_MASK, PPCCOM, 0, {RA, RS}}, {"exts", XRC(31,922,0), XRB_MASK, PWRCOM, 0, {RA, RS}}, {"extsh.", XRC(31,922,1), XRB_MASK, PPCCOM, 0, {RA, RS}}, {"exts.", XRC(31,922,1), XRB_MASK, PWRCOM, 0, {RA, RS}}, {"stfddx", X(31,931), X_MASK, E500MC, 0, {FRS, RA, RB}}, {"stvfrxl", X(31,933), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, {"wclrone", XOPL2(31,934,2),XRT_MASK, PPCA2, 0, {RA0, RB}}, {"wclrall", X(31,934), XRARB_MASK, PPCA2, 0, {L2}}, {"wclr", X(31,934), X_MASK, PPCA2, 0, {L2, RA0, RB}}, {"stvrxl", X(31,935), X_MASK, CELL, 0, {VS, RA0, RB}}, {"divdeo", XO(31,425,1,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"divdeo.", XO(31,425,1,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"divweo", XO(31,427,1,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"divweo.", XO(31,427,1,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, {"stxvh8x", X(31,940), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, {"stxsihx", X(31,941), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, {"treclaim.", XRC(31,942,1), XRTRB_MASK, PPCHTM, 0, {RA}}, {"tlbrehi", XTLB(31,946,0), XTLB_MASK, PPC403, PPCA2, {RT, RA}}, {"tlbrelo", XTLB(31,946,1), XTLB_MASK, PPC403, PPCA2, {RT, RA}}, {"tlbre", X(31,946), X_MASK, PPC403|BOOKE|PPCA2|PPC476, 0, {RSO, RAOPT, SHO}}, {"sthcix", X(31,949), X_MASK, POWER6, 0, {RS, RA0, RB}}, {"icswepx", XRC(31,950,0), X_MASK, PPCA2, 0, {RS, RA, RB}}, {"icswepx.", XRC(31,950,1), X_MASK, PPCA2, 0, {RS, RA, RB}}, {"stfqux", X(31,951), X_MASK, POWER2, 0, {FRS, RA, RB}}, {"sraiq", XRC(31,952,0), X_MASK, M601, 0, {RA, RS, SH}}, {"sraiq.", XRC(31,952,1), X_MASK, M601, 0, {RA, RS, SH}}, {"extsb", XRC(31,954,0), XRB_MASK, PPC, 0, {RA, RS}}, {"extsb.", XRC(31,954,1), XRB_MASK, PPC, 0, {RA, RS}}, {"stvflxl", X(31,965), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, {"iccci", X(31,966), XRT_MASK, PPC403|PPC440|TITAN|PPCA2, 0, {RAOPT, RBOPT}}, {"ici", X(31,966), XRARB_MASK, PPCA2|PPC476, 0, {CT}}, {"divduo", XO(31,457,1,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"divduo.", XO(31,457,1,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"divwuo", XO(31,459,1,0), XO_MASK, PPC, 0, {RT, RA, RB}}, {"divwuo.", XO(31,459,1,1), XO_MASK, PPC, 0, {RT, RA, RB}}, {"stxvd2x", X(31,972), XX1_MASK, PPCVSX, 0, {XS6, RA0, RB}}, {"stxvx", X(31,972), XX1_MASK, POWER8, POWER9|PPCVSX3, {XS6, 
RA0, RB}}, {"tlbld", X(31,978), XRTRA_MASK, PPC, PPC403|BOOKE|PPCA2|PPC476, {RB}}, {"tlbwehi", XTLB(31,978,0), XTLB_MASK, PPC403, 0, {RT, RA}}, {"tlbwelo", XTLB(31,978,1), XTLB_MASK, PPC403, 0, {RT, RA}}, {"tlbwe", X(31,978), X_MASK, PPC403|BOOKE|PPCA2|PPC476, 0, {RSO, RAOPT, SHO}}, {"slbfee.", XRC(31,979,1), XRA_MASK, POWER6, 0, {RT, RB}}, {"stbcix", X(31,981), X_MASK, POWER6, 0, {RS, RA0, RB}}, {"icbi", X(31,982), XRT_MASK, PPC, 0, {RA0, RB}}, {"stfiwx", X(31,983), X_MASK, PPC, PPCEFS, {FRS, RA0, RB}}, {"extsw", XRC(31,986,0), XRB_MASK, PPC64, 0, {RA, RS}}, {"extsw.", XRC(31,986,1), XRB_MASK, PPC64, 0, {RA, RS}}, {"icbiep", XRT(31,991,0), XRT_MASK, E500MC|PPCA2, 0, {RA0, RB}}, {"stvswxl", X(31,997), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, {"icread", X(31,998), XRT_MASK, PPC403|PPC440|PPC476|TITAN, 0, {RA0, RB}}, {"nabso", XO(31,488,1,0), XORB_MASK, M601, 0, {RT, RA}}, {"nabso.", XO(31,488,1,1), XORB_MASK, M601, 0, {RT, RA}}, {"divdo", XO(31,489,1,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"divdo.", XO(31,489,1,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, {"divwo", XO(31,491,1,0), XO_MASK, PPC, 0, {RT, RA, RB}}, {"divwo.", XO(31,491,1,1), XO_MASK, PPC, 0, {RT, RA, RB}}, {"stxvb16x", X(31,1004), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, {"trechkpt.", XRC(31,1006,1), XRTRARB_MASK,PPCHTM, 0, {0}}, {"tlbli", X(31,1010), XRTRA_MASK, PPC, TITAN, {RB}}, {"stdcix", X(31,1013), X_MASK, POWER6, 0, {RS, RA0, RB}}, {"dcbz", X(31,1014), XRT_MASK, PPC, 0, {RA0, RB}}, {"dclz", X(31,1014), XRT_MASK, PPC, 0, {RA0, RB}}, {"dcbzep", XRT(31,1023,0), XRT_MASK, E500MC|PPCA2, 0, {RA0, RB}}, {"dcbzl", XOPL(31,1014,1), XRT_MASK, POWER4|E500MC, PPC476, {RA0, RB}}, {"cctpl", 0x7c210b78, 0xffffffff, CELL, 0, {0}}, {"cctpm", 0x7c421378, 0xffffffff, CELL, 0, {0}}, {"cctph", 0x7c631b78, 0xffffffff, CELL, 0, {0}}, {"dstt", XDSS(31,342,1), XDSS_MASK, PPCVEC, 0, {RA, RB, STRM}}, {"dststt", XDSS(31,374,1), XDSS_MASK, PPCVEC, 0, {RA, RB, STRM}}, {"dssall", XDSS(31,822,1), XDSS_MASK, PPCVEC, 0, {0}}, {"db8cyc", 0x7f9ce378, 0xffffffff, CELL, 0, {0}}, {"db10cyc", 0x7fbdeb78, 0xffffffff, CELL, 0, {0}}, {"db12cyc", 0x7fdef378, 0xffffffff, CELL, 0, {0}}, {"db16cyc", 0x7ffffb78, 0xffffffff, CELL, 0, {0}}, {"lwz", OP(32), OP_MASK, PPCCOM, PPCVLE, {RT, D, RA0}}, {"l", OP(32), OP_MASK, PWRCOM, PPCVLE, {RT, D, RA0}}, {"lwzu", OP(33), OP_MASK, PPCCOM, PPCVLE, {RT, D, RAL}}, {"lu", OP(33), OP_MASK, PWRCOM, PPCVLE, {RT, D, RA0}}, {"lbz", OP(34), OP_MASK, COM, PPCVLE, {RT, D, RA0}}, {"lbzu", OP(35), OP_MASK, COM, PPCVLE, {RT, D, RAL}}, {"stw", OP(36), OP_MASK, PPCCOM, PPCVLE, {RS, D, RA0}}, {"st", OP(36), OP_MASK, PWRCOM, PPCVLE, {RS, D, RA0}}, {"stwu", OP(37), OP_MASK, PPCCOM, PPCVLE, {RS, D, RAS}}, {"stu", OP(37), OP_MASK, PWRCOM, PPCVLE, {RS, D, RA0}}, {"stb", OP(38), OP_MASK, COM, PPCVLE, {RS, D, RA0}}, {"stbu", OP(39), OP_MASK, COM, PPCVLE, {RS, D, RAS}}, {"lhz", OP(40), OP_MASK, COM, PPCVLE, {RT, D, RA0}}, {"lhzu", OP(41), OP_MASK, COM, PPCVLE, {RT, D, RAL}}, {"lha", OP(42), OP_MASK, COM, PPCVLE, {RT, D, RA0}}, {"lhau", OP(43), OP_MASK, COM, PPCVLE, {RT, D, RAL}}, {"sth", OP(44), OP_MASK, COM, PPCVLE, {RS, D, RA0}}, {"sthu", OP(45), OP_MASK, COM, PPCVLE, {RS, D, RAS}}, {"lmw", OP(46), OP_MASK, PPCCOM, PPCVLE, {RT, D, RAM}}, {"lm", OP(46), OP_MASK, PWRCOM, PPCVLE, {RT, D, RA0}}, {"stmw", OP(47), OP_MASK, PPCCOM, PPCVLE, {RS, D, RA0}}, {"stm", OP(47), OP_MASK, PWRCOM, PPCVLE, {RS, D, RA0}}, {"lfs", OP(48), OP_MASK, COM, PPCEFS|PPCVLE, {FRT, D, RA0}}, {"lfsu", OP(49), OP_MASK, COM, PPCEFS|PPCVLE, {FRT, D, RAS}}, {"lfd", OP(50), OP_MASK, COM, 
PPCEFS|PPCVLE, {FRT, D, RA0}}, {"lfdu", OP(51), OP_MASK, COM, PPCEFS|PPCVLE, {FRT, D, RAS}}, {"stfs", OP(52), OP_MASK, COM, PPCEFS|PPCVLE, {FRS, D, RA0}}, {"stfsu", OP(53), OP_MASK, COM, PPCEFS|PPCVLE, {FRS, D, RAS}}, {"stfd", OP(54), OP_MASK, COM, PPCEFS|PPCVLE, {FRS, D, RA0}}, {"stfdu", OP(55), OP_MASK, COM, PPCEFS|PPCVLE, {FRS, D, RAS}}, {"lq", OP(56), OP_MASK, POWER4, PPC476|PPCVLE, {RTQ, DQ, RAQ}}, {"psq_l", OP(56), OP_MASK, PPCPS, PPCVLE, {FRT,PSD,RA,PSW,PSQ}}, {"lfq", OP(56), OP_MASK, POWER2, PPCVLE, {FRT, D, RA0}}, {"lxsd", DSO(57,2), DS_MASK, PPCVSX3, PPCVLE, {VD, DS, RA0}}, {"lxssp", DSO(57,3), DS_MASK, PPCVSX3, PPCVLE, {VD, DS, RA0}}, {"lfdp", OP(57), OP_MASK, POWER6, POWER7|PPCVLE, {FRTp, DS, RA0}}, {"psq_lu", OP(57), OP_MASK, PPCPS, PPCVLE, {FRT,PSD,RA,PSW,PSQ}}, {"lfqu", OP(57), OP_MASK, POWER2, PPCVLE, {FRT, D, RA0}}, {"ld", DSO(58,0), DS_MASK, PPC64, PPCVLE, {RT, DS, RA0}}, {"ldu", DSO(58,1), DS_MASK, PPC64, PPCVLE, {RT, DS, RAL}}, {"lwa", DSO(58,2), DS_MASK, PPC64, PPCVLE, {RT, DS, RA0}}, {"dadd", XRC(59,2,0), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, {"dadd.", XRC(59,2,1), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, {"dqua", ZRC(59,3,0), Z2_MASK, POWER6, PPCVLE, {FRT,FRA,FRB,RMC}}, {"dqua.", ZRC(59,3,1), Z2_MASK, POWER6, PPCVLE, {FRT,FRA,FRB,RMC}}, {"fdivs", A(59,18,0), AFRC_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, {"fdivs.", A(59,18,1), AFRC_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, {"fsubs", A(59,20,0), AFRC_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, {"fsubs.", A(59,20,1), AFRC_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, {"fadds", A(59,21,0), AFRC_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, {"fadds.", A(59,21,1), AFRC_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, {"fsqrts", A(59,22,0), AFRAFRC_MASK, PPC, TITAN|PPCVLE, {FRT, FRB}}, {"fsqrts.", A(59,22,1), AFRAFRC_MASK, PPC, TITAN|PPCVLE, {FRT, FRB}}, {"fres", A(59,24,0), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"fres", A(59,24,0), AFRALFRC_MASK, PPC, POWER7|PPCVLE, {FRT, FRB, A_L}}, {"fres.", A(59,24,1), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"fres.", A(59,24,1), AFRALFRC_MASK, PPC, POWER7|PPCVLE, {FRT, FRB, A_L}}, {"fmuls", A(59,25,0), AFRB_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC}}, {"fmuls.", A(59,25,1), AFRB_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC}}, {"frsqrtes", A(59,26,0), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"frsqrtes", A(59,26,0), AFRALFRC_MASK, POWER5, POWER7|PPCVLE, {FRT, FRB, A_L}}, {"frsqrtes.", A(59,26,1), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"frsqrtes.", A(59,26,1), AFRALFRC_MASK, POWER5, POWER7|PPCVLE, {FRT, FRB, A_L}}, {"fmsubs", A(59,28,0), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fmsubs.", A(59,28,1), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fmadds", A(59,29,0), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fmadds.", A(59,29,1), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fnmsubs", A(59,30,0), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fnmsubs.", A(59,30,1), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fnmadds", A(59,31,0), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fnmadds.", A(59,31,1), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"dmul", XRC(59,34,0), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, {"dmul.", XRC(59,34,1), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, {"drrnd", ZRC(59,35,0), Z2_MASK, POWER6, PPCVLE, {FRT, FRA, FRB, RMC}}, {"drrnd.", ZRC(59,35,1), Z2_MASK, POWER6, PPCVLE, {FRT, FRA, FRB, RMC}}, {"dscli", ZRC(59,66,0), Z_MASK, POWER6, PPCVLE, {FRT, FRA, SH16}}, {"dscli.", ZRC(59,66,1), Z_MASK, 
POWER6, PPCVLE, {FRT, FRA, SH16}}, {"dquai", ZRC(59,67,0), Z2_MASK, POWER6, PPCVLE, {TE, FRT,FRB,RMC}}, {"dquai.", ZRC(59,67,1), Z2_MASK, POWER6, PPCVLE, {TE, FRT,FRB,RMC}}, {"dscri", ZRC(59,98,0), Z_MASK, POWER6, PPCVLE, {FRT, FRA, SH16}}, {"dscri.", ZRC(59,98,1), Z_MASK, POWER6, PPCVLE, {FRT, FRA, SH16}}, {"drintx", ZRC(59,99,0), Z2_MASK, POWER6, PPCVLE, {R, FRT, FRB, RMC}}, {"drintx.", ZRC(59,99,1), Z2_MASK, POWER6, PPCVLE, {R, FRT, FRB, RMC}}, {"dcmpo", X(59,130), X_MASK, POWER6, PPCVLE, {BF, FRA, FRB}}, {"dtstex", X(59,162), X_MASK, POWER6, PPCVLE, {BF, FRA, FRB}}, {"dtstdc", Z(59,194), Z_MASK, POWER6, PPCVLE, {BF, FRA, DCM}}, {"dtstdg", Z(59,226), Z_MASK, POWER6, PPCVLE, {BF, FRA, DGM}}, {"drintn", ZRC(59,227,0), Z2_MASK, POWER6, PPCVLE, {R, FRT, FRB, RMC}}, {"drintn.", ZRC(59,227,1), Z2_MASK, POWER6, PPCVLE, {R, FRT, FRB, RMC}}, {"dctdp", XRC(59,258,0), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, {"dctdp.", XRC(59,258,1), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, {"dctfix", XRC(59,290,0), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, {"dctfix.", XRC(59,290,1), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, {"ddedpd", XRC(59,322,0), X_MASK, POWER6, PPCVLE, {SP, FRT, FRB}}, {"ddedpd.", XRC(59,322,1), X_MASK, POWER6, PPCVLE, {SP, FRT, FRB}}, {"dxex", XRC(59,354,0), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, {"dxex.", XRC(59,354,1), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, {"dsub", XRC(59,514,0), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, {"dsub.", XRC(59,514,1), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, {"ddiv", XRC(59,546,0), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, {"ddiv.", XRC(59,546,1), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, {"dcmpu", X(59,642), X_MASK, POWER6, PPCVLE, {BF, FRA, FRB}}, {"dtstsf", X(59,674), X_MASK, POWER6, PPCVLE, {BF, FRA, FRB}}, {"dtstsfi", X(59,675), X_MASK|1<<22,POWER9, PPCVLE, {BF, UIM6, FRB}}, {"drsp", XRC(59,770,0), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, {"drsp.", XRC(59,770,1), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, {"dcffix", XRC(59,802,0), X_MASK|FRA_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"dcffix.", XRC(59,802,1), X_MASK|FRA_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"denbcd", XRC(59,834,0), X_MASK, POWER6, PPCVLE, {S, FRT, FRB}}, {"denbcd.", XRC(59,834,1), X_MASK, POWER6, PPCVLE, {S, FRT, FRB}}, {"fcfids", XRC(59,846,0), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, {"fcfids.", XRC(59,846,1), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, {"diex", XRC(59,866,0), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, {"diex.", XRC(59,866,1), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, {"fcfidus", XRC(59,974,0), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, {"fcfidus.", XRC(59,974,1), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, {"xsaddsp", XX3(60,0), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xsmaddasp", XX3(60,1), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xxsldwi", XX3(60,2), XX3SHW_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6, SHW}}, {"xscmpeqdp", XX3(60,3), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, {"xsrsqrtesp", XX2(60,10), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, {"xssqrtsp", XX2(60,11), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, {"xxsel", XX4(60,3), XX4_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6, XC6}}, {"xssubsp", XX3(60,8), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xsmaddmsp", XX3(60,9), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xxspltd", XX3(60,10), XX3DM_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6S, DMEX}}, {"xxmrghd", XX3(60,10), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xxswapd", XX3(60,10)|(2<<8), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6S}}, {"xxmrgld", XX3(60,10)|(3<<8), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, 
XB6}}, {"xxpermdi", XX3(60,10), XX3DM_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6, DM}}, {"xscmpgtdp", XX3(60,11), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, {"xsresp", XX2(60,26), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, {"xsmulsp", XX3(60,16), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xsmsubasp", XX3(60,17), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xxmrghw", XX3(60,18), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xscmpgedp", XX3(60,19), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, {"xsdivsp", XX3(60,24), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xsmsubmsp", XX3(60,25), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xxperm", XX3(60,26), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, {"xsadddp", XX3(60,32), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xsmaddadp", XX3(60,33), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xscmpudp", XX3(60,35), XX3BF_MASK, PPCVSX, PPCVLE, {BF, XA6, XB6}}, {"xscvdpuxws", XX2(60,72), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xsrdpi", XX2(60,73), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xsrsqrtedp", XX2(60,74), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xssqrtdp", XX2(60,75), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xssubdp", XX3(60,40), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xsmaddmdp", XX3(60,41), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xscmpodp", XX3(60,43), XX3BF_MASK, PPCVSX, PPCVLE, {BF, XA6, XB6}}, {"xscvdpsxws", XX2(60,88), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xsrdpiz", XX2(60,89), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xsredp", XX2(60,90), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xsmuldp", XX3(60,48), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xsmsubadp", XX3(60,49), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xxmrglw", XX3(60,50), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xsrdpip", XX2(60,105), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xstsqrtdp", XX2(60,106), XX2BF_MASK, PPCVSX, PPCVLE, {BF, XB6}}, {"xsrdpic", XX2(60,107), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xsdivdp", XX3(60,56), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xsmsubmdp", XX3(60,57), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xxpermr", XX3(60,58), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, {"xscmpexpdp", XX3(60,59), XX3BF_MASK, PPCVSX3, PPCVLE, {BF, XA6, XB6}}, {"xsrdpim", XX2(60,121), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xstdivdp", XX3(60,61), XX3BF_MASK, PPCVSX, PPCVLE, {BF, XA6, XB6}}, {"xvaddsp", XX3(60,64), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvmaddasp", XX3(60,65), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcmpeqsp", XX3RC(60,67,0), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcmpeqsp.", XX3RC(60,67,1), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvspuxws", XX2(60,136), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvrspi", XX2(60,137), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvrsqrtesp", XX2(60,138), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvsqrtsp", XX2(60,139), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvsubsp", XX3(60,72), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvmaddmsp", XX3(60,73), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcmpgtsp", XX3RC(60,75,0), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcmpgtsp.", XX3RC(60,75,1), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvspsxws", XX2(60,152), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvrspiz", XX2(60,153), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvresp", XX2(60,154), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvmulsp", XX3(60,80), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvmsubasp", XX3(60,81), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, 
{"xxspltw", XX2(60,164), XX2UIM_MASK, PPCVSX, PPCVLE, {XT6, XB6, UIM}}, {"xxextractuw", XX2(60,165), XX2UIM4_MASK, PPCVSX3, PPCVLE, {XT6, XB6, UIMM4}}, {"xvcmpgesp", XX3RC(60,83,0), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcmpgesp.", XX3RC(60,83,1), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvuxwsp", XX2(60,168), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvrspip", XX2(60,169), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvtsqrtsp", XX2(60,170), XX2BF_MASK, PPCVSX, PPCVLE, {BF, XB6}}, {"xvrspic", XX2(60,171), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvdivsp", XX3(60,88), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvmsubmsp", XX3(60,89), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xxspltib", X(60,360), XX1_MASK|3<<19, PPCVSX3, PPCVLE, {XT6, IMM8}}, {"xxinsertw", XX2(60,181), XX2UIM4_MASK, PPCVSX3, PPCVLE, {XT6, XB6, UIMM4}}, {"xvcvsxwsp", XX2(60,184), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvrspim", XX2(60,185), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvtdivsp", XX3(60,93), XX3BF_MASK, PPCVSX, PPCVLE, {BF, XA6, XB6}}, {"xvadddp", XX3(60,96), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvmaddadp", XX3(60,97), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcmpeqdp", XX3RC(60,99,0), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcmpeqdp.", XX3RC(60,99,1), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvdpuxws", XX2(60,200), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvrdpi", XX2(60,201), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvrsqrtedp", XX2(60,202), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvsqrtdp", XX2(60,203), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvsubdp", XX3(60,104), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvmaddmdp", XX3(60,105), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcmpgtdp", XX3RC(60,107,0), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcmpgtdp.", XX3RC(60,107,1), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvdpsxws", XX2(60,216), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvrdpiz", XX2(60,217), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvredp", XX2(60,218), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvmuldp", XX3(60,112), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvmsubadp", XX3(60,113), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcmpgedp", XX3RC(60,115,0), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcmpgedp.", XX3RC(60,115,1), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvuxwdp", XX2(60,232), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvrdpip", XX2(60,233), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvtsqrtdp", XX2(60,234), XX2BF_MASK, PPCVSX, PPCVLE, {BF, XB6}}, {"xvrdpic", XX2(60,235), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvdivdp", XX3(60,120), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvmsubmdp", XX3(60,121), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvsxwdp", XX2(60,248), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvrdpim", XX2(60,249), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvtdivdp", XX3(60,125), XX3BF_MASK, PPCVSX, PPCVLE, {BF, XA6, XB6}}, {"xsmaxcdp", XX3(60,128), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, {"xsnmaddasp", XX3(60,129), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xxland", XX3(60,130), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xscvdpsp", XX2(60,265), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xscvdpspn", XX2(60,267), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, {"xsmincdp", XX3(60,136), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, {"xsnmaddmsp", XX3(60,137), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xxlandc", XX3(60,138), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xsrsp", 
XX2(60,281), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, {"xsmaxjdp", XX3(60,144), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, {"xsnmsubasp", XX3(60,145), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xxlor", XX3(60,146), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xscvuxdsp", XX2(60,296), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, {"xststdcsp", XX2(60,298), XX2BFD_MASK, PPCVSX3, PPCVLE, {BF, XB6, DCMX}}, {"xsminjdp", XX3(60,152), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, {"xsnmsubmsp", XX3(60,153), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xxlxor", XX3(60,154), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xscvsxdsp", XX2(60,312), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, {"xsmaxdp", XX3(60,160), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xsnmaddadp", XX3(60,161), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xxlnor", XX3(60,162), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xscvdpuxds", XX2(60,328), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xscvspdp", XX2(60,329), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xscvspdpn", XX2(60,331), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, {"xsmindp", XX3(60,168), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xsnmaddmdp", XX3(60,169), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xxlorc", XX3(60,170), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xscvdpsxds", XX2(60,344), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xsabsdp", XX2(60,345), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xsxexpdp", XX2VA(60,347,0),XX2_MASK|1, PPCVSX3, PPCVLE, {RT, XB6}}, {"xsxsigdp", XX2VA(60,347,1),XX2_MASK|1, PPCVSX3, PPCVLE, {RT, XB6}}, {"xscvhpdp", XX2VA(60,347,16),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, {"xscvdphp", XX2VA(60,347,17),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, {"xscpsgndp", XX3(60,176), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xsnmsubadp", XX3(60,177), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xxlnand", XX3(60,178), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xscvuxddp", XX2(60,360), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xsnabsdp", XX2(60,361), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xststdcdp", XX2(60,362), XX2BFD_MASK, PPCVSX3, PPCVLE, {BF, XB6, DCMX}}, {"xsnmsubmdp", XX3(60,185), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xxleqv", XX3(60,186), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, {"xscvsxddp", XX2(60,376), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xsnegdp", XX2(60,377), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvmaxsp", XX3(60,192), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvnmaddasp", XX3(60,193), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvspuxds", XX2(60,392), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvcvdpsp", XX2(60,393), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvminsp", XX3(60,200), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvnmaddmsp", XX3(60,201), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvspsxds", XX2(60,408), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvabssp", XX2(60,409), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvmovsp", XX3(60,208), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6S}}, {"xvcpsgnsp", XX3(60,208), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvnmsubasp", XX3(60,209), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvuxdsp", XX2(60,424), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvnabssp", XX2(60,425), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvtstdcsp", XX2(60,426), XX2DCMXS_MASK, PPCVSX3, PPCVLE, {XT6, XB6, DCMXS}}, {"xviexpsp", XX3(60,216), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, {"xvnmsubmsp", XX3(60,217), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvsxdsp", XX2(60,440), XX2_MASK, 
PPCVSX, PPCVLE, {XT6, XB6}}, {"xvnegsp", XX2(60,441), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvmaxdp", XX3(60,224), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvnmaddadp", XX3(60,225), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvdpuxds", XX2(60,456), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvcvspdp", XX2(60,457), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xsiexpdp", X(60,918), XX1_MASK, PPCVSX3, PPCVLE, {XT6, RA, RB}}, {"xvmindp", XX3(60,232), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvnmaddmdp", XX3(60,233), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvdpsxds", XX2(60,472), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvabsdp", XX2(60,473), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvxexpdp", XX2VA(60,475,0),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, {"xvxsigdp", XX2VA(60,475,1),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, {"xxbrh", XX2VA(60,475,7),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, {"xvxexpsp", XX2VA(60,475,8),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, {"xvxsigsp", XX2VA(60,475,9),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, {"xxbrw", XX2VA(60,475,15),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, {"xxbrd", XX2VA(60,475,23),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, {"xvcvhpsp", XX2VA(60,475,24),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, {"xvcvsphp", XX2VA(60,475,25),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, {"xxbrq", XX2VA(60,475,31),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, {"xvmovdp", XX3(60,240), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6S}}, {"xvcpsgndp", XX3(60,240), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvnmsubadp", XX3(60,241), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvuxddp", XX2(60,488), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvnabsdp", XX2(60,489), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvtstdcdp", XX2(60,490), XX2DCMXS_MASK, PPCVSX3, PPCVLE, {XT6, XB6, DCMXS}}, {"xviexpdp", XX3(60,248), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, {"xvnmsubmdp", XX3(60,249), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, {"xvcvsxddp", XX2(60,504), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"xvnegdp", XX2(60,505), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, {"psq_st", OP(60), OP_MASK, PPCPS, PPCVLE, {FRS,PSD,RA,PSW,PSQ}}, {"stfq", OP(60), OP_MASK, POWER2, PPCVLE, {FRS, D, RA}}, {"lxv", DQX(61,1), DQX_MASK, PPCVSX3, PPCVLE, {XTQ6, DQ, RA0}}, {"stxv", DQX(61,5), DQX_MASK, PPCVSX3, PPCVLE, {XSQ6, DQ, RA0}}, {"stxsd", DSO(61,2), DS_MASK, PPCVSX3, PPCVLE, {VS, DS, RA0}}, {"stxssp", DSO(61,3), DS_MASK, PPCVSX3, PPCVLE, {VS, DS, RA0}}, {"stfdp", OP(61), OP_MASK, POWER6, POWER7|PPCVLE, {FRSp, DS, RA0}}, {"psq_stu", OP(61), OP_MASK, PPCPS, PPCVLE, {FRS,PSD,RA,PSW,PSQ}}, {"stfqu", OP(61), OP_MASK, POWER2, PPCVLE, {FRS, D, RA}}, {"std", DSO(62,0), DS_MASK, PPC64, PPCVLE, {RS, DS, RA0}}, {"stdu", DSO(62,1), DS_MASK, PPC64, PPCVLE, {RS, DS, RAS}}, {"stq", DSO(62,2), DS_MASK, POWER4, PPC476|PPCVLE, {RSQ, DS, RA0}}, {"fcmpu", X(63,0), XBF_MASK, COM, PPCEFS|PPCVLE, {BF, FRA, FRB}}, {"daddq", XRC(63,2,0), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, {"daddq.", XRC(63,2,1), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, {"dquaq", ZRC(63,3,0), Z2_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp, RMC}}, {"dquaq.", ZRC(63,3,1), Z2_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp, RMC}}, {"xsaddqp", XRC(63,4,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"xsaddqpo", XRC(63,4,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"xsrqpi", ZRC(63,5,0), Z2_MASK, PPCVSX3, PPCVLE, {R, VD, VB, RMC}}, {"xsrqpix", ZRC(63,5,1), Z2_MASK, PPCVSX3, PPCVLE, {R, VD, VB, RMC}}, {"fcpsgn", XRC(63,8,0), X_MASK, POWER6|PPCA2|PPC476, PPCVLE, {FRT, FRA, FRB}}, 
{"fcpsgn.", XRC(63,8,1), X_MASK, POWER6|PPCA2|PPC476, PPCVLE, {FRT, FRA, FRB}}, {"frsp", XRC(63,12,0), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, {"frsp.", XRC(63,12,1), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, {"fctiw", XRC(63,14,0), XRA_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRB}}, {"fcir", XRC(63,14,0), XRA_MASK, PWR2COM, PPCVLE, {FRT, FRB}}, {"fctiw.", XRC(63,14,1), XRA_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRB}}, {"fcir.", XRC(63,14,1), XRA_MASK, PWR2COM, PPCVLE, {FRT, FRB}}, {"fctiwz", XRC(63,15,0), XRA_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRB}}, {"fcirz", XRC(63,15,0), XRA_MASK, PWR2COM, PPCVLE, {FRT, FRB}}, {"fctiwz.", XRC(63,15,1), XRA_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRB}}, {"fcirz.", XRC(63,15,1), XRA_MASK, PWR2COM, PPCVLE, {FRT, FRB}}, {"fdiv", A(63,18,0), AFRC_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, {"fd", A(63,18,0), AFRC_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRB}}, {"fdiv.", A(63,18,1), AFRC_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, {"fd.", A(63,18,1), AFRC_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRB}}, {"fsub", A(63,20,0), AFRC_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, {"fs", A(63,20,0), AFRC_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRB}}, {"fsub.", A(63,20,1), AFRC_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, {"fs.", A(63,20,1), AFRC_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRB}}, {"fadd", A(63,21,0), AFRC_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, {"fa", A(63,21,0), AFRC_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRB}}, {"fadd.", A(63,21,1), AFRC_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, {"fa.", A(63,21,1), AFRC_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRB}}, {"fsqrt", A(63,22,0), AFRAFRC_MASK, PPCPWR2, TITAN|PPCVLE, {FRT, FRB}}, {"fsqrt.", A(63,22,1), AFRAFRC_MASK, PPCPWR2, TITAN|PPCVLE, {FRT, FRB}}, {"fsel", A(63,23,0), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fsel.", A(63,23,1), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fre", A(63,24,0), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"fre", A(63,24,0), AFRALFRC_MASK, POWER5, POWER7|PPCVLE, {FRT, FRB, A_L}}, {"fre.", A(63,24,1), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"fre.", A(63,24,1), AFRALFRC_MASK, POWER5, POWER7|PPCVLE, {FRT, FRB, A_L}}, {"fmul", A(63,25,0), AFRB_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC}}, {"fm", A(63,25,0), AFRB_MASK, PWRCOM, PPCVLE|PPCVLE, {FRT, FRA, FRC}}, {"fmul.", A(63,25,1), AFRB_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC}}, {"fm.", A(63,25,1), AFRB_MASK, PWRCOM, PPCVLE|PPCVLE, {FRT, FRA, FRC}}, {"frsqrte", A(63,26,0), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"frsqrte", A(63,26,0), AFRALFRC_MASK, PPC, POWER7|PPCVLE, {FRT, FRB, A_L}}, {"frsqrte.", A(63,26,1), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"frsqrte.", A(63,26,1), AFRALFRC_MASK, PPC, POWER7|PPCVLE, {FRT, FRB, A_L}}, {"fmsub", A(63,28,0), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fms", A(63,28,0), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, {"fmsub.", A(63,28,1), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fms.", A(63,28,1), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, {"fmadd", A(63,29,0), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fma", A(63,29,0), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, {"fmadd.", A(63,29,1), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fma.", A(63,29,1), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, {"fnmsub", A(63,30,0), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fnms", A(63,30,0), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, {"fnmsub.", A(63,30,1), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fnms.", A(63,30,1), 
A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, {"fnmadd", A(63,31,0), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fnma", A(63,31,0), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, {"fnmadd.", A(63,31,1), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, {"fnma.", A(63,31,1), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, {"fcmpo", X(63,32), XBF_MASK, COM, PPCEFS|PPCVLE, {BF, FRA, FRB}}, {"dmulq", XRC(63,34,0), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, {"dmulq.", XRC(63,34,1), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, {"drrndq", ZRC(63,35,0), Z2_MASK, POWER6, PPCVLE, {FRTp, FRA, FRBp, RMC}}, {"drrndq.", ZRC(63,35,1), Z2_MASK, POWER6, PPCVLE, {FRTp, FRA, FRBp, RMC}}, {"xsmulqp", XRC(63,36,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"xsmulqpo", XRC(63,36,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"xsrqpxp", Z(63,37), Z2_MASK, PPCVSX3, PPCVLE, {R, VD, VB, RMC}}, {"mtfsb1", XRC(63,38,0), XRARB_MASK, COM, PPCVLE, {BT}}, {"mtfsb1.", XRC(63,38,1), XRARB_MASK, COM, PPCVLE, {BT}}, {"fneg", XRC(63,40,0), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, {"fneg.", XRC(63,40,1), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, {"mcrfs", X(63,64), XRB_MASK|(3<<21)|(3<<16), COM, PPCVLE, {BF, BFA}}, {"dscliq", ZRC(63,66,0), Z_MASK, POWER6, PPCVLE, {FRTp, FRAp, SH16}}, {"dscliq.", ZRC(63,66,1), Z_MASK, POWER6, PPCVLE, {FRTp, FRAp, SH16}}, {"dquaiq", ZRC(63,67,0), Z2_MASK, POWER6, PPCVLE, {TE, FRTp, FRBp, RMC}}, {"dquaiq.", ZRC(63,67,1), Z2_MASK, POWER6, PPCVLE, {TE, FRTp, FRBp, RMC}}, {"mtfsb0", XRC(63,70,0), XRARB_MASK, COM, PPCVLE, {BT}}, {"mtfsb0.", XRC(63,70,1), XRARB_MASK, COM, PPCVLE, {BT}}, {"fmr", XRC(63,72,0), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, {"fmr.", XRC(63,72,1), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, {"dscriq", ZRC(63,98,0), Z_MASK, POWER6, PPCVLE, {FRTp, FRAp, SH16}}, {"dscriq.", ZRC(63,98,1), Z_MASK, POWER6, PPCVLE, {FRTp, FRAp, SH16}}, {"drintxq", ZRC(63,99,0), Z2_MASK, POWER6, PPCVLE, {R, FRTp, FRBp, RMC}}, {"drintxq.", ZRC(63,99,1), Z2_MASK, POWER6, PPCVLE, {R, FRTp, FRBp, RMC}}, {"xscpsgnqp", X(63,100), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"ftdiv", X(63,128), XBF_MASK, POWER7, PPCVLE, {BF, FRA, FRB}}, {"dcmpoq", X(63,130), X_MASK, POWER6, PPCVLE, {BF, FRAp, FRBp}}, {"xscmpoqp", X(63,132), XBF_MASK, PPCVSX3, PPCVLE, {BF, VA, VB}}, {"mtfsfi", XRC(63,134,0), XWRA_MASK|(3<<21)|(1<<11), POWER6|PPCA2|PPC476, PPCVLE, {BFF, U, W}}, {"mtfsfi", XRC(63,134,0), XRA_MASK|(3<<21)|(1<<11), COM, POWER6|PPCA2|PPC476|PPCVLE, {BFF, U}}, {"mtfsfi.", XRC(63,134,1), XWRA_MASK|(3<<21)|(1<<11), POWER6|PPCA2|PPC476, PPCVLE, {BFF, U, W}}, {"mtfsfi.", XRC(63,134,1), XRA_MASK|(3<<21)|(1<<11), COM, POWER6|PPCA2|PPC476|PPCVLE, {BFF, U}}, {"fnabs", XRC(63,136,0), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, {"fnabs.", XRC(63,136,1), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, {"fctiwu", XRC(63,142,0), XRA_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"fctiwu.", XRC(63,142,1), XRA_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"fctiwuz", XRC(63,143,0), XRA_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"fctiwuz.", XRC(63,143,1), XRA_MASK, POWER7, PPCVLE, {FRT, FRB}}, {"ftsqrt", X(63,160), XBF_MASK|FRA_MASK, POWER7, PPCVLE, {BF, FRB}}, {"dtstexq", X(63,162), X_MASK, POWER6, PPCVLE, {BF, FRAp, FRBp}}, {"xscmpexpqp", X(63,164), XBF_MASK, PPCVSX3, PPCVLE, {BF, VA, VB}}, {"dtstdcq", Z(63,194), Z_MASK, POWER6, PPCVLE, {BF, FRAp, DCM}}, {"dtstdgq", Z(63,226), Z_MASK, POWER6, PPCVLE, {BF, FRAp, DGM}}, {"drintnq", ZRC(63,227,0), Z2_MASK, POWER6, PPCVLE, {R, FRTp, FRBp, RMC}}, {"drintnq.", ZRC(63,227,1), Z2_MASK, POWER6, PPCVLE, {R, 
FRTp, FRBp, RMC}}, {"dctqpq", XRC(63,258,0), X_MASK, POWER6, PPCVLE, {FRTp, FRB}}, {"dctqpq.", XRC(63,258,1), X_MASK, POWER6, PPCVLE, {FRTp, FRB}}, {"fabs", XRC(63,264,0), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, {"fabs.", XRC(63,264,1), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, {"dctfixq", XRC(63,290,0), X_MASK, POWER6, PPCVLE, {FRT, FRBp}}, {"dctfixq.", XRC(63,290,1), X_MASK, POWER6, PPCVLE, {FRT, FRBp}}, {"ddedpdq", XRC(63,322,0), X_MASK, POWER6, PPCVLE, {SP, FRTp, FRBp}}, {"ddedpdq.", XRC(63,322,1), X_MASK, POWER6, PPCVLE, {SP, FRTp, FRBp}}, {"dxexq", XRC(63,354,0), X_MASK, POWER6, PPCVLE, {FRT, FRBp}}, {"dxexq.", XRC(63,354,1), X_MASK, POWER6, PPCVLE, {FRT, FRBp}}, {"xsmaddqp", XRC(63,388,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"xsmaddqpo", XRC(63,388,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"frin", XRC(63,392,0), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, {"frin.", XRC(63,392,1), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, {"xsmsubqp", XRC(63,420,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"xsmsubqpo", XRC(63,420,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"friz", XRC(63,424,0), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, {"friz.", XRC(63,424,1), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, {"xsnmaddqp", XRC(63,452,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"xsnmaddqpo", XRC(63,452,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"frip", XRC(63,456,0), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, {"frip.", XRC(63,456,1), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, {"xsnmsubqp", XRC(63,484,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"xsnmsubqpo", XRC(63,484,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"frim", XRC(63,488,0), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, {"frim.", XRC(63,488,1), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, {"dsubq", XRC(63,514,0), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, {"dsubq.", XRC(63,514,1), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, {"xssubqp", XRC(63,516,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"xssubqpo", XRC(63,516,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"ddivq", XRC(63,546,0), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, {"ddivq.", XRC(63,546,1), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, {"xsdivqp", XRC(63,548,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"xsdivqpo", XRC(63,548,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"mffs", XRC(63,583,0), XRARB_MASK, COM, PPCEFS|PPCVLE, {FRT}}, {"mffs.", XRC(63,583,1), XRARB_MASK, COM, PPCEFS|PPCVLE, {FRT}}, {"mffsce", XMMF(63,583,0,1), XMMF_MASK|RB_MASK, POWER9, PPCVLE, {FRT}}, {"mffscdrn", XMMF(63,583,2,4), XMMF_MASK, POWER9, PPCVLE, {FRT, FRB}}, {"mffscdrni", XMMF(63,583,2,5), XMMF_MASK|(3<<14), POWER9, PPCVLE, {FRT, DRM}}, {"mffscrn", XMMF(63,583,2,6), XMMF_MASK, POWER9, PPCVLE, {FRT, FRB}}, {"mffscrni", XMMF(63,583,2,7), XMMF_MASK|(7<<13), POWER9, PPCVLE, {FRT, RM}}, {"mffsl", XMMF(63,583,3,0), XMMF_MASK|RB_MASK, POWER9, PPCVLE, {FRT}}, {"dcmpuq", X(63,642), X_MASK, POWER6, PPCVLE, {BF, FRAp, FRBp}}, {"xscmpuqp", X(63,644), XBF_MASK, PPCVSX3, PPCVLE, {BF, VA, VB}}, {"dtstsfq", X(63,674), X_MASK, POWER6, PPCVLE, {BF, FRA, FRBp}}, {"dtstsfiq", X(63,675), X_MASK|1<<22,POWER9, PPCVLE, {BF, UIM6, FRBp}}, {"xststdcqp", X(63,708), X_MASK, PPCVSX3, PPCVLE, {BF, VB, DCMX}}, {"mtfsf", XFL(63,711,0), XFL_MASK, POWER6|PPCA2|PPC476, PPCVLE, {FLM, FRB, XFL_L, W}}, {"mtfsf", XFL(63,711,0), XFL_MASK, COM, POWER6|PPCA2|PPC476|PPCEFS|PPCVLE, {FLM, FRB}}, {"mtfsf.", XFL(63,711,1), XFL_MASK, POWER6|PPCA2|PPC476, PPCVLE, {FLM, FRB, XFL_L, W}}, {"mtfsf.", XFL(63,711,1), XFL_MASK, COM, POWER6|PPCA2|PPC476|PPCEFS|PPCVLE, {FLM, FRB}}, 
{"drdpq", XRC(63,770,0), X_MASK, POWER6, PPCVLE, {FRTp, FRBp}}, {"drdpq.", XRC(63,770,1), X_MASK, POWER6, PPCVLE, {FRTp, FRBp}}, {"dcffixq", XRC(63,802,0), X_MASK, POWER6, PPCVLE, {FRTp, FRB}}, {"dcffixq.", XRC(63,802,1), X_MASK, POWER6, PPCVLE, {FRTp, FRB}}, {"xsabsqp", XVA(63,804,0), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xsxexpqp", XVA(63,804,2), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xsnabsqp", XVA(63,804,8), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xsnegqp", XVA(63,804,16), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xsxsigqp", XVA(63,804,18), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xssqrtqp", XVARC(63,804,27,0), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xssqrtqpo", XVARC(63,804,27,1), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"fctid", XRC(63,814,0), XRA_MASK, PPC64, PPCVLE, {FRT, FRB}}, {"fctid", XRC(63,814,0), XRA_MASK, PPC476, PPCVLE, {FRT, FRB}}, {"fctid.", XRC(63,814,1), XRA_MASK, PPC64, PPCVLE, {FRT, FRB}}, {"fctid.", XRC(63,814,1), XRA_MASK, PPC476, PPCVLE, {FRT, FRB}}, {"fctidz", XRC(63,815,0), XRA_MASK, PPC64, PPCVLE, {FRT, FRB}}, {"fctidz", XRC(63,815,0), XRA_MASK, PPC476, PPCVLE, {FRT, FRB}}, {"fctidz.", XRC(63,815,1), XRA_MASK, PPC64, PPCVLE, {FRT, FRB}}, {"fctidz.", XRC(63,815,1), XRA_MASK, PPC476, PPCVLE, {FRT, FRB}}, {"denbcdq", XRC(63,834,0), X_MASK, POWER6, PPCVLE, {S, FRTp, FRBp}}, {"denbcdq.", XRC(63,834,1), X_MASK, POWER6, PPCVLE, {S, FRTp, FRBp}}, {"xscvqpuwz", XVA(63,836,1), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xscvudqp", XVA(63,836,2), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xscvqpswz", XVA(63,836,9), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xscvsdqp", XVA(63,836,10), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xscvqpudz", XVA(63,836,17), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xscvqpdp", XVARC(63,836,20,0), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xscvqpdpo", XVARC(63,836,20,1), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xscvdpqp", XVA(63,836,22), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"xscvqpsdz", XVA(63,836,25), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, {"fmrgow", X(63,838), X_MASK, PPCVSX2, PPCVLE, {FRT, FRA, FRB}}, {"fcfid", XRC(63,846,0), XRA_MASK, PPC64, PPCVLE, {FRT, FRB}}, {"fcfid", XRC(63,846,0), XRA_MASK, PPC476, PPCVLE, {FRT, FRB}}, {"fcfid.", XRC(63,846,1), XRA_MASK, PPC64, PPCVLE, {FRT, FRB}}, {"fcfid.", XRC(63,846,1), XRA_MASK, PPC476, PPCVLE, {FRT, FRB}}, {"diexq", XRC(63,866,0), X_MASK, POWER6, PPCVLE, {FRTp, FRA, FRBp}}, {"diexq.", XRC(63,866,1), X_MASK, POWER6, PPCVLE, {FRTp, FRA, FRBp}}, {"xsiexpqp", X(63,868), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, {"fctidu", XRC(63,942,0), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, {"fctidu.", XRC(63,942,1), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, {"fctiduz", XRC(63,943,0), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, {"fctiduz.", XRC(63,943,1), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, {"fmrgew", X(63,966), X_MASK, PPCVSX2, PPCVLE, {FRT, FRA, FRB}}, {"fcfidu", XRC(63,974,0), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, {"fcfidu.", XRC(63,974,1), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, }; const int powerpc_num_opcodes = sizeof (powerpc_opcodes) / sizeof (powerpc_opcodes[0]); /* The VLE opcode table. The format of this opcode table is the same as the main opcode table. 
*/ const struct powerpc_opcode vle_opcodes[] = { {"se_illegal", C(0), C_MASK, PPCVLE, 0, {}}, {"se_isync", C(1), C_MASK, PPCVLE, 0, {}}, {"se_sc", C(2), C_MASK, PPCVLE, 0, {}}, {"se_blr", C_LK(2,0), C_LK_MASK, PPCVLE, 0, {}}, {"se_blrl", C_LK(2,1), C_LK_MASK, PPCVLE, 0, {}}, {"se_bctr", C_LK(3,0), C_LK_MASK, PPCVLE, 0, {}}, {"se_bctrl", C_LK(3,1), C_LK_MASK, PPCVLE, 0, {}}, {"se_rfi", C(8), C_MASK, PPCVLE, 0, {}}, {"se_rfci", C(9), C_MASK, PPCVLE, 0, {}}, {"se_rfdi", C(10), C_MASK, PPCVLE, 0, {}}, {"se_rfmci", C(11), C_MASK, PPCRFMCI|PPCVLE, 0, {}}, {"se_not", SE_R(0,2), SE_R_MASK, PPCVLE, 0, {RX}}, {"se_neg", SE_R(0,3), SE_R_MASK, PPCVLE, 0, {RX}}, {"se_mflr", SE_R(0,8), SE_R_MASK, PPCVLE, 0, {RX}}, {"se_mtlr", SE_R(0,9), SE_R_MASK, PPCVLE, 0, {RX}}, {"se_mfctr", SE_R(0,10), SE_R_MASK, PPCVLE, 0, {RX}}, {"se_mtctr", SE_R(0,11), SE_R_MASK, PPCVLE, 0, {RX}}, {"se_extzb", SE_R(0,12), SE_R_MASK, PPCVLE, 0, {RX}}, {"se_extsb", SE_R(0,13), SE_R_MASK, PPCVLE, 0, {RX}}, {"se_extzh", SE_R(0,14), SE_R_MASK, PPCVLE, 0, {RX}}, {"se_extsh", SE_R(0,15), SE_R_MASK, PPCVLE, 0, {RX}}, {"se_mr", SE_RR(0,1), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_mtar", SE_RR(0,2), SE_RR_MASK, PPCVLE, 0, {ARX, RY}}, {"se_mfar", SE_RR(0,3), SE_RR_MASK, PPCVLE, 0, {RX, ARY}}, {"se_add", SE_RR(1,0), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_mullw", SE_RR(1,1), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_sub", SE_RR(1,2), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_subf", SE_RR(1,3), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_cmp", SE_RR(3,0), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_cmpl", SE_RR(3,1), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_cmph", SE_RR(3,2), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_cmphl", SE_RR(3,3), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"e_cmpi", SCI8BF(6,0,21), SCI8BF_MASK, PPCVLE, 0, {CRD32, RA, SCLSCI8}}, {"e_cmpwi", SCI8BF(6,0,21), SCI8BF_MASK, PPCVLE, 0, {CRD32, RA, SCLSCI8}}, {"e_cmpli", SCI8BF(6,1,21), SCI8BF_MASK, PPCVLE, 0, {CRD32, RA, SCLSCI8}}, {"e_cmplwi", SCI8BF(6,1,21), SCI8BF_MASK, PPCVLE, 0, {CRD32, RA, SCLSCI8}}, {"e_addi", SCI8(6,16), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, {"e_subi", SCI8(6,16), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8N}}, {"e_addi.", SCI8(6,17), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, {"e_addic", SCI8(6,18), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, {"e_subic", SCI8(6,18), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8N}}, {"e_addic.", SCI8(6,19), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, {"e_subic.", SCI8(6,19), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8N}}, {"e_mulli", SCI8(6,20), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, {"e_subfic", SCI8(6,22), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, {"e_subfic.", SCI8(6,23), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, {"e_andi", SCI8(6,24), SCI8_MASK, PPCVLE, 0, {RA, RS, SCLSCI8}}, {"e_andi.", SCI8(6,25), SCI8_MASK, PPCVLE, 0, {RA, RS, SCLSCI8}}, {"e_nop", SCI8(6,26), 0xffffffff, PPCVLE, 0, {0}}, {"e_ori", SCI8(6,26), SCI8_MASK, PPCVLE, 0, {RA, RS, SCLSCI8}}, {"e_ori.", SCI8(6,27), SCI8_MASK, PPCVLE, 0, {RA, RS, SCLSCI8}}, {"e_xori", SCI8(6,28), SCI8_MASK, PPCVLE, 0, {RA, RS, SCLSCI8}}, {"e_xori.", SCI8(6,29), SCI8_MASK, PPCVLE, 0, {RA, RS, SCLSCI8}}, {"e_lbzu", OPVUP(6,0), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, {"e_lhau", OPVUP(6,3), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, {"e_lhzu", OPVUP(6,1), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, {"e_lmw", OPVUP(6,8), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, {"e_lwzu", OPVUP(6,2), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, {"e_stbu", OPVUP(6,4), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, {"e_sthu", OPVUP(6,5), OPVUP_MASK, PPCVLE, 0, {RT, 
D8, RA0}}, {"e_stwu", OPVUP(6,6), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, {"e_stmw", OPVUP(6,9), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, {"e_ldmvgprw", OPVUPRT(6,16,0),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, {"e_stmvgprw", OPVUPRT(6,17,0),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, {"e_ldmvsprw", OPVUPRT(6,16,1),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, {"e_stmvsprw", OPVUPRT(6,17,1),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, {"e_ldmvsrrw", OPVUPRT(6,16,4),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, {"e_stmvsrrw", OPVUPRT(6,17,4),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, {"e_ldmvcsrrw", OPVUPRT(6,16,5),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, {"e_stmvcsrrw", OPVUPRT(6,17,5),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, {"e_ldmvdsrrw", OPVUPRT(6,16,6),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, {"e_stmvdsrrw", OPVUPRT(6,17,6),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, {"e_add16i", OP(7), OP_MASK, PPCVLE, 0, {RT, RA, SI}}, {"e_la", OP(7), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, {"e_sub16i", OP(7), OP_MASK, PPCVLE, 0, {RT, RA, NSI}}, {"se_addi", SE_IM5(8,0), SE_IM5_MASK, PPCVLE, 0, {RX, OIMM5}}, {"se_cmpli", SE_IM5(8,1), SE_IM5_MASK, PPCVLE, 0, {RX, OIMM5}}, {"se_subi", SE_IM5(9,0), SE_IM5_MASK, PPCVLE, 0, {RX, OIMM5}}, {"se_subi.", SE_IM5(9,1), SE_IM5_MASK, PPCVLE, 0, {RX, OIMM5}}, {"se_cmpi", SE_IM5(10,1), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, {"se_bmaski", SE_IM5(11,0), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, {"se_andi", SE_IM5(11,1), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, {"e_lbz", OP(12), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, {"e_stb", OP(13), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, {"e_lha", OP(14), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, {"se_srw", SE_RR(16,0), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_sraw", SE_RR(16,1), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_slw", SE_RR(16,2), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_nop", SE_RR(17,0), 0xffff, PPCVLE, 0, {0}}, {"se_or", SE_RR(17,0), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_andc", SE_RR(17,1), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_and", SE_RR(17,2), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_and.", SE_RR(17,3), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, {"se_li", IM7(9), IM7_MASK, PPCVLE, 0, {RX, UI7}}, {"e_lwz", OP(20), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, {"e_stw", OP(21), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, {"e_lhz", OP(22), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, {"e_sth", OP(23), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, {"se_bclri", SE_IM5(24,0), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, {"se_bgeni", SE_IM5(24,1), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, {"se_bseti", SE_IM5(25,0), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, {"se_btsti", SE_IM5(25,1), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, {"se_srwi", SE_IM5(26,0), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, {"se_srawi", SE_IM5(26,1), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, {"se_slwi", SE_IM5(27,0), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, {"e_lis", I16L(28,28), I16L_MASK, PPCVLE, 0, {RD, VLEUIMML}}, {"e_and2is.", I16L(28,29), I16L_MASK, PPCVLE, 0, {RD, VLEUIMML}}, {"e_or2is", I16L(28,26), I16L_MASK, PPCVLE, 0, {RD, VLEUIMML}}, {"e_and2i.", I16L(28,25), I16L_MASK, PPCVLE, 0, {RD, VLEUIMML}}, {"e_or2i", I16L(28,24), I16L_MASK, PPCVLE, 0, {RD, VLEUIMML}}, {"e_cmphl16i", IA16(28,23), IA16_MASK, PPCVLE, 0, {RA, VLEUIMM}}, {"e_cmph16i", IA16(28,22), IA16_MASK, PPCVLE, 0, {RA, VLESIMM}}, {"e_cmpl16i", I16A(28,21), I16A_MASK, PPCVLE, 0, {RA, VLEUIMM}}, {"e_mull2i", I16A(28,20), I16A_MASK, PPCVLE, 0, {RA, VLESIMM}}, {"e_cmp16i", IA16(28,19), IA16_MASK, PPCVLE, 0, {RA, VLESIMM}}, {"e_sub2is", I16A(28,18), I16A_MASK, PPCVLE, 0, {RA, VLENSIMM}}, {"e_add2is", I16A(28,18), I16A_MASK, PPCVLE, 0, {RA, VLESIMM}}, {"e_sub2i.", I16A(28,17), I16A_MASK, 
PPCVLE, 0, {RA, VLENSIMM}}, {"e_add2i.", I16A(28,17), I16A_MASK, PPCVLE, 0, {RA, VLESIMM}}, {"e_li", LI20(28,0), LI20_MASK, PPCVLE, 0, {RT, IMM20}}, {"e_rlwimi", M(29,0), M_MASK, PPCVLE, 0, {RA, RS, SH, MB, ME}}, {"e_rlwinm", M(29,1), M_MASK, PPCVLE, 0, {RA, RT, SH, MBE, ME}}, {"e_b", BD24(30,0,0), BD24_MASK, PPCVLE, 0, {B24}}, {"e_bl", BD24(30,0,1), BD24_MASK, PPCVLE, 0, {B24}}, {"e_bdnz", EBD15(30,8,BO32DNZ,0), EBD15_MASK, PPCVLE, 0, {B15}}, {"e_bdnzl", EBD15(30,8,BO32DNZ,1), EBD15_MASK, PPCVLE, 0, {B15}}, {"e_bdz", EBD15(30,8,BO32DZ,0), EBD15_MASK, PPCVLE, 0, {B15}}, {"e_bdzl", EBD15(30,8,BO32DZ,1), EBD15_MASK, PPCVLE, 0, {B15}}, {"e_bge", EBD15BI(30,8,BO32F,CBLT,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bgel", EBD15BI(30,8,BO32F,CBLT,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bnl", EBD15BI(30,8,BO32F,CBLT,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bnll", EBD15BI(30,8,BO32F,CBLT,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_blt", EBD15BI(30,8,BO32T,CBLT,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bltl", EBD15BI(30,8,BO32T,CBLT,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bgt", EBD15BI(30,8,BO32T,CBGT,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bgtl", EBD15BI(30,8,BO32T,CBGT,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_ble", EBD15BI(30,8,BO32F,CBGT,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_blel", EBD15BI(30,8,BO32F,CBGT,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bng", EBD15BI(30,8,BO32F,CBGT,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bngl", EBD15BI(30,8,BO32F,CBGT,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bne", EBD15BI(30,8,BO32F,CBEQ,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bnel", EBD15BI(30,8,BO32F,CBEQ,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_beq", EBD15BI(30,8,BO32T,CBEQ,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_beql", EBD15BI(30,8,BO32T,CBEQ,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bso", EBD15BI(30,8,BO32T,CBSO,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bsol", EBD15BI(30,8,BO32T,CBSO,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bun", EBD15BI(30,8,BO32T,CBSO,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bunl", EBD15BI(30,8,BO32T,CBSO,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bns", EBD15BI(30,8,BO32F,CBSO,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bnsl", EBD15BI(30,8,BO32F,CBSO,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bnu", EBD15BI(30,8,BO32F,CBSO,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bnul", EBD15BI(30,8,BO32F,CBSO,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, {"e_bc", BD15(30,8,0), BD15_MASK, PPCVLE, 0, {BO32, BI32, B15}}, {"e_bcl", BD15(30,8,1), BD15_MASK, PPCVLE, 0, {BO32, BI32, B15}}, {"e_bf", EBD15(30,8,BO32F,0), EBD15_MASK, PPCVLE, 0, {BI32,B15}}, {"e_bfl", EBD15(30,8,BO32F,1), EBD15_MASK, PPCVLE, 0, {BI32,B15}}, {"e_bt", EBD15(30,8,BO32T,0), EBD15_MASK, PPCVLE, 0, {BI32,B15}}, {"e_btl", EBD15(30,8,BO32T,1), EBD15_MASK, PPCVLE, 0, {BI32,B15}}, {"e_cmph", X(31,14), X_MASK, PPCVLE, 0, {CRD, RA, RB}}, {"e_cmphl", X(31,46), X_MASK, PPCVLE, 0, {CRD, RA, RB}}, {"e_crandc", XL(31,129), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, {"e_crnand", XL(31,225), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, {"e_crnot", XL(31,33), XL_MASK, PPCVLE, 0, {BT, BA, BBA}}, {"e_crnor", XL(31,33), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, {"e_crclr", XL(31,193), XL_MASK, PPCVLE, 0, {BT, BAT, BBA}}, {"e_crxor", XL(31,193), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, {"e_mcrf", XL(31,16), XL_MASK, PPCVLE, 0, {CRD, CR}}, {"e_slwi", EX(31,112), EX_MASK, PPCVLE, 0, {RA, RS, SH}}, {"e_slwi.", EX(31,113), EX_MASK, PPCVLE, 0, {RA, RS, SH}}, {"e_crand", XL(31,257), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, 
{"e_rlw", EX(31,560), EX_MASK, PPCVLE, 0, {RA, RS, RB}}, {"e_rlw.", EX(31,561), EX_MASK, PPCVLE, 0, {RA, RS, RB}}, {"e_crset", XL(31,289), XL_MASK, PPCVLE, 0, {BT, BAT, BBA}}, {"e_creqv", XL(31,289), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, {"e_rlwi", EX(31,624), EX_MASK, PPCVLE, 0, {RA, RS, SH}}, {"e_rlwi.", EX(31,625), EX_MASK, PPCVLE, 0, {RA, RS, SH}}, {"e_crorc", XL(31,417), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, {"e_crmove", XL(31,449), XL_MASK, PPCVLE, 0, {BT, BA, BBA}}, {"e_cror", XL(31,449), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, {"mtmas1", XSPR(31,467,625), XSPR_MASK, PPCVLE, 0, {RS}}, {"e_srwi", EX(31,1136), EX_MASK, PPCVLE, 0, {RA, RS, SH}}, {"e_srwi.", EX(31,1137), EX_MASK, PPCVLE, 0, {RA, RS, SH}}, {"se_lbz", SD4(8), SD4_MASK, PPCVLE, 0, {RZ, SE_SD, RX}}, {"se_stb", SD4(9), SD4_MASK, PPCVLE, 0, {RZ, SE_SD, RX}}, {"se_lhz", SD4(10), SD4_MASK, PPCVLE, 0, {RZ, SE_SDH, RX}}, {"se_sth", SD4(11), SD4_MASK, PPCVLE, 0, {RZ, SE_SDH, RX}}, {"se_lwz", SD4(12), SD4_MASK, PPCVLE, 0, {RZ, SE_SDW, RX}}, {"se_stw", SD4(13), SD4_MASK, PPCVLE, 0, {RZ, SE_SDW, RX}}, {"se_bge", EBD8IO(28,0,0), EBD8IO3_MASK, PPCVLE, 0, {B8}}, {"se_bnl", EBD8IO(28,0,0), EBD8IO3_MASK, PPCVLE, 0, {B8}}, {"se_ble", EBD8IO(28,0,1), EBD8IO3_MASK, PPCVLE, 0, {B8}}, {"se_bng", EBD8IO(28,0,1), EBD8IO3_MASK, PPCVLE, 0, {B8}}, {"se_bne", EBD8IO(28,0,2), EBD8IO3_MASK, PPCVLE, 0, {B8}}, {"se_bns", EBD8IO(28,0,3), EBD8IO3_MASK, PPCVLE, 0, {B8}}, {"se_bnu", EBD8IO(28,0,3), EBD8IO3_MASK, PPCVLE, 0, {B8}}, {"se_bf", EBD8IO(28,0,0), EBD8IO2_MASK, PPCVLE, 0, {BI16, B8}}, {"se_blt", EBD8IO(28,1,0), EBD8IO3_MASK, PPCVLE, 0, {B8}}, {"se_bgt", EBD8IO(28,1,1), EBD8IO3_MASK, PPCVLE, 0, {B8}}, {"se_beq", EBD8IO(28,1,2), EBD8IO3_MASK, PPCVLE, 0, {B8}}, {"se_bso", EBD8IO(28,1,3), EBD8IO3_MASK, PPCVLE, 0, {B8}}, {"se_bun", EBD8IO(28,1,3), EBD8IO3_MASK, PPCVLE, 0, {B8}}, {"se_bt", EBD8IO(28,1,0), EBD8IO2_MASK, PPCVLE, 0, {BI16, B8}}, {"se_bc", BD8IO(28), BD8IO_MASK, PPCVLE, 0, {BO16, BI16, B8}}, {"se_b", BD8(58,0,0), BD8_MASK, PPCVLE, 0, {B8}}, {"se_bl", BD8(58,0,1), BD8_MASK, PPCVLE, 0, {B8}}, }; const int vle_num_opcodes = sizeof (vle_opcodes) / sizeof (vle_opcodes[0]); /* The macro table. This is only used by the assembler. */ /* The expressions of the form (-x ! 31) & (x | 31) have the value 0 when x=0; 32-x when x is between 1 and 31; are negative if x is negative; and are 32 or more otherwise. This is what you want when, for instance, you are emulating a right shift by a rotate-left-and-mask, because the underlying instructions support shifts of size 0 but not shifts of size 32. By comparison, when extracting x bits from some word you want to use just 32-x, because the underlying instructions don't support extracting 0 bits but do support extracting the whole word (32 bits in this case). */ const struct powerpc_macro powerpc_macros[] = { {"extldi", 4, PPC64, "rldicr %0,%1,%3,(%2)-1"}, {"extldi.", 4, PPC64, "rldicr. %0,%1,%3,(%2)-1"}, {"extrdi", 4, PPC64, "rldicl %0,%1,((%2)+(%3))&((%2)+(%3)<>64),64-(%2)"}, {"extrdi.", 4, PPC64, "rldicl. %0,%1,((%2)+(%3))&((%2)+(%3)<>64),64-(%2)"}, {"insrdi", 4, PPC64, "rldimi %0,%1,64-((%2)+(%3)),%3"}, {"insrdi.", 4, PPC64, "rldimi. %0,%1,64-((%2)+(%3)),%3"}, {"rotrdi", 3, PPC64, "rldicl %0,%1,(-(%2)!63)&((%2)|63),0"}, {"rotrdi.", 3, PPC64, "rldicl. %0,%1,(-(%2)!63)&((%2)|63),0"}, {"sldi", 3, PPC64, "rldicr %0,%1,%2,63-(%2)"}, {"sldi.", 3, PPC64, "rldicr. %0,%1,%2,63-(%2)"}, {"srdi", 3, PPC64, "rldicl %0,%1,(-(%2)!63)&((%2)|63),%2"}, {"srdi.", 3, PPC64, "rldicl. 
%0,%1,(-(%2)!63)&((%2)|63),%2"}, {"clrrdi", 3, PPC64, "rldicr %0,%1,0,63-(%2)"}, {"clrrdi.", 3, PPC64, "rldicr. %0,%1,0,63-(%2)"}, {"clrlsldi", 4, PPC64, "rldic %0,%1,%3,(%2)-(%3)"}, {"clrlsldi.",4, PPC64, "rldic. %0,%1,%3,(%2)-(%3)"}, {"extlwi", 4, PPCCOM, "rlwinm %0,%1,%3,0,(%2)-1"}, {"extlwi.", 4, PPCCOM, "rlwinm. %0,%1,%3,0,(%2)-1"}, {"extrwi", 4, PPCCOM, "rlwinm %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31"}, {"extrwi.", 4, PPCCOM, "rlwinm. %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31"}, {"inslwi", 4, PPCCOM, "rlwimi %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1"}, {"inslwi.", 4, PPCCOM, "rlwimi. %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1"}, {"insrwi", 4, PPCCOM, "rlwimi %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1"}, {"insrwi.", 4, PPCCOM, "rlwimi. %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1"}, {"rotrwi", 3, PPCCOM, "rlwinm %0,%1,(-(%2)!31)&((%2)|31),0,31"}, {"rotrwi.", 3, PPCCOM, "rlwinm. %0,%1,(-(%2)!31)&((%2)|31),0,31"}, {"slwi", 3, PPCCOM, "rlwinm %0,%1,%2,0,31-(%2)"}, {"sli", 3, PWRCOM, "rlinm %0,%1,%2,0,31-(%2)"}, {"slwi.", 3, PPCCOM, "rlwinm. %0,%1,%2,0,31-(%2)"}, {"sli.", 3, PWRCOM, "rlinm. %0,%1,%2,0,31-(%2)"}, {"srwi", 3, PPCCOM, "rlwinm %0,%1,(-(%2)!31)&((%2)|31),%2,31"}, {"sri", 3, PWRCOM, "rlinm %0,%1,(-(%2)!31)&((%2)|31),%2,31"}, {"srwi.", 3, PPCCOM, "rlwinm. %0,%1,(-(%2)!31)&((%2)|31),%2,31"}, {"sri.", 3, PWRCOM, "rlinm. %0,%1,(-(%2)!31)&((%2)|31),%2,31"}, {"clrrwi", 3, PPCCOM, "rlwinm %0,%1,0,0,31-(%2)"}, {"clrrwi.", 3, PPCCOM, "rlwinm. %0,%1,0,0,31-(%2)"}, {"clrlslwi", 4, PPCCOM, "rlwinm %0,%1,%3,(%2)-(%3),31-(%3)"}, {"clrlslwi.",4, PPCCOM, "rlwinm. %0,%1,%3,(%2)-(%3),31-(%3)"}, {"e_extlwi", 4, PPCVLE, "e_rlwinm %0,%1,%3,0,(%2)-1"}, {"e_extrwi", 4, PPCVLE, "e_rlwinm %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31"}, {"e_inslwi", 4, PPCVLE, "e_rlwimi %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1"}, {"e_insrwi", 4, PPCVLE, "e_rlwimi %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1"}, {"e_rotlwi", 3, PPCVLE, "e_rlwinm %0,%1,%2,0,31"}, {"e_rotrwi", 3, PPCVLE, "e_rlwinm %0,%1,(-(%2)!31)&((%2)|31),0,31"}, {"e_slwi", 3, PPCVLE, "e_rlwinm %0,%1,%2,0,31-(%2)"}, {"e_srwi", 3, PPCVLE, "e_rlwinm %0,%1,(-(%2)!31)&((%2)|31),%2,31"}, {"e_clrlwi", 3, PPCVLE, "e_rlwinm %0,%1,0,%2,31"}, {"e_clrrwi", 3, PPCVLE, "e_rlwinm %0,%1,0,0,31-(%2)"}, {"e_clrlslwi",4, PPCVLE, "e_rlwinm %0,%1,%3,(%2)-(%3),31-(%3)"}, }; const int powerpc_num_macros = sizeof (powerpc_macros) / sizeof (powerpc_macros[0]);
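/*
 * Editor's note -- an illustrative, standalone sketch, not part of the
 * binutils-derived tables above.  The shift macros such as
 * "srwi rd,rs,n" expand to "rlwinm rd,rs,(-(n)!31)&((n)|31),n,31", and
 * the comment above explains that this expression is 0 for n == 0 and
 * 32 - n for 1 <= n <= 31.  Assuming the assembler's '!' is the
 * bitwise or-not operator (a ! b == a | ~b), the user-space C below
 * mimics the expression and checks that the resulting rotate-and-mask
 * really behaves as a logical right shift.  All demo_* names are
 * hypothetical and exist only for this example.
 */
#include <stdio.h>
#include <stdint.h>

/* (-n ! 31) & (n | 31): yields 0 when n == 0, 32 - n when 1 <= n <= 31 */
static unsigned int demo_srwi_rotate(int n)
{
	return (unsigned int)((-n | ~31) & (n | 31));
}

/* Emulate "srwi rd,rs,n" as "rlwinm rd,rs,32-n,n,31" on a 32-bit value */
static uint32_t demo_srwi(uint32_t rs, int n)
{
	unsigned int sh = demo_srwi_rotate(n);		/* rotate-left amount */
	uint32_t rot = (rs << sh) | (rs >> ((32 - sh) & 31));
	uint32_t mask = 0xffffffffu >> n;		/* keep bits n..31 (IBM numbering) */

	return rot & mask;
}

int main(void)
{
	uint32_t x = 0x8000001fu;
	int n;

	for (n = 0; n < 32; n++)
		if (demo_srwi(x, n) != x >> n)
			printf("mismatch at n = %d\n", n);
	printf("srwi-as-rlwinm emulation checked for n = 0..31\n");
	return 0;
}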
linux-master
arch/powerpc/xmon/ppc-opc.c
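/*
 * Editor's note -- an illustrative, standalone sketch, not code from the
 * kernel tree.  Opcode tables like vle_opcodes above pair each mnemonic
 * with an opcode value and a mask of the fixed bits; a disassembler
 * typically reports the first entry for which (insn & mask) == opcode.
 * The demo_* names and the two sample entries below are simplified
 * stand-ins, not entries copied from the table.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_opcode {
	const char *name;
	uint32_t opcode;	/* value of the fixed bits */
	uint32_t mask;		/* which bits are fixed */
};

static const struct demo_opcode demo_table[] = {
	{ "demo_nop",  0x60000000u, 0xffffffffu },	/* every bit fixed */
	{ "demo_addi", 0x38000000u, 0xfc000000u },	/* only the major opcode fixed */
};

static const char *demo_lookup(uint32_t insn)
{
	size_t i;

	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
		if ((insn & demo_table[i].mask) == demo_table[i].opcode)
			return demo_table[i].name;
	return "<unknown>";
}

int main(void)
{
	printf("%s\n", demo_lookup(0x38210010u));	/* matched by demo_addi */
	return 0;
}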
// SPDX-License-Identifier: GPL-2.0-or-later
/* SPU opcode list

   Copyright 2006 Free Software Foundation, Inc.

   This file is part of GDB, GAS, and the GNU binutils.  */

#include <linux/kernel.h>
#include <linux/bug.h>
#include "spu.h"

/* This file holds the Spu opcode table */

/*
   Example contents of spu-insns.h
      id_tag	mode	mode	type	opcode	mnemonic	asmtype		dependency	FPU	L/S?	branch?	instruction
		QUAD	WORD						(0,RC,RB,RA,RT)	latency
   APUOP(M_LQD,	1,	0,	RI9,	0x1f8,	"lqd",		ASM_RI9IDX,	00012,		FXU,	1,	0)	Load Quadword d-form
 */

const struct spu_opcode spu_opcodes[] = {
#define APUOP(TAG,MACFORMAT,OPCODE,MNEMONIC,ASMFORMAT,DEP,PIPE) \
	{ MACFORMAT, OPCODE, MNEMONIC, ASMFORMAT },
#define APUOPFB(TAG,MACFORMAT,OPCODE,FB,MNEMONIC,ASMFORMAT,DEP,PIPE) \
	{ MACFORMAT, OPCODE, MNEMONIC, ASMFORMAT },
#include "spu-insns.h"
#undef APUOP
#undef APUOPFB
};

const int spu_num_opcodes = ARRAY_SIZE(spu_opcodes);
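/*
 * Editor's note -- an illustrative, standalone miniature of the "X-macro"
 * technique used above: spu-insns.h carries one APUOP()/APUOPFB() line per
 * instruction, and each includer defines those macros to extract only the
 * fields it cares about.  DEMO_INSNS and its second entry are made up; the
 * lqd/0x1f8 pair is taken from the example comment in the file above.
 */
#include <stdio.h>

#define DEMO_INSNS					\
	DEMO_OP(0x1f8, "lqd",  "load quadword d-form")	\
	DEMO_OP(0x0c1, "demo", "hypothetical entry")

struct demo_spu_opcode {
	unsigned int opcode;
	const char *mnemonic;
};

static const struct demo_spu_opcode demo_spu_opcodes[] = {
#define DEMO_OP(OPCODE, MNEMONIC, DESC) { OPCODE, MNEMONIC },
	DEMO_INSNS
#undef DEMO_OP
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(demo_spu_opcodes) / sizeof(demo_spu_opcodes[0]); i++)
		printf("0x%03x %s\n", demo_spu_opcodes[i].opcode,
		       demo_spu_opcodes[i].mnemonic);
	return 0;
}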
linux-master
arch/powerpc/xmon/spu-opc.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Routines providing a simple monitor for use on the PowerMac. * * Copyright (C) 1996-2005 Paul Mackerras. * Copyright (C) 2001 PPC64 Team, IBM Corp * Copyrignt (C) 2006 Michael Ellerman, IBM Corp */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/sched/signal.h> #include <linux/smp.h> #include <linux/mm.h> #include <linux/reboot.h> #include <linux/delay.h> #include <linux/kallsyms.h> #include <linux/kmsg_dump.h> #include <linux/cpumask.h> #include <linux/export.h> #include <linux/sysrq.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/bug.h> #include <linux/nmi.h> #include <linux/ctype.h> #include <linux/highmem.h> #include <linux/security.h> #include <linux/debugfs.h> #include <asm/ptrace.h> #include <asm/smp.h> #include <asm/string.h> #include <asm/machdep.h> #include <asm/xmon.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/mmu_context.h> #include <asm/plpar_wrappers.h> #include <asm/cputable.h> #include <asm/rtas.h> #include <asm/sstep.h> #include <asm/irq_regs.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/setjmp.h> #include <asm/reg.h> #include <asm/debug.h> #include <asm/hw_breakpoint.h> #include <asm/xive.h> #include <asm/opal.h> #include <asm/firmware.h> #include <asm/code-patching.h> #include <asm/sections.h> #include <asm/inst.h> #include <asm/interrupt.h> #ifdef CONFIG_PPC64 #include <asm/hvcall.h> #include <asm/paca.h> #include <asm/lppaca.h> #endif #include "nonstdio.h" #include "dis-asm.h" #include "xmon_bpts.h" #ifdef CONFIG_SMP static cpumask_t cpus_in_xmon = CPU_MASK_NONE; static unsigned long xmon_taken = 1; static int xmon_owner; static int xmon_gate; static int xmon_batch; static unsigned long xmon_batch_start_cpu; static cpumask_t xmon_batch_cpus = CPU_MASK_NONE; #else #define xmon_owner 0 #endif /* CONFIG_SMP */ static unsigned long in_xmon __read_mostly = 0; static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT); static bool xmon_is_ro = IS_ENABLED(CONFIG_XMON_DEFAULT_RO_MODE); static unsigned long adrs; static int size = 1; #define MAX_DUMP (64 * 1024) static unsigned long ndump = 64; #define MAX_IDUMP (MAX_DUMP >> 2) static unsigned long nidump = 16; static unsigned long ncsum = 4096; static int termch; static char tmpstr[KSYM_NAME_LEN]; static int tracing_enabled; static long bus_error_jmp[JMP_BUF_LEN]; static int catch_memory_errors; static int catch_spr_faults; static long *xmon_fault_jmp[NR_CPUS]; /* Breakpoint stuff */ struct bpt { unsigned long address; u32 *instr; atomic_t ref_count; int enabled; unsigned long pad; }; /* Bits in bpt.enabled */ #define BP_CIABR 1 #define BP_TRAP 2 #define BP_DABR 4 static struct bpt bpts[NBPTS]; static struct bpt dabr[HBP_NUM_MAX]; static struct bpt *iabr; static unsigned int bpinstr = PPC_RAW_TRAP(); #define BP_NUM(bp) ((bp) - bpts + 1) /* Prototypes */ static int cmds(struct pt_regs *); static int mread(unsigned long, void *, int); static int mwrite(unsigned long, void *, int); static int mread_instr(unsigned long, ppc_inst_t *); static int handle_fault(struct pt_regs *); static void byterev(unsigned char *, int); static void memex(void); static int bsesc(void); static void dump(void); static void show_pte(unsigned long); static void prdump(unsigned long, long); static int ppc_inst_dump(unsigned long, long, int); static void dump_log_buf(void); #ifdef CONFIG_SMP static int xmon_switch_cpu(unsigned long); static int xmon_batch_next_cpu(void); static int batch_cmds(struct pt_regs *); #endif #ifdef 
CONFIG_PPC_POWERNV static void dump_opal_msglog(void); #else static inline void dump_opal_msglog(void) { printf("Machine is not running OPAL firmware.\n"); } #endif static void backtrace(struct pt_regs *); static void excprint(struct pt_regs *); static void prregs(struct pt_regs *); static void memops(int); static void memlocate(void); static void memzcan(void); static void memdiffs(unsigned char *, unsigned char *, unsigned, unsigned); int skipbl(void); int scanhex(unsigned long *valp); static void scannl(void); static int hexdigit(int); void getstring(char *, int); static void flush_input(void); static int inchar(void); static void take_input(char *); static int read_spr(int, unsigned long *); static void write_spr(int, unsigned long); static void super_regs(void); static void remove_bpts(void); static void insert_bpts(void); static void remove_cpu_bpts(void); static void insert_cpu_bpts(void); static struct bpt *at_breakpoint(unsigned long pc); static struct bpt *in_breakpoint_table(unsigned long pc, unsigned long *offp); static int do_step(struct pt_regs *); static void bpt_cmds(void); static void cacheflush(void); static int cpu_cmd(void); static void csum(void); static void bootcmds(void); static void proccall(void); static void show_tasks(void); void dump_segments(void); static void symbol_lookup(void); static void xmon_show_stack(unsigned long sp, unsigned long lr, unsigned long pc); static void xmon_print_symbol(unsigned long address, const char *mid, const char *after); static const char *getvecname(unsigned long vec); static int do_spu_cmd(void); #ifdef CONFIG_44x static void dump_tlb_44x(void); #endif #ifdef CONFIG_PPC_BOOK3E_64 static void dump_tlb_book3e(void); #endif static void clear_all_bpt(void); #ifdef CONFIG_PPC64 #define REG "%.16lx" #else #define REG "%.8lx" #endif #ifdef __LITTLE_ENDIAN__ #define GETWORD(v) (((v)[3] << 24) + ((v)[2] << 16) + ((v)[1] << 8) + (v)[0]) #else #define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3]) #endif static const char *xmon_ro_msg = "Operation disabled: xmon in read-only mode\n"; static char *help_string = "\ Commands:\n\ b show breakpoints\n\ bd set data breakpoint\n\ bi set instruction breakpoint\n\ bc clear breakpoint\n" #ifdef CONFIG_SMP "\ c print cpus stopped in xmon\n\ c# try to switch to cpu number h (in hex)\n\ c# $ run command '$' (one of 'r','S' or 't') on all cpus in xmon\n" #endif "\ C checksum\n\ d dump bytes\n\ d1 dump 1 byte values\n\ d2 dump 2 byte values\n\ d4 dump 4 byte values\n\ d8 dump 8 byte values\n\ di dump instructions\n\ df dump float values\n\ dd dump double values\n\ dl dump the kernel log buffer\n" #ifdef CONFIG_PPC_POWERNV "\ do dump the OPAL message log\n" #endif #ifdef CONFIG_PPC64 "\ dp[#] dump paca for current cpu, or cpu #\n\ dpa dump paca for all possible cpus\n" #endif "\ dr dump stream of raw bytes\n\ dv dump virtual address translation \n\ dt dump the tracing buffers (uses printk)\n\ dtc dump the tracing buffers for current CPU (uses printk)\n\ " #ifdef CONFIG_PPC_POWERNV " dx# dump xive on CPU #\n\ dxi# dump xive irq state #\n\ dxa dump xive on all CPUs\n" #endif " e print exception information\n\ f flush cache\n\ la lookup symbol+offset of specified address\n\ ls lookup address of specified symbol\n\ lp s [#] lookup address of percpu symbol s for current cpu, or cpu #\n\ m examine/change memory\n\ mm move a block of memory\n\ ms set a block of memory\n\ md compare two blocks of memory\n\ ml locate a block of memory\n\ mz zero a block of memory\n\ mi show information about 
memory allocation\n\ p call a procedure\n\ P list processes/tasks\n\ r print registers\n\ s single step\n" #ifdef CONFIG_SPU_BASE " ss stop execution on all spus\n\ sr restore execution on stopped spus\n\ sf # dump spu fields for spu # (in hex)\n\ sd # dump spu local store for spu # (in hex)\n\ sdi # disassemble spu local store for spu # (in hex)\n" #endif " S print special registers\n\ Sa print all SPRs\n\ Sr # read SPR #\n\ Sw #v write v to SPR #\n\ t print backtrace\n\ x exit monitor and recover\n\ X exit monitor and don't recover\n" #if defined(CONFIG_PPC_BOOK3S_64) " u dump segment table or SLB\n" #elif defined(CONFIG_PPC_BOOK3S_32) " u dump segment registers\n" #elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E_64) " u dump TLB\n" #endif " U show uptime information\n" " ? help\n" " # n limit output to n lines per page (for dp, dpa, dl)\n" " zr reboot\n" " zh halt\n" ; #ifdef CONFIG_SECURITY static bool xmon_is_locked_down(void) { static bool lockdown; if (!lockdown) { lockdown = !!security_locked_down(LOCKDOWN_XMON_RW); if (lockdown) { printf("xmon: Disabled due to kernel lockdown\n"); xmon_is_ro = true; } } if (!xmon_is_ro) { xmon_is_ro = !!security_locked_down(LOCKDOWN_XMON_WR); if (xmon_is_ro) printf("xmon: Read-only due to kernel lockdown\n"); } return lockdown; } #else /* CONFIG_SECURITY */ static inline bool xmon_is_locked_down(void) { return false; } #endif static struct pt_regs *xmon_regs; static inline void sync(void) { asm volatile("sync; isync"); } static inline void cflush(void *p) { asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p)); } static inline void cinval(void *p) { asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p)); } /** * write_ciabr() - write the CIABR SPR * @ciabr: The value to write. * * This function writes a value to the CIARB register either directly * through mtspr instruction if the kernel is in HV privilege mode or * call a hypervisor function to achieve the same in case the kernel * is in supervisor privilege mode. */ static void write_ciabr(unsigned long ciabr) { if (!cpu_has_feature(CPU_FTR_ARCH_207S)) return; if (cpu_has_feature(CPU_FTR_HVMODE)) { mtspr(SPRN_CIABR, ciabr); return; } plpar_set_ciabr(ciabr); } /** * set_ciabr() - set the CIABR * @addr: The value to set. * * This function sets the correct privilege value into the HW * breakpoint address before writing it up in the CIABR register. */ static void set_ciabr(unsigned long addr) { addr &= ~CIABR_PRIV; if (cpu_has_feature(CPU_FTR_HVMODE)) addr |= CIABR_PRIV_HYPER; else addr |= CIABR_PRIV_SUPER; write_ciabr(addr); } /* * Disable surveillance (the service processor watchdog function) * while we are in xmon. * XXX we should re-enable it when we leave. :) */ #define SURVEILLANCE_TOKEN 9000 static inline void disable_surveillance(void) { #ifdef CONFIG_PPC_PSERIES /* Since this can't be a module, args should end up below 4GB. */ static struct rtas_args args; const s32 token = rtas_function_token(RTAS_FN_SET_INDICATOR); /* * At this point we have got all the cpus we can into * xmon, so there is hopefully no other cpu calling RTAS * at the moment, even though we don't take rtas.lock. * If we did try to take rtas.lock there would be a * real possibility of deadlock. 
*/ if (token == RTAS_UNKNOWN_SERVICE) return; rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0); #endif /* CONFIG_PPC_PSERIES */ } #ifdef CONFIG_SMP static int xmon_speaker; static void get_output_lock(void) { int me = smp_processor_id() + 0x100; int last_speaker = 0, prev; long timeout; if (xmon_speaker == me) return; for (;;) { last_speaker = cmpxchg(&xmon_speaker, 0, me); if (last_speaker == 0) return; /* * Wait a full second for the lock, we might be on a slow * console, but check every 100us. */ timeout = 10000; while (xmon_speaker == last_speaker) { if (--timeout > 0) { udelay(100); continue; } /* hostile takeover */ prev = cmpxchg(&xmon_speaker, last_speaker, me); if (prev == last_speaker) return; break; } } } static void release_output_lock(void) { xmon_speaker = 0; } int cpus_are_in_xmon(void) { return !cpumask_empty(&cpus_in_xmon); } static bool wait_for_other_cpus(int ncpus) { unsigned long timeout; /* We wait for 2s, which is a metric "little while" */ for (timeout = 20000; timeout != 0; --timeout) { if (cpumask_weight(&cpus_in_xmon) >= ncpus) return true; udelay(100); barrier(); } return false; } #else /* CONFIG_SMP */ static inline void get_output_lock(void) {} static inline void release_output_lock(void) {} #endif static void xmon_touch_watchdogs(void) { touch_softlockup_watchdog_sync(); rcu_cpu_stall_reset(); touch_nmi_watchdog(); } static int xmon_core(struct pt_regs *regs, volatile int fromipi) { volatile int cmd = 0; struct bpt *volatile bp; long recurse_jmp[JMP_BUF_LEN]; bool locked_down; unsigned long offset; unsigned long flags; #ifdef CONFIG_SMP int cpu; int secondary; #endif local_irq_save(flags); hard_irq_disable(); locked_down = xmon_is_locked_down(); if (!fromipi) { tracing_enabled = tracing_is_on(); tracing_off(); } bp = in_breakpoint_table(regs->nip, &offset); if (bp != NULL) { regs_set_return_ip(regs, bp->address + offset); atomic_dec(&bp->ref_count); } remove_cpu_bpts(); #ifdef CONFIG_SMP cpu = smp_processor_id(); if (cpumask_test_cpu(cpu, &cpus_in_xmon)) { /* * We catch SPR read/write faults here because the 0x700, 0xf60 * etc. handlers don't call debugger_fault_handler(). 
*/ if (catch_spr_faults) longjmp(bus_error_jmp, 1); get_output_lock(); excprint(regs); printf("cpu 0x%x: Exception %lx %s in xmon, " "returning to main loop\n", cpu, regs->trap, getvecname(TRAP(regs))); release_output_lock(); longjmp(xmon_fault_jmp[cpu], 1); } if (setjmp(recurse_jmp) != 0) { if (!in_xmon || !xmon_gate) { get_output_lock(); printf("xmon: WARNING: bad recursive fault " "on cpu 0x%x\n", cpu); release_output_lock(); goto waiting; } secondary = !(xmon_taken && cpu == xmon_owner); goto cmdloop; } xmon_fault_jmp[cpu] = recurse_jmp; bp = NULL; if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) bp = at_breakpoint(regs->nip); if (bp || regs_is_unrecoverable(regs)) fromipi = 0; if (!fromipi) { get_output_lock(); if (!locked_down) excprint(regs); if (bp) { printf("cpu 0x%x stopped at breakpoint 0x%tx (", cpu, BP_NUM(bp)); xmon_print_symbol(regs->nip, " ", ")\n"); } if (regs_is_unrecoverable(regs)) printf("WARNING: exception is not recoverable, " "can't continue\n"); release_output_lock(); } cpumask_set_cpu(cpu, &cpus_in_xmon); waiting: secondary = 1; spin_begin(); while (secondary && !xmon_gate) { if (in_xmon == 0) { if (fromipi) { spin_end(); goto leave; } secondary = test_and_set_bit(0, &in_xmon); } spin_cpu_relax(); touch_nmi_watchdog(); } spin_end(); if (!secondary && !xmon_gate) { /* we are the first cpu to come in */ /* interrupt other cpu(s) */ int ncpus = num_online_cpus(); xmon_owner = cpu; mb(); if (ncpus > 1) { /* * A system reset (trap == 0x100) can be triggered on * all CPUs, so when we come in via 0x100 try waiting * for the other CPUs to come in before we send the * debugger break (IPI). This is similar to * crash_kexec_secondary(). */ if (TRAP(regs) != INTERRUPT_SYSTEM_RESET || !wait_for_other_cpus(ncpus)) smp_send_debugger_break(); wait_for_other_cpus(ncpus); } remove_bpts(); disable_surveillance(); if (!locked_down) { /* for breakpoint or single step, print curr insn */ if (bp || TRAP(regs) == INTERRUPT_TRACE) ppc_inst_dump(regs->nip, 1, 0); printf("enter ? for help\n"); } mb(); xmon_gate = 1; barrier(); touch_nmi_watchdog(); } cmdloop: while (in_xmon) { if (secondary) { spin_begin(); if (cpu == xmon_owner) { if (!test_and_set_bit(0, &xmon_taken)) { secondary = 0; spin_end(); continue; } /* missed it */ while (cpu == xmon_owner) spin_cpu_relax(); } spin_cpu_relax(); touch_nmi_watchdog(); } else { cmd = 1; #ifdef CONFIG_SMP if (xmon_batch) cmd = batch_cmds(regs); #endif if (!locked_down && cmd) cmd = cmds(regs); if (locked_down || cmd != 0) { /* exiting xmon */ insert_bpts(); xmon_gate = 0; wmb(); in_xmon = 0; break; } /* have switched to some other cpu */ secondary = 1; } } leave: cpumask_clear_cpu(cpu, &cpus_in_xmon); xmon_fault_jmp[cpu] = NULL; #else /* UP is simple... */ if (in_xmon) { printf("Exception %lx %s in xmon, returning to main loop\n", regs->trap, getvecname(TRAP(regs))); longjmp(xmon_fault_jmp[0], 1); } if (setjmp(recurse_jmp) == 0) { xmon_fault_jmp[0] = recurse_jmp; in_xmon = 1; excprint(regs); bp = at_breakpoint(regs->nip); if (bp) { printf("Stopped at breakpoint %tx (", BP_NUM(bp)); xmon_print_symbol(regs->nip, " ", ")\n"); } if (regs_is_unrecoverable(regs)) printf("WARNING: exception is not recoverable, " "can't continue\n"); remove_bpts(); disable_surveillance(); if (!locked_down) { /* for breakpoint or single step, print current insn */ if (bp || TRAP(regs) == INTERRUPT_TRACE) ppc_inst_dump(regs->nip, 1, 0); printf("enter ? 
for help\n"); } } if (!locked_down) cmd = cmds(regs); insert_bpts(); in_xmon = 0; #endif #ifdef CONFIG_BOOKE if (regs->msr & MSR_DE) { bp = at_breakpoint(regs->nip); if (bp != NULL) { regs_set_return_ip(regs, (unsigned long) &bp->instr[0]); atomic_inc(&bp->ref_count); } } #else if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { bp = at_breakpoint(regs->nip); if (bp != NULL) { int stepped = emulate_step(regs, ppc_inst_read(bp->instr)); if (stepped == 0) { regs_set_return_ip(regs, (unsigned long) &bp->instr[0]); atomic_inc(&bp->ref_count); } else if (stepped < 0) { printf("Couldn't single-step %s instruction\n", IS_RFID(ppc_inst_read(bp->instr))? "rfid": "mtmsrd"); } } } #endif if (locked_down) clear_all_bpt(); else insert_cpu_bpts(); xmon_touch_watchdogs(); local_irq_restore(flags); return cmd != 'X' && cmd != EOF; } int xmon(struct pt_regs *excp) { struct pt_regs regs; if (excp == NULL) { ppc_save_regs(&regs); excp = &regs; } return xmon_core(excp, 0); } EXPORT_SYMBOL(xmon); irqreturn_t xmon_irq(int irq, void *d) { unsigned long flags; local_irq_save(flags); printf("Keyboard interrupt\n"); xmon(get_irq_regs()); local_irq_restore(flags); return IRQ_HANDLED; } static int xmon_bpt(struct pt_regs *regs) { struct bpt *bp; unsigned long offset; if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) return 0; /* Are we at the trap at bp->instr[1] for some bp? */ bp = in_breakpoint_table(regs->nip, &offset); if (bp != NULL && (offset == 4 || offset == 8)) { regs_set_return_ip(regs, bp->address + offset); atomic_dec(&bp->ref_count); return 1; } /* Are we at a breakpoint? */ bp = at_breakpoint(regs->nip); if (!bp) return 0; xmon_core(regs, 0); return 1; } static int xmon_sstep(struct pt_regs *regs) { if (user_mode(regs)) return 0; xmon_core(regs, 0); return 1; } static int xmon_break_match(struct pt_regs *regs) { int i; if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) return 0; for (i = 0; i < nr_wp_slots(); i++) { if (dabr[i].enabled) goto found; } return 0; found: xmon_core(regs, 0); return 1; } static int xmon_iabr_match(struct pt_regs *regs) { if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) return 0; if (iabr == NULL) return 0; xmon_core(regs, 0); return 1; } static int xmon_ipi(struct pt_regs *regs) { #ifdef CONFIG_SMP if (in_xmon && !cpumask_test_cpu(smp_processor_id(), &cpus_in_xmon)) xmon_core(regs, 1); #endif return 0; } static int xmon_fault_handler(struct pt_regs *regs) { struct bpt *bp; unsigned long offset; if (in_xmon && catch_memory_errors) handle_fault(regs); /* doesn't return */ if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { bp = in_breakpoint_table(regs->nip, &offset); if (bp != NULL) { regs_set_return_ip(regs, bp->address + offset); atomic_dec(&bp->ref_count); } } return 0; } /* Force enable xmon if not already enabled */ static inline void force_enable_xmon(void) { /* Enable xmon hooks if needed */ if (!xmon_on) { printf("xmon: Enabling debugger hooks\n"); xmon_on = 1; } } static struct bpt *at_breakpoint(unsigned long pc) { int i; struct bpt *volatile bp; bp = bpts; for (i = 0; i < NBPTS; ++i, ++bp) if (bp->enabled && pc == bp->address) return bp; return NULL; } static struct bpt *in_breakpoint_table(unsigned long nip, unsigned long *offp) { unsigned long off; off = nip - (unsigned long)bpt_table; if (off >= sizeof(bpt_table)) return NULL; *offp = off & (BPT_SIZE - 1); if (off & 3) return NULL; return bpts + (off / BPT_SIZE); } static struct bpt *new_breakpoint(unsigned long a) { struct bpt 
*bp; a &= ~3UL; bp = at_breakpoint(a); if (bp) return bp; for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { if (!bp->enabled && atomic_read(&bp->ref_count) == 0) { bp->address = a; bp->instr = (void *)(bpt_table + ((bp - bpts) * BPT_WORDS)); return bp; } } printf("Sorry, no free breakpoints. Please clear one first.\n"); return NULL; } static void insert_bpts(void) { int i; ppc_inst_t instr, instr2; struct bpt *bp, *bp2; bp = bpts; for (i = 0; i < NBPTS; ++i, ++bp) { if ((bp->enabled & (BP_TRAP|BP_CIABR)) == 0) continue; if (!mread_instr(bp->address, &instr)) { printf("Couldn't read instruction at %lx, " "disabling breakpoint there\n", bp->address); bp->enabled = 0; continue; } if (!can_single_step(ppc_inst_val(instr))) { printf("Breakpoint at %lx is on an instruction that can't be single stepped, disabling it\n", bp->address); bp->enabled = 0; continue; } /* * Check the address is not a suffix by looking for a prefix in * front of it. */ if (mread_instr(bp->address - 4, &instr2) == 8) { printf("Breakpoint at %lx is on the second word of a prefixed instruction, disabling it\n", bp->address); bp->enabled = 0; continue; } /* * We might still be a suffix - if the prefix has already been * replaced by a breakpoint we won't catch it with the above * test. */ bp2 = at_breakpoint(bp->address - 4); if (bp2 && ppc_inst_prefixed(ppc_inst_read(bp2->instr))) { printf("Breakpoint at %lx is on the second word of a prefixed instruction, disabling it\n", bp->address); bp->enabled = 0; continue; } patch_instruction(bp->instr, instr); patch_instruction(ppc_inst_next(bp->instr, bp->instr), ppc_inst(bpinstr)); if (bp->enabled & BP_CIABR) continue; if (patch_instruction((u32 *)bp->address, ppc_inst(bpinstr)) != 0) { printf("Couldn't write instruction at %lx, " "disabling breakpoint there\n", bp->address); bp->enabled &= ~BP_TRAP; continue; } } } static void insert_cpu_bpts(void) { int i; struct arch_hw_breakpoint brk; for (i = 0; i < nr_wp_slots(); i++) { if (dabr[i].enabled) { brk.address = dabr[i].address; brk.type = (dabr[i].enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; brk.len = 8; brk.hw_len = 8; __set_breakpoint(i, &brk); } } if (iabr) set_ciabr(iabr->address); } static void remove_bpts(void) { int i; struct bpt *bp; ppc_inst_t instr; bp = bpts; for (i = 0; i < NBPTS; ++i, ++bp) { if ((bp->enabled & (BP_TRAP|BP_CIABR)) != BP_TRAP) continue; if (mread_instr(bp->address, &instr) && ppc_inst_equal(instr, ppc_inst(bpinstr)) && patch_instruction( (u32 *)bp->address, ppc_inst_read(bp->instr)) != 0) printf("Couldn't remove breakpoint at %lx\n", bp->address); } } static void remove_cpu_bpts(void) { hw_breakpoint_disable(); write_ciabr(0); } /* Based on uptime_proc_show(). 
*/ static void show_uptime(void) { struct timespec64 uptime; if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); ktime_get_coarse_boottime_ts64(&uptime); printf("Uptime: %lu.%.2lu seconds\n", (unsigned long)uptime.tv_sec, ((unsigned long)uptime.tv_nsec / (NSEC_PER_SEC/100))); sync(); __delay(200); \ } catch_memory_errors = 0; } static void set_lpp_cmd(void) { unsigned long lpp; if (!scanhex(&lpp)) { printf("Invalid number.\n"); lpp = 0; } xmon_set_pagination_lpp(lpp); } /* Command interpreting routine */ static char *last_cmd; static int cmds(struct pt_regs *excp) { int cmd = 0; last_cmd = NULL; xmon_regs = excp; xmon_show_stack(excp->gpr[1], excp->link, excp->nip); for(;;) { #ifdef CONFIG_SMP printf("%x:", smp_processor_id()); #endif /* CONFIG_SMP */ printf("mon> "); flush_input(); termch = 0; cmd = skipbl(); if( cmd == '\n' ) { if (last_cmd == NULL) continue; take_input(last_cmd); last_cmd = NULL; cmd = inchar(); } switch (cmd) { case 'm': cmd = inchar(); switch (cmd) { case 'm': case 's': case 'd': memops(cmd); break; case 'l': memlocate(); break; case 'z': if (xmon_is_ro) { printf(xmon_ro_msg); break; } memzcan(); break; case 'i': show_mem(); break; default: termch = cmd; memex(); } break; case 'd': dump(); break; case 'l': symbol_lookup(); break; case 'r': prregs(excp); /* print regs */ break; case 'e': excprint(excp); break; case 'S': super_regs(); break; case 't': backtrace(excp); break; case 'f': cacheflush(); break; case 's': if (do_spu_cmd() == 0) break; if (do_step(excp)) return cmd; break; case 'x': case 'X': if (tracing_enabled) tracing_on(); return cmd; case EOF: printf(" <no input ...>\n"); mdelay(2000); return cmd; case '?': xmon_puts(help_string); break; case '#': set_lpp_cmd(); break; case 'b': bpt_cmds(); break; case 'C': csum(); break; case 'c': if (cpu_cmd()) return 0; break; case 'z': bootcmds(); break; case 'p': if (xmon_is_ro) { printf(xmon_ro_msg); break; } proccall(); break; case 'P': show_tasks(); break; #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_64S_HASH_MMU) case 'u': dump_segments(); break; #elif defined(CONFIG_44x) case 'u': dump_tlb_44x(); break; #elif defined(CONFIG_PPC_BOOK3E_64) case 'u': dump_tlb_book3e(); break; #endif case 'U': show_uptime(); break; default: printf("Unrecognized command: "); do { if (' ' < cmd && cmd <= '~') putchar(cmd); else printf("\\x%x", cmd); cmd = inchar(); } while (cmd != '\n'); printf(" (type ? for help)\n"); break; } } } #ifdef CONFIG_BOOKE static int do_step(struct pt_regs *regs) { regs_set_return_msr(regs, regs->msr | MSR_DE); mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); return 1; } #else /* * Step a single instruction. * Some instructions we emulate, others we execute with MSR_SE set. */ static int do_step(struct pt_regs *regs) { ppc_inst_t instr; int stepped; force_enable_xmon(); /* check we are in 64-bit kernel mode, translation enabled */ if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) { if (mread_instr(regs->nip, &instr)) { stepped = emulate_step(regs, instr); if (stepped < 0) { printf("Couldn't single-step %s instruction\n", (IS_RFID(instr)? 
"rfid": "mtmsrd")); return 0; } if (stepped > 0) { set_trap(regs, 0xd00); printf("stepped to "); xmon_print_symbol(regs->nip, " ", "\n"); ppc_inst_dump(regs->nip, 1, 0); return 0; } } } regs_set_return_msr(regs, regs->msr | MSR_SE); return 1; } #endif static void bootcmds(void) { char tmp[64]; int cmd; cmd = inchar(); if (cmd == 'r') { getstring(tmp, 64); ppc_md.restart(tmp); } else if (cmd == 'h') { ppc_md.halt(); } else if (cmd == 'p') { do_kernel_power_off(); } } #ifdef CONFIG_SMP static int xmon_switch_cpu(unsigned long cpu) { int timeout; xmon_taken = 0; mb(); xmon_owner = cpu; timeout = 10000000; while (!xmon_taken) { if (--timeout == 0) { if (test_and_set_bit(0, &xmon_taken)) break; /* take control back */ mb(); xmon_owner = smp_processor_id(); printf("cpu 0x%lx didn't take control\n", cpu); return 0; } barrier(); } return 1; } static int xmon_batch_next_cpu(void) { unsigned long cpu; while (!cpumask_empty(&xmon_batch_cpus)) { cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus, xmon_batch_start_cpu, true); if (cpu >= nr_cpu_ids) break; if (xmon_batch_start_cpu == -1) xmon_batch_start_cpu = cpu; if (xmon_switch_cpu(cpu)) return 0; cpumask_clear_cpu(cpu, &xmon_batch_cpus); } xmon_batch = 0; printf("%x:mon> \n", smp_processor_id()); return 1; } static int batch_cmds(struct pt_regs *excp) { int cmd; /* simulate command entry */ cmd = xmon_batch; termch = '\n'; last_cmd = NULL; xmon_regs = excp; printf("%x:", smp_processor_id()); printf("mon> "); printf("%c\n", (char)cmd); switch (cmd) { case 'r': prregs(excp); /* print regs */ break; case 'S': super_regs(); break; case 't': backtrace(excp); break; } cpumask_clear_cpu(smp_processor_id(), &xmon_batch_cpus); return xmon_batch_next_cpu(); } static int cpu_cmd(void) { unsigned long cpu, first_cpu, last_cpu; cpu = skipbl(); if (cpu == '#') { xmon_batch = skipbl(); if (xmon_batch) { switch (xmon_batch) { case 'r': case 'S': case 't': cpumask_copy(&xmon_batch_cpus, &cpus_in_xmon); if (cpumask_weight(&xmon_batch_cpus) <= 1) { printf("There are no other cpus in xmon\n"); break; } xmon_batch_start_cpu = -1; if (!xmon_batch_next_cpu()) return 1; break; default: printf("c# only supports 'r', 'S' and 't' commands\n"); } xmon_batch = 0; return 0; } } termch = cpu; if (!scanhex(&cpu)) { /* print cpus waiting or in xmon */ printf("cpus stopped:"); last_cpu = first_cpu = NR_CPUS; for_each_possible_cpu(cpu) { if (cpumask_test_cpu(cpu, &cpus_in_xmon)) { if (cpu == last_cpu + 1) { last_cpu = cpu; } else { if (last_cpu != first_cpu) printf("-0x%lx", last_cpu); last_cpu = first_cpu = cpu; printf(" 0x%lx", cpu); } } } if (last_cpu != first_cpu) printf("-0x%lx", last_cpu); printf("\n"); return 0; } /* try to switch to cpu specified */ if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) { printf("cpu 0x%lx isn't in xmon\n", cpu); #ifdef CONFIG_PPC64 printf("backtrace of paca[0x%lx].saved_r1 (possibly stale):\n", cpu); xmon_show_stack(paca_ptrs[cpu]->saved_r1, 0, 0); #endif return 0; } return xmon_switch_cpu(cpu); } #else static int cpu_cmd(void) { return 0; } #endif /* CONFIG_SMP */ static unsigned short fcstab[256] = { 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, 
0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 }; #define FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff]) static void csum(void) { unsigned int i; unsigned short fcs; unsigned char v; if (!scanhex(&adrs)) return; if (!scanhex(&ncsum)) return; fcs = 0xffff; for (i = 0; i < ncsum; ++i) { if (mread(adrs+i, &v, 1) == 0) { printf("csum stopped at "REG"\n", adrs+i); break; } fcs = FCS(fcs, v); } printf("%x\n", fcs); } /* * Check if this is a suitable place to put a breakpoint. 
*/ static long check_bp_loc(unsigned long addr) { ppc_inst_t instr; addr &= ~3; if (!is_kernel_addr(addr)) { printf("Breakpoints may only be placed at kernel addresses\n"); return 0; } if (!mread_instr(addr, &instr)) { printf("Can't read instruction at address %lx\n", addr); return 0; } if (!can_single_step(ppc_inst_val(instr))) { printf("Breakpoints may not be placed on instructions that can't be single stepped\n"); return 0; } return 1; } static int find_free_data_bpt(void) { int i; for (i = 0; i < nr_wp_slots(); i++) { if (!dabr[i].enabled) return i; } printf("Couldn't find free breakpoint register\n"); return -1; } static void print_data_bpts(void) { int i; for (i = 0; i < nr_wp_slots(); i++) { if (!dabr[i].enabled) continue; printf(" data "REG" [", dabr[i].address); if (dabr[i].enabled & 1) printf("r"); if (dabr[i].enabled & 2) printf("w"); printf("]\n"); } } static char *breakpoint_help_string = "Breakpoint command usage:\n" "b show breakpoints\n" "b <addr> [cnt] set breakpoint at given instr addr\n" "bc clear all breakpoints\n" "bc <n/addr> clear breakpoint number n or at addr\n" "bi <addr> [cnt] set hardware instr breakpoint (POWER8 only)\n" "bd <addr> [cnt] set hardware data breakpoint\n" ""; static void bpt_cmds(void) { int cmd; unsigned long a; int i; struct bpt *bp; cmd = inchar(); switch (cmd) { case 'd': { /* bd - hardware data breakpoint */ static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; int mode; if (xmon_is_ro) { printf(xmon_ro_msg); break; } if (!ppc_breakpoint_available()) { printf("Hardware data breakpoint not supported on this cpu\n"); break; } i = find_free_data_bpt(); if (i < 0) break; mode = 7; cmd = inchar(); if (cmd == 'r') mode = 5; else if (cmd == 'w') mode = 6; else termch = cmd; dabr[i].address = 0; dabr[i].enabled = 0; if (scanhex(&dabr[i].address)) { if (!is_kernel_addr(dabr[i].address)) { printf(badaddr); break; } dabr[i].address &= ~HW_BRK_TYPE_DABR; dabr[i].enabled = mode | BP_DABR; } force_enable_xmon(); break; } case 'i': /* bi - hardware instr breakpoint */ if (xmon_is_ro) { printf(xmon_ro_msg); break; } if (!cpu_has_feature(CPU_FTR_ARCH_207S)) { printf("Hardware instruction breakpoint " "not supported on this cpu\n"); break; } if (iabr) { iabr->enabled &= ~BP_CIABR; iabr = NULL; } if (!scanhex(&a)) break; if (!check_bp_loc(a)) break; bp = new_breakpoint(a); if (bp != NULL) { bp->enabled |= BP_CIABR; iabr = bp; force_enable_xmon(); } break; case 'c': if (!scanhex(&a)) { /* clear all breakpoints */ for (i = 0; i < NBPTS; ++i) bpts[i].enabled = 0; iabr = NULL; for (i = 0; i < nr_wp_slots(); i++) dabr[i].enabled = 0; printf("All breakpoints cleared\n"); break; } if (a <= NBPTS && a >= 1) { /* assume a breakpoint number */ bp = &bpts[a-1]; /* bp nums are 1 based */ } else { /* assume a breakpoint address */ bp = at_breakpoint(a); if (bp == NULL) { printf("No breakpoint at %lx\n", a); break; } } printf("Cleared breakpoint %tx (", BP_NUM(bp)); xmon_print_symbol(bp->address, " ", ")\n"); bp->enabled = 0; break; default: termch = cmd; cmd = skipbl(); if (cmd == '?') { printf(breakpoint_help_string); break; } termch = cmd; if (xmon_is_ro || !scanhex(&a)) { /* print all breakpoints */ printf(" type address\n"); print_data_bpts(); for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { if (!bp->enabled) continue; printf("%tx %s ", BP_NUM(bp), (bp->enabled & BP_CIABR) ? 
"inst": "trap"); xmon_print_symbol(bp->address, " ", "\n"); } break; } if (!check_bp_loc(a)) break; bp = new_breakpoint(a); if (bp != NULL) { bp->enabled |= BP_TRAP; force_enable_xmon(); } break; } } /* Very cheap human name for vector lookup. */ static const char *getvecname(unsigned long vec) { char *ret; switch (vec) { case 0x100: ret = "(System Reset)"; break; case 0x200: ret = "(Machine Check)"; break; case 0x300: ret = "(Data Access)"; break; case 0x380: if (radix_enabled()) ret = "(Data Access Out of Range)"; else ret = "(Data SLB Access)"; break; case 0x400: ret = "(Instruction Access)"; break; case 0x480: if (radix_enabled()) ret = "(Instruction Access Out of Range)"; else ret = "(Instruction SLB Access)"; break; case 0x500: ret = "(Hardware Interrupt)"; break; case 0x600: ret = "(Alignment)"; break; case 0x700: ret = "(Program Check)"; break; case 0x800: ret = "(FPU Unavailable)"; break; case 0x900: ret = "(Decrementer)"; break; case 0x980: ret = "(Hypervisor Decrementer)"; break; case 0xa00: ret = "(Doorbell)"; break; case 0xc00: ret = "(System Call)"; break; case 0xd00: ret = "(Single Step)"; break; case 0xe40: ret = "(Emulation Assist)"; break; case 0xe60: ret = "(HMI)"; break; case 0xe80: ret = "(Hypervisor Doorbell)"; break; case 0xf00: ret = "(Performance Monitor)"; break; case 0xf20: ret = "(Altivec Unavailable)"; break; case 0x1300: ret = "(Instruction Breakpoint)"; break; case 0x1500: ret = "(Denormalisation)"; break; case 0x1700: ret = "(Altivec Assist)"; break; case 0x3000: ret = "(System Call Vectored)"; break; default: ret = ""; } return ret; } static void get_function_bounds(unsigned long pc, unsigned long *startp, unsigned long *endp) { unsigned long size, offset; const char *name; *startp = *endp = 0; if (pc == 0) return; if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); name = kallsyms_lookup(pc, &size, &offset, NULL, tmpstr); if (name != NULL) { *startp = pc - offset; *endp = pc - offset + size; } sync(); } catch_memory_errors = 0; } #define LRSAVE_OFFSET (STACK_FRAME_LR_SAVE * sizeof(unsigned long)) static void xmon_show_stack(unsigned long sp, unsigned long lr, unsigned long pc) { int max_to_print = 64; unsigned long ip; unsigned long newsp; unsigned long marker; struct pt_regs regs; while (max_to_print--) { if (!is_kernel_addr(sp)) { if (sp != 0) printf("SP (%lx) is in userspace\n", sp); break; } if (!mread(sp + LRSAVE_OFFSET, &ip, sizeof(unsigned long)) || !mread(sp, &newsp, sizeof(unsigned long))) { printf("Couldn't read stack frame at %lx\n", sp); break; } /* * For the first stack frame, try to work out if * LR and/or the saved LR value in the bottommost * stack frame are valid. */ if ((pc | lr) != 0) { unsigned long fnstart, fnend; unsigned long nextip; int printip = 1; get_function_bounds(pc, &fnstart, &fnend); nextip = 0; if (newsp > sp) mread(newsp + LRSAVE_OFFSET, &nextip, sizeof(unsigned long)); if (lr == ip) { if (!is_kernel_addr(lr) || (fnstart <= lr && lr < fnend)) printip = 0; } else if (lr == nextip) { printip = 0; } else if (is_kernel_addr(lr) && !(fnstart <= lr && lr < fnend)) { printf("[link register ] "); xmon_print_symbol(lr, " ", "\n"); } if (printip) { printf("["REG"] ", sp); xmon_print_symbol(ip, " ", " (unreliable)\n"); } pc = lr = 0; } else { printf("["REG"] ", sp); xmon_print_symbol(ip, " ", "\n"); } /* Look for "regs" marker to see if this is an exception frame. 
*/ if (mread(sp + STACK_INT_FRAME_MARKER, &marker, sizeof(unsigned long)) && marker == STACK_FRAME_REGS_MARKER) { if (mread(sp + STACK_INT_FRAME_REGS, &regs, sizeof(regs)) != sizeof(regs)) { printf("Couldn't read registers at %lx\n", sp + STACK_INT_FRAME_REGS); break; } printf("--- Exception: %lx %s at ", regs.trap, getvecname(TRAP(&regs))); pc = regs.nip; lr = regs.link; xmon_print_symbol(pc, " ", "\n"); } if (newsp == 0) break; sp = newsp; } } static void backtrace(struct pt_regs *excp) { unsigned long sp; if (scanhex(&sp)) xmon_show_stack(sp, 0, 0); else xmon_show_stack(excp->gpr[1], excp->link, excp->nip); scannl(); } static void print_bug_trap(struct pt_regs *regs) { #ifdef CONFIG_BUG const struct bug_entry *bug; unsigned long addr; if (regs->msr & MSR_PR) return; /* not in kernel */ addr = regs->nip; /* address of trap instruction */ if (!is_kernel_addr(addr)) return; bug = find_bug(regs->nip); if (bug == NULL) return; if (is_warning_bug(bug)) return; #ifdef CONFIG_DEBUG_BUGVERBOSE printf("kernel BUG at %s:%u!\n", (char *)bug + bug->file_disp, bug->line); #else printf("kernel BUG at %px!\n", (void *)bug + bug->bug_addr_disp); #endif #endif /* CONFIG_BUG */ } static void excprint(struct pt_regs *fp) { unsigned long trap; #ifdef CONFIG_SMP printf("cpu 0x%x: ", smp_processor_id()); #endif /* CONFIG_SMP */ trap = TRAP(fp); printf("Vector: %lx %s at [%px]\n", fp->trap, getvecname(trap), fp); printf(" pc: "); xmon_print_symbol(fp->nip, ": ", "\n"); printf(" lr: "); xmon_print_symbol(fp->link, ": ", "\n"); printf(" sp: %lx\n", fp->gpr[1]); printf(" msr: %lx\n", fp->msr); if (trap == INTERRUPT_DATA_STORAGE || trap == INTERRUPT_DATA_SEGMENT || trap == INTERRUPT_ALIGNMENT || trap == INTERRUPT_MACHINE_CHECK) { printf(" dar: %lx\n", fp->dar); if (trap != INTERRUPT_DATA_SEGMENT) printf(" dsisr: %lx\n", fp->dsisr); } printf(" current = 0x%px\n", current); #ifdef CONFIG_PPC64 printf(" paca = 0x%px\t irqmask: 0x%02x\t irq_happened: 0x%02x\n", local_paca, local_paca->irq_soft_mask, local_paca->irq_happened); #endif if (current) { printf(" pid = %d, comm = %s\n", current->pid, current->comm); } if (trap == INTERRUPT_PROGRAM) print_bug_trap(fp); printf(linux_banner); } static void prregs(struct pt_regs *fp) { int n, trap; unsigned long base; struct pt_regs regs; if (scanhex(&base)) { if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); regs = *(struct pt_regs *)base; sync(); __delay(200); } else { catch_memory_errors = 0; printf("*** Error reading registers from "REG"\n", base); return; } catch_memory_errors = 0; fp = &regs; } #ifdef CONFIG_PPC64 #define R_PER_LINE 2 #else #define R_PER_LINE 4 #endif for (n = 0; n < 32; ++n) { printf("R%.2d = "REG"%s", n, fp->gpr[n], (n % R_PER_LINE) == R_PER_LINE - 1 ? 
"\n" : " "); } printf("pc = "); xmon_print_symbol(fp->nip, " ", "\n"); if (!trap_is_syscall(fp) && cpu_has_feature(CPU_FTR_CFAR)) { printf("cfar= "); xmon_print_symbol(fp->orig_gpr3, " ", "\n"); } printf("lr = "); xmon_print_symbol(fp->link, " ", "\n"); printf("msr = "REG" cr = %.8lx\n", fp->msr, fp->ccr); printf("ctr = "REG" xer = "REG" trap = %4lx\n", fp->ctr, fp->xer, fp->trap); trap = TRAP(fp); if (trap == INTERRUPT_DATA_STORAGE || trap == INTERRUPT_DATA_SEGMENT || trap == INTERRUPT_ALIGNMENT) printf("dar = "REG" dsisr = %.8lx\n", fp->dar, fp->dsisr); } static void cacheflush(void) { int cmd; unsigned long nflush; cmd = inchar(); if (cmd != 'i') termch = cmd; scanhex((void *)&adrs); if (termch != '\n') termch = 0; nflush = 1; scanhex(&nflush); nflush = (nflush + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES; if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); if (cmd != 'i' || IS_ENABLED(CONFIG_PPC_BOOK3S_64)) { for (; nflush > 0; --nflush, adrs += L1_CACHE_BYTES) cflush((void *) adrs); } else { for (; nflush > 0; --nflush, adrs += L1_CACHE_BYTES) cinval((void *) adrs); } sync(); /* wait a little while to see if we get a machine check */ __delay(200); } catch_memory_errors = 0; } extern unsigned long xmon_mfspr(int spr, unsigned long default_value); extern void xmon_mtspr(int spr, unsigned long value); static int read_spr(int n, unsigned long *vp) { unsigned long ret = -1UL; int ok = 0; if (setjmp(bus_error_jmp) == 0) { catch_spr_faults = 1; sync(); ret = xmon_mfspr(n, *vp); sync(); *vp = ret; ok = 1; } catch_spr_faults = 0; return ok; } static void write_spr(int n, unsigned long val) { if (xmon_is_ro) { printf(xmon_ro_msg); return; } if (setjmp(bus_error_jmp) == 0) { catch_spr_faults = 1; sync(); xmon_mtspr(n, val); sync(); } else { printf("SPR 0x%03x (%4d) Faulted during write\n", n, n); } catch_spr_faults = 0; } static void dump_206_sprs(void) { #ifdef CONFIG_PPC64 if (!cpu_has_feature(CPU_FTR_ARCH_206)) return; /* Actually some of these pre-date 2.06, but whatever */ printf("srr0 = %.16lx srr1 = %.16lx dsisr = %.8lx\n", mfspr(SPRN_SRR0), mfspr(SPRN_SRR1), mfspr(SPRN_DSISR)); printf("dscr = %.16lx ppr = %.16lx pir = %.8lx\n", mfspr(SPRN_DSCR), mfspr(SPRN_PPR), mfspr(SPRN_PIR)); printf("amr = %.16lx uamor = %.16lx\n", mfspr(SPRN_AMR), mfspr(SPRN_UAMOR)); if (!(mfmsr() & MSR_HV)) return; printf("sdr1 = %.16lx hdar = %.16lx hdsisr = %.8lx\n", mfspr(SPRN_SDR1), mfspr(SPRN_HDAR), mfspr(SPRN_HDSISR)); printf("hsrr0 = %.16lx hsrr1 = %.16lx hdec = %.16lx\n", mfspr(SPRN_HSRR0), mfspr(SPRN_HSRR1), mfspr(SPRN_HDEC)); printf("lpcr = %.16lx pcr = %.16lx lpidr = %.8lx\n", mfspr(SPRN_LPCR), mfspr(SPRN_PCR), mfspr(SPRN_LPID)); printf("hsprg0 = %.16lx hsprg1 = %.16lx amor = %.16lx\n", mfspr(SPRN_HSPRG0), mfspr(SPRN_HSPRG1), mfspr(SPRN_AMOR)); printf("dabr = %.16lx dabrx = %.16lx\n", mfspr(SPRN_DABR), mfspr(SPRN_DABRX)); #endif } static void dump_207_sprs(void) { #ifdef CONFIG_PPC64 unsigned long msr; if (!cpu_has_feature(CPU_FTR_ARCH_207S)) return; printf("dpdes = %.16lx tir = %.16lx cir = %.8lx\n", mfspr(SPRN_DPDES), mfspr(SPRN_TIR), mfspr(SPRN_CIR)); printf("fscr = %.16lx tar = %.16lx pspb = %.8lx\n", mfspr(SPRN_FSCR), mfspr(SPRN_TAR), mfspr(SPRN_PSPB)); msr = mfmsr(); if (msr & MSR_TM) { /* Only if TM has been enabled in the kernel */ printf("tfhar = %.16lx tfiar = %.16lx texasr = %.16lx\n", mfspr(SPRN_TFHAR), mfspr(SPRN_TFIAR), mfspr(SPRN_TEXASR)); } printf("mmcr0 = %.16lx mmcr1 = %.16lx mmcr2 = %.16lx\n", mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCR2)); printf("pmc1 = %.8lx pmc2 
= %.8lx pmc3 = %.8lx pmc4 = %.8lx\n", mfspr(SPRN_PMC1), mfspr(SPRN_PMC2), mfspr(SPRN_PMC3), mfspr(SPRN_PMC4)); printf("mmcra = %.16lx siar = %.16lx pmc5 = %.8lx\n", mfspr(SPRN_MMCRA), mfspr(SPRN_SIAR), mfspr(SPRN_PMC5)); printf("sdar = %.16lx sier = %.16lx pmc6 = %.8lx\n", mfspr(SPRN_SDAR), mfspr(SPRN_SIER), mfspr(SPRN_PMC6)); printf("ebbhr = %.16lx ebbrr = %.16lx bescr = %.16lx\n", mfspr(SPRN_EBBHR), mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR)); printf("iamr = %.16lx\n", mfspr(SPRN_IAMR)); if (!(msr & MSR_HV)) return; printf("hfscr = %.16lx dhdes = %.16lx rpr = %.16lx\n", mfspr(SPRN_HFSCR), mfspr(SPRN_DHDES), mfspr(SPRN_RPR)); printf("dawr0 = %.16lx dawrx0 = %.16lx\n", mfspr(SPRN_DAWR0), mfspr(SPRN_DAWRX0)); if (nr_wp_slots() > 1) { printf("dawr1 = %.16lx dawrx1 = %.16lx\n", mfspr(SPRN_DAWR1), mfspr(SPRN_DAWRX1)); } printf("ciabr = %.16lx\n", mfspr(SPRN_CIABR)); #endif } static void dump_300_sprs(void) { #ifdef CONFIG_PPC64 bool hv = mfmsr() & MSR_HV; if (!cpu_has_feature(CPU_FTR_ARCH_300)) return; if (cpu_has_feature(CPU_FTR_P9_TIDR)) { printf("pidr = %.16lx tidr = %.16lx\n", mfspr(SPRN_PID), mfspr(SPRN_TIDR)); } else { printf("pidr = %.16lx\n", mfspr(SPRN_PID)); } printf("psscr = %.16lx\n", hv ? mfspr(SPRN_PSSCR) : mfspr(SPRN_PSSCR_PR)); if (!hv) return; printf("ptcr = %.16lx asdr = %.16lx\n", mfspr(SPRN_PTCR), mfspr(SPRN_ASDR)); #endif } static void dump_310_sprs(void) { #ifdef CONFIG_PPC64 if (!cpu_has_feature(CPU_FTR_ARCH_31)) return; printf("mmcr3 = %.16lx, sier2 = %.16lx, sier3 = %.16lx\n", mfspr(SPRN_MMCR3), mfspr(SPRN_SIER2), mfspr(SPRN_SIER3)); #endif } static void dump_one_spr(int spr, bool show_unimplemented) { unsigned long val; val = 0xdeadbeef; if (!read_spr(spr, &val)) { printf("SPR 0x%03x (%4d) Faulted during read\n", spr, spr); return; } if (val == 0xdeadbeef) { /* Looks like read was a nop, confirm */ val = 0x0badcafe; if (!read_spr(spr, &val)) { printf("SPR 0x%03x (%4d) Faulted during read\n", spr, spr); return; } if (val == 0x0badcafe) { if (show_unimplemented) printf("SPR 0x%03x (%4d) Unimplemented\n", spr, spr); return; } } printf("SPR 0x%03x (%4d) = 0x%lx\n", spr, spr, val); } static void super_regs(void) { static unsigned long regno; int cmd; int spr; cmd = skipbl(); switch (cmd) { case '\n': { unsigned long sp, toc; asm("mr %0,1" : "=r" (sp) :); asm("mr %0,2" : "=r" (toc) :); printf("msr = "REG" sprg0 = "REG"\n", mfmsr(), mfspr(SPRN_SPRG0)); printf("pvr = "REG" sprg1 = "REG"\n", mfspr(SPRN_PVR), mfspr(SPRN_SPRG1)); printf("dec = "REG" sprg2 = "REG"\n", mfspr(SPRN_DEC), mfspr(SPRN_SPRG2)); printf("sp = "REG" sprg3 = "REG"\n", sp, mfspr(SPRN_SPRG3)); printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR)); dump_206_sprs(); dump_207_sprs(); dump_300_sprs(); dump_310_sprs(); return; } case 'w': { unsigned long val; scanhex(&regno); val = 0; read_spr(regno, &val); scanhex(&val); write_spr(regno, val); dump_one_spr(regno, true); break; } case 'r': scanhex(&regno); dump_one_spr(regno, true); break; case 'a': /* dump ALL SPRs */ for (spr = 1; spr < 1024; ++spr) dump_one_spr(spr, false); break; } scannl(); } /* * Stuff for reading and writing memory safely */ static int mread(unsigned long adrs, void *buf, int size) { volatile int n; char *p, *q; n = 0; if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); p = (char *)adrs; q = (char *)buf; switch (size) { case 2: *(u16 *)q = *(u16 *)p; break; case 4: *(u32 *)q = *(u32 *)p; break; case 8: *(u64 *)q = *(u64 *)p; break; default: for( ; n < size; ++n) { *q++ = *p++; sync(); } } sync(); /* wait a little while to see 
if we get a machine check */ __delay(200); n = size; } catch_memory_errors = 0; return n; } static int mwrite(unsigned long adrs, void *buf, int size) { volatile int n; char *p, *q; n = 0; if (xmon_is_ro) { printf(xmon_ro_msg); return n; } if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); p = (char *) adrs; q = (char *) buf; switch (size) { case 2: *(u16 *)p = *(u16 *)q; break; case 4: *(u32 *)p = *(u32 *)q; break; case 8: *(u64 *)p = *(u64 *)q; break; default: for ( ; n < size; ++n) { *p++ = *q++; sync(); } } sync(); /* wait a little while to see if we get a machine check */ __delay(200); n = size; } else { printf("*** Error writing address "REG"\n", adrs + n); } catch_memory_errors = 0; return n; } static int mread_instr(unsigned long adrs, ppc_inst_t *instr) { volatile int n; n = 0; if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); *instr = ppc_inst_read((u32 *)adrs); sync(); /* wait a little while to see if we get a machine check */ __delay(200); n = ppc_inst_len(*instr); } catch_memory_errors = 0; return n; } static int fault_type; static int fault_except; static char *fault_chars[] = { "--", "**", "##" }; static int handle_fault(struct pt_regs *regs) { fault_except = TRAP(regs); switch (TRAP(regs)) { case 0x200: fault_type = 0; break; case 0x300: case 0x380: fault_type = 1; break; default: fault_type = 2; } longjmp(bus_error_jmp, 1); return 0; } #define SWAP(a, b, t) ((t) = (a), (a) = (b), (b) = (t)) static void byterev(unsigned char *val, int size) { int t; switch (size) { case 2: SWAP(val[0], val[1], t); break; case 4: SWAP(val[0], val[3], t); SWAP(val[1], val[2], t); break; case 8: /* is there really any use for this? */ SWAP(val[0], val[7], t); SWAP(val[1], val[6], t); SWAP(val[2], val[5], t); SWAP(val[3], val[4], t); break; } } static int brev; static int mnoread; static char *memex_help_string = "Memory examine command usage:\n" "m [addr] [flags] examine/change memory\n" " addr is optional. will start where left off.\n" " flags may include chars from this set:\n" " b modify by bytes (default)\n" " w modify by words (2 byte)\n" " l modify by longs (4 byte)\n" " d modify by doubleword (8 byte)\n" " r toggle reverse byte order mode\n" " n do not read memory (for i/o spaces)\n" " . ok to read (default)\n" "NOTE: flags are saved as defaults\n" ""; static char *memex_subcmd_help_string = "Memory examine subcommands:\n" " hexval write this val to current location\n" " 'string' write chars from string to this location\n" " ' increment address\n" " ^ decrement address\n" " / increment addr by 0x10. //=0x100, ///=0x1000, etc\n" " \\ decrement addr by 0x10. 
\\\\=0x100, \\\\\\=0x1000, etc\n" " ` clear no-read flag\n" " ; stay at this addr\n" " v change to byte mode\n" " w change to word (2 byte) mode\n" " l change to long (4 byte) mode\n" " u change to doubleword (8 byte) mode\n" " m addr change current addr\n" " n toggle no-read flag\n" " r toggle byte reverse flag\n" " < count back up count bytes\n" " > count skip forward count bytes\n" " x exit this mode\n" ""; static void memex(void) { int cmd, inc, i, nslash; unsigned long n; unsigned char val[16]; scanhex((void *)&adrs); cmd = skipbl(); if (cmd == '?') { printf(memex_help_string); return; } else { termch = cmd; } last_cmd = "m\n"; while ((cmd = skipbl()) != '\n') { switch( cmd ){ case 'b': size = 1; break; case 'w': size = 2; break; case 'l': size = 4; break; case 'd': size = 8; break; case 'r': brev = !brev; break; case 'n': mnoread = 1; break; case '.': mnoread = 0; break; } } if( size <= 0 ) size = 1; else if( size > 8 ) size = 8; for(;;){ if (!mnoread) n = mread(adrs, val, size); printf(REG"%c", adrs, brev? 'r': ' '); if (!mnoread) { if (brev) byterev(val, size); putchar(' '); for (i = 0; i < n; ++i) printf("%.2x", val[i]); for (; i < size; ++i) printf("%s", fault_chars[fault_type]); } putchar(' '); inc = size; nslash = 0; for(;;){ if( scanhex(&n) ){ for (i = 0; i < size; ++i) val[i] = n >> (i * 8); if (!brev) byterev(val, size); mwrite(adrs, val, size); inc = size; } cmd = skipbl(); if (cmd == '\n') break; inc = 0; switch (cmd) { case '\'': for(;;){ n = inchar(); if( n == '\\' ) n = bsesc(); else if( n == '\'' ) break; for (i = 0; i < size; ++i) val[i] = n >> (i * 8); if (!brev) byterev(val, size); mwrite(adrs, val, size); adrs += size; } adrs -= size; inc = size; break; case ',': adrs += size; break; case '.': mnoread = 0; break; case ';': break; case 'x': case EOF: scannl(); return; case 'b': case 'v': size = 1; break; case 'w': size = 2; break; case 'l': size = 4; break; case 'u': size = 8; break; case '^': adrs -= size; break; case '/': if (nslash > 0) adrs -= 1 << nslash; else nslash = 0; nslash += 4; adrs += 1 << nslash; break; case '\\': if (nslash < 0) adrs += 1 << -nslash; else nslash = 0; nslash -= 4; adrs -= 1 << -nslash; break; case 'm': scanhex((void *)&adrs); break; case 'n': mnoread = 1; break; case 'r': brev = !brev; break; case '<': n = size; scanhex(&n); adrs -= n; break; case '>': n = size; scanhex(&n); adrs += n; break; case '?': printf(memex_subcmd_help_string); break; } } adrs += inc; } } static int bsesc(void) { int c; c = inchar(); switch( c ){ case 'n': c = '\n'; break; case 'r': c = '\r'; break; case 'b': c = '\b'; break; case 't': c = '\t'; break; } return c; } static void xmon_rawdump (unsigned long adrs, long ndump) { long n, m, r, nr; unsigned char temp[16]; for (n = ndump; n > 0;) { r = n < 16? n: 16; nr = mread(adrs, temp, r); adrs += nr; for (m = 0; m < r; ++m) { if (m < nr) printf("%.2x", temp[m]); else printf("%s", fault_chars[fault_type]); } n -= r; if (nr < r) break; } printf("\n"); } static void dump_tracing(void) { int c; c = inchar(); if (c == 'c') ftrace_dump(DUMP_ORIG); else ftrace_dump(DUMP_ALL); } #ifdef CONFIG_PPC64 static void dump_one_paca(int cpu) { struct paca_struct *p; #ifdef CONFIG_PPC_64S_HASH_MMU int i = 0; #endif if (setjmp(bus_error_jmp) != 0) { printf("*** Error dumping paca for cpu 0x%x!\n", cpu); return; } catch_memory_errors = 1; sync(); p = paca_ptrs[cpu]; printf("paca for cpu 0x%x @ %px:\n", cpu, p); printf(" %-*s = %s\n", 25, "possible", cpu_possible(cpu) ? 
"yes" : "no"); printf(" %-*s = %s\n", 25, "present", cpu_present(cpu) ? "yes" : "no"); printf(" %-*s = %s\n", 25, "online", cpu_online(cpu) ? "yes" : "no"); #define DUMP(paca, name, format) \ printf(" %-*s = "format"\t(0x%lx)\n", 25, #name, 18, paca->name, \ offsetof(struct paca_struct, name)); DUMP(p, lock_token, "%#-*x"); DUMP(p, paca_index, "%#-*x"); #ifndef CONFIG_PPC_KERNEL_PCREL DUMP(p, kernel_toc, "%#-*llx"); #endif DUMP(p, kernelbase, "%#-*llx"); DUMP(p, kernel_msr, "%#-*llx"); DUMP(p, emergency_sp, "%-*px"); #ifdef CONFIG_PPC_BOOK3S_64 DUMP(p, nmi_emergency_sp, "%-*px"); DUMP(p, mc_emergency_sp, "%-*px"); DUMP(p, in_nmi, "%#-*x"); DUMP(p, in_mce, "%#-*x"); DUMP(p, hmi_event_available, "%#-*x"); #endif DUMP(p, data_offset, "%#-*llx"); DUMP(p, hw_cpu_id, "%#-*x"); DUMP(p, cpu_start, "%#-*x"); DUMP(p, kexec_state, "%#-*x"); #ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_PPC_64S_HASH_MMU if (!early_radix_enabled()) { for (i = 0; i < SLB_NUM_BOLTED; i++) { u64 esid, vsid; if (!p->slb_shadow_ptr) continue; esid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].esid); vsid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].vsid); if (esid || vsid) { printf(" %-*s[%d] = 0x%016llx 0x%016llx\n", 22, "slb_shadow", i, esid, vsid); } } DUMP(p, vmalloc_sllp, "%#-*x"); DUMP(p, stab_rr, "%#-*x"); DUMP(p, slb_used_bitmap, "%#-*x"); DUMP(p, slb_kern_bitmap, "%#-*x"); if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) { DUMP(p, slb_cache_ptr, "%#-*x"); for (i = 0; i < SLB_CACHE_ENTRIES; i++) printf(" %-*s[%d] = 0x%016x\n", 22, "slb_cache", i, p->slb_cache[i]); } } #endif DUMP(p, rfi_flush_fallback_area, "%-*px"); #endif DUMP(p, dscr_default, "%#-*llx"); #ifdef CONFIG_PPC_BOOK3E_64 DUMP(p, pgd, "%-*px"); DUMP(p, kernel_pgd, "%-*px"); DUMP(p, tcd_ptr, "%-*px"); DUMP(p, mc_kstack, "%-*px"); DUMP(p, crit_kstack, "%-*px"); DUMP(p, dbg_kstack, "%-*px"); #endif DUMP(p, __current, "%-*px"); DUMP(p, kstack, "%#-*llx"); printf(" %-*s = 0x%016llx\n", 25, "kstack_base", p->kstack & ~(THREAD_SIZE - 1)); #ifdef CONFIG_STACKPROTECTOR DUMP(p, canary, "%#-*lx"); #endif DUMP(p, saved_r1, "%#-*llx"); #ifdef CONFIG_PPC_BOOK3E_64 DUMP(p, trap_save, "%#-*x"); #endif DUMP(p, irq_soft_mask, "%#-*x"); DUMP(p, irq_happened, "%#-*x"); #ifdef CONFIG_MMIOWB DUMP(p, mmiowb_state.nesting_count, "%#-*x"); DUMP(p, mmiowb_state.mmiowb_pending, "%#-*x"); #endif DUMP(p, irq_work_pending, "%#-*x"); DUMP(p, sprg_vdso, "%#-*llx"); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM DUMP(p, tm_scratch, "%#-*llx"); #endif #ifdef CONFIG_PPC_POWERNV DUMP(p, idle_state, "%#-*lx"); if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) { DUMP(p, thread_idle_state, "%#-*x"); DUMP(p, subcore_sibling_mask, "%#-*x"); } else { #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE DUMP(p, requested_psscr, "%#-*llx"); DUMP(p, dont_stop.counter, "%#-*x"); #endif } #endif DUMP(p, accounting.utime, "%#-*lx"); DUMP(p, accounting.stime, "%#-*lx"); #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME DUMP(p, accounting.utime_scaled, "%#-*lx"); #endif DUMP(p, accounting.starttime, "%#-*lx"); DUMP(p, accounting.starttime_user, "%#-*lx"); #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME DUMP(p, accounting.startspurr, "%#-*lx"); DUMP(p, accounting.utime_sspurr, "%#-*lx"); #endif DUMP(p, accounting.steal_time, "%#-*lx"); #undef DUMP catch_memory_errors = 0; sync(); } static void dump_all_pacas(void) { int cpu; if (num_possible_cpus() == 0) { printf("No possible cpus, use 'dp #' to dump individual cpus\n"); return; } for_each_possible_cpu(cpu) dump_one_paca(cpu); } static void dump_pacas(void) { unsigned long num; int c; c = inchar(); if (c == 
'a') { dump_all_pacas(); return; } termch = c; /* Put c back, it wasn't 'a' */ if (scanhex(&num)) dump_one_paca(num); else dump_one_paca(xmon_owner); } #endif #ifdef CONFIG_PPC_POWERNV static void dump_one_xive(int cpu) { unsigned int hwid = get_hard_smp_processor_id(cpu); bool hv = cpu_has_feature(CPU_FTR_HVMODE); if (hv) { opal_xive_dump(XIVE_DUMP_TM_HYP, hwid); opal_xive_dump(XIVE_DUMP_TM_POOL, hwid); opal_xive_dump(XIVE_DUMP_TM_OS, hwid); opal_xive_dump(XIVE_DUMP_TM_USER, hwid); opal_xive_dump(XIVE_DUMP_VP, hwid); opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid); } if (setjmp(bus_error_jmp) != 0) { catch_memory_errors = 0; printf("*** Error dumping xive on cpu %d\n", cpu); return; } catch_memory_errors = 1; sync(); xmon_xive_do_dump(cpu); sync(); __delay(200); catch_memory_errors = 0; } static void dump_all_xives(void) { int cpu; if (num_online_cpus() == 0) { printf("No possible cpus, use 'dx #' to dump individual cpus\n"); return; } for_each_online_cpu(cpu) dump_one_xive(cpu); } static void dump_xives(void) { unsigned long num; int c; if (!xive_enabled()) { printf("Xive disabled on this system\n"); return; } c = inchar(); if (c == 'a') { dump_all_xives(); return; } else if (c == 'i') { if (scanhex(&num)) xmon_xive_get_irq_config(num, NULL); else xmon_xive_get_irq_all(); return; } termch = c; /* Put c back, it wasn't 'a' */ if (scanhex(&num)) dump_one_xive(num); else dump_one_xive(xmon_owner); } #endif /* CONFIG_PPC_POWERNV */ static void dump_by_size(unsigned long addr, long count, int size) { unsigned char temp[16]; int i, j; u64 val; count = ALIGN(count, 16); for (i = 0; i < count; i += 16, addr += 16) { printf(REG, addr); if (mread(addr, temp, 16) != 16) { printf("\nFaulted reading %d bytes from 0x"REG"\n", 16, addr); return; } for (j = 0; j < 16; j += size) { putchar(' '); switch (size) { case 1: val = temp[j]; break; case 2: val = *(u16 *)&temp[j]; break; case 4: val = *(u32 *)&temp[j]; break; case 8: val = *(u64 *)&temp[j]; break; default: val = 0; } printf("%0*llx", size * 2, val); } printf(" |"); for (j = 0; j < 16; ++j) { val = temp[j]; putchar(' ' <= val && val <= '~' ? val : '.'); } printf("|\n"); } } static void dump(void) { static char last[] = { "d?\n" }; int c; c = inchar(); #ifdef CONFIG_PPC64 if (c == 'p') { xmon_start_pagination(); dump_pacas(); xmon_end_pagination(); return; } #endif #ifdef CONFIG_PPC_POWERNV if (c == 'x') { xmon_start_pagination(); dump_xives(); xmon_end_pagination(); return; } #endif if (c == 't') { dump_tracing(); return; } if (c == '\n') termch = c; scanhex((void *)&adrs); if (termch != '\n') termch = 0; if (c == 'i') { scanhex(&nidump); if (nidump == 0) nidump = 16; else if (nidump > MAX_IDUMP) nidump = MAX_IDUMP; adrs += ppc_inst_dump(adrs, nidump, 1); last_cmd = "di\n"; } else if (c == 'l') { dump_log_buf(); } else if (c == 'o') { dump_opal_msglog(); } else if (c == 'v') { /* dump virtual to physical translation */ show_pte(adrs); } else if (c == 'r') { scanhex(&ndump); if (ndump == 0) ndump = 64; xmon_rawdump(adrs, ndump); adrs += ndump; last_cmd = "dr\n"; } else { scanhex(&ndump); if (ndump == 0) ndump = 64; else if (ndump > MAX_DUMP) ndump = MAX_DUMP; switch (c) { case '8': case '4': case '2': case '1': ndump = ALIGN(ndump, 16); dump_by_size(adrs, ndump, c - '0'); last[1] = c; last_cmd = last; break; default: prdump(adrs, ndump); last_cmd = "d\n"; } adrs += ndump; } } static void prdump(unsigned long adrs, long ndump) { long n, m, c, r, nr; unsigned char temp[16]; for (n = ndump; n > 0;) { printf(REG, adrs); putchar(' '); r = n < 16? 
n: 16; nr = mread(adrs, temp, r); adrs += nr; for (m = 0; m < r; ++m) { if ((m & (sizeof(long) - 1)) == 0 && m > 0) putchar(' '); if (m < nr) printf("%.2x", temp[m]); else printf("%s", fault_chars[fault_type]); } for (; m < 16; ++m) { if ((m & (sizeof(long) - 1)) == 0) putchar(' '); printf(" "); } printf(" |"); for (m = 0; m < r; ++m) { if (m < nr) { c = temp[m]; putchar(' ' <= c && c <= '~'? c: '.'); } else putchar(' '); } n -= r; for (; m < 16; ++m) putchar(' '); printf("|\n"); if (nr < r) break; } } typedef int (*instruction_dump_func)(unsigned long inst, unsigned long addr); static int generic_inst_dump(unsigned long adr, long count, int praddr, instruction_dump_func dump_func) { int nr, dotted; unsigned long first_adr; ppc_inst_t inst, last_inst = ppc_inst(0); dotted = 0; for (first_adr = adr; count > 0; --count, adr += ppc_inst_len(inst)) { nr = mread_instr(adr, &inst); if (nr == 0) { if (praddr) { const char *x = fault_chars[fault_type]; printf(REG" %s%s%s%s\n", adr, x, x, x, x); } break; } if (adr > first_adr && ppc_inst_equal(inst, last_inst)) { if (!dotted) { printf(" ...\n"); dotted = 1; } continue; } dotted = 0; last_inst = inst; if (praddr) printf(REG" %08lx", adr, ppc_inst_as_ulong(inst)); printf("\t"); if (!ppc_inst_prefixed(inst)) dump_func(ppc_inst_val(inst), adr); else dump_func(ppc_inst_as_ulong(inst), adr); printf("\n"); } return adr - first_adr; } static int ppc_inst_dump(unsigned long adr, long count, int praddr) { return generic_inst_dump(adr, count, praddr, print_insn_powerpc); } void print_address(unsigned long addr) { xmon_print_symbol(addr, "\t# ", ""); } static void dump_log_buf(void) { struct kmsg_dump_iter iter; static unsigned char buf[1024]; size_t len; if (setjmp(bus_error_jmp) != 0) { printf("Error dumping printk buffer!\n"); return; } catch_memory_errors = 1; sync(); kmsg_dump_rewind(&iter); xmon_start_pagination(); while (kmsg_dump_get_line(&iter, false, buf, sizeof(buf), &len)) { buf[len] = '\0'; printf("%s", buf); } xmon_end_pagination(); sync(); /* wait a little while to see if we get a machine check */ __delay(200); catch_memory_errors = 0; } #ifdef CONFIG_PPC_POWERNV static void dump_opal_msglog(void) { unsigned char buf[128]; ssize_t res; volatile loff_t pos = 0; if (!firmware_has_feature(FW_FEATURE_OPAL)) { printf("Machine is not running OPAL firmware.\n"); return; } if (setjmp(bus_error_jmp) != 0) { printf("Error dumping OPAL msglog!\n"); return; } catch_memory_errors = 1; sync(); xmon_start_pagination(); while ((res = opal_msglog_copy(buf, pos, sizeof(buf) - 1))) { if (res < 0) { printf("Error dumping OPAL msglog! Error: %zd\n", res); break; } buf[res] = '\0'; printf("%s", buf); pos += res; } xmon_end_pagination(); sync(); /* wait a little while to see if we get a machine check */ __delay(200); catch_memory_errors = 0; } #endif /* * Memory operations - move, set, print differences */ static unsigned long mdest; /* destination address */ static unsigned long msrc; /* source address */ static unsigned long mval; /* byte value to set memory to */ static unsigned long mcount; /* # bytes to affect */ static unsigned long mdiffs; /* max # differences to print */ static void memops(int cmd) { scanhex((void *)&mdest); if( termch != '\n' ) termch = 0; scanhex((void *)(cmd == 's'? 
&mval: &msrc)); if( termch != '\n' ) termch = 0; scanhex((void *)&mcount); switch( cmd ){ case 'm': if (xmon_is_ro) { printf(xmon_ro_msg); break; } memmove((void *)mdest, (void *)msrc, mcount); break; case 's': if (xmon_is_ro) { printf(xmon_ro_msg); break; } memset((void *)mdest, mval, mcount); break; case 'd': if( termch != '\n' ) termch = 0; scanhex((void *)&mdiffs); memdiffs((unsigned char *)mdest, (unsigned char *)msrc, mcount, mdiffs); break; } } static void memdiffs(unsigned char *p1, unsigned char *p2, unsigned nb, unsigned maxpr) { unsigned n, prt; prt = 0; for( n = nb; n > 0; --n ) if( *p1++ != *p2++ ) if( ++prt <= maxpr ) printf("%px %.2x # %px %.2x\n", p1 - 1, p1[-1], p2 - 1, p2[-1]); if( prt > maxpr ) printf("Total of %d differences\n", prt); } static unsigned mend; static unsigned mask; static void memlocate(void) { unsigned a, n; unsigned char val[4]; last_cmd = "ml"; scanhex((void *)&mdest); if (termch != '\n') { termch = 0; scanhex((void *)&mend); if (termch != '\n') { termch = 0; scanhex((void *)&mval); mask = ~0; if (termch != '\n') termch = 0; scanhex((void *)&mask); } } n = 0; for (a = mdest; a < mend; a += 4) { if (mread(a, val, 4) == 4 && ((GETWORD(val) ^ mval) & mask) == 0) { printf("%.16x: %.16x\n", a, GETWORD(val)); if (++n >= 10) break; } } } static unsigned long mskip = 0x1000; static unsigned long mlim = 0xffffffff; static void memzcan(void) { unsigned char v; unsigned a; int ok, ook; scanhex(&mdest); if (termch != '\n') termch = 0; scanhex(&mskip); if (termch != '\n') termch = 0; scanhex(&mlim); ook = 0; for (a = mdest; a < mlim; a += mskip) { ok = mread(a, &v, 1); if (ok && !ook) { printf("%.8x .. ", a); } else if (!ok && ook) printf("%.8lx\n", a - mskip); ook = ok; if (a + mskip < a) break; } if (ook) printf("%.8lx\n", a - mskip); } static void show_task(struct task_struct *volatile tsk) { unsigned int p_state = READ_ONCE(tsk->__state); char state; /* * Cloned from kdb_task_state_char(), which is not entirely * appropriate for calling from xmon. This could be moved * to a common, generic, routine used by both. */ state = (p_state == TASK_RUNNING) ? 'R' : (p_state & TASK_UNINTERRUPTIBLE) ? 'D' : (p_state & TASK_STOPPED) ? 'T' : (p_state & TASK_TRACED) ? 'C' : (tsk->exit_state & EXIT_ZOMBIE) ? 'Z' : (tsk->exit_state & EXIT_DEAD) ? 'E' : (p_state & TASK_INTERRUPTIBLE) ? 'S' : '?'; printf("%16px %16lx %16px %6d %6d %c %2d %s\n", tsk, tsk->thread.ksp, tsk->thread.regs, tsk->pid, rcu_dereference(tsk->parent)->pid, state, task_cpu(tsk), tsk->comm); } #ifdef CONFIG_PPC_BOOK3S_64 static void format_pte(void *ptep, unsigned long pte) { pte_t entry = __pte(pte); printf("ptep @ 0x%016lx = 0x%016lx\n", (unsigned long)ptep, pte); printf("Maps physical address = 0x%016lx\n", pte & PTE_RPN_MASK); printf("Flags = %s%s%s%s%s\n", pte_young(entry) ? "Accessed " : "", pte_dirty(entry) ? "Dirty " : "", pte_read(entry) ? "Read " : "", pte_write(entry) ? "Write " : "", pte_exec(entry) ? 
"Exec " : ""); } static void show_pte(unsigned long addr) { unsigned long tskv = 0; struct task_struct *volatile tsk = NULL; struct mm_struct *volatile mm; pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; if (!scanhex(&tskv)) mm = &init_mm; else tsk = (struct task_struct *)tskv; if (tsk == NULL) mm = &init_mm; else mm = tsk->active_mm; if (setjmp(bus_error_jmp) != 0) { catch_memory_errors = 0; printf("*** Error dumping pte for task %px\n", tsk); return; } catch_memory_errors = 1; sync(); if (mm == &init_mm) pgdp = pgd_offset_k(addr); else pgdp = pgd_offset(mm, addr); p4dp = p4d_offset(pgdp, addr); if (p4d_none(*p4dp)) { printf("No valid P4D\n"); return; } if (p4d_is_leaf(*p4dp)) { format_pte(p4dp, p4d_val(*p4dp)); return; } printf("p4dp @ 0x%px = 0x%016lx\n", p4dp, p4d_val(*p4dp)); pudp = pud_offset(p4dp, addr); if (pud_none(*pudp)) { printf("No valid PUD\n"); return; } if (pud_is_leaf(*pudp)) { format_pte(pudp, pud_val(*pudp)); return; } printf("pudp @ 0x%px = 0x%016lx\n", pudp, pud_val(*pudp)); pmdp = pmd_offset(pudp, addr); if (pmd_none(*pmdp)) { printf("No valid PMD\n"); return; } if (pmd_is_leaf(*pmdp)) { format_pte(pmdp, pmd_val(*pmdp)); return; } printf("pmdp @ 0x%px = 0x%016lx\n", pmdp, pmd_val(*pmdp)); ptep = pte_offset_map(pmdp, addr); if (!ptep || pte_none(*ptep)) { if (ptep) pte_unmap(ptep); printf("no valid PTE\n"); return; } format_pte(ptep, pte_val(*ptep)); pte_unmap(ptep); sync(); __delay(200); catch_memory_errors = 0; } #else static void show_pte(unsigned long addr) { printf("show_pte not yet implemented\n"); } #endif /* CONFIG_PPC_BOOK3S_64 */ static void show_tasks(void) { unsigned long tskv; struct task_struct *volatile tsk = NULL; printf(" task_struct ->thread.ksp ->thread.regs PID PPID S P CMD\n"); if (scanhex(&tskv)) tsk = (struct task_struct *)tskv; if (setjmp(bus_error_jmp) != 0) { catch_memory_errors = 0; printf("*** Error dumping task %px\n", tsk); return; } catch_memory_errors = 1; sync(); if (tsk) show_task(tsk); else for_each_process(tsk) show_task(tsk); sync(); __delay(200); catch_memory_errors = 0; } static void proccall(void) { unsigned long args[8]; unsigned long ret; int i; typedef unsigned long (*callfunc_t)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); callfunc_t func; if (!scanhex(&adrs)) return; if (termch != '\n') termch = 0; for (i = 0; i < 8; ++i) args[i] = 0; for (i = 0; i < 8; ++i) { if (!scanhex(&args[i]) || termch == '\n') break; termch = 0; } func = (callfunc_t) adrs; ret = 0; if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); ret = func(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7]); sync(); printf("return value is 0x%lx\n", ret); } else { printf("*** %x exception occurred\n", fault_except); } catch_memory_errors = 0; } /* Input scanning routines */ int skipbl(void) { int c; if( termch != 0 ){ c = termch; termch = 0; } else c = inchar(); while( c == ' ' || c == '\t' ) c = inchar(); return c; } #define N_PTREGS 44 static const char *regnames[N_PTREGS] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", "pc", "msr", "or3", "ctr", "lr", "xer", "ccr", #ifdef CONFIG_PPC64 "softe", #else "mq", #endif "trap", "dar", "dsisr", "res" }; int scanhex(unsigned long *vp) { int c, d; unsigned long v; c = skipbl(); if (c == '%') { /* parse register name */ char 
regname[8]; int i; for (i = 0; i < sizeof(regname) - 1; ++i) { c = inchar(); if (!isalnum(c)) { termch = c; break; } regname[i] = c; } regname[i] = 0; i = match_string(regnames, N_PTREGS, regname); if (i < 0) { printf("invalid register name '%%%s'\n", regname); return 0; } if (xmon_regs == NULL) { printf("regs not available\n"); return 0; } *vp = ((unsigned long *)xmon_regs)[i]; return 1; } /* skip leading "0x" if any */ if (c == '0') { c = inchar(); if (c == 'x') { c = inchar(); } else { d = hexdigit(c); if (d == EOF) { termch = c; *vp = 0; return 1; } } } else if (c == '$') { int i; for (i=0; i<63; i++) { c = inchar(); if (isspace(c) || c == '\0') { termch = c; break; } tmpstr[i] = c; } tmpstr[i++] = 0; *vp = 0; if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); *vp = kallsyms_lookup_name(tmpstr); sync(); } catch_memory_errors = 0; if (!(*vp)) { printf("unknown symbol '%s'\n", tmpstr); return 0; } return 1; } d = hexdigit(c); if (d == EOF) { termch = c; return 0; } v = 0; do { v = (v << 4) + d; c = inchar(); d = hexdigit(c); } while (d != EOF); termch = c; *vp = v; return 1; } static void scannl(void) { int c; c = termch; termch = 0; while( c != '\n' ) c = inchar(); } static int hexdigit(int c) { if( '0' <= c && c <= '9' ) return c - '0'; if( 'A' <= c && c <= 'F' ) return c - ('A' - 10); if( 'a' <= c && c <= 'f' ) return c - ('a' - 10); return EOF; } void getstring(char *s, int size) { int c; c = skipbl(); if (c == '\n') { *s = 0; return; } do { if( size > 1 ){ *s++ = c; --size; } c = inchar(); } while( c != ' ' && c != '\t' && c != '\n' ); termch = c; *s = 0; } static char line[256]; static char *lineptr; static void flush_input(void) { lineptr = NULL; } static int inchar(void) { if (lineptr == NULL || *lineptr == 0) { if (xmon_gets(line, sizeof(line)) == NULL) { lineptr = NULL; return EOF; } lineptr = line; } return *lineptr++; } static void take_input(char *str) { lineptr = str; } static void symbol_lookup(void) { int type = inchar(); unsigned long addr, cpu; void __percpu *ptr = NULL; static char tmp[64]; switch (type) { case 'a': if (scanhex(&addr)) xmon_print_symbol(addr, ": ", "\n"); termch = 0; break; case 's': getstring(tmp, 64); if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); addr = kallsyms_lookup_name(tmp); if (addr) printf("%s: %lx\n", tmp, addr); else printf("Symbol '%s' not found.\n", tmp); sync(); } catch_memory_errors = 0; termch = 0; break; case 'p': getstring(tmp, 64); if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); ptr = (void __percpu *)kallsyms_lookup_name(tmp); sync(); } if (ptr && ptr >= (void __percpu *)__per_cpu_start && ptr < (void __percpu *)__per_cpu_end) { if (scanhex(&cpu) && cpu < num_possible_cpus()) { addr = (unsigned long)per_cpu_ptr(ptr, cpu); } else { cpu = raw_smp_processor_id(); addr = (unsigned long)this_cpu_ptr(ptr); } printf("%s for cpu 0x%lx: %lx\n", tmp, cpu, addr); } else { printf("Percpu symbol '%s' not found.\n", tmp); } catch_memory_errors = 0; termch = 0; break; } } /* Print an address in numeric and symbolic form (if possible) */ static void xmon_print_symbol(unsigned long address, const char *mid, const char *after) { char *modname; const char *volatile name = NULL; unsigned long offset, size; printf(REG, address); if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); name = kallsyms_lookup(address, &size, &offset, &modname, tmpstr); sync(); /* wait a little while to see if we get a machine check */ __delay(200); } catch_memory_errors = 0; if (name) { 
printf("%s%s+%#lx/%#lx", mid, name, offset, size); if (modname) printf(" [%s]", modname); } printf("%s", after); } #ifdef CONFIG_PPC_64S_HASH_MMU void dump_segments(void) { int i; unsigned long esid,vsid; unsigned long llp; printf("SLB contents of cpu 0x%x\n", smp_processor_id()); for (i = 0; i < mmu_slb_size; i++) { asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i)); asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i)); if (!esid && !vsid) continue; printf("%02d %016lx %016lx", i, esid, vsid); if (!(esid & SLB_ESID_V)) { printf("\n"); continue; } llp = vsid & SLB_VSID_LLP; if (vsid & SLB_VSID_B_1T) { printf(" 1T ESID=%9lx VSID=%13lx LLP:%3lx \n", GET_ESID_1T(esid), (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp); } else { printf(" 256M ESID=%9lx VSID=%13lx LLP:%3lx \n", GET_ESID(esid), (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp); } } } #endif #ifdef CONFIG_PPC_BOOK3S_32 void dump_segments(void) { int i; printf("sr0-15 ="); for (i = 0; i < 16; ++i) printf(" %x", mfsr(i << 28)); printf("\n"); } #endif #ifdef CONFIG_44x static void dump_tlb_44x(void) { int i; for (i = 0; i < PPC44x_TLB_SIZE; i++) { unsigned long w0,w1,w2; asm volatile("tlbre %0,%1,0" : "=r" (w0) : "r" (i)); asm volatile("tlbre %0,%1,1" : "=r" (w1) : "r" (i)); asm volatile("tlbre %0,%1,2" : "=r" (w2) : "r" (i)); printf("[%02x] %08lx %08lx %08lx ", i, w0, w1, w2); if (w0 & PPC44x_TLB_VALID) { printf("V %08lx -> %01lx%08lx %c%c%c%c%c", w0 & PPC44x_TLB_EPN_MASK, w1 & PPC44x_TLB_ERPN_MASK, w1 & PPC44x_TLB_RPN_MASK, (w2 & PPC44x_TLB_W) ? 'W' : 'w', (w2 & PPC44x_TLB_I) ? 'I' : 'i', (w2 & PPC44x_TLB_M) ? 'M' : 'm', (w2 & PPC44x_TLB_G) ? 'G' : 'g', (w2 & PPC44x_TLB_E) ? 'E' : 'e'); } printf("\n"); } } #endif /* CONFIG_44x */ #ifdef CONFIG_PPC_BOOK3E_64 static void dump_tlb_book3e(void) { u32 mmucfg; u64 ramask; int i, tlb, ntlbs, pidsz, lpidsz, rasz; int mmu_version; static const char *pgsz_names[] = { " 1K", " 2K", " 4K", " 8K", " 16K", " 32K", " 64K", "128K", "256K", "512K", " 1M", " 2M", " 4M", " 8M", " 16M", " 32M", " 64M", "128M", "256M", "512M", " 1G", " 2G", " 4G", " 8G", " 16G", " 32G", " 64G", "128G", "256G", "512G", " 1T", " 2T", }; /* Gather some infos about the MMU */ mmucfg = mfspr(SPRN_MMUCFG); mmu_version = (mmucfg & 3) + 1; ntlbs = ((mmucfg >> 2) & 3) + 1; pidsz = ((mmucfg >> 6) & 0x1f) + 1; lpidsz = (mmucfg >> 24) & 0xf; rasz = (mmucfg >> 16) & 0x7f; printf("Book3E MMU MAV=%d.0,%d TLBs,%d-bit PID,%d-bit LPID,%d-bit RA\n", mmu_version, ntlbs, pidsz, lpidsz, rasz); ramask = (1ull << rasz) - 1; for (tlb = 0; tlb < ntlbs; tlb++) { u32 tlbcfg; int nent, assoc, new_cc = 1; printf("TLB %d:\n------\n", tlb); switch(tlb) { case 0: tlbcfg = mfspr(SPRN_TLB0CFG); break; case 1: tlbcfg = mfspr(SPRN_TLB1CFG); break; case 2: tlbcfg = mfspr(SPRN_TLB2CFG); break; case 3: tlbcfg = mfspr(SPRN_TLB3CFG); break; default: printf("Unsupported TLB number !\n"); continue; } nent = tlbcfg & 0xfff; assoc = (tlbcfg >> 24) & 0xff; for (i = 0; i < nent; i++) { u32 mas0 = MAS0_TLBSEL(tlb); u32 mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K); u64 mas2 = 0; u64 mas7_mas3; int esel = i, cc = i; if (assoc != 0) { cc = i / assoc; esel = i % assoc; mas2 = cc * 0x1000; } mas0 |= MAS0_ESEL(esel); mtspr(SPRN_MAS0, mas0); mtspr(SPRN_MAS1, mas1); mtspr(SPRN_MAS2, mas2); asm volatile("tlbre 0,0,0" : : : "memory"); mas1 = mfspr(SPRN_MAS1); mas2 = mfspr(SPRN_MAS2); mas7_mas3 = mfspr(SPRN_MAS7_MAS3); if (assoc && (i % assoc) == 0) new_cc = 1; if (!(mas1 & MAS1_VALID)) continue; if (assoc == 0) printf("%04x- ", i); else if (new_cc) printf("%04x-%c", cc, 'A' + esel); else 
printf(" |%c", 'A' + esel); new_cc = 0; printf(" %016llx %04x %s %c%c AS%c", mas2 & ~0x3ffull, (mas1 >> 16) & 0x3fff, pgsz_names[(mas1 >> 7) & 0x1f], mas1 & MAS1_IND ? 'I' : ' ', mas1 & MAS1_IPROT ? 'P' : ' ', mas1 & MAS1_TS ? '1' : '0'); printf(" %c%c%c%c%c%c%c", mas2 & MAS2_X0 ? 'a' : ' ', mas2 & MAS2_X1 ? 'v' : ' ', mas2 & MAS2_W ? 'w' : ' ', mas2 & MAS2_I ? 'i' : ' ', mas2 & MAS2_M ? 'm' : ' ', mas2 & MAS2_G ? 'g' : ' ', mas2 & MAS2_E ? 'e' : ' '); printf(" %016llx", mas7_mas3 & ramask & ~0x7ffull); if (mas1 & MAS1_IND) printf(" %s\n", pgsz_names[(mas7_mas3 >> 1) & 0x1f]); else printf(" U%c%c%c S%c%c%c\n", mas7_mas3 & MAS3_UX ? 'x' : ' ', mas7_mas3 & MAS3_UW ? 'w' : ' ', mas7_mas3 & MAS3_UR ? 'r' : ' ', mas7_mas3 & MAS3_SX ? 'x' : ' ', mas7_mas3 & MAS3_SW ? 'w' : ' ', mas7_mas3 & MAS3_SR ? 'r' : ' '); } } } #endif /* CONFIG_PPC_BOOK3E_64 */ static void xmon_init(int enable) { if (enable) { __debugger = xmon; __debugger_ipi = xmon_ipi; __debugger_bpt = xmon_bpt; __debugger_sstep = xmon_sstep; __debugger_iabr_match = xmon_iabr_match; __debugger_break_match = xmon_break_match; __debugger_fault_handler = xmon_fault_handler; } else { __debugger = NULL; __debugger_ipi = NULL; __debugger_bpt = NULL; __debugger_sstep = NULL; __debugger_iabr_match = NULL; __debugger_break_match = NULL; __debugger_fault_handler = NULL; } } #ifdef CONFIG_MAGIC_SYSRQ static void sysrq_handle_xmon(u8 key) { if (xmon_is_locked_down()) { clear_all_bpt(); xmon_init(0); return; } /* ensure xmon is enabled */ xmon_init(1); debugger(get_irq_regs()); if (!xmon_on) xmon_init(0); } static const struct sysrq_key_op sysrq_xmon_op = { .handler = sysrq_handle_xmon, .help_msg = "xmon(x)", .action_msg = "Entering xmon", }; static int __init setup_xmon_sysrq(void) { register_sysrq_key('x', &sysrq_xmon_op); return 0; } device_initcall(setup_xmon_sysrq); #endif /* CONFIG_MAGIC_SYSRQ */ static void clear_all_bpt(void) { int i; /* clear/unpatch all breakpoints */ remove_bpts(); remove_cpu_bpts(); /* Disable all breakpoints */ for (i = 0; i < NBPTS; ++i) bpts[i].enabled = 0; /* Clear any data or iabr breakpoints */ iabr = NULL; for (i = 0; i < nr_wp_slots(); i++) dabr[i].enabled = 0; } #ifdef CONFIG_DEBUG_FS static int xmon_dbgfs_set(void *data, u64 val) { xmon_on = !!val; xmon_init(xmon_on); /* make sure all breakpoints removed when disabling */ if (!xmon_on) { clear_all_bpt(); get_output_lock(); printf("xmon: All breakpoints cleared\n"); release_output_lock(); } return 0; } static int xmon_dbgfs_get(void *data, u64 *val) { *val = xmon_on; return 0; } DEFINE_SIMPLE_ATTRIBUTE(xmon_dbgfs_ops, xmon_dbgfs_get, xmon_dbgfs_set, "%llu\n"); static int __init setup_xmon_dbgfs(void) { debugfs_create_file("xmon", 0600, arch_debugfs_dir, NULL, &xmon_dbgfs_ops); return 0; } device_initcall(setup_xmon_dbgfs); #endif /* CONFIG_DEBUG_FS */ static int xmon_early __initdata; static int __init early_parse_xmon(char *p) { if (xmon_is_locked_down()) { xmon_init(0); xmon_early = 0; xmon_on = 0; } else if (!p || strncmp(p, "early", 5) == 0) { /* just "xmon" is equivalent to "xmon=early" */ xmon_init(1); xmon_early = 1; xmon_on = 1; } else if (strncmp(p, "on", 2) == 0) { xmon_init(1); xmon_on = 1; } else if (strncmp(p, "rw", 2) == 0) { xmon_init(1); xmon_on = 1; xmon_is_ro = false; } else if (strncmp(p, "ro", 2) == 0) { xmon_init(1); xmon_on = 1; xmon_is_ro = true; } else if (strncmp(p, "off", 3) == 0) xmon_on = 0; else return 1; return 0; } early_param("xmon", early_parse_xmon); void __init xmon_setup(void) { if (xmon_on) xmon_init(1); if (xmon_early) 
debugger(NULL); } #ifdef CONFIG_SPU_BASE struct spu_info { struct spu *spu; u64 saved_mfc_sr1_RW; u32 saved_spu_runcntl_RW; unsigned long dump_addr; u8 stopped_ok; }; #define XMON_NUM_SPUS 16 /* Enough for current hardware */ static struct spu_info spu_info[XMON_NUM_SPUS]; void __init xmon_register_spus(struct list_head *list) { struct spu *spu; list_for_each_entry(spu, list, full_list) { if (spu->number >= XMON_NUM_SPUS) { WARN_ON(1); continue; } spu_info[spu->number].spu = spu; spu_info[spu->number].stopped_ok = 0; spu_info[spu->number].dump_addr = (unsigned long) spu_info[spu->number].spu->local_store; } } static void stop_spus(void) { struct spu *spu; volatile int i; u64 tmp; for (i = 0; i < XMON_NUM_SPUS; i++) { if (!spu_info[i].spu) continue; if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); spu = spu_info[i].spu; spu_info[i].saved_spu_runcntl_RW = in_be32(&spu->problem->spu_runcntl_RW); tmp = spu_mfc_sr1_get(spu); spu_info[i].saved_mfc_sr1_RW = tmp; tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; spu_mfc_sr1_set(spu, tmp); sync(); __delay(200); spu_info[i].stopped_ok = 1; printf("Stopped spu %.2d (was %s)\n", i, spu_info[i].saved_spu_runcntl_RW ? "running" : "stopped"); } else { catch_memory_errors = 0; printf("*** Error stopping spu %.2d\n", i); } catch_memory_errors = 0; } } static void restart_spus(void) { struct spu *spu; volatile int i; for (i = 0; i < XMON_NUM_SPUS; i++) { if (!spu_info[i].spu) continue; if (!spu_info[i].stopped_ok) { printf("*** Error, spu %d was not successfully stopped" ", not restarting\n", i); continue; } if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); spu = spu_info[i].spu; spu_mfc_sr1_set(spu, spu_info[i].saved_mfc_sr1_RW); out_be32(&spu->problem->spu_runcntl_RW, spu_info[i].saved_spu_runcntl_RW); sync(); __delay(200); printf("Restarted spu %.2d\n", i); } else { catch_memory_errors = 0; printf("*** Error restarting spu %.2d\n", i); } catch_memory_errors = 0; } } #define DUMP_WIDTH 23 #define DUMP_VALUE(format, field, value) \ do { \ if (setjmp(bus_error_jmp) == 0) { \ catch_memory_errors = 1; \ sync(); \ printf(" %-*s = "format"\n", DUMP_WIDTH, \ #field, value); \ sync(); \ __delay(200); \ } else { \ catch_memory_errors = 0; \ printf(" %-*s = *** Error reading field.\n", \ DUMP_WIDTH, #field); \ } \ catch_memory_errors = 0; \ } while (0) #define DUMP_FIELD(obj, format, field) \ DUMP_VALUE(format, field, obj->field) static void dump_spu_fields(struct spu *spu) { printf("Dumping spu fields at address %p:\n", spu); DUMP_FIELD(spu, "0x%x", number); DUMP_FIELD(spu, "%s", name); DUMP_FIELD(spu, "0x%lx", local_store_phys); DUMP_FIELD(spu, "0x%p", local_store); DUMP_FIELD(spu, "0x%lx", ls_size); DUMP_FIELD(spu, "0x%x", node); DUMP_FIELD(spu, "0x%lx", flags); DUMP_FIELD(spu, "%llu", class_0_pending); DUMP_FIELD(spu, "0x%llx", class_0_dar); DUMP_FIELD(spu, "0x%llx", class_1_dar); DUMP_FIELD(spu, "0x%llx", class_1_dsisr); DUMP_FIELD(spu, "0x%x", irqs[0]); DUMP_FIELD(spu, "0x%x", irqs[1]); DUMP_FIELD(spu, "0x%x", irqs[2]); DUMP_FIELD(spu, "0x%x", slb_replace); DUMP_FIELD(spu, "%d", pid); DUMP_FIELD(spu, "0x%p", mm); DUMP_FIELD(spu, "0x%p", ctx); DUMP_FIELD(spu, "0x%p", rq); DUMP_FIELD(spu, "0x%llx", timestamp); DUMP_FIELD(spu, "0x%lx", problem_phys); DUMP_FIELD(spu, "0x%p", problem); DUMP_VALUE("0x%x", problem->spu_runcntl_RW, in_be32(&spu->problem->spu_runcntl_RW)); DUMP_VALUE("0x%x", problem->spu_status_R, in_be32(&spu->problem->spu_status_R)); DUMP_VALUE("0x%x", problem->spu_npc_RW, in_be32(&spu->problem->spu_npc_RW)); 
DUMP_FIELD(spu, "0x%p", priv2); DUMP_FIELD(spu, "0x%p", pdata); } static int spu_inst_dump(unsigned long adr, long count, int praddr) { return generic_inst_dump(adr, count, praddr, print_insn_spu); } static void dump_spu_ls(unsigned long num, int subcmd) { unsigned long offset, addr, ls_addr; if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); ls_addr = (unsigned long)spu_info[num].spu->local_store; sync(); __delay(200); } else { catch_memory_errors = 0; printf("*** Error: accessing spu info for spu %ld\n", num); return; } catch_memory_errors = 0; if (scanhex(&offset)) addr = ls_addr + offset; else addr = spu_info[num].dump_addr; if (addr >= ls_addr + LS_SIZE) { printf("*** Error: address outside of local store\n"); return; } switch (subcmd) { case 'i': addr += spu_inst_dump(addr, 16, 1); last_cmd = "sdi\n"; break; default: prdump(addr, 64); addr += 64; last_cmd = "sd\n"; break; } spu_info[num].dump_addr = addr; } static int do_spu_cmd(void) { static unsigned long num = 0; int cmd, subcmd = 0; cmd = inchar(); switch (cmd) { case 's': stop_spus(); break; case 'r': restart_spus(); break; case 'd': subcmd = inchar(); if (isxdigit(subcmd) || subcmd == '\n') termch = subcmd; fallthrough; case 'f': scanhex(&num); if (num >= XMON_NUM_SPUS || !spu_info[num].spu) { printf("*** Error: invalid spu number\n"); return 0; } switch (cmd) { case 'f': dump_spu_fields(spu_info[num].spu); break; default: dump_spu_ls(num, subcmd); break; } break; default: return -1; } return 0; } #else /* ! CONFIG_SPU_BASE */ static int do_spu_cmd(void) { return -1; } #endif
linux-master
arch/powerpc/xmon/xmon.c
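/*
 * Editor's illustration (not part of the kernel sources above or below):
 * xmon wraps nearly every risky access in setjmp(bus_error_jmp) with
 * catch_memory_errors set, so that a fault during the access longjmp()s
 * back into the debugger instead of crashing it (see mread()/mwrite()
 * above). The sketch below is a plain userspace analogue of that pattern,
 * using POSIX signals in place of the kernel fault handler; every name in
 * it (probe_read, probe_fault, probe_jmp) is hypothetical.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static sigjmp_buf probe_jmp;

static void probe_fault(int sig)
{
	(void)sig;
	siglongjmp(probe_jmp, 1);	/* plays the role of longjmp(bus_error_jmp, 1) */
}

/* Copy up to 'size' bytes from 'adrs'; return bytes read, 0 on fault. */
static int probe_read(const void *adrs, void *buf, int size)
{
	struct sigaction sa, old_segv, old_bus;
	volatile int n = 0;		/* volatile, as in mread(), so the value
					 * is well defined after the longjmp */

	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = probe_fault;
	sigaction(SIGSEGV, &sa, &old_segv);
	sigaction(SIGBUS, &sa, &old_bus);

	if (sigsetjmp(probe_jmp, 1) == 0) {
		memcpy(buf, adrs, size);	/* may fault, like xmon's copy loop */
		n = size;
	}

	sigaction(SIGSEGV, &old_segv, NULL);
	sigaction(SIGBUS, &old_bus, NULL);
	return n;
}

int main(void)
{
	char buf[16] = { 0 };

	printf("valid read: %d bytes\n", probe_read("hello", buf, 6));
	printf("bad read:   %d bytes\n", probe_read((void *)1, buf, 6));
	return 0;
}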
// SPDX-License-Identifier: GPL-2.0-or-later /* Disassemble SPU instructions Copyright 2006 Free Software Foundation, Inc. This file is part of GDB, GAS, and the GNU binutils. */ #include <linux/string.h> #include "nonstdio.h" #include "ansidecl.h" #include "spu.h" #include "dis-asm.h" /* This file provides a disassembler function which uses the disassembler interface defined in dis-asm.h. */ extern const struct spu_opcode spu_opcodes[]; extern const int spu_num_opcodes; #define SPU_DISASM_TBL_SIZE (1 << 11) static const struct spu_opcode *spu_disassemble_table[SPU_DISASM_TBL_SIZE]; static void init_spu_disassemble (void) { int i; /* If two instructions have the same opcode then we prefer the first * one. In most cases it is just an alternate mnemonic. */ for (i = 0; i < spu_num_opcodes; i++) { int o = spu_opcodes[i].opcode; if (o >= SPU_DISASM_TBL_SIZE) continue; /* abort (); */ if (spu_disassemble_table[o] == 0) spu_disassemble_table[o] = &spu_opcodes[i]; } } /* Determine the instruction from the 10 least significant bits. */ static const struct spu_opcode * get_index_for_opcode (unsigned int insn) { const struct spu_opcode *index; unsigned int opcode = insn >> (32-11); /* Init the table. This assumes that element 0/opcode 0 (currently * NOP) is always used */ if (spu_disassemble_table[0] == 0) init_spu_disassemble (); if ((index = spu_disassemble_table[opcode & 0x780]) != 0 && index->insn_type == RRR) return index; if ((index = spu_disassemble_table[opcode & 0x7f0]) != 0 && (index->insn_type == RI18 || index->insn_type == LBT)) return index; if ((index = spu_disassemble_table[opcode & 0x7f8]) != 0 && index->insn_type == RI10) return index; if ((index = spu_disassemble_table[opcode & 0x7fc]) != 0 && (index->insn_type == RI16)) return index; if ((index = spu_disassemble_table[opcode & 0x7fe]) != 0 && (index->insn_type == RI8)) return index; if ((index = spu_disassemble_table[opcode & 0x7ff]) != 0) return index; return NULL; } /* Print a Spu instruction. */ int print_insn_spu (unsigned long insn, unsigned long memaddr) { int value; int hex_value; const struct spu_opcode *index; enum spu_insns tag; index = get_index_for_opcode (insn); if (index == 0) { printf(".long 0x%lx", insn); } else { int i; int paren = 0; tag = (enum spu_insns)(index - spu_opcodes); printf("%s", index->mnemonic); if (tag == M_BI || tag == M_BISL || tag == M_IRET || tag == M_BISLED || tag == M_BIHNZ || tag == M_BIHZ || tag == M_BINZ || tag == M_BIZ || tag == M_SYNC || tag == M_HBR) { int fb = (insn >> (32-18)) & 0x7f; if (fb & 0x40) printf(tag == M_SYNC ? 
"c" : "p"); if (fb & 0x20) printf("d"); if (fb & 0x10) printf("e"); } if (index->arg[0] != 0) printf("\t"); hex_value = 0; for (i = 1; i <= index->arg[0]; i++) { int arg = index->arg[i]; if (arg != A_P && !paren && i > 1) printf(","); switch (arg) { case A_T: printf("$%lu", DECODE_INSN_RT (insn)); break; case A_A: printf("$%lu", DECODE_INSN_RA (insn)); break; case A_B: printf("$%lu", DECODE_INSN_RB (insn)); break; case A_C: printf("$%lu", DECODE_INSN_RC (insn)); break; case A_S: printf("$sp%lu", DECODE_INSN_RA (insn)); break; case A_H: printf("$ch%lu", DECODE_INSN_RA (insn)); break; case A_P: paren++; printf("("); break; case A_U7A: printf("%lu", 173 - DECODE_INSN_U8 (insn)); break; case A_U7B: printf("%lu", 155 - DECODE_INSN_U8 (insn)); break; case A_S3: case A_S6: case A_S7: case A_S7N: case A_U3: case A_U5: case A_U6: case A_U7: hex_value = DECODE_INSN_I7 (insn); printf("%d", hex_value); break; case A_S11: print_address(memaddr + DECODE_INSN_I9a (insn) * 4); break; case A_S11I: print_address(memaddr + DECODE_INSN_I9b (insn) * 4); break; case A_S10: case A_S10B: hex_value = DECODE_INSN_I10 (insn); printf("%d", hex_value); break; case A_S14: hex_value = DECODE_INSN_I10 (insn) * 16; printf("%d", hex_value); break; case A_S16: hex_value = DECODE_INSN_I16 (insn); printf("%d", hex_value); break; case A_X16: hex_value = DECODE_INSN_U16 (insn); printf("%u", hex_value); break; case A_R18: value = DECODE_INSN_I16 (insn) * 4; if (value == 0) printf("%d", value); else { hex_value = memaddr + value; print_address(hex_value & 0x3ffff); } break; case A_S18: value = DECODE_INSN_U16 (insn) * 4; if (value == 0) printf("%d", value); else print_address(value); break; case A_U18: value = DECODE_INSN_U18 (insn); if (value == 0 || 1) { hex_value = value; printf("%u", value); } else print_address(value); break; case A_U14: hex_value = DECODE_INSN_U14 (insn); printf("%u", hex_value); break; } if (arg != A_P && paren) { printf(")"); paren--; } } if (hex_value > 16) printf("\t# %x", hex_value); } return 4; }
linux-master
arch/powerpc/xmon/spu-dis.c
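/*
 * Editor's illustration (not part of the kernel sources above or below):
 * get_index_for_opcode() above keys spu_disassemble_table[] by the top 11
 * bits of the instruction and then probes progressively wider masks
 * (0x780, 0x7f0, ... 0x7ff) because SPU opcodes occupy different numbers
 * of leading bits depending on the instruction form. The toy decoder
 * below shows the same shortest-prefix-first probing on a 16-bit
 * instruction word with a made-up two-entry opcode table; all names
 * (toy_op, toy_lookup, the "add"/"ldi" encodings) are invented for the
 * example.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_op {
	const char *mnemonic;
	unsigned int opcode;	/* prefix value, left-aligned in the 8-bit index */
	int bits;		/* how many leading bits the opcode occupies */
};

static const struct toy_op toy_ops[] = {
	{ "add", 0xc0, 3 },	/* 110x xxxx */
	{ "ldi", 0xb8, 5 },	/* 1011 1xxx */
};

#define TOY_TBL_SIZE 256	/* keyed by the top 8 bits of the instruction */
static const struct toy_op *toy_table[TOY_TBL_SIZE];

static void toy_init(void)
{
	unsigned int i;

	/* Like init_spu_disassemble(): the first entry for an opcode wins. */
	for (i = 0; i < sizeof(toy_ops) / sizeof(toy_ops[0]); i++)
		if (!toy_table[toy_ops[i].opcode])
			toy_table[toy_ops[i].opcode] = &toy_ops[i];
}

static const struct toy_op *toy_lookup(uint16_t insn)
{
	unsigned int op = insn >> 8;	/* top bits, like insn >> (32 - 11) */
	const struct toy_op *t;

	/* Probe the short prefixes first and check the form each time,
	 * just as the real code checks insn_type for each mask. */
	if ((t = toy_table[op & 0xe0]) && t->bits == 3)
		return t;
	if ((t = toy_table[op & 0xf8]) && t->bits == 5)
		return t;
	return toy_table[op];		/* exact 8-bit opcode, or NULL */
}

int main(void)
{
	const struct toy_op *t;

	toy_init();
	t = toy_lookup(0xc512);
	printf("0xc512 -> %s\n", t ? t->mnemonic : ".long");
	t = toy_lookup(0xb903);
	printf("0xb903 -> %s\n", t ? t->mnemonic : ".long");
	return 0;
}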
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright 2012 Paul Mackerras, IBM Corp. <[email protected]> */ #include <linux/types.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/kernel.h> #include <asm/lppaca.h> #include <asm/opal.h> #include <asm/mce.h> #include <asm/machdep.h> #include <asm/cputhreads.h> #include <asm/hmi.h> #include <asm/kvm_ppc.h> /* SRR1 bits for machine check on POWER7 */ #define SRR1_MC_LDSTERR (1ul << (63-42)) #define SRR1_MC_IFETCH_SH (63-45) #define SRR1_MC_IFETCH_MASK 0x7 #define SRR1_MC_IFETCH_SLBPAR 2 /* SLB parity error */ #define SRR1_MC_IFETCH_SLBMULTI 3 /* SLB multi-hit */ #define SRR1_MC_IFETCH_SLBPARMULTI 4 /* SLB parity + multi-hit */ #define SRR1_MC_IFETCH_TLBMULTI 5 /* I-TLB multi-hit */ /* DSISR bits for machine check on POWER7 */ #define DSISR_MC_DERAT_MULTI 0x800 /* D-ERAT multi-hit */ #define DSISR_MC_TLB_MULTI 0x400 /* D-TLB multi-hit */ #define DSISR_MC_SLB_PARITY 0x100 /* SLB parity error */ #define DSISR_MC_SLB_MULTI 0x080 /* SLB multi-hit */ #define DSISR_MC_SLB_PARMULTI 0x040 /* SLB parity + multi-hit */ /* POWER7 SLB flush and reload */ static void reload_slb(struct kvm_vcpu *vcpu) { struct slb_shadow *slb; unsigned long i, n; /* First clear out SLB */ asm volatile("slbmte %0,%0; slbia" : : "r" (0)); /* Do they have an SLB shadow buffer registered? */ slb = vcpu->arch.slb_shadow.pinned_addr; if (!slb) return; /* Sanity check */ n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE); if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end) return; /* Load up the SLB from that */ for (i = 0; i < n; ++i) { unsigned long rb = be64_to_cpu(slb->save_area[i].esid); unsigned long rs = be64_to_cpu(slb->save_area[i].vsid); rb = (rb & ~0xFFFul) | i; /* insert entry number */ asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb)); } } /* * On POWER7, see if we can handle a machine check that occurred inside * the guest in real mode, without switching to the host partition. */ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) { unsigned long srr1 = vcpu->arch.shregs.msr; long handled = 1; if (srr1 & SRR1_MC_LDSTERR) { /* error on load/store */ unsigned long dsisr = vcpu->arch.shregs.dsisr; if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI | DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) { /* flush and reload SLB; flushes D-ERAT too */ reload_slb(vcpu); dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI | DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI); } if (dsisr & DSISR_MC_TLB_MULTI) { tlbiel_all_lpid(vcpu->kvm->arch.radix); dsisr &= ~DSISR_MC_TLB_MULTI; } /* Any other errors we don't understand? */ if (dsisr & 0xffffffffUL) handled = 0; } switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) { case 0: break; case SRR1_MC_IFETCH_SLBPAR: case SRR1_MC_IFETCH_SLBMULTI: case SRR1_MC_IFETCH_SLBPARMULTI: reload_slb(vcpu); break; case SRR1_MC_IFETCH_TLBMULTI: tlbiel_all_lpid(vcpu->kvm->arch.radix); break; default: handled = 0; } return handled; } void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu) { struct machine_check_event mce_evt; long handled; if (vcpu->kvm->arch.fwnmi_enabled) { /* FWNMI guests handle their own recovery */ handled = 0; } else { handled = kvmppc_realmode_mc_power7(vcpu); } /* * Now get the event and stash it in the vcpu struct so it can * be handled by the primary thread in virtual mode. We can't * call machine_check_queue_event() here if we are running on * an offline secondary thread. 
*/ if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) { if (handled && mce_evt.version == MCE_V1) mce_evt.disposition = MCE_DISPOSITION_RECOVERED; } else { memset(&mce_evt, 0, sizeof(mce_evt)); } vcpu->arch.mce_evt = mce_evt; } long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; long ret = 0; /* * Unapply and clear the offset first. That way, if the TB was not * resynced then it will remain in host-offset, and if it was resynced * then it is brought into host-offset. Then the tb offset is * re-applied before continuing with the KVM exit. * * This way, we don't need to actually know whether not OPAL resynced * the timebase or do any of the complicated dance that the P7/8 * path requires. */ if (vc->tb_offset_applied) { u64 new_tb = mftb() - vc->tb_offset_applied; mtspr(SPRN_TBU40, new_tb); if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) { new_tb += 0x1000000; mtspr(SPRN_TBU40, new_tb); } vc->tb_offset_applied = 0; } local_paca->hmi_irqs++; if (hmi_handle_debugtrig(NULL) >= 0) { ret = 1; goto out; } if (ppc_md.hmi_exception_early) ppc_md.hmi_exception_early(NULL); out: if (vc->tb_offset) { u64 new_tb = mftb() + vc->tb_offset; mtspr(SPRN_TBU40, new_tb); if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) { new_tb += 0x1000000; mtspr(SPRN_TBU40, new_tb); } vc->tb_offset_applied = vc->tb_offset; } return ret; } /* * The following subcore HMI handling is all only for pre-POWER9 CPUs. */ /* Check if dynamic split is in force and return subcore size accordingly. */ static inline int kvmppc_cur_subcore_size(void) { if (local_paca->kvm_hstate.kvm_split_mode) return local_paca->kvm_hstate.kvm_split_mode->subcore_size; return threads_per_subcore; } void kvmppc_subcore_enter_guest(void) { int thread_id, subcore_id; thread_id = cpu_thread_in_core(local_paca->paca_index); subcore_id = thread_id / kvmppc_cur_subcore_size(); local_paca->sibling_subcore_state->in_guest[subcore_id] = 1; } EXPORT_SYMBOL_GPL(kvmppc_subcore_enter_guest); void kvmppc_subcore_exit_guest(void) { int thread_id, subcore_id; thread_id = cpu_thread_in_core(local_paca->paca_index); subcore_id = thread_id / kvmppc_cur_subcore_size(); local_paca->sibling_subcore_state->in_guest[subcore_id] = 0; } EXPORT_SYMBOL_GPL(kvmppc_subcore_exit_guest); static bool kvmppc_tb_resync_required(void) { if (test_and_set_bit(CORE_TB_RESYNC_REQ_BIT, &local_paca->sibling_subcore_state->flags)) return false; return true; } static void kvmppc_tb_resync_done(void) { clear_bit(CORE_TB_RESYNC_REQ_BIT, &local_paca->sibling_subcore_state->flags); } /* * kvmppc_realmode_hmi_handler() is called only by primary thread during * guest exit path. * * There are multiple reasons why HMI could occur, one of them is * Timebase (TB) error. If this HMI is due to TB error, then TB would * have been in stopped state. The opal hmi handler Will fix it and * restore the TB value with host timebase value. For HMI caused due * to non-TB errors, opal hmi handler will not touch/restore TB register * and hence there won't be any change in TB value. * * Since we are not sure about the cause of this HMI, we can't be sure * about the content of TB register whether it holds guest or host timebase * value. Hence the idea is to resync the TB on every HMI, so that we * know about the exact state of the TB value. Resync TB call will * restore TB to host timebase. * * Things to consider: * - On TB error, HMI interrupt is reported on all the threads of the core * that has encountered TB error irrespective of split-core mode. 
 * - The very first thread on the core that gets a chance to fix the TB
 *   error will resync the TB with the local chipTOD value.
 * - The TB resync is a core-level action, i.e. it will sync all the TBs
 *   in that core independent of split-core mode. This means if we trigger
 *   a TB sync from a thread of one subcore, it will affect the TB values
 *   of sibling subcores of the same core.
 *
 * All threads need to co-ordinate before making the opal hmi handler call.
 * All threads will use sibling_subcore_state->in_guest[] (shared by all
 * threads in the core) in the paca, which holds information about whether
 * sibling subcores are in guest mode or host mode. The in_guest[] array
 * is of size MAX_SUBCORE_PER_CORE=4, indexed using the subcore id to
 * set/unset the subcore status. Only the primary thread from each subcore
 * is responsible for setting/unsetting its designated array element while
 * entering/exiting the guest.
 *
 * After invoking the opal hmi handler call, one of the threads (of the
 * entire core) will need to resync the TB. Bit 63 of the subcore state
 * bitmap flags (sibling_subcore_state->flags) will be used to co-ordinate
 * between primary threads to decide who takes up the responsibility.
 *
 * This is what we do:
 * - The primary thread from each subcore tries to set resync required
 *   bit[63] of paca->sibling_subcore_state->flags.
 * - The first primary thread that is able to set the flag takes the
 *   responsibility of the TB resync. (Let us call it the thread leader.)
 * - All other threads which are in host will call
 *   wait_for_subcore_guest_exit() and wait for in_guest[0-3] from
 *   paca->sibling_subcore_state to get cleared.
 * - Each primary thread will clear its subcore status from the subcore
 *   state in_guest[] array respectively.
 * - Once all primary threads clear in_guest[0-3], all of them will invoke
 *   the opal hmi handler.
 * - Now all threads will wait for the TB resync to complete by invoking
 *   wait_for_tb_resync(), except the thread leader.
 * - The thread leader will do a TB resync by invoking the
 *   opal_resync_timebase() call, and then it will clear the resync
 *   required bit.
 * - All other threads will then come out of the resync wait loop and
 *   proceed with individual execution.
 * - On return of this function, the primary thread will signal all
 *   secondary threads to proceed.
 * - All secondary threads will eventually call the opal hmi handler on
 *   their exit path.
 *
 * Returns 1 if the timebase offset should be applied, 0 if not.
 */
long kvmppc_realmode_hmi_handler(void)
{
	bool resync_req;

	local_paca->hmi_irqs++;

	if (hmi_handle_debugtrig(NULL) >= 0)
		return 1;

	/*
	 * By now the primary thread has already completed the guest->host
	 * partition switch but hasn't signaled the secondaries yet.
	 * All the secondary threads on this subcore are waiting
	 * for the primary thread to signal them to go ahead.
	 *
	 * Threads from a subcore which isn't in the guest will
	 * wait until all other subcores on this core exit the guest.
	 *
	 * Now set the resync required bit. If you are the first to
	 * set this bit then kvmppc_tb_resync_required() will
	 * return true. For all other subcores
	 * kvmppc_tb_resync_required() will return false.
	 *
	 * If resync_req == true, then this thread is responsible for
	 * initiating the TB resync after the hmi handler has completed.
	 * All other threads on this core will wait until this thread
	 * clears the resync required bit flag.
	 */
	resync_req = kvmppc_tb_resync_required();

	/* Reset the subcore status to indicate it has exited guest */
	kvmppc_subcore_exit_guest();

	/*
	 * Wait for other subcores on this core to exit the guest.
* All the primary threads and threads from subcore that are * not in guest will wait here until all subcores are out * of guest context. */ wait_for_subcore_guest_exit(); /* * At this point we are sure that primary threads from each * subcore on this core have completed guest->host partition * switch. Now it is safe to call HMI handler. */ if (ppc_md.hmi_exception_early) ppc_md.hmi_exception_early(NULL); /* * Check if this thread is responsible to resync TB. * All other threads will wait until this thread completes the * TB resync. */ if (resync_req) { opal_resync_timebase(); /* Reset TB resync req bit */ kvmppc_tb_resync_done(); } else { wait_for_tb_resync(); } /* * Reset tb_offset_applied so the guest exit code won't try * to subtract the previous timebase offset from the timebase. */ if (local_paca->kvm_hstate.kvm_vcore) local_paca->kvm_hstate.kvm_vcore->tb_offset_applied = 0; return 0; }
linux-master
arch/powerpc/kvm/book3s_hv_ras.c
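/*
 * Editor's illustration (not part of the kernel sources above or below):
 * the TB-resync election in kvmppc_realmode_hmi_handler() reduces to one
 * primitive: every primary thread does an atomic test-and-set on a shared
 * flags word, exactly one of them sees the bit previously clear and
 * becomes the leader that calls opal_resync_timebase(), and the rest spin
 * until the leader clears the bit. The userspace sketch below mimics that
 * shape with C11 atomics and pthreads (compile with -pthread); all names
 * in it are hypothetical, and the barrier merely stands in for
 * wait_for_subcore_guest_exit().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define RESYNC_REQ_BIT	(1ull << 63)	/* stands in for CORE_TB_RESYNC_REQ_BIT */

static atomic_ullong core_flags;
static pthread_barrier_t all_exited;	/* stands in for wait_for_subcore_guest_exit() */

/* Analogue of kvmppc_tb_resync_required(): fetch_or returns the old value,
 * so only the first caller sees the bit clear and is elected leader. */
static int resync_required(void)
{
	return !(atomic_fetch_or(&core_flags, RESYNC_REQ_BIT) & RESYNC_REQ_BIT);
}

static void resync_done(void)		/* analogue of kvmppc_tb_resync_done() */
{
	atomic_fetch_and(&core_flags, ~RESYNC_REQ_BIT);
}

static void wait_for_resync(void)	/* analogue of wait_for_tb_resync() */
{
	while (atomic_load(&core_flags) & RESYNC_REQ_BIT)
		;			/* busy-wait until the leader clears the bit */
}

static void *primary_thread(void *arg)
{
	long id = (long)arg;

	pthread_barrier_wait(&all_exited);	/* all subcores out of the guest */

	if (resync_required()) {
		printf("thread %ld elected leader, resyncing timebase\n", id);
		/* opal_resync_timebase() would run here */
		resync_done();
	} else {
		wait_for_resync();
		printf("thread %ld waited for the leader\n", id);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	long i;

	pthread_barrier_init(&all_exited, NULL, 4);
	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, primary_thread, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	pthread_barrier_destroy(&all_exited);
	return 0;
}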
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2017-2019, IBM Corporation. */ #define pr_fmt(fmt) "xive-kvm: " fmt #include <linux/kernel.h> #include <linux/kvm_host.h> #include <linux/err.h> #include <linux/gfp.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/file.h> #include <linux/irqdomain.h> #include <asm/uaccess.h> #include <asm/kvm_book3s.h> #include <asm/kvm_ppc.h> #include <asm/hvcall.h> #include <asm/xive.h> #include <asm/xive-regs.h> #include <asm/debug.h> #include <asm/opal.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include "book3s_xive.h" static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset) { u64 val; /* * The KVM XIVE native device does not use the XIVE_ESB_SET_PQ_10 * load operation, so there is no need to enforce load-after-store * ordering. */ val = in_be64(xd->eoi_mmio + offset); return (u8)val; } static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct xive_q *q = &xc->queues[prio]; xive_native_disable_queue(xc->vp_id, q, prio); if (q->qpage) { put_page(virt_to_page(q->qpage)); q->qpage = NULL; } } static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio, __be32 *qpage, u32 order, bool can_escalate) { int rc; __be32 *qpage_prev = q->qpage; rc = xive_native_configure_queue(vp_id, q, prio, qpage, order, can_escalate); if (rc) return rc; if (qpage_prev) put_page(virt_to_page(qpage_prev)); return rc; } void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; int i; if (!kvmppc_xive_enabled(vcpu)) return; if (!xc) return; pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num); /* Ensure no interrupt is still routed to that VP */ xc->valid = false; kvmppc_xive_disable_vcpu_interrupts(vcpu); /* Free escalations */ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { /* Free the escalation irq */ if (xc->esc_virq[i]) { if (kvmppc_xive_has_single_escalation(xc->xive)) xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]); free_irq(xc->esc_virq[i], vcpu); irq_dispose_mapping(xc->esc_virq[i]); kfree(xc->esc_virq_names[i]); xc->esc_virq[i] = 0; } } /* Disable the VP */ xive_native_disable_vp(xc->vp_id); /* Clear the cam word so guest entry won't try to push context */ vcpu->arch.xive_cam_word = 0; /* Free the queues */ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { kvmppc_xive_native_cleanup_queue(vcpu, i); } /* Free the VP */ kfree(xc); /* Cleanup the vcpu */ vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; vcpu->arch.xive_vcpu = NULL; } int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu, u32 server_num) { struct kvmppc_xive *xive = dev->private; struct kvmppc_xive_vcpu *xc = NULL; int rc; u32 vp_id; pr_devel("native_connect_vcpu(server=%d)\n", server_num); if (dev->ops != &kvm_xive_native_ops) { pr_devel("Wrong ops !\n"); return -EPERM; } if (xive->kvm != vcpu->kvm) return -EPERM; if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) return -EBUSY; mutex_lock(&xive->lock); rc = kvmppc_xive_compute_vp_id(xive, server_num, &vp_id); if (rc) goto bail; xc = kzalloc(sizeof(*xc), GFP_KERNEL); if (!xc) { rc = -ENOMEM; goto bail; } vcpu->arch.xive_vcpu = xc; xc->xive = xive; xc->vcpu = vcpu; xc->server_num = server_num; xc->vp_id = vp_id; xc->valid = true; vcpu->arch.irq_type = KVMPPC_IRQ_XIVE; rc = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); if (rc) { pr_err("Failed to get VP info from OPAL: %d\n", rc); goto bail; } if 
(!kvmppc_xive_check_save_restore(vcpu)) { pr_err("inconsistent save-restore setup for VCPU %d\n", server_num); rc = -EIO; goto bail; } /* * Enable the VP first as the single escalation mode will * affect escalation interrupts numbering */ rc = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive)); if (rc) { pr_err("Failed to enable VP in OPAL: %d\n", rc); goto bail; } /* Configure VCPU fields for use by assembly push/pull */ vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); /* TODO: reset all queues to a clean state ? */ bail: mutex_unlock(&xive->lock); if (rc) kvmppc_xive_native_cleanup_vcpu(vcpu); return rc; } /* * Device passthrough support */ static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq) { struct kvmppc_xive *xive = kvm->arch.xive; pgoff_t esb_pgoff = KVM_XIVE_ESB_PAGE_OFFSET + irq * 2; if (irq >= KVMPPC_XIVE_NR_IRQS) return -EINVAL; /* * Clear the ESB pages of the IRQ number being mapped (or * unmapped) into the guest and let the VM fault handler * repopulate with the appropriate ESB pages (device or IC) */ pr_debug("clearing esb pages for girq 0x%lx\n", irq); mutex_lock(&xive->mapping_lock); if (xive->mapping) unmap_mapping_range(xive->mapping, esb_pgoff << PAGE_SHIFT, 2ull << PAGE_SHIFT, 1); mutex_unlock(&xive->mapping_lock); return 0; } static struct kvmppc_xive_ops kvmppc_xive_native_ops = { .reset_mapped = kvmppc_xive_native_reset_mapped, }; static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct kvm_device *dev = vma->vm_file->private_data; struct kvmppc_xive *xive = dev->private; struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; struct xive_irq_data *xd; u32 hw_num; u16 src; u64 page; unsigned long irq; u64 page_offset; /* * Linux/KVM uses a two pages ESB setting, one for trigger and * one for EOI */ page_offset = vmf->pgoff - vma->vm_pgoff; irq = page_offset / 2; sb = kvmppc_xive_find_source(xive, irq, &src); if (!sb) { pr_devel("%s: source %lx not found !\n", __func__, irq); return VM_FAULT_SIGBUS; } state = &sb->irq_state[src]; /* Some sanity checking */ if (!state->valid) { pr_devel("%s: source %lx invalid !\n", __func__, irq); return VM_FAULT_SIGBUS; } kvmppc_xive_select_irq(state, &hw_num, &xd); arch_spin_lock(&sb->lock); /* * first/even page is for trigger * second/odd page is for EOI and management. */ page = page_offset % 2 ? 
xd->eoi_page : xd->trig_page; arch_spin_unlock(&sb->lock); if (WARN_ON(!page)) { pr_err("%s: accessing invalid ESB page for source %lx !\n", __func__, irq); return VM_FAULT_SIGBUS; } vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT); return VM_FAULT_NOPAGE; } static const struct vm_operations_struct xive_native_esb_vmops = { .fault = xive_native_esb_fault, }; static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; switch (vmf->pgoff - vma->vm_pgoff) { case 0: /* HW - forbid access */ case 1: /* HV - forbid access */ return VM_FAULT_SIGBUS; case 2: /* OS */ vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT); return VM_FAULT_NOPAGE; case 3: /* USER - TODO */ default: return VM_FAULT_SIGBUS; } } static const struct vm_operations_struct xive_native_tima_vmops = { .fault = xive_native_tima_fault, }; static int kvmppc_xive_native_mmap(struct kvm_device *dev, struct vm_area_struct *vma) { struct kvmppc_xive *xive = dev->private; /* We only allow mappings at fixed offset for now */ if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) { if (vma_pages(vma) > 4) return -EINVAL; vma->vm_ops = &xive_native_tima_vmops; } else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) { if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2) return -EINVAL; vma->vm_ops = &xive_native_esb_vmops; } else { return -EINVAL; } vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); /* * Grab the KVM device file address_space to be able to clear * the ESB pages mapping when a device is passed-through into * the guest. */ xive->mapping = vma->vm_file->f_mapping; return 0; } static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq, u64 addr) { struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; u64 __user *ubufp = (u64 __user *) addr; u64 val; u16 idx; int rc; pr_devel("%s irq=0x%lx\n", __func__, irq); if (irq < KVMPPC_XIVE_FIRST_IRQ || irq >= KVMPPC_XIVE_NR_IRQS) return -E2BIG; sb = kvmppc_xive_find_source(xive, irq, &idx); if (!sb) { pr_debug("No source, creating source block...\n"); sb = kvmppc_xive_create_src_block(xive, irq); if (!sb) { pr_err("Failed to create block...\n"); return -ENOMEM; } } state = &sb->irq_state[idx]; if (get_user(val, ubufp)) { pr_err("fault getting user info !\n"); return -EFAULT; } arch_spin_lock(&sb->lock); /* * If the source doesn't already have an IPI, allocate * one and get the corresponding data */ if (!state->ipi_number) { state->ipi_number = xive_native_alloc_irq(); if (state->ipi_number == 0) { pr_err("Failed to allocate IRQ !\n"); rc = -ENXIO; goto unlock; } xive_native_populate_irq_data(state->ipi_number, &state->ipi_data); pr_debug("%s allocated hw_irq=0x%x for irq=0x%lx\n", __func__, state->ipi_number, irq); } /* Restore LSI state */ if (val & KVM_XIVE_LEVEL_SENSITIVE) { state->lsi = true; if (val & KVM_XIVE_LEVEL_ASSERTED) state->asserted = true; pr_devel(" LSI ! 
Asserted=%d\n", state->asserted); } /* Mask IRQ to start with */ state->act_server = 0; state->act_priority = MASKED; xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); /* Increment the number of valid sources and mark this one valid */ if (!state->valid) xive->src_count++; state->valid = true; rc = 0; unlock: arch_spin_unlock(&sb->lock); return rc; } static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive, struct kvmppc_xive_src_block *sb, struct kvmppc_xive_irq_state *state, u32 server, u8 priority, bool masked, u32 eisn) { struct kvm *kvm = xive->kvm; u32 hw_num; int rc = 0; arch_spin_lock(&sb->lock); if (state->act_server == server && state->act_priority == priority && state->eisn == eisn) goto unlock; pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n", priority, server, masked, state->act_server, state->act_priority); kvmppc_xive_select_irq(state, &hw_num, NULL); if (priority != MASKED && !masked) { rc = kvmppc_xive_select_target(kvm, &server, priority); if (rc) goto unlock; state->act_priority = priority; state->act_server = server; state->eisn = eisn; rc = xive_native_configure_irq(hw_num, kvmppc_xive_vp(xive, server), priority, eisn); } else { state->act_priority = MASKED; state->act_server = 0; state->eisn = 0; rc = xive_native_configure_irq(hw_num, 0, MASKED, 0); } unlock: arch_spin_unlock(&sb->lock); return rc; } static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive, long irq, u64 addr) { struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; u64 __user *ubufp = (u64 __user *) addr; u16 src; u64 kvm_cfg; u32 server; u8 priority; bool masked; u32 eisn; sb = kvmppc_xive_find_source(xive, irq, &src); if (!sb) return -ENOENT; state = &sb->irq_state[src]; if (!state->valid) return -EINVAL; if (get_user(kvm_cfg, ubufp)) return -EFAULT; pr_devel("%s irq=0x%lx cfg=%016llx\n", __func__, irq, kvm_cfg); priority = (kvm_cfg & KVM_XIVE_SOURCE_PRIORITY_MASK) >> KVM_XIVE_SOURCE_PRIORITY_SHIFT; server = (kvm_cfg & KVM_XIVE_SOURCE_SERVER_MASK) >> KVM_XIVE_SOURCE_SERVER_SHIFT; masked = (kvm_cfg & KVM_XIVE_SOURCE_MASKED_MASK) >> KVM_XIVE_SOURCE_MASKED_SHIFT; eisn = (kvm_cfg & KVM_XIVE_SOURCE_EISN_MASK) >> KVM_XIVE_SOURCE_EISN_SHIFT; if (priority != xive_prio_from_guest(priority)) { pr_err("invalid priority for queue %d for VCPU %d\n", priority, server); return -EINVAL; } return kvmppc_xive_native_update_source_config(xive, sb, state, server, priority, masked, eisn); } static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive, long irq, u64 addr) { struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; struct xive_irq_data *xd; u32 hw_num; u16 src; int rc = 0; pr_devel("%s irq=0x%lx", __func__, irq); sb = kvmppc_xive_find_source(xive, irq, &src); if (!sb) return -ENOENT; state = &sb->irq_state[src]; rc = -EINVAL; arch_spin_lock(&sb->lock); if (state->valid) { kvmppc_xive_select_irq(state, &hw_num, &xd); xive_native_sync_source(hw_num); rc = 0; } arch_spin_unlock(&sb->lock); return rc; } static int xive_native_validate_queue_size(u32 qshift) { /* * We only support 64K pages for the moment. 
This is also * advertised in the DT property "ibm,xive-eq-sizes" */ switch (qshift) { case 0: /* EQ reset */ case 16: return 0; case 12: case 21: case 24: default: return -EINVAL; } } static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive, long eq_idx, u64 addr) { struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; struct kvmppc_xive_vcpu *xc; void __user *ubufp = (void __user *) addr; u32 server; u8 priority; struct kvm_ppc_xive_eq kvm_eq; int rc; __be32 *qaddr = 0; struct page *page; struct xive_q *q; gfn_t gfn; unsigned long page_size; int srcu_idx; /* * Demangle priority/server tuple from the EQ identifier */ priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >> KVM_XIVE_EQ_PRIORITY_SHIFT; server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >> KVM_XIVE_EQ_SERVER_SHIFT; if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq))) return -EFAULT; vcpu = kvmppc_xive_find_server(kvm, server); if (!vcpu) { pr_err("Can't find server %d\n", server); return -ENOENT; } xc = vcpu->arch.xive_vcpu; if (priority != xive_prio_from_guest(priority)) { pr_err("Trying to restore invalid queue %d for VCPU %d\n", priority, server); return -EINVAL; } q = &xc->queues[priority]; pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n", __func__, server, priority, kvm_eq.flags, kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex); /* reset queue and disable queueing */ if (!kvm_eq.qshift) { q->guest_qaddr = 0; q->guest_qshift = 0; rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority, NULL, 0, true); if (rc) { pr_err("Failed to reset queue %d for VCPU %d: %d\n", priority, xc->server_num, rc); return rc; } return 0; } /* * sPAPR specifies a "Unconditional Notify (n) flag" for the * H_INT_SET_QUEUE_CONFIG hcall which forces notification * without using the coalescing mechanisms provided by the * XIVE END ESBs. This is required on KVM as notification * using the END ESBs is not supported. */ if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) { pr_err("invalid flags %d\n", kvm_eq.flags); return -EINVAL; } rc = xive_native_validate_queue_size(kvm_eq.qshift); if (rc) { pr_err("invalid queue size %d\n", kvm_eq.qshift); return rc; } if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) { pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr, 1ull << kvm_eq.qshift); return -EINVAL; } srcu_idx = srcu_read_lock(&kvm->srcu); gfn = gpa_to_gfn(kvm_eq.qaddr); page_size = kvm_host_page_size(vcpu, gfn); if (1ull << kvm_eq.qshift > page_size) { srcu_read_unlock(&kvm->srcu, srcu_idx); pr_warn("Incompatible host page size %lx!\n", page_size); return -EINVAL; } page = gfn_to_page(kvm, gfn); if (is_error_page(page)) { srcu_read_unlock(&kvm->srcu, srcu_idx); pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr); return -EINVAL; } qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK); srcu_read_unlock(&kvm->srcu, srcu_idx); /* * Backup the queue page guest address to the mark EQ page * dirty for migration. */ q->guest_qaddr = kvm_eq.qaddr; q->guest_qshift = kvm_eq.qshift; /* * Unconditional Notification is forced by default at the * OPAL level because the use of END ESBs is not supported by * Linux. */ rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority, (__be32 *) qaddr, kvm_eq.qshift, true); if (rc) { pr_err("Failed to configure queue %d for VCPU %d: %d\n", priority, xc->server_num, rc); put_page(page); return rc; } /* * Only restore the queue state when needed. When doing the * H_INT_SET_SOURCE_CONFIG hcall, it should not. 
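	 *
	 * In other words, qtoggle == 1 with qindex == 0 is taken as the
	 * freshly reset ring state, and xive_native_set_queue_state() is
	 * only invoked below when the saved toggle bit or index differs
	 * from that default.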
*/ if (kvm_eq.qtoggle != 1 || kvm_eq.qindex != 0) { rc = xive_native_set_queue_state(xc->vp_id, priority, kvm_eq.qtoggle, kvm_eq.qindex); if (rc) goto error; } rc = kvmppc_xive_attach_escalation(vcpu, priority, kvmppc_xive_has_single_escalation(xive)); error: if (rc) kvmppc_xive_native_cleanup_queue(vcpu, priority); return rc; } static int kvmppc_xive_native_get_queue_config(struct kvmppc_xive *xive, long eq_idx, u64 addr) { struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; struct kvmppc_xive_vcpu *xc; struct xive_q *q; void __user *ubufp = (u64 __user *) addr; u32 server; u8 priority; struct kvm_ppc_xive_eq kvm_eq; u64 qaddr; u64 qshift; u64 qeoi_page; u32 escalate_irq; u64 qflags; int rc; /* * Demangle priority/server tuple from the EQ identifier */ priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >> KVM_XIVE_EQ_PRIORITY_SHIFT; server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >> KVM_XIVE_EQ_SERVER_SHIFT; vcpu = kvmppc_xive_find_server(kvm, server); if (!vcpu) { pr_err("Can't find server %d\n", server); return -ENOENT; } xc = vcpu->arch.xive_vcpu; if (priority != xive_prio_from_guest(priority)) { pr_err("invalid priority for queue %d for VCPU %d\n", priority, server); return -EINVAL; } q = &xc->queues[priority]; memset(&kvm_eq, 0, sizeof(kvm_eq)); if (!q->qpage) return 0; rc = xive_native_get_queue_info(xc->vp_id, priority, &qaddr, &qshift, &qeoi_page, &escalate_irq, &qflags); if (rc) return rc; kvm_eq.flags = 0; if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY) kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY; kvm_eq.qshift = q->guest_qshift; kvm_eq.qaddr = q->guest_qaddr; rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle, &kvm_eq.qindex); if (rc) return rc; pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n", __func__, server, priority, kvm_eq.flags, kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex); if (copy_to_user(ubufp, &kvm_eq, sizeof(kvm_eq))) return -EFAULT; return 0; } static void kvmppc_xive_reset_sources(struct kvmppc_xive_src_block *sb) { int i; for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; if (!state->valid) continue; if (state->act_priority == MASKED) continue; state->eisn = 0; state->act_server = 0; state->act_priority = MASKED; xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); if (state->pt_number) { xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01); xive_native_configure_irq(state->pt_number, 0, MASKED, 0); } } } static int kvmppc_xive_reset(struct kvmppc_xive *xive) { struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; unsigned long i; pr_devel("%s\n", __func__); mutex_lock(&xive->lock); kvm_for_each_vcpu(i, vcpu, kvm) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; unsigned int prio; if (!xc) continue; kvmppc_xive_disable_vcpu_interrupts(vcpu); for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) { /* Single escalation, no queue 7 */ if (prio == 7 && kvmppc_xive_has_single_escalation(xive)) break; if (xc->esc_virq[prio]) { free_irq(xc->esc_virq[prio], vcpu); irq_dispose_mapping(xc->esc_virq[prio]); kfree(xc->esc_virq_names[prio]); xc->esc_virq[prio] = 0; } kvmppc_xive_native_cleanup_queue(vcpu, prio); } } for (i = 0; i <= xive->max_sbid; i++) { struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; if (sb) { arch_spin_lock(&sb->lock); kvmppc_xive_reset_sources(sb); arch_spin_unlock(&sb->lock); } } mutex_unlock(&xive->lock); return 0; } static void kvmppc_xive_native_sync_sources(struct kvmppc_xive_src_block 
*sb) { int j; for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) { struct kvmppc_xive_irq_state *state = &sb->irq_state[j]; struct xive_irq_data *xd; u32 hw_num; if (!state->valid) continue; /* * The struct kvmppc_xive_irq_state reflects the state * of the EAS configuration and not the state of the * source. The source is masked setting the PQ bits to * '-Q', which is what is being done before calling * the KVM_DEV_XIVE_EQ_SYNC control. * * If a source EAS is configured, OPAL syncs the XIVE * IC of the source and the XIVE IC of the previous * target if any. * * So it should be fine ignoring MASKED sources as * they have been synced already. */ if (state->act_priority == MASKED) continue; kvmppc_xive_select_irq(state, &hw_num, &xd); xive_native_sync_source(hw_num); xive_native_sync_queue(hw_num); } } static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; unsigned int prio; int srcu_idx; if (!xc) return -ENOENT; for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) { struct xive_q *q = &xc->queues[prio]; if (!q->qpage) continue; /* Mark EQ page dirty for migration */ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr)); srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); } return 0; } static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive) { struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; unsigned long i; pr_devel("%s\n", __func__); mutex_lock(&xive->lock); for (i = 0; i <= xive->max_sbid; i++) { struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; if (sb) { arch_spin_lock(&sb->lock); kvmppc_xive_native_sync_sources(sb); arch_spin_unlock(&sb->lock); } } kvm_for_each_vcpu(i, vcpu, kvm) { kvmppc_xive_native_vcpu_eq_sync(vcpu); } mutex_unlock(&xive->lock); return 0; } static int kvmppc_xive_native_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { struct kvmppc_xive *xive = dev->private; switch (attr->group) { case KVM_DEV_XIVE_GRP_CTRL: switch (attr->attr) { case KVM_DEV_XIVE_RESET: return kvmppc_xive_reset(xive); case KVM_DEV_XIVE_EQ_SYNC: return kvmppc_xive_native_eq_sync(xive); case KVM_DEV_XIVE_NR_SERVERS: return kvmppc_xive_set_nr_servers(xive, attr->addr); } break; case KVM_DEV_XIVE_GRP_SOURCE: return kvmppc_xive_native_set_source(xive, attr->attr, attr->addr); case KVM_DEV_XIVE_GRP_SOURCE_CONFIG: return kvmppc_xive_native_set_source_config(xive, attr->attr, attr->addr); case KVM_DEV_XIVE_GRP_EQ_CONFIG: return kvmppc_xive_native_set_queue_config(xive, attr->attr, attr->addr); case KVM_DEV_XIVE_GRP_SOURCE_SYNC: return kvmppc_xive_native_sync_source(xive, attr->attr, attr->addr); } return -ENXIO; } static int kvmppc_xive_native_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { struct kvmppc_xive *xive = dev->private; switch (attr->group) { case KVM_DEV_XIVE_GRP_EQ_CONFIG: return kvmppc_xive_native_get_queue_config(xive, attr->attr, attr->addr); } return -ENXIO; } static int kvmppc_xive_native_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { switch (attr->group) { case KVM_DEV_XIVE_GRP_CTRL: switch (attr->attr) { case KVM_DEV_XIVE_RESET: case KVM_DEV_XIVE_EQ_SYNC: case KVM_DEV_XIVE_NR_SERVERS: return 0; } break; case KVM_DEV_XIVE_GRP_SOURCE: case KVM_DEV_XIVE_GRP_SOURCE_CONFIG: case KVM_DEV_XIVE_GRP_SOURCE_SYNC: if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ && attr->attr < KVMPPC_XIVE_NR_IRQS) return 0; break; case KVM_DEV_XIVE_GRP_EQ_CONFIG: return 0; } return -ENXIO; } /* * Called when device fd is closed. kvm->lock is held. 
*/ static void kvmppc_xive_native_release(struct kvm_device *dev) { struct kvmppc_xive *xive = dev->private; struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; unsigned long i; pr_devel("Releasing xive native device\n"); /* * Clear the KVM device file address_space which is used to * unmap the ESB pages when a device is passed-through. */ mutex_lock(&xive->mapping_lock); xive->mapping = NULL; mutex_unlock(&xive->mapping_lock); /* * Since this is the device release function, we know that * userspace does not have any open fd or mmap referring to * the device. Therefore there can not be any of the * device attribute set/get, mmap, or page fault functions * being executed concurrently, and similarly, the * connect_vcpu and set/clr_mapped functions also cannot * be being executed. */ debugfs_remove(xive->dentry); /* * We should clean up the vCPU interrupt presenters first. */ kvm_for_each_vcpu(i, vcpu, kvm) { /* * Take vcpu->mutex to ensure that no one_reg get/set ioctl * (i.e. kvmppc_xive_native_[gs]et_vp) can be being done. * Holding the vcpu->mutex also means that the vcpu cannot * be executing the KVM_RUN ioctl, and therefore it cannot * be executing the XIVE push or pull code or accessing * the XIVE MMIO regions. */ mutex_lock(&vcpu->mutex); kvmppc_xive_native_cleanup_vcpu(vcpu); mutex_unlock(&vcpu->mutex); } /* * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe * against xive code getting called during vcpu execution or * set/get one_reg operations. */ kvm->arch.xive = NULL; for (i = 0; i <= xive->max_sbid; i++) { if (xive->src_blocks[i]) kvmppc_xive_free_sources(xive->src_blocks[i]); kfree(xive->src_blocks[i]); xive->src_blocks[i] = NULL; } if (xive->vp_base != XIVE_INVALID_VP) xive_native_free_vp_block(xive->vp_base); /* * A reference of the kvmppc_xive pointer is now kept under * the xive_devices struct of the machine for reuse. It is * freed when the VM is destroyed for now until we fix all the * execution paths. */ kfree(dev); } /* * Create a XIVE device. kvm->lock is held. */ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type) { struct kvmppc_xive *xive; struct kvm *kvm = dev->kvm; pr_devel("Creating xive native device\n"); if (kvm->arch.xive) return -EEXIST; xive = kvmppc_xive_get_device(kvm, type); if (!xive) return -ENOMEM; dev->private = xive; xive->dev = dev; xive->kvm = kvm; mutex_init(&xive->mapping_lock); mutex_init(&xive->lock); /* VP allocation is delayed to the first call to connect_vcpu */ xive->vp_base = XIVE_INVALID_VP; /* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per sockets * on a POWER9 system. */ xive->nr_servers = KVM_MAX_VCPUS; if (xive_native_has_single_escalation()) xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION; if (xive_native_has_save_restore()) xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE; xive->ops = &kvmppc_xive_native_ops; kvm->arch.xive = xive; return 0; } /* * Interrupt Pending Buffer (IPB) offset */ #define TM_IPB_SHIFT 40 #define TM_IPB_MASK (((u64) 0xFF) << TM_IPB_SHIFT) int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; u64 opal_state; int rc; if (!kvmppc_xive_enabled(vcpu)) return -EPERM; if (!xc) return -ENOENT; /* Thread context registers. 
We only care about IPB and CPPR */ val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01; /* Get the VP state from OPAL */ rc = xive_native_get_vp_state(xc->vp_id, &opal_state); if (rc) return rc; /* * Capture the backup of IPB register in the NVT structure and * merge it in our KVM VP state. */ val->xive_timaval[0] |= cpu_to_be64(opal_state & TM_IPB_MASK); pr_devel("%s NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x opal=%016llx\n", __func__, vcpu->arch.xive_saved_state.nsr, vcpu->arch.xive_saved_state.cppr, vcpu->arch.xive_saved_state.ipb, vcpu->arch.xive_saved_state.pipr, vcpu->arch.xive_saved_state.w01, (u32) vcpu->arch.xive_cam_word, opal_state); return 0; } int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct kvmppc_xive *xive = vcpu->kvm->arch.xive; pr_devel("%s w01=%016llx vp=%016llx\n", __func__, val->xive_timaval[0], val->xive_timaval[1]); if (!kvmppc_xive_enabled(vcpu)) return -EPERM; if (!xc || !xive) return -ENOENT; /* We can't update the state of a "pushed" VCPU */ if (WARN_ON(vcpu->arch.xive_pushed)) return -EBUSY; /* * Restore the thread context registers. IPB and CPPR should * be the only ones that matter. */ vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0]; /* * There is no need to restore the XIVE internal state (IPB * stored in the NVT) as the IPB register was merged in KVM VP * state when captured. */ return 0; } bool kvmppc_xive_native_supported(void) { return xive_native_has_queue_state_support(); } static int xive_native_debug_show(struct seq_file *m, void *private) { struct kvmppc_xive *xive = m->private; struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; unsigned long i; if (!kvm) return 0; seq_puts(m, "=========\nVCPU state\n=========\n"); kvm_for_each_vcpu(i, vcpu, kvm) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; if (!xc) continue; seq_printf(m, "VCPU %d: VP=%#x/%02x\n" " NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x\n", xc->server_num, xc->vp_id, xc->vp_chip_id, vcpu->arch.xive_saved_state.nsr, vcpu->arch.xive_saved_state.cppr, vcpu->arch.xive_saved_state.ipb, vcpu->arch.xive_saved_state.pipr, be64_to_cpu(vcpu->arch.xive_saved_state.w01), be32_to_cpu(vcpu->arch.xive_cam_word)); kvmppc_xive_debug_show_queues(m, vcpu); } seq_puts(m, "=========\nSources\n=========\n"); for (i = 0; i <= xive->max_sbid; i++) { struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; if (sb) { arch_spin_lock(&sb->lock); kvmppc_xive_debug_show_sources(m, sb); arch_spin_unlock(&sb->lock); } } return 0; } DEFINE_SHOW_ATTRIBUTE(xive_native_debug); static void xive_native_debugfs_init(struct kvmppc_xive *xive) { xive->dentry = debugfs_create_file("xive", 0444, xive->kvm->debugfs_dentry, xive, &xive_native_debug_fops); pr_debug("%s: created\n", __func__); } static void kvmppc_xive_native_init(struct kvm_device *dev) { struct kvmppc_xive *xive = dev->private; /* Register some debug interfaces */ xive_native_debugfs_init(xive); } struct kvm_device_ops kvm_xive_native_ops = { .name = "kvm-xive-native", .create = kvmppc_xive_native_create, .init = kvmppc_xive_native_init, .release = kvmppc_xive_native_release, .set_attr = kvmppc_xive_native_set_attr, .get_attr = kvmppc_xive_native_get_attr, .has_attr = kvmppc_xive_native_has_attr, .mmap = kvmppc_xive_native_mmap, };
linux-master
arch/powerpc/kvm/book3s_xive_native.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved. * * Author: Yu Liu, [email protected] * Scott Wood, [email protected] * Ashish Kalra, [email protected] * Varun Sethi, [email protected] * Alexander Graf, [email protected] * * Description: * This file is based on arch/powerpc/kvm/44x_tlb.c, * by Hollis Blanchard <[email protected]>. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/highmem.h> #include <linux/log2.h> #include <linux/uaccess.h> #include <linux/sched/mm.h> #include <linux/rwsem.h> #include <linux/vmalloc.h> #include <linux/hugetlb.h> #include <asm/kvm_ppc.h> #include <asm/pte-walk.h> #include "e500.h" #include "timing.h" #include "e500_mmu_host.h" #include "trace_booke.h" #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; static inline unsigned int tlb1_max_shadow_size(void) { /* reserve one entry for magic page */ return host_tlb_params[1].entries - tlbcam_index - 1; } static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) { /* Mask off reserved bits. */ mas3 &= MAS3_ATTRIB_MASK; #ifndef CONFIG_KVM_BOOKE_HV if (!usermode) { /* Guest is in supervisor mode, * so we need to translate guest * supervisor permissions into user permissions. */ mas3 &= ~E500_TLB_USER_PERM_MASK; mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; } mas3 |= E500_TLB_SUPER_PERM_MASK; #endif return mas3; } /* * writing shadow tlb entry to host TLB */ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, uint32_t mas0, uint32_t lpid) { unsigned long flags; local_irq_save(flags); mtspr(SPRN_MAS0, mas0); mtspr(SPRN_MAS1, stlbe->mas1); mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); #ifdef CONFIG_KVM_BOOKE_HV mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid)); #endif asm volatile("isync; tlbwe" : : : "memory"); #ifdef CONFIG_KVM_BOOKE_HV /* Must clear mas8 for other host tlbwe's */ mtspr(SPRN_MAS8, 0); isync(); #endif local_irq_restore(flags); trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, stlbe->mas2, stlbe->mas7_3); } /* * Acquire a mas0 with victim hint, as if we just took a TLB miss. * * We don't care about the address we're searching for, other than that it's * in the right set and is not present in the TLB. Using a zero PID and a * userspace address means we don't have to set and then restore MAS5, or * calculate a proper MAS6 value. 
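 *
 * The caller passes the shadow entry's MAS2 value (see write_host_tlbe()),
 * so the EPN bits select the relevant TLB0 set and the MAS0 returned by
 * the tlbsx below carries the hardware's victim hint for that set.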
*/ static u32 get_host_mas0(unsigned long eaddr) { unsigned long flags; u32 mas0; u32 mas4; local_irq_save(flags); mtspr(SPRN_MAS6, 0); mas4 = mfspr(SPRN_MAS4); mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK); asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); mas0 = mfspr(SPRN_MAS0); mtspr(SPRN_MAS4, mas4); local_irq_restore(flags); return mas0; } /* sesel is for tlb1 only */ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe) { u32 mas0; if (tlbsel == 0) { mas0 = get_host_mas0(stlbe->mas2); __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid); } else { __write_host_tlbe(stlbe, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(sesel)), vcpu_e500->vcpu.kvm->arch.lpid); } } /* sesel is for tlb1 only */ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe, int stlbsel, int sesel) { int stid; preempt_disable(); stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); stlbe->mas1 |= MAS1_TID(stid); write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); preempt_enable(); } #ifdef CONFIG_KVM_E500V2 /* XXX should be a hook in the gva2hpa translation */ void kvmppc_map_magic(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct kvm_book3e_206_tlb_entry magic; ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; unsigned int stid; kvm_pfn_t pfn; pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT; get_page(pfn_to_page(pfn)); preempt_disable(); stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0); magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) | MAS1_TSIZE(BOOK3E_PAGESZ_4K); magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) | MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR; magic.mas8 = 0; __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0); preempt_enable(); } #endif void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int esel) { struct kvm_book3e_206_tlb_entry *gtlbe = get_entry(vcpu_e500, tlbsel, esel); struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; /* Don't bother with unmapped entries */ if (!(ref->flags & E500_TLB_VALID)) { WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), "%s: flags %x\n", __func__, ref->flags); WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); } if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; int hw_tlb_indx; unsigned long flags; local_irq_save(flags); while (tmp) { hw_tlb_indx = __ilog2_u64(tmp & -tmp); mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(hw_tlb_indx))); mtspr(SPRN_MAS1, 0); asm volatile("tlbwe"); vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0; tmp &= tmp - 1; } mb(); vcpu_e500->g2h_tlb1_map[esel] = 0; ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); local_irq_restore(flags); } if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) { /* * TLB1 entry is backed by 4k pages. This should happen * rarely and is not worth optimizing. Invalidate everything. 
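		 *
		 * That is, instead of tracking which individual TLB0 shadow
		 * entries back this guest TLB1 entry, kvmppc_e500_tlbil_all()
		 * below simply drops every shadow mapping for this guest.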
*/ kvmppc_e500_tlbil_all(vcpu_e500); ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); } /* * If TLB entry is still valid then it's a TLB0 entry, and thus * backed by at most one host tlbe per shadow pid */ if (ref->flags & E500_TLB_VALID) kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); /* Mark the TLB as not backed by the host anymore */ ref->flags = 0; } static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) { return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); } static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, struct kvm_book3e_206_tlb_entry *gtlbe, kvm_pfn_t pfn, unsigned int wimg) { ref->pfn = pfn; ref->flags = E500_TLB_VALID; /* Use guest supplied MAS2_G and MAS2_E */ ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; /* Mark the page accessed */ kvm_set_pfn_accessed(pfn); if (tlbe_is_writable(gtlbe)) kvm_set_pfn_dirty(pfn); } static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) { if (ref->flags & E500_TLB_VALID) { /* FIXME: don't log bogus pfn for TLB1 */ trace_kvm_booke206_ref_release(ref->pfn, ref->flags); ref->flags = 0; } } static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) { if (vcpu_e500->g2h_tlb1_map) memset(vcpu_e500->g2h_tlb1_map, 0, sizeof(u64) * vcpu_e500->gtlb_params[1].entries); if (vcpu_e500->h2g_tlb1_rmap) memset(vcpu_e500->h2g_tlb1_rmap, 0, sizeof(unsigned int) * host_tlb_params[1].entries); } static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) { int tlbsel; int i; for (tlbsel = 0; tlbsel <= 1; tlbsel++) { for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][i].ref; kvmppc_e500_ref_release(ref); } } } void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); kvmppc_e500_tlbil_all(vcpu_e500); clear_tlb_privs(vcpu_e500); clear_tlb1_bitmap(vcpu_e500); } /* TID must be supplied by the caller */ static void kvmppc_e500_setup_stlbe( struct kvm_vcpu *vcpu, struct kvm_book3e_206_tlb_entry *gtlbe, int tsize, struct tlbe_ref *ref, u64 gvaddr, struct kvm_book3e_206_tlb_entry *stlbe) { kvm_pfn_t pfn = ref->pfn; u32 pr = vcpu->arch.shared->msr & MSR_PR; BUG_ON(!(ref->flags & E500_TLB_VALID)); /* Force IPROT=0 for all guest mappings. */ stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); } static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, struct tlbe_ref *ref) { struct kvm_memory_slot *slot; unsigned long pfn = 0; /* silence GCC warning */ unsigned long hva; int pfnmap = 0; int tsize = BOOK3E_PAGESZ_4K; int ret = 0; unsigned long mmu_seq; struct kvm *kvm = vcpu_e500->vcpu.kvm; unsigned long tsize_pages = 0; pte_t *ptep; unsigned int wimg = 0; pgd_t *pgdir; unsigned long flags; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); /* * Translate guest physical to true physical, acquiring * a page reference if it is normal, non-reserved memory. * * gfn_to_memslot() must succeed because otherwise we wouldn't * have gotten this far. Eventually we should just pass the slot * pointer through from the first lookup. 
*/ slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); hva = gfn_to_hva_memslot(slot, gfn); if (tlbsel == 1) { struct vm_area_struct *vma; mmap_read_lock(kvm->mm); vma = find_vma(kvm->mm, hva); if (vma && hva >= vma->vm_start && (vma->vm_flags & VM_PFNMAP)) { /* * This VMA is a physically contiguous region (e.g. * /dev/mem) that bypasses normal Linux page * management. Find the overlap between the * vma and the memslot. */ unsigned long start, end; unsigned long slot_start, slot_end; pfnmap = 1; start = vma->vm_pgoff; end = start + vma_pages(vma); pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); slot_start = pfn - (gfn - slot->base_gfn); slot_end = slot_start + slot->npages; if (start < slot_start) start = slot_start; if (end > slot_end) end = slot_end; tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; /* * e500 doesn't implement the lowest tsize bit, * or 1K pages. */ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); /* * Now find the largest tsize (up to what the guest * requested) that will cover gfn, stay within the * range, and for which gfn and pfn are mutually * aligned. */ for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { unsigned long gfn_start, gfn_end; tsize_pages = 1UL << (tsize - 2); gfn_start = gfn & ~(tsize_pages - 1); gfn_end = gfn_start + tsize_pages; if (gfn_start + pfn - gfn < start) continue; if (gfn_end + pfn - gfn > end) continue; if ((gfn & (tsize_pages - 1)) != (pfn & (tsize_pages - 1))) continue; gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); pfn &= ~(tsize_pages - 1); break; } } else if (vma && hva >= vma->vm_start && is_vm_hugetlb_page(vma)) { unsigned long psize = vma_kernel_pagesize(vma); tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; /* * Take the largest page size that satisfies both host * and guest mapping */ tsize = min(__ilog2(psize) - 10, tsize); /* * e500 doesn't implement the lowest tsize bit, * or 1K pages. */ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); } mmap_read_unlock(kvm->mm); } if (likely(!pfnmap)) { tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT); pfn = gfn_to_pfn_memslot(slot, gfn); if (is_error_noslot_pfn(pfn)) { if (printk_ratelimit()) pr_err("%s: real page not found for gfn %lx\n", __func__, (long)gfn); return -EINVAL; } /* Align guest and physical address to page map boundaries */ pfn &= ~(tsize_pages - 1); gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); } spin_lock(&kvm->mmu_lock); if (mmu_invalidate_retry(kvm, mmu_seq)) { ret = -EAGAIN; goto out; } pgdir = vcpu_e500->vcpu.arch.pgdir; /* * We are just looking at the wimg bits, so we don't * care much about the trans splitting bit. * We are holding kvm->mmu_lock so a notifier invalidate * can't run hence pfn won't change. 
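	 *
	 * The WIMGE bits read from the host Linux PTE below are stashed in
	 * the tlbe_ref by kvmppc_e500_ref_setup() and end up in the shadow
	 * MAS2 via kvmppc_e500_setup_stlbe(), so, for example, a
	 * caching-inhibited host mapping keeps MAS2_I set in the guest's
	 * shadow TLB entry.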
*/ local_irq_save(flags); ptep = find_linux_pte(pgdir, hva, NULL, NULL); if (ptep) { pte_t pte = READ_ONCE(*ptep); if (pte_present(pte)) { wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; local_irq_restore(flags); } else { local_irq_restore(flags); pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n", __func__, (long)gfn, pfn); ret = -EINVAL; goto out; } } kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, ref, gvaddr, stlbe); /* Clear i-cache for new pages */ kvmppc_mmu_flush_icache(pfn); out: spin_unlock(&kvm->mmu_lock); /* Drop refcount on page, so that mmu notifiers can clear it */ kvm_release_pfn_clean(pfn); return ret; } /* XXX only map the one-one case, for now use TLB0 */ static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel, struct kvm_book3e_206_tlb_entry *stlbe) { struct kvm_book3e_206_tlb_entry *gtlbe; struct tlbe_ref *ref; int stlbsel = 0; int sesel = 0; int r; gtlbe = get_entry(vcpu_e500, 0, esel); ref = &vcpu_e500->gtlb_priv[0][esel].ref; r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), get_tlb_raddr(gtlbe) >> PAGE_SHIFT, gtlbe, 0, stlbe, ref); if (r) return r; write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel); return 0; } static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, struct tlbe_ref *ref, int esel) { unsigned int sesel = vcpu_e500->host_tlb1_nv++; if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) vcpu_e500->host_tlb1_nv = 0; if (vcpu_e500->h2g_tlb1_rmap[sesel]) { unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1; vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); } vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; WARN_ON(!(ref->flags & E500_TLB_VALID)); return sesel; } /* Caller must ensure that the specified guest TLB entry is safe to insert into * the shadow TLB. 
*/ /* For both one-one and one-to-many */ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe, int esel) { struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; int sesel; int r; r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref); if (r) return r; /* Use TLB0 when we can only map a page with 4k */ if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) { vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0; write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0); return 0; } /* Otherwise map into TLB1 */ sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel); write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); return 0; } void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, unsigned int index) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct tlbe_priv *priv; struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; int tlbsel = tlbsel_of(index); int esel = esel_of(index); gtlbe = get_entry(vcpu_e500, tlbsel, esel); switch (tlbsel) { case 0: priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; /* Triggers after clear_tlb_privs or on initial mapping */ if (!(priv->ref.flags & E500_TLB_VALID)) { kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); } else { kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, &priv->ref, eaddr, &stlbe); write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0); } break; case 1: { gfn_t gfn = gpaddr >> PAGE_SHIFT; kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe, esel); break; } default: BUG(); break; } } #ifdef CONFIG_KVM_BOOKE_HV int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_fetch_type type, unsigned long *instr) { gva_t geaddr; hpa_t addr; hfn_t pfn; hva_t eaddr; u32 mas1, mas2, mas3; u64 mas7_mas3; struct page *page; unsigned int addr_space, psize_shift; bool pr; unsigned long flags; /* Search TLB for guest pc to get the real address */ geaddr = kvmppc_get_pc(vcpu); addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG; local_irq_save(flags); mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space); mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu)); asm volatile("tlbsx 0, %[geaddr]\n" : : [geaddr] "r" (geaddr)); mtspr(SPRN_MAS5, 0); mtspr(SPRN_MAS8, 0); mas1 = mfspr(SPRN_MAS1); mas2 = mfspr(SPRN_MAS2); mas3 = mfspr(SPRN_MAS3); #ifdef CONFIG_64BIT mas7_mas3 = mfspr(SPRN_MAS7_MAS3); #else mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3; #endif local_irq_restore(flags); /* * If the TLB entry for guest pc was evicted, return to the guest. * There are high chances to find a valid TLB entry next time. */ if (!(mas1 & MAS1_VALID)) return EMULATE_AGAIN; /* * Another thread may rewrite the TLB entry in parallel, don't * execute from the address if the execute permission is not set */ pr = vcpu->arch.shared->msr & MSR_PR; if (unlikely((pr && !(mas3 & MAS3_UX)) || (!pr && !(mas3 & MAS3_SX)))) { pr_err_ratelimited( "%s: Instruction emulation from guest address %08lx without execute permission\n", __func__, geaddr); return EMULATE_AGAIN; } /* * The real address will be mapped by a cacheable, memory coherent, * write-back page. Check for mismatches when LRAT is used. 
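	 *
	 * Concretely, the check below rejects the fetch when MAS2_I or
	 * MAS2_W is set or MAS2_M is clear, i.e. when the guest mapping is
	 * not cacheable, write-back and memory-coherent.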
*/ if (has_feature(vcpu, VCPU_FTR_MMU_V2) && unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) { pr_err_ratelimited( "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n", __func__, geaddr); return EMULATE_AGAIN; } /* Get pfn */ psize_shift = MAS1_GET_TSIZE(mas1) + 10; addr = (mas7_mas3 & (~0ULL << psize_shift)) | (geaddr & ((1ULL << psize_shift) - 1ULL)); pfn = addr >> PAGE_SHIFT; /* Guard against emulation from devices area */ if (unlikely(!page_is_ram(pfn))) { pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n", __func__, addr); return EMULATE_AGAIN; } /* Map a page and get guest's instruction */ page = pfn_to_page(pfn); eaddr = (unsigned long)kmap_atomic(page); *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK)); kunmap_atomic((u32 *)eaddr); return EMULATE_DONE; } #else int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_fetch_type type, unsigned long *instr) { return EMULATE_AGAIN; } #endif /************* MMU Notifiers *************/ static bool kvm_e500_mmu_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { /* * Flush all shadow tlb entries everywhere. This is slow, but * we are 100% sure that we catch the to be unmapped page */ return true; } bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { return kvm_e500_mmu_unmap_gfn(kvm, range); } bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { /* XXX could be more clever ;) */ return false; } bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { /* XXX could be more clever ;) */ return false; } bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { /* The page will get remapped properly on its next fault */ return kvm_e500_mmu_unmap_gfn(kvm, range); } /*****************************************/ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) { host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY; host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; /* * This should never happen on real e500 hardware, but is * architecturally possible -- e.g. in some weird nested * virtualization case. */ if (host_tlb_params[0].entries == 0 || host_tlb_params[1].entries == 0) { pr_err("%s: need to know host tlb size\n", __func__); return -ENODEV; } host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >> TLBnCFG_ASSOC_SHIFT; host_tlb_params[1].ways = host_tlb_params[1].entries; if (!is_power_of_2(host_tlb_params[0].entries) || !is_power_of_2(host_tlb_params[0].ways) || host_tlb_params[0].entries < host_tlb_params[0].ways || host_tlb_params[0].ways == 0) { pr_err("%s: bad tlb0 host config: %u entries %u ways\n", __func__, host_tlb_params[0].entries, host_tlb_params[0].ways); return -ENODEV; } host_tlb_params[0].sets = host_tlb_params[0].entries / host_tlb_params[0].ways; host_tlb_params[1].sets = 1; vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries, sizeof(*vcpu_e500->h2g_tlb1_rmap), GFP_KERNEL); if (!vcpu_e500->h2g_tlb1_rmap) return -EINVAL; return 0; } void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) { kfree(vcpu_e500->h2g_tlb1_rmap); }
linux-master
arch/powerpc/kvm/e500_mmu_host.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2011. Freescale Inc. All rights reserved. * * Authors: * Alexander Graf <[email protected]> * Paul Mackerras <[email protected]> * * Description: * * Hypercall handling for running PAPR guests in PR KVM on Book 3S * processors. */ #include <linux/anon_inodes.h> #include <linux/uaccess.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #define HPTE_SIZE 16 /* bytes per HPT entry */ static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); unsigned long pteg_addr; pte_index <<= 4; pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70; pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL; pteg_addr |= pte_index; return pteg_addr; } static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) { long flags = kvmppc_get_gpr(vcpu, 4); long pte_index = kvmppc_get_gpr(vcpu, 5); __be64 pteg[2 * 8]; __be64 *hpte; unsigned long pteg_addr, i; long int ret; i = pte_index & 7; pte_index &= ~7UL; pteg_addr = get_pteg_addr(vcpu, pte_index); mutex_lock(&vcpu->kvm->arch.hpt_mutex); ret = H_FUNCTION; if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg))) goto done; hpte = pteg; ret = H_PTEG_FULL; if (likely((flags & H_EXACT) == 0)) { for (i = 0; ; ++i) { if (i == 8) goto done; if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0) break; hpte += 2; } } else { hpte += i * 2; if (*hpte & HPTE_V_VALID) goto done; } hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); pteg_addr += i * HPTE_SIZE; ret = H_FUNCTION; if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE)) goto done; kvmppc_set_gpr(vcpu, 4, pte_index | i); ret = H_SUCCESS; done: mutex_unlock(&vcpu->kvm->arch.hpt_mutex); kvmppc_set_gpr(vcpu, 3, ret); return EMULATE_DONE; } static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) { unsigned long flags= kvmppc_get_gpr(vcpu, 4); unsigned long pte_index = kvmppc_get_gpr(vcpu, 5); unsigned long avpn = kvmppc_get_gpr(vcpu, 6); unsigned long v = 0, pteg, rb; unsigned long pte[2]; long int ret; pteg = get_pteg_addr(vcpu, pte_index); mutex_lock(&vcpu->kvm->arch.hpt_mutex); ret = H_FUNCTION; if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) goto done; pte[0] = be64_to_cpu((__force __be64)pte[0]); pte[1] = be64_to_cpu((__force __be64)pte[1]); ret = H_NOT_FOUND; if ((pte[0] & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) || ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) goto done; ret = H_FUNCTION; if (copy_to_user((void __user *)pteg, &v, sizeof(v))) goto done; rb = compute_tlbie_rb(pte[0], pte[1], pte_index); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? 
true : false); ret = H_SUCCESS; kvmppc_set_gpr(vcpu, 4, pte[0]); kvmppc_set_gpr(vcpu, 5, pte[1]); done: mutex_unlock(&vcpu->kvm->arch.hpt_mutex); kvmppc_set_gpr(vcpu, 3, ret); return EMULATE_DONE; } /* Request defs for kvmppc_h_pr_bulk_remove() */ #define H_BULK_REMOVE_TYPE 0xc000000000000000ULL #define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL #define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL #define H_BULK_REMOVE_END 0xc000000000000000ULL #define H_BULK_REMOVE_CODE 0x3000000000000000ULL #define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL #define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL #define H_BULK_REMOVE_PARM 0x2000000000000000ULL #define H_BULK_REMOVE_HW 0x3000000000000000ULL #define H_BULK_REMOVE_RC 0x0c00000000000000ULL #define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL #define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL #define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL #define H_BULK_REMOVE_AVPN 0x0200000000000000ULL #define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL #define H_BULK_REMOVE_MAX_BATCH 4 static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) { int i; int paramnr = 4; int ret = H_SUCCESS; mutex_lock(&vcpu->kvm->arch.hpt_mutex); for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i)); unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1); unsigned long pteg, rb, flags; unsigned long pte[2]; unsigned long v = 0; if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) { break; /* Exit success */ } else if ((tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) { ret = H_PARAMETER; break; /* Exit fail */ } tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS; tsh |= H_BULK_REMOVE_RESPONSE; if ((tsh & H_BULK_REMOVE_ANDCOND) && (tsh & H_BULK_REMOVE_AVPN)) { tsh |= H_BULK_REMOVE_PARM; kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); ret = H_PARAMETER; break; /* Exit fail */ } pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) { ret = H_FUNCTION; break; } pte[0] = be64_to_cpu((__force __be64)pte[0]); pte[1] = be64_to_cpu((__force __be64)pte[1]); /* tsl = AVPN */ flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26; if ((pte[0] & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) || ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) { tsh |= H_BULK_REMOVE_NOT_FOUND; } else { /* Splat the pteg in (userland) hpt */ if (copy_to_user((void __user *)pteg, &v, sizeof(v))) { ret = H_FUNCTION; break; } rb = compute_tlbie_rb(pte[0], pte[1], tsh & H_BULK_REMOVE_PTEX); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? 
true : false); tsh |= H_BULK_REMOVE_SUCCESS; tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43; } kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); } mutex_unlock(&vcpu->kvm->arch.hpt_mutex); kvmppc_set_gpr(vcpu, 3, ret); return EMULATE_DONE; } static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) { unsigned long flags = kvmppc_get_gpr(vcpu, 4); unsigned long pte_index = kvmppc_get_gpr(vcpu, 5); unsigned long avpn = kvmppc_get_gpr(vcpu, 6); unsigned long rb, pteg, r, v; unsigned long pte[2]; long int ret; pteg = get_pteg_addr(vcpu, pte_index); mutex_lock(&vcpu->kvm->arch.hpt_mutex); ret = H_FUNCTION; if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) goto done; pte[0] = be64_to_cpu((__force __be64)pte[0]); pte[1] = be64_to_cpu((__force __be64)pte[1]); ret = H_NOT_FOUND; if ((pte[0] & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) goto done; v = pte[0]; r = pte[1]; r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI | HPTE_R_KEY_LO); r |= (flags << 55) & HPTE_R_PP0; r |= (flags << 48) & HPTE_R_KEY_HI; r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); pte[1] = r; rb = compute_tlbie_rb(v, r, pte_index); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); pte[0] = (__force u64)cpu_to_be64(pte[0]); pte[1] = (__force u64)cpu_to_be64(pte[1]); ret = H_FUNCTION; if (copy_to_user((void __user *)pteg, pte, sizeof(pte))) goto done; ret = H_SUCCESS; done: mutex_unlock(&vcpu->kvm->arch.hpt_mutex); kvmppc_set_gpr(vcpu, 3, ret); return EMULATE_DONE; } static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu) { long rc; rc = kvmppc_h_logical_ci_load(vcpu); if (rc == H_TOO_HARD) return EMULATE_FAIL; kvmppc_set_gpr(vcpu, 3, rc); return EMULATE_DONE; } static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu) { long rc; rc = kvmppc_h_logical_ci_store(vcpu); if (rc == H_TOO_HARD) return EMULATE_FAIL; kvmppc_set_gpr(vcpu, 3, rc); return EMULATE_DONE; } static int kvmppc_h_pr_set_mode(struct kvm_vcpu *vcpu) { unsigned long mflags = kvmppc_get_gpr(vcpu, 4); unsigned long resource = kvmppc_get_gpr(vcpu, 5); if (resource == H_SET_MODE_RESOURCE_ADDR_TRANS_MODE) { /* KVM PR does not provide AIL!=0 to guests */ if (mflags == 0) kvmppc_set_gpr(vcpu, 3, H_SUCCESS); else kvmppc_set_gpr(vcpu, 3, H_UNSUPPORTED_FLAG_START - 63); return EMULATE_DONE; } return EMULATE_FAIL; } #ifdef CONFIG_SPAPR_TCE_IOMMU static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu) { unsigned long liobn = kvmppc_get_gpr(vcpu, 4); unsigned long ioba = kvmppc_get_gpr(vcpu, 5); unsigned long tce = kvmppc_get_gpr(vcpu, 6); long rc; rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce); if (rc == H_TOO_HARD) return EMULATE_FAIL; kvmppc_set_gpr(vcpu, 3, rc); return EMULATE_DONE; } static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu) { unsigned long liobn = kvmppc_get_gpr(vcpu, 4); unsigned long ioba = kvmppc_get_gpr(vcpu, 5); unsigned long tce = kvmppc_get_gpr(vcpu, 6); unsigned long npages = kvmppc_get_gpr(vcpu, 7); long rc; rc = kvmppc_h_put_tce_indirect(vcpu, liobn, ioba, tce, npages); if (rc == H_TOO_HARD) return EMULATE_FAIL; kvmppc_set_gpr(vcpu, 3, rc); return EMULATE_DONE; } static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu) { unsigned long liobn = kvmppc_get_gpr(vcpu, 4); unsigned long ioba = kvmppc_get_gpr(vcpu, 5); unsigned long tce_value = kvmppc_get_gpr(vcpu, 6); unsigned long npages = kvmppc_get_gpr(vcpu, 7); long rc; rc = kvmppc_h_stuff_tce(vcpu, liobn, ioba, tce_value, npages); if (rc == H_TOO_HARD) return EMULATE_FAIL; kvmppc_set_gpr(vcpu, 3, rc); return EMULATE_DONE; } #else /* 
CONFIG_SPAPR_TCE_IOMMU */ static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu) { return EMULATE_FAIL; } static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu) { return EMULATE_FAIL; } static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu) { return EMULATE_FAIL; } #endif /* CONFIG_SPAPR_TCE_IOMMU */ static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd) { long rc = kvmppc_xics_hcall(vcpu, cmd); kvmppc_set_gpr(vcpu, 3, rc); return EMULATE_DONE; } int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) { int rc, idx; if (cmd <= MAX_HCALL_OPCODE && !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls)) return EMULATE_FAIL; switch (cmd) { case H_ENTER: return kvmppc_h_pr_enter(vcpu); case H_REMOVE: return kvmppc_h_pr_remove(vcpu); case H_PROTECT: return kvmppc_h_pr_protect(vcpu); case H_BULK_REMOVE: return kvmppc_h_pr_bulk_remove(vcpu); case H_PUT_TCE: return kvmppc_h_pr_put_tce(vcpu); case H_PUT_TCE_INDIRECT: return kvmppc_h_pr_put_tce_indirect(vcpu); case H_STUFF_TCE: return kvmppc_h_pr_stuff_tce(vcpu); case H_CEDE: kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE); kvm_vcpu_halt(vcpu); vcpu->stat.generic.halt_wakeup++; return EMULATE_DONE; case H_LOGICAL_CI_LOAD: return kvmppc_h_pr_logical_ci_load(vcpu); case H_LOGICAL_CI_STORE: return kvmppc_h_pr_logical_ci_store(vcpu); case H_SET_MODE: return kvmppc_h_pr_set_mode(vcpu); case H_XIRR: case H_CPPR: case H_EOI: case H_IPI: case H_IPOLL: case H_XIRR_X: if (kvmppc_xics_enabled(vcpu)) return kvmppc_h_pr_xics_hcall(vcpu, cmd); break; case H_RTAS: if (list_empty(&vcpu->kvm->arch.rtas_tokens)) break; idx = srcu_read_lock(&vcpu->kvm->srcu); rc = kvmppc_rtas_hcall(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, idx); if (rc) break; kvmppc_set_gpr(vcpu, 3, 0); return EMULATE_DONE; } return EMULATE_FAIL; } int kvmppc_hcall_impl_pr(unsigned long cmd) { switch (cmd) { case H_ENTER: case H_REMOVE: case H_PROTECT: case H_BULK_REMOVE: #ifdef CONFIG_SPAPR_TCE_IOMMU case H_GET_TCE: case H_PUT_TCE: case H_PUT_TCE_INDIRECT: case H_STUFF_TCE: #endif case H_CEDE: case H_LOGICAL_CI_LOAD: case H_LOGICAL_CI_STORE: case H_SET_MODE: #ifdef CONFIG_KVM_XICS case H_XIRR: case H_CPPR: case H_EOI: case H_IPI: case H_IPOLL: case H_XIRR_X: #endif return 1; } return 0; } /* * List of hcall numbers to enable by default. * For compatibility with old userspace, we enable by default * all hcalls that were implemented before the hcall-enabling * facility was added. Note this list should not include H_RTAS. */ static unsigned int default_hcall_list[] = { H_ENTER, H_REMOVE, H_PROTECT, H_BULK_REMOVE, #ifdef CONFIG_SPAPR_TCE_IOMMU H_GET_TCE, H_PUT_TCE, #endif H_CEDE, H_SET_MODE, #ifdef CONFIG_KVM_XICS H_XIRR, H_CPPR, H_EOI, H_IPI, H_IPOLL, H_XIRR_X, #endif 0 }; void kvmppc_pr_init_default_hcalls(struct kvm *kvm) { int i; unsigned int hcall; for (i = 0; default_hcall_list[i]; ++i) { hcall = default_hcall_list[i]; WARN_ON(!kvmppc_hcall_impl_pr(hcall)); __set_bit(hcall / 4, kvm->arch.enabled_hcalls); } }
linux-master
arch/powerpc/kvm/book3s_pr_papr.c
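/*
 * A minimal userspace sketch, added for illustration only (it is not part of
 * book3s_pr_papr.c above): it models the enabled_hcalls bitmap that
 * kvmppc_h_pr() consults via test_bit(cmd / 4, ...) and that
 * kvmppc_pr_init_default_hcalls() fills with __set_bit(hcall / 4, ...).
 * PAPR hcall tokens are multiples of 4, hence the division by 4; MAX_OPCODE
 * and the token values used in main() are stand-ins chosen for the demo.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_OPCODE   0x450			/* stand-in for MAX_HCALL_OPCODE */
#define BITMAP_BITS  (MAX_OPCODE / 4 + 1)
#define BITMAP_WORDS ((BITMAP_BITS + 63) / 64)

static uint64_t enabled[BITMAP_WORDS];

static void enable_hcall(unsigned int cmd)
{
	unsigned int bit = cmd / 4;		/* tokens are 4-aligned */

	enabled[bit / 64] |= 1ULL << (bit % 64);
}

static bool hcall_enabled(unsigned int cmd)
{
	unsigned int bit;

	if (cmd > MAX_OPCODE)			/* out-of-range calls are not gated */
		return true;
	bit = cmd / 4;
	return enabled[bit / 64] & (1ULL << (bit % 64));
}

int main(void)
{
	enable_hcall(0x08);			/* a low, 4-aligned token (e.g. H_ENTER) */

	printf("0x08 enabled: %d\n", hcall_enabled(0x08));	/* 1 */
	printf("0x6c enabled: %d\n", hcall_enabled(0x6c));	/* 0 */
	return 0;
}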
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2012 Michael Ellerman, IBM Corporation. * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation */ #include <linux/kernel.h> #include <linux/kvm_host.h> #include <linux/err.h> #include <linux/kernel_stat.h> #include <linux/pgtable.h> #include <asm/kvm_book3s.h> #include <asm/kvm_ppc.h> #include <asm/hvcall.h> #include <asm/xics.h> #include <asm/synch.h> #include <asm/cputhreads.h> #include <asm/ppc-opcode.h> #include <asm/pnv-pci.h> #include <asm/opal.h> #include <asm/smp.h> #include "book3s_xics.h" #define DEBUG_PASSUP int h_ipi_redirect = 1; EXPORT_SYMBOL(h_ipi_redirect); int kvm_irq_bypass = 1; EXPORT_SYMBOL(kvm_irq_bypass); static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, u32 new_irq, bool check_resend); static int xics_opal_set_server(unsigned int hw_irq, int server_cpu); /* -- ICS routines -- */ static void ics_rm_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics, struct kvmppc_icp *icp) { int i; for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { struct ics_irq_state *state = &ics->irq_state[i]; if (state->resend) icp_rm_deliver_irq(xics, icp, state->number, true); } } /* -- ICP routines -- */ #ifdef CONFIG_SMP static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { int hcpu; hcpu = hcore << threads_shift; kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu; smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION); kvmppc_set_host_ipi(hcpu); smp_mb(); kvmhv_rm_send_ipi(hcpu); } #else static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { } #endif /* * We start the search from our current CPU Id in the core map * and go in a circle until we get back to our ID looking for a * core that is running in host context and that hasn't already * been targeted for another rm_host_ops. * * In the future, could consider using a fairer algorithm (one * that distributes the IPIs better) * * Returns -1, if no CPU could be found in the host * Else, returns a CPU Id which has been reserved for use */ static inline int grab_next_hostcore(int start, struct kvmppc_host_rm_core *rm_core, int max, int action) { bool success; int core; union kvmppc_rm_state old, new; for (core = start + 1; core < max; core++) { old = new = READ_ONCE(rm_core[core].rm_state); if (!old.in_host || old.rm_action) continue; /* Try to grab this host core if not taken already. */ new.rm_action = action; success = cmpxchg64(&rm_core[core].rm_state.raw, old.raw, new.raw) == old.raw; if (success) { /* * Make sure that the store to the rm_action is made * visible before we return to caller (and the * subsequent store to rm_data) to synchronize with * the IPI handler. */ smp_wmb(); return core; } } return -1; } static inline int find_available_hostcore(int action) { int core; int my_core = smp_processor_id() >> threads_shift; struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core; core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action); if (core == -1) core = grab_next_hostcore(core, rm_core, my_core, action); return core; } static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, struct kvm_vcpu *this_vcpu) { struct kvmppc_icp *this_icp = this_vcpu->arch.icp; int cpu; int hcore; /* Mark the target VCPU as having an interrupt pending */ vcpu->stat.queue_intr++; set_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); /* Kick self ? 
Just set MER and return */ if (vcpu == this_vcpu) { mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER); return; } /* * Check if the core is loaded, * if not, find an available host core to post to wake the VCPU, * if we can't find one, set up state to eventually return too hard. */ cpu = vcpu->arch.thread_cpu; if (cpu < 0 || cpu >= nr_cpu_ids) { hcore = -1; if (kvmppc_host_rm_ops_hv && h_ipi_redirect) hcore = find_available_hostcore(XICS_RM_KICK_VCPU); if (hcore != -1) { icp_send_hcore_msg(hcore, vcpu); } else { this_icp->rm_action |= XICS_RM_KICK_VCPU; this_icp->rm_kick_target = vcpu; } return; } smp_mb(); kvmhv_rm_send_ipi(cpu); } static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu) { /* Note: Only called on self ! */ clear_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER); } static inline bool icp_rm_try_update(struct kvmppc_icp *icp, union kvmppc_icp_state old, union kvmppc_icp_state new) { struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu; bool success; /* Calculate new output value */ new.out_ee = (new.xisr && (new.pending_pri < new.cppr)); /* Attempt atomic update */ success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw; if (!success) goto bail; /* * Check for output state update * * Note that this is racy since another processor could be updating * the state already. This is why we never clear the interrupt output * here, we only ever set it. The clear only happens prior to doing * an update and only by the processor itself. Currently we do it * in Accept (H_XIRR) and Up_Cppr (H_XPPR). * * We also do not try to figure out whether the EE state has changed, * we unconditionally set it if the new state calls for it. The reason * for that is that we opportunistically remove the pending interrupt * flag when raising CPPR, so we need to set it back here if an * interrupt is still pending. */ if (new.out_ee) icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu); /* Expose the state change for debug purposes */ this_vcpu->arch.icp->rm_dbgstate = new; this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu; bail: return success; } static inline int check_too_hard(struct kvmppc_xics *xics, struct kvmppc_icp *icp) { return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS; } static void icp_rm_check_resend(struct kvmppc_xics *xics, struct kvmppc_icp *icp) { u32 icsid; /* Order this load with the test for need_resend in the caller */ smp_rmb(); for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) { struct kvmppc_ics *ics = xics->ics[icsid]; if (!test_and_clear_bit(icsid, icp->resend_map)) continue; if (!ics) continue; ics_rm_check_resend(xics, ics, icp); } } static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority, u32 *reject) { union kvmppc_icp_state old_state, new_state; bool success; do { old_state = new_state = READ_ONCE(icp->state); *reject = 0; /* See if we can deliver */ success = new_state.cppr > priority && new_state.mfrr > priority && new_state.pending_pri > priority; /* * If we can, check for a rejection and perform the * delivery */ if (success) { *reject = new_state.xisr; new_state.xisr = irq; new_state.pending_pri = priority; } else { /* * If we failed to deliver we set need_resend * so a subsequent CPPR state change causes us * to try a new delivery. 
*/ new_state.need_resend = true; } } while (!icp_rm_try_update(icp, old_state, new_state)); return success; } static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, u32 new_irq, bool check_resend) { struct ics_irq_state *state; struct kvmppc_ics *ics; u32 reject; u16 src; /* * This is used both for initial delivery of an interrupt and * for subsequent rejection. * * Rejection can be racy vs. resends. We have evaluated the * rejection in an atomic ICP transaction which is now complete, * so potentially the ICP can already accept the interrupt again. * * So we need to retry the delivery. Essentially the reject path * boils down to a failed delivery. Always. * * Now the interrupt could also have moved to a different target, * thus we may need to re-do the ICP lookup as well */ again: /* Get the ICS state and lock it */ ics = kvmppc_xics_find_ics(xics, new_irq, &src); if (!ics) { /* Unsafe increment, but this does not need to be accurate */ xics->err_noics++; return; } state = &ics->irq_state[src]; /* Get a lock on the ICS */ arch_spin_lock(&ics->lock); /* Get our server */ if (!icp || state->server != icp->server_num) { icp = kvmppc_xics_find_server(xics->kvm, state->server); if (!icp) { /* Unsafe increment again*/ xics->err_noicp++; goto out; } } if (check_resend) if (!state->resend) goto out; /* Clear the resend bit of that interrupt */ state->resend = 0; /* * If masked, bail out * * Note: PAPR doesn't mention anything about masked pending * when doing a resend, only when doing a delivery. * * However that would have the effect of losing a masked * interrupt that was rejected and isn't consistent with * the whole masked_pending business which is about not * losing interrupts that occur while masked. * * I don't differentiate normal deliveries and resends, this * implementation will differ from PAPR and not lose such * interrupts. */ if (state->priority == MASKED) { state->masked_pending = 1; goto out; } /* * Try the delivery, this will set the need_resend flag * in the ICP as part of the atomic transaction if the * delivery is not possible. * * Note that if successful, the new delivery might have itself * rejected an interrupt that was "delivered" before we took the * ics spin lock. * * In this case we do the whole sequence all over again for the * new guy. We cannot assume that the rejected interrupt is less * favored than the new one, and thus doesn't need to be delivered, * because by the time we exit icp_rm_try_to_deliver() the target * processor may well have already consumed & completed it, and thus * the rejected interrupt might actually be already acceptable. */ if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) { /* * Delivery was successful, did we reject somebody else ? */ if (reject && reject != XICS_IPI) { arch_spin_unlock(&ics->lock); icp->n_reject++; new_irq = reject; check_resend = 0; goto again; } } else { /* * We failed to deliver the interrupt we need to set the * resend map bit and mark the ICS state as needing a resend */ state->resend = 1; /* * Make sure when checking resend, we don't miss the resend * if resend_map bit is seen and cleared. */ smp_wmb(); set_bit(ics->icsid, icp->resend_map); /* * If the need_resend flag got cleared in the ICP some time * between icp_rm_try_to_deliver() atomic update and now, then * we know it might have missed the resend_map bit. 
So we * retry */ smp_mb(); if (!icp->state.need_resend) { state->resend = 0; arch_spin_unlock(&ics->lock); check_resend = 0; goto again; } } out: arch_spin_unlock(&ics->lock); } static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, u8 new_cppr) { union kvmppc_icp_state old_state, new_state; bool resend; /* * This handles several related states in one operation: * * ICP State: Down_CPPR * * Load CPPR with new value and if the XISR is 0 * then check for resends: * * ICP State: Resend * * If MFRR is more favored than CPPR, check for IPIs * and notify ICS of a potential resend. This is done * asynchronously (when used in real mode, we will have * to exit here). * * We do not handle the complete Check_IPI as documented * here. In the PAPR, this state will be used for both * Set_MFRR and Down_CPPR. However, we know that we aren't * changing the MFRR state here so we don't need to handle * the case of an MFRR causing a reject of a pending irq, * this will have been handled when the MFRR was set in the * first place. * * Thus we don't have to handle rejects, only resends. * * When implementing real mode for HV KVM, resend will lead to * a H_TOO_HARD return and the whole transaction will be handled * in virtual mode. */ do { old_state = new_state = READ_ONCE(icp->state); /* Down_CPPR */ new_state.cppr = new_cppr; /* * Cut down Resend / Check_IPI / IPI * * The logic is that we cannot have a pending interrupt * trumped by an IPI at this point (see above), so we * know that either the pending interrupt is already an * IPI (in which case we don't care to override it) or * it's either more favored than us or non existent */ if (new_state.mfrr < new_cppr && new_state.mfrr <= new_state.pending_pri) { new_state.pending_pri = new_state.mfrr; new_state.xisr = XICS_IPI; } /* Latch/clear resend bit */ resend = new_state.need_resend; new_state.need_resend = 0; } while (!icp_rm_try_update(icp, old_state, new_state)); /* * Now handle resend checks. Those are asynchronous to the ICP * state update in HW (ie bus transactions) so we can handle them * separately here as well. 
*/ if (resend) { icp->n_check_resend++; icp_rm_check_resend(xics, icp); } } unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu) { vcpu->arch.regs.gpr[5] = get_tb(); return xics_rm_h_xirr(vcpu); } unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu) { union kvmppc_icp_state old_state, new_state; struct kvmppc_xics *xics = vcpu->kvm->arch.xics; struct kvmppc_icp *icp = vcpu->arch.icp; u32 xirr; if (!xics || !xics->real_mode) return H_TOO_HARD; /* First clear the interrupt */ icp_rm_clr_vcpu_irq(icp->vcpu); /* * ICP State: Accept_Interrupt * * Return the pending interrupt (if any) along with the * current CPPR, then clear the XISR & set CPPR to the * pending priority */ do { old_state = new_state = READ_ONCE(icp->state); xirr = old_state.xisr | (((u32)old_state.cppr) << 24); if (!old_state.xisr) break; new_state.cppr = new_state.pending_pri; new_state.pending_pri = 0xff; new_state.xisr = 0; } while (!icp_rm_try_update(icp, old_state, new_state)); /* Return the result in GPR4 */ vcpu->arch.regs.gpr[4] = xirr; return check_too_hard(xics, icp); } int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, unsigned long mfrr) { union kvmppc_icp_state old_state, new_state; struct kvmppc_xics *xics = vcpu->kvm->arch.xics; struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp; u32 reject; bool resend; bool local; if (!xics || !xics->real_mode) return H_TOO_HARD; local = this_icp->server_num == server; if (local) icp = this_icp; else icp = kvmppc_xics_find_server(vcpu->kvm, server); if (!icp) return H_PARAMETER; /* * ICP state: Set_MFRR * * If the CPPR is more favored than the new MFRR, then * nothing needs to be done as there can be no XISR to * reject. * * ICP state: Check_IPI * * If the CPPR is less favored, then we might be replacing * an interrupt, and thus need to possibly reject it. * * ICP State: IPI * * Besides rejecting any pending interrupts, we also * update XISR and pending_pri to mark IPI as pending. * * PAPR does not describe this state, but if the MFRR is being * made less favored than its earlier value, there might be * a previously-rejected interrupt needing to be resent. * Ideally, we would want to resend only if * prio(pending_interrupt) < mfrr && * prio(pending_interrupt) < cppr * where pending interrupt is the one that was rejected. But * we don't have that state, so we simply trigger a resend * whenever the MFRR is made less favored. 
*/ do { old_state = new_state = READ_ONCE(icp->state); /* Set_MFRR */ new_state.mfrr = mfrr; /* Check_IPI */ reject = 0; resend = false; if (mfrr < new_state.cppr) { /* Reject a pending interrupt if not an IPI */ if (mfrr <= new_state.pending_pri) { reject = new_state.xisr; new_state.pending_pri = mfrr; new_state.xisr = XICS_IPI; } } if (mfrr > old_state.mfrr) { resend = new_state.need_resend; new_state.need_resend = 0; } } while (!icp_rm_try_update(icp, old_state, new_state)); /* Handle reject in real mode */ if (reject && reject != XICS_IPI) { this_icp->n_reject++; icp_rm_deliver_irq(xics, icp, reject, false); } /* Handle resends in real mode */ if (resend) { this_icp->n_check_resend++; icp_rm_check_resend(xics, icp); } return check_too_hard(xics, this_icp); } int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) { union kvmppc_icp_state old_state, new_state; struct kvmppc_xics *xics = vcpu->kvm->arch.xics; struct kvmppc_icp *icp = vcpu->arch.icp; u32 reject; if (!xics || !xics->real_mode) return H_TOO_HARD; /* * ICP State: Set_CPPR * * We can safely compare the new value with the current * value outside of the transaction as the CPPR is only * ever changed by the processor on itself */ if (cppr > icp->state.cppr) { icp_rm_down_cppr(xics, icp, cppr); goto bail; } else if (cppr == icp->state.cppr) return H_SUCCESS; /* * ICP State: Up_CPPR * * The processor is raising its priority, this can result * in a rejection of a pending interrupt: * * ICP State: Reject_Current * * We can remove EE from the current processor, the update * transaction will set it again if needed */ icp_rm_clr_vcpu_irq(icp->vcpu); do { old_state = new_state = READ_ONCE(icp->state); reject = 0; new_state.cppr = cppr; if (cppr <= new_state.pending_pri) { reject = new_state.xisr; new_state.xisr = 0; new_state.pending_pri = 0xff; } } while (!icp_rm_try_update(icp, old_state, new_state)); /* * Check for rejects. They are handled by doing a new delivery * attempt (see comments in icp_rm_deliver_irq). */ if (reject && reject != XICS_IPI) { icp->n_reject++; icp_rm_deliver_irq(xics, icp, reject, false); } bail: return check_too_hard(xics, icp); } static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq) { struct kvmppc_xics *xics = vcpu->kvm->arch.xics; struct kvmppc_icp *icp = vcpu->arch.icp; struct kvmppc_ics *ics; struct ics_irq_state *state; u16 src; u32 pq_old, pq_new; /* * ICS EOI handling: For LSI, if P bit is still set, we need to * resend it. * * For MSI, we move Q bit into P (and clear Q). If it is set, * resend it. 
*/ ics = kvmppc_xics_find_ics(xics, irq, &src); if (!ics) goto bail; state = &ics->irq_state[src]; if (state->lsi) pq_new = state->pq_state; else do { pq_old = state->pq_state; pq_new = pq_old >> 1; } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old); if (pq_new & PQ_PRESENTED) icp_rm_deliver_irq(xics, NULL, irq, false); if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) { icp->rm_action |= XICS_RM_NOTIFY_EOI; icp->rm_eoied_irq = irq; } /* Handle passthrough interrupts */ if (state->host_irq) { ++vcpu->stat.pthru_all; if (state->intr_cpu != -1) { int pcpu = raw_smp_processor_id(); pcpu = cpu_first_thread_sibling(pcpu); ++vcpu->stat.pthru_host; if (state->intr_cpu != pcpu) { ++vcpu->stat.pthru_bad_aff; xics_opal_set_server(state->host_irq, pcpu); } state->intr_cpu = -1; } } bail: return check_too_hard(xics, icp); } int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) { struct kvmppc_xics *xics = vcpu->kvm->arch.xics; struct kvmppc_icp *icp = vcpu->arch.icp; u32 irq = xirr & 0x00ffffff; if (!xics || !xics->real_mode) return H_TOO_HARD; /* * ICP State: EOI * * Note: If EOI is incorrectly used by SW to lower the CPPR * value (ie more favored), we do not check for rejection of * a pending interrupt, this is a SW error and PAPR specifies * that we don't have to deal with it. * * The sending of an EOI to the ICS is handled after the * CPPR update * * ICP State: Down_CPPR which we handle * in a separate function as it's shared with H_CPPR. */ icp_rm_down_cppr(xics, icp, xirr >> 24); /* IPIs have no EOI */ if (irq == XICS_IPI) return check_too_hard(xics, icp); return ics_rm_eoi(vcpu, irq); } static unsigned long eoi_rc; static void icp_eoi(struct irq_data *d, u32 hwirq, __be32 xirr, bool *again) { void __iomem *xics_phys; int64_t rc; rc = pnv_opal_pci_msi_eoi(d); if (rc) eoi_rc = rc; iosync(); /* EOI it */ xics_phys = local_paca->kvm_hstate.xics_phys; if (xics_phys) { __raw_rm_writel(xirr, xics_phys + XICS_XIRR); } else { rc = opal_int_eoi(be32_to_cpu(xirr)); *again = rc > 0; } } static int xics_opal_set_server(unsigned int hw_irq, int server_cpu) { unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2; return opal_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY); } /* * Increment a per-CPU 32-bit unsigned integer variable. * Safe to call in real-mode. Handles vmalloc'ed addresses * * ToDo: Make this work for any integral type */ static inline void this_cpu_inc_rm(unsigned int __percpu *addr) { unsigned long l; unsigned int *raddr; int cpu = smp_processor_id(); raddr = per_cpu_ptr(addr, cpu); l = (unsigned long)raddr; if (get_region_id(l) == VMALLOC_REGION_ID) { l = vmalloc_to_phys(raddr); raddr = (unsigned int *)l; } ++*raddr; } /* * We don't try to update the flags in the irq_desc 'istate' field in * here as would happen in the normal IRQ handling path for several reasons: * - state flags represent internal IRQ state and are not expected to be * updated outside the IRQ subsystem * - more importantly, these are useful for edge triggered interrupts, * IRQ probing, etc., but we are only handling MSI/MSIx interrupts here * and these states shouldn't apply to us. * * However, we do update irq_stats - we somewhat duplicate the code in * kstat_incr_irqs_this_cpu() for this since this function is defined * in irq/internal.h which we don't want to include here. * The only difference is that desc->kstat_irqs is an allocated per CPU * variable and could have been vmalloc'ed, so we can't directly * call __this_cpu_inc() on it. 
The kstat structure is a static * per CPU variable and it should be accessible by real-mode KVM. * */ static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc) { this_cpu_inc_rm(desc->kstat_irqs); __this_cpu_inc(kstat.irqs_sum); } long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr, struct kvmppc_irq_map *irq_map, struct kvmppc_passthru_irqmap *pimap, bool *again) { struct kvmppc_xics *xics; struct kvmppc_icp *icp; struct kvmppc_ics *ics; struct ics_irq_state *state; u32 irq; u16 src; u32 pq_old, pq_new; irq = irq_map->v_hwirq; xics = vcpu->kvm->arch.xics; icp = vcpu->arch.icp; kvmppc_rm_handle_irq_desc(irq_map->desc); ics = kvmppc_xics_find_ics(xics, irq, &src); if (!ics) return 2; state = &ics->irq_state[src]; /* only MSIs register bypass producers, so it must be MSI here */ do { pq_old = state->pq_state; pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED; } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old); /* Test P=1, Q=0, this is the only case where we present */ if (pq_new == PQ_PRESENTED) icp_rm_deliver_irq(xics, icp, irq, false); /* EOI the interrupt */ icp_eoi(irq_desc_get_irq_data(irq_map->desc), irq_map->r_hwirq, xirr, again); if (check_too_hard(xics, icp) == H_TOO_HARD) return 2; else return -2; } /* --- Non-real mode XICS-related built-in routines --- */ /* * Host Operations poked by RM KVM */ static void rm_host_ipi_action(int action, void *data) { switch (action) { case XICS_RM_KICK_VCPU: kvmppc_host_rm_ops_hv->vcpu_kick(data); break; default: WARN(1, "Unexpected rm_action=%d data=%p\n", action, data); break; } } void kvmppc_xics_ipi_action(void) { int core; unsigned int cpu = smp_processor_id(); struct kvmppc_host_rm_core *rm_corep; core = cpu >> threads_shift; rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core]; if (rm_corep->rm_data) { rm_host_ipi_action(rm_corep->rm_state.rm_action, rm_corep->rm_data); /* Order these stores against the real mode KVM */ rm_corep->rm_data = NULL; smp_wmb(); rm_corep->rm_state.rm_action = 0; } }
linux-master
arch/powerpc/kvm/book3s_hv_rm_xics.c
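/*
 * A minimal userspace sketch, added for illustration only (it is not part of
 * book3s_hv_rm_xics.c above): it models the lock-free ICP update pattern used
 * throughout the real-mode code, where the whole presentation-controller
 * state lives in one 64-bit union and every change is a read / modify /
 * cmpxchg retry loop (icp_rm_try_update() and its callers).  The field layout
 * below is a simplified stand-in, not the real kvmppc_icp_state.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

union icp_state {
	uint64_t raw;
	struct {
		uint32_t xisr;			/* pending source, 0 = none */
		uint8_t  pending_pri;		/* priority of that source */
		uint8_t  cppr;			/* current processor priority */
		uint8_t  mfrr;			/* IPI priority */
		uint8_t  out_ee;		/* derived: raise EE to the vcpu? */
	};
};

static _Atomic uint64_t icp_raw;

/* Mirrors icp_rm_try_update(): recompute out_ee, then a single CAS. */
static bool icp_try_update(union icp_state old, union icp_state new)
{
	new.out_ee = (new.xisr && new.pending_pri < new.cppr);
	return atomic_compare_exchange_strong(&icp_raw, &old.raw, new.raw);
}

/* Mirrors the shape of icp_rm_try_to_deliver(): retry until the CAS lands. */
static bool deliver(uint32_t irq, uint8_t prio)
{
	union icp_state old, new;
	bool ok;

	do {
		old.raw = new.raw = atomic_load(&icp_raw);
		ok = new.cppr > prio && new.mfrr > prio &&
		     new.pending_pri > prio;
		if (ok) {
			new.xisr = irq;
			new.pending_pri = prio;
		}
	} while (!icp_try_update(old, new));

	return ok;
}

int main(void)
{
	union icp_state init = { .cppr = 0xff, .mfrr = 0xff,
				 .pending_pri = 0xff };

	atomic_store(&icp_raw, init.raw);
	printf("irq 0x1001 at prio 5 delivered: %d\n", deliver(0x1001, 5));
	return 0;
}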
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2012 Michael Ellerman, IBM Corporation. * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation. */ #include <linux/kernel.h> #include <linux/kvm_host.h> #include <linux/err.h> #include <linux/gfp.h> #include <linux/anon_inodes.h> #include <linux/spinlock.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <asm/kvm_book3s.h> #include <asm/kvm_ppc.h> #include <asm/hvcall.h> #include <asm/xics.h> #include <asm/time.h> #include <linux/seq_file.h> #include "book3s_xics.h" #if 1 #define XICS_DBG(fmt...) do { } while (0) #else #define XICS_DBG(fmt...) trace_printk(fmt) #endif #define ENABLE_REALMODE true #define DEBUG_REALMODE false /* * LOCKING * ======= * * Each ICS has a spin lock protecting the information about the IRQ * sources and avoiding simultaneous deliveries of the same interrupt. * * ICP operations are done via a single compare & swap transaction * (most ICP state fits in the union kvmppc_icp_state) */ /* * TODO * ==== * * - To speed up resends, keep a bitmap of "resend" set bits in the * ICS * * - Speed up server# -> ICP lookup (array ? hash table ?) * * - Make ICS lockless as well, or at least a per-interrupt lock or hashed * locks array to improve scalability */ /* -- ICS routines -- */ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, u32 new_irq, bool check_resend); /* * Return value ideally indicates how the interrupt was handled, but no * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS), * so just return 0. */ static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level) { struct ics_irq_state *state; struct kvmppc_ics *ics; u16 src; u32 pq_old, pq_new; XICS_DBG("ics deliver %#x (level: %d)\n", irq, level); ics = kvmppc_xics_find_ics(xics, irq, &src); if (!ics) { XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq); return -EINVAL; } state = &ics->irq_state[src]; if (!state->exists) return -EINVAL; if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET) level = 1; else if (level == KVM_INTERRUPT_UNSET) level = 0; /* * Take other values the same as 1, consistent with original code. * maybe WARN here? */ if (!state->lsi && level == 0) /* noop for MSI */ return 0; do { pq_old = state->pq_state; if (state->lsi) { if (level) { if (pq_old & PQ_PRESENTED) /* Setting already set LSI ... 
*/ return 0; pq_new = PQ_PRESENTED; } else pq_new = 0; } else pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED; } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old); /* Test P=1, Q=0, this is the only case where we present */ if (pq_new == PQ_PRESENTED) icp_deliver_irq(xics, NULL, irq, false); /* Record which CPU this arrived on for passed-through interrupts */ if (state->host_irq) state->intr_cpu = raw_smp_processor_id(); return 0; } static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics, struct kvmppc_icp *icp) { int i; for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { struct ics_irq_state *state = &ics->irq_state[i]; if (state->resend) { XICS_DBG("resend %#x prio %#x\n", state->number, state->priority); icp_deliver_irq(xics, icp, state->number, true); } } } static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics, struct ics_irq_state *state, u32 server, u32 priority, u32 saved_priority) { bool deliver; unsigned long flags; local_irq_save(flags); arch_spin_lock(&ics->lock); state->server = server; state->priority = priority; state->saved_priority = saved_priority; deliver = false; if ((state->masked_pending || state->resend) && priority != MASKED) { state->masked_pending = 0; state->resend = 0; deliver = true; } arch_spin_unlock(&ics->lock); local_irq_restore(flags); return deliver; } int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority) { struct kvmppc_xics *xics = kvm->arch.xics; struct kvmppc_icp *icp; struct kvmppc_ics *ics; struct ics_irq_state *state; u16 src; if (!xics) return -ENODEV; ics = kvmppc_xics_find_ics(xics, irq, &src); if (!ics) return -EINVAL; state = &ics->irq_state[src]; icp = kvmppc_xics_find_server(kvm, server); if (!icp) return -EINVAL; XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n", irq, server, priority, state->masked_pending, state->resend); if (write_xive(xics, ics, state, server, priority, priority)) icp_deliver_irq(xics, icp, irq, false); return 0; } int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority) { struct kvmppc_xics *xics = kvm->arch.xics; struct kvmppc_ics *ics; struct ics_irq_state *state; u16 src; unsigned long flags; if (!xics) return -ENODEV; ics = kvmppc_xics_find_ics(xics, irq, &src); if (!ics) return -EINVAL; state = &ics->irq_state[src]; local_irq_save(flags); arch_spin_lock(&ics->lock); *server = state->server; *priority = state->priority; arch_spin_unlock(&ics->lock); local_irq_restore(flags); return 0; } int kvmppc_xics_int_on(struct kvm *kvm, u32 irq) { struct kvmppc_xics *xics = kvm->arch.xics; struct kvmppc_icp *icp; struct kvmppc_ics *ics; struct ics_irq_state *state; u16 src; if (!xics) return -ENODEV; ics = kvmppc_xics_find_ics(xics, irq, &src); if (!ics) return -EINVAL; state = &ics->irq_state[src]; icp = kvmppc_xics_find_server(kvm, state->server); if (!icp) return -EINVAL; if (write_xive(xics, ics, state, state->server, state->saved_priority, state->saved_priority)) icp_deliver_irq(xics, icp, irq, false); return 0; } int kvmppc_xics_int_off(struct kvm *kvm, u32 irq) { struct kvmppc_xics *xics = kvm->arch.xics; struct kvmppc_ics *ics; struct ics_irq_state *state; u16 src; if (!xics) return -ENODEV; ics = kvmppc_xics_find_ics(xics, irq, &src); if (!ics) return -EINVAL; state = &ics->irq_state[src]; write_xive(xics, ics, state, state->server, MASKED, state->priority); return 0; } /* -- ICP routines, including hcalls -- */ static inline bool icp_try_update(struct kvmppc_icp *icp, union kvmppc_icp_state old, union kvmppc_icp_state new, 
bool change_self) { bool success; /* Calculate new output value */ new.out_ee = (new.xisr && (new.pending_pri < new.cppr)); /* Attempt atomic update */ success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw; if (!success) goto bail; XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n", icp->server_num, old.cppr, old.mfrr, old.pending_pri, old.xisr, old.need_resend, old.out_ee); XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n", new.cppr, new.mfrr, new.pending_pri, new.xisr, new.need_resend, new.out_ee); /* * Check for output state update * * Note that this is racy since another processor could be updating * the state already. This is why we never clear the interrupt output * here, we only ever set it. The clear only happens prior to doing * an update and only by the processor itself. Currently we do it * in Accept (H_XIRR) and Up_Cppr (H_XPPR). * * We also do not try to figure out whether the EE state has changed, * we unconditionally set it if the new state calls for it. The reason * for that is that we opportunistically remove the pending interrupt * flag when raising CPPR, so we need to set it back here if an * interrupt is still pending. */ if (new.out_ee) { kvmppc_book3s_queue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); if (!change_self) kvmppc_fast_vcpu_kick(icp->vcpu); } bail: return success; } static void icp_check_resend(struct kvmppc_xics *xics, struct kvmppc_icp *icp) { u32 icsid; /* Order this load with the test for need_resend in the caller */ smp_rmb(); for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) { struct kvmppc_ics *ics = xics->ics[icsid]; if (!test_and_clear_bit(icsid, icp->resend_map)) continue; if (!ics) continue; ics_check_resend(xics, ics, icp); } } static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority, u32 *reject) { union kvmppc_icp_state old_state, new_state; bool success; XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority, icp->server_num); do { old_state = new_state = READ_ONCE(icp->state); *reject = 0; /* See if we can deliver */ success = new_state.cppr > priority && new_state.mfrr > priority && new_state.pending_pri > priority; /* * If we can, check for a rejection and perform the * delivery */ if (success) { *reject = new_state.xisr; new_state.xisr = irq; new_state.pending_pri = priority; } else { /* * If we failed to deliver we set need_resend * so a subsequent CPPR state change causes us * to try a new delivery. */ new_state.need_resend = true; } } while (!icp_try_update(icp, old_state, new_state, false)); return success; } static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, u32 new_irq, bool check_resend) { struct ics_irq_state *state; struct kvmppc_ics *ics; u32 reject; u16 src; unsigned long flags; /* * This is used both for initial delivery of an interrupt and * for subsequent rejection. * * Rejection can be racy vs. resends. We have evaluated the * rejection in an atomic ICP transaction which is now complete, * so potentially the ICP can already accept the interrupt again. * * So we need to retry the delivery. Essentially the reject path * boils down to a failed delivery. Always. 
* * Now the interrupt could also have moved to a different target, * thus we may need to re-do the ICP lookup as well */ again: /* Get the ICS state and lock it */ ics = kvmppc_xics_find_ics(xics, new_irq, &src); if (!ics) { XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq); return; } state = &ics->irq_state[src]; /* Get a lock on the ICS */ local_irq_save(flags); arch_spin_lock(&ics->lock); /* Get our server */ if (!icp || state->server != icp->server_num) { icp = kvmppc_xics_find_server(xics->kvm, state->server); if (!icp) { pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n", new_irq, state->server); goto out; } } if (check_resend) if (!state->resend) goto out; /* Clear the resend bit of that interrupt */ state->resend = 0; /* * If masked, bail out * * Note: PAPR doesn't mention anything about masked pending * when doing a resend, only when doing a delivery. * * However that would have the effect of losing a masked * interrupt that was rejected and isn't consistent with * the whole masked_pending business which is about not * losing interrupts that occur while masked. * * I don't differentiate normal deliveries and resends, this * implementation will differ from PAPR and not lose such * interrupts. */ if (state->priority == MASKED) { XICS_DBG("irq %#x masked pending\n", new_irq); state->masked_pending = 1; goto out; } /* * Try the delivery, this will set the need_resend flag * in the ICP as part of the atomic transaction if the * delivery is not possible. * * Note that if successful, the new delivery might have itself * rejected an interrupt that was "delivered" before we took the * ics spin lock. * * In this case we do the whole sequence all over again for the * new guy. We cannot assume that the rejected interrupt is less * favored than the new one, and thus doesn't need to be delivered, * because by the time we exit icp_try_to_deliver() the target * processor may well have already consumed & completed it, and thus * the rejected interrupt might actually be already acceptable. */ if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) { /* * Delivery was successful, did we reject somebody else ? */ if (reject && reject != XICS_IPI) { arch_spin_unlock(&ics->lock); local_irq_restore(flags); new_irq = reject; check_resend = false; goto again; } } else { /* * We failed to deliver the interrupt we need to set the * resend map bit and mark the ICS state as needing a resend */ state->resend = 1; /* * Make sure when checking resend, we don't miss the resend * if resend_map bit is seen and cleared. */ smp_wmb(); set_bit(ics->icsid, icp->resend_map); /* * If the need_resend flag got cleared in the ICP some time * between icp_try_to_deliver() atomic update and now, then * we know it might have missed the resend_map bit. So we * retry */ smp_mb(); if (!icp->state.need_resend) { state->resend = 0; arch_spin_unlock(&ics->lock); local_irq_restore(flags); check_resend = false; goto again; } } out: arch_spin_unlock(&ics->lock); local_irq_restore(flags); } static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, u8 new_cppr) { union kvmppc_icp_state old_state, new_state; bool resend; /* * This handles several related states in one operation: * * ICP State: Down_CPPR * * Load CPPR with new value and if the XISR is 0 * then check for resends: * * ICP State: Resend * * If MFRR is more favored than CPPR, check for IPIs * and notify ICS of a potential resend. This is done * asynchronously (when used in real mode, we will have * to exit here). 
* * We do not handle the complete Check_IPI as documented * here. In the PAPR, this state will be used for both * Set_MFRR and Down_CPPR. However, we know that we aren't * changing the MFRR state here so we don't need to handle * the case of an MFRR causing a reject of a pending irq, * this will have been handled when the MFRR was set in the * first place. * * Thus we don't have to handle rejects, only resends. * * When implementing real mode for HV KVM, resend will lead to * a H_TOO_HARD return and the whole transaction will be handled * in virtual mode. */ do { old_state = new_state = READ_ONCE(icp->state); /* Down_CPPR */ new_state.cppr = new_cppr; /* * Cut down Resend / Check_IPI / IPI * * The logic is that we cannot have a pending interrupt * trumped by an IPI at this point (see above), so we * know that either the pending interrupt is already an * IPI (in which case we don't care to override it) or * it's either more favored than us or non existent */ if (new_state.mfrr < new_cppr && new_state.mfrr <= new_state.pending_pri) { WARN_ON(new_state.xisr != XICS_IPI && new_state.xisr != 0); new_state.pending_pri = new_state.mfrr; new_state.xisr = XICS_IPI; } /* Latch/clear resend bit */ resend = new_state.need_resend; new_state.need_resend = 0; } while (!icp_try_update(icp, old_state, new_state, true)); /* * Now handle resend checks. Those are asynchronous to the ICP * state update in HW (ie bus transactions) so we can handle them * separately here too */ if (resend) icp_check_resend(xics, icp); } static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu) { union kvmppc_icp_state old_state, new_state; struct kvmppc_icp *icp = vcpu->arch.icp; u32 xirr; /* First, remove EE from the processor */ kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); /* * ICP State: Accept_Interrupt * * Return the pending interrupt (if any) along with the * current CPPR, then clear the XISR & set CPPR to the * pending priority */ do { old_state = new_state = READ_ONCE(icp->state); xirr = old_state.xisr | (((u32)old_state.cppr) << 24); if (!old_state.xisr) break; new_state.cppr = new_state.pending_pri; new_state.pending_pri = 0xff; new_state.xisr = 0; } while (!icp_try_update(icp, old_state, new_state, true)); XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr); return xirr; } static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, unsigned long mfrr) { union kvmppc_icp_state old_state, new_state; struct kvmppc_xics *xics = vcpu->kvm->arch.xics; struct kvmppc_icp *icp; u32 reject; bool resend; bool local; XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n", vcpu->vcpu_id, server, mfrr); icp = vcpu->arch.icp; local = icp->server_num == server; if (!local) { icp = kvmppc_xics_find_server(vcpu->kvm, server); if (!icp) return H_PARAMETER; } /* * ICP state: Set_MFRR * * If the CPPR is more favored than the new MFRR, then * nothing needs to be rejected as there can be no XISR to * reject. If the MFRR is being made less favored then * there might be a previously-rejected interrupt needing * to be resent. * * ICP state: Check_IPI * * If the CPPR is less favored, then we might be replacing * an interrupt, and thus need to possibly reject it. * * ICP State: IPI * * Besides rejecting any pending interrupts, we also * update XISR and pending_pri to mark IPI as pending. * * PAPR does not describe this state, but if the MFRR is being * made less favored than its earlier value, there might be * a previously-rejected interrupt needing to be resent. 
* Ideally, we would want to resend only if * prio(pending_interrupt) < mfrr && * prio(pending_interrupt) < cppr * where pending interrupt is the one that was rejected. But * we don't have that state, so we simply trigger a resend * whenever the MFRR is made less favored. */ do { old_state = new_state = READ_ONCE(icp->state); /* Set_MFRR */ new_state.mfrr = mfrr; /* Check_IPI */ reject = 0; resend = false; if (mfrr < new_state.cppr) { /* Reject a pending interrupt if not an IPI */ if (mfrr <= new_state.pending_pri) { reject = new_state.xisr; new_state.pending_pri = mfrr; new_state.xisr = XICS_IPI; } } if (mfrr > old_state.mfrr) { resend = new_state.need_resend; new_state.need_resend = 0; } } while (!icp_try_update(icp, old_state, new_state, local)); /* Handle reject */ if (reject && reject != XICS_IPI) icp_deliver_irq(xics, icp, reject, false); /* Handle resend */ if (resend) icp_check_resend(xics, icp); return H_SUCCESS; } static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) { union kvmppc_icp_state state; struct kvmppc_icp *icp; icp = vcpu->arch.icp; if (icp->server_num != server) { icp = kvmppc_xics_find_server(vcpu->kvm, server); if (!icp) return H_PARAMETER; } state = READ_ONCE(icp->state); kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr); kvmppc_set_gpr(vcpu, 5, state.mfrr); return H_SUCCESS; } static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) { union kvmppc_icp_state old_state, new_state; struct kvmppc_xics *xics = vcpu->kvm->arch.xics; struct kvmppc_icp *icp = vcpu->arch.icp; u32 reject; XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr); /* * ICP State: Set_CPPR * * We can safely compare the new value with the current * value outside of the transaction as the CPPR is only * ever changed by the processor on itself */ if (cppr > icp->state.cppr) icp_down_cppr(xics, icp, cppr); else if (cppr == icp->state.cppr) return; /* * ICP State: Up_CPPR * * The processor is raising its priority, this can result * in a rejection of a pending interrupt: * * ICP State: Reject_Current * * We can remove EE from the current processor, the update * transaction will set it again if needed */ kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); do { old_state = new_state = READ_ONCE(icp->state); reject = 0; new_state.cppr = cppr; if (cppr <= new_state.pending_pri) { reject = new_state.xisr; new_state.xisr = 0; new_state.pending_pri = 0xff; } } while (!icp_try_update(icp, old_state, new_state, true)); /* * Check for rejects. They are handled by doing a new delivery * attempt (see comments in icp_deliver_irq). */ if (reject && reject != XICS_IPI) icp_deliver_irq(xics, icp, reject, false); } static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq) { struct kvmppc_xics *xics = vcpu->kvm->arch.xics; struct kvmppc_icp *icp = vcpu->arch.icp; struct kvmppc_ics *ics; struct ics_irq_state *state; u16 src; u32 pq_old, pq_new; /* * ICS EOI handling: For LSI, if P bit is still set, we need to * resend it. * * For MSI, we move Q bit into P (and clear Q). If it is set, * resend it. 
*/ ics = kvmppc_xics_find_ics(xics, irq, &src); if (!ics) { XICS_DBG("ios_eoi: IRQ 0x%06x not found !\n", irq); return H_PARAMETER; } state = &ics->irq_state[src]; if (state->lsi) pq_new = state->pq_state; else do { pq_old = state->pq_state; pq_new = pq_old >> 1; } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old); if (pq_new & PQ_PRESENTED) icp_deliver_irq(xics, icp, irq, false); kvm_notify_acked_irq(vcpu->kvm, 0, irq); return H_SUCCESS; } static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) { struct kvmppc_xics *xics = vcpu->kvm->arch.xics; struct kvmppc_icp *icp = vcpu->arch.icp; u32 irq = xirr & 0x00ffffff; XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr); /* * ICP State: EOI * * Note: If EOI is incorrectly used by SW to lower the CPPR * value (ie more favored), we do not check for rejection of * a pending interrupt, this is a SW error and PAPR specifies * that we don't have to deal with it. * * The sending of an EOI to the ICS is handled after the * CPPR update * * ICP State: Down_CPPR which we handle * in a separate function as it's shared with H_CPPR. */ icp_down_cppr(xics, icp, xirr >> 24); /* IPIs have no EOI */ if (irq == XICS_IPI) return H_SUCCESS; return ics_eoi(vcpu, irq); } int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall) { struct kvmppc_xics *xics = vcpu->kvm->arch.xics; struct kvmppc_icp *icp = vcpu->arch.icp; XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n", hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt); if (icp->rm_action & XICS_RM_KICK_VCPU) { icp->n_rm_kick_vcpu++; kvmppc_fast_vcpu_kick(icp->rm_kick_target); } if (icp->rm_action & XICS_RM_CHECK_RESEND) { icp->n_rm_check_resend++; icp_check_resend(xics, icp->rm_resend_icp); } if (icp->rm_action & XICS_RM_NOTIFY_EOI) { icp->n_rm_notify_eoi++; kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq); } icp->rm_action = 0; return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete); int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req) { struct kvmppc_xics *xics = vcpu->kvm->arch.xics; unsigned long res; int rc = H_SUCCESS; /* Check if we have an ICP */ if (!xics || !vcpu->arch.icp) return H_HARDWARE; /* These requests don't have real-mode implementations at present */ switch (req) { case H_XIRR_X: res = kvmppc_h_xirr(vcpu); kvmppc_set_gpr(vcpu, 4, res); kvmppc_set_gpr(vcpu, 5, get_tb()); return rc; case H_IPOLL: rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4)); return rc; } /* Check for real mode returning too hard */ if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm)) return kvmppc_xics_rm_complete(vcpu, req); switch (req) { case H_XIRR: res = kvmppc_h_xirr(vcpu); kvmppc_set_gpr(vcpu, 4, res); break; case H_CPPR: kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4)); break; case H_EOI: rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4)); break; case H_IPI: rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5)); break; } return rc; } EXPORT_SYMBOL_GPL(kvmppc_xics_hcall); /* -- Initialisation code etc. 
-- */ static void xics_debugfs_irqmap(struct seq_file *m, struct kvmppc_passthru_irqmap *pimap) { int i; if (!pimap) return; seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n", pimap->n_mapped); for (i = 0; i < pimap->n_mapped; i++) { seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n", pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq); } } static int xics_debug_show(struct seq_file *m, void *private) { struct kvmppc_xics *xics = m->private; struct kvm *kvm = xics->kvm; struct kvm_vcpu *vcpu; int icsid; unsigned long flags, i; unsigned long t_rm_kick_vcpu, t_rm_check_resend; unsigned long t_rm_notify_eoi; unsigned long t_reject, t_check_resend; if (!kvm) return 0; t_rm_kick_vcpu = 0; t_rm_notify_eoi = 0; t_rm_check_resend = 0; t_check_resend = 0; t_reject = 0; xics_debugfs_irqmap(m, kvm->arch.pimap); seq_printf(m, "=========\nICP state\n=========\n"); kvm_for_each_vcpu(i, vcpu, kvm) { struct kvmppc_icp *icp = vcpu->arch.icp; union kvmppc_icp_state state; if (!icp) continue; state.raw = READ_ONCE(icp->state.raw); seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n", icp->server_num, state.xisr, state.pending_pri, state.cppr, state.mfrr, state.out_ee, state.need_resend); t_rm_kick_vcpu += icp->n_rm_kick_vcpu; t_rm_notify_eoi += icp->n_rm_notify_eoi; t_rm_check_resend += icp->n_rm_check_resend; t_check_resend += icp->n_check_resend; t_reject += icp->n_reject; } seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n", t_rm_kick_vcpu, t_rm_check_resend, t_rm_notify_eoi); seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n", t_check_resend, t_reject); for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) { struct kvmppc_ics *ics = xics->ics[icsid]; if (!ics) continue; seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n", icsid); local_irq_save(flags); arch_spin_lock(&ics->lock); for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { struct ics_irq_state *irq = &ics->irq_state[i]; seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n", irq->number, irq->server, irq->priority, irq->saved_priority, irq->pq_state, irq->resend, irq->masked_pending); } arch_spin_unlock(&ics->lock); local_irq_restore(flags); } return 0; } DEFINE_SHOW_ATTRIBUTE(xics_debug); static void xics_debugfs_init(struct kvmppc_xics *xics) { xics->dentry = debugfs_create_file("xics", 0444, xics->kvm->debugfs_dentry, xics, &xics_debug_fops); pr_debug("%s: created\n", __func__); } static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm, struct kvmppc_xics *xics, int irq) { struct kvmppc_ics *ics; int i, icsid; icsid = irq >> KVMPPC_XICS_ICS_SHIFT; mutex_lock(&kvm->lock); /* ICS already exists - somebody else got here first */ if (xics->ics[icsid]) goto out; /* Create the ICS */ ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL); if (!ics) goto out; ics->icsid = icsid; for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i; ics->irq_state[i].priority = MASKED; ics->irq_state[i].saved_priority = MASKED; } smp_wmb(); xics->ics[icsid] = ics; if (icsid > xics->max_icsid) xics->max_icsid = icsid; out: mutex_unlock(&kvm->lock); return xics->ics[icsid]; } static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num) { struct kvmppc_icp *icp; if (!vcpu->kvm->arch.xics) return -ENODEV; if (kvmppc_xics_find_server(vcpu->kvm, server_num)) return -EEXIST; icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL); if (!icp) 
return -ENOMEM; icp->vcpu = vcpu; icp->server_num = server_num; icp->state.mfrr = MASKED; icp->state.pending_pri = MASKED; vcpu->arch.icp = icp; XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id); return 0; } u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu) { struct kvmppc_icp *icp = vcpu->arch.icp; union kvmppc_icp_state state; if (!icp) return 0; state = icp->state; return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) | ((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) | ((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) | ((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT); } int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { struct kvmppc_icp *icp = vcpu->arch.icp; struct kvmppc_xics *xics = vcpu->kvm->arch.xics; union kvmppc_icp_state old_state, new_state; struct kvmppc_ics *ics; u8 cppr, mfrr, pending_pri; u32 xisr; u16 src; bool resend; if (!icp || !xics) return -ENOENT; cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT; xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) & KVM_REG_PPC_ICP_XISR_MASK; mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT; pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT; /* Require the new state to be internally consistent */ if (xisr == 0) { if (pending_pri != 0xff) return -EINVAL; } else if (xisr == XICS_IPI) { if (pending_pri != mfrr || pending_pri >= cppr) return -EINVAL; } else { if (pending_pri >= mfrr || pending_pri >= cppr) return -EINVAL; ics = kvmppc_xics_find_ics(xics, xisr, &src); if (!ics) return -EINVAL; } new_state.raw = 0; new_state.cppr = cppr; new_state.xisr = xisr; new_state.mfrr = mfrr; new_state.pending_pri = pending_pri; /* * Deassert the CPU interrupt request. * icp_try_update will reassert it if necessary. */ kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); /* * Note that if we displace an interrupt from old_state.xisr, * we don't mark it as rejected. We expect userspace to set * the state of the interrupt sources to be consistent with * the ICP states (either before or afterwards, which doesn't * matter). We do handle resends due to CPPR becoming less * favoured because that is necessary to end up with a * consistent state in the situation where userspace restores * the ICS states before the ICP states. 
*/ do { old_state = READ_ONCE(icp->state); if (new_state.mfrr <= old_state.mfrr) { resend = false; new_state.need_resend = old_state.need_resend; } else { resend = old_state.need_resend; new_state.need_resend = 0; } } while (!icp_try_update(icp, old_state, new_state, false)); if (resend) icp_check_resend(xics, icp); return 0; } static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr) { int ret; struct kvmppc_ics *ics; struct ics_irq_state *irqp; u64 __user *ubufp = (u64 __user *) addr; u16 idx; u64 val, prio; unsigned long flags; ics = kvmppc_xics_find_ics(xics, irq, &idx); if (!ics) return -ENOENT; irqp = &ics->irq_state[idx]; local_irq_save(flags); arch_spin_lock(&ics->lock); ret = -ENOENT; if (irqp->exists) { val = irqp->server; prio = irqp->priority; if (prio == MASKED) { val |= KVM_XICS_MASKED; prio = irqp->saved_priority; } val |= prio << KVM_XICS_PRIORITY_SHIFT; if (irqp->lsi) { val |= KVM_XICS_LEVEL_SENSITIVE; if (irqp->pq_state & PQ_PRESENTED) val |= KVM_XICS_PENDING; } else if (irqp->masked_pending || irqp->resend) val |= KVM_XICS_PENDING; if (irqp->pq_state & PQ_PRESENTED) val |= KVM_XICS_PRESENTED; if (irqp->pq_state & PQ_QUEUED) val |= KVM_XICS_QUEUED; ret = 0; } arch_spin_unlock(&ics->lock); local_irq_restore(flags); if (!ret && put_user(val, ubufp)) ret = -EFAULT; return ret; } static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr) { struct kvmppc_ics *ics; struct ics_irq_state *irqp; u64 __user *ubufp = (u64 __user *) addr; u16 idx; u64 val; u8 prio; u32 server; unsigned long flags; if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS) return -ENOENT; ics = kvmppc_xics_find_ics(xics, irq, &idx); if (!ics) { ics = kvmppc_xics_create_ics(xics->kvm, xics, irq); if (!ics) return -ENOMEM; } irqp = &ics->irq_state[idx]; if (get_user(val, ubufp)) return -EFAULT; server = val & KVM_XICS_DESTINATION_MASK; prio = val >> KVM_XICS_PRIORITY_SHIFT; if (prio != MASKED && kvmppc_xics_find_server(xics->kvm, server) == NULL) return -EINVAL; local_irq_save(flags); arch_spin_lock(&ics->lock); irqp->server = server; irqp->saved_priority = prio; if (val & KVM_XICS_MASKED) prio = MASKED; irqp->priority = prio; irqp->resend = 0; irqp->masked_pending = 0; irqp->lsi = 0; irqp->pq_state = 0; if (val & KVM_XICS_LEVEL_SENSITIVE) irqp->lsi = 1; /* If PENDING, set P in case P is not saved because of old code */ if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING) irqp->pq_state |= PQ_PRESENTED; if (val & KVM_XICS_QUEUED) irqp->pq_state |= PQ_QUEUED; irqp->exists = 1; arch_spin_unlock(&ics->lock); local_irq_restore(flags); if (val & KVM_XICS_PENDING) icp_deliver_irq(xics, NULL, irqp->number, false); return 0; } int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status) { struct kvmppc_xics *xics = kvm->arch.xics; if (!xics) return -ENODEV; return ics_deliver_irq(xics, irq, level); } static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { struct kvmppc_xics *xics = dev->private; switch (attr->group) { case KVM_DEV_XICS_GRP_SOURCES: return xics_set_source(xics, attr->attr, attr->addr); } return -ENXIO; } static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { struct kvmppc_xics *xics = dev->private; switch (attr->group) { case KVM_DEV_XICS_GRP_SOURCES: return xics_get_source(xics, attr->attr, attr->addr); } return -ENXIO; } static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { switch (attr->group) { case KVM_DEV_XICS_GRP_SOURCES: if (attr->attr >= 
KVMPPC_XICS_FIRST_IRQ && attr->attr < KVMPPC_XICS_NR_IRQS) return 0; break; } return -ENXIO; } /* * Called when device fd is closed. kvm->lock is held. */ static void kvmppc_xics_release(struct kvm_device *dev) { struct kvmppc_xics *xics = dev->private; unsigned long i; struct kvm *kvm = xics->kvm; struct kvm_vcpu *vcpu; pr_devel("Releasing xics device\n"); /* * Since this is the device release function, we know that * userspace does not have any open fd referring to the * device. Therefore there can not be any of the device * attribute set/get functions being executed concurrently, * and similarly, the connect_vcpu and set/clr_mapped * functions also cannot be being executed. */ debugfs_remove(xics->dentry); /* * We should clean up the vCPU interrupt presenters first. */ kvm_for_each_vcpu(i, vcpu, kvm) { /* * Take vcpu->mutex to ensure that no one_reg get/set ioctl * (i.e. kvmppc_xics_[gs]et_icp) can be done concurrently. * Holding the vcpu->mutex also means that execution is * excluded for the vcpu until the ICP was freed. When the vcpu * can execute again, vcpu->arch.icp and vcpu->arch.irq_type * have been cleared and the vcpu will not be going into the * XICS code anymore. */ mutex_lock(&vcpu->mutex); kvmppc_xics_free_icp(vcpu); mutex_unlock(&vcpu->mutex); } if (kvm) kvm->arch.xics = NULL; for (i = 0; i <= xics->max_icsid; i++) { kfree(xics->ics[i]); xics->ics[i] = NULL; } /* * A reference of the kvmppc_xics pointer is now kept under * the xics_device pointer of the machine for reuse. It is * freed when the VM is destroyed for now until we fix all the * execution paths. */ kfree(dev); } static struct kvmppc_xics *kvmppc_xics_get_device(struct kvm *kvm) { struct kvmppc_xics **kvm_xics_device = &kvm->arch.xics_device; struct kvmppc_xics *xics = *kvm_xics_device; if (!xics) { xics = kzalloc(sizeof(*xics), GFP_KERNEL); *kvm_xics_device = xics; } else { memset(xics, 0, sizeof(*xics)); } return xics; } static int kvmppc_xics_create(struct kvm_device *dev, u32 type) { struct kvmppc_xics *xics; struct kvm *kvm = dev->kvm; pr_devel("Creating xics for partition\n"); /* Already there ? 
*/ if (kvm->arch.xics) return -EEXIST; xics = kvmppc_xics_get_device(kvm); if (!xics) return -ENOMEM; dev->private = xics; xics->dev = dev; xics->kvm = kvm; kvm->arch.xics = xics; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE if (cpu_has_feature(CPU_FTR_ARCH_206) && cpu_has_feature(CPU_FTR_HVMODE)) { /* Enable real mode support */ xics->real_mode = ENABLE_REALMODE; xics->real_mode_dbg = DEBUG_REALMODE; } #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ return 0; } static void kvmppc_xics_init(struct kvm_device *dev) { struct kvmppc_xics *xics = dev->private; xics_debugfs_init(xics); } struct kvm_device_ops kvm_xics_ops = { .name = "kvm-xics", .create = kvmppc_xics_create, .init = kvmppc_xics_init, .release = kvmppc_xics_release, .set_attr = xics_set_attr, .get_attr = xics_get_attr, .has_attr = xics_has_attr, }; int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu, u32 xcpu) { struct kvmppc_xics *xics = dev->private; int r = -EBUSY; if (dev->ops != &kvm_xics_ops) return -EPERM; if (xics->kvm != vcpu->kvm) return -EPERM; if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) return -EBUSY; r = kvmppc_xics_create_icp(vcpu, xcpu); if (!r) vcpu->arch.irq_type = KVMPPC_IRQ_XICS; return r; } void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { if (!vcpu->arch.icp) return; kfree(vcpu->arch.icp); vcpu->arch.icp = NULL; vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; } void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq, unsigned long host_irq) { struct kvmppc_xics *xics = kvm->arch.xics; struct kvmppc_ics *ics; u16 idx; ics = kvmppc_xics_find_ics(xics, irq, &idx); if (!ics) return; ics->irq_state[idx].host_irq = host_irq; ics->irq_state[idx].intr_cpu = -1; } EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped); void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq, unsigned long host_irq) { struct kvmppc_xics *xics = kvm->arch.xics; struct kvmppc_ics *ics; u16 idx; ics = kvmppc_xics_find_ics(xics, irq, &idx); if (!ics) return; ics->irq_state[idx].host_irq = 0; } EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);
linux-master
arch/powerpc/kvm/book3s_xics.c
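The XICS emulation above is exposed to userspace through the generic KVM device API. The stand-alone sketch below (not part of the kernel sources, and only expected to work on a Book3S PowerPC host with in-kernel XICS support) shows roughly how a VMM would reach kvmppc_xics_create() and kvmppc_xics_connect_vcpu(): it creates the device with KVM_CREATE_DEVICE and then attaches vcpu 0 to an ICP via KVM_ENABLE_CAP(KVM_CAP_IRQ_XICS). The server number 0 passed in args[1] is an arbitrary example value.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XICS };
	struct kvm_enable_cap cap = { .cap = KVM_CAP_IRQ_XICS };

	if (kvm < 0 || vm < 0 || vcpu < 0)
		return 1;

	/* This ends up in kvmppc_xics_create() shown above. */
	if (ioctl(vm, KVM_CREATE_DEVICE, &cd) < 0) {
		perror("KVM_CREATE_DEVICE");
		return 1;
	}

	/*
	 * Connect the vcpu to its ICP: args[0] is the XICS device fd,
	 * args[1] the server number.  This reaches
	 * kvmppc_xics_connect_vcpu() via the vcpu KVM_ENABLE_CAP path.
	 */
	cap.args[0] = cd.fd;
	cap.args[1] = 0;
	if (ioctl(vcpu, KVM_ENABLE_CAP, &cap) < 0) {
		perror("KVM_ENABLE_CAP(KVM_CAP_IRQ_XICS)");
		return 1;
	}

	printf("in-kernel XICS created, vcpu 0 connected\n");
	return 0;
}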
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2017 Paul Mackerras, IBM Corp. <[email protected]> */ #include <linux/kvm_host.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/kvm_book3s_64.h> #include <asm/reg.h> #include <asm/ppc-opcode.h> /* * This handles the cases where the guest is in real suspend mode * and we want to get back to the guest without dooming the transaction. * The caller has checked that the guest is in real-suspend mode * (MSR[TS] = S and the fake-suspend flag is not set). */ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) { u32 instr = vcpu->arch.emul_inst; u64 newmsr, msr, bescr; int rs; /* * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit * in these instructions, so masking bit 31 out doesn't change these * instructions. For the tsr. instruction if bit 31 = 0 then it is per * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid * Forms, informs specifically that ignoring bit 31 is an acceptable way * to handle TM-related invalid forms that have bit 31 = 0. Moreover, * for emulation purposes both forms (w/ and wo/ bit 31 set) can * generate a softpatch interrupt. Hence both forms are handled below * for tsr. to make them behave the same way. */ switch (instr & PO_XOP_OPCODE_MASK) { case PPC_INST_RFID: /* XXX do we need to check for PR=0 here? */ newmsr = vcpu->arch.shregs.srr1; /* should only get here for Sx -> T1 transition */ if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM))) return 0; newmsr = sanitize_msr(newmsr); vcpu->arch.shregs.msr = newmsr; vcpu->arch.cfar = vcpu->arch.regs.nip - 4; vcpu->arch.regs.nip = vcpu->arch.shregs.srr0; return 1; case PPC_INST_RFEBB: /* check for PR=1 and arch 2.06 bit set in PCR */ msr = vcpu->arch.shregs.msr; if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) return 0; /* check EBB facility is available */ if (!(vcpu->arch.hfscr & HFSCR_EBB) || ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB))) return 0; bescr = mfspr(SPRN_BESCR); /* expect to see a S->T transition requested */ if (((bescr >> 30) & 3) != 2) return 0; bescr &= ~BESCR_GE; if (instr & (1 << 11)) bescr |= BESCR_GE; mtspr(SPRN_BESCR, bescr); msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; vcpu->arch.shregs.msr = msr; vcpu->arch.cfar = vcpu->arch.regs.nip - 4; vcpu->arch.regs.nip = mfspr(SPRN_EBBRR); return 1; case PPC_INST_MTMSRD: /* XXX do we need to check for PR=0 here? */ rs = (instr >> 21) & 0x1f; newmsr = kvmppc_get_gpr(vcpu, rs); msr = vcpu->arch.shregs.msr; /* check this is a Sx -> T1 transition */ if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM))) return 0; /* mtmsrd doesn't change LE */ newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE); newmsr = sanitize_msr(newmsr); vcpu->arch.shregs.msr = newmsr; return 1; /* ignore bit 31, see comment above */ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK): /* we know the MSR has the TS field = S (0b01) here */ msr = vcpu->arch.shregs.msr; /* check for PR=1 and arch 2.06 bit set in PCR */ if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) return 0; /* check for TM disabled in the HFSCR or MSR */ if (!(vcpu->arch.hfscr & HFSCR_TM) || !(msr & MSR_TM)) return 0; /* L=1 => tresume => set TS to T (0b10) */ if (instr & (1 << 21)) vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; /* Set CR0 to 0b0010 */ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0x20000000; return 1; } return 0; } /* * This is called when we are returning to a guest in TM transactional * state. We roll the guest state back to the checkpointed state. 
*/ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu) { vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */ vcpu->arch.regs.nip = vcpu->arch.tfhar; copy_from_checkpoint(vcpu); vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000; }
linux-master
arch/powerpc/kvm/book3s_hv_tm_builtin.c
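As a minimal illustration of the tresume handling above, the stand-alone program below flips a modelled MSR TS field from suspended (0b01) to transactional (0b10) and records the 0b0010 result in CR0. The TS_* masks are local stand-ins for the kernel's MSR_TS_* definitions in asm/reg.h (the bit position used here is an assumption); only the bit logic mirrors kvmhv_p9_tm_emulation_early().

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the MSR TS field encoding; real values are in asm/reg.h. */
#define TS_SHIFT		33ULL
#define TS_MASK			(3ULL << TS_SHIFT)
#define TS_SUSPENDED		(1ULL << TS_SHIFT)	/* 0b01 */
#define TS_TRANSACTIONAL	(2ULL << TS_SHIFT)	/* 0b10 */

/* tresume: S -> T, and CR0 <- 0b0010, as in the emulation code above. */
static uint64_t emulate_tresume(uint64_t msr, uint32_t *ccr)
{
	if ((msr & TS_MASK) == TS_SUSPENDED)
		msr = (msr & ~TS_MASK) | TS_TRANSACTIONAL;
	/* CR0 is the top nibble of the CR image, so 0x20000000 encodes 0b0010. */
	*ccr = (*ccr & 0x0fffffff) | 0x20000000;
	return msr;
}

int main(void)
{
	uint32_t ccr = 0;
	uint64_t msr = emulate_tresume(TS_SUSPENDED, &ccr);

	printf("TS field now %s, CR0 = 0x%x\n",
	       (msr & TS_MASK) == TS_TRANSACTIONAL ? "T" : "S", ccr >> 28);
	return 0;
}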
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2011 Paul Mackerras, IBM Corp. <[email protected]> */ #include <linux/cpu.h> #include <linux/kvm_host.h> #include <linux/preempt.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/sizes.h> #include <linux/cma.h> #include <linux/bitops.h> #include <asm/cputable.h> #include <asm/interrupt.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/machdep.h> #include <asm/xics.h> #include <asm/xive.h> #include <asm/dbell.h> #include <asm/cputhreads.h> #include <asm/io.h> #include <asm/opal.h> #include <asm/smp.h> #define KVM_CMA_CHUNK_ORDER 18 #include "book3s_xics.h" #include "book3s_xive.h" /* * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206) * should be power of 2. */ #define HPT_ALIGN_PAGES ((1 << 18) >> PAGE_SHIFT) /* 256k */ /* * By default we reserve 5% of memory for hash pagetable allocation. */ static unsigned long kvm_cma_resv_ratio = 5; static struct cma *kvm_cma; static int __init early_parse_kvm_cma_resv(char *p) { pr_debug("%s(%s)\n", __func__, p); if (!p) return -EINVAL; return kstrtoul(p, 0, &kvm_cma_resv_ratio); } early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv); struct page *kvm_alloc_hpt_cma(unsigned long nr_pages) { VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT); return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES), false); } EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma); void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages) { cma_release(kvm_cma, page, nr_pages); } EXPORT_SYMBOL_GPL(kvm_free_hpt_cma); /** * kvm_cma_reserve() - reserve area for kvm hash pagetable * * This function reserves memory from early allocator. It should be * called by arch specific code once the memblock allocator * has been activated and all other subsystems have already allocated/reserved * memory. */ void __init kvm_cma_reserve(void) { unsigned long align_size; phys_addr_t selected_size; /* * We need CMA reservation only when we are in HV mode */ if (!cpu_has_feature(CPU_FTR_HVMODE)) return; selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100); if (selected_size) { pr_info("%s: reserving %ld MiB for global area\n", __func__, (unsigned long)selected_size / SZ_1M); align_size = HPT_ALIGN_PAGES << PAGE_SHIFT; cma_declare_contiguous(0, selected_size, 0, align_size, KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma", &kvm_cma); } } /* * Real-mode H_CONFER implementation. * We check if we are the only vcpu out of this virtual core * still running in the guest and not ceded. If so, we pop up * to the virtual-mode implementation; if not, just return to * the guest. 
*/ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target, unsigned int yield_count) { struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore; int ptid = local_paca->kvm_hstate.ptid; int threads_running; int threads_ceded; int threads_conferring; u64 stop = get_tb() + 10 * tb_ticks_per_usec; int rv = H_SUCCESS; /* => don't yield */ set_bit(ptid, &vc->conferring_threads); while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) { threads_running = VCORE_ENTRY_MAP(vc); threads_ceded = vc->napping_threads; threads_conferring = vc->conferring_threads; if ((threads_ceded | threads_conferring) == threads_running) { rv = H_TOO_HARD; /* => do yield */ break; } } clear_bit(ptid, &vc->conferring_threads); return rv; } /* * When running HV mode KVM we need to block certain operations while KVM VMs * exist in the system. We use a counter of VMs to track this. * * One of the operations we need to block is onlining of secondaries, so we * protect hv_vm_count with cpus_read_lock/unlock(). */ static atomic_t hv_vm_count; void kvm_hv_vm_activated(void) { cpus_read_lock(); atomic_inc(&hv_vm_count); cpus_read_unlock(); } EXPORT_SYMBOL_GPL(kvm_hv_vm_activated); void kvm_hv_vm_deactivated(void) { cpus_read_lock(); atomic_dec(&hv_vm_count); cpus_read_unlock(); } EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated); bool kvm_hv_mode_active(void) { return atomic_read(&hv_vm_count) != 0; } extern int hcall_real_table[], hcall_real_table_end[]; int kvmppc_hcall_impl_hv_realmode(unsigned long cmd) { cmd /= 4; if (cmd < hcall_real_table_end - hcall_real_table && hcall_real_table[cmd]) return 1; return 0; } EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode); int kvmppc_hwrng_present(void) { return ppc_md.get_random_seed != NULL; } EXPORT_SYMBOL_GPL(kvmppc_hwrng_present); long kvmppc_rm_h_random(struct kvm_vcpu *vcpu) { if (ppc_md.get_random_seed && ppc_md.get_random_seed(&vcpu->arch.regs.gpr[4])) return H_SUCCESS; return H_HARDWARE; } /* * Send an interrupt or message to another CPU. * The caller needs to include any barrier needed to order writes * to memory vs. the IPI/message. */ void kvmhv_rm_send_ipi(int cpu) { void __iomem *xics_phys; unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); /* On POWER9 we can use msgsnd for any destination cpu. */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { msg |= get_hard_smp_processor_id(cpu); __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); return; } /* On POWER8 for IPIs to threads in the same core, use msgsnd. */ if (cpu_has_feature(CPU_FTR_ARCH_207S) && cpu_first_thread_sibling(cpu) == cpu_first_thread_sibling(raw_smp_processor_id())) { msg |= cpu_thread_in_core(cpu); __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); return; } /* We should never reach this */ if (WARN_ON_ONCE(xics_on_xive())) return; /* Else poke the target with an IPI */ xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys; if (xics_phys) __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR); else opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY); } /* * The following functions are called from the assembly code * in book3s_hv_rmhandlers.S. */ static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active) { int cpu = vc->pcpu; /* Order setting of exit map vs. 
msgsnd/IPI */ smp_mb(); for (; active; active >>= 1, ++cpu) if (active & 1) kvmhv_rm_send_ipi(cpu); } void kvmhv_commence_exit(int trap) { struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore; int ptid = local_paca->kvm_hstate.ptid; struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode; int me, ee, i; /* Set our bit in the threads-exiting-guest map in the 0xff00 bits of vcore->entry_exit_map */ me = 0x100 << ptid; do { ee = vc->entry_exit_map; } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee); /* Are we the first here? */ if ((ee >> 8) != 0) return; /* * Trigger the other threads in this vcore to exit the guest. * If this is a hypervisor decrementer interrupt then they * will be already on their way out of the guest. */ if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER) kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid)); /* * If we are doing dynamic micro-threading, interrupt the other * subcores to pull them out of their guests too. */ if (!sip) return; for (i = 0; i < MAX_SUBCORES; ++i) { vc = sip->vc[i]; if (!vc) break; do { ee = vc->entry_exit_map; /* Already asked to exit? */ if ((ee >> 8) != 0) break; } while (cmpxchg(&vc->entry_exit_map, ee, ee | VCORE_EXIT_REQ) != ee); if ((ee >> 8) == 0) kvmhv_interrupt_vcore(vc, ee); } } struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv; EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv); #ifdef CONFIG_KVM_XICS static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap, u32 xisr) { int i; /* * We access the mapped array here without a lock. That * is safe because we never reduce the number of entries * in the array and we never change the v_hwirq field of * an entry once it is set. * * We have also carefully ordered the stores in the writer * and the loads here in the reader, so that if we find a matching * hwirq here, the associated GSI and irq_desc fields are valid. */ for (i = 0; i < pimap->n_mapped; i++) { if (xisr == pimap->mapped[i].r_hwirq) { /* * Order subsequent reads in the caller to serialize * with the writer. */ smp_rmb(); return &pimap->mapped[i]; } } return NULL; } /* * If we have an interrupt that's not an IPI, check if we have a * passthrough adapter and if so, check if this external interrupt * is for the adapter. * We will attempt to deliver the IRQ directly to the target VCPU's * ICP, the virtual ICP (based on affinity - the xive value in ICS). * * If the delivery fails or if this is not for a passthrough adapter, * return to the host to handle this interrupt. We earlier * saved a copy of the XIRR in the PACA, it will be picked up by * the host ICP driver. */ static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again) { struct kvmppc_passthru_irqmap *pimap; struct kvmppc_irq_map *irq_map; struct kvm_vcpu *vcpu; vcpu = local_paca->kvm_hstate.kvm_vcpu; if (!vcpu) return 1; pimap = kvmppc_get_passthru_irqmap(vcpu->kvm); if (!pimap) return 1; irq_map = get_irqmap(pimap, xisr); if (!irq_map) return 1; /* We're handling this interrupt, generic code doesn't need to */ local_paca->kvm_hstate.saved_xirr = 0; return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again); } #else static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again) { return 1; } #endif /* * Determine what sort of external interrupt is pending (if any). 
* Returns: * 0 if no interrupt is pending * 1 if an interrupt is pending that needs to be handled by the host * 2 Passthrough that needs completion in the host * -1 if there was a guest wakeup IPI (which has now been cleared) * -2 if there is PCI passthrough external interrupt that was handled */ static long kvmppc_read_one_intr(bool *again); long kvmppc_read_intr(void) { long ret = 0; long rc; bool again; if (xive_enabled()) return 1; do { again = false; rc = kvmppc_read_one_intr(&again); if (rc && (ret == 0 || rc > ret)) ret = rc; } while (again); return ret; } static long kvmppc_read_one_intr(bool *again) { void __iomem *xics_phys; u32 h_xirr; __be32 xirr; u32 xisr; u8 host_ipi; int64_t rc; if (xive_enabled()) return 1; /* see if a host IPI is pending */ host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi); if (host_ipi) return 1; /* Now read the interrupt from the ICP */ xics_phys = local_paca->kvm_hstate.xics_phys; rc = 0; if (!xics_phys) rc = opal_int_get_xirr(&xirr, false); else xirr = __raw_rm_readl(xics_phys + XICS_XIRR); if (rc < 0) return 1; /* * Save XIRR for later. Since we get control in reverse endian * on LE systems, save it byte reversed and fetch it back in * host endian. Note that xirr is the value read from the * XIRR register, while h_xirr is the host endian version. */ h_xirr = be32_to_cpu(xirr); local_paca->kvm_hstate.saved_xirr = h_xirr; xisr = h_xirr & 0xffffff; /* * Ensure that the store/load complete to guarantee all side * effects of loading from XIRR has completed */ smp_mb(); /* if nothing pending in the ICP */ if (!xisr) return 0; /* We found something in the ICP... * * If it is an IPI, clear the MFRR and EOI it. */ if (xisr == XICS_IPI) { rc = 0; if (xics_phys) { __raw_rm_writeb(0xff, xics_phys + XICS_MFRR); __raw_rm_writel(xirr, xics_phys + XICS_XIRR); } else { opal_int_set_mfrr(hard_smp_processor_id(), 0xff); rc = opal_int_eoi(h_xirr); } /* If rc > 0, there is another interrupt pending */ *again = rc > 0; /* * Need to ensure side effects of above stores * complete before proceeding. */ smp_mb(); /* * We need to re-check host IPI now in case it got set in the * meantime. If it's clear, we bounce the interrupt to the * guest */ host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi); if (unlikely(host_ipi != 0)) { /* We raced with the host, * we need to resend that IPI, bummer */ if (xics_phys) __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR); else opal_int_set_mfrr(hard_smp_processor_id(), IPI_PRIORITY); /* Let side effects complete */ smp_mb(); return 1; } /* OK, it's an IPI for us */ local_paca->kvm_hstate.saved_xirr = 0; return -1; } return kvmppc_check_passthru(xisr, xirr, again); } static void kvmppc_end_cede(struct kvm_vcpu *vcpu) { vcpu->arch.ceded = 0; if (vcpu->arch.timer_running) { hrtimer_try_to_cancel(&vcpu->arch.dec_timer); vcpu->arch.timer_running = 0; } } void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) { /* Guest must always run with ME enabled, HV disabled. */ msr = (msr | MSR_ME) & ~MSR_HV; /* * Check for illegal transactional state bit combination * and if we find it, force the TS field to a safe state. 
*/ if ((msr & MSR_TS_MASK) == MSR_TS_MASK) msr &= ~MSR_TS_MASK; vcpu->arch.shregs.msr = msr; kvmppc_end_cede(vcpu); } EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv); static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) { unsigned long msr, pc, new_msr, new_pc; msr = kvmppc_get_msr(vcpu); pc = kvmppc_get_pc(vcpu); new_msr = vcpu->arch.intr_msr; new_pc = vec; /* If transactional, change to suspend mode on IRQ delivery */ if (MSR_TM_TRANSACTIONAL(msr)) new_msr |= MSR_TS_S; else new_msr |= msr & MSR_TS_MASK; /* * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and * applicable. AIL=2 is not supported. * * AIL does not apply to SRESET, MCE, or HMI (which is never * delivered to the guest), and does not apply if IR=0 or DR=0. */ if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET && vec != BOOK3S_INTERRUPT_MACHINE_CHECK && (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 && (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR) ) { new_msr |= MSR_IR | MSR_DR; new_pc += 0xC000000000004000ULL; } kvmppc_set_srr0(vcpu, pc); kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); kvmppc_set_pc(vcpu, new_pc); vcpu->arch.shregs.msr = new_msr; } void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) { inject_interrupt(vcpu, vec, srr1_flags); kvmppc_end_cede(vcpu); } EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv); /* * Is there a PRIV_DOORBELL pending for the guest (on POWER9)? * Can we inject a Decrementer or a External interrupt? */ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu) { int ext; unsigned long lpcr; WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); /* Insert EXTERNAL bit into LPCR at the MER bit position */ ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1; lpcr = mfspr(SPRN_LPCR); lpcr |= ext << LPCR_MER_SH; mtspr(SPRN_LPCR, lpcr); isync(); if (vcpu->arch.shregs.msr & MSR_EE) { if (ext) { inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0); } else { long int dec = mfspr(SPRN_DEC); if (!(lpcr & LPCR_LD)) dec = (int) dec; if (dec < 0) inject_interrupt(vcpu, BOOK3S_INTERRUPT_DECREMENTER, 0); } } if (vcpu->arch.doorbell_request) { mtspr(SPRN_DPDES, 1); vcpu->arch.vcore->dpdes = 1; smp_wmb(); vcpu->arch.doorbell_request = 0; } } static void flush_guest_tlb(struct kvm *kvm) { unsigned long rb, set; rb = PPC_BIT(52); /* IS = 2 */ for (set = 0; set < kvm->arch.tlb_sets; ++set) { /* R=0 PRS=0 RIC=0 */ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) : : "r" (rb), "i" (0), "i" (0), "i" (0), "r" (0) : "memory"); rb += PPC_BIT(51); /* increment set number */ } asm volatile("ptesync": : :"memory"); } void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu) { if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) { flush_guest_tlb(kvm); /* Clear the bit after the TLB flush */ cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush); } } EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);
linux-master
arch/powerpc/kvm/book3s_hv_builtin.c
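The entry_exit_map handshake in kvmhv_commence_exit() is easy to lose in the surrounding code, so here is a small userspace model of just that part: each thread ORs its bit into the upper byte of the map with a compare-and-swap retry loop, and only the thread that finds the upper byte still zero counts as first and would go on to interrupt its siblings. This is a sketch using C11 atomics, not the kernel's cmpxchg().

#include <stdio.h>
#include <stdatomic.h>

static atomic_uint entry_exit_map;	/* 0xff00 bits = threads exiting */

static int commence_exit(int ptid)
{
	unsigned int me = 0x100u << ptid;
	unsigned int ee;

	/* Same retry pattern as the do/while cmpxchg loop above. */
	do {
		ee = atomic_load(&entry_exit_map);
	} while (!atomic_compare_exchange_weak(&entry_exit_map, &ee, ee | me));

	return (ee >> 8) == 0;	/* true only for the first exiting thread */
}

int main(void)
{
	printf("thread 0 first? %d\n", commence_exit(0));
	printf("thread 1 first? %d\n", commence_exit(1));
	return 0;
}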
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved. * * Author: Varun Sethi, <[email protected]> * * Description: * This file is derived from arch/powerpc/kvm/e500.c, * by Yu Liu <[email protected]>. */ #include <linux/kvm_host.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/export.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <asm/reg.h> #include <asm/cputable.h> #include <asm/kvm_ppc.h> #include <asm/dbell.h> #include <asm/ppc-opcode.h> #include "booke.h" #include "e500.h" void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type) { enum ppc_dbell dbell_type; unsigned long tag; switch (type) { case INT_CLASS_NONCRIT: dbell_type = PPC_G_DBELL; break; case INT_CLASS_CRIT: dbell_type = PPC_G_DBELL_CRIT; break; case INT_CLASS_MC: dbell_type = PPC_G_DBELL_MC; break; default: WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type); return; } preempt_disable(); tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id; mb(); ppc_msgsnd(dbell_type, 0, tag); preempt_enable(); } /* gtlbe must not be mapped by more than one host tlb entry */ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, struct kvm_book3e_206_tlb_entry *gtlbe) { unsigned int tid, ts; gva_t eaddr; u32 val; unsigned long flags; ts = get_tlb_ts(gtlbe); tid = get_tlb_tid(gtlbe); /* We search the host TLB to invalidate its shadow TLB entry */ val = (tid << 16) | ts; eaddr = get_tlb_eaddr(gtlbe); local_irq_save(flags); mtspr(SPRN_MAS6, val); mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu)); asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr)); val = mfspr(SPRN_MAS1); if (val & MAS1_VALID) { mtspr(SPRN_MAS1, val & ~MAS1_VALID); asm volatile("tlbwe"); } mtspr(SPRN_MAS5, 0); /* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */ mtspr(SPRN_MAS8, 0); isync(); local_irq_restore(flags); } void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500) { unsigned long flags; local_irq_save(flags); mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu)); /* * clang-17 and older could not assemble tlbilxlpid. 
* https://github.com/ClangBuiltLinux/linux/issues/1891 */ asm volatile (PPC_TLBILX_LPID); mtspr(SPRN_MAS5, 0); local_irq_restore(flags); } void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) { vcpu->arch.pid = pid; } void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) { } /* We use two lpids per VM */ static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid); static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); kvmppc_booke_vcpu_load(vcpu, cpu); mtspr(SPRN_LPID, get_lpid(vcpu)); mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr); mtspr(SPRN_GPIR, vcpu->vcpu_id); mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp); vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT); vcpu->arch.epsc = vcpu->arch.eplc; mtspr(SPRN_EPLC, vcpu->arch.eplc); mtspr(SPRN_EPSC, vcpu->arch.epsc); mtspr(SPRN_GIVPR, vcpu->arch.ivpr); mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0); mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1); mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2); mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3); mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0); mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1); mtspr(SPRN_GEPR, vcpu->arch.epr); mtspr(SPRN_GDEAR, vcpu->arch.shared->dar); mtspr(SPRN_GESR, vcpu->arch.shared->esr); if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) { kvmppc_e500_tlbil_all(vcpu_e500); __this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu); } } static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu) { vcpu->arch.eplc = mfspr(SPRN_EPLC); vcpu->arch.epsc = mfspr(SPRN_EPSC); vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0); vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1); vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2); vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3); vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0); vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1); vcpu->arch.epr = mfspr(SPRN_GEPR); vcpu->arch.shared->dar = mfspr(SPRN_GDEAR); vcpu->arch.shared->esr = mfspr(SPRN_GESR); vcpu->arch.oldpir = mfspr(SPRN_PIR); kvmppc_booke_vcpu_put(vcpu); } static int kvmppc_e500mc_check_processor_compat(void) { int r; if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0) r = 0; else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0) r = 0; #ifdef CONFIG_ALTIVEC /* * Since guests have the privilege to enable AltiVec, we need AltiVec * support in the host to save/restore their context. * Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit * because it's cleared in the absence of CONFIG_ALTIVEC! 
*/ else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0) r = 0; #endif else r = -ENOTSUPP; return r; } int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \ SPRN_EPCR_DUVD; #ifdef CONFIG_64BIT vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM; #endif vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP; vcpu->arch.pvr = mfspr(SPRN_PVR); vcpu_e500->svr = mfspr(SPRN_SVR); vcpu->arch.cpu_type = KVM_CPU_E500MC; return 0; } static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM | KVM_SREGS_E_PC; sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL; sregs->u.e.impl.fsl.features = 0; sregs->u.e.impl.fsl.svr = vcpu_e500->svr; sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; kvmppc_get_sregs_e500_tlb(vcpu, sregs); sregs->u.e.ivor_high[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; return kvmppc_get_sregs_ivor(vcpu, sregs); } static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int ret; if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { vcpu_e500->svr = sregs->u.e.impl.fsl.svr; vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0; vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; } ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs); if (ret < 0) return ret; if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) return 0; if (sregs->u.e.features & KVM_SREGS_E_PM) { vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = sregs->u.e.ivor_high[3]; } if (sregs->u.e.features & KVM_SREGS_E_PC) { vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = sregs->u.e.ivor_high[4]; vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = sregs->u.e.ivor_high[5]; } return kvmppc_set_sregs_ivor(vcpu, sregs); } static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; switch (id) { case KVM_REG_PPC_SPRG9: *val = get_reg_val(id, vcpu->arch.sprg9); break; default: r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); } return r; } static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; switch (id) { case KVM_REG_PPC_SPRG9: vcpu->arch.sprg9 = set_reg_val(id, *val); break; default: r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val); } return r; } static int kvmppc_core_vcpu_create_e500mc(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500; int err; BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0); vcpu_e500 = to_e500(vcpu); /* Invalid PIR value -- this LPID doesn't have valid state on any cpu */ vcpu->arch.oldpir = 0xffffffff; err = kvmppc_e500_tlb_init(vcpu_e500); if (err) return err; vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); if (!vcpu->arch.shared) { err = -ENOMEM; goto uninit_tlb; } return 0; uninit_tlb: kvmppc_e500_tlb_uninit(vcpu_e500); return err; } static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); free_page((unsigned long)vcpu->arch.shared); kvmppc_e500_tlb_uninit(vcpu_e500); } static int kvmppc_core_init_vm_e500mc(struct kvm *kvm) { int lpid; lpid = kvmppc_alloc_lpid(); if (lpid < 0) return lpid; /* * Use two lpids per VM on cores with two threads like e6500. 
Use * even numbers to speedup vcpu lpid computation with consecutive lpids * per VM. vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on. */ if (threads_per_core == 2) lpid <<= 1; kvm->arch.lpid = lpid; return 0; } static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm) { int lpid = kvm->arch.lpid; if (threads_per_core == 2) lpid >>= 1; kvmppc_free_lpid(lpid); } static struct kvmppc_ops kvm_ops_e500mc = { .get_sregs = kvmppc_core_get_sregs_e500mc, .set_sregs = kvmppc_core_set_sregs_e500mc, .get_one_reg = kvmppc_get_one_reg_e500mc, .set_one_reg = kvmppc_set_one_reg_e500mc, .vcpu_load = kvmppc_core_vcpu_load_e500mc, .vcpu_put = kvmppc_core_vcpu_put_e500mc, .vcpu_create = kvmppc_core_vcpu_create_e500mc, .vcpu_free = kvmppc_core_vcpu_free_e500mc, .init_vm = kvmppc_core_init_vm_e500mc, .destroy_vm = kvmppc_core_destroy_vm_e500mc, .emulate_op = kvmppc_core_emulate_op_e500, .emulate_mtspr = kvmppc_core_emulate_mtspr_e500, .emulate_mfspr = kvmppc_core_emulate_mfspr_e500, .create_vcpu_debugfs = kvmppc_create_vcpu_debugfs_e500, }; static int __init kvmppc_e500mc_init(void) { int r; r = kvmppc_e500mc_check_processor_compat(); if (r) goto err_out; r = kvmppc_booke_init(); if (r) goto err_out; /* * Use two lpids per VM on dual threaded processors like e6500 * to workarround the lack of tlb write conditional instruction. * Expose half the number of available hardware lpids to the lpid * allocator. */ kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core); r = kvm_init(sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); if (r) goto err_out; kvm_ops_e500mc.owner = THIS_MODULE; kvmppc_pr_ops = &kvm_ops_e500mc; err_out: return r; } static void __exit kvmppc_e500mc_exit(void) { kvmppc_pr_ops = NULL; kvmppc_booke_exit(); } module_init(kvmppc_e500mc_init); module_exit(kvmppc_e500mc_exit); MODULE_ALIAS_MISCDEV(KVM_MINOR); MODULE_ALIAS("devname:kvm");
linux-master
arch/powerpc/kvm/e500mc.c
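A small stand-alone model of the LPID pairing that kvmppc_core_init_vm_e500mc() sets up on dual-threaded cores: the allocator's id is doubled so each VM owns an even/odd pair (vm1 gets 2 and 3, vm2 gets 4 and 5, as the comment above says). The assumption that the odd id, base | 1, is what the per-thread get_lpid() helper hands out is mine; that helper lives in e500.h and is not shown here.

#include <stdio.h>

static int vm_base_lpid(int allocated, int threads_per_core)
{
	/* Mirror of the "lpid <<= 1" step in kvmppc_core_init_vm_e500mc(). */
	return threads_per_core == 2 ? allocated << 1 : allocated;
}

int main(void)
{
	int threads_per_core = 2;	/* e6500-style dual-threaded core */
	int alloc;

	for (alloc = 1; alloc <= 3; alloc++) {
		int base = vm_base_lpid(alloc, threads_per_core);

		printf("vm%d -> lpids %d and %d\n", alloc, base, base | 1);
	}
	return 0;
}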
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <[email protected]> */ #include <linux/types.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/highmem.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> /* #define DEBUG_MMU */ /* #define DEBUG_MMU_PTE */ /* #define DEBUG_MMU_PTE_IP 0xfff14c40 */ #ifdef DEBUG_MMU #define dprintk(X...) printk(KERN_INFO X) #else #define dprintk(X...) do { } while(0) #endif #ifdef DEBUG_MMU_PTE #define dprintk_pte(X...) printk(KERN_INFO X) #else #define dprintk_pte(X...) do { } while(0) #endif #define PTEG_FLAG_ACCESSED 0x00000100 #define PTEG_FLAG_DIRTY 0x00000080 #ifndef SID_SHIFT #define SID_SHIFT 28 #endif static inline bool check_debug_ip(struct kvm_vcpu *vcpu) { #ifdef DEBUG_MMU_PTE_IP return vcpu->arch.regs.nip == DEBUG_MMU_PTE_IP; #else return true; #endif } static inline u32 sr_vsid(u32 sr_raw) { return sr_raw & 0x0fffffff; } static inline bool sr_valid(u32 sr_raw) { return (sr_raw & 0x80000000) ? false : true; } static inline bool sr_ks(u32 sr_raw) { return (sr_raw & 0x40000000) ? true: false; } static inline bool sr_kp(u32 sr_raw) { return (sr_raw & 0x20000000) ? true: false; } static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data, bool iswrite); static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid); static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr) { return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf); } static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, bool data) { u64 vsid; struct kvmppc_pte pte; if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false)) return pte.vpage; kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16); } static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu, u32 sre, gva_t eaddr, bool primary) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); u32 page, hash, pteg, htabmask; hva_t r; page = (eaddr & 0x0FFFFFFF) >> 12; htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0; hash = ((sr_vsid(sre) ^ page) << 6); if (!primary) hash = ~hash; hash &= htabmask; pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash; dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n", kvmppc_get_pc(vcpu), eaddr, vcpu_book3s->sdr1, pteg, sr_vsid(sre)); r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT); if (kvm_is_error_hva(r)) return r; return r | (pteg & ~PAGE_MASK); } static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary) { return ((eaddr & 0x0fffffff) >> 22) | (sr_vsid(sre) << 7) | (primary ? 0 : 0x40) | 0x80000000; } static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data, bool iswrite) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); struct kvmppc_bat *bat; int i; for (i = 0; i < 8; i++) { if (data) bat = &vcpu_book3s->dbat[i]; else bat = &vcpu_book3s->ibat[i]; if (kvmppc_get_msr(vcpu) & MSR_PR) { if (!bat->vp) continue; } else { if (!bat->vs) continue; } if (check_debug_ip(vcpu)) { dprintk_pte("%cBAT %02d: 0x%lx - 0x%x (0x%x)\n", data ? 
'd' : 'i', i, eaddr, bat->bepi, bat->bepi_mask); } if ((eaddr & bat->bepi_mask) == bat->bepi) { u64 vsid; kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); vsid <<= 16; pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid; pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask); pte->may_read = bat->pp; pte->may_write = bat->pp > 1; pte->may_execute = true; if (!pte->may_read) { printk(KERN_INFO "BAT is not readable!\n"); continue; } if (iswrite && !pte->may_write) { dprintk_pte("BAT is read-only!\n"); continue; } return 0; } } return -ENOENT; } static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data, bool iswrite, bool primary) { u32 sre; hva_t ptegp; u32 pteg[16]; u32 pte0, pte1; u32 ptem = 0; int i; int found = 0; sre = find_sr(vcpu, eaddr); dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28, sr_vsid(sre), sre); pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary); if (kvm_is_error_hva(ptegp)) { printk(KERN_INFO "KVM: Invalid PTEG!\n"); goto no_page_found; } ptem = kvmppc_mmu_book3s_32_get_ptem(sre, eaddr, primary); if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) { printk_ratelimited(KERN_ERR "KVM: Can't copy data from 0x%lx!\n", ptegp); goto no_page_found; } for (i=0; i<16; i+=2) { pte0 = be32_to_cpu(pteg[i]); pte1 = be32_to_cpu(pteg[i + 1]); if (ptem == pte0) { u8 pp; pte->raddr = (pte1 & ~(0xFFFULL)) | (eaddr & 0xFFF); pp = pte1 & 3; if ((sr_kp(sre) && (kvmppc_get_msr(vcpu) & MSR_PR)) || (sr_ks(sre) && !(kvmppc_get_msr(vcpu) & MSR_PR))) pp |= 4; pte->may_write = false; pte->may_read = false; pte->may_execute = true; switch (pp) { case 0: case 1: case 2: case 6: pte->may_write = true; fallthrough; case 3: case 5: case 7: pte->may_read = true; break; } dprintk_pte("MMU: Found PTE -> %x %x - %x\n", pte0, pte1, pp); found = 1; break; } } /* Update PTE C and A bits, so the guest's swapper knows we used the page */ if (found) { u32 pte_r = pte1; char __user *addr = (char __user *) (ptegp + (i+1) * sizeof(u32)); /* * Use single-byte writes to update the HPTE, to * conform to what real hardware does. 
*/ if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) { pte_r |= PTEG_FLAG_ACCESSED; put_user(pte_r >> 8, addr + 2); } if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) { pte_r |= PTEG_FLAG_DIRTY; put_user(pte_r, addr + 3); } if (!pte->may_read || (iswrite && !pte->may_write)) return -EPERM; return 0; } no_page_found: if (check_debug_ip(vcpu)) { dprintk_pte("KVM MMU: No PTE found (sdr1=0x%llx ptegp=0x%lx)\n", to_book3s(vcpu)->sdr1, ptegp); for (i=0; i<16; i+=2) { dprintk_pte(" %02d: 0x%x - 0x%x (0x%x)\n", i, be32_to_cpu(pteg[i]), be32_to_cpu(pteg[i+1]), ptem); } } return -ENOENT; } static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data, bool iswrite) { int r; ulong mp_ea = vcpu->arch.magic_page_ea; pte->eaddr = eaddr; pte->page_size = MMU_PAGE_4K; /* Magic page override */ if (unlikely(mp_ea) && unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && !(kvmppc_get_msr(vcpu) & MSR_PR)) { pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff); pte->raddr &= KVM_PAM; pte->may_execute = true; pte->may_read = true; pte->may_write = true; return 0; } r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite); if (r < 0) r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, iswrite, true); if (r == -ENOENT) r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, iswrite, false); return r; } static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum) { return kvmppc_get_sr(vcpu, srnum); } static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, ulong value) { kvmppc_set_sr(vcpu, srnum, value); kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT); } static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large) { unsigned long i; struct kvm_vcpu *v; /* flush this VA on all cpus */ kvm_for_each_vcpu(i, v, vcpu->kvm) kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000); } static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid) { ulong ea = esid << SID_SHIFT; u32 sr; u64 gvsid = esid; u64 msr = kvmppc_get_msr(vcpu); if (msr & (MSR_DR|MSR_IR)) { sr = find_sr(vcpu, ea); if (sr_valid(sr)) gvsid = sr_vsid(sr); } /* In case we only have one of MSR_IR or MSR_DR set, let's put that in the real-mode context (and hope RM doesn't access high memory) */ switch (msr & (MSR_DR|MSR_IR)) { case 0: *vsid = VSID_REAL | esid; break; case MSR_IR: *vsid = VSID_REAL_IR | gvsid; break; case MSR_DR: *vsid = VSID_REAL_DR | gvsid; break; case MSR_DR|MSR_IR: if (sr_valid(sr)) *vsid = sr_vsid(sr); else *vsid = VSID_BAT | gvsid; break; default: BUG(); } if (msr & MSR_PR) *vsid |= VSID_PR; return 0; } static bool kvmppc_mmu_book3s_32_is_dcbz32(struct kvm_vcpu *vcpu) { return true; } void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu) { struct kvmppc_mmu *mmu = &vcpu->arch.mmu; mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin; mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin; mmu->xlate = kvmppc_mmu_book3s_32_xlate; mmu->tlbie = kvmppc_mmu_book3s_32_tlbie; mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid; mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp; mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32; mmu->slbmte = NULL; mmu->slbmfee = NULL; mmu->slbmfev = NULL; mmu->slbfee = NULL; mmu->slbie = NULL; mmu->slbia = NULL; }
linux-master
arch/powerpc/kvm/book3s_32_mmu.c
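The primary/secondary hash arithmetic in kvmppc_mmu_book3s_32_get_pteg() can be exercised on its own; the sketch below copies just that computation (guest-physical arithmetic only, the gfn_to_hva() step is omitted) and prints both PTEG addresses for an arbitrary example SDR1/VSID/EA combination.

#include <stdio.h>
#include <stdint.h>

static uint32_t pteg_addr(uint32_t sdr1, uint32_t vsid, uint32_t eaddr, int primary)
{
	uint32_t page = (eaddr & 0x0fffffff) >> 12;
	uint32_t htabmask = ((sdr1 & 0x1ff) << 16) | 0xffc0;
	uint32_t hash = (vsid ^ page) << 6;

	if (!primary)
		hash = ~hash;	/* secondary hash is the one's complement */
	hash &= htabmask;

	return (sdr1 & 0xffff0000) | hash;
}

int main(void)
{
	uint32_t sdr1 = 0x00300003;	/* arbitrary example HTABORG/HTABMASK */
	uint32_t vsid = 0x00001234;
	uint32_t eaddr = 0x10456789;

	printf("primary PTEG   at 0x%08x\n", pteg_addr(sdr1, vsid, eaddr, 1));
	printf("secondary PTEG at 0x%08x\n", pteg_addr(sdr1, vsid, eaddr, 0));
	return 0;
}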
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright 2010-2011 Paul Mackerras, IBM Corp. <[email protected]> */ #include <linux/types.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/hugetlb.h> #include <linux/module.h> #include <linux/log2.h> #include <linux/sizes.h> #include <asm/trace.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/book3s/64/mmu-hash.h> #include <asm/hvcall.h> #include <asm/synch.h> #include <asm/ppc-opcode.h> #include <asm/pte-walk.h> /* Translate address of a vmalloc'd thing to a linear map address */ static void *real_vmalloc_addr(void *addr) { return __va(ppc_find_vmap_phys((unsigned long)addr)); } /* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */ static int global_invalidates(struct kvm *kvm) { int global; int cpu; /* * If there is only one vcore, and it's currently running, * as indicated by local_paca->kvm_hstate.kvm_vcpu being set, * we can use tlbiel as long as we mark all other physical * cores as potentially having stale TLB entries for this lpid. * Otherwise, don't use tlbiel. */ if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu) global = 0; else global = 1; /* LPID has been switched to host if in virt mode so can't do local */ if (!global && (mfmsr() & (MSR_IR|MSR_DR))) global = 1; if (!global) { /* any other core might now have stale TLB entries... */ smp_wmb(); cpumask_setall(&kvm->arch.need_tlb_flush); cpu = local_paca->kvm_hstate.kvm_vcore->pcpu; cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush); } return global; } /* * Add this HPTE into the chain for the real page. * Must be called with the chain locked; it unlocks the chain. */ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, unsigned long *rmap, long pte_index, int realmode) { struct revmap_entry *head, *tail; unsigned long i; if (*rmap & KVMPPC_RMAP_PRESENT) { i = *rmap & KVMPPC_RMAP_INDEX; head = &kvm->arch.hpt.rev[i]; if (realmode) head = real_vmalloc_addr(head); tail = &kvm->arch.hpt.rev[head->back]; if (realmode) tail = real_vmalloc_addr(tail); rev->forw = i; rev->back = head->back; tail->forw = pte_index; head->back = pte_index; } else { rev->forw = rev->back = pte_index; *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | pte_index | KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_HPT; } unlock_rmap(rmap); } EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain); /* Update the dirty bitmap of a memslot */ void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot, unsigned long gfn, unsigned long psize) { unsigned long npages; if (!psize || !memslot->dirty_bitmap) return; npages = (psize + PAGE_SIZE - 1) / PAGE_SIZE; gfn -= memslot->base_gfn; set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages); } EXPORT_SYMBOL_GPL(kvmppc_update_dirty_map); static void kvmppc_set_dirty_from_hpte(struct kvm *kvm, unsigned long hpte_v, unsigned long hpte_gr) { struct kvm_memory_slot *memslot; unsigned long gfn; unsigned long psize; psize = kvmppc_actual_pgsz(hpte_v, hpte_gr); gfn = hpte_rpn(hpte_gr, psize); memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); if (memslot && memslot->dirty_bitmap) kvmppc_update_dirty_map(memslot, gfn, psize); } /* Returns a pointer to the revmap entry for the page mapped by a HPTE */ static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v, unsigned long hpte_gr, struct kvm_memory_slot **memslotp, unsigned long *gfnp) { struct kvm_memory_slot *memslot; unsigned long *rmap; unsigned long gfn; gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, 
hpte_gr)); memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); if (memslotp) *memslotp = memslot; if (gfnp) *gfnp = gfn; if (!memslot) return NULL; rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); return rmap; } /* Remove this HPTE from the chain for a real page */ static void remove_revmap_chain(struct kvm *kvm, long pte_index, struct revmap_entry *rev, unsigned long hpte_v, unsigned long hpte_r) { struct revmap_entry *next, *prev; unsigned long ptel, head; unsigned long *rmap; unsigned long rcbits; struct kvm_memory_slot *memslot; unsigned long gfn; rcbits = hpte_r & (HPTE_R_R | HPTE_R_C); ptel = rev->guest_rpte |= rcbits; rmap = revmap_for_hpte(kvm, hpte_v, ptel, &memslot, &gfn); if (!rmap) return; lock_rmap(rmap); head = *rmap & KVMPPC_RMAP_INDEX; next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]); prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]); next->back = rev->back; prev->forw = rev->forw; if (head == pte_index) { head = rev->forw; if (head == pte_index) *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX); else *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head; } *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT; if (rcbits & HPTE_R_C) kvmppc_update_dirty_map(memslot, gfn, kvmppc_actual_pgsz(hpte_v, hpte_r)); unlock_rmap(rmap); } long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, long pte_index, unsigned long pteh, unsigned long ptel, pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret) { unsigned long i, pa, gpa, gfn, psize; unsigned long slot_fn, hva; __be64 *hpte; struct revmap_entry *rev; unsigned long g_ptel; struct kvm_memory_slot *memslot; unsigned hpage_shift; bool is_ci; unsigned long *rmap; pte_t *ptep; unsigned int writing; unsigned long mmu_seq; unsigned long rcbits; if (kvm_is_radix(kvm)) return H_FUNCTION; /* * The HPTE gets used by compute_tlbie_rb() to set TLBIE bits, so * these functions should work together -- must ensure a guest can not * cause problems with the TLBIE that KVM executes. */ if ((pteh >> HPTE_V_SSIZE_SHIFT) & 0x2) { /* B=0b1x is a reserved value, disallow it. */ return H_PARAMETER; } psize = kvmppc_actual_pgsz(pteh, ptel); if (!psize) return H_PARAMETER; writing = hpte_is_writable(ptel); pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID); ptel &= ~HPTE_GR_RESERVED; g_ptel = ptel; /* used later to detect if we might have been invalidated */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); /* Find the memslot (if any) for this address */ gpa = (ptel & HPTE_R_RPN) & ~(psize - 1); gfn = gpa >> PAGE_SHIFT; memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); pa = 0; is_ci = false; rmap = NULL; if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) { /* Emulated MMIO - mark this with key=31 */ pteh |= HPTE_V_ABSENT; ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO; goto do_insert; } /* Check if the requested page fits entirely in the memslot. 
*/ if (!slot_is_aligned(memslot, psize)) return H_PARAMETER; slot_fn = gfn - memslot->base_gfn; rmap = &memslot->arch.rmap[slot_fn]; /* Translate to host virtual address */ hva = __gfn_to_hva_memslot(memslot, gfn); arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift); if (ptep) { pte_t pte; unsigned int host_pte_size; if (hpage_shift) host_pte_size = 1ul << hpage_shift; else host_pte_size = PAGE_SIZE; /* * We should always find the guest page size * to <= host page size, if host is using hugepage */ if (host_pte_size < psize) { arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); return H_PARAMETER; } pte = kvmppc_read_update_linux_pte(ptep, writing); if (pte_present(pte) && !pte_protnone(pte)) { if (writing && !pte_write(pte)) /* make the actual HPTE be read-only */ ptel = hpte_make_readonly(ptel); is_ci = pte_ci(pte); pa = pte_pfn(pte) << PAGE_SHIFT; pa |= hva & (host_pte_size - 1); pa |= gpa & ~PAGE_MASK; } } arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1); ptel |= pa; if (pa) pteh |= HPTE_V_VALID; else { pteh |= HPTE_V_ABSENT; ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO); } /*If we had host pte mapping then Check WIMG */ if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) { if (is_ci) return H_PARAMETER; /* * Allow guest to map emulated device memory as * uncacheable, but actually make it cacheable. */ ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G); ptel |= HPTE_R_M; } /* Find and lock the HPTEG slot to use */ do_insert: if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) return H_PARAMETER; if (likely((flags & H_EXACT) == 0)) { pte_index &= ~7UL; hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); for (i = 0; i < 8; ++i) { if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 && try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | HPTE_V_ABSENT)) break; hpte += 2; } if (i == 8) { /* * Since try_lock_hpte doesn't retry (not even stdcx. * failures), it could be that there is a free slot * but we transiently failed to lock it. Try again, * actually locking each slot and checking it. 
*/ hpte -= 16; for (i = 0; i < 8; ++i) { u64 pte; while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); pte = be64_to_cpu(hpte[0]); if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT))) break; __unlock_hpte(hpte, pte); hpte += 2; } if (i == 8) return H_PTEG_FULL; } pte_index += i; } else { hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | HPTE_V_ABSENT)) { /* Lock the slot and check again */ u64 pte; while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); pte = be64_to_cpu(hpte[0]); if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) { __unlock_hpte(hpte, pte); return H_PTEG_FULL; } } } /* Save away the guest's idea of the second HPTE dword */ rev = &kvm->arch.hpt.rev[pte_index]; if (realmode) rev = real_vmalloc_addr(rev); if (rev) { rev->guest_rpte = g_ptel; note_hpte_modification(kvm, rev); } /* Link HPTE into reverse-map chain */ if (pteh & HPTE_V_VALID) { if (realmode) rmap = real_vmalloc_addr(rmap); lock_rmap(rmap); /* Check for pending invalidations under the rmap chain lock */ if (mmu_invalidate_retry(kvm, mmu_seq)) { /* inval in progress, write a non-present HPTE */ pteh |= HPTE_V_ABSENT; pteh &= ~HPTE_V_VALID; ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO); unlock_rmap(rmap); } else { kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index, realmode); /* Only set R/C in real HPTE if already set in *rmap */ rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT; ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C); } } /* Convert to new format on P9 */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { ptel = hpte_old_to_new_r(pteh, ptel); pteh = hpte_old_to_new_v(pteh); } hpte[1] = cpu_to_be64(ptel); /* Write the first HPTE dword, unlocking the HPTE and making it valid */ eieio(); __unlock_hpte(hpte, pteh); asm volatile("ptesync" : : : "memory"); *pte_idx_ret = pte_index; return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_do_h_enter); long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, long pte_index, unsigned long pteh, unsigned long ptel) { return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel, vcpu->arch.pgdir, true, &vcpu->arch.regs.gpr[4]); } EXPORT_SYMBOL_GPL(kvmppc_h_enter); #ifdef __BIG_ENDIAN__ #define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token)) #else #define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index)) #endif static inline int is_mmio_hpte(unsigned long v, unsigned long r) { return ((v & HPTE_V_ABSENT) && (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) == (HPTE_R_KEY_HI | HPTE_R_KEY_LO)); } static inline void fixup_tlbie_lpid(unsigned long rb_value, unsigned long lpid) { if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { /* Radix flush for a hash guest */ unsigned long rb,rs,prs,r,ric; rb = PPC_BIT(52); /* IS = 2 */ rs = 0; /* lpid = 0 */ prs = 0; /* partition scoped */ r = 1; /* radix format */ ric = 0; /* RIC_FLSUH_TLB */ /* * Need the extra ptesync to make sure we don't * re-order the tlbie */ asm volatile("ptesync": : :"memory"); asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); } if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync": : :"memory"); asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : : "r" (rb_value), "r" (lpid)); } } static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, long npages, int global, bool need_sync) { long i; /* * We use the POWER9 5-operand versions of tlbie and tlbiel here. * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores * the RS field, this is backwards-compatible with P7 and P8. 
*/ if (global) { if (need_sync) asm volatile("ptesync" : : : "memory"); for (i = 0; i < npages; ++i) { asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : : "r" (rbvalues[i]), "r" (kvm->arch.lpid)); } fixup_tlbie_lpid(rbvalues[i - 1], kvm->arch.lpid); asm volatile("eieio; tlbsync; ptesync" : : : "memory"); } else { if (need_sync) asm volatile("ptesync" : : : "memory"); for (i = 0; i < npages; ++i) { asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : : "r" (rbvalues[i]), "r" (0)); } asm volatile("ptesync" : : : "memory"); } } long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags, unsigned long pte_index, unsigned long avpn, unsigned long *hpret) { __be64 *hpte; unsigned long v, r, rb; struct revmap_entry *rev; u64 pte, orig_pte, pte_r; if (kvm_is_radix(kvm)) return H_FUNCTION; if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) return H_PARAMETER; hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); pte = orig_pte = be64_to_cpu(hpte[0]); pte_r = be64_to_cpu(hpte[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { pte = hpte_new_to_old_v(pte, pte_r); pte_r = hpte_new_to_old_r(pte_r); } if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) || ((flags & H_ANDCOND) && (pte & avpn) != 0)) { __unlock_hpte(hpte, orig_pte); return H_NOT_FOUND; } rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); v = pte & ~HPTE_V_HVLOCK; if (v & HPTE_V_VALID) { hpte[0] &= ~cpu_to_be64(HPTE_V_VALID); rb = compute_tlbie_rb(v, pte_r, pte_index); do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true); /* * The reference (R) and change (C) bits in a HPT * entry can be set by hardware at any time up until * the HPTE is invalidated and the TLB invalidation * sequence has completed. This means that when * removing a HPTE, we need to re-read the HPTE after * the invalidation sequence has completed in order to * obtain reliable values of R and C. 
*/ remove_revmap_chain(kvm, pte_index, rev, v, be64_to_cpu(hpte[1])); } r = rev->guest_rpte & ~HPTE_GR_RESERVED; note_hpte_modification(kvm, rev); unlock_hpte(hpte, 0); if (is_mmio_hpte(v, pte_r)) atomic64_inc(&kvm->arch.mmio_update); if (v & HPTE_V_ABSENT) v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID; hpret[0] = v; hpret[1] = r; return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_do_h_remove); long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index, unsigned long avpn) { return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn, &vcpu->arch.regs.gpr[4]); } EXPORT_SYMBOL_GPL(kvmppc_h_remove); long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; unsigned long *args = &vcpu->arch.regs.gpr[4]; __be64 *hp, *hptes[4]; unsigned long tlbrb[4]; long int i, j, k, n, found, indexes[4]; unsigned long flags, req, pte_index, rcbits; int global; long int ret = H_SUCCESS; struct revmap_entry *rev, *revs[4]; u64 hp0, hp1; if (kvm_is_radix(kvm)) return H_FUNCTION; global = global_invalidates(kvm); for (i = 0; i < 4 && ret == H_SUCCESS; ) { n = 0; for (; i < 4; ++i) { j = i * 2; pte_index = args[j]; flags = pte_index >> 56; pte_index &= ((1ul << 56) - 1); req = flags >> 6; flags &= 3; if (req == 3) { /* no more requests */ i = 4; break; } if (req != 1 || flags == 3 || pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) { /* parameter error */ args[j] = ((0xa0 | flags) << 56) + pte_index; ret = H_PARAMETER; break; } hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4)); /* to avoid deadlock, don't spin except for first */ if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) { if (n) break; while (!try_lock_hpte(hp, HPTE_V_HVLOCK)) cpu_relax(); } found = 0; hp0 = be64_to_cpu(hp[0]); hp1 = be64_to_cpu(hp[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { hp0 = hpte_new_to_old_v(hp0, hp1); hp1 = hpte_new_to_old_r(hp1); } if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) { switch (flags & 3) { case 0: /* absolute */ found = 1; break; case 1: /* andcond */ if (!(hp0 & args[j + 1])) found = 1; break; case 2: /* AVPN */ if ((hp0 & ~0x7fUL) == args[j + 1]) found = 1; break; } } if (!found) { hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK); args[j] = ((0x90 | flags) << 56) + pte_index; continue; } args[j] = ((0x80 | flags) << 56) + pte_index; rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); note_hpte_modification(kvm, rev); if (!(hp0 & HPTE_V_VALID)) { /* insert R and C bits from PTE */ rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); args[j] |= rcbits << (56 - 5); hp[0] = 0; if (is_mmio_hpte(hp0, hp1)) atomic64_inc(&kvm->arch.mmio_update); continue; } /* leave it locked */ hp[0] &= ~cpu_to_be64(HPTE_V_VALID); tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index); indexes[n] = j; hptes[n] = hp; revs[n] = rev; ++n; } if (!n) break; /* Now that we've collected a batch, do the tlbies */ do_tlbies(kvm, tlbrb, n, global, true); /* Read PTE low words after tlbie to get final R/C values */ for (k = 0; k < n; ++k) { j = indexes[k]; pte_index = args[j] & ((1ul << 56) - 1); hp = hptes[k]; rev = revs[k]; remove_revmap_chain(kvm, pte_index, rev, be64_to_cpu(hp[0]), be64_to_cpu(hp[1])); rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); args[j] |= rcbits << (56 - 5); __unlock_hpte(hp, 0); } } return ret; } EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove); long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index, unsigned long avpn) { struct kvm *kvm = vcpu->kvm; __be64 *hpte; struct revmap_entry *rev; unsigned long v, r, rb, mask, bits; u64 pte_v, pte_r; if (kvm_is_radix(kvm)) return H_FUNCTION; if 
(pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) return H_PARAMETER; hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); v = pte_v = be64_to_cpu(hpte[0]); if (cpu_has_feature(CPU_FTR_ARCH_300)) v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1])); if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) { __unlock_hpte(hpte, pte_v); return H_NOT_FOUND; } pte_r = be64_to_cpu(hpte[1]); bits = (flags << 55) & HPTE_R_PP0; bits |= (flags << 48) & HPTE_R_KEY_HI; bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); /* Update guest view of 2nd HPTE dword */ mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI | HPTE_R_KEY_LO; rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); if (rev) { r = (rev->guest_rpte & ~mask) | bits; rev->guest_rpte = r; note_hpte_modification(kvm, rev); } /* Update HPTE */ if (v & HPTE_V_VALID) { /* * If the page is valid, don't let it transition from * readonly to writable. If it should be writable, we'll * take a trap and let the page fault code sort it out. */ r = (pte_r & ~mask) | bits; if (hpte_is_writable(r) && !hpte_is_writable(pte_r)) r = hpte_make_readonly(r); /* If the PTE is changing, invalidate it first */ if (r != pte_r) { rb = compute_tlbie_rb(v, r, pte_index); hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) | HPTE_V_ABSENT); do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true); /* Don't lose R/C bit updates done by hardware */ r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C); hpte[1] = cpu_to_be64(r); } } unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK); asm volatile("ptesync" : : : "memory"); if (is_mmio_hpte(v, pte_r)) atomic64_inc(&kvm->arch.mmio_update); return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_h_protect); long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index) { struct kvm *kvm = vcpu->kvm; __be64 *hpte; unsigned long v, r; int i, n = 1; struct revmap_entry *rev = NULL; if (kvm_is_radix(kvm)) return H_FUNCTION; if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) return H_PARAMETER; if (flags & H_READ_4) { pte_index &= ~3; n = 4; } rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); for (i = 0; i < n; ++i, ++pte_index) { hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK; r = be64_to_cpu(hpte[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { v = hpte_new_to_old_v(v, r); r = hpte_new_to_old_r(r); } if (v & HPTE_V_ABSENT) { v &= ~HPTE_V_ABSENT; v |= HPTE_V_VALID; } if (v & HPTE_V_VALID) { r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C)); r &= ~HPTE_GR_RESERVED; } vcpu->arch.regs.gpr[4 + i * 2] = v; vcpu->arch.regs.gpr[5 + i * 2] = r; } return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_h_read); long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index) { struct kvm *kvm = vcpu->kvm; __be64 *hpte; unsigned long v, r, gr; struct revmap_entry *rev; unsigned long *rmap; long ret = H_NOT_FOUND; if (kvm_is_radix(kvm)) return H_FUNCTION; if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) return H_PARAMETER; rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); v = be64_to_cpu(hpte[0]); r = be64_to_cpu(hpte[1]); if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT))) goto out; gr = rev->guest_rpte; if (rev->guest_rpte & HPTE_R_R) { rev->guest_rpte &= ~HPTE_R_R; note_hpte_modification(kvm, rev); } if (v & HPTE_V_VALID) { gr |= r & (HPTE_R_R | HPTE_R_C); if 
(r & HPTE_R_R) { kvmppc_clear_ref_hpte(kvm, hpte, pte_index); rmap = revmap_for_hpte(kvm, v, gr, NULL, NULL); if (rmap) { lock_rmap(rmap); *rmap |= KVMPPC_RMAP_REFERENCED; unlock_rmap(rmap); } } } vcpu->arch.regs.gpr[4] = gr; ret = H_SUCCESS; out: unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); return ret; } EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref); long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index) { struct kvm *kvm = vcpu->kvm; __be64 *hpte; unsigned long v, r, gr; struct revmap_entry *rev; long ret = H_NOT_FOUND; if (kvm_is_radix(kvm)) return H_FUNCTION; if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) return H_PARAMETER; rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); v = be64_to_cpu(hpte[0]); r = be64_to_cpu(hpte[1]); if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT))) goto out; gr = rev->guest_rpte; if (gr & HPTE_R_C) { rev->guest_rpte &= ~HPTE_R_C; note_hpte_modification(kvm, rev); } if (v & HPTE_V_VALID) { /* need to make it temporarily absent so C is stable */ hpte[0] |= cpu_to_be64(HPTE_V_ABSENT); kvmppc_invalidate_hpte(kvm, hpte, pte_index); r = be64_to_cpu(hpte[1]); gr |= r & (HPTE_R_R | HPTE_R_C); if (r & HPTE_R_C) { hpte[1] = cpu_to_be64(r & ~HPTE_R_C); eieio(); kvmppc_set_dirty_from_hpte(kvm, v, gr); } } vcpu->arch.regs.gpr[4] = gr; ret = H_SUCCESS; out: unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); return ret; } EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod); static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq, unsigned long gpa, int writing, unsigned long *hpa, struct kvm_memory_slot **memslot_p) { struct kvm *kvm = vcpu->kvm; struct kvm_memory_slot *memslot; unsigned long gfn, hva, pa, psize = PAGE_SHIFT; unsigned int shift; pte_t *ptep, pte; /* Find the memslot for this address */ gfn = gpa >> PAGE_SHIFT; memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) return H_PARAMETER; /* Translate to host virtual address */ hva = __gfn_to_hva_memslot(memslot, gfn); /* Try to find the host pte for that virtual address */ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); if (!ptep) return H_TOO_HARD; pte = kvmppc_read_update_linux_pte(ptep, writing); if (!pte_present(pte)) return H_TOO_HARD; /* Convert to a physical address */ if (shift) psize = 1UL << shift; pa = pte_pfn(pte) << PAGE_SHIFT; pa |= hva & (psize - 1); pa |= gpa & ~PAGE_MASK; if (hpa) *hpa = pa; if (memslot_p) *memslot_p = memslot; return H_SUCCESS; } static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu, unsigned long dest) { struct kvm_memory_slot *memslot; struct kvm *kvm = vcpu->kvm; unsigned long pa, mmu_seq; long ret = H_SUCCESS; int i; /* Used later to detect if we might have been invalidated */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot); if (ret != H_SUCCESS) goto out_unlock; /* Zero the page */ for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES) dcbz((void *)pa); kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE); out_unlock: arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); return ret; } static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu, unsigned long dest, unsigned long src) { unsigned long dest_pa, src_pa, mmu_seq; struct kvm_memory_slot *dest_memslot; struct kvm *kvm = vcpu->kvm; long ret = H_SUCCESS; /* Used later to detect if we might have been 
invalidated */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot); if (ret != H_SUCCESS) goto out_unlock; ret = kvmppc_get_hpa(vcpu, mmu_seq, src, 0, &src_pa, NULL); if (ret != H_SUCCESS) goto out_unlock; /* Copy the page */ memcpy((void *)dest_pa, (void *)src_pa, SZ_4K); kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE); out_unlock: arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); return ret; } long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long dest, unsigned long src) { struct kvm *kvm = vcpu->kvm; u64 pg_mask = SZ_4K - 1; /* 4K page size */ long ret = H_SUCCESS; /* Don't handle radix mode here, go up to the virtual mode handler */ if (kvm_is_radix(kvm)) return H_TOO_HARD; /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */ if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE | H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED)) return H_PARAMETER; /* dest (and src if copy_page flag set) must be page aligned */ if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask))) return H_PARAMETER; /* zero and/or copy the page as determined by the flags */ if (flags & H_COPY_PAGE) ret = kvmppc_do_h_page_init_copy(vcpu, dest, src); else if (flags & H_ZERO_PAGE) ret = kvmppc_do_h_page_init_zero(vcpu, dest); /* We can ignore the other flags */ return ret; } void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep, unsigned long pte_index) { unsigned long rb; u64 hp0, hp1; hptep[0] &= ~cpu_to_be64(HPTE_V_VALID); hp0 = be64_to_cpu(hptep[0]); hp1 = be64_to_cpu(hptep[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { hp0 = hpte_new_to_old_v(hp0, hp1); hp1 = hpte_new_to_old_r(hp1); } rb = compute_tlbie_rb(hp0, hp1, pte_index); do_tlbies(kvm, &rb, 1, 1, true); } EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte); void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep, unsigned long pte_index) { unsigned long rb; unsigned char rbyte; u64 hp0, hp1; hp0 = be64_to_cpu(hptep[0]); hp1 = be64_to_cpu(hptep[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { hp0 = hpte_new_to_old_v(hp0, hp1); hp1 = hpte_new_to_old_r(hp1); } rb = compute_tlbie_rb(hp0, hp1, pte_index); rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8; /* modify only the second-last byte, which contains the ref bit */ *((char *)hptep + 14) = rbyte; do_tlbies(kvm, &rb, 1, 1, false); } EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte); static int slb_base_page_shift[4] = { 24, /* 16M */ 16, /* 64k */ 34, /* 16G */ 20, /* 1M, unsupported */ }; static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu, unsigned long eaddr, unsigned long slb_v, long mmio_update) { struct mmio_hpte_cache_entry *entry = NULL; unsigned int pshift; unsigned int i; for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) { entry = &vcpu->arch.mmio_cache.entry[i]; if (entry->mmio_update == mmio_update) { pshift = entry->slb_base_pshift; if ((entry->eaddr >> pshift) == (eaddr >> pshift) && entry->slb_v == slb_v) return entry; } } return NULL; } static struct mmio_hpte_cache_entry * next_mmio_cache_entry(struct kvm_vcpu *vcpu) { unsigned int index = vcpu->arch.mmio_cache.index; vcpu->arch.mmio_cache.index++; if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE) vcpu->arch.mmio_cache.index = 0; return &vcpu->arch.mmio_cache.entry[index]; } /* When called from virtmode, this func should be protected by * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK * can trigger deadlock issue. 
*/ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, unsigned long valid) { unsigned int i; unsigned int pshift; unsigned long somask; unsigned long vsid, hash; unsigned long avpn; __be64 *hpte; unsigned long mask, val; unsigned long v, r, orig_v; /* Get page shift, work out hash and AVPN etc. */ mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY; val = 0; pshift = 12; if (slb_v & SLB_VSID_L) { mask |= HPTE_V_LARGE; val |= HPTE_V_LARGE; pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4]; } if (slb_v & SLB_VSID_B_1T) { somask = (1UL << 40) - 1; vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T; vsid ^= vsid << 25; } else { somask = (1UL << 28) - 1; vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT; } hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt); avpn = slb_v & ~(somask >> 16); /* also includes B */ avpn |= (eaddr & somask) >> 16; if (pshift >= 24) avpn &= ~((1UL << (pshift - 16)) - 1); else avpn &= ~0x7fUL; val |= avpn; for (;;) { hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7)); for (i = 0; i < 16; i += 2) { /* Read the PTE racily */ v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK; if (cpu_has_feature(CPU_FTR_ARCH_300)) v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1])); /* Check valid/absent, hash, segment size and AVPN */ if (!(v & valid) || (v & mask) != val) continue; /* Lock the PTE and read it under the lock */ while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK)) cpu_relax(); v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK; r = be64_to_cpu(hpte[i+1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { v = hpte_new_to_old_v(v, r); r = hpte_new_to_old_r(r); } /* * Check the HPTE again, including base page size */ if ((v & valid) && (v & mask) == val && kvmppc_hpte_base_page_shift(v, r) == pshift) /* Return with the HPTE still locked */ return (hash << 3) + (i >> 1); __unlock_hpte(&hpte[i], orig_v); } if (val & HPTE_V_SECONDARY) break; val |= HPTE_V_SECONDARY; hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt); } return -1; } EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte); /* * Called in real mode to check whether an HPTE not found fault * is due to accessing a paged-out page or an emulated MMIO page, * or if a protection fault is due to accessing a page that the * guest wanted read/write access to but which we made read-only. * Returns a possibly modified status (DSISR) value if not * (i.e. pass the interrupt to the guest), * -1 to pass the fault up to host kernel mode code, -2 to do that * and also load the instruction word (for MMIO emulation), * or 0 if we should make the guest retry the access. 
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr, orig_v;
	__be64 *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;
	struct mmio_hpte_cache_entry *cache_entry = NULL;
	long mmio_update = 0;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE) {
		valid |= HPTE_V_ABSENT;
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
	}
	if (cache_entry) {
		index = cache_entry->pte_index;
		v = cache_entry->hpte_v;
		r = cache_entry->hpte_r;
		gr = cache_entry->rpte;
	} else {
		index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
		if (index < 0) {
			if (status & DSISR_NOHPTE)
				return status;	/* there really was no HPTE */
			return 0;	/* for prot fault, HPTE disappeared */
		}
		hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
		v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
		gr = rev->guest_rpte;

		unlock_hpte(hpte, orig_v);
	}

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_G_OR_CIP;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;
	vcpu->arch.pgfault_cache = cache_entry;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
		if (!cache_entry) {
			unsigned int pshift = 12;
			unsigned int pshift_index;

			if (slb_v & SLB_VSID_L) {
				pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
				pshift = slb_base_page_shift[pshift_index];
			}
			cache_entry = next_mmio_cache_entry(vcpu);
			cache_entry->eaddr = addr;
			cache_entry->slb_base_pshift = pshift;
			cache_entry->pte_index = index;
			cache_entry->hpte_v = v;
			cache_entry->hpte_r = r;
			cache_entry->rpte = gr;
			cache_entry->slb_v = slb_v;
			cache_entry->mmio_update = mmio_update;
		}
		if (data && (vcpu->arch.shregs.msr & MSR_IR))
			return -2;	/* MMIO emulation - load instr word */
	}

	return -1;		/* send fault up to host kernel mode */
}
EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);
linux-master
arch/powerpc/kvm/book3s_hv_rm_mmu.c
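The real-mode handlers in book3s_hv_rm_mmu.c above all follow the same convention for serializing access to a hashed page table entry: spin on try_lock_hpte() until a software lock bit (HPTE_V_HVLOCK) in the first HPTE doubleword is taken, then publish the final value with the bit cleared via unlock_hpte()/__unlock_hpte(). The stand-alone sketch below only models that convention in user space with GCC atomic builtins; LOCK_BIT, the helper names and the sample HPTE value are placeholders for illustration, not the kernel's definitions, and the kernel's real helpers operate directly on the live hashed page table in real mode.

/*
 * Illustrative model of the per-HPTE lock-bit convention, not kernel code.
 * LOCK_BIT stands in for HPTE_V_HVLOCK; the real value lives in the
 * kernel headers.
 */
#include <stdint.h>
#include <stdio.h>

#define LOCK_BIT (1ULL << 6)	/* placeholder for HPTE_V_HVLOCK */

/* Try to take the per-HPTE lock bit; returns nonzero on success. */
static int try_lock_hpte_model(uint64_t *hpte_v)
{
	uint64_t old = __atomic_load_n(hpte_v, __ATOMIC_RELAXED);

	if (old & LOCK_BIT)
		return 0;
	return __atomic_compare_exchange_n(hpte_v, &old, old | LOCK_BIT,
					   0, __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}

/* Publish a (possibly updated) first doubleword and drop the lock. */
static void unlock_hpte_model(uint64_t *hpte_v, uint64_t new_v)
{
	__atomic_store_n(hpte_v, new_v & ~LOCK_BIT, __ATOMIC_RELEASE);
}

int main(void)
{
	uint64_t hpte_v = 0x8000000000000001ULL;	/* arbitrary contents */

	while (!try_lock_hpte_model(&hpte_v))
		;	/* the kernel spins with cpu_relax() here */
	/* ... inspect or modify the HPTE while holding the lock ... */
	unlock_hpte_model(&hpte_v, hpte_v);
	printf("hpte_v = 0x%016llx\n", (unsigned long long)hpte_v);
	return 0;
}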
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <[email protected]>
 */

#include <linux/export.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
#endif

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
#endif
linux-master
arch/powerpc/kvm/book3s_exports.c
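book3s_exports.c above exists only to make the low-level entry trampolines visible to the separately built KVM modules via EXPORT_SYMBOL_GPL(). As a rough sketch of what a consumer looks like, a hypothetical module that wanted to reference one of these GPL-only exports would declare the symbol and let the module loader resolve it, as below; this module is invented purely for illustration and is not part of the tree.

/* Hypothetical consumer of an EXPORT_SYMBOL_GPL() symbol - illustration only. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Exported in book3s_exports.c; prototype repeated here for the sketch. */
extern void kvmppc_hv_entry_trampoline(void);

static int __init export_user_init(void)
{
	/* Just prove the symbol was resolved at module load time. */
	pr_info("kvmppc_hv_entry_trampoline resolved: %ps\n",
		kvmppc_hv_entry_trampoline);
	return 0;
}

static void __exit export_user_exit(void)
{
}

module_init(export_user_init);
module_exit(export_user_exit);
MODULE_LICENSE("GPL");	/* required to link against GPL-only exports */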
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <[email protected]> */ #include <asm/kvm_ppc.h> #include <asm/disassemble.h> #include <asm/kvm_book3s.h> #include <asm/reg.h> #include <asm/switch_to.h> #include <asm/time.h> #include <asm/tm.h> #include "book3s.h" #include <asm/asm-prototypes.h> #define OP_19_XOP_RFID 18 #define OP_19_XOP_RFI 50 #define OP_31_XOP_MFMSR 83 #define OP_31_XOP_MTMSR 146 #define OP_31_XOP_MTMSRD 178 #define OP_31_XOP_MTSR 210 #define OP_31_XOP_MTSRIN 242 #define OP_31_XOP_TLBIEL 274 /* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */ #define OP_31_XOP_FAKE_SC1 308 #define OP_31_XOP_SLBMTE 402 #define OP_31_XOP_SLBIE 434 #define OP_31_XOP_SLBIA 498 #define OP_31_XOP_MFSR 595 #define OP_31_XOP_MFSRIN 659 #define OP_31_XOP_DCBA 758 #define OP_31_XOP_SLBMFEV 851 #define OP_31_XOP_EIOIO 854 #define OP_31_XOP_SLBMFEE 915 #define OP_31_XOP_SLBFEE 979 #define OP_31_XOP_TBEGIN 654 #define OP_31_XOP_TABORT 910 #define OP_31_XOP_TRECLAIM 942 #define OP_31_XOP_TRCHKPT 1006 /* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */ #define OP_31_XOP_DCBZ 1010 #define OP_LFS 48 #define OP_LFD 50 #define OP_STFS 52 #define OP_STFD 54 #define SPRN_GQR0 912 #define SPRN_GQR1 913 #define SPRN_GQR2 914 #define SPRN_GQR3 915 #define SPRN_GQR4 916 #define SPRN_GQR5 917 #define SPRN_GQR6 918 #define SPRN_GQR7 919 enum priv_level { PRIV_PROBLEM = 0, PRIV_SUPER = 1, PRIV_HYPER = 2, }; static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level) { /* PAPR VMs only access supervisor SPRs */ if (vcpu->arch.papr_enabled && (level > PRIV_SUPER)) return false; /* Limit user space to its own small SPR set */ if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM) return false; return true; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu) { memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0], sizeof(vcpu->arch.gpr_tm)); memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp, sizeof(struct thread_fp_state)); memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr, sizeof(struct thread_vr_state)); vcpu->arch.ppr_tm = vcpu->arch.ppr; vcpu->arch.dscr_tm = vcpu->arch.dscr; vcpu->arch.amr_tm = vcpu->arch.amr; vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; vcpu->arch.tar_tm = vcpu->arch.tar; vcpu->arch.lr_tm = vcpu->arch.regs.link; vcpu->arch.cr_tm = vcpu->arch.regs.ccr; vcpu->arch.xer_tm = vcpu->arch.regs.xer; vcpu->arch.vrsave_tm = vcpu->arch.vrsave; } static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu) { memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0], sizeof(vcpu->arch.regs.gpr)); memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm, sizeof(struct thread_fp_state)); memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm, sizeof(struct thread_vr_state)); vcpu->arch.ppr = vcpu->arch.ppr_tm; vcpu->arch.dscr = vcpu->arch.dscr_tm; vcpu->arch.amr = vcpu->arch.amr_tm; vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; vcpu->arch.tar = vcpu->arch.tar_tm; vcpu->arch.regs.link = vcpu->arch.lr_tm; vcpu->arch.regs.ccr = vcpu->arch.cr_tm; vcpu->arch.regs.xer = vcpu->arch.xer_tm; vcpu->arch.vrsave = vcpu->arch.vrsave_tm; } static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val) { unsigned long guest_msr = kvmppc_get_msr(vcpu); int fc_val = ra_val ? 
ra_val : 1; uint64_t texasr; /* CR0 = 0 | MSR[TS] | 0 */ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) | (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) << CR0_SHIFT); preempt_disable(); tm_enable(); texasr = mfspr(SPRN_TEXASR); kvmppc_save_tm_pr(vcpu); kvmppc_copyfrom_vcpu_tm(vcpu); /* failure recording depends on Failure Summary bit */ if (!(texasr & TEXASR_FS)) { texasr &= ~TEXASR_FC; texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS; texasr &= ~(TEXASR_PR | TEXASR_HV); if (kvmppc_get_msr(vcpu) & MSR_PR) texasr |= TEXASR_PR; if (kvmppc_get_msr(vcpu) & MSR_HV) texasr |= TEXASR_HV; vcpu->arch.texasr = texasr; vcpu->arch.tfiar = kvmppc_get_pc(vcpu); mtspr(SPRN_TEXASR, texasr); mtspr(SPRN_TFIAR, vcpu->arch.tfiar); } tm_disable(); /* * treclaim need quit to non-transactional state. */ guest_msr &= ~(MSR_TS_MASK); kvmppc_set_msr(vcpu, guest_msr); preempt_enable(); if (vcpu->arch.shadow_fscr & FSCR_TAR) mtspr(SPRN_TAR, vcpu->arch.tar); } static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu) { unsigned long guest_msr = kvmppc_get_msr(vcpu); preempt_disable(); /* * need flush FP/VEC/VSX to vcpu save area before * copy. */ kvmppc_giveup_ext(vcpu, MSR_VSX); kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); kvmppc_copyto_vcpu_tm(vcpu); kvmppc_save_tm_sprs(vcpu); /* * as a result of trecheckpoint. set TS to suspended. */ guest_msr &= ~(MSR_TS_MASK); guest_msr |= MSR_TS_S; kvmppc_set_msr(vcpu, guest_msr); kvmppc_restore_tm_pr(vcpu); preempt_enable(); } /* emulate tabort. at guest privilege state */ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) { /* currently we only emulate tabort. but no emulation of other * tabort variants since there is no kernel usage of them at * present. */ unsigned long guest_msr = kvmppc_get_msr(vcpu); uint64_t org_texasr; preempt_disable(); tm_enable(); org_texasr = mfspr(SPRN_TEXASR); tm_abort(ra_val); /* CR0 = 0 | MSR[TS] | 0 */ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) | (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) << CR0_SHIFT); vcpu->arch.texasr = mfspr(SPRN_TEXASR); /* failure recording depends on Failure Summary bit, * and tabort will be treated as nops in non-transactional * state. */ if (!(org_texasr & TEXASR_FS) && MSR_TM_ACTIVE(guest_msr)) { vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV); if (guest_msr & MSR_PR) vcpu->arch.texasr |= TEXASR_PR; if (guest_msr & MSR_HV) vcpu->arch.texasr |= TEXASR_HV; vcpu->arch.tfiar = kvmppc_get_pc(vcpu); } tm_disable(); preempt_enable(); } #endif int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { int emulated = EMULATE_DONE; int rt = get_rt(inst); int rs = get_rs(inst); int ra = get_ra(inst); int rb = get_rb(inst); u32 inst_sc = 0x44000002; switch (get_op(inst)) { case 0: emulated = EMULATE_FAIL; if ((kvmppc_get_msr(vcpu) & MSR_LE) && (inst == swab32(inst_sc))) { /* * This is the byte reversed syscall instruction of our * hypercall handler. Early versions of LE Linux didn't * swap the instructions correctly and ended up in * illegal instructions. * Just always fail hypercalls on these broken systems. 
*/ kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED); kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); emulated = EMULATE_DONE; } break; case 19: switch (get_xop(inst)) { case OP_19_XOP_RFID: case OP_19_XOP_RFI: { unsigned long srr1 = kvmppc_get_srr1(vcpu); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM unsigned long cur_msr = kvmppc_get_msr(vcpu); /* * add rules to fit in ISA specification regarding TM * state transition in TM disable/Suspended state, * and target TM state is TM inactive(00) state. (the * change should be suppressed). */ if (((cur_msr & MSR_TM) == 0) && ((srr1 & MSR_TM) == 0) && MSR_TM_SUSPENDED(cur_msr) && !MSR_TM_ACTIVE(srr1)) srr1 |= MSR_TS_S; #endif kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu)); kvmppc_set_msr(vcpu, srr1); *advance = 0; break; } default: emulated = EMULATE_FAIL; break; } break; case 31: switch (get_xop(inst)) { case OP_31_XOP_MFMSR: kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu)); break; case OP_31_XOP_MTMSRD: { ulong rs_val = kvmppc_get_gpr(vcpu, rs); if (inst & 0x10000) { ulong new_msr = kvmppc_get_msr(vcpu); new_msr &= ~(MSR_RI | MSR_EE); new_msr |= rs_val & (MSR_RI | MSR_EE); kvmppc_set_msr_fast(vcpu, new_msr); } else kvmppc_set_msr(vcpu, rs_val); break; } case OP_31_XOP_MTMSR: kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); break; case OP_31_XOP_MFSR: { int srnum; srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32); if (vcpu->arch.mmu.mfsrin) { u32 sr; sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); kvmppc_set_gpr(vcpu, rt, sr); } break; } case OP_31_XOP_MFSRIN: { int srnum; srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf; if (vcpu->arch.mmu.mfsrin) { u32 sr; sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); kvmppc_set_gpr(vcpu, rt, sr); } break; } case OP_31_XOP_MTSR: vcpu->arch.mmu.mtsrin(vcpu, (inst >> 16) & 0xf, kvmppc_get_gpr(vcpu, rs)); break; case OP_31_XOP_MTSRIN: vcpu->arch.mmu.mtsrin(vcpu, (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf, kvmppc_get_gpr(vcpu, rs)); break; case OP_31_XOP_TLBIE: case OP_31_XOP_TLBIEL: { bool large = (inst & 0x00200000) ? 
true : false; ulong addr = kvmppc_get_gpr(vcpu, rb); vcpu->arch.mmu.tlbie(vcpu, addr, large); break; } #ifdef CONFIG_PPC_BOOK3S_64 case OP_31_XOP_FAKE_SC1: { /* SC 1 papr hypercalls */ ulong cmd = kvmppc_get_gpr(vcpu, 3); int i; if ((kvmppc_get_msr(vcpu) & MSR_PR) || !vcpu->arch.papr_enabled) { emulated = EMULATE_FAIL; break; } if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) break; vcpu->run->papr_hcall.nr = cmd; for (i = 0; i < 9; ++i) { ulong gpr = kvmppc_get_gpr(vcpu, 4 + i); vcpu->run->papr_hcall.args[i] = gpr; } vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL; vcpu->arch.hcall_needed = 1; emulated = EMULATE_EXIT_USER; break; } #endif case OP_31_XOP_EIOIO: break; case OP_31_XOP_SLBMTE: if (!vcpu->arch.mmu.slbmte) return EMULATE_FAIL; vcpu->arch.mmu.slbmte(vcpu, kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rb)); break; case OP_31_XOP_SLBIE: if (!vcpu->arch.mmu.slbie) return EMULATE_FAIL; vcpu->arch.mmu.slbie(vcpu, kvmppc_get_gpr(vcpu, rb)); break; case OP_31_XOP_SLBIA: if (!vcpu->arch.mmu.slbia) return EMULATE_FAIL; vcpu->arch.mmu.slbia(vcpu); break; case OP_31_XOP_SLBFEE: if (!(inst & 1) || !vcpu->arch.mmu.slbfee) { return EMULATE_FAIL; } else { ulong b, t; ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK; b = kvmppc_get_gpr(vcpu, rb); if (!vcpu->arch.mmu.slbfee(vcpu, b, &t)) cr |= 2 << CR0_SHIFT; kvmppc_set_gpr(vcpu, rt, t); /* copy XER[SO] bit to CR0[SO] */ cr |= (vcpu->arch.regs.xer & 0x80000000) >> (31 - CR0_SHIFT); kvmppc_set_cr(vcpu, cr); } break; case OP_31_XOP_SLBMFEE: if (!vcpu->arch.mmu.slbmfee) { emulated = EMULATE_FAIL; } else { ulong t, rb_val; rb_val = kvmppc_get_gpr(vcpu, rb); t = vcpu->arch.mmu.slbmfee(vcpu, rb_val); kvmppc_set_gpr(vcpu, rt, t); } break; case OP_31_XOP_SLBMFEV: if (!vcpu->arch.mmu.slbmfev) { emulated = EMULATE_FAIL; } else { ulong t, rb_val; rb_val = kvmppc_get_gpr(vcpu, rb); t = vcpu->arch.mmu.slbmfev(vcpu, rb_val); kvmppc_set_gpr(vcpu, rt, t); } break; case OP_31_XOP_DCBA: /* Gets treated as NOP */ break; case OP_31_XOP_DCBZ: { ulong rb_val = kvmppc_get_gpr(vcpu, rb); ulong ra_val = 0; ulong addr, vaddr; u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; u32 dsisr; int r; if (ra) ra_val = kvmppc_get_gpr(vcpu, ra); addr = (ra_val + rb_val) & ~31ULL; if (!(kvmppc_get_msr(vcpu) & MSR_SF)) addr &= 0xffffffff; vaddr = addr; r = kvmppc_st(vcpu, &addr, 32, zeros, true); if ((r == -ENOENT) || (r == -EPERM)) { *advance = 0; kvmppc_set_dar(vcpu, vaddr); vcpu->arch.fault_dar = vaddr; dsisr = DSISR_ISSTORE; if (r == -ENOENT) dsisr |= DSISR_NOHPTE; else if (r == -EPERM) dsisr |= DSISR_PROTFAULT; kvmppc_set_dsisr(vcpu, dsisr); vcpu->arch.fault_dsisr = dsisr; kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); } break; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case OP_31_XOP_TBEGIN: { if (!cpu_has_feature(CPU_FTR_TM)) break; if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); emulated = EMULATE_AGAIN; break; } if (!(kvmppc_get_msr(vcpu) & MSR_PR)) { preempt_disable(); vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE | (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT))); vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT | (((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) << TEXASR_FC_LG)); if ((inst >> 21) & 0x1) vcpu->arch.texasr |= TEXASR_ROT; if (kvmppc_get_msr(vcpu) & MSR_HV) vcpu->arch.texasr |= TEXASR_HV; vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4; vcpu->arch.tfiar = kvmppc_get_pc(vcpu); kvmppc_restore_tm_sprs(vcpu); preempt_enable(); } else emulated = EMULATE_FAIL; break; } case OP_31_XOP_TABORT: { ulong guest_msr = kvmppc_get_msr(vcpu); unsigned 
long ra_val = 0; if (!cpu_has_feature(CPU_FTR_TM)) break; if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); emulated = EMULATE_AGAIN; break; } /* only emulate for privilege guest, since problem state * guest can run with TM enabled and we don't expect to * trap at here for that case. */ WARN_ON(guest_msr & MSR_PR); if (ra) ra_val = kvmppc_get_gpr(vcpu, ra); kvmppc_emulate_tabort(vcpu, ra_val); break; } case OP_31_XOP_TRECLAIM: { ulong guest_msr = kvmppc_get_msr(vcpu); unsigned long ra_val = 0; if (!cpu_has_feature(CPU_FTR_TM)) break; if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); emulated = EMULATE_AGAIN; break; } /* generate interrupts based on priorities */ if (guest_msr & MSR_PR) { /* Privileged Instruction type Program Interrupt */ kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); emulated = EMULATE_AGAIN; break; } if (!MSR_TM_ACTIVE(guest_msr)) { /* TM bad thing interrupt */ kvmppc_core_queue_program(vcpu, SRR1_PROGTM); emulated = EMULATE_AGAIN; break; } if (ra) ra_val = kvmppc_get_gpr(vcpu, ra); kvmppc_emulate_treclaim(vcpu, ra_val); break; } case OP_31_XOP_TRCHKPT: { ulong guest_msr = kvmppc_get_msr(vcpu); unsigned long texasr; if (!cpu_has_feature(CPU_FTR_TM)) break; if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); emulated = EMULATE_AGAIN; break; } /* generate interrupt based on priorities */ if (guest_msr & MSR_PR) { /* Privileged Instruction type Program Intr */ kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); emulated = EMULATE_AGAIN; break; } tm_enable(); texasr = mfspr(SPRN_TEXASR); tm_disable(); if (MSR_TM_ACTIVE(guest_msr) || !(texasr & (TEXASR_FS))) { /* TM bad thing interrupt */ kvmppc_core_queue_program(vcpu, SRR1_PROGTM); emulated = EMULATE_AGAIN; break; } kvmppc_emulate_trchkpt(vcpu); break; } #endif default: emulated = EMULATE_FAIL; } break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) emulated = kvmppc_emulate_paired_single(vcpu); return emulated; } void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, u32 val) { if (upper) { /* Upper BAT */ u32 bl = (val >> 2) & 0x7ff; bat->bepi_mask = (~bl << 17); bat->bepi = val & 0xfffe0000; bat->vs = (val & 2) ? 1 : 0; bat->vp = (val & 1) ? 1 : 0; bat->raw = (bat->raw & 0xffffffff00000000ULL) | val; } else { /* Lower BAT */ bat->brpn = val & 0xfffe0000; bat->wimg = (val >> 3) & 0xf; bat->pp = val & 3; bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32); } } static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); struct kvmppc_bat *bat; switch (sprn) { case SPRN_IBAT0U ... SPRN_IBAT3L: bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2]; break; case SPRN_IBAT4U ... SPRN_IBAT7L: bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)]; break; case SPRN_DBAT0U ... SPRN_DBAT3L: bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2]; break; case SPRN_DBAT4U ... SPRN_DBAT7L: bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)]; break; default: BUG(); } return bat; } int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) { int emulated = EMULATE_DONE; switch (sprn) { case SPRN_SDR1: if (!spr_allowed(vcpu, PRIV_HYPER)) goto unprivileged; to_book3s(vcpu)->sdr1 = spr_val; break; case SPRN_DSISR: kvmppc_set_dsisr(vcpu, spr_val); break; case SPRN_DAR: kvmppc_set_dar(vcpu, spr_val); break; case SPRN_HIOR: to_book3s(vcpu)->hior = spr_val; break; case SPRN_IBAT0U ... 
SPRN_IBAT3L: case SPRN_IBAT4U ... SPRN_IBAT7L: case SPRN_DBAT0U ... SPRN_DBAT3L: case SPRN_DBAT4U ... SPRN_DBAT7L: { struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn); kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val); /* BAT writes happen so rarely that we're ok to flush * everything here */ kvmppc_mmu_pte_flush(vcpu, 0, 0); kvmppc_mmu_flush_segments(vcpu); break; } case SPRN_HID0: to_book3s(vcpu)->hid[0] = spr_val; break; case SPRN_HID1: to_book3s(vcpu)->hid[1] = spr_val; break; case SPRN_HID2: to_book3s(vcpu)->hid[2] = spr_val; break; case SPRN_HID2_GEKKO: to_book3s(vcpu)->hid[2] = spr_val; /* HID2.PSE controls paired single on gekko */ switch (vcpu->arch.pvr) { case 0x00080200: /* lonestar 2.0 */ case 0x00088202: /* lonestar 2.2 */ case 0x70000100: /* gekko 1.0 */ case 0x00080100: /* gekko 2.0 */ case 0x00083203: /* gekko 2.3a */ case 0x00083213: /* gekko 2.3b */ case 0x00083204: /* gekko 2.4 */ case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */ case 0x00087200: /* broadway */ if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) { /* Native paired singles */ } else if (spr_val & (1 << 29)) { /* HID2.PSE */ vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE; kvmppc_giveup_ext(vcpu, MSR_FP); } else { vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE; } break; } break; case SPRN_HID4: case SPRN_HID4_GEKKO: to_book3s(vcpu)->hid[4] = spr_val; break; case SPRN_HID5: to_book3s(vcpu)->hid[5] = spr_val; /* guest HID5 set can change is_dcbz32 */ if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV)) vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; break; case SPRN_GQR0: case SPRN_GQR1: case SPRN_GQR2: case SPRN_GQR3: case SPRN_GQR4: case SPRN_GQR5: case SPRN_GQR6: case SPRN_GQR7: to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val; break; #ifdef CONFIG_PPC_BOOK3S_64 case SPRN_FSCR: kvmppc_set_fscr(vcpu, spr_val); break; case SPRN_BESCR: vcpu->arch.bescr = spr_val; break; case SPRN_EBBHR: vcpu->arch.ebbhr = spr_val; break; case SPRN_EBBRR: vcpu->arch.ebbrr = spr_val; break; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case SPRN_TFHAR: case SPRN_TEXASR: case SPRN_TFIAR: if (!cpu_has_feature(CPU_FTR_TM)) break; if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); emulated = EMULATE_AGAIN; break; } if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) && !((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) && (sprn == SPRN_TFHAR))) { /* it is illegal to mtspr() TM regs in * other than non-transactional state, with * the exception of TFHAR in suspend state. 
*/ kvmppc_core_queue_program(vcpu, SRR1_PROGTM); emulated = EMULATE_AGAIN; break; } tm_enable(); if (sprn == SPRN_TFHAR) mtspr(SPRN_TFHAR, spr_val); else if (sprn == SPRN_TEXASR) mtspr(SPRN_TEXASR, spr_val); else mtspr(SPRN_TFIAR, spr_val); tm_disable(); break; #endif #endif case SPRN_ICTC: case SPRN_THRM1: case SPRN_THRM2: case SPRN_THRM3: case SPRN_CTRLF: case SPRN_CTRLT: case SPRN_L2CR: case SPRN_DSCR: case SPRN_MMCR0_GEKKO: case SPRN_MMCR1_GEKKO: case SPRN_PMC1_GEKKO: case SPRN_PMC2_GEKKO: case SPRN_PMC3_GEKKO: case SPRN_PMC4_GEKKO: case SPRN_WPAR_GEKKO: case SPRN_MSSSR0: case SPRN_DABR: #ifdef CONFIG_PPC_BOOK3S_64 case SPRN_MMCRS: case SPRN_MMCRA: case SPRN_MMCR0: case SPRN_MMCR1: case SPRN_MMCR2: case SPRN_UMMCR2: case SPRN_UAMOR: case SPRN_IAMR: case SPRN_AMR: #endif break; unprivileged: default: pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn); if (sprn & 0x10) { if (kvmppc_get_msr(vcpu) & MSR_PR) { kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); emulated = EMULATE_AGAIN; } } else { if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); emulated = EMULATE_AGAIN; } } break; } return emulated; } int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) { int emulated = EMULATE_DONE; switch (sprn) { case SPRN_IBAT0U ... SPRN_IBAT3L: case SPRN_IBAT4U ... SPRN_IBAT7L: case SPRN_DBAT0U ... SPRN_DBAT3L: case SPRN_DBAT4U ... SPRN_DBAT7L: { struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn); if (sprn % 2) *spr_val = bat->raw >> 32; else *spr_val = bat->raw; break; } case SPRN_SDR1: if (!spr_allowed(vcpu, PRIV_HYPER)) goto unprivileged; *spr_val = to_book3s(vcpu)->sdr1; break; case SPRN_DSISR: *spr_val = kvmppc_get_dsisr(vcpu); break; case SPRN_DAR: *spr_val = kvmppc_get_dar(vcpu); break; case SPRN_HIOR: *spr_val = to_book3s(vcpu)->hior; break; case SPRN_HID0: *spr_val = to_book3s(vcpu)->hid[0]; break; case SPRN_HID1: *spr_val = to_book3s(vcpu)->hid[1]; break; case SPRN_HID2: case SPRN_HID2_GEKKO: *spr_val = to_book3s(vcpu)->hid[2]; break; case SPRN_HID4: case SPRN_HID4_GEKKO: *spr_val = to_book3s(vcpu)->hid[4]; break; case SPRN_HID5: *spr_val = to_book3s(vcpu)->hid[5]; break; case SPRN_CFAR: case SPRN_DSCR: *spr_val = 0; break; case SPRN_PURR: /* * On exit we would have updated purr */ *spr_val = vcpu->arch.purr; break; case SPRN_SPURR: /* * On exit we would have updated spurr */ *spr_val = vcpu->arch.spurr; break; case SPRN_VTB: *spr_val = to_book3s(vcpu)->vtb; break; case SPRN_IC: *spr_val = vcpu->arch.ic; break; case SPRN_GQR0: case SPRN_GQR1: case SPRN_GQR2: case SPRN_GQR3: case SPRN_GQR4: case SPRN_GQR5: case SPRN_GQR6: case SPRN_GQR7: *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]; break; #ifdef CONFIG_PPC_BOOK3S_64 case SPRN_FSCR: *spr_val = vcpu->arch.fscr; break; case SPRN_BESCR: *spr_val = vcpu->arch.bescr; break; case SPRN_EBBHR: *spr_val = vcpu->arch.ebbhr; break; case SPRN_EBBRR: *spr_val = vcpu->arch.ebbrr; break; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case SPRN_TFHAR: case SPRN_TEXASR: case SPRN_TFIAR: if (!cpu_has_feature(CPU_FTR_TM)) break; if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); emulated = EMULATE_AGAIN; break; } tm_enable(); if (sprn == SPRN_TFHAR) *spr_val = mfspr(SPRN_TFHAR); else if (sprn == SPRN_TEXASR) *spr_val = mfspr(SPRN_TEXASR); else if (sprn == SPRN_TFIAR) *spr_val = mfspr(SPRN_TFIAR); tm_disable(); break; #endif #endif case SPRN_THRM1: case SPRN_THRM2: case SPRN_THRM3: case SPRN_CTRLF: case SPRN_CTRLT: case SPRN_L2CR: case 
SPRN_MMCR0_GEKKO: case SPRN_MMCR1_GEKKO: case SPRN_PMC1_GEKKO: case SPRN_PMC2_GEKKO: case SPRN_PMC3_GEKKO: case SPRN_PMC4_GEKKO: case SPRN_WPAR_GEKKO: case SPRN_MSSSR0: case SPRN_DABR: #ifdef CONFIG_PPC_BOOK3S_64 case SPRN_MMCRS: case SPRN_MMCRA: case SPRN_MMCR0: case SPRN_MMCR1: case SPRN_MMCR2: case SPRN_UMMCR2: case SPRN_TIR: case SPRN_UAMOR: case SPRN_IAMR: case SPRN_AMR: #endif *spr_val = 0; break; default: unprivileged: pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn); if (sprn & 0x10) { if (kvmppc_get_msr(vcpu) & MSR_PR) { kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); emulated = EMULATE_AGAIN; } } else { if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); emulated = EMULATE_AGAIN; } } break; } return emulated; } u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst) { return make_dsisr(inst); } ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) { #ifdef CONFIG_PPC_BOOK3S_64 /* * Linux's fix_alignment() assumes that DAR is valid, so can we */ return vcpu->arch.fault_dar; #else ulong dar = 0; ulong ra = get_ra(inst); ulong rb = get_rb(inst); switch (get_op(inst)) { case OP_LFS: case OP_LFD: case OP_STFD: case OP_STFS: if (ra) dar = kvmppc_get_gpr(vcpu, ra); dar += (s32)((s16)inst); break; case 31: if (ra) dar = kvmppc_get_gpr(vcpu, ra); dar += kvmppc_get_gpr(vcpu, rb); break; default: printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); break; } return dar; #endif }
linux-master
arch/powerpc/kvm/book3s_emulate.c
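The big switch in kvmppc_core_emulate_op_pr() above dispatches on fields of the trapped instruction word, which it extracts with get_op()/get_xop()/get_rt()/get_ra()/get_rb() from asm/disassemble.h. The stand-alone decoder below re-derives those fields for one sample instruction (mfmsr r3, encoded 0x7c6000a6) to show what the dispatch keys on; the helpers here are local re-implementations written for this sketch, not the kernel's own.

/*
 * Local re-implementation of the PowerPC instruction-field accessors,
 * for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int get_op(uint32_t inst)  { return inst >> 26; }          /* primary opcode */
static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; } /* extended opcode */
static unsigned int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }
static unsigned int get_ra(uint32_t inst)  { return (inst >> 16) & 0x1f; }
static unsigned int get_rb(uint32_t inst)  { return (inst >> 11) & 0x1f; }

int main(void)
{
	uint32_t inst = 0x7c6000a6;	/* mfmsr r3 */

	printf("op=%u xop=%u rt=%u ra=%u rb=%u\n",
	       get_op(inst), get_xop(inst), get_rt(inst),
	       get_ra(inst), get_rb(inst));
	/* Prints op=31 xop=83 rt=3, i.e. the OP_31_XOP_MFMSR case above. */
	return 0;
}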
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright 2010 Paul Mackerras, IBM Corp. <[email protected]> */ #include <linux/types.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/highmem.h> #include <linux/gfp.h> #include <linux/slab.h> #include <linux/hugetlb.h> #include <linux/vmalloc.h> #include <linux/srcu.h> #include <linux/anon_inodes.h> #include <linux/file.h> #include <linux/debugfs.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/book3s/64/mmu-hash.h> #include <asm/hvcall.h> #include <asm/synch.h> #include <asm/ppc-opcode.h> #include <asm/cputable.h> #include <asm/pte-walk.h> #include "book3s.h" #include "trace_hv.h" //#define DEBUG_RESIZE_HPT 1 #ifdef DEBUG_RESIZE_HPT #define resize_hpt_debug(resize, ...) \ do { \ printk(KERN_DEBUG "RESIZE HPT %p: ", resize); \ printk(__VA_ARGS__); \ } while (0) #else #define resize_hpt_debug(resize, ...) \ do { } while (0) #endif static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, long pte_index, unsigned long pteh, unsigned long ptel, unsigned long *pte_idx_ret); struct kvm_resize_hpt { /* These fields read-only after init */ struct kvm *kvm; struct work_struct work; u32 order; /* These fields protected by kvm->arch.mmu_setup_lock */ /* Possible values and their usage: * <0 an error occurred during allocation, * -EBUSY allocation is in the progress, * 0 allocation made successfully. */ int error; /* Private to the work thread, until error != -EBUSY, * then protected by kvm->arch.mmu_setup_lock. */ struct kvm_hpt_info hpt; }; int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order) { unsigned long hpt = 0; int cma = 0; struct page *page = NULL; struct revmap_entry *rev; unsigned long npte; if ((order < PPC_MIN_HPT_ORDER) || (order > PPC_MAX_HPT_ORDER)) return -EINVAL; page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT)); if (page) { hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); memset((void *)hpt, 0, (1ul << order)); cma = 1; } if (!hpt) hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL |__GFP_NOWARN, order - PAGE_SHIFT); if (!hpt) return -ENOMEM; /* HPTEs are 2**4 bytes long */ npte = 1ul << (order - 4); /* Allocate reverse map array */ rev = vmalloc(array_size(npte, sizeof(struct revmap_entry))); if (!rev) { if (cma) kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT)); else free_pages(hpt, order - PAGE_SHIFT); return -ENOMEM; } info->order = order; info->virt = hpt; info->cma = cma; info->rev = rev; return 0; } void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info) { atomic64_set(&kvm->arch.mmio_update, 0); kvm->arch.hpt = *info; kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18); pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n", info->virt, (long)info->order, kvm->arch.lpid); } int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) { int err = -EBUSY; struct kvm_hpt_info info; mutex_lock(&kvm->arch.mmu_setup_lock); if (kvm->arch.mmu_ready) { kvm->arch.mmu_ready = 0; /* order mmu_ready vs. vcpus_running */ smp_mb(); if (atomic_read(&kvm->arch.vcpus_running)) { kvm->arch.mmu_ready = 1; goto out; } } if (kvm_is_radix(kvm)) { err = kvmppc_switch_mmu_to_hpt(kvm); if (err) goto out; } if (kvm->arch.hpt.order == order) { /* We already have a suitable HPT */ /* Set the entire HPT to 0, i.e. 
invalid HPTEs */ memset((void *)kvm->arch.hpt.virt, 0, 1ul << order); /* * Reset all the reverse-mapping chains for all memslots */ kvmppc_rmap_reset(kvm); err = 0; goto out; } if (kvm->arch.hpt.virt) { kvmppc_free_hpt(&kvm->arch.hpt); kvmppc_rmap_reset(kvm); } err = kvmppc_allocate_hpt(&info, order); if (err < 0) goto out; kvmppc_set_hpt(kvm, &info); out: if (err == 0) /* Ensure that each vcpu will flush its TLB on next entry. */ cpumask_setall(&kvm->arch.need_tlb_flush); mutex_unlock(&kvm->arch.mmu_setup_lock); return err; } void kvmppc_free_hpt(struct kvm_hpt_info *info) { vfree(info->rev); info->rev = NULL; if (info->cma) kvm_free_hpt_cma(virt_to_page((void *)info->virt), 1 << (info->order - PAGE_SHIFT)); else if (info->virt) free_pages(info->virt, info->order - PAGE_SHIFT); info->virt = 0; info->order = 0; } /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */ static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize) { return (pgsize > 0x1000) ? HPTE_V_LARGE : 0; } /* Bits in second HPTE dword for pagesize 4k, 64k or 16M */ static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize) { return (pgsize == 0x10000) ? 0x1000 : 0; } void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, unsigned long porder) { unsigned long i; unsigned long npages; unsigned long hp_v, hp_r; unsigned long addr, hash; unsigned long psize; unsigned long hp0, hp1; unsigned long idx_ret; long ret; struct kvm *kvm = vcpu->kvm; psize = 1ul << porder; npages = memslot->npages >> (porder - PAGE_SHIFT); /* VRMA can't be > 1TB */ if (npages > 1ul << (40 - porder)) npages = 1ul << (40 - porder); /* Can't use more than 1 HPTE per HPTEG */ if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1) npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1; hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) | HPTE_V_BOLTED | hpte0_pgsize_encoding(psize); hp1 = hpte1_pgsize_encoding(psize) | HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX; for (i = 0; i < npages; ++i) { addr = i << porder; /* can't use hpt_hash since va > 64 bits */ hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvmppc_hpt_mask(&kvm->arch.hpt); /* * We assume that the hash table is empty and no * vcpus are using it at this stage. Since we create * at most one HPTE per HPTEG, we just assume entry 7 * is available and use it. */ hash = (hash << 3) + 7; hp_v = hp0 | ((addr >> 16) & ~0x7fUL); hp_r = hp1 | addr; ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r, &idx_ret); if (ret != H_SUCCESS) { pr_err("KVM: map_vrma at %lx failed, ret=%ld\n", addr, ret); break; } } } int kvmppc_mmu_hv_init(void) { unsigned long nr_lpids; if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE)) return -EINVAL; if (cpu_has_feature(CPU_FTR_HVMODE)) { if (WARN_ON(mfspr(SPRN_LPID) != 0)) return -EINVAL; nr_lpids = 1UL << mmu_lpid_bits; } else { nr_lpids = 1UL << KVM_MAX_NESTED_GUESTS_SHIFT; } if (!cpu_has_feature(CPU_FTR_ARCH_300)) { /* POWER7 has 10-bit LPIDs, POWER8 has 12-bit LPIDs */ if (cpu_has_feature(CPU_FTR_ARCH_207S)) WARN_ON(nr_lpids != 1UL << 12); else WARN_ON(nr_lpids != 1UL << 10); /* * Reserve the last implemented LPID use in partition * switching for POWER7 and POWER8. 
*/ nr_lpids -= 1; } kvmppc_init_lpid(nr_lpids); return 0; } static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, long pte_index, unsigned long pteh, unsigned long ptel, unsigned long *pte_idx_ret) { long ret; preempt_disable(); ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel, kvm->mm->pgd, false, pte_idx_ret); preempt_enable(); if (ret == H_TOO_HARD) { /* this can't happen */ pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n"); ret = H_RESOURCE; /* or something */ } return ret; } static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu, gva_t eaddr) { u64 mask; int i; for (i = 0; i < vcpu->arch.slb_nr; i++) { if (!(vcpu->arch.slb[i].orige & SLB_ESID_V)) continue; if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T) mask = ESID_MASK_1T; else mask = ESID_MASK; if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0) return &vcpu->arch.slb[i]; } return NULL; } static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r, unsigned long ea) { unsigned long ra_mask; ra_mask = kvmppc_actual_pgsz(v, r) - 1; return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask); } static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite) { struct kvm *kvm = vcpu->kvm; struct kvmppc_slb *slbe; unsigned long slb_v; unsigned long pp, key; unsigned long v, orig_v, gr; __be64 *hptep; long int index; int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); if (kvm_is_radix(vcpu->kvm)) return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite); /* Get SLB entry */ if (virtmode) { slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr); if (!slbe) return -EINVAL; slb_v = slbe->origv; } else { /* real mode access */ slb_v = vcpu->kvm->arch.vrma_slb_v; } preempt_disable(); /* Find the HPTE in the hash table */ index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, HPTE_V_VALID | HPTE_V_ABSENT); if (index < 0) { preempt_enable(); return -ENOENT; } hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK; if (cpu_has_feature(CPU_FTR_ARCH_300)) v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1])); gr = kvm->arch.hpt.rev[index].guest_rpte; unlock_hpte(hptep, orig_v); preempt_enable(); gpte->eaddr = eaddr; gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); /* Get PP bits and key for permission check */ pp = gr & (HPTE_R_PP0 | HPTE_R_PP); key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; key &= slb_v; /* Calculate permissions */ gpte->may_read = hpte_read_permission(pp, key); gpte->may_write = hpte_write_permission(pp, key); gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G)); /* Storage key permission check for POWER7 */ if (data && virtmode) { int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr); if (amrfield & 1) gpte->may_read = 0; if (amrfield & 2) gpte->may_write = 0; } /* Get the guest physical address */ gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr); return 0; } /* * Quick test for whether an instruction is a load or a store. * If the instruction is a load or a store, then this will indicate * which it is, at least on server processors. (Embedded processors * have some external PID instructions that don't follow the rule * embodied here.) If the instruction isn't a load or store, then * this doesn't return anything useful. 
*/ static int instruction_is_store(ppc_inst_t instr) { unsigned int mask; unsigned int suffix; mask = 0x10000000; suffix = ppc_inst_val(instr); if (ppc_inst_prefixed(instr)) suffix = ppc_inst_suffix(instr); else if ((suffix & 0xfc000000) == 0x7c000000) mask = 0x100; /* major opcode 31 */ return (suffix & mask) != 0; } int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu, unsigned long gpa, gva_t ea, int is_store) { ppc_inst_t last_inst; bool is_prefixed = !!(kvmppc_get_msr(vcpu) & SRR1_PREFIXED); /* * Fast path - check if the guest physical address corresponds to a * device on the FAST_MMIO_BUS, if so we can avoid loading the * instruction all together, then we can just handle it and return. */ if (is_store) { int idx, ret; idx = srcu_read_lock(&vcpu->kvm->srcu); ret = kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, (gpa_t) gpa, 0, NULL); srcu_read_unlock(&vcpu->kvm->srcu, idx); if (!ret) { kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + (is_prefixed ? 8 : 4)); return RESUME_GUEST; } } /* * If we fail, we just return to the guest and try executing it again. */ if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != EMULATE_DONE) return RESUME_GUEST; /* * WARNING: We do not know for sure whether the instruction we just * read from memory is the same that caused the fault in the first * place. * * If the fault is prefixed but the instruction is not or vice * versa, try again so that we don't advance pc the wrong amount. */ if (ppc_inst_prefixed(last_inst) != is_prefixed) return RESUME_GUEST; /* * If the instruction we read is neither an load or a store, * then it can't access memory, so we don't need to worry about * enforcing access permissions. So, assuming it is a load or * store, we just check that its direction (load or store) is * consistent with the original fault, since that's what we * checked the access permissions against. If there is a mismatch * we just return and retry the instruction. */ if (instruction_is_store(last_inst) != !!is_store) return RESUME_GUEST; /* * Emulated accesses are emulated by looking at the hash for * translation once, then performing the access later. The * translation could be invalidated in the meantime in which * point performing the subsequent memory access on the old * physical address could possibly be a security hole for the * guest (but not the host). * * This is less of an issue for MMIO stores since they aren't * globally visible. It could be an issue for MMIO loads to * a certain extent but we'll ignore it for now. */ vcpu->arch.paddr_accessed = gpa; vcpu->arch.vaddr_accessed = ea; return kvmppc_emulate_mmio(vcpu); } int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu, unsigned long ea, unsigned long dsisr) { struct kvm *kvm = vcpu->kvm; unsigned long hpte[3], r; unsigned long hnow_v, hnow_r; __be64 *hptep; unsigned long mmu_seq, psize, pte_size; unsigned long gpa_base, gfn_base; unsigned long gpa, gfn, hva, pfn, hpa; struct kvm_memory_slot *memslot; unsigned long *rmap; struct revmap_entry *rev; struct page *page; long index, ret; bool is_ci; bool writing, write_ok; unsigned int shift; unsigned long rcbits; long mmio_update; pte_t pte, *ptep; if (kvm_is_radix(kvm)) return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr); /* * Real-mode code has already searched the HPT and found the * entry we're interested in. Lock the entry and check that * it hasn't changed. If it has, just return and re-execute the * instruction. 
*/ if (ea != vcpu->arch.pgfault_addr) return RESUME_GUEST; if (vcpu->arch.pgfault_cache) { mmio_update = atomic64_read(&kvm->arch.mmio_update); if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) { r = vcpu->arch.pgfault_cache->rpte; psize = kvmppc_actual_pgsz(vcpu->arch.pgfault_hpte[0], r); gpa_base = r & HPTE_R_RPN & ~(psize - 1); gfn_base = gpa_base >> PAGE_SHIFT; gpa = gpa_base | (ea & (psize - 1)); return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, dsisr & DSISR_ISSTORE); } } index = vcpu->arch.pgfault_index; hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); rev = &kvm->arch.hpt.rev[index]; preempt_disable(); while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) cpu_relax(); hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK; hpte[1] = be64_to_cpu(hptep[1]); hpte[2] = r = rev->guest_rpte; unlock_hpte(hptep, hpte[0]); preempt_enable(); if (cpu_has_feature(CPU_FTR_ARCH_300)) { hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]); hpte[1] = hpte_new_to_old_r(hpte[1]); } if (hpte[0] != vcpu->arch.pgfault_hpte[0] || hpte[1] != vcpu->arch.pgfault_hpte[1]) return RESUME_GUEST; /* Translate the logical address and get the page */ psize = kvmppc_actual_pgsz(hpte[0], r); gpa_base = r & HPTE_R_RPN & ~(psize - 1); gfn_base = gpa_base >> PAGE_SHIFT; gpa = gpa_base | (ea & (psize - 1)); gfn = gpa >> PAGE_SHIFT; memslot = gfn_to_memslot(kvm, gfn); trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr); /* No memslot means it's an emulated MMIO region */ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, dsisr & DSISR_ISSTORE); /* * This should never happen, because of the slot_is_aligned() * check in kvmppc_do_h_enter(). */ if (gfn_base < memslot->base_gfn) return -EFAULT; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); ret = -EFAULT; page = NULL; writing = (dsisr & DSISR_ISSTORE) != 0; /* If writing != 0, then the HPTE must allow writing, if we get here */ write_ok = writing; hva = gfn_to_hva_memslot(memslot, gfn); /* * Do a fast check first, since __gfn_to_pfn_memslot doesn't * do it with !atomic && !async, which is how we call it. * We always ask for write permission since the common case * is that the page is writable. */ if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) { write_ok = true; } else { /* Call KVM generic code to do the slow-path check */ pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, writing, &write_ok, NULL); if (is_error_noslot_pfn(pfn)) return -EFAULT; page = NULL; if (pfn_valid(pfn)) { page = pfn_to_page(pfn); if (PageReserved(page)) page = NULL; } } /* * Read the PTE from the process' radix tree and use that * so we get the shift and attribute bits. */ spin_lock(&kvm->mmu_lock); ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); pte = __pte(0); if (ptep) pte = READ_ONCE(*ptep); spin_unlock(&kvm->mmu_lock); /* * If the PTE disappeared temporarily due to a THP * collapse, just return and let the guest try again. */ if (!pte_present(pte)) { if (page) put_page(page); return RESUME_GUEST; } hpa = pte_pfn(pte) << PAGE_SHIFT; pte_size = PAGE_SIZE; if (shift) pte_size = 1ul << shift; is_ci = pte_ci(pte); if (psize > pte_size) goto out_put; if (pte_size > psize) hpa |= hva & (pte_size - psize); /* Check WIMG vs. the actual page we're accessing */ if (!hpte_cache_flags_ok(r, is_ci)) { if (is_ci) goto out_put; /* * Allow guest to map emulated device memory as * uncacheable, but actually make it cacheable. 
*/ r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; } /* * Set the HPTE to point to hpa. * Since the hpa is at PAGE_SIZE granularity, make sure we * don't mask out lower-order bits if psize < PAGE_SIZE. */ if (psize < PAGE_SIZE) psize = PAGE_SIZE; r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa; if (hpte_is_writable(r) && !write_ok) r = hpte_make_readonly(r); ret = RESUME_GUEST; preempt_disable(); while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) cpu_relax(); hnow_v = be64_to_cpu(hptep[0]); hnow_r = be64_to_cpu(hptep[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { hnow_v = hpte_new_to_old_v(hnow_v, hnow_r); hnow_r = hpte_new_to_old_r(hnow_r); } /* * If the HPT is being resized, don't update the HPTE, * instead let the guest retry after the resize operation is complete. * The synchronization for mmu_ready test vs. set is provided * by the HPTE lock. */ if (!kvm->arch.mmu_ready) goto out_unlock; if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] || rev->guest_rpte != hpte[2]) /* HPTE has been changed under us; let the guest retry */ goto out_unlock; hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; /* Always put the HPTE in the rmap chain for the page base address */ rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; lock_rmap(rmap); /* Check if we might have been invalidated; let the guest retry if so */ ret = RESUME_GUEST; if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) { unlock_rmap(rmap); goto out_unlock; } /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */ rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT; r &= rcbits | ~(HPTE_R_R | HPTE_R_C); if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) { /* HPTE was previously valid, so we need to invalidate it */ unlock_rmap(rmap); hptep[0] |= cpu_to_be64(HPTE_V_ABSENT); kvmppc_invalidate_hpte(kvm, hptep, index); /* don't lose previous R and C bits */ r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C); } else { kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0); } if (cpu_has_feature(CPU_FTR_ARCH_300)) { r = hpte_old_to_new_r(hpte[0], r); hpte[0] = hpte_old_to_new_v(hpte[0]); } hptep[1] = cpu_to_be64(r); eieio(); __unlock_hpte(hptep, hpte[0]); asm volatile("ptesync" : : : "memory"); preempt_enable(); if (page && hpte_is_writable(r)) set_page_dirty_lock(page); out_put: trace_kvm_page_fault_exit(vcpu, hpte, ret); if (page) put_page(page); return ret; out_unlock: __unlock_hpte(hptep, be64_to_cpu(hptep[0])); preempt_enable(); goto out_put; } void kvmppc_rmap_reset(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int srcu_idx, bkt; srcu_idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, bkt, slots) { /* Mutual exclusion with kvm_unmap_hva_range etc. */ spin_lock(&kvm->mmu_lock); /* * This assumes it is acceptable to lose reference and * change bits across a reset. 
*/ memset(memslot->arch.rmap, 0, memslot->npages * sizeof(*memslot->arch.rmap)); spin_unlock(&kvm->mmu_lock); } srcu_read_unlock(&kvm->srcu, srcu_idx); } /* Must be called with both HPTE and rmap locked */ static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, struct kvm_memory_slot *memslot, unsigned long *rmapp, unsigned long gfn) { __be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); struct revmap_entry *rev = kvm->arch.hpt.rev; unsigned long j, h; unsigned long ptel, psize, rcbits; j = rev[i].forw; if (j == i) { /* chain is now empty */ *rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX); } else { /* remove i from chain */ h = rev[i].back; rev[h].forw = j; rev[j].back = h; rev[i].forw = rev[i].back = i; *rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j; } /* Now check and modify the HPTE */ ptel = rev[i].guest_rpte; psize = kvmppc_actual_pgsz(be64_to_cpu(hptep[0]), ptel); if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) && hpte_rpn(ptel, psize) == gfn) { hptep[0] |= cpu_to_be64(HPTE_V_ABSENT); kvmppc_invalidate_hpte(kvm, hptep, i); hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO); /* Harvest R and C */ rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C); *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT; if ((rcbits & HPTE_R_C) && memslot->dirty_bitmap) kvmppc_update_dirty_map(memslot, gfn, psize); if (rcbits & ~rev[i].guest_rpte) { rev[i].guest_rpte = ptel | rcbits; note_hpte_modification(kvm, &rev[i]); } } } static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) { unsigned long i; __be64 *hptep; unsigned long *rmapp; rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; for (;;) { lock_rmap(rmapp); if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { unlock_rmap(rmapp); break; } /* * To avoid an ABBA deadlock with the HPTE lock bit, * we can't spin on the HPTE lock while holding the * rmap chain lock. */ i = *rmapp & KVMPPC_RMAP_INDEX; hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { /* unlock rmap before spinning on the HPTE lock */ unlock_rmap(rmapp); while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK) cpu_relax(); continue; } kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn); unlock_rmap(rmapp); __unlock_hpte(hptep, be64_to_cpu(hptep[0])); } } bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range) { gfn_t gfn; if (kvm_is_radix(kvm)) { for (gfn = range->start; gfn < range->end; gfn++) kvm_unmap_radix(kvm, range->slot, gfn); } else { for (gfn = range->start; gfn < range->end; gfn++) kvm_unmap_rmapp(kvm, range->slot, gfn); } return false; } void kvmppc_core_flush_memslot_hv(struct kvm *kvm, struct kvm_memory_slot *memslot) { unsigned long gfn; unsigned long n; unsigned long *rmapp; gfn = memslot->base_gfn; rmapp = memslot->arch.rmap; if (kvm_is_radix(kvm)) { kvmppc_radix_flush_memslot(kvm, memslot); return; } for (n = memslot->npages; n; --n, ++gfn) { /* * Testing the present bit without locking is OK because * the memslot has been marked invalid already, and hence * no new HPTEs referencing this page can be created, * thus the present bit can't go from 0 to 1. 
*/ if (*rmapp & KVMPPC_RMAP_PRESENT) kvm_unmap_rmapp(kvm, memslot, gfn); ++rmapp; } } static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) { struct revmap_entry *rev = kvm->arch.hpt.rev; unsigned long head, i, j; __be64 *hptep; bool ret = false; unsigned long *rmapp; rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; retry: lock_rmap(rmapp); if (*rmapp & KVMPPC_RMAP_REFERENCED) { *rmapp &= ~KVMPPC_RMAP_REFERENCED; ret = true; } if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { unlock_rmap(rmapp); return ret; } i = head = *rmapp & KVMPPC_RMAP_INDEX; do { hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); j = rev[i].forw; /* If this HPTE isn't referenced, ignore it */ if (!(be64_to_cpu(hptep[1]) & HPTE_R_R)) continue; if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { /* unlock rmap before spinning on the HPTE lock */ unlock_rmap(rmapp); while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK) cpu_relax(); goto retry; } /* Now check and modify the HPTE */ if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) && (be64_to_cpu(hptep[1]) & HPTE_R_R)) { kvmppc_clear_ref_hpte(kvm, hptep, i); if (!(rev[i].guest_rpte & HPTE_R_R)) { rev[i].guest_rpte |= HPTE_R_R; note_hpte_modification(kvm, &rev[i]); } ret = true; } __unlock_hpte(hptep, be64_to_cpu(hptep[0])); } while ((i = j) != head); unlock_rmap(rmapp); return ret; } bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) { gfn_t gfn; bool ret = false; if (kvm_is_radix(kvm)) { for (gfn = range->start; gfn < range->end; gfn++) ret |= kvm_age_radix(kvm, range->slot, gfn); } else { for (gfn = range->start; gfn < range->end; gfn++) ret |= kvm_age_rmapp(kvm, range->slot, gfn); } return ret; } static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) { struct revmap_entry *rev = kvm->arch.hpt.rev; unsigned long head, i, j; unsigned long *hp; bool ret = true; unsigned long *rmapp; rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; if (*rmapp & KVMPPC_RMAP_REFERENCED) return true; lock_rmap(rmapp); if (*rmapp & KVMPPC_RMAP_REFERENCED) goto out; if (*rmapp & KVMPPC_RMAP_PRESENT) { i = head = *rmapp & KVMPPC_RMAP_INDEX; do { hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4)); j = rev[i].forw; if (be64_to_cpu(hp[1]) & HPTE_R_R) goto out; } while ((i = j) != head); } ret = false; out: unlock_rmap(rmapp); return ret; } bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) { WARN_ON(range->start + 1 != range->end); if (kvm_is_radix(kvm)) return kvm_test_age_radix(kvm, range->slot, range->start); else return kvm_test_age_rmapp(kvm, range->slot, range->start); } bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) { WARN_ON(range->start + 1 != range->end); if (kvm_is_radix(kvm)) kvm_unmap_radix(kvm, range->slot, range->start); else kvm_unmap_rmapp(kvm, range->slot, range->start); return false; } static int vcpus_running(struct kvm *kvm) { return atomic_read(&kvm->arch.vcpus_running) != 0; } /* * Returns the number of system pages that are dirty. * This can be more than 1 if we find a huge-page HPTE. 
*/ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) { struct revmap_entry *rev = kvm->arch.hpt.rev; unsigned long head, i, j; unsigned long n; unsigned long v, r; __be64 *hptep; int npages_dirty = 0; retry: lock_rmap(rmapp); if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { unlock_rmap(rmapp); return npages_dirty; } i = head = *rmapp & KVMPPC_RMAP_INDEX; do { unsigned long hptep1; hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); j = rev[i].forw; /* * Checking the C (changed) bit here is racy since there * is no guarantee about when the hardware writes it back. * If the HPTE is not writable then it is stable since the * page can't be written to, and we would have done a tlbie * (which forces the hardware to complete any writeback) * when making the HPTE read-only. * If vcpus are running then this call is racy anyway * since the page could get dirtied subsequently, so we * expect there to be a further call which would pick up * any delayed C bit writeback. * Otherwise we need to do the tlbie even if C==0 in * order to pick up any delayed writeback of C. */ hptep1 = be64_to_cpu(hptep[1]); if (!(hptep1 & HPTE_R_C) && (!hpte_is_writable(hptep1) || vcpus_running(kvm))) continue; if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { /* unlock rmap before spinning on the HPTE lock */ unlock_rmap(rmapp); while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK)) cpu_relax(); goto retry; } /* Now check and modify the HPTE */ if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) { __unlock_hpte(hptep, be64_to_cpu(hptep[0])); continue; } /* need to make it temporarily absent so C is stable */ hptep[0] |= cpu_to_be64(HPTE_V_ABSENT); kvmppc_invalidate_hpte(kvm, hptep, i); v = be64_to_cpu(hptep[0]); r = be64_to_cpu(hptep[1]); if (r & HPTE_R_C) { hptep[1] = cpu_to_be64(r & ~HPTE_R_C); if (!(rev[i].guest_rpte & HPTE_R_C)) { rev[i].guest_rpte |= HPTE_R_C; note_hpte_modification(kvm, &rev[i]); } n = kvmppc_actual_pgsz(v, r); n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT; if (n > npages_dirty) npages_dirty = n; eieio(); } v &= ~HPTE_V_ABSENT; v |= HPTE_V_VALID; __unlock_hpte(hptep, v); } while ((i = j) != head); unlock_rmap(rmapp); return npages_dirty; } void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa, struct kvm_memory_slot *memslot, unsigned long *map) { unsigned long gfn; if (!vpa->dirty || !vpa->pinned_addr) return; gfn = vpa->gpa >> PAGE_SHIFT; if (gfn < memslot->base_gfn || gfn >= memslot->base_gfn + memslot->npages) return; vpa->dirty = false; if (map) __set_bit_le(gfn - memslot->base_gfn, map); } long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map) { unsigned long i; unsigned long *rmapp; preempt_disable(); rmapp = memslot->arch.rmap; for (i = 0; i < memslot->npages; ++i) { int npages = kvm_test_clear_dirty_npages(kvm, rmapp); /* * Note that if npages > 0 then i must be a multiple of npages, * since we always put huge-page HPTEs in the rmap chain * corresponding to their page base address. 
*/ if (npages) set_dirty_bits(map, i, npages); ++rmapp; } preempt_enable(); return 0; } void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, unsigned long *nb_ret) { struct kvm_memory_slot *memslot; unsigned long gfn = gpa >> PAGE_SHIFT; struct page *page, *pages[1]; int npages; unsigned long hva, offset; int srcu_idx; srcu_idx = srcu_read_lock(&kvm->srcu); memslot = gfn_to_memslot(kvm, gfn); if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) goto err; hva = gfn_to_hva_memslot(memslot, gfn); npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages); if (npages < 1) goto err; page = pages[0]; srcu_read_unlock(&kvm->srcu, srcu_idx); offset = gpa & (PAGE_SIZE - 1); if (nb_ret) *nb_ret = PAGE_SIZE - offset; return page_address(page) + offset; err: srcu_read_unlock(&kvm->srcu, srcu_idx); return NULL; } void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa, bool dirty) { struct page *page = virt_to_page(va); struct kvm_memory_slot *memslot; unsigned long gfn; int srcu_idx; put_page(page); if (!dirty) return; /* We need to mark this page dirty in the memslot dirty_bitmap, if any */ gfn = gpa >> PAGE_SHIFT; srcu_idx = srcu_read_lock(&kvm->srcu); memslot = gfn_to_memslot(kvm, gfn); if (memslot && memslot->dirty_bitmap) set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap); srcu_read_unlock(&kvm->srcu, srcu_idx); } /* * HPT resizing */ static int resize_hpt_allocate(struct kvm_resize_hpt *resize) { int rc; rc = kvmppc_allocate_hpt(&resize->hpt, resize->order); if (rc < 0) return rc; resize_hpt_debug(resize, "%s(): HPT @ 0x%lx\n", __func__, resize->hpt.virt); return 0; } static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize, unsigned long idx) { struct kvm *kvm = resize->kvm; struct kvm_hpt_info *old = &kvm->arch.hpt; struct kvm_hpt_info *new = &resize->hpt; unsigned long old_hash_mask = (1ULL << (old->order - 7)) - 1; unsigned long new_hash_mask = (1ULL << (new->order - 7)) - 1; __be64 *hptep, *new_hptep; unsigned long vpte, rpte, guest_rpte; int ret; struct revmap_entry *rev; unsigned long apsize, avpn, pteg, hash; unsigned long new_idx, new_pteg, replace_vpte; int pshift; hptep = (__be64 *)(old->virt + (idx << 4)); /* Guest is stopped, so new HPTEs can't be added or faulted * in, only unmapped or altered by host actions. 
So, it's * safe to check this before we take the HPTE lock */ vpte = be64_to_cpu(hptep[0]); if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT)) return 0; /* nothing to do */ while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) cpu_relax(); vpte = be64_to_cpu(hptep[0]); ret = 0; if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT)) /* Nothing to do */ goto out; if (cpu_has_feature(CPU_FTR_ARCH_300)) { rpte = be64_to_cpu(hptep[1]); vpte = hpte_new_to_old_v(vpte, rpte); } /* Unmap */ rev = &old->rev[idx]; guest_rpte = rev->guest_rpte; ret = -EIO; apsize = kvmppc_actual_pgsz(vpte, guest_rpte); if (!apsize) goto out; if (vpte & HPTE_V_VALID) { unsigned long gfn = hpte_rpn(guest_rpte, apsize); int srcu_idx = srcu_read_lock(&kvm->srcu); struct kvm_memory_slot *memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn); if (memslot) { unsigned long *rmapp; rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; lock_rmap(rmapp); kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn); unlock_rmap(rmapp); } srcu_read_unlock(&kvm->srcu, srcu_idx); } /* Reload PTE after unmap */ vpte = be64_to_cpu(hptep[0]); BUG_ON(vpte & HPTE_V_VALID); BUG_ON(!(vpte & HPTE_V_ABSENT)); ret = 0; if (!(vpte & HPTE_V_BOLTED)) goto out; rpte = be64_to_cpu(hptep[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { vpte = hpte_new_to_old_v(vpte, rpte); rpte = hpte_new_to_old_r(rpte); } pshift = kvmppc_hpte_base_page_shift(vpte, rpte); avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23); pteg = idx / HPTES_PER_GROUP; if (vpte & HPTE_V_SECONDARY) pteg = ~pteg; if (!(vpte & HPTE_V_1TB_SEG)) { unsigned long offset, vsid; /* We only have 28 - 23 bits of offset in avpn */ offset = (avpn & 0x1f) << 23; vsid = avpn >> 5; /* We can find more bits from the pteg value */ if (pshift < 23) offset |= ((vsid ^ pteg) & old_hash_mask) << pshift; hash = vsid ^ (offset >> pshift); } else { unsigned long offset, vsid; /* We only have 40 - 23 bits of seg_off in avpn */ offset = (avpn & 0x1ffff) << 23; vsid = avpn >> 17; if (pshift < 23) offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift; hash = vsid ^ (vsid << 25) ^ (offset >> pshift); } new_pteg = hash & new_hash_mask; if (vpte & HPTE_V_SECONDARY) new_pteg = ~hash & new_hash_mask; new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP); new_hptep = (__be64 *)(new->virt + (new_idx << 4)); replace_vpte = be64_to_cpu(new_hptep[0]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { unsigned long replace_rpte = be64_to_cpu(new_hptep[1]); replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte); } if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) { BUG_ON(new->order >= old->order); if (replace_vpte & HPTE_V_BOLTED) { if (vpte & HPTE_V_BOLTED) /* Bolted collision, nothing we can do */ ret = -ENOSPC; /* Discard the new HPTE */ goto out; } /* Discard the previous HPTE */ } if (cpu_has_feature(CPU_FTR_ARCH_300)) { rpte = hpte_old_to_new_r(vpte, rpte); vpte = hpte_old_to_new_v(vpte); } new_hptep[1] = cpu_to_be64(rpte); new->rev[new_idx].guest_rpte = guest_rpte; /* No need for a barrier, since new HPT isn't active */ new_hptep[0] = cpu_to_be64(vpte); unlock_hpte(new_hptep, vpte); out: unlock_hpte(hptep, vpte); return ret; } static int resize_hpt_rehash(struct kvm_resize_hpt *resize) { struct kvm *kvm = resize->kvm; unsigned long i; int rc; for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) { rc = resize_hpt_rehash_hpte(resize, i); if (rc != 0) return rc; } return 0; } static void resize_hpt_pivot(struct kvm_resize_hpt *resize) { struct kvm *kvm = resize->kvm; struct kvm_hpt_info hpt_tmp; /* Exchange 
the pending tables in the resize structure with * the active tables */ resize_hpt_debug(resize, "resize_hpt_pivot()\n"); spin_lock(&kvm->mmu_lock); asm volatile("ptesync" : : : "memory"); hpt_tmp = kvm->arch.hpt; kvmppc_set_hpt(kvm, &resize->hpt); resize->hpt = hpt_tmp; spin_unlock(&kvm->mmu_lock); synchronize_srcu_expedited(&kvm->srcu); if (cpu_has_feature(CPU_FTR_ARCH_300)) kvmppc_setup_partition_table(kvm); resize_hpt_debug(resize, "resize_hpt_pivot() done\n"); } static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) { if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock))) return; if (!resize) return; if (resize->error != -EBUSY) { if (resize->hpt.virt) kvmppc_free_hpt(&resize->hpt); kfree(resize); } if (kvm->arch.resize_hpt == resize) kvm->arch.resize_hpt = NULL; } static void resize_hpt_prepare_work(struct work_struct *work) { struct kvm_resize_hpt *resize = container_of(work, struct kvm_resize_hpt, work); struct kvm *kvm = resize->kvm; int err = 0; if (WARN_ON(resize->error != -EBUSY)) return; mutex_lock(&kvm->arch.mmu_setup_lock); /* Request is still current? */ if (kvm->arch.resize_hpt == resize) { /* We may request large allocations here: * do not sleep with kvm->arch.mmu_setup_lock held for a while. */ mutex_unlock(&kvm->arch.mmu_setup_lock); resize_hpt_debug(resize, "%s(): order = %d\n", __func__, resize->order); err = resize_hpt_allocate(resize); /* We have strict assumption about -EBUSY * when preparing for HPT resize. */ if (WARN_ON(err == -EBUSY)) err = -EINPROGRESS; mutex_lock(&kvm->arch.mmu_setup_lock); /* It is possible that kvm->arch.resize_hpt != resize * after we grab kvm->arch.mmu_setup_lock again. */ } resize->error = err; if (kvm->arch.resize_hpt != resize) resize_hpt_release(kvm, resize); mutex_unlock(&kvm->arch.mmu_setup_lock); } int kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, struct kvm_ppc_resize_hpt *rhpt) { unsigned long flags = rhpt->flags; unsigned long shift = rhpt->shift; struct kvm_resize_hpt *resize; int ret; if (flags != 0 || kvm_is_radix(kvm)) return -EINVAL; if (shift && ((shift < 18) || (shift > 46))) return -EINVAL; mutex_lock(&kvm->arch.mmu_setup_lock); resize = kvm->arch.resize_hpt; if (resize) { if (resize->order == shift) { /* Suitable resize in progress? 
*/ ret = resize->error; if (ret == -EBUSY) ret = 100; /* estimated time in ms */ else if (ret) resize_hpt_release(kvm, resize); goto out; } /* not suitable, cancel it */ resize_hpt_release(kvm, resize); } ret = 0; if (!shift) goto out; /* nothing to do */ /* start new resize */ resize = kzalloc(sizeof(*resize), GFP_KERNEL); if (!resize) { ret = -ENOMEM; goto out; } resize->error = -EBUSY; resize->order = shift; resize->kvm = kvm; INIT_WORK(&resize->work, resize_hpt_prepare_work); kvm->arch.resize_hpt = resize; schedule_work(&resize->work); ret = 100; /* estimated time in ms */ out: mutex_unlock(&kvm->arch.mmu_setup_lock); return ret; } static void resize_hpt_boot_vcpu(void *opaque) { /* Nothing to do, just force a KVM exit */ } int kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, struct kvm_ppc_resize_hpt *rhpt) { unsigned long flags = rhpt->flags; unsigned long shift = rhpt->shift; struct kvm_resize_hpt *resize; int ret; if (flags != 0 || kvm_is_radix(kvm)) return -EINVAL; if (shift && ((shift < 18) || (shift > 46))) return -EINVAL; mutex_lock(&kvm->arch.mmu_setup_lock); resize = kvm->arch.resize_hpt; /* This shouldn't be possible */ ret = -EIO; if (WARN_ON(!kvm->arch.mmu_ready)) goto out_no_hpt; /* Stop VCPUs from running while we mess with the HPT */ kvm->arch.mmu_ready = 0; smp_mb(); /* Boot all CPUs out of the guest so they re-read * mmu_ready */ on_each_cpu(resize_hpt_boot_vcpu, NULL, 1); ret = -ENXIO; if (!resize || (resize->order != shift)) goto out; ret = resize->error; if (ret) goto out; ret = resize_hpt_rehash(resize); if (ret) goto out; resize_hpt_pivot(resize); out: /* Let VCPUs run again */ kvm->arch.mmu_ready = 1; smp_mb(); out_no_hpt: resize_hpt_release(kvm, resize); mutex_unlock(&kvm->arch.mmu_setup_lock); return ret; } /* * Functions for reading and writing the hash table via reads and * writes on a file descriptor. * * Reads return the guest view of the hash table, which has to be * pieced together from the real hash table and the guest_rpte * values in the revmap array. * * On writes, each HPTE written is considered in turn, and if it * is valid, it is written to the HPT as if an H_ENTER with the * exact flag set was done. When the invalid count is non-zero * in the header written to the stream, the kernel will make * sure that that many HPTEs are invalid, and invalidate them * if not. */ struct kvm_htab_ctx { unsigned long index; unsigned long flags; struct kvm *kvm; int first_pass; }; #define HPTE_SIZE (2 * sizeof(unsigned long)) /* * Returns 1 if this HPT entry has been modified or has pending * R/C bit changes. 
*/ static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp) { unsigned long rcbits_unset; if (revp->guest_rpte & HPTE_GR_MODIFIED) return 1; /* Also need to consider changes in reference and changed bits */ rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) && (be64_to_cpu(hptp[1]) & rcbits_unset)) return 1; return 0; } static long record_hpte(unsigned long flags, __be64 *hptp, unsigned long *hpte, struct revmap_entry *revp, int want_valid, int first_pass) { unsigned long v, r, hr; unsigned long rcbits_unset; int ok = 1; int valid, dirty; /* Unmodified entries are uninteresting except on the first pass */ dirty = hpte_dirty(revp, hptp); if (!first_pass && !dirty) return 0; valid = 0; if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) { valid = 1; if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED)) valid = 0; } if (valid != want_valid) return 0; v = r = 0; if (valid || dirty) { /* lock the HPTE so it's stable and read it */ preempt_disable(); while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) cpu_relax(); v = be64_to_cpu(hptp[0]); hr = be64_to_cpu(hptp[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { v = hpte_new_to_old_v(v, hr); hr = hpte_new_to_old_r(hr); } /* re-evaluate valid and dirty from synchronized HPTE value */ valid = !!(v & HPTE_V_VALID); dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED); /* Harvest R and C into guest view if necessary */ rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); if (valid && (rcbits_unset & hr)) { revp->guest_rpte |= (hr & (HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED; dirty = 1; } if (v & HPTE_V_ABSENT) { v &= ~HPTE_V_ABSENT; v |= HPTE_V_VALID; valid = 1; } if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED)) valid = 0; r = revp->guest_rpte; /* only clear modified if this is the right sort of entry */ if (valid == want_valid && dirty) { r &= ~HPTE_GR_MODIFIED; revp->guest_rpte = r; } unlock_hpte(hptp, be64_to_cpu(hptp[0])); preempt_enable(); if (!(valid == want_valid && (first_pass || dirty))) ok = 0; } hpte[0] = cpu_to_be64(v); hpte[1] = cpu_to_be64(r); return ok; } static ssize_t kvm_htab_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct kvm_htab_ctx *ctx = file->private_data; struct kvm *kvm = ctx->kvm; struct kvm_get_htab_header hdr; __be64 *hptp; struct revmap_entry *revp; unsigned long i, nb, nw; unsigned long __user *lbuf; struct kvm_get_htab_header __user *hptr; unsigned long flags; int first_pass; unsigned long hpte[2]; if (!access_ok(buf, count)) return -EFAULT; if (kvm_is_radix(kvm)) return 0; first_pass = ctx->first_pass; flags = ctx->flags; i = ctx->index; hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); revp = kvm->arch.hpt.rev + i; lbuf = (unsigned long __user *)buf; nb = 0; while (nb + sizeof(hdr) + HPTE_SIZE < count) { /* Initialize header */ hptr = (struct kvm_get_htab_header __user *)buf; hdr.n_valid = 0; hdr.n_invalid = 0; nw = nb; nb += sizeof(hdr); lbuf = (unsigned long __user *)(buf + sizeof(hdr)); /* Skip uninteresting entries, i.e. 
clean on not-first pass */ if (!first_pass) { while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && !hpte_dirty(revp, hptp)) { ++i; hptp += 2; ++revp; } } hdr.index = i; /* Grab a series of valid entries */ while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && hdr.n_valid < 0xffff && nb + HPTE_SIZE < count && record_hpte(flags, hptp, hpte, revp, 1, first_pass)) { /* valid entry, write it out */ ++hdr.n_valid; if (__put_user(hpte[0], lbuf) || __put_user(hpte[1], lbuf + 1)) return -EFAULT; nb += HPTE_SIZE; lbuf += 2; ++i; hptp += 2; ++revp; } /* Now skip invalid entries while we can */ while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && hdr.n_invalid < 0xffff && record_hpte(flags, hptp, hpte, revp, 0, first_pass)) { /* found an invalid entry */ ++hdr.n_invalid; ++i; hptp += 2; ++revp; } if (hdr.n_valid || hdr.n_invalid) { /* write back the header */ if (__copy_to_user(hptr, &hdr, sizeof(hdr))) return -EFAULT; nw = nb; buf = (char __user *)lbuf; } else { nb = nw; } /* Check if we've wrapped around the hash table */ if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) { i = 0; ctx->first_pass = 0; break; } } ctx->index = i; return nb; } static ssize_t kvm_htab_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct kvm_htab_ctx *ctx = file->private_data; struct kvm *kvm = ctx->kvm; struct kvm_get_htab_header hdr; unsigned long i, j; unsigned long v, r; unsigned long __user *lbuf; __be64 *hptp; unsigned long tmp[2]; ssize_t nb; long int err, ret; int mmu_ready; int pshift; if (!access_ok(buf, count)) return -EFAULT; if (kvm_is_radix(kvm)) return -EINVAL; /* lock out vcpus from running while we're doing this */ mutex_lock(&kvm->arch.mmu_setup_lock); mmu_ready = kvm->arch.mmu_ready; if (mmu_ready) { kvm->arch.mmu_ready = 0; /* temporarily */ /* order mmu_ready vs. 
vcpus_running */ smp_mb(); if (atomic_read(&kvm->arch.vcpus_running)) { kvm->arch.mmu_ready = 1; mutex_unlock(&kvm->arch.mmu_setup_lock); return -EBUSY; } } err = 0; for (nb = 0; nb + sizeof(hdr) <= count; ) { err = -EFAULT; if (__copy_from_user(&hdr, buf, sizeof(hdr))) break; err = 0; if (nb + hdr.n_valid * HPTE_SIZE > count) break; nb += sizeof(hdr); buf += sizeof(hdr); err = -EINVAL; i = hdr.index; if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) || i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt)) break; hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); lbuf = (unsigned long __user *)buf; for (j = 0; j < hdr.n_valid; ++j) { __be64 hpte_v; __be64 hpte_r; err = -EFAULT; if (__get_user(hpte_v, lbuf) || __get_user(hpte_r, lbuf + 1)) goto out; v = be64_to_cpu(hpte_v); r = be64_to_cpu(hpte_r); err = -EINVAL; if (!(v & HPTE_V_VALID)) goto out; pshift = kvmppc_hpte_base_page_shift(v, r); if (pshift <= 0) goto out; lbuf += 2; nb += HPTE_SIZE; if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) kvmppc_do_h_remove(kvm, 0, i, 0, tmp); err = -EIO; ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, tmp); if (ret != H_SUCCESS) { pr_err("%s ret %ld i=%ld v=%lx r=%lx\n", __func__, ret, i, v, r); goto out; } if (!mmu_ready && is_vrma_hpte(v)) { unsigned long senc, lpcr; senc = slb_pgsize_encoding(1ul << pshift); kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | (VRMA_VSID << SLB_VSID_SHIFT_1T); if (!cpu_has_feature(CPU_FTR_ARCH_300)) { lpcr = senc << (LPCR_VRMASD_SH - 4); kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); } else { kvmppc_setup_partition_table(kvm); } mmu_ready = 1; } ++i; hptp += 2; } for (j = 0; j < hdr.n_invalid; ++j) { if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) kvmppc_do_h_remove(kvm, 0, i, 0, tmp); ++i; hptp += 2; } err = 0; } out: /* Order HPTE updates vs. mmu_ready */ smp_wmb(); kvm->arch.mmu_ready = mmu_ready; mutex_unlock(&kvm->arch.mmu_setup_lock); if (err) return err; return nb; } static int kvm_htab_release(struct inode *inode, struct file *filp) { struct kvm_htab_ctx *ctx = filp->private_data; filp->private_data = NULL; if (!(ctx->flags & KVM_GET_HTAB_WRITE)) atomic_dec(&ctx->kvm->arch.hpte_mod_interest); kvm_put_kvm(ctx->kvm); kfree(ctx); return 0; } static const struct file_operations kvm_htab_fops = { .read = kvm_htab_read, .write = kvm_htab_write, .llseek = default_llseek, .release = kvm_htab_release, }; int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf) { int ret; struct kvm_htab_ctx *ctx; int rwflag; /* reject flags we don't recognize */ if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE)) return -EINVAL; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; kvm_get_kvm(kvm); ctx->kvm = kvm; ctx->index = ghf->start_index; ctx->flags = ghf->flags; ctx->first_pass = 1; rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY; ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC); if (ret < 0) { kfree(ctx); kvm_put_kvm_no_destroy(kvm); return ret; } if (rwflag == O_RDONLY) { mutex_lock(&kvm->slots_lock); atomic_inc(&kvm->arch.hpte_mod_interest); /* make sure kvmppc_do_h_enter etc. 
see the increment */ synchronize_srcu_expedited(&kvm->srcu); mutex_unlock(&kvm->slots_lock); } return ret; } struct debugfs_htab_state { struct kvm *kvm; struct mutex mutex; unsigned long hpt_index; int chars_left; int buf_index; char buf[64]; }; static int debugfs_htab_open(struct inode *inode, struct file *file) { struct kvm *kvm = inode->i_private; struct debugfs_htab_state *p; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; kvm_get_kvm(kvm); p->kvm = kvm; mutex_init(&p->mutex); file->private_data = p; return nonseekable_open(inode, file); } static int debugfs_htab_release(struct inode *inode, struct file *file) { struct debugfs_htab_state *p = file->private_data; kvm_put_kvm(p->kvm); kfree(p); return 0; } static ssize_t debugfs_htab_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct debugfs_htab_state *p = file->private_data; ssize_t ret, r; unsigned long i, n; unsigned long v, hr, gr; struct kvm *kvm; __be64 *hptp; kvm = p->kvm; if (kvm_is_radix(kvm)) return 0; ret = mutex_lock_interruptible(&p->mutex); if (ret) return ret; if (p->chars_left) { n = p->chars_left; if (n > len) n = len; r = copy_to_user(buf, p->buf + p->buf_index, n); n -= r; p->chars_left -= n; p->buf_index += n; buf += n; len -= n; ret = n; if (r) { if (!n) ret = -EFAULT; goto out; } } i = p->hpt_index; hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt); ++i, hptp += 2) { if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))) continue; /* lock the HPTE so it's stable and read it */ preempt_disable(); while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) cpu_relax(); v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK; hr = be64_to_cpu(hptp[1]); gr = kvm->arch.hpt.rev[i].guest_rpte; unlock_hpte(hptp, v); preempt_enable(); if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT))) continue; n = scnprintf(p->buf, sizeof(p->buf), "%6lx %.16lx %.16lx %.16lx\n", i, v, hr, gr); p->chars_left = n; if (n > len) n = len; r = copy_to_user(buf, p->buf, n); n -= r; p->chars_left -= n; p->buf_index = n; buf += n; len -= n; ret += n; if (r) { if (!ret) ret = -EFAULT; goto out; } } p->hpt_index = i; out: mutex_unlock(&p->mutex); return ret; } static ssize_t debugfs_htab_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { return -EACCES; } static const struct file_operations debugfs_htab_fops = { .owner = THIS_MODULE, .open = debugfs_htab_open, .release = debugfs_htab_release, .read = debugfs_htab_read, .write = debugfs_htab_write, .llseek = generic_file_llseek, }; void kvmppc_mmu_debugfs_init(struct kvm *kvm) { debugfs_create_file("htab", 0400, kvm->debugfs_dentry, kvm, &debugfs_htab_fops); } void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu) { struct kvmppc_mmu *mmu = &vcpu->arch.mmu; vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */ mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate; vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; }
linux-master
arch/powerpc/kvm/book3s_64_mmu_hv.c
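The kvm_htab_read()/kvm_htab_write() pair above exposes the guest HPT through a file descriptor as a stream of headers, each followed by n_valid 16-byte HPTEs (two doublewords, stored with cpu_to_be64() in record_hpte()) and an n_invalid count that carries no payload. The sketch below is an illustrative userspace-style walker for that stream, not kernel code: the local struct htab_hdr mirrors the UAPI struct kvm_get_htab_header layout (index, n_valid, n_invalid), and the synthetic buffer in main() merely stands in for data read from a KVM_PPC_GET_HTAB_FD descriptor.

/*
 * Illustrative sketch (not kernel code): walking the HPTE stream produced
 * by kvm_htab_read() above.  struct htab_hdr is assumed to mirror the UAPI
 * struct kvm_get_htab_header; the buffer here is synthetic.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct htab_hdr {		/* mirrors struct kvm_get_htab_header */
	uint32_t index;
	uint16_t n_valid;
	uint16_t n_invalid;
};

static void walk_htab_stream(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct htab_hdr) <= len) {
		struct htab_hdr hdr;
		uint64_t vr[2];
		unsigned int i;

		memcpy(&hdr, buf + off, sizeof(hdr));
		off += sizeof(hdr);

		/* n_valid HPTEs (two 64-bit words each) follow the header */
		for (i = 0; i < hdr.n_valid; i++) {
			if (off + sizeof(vr) > len)
				return;
			memcpy(vr, buf + off, sizeof(vr));
			off += sizeof(vr);
			/*
			 * Raw doublewords as they appear in the stream;
			 * record_hpte() stores them big-endian, so a
			 * little-endian reader would byte-swap real data.
			 */
			printf("index %u+%u: v=%016llx r=%016llx\n",
			       hdr.index, i,
			       (unsigned long long)vr[0],
			       (unsigned long long)vr[1]);
		}
		/* n_invalid entries carry no payload, only the count */
		if (hdr.n_invalid)
			printf("index %u: %u invalid entries\n",
			       hdr.index + hdr.n_valid, hdr.n_invalid);
	}
}

int main(void)
{
	/* Synthetic example: one header, one valid HPTE, three invalid */
	uint8_t buf[sizeof(struct htab_hdr) + 16];
	struct htab_hdr hdr = { .index = 0, .n_valid = 1, .n_invalid = 3 };
	uint64_t vr[2] = { 0x8000000000000001ULL, 0x0000000000000190ULL };

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), vr, sizeof(vr));
	walk_htab_stream(buf, sizeof(buf));
	return 0;
}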
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved. * * Authors: * Alexander Graf <[email protected]> * Kevin Wolf <[email protected]> */ #include <linux/kvm_host.h> #include <linux/pkeys.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/book3s/64/mmu-hash.h> #include <asm/machdep.h> #include <asm/mmu_context.h> #include <asm/hw_irq.h> #include "trace_pr.h" #include "book3s.h" #define PTE_SIZE 12 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) { mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn, pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M, false); } /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using * a hash, so we don't waste cycles on looping */ static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) { return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK)); } static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) { struct kvmppc_sid_map *map; u16 sid_map_mask; if (kvmppc_get_msr(vcpu) & MSR_PR) gvsid |= VSID_PR; sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); map = &to_book3s(vcpu)->sid_map[sid_map_mask]; if (map->valid && (map->guest_vsid == gvsid)) { trace_kvm_book3s_slb_found(gvsid, map->host_vsid); return map; } map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; if (map->valid && (map->guest_vsid == gvsid)) { trace_kvm_book3s_slb_found(gvsid, map->host_vsid); return map; } trace_kvm_book3s_slb_fail(sid_map_mask, gvsid); return NULL; } int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, bool iswrite) { unsigned long vpn; kvm_pfn_t hpaddr; ulong hash, hpteg; u64 vsid; int ret; int rflags = 0x192; int vflags = 0; int attempt = 0; struct kvmppc_sid_map *map; int r = 0; int hpsize = MMU_PAGE_4K; bool writable; unsigned long mmu_seq; struct kvm *kvm = vcpu->kvm; struct hpte_cache *cpte; unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT; unsigned long pfn; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); /* Get host physical address for gpa */ pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); if (is_error_noslot_pfn(pfn)) { printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n", orig_pte->raddr); r = -EINVAL; goto out; } hpaddr = pfn << PAGE_SHIFT; /* and write the mapping ea -> hpa into the pt */ vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); map = find_sid_vsid(vcpu, vsid); if (!map) { ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr); WARN_ON(ret < 0); map = find_sid_vsid(vcpu, vsid); } if (!map) { printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n", vsid, orig_pte->eaddr); WARN_ON(true); r = -EINVAL; goto out; } vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M); kvm_set_pfn_accessed(pfn); if (!orig_pte->may_write || !writable) rflags |= PP_RXRX; else { mark_page_dirty(vcpu->kvm, gfn); kvm_set_pfn_dirty(pfn); } if (!orig_pte->may_execute) rflags |= HPTE_R_N; else kvmppc_mmu_flush_icache(pfn); rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY); rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg; /* * Use 64K pages if 
possible; otherwise, on 64K page kernels, * we need to transfer 4 more bits from guest real to host real addr. */ if (vsid & VSID_64K) hpsize = MMU_PAGE_64K; else hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK); hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M); cpte = kvmppc_mmu_hpte_cache_next(vcpu); spin_lock(&kvm->mmu_lock); if (!cpte || mmu_invalidate_retry(kvm, mmu_seq)) { r = -EAGAIN; goto out_unlock; } map_again: hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); /* In case we tried normal mapping already, let's nuke old entries */ if (attempt > 1) if (mmu_hash_ops.hpte_remove(hpteg) < 0) { r = -1; goto out_unlock; } ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags, hpsize, hpsize, MMU_SEGSIZE_256M); if (ret == -1) { /* If we couldn't map a primary PTE, try a secondary */ hash = ~hash; vflags ^= HPTE_V_SECONDARY; attempt++; goto map_again; } else if (ret < 0) { r = -EIO; goto out_unlock; } else { trace_kvm_book3s_64_mmu_map(rflags, hpteg, vpn, hpaddr, orig_pte); /* * The mmu_hash_ops code may give us a secondary entry even * though we asked for a primary. Fix up. */ if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) { hash = ~hash; hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); } cpte->slot = hpteg + (ret & 7); cpte->host_vpn = vpn; cpte->pte = *orig_pte; cpte->pfn = pfn; cpte->pagesize = hpsize; kvmppc_mmu_hpte_cache_map(vcpu, cpte); cpte = NULL; } out_unlock: spin_unlock(&kvm->mmu_lock); kvm_release_pfn_clean(pfn); if (cpte) kvmppc_mmu_hpte_cache_free(cpte); out: return r; } void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) { u64 mask = 0xfffffffffULL; u64 vsid; vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid); if (vsid & VSID_64K) mask = 0xffffffff0ULL; kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask); } static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) { unsigned long vsid_bits = VSID_BITS_65_256M; struct kvmppc_sid_map *map; struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); u16 sid_map_mask; static int backwards_map; if (kvmppc_get_msr(vcpu) & MSR_PR) gvsid |= VSID_PR; /* We might get collisions that trap in preceding order, so let's map them differently */ sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); if (backwards_map) sid_map_mask = SID_MAP_MASK - sid_map_mask; map = &to_book3s(vcpu)->sid_map[sid_map_mask]; /* Make sure we're taking the other map next time */ backwards_map = !backwards_map; /* Uh-oh ... out of mappings. Let's flush! */ if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) { vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first; memset(vcpu_book3s->sid_map, 0, sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); kvmppc_mmu_pte_flush(vcpu, 0, 0); kvmppc_mmu_flush_segments(vcpu); } if (mmu_has_feature(MMU_FTR_68_BIT_VA)) vsid_bits = VSID_BITS_256M; map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, VSID_MULTIPLIER_256M, vsid_bits); map->guest_vsid = gvsid; map->valid = true; trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid); return map; } static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) { struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); int i; int max_slb_size = 64; int found_inval = -1; int r; /* Are we overwriting? 
*/ for (i = 0; i < svcpu->slb_max; i++) { if (!(svcpu->slb[i].esid & SLB_ESID_V)) found_inval = i; else if ((svcpu->slb[i].esid & ESID_MASK) == esid) { r = i; goto out; } } /* Found a spare entry that was invalidated before */ if (found_inval >= 0) { r = found_inval; goto out; } /* No spare invalid entry, so create one */ if (mmu_slb_size < 64) max_slb_size = mmu_slb_size; /* Overflowing -> purge */ if ((svcpu->slb_max) == max_slb_size) kvmppc_mmu_flush_segments(vcpu); r = svcpu->slb_max; svcpu->slb_max++; out: svcpu_put(svcpu); return r; } int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) { struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); u64 esid = eaddr >> SID_SHIFT; u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V; u64 slb_vsid = SLB_VSID_USER; u64 gvsid; int slb_index; struct kvmppc_sid_map *map; int r = 0; slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK); if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { /* Invalidate an entry */ svcpu->slb[slb_index].esid = 0; r = -ENOENT; goto out; } map = find_sid_vsid(vcpu, gvsid); if (!map) map = create_sid_map(vcpu, gvsid); map->guest_esid = esid; slb_vsid |= (map->host_vsid << 12); slb_vsid &= ~SLB_VSID_KP; slb_esid |= slb_index; #ifdef CONFIG_PPC_64K_PAGES /* Set host segment base page size to 64K if possible */ if (gvsid & VSID_64K) slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp; #endif svcpu->slb[slb_index].esid = slb_esid; svcpu->slb[slb_index].vsid = slb_vsid; trace_kvm_book3s_slbmte(slb_vsid, slb_esid); out: svcpu_put(svcpu); return r; } void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size) { struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); ulong seg_mask = -seg_size; int i; for (i = 0; i < svcpu->slb_max; i++) { if ((svcpu->slb[i].esid & SLB_ESID_V) && (svcpu->slb[i].esid & seg_mask) == ea) { /* Invalidate this entry */ svcpu->slb[i].esid = 0; } } svcpu_put(svcpu); } void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) { struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); svcpu->slb_max = 0; svcpu->slb[0].esid = 0; svcpu_put(svcpu); } void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu) { kvmppc_mmu_hpte_destroy(vcpu); __destroy_context(to_book3s(vcpu)->context_id[0]); } int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int err; err = hash__alloc_context_id(); if (err < 0) return -1; vcpu3s->context_id[0] = err; vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1) << ESID_BITS) - 1; vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS; vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; kvmppc_mmu_hpte_init(vcpu); return 0; }
linux-master
arch/powerpc/kvm/book3s_64_mmu_host.c
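find_sid_vsid() and create_sid_map() above index the guest-to-host VSID map with an XOR-fold hash (kvmppc_sid_hash()) and probe both the hashed slot and its mirror slot (SID_MAP_MASK - hash), which pairs with the backwards_map flip in create_sid_map(). The sketch below reproduces that fold as standalone C purely for illustration; the 9-bit map size follows the file comment about 512 gvsid->hvsid entries, while the authoritative SID_MAP_BITS/SID_MAP_MASK values come from asm/kvm_book3s.h.

/*
 * Illustrative sketch (not kernel code): the XOR-fold used by
 * kvmppc_sid_hash() above.  SID_MAP_BITS here assumes the 512-entry map
 * described in the file comment; see asm/kvm_book3s.h for the real values.
 */
#include <stdint.h>
#include <stdio.h>

#define SID_MAP_BITS	9
#define SID_MAP_NUM	(1 << SID_MAP_BITS)
#define SID_MAP_MASK	(SID_MAP_NUM - 1)

static uint16_t sid_hash(uint64_t gvsid)
{
	uint16_t h = 0;
	int i;

	/* XOR together eight SID_MAP_BITS-wide slices of the guest VSID */
	for (i = 0; i < 8; i++)
		h ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
	return h;
}

int main(void)
{
	uint64_t gvsid = 0x123456789abcdefULL;	/* arbitrary example VSID */
	uint16_t h = sid_hash(gvsid);

	/* find_sid_vsid() checks this slot first, then its mirror */
	printf("primary slot %u, mirror slot %u\n",
	       (unsigned)h, (unsigned)(SID_MAP_MASK - h));
	return 0;
}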
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright 2016 Paul Mackerras, IBM Corp. <[email protected]> */ #include <linux/types.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/anon_inodes.h> #include <linux/file.h> #include <linux/debugfs.h> #include <linux/pgtable.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/pgalloc.h> #include <asm/pte-walk.h> #include <asm/ultravisor.h> #include <asm/kvm_book3s_uvmem.h> #include <asm/plpar_wrappers.h> #include <asm/firmware.h> /* * Supported radix tree geometry. * Like p9, we support either 5 or 9 bits at the first (lowest) level, * for a page size of 64k or 4k. */ static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 }; unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid, gva_t eaddr, void *to, void *from, unsigned long n) { int old_pid, old_lpid; unsigned long quadrant, ret = n; bool is_load = !!to; /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */ if (kvmhv_on_pseries()) return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr, (to != NULL) ? __pa(to): 0, (from != NULL) ? __pa(from): 0, n); if (eaddr & (0xFFFUL << 52)) return ret; quadrant = 1; if (!pid) quadrant = 2; if (is_load) from = (void *) (eaddr | (quadrant << 62)); else to = (void *) (eaddr | (quadrant << 62)); preempt_disable(); asm volatile("hwsync" ::: "memory"); isync(); /* switch the lpid first to avoid running host with unallocated pid */ old_lpid = mfspr(SPRN_LPID); if (old_lpid != lpid) mtspr(SPRN_LPID, lpid); if (quadrant == 1) { old_pid = mfspr(SPRN_PID); if (old_pid != pid) mtspr(SPRN_PID, pid); } isync(); pagefault_disable(); if (is_load) ret = __copy_from_user_inatomic(to, (const void __user *)from, n); else ret = __copy_to_user_inatomic((void __user *)to, from, n); pagefault_enable(); asm volatile("hwsync" ::: "memory"); isync(); /* switch the pid first to avoid running host with unallocated pid */ if (quadrant == 1 && pid != old_pid) mtspr(SPRN_PID, old_pid); if (lpid != old_lpid) mtspr(SPRN_LPID, old_lpid); isync(); preempt_enable(); return ret; } static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to, void *from, unsigned long n) { int lpid = vcpu->kvm->arch.lpid; int pid = vcpu->arch.pid; /* This would cause a data segment intr so don't allow the access */ if (eaddr & (0x3FFUL << 52)) return -EINVAL; /* Should we be using the nested lpid */ if (vcpu->arch.nested) lpid = vcpu->arch.nested->shadow_lpid; /* If accessing quadrant 3 then pid is expected to be 0 */ if (((eaddr >> 62) & 0x3) == 0x3) pid = 0; eaddr &= ~(0xFFFUL << 52); return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n); } long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to, unsigned long n) { long ret; ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n); if (ret > 0) memset(to + (n - ret), 0, ret); return ret; } long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from, unsigned long n) { return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n); } int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, u64 root, u64 *pte_ret_p) { struct kvm *kvm = vcpu->kvm; int ret, level, ps; unsigned long rts, bits, offset, index; u64 pte, base, gpa; __be64 rpte; rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) | ((root & RTS2_MASK) >> RTS2_SHIFT); bits = root & RPDS_MASK; base = root & RPDB_MASK; offset = rts + 31; /* 
Current implementations only support 52-bit space */ if (offset != 52) return -EINVAL; /* Walk each level of the radix tree */ for (level = 3; level >= 0; --level) { u64 addr; /* Check a valid size */ if (level && bits != p9_supported_radix_bits[level]) return -EINVAL; if (level == 0 && !(bits == 5 || bits == 9)) return -EINVAL; offset -= bits; index = (eaddr >> offset) & ((1UL << bits) - 1); /* Check that low bits of page table base are zero */ if (base & ((1UL << (bits + 3)) - 1)) return -EINVAL; /* Read the entry from guest memory */ addr = base + (index * sizeof(rpte)); kvm_vcpu_srcu_read_lock(vcpu); ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte)); kvm_vcpu_srcu_read_unlock(vcpu); if (ret) { if (pte_ret_p) *pte_ret_p = addr; return ret; } pte = __be64_to_cpu(rpte); if (!(pte & _PAGE_PRESENT)) return -ENOENT; /* Check if a leaf entry */ if (pte & _PAGE_PTE) break; /* Get ready to walk the next level */ base = pte & RPDB_MASK; bits = pte & RPDS_MASK; } /* Need a leaf at lowest level; 512GB pages not supported */ if (level < 0 || level == 3) return -EINVAL; /* We found a valid leaf PTE */ /* Offset is now log base 2 of the page size */ gpa = pte & 0x01fffffffffff000ul; if (gpa & ((1ul << offset) - 1)) return -EINVAL; gpa |= eaddr & ((1ul << offset) - 1); for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps) if (offset == mmu_psize_defs[ps].shift) break; gpte->page_size = ps; gpte->page_shift = offset; gpte->eaddr = eaddr; gpte->raddr = gpa; /* Work out permissions */ gpte->may_read = !!(pte & _PAGE_READ); gpte->may_write = !!(pte & _PAGE_WRITE); gpte->may_execute = !!(pte & _PAGE_EXEC); gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY); if (pte_ret_p) *pte_ret_p = pte; return 0; } /* * Used to walk a partition or process table radix tree in guest memory * Note: We exploit the fact that a partition table and a process * table have the same layout, a partition-scoped page table and a * process-scoped page table have the same layout, and the 2nd * doubleword of a partition table entry has the same layout as * the PTCR register. */ int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, u64 table, int table_index, u64 *pte_ret_p) { struct kvm *kvm = vcpu->kvm; int ret; unsigned long size, ptbl, root; struct prtb_entry entry; if ((table & PRTS_MASK) > 24) return -EINVAL; size = 1ul << ((table & PRTS_MASK) + 12); /* Is the table big enough to contain this entry? 
*/ if ((table_index * sizeof(entry)) >= size) return -EINVAL; /* Read the table to find the root of the radix tree */ ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry)); kvm_vcpu_srcu_read_lock(vcpu); ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry)); kvm_vcpu_srcu_read_unlock(vcpu); if (ret) return ret; /* Root is stored in the first double word */ root = be64_to_cpu(entry.prtb0); return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p); } int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite) { u32 pid; u64 pte; int ret; /* Work out effective PID */ switch (eaddr >> 62) { case 0: pid = vcpu->arch.pid; break; case 3: pid = 0; break; default: return -EINVAL; } ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte, vcpu->kvm->arch.process_table, pid, &pte); if (ret) return ret; /* Check privilege (applies only to process scoped translations) */ if (kvmppc_get_msr(vcpu) & MSR_PR) { if (pte & _PAGE_PRIVILEGED) { gpte->may_read = 0; gpte->may_write = 0; gpte->may_execute = 0; } } else { if (!(pte & _PAGE_PRIVILEGED)) { /* Check AMR/IAMR to see if strict mode is in force */ if (vcpu->arch.amr & (1ul << 62)) gpte->may_read = 0; if (vcpu->arch.amr & (1ul << 63)) gpte->may_write = 0; if (vcpu->arch.iamr & (1ul << 62)) gpte->may_execute = 0; } } return 0; } void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, unsigned int pshift, unsigned int lpid) { unsigned long psize = PAGE_SIZE; int psi; long rc; unsigned long rb; if (pshift) psize = 1UL << pshift; else pshift = PAGE_SHIFT; addr &= ~(psize - 1); if (!kvmhv_on_pseries()) { radix__flush_tlb_lpid_page(lpid, addr, psize); return; } psi = shift_to_mmu_psize(pshift); if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) { rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58)); rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1), lpid, rb); } else { rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU, H_RPTI_TYPE_NESTED | H_RPTI_TYPE_TLB, psize_to_rpti_pgsize(psi), addr, addr + psize); } if (rc) pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc); } static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid) { long rc; if (!kvmhv_on_pseries()) { radix__flush_pwc_lpid(lpid); return; } if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1), lpid, TLBIEL_INVAL_SET_LPID); else rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU, H_RPTI_TYPE_NESTED | H_RPTI_TYPE_PWC, H_RPTI_PAGE_ALL, 0, -1UL); if (rc) pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc); } static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, unsigned long clr, unsigned long set, unsigned long addr, unsigned int shift) { return __radix_pte_update(ptep, clr, set); } static void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr, pte_t *ptep, pte_t pte) { radix__set_pte_at(kvm->mm, addr, ptep, pte, 0); } static struct kmem_cache *kvm_pte_cache; static struct kmem_cache *kvm_pmd_cache; static pte_t *kvmppc_pte_alloc(void) { pte_t *pte; pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL); /* pmd_populate() will only reference _pa(pte). */ kmemleak_ignore(pte); return pte; } static void kvmppc_pte_free(pte_t *ptep) { kmem_cache_free(kvm_pte_cache, ptep); } static pmd_t *kvmppc_pmd_alloc(void) { pmd_t *pmd; pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL); /* pud_populate() will only reference _pa(pmd). 
*/ kmemleak_ignore(pmd); return pmd; } static void kvmppc_pmd_free(pmd_t *pmdp) { kmem_cache_free(kvm_pmd_cache, pmdp); } /* Called with kvm->mmu_lock held */ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, unsigned int shift, const struct kvm_memory_slot *memslot, unsigned int lpid) { unsigned long old; unsigned long gfn = gpa >> PAGE_SHIFT; unsigned long page_size = PAGE_SIZE; unsigned long hpa; old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift); kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid); /* The following only applies to L1 entries */ if (lpid != kvm->arch.lpid) return; if (!memslot) { memslot = gfn_to_memslot(kvm, gfn); if (!memslot) return; } if (shift) { /* 1GB or 2MB page */ page_size = 1ul << shift; if (shift == PMD_SHIFT) kvm->stat.num_2M_pages--; else if (shift == PUD_SHIFT) kvm->stat.num_1G_pages--; } gpa &= ~(page_size - 1); hpa = old & PTE_RPN_MASK; kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size); if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) kvmppc_update_dirty_map(memslot, gfn, page_size); } /* * kvmppc_free_p?d are used to free existing page tables, and recursively * descend and clear and free children. * Callers are responsible for flushing the PWC. * * When page tables are being unmapped/freed as part of page fault path * (full == false), valid ptes are generally not expected; however, there * is one situation where they arise, which is when dirty page logging is * turned off for a memslot while the VM is running. The new memslot * becomes visible to page faults before the memslot commit function * gets to flush the memslot, which can lead to a 2MB page mapping being * installed for a guest physical address where there are already 64kB * (or 4kB) mappings (of sub-pages of the same 2MB page). 
*/ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full, unsigned int lpid) { if (full) { memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE); } else { pte_t *p = pte; unsigned long it; for (it = 0; it < PTRS_PER_PTE; ++it, ++p) { if (pte_val(*p) == 0) continue; kvmppc_unmap_pte(kvm, p, pte_pfn(*p) << PAGE_SHIFT, PAGE_SHIFT, NULL, lpid); } } kvmppc_pte_free(pte); } static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full, unsigned int lpid) { unsigned long im; pmd_t *p = pmd; for (im = 0; im < PTRS_PER_PMD; ++im, ++p) { if (!pmd_present(*p)) continue; if (pmd_is_leaf(*p)) { if (full) { pmd_clear(p); } else { WARN_ON_ONCE(1); kvmppc_unmap_pte(kvm, (pte_t *)p, pte_pfn(*(pte_t *)p) << PAGE_SHIFT, PMD_SHIFT, NULL, lpid); } } else { pte_t *pte; pte = pte_offset_kernel(p, 0); kvmppc_unmap_free_pte(kvm, pte, full, lpid); pmd_clear(p); } } kvmppc_pmd_free(pmd); } static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, unsigned int lpid) { unsigned long iu; pud_t *p = pud; for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) { if (!pud_present(*p)) continue; if (pud_is_leaf(*p)) { pud_clear(p); } else { pmd_t *pmd; pmd = pmd_offset(p, 0); kvmppc_unmap_free_pmd(kvm, pmd, true, lpid); pud_clear(p); } } pud_free(kvm->mm, pud); } void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid) { unsigned long ig; for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) { p4d_t *p4d = p4d_offset(pgd, 0); pud_t *pud; if (!p4d_present(*p4d)) continue; pud = pud_offset(p4d, 0); kvmppc_unmap_free_pud(kvm, pud, lpid); p4d_clear(p4d); } } void kvmppc_free_radix(struct kvm *kvm) { if (kvm->arch.pgtable) { kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable, kvm->arch.lpid); pgd_free(kvm->mm, kvm->arch.pgtable); kvm->arch.pgtable = NULL; } } static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, unsigned long gpa, unsigned int lpid) { pte_t *pte = pte_offset_kernel(pmd, 0); /* * Clearing the pmd entry then flushing the PWC ensures that the pte * page no longer be cached by the MMU, so can be freed without * flushing the PWC again. */ pmd_clear(pmd); kvmppc_radix_flush_pwc(kvm, lpid); kvmppc_unmap_free_pte(kvm, pte, false, lpid); } static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, unsigned long gpa, unsigned int lpid) { pmd_t *pmd = pmd_offset(pud, 0); /* * Clearing the pud entry then flushing the PWC ensures that the pmd * page and any children pte pages will no longer be cached by the MMU, * so can be freed without flushing the PWC again. */ pud_clear(pud); kvmppc_radix_flush_pwc(kvm, lpid); kvmppc_unmap_free_pmd(kvm, pmd, false, lpid); } /* * There are a number of bits which may differ between different faults to * the same partition scope entry. RC bits, in the course of cleaning and * aging. And the write bit can change, either the access could have been * upgraded, or a read fault could happen concurrently with a write fault * that sets those bits first. 
*/ #define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)) int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, unsigned long gpa, unsigned int level, unsigned long mmu_seq, unsigned int lpid, unsigned long *rmapp, struct rmap_nested **n_rmap) { pgd_t *pgd; p4d_t *p4d; pud_t *pud, *new_pud = NULL; pmd_t *pmd, *new_pmd = NULL; pte_t *ptep, *new_ptep = NULL; int ret; /* Traverse the guest's 2nd-level tree, allocate new levels needed */ pgd = pgtable + pgd_index(gpa); p4d = p4d_offset(pgd, gpa); pud = NULL; if (p4d_present(*p4d)) pud = pud_offset(p4d, gpa); else new_pud = pud_alloc_one(kvm->mm, gpa); pmd = NULL; if (pud && pud_present(*pud) && !pud_is_leaf(*pud)) pmd = pmd_offset(pud, gpa); else if (level <= 1) new_pmd = kvmppc_pmd_alloc(); if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd))) new_ptep = kvmppc_pte_alloc(); /* Check if we might have been invalidated; let the guest retry if so */ spin_lock(&kvm->mmu_lock); ret = -EAGAIN; if (mmu_invalidate_retry(kvm, mmu_seq)) goto out_unlock; /* Now traverse again under the lock and change the tree */ ret = -ENOMEM; if (p4d_none(*p4d)) { if (!new_pud) goto out_unlock; p4d_populate(kvm->mm, p4d, new_pud); new_pud = NULL; } pud = pud_offset(p4d, gpa); if (pud_is_leaf(*pud)) { unsigned long hgpa = gpa & PUD_MASK; /* Check if we raced and someone else has set the same thing */ if (level == 2) { if (pud_raw(*pud) == pte_raw(pte)) { ret = 0; goto out_unlock; } /* Valid 1GB page here already, add our extra bits */ WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) & PTE_BITS_MUST_MATCH); kvmppc_radix_update_pte(kvm, (pte_t *)pud, 0, pte_val(pte), hgpa, PUD_SHIFT); ret = 0; goto out_unlock; } /* * If we raced with another CPU which has just put * a 1GB pte in after we saw a pmd page, try again. */ if (!new_pmd) { ret = -EAGAIN; goto out_unlock; } /* Valid 1GB page here already, remove it */ kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL, lpid); } if (level == 2) { if (!pud_none(*pud)) { /* * There's a page table page here, but we wanted to * install a large page, so remove and free the page * table page. */ kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid); } kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte); if (rmapp && n_rmap) kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); ret = 0; goto out_unlock; } if (pud_none(*pud)) { if (!new_pmd) goto out_unlock; pud_populate(kvm->mm, pud, new_pmd); new_pmd = NULL; } pmd = pmd_offset(pud, gpa); if (pmd_is_leaf(*pmd)) { unsigned long lgpa = gpa & PMD_MASK; /* Check if we raced and someone else has set the same thing */ if (level == 1) { if (pmd_raw(*pmd) == pte_raw(pte)) { ret = 0; goto out_unlock; } /* Valid 2MB page here already, add our extra bits */ WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) & PTE_BITS_MUST_MATCH); kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd), 0, pte_val(pte), lgpa, PMD_SHIFT); ret = 0; goto out_unlock; } /* * If we raced with another CPU which has just put * a 2MB pte in after we saw a pte page, try again. */ if (!new_ptep) { ret = -EAGAIN; goto out_unlock; } /* Valid 2MB page here already, remove it */ kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL, lpid); } if (level == 1) { if (!pmd_none(*pmd)) { /* * There's a page table page here, but we wanted to * install a large page, so remove and free the page * table page. 
*/ kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid); } kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte); if (rmapp && n_rmap) kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); ret = 0; goto out_unlock; } if (pmd_none(*pmd)) { if (!new_ptep) goto out_unlock; pmd_populate(kvm->mm, pmd, new_ptep); new_ptep = NULL; } ptep = pte_offset_kernel(pmd, gpa); if (pte_present(*ptep)) { /* Check if someone else set the same thing */ if (pte_raw(*ptep) == pte_raw(pte)) { ret = 0; goto out_unlock; } /* Valid page here already, add our extra bits */ WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) & PTE_BITS_MUST_MATCH); kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0); ret = 0; goto out_unlock; } kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte); if (rmapp && n_rmap) kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); ret = 0; out_unlock: spin_unlock(&kvm->mmu_lock); if (new_pud) pud_free(kvm->mm, new_pud); if (new_pmd) kvmppc_pmd_free(new_pmd); if (new_ptep) kvmppc_pte_free(new_ptep); return ret; } bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing, unsigned long gpa, unsigned int lpid) { unsigned long pgflags; unsigned int shift; pte_t *ptep; /* * Need to set an R or C bit in the 2nd-level tables; * since we are just helping out the hardware here, * it is sufficient to do what the hardware does. */ pgflags = _PAGE_ACCESSED; if (writing) pgflags |= _PAGE_DIRTY; if (nested) ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift); else ptep = find_kvm_secondary_pte(kvm, gpa, &shift); if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) { kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift); return true; } return false; } int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long gpa, struct kvm_memory_slot *memslot, bool writing, bool kvm_ro, pte_t *inserted_pte, unsigned int *levelp) { struct kvm *kvm = vcpu->kvm; struct page *page = NULL; unsigned long mmu_seq; unsigned long hva, gfn = gpa >> PAGE_SHIFT; bool upgrade_write = false; bool *upgrade_p = &upgrade_write; pte_t pte, *ptep; unsigned int shift, level; int ret; bool large_enable; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); /* * Do a fast check first, since __gfn_to_pfn_memslot doesn't * do it with !atomic && !async, which is how we call it. * We always ask for write permission since the common case * is that the page is writable. */ hva = gfn_to_hva_memslot(memslot, gfn); if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) { upgrade_write = true; } else { unsigned long pfn; /* Call KVM generic code to do the slow-path check */ pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, writing, upgrade_p, NULL); if (is_error_noslot_pfn(pfn)) return -EFAULT; page = NULL; if (pfn_valid(pfn)) { page = pfn_to_page(pfn); if (PageReserved(page)) page = NULL; } } /* * Read the PTE from the process' radix tree and use that * so we get the shift and attribute bits. */ spin_lock(&kvm->mmu_lock); ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); pte = __pte(0); if (ptep) pte = READ_ONCE(*ptep); spin_unlock(&kvm->mmu_lock); /* * If the PTE disappeared temporarily due to a THP * collapse, just return and let the guest try again. 
*/ if (!pte_present(pte)) { if (page) put_page(page); return RESUME_GUEST; } /* If we're logging dirty pages, always map single pages */ large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES); /* Get pte level from shift/size */ if (large_enable && shift == PUD_SHIFT && (gpa & (PUD_SIZE - PAGE_SIZE)) == (hva & (PUD_SIZE - PAGE_SIZE))) { level = 2; } else if (large_enable && shift == PMD_SHIFT && (gpa & (PMD_SIZE - PAGE_SIZE)) == (hva & (PMD_SIZE - PAGE_SIZE))) { level = 1; } else { level = 0; if (shift > PAGE_SHIFT) { /* * If the pte maps more than one page, bring over * bits from the virtual address to get the real * address of the specific single page we want. */ unsigned long rpnmask = (1ul << shift) - PAGE_SIZE; pte = __pte(pte_val(pte) | (hva & rpnmask)); } } pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED); if (writing || upgrade_write) { if (pte_val(pte) & _PAGE_WRITE) pte = __pte(pte_val(pte) | _PAGE_DIRTY); } else { pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY)); } /* Allocate space in the tree and write the PTE */ ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level, mmu_seq, kvm->arch.lpid, NULL, NULL); if (inserted_pte) *inserted_pte = pte; if (levelp) *levelp = level; if (page) { if (!ret && (pte_val(pte) & _PAGE_WRITE)) set_page_dirty_lock(page); put_page(page); } /* Increment number of large pages if we (successfully) inserted one */ if (!ret) { if (level == 1) kvm->stat.num_2M_pages++; else if (level == 2) kvm->stat.num_1G_pages++; } return ret; } int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu, unsigned long ea, unsigned long dsisr) { struct kvm *kvm = vcpu->kvm; unsigned long gpa, gfn; struct kvm_memory_slot *memslot; long ret; bool writing = !!(dsisr & DSISR_ISSTORE); bool kvm_ro = false; /* Check for unusual errors */ if (dsisr & DSISR_UNSUPP_MMU) { pr_err("KVM: Got unsupported MMU fault\n"); return -EFAULT; } if (dsisr & DSISR_BADACCESS) { /* Reflect to the guest as DSI */ pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr); kvmppc_core_queue_data_storage(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED, ea, dsisr); return RESUME_GUEST; } /* Translate the logical address */ gpa = vcpu->arch.fault_gpa & ~0xfffUL; gpa &= ~0xF000000000000000ul; gfn = gpa >> PAGE_SHIFT; if (!(dsisr & DSISR_PRTABLE_FAULT)) gpa |= ea & 0xfff; if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) return kvmppc_send_page_to_uv(kvm, gfn); /* Get the corresponding memslot */ memslot = gfn_to_memslot(kvm, gfn); /* No memslot means it's an emulated MMIO region */ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS | DSISR_SET_RC)) { /* * Bad address in guest page table tree, or other * unusual error - reflect it to the guest as DSI. 
*/ kvmppc_core_queue_data_storage(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED, ea, dsisr); return RESUME_GUEST; } return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing); } if (memslot->flags & KVM_MEM_READONLY) { if (writing) { /* give the guest a DSI */ kvmppc_core_queue_data_storage(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED, ea, DSISR_ISSTORE | DSISR_PROTFAULT); return RESUME_GUEST; } kvm_ro = true; } /* Failed to set the reference/change bits */ if (dsisr & DSISR_SET_RC) { spin_lock(&kvm->mmu_lock); if (kvmppc_hv_handle_set_rc(kvm, false, writing, gpa, kvm->arch.lpid)) dsisr &= ~DSISR_SET_RC; spin_unlock(&kvm->mmu_lock); if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE | DSISR_PROTFAULT | DSISR_SET_RC))) return RESUME_GUEST; } /* Try to insert a pte */ ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing, kvm_ro, NULL, NULL); if (ret == 0 || ret == -EAGAIN) ret = RESUME_GUEST; return ret; } /* Called with kvm->mmu_lock held */ void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) { pte_t *ptep; unsigned long gpa = gfn << PAGE_SHIFT; unsigned int shift; if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) { uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT); return; } ptep = find_kvm_secondary_pte(kvm, gpa, &shift); if (ptep && pte_present(*ptep)) kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, kvm->arch.lpid); } /* Called with kvm->mmu_lock held */ bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) { pte_t *ptep; unsigned long gpa = gfn << PAGE_SHIFT; unsigned int shift; bool ref = false; unsigned long old, *rmapp; if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) return ref; ptep = find_kvm_secondary_pte(kvm, gpa, &shift); if (ptep && pte_present(*ptep) && pte_young(*ptep)) { old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0, gpa, shift); /* XXX need to flush tlb here? */ /* Also clear bit in ptes in shadow pgtable for nested guests */ rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0, old & PTE_RPN_MASK, 1UL << shift); ref = true; } return ref; } /* Called with kvm->mmu_lock held */ bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) { pte_t *ptep; unsigned long gpa = gfn << PAGE_SHIFT; unsigned int shift; bool ref = false; if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) return ref; ptep = find_kvm_secondary_pte(kvm, gpa, &shift); if (ptep && pte_present(*ptep) && pte_young(*ptep)) ref = true; return ref; } /* Returns the number of PAGE_SIZE pages that are dirty */ static int kvm_radix_test_clear_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot, int pagenum) { unsigned long gfn = memslot->base_gfn + pagenum; unsigned long gpa = gfn << PAGE_SHIFT; pte_t *ptep, pte; unsigned int shift; int ret = 0; unsigned long old, *rmapp; if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) return ret; /* * For performance reasons we don't hold kvm->mmu_lock while walking the * partition scoped table. */ ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift); if (!ptep) return 0; pte = READ_ONCE(*ptep); if (pte_present(pte) && pte_dirty(pte)) { spin_lock(&kvm->mmu_lock); /* * Recheck the pte again */ if (pte_val(pte) != pte_val(*ptep)) { /* * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can * only find PAGE_SIZE pte entries here. We can continue * to use the pte addr returned by above page table * walk. 
*/ if (!pte_present(*ptep) || !pte_dirty(*ptep)) { spin_unlock(&kvm->mmu_lock); return 0; } } ret = 1; VM_BUG_ON(shift); old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0, gpa, shift); kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid); /* Also clear bit in ptes in shadow pgtable for nested guests */ rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0, old & PTE_RPN_MASK, 1UL << shift); spin_unlock(&kvm->mmu_lock); } return ret; } long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map) { unsigned long i, j; int npages; for (i = 0; i < memslot->npages; i = j) { npages = kvm_radix_test_clear_dirty(kvm, memslot, i); /* * Note that if npages > 0 then i must be a multiple of npages, * since huge pages are only used to back the guest at guest * real addresses that are a multiple of their size. * Since we have at most one PTE covering any given guest * real address, if npages > 1 we can skip to i + npages. */ j = i + 1; if (npages) { set_dirty_bits(map, i, npages); j = i + npages; } } return 0; } void kvmppc_radix_flush_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot) { unsigned long n; pte_t *ptep; unsigned long gpa; unsigned int shift; if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START) kvmppc_uvmem_drop_pages(memslot, kvm, true); if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) return; gpa = memslot->base_gfn << PAGE_SHIFT; spin_lock(&kvm->mmu_lock); for (n = memslot->npages; n; --n) { ptep = find_kvm_secondary_pte(kvm, gpa, &shift); if (ptep && pte_present(*ptep)) kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, kvm->arch.lpid); gpa += PAGE_SIZE; } /* * Increase the mmu notifier sequence number to prevent any page * fault that read the memslot earlier from writing a PTE. 
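	 * Such a fault will fail the mmu_invalidate_retry() check in
	 * kvmppc_create_pte() and get -EAGAIN back, so the guest simply
	 * retries the access instead of installing a stale mapping.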
*/ kvm->mmu_invalidate_seq++; spin_unlock(&kvm->mmu_lock); } static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info, int psize, int *indexp) { if (!mmu_psize_defs[psize].shift) return; info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift | (mmu_psize_defs[psize].ap << 29); ++(*indexp); } int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info) { int i; if (!radix_enabled()) return -EINVAL; memset(info, 0, sizeof(*info)); /* 4k page size */ info->geometries[0].page_shift = 12; info->geometries[0].level_bits[0] = 9; for (i = 1; i < 4; ++i) info->geometries[0].level_bits[i] = p9_supported_radix_bits[i]; /* 64k page size */ info->geometries[1].page_shift = 16; for (i = 0; i < 4; ++i) info->geometries[1].level_bits[i] = p9_supported_radix_bits[i]; i = 0; add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i); add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i); add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i); add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i); return 0; } int kvmppc_init_vm_radix(struct kvm *kvm) { kvm->arch.pgtable = pgd_alloc(kvm->mm); if (!kvm->arch.pgtable) return -ENOMEM; return 0; } static void pte_ctor(void *addr) { memset(addr, 0, RADIX_PTE_TABLE_SIZE); } static void pmd_ctor(void *addr) { memset(addr, 0, RADIX_PMD_TABLE_SIZE); } struct debugfs_radix_state { struct kvm *kvm; struct mutex mutex; unsigned long gpa; int lpid; int chars_left; int buf_index; char buf[128]; u8 hdr; }; static int debugfs_radix_open(struct inode *inode, struct file *file) { struct kvm *kvm = inode->i_private; struct debugfs_radix_state *p; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; kvm_get_kvm(kvm); p->kvm = kvm; mutex_init(&p->mutex); file->private_data = p; return nonseekable_open(inode, file); } static int debugfs_radix_release(struct inode *inode, struct file *file) { struct debugfs_radix_state *p = file->private_data; kvm_put_kvm(p->kvm); kfree(p); return 0; } static ssize_t debugfs_radix_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct debugfs_radix_state *p = file->private_data; ssize_t ret, r; unsigned long n; struct kvm *kvm; unsigned long gpa; pgd_t *pgt; struct kvm_nested_guest *nested; pgd_t *pgdp; p4d_t p4d, *p4dp; pud_t pud, *pudp; pmd_t pmd, *pmdp; pte_t *ptep; int shift; unsigned long pte; kvm = p->kvm; if (!kvm_is_radix(kvm)) return 0; ret = mutex_lock_interruptible(&p->mutex); if (ret) return ret; if (p->chars_left) { n = p->chars_left; if (n > len) n = len; r = copy_to_user(buf, p->buf + p->buf_index, n); n -= r; p->chars_left -= n; p->buf_index += n; buf += n; len -= n; ret = n; if (r) { if (!n) ret = -EFAULT; goto out; } } gpa = p->gpa; nested = NULL; pgt = NULL; while (len != 0 && p->lpid >= 0) { if (gpa >= RADIX_PGTABLE_RANGE) { gpa = 0; pgt = NULL; if (nested) { kvmhv_put_nested(nested); nested = NULL; } p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid); p->hdr = 0; if (p->lpid < 0) break; } if (!pgt) { if (p->lpid == 0) { pgt = kvm->arch.pgtable; } else { nested = kvmhv_get_nested(kvm, p->lpid, false); if (!nested) { gpa = RADIX_PGTABLE_RANGE; continue; } pgt = nested->shadow_pgtable; } } n = 0; if (!p->hdr) { if (p->lpid > 0) n = scnprintf(p->buf, sizeof(p->buf), "\nNested LPID %d: ", p->lpid); n += scnprintf(p->buf + n, sizeof(p->buf) - n, "pgdir: %lx\n", (unsigned long)pgt); p->hdr = 1; goto copy; } pgdp = pgt + pgd_index(gpa); p4dp = p4d_offset(pgdp, gpa); p4d = READ_ONCE(*p4dp); if (!(p4d_val(p4d) & _PAGE_PRESENT)) { gpa = (gpa & P4D_MASK) + P4D_SIZE; continue; } pudp = pud_offset(&p4d, gpa); pud = READ_ONCE(*pudp); if 
(!(pud_val(pud) & _PAGE_PRESENT)) { gpa = (gpa & PUD_MASK) + PUD_SIZE; continue; } if (pud_val(pud) & _PAGE_PTE) { pte = pud_val(pud); shift = PUD_SHIFT; goto leaf; } pmdp = pmd_offset(&pud, gpa); pmd = READ_ONCE(*pmdp); if (!(pmd_val(pmd) & _PAGE_PRESENT)) { gpa = (gpa & PMD_MASK) + PMD_SIZE; continue; } if (pmd_val(pmd) & _PAGE_PTE) { pte = pmd_val(pmd); shift = PMD_SHIFT; goto leaf; } ptep = pte_offset_kernel(&pmd, gpa); pte = pte_val(READ_ONCE(*ptep)); if (!(pte & _PAGE_PRESENT)) { gpa += PAGE_SIZE; continue; } shift = PAGE_SHIFT; leaf: n = scnprintf(p->buf, sizeof(p->buf), " %lx: %lx %d\n", gpa, pte, shift); gpa += 1ul << shift; copy: p->chars_left = n; if (n > len) n = len; r = copy_to_user(buf, p->buf, n); n -= r; p->chars_left -= n; p->buf_index = n; buf += n; len -= n; ret += n; if (r) { if (!ret) ret = -EFAULT; break; } } p->gpa = gpa; if (nested) kvmhv_put_nested(nested); out: mutex_unlock(&p->mutex); return ret; } static ssize_t debugfs_radix_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { return -EACCES; } static const struct file_operations debugfs_radix_fops = { .owner = THIS_MODULE, .open = debugfs_radix_open, .release = debugfs_radix_release, .read = debugfs_radix_read, .write = debugfs_radix_write, .llseek = generic_file_llseek, }; void kvmhv_radix_debugfs_init(struct kvm *kvm) { debugfs_create_file("radix", 0400, kvm->debugfs_dentry, kvm, &debugfs_radix_fops); } int kvmppc_radix_init(void) { unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE; kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor); if (!kvm_pte_cache) return -ENOMEM; size = sizeof(void *) << RADIX_PMD_INDEX_SIZE; kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor); if (!kvm_pmd_cache) { kmem_cache_destroy(kvm_pte_cache); return -ENOMEM; } return 0; } void kvmppc_radix_exit(void) { kmem_cache_destroy(kvm_pte_cache); kmem_cache_destroy(kvm_pmd_cache); }
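
/*
 * Note on the debugfs interface set up by kvmhv_radix_debugfs_init() above:
 * reading the per-VM "radix" file walks the partition-scoped tables and
 * prints one line per valid leaf PTE, roughly of the form
 *
 *	<guest real address>: <raw pte value> <page shift>
 *
 * preceded by a "pgdir: <address>" header (and a "Nested LPID <n>:" prefix
 * for each nested guest's shadow table).  The file typically lives at
 * something like /sys/kernel/debug/kvm/<pid>-<vm fd>/radix (exact path
 * depends on how the kvm debugfs directory is named on the running kernel).
 */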
linux-master
arch/powerpc/kvm/book3s_64_mmu_radix.c
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright IBM Corp. 2007 * Copyright 2010-2011 Freescale Semiconductor, Inc. * * Authors: Hollis Blanchard <[email protected]> * Christian Ehrhardt <[email protected]> * Scott Wood <[email protected]> * Varun Sethi <[email protected]> */ #include <linux/errno.h> #include <linux/err.h> #include <linux/kvm_host.h> #include <linux/gfp.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <asm/cputable.h> #include <linux/uaccess.h> #include <asm/interrupt.h> #include <asm/kvm_ppc.h> #include <asm/cacheflush.h> #include <asm/dbell.h> #include <asm/hw_irq.h> #include <asm/irq.h> #include <asm/time.h> #include "timing.h" #include "booke.h" #define CREATE_TRACE_POINTS #include "trace_booke.h" unsigned long kvmppc_booke_handlers; const struct _kvm_stats_desc kvm_vm_stats_desc[] = { KVM_GENERIC_VM_STATS(), STATS_DESC_ICOUNTER(VM, num_2M_pages), STATS_DESC_ICOUNTER(VM, num_1G_pages) }; const struct kvm_stats_header kvm_vm_stats_header = { .name_size = KVM_STATS_NAME_SIZE, .num_desc = ARRAY_SIZE(kvm_vm_stats_desc), .id_offset = sizeof(struct kvm_stats_header), .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + sizeof(kvm_vm_stats_desc), }; const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { KVM_GENERIC_VCPU_STATS(), STATS_DESC_COUNTER(VCPU, sum_exits), STATS_DESC_COUNTER(VCPU, mmio_exits), STATS_DESC_COUNTER(VCPU, signal_exits), STATS_DESC_COUNTER(VCPU, light_exits), STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits), STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits), STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits), STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits), STATS_DESC_COUNTER(VCPU, syscall_exits), STATS_DESC_COUNTER(VCPU, isi_exits), STATS_DESC_COUNTER(VCPU, dsi_exits), STATS_DESC_COUNTER(VCPU, emulated_inst_exits), STATS_DESC_COUNTER(VCPU, dec_exits), STATS_DESC_COUNTER(VCPU, ext_intr_exits), STATS_DESC_COUNTER(VCPU, halt_successful_wait), STATS_DESC_COUNTER(VCPU, dbell_exits), STATS_DESC_COUNTER(VCPU, gdbell_exits), STATS_DESC_COUNTER(VCPU, ld), STATS_DESC_COUNTER(VCPU, st), STATS_DESC_COUNTER(VCPU, pthru_all), STATS_DESC_COUNTER(VCPU, pthru_host), STATS_DESC_COUNTER(VCPU, pthru_bad_aff) }; const struct kvm_stats_header kvm_vcpu_stats_header = { .name_size = KVM_STATS_NAME_SIZE, .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), .id_offset = sizeof(struct kvm_stats_header), .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + sizeof(kvm_vcpu_stats_desc), }; /* TODO: use vcpu_printf() */ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) { int i; printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip, vcpu->arch.shared->msr); printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link, vcpu->arch.regs.ctr); printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, vcpu->arch.shared->srr1); printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); for (i = 0; i < 32; i += 4) { printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i, kvmppc_get_gpr(vcpu, i), kvmppc_get_gpr(vcpu, i+1), kvmppc_get_gpr(vcpu, i+2), kvmppc_get_gpr(vcpu, i+3)); } } #ifdef CONFIG_SPE void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu) { preempt_disable(); enable_kernel_spe(); kvmppc_save_guest_spe(vcpu); disable_kernel_spe(); vcpu->arch.shadow_msr &= ~MSR_SPE; preempt_enable(); } static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu) { preempt_disable(); enable_kernel_spe(); 
kvmppc_load_guest_spe(vcpu); disable_kernel_spe(); vcpu->arch.shadow_msr |= MSR_SPE; preempt_enable(); } static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu) { if (vcpu->arch.shared->msr & MSR_SPE) { if (!(vcpu->arch.shadow_msr & MSR_SPE)) kvmppc_vcpu_enable_spe(vcpu); } else if (vcpu->arch.shadow_msr & MSR_SPE) { kvmppc_vcpu_disable_spe(vcpu); } } #else static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu) { } #endif /* * Load up guest vcpu FP state if it's needed. * It also set the MSR_FP in thread so that host know * we're holding FPU, and then host can help to save * guest vcpu FP state if other threads require to use FPU. * This simulates an FP unavailable fault. * * It requires to be called with preemption disabled. */ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu) { #ifdef CONFIG_PPC_FPU if (!(current->thread.regs->msr & MSR_FP)) { enable_kernel_fp(); load_fp_state(&vcpu->arch.fp); disable_kernel_fp(); current->thread.fp_save_area = &vcpu->arch.fp; current->thread.regs->msr |= MSR_FP; } #endif } /* * Save guest vcpu FP state into thread. * It requires to be called with preemption disabled. */ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu) { #ifdef CONFIG_PPC_FPU if (current->thread.regs->msr & MSR_FP) giveup_fpu(current); current->thread.fp_save_area = NULL; #endif } static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu) { #if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV) /* We always treat the FP bit as enabled from the host perspective, so only need to adjust the shadow MSR */ vcpu->arch.shadow_msr &= ~MSR_FP; vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP; #endif } /* * Simulate AltiVec unavailable fault to load guest state * from thread to AltiVec unit. * It requires to be called with preemption disabled. */ static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu) { #ifdef CONFIG_ALTIVEC if (cpu_has_feature(CPU_FTR_ALTIVEC)) { if (!(current->thread.regs->msr & MSR_VEC)) { enable_kernel_altivec(); load_vr_state(&vcpu->arch.vr); disable_kernel_altivec(); current->thread.vr_save_area = &vcpu->arch.vr; current->thread.regs->msr |= MSR_VEC; } } #endif } /* * Save guest vcpu AltiVec state into thread. * It requires to be called with preemption disabled. */ static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu) { #ifdef CONFIG_ALTIVEC if (cpu_has_feature(CPU_FTR_ALTIVEC)) { if (current->thread.regs->msr & MSR_VEC) giveup_altivec(current); current->thread.vr_save_area = NULL; } #endif } static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu) { /* Synchronize guest's desire to get debug interrupts into shadow MSR */ #ifndef CONFIG_KVM_BOOKE_HV vcpu->arch.shadow_msr &= ~MSR_DE; vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE; #endif /* Force enable debug interrupts when user space wants to debug */ if (vcpu->guest_debug) { #ifdef CONFIG_KVM_BOOKE_HV /* * Since there is no shadow MSR, sync MSR_DE into the guest * visible MSR. */ vcpu->arch.shared->msr |= MSR_DE; #else vcpu->arch.shadow_msr |= MSR_DE; vcpu->arch.shared->msr &= ~MSR_DE; #endif } } /* * Helper function for "full" MSR writes. No need to call this if only * EE/CE/ME/DE/RI are changing. 
*/ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) { u32 old_msr = vcpu->arch.shared->msr; #ifdef CONFIG_KVM_BOOKE_HV new_msr |= MSR_GS; #endif vcpu->arch.shared->msr = new_msr; kvmppc_mmu_msr_notify(vcpu, old_msr); kvmppc_vcpu_sync_spe(vcpu); kvmppc_vcpu_sync_fpu(vcpu); kvmppc_vcpu_sync_debug(vcpu); } static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int priority) { trace_kvm_booke_queue_irqprio(vcpu, priority); set_bit(priority, &vcpu->arch.pending_exceptions); } void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags, ulong esr_flags) { vcpu->arch.queued_dear = dear_flags; vcpu->arch.queued_esr = esr_flags; kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); } void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags, ulong dear_flags, ulong esr_flags) { WARN_ON_ONCE(srr1_flags); vcpu->arch.queued_dear = dear_flags; vcpu->arch.queued_esr = esr_flags; kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); } void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu) { kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); } void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags) { vcpu->arch.queued_esr = esr_flags; kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); } static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags, ulong esr_flags) { vcpu->arch.queued_dear = dear_flags; vcpu->arch.queued_esr = esr_flags; kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT); } void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags) { vcpu->arch.queued_esr = esr_flags; kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); } void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags) { WARN_ON_ONCE(srr1_flags); kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); } #ifdef CONFIG_ALTIVEC void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags) { WARN_ON_ONCE(srr1_flags); kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL); } #endif void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) { kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER); } int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) { return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); } void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) { clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); } void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { unsigned int prio = BOOKE_IRQPRIO_EXTERNAL; if (irq->irq == KVM_INTERRUPT_SET_LEVEL) prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL; kvmppc_booke_queue_irqprio(vcpu, prio); } void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu) { clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); } static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu) { kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG); } static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu) { clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions); } void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu) { kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG); } void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu) { clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions); } static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) { kvmppc_set_srr0(vcpu, srr0); kvmppc_set_srr1(vcpu, srr1); } static void set_guest_csrr(struct 
kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) { vcpu->arch.csrr0 = srr0; vcpu->arch.csrr1 = srr1; } static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) { if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) { vcpu->arch.dsrr0 = srr0; vcpu->arch.dsrr1 = srr1; } else { set_guest_csrr(vcpu, srr0, srr1); } } static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) { vcpu->arch.mcsrr0 = srr0; vcpu->arch.mcsrr1 = srr1; } /* Deliver the interrupt of the corresponding priority, if possible. */ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) { int allowed = 0; ulong msr_mask = 0; bool update_esr = false, update_dear = false, update_epr = false; ulong crit_raw = vcpu->arch.shared->critical; ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); bool crit; bool keep_irq = false; enum int_class int_class; ulong new_msr = vcpu->arch.shared->msr; /* Truncate crit indicators in 32 bit mode */ if (!(vcpu->arch.shared->msr & MSR_SF)) { crit_raw &= 0xffffffff; crit_r1 &= 0xffffffff; } /* Critical section when crit == r1 */ crit = (crit_raw == crit_r1); /* ... and we're in supervisor mode */ crit = crit && !(vcpu->arch.shared->msr & MSR_PR); if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) { priority = BOOKE_IRQPRIO_EXTERNAL; keep_irq = true; } if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags) update_epr = true; switch (priority) { case BOOKE_IRQPRIO_DTLB_MISS: case BOOKE_IRQPRIO_DATA_STORAGE: case BOOKE_IRQPRIO_ALIGNMENT: update_dear = true; fallthrough; case BOOKE_IRQPRIO_INST_STORAGE: case BOOKE_IRQPRIO_PROGRAM: update_esr = true; fallthrough; case BOOKE_IRQPRIO_ITLB_MISS: case BOOKE_IRQPRIO_SYSCALL: case BOOKE_IRQPRIO_FP_UNAVAIL: #ifdef CONFIG_SPE_POSSIBLE case BOOKE_IRQPRIO_SPE_UNAVAIL: case BOOKE_IRQPRIO_SPE_FP_DATA: case BOOKE_IRQPRIO_SPE_FP_ROUND: #endif #ifdef CONFIG_ALTIVEC case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL: case BOOKE_IRQPRIO_ALTIVEC_ASSIST: #endif case BOOKE_IRQPRIO_AP_UNAVAIL: allowed = 1; msr_mask = MSR_CE | MSR_ME | MSR_DE; int_class = INT_CLASS_NONCRIT; break; case BOOKE_IRQPRIO_WATCHDOG: case BOOKE_IRQPRIO_CRITICAL: case BOOKE_IRQPRIO_DBELL_CRIT: allowed = vcpu->arch.shared->msr & MSR_CE; allowed = allowed && !crit; msr_mask = MSR_ME; int_class = INT_CLASS_CRIT; break; case BOOKE_IRQPRIO_MACHINE_CHECK: allowed = vcpu->arch.shared->msr & MSR_ME; allowed = allowed && !crit; int_class = INT_CLASS_MC; break; case BOOKE_IRQPRIO_DECREMENTER: case BOOKE_IRQPRIO_FIT: keep_irq = true; fallthrough; case BOOKE_IRQPRIO_EXTERNAL: case BOOKE_IRQPRIO_DBELL: allowed = vcpu->arch.shared->msr & MSR_EE; allowed = allowed && !crit; msr_mask = MSR_CE | MSR_ME | MSR_DE; int_class = INT_CLASS_NONCRIT; break; case BOOKE_IRQPRIO_DEBUG: allowed = vcpu->arch.shared->msr & MSR_DE; allowed = allowed && !crit; msr_mask = MSR_ME; if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) int_class = INT_CLASS_DBG; else int_class = INT_CLASS_CRIT; break; } if (allowed) { switch (int_class) { case INT_CLASS_NONCRIT: set_guest_srr(vcpu, vcpu->arch.regs.nip, vcpu->arch.shared->msr); break; case INT_CLASS_CRIT: set_guest_csrr(vcpu, vcpu->arch.regs.nip, vcpu->arch.shared->msr); break; case INT_CLASS_DBG: set_guest_dsrr(vcpu, vcpu->arch.regs.nip, vcpu->arch.shared->msr); break; case INT_CLASS_MC: set_guest_mcsrr(vcpu, vcpu->arch.regs.nip, vcpu->arch.shared->msr); break; } vcpu->arch.regs.nip = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; if (update_esr) kvmppc_set_esr(vcpu, vcpu->arch.queued_esr); if (update_dear) kvmppc_set_dar(vcpu, vcpu->arch.queued_dear); if 
(update_epr) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies until the next watchdog timeout is
	 * >= NEXT_TIMER_MAX_DELTA, do not run the watchdog timer, as this
	 * can break the timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

static void kvmppc_watchdog_func(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is the final watchdog expiry and some action is required,
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
*/ if (!final) arm_next_watchdog(vcpu); } static void update_timer_ints(struct kvm_vcpu *vcpu) { if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS)) kvmppc_core_queue_dec(vcpu); else kvmppc_core_dequeue_dec(vcpu); if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS)) kvmppc_core_queue_watchdog(vcpu); else kvmppc_core_dequeue_watchdog(vcpu); } static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu) { unsigned long *pending = &vcpu->arch.pending_exceptions; unsigned int priority; priority = __ffs(*pending); while (priority < BOOKE_IRQPRIO_MAX) { if (kvmppc_booke_irqprio_deliver(vcpu, priority)) break; priority = find_next_bit(pending, BITS_PER_BYTE * sizeof(*pending), priority + 1); } /* Tell the guest about our interrupt status */ vcpu->arch.shared->int_pending = !!*pending; } /* Check pending exceptions and deliver one, if possible. */ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) { int r = 0; WARN_ON_ONCE(!irqs_disabled()); kvmppc_core_check_exceptions(vcpu); if (kvm_request_pending(vcpu)) { /* Exception delivery raised request; start over */ return 1; } if (vcpu->arch.shared->msr & MSR_WE) { local_irq_enable(); kvm_vcpu_halt(vcpu); hard_irq_disable(); kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); r = 1; } return r; } int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) { int r = 1; /* Indicate we want to get back into the guest */ if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) update_timer_ints(vcpu); #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvmppc_core_flush_tlb(vcpu); #endif if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_WATCHDOG; r = 0; } if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) { vcpu->run->epr.epr = 0; vcpu->arch.epr_needed = true; vcpu->run->exit_reason = KVM_EXIT_EPR; r = 0; } return r; } int kvmppc_vcpu_run(struct kvm_vcpu *vcpu) { int ret, s; struct debug_reg debug; if (!vcpu->arch.sane) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return -EINVAL; } s = kvmppc_prepare_to_enter(vcpu); if (s <= 0) { ret = s; goto out; } /* interrupts now hard-disabled */ #ifdef CONFIG_PPC_FPU /* Save userspace FPU state in stack */ enable_kernel_fp(); /* * Since we can't trap on MSR_FP in GS-mode, we consider the guest * as always using the FPU. */ kvmppc_load_guest_fp(vcpu); #endif #ifdef CONFIG_ALTIVEC /* Save userspace AltiVec state in stack */ if (cpu_has_feature(CPU_FTR_ALTIVEC)) enable_kernel_altivec(); /* * Since we can't trap on MSR_VEC in GS-mode, we consider the guest * as always using the AltiVec. */ kvmppc_load_guest_altivec(vcpu); #endif /* Switch to guest debug context */ debug = vcpu->arch.dbg_reg; switch_booke_debug_regs(&debug); debug = current->thread.debug; current->thread.debug = vcpu->arch.dbg_reg; vcpu->arch.pgdir = vcpu->kvm->mm->pgd; kvmppc_fix_ee_before_entry(); ret = __kvmppc_vcpu_run(vcpu); /* No need for guest_exit. It's done in handle_exit. We also get here with interrupts enabled. 
*/ /* Switch back to user space debug context */ switch_booke_debug_regs(&debug); current->thread.debug = debug; #ifdef CONFIG_PPC_FPU kvmppc_save_guest_fp(vcpu); #endif #ifdef CONFIG_ALTIVEC kvmppc_save_guest_altivec(vcpu); #endif out: vcpu->mode = OUTSIDE_GUEST_MODE; return ret; } static int emulation_exit(struct kvm_vcpu *vcpu) { enum emulation_result er; er = kvmppc_emulate_instruction(vcpu); switch (er) { case EMULATE_DONE: /* don't overwrite subtypes, just account kvm_stats */ kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); /* Future optimization: only reload non-volatiles if * they were actually modified by emulation. */ return RESUME_GUEST_NV; case EMULATE_AGAIN: return RESUME_GUEST; case EMULATE_FAIL: printk(KERN_CRIT "%s: emulation at %lx failed (%08lx)\n", __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst); /* For debugging, encode the failing instruction and * report it to userspace. */ vcpu->run->hw.hardware_exit_reason = ~0ULL << 32; vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst; kvmppc_core_queue_program(vcpu, ESR_PIL); return RESUME_HOST; case EMULATE_EXIT_USER: return RESUME_HOST; default: BUG(); } } static int kvmppc_handle_debug(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg); u32 dbsr = vcpu->arch.dbsr; if (vcpu->guest_debug == 0) { /* * Debug resources belong to Guest. * Imprecise debug event is not injected */ if (dbsr & DBSR_IDE) { dbsr &= ~DBSR_IDE; if (!dbsr) return RESUME_GUEST; } if (dbsr && (vcpu->arch.shared->msr & MSR_DE) && (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM)) kvmppc_core_queue_debug(vcpu); /* Inject a program interrupt if trap debug is not allowed */ if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE)) kvmppc_core_queue_program(vcpu, ESR_PTR); return RESUME_GUEST; } /* * Debug resource owned by userspace. * Clear guest dbsr (vcpu->arch.dbsr) */ vcpu->arch.dbsr = 0; run->debug.arch.status = 0; run->debug.arch.address = vcpu->arch.regs.nip; if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) { run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT; } else { if (dbsr & (DBSR_DAC1W | DBSR_DAC2W)) run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE; else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R)) run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ; if (dbsr & (DBSR_DAC1R | DBSR_DAC1W)) run->debug.arch.address = dbg_reg->dac1; else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W)) run->debug.arch.address = dbg_reg->dac2; } return RESUME_HOST; } static void kvmppc_fill_pt_regs(struct pt_regs *regs) { ulong r1, msr, lr; asm("mr %0, 1" : "=r"(r1)); asm("mflr %0" : "=r"(lr)); asm("mfmsr %0" : "=r"(msr)); memset(regs, 0, sizeof(*regs)); regs->gpr[1] = r1; regs->nip = _THIS_IP_; regs->msr = msr; regs->link = lr; } /* * For interrupts needed to be handled by host interrupt handlers, * corresponding host handler are called from here in similar way * (but not exact) as they are called from low level handler * (such as from arch/powerpc/kernel/head_fsl_booke.S). 
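 * kvmppc_fill_pt_regs() above fabricates a minimal pt_regs snapshot of the
 * current host context (stack pointer, NIP, MSR, LR) so that those handlers
 * can be invoked directly from C rather than from the real exception entry.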
*/ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, unsigned int exit_nr) { struct pt_regs regs; switch (exit_nr) { case BOOKE_INTERRUPT_EXTERNAL: kvmppc_fill_pt_regs(&regs); do_IRQ(&regs); break; case BOOKE_INTERRUPT_DECREMENTER: kvmppc_fill_pt_regs(&regs); timer_interrupt(&regs); break; #if defined(CONFIG_PPC_DOORBELL) case BOOKE_INTERRUPT_DOORBELL: kvmppc_fill_pt_regs(&regs); doorbell_exception(&regs); break; #endif case BOOKE_INTERRUPT_MACHINE_CHECK: /* FIXME */ break; case BOOKE_INTERRUPT_PERFORMANCE_MONITOR: kvmppc_fill_pt_regs(&regs); performance_monitor_exception(&regs); break; case BOOKE_INTERRUPT_WATCHDOG: kvmppc_fill_pt_regs(&regs); #ifdef CONFIG_BOOKE_WDT WatchdogException(&regs); #else unknown_exception(&regs); #endif break; case BOOKE_INTERRUPT_CRITICAL: kvmppc_fill_pt_regs(&regs); unknown_exception(&regs); break; case BOOKE_INTERRUPT_DEBUG: /* Save DBSR before preemption is enabled */ vcpu->arch.dbsr = mfspr(SPRN_DBSR); kvmppc_clear_dbsr(); break; } } static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu, enum emulation_result emulated, u32 last_inst) { switch (emulated) { case EMULATE_AGAIN: return RESUME_GUEST; case EMULATE_FAIL: pr_debug("%s: load instruction from guest address %lx failed\n", __func__, vcpu->arch.regs.nip); /* For debugging, encode the failing instruction and * report it to userspace. */ vcpu->run->hw.hardware_exit_reason = ~0ULL << 32; vcpu->run->hw.hardware_exit_reason |= last_inst; kvmppc_core_queue_program(vcpu, ESR_PIL); return RESUME_HOST; default: BUG(); } } /* * kvmppc_handle_exit * * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) */ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr) { struct kvm_run *run = vcpu->run; int r = RESUME_HOST; int s; int idx; u32 last_inst = KVM_INST_FETCH_FAILED; ppc_inst_t pinst; enum emulation_result emulated = EMULATE_DONE; /* Fix irq state (pairs with kvmppc_fix_ee_before_entry()) */ kvmppc_fix_ee_after_exit(); /* update before a new last_exit_type is rewritten */ kvmppc_update_timing_stats(vcpu); /* restart interrupts if they were meant for the host */ kvmppc_restart_interrupt(vcpu, exit_nr); /* * get last instruction before being preempted * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA */ switch (exit_nr) { case BOOKE_INTERRUPT_DATA_STORAGE: case BOOKE_INTERRUPT_DTLB_MISS: case BOOKE_INTERRUPT_HV_PRIV: emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst); last_inst = ppc_inst_val(pinst); break; case BOOKE_INTERRUPT_PROGRAM: /* SW breakpoints arrive as illegal instructions on HV */ if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst); last_inst = ppc_inst_val(pinst); } break; default: break; } trace_kvm_exit(exit_nr, vcpu); context_tracking_guest_exit(); if (!vtime_accounting_enabled_this_cpu()) { local_irq_enable(); /* * Service IRQs here before vtime_account_guest_exit() so any * ticks that occurred while running the guest are accounted to * the guest. If vtime accounting is enabled, accounting uses * TB rather than ticks, so it can be done without enabling * interrupts here, which has the problem that it accounts * interrupt processing overhead to the host. 
*/ local_irq_disable(); } vtime_account_guest_exit(); local_irq_enable(); run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; if (emulated != EMULATE_DONE) { r = kvmppc_resume_inst_load(vcpu, emulated, last_inst); goto out; } switch (exit_nr) { case BOOKE_INTERRUPT_MACHINE_CHECK: printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); kvmppc_dump_vcpu(vcpu); /* For debugging, send invalid exit reason to user space */ run->hw.hardware_exit_reason = ~1ULL << 32; run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR); r = RESUME_HOST; break; case BOOKE_INTERRUPT_EXTERNAL: kvmppc_account_exit(vcpu, EXT_INTR_EXITS); r = RESUME_GUEST; break; case BOOKE_INTERRUPT_DECREMENTER: kvmppc_account_exit(vcpu, DEC_EXITS); r = RESUME_GUEST; break; case BOOKE_INTERRUPT_WATCHDOG: r = RESUME_GUEST; break; case BOOKE_INTERRUPT_DOORBELL: kvmppc_account_exit(vcpu, DBELL_EXITS); r = RESUME_GUEST; break; case BOOKE_INTERRUPT_GUEST_DBELL_CRIT: kvmppc_account_exit(vcpu, GDBELL_EXITS); /* * We are here because there is a pending guest interrupt * which could not be delivered as MSR_CE or MSR_ME was not * set. Once we break from here we will retry delivery. */ r = RESUME_GUEST; break; case BOOKE_INTERRUPT_GUEST_DBELL: kvmppc_account_exit(vcpu, GDBELL_EXITS); /* * We are here because there is a pending guest interrupt * which could not be delivered as MSR_EE was not set. Once * we break from here we will retry delivery. */ r = RESUME_GUEST; break; case BOOKE_INTERRUPT_PERFORMANCE_MONITOR: r = RESUME_GUEST; break; case BOOKE_INTERRUPT_HV_PRIV: r = emulation_exit(vcpu); break; case BOOKE_INTERRUPT_PROGRAM: if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) && (last_inst == KVMPPC_INST_SW_BREAKPOINT)) { /* * We are here because of an SW breakpoint instr, * so lets return to host to handle. */ r = kvmppc_handle_debug(vcpu); run->exit_reason = KVM_EXIT_DEBUG; kvmppc_account_exit(vcpu, DEBUG_EXITS); break; } if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) { /* * Program traps generated by user-level software must * be handled by the guest kernel. * * In GS mode, hypervisor privileged instructions trap * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are * actual program interrupts, handled by the guest. */ kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); r = RESUME_GUEST; kvmppc_account_exit(vcpu, USR_PR_INST); break; } r = emulation_exit(vcpu); break; case BOOKE_INTERRUPT_FP_UNAVAIL: kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); kvmppc_account_exit(vcpu, FP_UNAVAIL); r = RESUME_GUEST; break; #ifdef CONFIG_SPE case BOOKE_INTERRUPT_SPE_UNAVAIL: { if (vcpu->arch.shared->msr & MSR_SPE) kvmppc_vcpu_enable_spe(vcpu); else kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL); r = RESUME_GUEST; break; } case BOOKE_INTERRUPT_SPE_FP_DATA: kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA); r = RESUME_GUEST; break; case BOOKE_INTERRUPT_SPE_FP_ROUND: kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND); r = RESUME_GUEST; break; #elif defined(CONFIG_SPE_POSSIBLE) case BOOKE_INTERRUPT_SPE_UNAVAIL: /* * Guest wants SPE, but host kernel doesn't support it. Send * an "unimplemented operation" program check to the guest. */ kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV); r = RESUME_GUEST; break; /* * These really should never happen without CONFIG_SPE, * as we should never enable the real MSR[SPE] in the guest. 
*/ case BOOKE_INTERRUPT_SPE_FP_DATA: case BOOKE_INTERRUPT_SPE_FP_ROUND: printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n", __func__, exit_nr, vcpu->arch.regs.nip); run->hw.hardware_exit_reason = exit_nr; r = RESUME_HOST; break; #endif /* CONFIG_SPE_POSSIBLE */ /* * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC, * see kvmppc_e500mc_check_processor_compat(). */ #ifdef CONFIG_ALTIVEC case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL: kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL); r = RESUME_GUEST; break; case BOOKE_INTERRUPT_ALTIVEC_ASSIST: kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST); r = RESUME_GUEST; break; #endif case BOOKE_INTERRUPT_DATA_STORAGE: kvmppc_core_queue_data_storage(vcpu, 0, vcpu->arch.fault_dear, vcpu->arch.fault_esr); kvmppc_account_exit(vcpu, DSI_EXITS); r = RESUME_GUEST; break; case BOOKE_INTERRUPT_INST_STORAGE: kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr); kvmppc_account_exit(vcpu, ISI_EXITS); r = RESUME_GUEST; break; case BOOKE_INTERRUPT_ALIGNMENT: kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear, vcpu->arch.fault_esr); r = RESUME_GUEST; break; #ifdef CONFIG_KVM_BOOKE_HV case BOOKE_INTERRUPT_HV_SYSCALL: if (!(vcpu->arch.shared->msr & MSR_PR)) { kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); } else { /* * hcall from guest userspace -- send privileged * instruction program check. */ kvmppc_core_queue_program(vcpu, ESR_PPR); } r = RESUME_GUEST; break; #else case BOOKE_INTERRUPT_SYSCALL: if (!(vcpu->arch.shared->msr & MSR_PR) && (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { /* KVM PV hypercalls */ kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); r = RESUME_GUEST; } else { /* Guest syscalls */ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); } kvmppc_account_exit(vcpu, SYSCALL_EXITS); r = RESUME_GUEST; break; #endif case BOOKE_INTERRUPT_DTLB_MISS: { unsigned long eaddr = vcpu->arch.fault_dear; int gtlb_index; gpa_t gpaddr; gfn_t gfn; #ifdef CONFIG_KVM_E500V2 if (!(vcpu->arch.shared->msr & MSR_PR) && (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { kvmppc_map_magic(vcpu); kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); r = RESUME_GUEST; break; } #endif /* Check the guest TLB. */ gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); if (gtlb_index < 0) { /* The guest didn't have a mapping for it. */ kvmppc_core_queue_dtlb_miss(vcpu, vcpu->arch.fault_dear, vcpu->arch.fault_esr); kvmppc_mmu_dtlb_miss(vcpu); kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); r = RESUME_GUEST; break; } idx = srcu_read_lock(&vcpu->kvm->srcu); gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); gfn = gpaddr >> PAGE_SHIFT; if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { /* The guest TLB had a mapping, but the shadow TLB * didn't, and it is RAM. This could be because: * a) the entry is mapping the host kernel, or * b) the guest used a large mapping which we're faking * Either way, we need to satisfy the fault without * invoking the guest. */ kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); r = RESUME_GUEST; } else { /* Guest has mapped and accessed a page which is not * actually RAM. */ vcpu->arch.paddr_accessed = gpaddr; vcpu->arch.vaddr_accessed = eaddr; r = kvmppc_emulate_mmio(vcpu); kvmppc_account_exit(vcpu, MMIO_EXITS); } srcu_read_unlock(&vcpu->kvm->srcu, idx); break; } case BOOKE_INTERRUPT_ITLB_MISS: { unsigned long eaddr = vcpu->arch.regs.nip; gpa_t gpaddr; gfn_t gfn; int gtlb_index; r = RESUME_GUEST; /* Check the guest TLB. 
*/ gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr); if (gtlb_index < 0) { /* The guest didn't have a mapping for it. */ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); kvmppc_mmu_itlb_miss(vcpu); kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS); break; } kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); idx = srcu_read_lock(&vcpu->kvm->srcu); gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); gfn = gpaddr >> PAGE_SHIFT; if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { /* The guest TLB had a mapping, but the shadow TLB * didn't. This could be because: * a) the entry is mapping the host kernel, or * b) the guest used a large mapping which we're faking * Either way, we need to satisfy the fault without * invoking the guest. */ kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); } else { /* Guest mapped and leaped at non-RAM! */ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); } srcu_read_unlock(&vcpu->kvm->srcu, idx); break; } case BOOKE_INTERRUPT_DEBUG: { r = kvmppc_handle_debug(vcpu); if (r == RESUME_HOST) run->exit_reason = KVM_EXIT_DEBUG; kvmppc_account_exit(vcpu, DEBUG_EXITS); break; } default: printk(KERN_EMERG "exit_nr %d\n", exit_nr); BUG(); } out: /* * To avoid clobbering exit_reason, only check for signals if we * aren't already exiting to userspace for some other reason. */ if (!(r & RESUME_HOST)) { s = kvmppc_prepare_to_enter(vcpu); if (s <= 0) r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); else { /* interrupts now hard-disabled */ kvmppc_fix_ee_before_entry(); kvmppc_load_guest_fp(vcpu); kvmppc_load_guest_altivec(vcpu); } } return r; } static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr) { u32 old_tsr = vcpu->arch.tsr; vcpu->arch.tsr = new_tsr; if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS)) arm_next_watchdog(vcpu); update_timer_ints(vcpu); } int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu) { /* setup watchdog timer once */ spin_lock_init(&vcpu->arch.wdt_lock); timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0); /* * Clear DBSR.MRR to avoid guest debug interrupt as * this is of host interest */ mtspr(SPRN_DBSR, DBSR_MRR); return 0; } void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu) { del_timer_sync(&vcpu->arch.wdt_timer); } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; vcpu_load(vcpu); regs->pc = vcpu->arch.regs.nip; regs->cr = kvmppc_get_cr(vcpu); regs->ctr = vcpu->arch.regs.ctr; regs->lr = vcpu->arch.regs.link; regs->xer = kvmppc_get_xer(vcpu); regs->msr = vcpu->arch.shared->msr; regs->srr0 = kvmppc_get_srr0(vcpu); regs->srr1 = kvmppc_get_srr1(vcpu); regs->pid = vcpu->arch.pid; regs->sprg0 = kvmppc_get_sprg0(vcpu); regs->sprg1 = kvmppc_get_sprg1(vcpu); regs->sprg2 = kvmppc_get_sprg2(vcpu); regs->sprg3 = kvmppc_get_sprg3(vcpu); regs->sprg4 = kvmppc_get_sprg4(vcpu); regs->sprg5 = kvmppc_get_sprg5(vcpu); regs->sprg6 = kvmppc_get_sprg6(vcpu); regs->sprg7 = kvmppc_get_sprg7(vcpu); for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) regs->gpr[i] = kvmppc_get_gpr(vcpu, i); vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; vcpu_load(vcpu); vcpu->arch.regs.nip = regs->pc; kvmppc_set_cr(vcpu, regs->cr); vcpu->arch.regs.ctr = regs->ctr; vcpu->arch.regs.link = regs->lr; kvmppc_set_xer(vcpu, regs->xer); kvmppc_set_msr(vcpu, regs->msr); kvmppc_set_srr0(vcpu, regs->srr0); kvmppc_set_srr1(vcpu, regs->srr1); kvmppc_set_pid(vcpu, regs->pid); kvmppc_set_sprg0(vcpu, regs->sprg0); kvmppc_set_sprg1(vcpu, regs->sprg1); kvmppc_set_sprg2(vcpu, 
regs->sprg2); kvmppc_set_sprg3(vcpu, regs->sprg3); kvmppc_set_sprg4(vcpu, regs->sprg4); kvmppc_set_sprg5(vcpu, regs->sprg5); kvmppc_set_sprg6(vcpu, regs->sprg6); kvmppc_set_sprg7(vcpu, regs->sprg7); for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) kvmppc_set_gpr(vcpu, i, regs->gpr[i]); vcpu_put(vcpu); return 0; } static void get_sregs_base(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { u64 tb = get_tb(); sregs->u.e.features |= KVM_SREGS_E_BASE; sregs->u.e.csrr0 = vcpu->arch.csrr0; sregs->u.e.csrr1 = vcpu->arch.csrr1; sregs->u.e.mcsr = vcpu->arch.mcsr; sregs->u.e.esr = kvmppc_get_esr(vcpu); sregs->u.e.dear = kvmppc_get_dar(vcpu); sregs->u.e.tsr = vcpu->arch.tsr; sregs->u.e.tcr = vcpu->arch.tcr; sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); sregs->u.e.tb = tb; sregs->u.e.vrsave = vcpu->arch.vrsave; } static int set_sregs_base(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { if (!(sregs->u.e.features & KVM_SREGS_E_BASE)) return 0; vcpu->arch.csrr0 = sregs->u.e.csrr0; vcpu->arch.csrr1 = sregs->u.e.csrr1; vcpu->arch.mcsr = sregs->u.e.mcsr; kvmppc_set_esr(vcpu, sregs->u.e.esr); kvmppc_set_dar(vcpu, sregs->u.e.dear); vcpu->arch.vrsave = sregs->u.e.vrsave; kvmppc_set_tcr(vcpu, sregs->u.e.tcr); if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) { vcpu->arch.dec = sregs->u.e.dec; kvmppc_emulate_dec(vcpu); } if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) kvmppc_set_tsr(vcpu, sregs->u.e.tsr); return 0; } static void get_sregs_arch206(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { sregs->u.e.features |= KVM_SREGS_E_ARCH206; sregs->u.e.pir = vcpu->vcpu_id; sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0; sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1; sregs->u.e.decar = vcpu->arch.decar; sregs->u.e.ivpr = vcpu->arch.ivpr; } static int set_sregs_arch206(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206)) return 0; if (sregs->u.e.pir != vcpu->vcpu_id) return -EINVAL; vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0; vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1; vcpu->arch.decar = sregs->u.e.decar; vcpu->arch.ivpr = sregs->u.e.ivpr; return 0; } int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { sregs->u.e.features |= KVM_SREGS_E_IVOR; sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; return 0; } int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) return 0; vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0]; vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = 
sregs->u.e.ivor_low[1]; vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2]; vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3]; vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4]; vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5]; vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6]; vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7]; vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8]; vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9]; vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10]; vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11]; vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12]; vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13]; vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14]; vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15]; return 0; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int ret; vcpu_load(vcpu); sregs->pvr = vcpu->arch.pvr; get_sregs_base(vcpu, sregs); get_sregs_arch206(vcpu, sregs); ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); vcpu_put(vcpu); return ret; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int ret = -EINVAL; vcpu_load(vcpu); if (vcpu->arch.pvr != sregs->pvr) goto out; ret = set_sregs_base(vcpu, sregs); if (ret < 0) goto out; ret = set_sregs_arch206(vcpu, sregs); if (ret < 0) goto out; ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); out: vcpu_put(vcpu); return ret; } int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; switch (id) { case KVM_REG_PPC_IAC1: *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1); break; case KVM_REG_PPC_IAC2: *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2); break; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 case KVM_REG_PPC_IAC3: *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3); break; case KVM_REG_PPC_IAC4: *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4); break; #endif case KVM_REG_PPC_DAC1: *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1); break; case KVM_REG_PPC_DAC2: *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2); break; case KVM_REG_PPC_EPR: { u32 epr = kvmppc_get_epr(vcpu); *val = get_reg_val(id, epr); break; } #if defined(CONFIG_64BIT) case KVM_REG_PPC_EPCR: *val = get_reg_val(id, vcpu->arch.epcr); break; #endif case KVM_REG_PPC_TCR: *val = get_reg_val(id, vcpu->arch.tcr); break; case KVM_REG_PPC_TSR: *val = get_reg_val(id, vcpu->arch.tsr); break; case KVM_REG_PPC_DEBUG_INST: *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT); break; case KVM_REG_PPC_VRSAVE: *val = get_reg_val(id, vcpu->arch.vrsave); break; default: r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val); break; } return r; } int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; switch (id) { case KVM_REG_PPC_IAC1: vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val); break; case KVM_REG_PPC_IAC2: vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val); break; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 case KVM_REG_PPC_IAC3: vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val); break; case KVM_REG_PPC_IAC4: vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val); break; #endif case KVM_REG_PPC_DAC1: vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val); break; case KVM_REG_PPC_DAC2: vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val); break; case KVM_REG_PPC_EPR: { u32 new_epr = set_reg_val(id, *val); kvmppc_set_epr(vcpu, new_epr); 
break; } #if defined(CONFIG_64BIT) case KVM_REG_PPC_EPCR: { u32 new_epcr = set_reg_val(id, *val); kvmppc_set_epcr(vcpu, new_epcr); break; } #endif case KVM_REG_PPC_OR_TSR: { u32 tsr_bits = set_reg_val(id, *val); kvmppc_set_tsr_bits(vcpu, tsr_bits); break; } case KVM_REG_PPC_CLEAR_TSR: { u32 tsr_bits = set_reg_val(id, *val); kvmppc_clr_tsr_bits(vcpu, tsr_bits); break; } case KVM_REG_PPC_TSR: { u32 tsr = set_reg_val(id, *val); kvmppc_set_tsr(vcpu, tsr); break; } case KVM_REG_PPC_TCR: { u32 tcr = set_reg_val(id, *val); kvmppc_set_tcr(vcpu, tcr); break; } case KVM_REG_PPC_VRSAVE: vcpu->arch.vrsave = set_reg_val(id, *val); break; default: r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val); break; } return r; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EOPNOTSUPP; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EOPNOTSUPP; } int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { int r; vcpu_load(vcpu); r = kvmppc_core_vcpu_translate(vcpu, tr); vcpu_put(vcpu); return r; } void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) { } int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { return -EOPNOTSUPP; } void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { } int kvmppc_core_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) { return 0; } void kvmppc_core_commit_memory_region(struct kvm *kvm, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { } void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) { } void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr) { #if defined(CONFIG_64BIT) vcpu->arch.epcr = new_epcr; #ifdef CONFIG_KVM_BOOKE_HV vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM; if (vcpu->arch.epcr & SPRN_EPCR_ICM) vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM; #endif #endif } void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr) { vcpu->arch.tcr = new_tcr; arm_next_watchdog(vcpu); update_timer_ints(vcpu); } void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) { set_bits(tsr_bits, &vcpu->arch.tsr); smp_wmb(); kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); kvm_vcpu_kick(vcpu); } void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) { clear_bits(tsr_bits, &vcpu->arch.tsr); /* * We may have stopped the watchdog due to * being stuck on final expiration. 
*/ if (tsr_bits & (TSR_ENW | TSR_WIS)) arm_next_watchdog(vcpu); update_timer_ints(vcpu); } void kvmppc_decrementer_func(struct kvm_vcpu *vcpu) { if (vcpu->arch.tcr & TCR_ARE) { vcpu->arch.dec = vcpu->arch.decar; kvmppc_emulate_dec(vcpu); } kvmppc_set_tsr_bits(vcpu, TSR_DIS); } static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg, uint64_t addr, int index) { switch (index) { case 0: dbg_reg->dbcr0 |= DBCR0_IAC1; dbg_reg->iac1 = addr; break; case 1: dbg_reg->dbcr0 |= DBCR0_IAC2; dbg_reg->iac2 = addr; break; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 case 2: dbg_reg->dbcr0 |= DBCR0_IAC3; dbg_reg->iac3 = addr; break; case 3: dbg_reg->dbcr0 |= DBCR0_IAC4; dbg_reg->iac4 = addr; break; #endif default: return -EINVAL; } dbg_reg->dbcr0 |= DBCR0_IDM; return 0; } static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr, int type, int index) { switch (index) { case 0: if (type & KVMPPC_DEBUG_WATCH_READ) dbg_reg->dbcr0 |= DBCR0_DAC1R; if (type & KVMPPC_DEBUG_WATCH_WRITE) dbg_reg->dbcr0 |= DBCR0_DAC1W; dbg_reg->dac1 = addr; break; case 1: if (type & KVMPPC_DEBUG_WATCH_READ) dbg_reg->dbcr0 |= DBCR0_DAC2R; if (type & KVMPPC_DEBUG_WATCH_WRITE) dbg_reg->dbcr0 |= DBCR0_DAC2W; dbg_reg->dac2 = addr; break; default: return -EINVAL; } dbg_reg->dbcr0 |= DBCR0_IDM; return 0; } static void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set) { /* XXX: Add similar MSR protection for BookE-PR */ #ifdef CONFIG_KVM_BOOKE_HV BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP)); if (set) { if (prot_bitmap & MSR_UCLE) vcpu->arch.shadow_msrp |= MSRP_UCLEP; if (prot_bitmap & MSR_DE) vcpu->arch.shadow_msrp |= MSRP_DEP; if (prot_bitmap & MSR_PMM) vcpu->arch.shadow_msrp |= MSRP_PMMP; } else { if (prot_bitmap & MSR_UCLE) vcpu->arch.shadow_msrp &= ~MSRP_UCLEP; if (prot_bitmap & MSR_DE) vcpu->arch.shadow_msrp &= ~MSRP_DEP; if (prot_bitmap & MSR_PMM) vcpu->arch.shadow_msrp &= ~MSRP_PMMP; } #endif } int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid, enum xlate_readwrite xlrw, struct kvmppc_pte *pte) { int gtlb_index; gpa_t gpaddr; #ifdef CONFIG_KVM_E500V2 if (!(vcpu->arch.shared->msr & MSR_PR) && (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { pte->eaddr = eaddr; pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) | (eaddr & ~PAGE_MASK); pte->vpage = eaddr >> PAGE_SHIFT; pte->may_read = true; pte->may_write = true; pte->may_execute = true; return 0; } #endif /* Check the guest TLB. */ switch (xlid) { case XLATE_INST: gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr); break; case XLATE_DATA: gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); break; default: BUG(); } /* Do we have a TLB entry at all? 
*/ if (gtlb_index < 0) return -ENOENT; gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); pte->eaddr = eaddr; pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK); pte->vpage = eaddr >> PAGE_SHIFT; /* XXX read permissions from the guest TLB */ pte->may_read = true; pte->may_write = true; pte->may_execute = true; return 0; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { struct debug_reg *dbg_reg; int n, b = 0, w = 0; int ret = 0; vcpu_load(vcpu); if (!(dbg->control & KVM_GUESTDBG_ENABLE)) { vcpu->arch.dbg_reg.dbcr0 = 0; vcpu->guest_debug = 0; kvm_guest_protect_msr(vcpu, MSR_DE, false); goto out; } kvm_guest_protect_msr(vcpu, MSR_DE, true); vcpu->guest_debug = dbg->control; vcpu->arch.dbg_reg.dbcr0 = 0; if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC; /* Code below handles only HW breakpoints */ dbg_reg = &(vcpu->arch.dbg_reg); #ifdef CONFIG_KVM_BOOKE_HV /* * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0 */ dbg_reg->dbcr1 = 0; dbg_reg->dbcr2 = 0; #else /* * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR * is set. */ dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US | DBCR1_IAC4US; dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; #endif if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) goto out; ret = -EINVAL; for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) { uint64_t addr = dbg->arch.bp[n].addr; uint32_t type = dbg->arch.bp[n].type; if (type == KVMPPC_DEBUG_NONE) continue; if (type & ~(KVMPPC_DEBUG_WATCH_READ | KVMPPC_DEBUG_WATCH_WRITE | KVMPPC_DEBUG_BREAKPOINT)) goto out; if (type & KVMPPC_DEBUG_BREAKPOINT) { /* Setting H/W breakpoint */ if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++)) goto out; } else { /* Setting H/W watchpoint */ if (kvmppc_booke_add_watchpoint(dbg_reg, addr, type, w++)) goto out; } } ret = 0; out: vcpu_put(vcpu); return ret; } void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { vcpu->cpu = smp_processor_id(); current->thread.kvm_vcpu = vcpu; } void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) { current->thread.kvm_vcpu = NULL; vcpu->cpu = -1; /* Clear pending debug event in DBSR */ kvmppc_clear_dbsr(); } int kvmppc_core_init_vm(struct kvm *kvm) { return kvm->arch.kvm_ops->init_vm(kvm); } int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu) { int i; int r; r = vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu); if (r) return r; /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ vcpu->arch.regs.nip = 0; vcpu->arch.shared->pir = vcpu->vcpu_id; kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ kvmppc_set_msr(vcpu, 0); #ifndef CONFIG_KVM_BOOKE_HV vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS; vcpu->arch.shadow_pid = 1; vcpu->arch.shared->msr = 0; #endif /* Eye-catching numbers so we know if the guest takes an interrupt * before it's programmed its own IVPR/IVORs. 
*/ vcpu->arch.ivpr = 0x55550000; for (i = 0; i < BOOKE_IRQPRIO_MAX; i++) vcpu->arch.ivor[i] = 0x7700 | i * 4; kvmppc_init_timing_stats(vcpu); r = kvmppc_core_vcpu_setup(vcpu); if (r) vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); kvmppc_sanity_check(vcpu); return r; } void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) { vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); } void kvmppc_core_destroy_vm(struct kvm *kvm) { kvm->arch.kvm_ops->destroy_vm(kvm); } void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu); } void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) { vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu); } int __init kvmppc_booke_init(void) { #ifndef CONFIG_KVM_BOOKE_HV unsigned long ivor[16]; unsigned long *handler = kvmppc_booke_handler_addr; unsigned long max_ivor = 0; unsigned long handler_len; int i; /* We install our own exception handlers by hijacking IVPR. IVPR must * be 16-bit aligned, so we need a 64KB allocation. */ kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO, VCPU_SIZE_ORDER); if (!kvmppc_booke_handlers) return -ENOMEM; /* XXX make sure our handlers are smaller than Linux's */ /* Copy our interrupt handlers to match host IVORs. That way we don't * have to swap the IVORs on every guest/host transition. */ ivor[0] = mfspr(SPRN_IVOR0); ivor[1] = mfspr(SPRN_IVOR1); ivor[2] = mfspr(SPRN_IVOR2); ivor[3] = mfspr(SPRN_IVOR3); ivor[4] = mfspr(SPRN_IVOR4); ivor[5] = mfspr(SPRN_IVOR5); ivor[6] = mfspr(SPRN_IVOR6); ivor[7] = mfspr(SPRN_IVOR7); ivor[8] = mfspr(SPRN_IVOR8); ivor[9] = mfspr(SPRN_IVOR9); ivor[10] = mfspr(SPRN_IVOR10); ivor[11] = mfspr(SPRN_IVOR11); ivor[12] = mfspr(SPRN_IVOR12); ivor[13] = mfspr(SPRN_IVOR13); ivor[14] = mfspr(SPRN_IVOR14); ivor[15] = mfspr(SPRN_IVOR15); for (i = 0; i < 16; i++) { if (ivor[i] > max_ivor) max_ivor = i; handler_len = handler[i + 1] - handler[i]; memcpy((void *)kvmppc_booke_handlers + ivor[i], (void *)handler[i], handler_len); } handler_len = handler[max_ivor + 1] - handler[max_ivor]; flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + ivor[max_ivor] + handler_len); #endif /* !BOOKE_HV */ return 0; } void __exit kvmppc_booke_exit(void) { free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER); kvm_exit(); }
linux-master
arch/powerpc/kvm/booke.c
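/*
 * Standalone sketch (not kernel code) of the ONE_REG-style dispatch pattern
 * used by kvmppc_get_one_reg()/kvmppc_set_one_reg() in booke.c above: a flat
 * 64-bit id selects a register, known ids are handled in a switch, and
 * anything else falls through to a per-flavour backend hook. All names here
 * (mock_vcpu, MOCK_REG_*) are illustrative inventions, not KVM ABI.
 */
#include <stdint.h>
#include <stdio.h>

enum { MOCK_REG_IAC1 = 1, MOCK_REG_IAC2 = 2, MOCK_REG_TCR = 3 };

struct mock_vcpu {
	uint64_t iac1, iac2, tcr;
	/* backend hook for ids the generic layer does not know about */
	int (*backend_get)(struct mock_vcpu *vcpu, uint64_t id, uint64_t *val);
};

static int mock_get_one_reg(struct mock_vcpu *vcpu, uint64_t id, uint64_t *val)
{
	switch (id) {
	case MOCK_REG_IAC1: *val = vcpu->iac1; return 0;
	case MOCK_REG_IAC2: *val = vcpu->iac2; return 0;
	case MOCK_REG_TCR:  *val = vcpu->tcr;  return 0;
	default:
		return vcpu->backend_get(vcpu, id, val);
	}
}

static int mock_backend_get(struct mock_vcpu *vcpu, uint64_t id, uint64_t *val)
{
	(void)vcpu; (void)id; (void)val;
	return -1;	/* unknown register */
}

int main(void)
{
	struct mock_vcpu vcpu = { .iac1 = 0x1000, .tcr = 0x04000000,
				  .backend_get = mock_backend_get };
	uint64_t v;

	if (mock_get_one_reg(&vcpu, MOCK_REG_TCR, &v) == 0)
		printf("TCR = 0x%llx\n", (unsigned long long)v);
	if (mock_get_one_reg(&vcpu, 99, &v) != 0)
		printf("id 99 not handled by any layer\n");
	return 0;
}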
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2017 Paul Mackerras, IBM Corp. <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kvm_host.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/kvm_book3s_64.h> #include <asm/reg.h> #include <asm/ppc-opcode.h> static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause) { u64 texasr, tfiar; u64 msr = vcpu->arch.shregs.msr; tfiar = vcpu->arch.regs.nip & ~0x3ull; texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT; if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) texasr |= TEXASR_SUSP; if (msr & MSR_PR) { texasr |= TEXASR_PR; tfiar |= 1; } vcpu->arch.tfiar = tfiar; /* Preserve ROT and TL fields of existing TEXASR */ vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr; } /* * This gets called on a softpatch interrupt on POWER9 DD2.2 processors. * We expect to find a TM-related instruction to be emulated. The * instruction image is in vcpu->arch.emul_inst. If the guest was in * TM suspended or transactional state, the checkpointed state has been * reclaimed and is in the vcpu struct. The CPU is in virtual mode in * host context. */ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) { u32 instr = vcpu->arch.emul_inst; u64 msr = vcpu->arch.shregs.msr; u64 newmsr, bescr; int ra, rs; /* * The TM softpatch interrupt sets NIP to the instruction following * the faulting instruction, which is not executed. Rewind nip to the * faulting instruction so it looks like a normal synchronous * interrupt, then update nip in the places where the instruction is * emulated. */ vcpu->arch.regs.nip -= 4; /* * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit * in these instructions, so masking bit 31 out doesn't change these * instructions. For treclaim., tsr., and trechkpt. instructions if bit * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit * 31 is an acceptable way to handle these invalid forms that have * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/ * bit 31 set) can generate a softpatch interrupt. Hence both forms * are handled below for these instructions so they behave the same way. */ switch (instr & PO_XOP_OPCODE_MASK) { case PPC_INST_RFID: /* XXX do we need to check for PR=0 here? 
*/ newmsr = vcpu->arch.shregs.srr1; /* should only get here for Sx -> T1 transition */ WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) && MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM))); newmsr = sanitize_msr(newmsr); vcpu->arch.shregs.msr = newmsr; vcpu->arch.cfar = vcpu->arch.regs.nip; vcpu->arch.regs.nip = vcpu->arch.shregs.srr0; return RESUME_GUEST; case PPC_INST_RFEBB: if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) { /* generate an illegal instruction interrupt */ kvmppc_core_queue_program(vcpu, SRR1_PROGILL); return RESUME_GUEST; } /* check EBB facility is available */ if (!(vcpu->arch.hfscr & HFSCR_EBB)) { vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE; vcpu->arch.hfscr |= (u64)FSCR_EBB_LG << 56; vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL; return -1; /* rerun host interrupt handler */ } if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) { /* generate a facility unavailable interrupt */ vcpu->arch.fscr &= ~FSCR_INTR_CAUSE; vcpu->arch.fscr |= (u64)FSCR_EBB_LG << 56; kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); return RESUME_GUEST; } bescr = vcpu->arch.bescr; /* expect to see a S->T transition requested */ WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) && ((bescr >> 30) & 3) == 2)); bescr &= ~BESCR_GE; if (instr & (1 << 11)) bescr |= BESCR_GE; vcpu->arch.bescr = bescr; msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; vcpu->arch.shregs.msr = msr; vcpu->arch.cfar = vcpu->arch.regs.nip; vcpu->arch.regs.nip = vcpu->arch.ebbrr; return RESUME_GUEST; case PPC_INST_MTMSRD: /* XXX do we need to check for PR=0 here? */ rs = (instr >> 21) & 0x1f; newmsr = kvmppc_get_gpr(vcpu, rs); /* check this is a Sx -> T1 transition */ WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) && MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM))); /* mtmsrd doesn't change LE */ newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE); newmsr = sanitize_msr(newmsr); vcpu->arch.shregs.msr = newmsr; vcpu->arch.regs.nip += 4; return RESUME_GUEST; /* ignore bit 31, see comment above */ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK): /* check for PR=1 and arch 2.06 bit set in PCR */ if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) { /* generate an illegal instruction interrupt */ kvmppc_core_queue_program(vcpu, SRR1_PROGILL); return RESUME_GUEST; } /* check for TM disabled in the HFSCR or MSR */ if (!(vcpu->arch.hfscr & HFSCR_TM)) { vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE; vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56; vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL; return -1; /* rerun host interrupt handler */ } if (!(msr & MSR_TM)) { /* generate a facility unavailable interrupt */ vcpu->arch.fscr &= ~FSCR_INTR_CAUSE; vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56; kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); return RESUME_GUEST; } /* Set CR0 to indicate previous transactional state */ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29); /* L=1 => tresume, L=0 => tsuspend */ if (instr & (1 << 21)) { if (MSR_TM_SUSPENDED(msr)) msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; } else { if (MSR_TM_TRANSACTIONAL(msr)) msr = (msr & ~MSR_TS_MASK) | MSR_TS_S; } vcpu->arch.shregs.msr = msr; vcpu->arch.regs.nip += 4; return RESUME_GUEST; /* ignore bit 31, see comment above */ case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK): /* check for TM disabled in the HFSCR or MSR */ if (!(vcpu->arch.hfscr & HFSCR_TM)) { vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE; vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56; vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL; return -1; /* rerun host interrupt handler */ } if (!(msr 
& MSR_TM)) { /* generate a facility unavailable interrupt */ vcpu->arch.fscr &= ~FSCR_INTR_CAUSE; vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56; kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); return RESUME_GUEST; } /* If no transaction active, generate TM bad thing */ if (!MSR_TM_ACTIVE(msr)) { kvmppc_core_queue_program(vcpu, SRR1_PROGTM); return RESUME_GUEST; } /* If failure was not previously recorded, recompute TEXASR */ if (!(vcpu->arch.orig_texasr & TEXASR_FS)) { ra = (instr >> 16) & 0x1f; if (ra) ra = kvmppc_get_gpr(vcpu, ra) & 0xff; emulate_tx_failure(vcpu, ra); } copy_from_checkpoint(vcpu); /* Set CR0 to indicate previous transactional state */ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29); vcpu->arch.shregs.msr &= ~MSR_TS_MASK; vcpu->arch.regs.nip += 4; return RESUME_GUEST; /* ignore bit 31, see comment above */ case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK): /* XXX do we need to check for PR=0 here? */ /* check for TM disabled in the HFSCR or MSR */ if (!(vcpu->arch.hfscr & HFSCR_TM)) { vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE; vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56; vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL; return -1; /* rerun host interrupt handler */ } if (!(msr & MSR_TM)) { /* generate a facility unavailable interrupt */ vcpu->arch.fscr &= ~FSCR_INTR_CAUSE; vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56; kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); return RESUME_GUEST; } /* If transaction active or TEXASR[FS] = 0, bad thing */ if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) { kvmppc_core_queue_program(vcpu, SRR1_PROGTM); return RESUME_GUEST; } copy_to_checkpoint(vcpu); /* Set CR0 to indicate previous transactional state */ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29); vcpu->arch.shregs.msr = msr | MSR_TS_S; vcpu->arch.regs.nip += 4; return RESUME_GUEST; } /* What should we do here? We didn't recognize the instruction */ kvmppc_core_queue_program(vcpu, SRR1_PROGILL); pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr); return RESUME_GUEST; }
linux-master
arch/powerpc/kvm/book3s_hv_tm.c
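/*
 * Standalone sketch (not kernel code) of two bit-manipulation idioms used in
 * kvmhv_p9_tm_emulation() above: pulling the RS and L fields out of a 32-bit
 * PowerPC instruction word, and folding a 2-bit MSR[TS] field into the CR0
 * nibble the way the treclaim./tsr. emulation does. The TS_LG value below is
 * a local assumption for illustration, not quoted from kernel headers.
 */
#include <stdint.h>
#include <stdio.h>

#define TS_LG		33			/* assumed bit position of MSR[TS] */
#define TS_MASK		(3ULL << TS_LG)

static unsigned int instr_rs(uint32_t instr)	{ return (instr >> 21) & 0x1f; }
static unsigned int instr_l_bit(uint32_t instr)	{ return (instr >> 21) & 0x1;  }

/* CR0 lives in the top nibble of CCR; bits 30:29 receive the old TS value. */
static uint32_t fold_ts_into_cr0(uint32_t ccr, uint64_t msr)
{
	return (ccr & 0x0fffffff) | (uint32_t)(((msr & TS_MASK) >> TS_LG) << 29);
}

int main(void)
{
	uint32_t instr = 0x7e40051d;		/* arbitrary encoding for demo */
	uint64_t msr_suspended = 1ULL << TS_LG;	/* TS = 0b01 */

	printf("RS field = %u, L bit = %u\n", instr_rs(instr), instr_l_bit(instr));
	printf("CR0 after fold = 0x%08x\n",
	       fold_ts_into_cr0(0xffffffffu, msr_suspended));
	return 0;
}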
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2012 Michael Ellerman, IBM Corporation. */ #include <linux/kernel.h> #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/err.h> #include <linux/uaccess.h> #include <asm/kvm_book3s.h> #include <asm/kvm_ppc.h> #include <asm/hvcall.h> #include <asm/rtas.h> #include <asm/xive.h> #ifdef CONFIG_KVM_XICS static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) { u32 irq, server, priority; int rc; if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) { rc = -3; goto out; } irq = be32_to_cpu(args->args[0]); server = be32_to_cpu(args->args[1]); priority = be32_to_cpu(args->args[2]); if (xics_on_xive()) rc = kvmppc_xive_set_xive(vcpu->kvm, irq, server, priority); else rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority); if (rc) rc = -3; out: args->rets[0] = cpu_to_be32(rc); } static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) { u32 irq, server, priority; int rc; if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) { rc = -3; goto out; } irq = be32_to_cpu(args->args[0]); server = priority = 0; if (xics_on_xive()) rc = kvmppc_xive_get_xive(vcpu->kvm, irq, &server, &priority); else rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority); if (rc) { rc = -3; goto out; } args->rets[1] = cpu_to_be32(server); args->rets[2] = cpu_to_be32(priority); out: args->rets[0] = cpu_to_be32(rc); } static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args) { u32 irq; int rc; if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) { rc = -3; goto out; } irq = be32_to_cpu(args->args[0]); if (xics_on_xive()) rc = kvmppc_xive_int_off(vcpu->kvm, irq); else rc = kvmppc_xics_int_off(vcpu->kvm, irq); if (rc) rc = -3; out: args->rets[0] = cpu_to_be32(rc); } static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args) { u32 irq; int rc; if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) { rc = -3; goto out; } irq = be32_to_cpu(args->args[0]); if (xics_on_xive()) rc = kvmppc_xive_int_on(vcpu->kvm, irq); else rc = kvmppc_xics_int_on(vcpu->kvm, irq); if (rc) rc = -3; out: args->rets[0] = cpu_to_be32(rc); } #endif /* CONFIG_KVM_XICS */ struct rtas_handler { void (*handler)(struct kvm_vcpu *vcpu, struct rtas_args *args); char *name; }; static struct rtas_handler rtas_handlers[] = { #ifdef CONFIG_KVM_XICS { .name = "ibm,set-xive", .handler = kvm_rtas_set_xive }, { .name = "ibm,get-xive", .handler = kvm_rtas_get_xive }, { .name = "ibm,int-off", .handler = kvm_rtas_int_off }, { .name = "ibm,int-on", .handler = kvm_rtas_int_on }, #endif }; struct rtas_token_definition { struct list_head list; struct rtas_handler *handler; u64 token; }; static int rtas_name_matches(char *s1, char *s2) { struct kvm_rtas_token_args args; return !strncmp(s1, s2, sizeof(args.name)); } static int rtas_token_undefine(struct kvm *kvm, char *name) { struct rtas_token_definition *d, *tmp; lockdep_assert_held(&kvm->arch.rtas_token_lock); list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) { if (rtas_name_matches(d->handler->name, name)) { list_del(&d->list); kfree(d); return 0; } } /* It's not an error to undefine an undefined token */ return 0; } static int rtas_token_define(struct kvm *kvm, char *name, u64 token) { struct rtas_token_definition *d; struct rtas_handler *h = NULL; bool found; int i; lockdep_assert_held(&kvm->arch.rtas_token_lock); list_for_each_entry(d, &kvm->arch.rtas_tokens, list) { if (d->token == token) return -EEXIST; } 
found = false; for (i = 0; i < ARRAY_SIZE(rtas_handlers); i++) { h = &rtas_handlers[i]; if (rtas_name_matches(h->name, name)) { found = true; break; } } if (!found) return -ENOENT; d = kzalloc(sizeof(*d), GFP_KERNEL); if (!d) return -ENOMEM; d->handler = h; d->token = token; list_add_tail(&d->list, &kvm->arch.rtas_tokens); return 0; } int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp) { struct kvm_rtas_token_args args; int rc; if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; mutex_lock(&kvm->arch.rtas_token_lock); if (args.token) rc = rtas_token_define(kvm, args.name, args.token); else rc = rtas_token_undefine(kvm, args.name); mutex_unlock(&kvm->arch.rtas_token_lock); return rc; } int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) { struct rtas_token_definition *d; struct rtas_args args; rtas_arg_t *orig_rets; gpa_t args_phys; int rc; /* * r4 contains the guest physical address of the RTAS args * Mask off the top 4 bits since this is a guest real address */ args_phys = kvmppc_get_gpr(vcpu, 4) & KVM_PAM; kvm_vcpu_srcu_read_lock(vcpu); rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args)); kvm_vcpu_srcu_read_unlock(vcpu); if (rc) goto fail; /* * args->rets is a pointer into args->args. Now that we've * copied args we need to fix it up to point into our copy, * not the guest args. We also need to save the original * value so we can restore it on the way out. */ orig_rets = args.rets; if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) { /* * Don't overflow our args array: ensure there is room for * at least rets[0] (even if the call specifies 0 nret). * * Each handler must then check for the correct nargs and nret * values, but they may always return failure in rets[0]. */ rc = -EINVAL; goto fail; } args.rets = &args.args[be32_to_cpu(args.nargs)]; mutex_lock(&vcpu->kvm->arch.rtas_token_lock); rc = -ENOENT; list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) { if (d->token == be32_to_cpu(args.token)) { d->handler->handler(vcpu, &args); rc = 0; break; } } mutex_unlock(&vcpu->kvm->arch.rtas_token_lock); if (rc == 0) { args.rets = orig_rets; rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args)); if (rc) goto fail; } return rc; fail: /* * We only get here if the guest has called RTAS with a bogus * args pointer or nargs/nret values that would overflow the * array. That means we can't get to the args, and so we can't * fail the RTAS call. So fail right out to userspace, which * should kill the guest. * * SLOF should actually pass the hcall return value from the * rtas handler call in r3, so enter_rtas could be modified to * return a failure indication in r3 and we could return such * errors to the guest rather than failing to host userspace. * However old guests that don't test for failure could then * continue silently after errors, so for now we won't do this. */ return rc; } EXPORT_SYMBOL_GPL(kvmppc_rtas_hcall); void kvmppc_rtas_tokens_free(struct kvm *kvm) { struct rtas_token_definition *d, *tmp; list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) { list_del(&d->list); kfree(d); } }
linux-master
arch/powerpc/kvm/book3s_rtas.c
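/*
 * Standalone sketch (not kernel code) of the RTAS argument-buffer handling in
 * kvmppc_rtas_hcall() above: the buffer is made of big-endian 32-bit cells,
 * nargs is validated against the array size so that at least rets[0] fits,
 * and rets is made to point just past the last input argument. htobe32() and
 * be32toh() are the glibc <endian.h> helpers; mock_rtas_args is an
 * illustrative layout, not the exact kernel struct.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define MOCK_RTAS_CELLS 16

struct mock_rtas_args {
	uint32_t token;
	uint32_t nargs;
	uint32_t nret;
	uint32_t args[MOCK_RTAS_CELLS];	/* inputs first, then return cells */
	uint32_t *rets;			/* points into args[] */
};

static int mock_fixup_rets(struct mock_rtas_args *a)
{
	uint32_t nargs = be32toh(a->nargs);

	/* Leave room for at least rets[0], mirroring the overflow check above. */
	if (nargs >= MOCK_RTAS_CELLS)
		return -1;
	a->rets = &a->args[nargs];
	return 0;
}

int main(void)
{
	struct mock_rtas_args a = { 0 };

	a.token = htobe32(42);
	a.nargs = htobe32(3);		/* e.g. ibm,set-xive takes 3 inputs */
	a.nret  = htobe32(1);
	a.args[0] = htobe32(0x1000);	/* irq */
	a.args[1] = htobe32(1);		/* server */
	a.args[2] = htobe32(5);		/* priority */

	if (mock_fixup_rets(&a) == 0) {
		a.rets[0] = htobe32(0);	/* RTAS success */
		printf("rets starts at args[%u], rc = %d\n",
		       be32toh(a.nargs), (int32_t)be32toh(a.rets[0]));
	}
	return 0;
}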
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. * * Author: Yu Liu, <[email protected]> * * Description: * This file is derived from arch/powerpc/kvm/44x.c, * by Hollis Blanchard <[email protected]>. */ #include <linux/kvm_host.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/export.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <asm/reg.h> #include <asm/cputable.h> #include <asm/kvm_ppc.h> #include "../mm/mmu_decl.h" #include "booke.h" #include "e500.h" struct id { unsigned long val; struct id **pentry; }; #define NUM_TIDS 256 /* * This table provide mappings from: * (guestAS,guestTID,guestPR) --> ID of physical cpu * guestAS [0..1] * guestTID [0..255] * guestPR [0..1] * ID [1..255] * Each vcpu keeps one vcpu_id_table. */ struct vcpu_id_table { struct id id[2][NUM_TIDS][2]; }; /* * This table provide reversed mappings of vcpu_id_table: * ID --> address of vcpu_id_table item. * Each physical core has one pcpu_id_table. */ struct pcpu_id_table { struct id *entry[NUM_TIDS]; }; static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids); /* This variable keeps last used shadow ID on local core. * The valid range of shadow ID is [1..255] */ static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid); /* * Allocate a free shadow id and setup a valid sid mapping in given entry. * A mapping is only valid when vcpu_id_table and pcpu_id_table are match. * * The caller must have preemption disabled, and keep it that way until * it has finished with the returned shadow id (either written into the * TLB or arch.shadow_pid, or discarded). */ static inline int local_sid_setup_one(struct id *entry) { unsigned long sid; int ret = -1; sid = __this_cpu_inc_return(pcpu_last_used_sid); if (sid < NUM_TIDS) { __this_cpu_write(pcpu_sids.entry[sid], entry); entry->val = sid; entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]); ret = sid; } /* * If sid == NUM_TIDS, we've run out of sids. We return -1, and * the caller will invalidate everything and start over. * * sid > NUM_TIDS indicates a race, which we disable preemption to * avoid. */ WARN_ON(sid > NUM_TIDS); return ret; } /* * Check if given entry contain a valid shadow id mapping. * An ID mapping is considered valid only if * both vcpu and pcpu know this mapping. * * The caller must have preemption disabled, and keep it that way until * it has finished with the returned shadow id (either written into the * TLB or arch.shadow_pid, or discarded). */ static inline int local_sid_lookup(struct id *entry) { if (entry && entry->val != 0 && __this_cpu_read(pcpu_sids.entry[entry->val]) == entry && entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val])) return entry->val; return -1; } /* Invalidate all id mappings on local core -- call with preempt disabled */ static inline void local_sid_destroy_all(void) { __this_cpu_write(pcpu_last_used_sid, 0); memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids)); } static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) { vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL); return vcpu_e500->idt; } static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500) { kfree(vcpu_e500->idt); vcpu_e500->idt = NULL; } /* Map guest pid to shadow. * We use PID to keep shadow of current guest non-zero PID, * and use PID1 to keep shadow of guest zero PID. 
* So that guest tlbe with TID=0 can be accessed at any time */ static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500) { preempt_disable(); vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, get_cur_as(&vcpu_e500->vcpu), get_cur_pid(&vcpu_e500->vcpu), get_cur_pr(&vcpu_e500->vcpu), 1); vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500, get_cur_as(&vcpu_e500->vcpu), 0, get_cur_pr(&vcpu_e500->vcpu), 1); preempt_enable(); } /* Invalidate all mappings on vcpu */ static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500) { memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table)); /* Update shadow pid when mappings are changed */ kvmppc_e500_recalc_shadow_pid(vcpu_e500); } /* Invalidate one ID mapping on vcpu */ static inline void kvmppc_e500_id_table_reset_one( struct kvmppc_vcpu_e500 *vcpu_e500, int as, int pid, int pr) { struct vcpu_id_table *idt = vcpu_e500->idt; BUG_ON(as >= 2); BUG_ON(pid >= NUM_TIDS); BUG_ON(pr >= 2); idt->id[as][pid][pr].val = 0; idt->id[as][pid][pr].pentry = NULL; /* Update shadow pid when mappings are changed */ kvmppc_e500_recalc_shadow_pid(vcpu_e500); } /* * Map guest (vcpu,AS,ID,PR) to physical core shadow id. * This function first lookup if a valid mapping exists, * if not, then creates a new one. * * The caller must have preemption disabled, and keep it that way until * it has finished with the returned shadow id (either written into the * TLB or arch.shadow_pid, or discarded). */ unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500, unsigned int as, unsigned int gid, unsigned int pr, int avoid_recursion) { struct vcpu_id_table *idt = vcpu_e500->idt; int sid; BUG_ON(as >= 2); BUG_ON(gid >= NUM_TIDS); BUG_ON(pr >= 2); sid = local_sid_lookup(&idt->id[as][gid][pr]); while (sid <= 0) { /* No mapping yet */ sid = local_sid_setup_one(&idt->id[as][gid][pr]); if (sid <= 0) { _tlbil_all(); local_sid_destroy_all(); } /* Update shadow pid when mappings are changed */ if (!avoid_recursion) kvmppc_e500_recalc_shadow_pid(vcpu_e500); } return sid; } unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu, struct kvm_book3e_206_tlb_entry *gtlbe) { return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe), get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0); } void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); if (vcpu->arch.pid != pid) { vcpu_e500->pid[0] = vcpu->arch.pid = pid; kvmppc_e500_recalc_shadow_pid(vcpu_e500); } } /* gtlbe must not be mapped by more than one host tlbe */ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, struct kvm_book3e_206_tlb_entry *gtlbe) { struct vcpu_id_table *idt = vcpu_e500->idt; unsigned int pr, tid, ts; int pid; u32 val, eaddr; unsigned long flags; ts = get_tlb_ts(gtlbe); tid = get_tlb_tid(gtlbe); preempt_disable(); /* One guest ID may be mapped to two shadow IDs */ for (pr = 0; pr < 2; pr++) { /* * The shadow PID can have a valid mapping on at most one * host CPU. In the common case, it will be valid on this * CPU, in which case we do a local invalidation of the * specific address. * * If the shadow PID is not valid on the current host CPU, * we invalidate the entire shadow PID. */ pid = local_sid_lookup(&idt->id[ts][tid][pr]); if (pid <= 0) { kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr); continue; } /* * The guest is invalidating a 4K entry which is in a PID * that has a valid shadow mapping on this host CPU. 
We * search host TLB to invalidate it's shadow TLB entry, * similar to __tlbil_va except that we need to look in AS1. */ val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS; eaddr = get_tlb_eaddr(gtlbe); local_irq_save(flags); mtspr(SPRN_MAS6, val); asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr)); val = mfspr(SPRN_MAS1); if (val & MAS1_VALID) { mtspr(SPRN_MAS1, val & ~MAS1_VALID); asm volatile("tlbwe"); } local_irq_restore(flags); } preempt_enable(); } void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500) { kvmppc_e500_id_table_reset_all(vcpu_e500); } void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) { /* Recalc shadow pid since MSR changes */ kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); } static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu) { kvmppc_booke_vcpu_load(vcpu, cpu); /* Shadow PID may be expired on local core */ kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); } static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu) { #ifdef CONFIG_SPE if (vcpu->arch.shadow_msr & MSR_SPE) kvmppc_vcpu_disable_spe(vcpu); #endif kvmppc_booke_vcpu_put(vcpu); } static int kvmppc_e500_check_processor_compat(void) { int r; if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0) r = 0; else r = -ENOTSUPP; return r; } static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) { struct kvm_book3e_206_tlb_entry *tlbe; /* Insert large initial mapping for guest. */ tlbe = get_entry(vcpu_e500, 1, 0); tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M); tlbe->mas2 = 0; tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK; /* 4K map for serial output. Used by kernel wrapper. */ tlbe = get_entry(vcpu_e500, 1, 1); tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K); tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; } int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); kvmppc_e500_tlb_setup(vcpu_e500); /* Registers init */ vcpu->arch.pvr = mfspr(SPRN_PVR); vcpu_e500->svr = mfspr(SPRN_SVR); vcpu->arch.cpu_type = KVM_CPU_E500V2; return 0; } static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE | KVM_SREGS_E_PM; sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL; sregs->u.e.impl.fsl.features = 0; sregs->u.e.impl.fsl.svr = vcpu_e500->svr; sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; sregs->u.e.ivor_high[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; kvmppc_get_sregs_ivor(vcpu, sregs); kvmppc_get_sregs_e500_tlb(vcpu, sregs); return 0; } static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int ret; if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { vcpu_e500->svr = sregs->u.e.impl.fsl.svr; vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0; vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; } ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs); if (ret < 0) return ret; if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) return 0; if (sregs->u.e.features & KVM_SREGS_E_SPE) { vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = sregs->u.e.ivor_high[0]; vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = 
sregs->u.e.ivor_high[1]; vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = sregs->u.e.ivor_high[2]; } if (sregs->u.e.features & KVM_SREGS_E_PM) { vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = sregs->u.e.ivor_high[3]; } return kvmppc_set_sregs_ivor(vcpu, sregs); } static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); return r; } static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); return r; } static int kvmppc_core_vcpu_create_e500(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500; int err; BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0); vcpu_e500 = to_e500(vcpu); if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) return -ENOMEM; err = kvmppc_e500_tlb_init(vcpu_e500); if (err) goto uninit_id; vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); if (!vcpu->arch.shared) { err = -ENOMEM; goto uninit_tlb; } return 0; uninit_tlb: kvmppc_e500_tlb_uninit(vcpu_e500); uninit_id: kvmppc_e500_id_table_free(vcpu_e500); return err; } static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); free_page((unsigned long)vcpu->arch.shared); kvmppc_e500_tlb_uninit(vcpu_e500); kvmppc_e500_id_table_free(vcpu_e500); } static int kvmppc_core_init_vm_e500(struct kvm *kvm) { return 0; } static void kvmppc_core_destroy_vm_e500(struct kvm *kvm) { } static struct kvmppc_ops kvm_ops_e500 = { .get_sregs = kvmppc_core_get_sregs_e500, .set_sregs = kvmppc_core_set_sregs_e500, .get_one_reg = kvmppc_get_one_reg_e500, .set_one_reg = kvmppc_set_one_reg_e500, .vcpu_load = kvmppc_core_vcpu_load_e500, .vcpu_put = kvmppc_core_vcpu_put_e500, .vcpu_create = kvmppc_core_vcpu_create_e500, .vcpu_free = kvmppc_core_vcpu_free_e500, .init_vm = kvmppc_core_init_vm_e500, .destroy_vm = kvmppc_core_destroy_vm_e500, .emulate_op = kvmppc_core_emulate_op_e500, .emulate_mtspr = kvmppc_core_emulate_mtspr_e500, .emulate_mfspr = kvmppc_core_emulate_mfspr_e500, .create_vcpu_debugfs = kvmppc_create_vcpu_debugfs_e500, }; static int __init kvmppc_e500_init(void) { int r, i; unsigned long ivor[3]; /* Process remaining handlers above the generic first 16 */ unsigned long *handler = &kvmppc_booke_handler_addr[16]; unsigned long handler_len; unsigned long max_ivor = 0; r = kvmppc_e500_check_processor_compat(); if (r) goto err_out; r = kvmppc_booke_init(); if (r) goto err_out; /* copy extra E500 exception handlers */ ivor[0] = mfspr(SPRN_IVOR32); ivor[1] = mfspr(SPRN_IVOR33); ivor[2] = mfspr(SPRN_IVOR34); for (i = 0; i < 3; i++) { if (ivor[i] > ivor[max_ivor]) max_ivor = i; handler_len = handler[i + 1] - handler[i]; memcpy((void *)kvmppc_booke_handlers + ivor[i], (void *)handler[i], handler_len); } handler_len = handler[max_ivor + 1] - handler[max_ivor]; flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + ivor[max_ivor] + handler_len); r = kvm_init(sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); if (r) goto err_out; kvm_ops_e500.owner = THIS_MODULE; kvmppc_pr_ops = &kvm_ops_e500; err_out: return r; } static void __exit kvmppc_e500_exit(void) { kvmppc_pr_ops = NULL; kvmppc_booke_exit(); } module_init(kvmppc_e500_init); module_exit(kvmppc_e500_exit); MODULE_ALIAS_MISCDEV(KVM_MINOR); MODULE_ALIAS("devname:kvm");
linux-master
arch/powerpc/kvm/e500.c
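/*
 * Standalone, single-threaded sketch (not kernel code) of the shadow-ID
 * scheme in e500.c above: IDs 1..NUM_IDS-1 are handed out sequentially from a
 * per-core counter, a mapping counts as valid only while the per-core reverse
 * table still points back at the owning entry, and running out of IDs
 * invalidates everything and starts over. Plain arrays stand in for the
 * per-CPU variables, and the TLB flush is reduced to a comment.
 */
#include <stdio.h>
#include <string.h>

#define NUM_IDS 256

struct id_entry { unsigned long val; struct id_entry **pentry; };

static struct id_entry *reverse[NUM_IDS];	/* pcpu_sids analogue */
static unsigned long last_used;			/* pcpu_last_used_sid analogue */

static void destroy_all(void)
{
	last_used = 0;
	memset(reverse, 0, sizeof(reverse));
}

static int lookup(struct id_entry *e)
{
	if (e->val != 0 && reverse[e->val] == e && e->pentry == &reverse[e->val])
		return (int)e->val;
	return -1;
}

static int setup_one(struct id_entry *e)
{
	unsigned long sid = ++last_used;

	if (sid >= NUM_IDS)
		return -1;		/* exhausted: caller flushes and retries */
	reverse[sid] = e;
	e->val = sid;
	e->pentry = &reverse[sid];
	return (int)sid;
}

static int get_sid(struct id_entry *e)
{
	int sid = lookup(e);

	while (sid <= 0) {
		sid = setup_one(e);
		if (sid <= 0)
			destroy_all();	/* the kernel also does _tlbil_all() here */
	}
	return sid;
}

int main(void)
{
	struct id_entry guest_pid3 = { 0 }, guest_pid7 = { 0 };

	printf("pid 3 -> sid %d\n", get_sid(&guest_pid3));
	printf("pid 7 -> sid %d\n", get_sid(&guest_pid7));
	printf("pid 3 again -> sid %d (cached)\n", get_sid(&guest_pid3));
	destroy_all();
	printf("after flush, pid 3 -> sid %d\n", get_sid(&guest_pid3));
	return 0;
}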
// SPDX-License-Identifier: GPL-2.0 /* * Copyright IBM Corporation, 2018 * Authors Suraj Jitindar Singh <[email protected]> * Paul Mackerras <[email protected]> * * Description: KVM functions specific to running nested KVM-HV guests * on Book3S processors (specifically POWER9 and later). */ #include <linux/kernel.h> #include <linux/kvm_host.h> #include <linux/llist.h> #include <linux/pgtable.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/mmu.h> #include <asm/pgalloc.h> #include <asm/pte-walk.h> #include <asm/reg.h> #include <asm/plpar_wrappers.h> #include <asm/firmware.h> static struct patb_entry *pseries_partition_tb; static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp); static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free); void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) { struct kvmppc_vcore *vc = vcpu->arch.vcore; hr->pcr = vc->pcr | PCR_MASK; hr->dpdes = vc->dpdes; hr->hfscr = vcpu->arch.hfscr; hr->tb_offset = vc->tb_offset; hr->dawr0 = vcpu->arch.dawr0; hr->dawrx0 = vcpu->arch.dawrx0; hr->ciabr = vcpu->arch.ciabr; hr->purr = vcpu->arch.purr; hr->spurr = vcpu->arch.spurr; hr->ic = vcpu->arch.ic; hr->vtb = vc->vtb; hr->srr0 = vcpu->arch.shregs.srr0; hr->srr1 = vcpu->arch.shregs.srr1; hr->sprg[0] = vcpu->arch.shregs.sprg0; hr->sprg[1] = vcpu->arch.shregs.sprg1; hr->sprg[2] = vcpu->arch.shregs.sprg2; hr->sprg[3] = vcpu->arch.shregs.sprg3; hr->pidr = vcpu->arch.pid; hr->cfar = vcpu->arch.cfar; hr->ppr = vcpu->arch.ppr; hr->dawr1 = vcpu->arch.dawr1; hr->dawrx1 = vcpu->arch.dawrx1; } /* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */ static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs) { unsigned long *addr = (unsigned long *) regs; for (; addr < ((unsigned long *) (regs + 1)); addr++) *addr = swab64(*addr); } static void byteswap_hv_regs(struct hv_guest_state *hr) { hr->version = swab64(hr->version); hr->lpid = swab32(hr->lpid); hr->vcpu_token = swab32(hr->vcpu_token); hr->lpcr = swab64(hr->lpcr); hr->pcr = swab64(hr->pcr) | PCR_MASK; hr->amor = swab64(hr->amor); hr->dpdes = swab64(hr->dpdes); hr->hfscr = swab64(hr->hfscr); hr->tb_offset = swab64(hr->tb_offset); hr->dawr0 = swab64(hr->dawr0); hr->dawrx0 = swab64(hr->dawrx0); hr->ciabr = swab64(hr->ciabr); hr->hdec_expiry = swab64(hr->hdec_expiry); hr->purr = swab64(hr->purr); hr->spurr = swab64(hr->spurr); hr->ic = swab64(hr->ic); hr->vtb = swab64(hr->vtb); hr->hdar = swab64(hr->hdar); hr->hdsisr = swab64(hr->hdsisr); hr->heir = swab64(hr->heir); hr->asdr = swab64(hr->asdr); hr->srr0 = swab64(hr->srr0); hr->srr1 = swab64(hr->srr1); hr->sprg[0] = swab64(hr->sprg[0]); hr->sprg[1] = swab64(hr->sprg[1]); hr->sprg[2] = swab64(hr->sprg[2]); hr->sprg[3] = swab64(hr->sprg[3]); hr->pidr = swab64(hr->pidr); hr->cfar = swab64(hr->cfar); hr->ppr = swab64(hr->ppr); hr->dawr1 = swab64(hr->dawr1); hr->dawrx1 = swab64(hr->dawrx1); } static void save_hv_return_state(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) { struct kvmppc_vcore *vc = vcpu->arch.vcore; hr->dpdes = vc->dpdes; hr->purr = vcpu->arch.purr; hr->spurr = vcpu->arch.spurr; hr->ic = vcpu->arch.ic; hr->vtb = vc->vtb; hr->srr0 = vcpu->arch.shregs.srr0; hr->srr1 = vcpu->arch.shregs.srr1; hr->sprg[0] = vcpu->arch.shregs.sprg0; hr->sprg[1] = vcpu->arch.shregs.sprg1; hr->sprg[2] = vcpu->arch.shregs.sprg2; hr->sprg[3] = vcpu->arch.shregs.sprg3; hr->pidr = vcpu->arch.pid; hr->cfar = vcpu->arch.cfar; hr->ppr = vcpu->arch.ppr; switch (vcpu->arch.trap) { case 
BOOK3S_INTERRUPT_H_DATA_STORAGE: hr->hdar = vcpu->arch.fault_dar; hr->hdsisr = vcpu->arch.fault_dsisr; hr->asdr = vcpu->arch.fault_gpa; break; case BOOK3S_INTERRUPT_H_INST_STORAGE: hr->asdr = vcpu->arch.fault_gpa; break; case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: hr->hfscr = ((~HFSCR_INTR_CAUSE & hr->hfscr) | (HFSCR_INTR_CAUSE & vcpu->arch.hfscr)); break; case BOOK3S_INTERRUPT_H_EMUL_ASSIST: hr->heir = vcpu->arch.emul_inst; break; } } static void restore_hv_regs(struct kvm_vcpu *vcpu, const struct hv_guest_state *hr) { struct kvmppc_vcore *vc = vcpu->arch.vcore; vc->pcr = hr->pcr | PCR_MASK; vc->dpdes = hr->dpdes; vcpu->arch.hfscr = hr->hfscr; vcpu->arch.dawr0 = hr->dawr0; vcpu->arch.dawrx0 = hr->dawrx0; vcpu->arch.ciabr = hr->ciabr; vcpu->arch.purr = hr->purr; vcpu->arch.spurr = hr->spurr; vcpu->arch.ic = hr->ic; vc->vtb = hr->vtb; vcpu->arch.shregs.srr0 = hr->srr0; vcpu->arch.shregs.srr1 = hr->srr1; vcpu->arch.shregs.sprg0 = hr->sprg[0]; vcpu->arch.shregs.sprg1 = hr->sprg[1]; vcpu->arch.shregs.sprg2 = hr->sprg[2]; vcpu->arch.shregs.sprg3 = hr->sprg[3]; vcpu->arch.pid = hr->pidr; vcpu->arch.cfar = hr->cfar; vcpu->arch.ppr = hr->ppr; vcpu->arch.dawr1 = hr->dawr1; vcpu->arch.dawrx1 = hr->dawrx1; } void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) { struct kvmppc_vcore *vc = vcpu->arch.vcore; vc->dpdes = hr->dpdes; vcpu->arch.hfscr = hr->hfscr; vcpu->arch.purr = hr->purr; vcpu->arch.spurr = hr->spurr; vcpu->arch.ic = hr->ic; vc->vtb = hr->vtb; vcpu->arch.fault_dar = hr->hdar; vcpu->arch.fault_dsisr = hr->hdsisr; vcpu->arch.fault_gpa = hr->asdr; vcpu->arch.emul_inst = hr->heir; vcpu->arch.shregs.srr0 = hr->srr0; vcpu->arch.shregs.srr1 = hr->srr1; vcpu->arch.shregs.sprg0 = hr->sprg[0]; vcpu->arch.shregs.sprg1 = hr->sprg[1]; vcpu->arch.shregs.sprg2 = hr->sprg[2]; vcpu->arch.shregs.sprg3 = hr->sprg[3]; vcpu->arch.pid = hr->pidr; vcpu->arch.cfar = hr->cfar; vcpu->arch.ppr = hr->ppr; } static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr) { /* No need to reflect the page fault to L1, we've handled it */ vcpu->arch.trap = 0; /* * Since the L2 gprs have already been written back into L1 memory when * we complete the mmio, store the L1 memory location of the L2 gpr * being loaded into by the mmio so that the loaded value can be * written there in kvmppc_complete_mmio_load() */ if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR) && (vcpu->mmio_is_write == 0)) { vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr + offsetof(struct pt_regs, gpr[vcpu->arch.io_gpr]); vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR; } } static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *l2_hv, struct pt_regs *l2_regs, u64 hv_ptr, u64 regs_ptr) { int size; if (kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv->version, sizeof(l2_hv->version))) return -1; if (kvmppc_need_byteswap(vcpu)) l2_hv->version = swab64(l2_hv->version); size = hv_guest_state_size(l2_hv->version); if (size < 0) return -1; return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) || kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs, sizeof(struct pt_regs)); } static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *l2_hv, struct pt_regs *l2_regs, u64 hv_ptr, u64 regs_ptr) { int size; size = hv_guest_state_size(l2_hv->version); if (size < 0) return -1; return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) || kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs, sizeof(struct pt_regs)); } static void load_l2_hv_regs(struct kvm_vcpu *vcpu, const struct 
hv_guest_state *l2_hv, const struct hv_guest_state *l1_hv, u64 *lpcr) { struct kvmppc_vcore *vc = vcpu->arch.vcore; u64 mask; restore_hv_regs(vcpu, l2_hv); /* * Don't let L1 change LPCR bits for the L2 except these: */ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_MER; /* * Additional filtering is required depending on hardware * and configuration. */ *lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm, (vc->lpcr & ~mask) | (*lpcr & mask)); /* * Don't let L1 enable features for L2 which we don't allow for L1, * but preserve the interrupt cause field. */ vcpu->arch.hfscr = l2_hv->hfscr & (HFSCR_INTR_CAUSE | vcpu->arch.hfscr_permitted); /* Don't let data address watchpoint match in hypervisor state */ vcpu->arch.dawrx0 = l2_hv->dawrx0 & ~DAWRX_HYP; vcpu->arch.dawrx1 = l2_hv->dawrx1 & ~DAWRX_HYP; /* Don't let completed instruction address breakpt match in HV state */ if ((l2_hv->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) vcpu->arch.ciabr = l2_hv->ciabr & ~CIABR_PRIV; } long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) { long int err, r; struct kvm_nested_guest *l2; struct pt_regs l2_regs, saved_l1_regs; struct hv_guest_state l2_hv = {0}, saved_l1_hv; struct kvmppc_vcore *vc = vcpu->arch.vcore; u64 hv_ptr, regs_ptr; u64 hdec_exp, lpcr; s64 delta_purr, delta_spurr, delta_ic, delta_vtb; if (vcpu->kvm->arch.l1_ptcr == 0) return H_NOT_AVAILABLE; if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr)) return H_BAD_MODE; /* copy parameters in */ hv_ptr = kvmppc_get_gpr(vcpu, 4); regs_ptr = kvmppc_get_gpr(vcpu, 5); kvm_vcpu_srcu_read_lock(vcpu); err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs, hv_ptr, regs_ptr); kvm_vcpu_srcu_read_unlock(vcpu); if (err) return H_PARAMETER; if (kvmppc_need_byteswap(vcpu)) byteswap_hv_regs(&l2_hv); if (l2_hv.version > HV_GUEST_STATE_VERSION) return H_P2; if (kvmppc_need_byteswap(vcpu)) byteswap_pt_regs(&l2_regs); if (l2_hv.vcpu_token >= NR_CPUS) return H_PARAMETER; /* * L1 must have set up a suspended state to enter the L2 in a * transactional state, and only in that case. These have to be * filtered out here to prevent causing a TM Bad Thing in the * host HRFID. We could synthesize a TM Bad Thing back to the L1 * here but there doesn't seem like much point. */ if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) { if (!MSR_TM_ACTIVE(l2_regs.msr)) return H_BAD_MODE; } else { if (l2_regs.msr & MSR_TS_MASK) return H_BAD_MODE; if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK)) return H_BAD_MODE; } /* translate lpid */ l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true); if (!l2) return H_PARAMETER; if (!l2->l1_gr_to_hr) { mutex_lock(&l2->tlb_lock); kvmhv_update_ptbl_cache(l2); mutex_unlock(&l2->tlb_lock); } /* save l1 values of things */ vcpu->arch.regs.msr = vcpu->arch.shregs.msr; saved_l1_regs = vcpu->arch.regs; kvmhv_save_hv_regs(vcpu, &saved_l1_hv); /* convert TB values/offsets to host (L0) values */ hdec_exp = l2_hv.hdec_expiry - vc->tb_offset; vc->tb_offset += l2_hv.tb_offset; vcpu->arch.dec_expires += l2_hv.tb_offset; /* set L1 state to L2 state */ vcpu->arch.nested = l2; vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token; vcpu->arch.nested_hfscr = l2_hv.hfscr; vcpu->arch.regs = l2_regs; /* Guest must always run with ME enabled, HV disabled. 
*/ vcpu->arch.shregs.msr = (vcpu->arch.regs.msr | MSR_ME) & ~MSR_HV; lpcr = l2_hv.lpcr; load_l2_hv_regs(vcpu, &l2_hv, &saved_l1_hv, &lpcr); vcpu->arch.ret = RESUME_GUEST; vcpu->arch.trap = 0; do { r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr); } while (is_kvmppc_resume_guest(r)); /* save L2 state for return */ l2_regs = vcpu->arch.regs; l2_regs.msr = vcpu->arch.shregs.msr; delta_purr = vcpu->arch.purr - l2_hv.purr; delta_spurr = vcpu->arch.spurr - l2_hv.spurr; delta_ic = vcpu->arch.ic - l2_hv.ic; delta_vtb = vc->vtb - l2_hv.vtb; save_hv_return_state(vcpu, &l2_hv); /* restore L1 state */ vcpu->arch.nested = NULL; vcpu->arch.regs = saved_l1_regs; vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK; /* set L1 MSR TS field according to L2 transaction state */ if (l2_regs.msr & MSR_TS_MASK) vcpu->arch.shregs.msr |= MSR_TS_S; vc->tb_offset = saved_l1_hv.tb_offset; /* XXX: is this always the same delta as saved_l1_hv.tb_offset? */ vcpu->arch.dec_expires -= l2_hv.tb_offset; restore_hv_regs(vcpu, &saved_l1_hv); vcpu->arch.purr += delta_purr; vcpu->arch.spurr += delta_spurr; vcpu->arch.ic += delta_ic; vc->vtb += delta_vtb; kvmhv_put_nested(l2); /* copy l2_hv_state and regs back to guest */ if (kvmppc_need_byteswap(vcpu)) { byteswap_hv_regs(&l2_hv); byteswap_pt_regs(&l2_regs); } kvm_vcpu_srcu_read_lock(vcpu); err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs, hv_ptr, regs_ptr); kvm_vcpu_srcu_read_unlock(vcpu); if (err) return H_AUTHORITY; if (r == -EINTR) return H_INTERRUPT; if (vcpu->mmio_needed) { kvmhv_nested_mmio_needed(vcpu, regs_ptr); return H_TOO_HARD; } return vcpu->arch.trap; } long kvmhv_nested_init(void) { long int ptb_order; unsigned long ptcr; long rc; if (!kvmhv_on_pseries()) return 0; if (!radix_enabled()) return -ENODEV; /* Partition table entry is 1<<4 bytes in size, hence the 4. */ ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4; /* Minimum partition table size is 1<<12 bytes */ if (ptb_order < 12) ptb_order = 12; pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order, GFP_KERNEL); if (!pseries_partition_tb) { pr_err("kvm-hv: failed to allocated nested partition table\n"); return -ENOMEM; } ptcr = __pa(pseries_partition_tb) | (ptb_order - 12); rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr); if (rc != H_SUCCESS) { pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n", rc); kfree(pseries_partition_tb); pseries_partition_tb = NULL; return -ENODEV; } return 0; } void kvmhv_nested_exit(void) { /* * N.B. the kvmhv_on_pseries() test is there because it enables * the compiler to remove the call to plpar_hcall_norets() * when CONFIG_PPC_PSERIES=n. 
*/ if (kvmhv_on_pseries() && pseries_partition_tb) { plpar_hcall_norets(H_SET_PARTITION_TABLE, 0); kfree(pseries_partition_tb); pseries_partition_tb = NULL; } } static void kvmhv_flush_lpid(unsigned int lpid) { long rc; if (!kvmhv_on_pseries()) { radix__flush_all_lpid(lpid); return; } if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1), lpid, TLBIEL_INVAL_SET_LPID); else rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU, H_RPTI_TYPE_NESTED | H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | H_RPTI_TYPE_PAT, H_RPTI_PAGE_ALL, 0, -1UL); if (rc) pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc); } void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1) { if (!kvmhv_on_pseries()) { mmu_partition_table_set_entry(lpid, dw0, dw1, true); return; } pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0); pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1); /* L0 will do the necessary barriers */ kvmhv_flush_lpid(lpid); } static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp) { unsigned long dw0; dw0 = PATB_HR | radix__get_tree_size() | __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE; kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table); } /* * Handle the H_SET_PARTITION_TABLE hcall. * r4 = guest real address of partition table + log_2(size) - 12 * (formatted as for the PTCR). */ long kvmhv_set_partition_table(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; unsigned long ptcr = kvmppc_get_gpr(vcpu, 4); int srcu_idx; long ret = H_SUCCESS; srcu_idx = srcu_read_lock(&kvm->srcu); /* Check partition size and base address. */ if ((ptcr & PRTS_MASK) + 12 - 4 > KVM_MAX_NESTED_GUESTS_SHIFT || !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT)) ret = H_PARAMETER; srcu_read_unlock(&kvm->srcu, srcu_idx); if (ret == H_SUCCESS) kvm->arch.l1_ptcr = ptcr; return ret; } /* * Handle the H_COPY_TOFROM_GUEST hcall. 
* r4 = L1 lpid of nested guest * r5 = pid * r6 = eaddr to access * r7 = to buffer (L1 gpa) * r8 = from buffer (L1 gpa) * r9 = n bytes to copy */ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu) { struct kvm_nested_guest *gp; int l1_lpid = kvmppc_get_gpr(vcpu, 4); int pid = kvmppc_get_gpr(vcpu, 5); gva_t eaddr = kvmppc_get_gpr(vcpu, 6); gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7); gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8); void *buf; unsigned long n = kvmppc_get_gpr(vcpu, 9); bool is_load = !!gp_to; long rc; if (gp_to && gp_from) /* One must be NULL to determine the direction */ return H_PARAMETER; if (eaddr & (0xFFFUL << 52)) return H_PARAMETER; buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN); if (!buf) return H_NO_MEM; gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false); if (!gp) { rc = H_PARAMETER; goto out_free; } mutex_lock(&gp->tlb_lock); if (is_load) { /* Load from the nested guest into our buffer */ rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid, eaddr, buf, NULL, n); if (rc) goto not_found; /* Write what was loaded into our buffer back to the L1 guest */ kvm_vcpu_srcu_read_lock(vcpu); rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n); kvm_vcpu_srcu_read_unlock(vcpu); if (rc) goto not_found; } else { /* Load the data to be stored from the L1 guest into our buf */ kvm_vcpu_srcu_read_lock(vcpu); rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n); kvm_vcpu_srcu_read_unlock(vcpu); if (rc) goto not_found; /* Store from our buffer into the nested guest */ rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid, eaddr, NULL, buf, n); if (rc) goto not_found; } out_unlock: mutex_unlock(&gp->tlb_lock); kvmhv_put_nested(gp); out_free: kfree(buf); return rc; not_found: rc = H_NOT_FOUND; goto out_unlock; } /* * Reload the partition table entry for a guest. * Caller must hold gp->tlb_lock. 
*/ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp) { int ret; struct patb_entry ptbl_entry; unsigned long ptbl_addr; struct kvm *kvm = gp->l1_host; ret = -EFAULT; ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4); if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) { int srcu_idx = srcu_read_lock(&kvm->srcu); ret = kvm_read_guest(kvm, ptbl_addr, &ptbl_entry, sizeof(ptbl_entry)); srcu_read_unlock(&kvm->srcu, srcu_idx); } if (ret) { gp->l1_gr_to_hr = 0; gp->process_table = 0; } else { gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0); gp->process_table = be64_to_cpu(ptbl_entry.patb1); } kvmhv_set_nested_ptbl(gp); } void kvmhv_vm_nested_init(struct kvm *kvm) { idr_init(&kvm->arch.kvm_nested_guest_idr); } static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid) { return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid); } static bool __prealloc_nested(struct kvm *kvm, int lpid) { if (idr_alloc(&kvm->arch.kvm_nested_guest_idr, NULL, lpid, lpid + 1, GFP_KERNEL) != lpid) return false; return true; } static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp) { if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid)) WARN_ON(1); } static void __remove_nested(struct kvm *kvm, int lpid) { idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid); } static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid) { struct kvm_nested_guest *gp; long shadow_lpid; gp = kzalloc(sizeof(*gp), GFP_KERNEL); if (!gp) return NULL; gp->l1_host = kvm; gp->l1_lpid = lpid; mutex_init(&gp->tlb_lock); gp->shadow_pgtable = pgd_alloc(kvm->mm); if (!gp->shadow_pgtable) goto out_free; shadow_lpid = kvmppc_alloc_lpid(); if (shadow_lpid < 0) goto out_free2; gp->shadow_lpid = shadow_lpid; gp->radix = 1; memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu)); return gp; out_free2: pgd_free(kvm->mm, gp->shadow_pgtable); out_free: kfree(gp); return NULL; } /* * Free up any resources allocated for a nested guest. */ static void kvmhv_release_nested(struct kvm_nested_guest *gp) { struct kvm *kvm = gp->l1_host; if (gp->shadow_pgtable) { /* * No vcpu is using this struct and no call to * kvmhv_get_nested can find this struct, * so we don't need to hold kvm->mmu_lock. */ kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid); pgd_free(kvm->mm, gp->shadow_pgtable); } kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0); kvmppc_free_lpid(gp->shadow_lpid); kfree(gp); } static void kvmhv_remove_nested(struct kvm_nested_guest *gp) { struct kvm *kvm = gp->l1_host; int lpid = gp->l1_lpid; long ref; spin_lock(&kvm->mmu_lock); if (gp == __find_nested(kvm, lpid)) { __remove_nested(kvm, lpid); --gp->refcnt; } ref = gp->refcnt; spin_unlock(&kvm->mmu_lock); if (ref == 0) kvmhv_release_nested(gp); } /* * Free up all nested resources allocated for this guest. * This is called with no vcpus of the guest running, when * switching the guest to HPT mode or when destroying the * guest. 
*/ void kvmhv_release_all_nested(struct kvm *kvm) { int lpid; struct kvm_nested_guest *gp; struct kvm_nested_guest *freelist = NULL; struct kvm_memory_slot *memslot; int srcu_idx, bkt; spin_lock(&kvm->mmu_lock); idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) { __remove_nested(kvm, lpid); if (--gp->refcnt == 0) { gp->next = freelist; freelist = gp; } } idr_destroy(&kvm->arch.kvm_nested_guest_idr); /* idr is empty and may be reused at this point */ spin_unlock(&kvm->mmu_lock); while ((gp = freelist) != NULL) { freelist = gp->next; kvmhv_release_nested(gp); } srcu_idx = srcu_read_lock(&kvm->srcu); kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm)) kvmhv_free_memslot_nest_rmap(memslot); srcu_read_unlock(&kvm->srcu, srcu_idx); } /* caller must hold gp->tlb_lock */ static void kvmhv_flush_nested(struct kvm_nested_guest *gp) { struct kvm *kvm = gp->l1_host; spin_lock(&kvm->mmu_lock); kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid); spin_unlock(&kvm->mmu_lock); kvmhv_flush_lpid(gp->shadow_lpid); kvmhv_update_ptbl_cache(gp); if (gp->l1_gr_to_hr == 0) kvmhv_remove_nested(gp); } struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, bool create) { struct kvm_nested_guest *gp, *newgp; if (l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) return NULL; spin_lock(&kvm->mmu_lock); gp = __find_nested(kvm, l1_lpid); if (gp) ++gp->refcnt; spin_unlock(&kvm->mmu_lock); if (gp || !create) return gp; newgp = kvmhv_alloc_nested(kvm, l1_lpid); if (!newgp) return NULL; if (!__prealloc_nested(kvm, l1_lpid)) { kvmhv_release_nested(newgp); return NULL; } spin_lock(&kvm->mmu_lock); gp = __find_nested(kvm, l1_lpid); if (!gp) { __add_nested(kvm, l1_lpid, newgp); ++newgp->refcnt; gp = newgp; newgp = NULL; } ++gp->refcnt; spin_unlock(&kvm->mmu_lock); if (newgp) kvmhv_release_nested(newgp); return gp; } void kvmhv_put_nested(struct kvm_nested_guest *gp) { struct kvm *kvm = gp->l1_host; long ref; spin_lock(&kvm->mmu_lock); ref = --gp->refcnt; spin_unlock(&kvm->mmu_lock); if (ref == 0) kvmhv_release_nested(gp); } pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid, unsigned long ea, unsigned *hshift) { struct kvm_nested_guest *gp; pte_t *pte; gp = __find_nested(kvm, lpid); if (!gp) return NULL; VM_WARN(!spin_is_locked(&kvm->mmu_lock), "%s called with kvm mmu_lock not held \n", __func__); pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift); return pte; } static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2) { return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK | RMAP_NESTED_GPA_MASK)); } void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp, struct rmap_nested **n_rmap) { struct llist_node *entry = ((struct llist_head *) rmapp)->first; struct rmap_nested *cursor; u64 rmap, new_rmap = (*n_rmap)->rmap; /* Are there any existing entries? */ if (!(*rmapp)) { /* No -> use the rmap as a single entry */ *rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY; return; } /* Do any entries match what we're trying to insert? */ for_each_nest_rmap_safe(cursor, entry, &rmap) { if (kvmhv_n_rmap_is_equal(rmap, new_rmap)) return; } /* Do we need to create a list or just add the new entry? 
*/ rmap = *rmapp; if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */ *rmapp = 0UL; llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp); if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */ (*n_rmap)->list.next = (struct llist_node *) rmap; /* Set NULL so not freed by caller */ *n_rmap = NULL; } static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap, unsigned long clr, unsigned long set, unsigned long hpa, unsigned long mask) { unsigned long gpa; unsigned int shift, lpid; pte_t *ptep; gpa = n_rmap & RMAP_NESTED_GPA_MASK; lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT; /* Find the pte */ ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift); /* * If the pte is present and the pfn is still the same, update the pte. * If the pfn has changed then this is a stale rmap entry, the nested * gpa actually points somewhere else now, and there is nothing to do. * XXX A future optimisation would be to remove the rmap entry here. */ if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) { __radix_pte_update(ptep, clr, set); kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid); } } /* * For a given list of rmap entries, update the rc bits in all ptes in shadow * page tables for nested guests which are referenced by the rmap list. */ void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp, unsigned long clr, unsigned long set, unsigned long hpa, unsigned long nbytes) { struct llist_node *entry = ((struct llist_head *) rmapp)->first; struct rmap_nested *cursor; unsigned long rmap, mask; if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED)) return; mask = PTE_RPN_MASK & ~(nbytes - 1); hpa &= mask; for_each_nest_rmap_safe(cursor, entry, &rmap) kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask); } static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap, unsigned long hpa, unsigned long mask) { struct kvm_nested_guest *gp; unsigned long gpa; unsigned int shift, lpid; pte_t *ptep; gpa = n_rmap & RMAP_NESTED_GPA_MASK; lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT; gp = __find_nested(kvm, lpid); if (!gp) return; /* Find and invalidate the pte */ ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift); /* Don't spuriously invalidate ptes if the pfn has changed */ if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); } static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp, unsigned long hpa, unsigned long mask) { struct llist_node *entry = llist_del_all((struct llist_head *) rmapp); struct rmap_nested *cursor; unsigned long rmap; for_each_nest_rmap_safe(cursor, entry, &rmap) { kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask); kfree(cursor); } } /* called with kvm->mmu_lock held */ void kvmhv_remove_nest_rmap_range(struct kvm *kvm, const struct kvm_memory_slot *memslot, unsigned long gpa, unsigned long hpa, unsigned long nbytes) { unsigned long gfn, end_gfn; unsigned long addr_mask; if (!memslot) return; gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn; end_gfn = gfn + (nbytes >> PAGE_SHIFT); addr_mask = PTE_RPN_MASK & ~(nbytes - 1); hpa &= addr_mask; for (; gfn < end_gfn; gfn++) { unsigned long *rmap = &memslot->arch.rmap[gfn]; kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask); } } static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free) { unsigned long page; for (page = 0; page < free->npages; page++) { unsigned long rmap, *rmapp = &free->arch.rmap[page]; struct rmap_nested 
*cursor; struct llist_node *entry; entry = llist_del_all((struct llist_head *) rmapp); for_each_nest_rmap_safe(cursor, entry, &rmap) kfree(cursor); } } static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu, struct kvm_nested_guest *gp, long gpa, int *shift_ret) { struct kvm *kvm = vcpu->kvm; bool ret = false; pte_t *ptep; int shift; spin_lock(&kvm->mmu_lock); ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift); if (!shift) shift = PAGE_SHIFT; if (ptep && pte_present(*ptep)) { kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); ret = true; } spin_unlock(&kvm->mmu_lock); if (shift_ret) *shift_ret = shift; return ret; } static inline int get_ric(unsigned int instr) { return (instr >> 18) & 0x3; } static inline int get_prs(unsigned int instr) { return (instr >> 17) & 0x1; } static inline int get_r(unsigned int instr) { return (instr >> 16) & 0x1; } static inline int get_lpid(unsigned long r_val) { return r_val & 0xffffffff; } static inline int get_is(unsigned long r_val) { return (r_val >> 10) & 0x3; } static inline int get_ap(unsigned long r_val) { return (r_val >> 5) & 0x7; } static inline long get_epn(unsigned long r_val) { return r_val >> 12; } static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid, int ap, long epn) { struct kvm *kvm = vcpu->kvm; struct kvm_nested_guest *gp; long npages; int shift, shadow_shift; unsigned long addr; shift = ap_to_shift(ap); addr = epn << 12; if (shift < 0) /* Invalid ap encoding */ return -EINVAL; addr &= ~((1UL << shift) - 1); npages = 1UL << (shift - PAGE_SHIFT); gp = kvmhv_get_nested(kvm, lpid, false); if (!gp) /* No such guest -> nothing to do */ return 0; mutex_lock(&gp->tlb_lock); /* There may be more than one host page backing this single guest pte */ do { kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift); npages -= 1UL << (shadow_shift - PAGE_SHIFT); addr += 1UL << shadow_shift; } while (npages > 0); mutex_unlock(&gp->tlb_lock); kvmhv_put_nested(gp); return 0; } static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu, struct kvm_nested_guest *gp, int ric) { struct kvm *kvm = vcpu->kvm; mutex_lock(&gp->tlb_lock); switch (ric) { case 0: /* Invalidate TLB */ spin_lock(&kvm->mmu_lock); kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid); kvmhv_flush_lpid(gp->shadow_lpid); spin_unlock(&kvm->mmu_lock); break; case 1: /* * Invalidate PWC * We don't cache this -> nothing to do */ break; case 2: /* Invalidate TLB, PWC and caching of partition table entries */ kvmhv_flush_nested(gp); break; default: break; } mutex_unlock(&gp->tlb_lock); } static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric) { struct kvm *kvm = vcpu->kvm; struct kvm_nested_guest *gp; int lpid; spin_lock(&kvm->mmu_lock); idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) { spin_unlock(&kvm->mmu_lock); kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); spin_lock(&kvm->mmu_lock); } spin_unlock(&kvm->mmu_lock); } static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr, unsigned long rsval, unsigned long rbval) { struct kvm *kvm = vcpu->kvm; struct kvm_nested_guest *gp; int r, ric, prs, is, ap; int lpid; long epn; int ret = 0; ric = get_ric(instr); prs = get_prs(instr); r = get_r(instr); lpid = get_lpid(rsval); is = get_is(rbval); /* * These cases are invalid and are not handled: * r != 1 -> Only radix supported * prs == 1 -> Not HV privileged * ric == 3 -> No cluster bombs for radix * is == 1 -> Partition scoped translations not associated with pid * (!is) && (ric == 1 || ric == 
2) -> Not supported by ISA */ if ((!r) || (prs) || (ric == 3) || (is == 1) || ((!is) && (ric == 1 || ric == 2))) return -EINVAL; switch (is) { case 0: /* * We know ric == 0 * Invalidate TLB for a given target address */ epn = get_epn(rbval); ap = get_ap(rbval); ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn); break; case 2: /* Invalidate matching LPID */ gp = kvmhv_get_nested(kvm, lpid, false); if (gp) { kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); kvmhv_put_nested(gp); } break; case 3: /* Invalidate ALL LPIDs */ kvmhv_emulate_tlbie_all_lpid(vcpu, ric); break; default: ret = -EINVAL; break; } return ret; } /* * This handles the H_TLB_INVALIDATE hcall. * Parameters are (r4) tlbie instruction code, (r5) rS contents, * (r6) rB contents. */ long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu) { int ret; ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); if (ret) return H_PARAMETER; return H_SUCCESS; } static long do_tlb_invalidate_nested_all(struct kvm_vcpu *vcpu, unsigned long lpid, unsigned long ric) { struct kvm *kvm = vcpu->kvm; struct kvm_nested_guest *gp; gp = kvmhv_get_nested(kvm, lpid, false); if (gp) { kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); kvmhv_put_nested(gp); } return H_SUCCESS; } /* * Number of pages above which we invalidate the entire LPID rather than * flush individual pages. */ static unsigned long tlb_range_flush_page_ceiling __read_mostly = 33; static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu, unsigned long lpid, unsigned long pg_sizes, unsigned long start, unsigned long end) { int ret = H_P4; unsigned long addr, nr_pages; struct mmu_psize_def *def; unsigned long psize, ap, page_size; bool flush_lpid; for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { def = &mmu_psize_defs[psize]; if (!(pg_sizes & def->h_rpt_pgsize)) continue; nr_pages = (end - start) >> def->shift; flush_lpid = nr_pages > tlb_range_flush_page_ceiling; if (flush_lpid) return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB); addr = start; ap = mmu_get_ap(psize); page_size = 1UL << def->shift; do { ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, get_epn(addr)); if (ret) return H_P4; addr += page_size; } while (addr < end); } return ret; } /* * Performs partition-scoped invalidations for nested guests * as part of H_RPT_INVALIDATE hcall. */ long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid, unsigned long type, unsigned long pg_sizes, unsigned long start, unsigned long end) { /* * If L2 lpid isn't valid, we need to return H_PARAMETER. * * However, nested KVM issues a L2 lpid flush call when creating * partition table entries for L2. This happens even before the * corresponding shadow lpid is created in HV which happens in * H_ENTER_NESTED call. Since we can't differentiate this case from * the invalid case, we ignore such flush requests and return success. */ if (!__find_nested(vcpu->kvm, lpid)) return H_SUCCESS; /* * A flush all request can be handled by a full lpid flush only. */ if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL) return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL); /* * We don't need to handle a PWC flush like process table here, * because intermediate partition scoped table in nested guest doesn't * really have PWC. Only level we have PWC is in L0 and for nested * invalidate at L0 we always do kvm_flush_lpid() which does * radix__flush_all_lpid(). 
For range invalidate at any level, we * are not removing the higher level page tables and hence there is * no PWC invalidate needed. * * if (type & H_RPTI_TYPE_PWC) { * ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC); * if (ret) * return H_P4; * } */ if (start == 0 && end == -1) return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB); if (type & H_RPTI_TYPE_TLB) return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes, start, end); return H_SUCCESS; } /* Used to convert a nested guest real address to a L1 guest real address */ static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu, struct kvm_nested_guest *gp, unsigned long n_gpa, unsigned long dsisr, struct kvmppc_pte *gpte_p) { u64 fault_addr, flags = dsisr & DSISR_ISSTORE; int ret; ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr, &fault_addr); if (ret) { /* We didn't find a pte */ if (ret == -EINVAL) { /* Unsupported mmu config */ flags |= DSISR_UNSUPP_MMU; } else if (ret == -ENOENT) { /* No translation found */ flags |= DSISR_NOHPTE; } else if (ret == -EFAULT) { /* Couldn't access L1 real address */ flags |= DSISR_PRTABLE_FAULT; vcpu->arch.fault_gpa = fault_addr; } else { /* Unknown error */ return ret; } goto forward_to_l1; } else { /* We found a pte -> check permissions */ if (dsisr & DSISR_ISSTORE) { /* Can we write? */ if (!gpte_p->may_write) { flags |= DSISR_PROTFAULT; goto forward_to_l1; } } else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) { /* Can we execute? */ if (!gpte_p->may_execute) { flags |= SRR1_ISI_N_G_OR_CIP; goto forward_to_l1; } } else { /* Can we read? */ if (!gpte_p->may_read && !gpte_p->may_write) { flags |= DSISR_PROTFAULT; goto forward_to_l1; } } } return 0; forward_to_l1: vcpu->arch.fault_dsisr = flags; if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) { vcpu->arch.shregs.msr &= SRR1_MSR_BITS; vcpu->arch.shregs.msr |= flags; } return RESUME_HOST; } static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu, struct kvm_nested_guest *gp, unsigned long n_gpa, struct kvmppc_pte gpte, unsigned long dsisr) { struct kvm *kvm = vcpu->kvm; bool writing = !!(dsisr & DSISR_ISSTORE); u64 pgflags; long ret; /* Are the rc bits set in the L1 partition scoped pte? 
*/ pgflags = _PAGE_ACCESSED; if (writing) pgflags |= _PAGE_DIRTY; if (pgflags & ~gpte.rc) return RESUME_HOST; spin_lock(&kvm->mmu_lock); /* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */ ret = kvmppc_hv_handle_set_rc(kvm, false, writing, gpte.raddr, kvm->arch.lpid); if (!ret) { ret = -EINVAL; goto out_unlock; } /* Set the rc bit in the pte of the shadow_pgtable for the nest guest */ ret = kvmppc_hv_handle_set_rc(kvm, true, writing, n_gpa, gp->l1_lpid); if (!ret) ret = -EINVAL; else ret = 0; out_unlock: spin_unlock(&kvm->mmu_lock); return ret; } static inline int kvmppc_radix_level_to_shift(int level) { switch (level) { case 2: return PUD_SHIFT; case 1: return PMD_SHIFT; default: return PAGE_SHIFT; } } static inline int kvmppc_radix_shift_to_level(int shift) { if (shift == PUD_SHIFT) return 2; if (shift == PMD_SHIFT) return 1; if (shift == PAGE_SHIFT) return 0; WARN_ON_ONCE(1); return 0; } /* called with gp->tlb_lock held */ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, struct kvm_nested_guest *gp) { struct kvm *kvm = vcpu->kvm; struct kvm_memory_slot *memslot; struct rmap_nested *n_rmap; struct kvmppc_pte gpte; pte_t pte, *pte_p; unsigned long mmu_seq; unsigned long dsisr = vcpu->arch.fault_dsisr; unsigned long ea = vcpu->arch.fault_dar; unsigned long *rmapp; unsigned long n_gpa, gpa, gfn, perm = 0UL; unsigned int shift, l1_shift, level; bool writing = !!(dsisr & DSISR_ISSTORE); bool kvm_ro = false; long int ret; if (!gp->l1_gr_to_hr) { kvmhv_update_ptbl_cache(gp); if (!gp->l1_gr_to_hr) return RESUME_HOST; } /* Convert the nested guest real address into a L1 guest real address */ n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL; if (!(dsisr & DSISR_PRTABLE_FAULT)) n_gpa |= ea & 0xFFF; ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte); /* * If the hardware found a translation but we don't now have a usable * translation in the l1 partition-scoped tree, remove the shadow pte * and let the guest retry. */ if (ret == RESUME_HOST && (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G | DSISR_BAD_COPYPASTE))) goto inval; if (ret) return ret; /* Failed to set the reference/change bits */ if (dsisr & DSISR_SET_RC) { ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr); if (ret == RESUME_HOST) return ret; if (ret) goto inval; dsisr &= ~DSISR_SET_RC; if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE | DSISR_PROTFAULT))) return RESUME_GUEST; } /* * We took an HISI or HDSI while we were running a nested guest which * means we have no partition scoped translation for that. This means * we need to insert a pte for the mapping into our shadow_pgtable. */ l1_shift = gpte.page_shift; if (l1_shift < PAGE_SHIFT) { /* We don't support l1 using a page size smaller than our own */ pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n", l1_shift, PAGE_SHIFT); return -EINVAL; } gpa = gpte.raddr; gfn = gpa >> PAGE_SHIFT; /* 1. 
Get the corresponding host memslot */ memslot = gfn_to_memslot(kvm, gfn); if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) { /* unusual error -> reflect to the guest as a DSI */ kvmppc_core_queue_data_storage(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED, ea, dsisr); return RESUME_GUEST; } /* passthrough of emulated MMIO case */ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing); } if (memslot->flags & KVM_MEM_READONLY) { if (writing) { /* Give the guest a DSI */ kvmppc_core_queue_data_storage(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED, ea, DSISR_ISSTORE | DSISR_PROTFAULT); return RESUME_GUEST; } kvm_ro = true; } /* 2. Find the host pte for this L1 guest real address */ /* Used to check for invalidations in progress */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); /* See if can find translation in our partition scoped tables for L1 */ pte = __pte(0); spin_lock(&kvm->mmu_lock); pte_p = find_kvm_secondary_pte(kvm, gpa, &shift); if (!shift) shift = PAGE_SHIFT; if (pte_p) pte = *pte_p; spin_unlock(&kvm->mmu_lock); if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) { /* No suitable pte found -> try to insert a mapping */ ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing, kvm_ro, &pte, &level); if (ret == -EAGAIN) return RESUME_GUEST; else if (ret) return ret; shift = kvmppc_radix_level_to_shift(level); } /* Align gfn to the start of the page */ gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT; /* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */ /* The permissions is the combination of the host and l1 guest ptes */ perm |= gpte.may_read ? 0UL : _PAGE_READ; perm |= gpte.may_write ? 0UL : _PAGE_WRITE; perm |= gpte.may_execute ? 0UL : _PAGE_EXEC; /* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */ perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED; perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY; pte = __pte(pte_val(pte) & ~perm); /* What size pte can we insert? */ if (shift > l1_shift) { u64 mask; unsigned int actual_shift = PAGE_SHIFT; if (PMD_SHIFT < l1_shift) actual_shift = PMD_SHIFT; mask = (1UL << shift) - (1UL << actual_shift); pte = __pte(pte_val(pte) | (gpa & mask)); shift = actual_shift; } level = kvmppc_radix_shift_to_level(shift); n_gpa &= ~((1UL << shift) - 1); /* 4. Insert the pte into our shadow_pgtable */ n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL); if (!n_rmap) return RESUME_GUEST; /* Let the guest try again */ n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) | (((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT); rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level, mmu_seq, gp->shadow_lpid, rmapp, &n_rmap); kfree(n_rmap); if (ret == -EAGAIN) ret = RESUME_GUEST; /* Let the guest try again */ return ret; inval: kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL); return RESUME_GUEST; } long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu) { struct kvm_nested_guest *gp = vcpu->arch.nested; long int ret; mutex_lock(&gp->tlb_lock); ret = __kvmhv_nested_page_fault(vcpu, gp); mutex_unlock(&gp->tlb_lock); return ret; } int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid) { int ret = lpid + 1; spin_lock(&kvm->mmu_lock); if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret)) ret = -1; spin_unlock(&kvm->mmu_lock); return ret; }
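/*
 * Summary of the locking and lifetime rules followed by the nested-guest
 * code above (derived from the functions in this file):
 *
 *  - kvm->mmu_lock protects the nested-guest reference counts, lookups
 *    and updates of kvm_nested_guest_idr, and the nested rmap lists.
 *  - gp->tlb_lock serializes shadow page table updates, partition table
 *    cache reloads and tlbie emulation for a single nested guest.
 *  - kvmhv_get_nested() looks up (and optionally creates) a nested guest
 *    and takes a reference; kvmhv_put_nested() drops it, and the guest is
 *    freed by kvmhv_release_nested() once it has been removed from the
 *    IDR and its reference count reaches zero.
 */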
linux-master
arch/powerpc/kvm/book3s_hv_nested.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <[email protected]>
 *          Christian Ehrhardt <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <linux/of.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
#include <asm/setup.h>

#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
        return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r;

        WARN_ON(irqs_disabled());
        hard_irq_disable();

        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        hard_irq_disable();
                        continue;
                }

                if (signal_pending(current)) {
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        r = -EINTR;
                        break;
                }

                vcpu->mode = IN_GUEST_MODE;

                /*
                 * Reading vcpu->requests must happen after setting vcpu->mode,
                 * so we don't miss a request because the requester sees
                 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
                 * before next entering the guest (and thus doesn't IPI).
                 * This also orders the write to mode from any reads
                 * to the page tables done while the VCPU is running.
                 * Please see the comment in kvm_flush_remote_tlbs.
*/ smp_mb(); if (kvm_request_pending(vcpu)) { /* Make sure we process requests preemptable */ local_irq_enable(); trace_kvm_check_requests(vcpu); r = kvmppc_core_check_requests(vcpu); hard_irq_disable(); if (r > 0) continue; break; } if (kvmppc_core_prepare_to_enter(vcpu)) { /* interrupts got enabled in between, so we are back at square 1 */ continue; } guest_enter_irqoff(); return 1; } /* return to host */ local_irq_enable(); return r; } EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) { struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; int i; shared->sprg0 = swab64(shared->sprg0); shared->sprg1 = swab64(shared->sprg1); shared->sprg2 = swab64(shared->sprg2); shared->sprg3 = swab64(shared->sprg3); shared->srr0 = swab64(shared->srr0); shared->srr1 = swab64(shared->srr1); shared->dar = swab64(shared->dar); shared->msr = swab64(shared->msr); shared->dsisr = swab32(shared->dsisr); shared->int_pending = swab32(shared->int_pending); for (i = 0; i < ARRAY_SIZE(shared->sr); i++) shared->sr[i] = swab32(shared->sr[i]); } #endif int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) { int nr = kvmppc_get_gpr(vcpu, 11); int r; unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); unsigned long r2 = 0; if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { /* 32 bit mode */ param1 &= 0xffffffff; param2 &= 0xffffffff; param3 &= 0xffffffff; param4 &= 0xffffffff; } switch (nr) { case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): { #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) /* Book3S can be little endian, find it out here */ int shared_big_endian = true; if (vcpu->arch.intr_msr & MSR_LE) shared_big_endian = false; if (shared_big_endian != vcpu->arch.shared_big_endian) kvmppc_swab_shared(vcpu); vcpu->arch.shared_big_endian = shared_big_endian; #endif if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) { /* * Older versions of the Linux magic page code had * a bug where they would map their trampoline code * NX. If that's the case, remove !PR NX capability. */ vcpu->arch.disable_kernel_nx = true; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } vcpu->arch.magic_page_pa = param1 & ~0xfffULL; vcpu->arch.magic_page_ea = param2 & ~0xfffULL; #ifdef CONFIG_PPC_64K_PAGES /* * Make sure our 4k magic page is in the same window of a 64k * page within the guest and within the host's page. 
*/ if ((vcpu->arch.magic_page_pa & 0xf000) != ((ulong)vcpu->arch.shared & 0xf000)) { void *old_shared = vcpu->arch.shared; ulong shared = (ulong)vcpu->arch.shared; void *new_shared; shared &= PAGE_MASK; shared |= vcpu->arch.magic_page_pa & 0xf000; new_shared = (void*)shared; memcpy(new_shared, old_shared, 0x1000); vcpu->arch.shared = new_shared; } #endif r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; r = EV_SUCCESS; break; } case KVM_HCALL_TOKEN(KVM_HC_FEATURES): r = EV_SUCCESS; #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); #endif /* Second return value is in r4 */ break; case EV_HCALL_TOKEN(EV_IDLE): r = EV_SUCCESS; kvm_vcpu_halt(vcpu); break; default: r = EV_UNIMPLEMENTED; break; } kvmppc_set_gpr(vcpu, 4, r2); return r; } EXPORT_SYMBOL_GPL(kvmppc_kvm_pv); int kvmppc_sanity_check(struct kvm_vcpu *vcpu) { int r = false; /* We have to know what CPU to virtualize */ if (!vcpu->arch.pvr) goto out; /* PAPR only works with book3s_64 */ if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) goto out; /* HV KVM can only do PAPR mode for now */ if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) goto out; #ifdef CONFIG_KVM_BOOKE_HV if (!cpu_has_feature(CPU_FTR_EMB_HV)) goto out; #endif r = true; out: vcpu->arch.sane = r; return r ? 0 : -EINVAL; } EXPORT_SYMBOL_GPL(kvmppc_sanity_check); int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu) { enum emulation_result er; int r; er = kvmppc_emulate_loadstore(vcpu); switch (er) { case EMULATE_DONE: /* Future optimization: only reload non-volatiles if they were * actually modified. */ r = RESUME_GUEST_NV; break; case EMULATE_AGAIN: r = RESUME_GUEST; break; case EMULATE_DO_MMIO: vcpu->run->exit_reason = KVM_EXIT_MMIO; /* We must reload nonvolatiles because "update" load/store * instructions modify register state. */ /* Future optimization: only reload non-volatiles if they were * actually modified. */ r = RESUME_HOST_NV; break; case EMULATE_FAIL: { ppc_inst_t last_inst; kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n", ppc_inst_val(last_inst)); /* * Injecting a Data Storage here is a bit more * accurate since the instruction that caused the * access could still be a valid one. */ if (!IS_ENABLED(CONFIG_BOOKE)) { ulong dsisr = DSISR_BADACCESS; if (vcpu->mmio_is_write) dsisr |= DSISR_ISSTORE; kvmppc_core_queue_data_storage(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED, vcpu->arch.vaddr_accessed, dsisr); } else { /* * BookE does not send a SIGBUS on a bad * fault, so use a Program interrupt instead * to avoid a fault loop. */ kvmppc_core_queue_program(vcpu, 0); } r = RESUME_GUEST; break; } default: WARN_ON(1); r = RESUME_GUEST; } return r; } EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio); int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data) { ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; struct kvmppc_pte pte; int r = -EINVAL; vcpu->stat.st++; if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, size); if ((!r) || (r == -EAGAIN)) return r; r = kvmppc_xlate(vcpu, *eaddr, data ? 
XLATE_DATA : XLATE_INST, XLATE_WRITE, &pte); if (r < 0) return r; *eaddr = pte.raddr; if (!pte.may_write) return -EPERM; /* Magic page override */ if (kvmppc_supports_magic_page(vcpu) && mp_pa && ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) && !(kvmppc_get_msr(vcpu) & MSR_PR)) { void *magic = vcpu->arch.shared; magic += pte.eaddr & 0xfff; memcpy(magic, ptr, size); return EMULATE_DONE; } if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) return EMULATE_DO_MMIO; return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvmppc_st); int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data) { ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; struct kvmppc_pte pte; int rc = -EINVAL; vcpu->stat.ld++; if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, size); if ((!rc) || (rc == -EAGAIN)) return rc; rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, XLATE_READ, &pte); if (rc) return rc; *eaddr = pte.raddr; if (!pte.may_read) return -EPERM; if (!data && !pte.may_execute) return -ENOEXEC; /* Magic page override */ if (kvmppc_supports_magic_page(vcpu) && mp_pa && ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) && !(kvmppc_get_msr(vcpu) & MSR_PR)) { void *magic = vcpu->arch.shared; magic += pte.eaddr & 0xfff; memcpy(ptr, magic, size); return EMULATE_DONE; } kvm_vcpu_srcu_read_lock(vcpu); rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size); kvm_vcpu_srcu_read_unlock(vcpu); if (rc) return EMULATE_DO_MMIO; return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvmppc_ld); int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { struct kvmppc_ops *kvm_ops = NULL; int r; /* * if we have both HV and PR enabled, default is HV */ if (type == 0) { if (kvmppc_hv_ops) kvm_ops = kvmppc_hv_ops; else kvm_ops = kvmppc_pr_ops; if (!kvm_ops) goto err_out; } else if (type == KVM_VM_PPC_HV) { if (!kvmppc_hv_ops) goto err_out; kvm_ops = kvmppc_hv_ops; } else if (type == KVM_VM_PPC_PR) { if (!kvmppc_pr_ops) goto err_out; kvm_ops = kvmppc_pr_ops; } else goto err_out; if (!try_module_get(kvm_ops->owner)) return -ENOENT; kvm->arch.kvm_ops = kvm_ops; r = kvmppc_core_init_vm(kvm); if (r) module_put(kvm_ops->owner); return r; err_out: return -EINVAL; } void kvm_arch_destroy_vm(struct kvm *kvm) { #ifdef CONFIG_KVM_XICS /* * We call kick_all_cpus_sync() to ensure that all * CPUs have executed any pending IPIs before we * continue and free VCPUs structures below. */ if (is_kvmppc_hv_enabled(kvm)) kick_all_cpus_sync(); #endif kvm_destroy_vcpus(kvm); mutex_lock(&kvm->lock); kvmppc_core_destroy_vm(kvm); mutex_unlock(&kvm->lock); /* drop the module reference */ module_put(kvm->arch.kvm_ops->owner); } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r; /* Assume we're using HV mode when the HV module is loaded */ int hv_enabled = kvmppc_hv_ops ? 1 : 0; if (kvm) { /* * Hooray - we know which VM type we're running on. Depend on * that rather than the guess above. 
*/ hv_enabled = is_kvmppc_hv_enabled(kvm); } switch (ext) { #ifdef CONFIG_BOOKE case KVM_CAP_PPC_BOOKE_SREGS: case KVM_CAP_PPC_BOOKE_WATCHDOG: case KVM_CAP_PPC_EPR: #else case KVM_CAP_PPC_SEGSTATE: case KVM_CAP_PPC_HIOR: case KVM_CAP_PPC_PAPR: #endif case KVM_CAP_PPC_UNSET_IRQ: case KVM_CAP_PPC_IRQ_LEVEL: case KVM_CAP_ENABLE_CAP: case KVM_CAP_ONE_REG: case KVM_CAP_IOEVENTFD: case KVM_CAP_DEVICE_CTRL: case KVM_CAP_IMMEDIATE_EXIT: case KVM_CAP_SET_GUEST_DEBUG: r = 1; break; case KVM_CAP_PPC_GUEST_DEBUG_SSTEP: case KVM_CAP_PPC_PAIRED_SINGLES: case KVM_CAP_PPC_OSI: case KVM_CAP_PPC_GET_PVINFO: #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) case KVM_CAP_SW_TLB: #endif /* We support this only for PR */ r = !hv_enabled; break; #ifdef CONFIG_KVM_MPIC case KVM_CAP_IRQ_MPIC: r = 1; break; #endif #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_SPAPR_TCE: case KVM_CAP_SPAPR_TCE_64: r = 1; break; case KVM_CAP_SPAPR_TCE_VFIO: r = !!cpu_has_feature(CPU_FTR_HVMODE); break; case KVM_CAP_PPC_RTAS: case KVM_CAP_PPC_FIXUP_HCALL: case KVM_CAP_PPC_ENABLE_HCALL: #ifdef CONFIG_KVM_XICS case KVM_CAP_IRQ_XICS: #endif case KVM_CAP_PPC_GET_CPU_CHAR: r = 1; break; #ifdef CONFIG_KVM_XIVE case KVM_CAP_PPC_IRQ_XIVE: /* * We need XIVE to be enabled on the platform (implies * a POWER9 processor) and the PowerNV platform, as * nested is not yet supported. */ r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) && kvmppc_xive_native_supported(); break; #endif #ifdef CONFIG_HAVE_KVM_IRQFD case KVM_CAP_IRQFD_RESAMPLE: r = !xive_enabled(); break; #endif case KVM_CAP_PPC_ALLOC_HTAB: r = hv_enabled; break; #endif /* CONFIG_PPC_BOOK3S_64 */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE case KVM_CAP_PPC_SMT: r = 0; if (kvm) { if (kvm->arch.emul_smt_mode > 1) r = kvm->arch.emul_smt_mode; else r = kvm->arch.smt_mode; } else if (hv_enabled) { if (cpu_has_feature(CPU_FTR_ARCH_300)) r = 1; else r = threads_per_subcore; } break; case KVM_CAP_PPC_SMT_POSSIBLE: r = 1; if (hv_enabled) { if (!cpu_has_feature(CPU_FTR_ARCH_300)) r = ((threads_per_subcore << 1) - 1); else /* P9 can emulate dbells, so allow any mode */ r = 8 | 4 | 2 | 1; } break; case KVM_CAP_PPC_RMA: r = 0; break; case KVM_CAP_PPC_HWRNG: r = kvmppc_hwrng_present(); break; case KVM_CAP_PPC_MMU_RADIX: r = !!(hv_enabled && radix_enabled()); break; case KVM_CAP_PPC_MMU_HASH_V3: r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible && kvmppc_hv_ops->hash_v3_possible()); break; case KVM_CAP_PPC_NESTED_HV: r = !!(hv_enabled && kvmppc_hv_ops->enable_nested && !kvmppc_hv_ops->enable_nested(NULL)); break; #endif case KVM_CAP_SYNC_MMU: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE r = hv_enabled; #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) r = 1; #else r = 0; #endif break; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE case KVM_CAP_PPC_HTAB_FD: r = hv_enabled; break; #endif case KVM_CAP_NR_VCPUS: /* * Recommending a number of CPUs is somewhat arbitrary; we * return the number of present CPUs for -HV (since a host * will have secondary threads "offline"), and for other KVM * implementations just count online CPUs. 
*/ if (hv_enabled) r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS); else r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; case KVM_CAP_MAX_VCPU_ID: r = KVM_MAX_VCPU_IDS; break; #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_PPC_GET_SMMU_INFO: r = 1; break; case KVM_CAP_SPAPR_MULTITCE: r = 1; break; case KVM_CAP_SPAPR_RESIZE_HPT: r = !!hv_enabled; break; #endif #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE case KVM_CAP_PPC_FWNMI: r = hv_enabled; break; #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case KVM_CAP_PPC_HTM: r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)); break; #endif #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) case KVM_CAP_PPC_SECURE_GUEST: r = hv_enabled && kvmppc_hv_ops->enable_svm && !kvmppc_hv_ops->enable_svm(NULL); break; case KVM_CAP_PPC_DAWR1: r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 && !kvmppc_hv_ops->enable_dawr1(NULL)); break; case KVM_CAP_PPC_RPT_INVALIDATE: r = 1; break; #endif case KVM_CAP_PPC_AIL_MODE_3: r = 0; /* * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode. * The POWER9s can support it if the guest runs in hash mode, * but QEMU doesn't necessarily query the capability in time. */ if (hv_enabled) { if (kvmhv_on_pseries()) { if (pseries_reloc_on_exception()) r = 1; } else if (cpu_has_feature(CPU_FTR_ARCH_207S) && !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) { r = 1; } } break; default: r = 0; break; } return r; } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { return -EINVAL; } void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { kvmppc_core_free_memslot(kvm, slot); } int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) { return kvmppc_core_prepare_memory_region(kvm, old, new, change); } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { kvmppc_core_commit_memory_region(kvm, old, new, change); } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { kvmppc_core_flush_memslot(kvm, slot); } int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) { return 0; } static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) { struct kvm_vcpu *vcpu; vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); kvmppc_decrementer_func(vcpu); return HRTIMER_NORESTART; } int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) { int err; hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; #ifdef CONFIG_KVM_EXIT_TIMING mutex_init(&vcpu->arch.exit_timing_lock); #endif err = kvmppc_subarch_vcpu_init(vcpu); if (err) return err; err = kvmppc_core_vcpu_create(vcpu); if (err) goto out_vcpu_uninit; rcuwait_init(&vcpu->arch.wait); vcpu->arch.waitp = &vcpu->arch.wait; return 0; out_vcpu_uninit: kvmppc_subarch_vcpu_uninit(vcpu); return err; } void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { /* Make sure we're not using the vcpu anymore */ hrtimer_cancel(&vcpu->arch.dec_timer); switch (vcpu->arch.irq_type) { case KVMPPC_IRQ_MPIC: kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); break; case KVMPPC_IRQ_XICS: if (xics_on_xive()) kvmppc_xive_cleanup_vcpu(vcpu); else kvmppc_xics_free_icp(vcpu); break; case 
KVMPPC_IRQ_XIVE: kvmppc_xive_native_cleanup_vcpu(vcpu); break; } kvmppc_core_vcpu_free(vcpu); kvmppc_subarch_vcpu_uninit(vcpu); } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { return kvmppc_core_pending_dec(vcpu); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { #ifdef CONFIG_BOOKE /* * vrsave (formerly usprg0) isn't used by Linux, but may * be used by the guest. * * On non-booke this is associated with Altivec and * is handled by code in book3s.c. */ mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); #endif kvmppc_core_vcpu_load(vcpu, cpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvmppc_core_vcpu_put(vcpu); #ifdef CONFIG_BOOKE vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); #endif } /* * irq_bypass_add_producer and irq_bypass_del_producer are only * useful if the architecture supports PCI passthrough. * irq_bypass_stop and irq_bypass_start are not needed and so * kvm_ops are not defined for them. */ bool kvm_arch_has_irq_bypass(void) { return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) || (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer)); } int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, struct irq_bypass_producer *prod) { struct kvm_kernel_irqfd *irqfd = container_of(cons, struct kvm_kernel_irqfd, consumer); struct kvm *kvm = irqfd->kvm; if (kvm->arch.kvm_ops->irq_bypass_add_producer) return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod); return 0; } void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, struct irq_bypass_producer *prod) { struct kvm_kernel_irqfd *irqfd = container_of(cons, struct kvm_kernel_irqfd, consumer); struct kvm *kvm = irqfd->kvm; if (kvm->arch.kvm_ops->irq_bypass_del_producer) kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod); } #ifdef CONFIG_VSX static inline int kvmppc_get_vsr_dword_offset(int index) { int offset; if ((index != 0) && (index != 1)) return -1; #ifdef __BIG_ENDIAN offset = index; #else offset = 1 - index; #endif return offset; } static inline int kvmppc_get_vsr_word_offset(int index) { int offset; if ((index > 3) || (index < 0)) return -1; #ifdef __BIG_ENDIAN offset = index; #else offset = 3 - index; #endif return offset; } static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, u64 gpr) { union kvmppc_one_reg val; int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; if (offset == -1) return; if (index >= 32) { val.vval = VCPU_VSX_VR(vcpu, index - 32); val.vsxval[offset] = gpr; VCPU_VSX_VR(vcpu, index - 32) = val.vval; } else { VCPU_VSX_FPR(vcpu, index, offset) = gpr; } } static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, u64 gpr) { union kvmppc_one_reg val; int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; if (index >= 32) { val.vval = VCPU_VSX_VR(vcpu, index - 32); val.vsxval[0] = gpr; val.vsxval[1] = gpr; VCPU_VSX_VR(vcpu, index - 32) = val.vval; } else { VCPU_VSX_FPR(vcpu, index, 0) = gpr; VCPU_VSX_FPR(vcpu, index, 1) = gpr; } } static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, u32 gpr) { union kvmppc_one_reg val; int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; if (index >= 32) { val.vsx32val[0] = gpr; val.vsx32val[1] = gpr; val.vsx32val[2] = gpr; val.vsx32val[3] = gpr; VCPU_VSX_VR(vcpu, index - 32) = val.vval; } else { val.vsx32val[0] = gpr; val.vsx32val[1] = gpr; VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0]; VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0]; } } static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, u32 gpr32) { union 
kvmppc_one_reg val; int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; int dword_offset, word_offset; if (offset == -1) return; if (index >= 32) { val.vval = VCPU_VSX_VR(vcpu, index - 32); val.vsx32val[offset] = gpr32; VCPU_VSX_VR(vcpu, index - 32) = val.vval; } else { dword_offset = offset / 2; word_offset = offset % 2; val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset); val.vsx32val[word_offset] = gpr32; VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0]; } } #endif /* CONFIG_VSX */ #ifdef CONFIG_ALTIVEC static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, int index, int element_size) { int offset; int elts = sizeof(vector128)/element_size; if ((index < 0) || (index >= elts)) return -1; if (kvmppc_need_byteswap(vcpu)) offset = elts - index - 1; else offset = index; return offset; } static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, int index) { return kvmppc_get_vmx_offset_generic(vcpu, index, 8); } static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, int index) { return kvmppc_get_vmx_offset_generic(vcpu, index, 4); } static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, int index) { return kvmppc_get_vmx_offset_generic(vcpu, index, 2); } static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, int index) { return kvmppc_get_vmx_offset_generic(vcpu, index, 1); } static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, u64 gpr) { union kvmppc_one_reg val; int offset = kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; if (offset == -1) return; val.vval = VCPU_VSX_VR(vcpu, index); val.vsxval[offset] = gpr; VCPU_VSX_VR(vcpu, index) = val.vval; } static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, u32 gpr32) { union kvmppc_one_reg val; int offset = kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; if (offset == -1) return; val.vval = VCPU_VSX_VR(vcpu, index); val.vsx32val[offset] = gpr32; VCPU_VSX_VR(vcpu, index) = val.vval; } static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, u16 gpr16) { union kvmppc_one_reg val; int offset = kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; if (offset == -1) return; val.vval = VCPU_VSX_VR(vcpu, index); val.vsx16val[offset] = gpr16; VCPU_VSX_VR(vcpu, index) = val.vval; } static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, u8 gpr8) { union kvmppc_one_reg val; int offset = kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; if (offset == -1) return; val.vval = VCPU_VSX_VR(vcpu, index); val.vsx8val[offset] = gpr8; VCPU_VSX_VR(vcpu, index) = val.vval; } #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_PPC_FPU static inline u64 sp_to_dp(u32 fprs) { u64 fprd; preempt_disable(); enable_kernel_fp(); asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs) : "fr0"); preempt_enable(); return fprd; } static inline u32 dp_to_sp(u64 fprd) { u32 fprs; preempt_disable(); enable_kernel_fp(); asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd) : "fr0"); preempt_enable(); return fprs; } #else #define sp_to_dp(x) (x) #define dp_to_sp(x) (x) #endif /* CONFIG_PPC_FPU */ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; u64 gpr; if (run->mmio.len > sizeof(gpr)) return; if 
(!vcpu->arch.mmio_host_swabbed) { switch (run->mmio.len) { case 8: gpr = *(u64 *)run->mmio.data; break; case 4: gpr = *(u32 *)run->mmio.data; break; case 2: gpr = *(u16 *)run->mmio.data; break; case 1: gpr = *(u8 *)run->mmio.data; break; } } else { switch (run->mmio.len) { case 8: gpr = swab64(*(u64 *)run->mmio.data); break; case 4: gpr = swab32(*(u32 *)run->mmio.data); break; case 2: gpr = swab16(*(u16 *)run->mmio.data); break; case 1: gpr = *(u8 *)run->mmio.data; break; } } /* conversion between single and double precision */ if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) gpr = sp_to_dp(gpr); if (vcpu->arch.mmio_sign_extend) { switch (run->mmio.len) { #ifdef CONFIG_PPC64 case 4: gpr = (s64)(s32)gpr; break; #endif case 2: gpr = (s64)(s16)gpr; break; case 1: gpr = (s64)(s8)gpr; break; } } switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { case KVM_MMIO_REG_GPR: kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); break; case KVM_MMIO_REG_FPR: if (vcpu->kvm->arch.kvm_ops->giveup_ext) vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; break; #ifdef CONFIG_PPC_BOOK3S case KVM_MMIO_REG_QPR: vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; case KVM_MMIO_REG_FQPR: VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; #endif #ifdef CONFIG_VSX case KVM_MMIO_REG_VSX: if (vcpu->kvm->arch.kvm_ops->giveup_ext) vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) kvmppc_set_vsr_dword(vcpu, gpr); else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) kvmppc_set_vsr_word(vcpu, gpr); else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) kvmppc_set_vsr_dword_dump(vcpu, gpr); else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD_LOAD_DUMP) kvmppc_set_vsr_word_dump(vcpu, gpr); break; #endif #ifdef CONFIG_ALTIVEC case KVM_MMIO_REG_VMX: if (vcpu->kvm->arch.kvm_ops->giveup_ext) vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) kvmppc_set_vmx_dword(vcpu, gpr); else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) kvmppc_set_vmx_word(vcpu, gpr); else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_HWORD) kvmppc_set_vmx_hword(vcpu, gpr); else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_BYTE) kvmppc_set_vmx_byte(vcpu, gpr); break; #endif #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE case KVM_MMIO_REG_NESTED_GPR: if (kvmppc_need_byteswap(vcpu)) gpr = swab64(gpr); kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, sizeof(gpr)); break; #endif default: BUG(); } } static int __kvmppc_handle_load(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian, int sign_extend) { struct kvm_run *run = vcpu->run; int idx, ret; bool host_swabbed; /* Pity C doesn't have a logical XOR operator */ if (kvmppc_need_byteswap(vcpu)) { host_swabbed = is_default_endian; } else { host_swabbed = !is_default_endian; } if (bytes > sizeof(run->mmio.data)) return EMULATE_FAIL; run->mmio.phys_addr = vcpu->arch.paddr_accessed; run->mmio.len = bytes; run->mmio.is_write = 0; vcpu->arch.io_gpr = rt; vcpu->arch.mmio_host_swabbed = host_swabbed; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 0; vcpu->arch.mmio_sign_extend = sign_extend; idx = srcu_read_lock(&vcpu->kvm->srcu); ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, bytes, &run->mmio.data); srcu_read_unlock(&vcpu->kvm->srcu, idx); if (!ret) { 
kvmppc_complete_mmio_load(vcpu); vcpu->mmio_needed = 0; return EMULATE_DONE; } return EMULATE_DO_MMIO; } int kvmppc_handle_load(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian) { return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0); } EXPORT_SYMBOL_GPL(kvmppc_handle_load); /* Same as above, but sign extends */ int kvmppc_handle_loads(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian) { return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1); } #ifdef CONFIG_VSX int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian, int mmio_sign_extend) { enum emulation_result emulated = EMULATE_DONE; /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */ if (vcpu->arch.mmio_vsx_copy_nums > 4) return EMULATE_FAIL; while (vcpu->arch.mmio_vsx_copy_nums) { emulated = __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, mmio_sign_extend); if (emulated != EMULATE_DONE) break; vcpu->arch.paddr_accessed += vcpu->run->mmio.len; vcpu->arch.mmio_vsx_copy_nums--; vcpu->arch.mmio_vsx_offset++; } return emulated; } #endif /* CONFIG_VSX */ int kvmppc_handle_store(struct kvm_vcpu *vcpu, u64 val, unsigned int bytes, int is_default_endian) { struct kvm_run *run = vcpu->run; void *data = run->mmio.data; int idx, ret; bool host_swabbed; /* Pity C doesn't have a logical XOR operator */ if (kvmppc_need_byteswap(vcpu)) { host_swabbed = is_default_endian; } else { host_swabbed = !is_default_endian; } if (bytes > sizeof(run->mmio.data)) return EMULATE_FAIL; run->mmio.phys_addr = vcpu->arch.paddr_accessed; run->mmio.len = bytes; run->mmio.is_write = 1; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 1; if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) val = dp_to_sp(val); /* Store the value at the lowest bytes in 'data'. 
*/ if (!host_swabbed) { switch (bytes) { case 8: *(u64 *)data = val; break; case 4: *(u32 *)data = val; break; case 2: *(u16 *)data = val; break; case 1: *(u8 *)data = val; break; } } else { switch (bytes) { case 8: *(u64 *)data = swab64(val); break; case 4: *(u32 *)data = swab32(val); break; case 2: *(u16 *)data = swab16(val); break; case 1: *(u8 *)data = val; break; } } idx = srcu_read_lock(&vcpu->kvm->srcu); ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, bytes, &run->mmio.data); srcu_read_unlock(&vcpu->kvm->srcu, idx); if (!ret) { vcpu->mmio_needed = 0; return EMULATE_DONE; } return EMULATE_DO_MMIO; } EXPORT_SYMBOL_GPL(kvmppc_handle_store); #ifdef CONFIG_VSX static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) { u32 dword_offset, word_offset; union kvmppc_one_reg reg; int vsx_offset = 0; int copy_type = vcpu->arch.mmio_copy_type; int result = 0; switch (copy_type) { case KVMPPC_VSX_COPY_DWORD: vsx_offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); if (vsx_offset == -1) { result = -1; break; } if (rs < 32) { *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); } else { reg.vval = VCPU_VSX_VR(vcpu, rs - 32); *val = reg.vsxval[vsx_offset]; } break; case KVMPPC_VSX_COPY_WORD: vsx_offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); if (vsx_offset == -1) { result = -1; break; } if (rs < 32) { dword_offset = vsx_offset / 2; word_offset = vsx_offset % 2; reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); *val = reg.vsx32val[word_offset]; } else { reg.vval = VCPU_VSX_VR(vcpu, rs - 32); *val = reg.vsx32val[vsx_offset]; } break; default: result = -1; break; } return result; } int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, int rs, unsigned int bytes, int is_default_endian) { u64 val; enum emulation_result emulated = EMULATE_DONE; vcpu->arch.io_gpr = rs; /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */ if (vcpu->arch.mmio_vsx_copy_nums > 4) return EMULATE_FAIL; while (vcpu->arch.mmio_vsx_copy_nums) { if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) return EMULATE_FAIL; emulated = kvmppc_handle_store(vcpu, val, bytes, is_default_endian); if (emulated != EMULATE_DONE) break; vcpu->arch.paddr_accessed += vcpu->run->mmio.len; vcpu->arch.mmio_vsx_copy_nums--; vcpu->arch.mmio_vsx_offset++; } return emulated; } static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; enum emulation_result emulated = EMULATE_FAIL; int r; vcpu->arch.paddr_accessed += run->mmio.len; if (!vcpu->mmio_is_write) { emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, run->mmio.len, 1, vcpu->arch.mmio_sign_extend); } else { emulated = kvmppc_handle_vsx_store(vcpu, vcpu->arch.io_gpr, run->mmio.len, 1); } switch (emulated) { case EMULATE_DO_MMIO: run->exit_reason = KVM_EXIT_MMIO; r = RESUME_HOST; break; case EMULATE_FAIL: pr_info("KVM: MMIO emulation failed (VSX repeat)\n"); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; r = RESUME_HOST; break; default: r = RESUME_GUEST; break; } return r; } #endif /* CONFIG_VSX */ #ifdef CONFIG_ALTIVEC int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian) { enum emulation_result emulated = EMULATE_DONE; if (vcpu->arch.mmio_vmx_copy_nums > 2) return EMULATE_FAIL; while (vcpu->arch.mmio_vmx_copy_nums) { emulated = __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0); if (emulated != EMULATE_DONE) break; vcpu->arch.paddr_accessed += vcpu->run->mmio.len; 
vcpu->arch.mmio_vmx_copy_nums--; vcpu->arch.mmio_vmx_offset++; } return emulated; } static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; int result = 0; vmx_offset = kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); if (vmx_offset == -1) return -1; reg.vval = VCPU_VSX_VR(vcpu, index); *val = reg.vsxval[vmx_offset]; return result; } static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; int result = 0; vmx_offset = kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); if (vmx_offset == -1) return -1; reg.vval = VCPU_VSX_VR(vcpu, index); *val = reg.vsx32val[vmx_offset]; return result; } static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; int result = 0; vmx_offset = kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); if (vmx_offset == -1) return -1; reg.vval = VCPU_VSX_VR(vcpu, index); *val = reg.vsx16val[vmx_offset]; return result; } static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; int result = 0; vmx_offset = kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); if (vmx_offset == -1) return -1; reg.vval = VCPU_VSX_VR(vcpu, index); *val = reg.vsx8val[vmx_offset]; return result; } int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, unsigned int rs, unsigned int bytes, int is_default_endian) { u64 val = 0; unsigned int index = rs & KVM_MMIO_REG_MASK; enum emulation_result emulated = EMULATE_DONE; if (vcpu->arch.mmio_vmx_copy_nums > 2) return EMULATE_FAIL; vcpu->arch.io_gpr = rs; while (vcpu->arch.mmio_vmx_copy_nums) { switch (vcpu->arch.mmio_copy_type) { case KVMPPC_VMX_COPY_DWORD: if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) return EMULATE_FAIL; break; case KVMPPC_VMX_COPY_WORD: if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) return EMULATE_FAIL; break; case KVMPPC_VMX_COPY_HWORD: if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) return EMULATE_FAIL; break; case KVMPPC_VMX_COPY_BYTE: if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) return EMULATE_FAIL; break; default: return EMULATE_FAIL; } emulated = kvmppc_handle_store(vcpu, val, bytes, is_default_endian); if (emulated != EMULATE_DONE) break; vcpu->arch.paddr_accessed += vcpu->run->mmio.len; vcpu->arch.mmio_vmx_copy_nums--; vcpu->arch.mmio_vmx_offset++; } return emulated; } static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; enum emulation_result emulated = EMULATE_FAIL; int r; vcpu->arch.paddr_accessed += run->mmio.len; if (!vcpu->mmio_is_write) { emulated = kvmppc_handle_vmx_load(vcpu, vcpu->arch.io_gpr, run->mmio.len, 1); } else { emulated = kvmppc_handle_vmx_store(vcpu, vcpu->arch.io_gpr, run->mmio.len, 1); } switch (emulated) { case EMULATE_DO_MMIO: run->exit_reason = KVM_EXIT_MMIO; r = RESUME_HOST; break; case EMULATE_FAIL: pr_info("KVM: MMIO emulation failed (VMX repeat)\n"); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; r = RESUME_HOST; break; default: r = RESUME_GUEST; break; } return r; } #endif /* CONFIG_ALTIVEC */ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) { int r = 0; union kvmppc_one_reg val; int size; size = one_reg_size(reg->id); if (size > sizeof(val)) return -EINVAL; r = kvmppc_get_one_reg(vcpu, reg->id, &val); if (r == -EINVAL) { r = 0; switch (reg->id) { 
#ifdef CONFIG_ALTIVEC case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { r = -ENXIO; break; } val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; break; case KVM_REG_PPC_VSCR: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { r = -ENXIO; break; } val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); break; case KVM_REG_PPC_VRSAVE: val = get_reg_val(reg->id, vcpu->arch.vrsave); break; #endif /* CONFIG_ALTIVEC */ default: r = -EINVAL; break; } } if (r) return r; if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size)) r = -EFAULT; return r; } int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) { int r; union kvmppc_one_reg val; int size; size = one_reg_size(reg->id); if (size > sizeof(val)) return -EINVAL; if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) return -EFAULT; r = kvmppc_set_one_reg(vcpu, reg->id, &val); if (r == -EINVAL) { r = 0; switch (reg->id) { #ifdef CONFIG_ALTIVEC case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { r = -ENXIO; break; } vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; break; case KVM_REG_PPC_VSCR: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { r = -ENXIO; break; } vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); break; case KVM_REG_PPC_VRSAVE: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { r = -ENXIO; break; } vcpu->arch.vrsave = set_reg_val(reg->id, val); break; #endif /* CONFIG_ALTIVEC */ default: r = -EINVAL; break; } } return r; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; int r; vcpu_load(vcpu); if (vcpu->mmio_needed) { vcpu->mmio_needed = 0; if (!vcpu->mmio_is_write) kvmppc_complete_mmio_load(vcpu); #ifdef CONFIG_VSX if (vcpu->arch.mmio_vsx_copy_nums > 0) { vcpu->arch.mmio_vsx_copy_nums--; vcpu->arch.mmio_vsx_offset++; } if (vcpu->arch.mmio_vsx_copy_nums > 0) { r = kvmppc_emulate_mmio_vsx_loadstore(vcpu); if (r == RESUME_HOST) { vcpu->mmio_needed = 1; goto out; } } #endif #ifdef CONFIG_ALTIVEC if (vcpu->arch.mmio_vmx_copy_nums > 0) { vcpu->arch.mmio_vmx_copy_nums--; vcpu->arch.mmio_vmx_offset++; } if (vcpu->arch.mmio_vmx_copy_nums > 0) { r = kvmppc_emulate_mmio_vmx_loadstore(vcpu); if (r == RESUME_HOST) { vcpu->mmio_needed = 1; goto out; } } #endif } else if (vcpu->arch.osi_needed) { u64 *gprs = run->osi.gprs; int i; for (i = 0; i < 32; i++) kvmppc_set_gpr(vcpu, i, gprs[i]); vcpu->arch.osi_needed = 0; } else if (vcpu->arch.hcall_needed) { int i; kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); for (i = 0; i < 9; ++i) kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); vcpu->arch.hcall_needed = 0; #ifdef CONFIG_BOOKE } else if (vcpu->arch.epr_needed) { kvmppc_set_epr(vcpu, run->epr.epr); vcpu->arch.epr_needed = 0; #endif } kvm_sigset_activate(vcpu); if (run->immediate_exit) r = -EINTR; else r = kvmppc_vcpu_run(vcpu); kvm_sigset_deactivate(vcpu); #ifdef CONFIG_ALTIVEC out: #endif /* * We're already returning to userspace, don't pass the * RESUME_HOST flags along. 
*/ if (r > 0) r = 0; vcpu_put(vcpu); return r; } int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { if (irq->irq == KVM_INTERRUPT_UNSET) { kvmppc_core_dequeue_external(vcpu); return 0; } kvmppc_core_queue_external(vcpu, irq); kvm_vcpu_kick(vcpu); return 0; } static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, struct kvm_enable_cap *cap) { int r; if (cap->flags) return -EINVAL; switch (cap->cap) { case KVM_CAP_PPC_OSI: r = 0; vcpu->arch.osi_enabled = true; break; case KVM_CAP_PPC_PAPR: r = 0; vcpu->arch.papr_enabled = true; break; case KVM_CAP_PPC_EPR: r = 0; if (cap->args[0]) vcpu->arch.epr_flags |= KVMPPC_EPR_USER; else vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; break; #ifdef CONFIG_BOOKE case KVM_CAP_PPC_BOOKE_WATCHDOG: r = 0; vcpu->arch.watchdog_enabled = true; break; #endif #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) case KVM_CAP_SW_TLB: { struct kvm_config_tlb cfg; void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; r = -EFAULT; if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) break; r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); break; } #endif #ifdef CONFIG_KVM_MPIC case KVM_CAP_IRQ_MPIC: { struct fd f; struct kvm_device *dev; r = -EBADF; f = fdget(cap->args[0]); if (!f.file) break; r = -EPERM; dev = kvm_device_from_filp(f.file); if (dev) r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); fdput(f); break; } #endif #ifdef CONFIG_KVM_XICS case KVM_CAP_IRQ_XICS: { struct fd f; struct kvm_device *dev; r = -EBADF; f = fdget(cap->args[0]); if (!f.file) break; r = -EPERM; dev = kvm_device_from_filp(f.file); if (dev) { if (xics_on_xive()) r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); else r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); } fdput(f); break; } #endif /* CONFIG_KVM_XICS */ #ifdef CONFIG_KVM_XIVE case KVM_CAP_PPC_IRQ_XIVE: { struct fd f; struct kvm_device *dev; r = -EBADF; f = fdget(cap->args[0]); if (!f.file) break; r = -ENXIO; if (!xive_enabled()) break; r = -EPERM; dev = kvm_device_from_filp(f.file); if (dev) r = kvmppc_xive_native_connect_vcpu(dev, vcpu, cap->args[1]); fdput(f); break; } #endif /* CONFIG_KVM_XIVE */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE case KVM_CAP_PPC_FWNMI: r = -EINVAL; if (!is_kvmppc_hv_enabled(vcpu->kvm)) break; r = 0; vcpu->kvm->arch.fwnmi_enabled = true; break; #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ default: r = -EINVAL; break; } if (!r) r = kvmppc_sanity_check(vcpu); return r; } bool kvm_arch_intc_initialized(struct kvm *kvm) { #ifdef CONFIG_KVM_MPIC if (kvm->arch.mpic) return true; #endif #ifdef CONFIG_KVM_XICS if (kvm->arch.xics || kvm->arch.xive) return true; #endif return false; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -EINVAL; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -EINVAL; } long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; if (ioctl == KVM_INTERRUPT) { struct kvm_interrupt irq; if (copy_from_user(&irq, argp, sizeof(irq))) return -EFAULT; return kvm_vcpu_ioctl_interrupt(vcpu, &irq); } return -ENOIOCTLCMD; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; long r; switch (ioctl) { case KVM_ENABLE_CAP: { struct kvm_enable_cap cap; r = -EFAULT; if (copy_from_user(&cap, argp, sizeof(cap))) goto out; 
vcpu_load(vcpu); r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); vcpu_put(vcpu); break; } case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; r = -EFAULT; if (copy_from_user(&reg, argp, sizeof(reg))) goto out; if (ioctl == KVM_SET_ONE_REG) r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg); else r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg); break; } #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) case KVM_DIRTY_TLB: { struct kvm_dirty_tlb dirty; r = -EFAULT; if (copy_from_user(&dirty, argp, sizeof(dirty))) goto out; vcpu_load(vcpu); r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); vcpu_put(vcpu); break; } #endif default: r = -EINVAL; } out: return r; } vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) { u32 inst_nop = 0x60000000; #ifdef CONFIG_KVM_BOOKE_HV u32 inst_sc1 = 0x44000022; pvinfo->hcall[0] = cpu_to_be32(inst_sc1); pvinfo->hcall[1] = cpu_to_be32(inst_nop); pvinfo->hcall[2] = cpu_to_be32(inst_nop); pvinfo->hcall[3] = cpu_to_be32(inst_nop); #else u32 inst_lis = 0x3c000000; u32 inst_ori = 0x60000000; u32 inst_sc = 0x44000002; u32 inst_imm_mask = 0xffff; /* * The hypercall to get into KVM from within guest context is as * follows: * * lis r0, r0, KVM_SC_MAGIC_R0@h * ori r0, KVM_SC_MAGIC_R0@l * sc * nop */ pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask)); pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask)); pvinfo->hcall[2] = cpu_to_be32(inst_sc); pvinfo->hcall[3] = cpu_to_be32(inst_nop); #endif pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; return 0; } bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) { int ret = 0; #ifdef CONFIG_KVM_MPIC ret = ret || (kvm->arch.mpic != NULL); #endif #ifdef CONFIG_KVM_XICS ret = ret || (kvm->arch.xics != NULL); ret = ret || (kvm->arch.xive != NULL); #endif smp_rmb(); return ret; } int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status) { if (!kvm_arch_irqchip_in_kernel(kvm)) return -ENXIO; irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event->irq, irq_event->level, line_status); return 0; } int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) { int r; if (cap->flags) return -EINVAL; switch (cap->cap) { #ifdef CONFIG_KVM_BOOK3S_64_HANDLER case KVM_CAP_PPC_ENABLE_HCALL: { unsigned long hcall = cap->args[0]; r = -EINVAL; if (hcall > MAX_HCALL_OPCODE || (hcall & 3) || cap->args[1] > 1) break; if (!kvmppc_book3s_hcall_implemented(kvm, hcall)) break; if (cap->args[1]) set_bit(hcall / 4, kvm->arch.enabled_hcalls); else clear_bit(hcall / 4, kvm->arch.enabled_hcalls); r = 0; break; } case KVM_CAP_PPC_SMT: { unsigned long mode = cap->args[0]; unsigned long flags = cap->args[1]; r = -EINVAL; if (kvm->arch.kvm_ops->set_smt_mode) r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); break; } case KVM_CAP_PPC_NESTED_HV: r = -EINVAL; if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_nested) break; r = kvm->arch.kvm_ops->enable_nested(kvm); break; #endif #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) case KVM_CAP_PPC_SECURE_GUEST: r = -EINVAL; if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm) break; r = kvm->arch.kvm_ops->enable_svm(kvm); break; case KVM_CAP_PPC_DAWR1: r = -EINVAL; if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1) break; r = kvm->arch.kvm_ops->enable_dawr1(kvm); break; #endif default: r = -EINVAL; break; } return r; } #ifdef 
CONFIG_PPC_BOOK3S_64 /* * These functions check whether the underlying hardware is safe * against attacks based on observing the effects of speculatively * executed instructions, and whether it supplies instructions for * use in workarounds. The information comes from firmware, either * via the device tree on powernv platforms or from an hcall on * pseries platforms. */ #ifdef CONFIG_PPC_PSERIES static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) { struct h_cpu_char_result c; unsigned long rc; if (!machine_is(pseries)) return -ENOTTY; rc = plpar_get_cpu_characteristics(&c); if (rc == H_SUCCESS) { cp->character = c.character; cp->behaviour = c.behaviour; cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | KVM_PPC_CPU_CHAR_BR_HINT_HONOURED | KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS | KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR | KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; } return 0; } #else static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) { return -ENOTTY; } #endif static inline bool have_fw_feat(struct device_node *fw_features, const char *state, const char *name) { struct device_node *np; bool r = false; np = of_get_child_by_name(fw_features, name); if (np) { r = of_property_read_bool(np, state); of_node_put(np); } return r; } static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp) { struct device_node *np, *fw_features; int r; memset(cp, 0, sizeof(*cp)); r = pseries_get_cpu_char(cp); if (r != -ENOTTY) return r; np = of_find_node_by_name(NULL, "ibm,opal"); if (np) { fw_features = of_get_child_by_name(np, "fw-features"); of_node_put(np); if (!fw_features) return 0; if (have_fw_feat(fw_features, "enabled", "inst-spec-barrier-ori31,31,0")) cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31; if (have_fw_feat(fw_features, "enabled", "fw-bcctrl-serialized")) cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED; if (have_fw_feat(fw_features, "enabled", "inst-l1d-flush-ori30,30,0")) cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30; if (have_fw_feat(fw_features, "enabled", "inst-l1d-flush-trig2")) cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2; if (have_fw_feat(fw_features, "enabled", "fw-l1d-thread-split")) cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV; if (have_fw_feat(fw_features, "enabled", "fw-count-cache-disabled")) cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; if (have_fw_feat(fw_features, "enabled", "fw-count-cache-flush-bcctr2,0,0")) cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS | KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; if (have_fw_feat(fw_features, "enabled", "speculation-policy-favor-security")) cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY; if (!have_fw_feat(fw_features, "disabled", "needs-l1d-flush-msr-pr-0-to-1")) cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR; if (!have_fw_feat(fw_features, "disabled", "needs-spec-barrier-for-bound-checks")) cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; if (have_fw_feat(fw_features, "enabled", "needs-count-cache-flush-on-context-switch")) cp->behaviour |= 
KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR | KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; of_node_put(fw_features); } return 0; } #endif int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm __maybe_unused = filp->private_data; void __user *argp = (void __user *)arg; int r; switch (ioctl) { case KVM_PPC_GET_PVINFO: { struct kvm_ppc_pvinfo pvinfo; memset(&pvinfo, 0, sizeof(pvinfo)); r = kvm_vm_ioctl_get_pvinfo(&pvinfo); if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { r = -EFAULT; goto out; } break; } #ifdef CONFIG_SPAPR_TCE_IOMMU case KVM_CREATE_SPAPR_TCE_64: { struct kvm_create_spapr_tce_64 create_tce_64; r = -EFAULT; if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64))) goto out; if (create_tce_64.flags) { r = -EINVAL; goto out; } r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); goto out; } case KVM_CREATE_SPAPR_TCE: { struct kvm_create_spapr_tce create_tce; struct kvm_create_spapr_tce_64 create_tce_64; r = -EFAULT; if (copy_from_user(&create_tce, argp, sizeof(create_tce))) goto out; create_tce_64.liobn = create_tce.liobn; create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K; create_tce_64.offset = 0; create_tce_64.size = create_tce.window_size >> IOMMU_PAGE_SHIFT_4K; create_tce_64.flags = 0; r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); goto out; } #endif #ifdef CONFIG_PPC_BOOK3S_64 case KVM_PPC_GET_SMMU_INFO: { struct kvm_ppc_smmu_info info; struct kvm *kvm = filp->private_data; memset(&info, 0, sizeof(info)); r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) r = -EFAULT; break; } case KVM_PPC_RTAS_DEFINE_TOKEN: { struct kvm *kvm = filp->private_data; r = kvm_vm_ioctl_rtas_define_token(kvm, argp); break; } case KVM_PPC_CONFIGURE_V3_MMU: { struct kvm *kvm = filp->private_data; struct kvm_ppc_mmuv3_cfg cfg; r = -EINVAL; if (!kvm->arch.kvm_ops->configure_mmu) goto out; r = -EFAULT; if (copy_from_user(&cfg, argp, sizeof(cfg))) goto out; r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); break; } case KVM_PPC_GET_RMMU_INFO: { struct kvm *kvm = filp->private_data; struct kvm_ppc_rmmu_info info; r = -EINVAL; if (!kvm->arch.kvm_ops->get_rmmu_info) goto out; r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) r = -EFAULT; break; } case KVM_PPC_GET_CPU_CHAR: { struct kvm_ppc_cpu_char cpuchar; r = kvmppc_get_cpu_char(&cpuchar); if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar))) r = -EFAULT; break; } case KVM_PPC_SVM_OFF: { struct kvm *kvm = filp->private_data; r = 0; if (!kvm->arch.kvm_ops->svm_off) goto out; r = kvm->arch.kvm_ops->svm_off(kvm); break; } default: { struct kvm *kvm = filp->private_data; r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); } #else /* CONFIG_PPC_BOOK3S_64 */ default: r = -ENOTTY; #endif } out: return r; } static DEFINE_IDA(lpid_inuse); static unsigned long nr_lpids; long kvmppc_alloc_lpid(void) { int lpid; /* The host LPID must always be 0 (allocation starts at 1) */ lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL); if (lpid < 0) { if (lpid == -ENOMEM) pr_err("%s: Out of memory\n", __func__); else pr_err("%s: No LPIDs free\n", __func__); return -ENOMEM; } return lpid; } EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid); void kvmppc_free_lpid(long lpid) { ida_free(&lpid_inuse, lpid); } EXPORT_SYMBOL_GPL(kvmppc_free_lpid); /* nr_lpids_param includes the host LPID */ void 
kvmppc_init_lpid(unsigned long nr_lpids_param) { nr_lpids = nr_lpids_param; } EXPORT_SYMBOL_GPL(kvmppc_init_lpid); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr); void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) { if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs) vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry); } int kvm_arch_create_vm_debugfs(struct kvm *kvm) { if (kvm->arch.kvm_ops->create_vm_debugfs) kvm->arch.kvm_ops->create_vm_debugfs(kvm); return 0; }
linux-master
arch/powerpc/kvm/powerpc.c
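The kvm_vcpu_ioctl_get_one_reg()/kvm_vcpu_ioctl_set_one_reg() handlers above are driven from userspace through the generic ONE_REG interface. The following is a minimal userspace sketch, not part of the kernel sources, showing that flow for the VRSAVE register; it assumes a powerpc host with a <linux/kvm.h> matching this kernel, vcpu_fd is assumed to be an already-created vCPU file descriptor, and toggle_vrsave is a hypothetical helper name.

#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: read VRSAVE through KVM_GET_ONE_REG, set one bit,
 * and write it back through KVM_SET_ONE_REG. vcpu_fd must be a vCPU fd
 * obtained from KVM_CREATE_VCPU elsewhere (assumption of this sketch).
 */
static int toggle_vrsave(int vcpu_fd)
{
	uint32_t vrsave = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_VRSAVE,	/* 32-bit ONE_REG id */
		.addr = (uintptr_t)&vrsave,	/* userspace buffer */
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
		perror("KVM_GET_ONE_REG");
		return -1;
	}
	printf("VRSAVE = 0x%08x\n", (unsigned int)vrsave);

	vrsave |= 1;			/* mark VR0 as in use */
	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0) {
		perror("KVM_SET_ONE_REG");
		return -1;
	}
	return 0;
}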
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved. * * Authors: * Alexander Graf <[email protected]> */ #include <linux/kvm_host.h> #include <linux/hash.h> #include <linux/slab.h> #include <linux/rculist.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/machdep.h> #include <asm/mmu_context.h> #include <asm/hw_irq.h> #include "trace_pr.h" #define PTE_SIZE 12 static struct kmem_cache *hpte_cache; static inline u64 kvmppc_mmu_hash_pte(u64 eaddr) { return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE); } static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr) { return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE, HPTEG_HASH_BITS_PTE_LONG); } static inline u64 kvmppc_mmu_hash_vpte(u64 vpage) { return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE); } static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage) { return hash_64((vpage & 0xffffff000ULL) >> 12, HPTEG_HASH_BITS_VPTE_LONG); } #ifdef CONFIG_PPC_BOOK3S_64 static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage) { return hash_64((vpage & 0xffffffff0ULL) >> 4, HPTEG_HASH_BITS_VPTE_64K); } #endif void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) { u64 index; struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); trace_kvm_book3s_mmu_map(pte); spin_lock(&vcpu3s->mmu_lock); /* Add to ePTE list */ index = kvmppc_mmu_hash_pte(pte->pte.eaddr); hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]); /* Add to ePTE_long list */ index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr); hlist_add_head_rcu(&pte->list_pte_long, &vcpu3s->hpte_hash_pte_long[index]); /* Add to vPTE list */ index = kvmppc_mmu_hash_vpte(pte->pte.vpage); hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]); /* Add to vPTE_long list */ index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage); hlist_add_head_rcu(&pte->list_vpte_long, &vcpu3s->hpte_hash_vpte_long[index]); #ifdef CONFIG_PPC_BOOK3S_64 /* Add to vPTE_64k list */ index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage); hlist_add_head_rcu(&pte->list_vpte_64k, &vcpu3s->hpte_hash_vpte_64k[index]); #endif vcpu3s->hpte_cache_count++; spin_unlock(&vcpu3s->mmu_lock); } static void free_pte_rcu(struct rcu_head *head) { struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head); kmem_cache_free(hpte_cache, pte); } static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); trace_kvm_book3s_mmu_invalidate(pte); /* Different for 32 and 64 bit */ kvmppc_mmu_invalidate_pte(vcpu, pte); spin_lock(&vcpu3s->mmu_lock); /* pte already invalidated in between? 
*/ if (hlist_unhashed(&pte->list_pte)) { spin_unlock(&vcpu3s->mmu_lock); return; } hlist_del_init_rcu(&pte->list_pte); hlist_del_init_rcu(&pte->list_pte_long); hlist_del_init_rcu(&pte->list_vpte); hlist_del_init_rcu(&pte->list_vpte_long); #ifdef CONFIG_PPC_BOOK3S_64 hlist_del_init_rcu(&pte->list_vpte_64k); #endif vcpu3s->hpte_cache_count--; spin_unlock(&vcpu3s->mmu_lock); call_rcu(&pte->rcu_head, free_pte_rcu); } static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hpte_cache *pte; int i; rcu_read_lock(); for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i]; hlist_for_each_entry_rcu(pte, list, list_vpte_long) invalidate_pte(vcpu, pte); } rcu_read_unlock(); } static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hlist_head *list; struct hpte_cache *pte; /* Find the list of entries in the map */ list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)]; rcu_read_lock(); /* Check the list for matching entries and invalidate */ hlist_for_each_entry_rcu(pte, list, list_pte) if ((pte->pte.eaddr & ~0xfffUL) == guest_ea) invalidate_pte(vcpu, pte); rcu_read_unlock(); } static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hlist_head *list; struct hpte_cache *pte; /* Find the list of entries in the map */ list = &vcpu3s->hpte_hash_pte_long[ kvmppc_mmu_hash_pte_long(guest_ea)]; rcu_read_lock(); /* Check the list for matching entries and invalidate */ hlist_for_each_entry_rcu(pte, list, list_pte_long) if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea) invalidate_pte(vcpu, pte); rcu_read_unlock(); } void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) { trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask); guest_ea &= ea_mask; switch (ea_mask) { case ~0xfffUL: kvmppc_mmu_pte_flush_page(vcpu, guest_ea); break; case 0x0ffff000: kvmppc_mmu_pte_flush_long(vcpu, guest_ea); break; case 0: /* Doing a complete flush -> start from scratch */ kvmppc_mmu_pte_flush_all(vcpu); break; default: WARN_ON(1); break; } } /* Flush with mask 0xfffffffff */ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hlist_head *list; struct hpte_cache *pte; u64 vp_mask = 0xfffffffffULL; list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)]; rcu_read_lock(); /* Check the list for matching entries and invalidate */ hlist_for_each_entry_rcu(pte, list, list_vpte) if ((pte->pte.vpage & vp_mask) == guest_vp) invalidate_pte(vcpu, pte); rcu_read_unlock(); } #ifdef CONFIG_PPC_BOOK3S_64 /* Flush with mask 0xffffffff0 */ static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hlist_head *list; struct hpte_cache *pte; u64 vp_mask = 0xffffffff0ULL; list = &vcpu3s->hpte_hash_vpte_64k[ kvmppc_mmu_hash_vpte_64k(guest_vp)]; rcu_read_lock(); /* Check the list for matching entries and invalidate */ hlist_for_each_entry_rcu(pte, list, list_vpte_64k) if ((pte->pte.vpage & vp_mask) == guest_vp) invalidate_pte(vcpu, pte); rcu_read_unlock(); } #endif /* Flush with mask 0xffffff000 */ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hlist_head *list; struct hpte_cache *pte; u64 
vp_mask = 0xffffff000ULL; list = &vcpu3s->hpte_hash_vpte_long[ kvmppc_mmu_hash_vpte_long(guest_vp)]; rcu_read_lock(); /* Check the list for matching entries and invalidate */ hlist_for_each_entry_rcu(pte, list, list_vpte_long) if ((pte->pte.vpage & vp_mask) == guest_vp) invalidate_pte(vcpu, pte); rcu_read_unlock(); } void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) { trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask); guest_vp &= vp_mask; switch(vp_mask) { case 0xfffffffffULL: kvmppc_mmu_pte_vflush_short(vcpu, guest_vp); break; #ifdef CONFIG_PPC_BOOK3S_64 case 0xffffffff0ULL: kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp); break; #endif case 0xffffff000ULL: kvmppc_mmu_pte_vflush_long(vcpu, guest_vp); break; default: WARN_ON(1); return; } } void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hpte_cache *pte; int i; trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end); rcu_read_lock(); for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i]; hlist_for_each_entry_rcu(pte, list, list_vpte_long) if ((pte->pte.raddr >= pa_start) && (pte->pte.raddr < pa_end)) invalidate_pte(vcpu, pte); } rcu_read_unlock(); } struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hpte_cache *pte; if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM) kvmppc_mmu_pte_flush_all(vcpu); pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL); return pte; } void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte) { kmem_cache_free(hpte_cache, pte); } void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu) { kvmppc_mmu_pte_flush(vcpu, 0, 0); } static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len) { int i; for (i = 0; i < len; i++) INIT_HLIST_HEAD(&hash_list[i]); } int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); /* init hpte lookup hashes */ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte, ARRAY_SIZE(vcpu3s->hpte_hash_pte)); kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long, ARRAY_SIZE(vcpu3s->hpte_hash_pte_long)); kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte, ARRAY_SIZE(vcpu3s->hpte_hash_vpte)); kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long, ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long)); #ifdef CONFIG_PPC_BOOK3S_64 kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k, ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k)); #endif spin_lock_init(&vcpu3s->mmu_lock); return 0; } int kvmppc_mmu_hpte_sysinit(void) { /* init hpte slab cache */ hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache), sizeof(struct hpte_cache), 0, NULL); return 0; } void kvmppc_mmu_hpte_sysexit(void) { kmem_cache_destroy(hpte_cache); }
linux-master
arch/powerpc/kvm/book3s_mmu_hpte.c
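The kvmppc_mmu_hash_*() helpers above feed fixed-size bucket arrays (hpte_hash_pte, hpte_hash_vpte, ...) that shadow guest translations. Below is a standalone sketch, not kernel code, of the same multiplicative bucket-selection scheme used by hash_64(); the 13-bit table size, the example effective address, and the hash_64_bits name are assumptions for illustration, while PTE_SIZE mirrors the 4K page shift used above.

#include <stdint.h>
#include <stdio.h>

#define PTE_SIZE	12			/* 4K page shift, as in the file above */
#define GOLDEN_RATIO_64	0x61C8864680B583EBull	/* multiplicative constant from linux/hash.h */

/* Keep the top 'bits' bits of the product as the bucket index. */
static uint64_t hash_64_bits(uint64_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_64) >> (64 - bits);
}

int main(void)
{
	uint64_t eaddr = 0xc000000001234000ull;	/* example guest effective address */
	unsigned int bits = 13;			/* assumed table size: 2^13 buckets */

	printf("ePTE bucket for 0x%llx: %llu\n",
	       (unsigned long long)eaddr,
	       (unsigned long long)hash_64_bits(eaddr >> PTE_SIZE, bits));
	return 0;
}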
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright IBM Corp. 2007 * Copyright 2011 Freescale Semiconductor, Inc. * * Authors: Hollis Blanchard <[email protected]> */ #include <linux/jiffies.h> #include <linux/hrtimer.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kvm_host.h> #include <linux/clockchips.h> #include <asm/reg.h> #include <asm/time.h> #include <asm/byteorder.h> #include <asm/kvm_ppc.h> #include <asm/disassemble.h> #include <asm/ppc-opcode.h> #include "timing.h" #include "trace.h" void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) { unsigned long dec_nsec; unsigned long long dec_time; pr_debug("mtDEC: %lx\n", vcpu->arch.dec); hrtimer_try_to_cancel(&vcpu->arch.dec_timer); #ifdef CONFIG_PPC_BOOK3S /* mtdec lowers the interrupt line when positive. */ kvmppc_core_dequeue_dec(vcpu); #endif #ifdef CONFIG_BOOKE /* On BOOKE, DEC = 0 is as good as decrementer not enabled */ if (vcpu->arch.dec == 0) return; #endif /* * The decrementer ticks at the same rate as the timebase, so * that's how we convert the guest DEC value to the number of * host ticks. */ dec_time = vcpu->arch.dec; /* * Guest timebase ticks at the same frequency as host timebase. * So use the host timebase calculations for decrementer emulation. */ dec_time = tb_to_ns(dec_time); dec_nsec = do_div(dec_time, NSEC_PER_SEC); hrtimer_start(&vcpu->arch.dec_timer, ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); vcpu->arch.dec_jiffies = get_tb(); } u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) { u64 jd = tb - vcpu->arch.dec_jiffies; #ifdef CONFIG_BOOKE if (vcpu->arch.dec < jd) return 0; #endif return vcpu->arch.dec - jd; } static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) { enum emulation_result emulated = EMULATE_DONE; ulong spr_val = kvmppc_get_gpr(vcpu, rs); switch (sprn) { case SPRN_SRR0: kvmppc_set_srr0(vcpu, spr_val); break; case SPRN_SRR1: kvmppc_set_srr1(vcpu, spr_val); break; /* XXX We need to context-switch the timebase for * watchdog and FIT. */ case SPRN_TBWL: break; case SPRN_TBWU: break; case SPRN_DEC: vcpu->arch.dec = (u32) spr_val; kvmppc_emulate_dec(vcpu); break; case SPRN_SPRG0: kvmppc_set_sprg0(vcpu, spr_val); break; case SPRN_SPRG1: kvmppc_set_sprg1(vcpu, spr_val); break; case SPRN_SPRG2: kvmppc_set_sprg2(vcpu, spr_val); break; case SPRN_SPRG3: kvmppc_set_sprg3(vcpu, spr_val); break; /* PIR can legally be written, but we ignore it */ case SPRN_PIR: break; default: emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn, spr_val); if (emulated == EMULATE_FAIL) printk(KERN_INFO "mtspr: unknown spr " "0x%x\n", sprn); break; } kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); return emulated; } static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) { enum emulation_result emulated = EMULATE_DONE; ulong spr_val = 0; switch (sprn) { case SPRN_SRR0: spr_val = kvmppc_get_srr0(vcpu); break; case SPRN_SRR1: spr_val = kvmppc_get_srr1(vcpu); break; case SPRN_PVR: spr_val = vcpu->arch.pvr; break; case SPRN_PIR: spr_val = vcpu->vcpu_id; break; /* Note: mftb and TBRL/TBWL are user-accessible, so * the guest can always access the real TB anyways. * In fact, we probably will never see these traps. 
*/ case SPRN_TBWL: spr_val = get_tb() >> 32; break; case SPRN_TBWU: spr_val = get_tb(); break; case SPRN_SPRG0: spr_val = kvmppc_get_sprg0(vcpu); break; case SPRN_SPRG1: spr_val = kvmppc_get_sprg1(vcpu); break; case SPRN_SPRG2: spr_val = kvmppc_get_sprg2(vcpu); break; case SPRN_SPRG3: spr_val = kvmppc_get_sprg3(vcpu); break; /* Note: SPRG4-7 are user-readable, so we don't get * a trap. */ case SPRN_DEC: spr_val = kvmppc_get_dec(vcpu, get_tb()); break; default: emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn, &spr_val); if (unlikely(emulated == EMULATE_FAIL)) { printk(KERN_INFO "mfspr: unknown spr " "0x%x\n", sprn); } break; } if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, rt, spr_val); kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); return emulated; } /* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. */ int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu) { u32 inst; ppc_inst_t pinst; int rs, rt, sprn; enum emulation_result emulated; int advance = 1; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst); inst = ppc_inst_val(pinst); if (emulated != EMULATE_DONE) return emulated; pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); rs = get_rs(inst); rt = get_rt(inst); sprn = get_sprn(inst); switch (get_op(inst)) { case OP_TRAP: #ifdef CONFIG_PPC_BOOK3S case OP_TRAP_64: kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else kvmppc_core_queue_program(vcpu, vcpu->arch.shared->esr | ESR_PTR); #endif advance = 0; break; case 31: switch (get_xop(inst)) { case OP_31_XOP_TRAP: #ifdef CONFIG_64BIT case OP_31_XOP_TRAP_64: #endif #ifdef CONFIG_PPC_BOOK3S kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else kvmppc_core_queue_program(vcpu, vcpu->arch.shared->esr | ESR_PTR); #endif advance = 0; break; case OP_31_XOP_MFSPR: emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt); if (emulated == EMULATE_AGAIN) { emulated = EMULATE_DONE; advance = 0; } break; case OP_31_XOP_MTSPR: emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); if (emulated == EMULATE_AGAIN) { emulated = EMULATE_DONE; advance = 0; } break; case OP_31_XOP_TLBSYNC: break; default: /* Attempt core-specific emulation below. */ emulated = EMULATE_FAIL; } break; case 0: /* * Instruction with primary opcode 0. Based on PowerISA * these are illegal instructions. */ if (inst == KVMPPC_INST_SW_BREAKPOINT) { vcpu->run->exit_reason = KVM_EXIT_DEBUG; vcpu->run->debug.arch.status = 0; vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); emulated = EMULATE_EXIT_USER; advance = 0; } else emulated = EMULATE_FAIL; break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) { emulated = vcpu->kvm->arch.kvm_ops->emulate_op(vcpu, inst, &advance); if (emulated == EMULATE_AGAIN) { advance = 0; } else if (emulated == EMULATE_FAIL) { advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); } } trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); /* Advance past emulated instruction. */ /* * If this ever handles prefixed instructions, the 4 * will need to become ppc_inst_len(pinst) instead. */ if (advance) kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); return emulated; } EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);
linux-master
arch/powerpc/kvm/emulate.c
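kvmppc_emulate_dec() above re-arms an hrtimer by converting the guest DEC value (timebase ticks) into a seconds/nanoseconds pair via tb_to_ns() and do_div(). The sketch below shows that split as plain arithmetic; the 512 MHz timebase frequency is an assumption for illustration (the kernel derives the real value from firmware), and dec_to_timespec is a hypothetical helper.

#include <stdint.h>
#include <stdio.h>

#define TB_FREQ_HZ	512000000ull	/* assumed timebase frequency */
#define NSEC_PER_SEC	1000000000ull

/* Split a tick count into whole seconds and leftover nanoseconds. */
static void dec_to_timespec(uint64_t ticks, uint64_t *sec, uint64_t *nsec)
{
	uint64_t rem = ticks % TB_FREQ_HZ;

	*sec  = ticks / TB_FREQ_HZ;
	/* rem < TB_FREQ_HZ, so rem * NSEC_PER_SEC stays within 64 bits */
	*nsec = rem * NSEC_PER_SEC / TB_FREQ_HZ;
}

int main(void)
{
	uint64_t sec, nsec;

	dec_to_timespec(0x7fffffffull, &sec, &nsec);	/* largest positive 32-bit DEC */
	printf("DEC 0x7fffffff expires in %llu.%09llu s\n",
	       (unsigned long long)sec, (unsigned long long)nsec);
	return 0;
}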
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation. */ #define pr_fmt(fmt) "xive-kvm: " fmt #include <linux/kernel.h> #include <linux/kvm_host.h> #include <linux/err.h> #include <linux/gfp.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/percpu.h> #include <linux/cpumask.h> #include <linux/uaccess.h> #include <linux/irqdomain.h> #include <asm/kvm_book3s.h> #include <asm/kvm_ppc.h> #include <asm/hvcall.h> #include <asm/xics.h> #include <asm/xive.h> #include <asm/xive-regs.h> #include <asm/debug.h> #include <asm/time.h> #include <asm/opal.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include "book3s_xive.h" #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio)) #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio)) /* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */ #define XICS_DUMMY 1 static void xive_vm_ack_pending(struct kvmppc_xive_vcpu *xc) { u8 cppr; u16 ack; /* * Ensure any previous store to CPPR is ordered vs. * the subsequent loads from PIPR or ACK. */ eieio(); /* Perform the acknowledge OS to register cycle. */ ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG)); /* Synchronize subsequent queue accesses */ mb(); /* XXX Check grouping level */ /* Anything ? */ if (!((ack >> 8) & TM_QW1_NSR_EO)) return; /* Grab CPPR of the most favored pending interrupt */ cppr = ack & 0xff; if (cppr < 8) xc->pending |= 1 << cppr; /* Check consistency */ if (cppr >= xc->hw_cppr) pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n", smp_processor_id(), cppr, xc->hw_cppr); /* * Update our image of the HW CPPR. We don't yet modify * xc->cppr, this will be done as we scan for interrupts * in the queues. */ xc->hw_cppr = cppr; } static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset) { u64 val; if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) offset |= XIVE_ESB_LD_ST_MO; val = __raw_readq(__x_eoi_page(xd) + offset); #ifdef __LITTLE_ENDIAN__ val >>= 64-8; #endif return (u8)val; } static void xive_vm_source_eoi(u32 hw_irq, struct xive_irq_data *xd) { /* If the XIVE supports the new "store EOI facility, use it */ if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) __raw_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI); else if (xd->flags & XIVE_IRQ_FLAG_LSI) { /* * For LSIs the HW EOI cycle is used rather than PQ bits, * as they are automatically re-triggred in HW when still * pending. */ __raw_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI); } else { uint64_t eoi_val; /* * Otherwise for EOI, we use the special MMIO that does * a clear of both P and Q and returns the old Q, * except for LSIs where we use the "EOI cycle" special * load. 
* * This allows us to then do a re-trigger if Q was set * rather than synthetizing an interrupt in software */ eoi_val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_00); /* Re-trigger if needed */ if ((eoi_val & 1) && __x_trig_page(xd)) __raw_writeq(0, __x_trig_page(xd)); } } enum { scan_fetch, scan_poll, scan_eoi, }; static u32 xive_vm_scan_interrupts(struct kvmppc_xive_vcpu *xc, u8 pending, int scan_type) { u32 hirq = 0; u8 prio = 0xff; /* Find highest pending priority */ while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) { struct xive_q *q; u32 idx, toggle; __be32 *qpage; /* * If pending is 0 this will return 0xff which is what * we want */ prio = ffs(pending) - 1; /* Don't scan past the guest cppr */ if (prio >= xc->cppr || prio > 7) { if (xc->mfrr < xc->cppr) { prio = xc->mfrr; hirq = XICS_IPI; } break; } /* Grab queue and pointers */ q = &xc->queues[prio]; idx = q->idx; toggle = q->toggle; /* * Snapshot the queue page. The test further down for EOI * must use the same "copy" that was used by __xive_read_eq * since qpage can be set concurrently and we don't want * to miss an EOI. */ qpage = READ_ONCE(q->qpage); skip_ipi: /* * Try to fetch from the queue. Will return 0 for a * non-queueing priority (ie, qpage = 0). */ hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle); /* * If this was a signal for an MFFR change done by * H_IPI we skip it. Additionally, if we were fetching * we EOI it now, thus re-enabling reception of a new * such signal. * * We also need to do that if prio is 0 and we had no * page for the queue. In this case, we have non-queued * IPI that needs to be EOId. * * This is safe because if we have another pending MFRR * change that wasn't observed above, the Q bit will have * been set and another occurrence of the IPI will trigger. */ if (hirq == XICS_IPI || (prio == 0 && !qpage)) { if (scan_type == scan_fetch) { xive_vm_source_eoi(xc->vp_ipi, &xc->vp_ipi_data); q->idx = idx; q->toggle = toggle; } /* Loop back on same queue with updated idx/toggle */ WARN_ON(hirq && hirq != XICS_IPI); if (hirq) goto skip_ipi; } /* If it's the dummy interrupt, continue searching */ if (hirq == XICS_DUMMY) goto skip_ipi; /* Clear the pending bit if the queue is now empty */ if (!hirq) { pending &= ~(1 << prio); /* * Check if the queue count needs adjusting due to * interrupts being moved away. */ if (atomic_read(&q->pending_count)) { int p = atomic_xchg(&q->pending_count, 0); if (p) { WARN_ON(p > atomic_read(&q->count)); atomic_sub(p, &q->count); } } } /* * If the most favoured prio we found pending is less * favored (or equal) than a pending IPI, we return * the IPI instead. */ if (prio >= xc->mfrr && xc->mfrr < xc->cppr) { prio = xc->mfrr; hirq = XICS_IPI; break; } /* If fetching, update queue pointers */ if (scan_type == scan_fetch) { q->idx = idx; q->toggle = toggle; } } /* If we are just taking a "peek", do nothing else */ if (scan_type == scan_poll) return hirq; /* Update the pending bits */ xc->pending = pending; /* * If this is an EOI that's it, no CPPR adjustment done here, * all we needed was cleanup the stale pending bits and check * if there's anything left. */ if (scan_type == scan_eoi) return hirq; /* * If we found an interrupt, adjust what the guest CPPR should * be as if we had just fetched that interrupt from HW. * * Note: This can only make xc->cppr smaller as the previous * loop will only exit with hirq != 0 if prio is lower than * the current xc->cppr. Thus we don't need to re-check xc->mfrr * for pending IPIs. 
*/ if (hirq) xc->cppr = prio; /* * If it was an IPI the HW CPPR might have been lowered too much * as the HW interrupt we use for IPIs is routed to priority 0. * * We re-sync it here. */ if (xc->cppr != xc->hw_cppr) { xc->hw_cppr = xc->cppr; __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR); } return hirq; } static unsigned long xive_vm_h_xirr(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; u8 old_cppr; u32 hirq; pr_devel("H_XIRR\n"); xc->stat_vm_h_xirr++; /* First collect pending bits from HW */ xive_vm_ack_pending(xc); pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n", xc->pending, xc->hw_cppr, xc->cppr); /* Grab previous CPPR and reverse map it */ old_cppr = xive_prio_to_guest(xc->cppr); /* Scan for actual interrupts */ hirq = xive_vm_scan_interrupts(xc, xc->pending, scan_fetch); pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n", hirq, xc->hw_cppr, xc->cppr); /* That should never hit */ if (hirq & 0xff000000) pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq); /* * XXX We could check if the interrupt is masked here and * filter it. If we chose to do so, we would need to do: * * if (masked) { * lock(); * if (masked) { * old_Q = true; * hirq = 0; * } * unlock(); * } */ /* Return interrupt and old CPPR in GPR4 */ vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24); return H_SUCCESS; } static unsigned long xive_vm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; u8 pending = xc->pending; u32 hirq; pr_devel("H_IPOLL(server=%ld)\n", server); xc->stat_vm_h_ipoll++; /* Grab the target VCPU if not the current one */ if (xc->server_num != server) { vcpu = kvmppc_xive_find_server(vcpu->kvm, server); if (!vcpu) return H_PARAMETER; xc = vcpu->arch.xive_vcpu; /* Scan all priorities */ pending = 0xff; } else { /* Grab pending interrupt if any */ __be64 qw1 = __raw_readq(xive_tima + TM_QW1_OS); u8 pipr = be64_to_cpu(qw1) & 0xff; if (pipr < 8) pending |= 1 << pipr; } hirq = xive_vm_scan_interrupts(xc, pending, scan_poll); /* Return interrupt and old CPPR in GPR4 */ vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24); return H_SUCCESS; } static void xive_vm_push_pending_to_hw(struct kvmppc_xive_vcpu *xc) { u8 pending, prio; pending = xc->pending; if (xc->mfrr != 0xff) { if (xc->mfrr < 8) pending |= 1 << xc->mfrr; else pending |= 0x80; } if (!pending) return; prio = ffs(pending) - 1; __raw_writeb(prio, xive_tima + TM_SPC_SET_OS_PENDING); } static void xive_vm_scan_for_rerouted_irqs(struct kvmppc_xive *xive, struct kvmppc_xive_vcpu *xc) { unsigned int prio; /* For each priority that is now masked */ for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { struct xive_q *q = &xc->queues[prio]; struct kvmppc_xive_irq_state *state; struct kvmppc_xive_src_block *sb; u32 idx, toggle, entry, irq, hw_num; struct xive_irq_data *xd; __be32 *qpage; u16 src; idx = q->idx; toggle = q->toggle; qpage = READ_ONCE(q->qpage); if (!qpage) continue; /* For each interrupt in the queue */ for (;;) { entry = be32_to_cpup(qpage + idx); /* No more ? */ if ((entry >> 31) == toggle) break; irq = entry & 0x7fffffff; /* Skip dummies and IPIs */ if (irq == XICS_DUMMY || irq == XICS_IPI) goto next; sb = kvmppc_xive_find_source(xive, irq, &src); if (!sb) goto next; state = &sb->irq_state[src]; /* Has it been rerouted ? */ if (xc->server_num == state->act_server) goto next; /* * Allright, it *has* been re-routed, kill it from * the queue. 
*/ qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY); /* Find the HW interrupt */ kvmppc_xive_select_irq(state, &hw_num, &xd); /* If it's not an LSI, set PQ to 11 the EOI will force a resend */ if (!(xd->flags & XIVE_IRQ_FLAG_LSI)) xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); /* EOI the source */ xive_vm_source_eoi(hw_num, xd); next: idx = (idx + 1) & q->msk; if (idx == 0) toggle ^= 1; } } } static int xive_vm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct kvmppc_xive *xive = vcpu->kvm->arch.xive; u8 old_cppr; pr_devel("H_CPPR(cppr=%ld)\n", cppr); xc->stat_vm_h_cppr++; /* Map CPPR */ cppr = xive_prio_from_guest(cppr); /* Remember old and update SW state */ old_cppr = xc->cppr; xc->cppr = cppr; /* * Order the above update of xc->cppr with the subsequent * read of xc->mfrr inside push_pending_to_hw() */ smp_mb(); if (cppr > old_cppr) { /* * We are masking less, we need to look for pending things * to deliver and set VP pending bits accordingly to trigger * a new interrupt otherwise we might miss MFRR changes for * which we have optimized out sending an IPI signal. */ xive_vm_push_pending_to_hw(xc); } else { /* * We are masking more, we need to check the queue for any * interrupt that has been routed to another CPU, take * it out (replace it with the dummy) and retrigger it. * * This is necessary since those interrupts may otherwise * never be processed, at least not until this CPU restores * its CPPR. * * This is in theory racy vs. HW adding new interrupts to * the queue. In practice this works because the interesting * cases are when the guest has done a set_xive() to move the * interrupt away, which flushes the xive, followed by the * target CPU doing a H_CPPR. So any new interrupt coming into * the queue must still be routed to us and isn't a source * of concern. */ xive_vm_scan_for_rerouted_irqs(xive, xc); } /* Apply new CPPR */ xc->hw_cppr = cppr; __raw_writeb(cppr, xive_tima + TM_QW1_OS + TM_CPPR); return H_SUCCESS; } static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) { struct kvmppc_xive *xive = vcpu->kvm->arch.xive; struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct xive_irq_data *xd; u8 new_cppr = xirr >> 24; u32 irq = xirr & 0x00ffffff, hw_num; u16 src; int rc = 0; pr_devel("H_EOI(xirr=%08lx)\n", xirr); xc->stat_vm_h_eoi++; xc->cppr = xive_prio_from_guest(new_cppr); /* * IPIs are synthetized from MFRR and thus don't need * any special EOI handling. The underlying interrupt * used to signal MFRR changes is EOId when fetched from * the queue. */ if (irq == XICS_IPI || irq == 0) { /* * This barrier orders the setting of xc->cppr vs. * subsequent test of xc->mfrr done inside * scan_interrupts and push_pending_to_hw */ smp_mb(); goto bail; } /* Find interrupt source */ sb = kvmppc_xive_find_source(xive, irq, &src); if (!sb) { pr_devel(" source not found !\n"); rc = H_PARAMETER; /* Same as above */ smp_mb(); goto bail; } state = &sb->irq_state[src]; kvmppc_xive_select_irq(state, &hw_num, &xd); state->in_eoi = true; /* * This barrier orders both setting of in_eoi above vs, * subsequent test of guest_priority, and the setting * of xc->cppr vs. 
subsequent test of xc->mfrr done inside * scan_interrupts and push_pending_to_hw */ smp_mb(); again: if (state->guest_priority == MASKED) { arch_spin_lock(&sb->lock); if (state->guest_priority != MASKED) { arch_spin_unlock(&sb->lock); goto again; } pr_devel(" EOI on saved P...\n"); /* Clear old_p, that will cause unmask to perform an EOI */ state->old_p = false; arch_spin_unlock(&sb->lock); } else { pr_devel(" EOI on source...\n"); /* Perform EOI on the source */ xive_vm_source_eoi(hw_num, xd); /* If it's an emulated LSI, check level and resend */ if (state->lsi && state->asserted) __raw_writeq(0, __x_trig_page(xd)); } /* * This barrier orders the above guest_priority check * and spin_lock/unlock with clearing in_eoi below. * * It also has to be a full mb() as it must ensure * the MMIOs done in source_eoi() are completed before * state->in_eoi is visible. */ mb(); state->in_eoi = false; bail: /* Re-evaluate pending IRQs and update HW */ xive_vm_scan_interrupts(xc, xc->pending, scan_eoi); xive_vm_push_pending_to_hw(xc); pr_devel(" after scan pending=%02x\n", xc->pending); /* Apply new CPPR */ xc->hw_cppr = xc->cppr; __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR); return rc; } static int xive_vm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, unsigned long mfrr) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr); xc->stat_vm_h_ipi++; /* Find target */ vcpu = kvmppc_xive_find_server(vcpu->kvm, server); if (!vcpu) return H_PARAMETER; xc = vcpu->arch.xive_vcpu; /* Locklessly write over MFRR */ xc->mfrr = mfrr; /* * The load of xc->cppr below and the subsequent MMIO store * to the IPI must happen after the above mfrr update is * globally visible so that: * * - Synchronize with another CPU doing an H_EOI or a H_CPPR * updating xc->cppr then reading xc->mfrr. * * - The target of the IPI sees the xc->mfrr update */ mb(); /* Shoot the IPI if most favored than target cppr */ if (mfrr < xc->cppr) __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data)); return H_SUCCESS; } /* * We leave a gap of a couple of interrupts in the queue to * account for the IPI and additional safety guard. */ #define XIVE_Q_GAP 2 static bool kvmppc_xive_vcpu_has_save_restore(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; /* Check enablement at VP level */ return xc->vp_cam & TM_QW1W2_HO; } bool kvmppc_xive_check_save_restore(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct kvmppc_xive *xive = xc->xive; if (xive->flags & KVMPPC_XIVE_FLAG_SAVE_RESTORE) return kvmppc_xive_vcpu_has_save_restore(vcpu); return true; } /* * Push a vcpu's context to the XIVE on guest entry. * This assumes we are in virtual mode (MMU on) */ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt; u64 pq; /* * Nothing to do if the platform doesn't have a XIVE * or this vCPU doesn't have its own XIVE context * (e.g. because it's not using an in-kernel interrupt controller). */ if (!tima || !vcpu->arch.xive_cam_word) return; eieio(); if (!kvmppc_xive_vcpu_has_save_restore(vcpu)) __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS); __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2); vcpu->arch.xive_pushed = 1; eieio(); /* * We clear the irq_pending flag. There is a small chance of a * race vs. 
the escalation interrupt happening on another * processor setting it again, but the only consequence is to * cause a spurious wakeup on the next H_CEDE, which is not an * issue. */ vcpu->arch.irq_pending = 0; /* * In single escalation mode, if the escalation interrupt is * on, we mask it. */ if (vcpu->arch.xive_esc_on) { pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + XIVE_ESB_SET_PQ_01)); mb(); /* * We have a possible subtle race here: The escalation * interrupt might have fired and be on its way to the * host queue while we mask it, and if we unmask it * early enough (re-cede right away), there is a * theoretical possibility that it fires again, thus * landing in the target queue more than once which is * a big no-no. * * Fortunately, solving this is rather easy. If the * above load setting PQ to 01 returns a previous * value where P is set, then we know the escalation * interrupt is somewhere on its way to the host. In * that case we simply don't clear the xive_esc_on * flag below. It will be eventually cleared by the * handler for the escalation interrupt. * * Then, when doing a cede, we check that flag again * before re-enabling the escalation interrupt, and if * set, we abort the cede. */ if (!(pq & XIVE_ESB_VAL_P)) /* Now P is 0, we can clear the flag */ vcpu->arch.xive_esc_on = 0; } } EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu); /* * Pull a vcpu's context from the XIVE on guest exit. * This assumes we are in virtual mode (MMU on) */ void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt; if (!vcpu->arch.xive_pushed) return; /* * Should not have been pushed if there is no tima */ if (WARN_ON(!tima)) return; eieio(); /* First load to pull the context, we ignore the value */ __raw_readl(tima + TM_SPC_PULL_OS_CTX); /* Second load to recover the context state (Words 0 and 1) */ if (!kvmppc_xive_vcpu_has_save_restore(vcpu)) vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS); /* Fixup some of the state for the next load */ vcpu->arch.xive_saved_state.lsmfb = 0; vcpu->arch.xive_saved_state.ack = 0xff; vcpu->arch.xive_pushed = 0; eieio(); } EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu); bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr; bool ret = true; if (!esc_vaddr) return ret; /* we are using XIVE with single escalation */ if (vcpu->arch.xive_esc_on) { /* * If we still have a pending escalation, abort the cede, * and we must set PQ to 10 rather than 00 so that we don't * potentially end up with two entries for the escalation * interrupt in the XIVE interrupt queue. In that case * we also don't want to set xive_esc_on to 1 here in * case we race with xive_esc_irq(). */ ret = false; /* * The escalation interrupts are special as we don't EOI them. * There is no need to use the load-after-store ordering offset * to set PQ to 10 as we won't use StoreEOI. */ __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10); } else { vcpu->arch.xive_esc_on = true; mb(); __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00); } mb(); return ret; } EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation); /* * This is a simple trigger for a generic XIVE IRQ. 
This must * only be called for interrupts that support a trigger page */ static bool xive_irq_trigger(struct xive_irq_data *xd) { /* This should be only for MSIs */ if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) return false; /* Those interrupts should always have a trigger page */ if (WARN_ON(!xd->trig_mmio)) return false; out_be64(xd->trig_mmio, 0); return true; } static irqreturn_t xive_esc_irq(int irq, void *data) { struct kvm_vcpu *vcpu = data; vcpu->arch.irq_pending = 1; smp_mb(); if (vcpu->arch.ceded || vcpu->arch.nested) kvmppc_fast_vcpu_kick(vcpu); /* Since we have the no-EOI flag, the interrupt is effectively * disabled now. Clearing xive_esc_on means we won't bother * doing so on the next entry. * * This also allows the entry code to know that if a PQ combination * of 10 is observed while xive_esc_on is true, it means the queue * contains an unprocessed escalation interrupt. We don't make use of * that knowledge today but might (see comment in book3s_hv_rmhandler.S) */ vcpu->arch.xive_esc_on = false; /* This orders xive_esc_on = false vs. subsequent stale_p = true */ smp_wmb(); /* goes with smp_mb() in cleanup_single_escalation */ return IRQ_HANDLED; } int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, bool single_escalation) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct xive_q *q = &xc->queues[prio]; char *name = NULL; int rc; /* Already there ? */ if (xc->esc_virq[prio]) return 0; /* Hook up the escalation interrupt */ xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); if (!xc->esc_virq[prio]) { pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n", prio, xc->server_num); return -EIO; } if (single_escalation) name = kasprintf(GFP_KERNEL, "kvm-%d-%d", vcpu->kvm->arch.lpid, xc->server_num); else name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d", vcpu->kvm->arch.lpid, xc->server_num, prio); if (!name) { pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n", prio, xc->server_num); rc = -ENOMEM; goto error; } pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio); rc = request_irq(xc->esc_virq[prio], xive_esc_irq, IRQF_NO_THREAD, name, vcpu); if (rc) { pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n", prio, xc->server_num); goto error; } xc->esc_virq_names[prio] = name; /* In single escalation mode, we grab the ESB MMIO of the * interrupt and mask it. Also populate the VCPU v/raddr * of the ESB page for use by asm entry/exit code. Finally * set the XIVE_IRQ_FLAG_NO_EOI flag which will prevent the * core code from performing an EOI on the escalation * interrupt, thus leaving it effectively masked after * it fires once. 
*/ if (single_escalation) { struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); vcpu->arch.xive_esc_raddr = xd->eoi_page; vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio; xd->flags |= XIVE_IRQ_FLAG_NO_EOI; } return 0; error: irq_dispose_mapping(xc->esc_virq[prio]); xc->esc_virq[prio] = 0; kfree(name); return rc; } static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct kvmppc_xive *xive = xc->xive; struct xive_q *q = &xc->queues[prio]; void *qpage; int rc; if (WARN_ON(q->qpage)) return 0; /* Allocate the queue and retrieve infos on current node for now */ qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order); if (!qpage) { pr_err("Failed to allocate queue %d for VCPU %d\n", prio, xc->server_num); return -ENOMEM; } memset(qpage, 0, 1 << xive->q_order); /* * Reconfigure the queue. This will set q->qpage only once the * queue is fully configured. This is a requirement for prio 0 * as we will stop doing EOIs for every IPI as soon as we observe * qpage being non-NULL, and instead will only EOI when we receive * corresponding queue 0 entries */ rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, xive->q_order, true); if (rc) pr_err("Failed to configure queue %d for VCPU %d\n", prio, xc->server_num); return rc; } /* Called with xive->lock held */ static int xive_check_provisioning(struct kvm *kvm, u8 prio) { struct kvmppc_xive *xive = kvm->arch.xive; struct kvm_vcpu *vcpu; unsigned long i; int rc; lockdep_assert_held(&xive->lock); /* Already provisioned ? */ if (xive->qmap & (1 << prio)) return 0; pr_devel("Provisioning prio... %d\n", prio); /* Provision each VCPU and enable escalations if needed */ kvm_for_each_vcpu(i, vcpu, kvm) { if (!vcpu->arch.xive_vcpu) continue; rc = xive_provision_queue(vcpu, prio); if (rc == 0 && !kvmppc_xive_has_single_escalation(xive)) kvmppc_xive_attach_escalation(vcpu, prio, kvmppc_xive_has_single_escalation(xive)); if (rc) return rc; } /* Order previous stores and mark it as provisioned */ mb(); xive->qmap |= (1 << prio); return 0; } static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio) { struct kvm_vcpu *vcpu; struct kvmppc_xive_vcpu *xc; struct xive_q *q; /* Locate target server */ vcpu = kvmppc_xive_find_server(kvm, server); if (!vcpu) { pr_warn("%s: Can't find server %d\n", __func__, server); return; } xc = vcpu->arch.xive_vcpu; if (WARN_ON(!xc)) return; q = &xc->queues[prio]; atomic_inc(&q->pending_count); } static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct xive_q *q; u32 max; if (WARN_ON(!xc)) return -ENXIO; if (!xc->valid) return -ENXIO; q = &xc->queues[prio]; if (WARN_ON(!q->qpage)) return -ENXIO; /* Calculate max number of interrupts in that queue. */ max = (q->msk + 1) - XIVE_Q_GAP; return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; } int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio) { struct kvm_vcpu *vcpu; unsigned long i; int rc; /* Locate target server */ vcpu = kvmppc_xive_find_server(kvm, *server); if (!vcpu) { pr_devel("Can't find server %d\n", *server); return -EINVAL; } pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio); /* Try pick it */ rc = xive_try_pick_queue(vcpu, prio); if (rc == 0) return rc; pr_devel(" .. 
failed, looking up candidate...\n"); /* Failed, pick another VCPU */ kvm_for_each_vcpu(i, vcpu, kvm) { if (!vcpu->arch.xive_vcpu) continue; rc = xive_try_pick_queue(vcpu, prio); if (rc == 0) { *server = vcpu->arch.xive_vcpu->server_num; pr_devel(" found on 0x%x/%d\n", *server, prio); return rc; } } pr_devel(" no available target !\n"); /* No available target ! */ return -EBUSY; } static u8 xive_lock_and_mask(struct kvmppc_xive *xive, struct kvmppc_xive_src_block *sb, struct kvmppc_xive_irq_state *state) { struct xive_irq_data *xd; u32 hw_num; u8 old_prio; u64 val; /* * Take the lock, set masked, try again if racing * with H_EOI */ for (;;) { arch_spin_lock(&sb->lock); old_prio = state->guest_priority; state->guest_priority = MASKED; mb(); if (!state->in_eoi) break; state->guest_priority = old_prio; arch_spin_unlock(&sb->lock); } /* No change ? Bail */ if (old_prio == MASKED) return old_prio; /* Get the right irq */ kvmppc_xive_select_irq(state, &hw_num, &xd); /* Set PQ to 10, return old P and old Q and remember them */ val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10); state->old_p = !!(val & 2); state->old_q = !!(val & 1); /* * Synchronize hardware to ensure the queues are updated when * masking */ xive_native_sync_source(hw_num); return old_prio; } static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb, struct kvmppc_xive_irq_state *state) { /* * Take the lock, try again if racing with H_EOI */ for (;;) { arch_spin_lock(&sb->lock); if (!state->in_eoi) break; arch_spin_unlock(&sb->lock); } } static void xive_finish_unmask(struct kvmppc_xive *xive, struct kvmppc_xive_src_block *sb, struct kvmppc_xive_irq_state *state, u8 prio) { struct xive_irq_data *xd; u32 hw_num; /* If we aren't changing a thing, move on */ if (state->guest_priority != MASKED) goto bail; /* Get the right irq */ kvmppc_xive_select_irq(state, &hw_num, &xd); /* Old Q set, set PQ to 11 */ if (state->old_q) xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); /* * If not old P, then perform an "effective" EOI, * on the source. This will handle the cases where * FW EOI is needed. */ if (!state->old_p) xive_vm_source_eoi(hw_num, xd); /* Synchronize ordering and mark unmasked */ mb(); bail: state->guest_priority = prio; } /* * Target an interrupt to a given server/prio, this will fall back * to another server if necessary and perform the HW targetting * updates as needed * * NOTE: Must be called with the state lock held */ static int xive_target_interrupt(struct kvm *kvm, struct kvmppc_xive_irq_state *state, u32 server, u8 prio) { struct kvmppc_xive *xive = kvm->arch.xive; u32 hw_num; int rc; /* * This will return a tentative server and actual * priority. The count for that new target will have * already been incremented. */ rc = kvmppc_xive_select_target(kvm, &server, prio); /* * We failed to find a target ? Not much we can do * at least until we support the GIQ. */ if (rc) return rc; /* * Increment the old queue pending count if there * was one so that the old queue count gets adjusted later * when observed to be empty. 
*/ if (state->act_priority != MASKED) xive_inc_q_pending(kvm, state->act_server, state->act_priority); /* * Update state and HW */ state->act_priority = prio; state->act_server = server; /* Get the right irq */ kvmppc_xive_select_irq(state, &hw_num, NULL); return xive_native_configure_irq(hw_num, kvmppc_xive_vp(xive, server), prio, state->number); } /* * Targetting rules: In order to avoid losing track of * pending interrupts across mask and unmask, which would * allow queue overflows, we implement the following rules: * * - Unless it was never enabled (or we run out of capacity) * an interrupt is always targetted at a valid server/queue * pair even when "masked" by the guest. This pair tends to * be the last one used but it can be changed under some * circumstances. That allows us to separate targetting * from masking, we only handle accounting during (re)targetting, * this also allows us to let an interrupt drain into its target * queue after masking, avoiding complex schemes to remove * interrupts out of remote processor queues. * * - When masking, we set PQ to 10 and save the previous value * of P and Q. * * - When unmasking, if saved Q was set, we set PQ to 11 * otherwise we leave PQ to the HW state which will be either * 10 if nothing happened or 11 if the interrupt fired while * masked. Effectively we are OR'ing the previous Q into the * HW Q. * * Then if saved P is clear, we do an effective EOI (Q->P->Trigger) * which will unmask the interrupt and shoot a new one if Q was * set. * * Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11, * effectively meaning an H_EOI from the guest is still expected * for that interrupt). * * - If H_EOI occurs while masked, we clear the saved P. * * - When changing target, we account on the new target and * increment a separate "pending" counter on the old one. * This pending counter will be used to decrement the old * target's count when its queue has been observed empty. */ int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority) { struct kvmppc_xive *xive = kvm->arch.xive; struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; u8 new_act_prio; int rc = 0; u16 idx; if (!xive) return -ENODEV; pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n", irq, server, priority); /* First, check provisioning of queues */ if (priority != MASKED) { mutex_lock(&xive->lock); rc = xive_check_provisioning(xive->kvm, xive_prio_from_guest(priority)); mutex_unlock(&xive->lock); } if (rc) { pr_devel(" provisioning failure %d !\n", rc); return rc; } sb = kvmppc_xive_find_source(xive, irq, &idx); if (!sb) return -EINVAL; state = &sb->irq_state[idx]; /* * We first handle masking/unmasking since the locking * might need to be retried due to EOIs, we'll handle * targetting changes later. These functions will return * with the SB lock held. * * xive_lock_and_mask() will also set state->guest_priority * but won't otherwise change other fields of the state. * * xive_lock_for_unmask will not actually unmask, this will * be done later by xive_finish_unmask() once the targetting * has been done, so we don't try to unmask an interrupt * that hasn't yet been targetted. */ if (priority == MASKED) xive_lock_and_mask(xive, sb, state); else xive_lock_for_unmask(sb, state); /* * Then we handle targetting. 
* * First calculate a new "actual priority" */ new_act_prio = state->act_priority; if (priority != MASKED) new_act_prio = xive_prio_from_guest(priority); pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n", new_act_prio, state->act_server, state->act_priority); /* * Then check if we actually need to change anything, * * The condition for re-targetting the interrupt is that * we have a valid new priority (new_act_prio is not 0xff) * and either the server or the priority changed. * * Note: If act_priority was ff and the new priority is * also ff, we don't do anything and leave the interrupt * untargetted. An attempt of doing an int_on on an * untargetted interrupt will fail. If that is a problem * we could initialize interrupts with valid default */ if (new_act_prio != MASKED && (state->act_server != server || state->act_priority != new_act_prio)) rc = xive_target_interrupt(kvm, state, server, new_act_prio); /* * Perform the final unmasking of the interrupt source * if necessary */ if (priority != MASKED) xive_finish_unmask(xive, sb, state, priority); /* * Finally Update saved_priority to match. Only int_on/off * set this field to a different value. */ state->saved_priority = priority; arch_spin_unlock(&sb->lock); return rc; } int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority) { struct kvmppc_xive *xive = kvm->arch.xive; struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; u16 idx; if (!xive) return -ENODEV; sb = kvmppc_xive_find_source(xive, irq, &idx); if (!sb) return -EINVAL; state = &sb->irq_state[idx]; arch_spin_lock(&sb->lock); *server = state->act_server; *priority = state->guest_priority; arch_spin_unlock(&sb->lock); return 0; } int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { struct kvmppc_xive *xive = kvm->arch.xive; struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; u16 idx; if (!xive) return -ENODEV; sb = kvmppc_xive_find_source(xive, irq, &idx); if (!sb) return -EINVAL; state = &sb->irq_state[idx]; pr_devel("int_on(irq=0x%x)\n", irq); /* * Check if interrupt was not targetted */ if (state->act_priority == MASKED) { pr_devel("int_on on untargetted interrupt\n"); return -EINVAL; } /* If saved_priority is 0xff, do nothing */ if (state->saved_priority == MASKED) return 0; /* * Lock and unmask it. */ xive_lock_for_unmask(sb, state); xive_finish_unmask(xive, sb, state, state->saved_priority); arch_spin_unlock(&sb->lock); return 0; } int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { struct kvmppc_xive *xive = kvm->arch.xive; struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; u16 idx; if (!xive) return -ENODEV; sb = kvmppc_xive_find_source(xive, irq, &idx); if (!sb) return -EINVAL; state = &sb->irq_state[idx]; pr_devel("int_off(irq=0x%x)\n", irq); /* * Lock and mask */ state->saved_priority = xive_lock_and_mask(xive, sb, state); arch_spin_unlock(&sb->lock); return 0; } static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq) { struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; u16 idx; sb = kvmppc_xive_find_source(xive, irq, &idx); if (!sb) return false; state = &sb->irq_state[idx]; if (!state->valid) return false; /* * Trigger the IPI. 
This assumes we never restore a pass-through * interrupt which should be safe enough */ xive_irq_trigger(&state->ipi_data); return true; } u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; if (!xc) return 0; /* Return the per-cpu state for state saving/migration */ return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT; } int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct kvmppc_xive *xive = vcpu->kvm->arch.xive; u8 cppr, mfrr; u32 xisr; if (!xc || !xive) return -ENOENT; /* Grab individual state fields. We don't use pending_pri */ cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT; xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) & KVM_REG_PPC_ICP_XISR_MASK; mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT; pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n", xc->server_num, cppr, mfrr, xisr); /* * We can't update the state of a "pushed" VCPU, but that * shouldn't happen because the vcpu->mutex makes running a * vcpu mutually exclusive with doing one_reg get/set on it. */ if (WARN_ON(vcpu->arch.xive_pushed)) return -EIO; /* Update VCPU HW saved state */ vcpu->arch.xive_saved_state.cppr = cppr; xc->hw_cppr = xc->cppr = cppr; /* * Update MFRR state. If it's not 0xff, we mark the VCPU as * having a pending MFRR change, which will re-evaluate the * target. The VCPU will thus potentially get a spurious * interrupt but that's not a big deal. */ xc->mfrr = mfrr; if (mfrr < cppr) xive_irq_trigger(&xc->vp_ipi_data); /* * Now saved XIRR is "interesting". It means there's something in * the legacy "1 element" queue... for an IPI we simply ignore it, * as the MFRR restore will handle that. For anything else we need * to force a resend of the source. * However the source may not have been setup yet. If that's the * case, we keep that info and increment a counter in the xive to * tell subsequent xive_set_source() to go look. */ if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) { xc->delayed_irq = xisr; xive->delayed_irqs++; pr_devel(" xisr restore delayed\n"); } return 0; } int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, unsigned long host_irq) { struct kvmppc_xive *xive = kvm->arch.xive; struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; struct irq_data *host_data = irq_domain_get_irq_data(irq_get_default_host(), host_irq); unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data); u16 idx; u8 prio; int rc; if (!xive) return -ENODEV; pr_debug("%s: GIRQ 0x%lx host IRQ %ld XIVE HW IRQ 0x%x\n", __func__, guest_irq, host_irq, hw_irq); sb = kvmppc_xive_find_source(xive, guest_irq, &idx); if (!sb) return -EINVAL; state = &sb->irq_state[idx]; /* * Mark the passed-through interrupt as going to a VCPU, * this will prevent further EOIs and similar operations * from the XIVE code. It will also mask the interrupt * to either PQ=10 or 11 state, the latter if the interrupt * is pending. This will allow us to unmask or retrigger it * after routing it to the guest with a simple EOI. * * The "state" argument is a "token", all it needs is to be * non-NULL to switch to passed-through or NULL for the * other way around. We may not yet have an actual VCPU * target here and we don't really care. */ rc = irq_set_vcpu_affinity(host_irq, state); if (rc) { pr_err("Failed to set VCPU affinity for host IRQ %ld\n", host_irq); return rc; } /* * Mask and read state of IPI. 
We need to know if its P bit * is set as that means it's potentially already using a * queue entry in the target */ prio = xive_lock_and_mask(xive, sb, state); pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio, state->old_p, state->old_q); /* Turn the IPI hard off */ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); /* * Reset ESB guest mapping. Needed when ESB pages are exposed * to the guest in XIVE native mode */ if (xive->ops && xive->ops->reset_mapped) xive->ops->reset_mapped(kvm, guest_irq); /* Grab info about irq */ state->pt_number = hw_irq; state->pt_data = irq_data_get_irq_handler_data(host_data); /* * Configure the IRQ to match the existing configuration of * the IPI if it was already targetted. Otherwise this will * mask the interrupt in a lossy way (act_priority is 0xff) * which is fine for a never started interrupt. */ xive_native_configure_irq(hw_irq, kvmppc_xive_vp(xive, state->act_server), state->act_priority, state->number); /* * We do an EOI to enable the interrupt (and retrigger if needed) * if the guest has the interrupt unmasked and the P bit was *not* * set in the IPI. If it was set, we know a slot may still be in * use in the target queue thus we have to wait for a guest * originated EOI */ if (prio != MASKED && !state->old_p) xive_vm_source_eoi(hw_irq, state->pt_data); /* Clear old_p/old_q as they are no longer relevant */ state->old_p = state->old_q = false; /* Restore guest prio (unlocks EOI) */ mb(); state->guest_priority = prio; arch_spin_unlock(&sb->lock); return 0; } EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped); int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, unsigned long host_irq) { struct kvmppc_xive *xive = kvm->arch.xive; struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; u16 idx; u8 prio; int rc; if (!xive) return -ENODEV; pr_debug("%s: GIRQ 0x%lx host IRQ %ld\n", __func__, guest_irq, host_irq); sb = kvmppc_xive_find_source(xive, guest_irq, &idx); if (!sb) return -EINVAL; state = &sb->irq_state[idx]; /* * Mask and read state of IRQ. We need to know if its P bit * is set as that means it's potentially already using a * queue entry in the target */ prio = xive_lock_and_mask(xive, sb, state); pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio, state->old_p, state->old_q); /* * If old_p is set, the interrupt is pending, we switch it to * PQ=11. This will force a resend in the host so the interrupt * isn't lost to whatever host driver may pick it up */ if (state->old_p) xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11); /* Release the passed-through interrupt to the host */ rc = irq_set_vcpu_affinity(host_irq, NULL); if (rc) { pr_err("Failed to clr VCPU affinity for host IRQ %ld\n", host_irq); return rc; } /* Forget about the IRQ */ state->pt_number = 0; state->pt_data = NULL; /* * Reset ESB guest mapping. Needed when ESB pages are exposed * to the guest in XIVE native mode */ if (xive->ops && xive->ops->reset_mapped) { xive->ops->reset_mapped(kvm, guest_irq); } /* Reconfigure the IPI */ xive_native_configure_irq(state->ipi_number, kvmppc_xive_vp(xive, state->act_server), state->act_priority, state->number); /* * If old_p is set (we have a queue entry potentially * occupied) or the interrupt is masked, we set the IPI * to PQ=10 state. Otherwise we just re-enable it (PQ=00). 
*/ if (prio == MASKED || state->old_p) xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10); else xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00); /* Restore guest prio (unlocks EOI) */ mb(); state->guest_priority = prio; arch_spin_unlock(&sb->lock); return 0; } EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped); void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct kvm *kvm = vcpu->kvm; struct kvmppc_xive *xive = kvm->arch.xive; int i, j; for (i = 0; i <= xive->max_sbid; i++) { struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; if (!sb) continue; for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) { struct kvmppc_xive_irq_state *state = &sb->irq_state[j]; if (!state->valid) continue; if (state->act_priority == MASKED) continue; if (state->act_server != xc->server_num) continue; /* Clean it up */ arch_spin_lock(&sb->lock); state->act_priority = MASKED; xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); if (state->pt_number) { xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01); xive_native_configure_irq(state->pt_number, 0, MASKED, 0); } arch_spin_unlock(&sb->lock); } } /* Disable vcpu's escalation interrupt */ if (vcpu->arch.xive_esc_on) { __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + XIVE_ESB_SET_PQ_01)); vcpu->arch.xive_esc_on = false; } /* * Clear pointers to escalation interrupt ESB. * This is safe because the vcpu->mutex is held, preventing * any other CPU from concurrently executing a KVM_RUN ioctl. */ vcpu->arch.xive_esc_vaddr = 0; vcpu->arch.xive_esc_raddr = 0; } /* * In single escalation mode, the escalation interrupt is marked so * that EOI doesn't re-enable it, but just sets the stale_p flag to * indicate that the P bit has already been dealt with. However, the * assembly code that enters the guest sets PQ to 00 without clearing * stale_p (because it has no easy way to address it). Hence we have * to adjust stale_p before shutting down the interrupt. */ void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, int irq) { struct irq_data *d = irq_get_irq_data(irq); struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); /* * This slightly odd sequence gives the right result * (i.e. stale_p set if xive_esc_on is false) even if * we race with xive_esc_irq() and xive_irq_eoi(). 
*/ xd->stale_p = false; smp_mb(); /* paired with smb_wmb in xive_esc_irq */ if (!vcpu->arch.xive_esc_on) xd->stale_p = true; } void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct kvmppc_xive *xive = vcpu->kvm->arch.xive; int i; if (!kvmppc_xics_enabled(vcpu)) return; if (!xc) return; pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); /* Ensure no interrupt is still routed to that VP */ xc->valid = false; kvmppc_xive_disable_vcpu_interrupts(vcpu); /* Mask the VP IPI */ xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01); /* Free escalations */ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { if (xc->esc_virq[i]) { if (kvmppc_xive_has_single_escalation(xc->xive)) xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]); free_irq(xc->esc_virq[i], vcpu); irq_dispose_mapping(xc->esc_virq[i]); kfree(xc->esc_virq_names[i]); } } /* Disable the VP */ xive_native_disable_vp(xc->vp_id); /* Clear the cam word so guest entry won't try to push context */ vcpu->arch.xive_cam_word = 0; /* Free the queues */ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { struct xive_q *q = &xc->queues[i]; xive_native_disable_queue(xc->vp_id, q, i); if (q->qpage) { free_pages((unsigned long)q->qpage, xive->q_page_order); q->qpage = NULL; } } /* Free the IPI */ if (xc->vp_ipi) { xive_cleanup_irq_data(&xc->vp_ipi_data); xive_native_free_irq(xc->vp_ipi); } /* Free the VP */ kfree(xc); /* Cleanup the vcpu */ vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; vcpu->arch.xive_vcpu = NULL; } static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu) { /* We have a block of xive->nr_servers VPs. We just need to check * packed vCPU ids are below that. */ return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers; } int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp) { u32 vp_id; if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) { pr_devel("Out of bounds !\n"); return -EINVAL; } if (xive->vp_base == XIVE_INVALID_VP) { xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers); pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers); if (xive->vp_base == XIVE_INVALID_VP) return -ENOSPC; } vp_id = kvmppc_xive_vp(xive, cpu); if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) { pr_devel("Duplicate !\n"); return -EEXIST; } *vp = vp_id; return 0; } int kvmppc_xive_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu, u32 cpu) { struct kvmppc_xive *xive = dev->private; struct kvmppc_xive_vcpu *xc; int i, r = -EBUSY; u32 vp_id; pr_devel("connect_vcpu(cpu=%d)\n", cpu); if (dev->ops != &kvm_xive_ops) { pr_devel("Wrong ops !\n"); return -EPERM; } if (xive->kvm != vcpu->kvm) return -EPERM; if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) return -EBUSY; /* We need to synchronize with queue provisioning */ mutex_lock(&xive->lock); r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id); if (r) goto bail; xc = kzalloc(sizeof(*xc), GFP_KERNEL); if (!xc) { r = -ENOMEM; goto bail; } vcpu->arch.xive_vcpu = xc; xc->xive = xive; xc->vcpu = vcpu; xc->server_num = cpu; xc->vp_id = vp_id; xc->mfrr = 0xff; xc->valid = true; r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); if (r) goto bail; if (!kvmppc_xive_check_save_restore(vcpu)) { pr_err("inconsistent save-restore setup for VCPU %d\n", cpu); r = -EIO; goto bail; } /* Configure VCPU fields for use by assembly push/pull */ vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); /* Allocate IPI */ xc->vp_ipi = xive_native_alloc_irq(); 
if (!xc->vp_ipi) { pr_err("Failed to allocate xive irq for VCPU IPI\n"); r = -EIO; goto bail; } pr_devel(" IPI=0x%x\n", xc->vp_ipi); r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data); if (r) goto bail; /* * Enable the VP first as the single escalation mode will * affect escalation interrupts numbering */ r = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive)); if (r) { pr_err("Failed to enable VP in OPAL, err %d\n", r); goto bail; } /* * Initialize queues. Initially we set them all for no queueing * and we enable escalation for queue 0 only which we'll use for * our mfrr change notifications. If the VCPU is hot-plugged, we * do handle provisioning however based on the existing "map" * of enabled queues. */ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { struct xive_q *q = &xc->queues[i]; /* Single escalation, no queue 7 */ if (i == 7 && kvmppc_xive_has_single_escalation(xive)) break; /* Is queue already enabled ? Provision it */ if (xive->qmap & (1 << i)) { r = xive_provision_queue(vcpu, i); if (r == 0 && !kvmppc_xive_has_single_escalation(xive)) kvmppc_xive_attach_escalation( vcpu, i, kvmppc_xive_has_single_escalation(xive)); if (r) goto bail; } else { r = xive_native_configure_queue(xc->vp_id, q, i, NULL, 0, true); if (r) { pr_err("Failed to configure queue %d for VCPU %d\n", i, cpu); goto bail; } } } /* If not done above, attach priority 0 escalation */ r = kvmppc_xive_attach_escalation(vcpu, 0, kvmppc_xive_has_single_escalation(xive)); if (r) goto bail; /* Route the IPI */ r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); if (!r) xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00); bail: mutex_unlock(&xive->lock); if (r) { kvmppc_xive_cleanup_vcpu(vcpu); return r; } vcpu->arch.irq_type = KVMPPC_IRQ_XICS; return 0; } /* * Scanning of queues before/after migration save */ static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq) { struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; u16 idx; sb = kvmppc_xive_find_source(xive, irq, &idx); if (!sb) return; state = &sb->irq_state[idx]; /* Some sanity checking */ if (!state->valid) { pr_err("invalid irq 0x%x in cpu queue!\n", irq); return; } /* * If the interrupt is in a queue it should have P set. * We warn so that gets reported. A backtrace isn't useful * so no need to use a WARN_ON. */ if (!state->saved_p) pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq); /* Set flag */ state->in_queue = true; } static void xive_pre_save_mask_irq(struct kvmppc_xive *xive, struct kvmppc_xive_src_block *sb, u32 irq) { struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; if (!state->valid) return; /* Mask and save state, this will also sync HW queues */ state->saved_scan_prio = xive_lock_and_mask(xive, sb, state); /* Transfer P and Q */ state->saved_p = state->old_p; state->saved_q = state->old_q; /* Unlock */ arch_spin_unlock(&sb->lock); } static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive, struct kvmppc_xive_src_block *sb, u32 irq) { struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; if (!state->valid) return; /* * Lock / exclude EOI (not technically necessary if the * guest isn't running concurrently. If this becomes a * performance issue we can probably remove the lock. 
*/ xive_lock_for_unmask(sb, state); /* Restore mask/prio if it wasn't masked */ if (state->saved_scan_prio != MASKED) xive_finish_unmask(xive, sb, state, state->saved_scan_prio); /* Unlock */ arch_spin_unlock(&sb->lock); } static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q) { u32 idx = q->idx; u32 toggle = q->toggle; u32 irq; do { irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle); if (irq > XICS_IPI) xive_pre_save_set_queued(xive, irq); } while(irq); } static void xive_pre_save_scan(struct kvmppc_xive *xive) { struct kvm_vcpu *vcpu = NULL; unsigned long i; int j; /* * See comment in xive_get_source() about how this * works. Collect a stable state for all interrupts */ for (i = 0; i <= xive->max_sbid; i++) { struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; if (!sb) continue; for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) xive_pre_save_mask_irq(xive, sb, j); } /* Then scan the queues and update the "in_queue" flag */ kvm_for_each_vcpu(i, vcpu, xive->kvm) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; if (!xc) continue; for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) { if (xc->queues[j].qpage) xive_pre_save_queue(xive, &xc->queues[j]); } } /* Finally restore interrupt states */ for (i = 0; i <= xive->max_sbid; i++) { struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; if (!sb) continue; for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) xive_pre_save_unmask_irq(xive, sb, j); } } static void xive_post_save_scan(struct kvmppc_xive *xive) { u32 i, j; /* Clear all the in_queue flags */ for (i = 0; i <= xive->max_sbid; i++) { struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; if (!sb) continue; for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) sb->irq_state[j].in_queue = false; } /* Next get_source() will do a new scan */ xive->saved_src_count = 0; } /* * This returns the source configuration and state to user space. */ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr) { struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; u64 __user *ubufp = (u64 __user *) addr; u64 val, prio; u16 idx; sb = kvmppc_xive_find_source(xive, irq, &idx); if (!sb) return -ENOENT; state = &sb->irq_state[idx]; if (!state->valid) return -ENOENT; pr_devel("get_source(%ld)...\n", irq); /* * So to properly save the state into something that looks like a * XICS migration stream we cannot treat interrupts individually. * * We need, instead, mask them all (& save their previous PQ state) * to get a stable state in the HW, then sync them to ensure that * any interrupt that had already fired hits its queue, and finally * scan all the queues to collect which interrupts are still present * in the queues, so we can set the "pending" flag on them and * they can be resent on restore. * * So we do it all when the "first" interrupt gets saved, all the * state is collected at that point, the rest of xive_get_source() * will merely collect and convert that state to the expected * userspace bit mask. 
*/ if (xive->saved_src_count == 0) xive_pre_save_scan(xive); xive->saved_src_count++; /* Convert saved state into something compatible with xics */ val = state->act_server; prio = state->saved_scan_prio; if (prio == MASKED) { val |= KVM_XICS_MASKED; prio = state->saved_priority; } val |= prio << KVM_XICS_PRIORITY_SHIFT; if (state->lsi) { val |= KVM_XICS_LEVEL_SENSITIVE; if (state->saved_p) val |= KVM_XICS_PENDING; } else { if (state->saved_p) val |= KVM_XICS_PRESENTED; if (state->saved_q) val |= KVM_XICS_QUEUED; /* * We mark it pending (which will attempt a re-delivery) * if we are in a queue *or* we were masked and had * Q set which is equivalent to the XICS "masked pending" * state */ if (state->in_queue || (prio == MASKED && state->saved_q)) val |= KVM_XICS_PENDING; } /* * If that was the last interrupt saved, reset the * in_queue flags */ if (xive->saved_src_count == xive->src_count) xive_post_save_scan(xive); /* Copy the result to userspace */ if (put_user(val, ubufp)) return -EFAULT; return 0; } struct kvmppc_xive_src_block *kvmppc_xive_create_src_block( struct kvmppc_xive *xive, int irq) { struct kvmppc_xive_src_block *sb; int i, bid; bid = irq >> KVMPPC_XICS_ICS_SHIFT; mutex_lock(&xive->lock); /* block already exists - somebody else got here first */ if (xive->src_blocks[bid]) goto out; /* Create the ICS */ sb = kzalloc(sizeof(*sb), GFP_KERNEL); if (!sb) goto out; sb->id = bid; for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i; sb->irq_state[i].eisn = 0; sb->irq_state[i].guest_priority = MASKED; sb->irq_state[i].saved_priority = MASKED; sb->irq_state[i].act_priority = MASKED; } smp_wmb(); xive->src_blocks[bid] = sb; if (bid > xive->max_sbid) xive->max_sbid = bid; out: mutex_unlock(&xive->lock); return xive->src_blocks[bid]; } static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq) { struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu = NULL; unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; if (!xc) continue; if (xc->delayed_irq == irq) { xc->delayed_irq = 0; xive->delayed_irqs--; return true; } } return false; } static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) { struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; u64 __user *ubufp = (u64 __user *) addr; u16 idx; u64 val; u8 act_prio, guest_prio; u32 server; int rc = 0; if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS) return -ENOENT; pr_devel("set_source(irq=0x%lx)\n", irq); /* Find the source */ sb = kvmppc_xive_find_source(xive, irq, &idx); if (!sb) { pr_devel("No source, creating source block...\n"); sb = kvmppc_xive_create_src_block(xive, irq); if (!sb) { pr_devel("Failed to create block...\n"); return -ENOMEM; } } state = &sb->irq_state[idx]; /* Read user passed data */ if (get_user(val, ubufp)) { pr_devel("fault getting user info !\n"); return -EFAULT; } server = val & KVM_XICS_DESTINATION_MASK; guest_prio = val >> KVM_XICS_PRIORITY_SHIFT; pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n", val, server, guest_prio); /* * If the source doesn't already have an IPI, allocate * one and get the corresponding data */ if (!state->ipi_number) { state->ipi_number = xive_native_alloc_irq(); if (state->ipi_number == 0) { pr_devel("Failed to allocate IPI !\n"); return -ENOMEM; } xive_native_populate_irq_data(state->ipi_number, &state->ipi_data); pr_devel(" src_ipi=0x%x\n", state->ipi_number); } /* * We use lock_and_mask() to set us in the right masked * 
state. We will override that state from the saved state * further down, but this will handle the cases of interrupts * that need FW masking. We set the initial guest_priority to * 0 before calling it to ensure it actually performs the masking. */ state->guest_priority = 0; xive_lock_and_mask(xive, sb, state); /* * Now, we select a target if we have one. If we don't we * leave the interrupt untargetted. It means that an interrupt * can become "untargetted" across migration if it was masked * by set_xive() but there is little we can do about it. */ /* First convert prio and mark interrupt as untargetted */ act_prio = xive_prio_from_guest(guest_prio); state->act_priority = MASKED; /* * We need to drop the lock due to the mutex below. Hopefully * nothing is touching that interrupt yet since it hasn't been * advertized to a running guest yet */ arch_spin_unlock(&sb->lock); /* If we have a priority target the interrupt */ if (act_prio != MASKED) { /* First, check provisioning of queues */ mutex_lock(&xive->lock); rc = xive_check_provisioning(xive->kvm, act_prio); mutex_unlock(&xive->lock); /* Target interrupt */ if (rc == 0) rc = xive_target_interrupt(xive->kvm, state, server, act_prio); /* * If provisioning or targetting failed, leave it * alone and masked. It will remain disabled until * the guest re-targets it. */ } /* * Find out if this was a delayed irq stashed in an ICP, * in which case, treat it as pending */ if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) { val |= KVM_XICS_PENDING; pr_devel(" Found delayed ! forcing PENDING !\n"); } /* Cleanup the SW state */ state->old_p = false; state->old_q = false; state->lsi = false; state->asserted = false; /* Restore LSI state */ if (val & KVM_XICS_LEVEL_SENSITIVE) { state->lsi = true; if (val & KVM_XICS_PENDING) state->asserted = true; pr_devel(" LSI ! Asserted=%d\n", state->asserted); } /* * Restore P and Q. If the interrupt was pending, we * force Q and !P, which will trigger a resend. * * That means that a guest that had both an interrupt * pending (queued) and Q set will restore with only * one instance of that interrupt instead of 2, but that * is perfectly fine as coalescing interrupts that haven't * been presented yet is always allowed. */ if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING)) state->old_p = true; if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING) state->old_q = true; pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q); /* * If the interrupt was unmasked, update guest priority and * perform the appropriate state transition and do a * re-trigger if necessary. */ if (val & KVM_XICS_MASKED) { pr_devel(" masked, saving prio\n"); state->guest_priority = MASKED; state->saved_priority = guest_prio; } else { pr_devel(" unmasked, restoring to prio %d\n", guest_prio); xive_finish_unmask(xive, sb, state, guest_prio); state->saved_priority = guest_prio; } /* Increment the number of valid sources and mark this one valid */ if (!state->valid) xive->src_count++; state->valid = true; return 0; } int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status) { struct kvmppc_xive *xive = kvm->arch.xive; struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; u16 idx; if (!xive) return -ENODEV; sb = kvmppc_xive_find_source(xive, irq, &idx); if (!sb) return -EINVAL; /* Perform locklessly .... (we need to do some RCUisms here...) 
*/ state = &sb->irq_state[idx]; if (!state->valid) return -EINVAL; /* We don't allow a trigger on a passed-through interrupt */ if (state->pt_number) return -EINVAL; if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL) state->asserted = true; else if (level == 0 || level == KVM_INTERRUPT_UNSET) { state->asserted = false; return 0; } /* Trigger the IPI */ xive_irq_trigger(&state->ipi_data); return 0; } int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr) { u32 __user *ubufp = (u32 __user *) addr; u32 nr_servers; int rc = 0; if (get_user(nr_servers, ubufp)) return -EFAULT; pr_devel("%s nr_servers=%u\n", __func__, nr_servers); if (!nr_servers || nr_servers > KVM_MAX_VCPU_IDS) return -EINVAL; mutex_lock(&xive->lock); if (xive->vp_base != XIVE_INVALID_VP) /* The VP block is allocated once and freed when the device * is released. Better not allow to change its size since its * used by connect_vcpu to validate vCPU ids are valid (eg, * setting it back to a higher value could allow connect_vcpu * to come up with a VP id that goes beyond the VP block, which * is likely to cause a crash in OPAL). */ rc = -EBUSY; else if (nr_servers > KVM_MAX_VCPUS) /* We don't need more servers. Higher vCPU ids get packed * down below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id(). */ xive->nr_servers = KVM_MAX_VCPUS; else xive->nr_servers = nr_servers; mutex_unlock(&xive->lock); return rc; } static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { struct kvmppc_xive *xive = dev->private; /* We honor the existing XICS ioctl */ switch (attr->group) { case KVM_DEV_XICS_GRP_SOURCES: return xive_set_source(xive, attr->attr, attr->addr); case KVM_DEV_XICS_GRP_CTRL: switch (attr->attr) { case KVM_DEV_XICS_NR_SERVERS: return kvmppc_xive_set_nr_servers(xive, attr->addr); } } return -ENXIO; } static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { struct kvmppc_xive *xive = dev->private; /* We honor the existing XICS ioctl */ switch (attr->group) { case KVM_DEV_XICS_GRP_SOURCES: return xive_get_source(xive, attr->attr, attr->addr); } return -ENXIO; } static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { /* We honor the same limits as XICS, at least for now */ switch (attr->group) { case KVM_DEV_XICS_GRP_SOURCES: if (attr->attr >= KVMPPC_XICS_FIRST_IRQ && attr->attr < KVMPPC_XICS_NR_IRQS) return 0; break; case KVM_DEV_XICS_GRP_CTRL: switch (attr->attr) { case KVM_DEV_XICS_NR_SERVERS: return 0; } } return -ENXIO; } static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd) { xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); xive_native_configure_irq(hw_num, 0, MASKED, 0); } void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) { int i; for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; if (!state->valid) continue; kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data); xive_cleanup_irq_data(&state->ipi_data); xive_native_free_irq(state->ipi_number); /* Pass-through, cleanup too but keep IRQ hw data */ if (state->pt_number) kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data); state->valid = false; } } /* * Called when device fd is closed. kvm->lock is held. 
static void kvmppc_xive_release(struct kvm_device *dev) { struct kvmppc_xive *xive = dev->private; struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; unsigned long i; pr_devel("Releasing xive device\n"); /* * Since this is the device release function, we know that * userspace does not have any open fd referring to the * device. Therefore there can not be any of the device * attribute set/get functions being executed concurrently, * and similarly, the connect_vcpu and set/clr_mapped * functions also cannot be being executed. */ debugfs_remove(xive->dentry); /* * We should clean up the vCPU interrupt presenters first. */ kvm_for_each_vcpu(i, vcpu, kvm) { /* * Take vcpu->mutex to ensure that no one_reg get/set ioctl * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently. * Holding the vcpu->mutex also means that the vcpu cannot * be executing the KVM_RUN ioctl, and therefore it cannot * be executing the XIVE push or pull code or accessing * the XIVE MMIO regions. */ mutex_lock(&vcpu->mutex); kvmppc_xive_cleanup_vcpu(vcpu); mutex_unlock(&vcpu->mutex); } /* * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe * against xive code getting called during vcpu execution or * set/get one_reg operations. */ kvm->arch.xive = NULL; /* Mask and free interrupts */ for (i = 0; i <= xive->max_sbid; i++) { if (xive->src_blocks[i]) kvmppc_xive_free_sources(xive->src_blocks[i]); kfree(xive->src_blocks[i]); xive->src_blocks[i] = NULL; } if (xive->vp_base != XIVE_INVALID_VP) xive_native_free_vp_block(xive->vp_base); /* * A reference of the kvmppc_xive pointer is now kept under * the xive_devices struct of the machine for reuse. It is * freed when the VM is destroyed for now until we fix all the * execution paths. */ kfree(dev); } /* * When the guest chooses the interrupt mode (XICS legacy or XIVE * native), the VM will switch KVM devices. The previous device will * be "released" before the new one is created. * * Until we are sure all execution paths are well protected, provide a * fail safe (transitional) method for device destruction, in which * the XIVE device pointer is recycled and not directly freed. */ struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type) { struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ? &kvm->arch.xive_devices.native : &kvm->arch.xive_devices.xics_on_xive; struct kvmppc_xive *xive = *kvm_xive_device; if (!xive) { xive = kzalloc(sizeof(*xive), GFP_KERNEL); *kvm_xive_device = xive; } else { memset(xive, 0, sizeof(*xive)); } return xive; } /* * Create a XICS device with XIVE backend. kvm->lock is held. */ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) { struct kvmppc_xive *xive; struct kvm *kvm = dev->kvm; pr_devel("Creating xive for partition\n"); /* Already there ? */ if (kvm->arch.xive) return -EEXIST; xive = kvmppc_xive_get_device(kvm, type); if (!xive) return -ENOMEM; dev->private = xive; xive->dev = dev; xive->kvm = kvm; mutex_init(&xive->lock); /* We use the default queue size set by the host */ xive->q_order = xive_native_default_eq_shift(); if (xive->q_order < PAGE_SHIFT) xive->q_page_order = 0; else xive->q_page_order = xive->q_order - PAGE_SHIFT; /* VP allocation is delayed to the first call to connect_vcpu */ xive->vp_base = XIVE_INVALID_VP; /* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per socket * on a POWER9 system. 
*/ xive->nr_servers = KVM_MAX_VCPUS; if (xive_native_has_single_escalation()) xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION; if (xive_native_has_save_restore()) xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE; kvm->arch.xive = xive; return 0; } int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req) { struct kvmppc_vcore *vc = vcpu->arch.vcore; /* The VM should have configured XICS mode before doing XICS hcalls. */ if (!kvmppc_xics_enabled(vcpu)) return H_TOO_HARD; switch (req) { case H_XIRR: return xive_vm_h_xirr(vcpu); case H_CPPR: return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4)); case H_EOI: return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4)); case H_IPI: return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5)); case H_IPOLL: return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4)); case H_XIRR_X: xive_vm_h_xirr(vcpu); kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset); return H_SUCCESS; } return H_UNSUPPORTED; } EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall); int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; unsigned int i; for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { struct xive_q *q = &xc->queues[i]; u32 i0, i1, idx; if (!q->qpage && !xc->esc_virq[i]) continue; if (q->qpage) { seq_printf(m, " q[%d]: ", i); idx = q->idx; i0 = be32_to_cpup(q->qpage + idx); idx = (idx + 1) & q->msk; i1 = be32_to_cpup(q->qpage + idx); seq_printf(m, "T=%d %08x %08x...\n", q->toggle, i0, i1); } if (xc->esc_virq[i]) { struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); seq_printf(m, " ESC %d %c%c EOI @%llx", xc->esc_virq[i], (pq & XIVE_ESB_VAL_P) ? 'P' : '-', (pq & XIVE_ESB_VAL_Q) ? 'Q' : '-', xd->eoi_page); seq_puts(m, "\n"); } } return 0; } void kvmppc_xive_debug_show_sources(struct seq_file *m, struct kvmppc_xive_src_block *sb) { int i; seq_puts(m, " LISN HW/CHIP TYPE PQ EISN CPU/PRIO\n"); for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; struct xive_irq_data *xd; u64 pq; u32 hw_num; if (!state->valid) continue; kvmppc_xive_select_irq(state, &hw_num, &xd); pq = xive_vm_esb_load(xd, XIVE_ESB_GET); seq_printf(m, "%08x %08x/%02x", state->number, hw_num, xd->src_chip); if (state->lsi) seq_printf(m, " %cLSI", state->asserted ? '^' : ' '); else seq_puts(m, " MSI"); seq_printf(m, " %s %c%c %08x % 4d/%d", state->ipi_number == hw_num ? "IPI" : " PT", pq & XIVE_ESB_VAL_P ? 'P' : '-', pq & XIVE_ESB_VAL_Q ? 
'Q' : '-', state->eisn, state->act_server, state->act_priority); seq_puts(m, "\n"); } } static int xive_debug_show(struct seq_file *m, void *private) { struct kvmppc_xive *xive = m->private; struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; u64 t_rm_h_xirr = 0; u64 t_rm_h_ipoll = 0; u64 t_rm_h_cppr = 0; u64 t_rm_h_eoi = 0; u64 t_rm_h_ipi = 0; u64 t_vm_h_xirr = 0; u64 t_vm_h_ipoll = 0; u64 t_vm_h_cppr = 0; u64 t_vm_h_eoi = 0; u64 t_vm_h_ipi = 0; unsigned long i; if (!kvm) return 0; seq_puts(m, "=========\nVCPU state\n=========\n"); kvm_for_each_vcpu(i, vcpu, kvm) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; if (!xc) continue; seq_printf(m, "VCPU %d: VP:%#x/%02x\n" " CPPR:%#x HWCPPR:%#x MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n", xc->server_num, xc->vp_id, xc->vp_chip_id, xc->cppr, xc->hw_cppr, xc->mfrr, xc->pending, xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); kvmppc_xive_debug_show_queues(m, vcpu); t_rm_h_xirr += xc->stat_rm_h_xirr; t_rm_h_ipoll += xc->stat_rm_h_ipoll; t_rm_h_cppr += xc->stat_rm_h_cppr; t_rm_h_eoi += xc->stat_rm_h_eoi; t_rm_h_ipi += xc->stat_rm_h_ipi; t_vm_h_xirr += xc->stat_vm_h_xirr; t_vm_h_ipoll += xc->stat_vm_h_ipoll; t_vm_h_cppr += xc->stat_vm_h_cppr; t_vm_h_eoi += xc->stat_vm_h_eoi; t_vm_h_ipi += xc->stat_vm_h_ipi; } seq_puts(m, "Hcalls totals\n"); seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr); seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll); seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr); seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi); seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi); seq_puts(m, "=========\nSources\n=========\n"); for (i = 0; i <= xive->max_sbid; i++) { struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; if (sb) { arch_spin_lock(&sb->lock); kvmppc_xive_debug_show_sources(m, sb); arch_spin_unlock(&sb->lock); } } return 0; } DEFINE_SHOW_ATTRIBUTE(xive_debug); static void xive_debugfs_init(struct kvmppc_xive *xive) { xive->dentry = debugfs_create_file("xive", S_IRUGO, xive->kvm->debugfs_dentry, xive, &xive_debug_fops); pr_debug("%s: created\n", __func__); } static void kvmppc_xive_init(struct kvm_device *dev) { struct kvmppc_xive *xive = dev->private; /* Register some debug interfaces */ xive_debugfs_init(xive); } struct kvm_device_ops kvm_xive_ops = { .name = "kvm-xive", .create = kvmppc_xive_create, .init = kvmppc_xive_init, .release = kvmppc_xive_release, .set_attr = xive_set_attr, .get_attr = xive_get_attr, .has_attr = xive_has_attr, };
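/*
 * Editor's note: illustrative sketch, not part of the kernel file above.
 * kvmppc_xive_get_icp() and kvmppc_xive_set_icp() earlier in this file pack
 * the emulated XICS presenter state (CPPR, MFRR, XISR, pending priority)
 * into a single 64-bit one_reg value. The standalone, userspace-style
 * example below shows that packing with local ICP_*_SHIFT constants that
 * are assumed to mirror the KVM_REG_PPC_ICP_* definitions from the KVM
 * uapi header; treat the exact shift values as assumptions, not as an
 * authoritative restatement of the ABI.
 */
#if 0	/* illustrative only, never compiled here; build standalone to run */
#include <stdint.h>
#include <stdio.h>

enum {
	ICP_CPPR_SHIFT = 56,	/* current processor priority */
	ICP_XISR_SHIFT = 32,	/* pending interrupt source (24 bits) */
	ICP_MFRR_SHIFT = 24,	/* IPI priority */
	ICP_PPRI_SHIFT = 16,	/* pending interrupt priority */
};
#define ICP_XISR_MASK	0x00ffffffULL

static uint64_t icp_pack(uint8_t cppr, uint8_t mfrr, uint32_t xisr, uint8_t ppri)
{
	return (uint64_t)cppr << ICP_CPPR_SHIFT |
	       ((uint64_t)xisr & ICP_XISR_MASK) << ICP_XISR_SHIFT |
	       (uint64_t)mfrr << ICP_MFRR_SHIFT |
	       (uint64_t)ppri << ICP_PPRI_SHIFT;
}

int main(void)
{
	/* Mirrors get_icp(): no pending XISR, PPRI reported as 0xff. */
	uint64_t icp = icp_pack(0xff, 0xff, 0, 0xff);

	/* Unpack the fields the way set_icp() does. */
	printf("cppr=0x%02x mfrr=0x%02x xisr=0x%06x\n",
	       (unsigned int)(icp >> ICP_CPPR_SHIFT) & 0xff,
	       (unsigned int)(icp >> ICP_MFRR_SHIFT) & 0xff,
	       (unsigned int)(icp >> ICP_XISR_SHIFT) & 0x00ffffff);
	return 0;
}
#endif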
linux-master
arch/powerpc/kvm/book3s_xive.c
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright 2010 Paul Mackerras, IBM Corp. <[email protected]> * Copyright 2011 David Gibson, IBM Corporation <[email protected]> * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <[email protected]> */ #include <linux/types.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/highmem.h> #include <linux/gfp.h> #include <linux/slab.h> #include <linux/sched/signal.h> #include <linux/hugetlb.h> #include <linux/list.h> #include <linux/anon_inodes.h> #include <linux/iommu.h> #include <linux/file.h> #include <linux/mm.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/book3s/64/mmu-hash.h> #include <asm/hvcall.h> #include <asm/synch.h> #include <asm/ppc-opcode.h> #include <asm/udbg.h> #include <asm/iommu.h> #include <asm/tce.h> #include <asm/mmu_context.h> static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm, unsigned long liobn) { struct kvmppc_spapr_tce_table *stt; list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list) if (stt->liobn == liobn) return stt; return NULL; } static unsigned long kvmppc_tce_pages(unsigned long iommu_pages) { return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; } static unsigned long kvmppc_stt_pages(unsigned long tce_pages) { unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) + (tce_pages * sizeof(struct page *)); return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE; } static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head) { struct kvmppc_spapr_tce_iommu_table *stit = container_of(head, struct kvmppc_spapr_tce_iommu_table, rcu); iommu_tce_table_put(stit->tbl); kfree(stit); } static void kvm_spapr_tce_liobn_put(struct kref *kref) { struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref, struct kvmppc_spapr_tce_iommu_table, kref); list_del_rcu(&stit->next); call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free); } extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, struct iommu_group *grp) { int i; struct kvmppc_spapr_tce_table *stt; struct kvmppc_spapr_tce_iommu_table *stit, *tmp; struct iommu_table_group *table_group = NULL; rcu_read_lock(); list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { table_group = iommu_group_get_iommudata(grp); if (WARN_ON(!table_group)) continue; list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) { for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { if (table_group->tables[i] != stit->tbl) continue; kref_put(&stit->kref, kvm_spapr_tce_liobn_put); } } cond_resched_rcu(); } rcu_read_unlock(); } extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, struct iommu_group *grp) { struct kvmppc_spapr_tce_table *stt = NULL; bool found = false; struct iommu_table *tbl = NULL; struct iommu_table_group *table_group; long i; struct kvmppc_spapr_tce_iommu_table *stit; struct fd f; f = fdget(tablefd); if (!f.file) return -EBADF; rcu_read_lock(); list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { if (stt == f.file->private_data) { found = true; break; } } rcu_read_unlock(); fdput(f); if (!found) return -EINVAL; table_group = iommu_group_get_iommudata(grp); if (WARN_ON(!table_group)) return -EFAULT; for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { struct iommu_table *tbltmp = table_group->tables[i]; if (!tbltmp) continue; /* Make sure hardware table parameters are compatible */ if ((tbltmp->it_page_shift <= stt->page_shift) && (tbltmp->it_offset << tbltmp->it_page_shift == stt->offset << 
stt->page_shift) && (tbltmp->it_size << tbltmp->it_page_shift >= stt->size << stt->page_shift)) { /* * Reference the table to avoid races with * add/remove DMA windows. */ tbl = iommu_tce_table_get(tbltmp); break; } } if (!tbl) return -EINVAL; rcu_read_lock(); list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { if (tbl != stit->tbl) continue; if (!kref_get_unless_zero(&stit->kref)) { /* stit is being destroyed */ iommu_tce_table_put(tbl); rcu_read_unlock(); return -ENOTTY; } /* * The table is already known to this KVM, we just increased * its KVM reference counter and can return. */ rcu_read_unlock(); return 0; } rcu_read_unlock(); stit = kzalloc(sizeof(*stit), GFP_KERNEL); if (!stit) { iommu_tce_table_put(tbl); return -ENOMEM; } stit->tbl = tbl; kref_init(&stit->kref); list_add_rcu(&stit->next, &stt->iommu_tables); return 0; } static void release_spapr_tce_table(struct rcu_head *head) { struct kvmppc_spapr_tce_table *stt = container_of(head, struct kvmppc_spapr_tce_table, rcu); unsigned long i, npages = kvmppc_tce_pages(stt->size); for (i = 0; i < npages; i++) if (stt->pages[i]) __free_page(stt->pages[i]); kfree(stt); } static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt, unsigned long sttpage) { struct page *page = stt->pages[sttpage]; if (page) return page; mutex_lock(&stt->alloc_lock); page = stt->pages[sttpage]; if (!page) { page = alloc_page(GFP_KERNEL | __GFP_ZERO); WARN_ON_ONCE(!page); if (page) stt->pages[sttpage] = page; } mutex_unlock(&stt->alloc_lock); return page; } static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf) { struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data; struct page *page; if (vmf->pgoff >= kvmppc_tce_pages(stt->size)) return VM_FAULT_SIGBUS; page = kvm_spapr_get_tce_page(stt, vmf->pgoff); if (!page) return VM_FAULT_OOM; get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct kvm_spapr_tce_vm_ops = { .fault = kvm_spapr_tce_fault, }; static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma) { vma->vm_ops = &kvm_spapr_tce_vm_ops; return 0; } static int kvm_spapr_tce_release(struct inode *inode, struct file *filp) { struct kvmppc_spapr_tce_table *stt = filp->private_data; struct kvmppc_spapr_tce_iommu_table *stit, *tmp; struct kvm *kvm = stt->kvm; mutex_lock(&kvm->lock); list_del_rcu(&stt->list); mutex_unlock(&kvm->lock); list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) { WARN_ON(!kref_read(&stit->kref)); while (1) { if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put)) break; } } account_locked_vm(kvm->mm, kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false); kvm_put_kvm(stt->kvm); call_rcu(&stt->rcu, release_spapr_tce_table); return 0; } static const struct file_operations kvm_spapr_tce_fops = { .mmap = kvm_spapr_tce_mmap, .release = kvm_spapr_tce_release, }; int kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvm_create_spapr_tce_64 *args) { struct kvmppc_spapr_tce_table *stt = NULL; struct kvmppc_spapr_tce_table *siter; struct mm_struct *mm = kvm->mm; unsigned long npages; int ret; if (!args->size || args->page_shift < 12 || args->page_shift > 34 || (args->offset + args->size > (ULLONG_MAX >> args->page_shift))) return -EINVAL; npages = kvmppc_tce_pages(args->size); ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true); if (ret) return ret; ret = -ENOMEM; stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL | __GFP_NOWARN); if (!stt) goto fail_acct; stt->liobn = args->liobn; stt->page_shift = args->page_shift; stt->offset = 
args->offset; stt->size = args->size; stt->kvm = kvm; mutex_init(&stt->alloc_lock); INIT_LIST_HEAD_RCU(&stt->iommu_tables); mutex_lock(&kvm->lock); /* Check this LIOBN hasn't been previously allocated */ ret = 0; list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) { if (siter->liobn == args->liobn) { ret = -EBUSY; break; } } kvm_get_kvm(kvm); if (!ret) ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, stt, O_RDWR | O_CLOEXEC); if (ret >= 0) list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables); else kvm_put_kvm_no_destroy(kvm); mutex_unlock(&kvm->lock); if (ret >= 0) return ret; kfree(stt); fail_acct: account_locked_vm(mm, kvmppc_stt_pages(npages), false); return ret; } static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce, unsigned long *ua) { unsigned long gfn = tce >> PAGE_SHIFT; struct kvm_memory_slot *memslot; memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn); if (!memslot) return -EINVAL; *ua = __gfn_to_hva_memslot(memslot, gfn) | (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); return 0; } static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce) { unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE); enum dma_data_direction dir = iommu_tce_direction(tce); struct kvmppc_spapr_tce_iommu_table *stit; unsigned long ua = 0; /* Allow userspace to poison TCE table */ if (dir == DMA_NONE) return H_SUCCESS; if (iommu_tce_check_gpa(stt->page_shift, gpa)) return H_TOO_HARD; if (kvmppc_tce_to_ua(stt->kvm, tce, &ua)) return H_TOO_HARD; rcu_read_lock(); list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { unsigned long hpa = 0; struct mm_iommu_table_group_mem_t *mem; long shift = stit->tbl->it_page_shift; mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift); if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) { rcu_read_unlock(); return H_TOO_HARD; } } rcu_read_unlock(); return H_SUCCESS; } /* * Handles TCE requests for emulated devices. * Puts guest TCE values to the table and expects user space to convert them. * Cannot fail so kvmppc_tce_validate must be called before it. 
*/ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, unsigned long idx, unsigned long tce) { struct page *page; u64 *tbl; unsigned long sttpage; idx -= stt->offset; sttpage = idx / TCES_PER_PAGE; page = stt->pages[sttpage]; if (!page) { /* We allow any TCE, not just with read|write permissions */ if (!tce) return; page = kvm_spapr_get_tce_page(stt, sttpage); if (!page) return; } tbl = page_to_virt(page); tbl[idx % TCES_PER_PAGE] = tce; } static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, unsigned long entry) { unsigned long i; unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift); for (i = 0; i < subpages; ++i) { unsigned long hpa = 0; enum dma_data_direction dir = DMA_NONE; iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir); } } static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm, struct iommu_table *tbl, unsigned long entry) { struct mm_iommu_table_group_mem_t *mem = NULL; const unsigned long pgsize = 1ULL << tbl->it_page_shift; __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); if (!pua) return H_SUCCESS; mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize); if (!mem) return H_TOO_HARD; mm_iommu_mapped_dec(mem); *pua = cpu_to_be64(0); return H_SUCCESS; } static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm, struct iommu_table *tbl, unsigned long entry) { enum dma_data_direction dir = DMA_NONE; unsigned long hpa = 0; long ret; if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir))) return H_TOO_HARD; if (dir == DMA_NONE) return H_SUCCESS; ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry); if (ret != H_SUCCESS) iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir); return ret; } static long kvmppc_tce_iommu_unmap(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, unsigned long entry) { unsigned long i, ret = H_SUCCESS; unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); unsigned long io_entry = entry * subpages; for (i = 0; i < subpages; ++i) { ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i); if (ret != H_SUCCESS) break; } iommu_tce_kill(tbl, io_entry, subpages); return ret; } static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, unsigned long entry, unsigned long ua, enum dma_data_direction dir) { long ret; unsigned long hpa; __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); struct mm_iommu_table_group_mem_t *mem; if (!pua) /* it_userspace allocation might be delayed */ return H_TOO_HARD; mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift); if (!mem) /* This only handles v2 IOMMU type, v1 is handled via ioctl() */ return H_TOO_HARD; if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))) return H_TOO_HARD; if (mm_iommu_mapped_inc(mem)) return H_TOO_HARD; ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir); if (WARN_ON_ONCE(ret)) { mm_iommu_mapped_dec(mem); return H_TOO_HARD; } if (dir != DMA_NONE) kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry); *pua = cpu_to_be64(ua); return 0; } static long kvmppc_tce_iommu_map(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, unsigned long entry, unsigned long ua, enum dma_data_direction dir) { unsigned long i, pgoff, ret = H_SUCCESS; unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); unsigned long io_entry = entry * subpages; for (i = 0, pgoff = 0; i < subpages; ++i, pgoff += 
IOMMU_PAGE_SIZE(tbl)) { ret = kvmppc_tce_iommu_do_map(kvm, tbl, io_entry + i, ua + pgoff, dir); if (ret != H_SUCCESS) break; } iommu_tce_kill(tbl, io_entry, subpages); return ret; } long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce) { struct kvmppc_spapr_tce_table *stt; long ret, idx; struct kvmppc_spapr_tce_iommu_table *stit; unsigned long entry, ua = 0; enum dma_data_direction dir; /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */ /* liobn, ioba, tce); */ stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD; ret = kvmppc_ioba_validate(stt, ioba, 1); if (ret != H_SUCCESS) return ret; idx = srcu_read_lock(&vcpu->kvm->srcu); ret = kvmppc_tce_validate(stt, tce); if (ret != H_SUCCESS) goto unlock_exit; dir = iommu_tce_direction(tce); if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) { ret = H_PARAMETER; goto unlock_exit; } entry = ioba >> stt->page_shift; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { if (dir == DMA_NONE) ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt, stit->tbl, entry); else ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry, ua, dir); if (ret != H_SUCCESS) { kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry); goto unlock_exit; } } kvmppc_tce_put(stt, entry, tce); unlock_exit: srcu_read_unlock(&vcpu->kvm->srcu, idx); return ret; } EXPORT_SYMBOL_GPL(kvmppc_h_put_tce); long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce_list, unsigned long npages) { struct kvmppc_spapr_tce_table *stt; long i, ret = H_SUCCESS, idx; unsigned long entry, ua = 0; u64 __user *tces; u64 tce; struct kvmppc_spapr_tce_iommu_table *stit; stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD; entry = ioba >> stt->page_shift; /* * SPAPR spec says that the maximum size of the list is 512 TCEs * so the whole table fits in 4K page */ if (npages > 512) return H_PARAMETER; if (tce_list & (SZ_4K - 1)) return H_PARAMETER; ret = kvmppc_ioba_validate(stt, ioba, npages); if (ret != H_SUCCESS) return ret; idx = srcu_read_lock(&vcpu->kvm->srcu); if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) { ret = H_TOO_HARD; goto unlock_exit; } tces = (u64 __user *) ua; for (i = 0; i < npages; ++i) { if (get_user(tce, tces + i)) { ret = H_TOO_HARD; goto unlock_exit; } tce = be64_to_cpu(tce); ret = kvmppc_tce_validate(stt, tce); if (ret != H_SUCCESS) goto unlock_exit; } for (i = 0; i < npages; ++i) { /* * This looks unsafe, because we validate, then regrab * the TCE from userspace which could have been changed by * another thread. * * But it actually is safe, because the relevant checks will be * re-executed in the following code. If userspace tries to * change this dodgily it will result in a messier failure mode * but won't threaten the host. 
*/ if (get_user(tce, tces + i)) { ret = H_TOO_HARD; goto unlock_exit; } tce = be64_to_cpu(tce); if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) { ret = H_PARAMETER; goto unlock_exit; } list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry + i, ua, iommu_tce_direction(tce)); if (ret != H_SUCCESS) { kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i); goto unlock_exit; } } kvmppc_tce_put(stt, entry + i, tce); } unlock_exit: srcu_read_unlock(&vcpu->kvm->srcu, idx); return ret; } EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect); long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce_value, unsigned long npages) { struct kvmppc_spapr_tce_table *stt; long i, ret; struct kvmppc_spapr_tce_iommu_table *stit; stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD; ret = kvmppc_ioba_validate(stt, ioba, npages); if (ret != H_SUCCESS) return ret; /* Check permission bits only to allow userspace poison TCE for debug */ if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ)) return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { unsigned long entry = ioba >> stt->page_shift; for (i = 0; i < npages; ++i) { ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt, stit->tbl, entry + i); if (ret == H_SUCCESS) continue; if (ret == H_TOO_HARD) return ret; WARN_ON_ONCE(1); kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i); } } for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value); return ret; } EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce); long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba) { struct kvmppc_spapr_tce_table *stt; long ret; unsigned long idx; struct page *page; u64 *tbl; stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD; ret = kvmppc_ioba_validate(stt, ioba, 1); if (ret != H_SUCCESS) return ret; idx = (ioba >> stt->page_shift) - stt->offset; page = stt->pages[idx / TCES_PER_PAGE]; if (!page) { vcpu->arch.regs.gpr[4] = 0; return H_SUCCESS; } tbl = (u64 *)page_address(page); vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE]; return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
linux-master
arch/powerpc/kvm/book3s_64_vio.c
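As an illustrative aside (not part of the kernel file above): the TCE put/get paths repeatedly convert an I/O bus address (ioba) into a backing-page index and a slot within that page. The minimal, self-contained sketch below reproduces just that arithmetic; the struct, helper and the TCES_PER_PAGE value are hypothetical stand-ins (the kernel derives TCES_PER_PAGE from its own page size), and only the shift/offset/divide/modulo logic mirrors kvmppc_tce_put() and kvmppc_h_get_tce().

#include <stdio.h>

/* Assumed value for a 4K backing page; the kernel computes this itself. */
#define TCES_PER_PAGE 512UL

struct tce_table_geom {
        unsigned long page_shift;       /* IOMMU page shift, e.g. 12 for 4K pages */
        unsigned long offset;           /* first TCE index covered by this table (stt->offset) */
};

static void locate_tce(const struct tce_table_geom *g, unsigned long ioba)
{
        unsigned long entry = ioba >> g->page_shift;    /* TCE index for this ioba */
        unsigned long idx = entry - g->offset;          /* index into the table's pages[] space */

        printf("ioba 0x%lx -> entry %lu, backing page %lu, slot %lu\n",
               ioba, entry, idx / TCES_PER_PAGE, idx % TCES_PER_PAGE);
}

int main(void)
{
        struct tce_table_geom g = { .page_shift = 12, .offset = 0 };

        locate_tce(&g, 0x201000);       /* entry 513 -> backing page 1, slot 1 */
        return 0;
}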
// SPDX-License-Identifier: GPL-2.0-only #include <asm/kvm_ppc.h> #include <asm/pmc.h> #include "book3s_hv.h" static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra) { if (!(mmcr0 & MMCR0_FC)) goto do_freeze; if (mmcra & MMCRA_SAMPLE_ENABLE) goto do_freeze; if (cpu_has_feature(CPU_FTR_ARCH_31)) { if (!(mmcr0 & MMCR0_PMCCEXT)) goto do_freeze; if (!(mmcra & MMCRA_BHRB_DISABLE)) goto do_freeze; } return; do_freeze: mmcr0 = MMCR0_FC; mmcra = 0; if (cpu_has_feature(CPU_FTR_ARCH_31)) { mmcr0 |= MMCR0_PMCCEXT; mmcra = MMCRA_BHRB_DISABLE; } mtspr(SPRN_MMCR0, mmcr0); mtspr(SPRN_MMCRA, mmcra); isync(); } void switch_pmu_to_guest(struct kvm_vcpu *vcpu, struct p9_host_os_sprs *host_os_sprs) { struct lppaca *lp; int load_pmu = 1; lp = vcpu->arch.vpa.pinned_addr; if (lp) load_pmu = lp->pmcregs_in_use; /* Save host */ if (ppc_get_pmu_inuse()) { /* POWER9, POWER10 do not implement HPMC or SPMC */ host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0); host_os_sprs->mmcra = mfspr(SPRN_MMCRA); freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra); host_os_sprs->pmc1 = mfspr(SPRN_PMC1); host_os_sprs->pmc2 = mfspr(SPRN_PMC2); host_os_sprs->pmc3 = mfspr(SPRN_PMC3); host_os_sprs->pmc4 = mfspr(SPRN_PMC4); host_os_sprs->pmc5 = mfspr(SPRN_PMC5); host_os_sprs->pmc6 = mfspr(SPRN_PMC6); host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1); host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2); host_os_sprs->sdar = mfspr(SPRN_SDAR); host_os_sprs->siar = mfspr(SPRN_SIAR); host_os_sprs->sier1 = mfspr(SPRN_SIER); if (cpu_has_feature(CPU_FTR_ARCH_31)) { host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3); host_os_sprs->sier2 = mfspr(SPRN_SIER2); host_os_sprs->sier3 = mfspr(SPRN_SIER3); } } #ifdef CONFIG_PPC_PSERIES /* After saving PMU, before loading guest PMU, flip pmcregs_in_use */ if (kvmhv_on_pseries()) { barrier(); get_lppaca()->pmcregs_in_use = load_pmu; barrier(); } #endif /* * Load guest. If the VPA said the PMCs are not in use but the guest * tried to access them anyway, HFSCR[PM] will be set by the HFAC * fault so we can make forward progress. */ if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) { mtspr(SPRN_PMC1, vcpu->arch.pmc[0]); mtspr(SPRN_PMC2, vcpu->arch.pmc[1]); mtspr(SPRN_PMC3, vcpu->arch.pmc[2]); mtspr(SPRN_PMC4, vcpu->arch.pmc[3]); mtspr(SPRN_PMC5, vcpu->arch.pmc[4]); mtspr(SPRN_PMC6, vcpu->arch.pmc[5]); mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]); mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]); mtspr(SPRN_SDAR, vcpu->arch.sdar); mtspr(SPRN_SIAR, vcpu->arch.siar); mtspr(SPRN_SIER, vcpu->arch.sier[0]); if (cpu_has_feature(CPU_FTR_ARCH_31)) { mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]); mtspr(SPRN_SIER2, vcpu->arch.sier[1]); mtspr(SPRN_SIER3, vcpu->arch.sier[2]); } /* Set MMCRA then MMCR0 last */ mtspr(SPRN_MMCRA, vcpu->arch.mmcra); mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]); /* No isync necessary because we're starting counters */ if (!vcpu->arch.nested && (vcpu->arch.hfscr_permitted & HFSCR_PM)) vcpu->arch.hfscr |= HFSCR_PM; } } EXPORT_SYMBOL_GPL(switch_pmu_to_guest); void switch_pmu_to_host(struct kvm_vcpu *vcpu, struct p9_host_os_sprs *host_os_sprs) { struct lppaca *lp; int save_pmu = 1; lp = vcpu->arch.vpa.pinned_addr; if (lp) save_pmu = lp->pmcregs_in_use; if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) { /* * Save pmu if this guest is capable of running nested guests. * This is option is for old L1s that do not set their * lppaca->pmcregs_in_use properly when entering their L2. 
*/ save_pmu |= nesting_enabled(vcpu->kvm); } if (save_pmu) { vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0); vcpu->arch.mmcra = mfspr(SPRN_MMCRA); freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra); vcpu->arch.pmc[0] = mfspr(SPRN_PMC1); vcpu->arch.pmc[1] = mfspr(SPRN_PMC2); vcpu->arch.pmc[2] = mfspr(SPRN_PMC3); vcpu->arch.pmc[3] = mfspr(SPRN_PMC4); vcpu->arch.pmc[4] = mfspr(SPRN_PMC5); vcpu->arch.pmc[5] = mfspr(SPRN_PMC6); vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1); vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2); vcpu->arch.sdar = mfspr(SPRN_SDAR); vcpu->arch.siar = mfspr(SPRN_SIAR); vcpu->arch.sier[0] = mfspr(SPRN_SIER); if (cpu_has_feature(CPU_FTR_ARCH_31)) { vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3); vcpu->arch.sier[1] = mfspr(SPRN_SIER2); vcpu->arch.sier[2] = mfspr(SPRN_SIER3); } } else if (vcpu->arch.hfscr & HFSCR_PM) { /* * The guest accessed PMC SPRs without specifying they should * be preserved, or it cleared pmcregs_in_use after the last * access. Just ensure they are frozen. */ freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA)); /* * Demand-fault PMU register access in the guest. * * This is used to grab the guest's VPA pmcregs_in_use value * and reflect it into the host's VPA in the case of a nested * hypervisor. * * It also avoids having to zero-out SPRs after each guest * exit to avoid side-channels when. * * This is cleared here when we exit the guest, so later HFSCR * interrupt handling can add it back to run the guest with * PM enabled next time. */ if (!vcpu->arch.nested) vcpu->arch.hfscr &= ~HFSCR_PM; } /* otherwise the PMU should still be frozen */ #ifdef CONFIG_PPC_PSERIES if (kvmhv_on_pseries()) { barrier(); get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse(); barrier(); } #endif if (ppc_get_pmu_inuse()) { mtspr(SPRN_PMC1, host_os_sprs->pmc1); mtspr(SPRN_PMC2, host_os_sprs->pmc2); mtspr(SPRN_PMC3, host_os_sprs->pmc3); mtspr(SPRN_PMC4, host_os_sprs->pmc4); mtspr(SPRN_PMC5, host_os_sprs->pmc5); mtspr(SPRN_PMC6, host_os_sprs->pmc6); mtspr(SPRN_MMCR1, host_os_sprs->mmcr1); mtspr(SPRN_MMCR2, host_os_sprs->mmcr2); mtspr(SPRN_SDAR, host_os_sprs->sdar); mtspr(SPRN_SIAR, host_os_sprs->siar); mtspr(SPRN_SIER, host_os_sprs->sier1); if (cpu_has_feature(CPU_FTR_ARCH_31)) { mtspr(SPRN_MMCR3, host_os_sprs->mmcr3); mtspr(SPRN_SIER2, host_os_sprs->sier2); mtspr(SPRN_SIER3, host_os_sprs->sier3); } /* Set MMCRA then MMCR0 last */ mtspr(SPRN_MMCRA, host_os_sprs->mmcra); mtspr(SPRN_MMCR0, host_os_sprs->mmcr0); isync(); } } EXPORT_SYMBOL_GPL(switch_pmu_to_host);
linux-master
arch/powerpc/kvm/book3s_hv_p9_perf.c
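As an illustrative aside (not part of the kernel file above): switch_pmu_to_guest()/switch_pmu_to_host() decide whether to touch the guest PMU registers based on the VPA's pmcregs_in_use hint, with HFSCR[PM] acting as a demand-fault fallback when the guest touched the PMU anyway. The sketch below isolates just that decision; the struct, helper and the HFSCR_PM bit position are hypothetical, chosen only for illustration.

#include <stdbool.h>
#include <stdio.h>

#define HFSCR_PM (1UL << 60)    /* assumed bit position, for illustration only */

struct vcpu_pmu_view {
        bool has_vpa;           /* guest registered a VPA (lppaca) */
        int pmcregs_in_use;     /* the VPA's pmcregs_in_use field */
        unsigned long hfscr;    /* guest HFSCR as tracked by KVM */
};

static bool guest_pmu_should_be_loaded(const struct vcpu_pmu_view *v)
{
        int load_pmu = 1;       /* no VPA: assume the PMU is in use */

        if (v->has_vpa)
                load_pmu = v->pmcregs_in_use;

        /*
         * Even if the VPA says "not in use", HFSCR[PM] having been set by a
         * prior facility fault means the guest accessed the PMU anyway.
         */
        return load_pmu || (v->hfscr & HFSCR_PM);
}

int main(void)
{
        struct vcpu_pmu_view v = { .has_vpa = true, .pmcregs_in_use = 0, .hfscr = 0 };

        printf("load guest PMU? %d\n", guest_pmu_should_be_loaded(&v));  /* 0 */
        v.hfscr |= HFSCR_PM;
        printf("load guest PMU? %d\n", guest_pmu_should_be_loaded(&v));  /* 1 */
        return 0;
}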
// SPDX-License-Identifier: GPL-2.0 /* * Secure pages management: Migration of pages between normal and secure * memory of KVM guests. * * Copyright 2018 Bharata B Rao, IBM Corp. <[email protected]> */ /* * A pseries guest can be run as a secure guest on Ultravisor-enabled * POWER platforms. On such platforms, this driver will be used to manage * the movement of guest pages between the normal memory managed by * hypervisor (HV) and secure memory managed by Ultravisor (UV). * * The page-in or page-out requests from UV will come to HV as hcalls and * HV will call back into UV via ultracalls to satisfy these page requests. * * Private ZONE_DEVICE memory equal to the amount of secure memory * available in the platform for running secure guests is hotplugged. * Whenever a page belonging to the guest becomes secure, a page from this * private device memory is used to represent and track that secure page * on the HV side. Some pages (like virtio buffers, VPA pages etc) are * shared between UV and HV. However such pages aren't represented by * device private memory and mappings to shared memory exist in both * UV and HV page tables. */ /* * Notes on locking * * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent * page-in and page-out requests for the same GPA. Concurrent accesses * can either come via UV (guest vCPUs requesting the same page) * or when HV and the guest simultaneously access the same page. * This mutex serializes the migration of a page from HV (normal) to * UV (secure) and vice versa. So the serialization points are around * migrate_vma routines and page-in/out routines. * * The per-guest mutex comes with a cost though. Mainly it serializes the * fault path, as page-out can occur when HV faults on accessing secure * guest pages. Currently UV issues page-in requests for all the guest * PFNs one at a time during early boot (UV_ESM uvcall), so this is * not a cause for concern. Also, currently the number of page-outs caused * by HV touching secure pages is very low. If and when UV supports * overcommitting, then we might see concurrent guest-driven page-outs. * * Locking order * * 1. kvm->srcu - Protects KVM memslots * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting * as sync-points for page-in/out */ /* * Notes on page size * * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN * and H_SVM_PAGE_OUT hcalls in PAGE_SIZE(64K) granularity. HV tracks * secure GPAs at 64K page size and maintains one device PFN for each * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued * for one 64K page at a time. * * HV faulting on secure pages: When HV touches any secure page, it * faults and issues a UV_PAGE_OUT request with 64K page size. Currently * UV splits and remaps the 2MB page if necessary and copies out the * required 64K page contents. * * Shared pages: Whenever the guest shares a secure page, UV will split and * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size. * * HV invalidating a page: When a regular page belonging to a secure * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K * page size. Using 64K page size is correct here because any non-secure * page will essentially be of 64K page size. Splitting by UV during sharing * and page-out ensures this. * * Page fault handling: When HV handles a page fault on a page belonging * to a secure guest, it sends that to UV with a 64K UV_PAGE_IN request.
* Using 64K size is correct here too, as UV would have split the 2MB page * into 64K mappings and would have done page-outs earlier. * * In summary, the current secure pages handling code in HV assumes * 64K page size and in fact fails any page-in/page-out requests of * non-64K size upfront. If and when UV starts supporting multiple * page-sizes, we need to break this assumption. */ #include <linux/pagemap.h> #include <linux/migrate.h> #include <linux/kvm_host.h> #include <linux/ksm.h> #include <linux/of.h> #include <linux/memremap.h> #include <asm/ultravisor.h> #include <asm/mman.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s_uvmem.h> static struct dev_pagemap kvmppc_uvmem_pgmap; static unsigned long *kvmppc_uvmem_bitmap; static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock); /* * States of a GFN * --------------- * The GFN can be in one of the following states. * * (a) Secure - The GFN is secure. The GFN is associated with * a Secure VM, and the contents of the GFN are not accessible * to the Hypervisor. This GFN can be backed by a secure-PFN, * or can be backed by a normal-PFN with contents encrypted. * The former is true when the GFN is paged in to the * ultravisor. The latter is true when the GFN is paged out * of the ultravisor. * * (b) Shared - The GFN is shared. The GFN is associated with * a secure VM. The contents of the GFN are accessible to * the Hypervisor. This GFN is backed by a normal-PFN and its * content is un-encrypted. * * (c) Normal - The GFN is a normal GFN. The GFN is associated with * a normal VM. The contents of the GFN are accessible to * the Hypervisor. Its content is never encrypted. * * States of a VM. * --------------- * * Normal VM: A VM whose contents are always accessible to * the hypervisor. All its GFNs are normal-GFNs. * * Secure VM: A VM whose contents are not accessible to the * hypervisor without the VM's consent. Its GFNs are * either Shared-GFNs or Secure-GFNs. * * Transient VM: A Normal VM that is transitioning to a secure VM. * The transition starts on successful return of * H_SVM_INIT_START, and ends on successful return * of H_SVM_INIT_DONE. This transient VM can have GFNs * in any of the three states; i.e. Secure-GFN, Shared-GFN, * and Normal-GFN. The VM never executes in this state * in supervisor-mode. * * Memory slot State. * ----------------------------- * The state of a memory slot mirrors the state of the * VM the memory slot is associated with. * * VM State transition. * -------------------- * * A VM always starts in Normal Mode. * * H_SVM_INIT_START moves the VM into the transient state. During this * time the Ultravisor may request some of its GFNs to be shared or * secured. So its GFNs can be in one of the three GFN states. * * H_SVM_INIT_DONE moves the VM entirely from the transient state to * the secure state. At this point any left-over normal-GFNs are * transitioned to Secure-GFNs. * * H_SVM_INIT_ABORT moves the transient VM back to a normal VM. * All its GFNs are moved to Normal-GFNs. * * UV_TERMINATE transitions the secure-VM back to a normal-VM. All * the secure-GFNs and shared-GFNs are transitioned to normal-GFNs. * Note: The contents of the normal-GFN are undefined at this point. * * GFN state implementation: * ------------------------- * * A Secure GFN is associated with a secure-PFN; also called uvmem_pfn, * when the GFN is paged-in. Its pfn[] has the KVMPPC_GFN_UVMEM_PFN flag * set, and contains the value of the secure-PFN. * It is associated with a normal-PFN; also called mem_pfn, when * the GFN is paged-out. Its pfn[] has the KVMPPC_GFN_MEM_PFN flag set.
* The value of the normal-PFN is not tracked. * * Shared GFN is associated with a normal-PFN. Its pfn[] has * KVMPPC_UVMEM_SHARED_PFN flag set. The value of the normal-PFN * is not tracked. * * Normal GFN is associated with normal-PFN. Its pfn[] has * no flag set. The value of the normal-PFN is not tracked. * * Life cycle of a GFN * -------------------- * * -------------------------------------------------------------- * | | Share | Unshare | SVM |H_SVM_INIT_DONE| * | |operation |operation | abort/ | | * | | | | terminate | | * ------------------------------------------------------------- * | | | | | | * | Secure | Shared | Secure |Normal |Secure | * | | | | | | * | Shared | Shared | Secure |Normal |Shared | * | | | | | | * | Normal | Shared | Secure |Normal |Secure | * -------------------------------------------------------------- * * Life cycle of a VM * -------------------- * * -------------------------------------------------------------------- * | | start | H_SVM_ |H_SVM_ |H_SVM_ |UV_SVM_ | * | | VM |INIT_START|INIT_DONE|INIT_ABORT |TERMINATE | * | | | | | | | * --------- ---------------------------------------------------------- * | | | | | | | * | Normal | Normal | Transient|Error |Error |Normal | * | | | | | | | * | Secure | Error | Error |Error |Error |Normal | * | | | | | | | * |Transient| N/A | Error |Secure |Normal |Normal | * -------------------------------------------------------------------- */ #define KVMPPC_GFN_UVMEM_PFN (1UL << 63) #define KVMPPC_GFN_MEM_PFN (1UL << 62) #define KVMPPC_GFN_SHARED (1UL << 61) #define KVMPPC_GFN_SECURE (KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN) #define KVMPPC_GFN_FLAG_MASK (KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED) #define KVMPPC_GFN_PFN_MASK (~KVMPPC_GFN_FLAG_MASK) struct kvmppc_uvmem_slot { struct list_head list; unsigned long nr_pfns; unsigned long base_pfn; unsigned long *pfns; }; struct kvmppc_uvmem_page_pvt { struct kvm *kvm; unsigned long gpa; bool skip_page_out; bool remove_gfn; }; bool kvmppc_uvmem_available(void) { /* * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor * and our data structures have been initialized successfully. */ return !!kvmppc_uvmem_bitmap; } int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) { struct kvmppc_uvmem_slot *p; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; p->pfns = vcalloc(slot->npages, sizeof(*p->pfns)); if (!p->pfns) { kfree(p); return -ENOMEM; } p->nr_pfns = slot->npages; p->base_pfn = slot->base_gfn; mutex_lock(&kvm->arch.uvmem_lock); list_add(&p->list, &kvm->arch.uvmem_pfns); mutex_unlock(&kvm->arch.uvmem_lock); return 0; } /* * All device PFNs are already released by the time we come here. */ void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) { struct kvmppc_uvmem_slot *p, *next; mutex_lock(&kvm->arch.uvmem_lock); list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) { if (p->base_pfn == slot->base_gfn) { vfree(p->pfns); list_del(&p->list); kfree(p); break; } } mutex_unlock(&kvm->arch.uvmem_lock); } static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm, unsigned long flag, unsigned long uvmem_pfn) { struct kvmppc_uvmem_slot *p; list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { unsigned long index = gfn - p->base_pfn; if (flag == KVMPPC_GFN_UVMEM_PFN) p->pfns[index] = uvmem_pfn | flag; else p->pfns[index] = flag; return; } } } /* mark the GFN as secure-GFN associated with @uvmem pfn device-PFN. 
*/ static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn, unsigned long uvmem_pfn, struct kvm *kvm) { kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn); } /* mark the GFN as secure-GFN associated with a memory-PFN. */ static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm) { kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0); } /* mark the GFN as a shared GFN. */ static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm) { kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0); } /* mark the GFN as a non-existent GFN. */ static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm) { kvmppc_mark_gfn(gfn, kvm, 0, 0); } /* return true, if the GFN is a secure-GFN backed by a secure-PFN */ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, unsigned long *uvmem_pfn) { struct kvmppc_uvmem_slot *p; list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { unsigned long index = gfn - p->base_pfn; if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) { if (uvmem_pfn) *uvmem_pfn = p->pfns[index] & KVMPPC_GFN_PFN_MASK; return true; } else return false; } } return false; } /* * starting from *gfn search for the next available GFN that is not yet * transitioned to a secure GFN. return the value of that GFN in *gfn. If a * GFN is found, return true, else return false * * Must be called with kvm->arch.uvmem_lock held. */ static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, struct kvm *kvm, unsigned long *gfn) { struct kvmppc_uvmem_slot *p = NULL, *iter; bool ret = false; unsigned long i; list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list) if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) { p = iter; break; } if (!p) return ret; /* * The code below assumes, one to one correspondence between * kvmppc_uvmem_slot and memslot. */ for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) { unsigned long index = i - p->base_pfn; if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) { *gfn = i; ret = true; break; } } return ret; } static int kvmppc_memslot_page_merge(struct kvm *kvm, const struct kvm_memory_slot *memslot, bool merge) { unsigned long gfn = memslot->base_gfn; unsigned long end, start = gfn_to_hva(kvm, gfn); unsigned long vm_flags; int ret = 0; struct vm_area_struct *vma; int merge_flag = (merge) ? 
MADV_MERGEABLE : MADV_UNMERGEABLE; if (kvm_is_error_hva(start)) return H_STATE; end = start + (memslot->npages << PAGE_SHIFT); mmap_write_lock(kvm->mm); do { vma = find_vma_intersection(kvm->mm, start, end); if (!vma) { ret = H_STATE; break; } vma_start_write(vma); /* Copy vm_flags to avoid partial modifications in ksm_madvise */ vm_flags = vma->vm_flags; ret = ksm_madvise(vma, vma->vm_start, vma->vm_end, merge_flag, &vm_flags); if (ret) { ret = H_STATE; break; } vm_flags_reset(vma, vm_flags); start = vma->vm_end; } while (end > vma->vm_end); mmap_write_unlock(kvm->mm); return ret; } static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *memslot) { uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); kvmppc_uvmem_slot_free(kvm, memslot); kvmppc_memslot_page_merge(kvm, memslot, true); } static int __kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *memslot) { int ret = H_PARAMETER; if (kvmppc_memslot_page_merge(kvm, memslot, false)) return ret; if (kvmppc_uvmem_slot_init(kvm, memslot)) goto out1; ret = uv_register_mem_slot(kvm->arch.lpid, memslot->base_gfn << PAGE_SHIFT, memslot->npages * PAGE_SIZE, 0, memslot->id); if (ret < 0) { ret = H_PARAMETER; goto out; } return 0; out: kvmppc_uvmem_slot_free(kvm, memslot); out1: kvmppc_memslot_page_merge(kvm, memslot, true); return ret; } unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot, *m; int ret = H_SUCCESS; int srcu_idx, bkt; kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START; if (!kvmppc_uvmem_bitmap) return H_UNSUPPORTED; /* Only radix guests can be secure guests */ if (!kvm_is_radix(kvm)) return H_UNSUPPORTED; /* NAK the transition to secure if not enabled */ if (!kvm->arch.svm_enabled) return H_AUTHORITY; srcu_idx = srcu_read_lock(&kvm->srcu); /* register the memslot */ slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, bkt, slots) { ret = __kvmppc_uvmem_memslot_create(kvm, memslot); if (ret) break; } if (ret) { slots = kvm_memslots(kvm); kvm_for_each_memslot(m, bkt, slots) { if (m == memslot) break; __kvmppc_uvmem_memslot_delete(kvm, memslot); } } srcu_read_unlock(&kvm->srcu, srcu_idx); return ret; } /* * Provision a new page on HV side and copy over the contents * from secure memory using UV_PAGE_OUT uvcall. * Caller must held kvm->arch.uvmem_lock. 
*/ static int __kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long page_shift, struct kvm *kvm, unsigned long gpa, struct page *fault_page) { unsigned long src_pfn, dst_pfn = 0; struct migrate_vma mig = { 0 }; struct page *dpage, *spage; struct kvmppc_uvmem_page_pvt *pvt; unsigned long pfn; int ret = U_SUCCESS; memset(&mig, 0, sizeof(mig)); mig.vma = vma; mig.start = start; mig.end = end; mig.src = &src_pfn; mig.dst = &dst_pfn; mig.pgmap_owner = &kvmppc_uvmem_pgmap; mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; mig.fault_page = fault_page; /* The requested page is already paged-out, nothing to do */ if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL)) return ret; ret = migrate_vma_setup(&mig); if (ret) return -1; spage = migrate_pfn_to_page(*mig.src); if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE)) goto out_finalize; if (!is_zone_device_page(spage)) goto out_finalize; dpage = alloc_page_vma(GFP_HIGHUSER, vma, start); if (!dpage) { ret = -1; goto out_finalize; } lock_page(dpage); pvt = spage->zone_device_data; pfn = page_to_pfn(dpage); /* * This function is used in two cases: * - When HV touches a secure page, for which we do UV_PAGE_OUT * - When a secure page is converted to shared page, we *get* * the page to essentially unmap the device page. In this * case we skip page-out. */ if (!pvt->skip_page_out) ret = uv_page_out(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift); if (ret == U_SUCCESS) *mig.dst = migrate_pfn(pfn); else { unlock_page(dpage); __free_page(dpage); goto out_finalize; } migrate_vma_pages(&mig); out_finalize: migrate_vma_finalize(&mig); return ret; } static inline int kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long page_shift, struct kvm *kvm, unsigned long gpa, struct page *fault_page) { int ret; mutex_lock(&kvm->arch.uvmem_lock); ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, fault_page); mutex_unlock(&kvm->arch.uvmem_lock); return ret; } /* * Drop device pages that we maintain for the secure guest * * We first mark the pages to be skipped from UV_PAGE_OUT when there * is HV side fault on these pages. Next we *get* these pages, forcing * fault on them, do fault time migration to replace the device PTEs in * QEMU page table with normal PTEs from newly allocated pages. 
*/ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot, struct kvm *kvm, bool skip_page_out) { int i; struct kvmppc_uvmem_page_pvt *pvt; struct page *uvmem_page; struct vm_area_struct *vma = NULL; unsigned long uvmem_pfn, gfn; unsigned long addr; mmap_read_lock(kvm->mm); addr = slot->userspace_addr; gfn = slot->base_gfn; for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) { /* Fetch the VMA if addr is not in the latest fetched one */ if (!vma || addr >= vma->vm_end) { vma = vma_lookup(kvm->mm, addr); if (!vma) { pr_err("Can't find VMA for gfn:0x%lx\n", gfn); break; } } mutex_lock(&kvm->arch.uvmem_lock); if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { uvmem_page = pfn_to_page(uvmem_pfn); pvt = uvmem_page->zone_device_data; pvt->skip_page_out = skip_page_out; pvt->remove_gfn = true; if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE, PAGE_SHIFT, kvm, pvt->gpa, NULL)) pr_err("Can't page out gpa:0x%lx addr:0x%lx\n", pvt->gpa, addr); } else { /* Remove the shared flag if any */ kvmppc_gfn_remove(gfn, kvm); } mutex_unlock(&kvm->arch.uvmem_lock); } mmap_read_unlock(kvm->mm); } unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm) { int srcu_idx, bkt; struct kvm_memory_slot *memslot; /* * Expect to be called only after INIT_START and before INIT_DONE. * If INIT_DONE was completed, use normal VM termination sequence. */ if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) return H_UNSUPPORTED; if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) return H_STATE; srcu_idx = srcu_read_lock(&kvm->srcu); kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm)) kvmppc_uvmem_drop_pages(memslot, kvm, false); srcu_read_unlock(&kvm->srcu, srcu_idx); kvm->arch.secure_guest = 0; uv_svm_terminate(kvm->arch.lpid); return H_PARAMETER; } /* * Get a free device PFN from the pool * * Called when a normal page is moved to secure memory (UV_PAGE_IN). Device * PFN will be used to keep track of the secure page on HV side. * * Called with kvm->arch.uvmem_lock held */ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) { struct page *dpage = NULL; unsigned long bit, uvmem_pfn; struct kvmppc_uvmem_page_pvt *pvt; unsigned long pfn_last, pfn_first; pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT; pfn_last = pfn_first + (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT); spin_lock(&kvmppc_uvmem_bitmap_lock); bit = find_first_zero_bit(kvmppc_uvmem_bitmap, pfn_last - pfn_first); if (bit >= (pfn_last - pfn_first)) goto out; bitmap_set(kvmppc_uvmem_bitmap, bit, 1); spin_unlock(&kvmppc_uvmem_bitmap_lock); pvt = kzalloc(sizeof(*pvt), GFP_KERNEL); if (!pvt) goto out_clear; uvmem_pfn = bit + pfn_first; kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm); pvt->gpa = gpa; pvt->kvm = kvm; dpage = pfn_to_page(uvmem_pfn); dpage->zone_device_data = pvt; zone_device_page_init(dpage); return dpage; out_clear: spin_lock(&kvmppc_uvmem_bitmap_lock); bitmap_clear(kvmppc_uvmem_bitmap, bit, 1); out: spin_unlock(&kvmppc_uvmem_bitmap_lock); return NULL; } /* * Alloc a PFN from private device memory pool. If @pagein is true, * copy page from normal memory to secure memory using UV_PAGE_IN uvcall. 
*/ static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long gpa, struct kvm *kvm, unsigned long page_shift, bool pagein) { unsigned long src_pfn, dst_pfn = 0; struct migrate_vma mig = { 0 }; struct page *spage; unsigned long pfn; struct page *dpage; int ret = 0; memset(&mig, 0, sizeof(mig)); mig.vma = vma; mig.start = start; mig.end = end; mig.src = &src_pfn; mig.dst = &dst_pfn; mig.flags = MIGRATE_VMA_SELECT_SYSTEM; ret = migrate_vma_setup(&mig); if (ret) return ret; if (!(*mig.src & MIGRATE_PFN_MIGRATE)) { ret = -1; goto out_finalize; } dpage = kvmppc_uvmem_get_page(gpa, kvm); if (!dpage) { ret = -1; goto out_finalize; } if (pagein) { pfn = *mig.src >> MIGRATE_PFN_SHIFT; spage = migrate_pfn_to_page(*mig.src); if (spage) { ret = uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift); if (ret) goto out_finalize; } } *mig.dst = migrate_pfn(page_to_pfn(dpage)); migrate_vma_pages(&mig); out_finalize: migrate_vma_finalize(&mig); return ret; } static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot) { unsigned long gfn = memslot->base_gfn; struct vm_area_struct *vma; unsigned long start, end; int ret = 0; mmap_read_lock(kvm->mm); mutex_lock(&kvm->arch.uvmem_lock); while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) { ret = H_STATE; start = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(start)) break; end = start + (1UL << PAGE_SHIFT); vma = find_vma_intersection(kvm->mm, start, end); if (!vma || vma->vm_start > start || vma->vm_end < end) break; ret = kvmppc_svm_page_in(vma, start, end, (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false); if (ret) { ret = H_STATE; break; } /* relinquish the cpu if needed */ cond_resched(); } mutex_unlock(&kvm->arch.uvmem_lock); mmap_read_unlock(kvm->mm); return ret; } unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int srcu_idx, bkt; long ret = H_SUCCESS; if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) return H_UNSUPPORTED; /* migrate any unmoved normal pfn to device pfns*/ srcu_idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, bkt, slots) { ret = kvmppc_uv_migrate_mem_slot(kvm, memslot); if (ret) { /* * The pages will remain transitioned. * Its the callers responsibility to * terminate the VM, which will undo * all state of the VM. Till then * this VM is in a erroneous state. * Its KVMPPC_SECURE_INIT_DONE will * remain unset. */ ret = H_STATE; goto out; } } kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE; pr_info("LPID %d went secure\n", kvm->arch.lpid); out: srcu_read_unlock(&kvm->srcu, srcu_idx); return ret; } /* * Shares the page with HV, thus making it a normal page. * * - If the page is already secure, then provision a new page and share * - If the page is a normal page, share the existing page * * In the former case, uses dev_pagemap_ops.migrate_to_ram handler * to unmap the device page from QEMU's page tables. 
*/ static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift) { int ret = H_PARAMETER; struct page *uvmem_page; struct kvmppc_uvmem_page_pvt *pvt; unsigned long pfn; unsigned long gfn = gpa >> page_shift; int srcu_idx; unsigned long uvmem_pfn; srcu_idx = srcu_read_lock(&kvm->srcu); mutex_lock(&kvm->arch.uvmem_lock); if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { uvmem_page = pfn_to_page(uvmem_pfn); pvt = uvmem_page->zone_device_data; pvt->skip_page_out = true; /* * do not drop the GFN. It is a valid GFN * that is transitioned to a shared GFN. */ pvt->remove_gfn = false; } retry: mutex_unlock(&kvm->arch.uvmem_lock); pfn = gfn_to_pfn(kvm, gfn); if (is_error_noslot_pfn(pfn)) goto out; mutex_lock(&kvm->arch.uvmem_lock); if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { uvmem_page = pfn_to_page(uvmem_pfn); pvt = uvmem_page->zone_device_data; pvt->skip_page_out = true; pvt->remove_gfn = false; /* it continues to be a valid GFN */ kvm_release_pfn_clean(pfn); goto retry; } if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift)) { kvmppc_gfn_shared(gfn, kvm); ret = H_SUCCESS; } kvm_release_pfn_clean(pfn); mutex_unlock(&kvm->arch.uvmem_lock); out: srcu_read_unlock(&kvm->srcu, srcu_idx); return ret; } /* * H_SVM_PAGE_IN: Move page from normal memory to secure memory. * * H_PAGE_IN_SHARED flag makes the page shared which means that the same * memory in is visible from both UV and HV. */ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, unsigned long flags, unsigned long page_shift) { unsigned long start, end; struct vm_area_struct *vma; int srcu_idx; unsigned long gfn = gpa >> page_shift; int ret; if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) return H_UNSUPPORTED; if (page_shift != PAGE_SHIFT) return H_P3; if (flags & ~H_PAGE_IN_SHARED) return H_P2; if (flags & H_PAGE_IN_SHARED) return kvmppc_share_page(kvm, gpa, page_shift); ret = H_PARAMETER; srcu_idx = srcu_read_lock(&kvm->srcu); mmap_read_lock(kvm->mm); start = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(start)) goto out; mutex_lock(&kvm->arch.uvmem_lock); /* Fail the page-in request of an already paged-in page */ if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) goto out_unlock; end = start + (1UL << page_shift); vma = find_vma_intersection(kvm->mm, start, end); if (!vma || vma->vm_start > start || vma->vm_end < end) goto out_unlock; if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift, true)) goto out_unlock; ret = H_SUCCESS; out_unlock: mutex_unlock(&kvm->arch.uvmem_lock); out: mmap_read_unlock(kvm->mm); srcu_read_unlock(&kvm->srcu, srcu_idx); return ret; } /* * Fault handler callback that gets called when HV touches any page that * has been moved to secure memory, we ask UV to give back the page by * issuing UV_PAGE_OUT uvcall. * * This eventually results in dropping of device PFN and the newly * provisioned page/PFN gets populated in QEMU page tables. */ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf) { struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data; if (kvmppc_svm_page_out(vmf->vma, vmf->address, vmf->address + PAGE_SIZE, PAGE_SHIFT, pvt->kvm, pvt->gpa, vmf->page)) return VM_FAULT_SIGBUS; else return 0; } /* * Release the device PFN back to the pool * * Gets called when secure GFN tranistions from a secure-PFN * to a normal PFN during H_SVM_PAGE_OUT. * Gets called with kvm->arch.uvmem_lock held. 
*/ static void kvmppc_uvmem_page_free(struct page *page) { unsigned long pfn = page_to_pfn(page) - (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT); struct kvmppc_uvmem_page_pvt *pvt; spin_lock(&kvmppc_uvmem_bitmap_lock); bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1); spin_unlock(&kvmppc_uvmem_bitmap_lock); pvt = page->zone_device_data; page->zone_device_data = NULL; if (pvt->remove_gfn) kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm); else kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm); kfree(pvt); } static const struct dev_pagemap_ops kvmppc_uvmem_ops = { .page_free = kvmppc_uvmem_page_free, .migrate_to_ram = kvmppc_uvmem_migrate_to_ram, }; /* * H_SVM_PAGE_OUT: Move page from secure memory to normal memory. */ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa, unsigned long flags, unsigned long page_shift) { unsigned long gfn = gpa >> page_shift; unsigned long start, end; struct vm_area_struct *vma; int srcu_idx; int ret; if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) return H_UNSUPPORTED; if (page_shift != PAGE_SHIFT) return H_P3; if (flags) return H_P2; ret = H_PARAMETER; srcu_idx = srcu_read_lock(&kvm->srcu); mmap_read_lock(kvm->mm); start = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(start)) goto out; end = start + (1UL << page_shift); vma = find_vma_intersection(kvm->mm, start, end); if (!vma || vma->vm_start > start || vma->vm_end < end) goto out; if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, NULL)) ret = H_SUCCESS; out: mmap_read_unlock(kvm->mm); srcu_read_unlock(&kvm->srcu, srcu_idx); return ret; } int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) { unsigned long pfn; int ret = U_SUCCESS; pfn = gfn_to_pfn(kvm, gfn); if (is_error_noslot_pfn(pfn)) return -EFAULT; mutex_lock(&kvm->arch.uvmem_lock); if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) goto out; ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT, 0, PAGE_SHIFT); out: kvm_release_pfn_clean(pfn); mutex_unlock(&kvm->arch.uvmem_lock); return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT; } int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new) { int ret = __kvmppc_uvmem_memslot_create(kvm, new); if (!ret) ret = kvmppc_uv_migrate_mem_slot(kvm, new); return ret; } void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old) { __kvmppc_uvmem_memslot_delete(kvm, old); } static u64 kvmppc_get_secmem_size(void) { struct device_node *np; int i, len; const __be32 *prop; u64 size = 0; /* * First try the new ibm,secure-memory nodes which supersede the * secure-memory-ranges property. * If we found some, no need to read the deprecated ones. */ for_each_compatible_node(np, NULL, "ibm,secure-memory") { prop = of_get_property(np, "reg", &len); if (!prop) continue; size += of_read_number(prop + 2, 2); } if (size) return size; np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware"); if (!np) goto out; prop = of_get_property(np, "secure-memory-ranges", &len); if (!prop) goto out_put; for (i = 0; i < len / (sizeof(*prop) * 4); i++) size += of_read_number(prop + (i * 4) + 2, 2); out_put: of_node_put(np); out: return size; } int kvmppc_uvmem_init(void) { int ret = 0; unsigned long size; struct resource *res; void *addr; unsigned long pfn_last, pfn_first; size = kvmppc_get_secmem_size(); if (!size) { /* * Don't fail the initialization of kvm-hv module if * the platform doesn't export ibm,uv-firmware node. * Let normal guests run on such PEF-disabled platform. 
*/ pr_info("KVMPPC-UVMEM: No support for secure guests\n"); goto out; } res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem"); if (IS_ERR(res)) { ret = PTR_ERR(res); goto out; } kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE; kvmppc_uvmem_pgmap.range.start = res->start; kvmppc_uvmem_pgmap.range.end = res->end; kvmppc_uvmem_pgmap.nr_range = 1; kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops; /* just one global instance: */ kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap; addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE); if (IS_ERR(addr)) { ret = PTR_ERR(addr); goto out_free_region; } pfn_first = res->start >> PAGE_SHIFT; pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT); kvmppc_uvmem_bitmap = bitmap_zalloc(pfn_last - pfn_first, GFP_KERNEL); if (!kvmppc_uvmem_bitmap) { ret = -ENOMEM; goto out_unmap; } pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size); return ret; out_unmap: memunmap_pages(&kvmppc_uvmem_pgmap); out_free_region: release_mem_region(res->start, size); out: return ret; } void kvmppc_uvmem_free(void) { if (!kvmppc_uvmem_bitmap) return; memunmap_pages(&kvmppc_uvmem_pgmap); release_mem_region(kvmppc_uvmem_pgmap.range.start, range_len(&kvmppc_uvmem_pgmap.range)); bitmap_free(kvmppc_uvmem_bitmap); }
linux-master
arch/powerpc/kvm/book3s_hv_uvmem.c
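As an illustrative aside (not part of the kernel file above): each GFN tracked by a kvmppc_uvmem_slot packs its state into a single pfn[] word, with flags in the top bits and, for a paged-in secure GFN, the device (uvmem) PFN in the remaining low bits. The sketch below shows that encoding in isolation; the KVMPPC_GFN_* defines are copied from the file above, while the helpers and main() are hypothetical and assume a 64-bit unsigned long as on ppc64.

#include <stdbool.h>
#include <stdio.h>

/* Copied from the file above; assumes a 64-bit unsigned long. */
#define KVMPPC_GFN_UVMEM_PFN    (1UL << 63)
#define KVMPPC_GFN_MEM_PFN      (1UL << 62)
#define KVMPPC_GFN_SHARED       (1UL << 61)
#define KVMPPC_GFN_SECURE       (KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
#define KVMPPC_GFN_FLAG_MASK    (KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)
#define KVMPPC_GFN_PFN_MASK     (~KVMPPC_GFN_FLAG_MASK)

/* Encode "secure GFN backed by a device PFN", as kvmppc_mark_gfn() does. */
static unsigned long encode_uvmem_entry(unsigned long uvmem_pfn)
{
        return uvmem_pfn | KVMPPC_GFN_UVMEM_PFN;
}

/* Decode like kvmppc_gfn_is_uvmem_pfn(): true iff backed by a device PFN. */
static bool decode_uvmem_entry(unsigned long entry, unsigned long *uvmem_pfn)
{
        if (!(entry & KVMPPC_GFN_UVMEM_PFN))
                return false;
        *uvmem_pfn = entry & KVMPPC_GFN_PFN_MASK;
        return true;
}

int main(void)
{
        unsigned long pfn, entry = encode_uvmem_entry(0x1234);

        if (decode_uvmem_entry(entry, &pfn))
                printf("secure GFN, device pfn 0x%lx\n", pfn);     /* 0x1234 */
        return 0;
}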
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/kvm_host.h> #include <asm/asm-prototypes.h> #include <asm/dbell.h> #include <asm/ppc-opcode.h> #include "book3s_hv.h" static void load_spr_state(struct kvm_vcpu *vcpu, struct p9_host_os_sprs *host_os_sprs) { /* TAR is very fast */ mtspr(SPRN_TAR, vcpu->arch.tar); #ifdef CONFIG_ALTIVEC if (cpu_has_feature(CPU_FTR_ALTIVEC) && current->thread.vrsave != vcpu->arch.vrsave) mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); #endif if (vcpu->arch.hfscr & HFSCR_EBB) { if (current->thread.ebbhr != vcpu->arch.ebbhr) mtspr(SPRN_EBBHR, vcpu->arch.ebbhr); if (current->thread.ebbrr != vcpu->arch.ebbrr) mtspr(SPRN_EBBRR, vcpu->arch.ebbrr); if (current->thread.bescr != vcpu->arch.bescr) mtspr(SPRN_BESCR, vcpu->arch.bescr); } if (cpu_has_feature(CPU_FTR_P9_TIDR) && current->thread.tidr != vcpu->arch.tid) mtspr(SPRN_TIDR, vcpu->arch.tid); if (host_os_sprs->iamr != vcpu->arch.iamr) mtspr(SPRN_IAMR, vcpu->arch.iamr); if (host_os_sprs->amr != vcpu->arch.amr) mtspr(SPRN_AMR, vcpu->arch.amr); if (vcpu->arch.uamor != 0) mtspr(SPRN_UAMOR, vcpu->arch.uamor); if (current->thread.fscr != vcpu->arch.fscr) mtspr(SPRN_FSCR, vcpu->arch.fscr); if (current->thread.dscr != vcpu->arch.dscr) mtspr(SPRN_DSCR, vcpu->arch.dscr); if (vcpu->arch.pspb != 0) mtspr(SPRN_PSPB, vcpu->arch.pspb); /* * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI] * clear (or hstate set appropriately to catch those registers * being clobbered if we take a MCE or SRESET), so those are done * later. */ if (!(vcpu->arch.ctrl & 1)) mtspr(SPRN_CTRLT, 0); } static void store_spr_state(struct kvm_vcpu *vcpu) { vcpu->arch.tar = mfspr(SPRN_TAR); #ifdef CONFIG_ALTIVEC if (cpu_has_feature(CPU_FTR_ALTIVEC)) vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); #endif if (vcpu->arch.hfscr & HFSCR_EBB) { vcpu->arch.ebbhr = mfspr(SPRN_EBBHR); vcpu->arch.ebbrr = mfspr(SPRN_EBBRR); vcpu->arch.bescr = mfspr(SPRN_BESCR); } if (cpu_has_feature(CPU_FTR_P9_TIDR)) vcpu->arch.tid = mfspr(SPRN_TIDR); vcpu->arch.iamr = mfspr(SPRN_IAMR); vcpu->arch.amr = mfspr(SPRN_AMR); vcpu->arch.uamor = mfspr(SPRN_UAMOR); vcpu->arch.fscr = mfspr(SPRN_FSCR); vcpu->arch.dscr = mfspr(SPRN_DSCR); vcpu->arch.pspb = mfspr(SPRN_PSPB); vcpu->arch.ctrl = mfspr(SPRN_CTRLF); } /* Returns true if current MSR and/or guest MSR may have changed */ bool load_vcpu_state(struct kvm_vcpu *vcpu, struct p9_host_os_sprs *host_os_sprs) { bool ret = false; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (cpu_has_feature(CPU_FTR_TM) || cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) { unsigned long guest_msr = vcpu->arch.shregs.msr; if (MSR_TM_ACTIVE(guest_msr)) { kvmppc_restore_tm_hv(vcpu, guest_msr, true); ret = true; } else if (vcpu->arch.hfscr & HFSCR_TM) { mtspr(SPRN_TEXASR, vcpu->arch.texasr); mtspr(SPRN_TFHAR, vcpu->arch.tfhar); mtspr(SPRN_TFIAR, vcpu->arch.tfiar); } } #endif load_spr_state(vcpu, host_os_sprs); load_fp_state(&vcpu->arch.fp); #ifdef CONFIG_ALTIVEC load_vr_state(&vcpu->arch.vr); #endif return ret; } EXPORT_SYMBOL_GPL(load_vcpu_state); void store_vcpu_state(struct kvm_vcpu *vcpu) { store_spr_state(vcpu); store_fp_state(&vcpu->arch.fp); #ifdef CONFIG_ALTIVEC store_vr_state(&vcpu->arch.vr); #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (cpu_has_feature(CPU_FTR_TM) || cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) { unsigned long guest_msr = vcpu->arch.shregs.msr; if (MSR_TM_ACTIVE(guest_msr)) { kvmppc_save_tm_hv(vcpu, guest_msr, true); } else if (vcpu->arch.hfscr & HFSCR_TM) { vcpu->arch.texasr = mfspr(SPRN_TEXASR); vcpu->arch.tfhar = 
mfspr(SPRN_TFHAR); vcpu->arch.tfiar = mfspr(SPRN_TFIAR); if (!vcpu->arch.nested) { vcpu->arch.load_tm++; /* see load_ebb comment */ if (!vcpu->arch.load_tm) vcpu->arch.hfscr &= ~HFSCR_TM; } } } #endif } EXPORT_SYMBOL_GPL(store_vcpu_state); void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs) { host_os_sprs->iamr = mfspr(SPRN_IAMR); host_os_sprs->amr = mfspr(SPRN_AMR); } EXPORT_SYMBOL_GPL(save_p9_host_os_sprs); /* vcpu guest regs must already be saved */ void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu, struct p9_host_os_sprs *host_os_sprs) { /* * current->thread.xxx registers must all be restored to host * values before a potential context switch, otherwise the context * switch itself will overwrite current->thread.xxx with the values * from the guest SPRs. */ mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso); if (cpu_has_feature(CPU_FTR_P9_TIDR) && current->thread.tidr != vcpu->arch.tid) mtspr(SPRN_TIDR, current->thread.tidr); if (host_os_sprs->iamr != vcpu->arch.iamr) mtspr(SPRN_IAMR, host_os_sprs->iamr); if (vcpu->arch.uamor != 0) mtspr(SPRN_UAMOR, 0); if (host_os_sprs->amr != vcpu->arch.amr) mtspr(SPRN_AMR, host_os_sprs->amr); if (current->thread.fscr != vcpu->arch.fscr) mtspr(SPRN_FSCR, current->thread.fscr); if (current->thread.dscr != vcpu->arch.dscr) mtspr(SPRN_DSCR, current->thread.dscr); if (vcpu->arch.pspb != 0) mtspr(SPRN_PSPB, 0); /* Save guest CTRL register, set runlatch to 1 */ if (!(vcpu->arch.ctrl & 1)) mtspr(SPRN_CTRLT, 1); #ifdef CONFIG_ALTIVEC if (cpu_has_feature(CPU_FTR_ALTIVEC) && vcpu->arch.vrsave != current->thread.vrsave) mtspr(SPRN_VRSAVE, current->thread.vrsave); #endif if (vcpu->arch.hfscr & HFSCR_EBB) { if (vcpu->arch.bescr != current->thread.bescr) mtspr(SPRN_BESCR, current->thread.bescr); if (vcpu->arch.ebbhr != current->thread.ebbhr) mtspr(SPRN_EBBHR, current->thread.ebbhr); if (vcpu->arch.ebbrr != current->thread.ebbrr) mtspr(SPRN_EBBRR, current->thread.ebbrr); if (!vcpu->arch.nested) { /* * This is like load_fp in context switching, turn off * the facility after it wraps the u8 to try avoiding * saving and restoring the registers each partition * switch. 
*/ vcpu->arch.load_ebb++; if (!vcpu->arch.load_ebb) vcpu->arch.hfscr &= ~HFSCR_EBB; } } if (vcpu->arch.tar != current->thread.tar) mtspr(SPRN_TAR, current->thread.tar); } EXPORT_SYMBOL_GPL(restore_p9_host_os_sprs); #ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next) { struct kvmppc_vcore *vc = vcpu->arch.vcore; struct kvmhv_tb_accumulator *curr; u64 tb = mftb() - vc->tb_offset_applied; u64 prev_tb; u64 delta; u64 seq; curr = vcpu->arch.cur_activity; vcpu->arch.cur_activity = next; prev_tb = vcpu->arch.cur_tb_start; vcpu->arch.cur_tb_start = tb; if (!curr) return; delta = tb - prev_tb; seq = curr->seqcount; curr->seqcount = seq + 1; smp_wmb(); curr->tb_total += delta; if (seq == 0 || delta < curr->tb_min) curr->tb_min = delta; if (delta > curr->tb_max) curr->tb_max = delta; smp_wmb(); curr->seqcount = seq + 2; } EXPORT_SYMBOL_GPL(accumulate_time); #endif static inline u64 mfslbv(unsigned int idx) { u64 slbev; asm volatile("slbmfev %0,%1" : "=r" (slbev) : "r" (idx)); return slbev; } static inline u64 mfslbe(unsigned int idx) { u64 slbee; asm volatile("slbmfee %0,%1" : "=r" (slbee) : "r" (idx)); return slbee; } static inline void mtslb(u64 slbee, u64 slbev) { asm volatile("slbmte %0,%1" :: "r" (slbev), "r" (slbee)); } static inline void clear_slb_entry(unsigned int idx) { mtslb(idx, 0); } static inline void slb_clear_invalidate_partition(void) { clear_slb_entry(0); asm volatile(PPC_SLBIA(6)); } /* * Malicious or buggy radix guests may have inserted SLB entries * (only 0..3 because radix always runs with UPRT=1), so these must * be cleared here to avoid side-channels. slbmte is used rather * than slbia, as it won't clear cached translations. */ static void radix_clear_slb(void) { int i; for (i = 0; i < 4; i++) clear_slb_entry(i); } static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr) { struct kvm_nested_guest *nested = vcpu->arch.nested; u32 lpid; u32 pid; lpid = nested ? nested->shadow_lpid : kvm->arch.lpid; pid = vcpu->arch.pid; /* * Prior memory accesses to host PID Q3 must be completed before we * start switching, and stores must be drained to avoid not-my-LPAR * logic (see switch_mmu_to_host). */ asm volatile("hwsync" ::: "memory"); isync(); mtspr(SPRN_LPID, lpid); mtspr(SPRN_LPCR, lpcr); mtspr(SPRN_PID, pid); /* * isync not required here because we are HRFID'ing to guest before * any guest context access, which is context synchronising. */ } static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr) { u32 lpid; u32 pid; int i; lpid = kvm->arch.lpid; pid = vcpu->arch.pid; /* * See switch_mmu_to_guest_radix. ptesync should not be required here * even if the host is in HPT mode because speculative accesses would * not cause RC updates (we are in real mode). */ asm volatile("hwsync" ::: "memory"); isync(); mtspr(SPRN_LPID, lpid); mtspr(SPRN_LPCR, lpcr); mtspr(SPRN_PID, pid); for (i = 0; i < vcpu->arch.slb_max; i++) mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv); /* * isync not required here, see switch_mmu_to_guest_radix. */ } static void switch_mmu_to_host(struct kvm *kvm, u32 pid) { u32 lpid = kvm->arch.host_lpid; u64 lpcr = kvm->arch.host_lpcr; /* * The guest has exited, so guest MMU context is no longer being * non-speculatively accessed, but a hwsync is needed before the * mtLPIDR / mtPIDR switch, in order to ensure all stores are drained, * so the not-my-LPAR tlbie logic does not overlook them. 
*/ asm volatile("hwsync" ::: "memory"); isync(); mtspr(SPRN_PID, pid); mtspr(SPRN_LPID, lpid); mtspr(SPRN_LPCR, lpcr); /* * isync is not required after the switch, because mtmsrd with L=0 * is performed after this switch, which is context synchronising. */ if (!radix_enabled()) slb_restore_bolted_realmode(); } static void save_clear_host_mmu(struct kvm *kvm) { if (!radix_enabled()) { /* * Hash host could save and restore host SLB entries to * reduce SLB fault overheads of VM exits, but for now the * existing code clears all entries and restores just the * bolted ones when switching back to host. */ slb_clear_invalidate_partition(); } } static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu) { if (kvm_is_radix(kvm)) { radix_clear_slb(); } else { int i; int nr = 0; /* * This must run before switching to host (radix host can't * access all SLBs). */ for (i = 0; i < vcpu->arch.slb_nr; i++) { u64 slbee, slbev; slbee = mfslbe(i); if (slbee & SLB_ESID_V) { slbev = mfslbv(i); vcpu->arch.slb[nr].orige = slbee | i; vcpu->arch.slb[nr].origv = slbev; nr++; } } vcpu->arch.slb_max = nr; slb_clear_invalidate_partition(); } } static void flush_guest_tlb(struct kvm *kvm) { unsigned long rb, set; rb = PPC_BIT(52); /* IS = 2 */ if (kvm_is_radix(kvm)) { /* R=1 PRS=1 RIC=2 */ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) : : "r" (rb), "i" (1), "i" (1), "i" (2), "r" (0) : "memory"); for (set = 1; set < kvm->arch.tlb_sets; ++set) { rb += PPC_BIT(51); /* increment set number */ /* R=1 PRS=1 RIC=0 */ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) : : "r" (rb), "i" (1), "i" (1), "i" (0), "r" (0) : "memory"); } asm volatile("ptesync": : :"memory"); // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now. asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory"); } else { for (set = 0; set < kvm->arch.tlb_sets; ++set) { /* R=0 PRS=0 RIC=0 */ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) : : "r" (rb), "i" (0), "i" (0), "i" (0), "r" (0) : "memory"); rb += PPC_BIT(51); /* increment set number */ } asm volatile("ptesync": : :"memory"); // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now. asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory"); } } static void check_need_tlb_flush(struct kvm *kvm, int pcpu, struct kvm_nested_guest *nested) { cpumask_t *need_tlb_flush; bool all_set = true; int i; if (nested) need_tlb_flush = &nested->need_tlb_flush; else need_tlb_flush = &kvm->arch.need_tlb_flush; if (likely(!cpumask_test_cpu(pcpu, need_tlb_flush))) return; /* * Individual threads can come in here, but the TLB is shared between * the 4 threads in a core, hence invalidating on one thread * invalidates for all, so only invalidate the first time (if all bits * were set. The others must still execute a ptesync. * * If a race occurs and two threads do the TLB flush, that is not a * problem, just sub-optimal. 
*/ for (i = cpu_first_tlb_thread_sibling(pcpu); i <= cpu_last_tlb_thread_sibling(pcpu); i += cpu_tlb_thread_sibling_step()) { if (!cpumask_test_cpu(i, need_tlb_flush)) { all_set = false; break; } } if (all_set) flush_guest_tlb(kvm); else asm volatile("ptesync" ::: "memory"); /* Clear the bit after the TLB flush */ cpumask_clear_cpu(pcpu, need_tlb_flush); } unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr) { unsigned long msr_needed = 0; msr &= ~MSR_EE; /* MSR bits may have been cleared by context switch so must recheck */ if (IS_ENABLED(CONFIG_PPC_FPU)) msr_needed |= MSR_FP; if (cpu_has_feature(CPU_FTR_ALTIVEC)) msr_needed |= MSR_VEC; if (cpu_has_feature(CPU_FTR_VSX)) msr_needed |= MSR_VSX; if ((cpu_has_feature(CPU_FTR_TM) || cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) && (vcpu->arch.hfscr & HFSCR_TM)) msr_needed |= MSR_TM; /* * This could be combined with MSR[RI] clearing, but that expands * the unrecoverable window. It would be better to cover unrecoverable * with KVM bad interrupt handling rather than use MSR[RI] at all. * * Much more difficult and less worthwhile to combine with IR/DR * disable. */ if ((msr & msr_needed) != msr_needed) { msr |= msr_needed; __mtmsrd(msr, 0); } else { __hard_irq_disable(); } local_paca->irq_happened |= PACA_IRQ_HARD_DIS; return msr; } EXPORT_SYMBOL_GPL(kvmppc_msr_hard_disable_set_facilities); int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb) { struct p9_host_os_sprs host_os_sprs; struct kvm *kvm = vcpu->kvm; struct kvm_nested_guest *nested = vcpu->arch.nested; struct kvmppc_vcore *vc = vcpu->arch.vcore; s64 hdec, dec; u64 purr, spurr; u64 *exsave; int trap; unsigned long msr; unsigned long host_hfscr; unsigned long host_ciabr; unsigned long host_dawr0; unsigned long host_dawrx0; unsigned long host_psscr; unsigned long host_hpsscr; unsigned long host_pidr; unsigned long host_dawr1; unsigned long host_dawrx1; unsigned long dpdes; hdec = time_limit - *tb; if (hdec < 0) return BOOK3S_INTERRUPT_HV_DECREMENTER; WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV); WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME)); vcpu->arch.ceded = 0; /* Save MSR for restore, with EE clear. 
*/ msr = mfmsr() & ~MSR_EE; host_hfscr = mfspr(SPRN_HFSCR); host_ciabr = mfspr(SPRN_CIABR); host_psscr = mfspr(SPRN_PSSCR_PR); if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) host_hpsscr = mfspr(SPRN_PSSCR); host_pidr = mfspr(SPRN_PID); if (dawr_enabled()) { host_dawr0 = mfspr(SPRN_DAWR0); host_dawrx0 = mfspr(SPRN_DAWRX0); if (cpu_has_feature(CPU_FTR_DAWR1)) { host_dawr1 = mfspr(SPRN_DAWR1); host_dawrx1 = mfspr(SPRN_DAWRX1); } } local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR); local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR); save_p9_host_os_sprs(&host_os_sprs); msr = kvmppc_msr_hard_disable_set_facilities(vcpu, msr); if (lazy_irq_pending()) { trap = 0; goto out; } if (unlikely(load_vcpu_state(vcpu, &host_os_sprs))) msr = mfmsr(); /* MSR may have been updated */ if (vc->tb_offset) { u64 new_tb = *tb + vc->tb_offset; mtspr(SPRN_TBU40, new_tb); if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) { new_tb += 0x1000000; mtspr(SPRN_TBU40, new_tb); } *tb = new_tb; vc->tb_offset_applied = vc->tb_offset; } mtspr(SPRN_VTB, vc->vtb); mtspr(SPRN_PURR, vcpu->arch.purr); mtspr(SPRN_SPURR, vcpu->arch.spurr); if (vc->pcr) mtspr(SPRN_PCR, vc->pcr | PCR_MASK); if (vcpu->arch.doorbell_request) { vcpu->arch.doorbell_request = 0; mtspr(SPRN_DPDES, 1); } if (dawr_enabled()) { if (vcpu->arch.dawr0 != host_dawr0) mtspr(SPRN_DAWR0, vcpu->arch.dawr0); if (vcpu->arch.dawrx0 != host_dawrx0) mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0); if (cpu_has_feature(CPU_FTR_DAWR1)) { if (vcpu->arch.dawr1 != host_dawr1) mtspr(SPRN_DAWR1, vcpu->arch.dawr1); if (vcpu->arch.dawrx1 != host_dawrx1) mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1); } } if (vcpu->arch.ciabr != host_ciabr) mtspr(SPRN_CIABR, vcpu->arch.ciabr); if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) { mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC | (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG)); } else { if (vcpu->arch.psscr != host_psscr) mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); } mtspr(SPRN_HFSCR, vcpu->arch.hfscr); mtspr(SPRN_HSRR0, vcpu->arch.regs.nip); mtspr(SPRN_HSRR1, (vcpu->arch.shregs.msr & ~MSR_HV) | MSR_ME); /* * On POWER9 DD2.1 and below, sometimes on a Hypervisor Data Storage * Interrupt (HDSI) the HDSISR is not be updated at all. * * To work around this we put a canary value into the HDSISR before * returning to a guest and then check for this canary when we take a * HDSI. If we find the canary on a HDSI, we know the hardware didn't * update the HDSISR. In this case we return to the guest to retake the * HDSI which should correctly update the HDSISR the second time HDSI * entry. * * The "radix prefetch bug" test can be used to test for this bug, as * it also exists fo DD2.1 and below. */ if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) mtspr(SPRN_HDSISR, HDSISR_CANARY); mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0); mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1); mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2); mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3); /* * It might be preferable to load_vcpu_state here, in order to get the * GPR/FP register loads executing in parallel with the previous mtSPR * instructions, but for now that can't be done because the TM handling * in load_vcpu_state can change some SPRs and vcpu state (nip, msr). * But TM could be split out if this would be a significant benefit. */ /* * MSR[RI] does not need to be cleared (and is not, for radix guests * with no prefetch bug), because in_guest is set. 
If we take a SRESET * or MCE with in_guest set but still in HV mode, then * kvmppc_p9_bad_interrupt handles the interrupt, which effectively * clears MSR[RI] and doesn't return. */ WRITE_ONCE(local_paca->kvm_hstate.in_guest, KVM_GUEST_MODE_HV_P9); barrier(); /* Open in_guest critical section */ /* * Hash host, hash guest, or radix guest with prefetch bug, all have * to disable the MMU before switching to guest MMU state. */ if (!radix_enabled() || !kvm_is_radix(kvm) || cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) __mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0); save_clear_host_mmu(kvm); if (kvm_is_radix(kvm)) switch_mmu_to_guest_radix(kvm, vcpu, lpcr); else switch_mmu_to_guest_hpt(kvm, vcpu, lpcr); /* TLBIEL uses LPID=LPIDR, so run this after setting guest LPID */ check_need_tlb_flush(kvm, vc->pcpu, nested); /* * P9 suppresses the HDEC exception when LPCR[HDICE] = 0, * so set guest LPCR (with HDICE) before writing HDEC. */ mtspr(SPRN_HDEC, hdec); mtspr(SPRN_DEC, vcpu->arch.dec_expires - *tb); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM tm_return_to_guest: #endif mtspr(SPRN_DAR, vcpu->arch.shregs.dar); mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0); mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1); switch_pmu_to_guest(vcpu, &host_os_sprs); accumulate_time(vcpu, &vcpu->arch.in_guest); kvmppc_p9_enter_guest(vcpu); accumulate_time(vcpu, &vcpu->arch.guest_exit); switch_pmu_to_host(vcpu, &host_os_sprs); /* XXX: Could get these from r11/12 and paca exsave instead */ vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0); vcpu->arch.shregs.srr1 = mfspr(SPRN_SRR1); vcpu->arch.shregs.dar = mfspr(SPRN_DAR); vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); /* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */ trap = local_paca->kvm_hstate.scratch0 & ~0x2; if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK)) exsave = local_paca->exgen; else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET) exsave = local_paca->exnmi; else /* trap == 0x200 */ exsave = local_paca->exmc; vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1; vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2; /* * After reading machine check regs (DAR, DSISR, SRR0/1) and hstate * scratch (which we need to move into exsave to make re-entrant vs * SRESET/MCE), register state is protected from reentrancy. However * timebase, MMU, among other state is still set to guest, so don't * enable MSR[RI] here. It gets enabled at the end, after in_guest * is cleared. * * It is possible an NMI could come in here, which is why it is * important to save the above state early so it can be debugged. 
*/ vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)]; vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)]; vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)]; vcpu->arch.regs.gpr[12] = exsave[EX_R12/sizeof(u64)]; vcpu->arch.regs.gpr[13] = exsave[EX_R13/sizeof(u64)]; vcpu->arch.ppr = exsave[EX_PPR/sizeof(u64)]; vcpu->arch.cfar = exsave[EX_CFAR/sizeof(u64)]; vcpu->arch.regs.ctr = exsave[EX_CTR/sizeof(u64)]; vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; if (unlikely(trap == BOOK3S_INTERRUPT_MACHINE_CHECK)) { vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)]; vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)]; kvmppc_realmode_machine_check(vcpu); } else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) { kvmppc_p9_realmode_hmi_handler(vcpu); } else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) { vcpu->arch.emul_inst = mfspr(SPRN_HEIR); } else if (trap == BOOK3S_INTERRUPT_H_DATA_STORAGE) { vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)]; vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)]; vcpu->arch.fault_gpa = mfspr(SPRN_ASDR); } else if (trap == BOOK3S_INTERRUPT_H_INST_STORAGE) { vcpu->arch.fault_gpa = mfspr(SPRN_ASDR); } else if (trap == BOOK3S_INTERRUPT_H_FAC_UNAVAIL) { vcpu->arch.hfscr = mfspr(SPRN_HFSCR); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Softpatch interrupt for transactional memory emulation cases * on POWER9 DD2.2. This is early in the guest exit path - we * haven't saved registers or done a treclaim yet. */ } else if (trap == BOOK3S_INTERRUPT_HV_SOFTPATCH) { vcpu->arch.emul_inst = mfspr(SPRN_HEIR); /* * The cases we want to handle here are those where the guest * is in real suspend mode and is trying to transition to * transactional mode. */ if (!local_paca->kvm_hstate.fake_suspend && (vcpu->arch.shregs.msr & MSR_TS_S)) { if (kvmhv_p9_tm_emulation_early(vcpu)) { /* * Go straight back into the guest with the * new NIP/MSR as set by TM emulation. */ mtspr(SPRN_HSRR0, vcpu->arch.regs.nip); mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr); goto tm_return_to_guest; } } #endif } /* Advance host PURR/SPURR by the amount used by guest */ purr = mfspr(SPRN_PURR); spurr = mfspr(SPRN_SPURR); local_paca->kvm_hstate.host_purr += purr - vcpu->arch.purr; local_paca->kvm_hstate.host_spurr += spurr - vcpu->arch.spurr; vcpu->arch.purr = purr; vcpu->arch.spurr = spurr; vcpu->arch.ic = mfspr(SPRN_IC); vcpu->arch.pid = mfspr(SPRN_PID); vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0); vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1); vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2); vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3); dpdes = mfspr(SPRN_DPDES); if (dpdes) vcpu->arch.doorbell_request = 1; vc->vtb = mfspr(SPRN_VTB); dec = mfspr(SPRN_DEC); if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */ dec = (s32) dec; *tb = mftb(); vcpu->arch.dec_expires = dec + *tb; if (vc->tb_offset_applied) { u64 new_tb = *tb - vc->tb_offset_applied; mtspr(SPRN_TBU40, new_tb); if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) { new_tb += 0x1000000; mtspr(SPRN_TBU40, new_tb); } *tb = new_tb; vc->tb_offset_applied = 0; } save_clear_guest_mmu(kvm, vcpu); switch_mmu_to_host(kvm, host_pidr); /* * Enable MSR here in order to have facilities enabled to save * guest registers. This enables MMU (if we were in realmode), so * only switch MMU on after the MMU is switched to host, to avoid * the P9_RADIX_PREFETCH_BUG or hash guest context. 
*/ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && vcpu->arch.shregs.msr & MSR_TS_MASK) msr |= MSR_TS_S; __mtmsrd(msr, 0); store_vcpu_state(vcpu); mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr); mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr); if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) { /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */ mtspr(SPRN_PSSCR, host_hpsscr | (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG)); } mtspr(SPRN_HFSCR, host_hfscr); if (vcpu->arch.ciabr != host_ciabr) mtspr(SPRN_CIABR, host_ciabr); if (dawr_enabled()) { if (vcpu->arch.dawr0 != host_dawr0) mtspr(SPRN_DAWR0, host_dawr0); if (vcpu->arch.dawrx0 != host_dawrx0) mtspr(SPRN_DAWRX0, host_dawrx0); if (cpu_has_feature(CPU_FTR_DAWR1)) { if (vcpu->arch.dawr1 != host_dawr1) mtspr(SPRN_DAWR1, host_dawr1); if (vcpu->arch.dawrx1 != host_dawrx1) mtspr(SPRN_DAWRX1, host_dawrx1); } } if (dpdes) mtspr(SPRN_DPDES, 0); if (vc->pcr) mtspr(SPRN_PCR, PCR_MASK); /* HDEC must be at least as large as DEC, so decrementer_max fits */ mtspr(SPRN_HDEC, decrementer_max); timer_rearm_host_dec(*tb); restore_p9_host_os_sprs(vcpu, &host_os_sprs); barrier(); /* Close in_guest critical section */ WRITE_ONCE(local_paca->kvm_hstate.in_guest, KVM_GUEST_MODE_NONE); /* Interrupts are recoverable at this point */ /* * cp_abort is required if the processor supports local copy-paste * to clear the copy buffer that was under control of the guest. */ if (cpu_has_feature(CPU_FTR_ARCH_31)) asm volatile(PPC_CP_ABORT); out: return trap; } EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);
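/*
 * Illustrative sketch added by the editor (not part of the upstream file):
 * kvmhv_vcpu_entry_p9() above applies and reverts the virtual-core timebase
 * offset by writing the top 40 bits through SPRN_TBU40.  The low 24 bits of
 * the timebase keep running across that write, so if they wrapped in the
 * meantime the lost carry has to be compensated by adding 2^24 and writing
 * again.  A hypothetical standalone helper capturing that pattern:
 */
static inline void kvmhv_apply_tb_offset_sketch(u64 *tb, s64 offset)
{
	u64 new_tb = *tb + offset;

	mtspr(SPRN_TBU40, new_tb);
	/* Low 24 bits wrapped after sampling? Re-add the lost carry. */
	if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
		new_tb += 0x1000000;
		mtspr(SPRN_TBU40, new_tb);
	}
	*tb = new_tb;
}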
linux-master
arch/powerpc/kvm/book3s_hv_p9_entry.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved. * * Authors: * Alexander Graf <[email protected]> */ #include <linux/kvm_host.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/book3s/32/mmu-hash.h> #include <asm/machdep.h> #include <asm/mmu_context.h> #include <asm/hw_irq.h> #include "book3s.h" /* #define DEBUG_MMU */ /* #define DEBUG_SR */ #ifdef DEBUG_MMU #define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__) #else #define dprintk_mmu(a, ...) do { } while(0) #endif #ifdef DEBUG_SR #define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__) #else #define dprintk_sr(a, ...) do { } while(0) #endif #if PAGE_SHIFT != 12 #error Unknown page size #endif #ifdef CONFIG_SMP #error XXX need to grab mmu_hash_lock #endif #ifdef CONFIG_PTE_64BIT #error Only 32 bit pages are supported for now #endif static ulong htab; static u32 htabmask; void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) { volatile u32 *pteg; /* Remove from host HTAB */ pteg = (u32*)pte->slot; pteg[0] = 0; /* And make sure it's gone from the TLB too */ asm volatile ("sync"); asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory"); asm volatile ("sync"); asm volatile ("tlbsync"); } /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using * a hash, so we don't waste cycles on looping */ static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) { return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK)); } static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) { struct kvmppc_sid_map *map; u16 sid_map_mask; if (kvmppc_get_msr(vcpu) & MSR_PR) gvsid |= VSID_PR; sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); map = &to_book3s(vcpu)->sid_map[sid_map_mask]; if (map->guest_vsid == gvsid) { dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n", gvsid, map->host_vsid); return map; } map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; if (map->guest_vsid == gvsid) { dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n", gvsid, map->host_vsid); return map; } dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid); return NULL; } static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr, bool primary) { u32 page, hash; ulong pteg = htab; page = (eaddr & ~ESID_MASK) >> 12; hash = ((vsid ^ page) << 6); if (!primary) hash = ~hash; hash &= htabmask; pteg |= hash; dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n", htab, hash, htabmask, pteg); return (u32*)pteg; } extern char etext[]; int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, bool iswrite) { kvm_pfn_t hpaddr; u64 vpn; u64 vsid; struct kvmppc_sid_map *map; volatile u32 *pteg; u32 eaddr = orig_pte->eaddr; u32 pteg0, pteg1; register int rr = 0; bool primary = false; bool evict = false; struct hpte_cache *pte; int r = 0; bool writable; /* Get host physical address for gpa */ hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); if (is_error_noslot_pfn(hpaddr)) { printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n", orig_pte->raddr); r = -EINVAL; goto out; } hpaddr <<= PAGE_SHIFT; /* and write the mapping ea -> hpa into the 
pt */ vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); map = find_sid_vsid(vcpu, vsid); if (!map) { kvmppc_mmu_map_segment(vcpu, eaddr); map = find_sid_vsid(vcpu, vsid); } BUG_ON(!map); vsid = map->host_vsid; vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) | ((eaddr & ~ESID_MASK) >> VPN_SHIFT); next_pteg: if (rr == 16) { primary = !primary; evict = true; rr = 0; } pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary); /* not evicting yet */ if (!evict && (pteg[rr] & PTE_V)) { rr += 2; goto next_pteg; } dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr); dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]); dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]); dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]); dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]); dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]); dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]); dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]); dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]); pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V | (primary ? 0 : PTE_SEC); pteg1 = hpaddr | PTE_M | PTE_R | PTE_C; if (orig_pte->may_write && writable) { pteg1 |= PP_RWRW; mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); } else { pteg1 |= PP_RWRX; } if (orig_pte->may_execute) kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); local_irq_disable(); if (pteg[rr]) { pteg[rr] = 0; asm volatile ("sync"); } pteg[rr + 1] = pteg1; pteg[rr] = pteg0; asm volatile ("sync"); local_irq_enable(); dprintk_mmu("KVM: new PTEG: %p\n", pteg); dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]); dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]); dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]); dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]); dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]); dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]); dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]); dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]); /* Now tell our Shadow PTE code about the new page */ pte = kvmppc_mmu_hpte_cache_next(vcpu); if (!pte) { kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); r = -EAGAIN; goto out; } dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n", orig_pte->may_write ? 'w' : '-', orig_pte->may_execute ? 'x' : '-', orig_pte->eaddr, (ulong)pteg, vpn, orig_pte->vpage, hpaddr); pte->slot = (ulong)&pteg[rr]; pte->host_vpn = vpn; pte->pte = *orig_pte; pte->pfn = hpaddr >> PAGE_SHIFT; kvmppc_mmu_hpte_cache_map(vcpu, pte); kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); out: return r; } void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) { kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL); } static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) { struct kvmppc_sid_map *map; struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); u16 sid_map_mask; static int backwards_map = 0; if (kvmppc_get_msr(vcpu) & MSR_PR) gvsid |= VSID_PR; /* We might get collisions that trap in preceding order, so let's map them differently */ sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); if (backwards_map) sid_map_mask = SID_MAP_MASK - sid_map_mask; map = &to_book3s(vcpu)->sid_map[sid_map_mask]; /* Make sure we're taking the other map next time */ backwards_map = !backwards_map; /* Uh-oh ... out of mappings. Let's flush! 
*/ if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) { vcpu_book3s->vsid_next = 0; memset(vcpu_book3s->sid_map, 0, sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); kvmppc_mmu_pte_flush(vcpu, 0, 0); kvmppc_mmu_flush_segments(vcpu); } map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next]; vcpu_book3s->vsid_next++; map->guest_vsid = gvsid; map->valid = true; return map; } int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) { u32 esid = eaddr >> SID_SHIFT; u64 gvsid; u32 sr; struct kvmppc_sid_map *map; struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); int r = 0; if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { /* Invalidate an entry */ svcpu->sr[esid] = SR_INVALID; r = -ENOENT; goto out; } map = find_sid_vsid(vcpu, gvsid); if (!map) map = create_sid_map(vcpu, gvsid); map->guest_esid = esid; sr = map->host_vsid | SR_KP; svcpu->sr[esid] = sr; dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr); out: svcpu_put(svcpu); return r; } void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) { int i; struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr)); for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++) svcpu->sr[i] = SR_INVALID; svcpu_put(svcpu); } void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu) { int i; kvmppc_mmu_hpte_destroy(vcpu); preempt_disable(); for (i = 0; i < SID_CONTEXTS; i++) __destroy_context(to_book3s(vcpu)->context_id[i]); preempt_enable(); } int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int err; ulong sdr1; int i; int j; for (i = 0; i < SID_CONTEXTS; i++) { err = __init_new_context(); if (err < 0) goto init_fail; vcpu3s->context_id[i] = err; /* Remember context id for this combination */ for (j = 0; j < 16; j++) vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j); } vcpu3s->vsid_next = 0; /* Remember where the HTAB is */ asm ( "mfsdr1 %0" : "=r"(sdr1) ); htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0; htab = (ulong)__va(sdr1 & 0xffff0000); kvmppc_mmu_hpte_init(vcpu); return 0; init_fail: for (j = 0; j < i; j++) { if (!vcpu3s->context_id[j]) continue; __destroy_context(to_book3s(vcpu)->context_id[j]); } return -1; }
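/*
 * Editor's note (illustrative, not part of the upstream file): on the 32-bit
 * hash MMU the host hash table is described by SDR1, which
 * kvmppc_mmu_init_pr() above decodes: the upper 16 bits are the physical
 * base of the HTAB (htab = __va(sdr1 & 0xffff0000)) and the low 9 bits are
 * HTABMASK, which selects how many high-order hash bits take part in PTEG
 * selection (htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0).
 * kvmppc_mmu_get_pteg() then forms a PTEG address as
 *
 *	hash      = (vsid ^ page) << 6;		// PTEGs are 64 bytes
 *	pteg_addr = htab | (hash & htabmask);
 *
 * with the secondary hash being the bitwise complement of the primary one.
 */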
linux-master
arch/powerpc/kvm/book3s_32_mmu_host.c
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright IBM Corp. 2008 * Copyright 2011 Freescale Semiconductor, Inc. * * Authors: Hollis Blanchard <[email protected]> */ #include <linux/kvm_host.h> #include <asm/disassemble.h> #include "booke.h" #define OP_19_XOP_RFI 50 #define OP_19_XOP_RFCI 51 #define OP_19_XOP_RFDI 39 #define OP_31_XOP_MFMSR 83 #define OP_31_XOP_WRTEE 131 #define OP_31_XOP_MTMSR 146 #define OP_31_XOP_WRTEEI 163 static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) { vcpu->arch.regs.nip = vcpu->arch.shared->srr0; kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); } static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu) { vcpu->arch.regs.nip = vcpu->arch.dsrr0; kvmppc_set_msr(vcpu, vcpu->arch.dsrr1); } static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) { vcpu->arch.regs.nip = vcpu->arch.csrr0; kvmppc_set_msr(vcpu, vcpu->arch.csrr1); } int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { int emulated = EMULATE_DONE; int rs = get_rs(inst); int rt = get_rt(inst); switch (get_op(inst)) { case 19: switch (get_xop(inst)) { case OP_19_XOP_RFI: kvmppc_emul_rfi(vcpu); kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS); *advance = 0; break; case OP_19_XOP_RFCI: kvmppc_emul_rfci(vcpu); kvmppc_set_exit_type(vcpu, EMULATED_RFCI_EXITS); *advance = 0; break; case OP_19_XOP_RFDI: kvmppc_emul_rfdi(vcpu); kvmppc_set_exit_type(vcpu, EMULATED_RFDI_EXITS); *advance = 0; break; default: emulated = EMULATE_FAIL; break; } break; case 31: switch (get_xop(inst)) { case OP_31_XOP_MFMSR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); break; case OP_31_XOP_MTMSR: kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); break; case OP_31_XOP_WRTEE: vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); break; case OP_31_XOP_WRTEEI: vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) | (inst & MSR_EE); kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); break; default: emulated = EMULATE_FAIL; } break; default: emulated = EMULATE_FAIL; } return emulated; } /* * NOTE: some of these registers are not emulated on BOOKE_HV (GS-mode). * Their backing store is in real registers, and these functions * will return the wrong result if called for them in another context * (such as debugging). */ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) { int emulated = EMULATE_DONE; bool debug_inst = false; switch (sprn) { case SPRN_DEAR: vcpu->arch.shared->dar = spr_val; break; case SPRN_ESR: vcpu->arch.shared->esr = spr_val; break; case SPRN_CSRR0: vcpu->arch.csrr0 = spr_val; break; case SPRN_CSRR1: vcpu->arch.csrr1 = spr_val; break; case SPRN_DSRR0: vcpu->arch.dsrr0 = spr_val; break; case SPRN_DSRR1: vcpu->arch.dsrr1 = spr_val; break; case SPRN_IAC1: /* * If userspace is debugging guest then guest * can not access debug registers. */ if (vcpu->guest_debug) break; debug_inst = true; vcpu->arch.dbg_reg.iac1 = spr_val; break; case SPRN_IAC2: /* * If userspace is debugging guest then guest * can not access debug registers. */ if (vcpu->guest_debug) break; debug_inst = true; vcpu->arch.dbg_reg.iac2 = spr_val; break; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 case SPRN_IAC3: /* * If userspace is debugging guest then guest * can not access debug registers. 
*/ if (vcpu->guest_debug) break; debug_inst = true; vcpu->arch.dbg_reg.iac3 = spr_val; break; case SPRN_IAC4: /* * If userspace is debugging guest then guest * can not access debug registers. */ if (vcpu->guest_debug) break; debug_inst = true; vcpu->arch.dbg_reg.iac4 = spr_val; break; #endif case SPRN_DAC1: /* * If userspace is debugging guest then guest * can not access debug registers. */ if (vcpu->guest_debug) break; debug_inst = true; vcpu->arch.dbg_reg.dac1 = spr_val; break; case SPRN_DAC2: /* * If userspace is debugging guest then guest * can not access debug registers. */ if (vcpu->guest_debug) break; debug_inst = true; vcpu->arch.dbg_reg.dac2 = spr_val; break; case SPRN_DBCR0: /* * If userspace is debugging guest then guest * can not access debug registers. */ if (vcpu->guest_debug) break; debug_inst = true; spr_val &= (DBCR0_IDM | DBCR0_IC | DBCR0_BT | DBCR0_TIE | DBCR0_IAC1 | DBCR0_IAC2 | DBCR0_IAC3 | DBCR0_IAC4 | DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W); vcpu->arch.dbg_reg.dbcr0 = spr_val; break; case SPRN_DBCR1: /* * If userspace is debugging guest then guest * can not access debug registers. */ if (vcpu->guest_debug) break; debug_inst = true; vcpu->arch.dbg_reg.dbcr1 = spr_val; break; case SPRN_DBCR2: /* * If userspace is debugging guest then guest * can not access debug registers. */ if (vcpu->guest_debug) break; debug_inst = true; vcpu->arch.dbg_reg.dbcr2 = spr_val; break; case SPRN_DBSR: /* * If userspace is debugging guest then guest * can not access debug registers. */ if (vcpu->guest_debug) break; vcpu->arch.dbsr &= ~spr_val; if (!(vcpu->arch.dbsr & ~DBSR_IDE)) kvmppc_core_dequeue_debug(vcpu); break; case SPRN_TSR: kvmppc_clr_tsr_bits(vcpu, spr_val); break; case SPRN_TCR: /* * WRC is a 2-bit field that is supposed to preserve its * value once written to non-zero. */ if (vcpu->arch.tcr & TCR_WRC_MASK) { spr_val &= ~TCR_WRC_MASK; spr_val |= vcpu->arch.tcr & TCR_WRC_MASK; } kvmppc_set_tcr(vcpu, spr_val); break; case SPRN_DECAR: vcpu->arch.decar = spr_val; break; /* * Note: SPRG4-7 are user-readable. * These values are loaded into the real SPRGs when resuming the * guest (PR-mode only). 
*/ case SPRN_SPRG4: kvmppc_set_sprg4(vcpu, spr_val); break; case SPRN_SPRG5: kvmppc_set_sprg5(vcpu, spr_val); break; case SPRN_SPRG6: kvmppc_set_sprg6(vcpu, spr_val); break; case SPRN_SPRG7: kvmppc_set_sprg7(vcpu, spr_val); break; case SPRN_IVPR: vcpu->arch.ivpr = spr_val; #ifdef CONFIG_KVM_BOOKE_HV mtspr(SPRN_GIVPR, spr_val); #endif break; case SPRN_IVOR0: vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; break; case SPRN_IVOR1: vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val; break; case SPRN_IVOR2: vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; #ifdef CONFIG_KVM_BOOKE_HV mtspr(SPRN_GIVOR2, spr_val); #endif break; case SPRN_IVOR3: vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; break; case SPRN_IVOR4: vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val; break; case SPRN_IVOR5: vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val; break; case SPRN_IVOR6: vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val; break; case SPRN_IVOR7: vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val; break; case SPRN_IVOR8: vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; #ifdef CONFIG_KVM_BOOKE_HV mtspr(SPRN_GIVOR8, spr_val); #endif break; case SPRN_IVOR9: vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; break; case SPRN_IVOR10: vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val; break; case SPRN_IVOR11: vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val; break; case SPRN_IVOR12: vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val; break; case SPRN_IVOR13: vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val; break; case SPRN_IVOR14: vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val; break; case SPRN_IVOR15: vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val; break; case SPRN_MCSR: vcpu->arch.mcsr &= ~spr_val; break; #if defined(CONFIG_64BIT) case SPRN_EPCR: kvmppc_set_epcr(vcpu, spr_val); #ifdef CONFIG_KVM_BOOKE_HV mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr); #endif break; #endif default: emulated = EMULATE_FAIL; } if (debug_inst) { current->thread.debug = vcpu->arch.dbg_reg; switch_booke_debug_regs(&vcpu->arch.dbg_reg); } return emulated; } int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) { int emulated = EMULATE_DONE; switch (sprn) { case SPRN_IVPR: *spr_val = vcpu->arch.ivpr; break; case SPRN_DEAR: *spr_val = vcpu->arch.shared->dar; break; case SPRN_ESR: *spr_val = vcpu->arch.shared->esr; break; case SPRN_EPR: *spr_val = vcpu->arch.epr; break; case SPRN_CSRR0: *spr_val = vcpu->arch.csrr0; break; case SPRN_CSRR1: *spr_val = vcpu->arch.csrr1; break; case SPRN_DSRR0: *spr_val = vcpu->arch.dsrr0; break; case SPRN_DSRR1: *spr_val = vcpu->arch.dsrr1; break; case SPRN_IAC1: *spr_val = vcpu->arch.dbg_reg.iac1; break; case SPRN_IAC2: *spr_val = vcpu->arch.dbg_reg.iac2; break; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 case SPRN_IAC3: *spr_val = vcpu->arch.dbg_reg.iac3; break; case SPRN_IAC4: *spr_val = vcpu->arch.dbg_reg.iac4; break; #endif case SPRN_DAC1: *spr_val = vcpu->arch.dbg_reg.dac1; break; case SPRN_DAC2: *spr_val = vcpu->arch.dbg_reg.dac2; break; case SPRN_DBCR0: *spr_val = vcpu->arch.dbg_reg.dbcr0; if (vcpu->guest_debug) *spr_val = *spr_val | DBCR0_EDM; break; case SPRN_DBCR1: *spr_val = vcpu->arch.dbg_reg.dbcr1; break; case SPRN_DBCR2: *spr_val = vcpu->arch.dbg_reg.dbcr2; break; case SPRN_DBSR: *spr_val = vcpu->arch.dbsr; break; case SPRN_TSR: *spr_val = vcpu->arch.tsr; break; case SPRN_TCR: *spr_val = vcpu->arch.tcr; break; case SPRN_IVOR0: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; break; case SPRN_IVOR1: *spr_val = 
vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; break; case SPRN_IVOR2: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; break; case SPRN_IVOR3: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; break; case SPRN_IVOR4: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; break; case SPRN_IVOR5: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; break; case SPRN_IVOR6: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; break; case SPRN_IVOR7: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; break; case SPRN_IVOR8: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; break; case SPRN_IVOR9: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; break; case SPRN_IVOR10: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; break; case SPRN_IVOR11: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; break; case SPRN_IVOR12: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; break; case SPRN_IVOR13: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; break; case SPRN_IVOR14: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; break; case SPRN_IVOR15: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; break; case SPRN_MCSR: *spr_val = vcpu->arch.mcsr; break; #if defined(CONFIG_64BIT) case SPRN_EPCR: *spr_val = vcpu->arch.epcr; break; #endif default: emulated = EMULATE_FAIL; } return emulated; }
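/*
 * Editor's note (illustrative, not part of the upstream file): the
 * OP_31_XOP_WRTEEI case above can mask the raw instruction word with MSR_EE
 * because the E field of wrteei sits at the same bit position as MSR[EE]
 * (mask 0x8000), so no shifting is needed.  The SPRN_DBSR case implements
 * the usual write-one-to-clear semantics via "dbsr &= ~spr_val", e.g. with
 * dbsr = 0x0000000a a guest mtspr of 0x00000002 leaves 0x00000008.
 */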
linux-master
arch/powerpc/kvm/booke_emulate.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved. * * Author: Yu Liu, [email protected] * Scott Wood, [email protected] * Ashish Kalra, [email protected] * Varun Sethi, [email protected] * Alexander Graf, [email protected] * * Description: * This file is based on arch/powerpc/kvm/44x_tlb.c, * by Hollis Blanchard <[email protected]>. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/highmem.h> #include <linux/log2.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <linux/rwsem.h> #include <linux/vmalloc.h> #include <linux/hugetlb.h> #include <asm/kvm_ppc.h> #include "e500.h" #include "trace_booke.h" #include "timing.h" #include "e500_mmu_host.h" static inline unsigned int gtlb0_get_next_victim( struct kvmppc_vcpu_e500 *vcpu_e500) { unsigned int victim; victim = vcpu_e500->gtlb_nv[0]++; if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways)) vcpu_e500->gtlb_nv[0] = 0; return victim; } static int tlb0_set_base(gva_t addr, int sets, int ways) { int set_base; set_base = (addr >> PAGE_SHIFT) & (sets - 1); set_base *= ways; return set_base; } static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr) { return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets, vcpu_e500->gtlb_params[0].ways); } static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int esel = get_tlb_esel_bit(vcpu); if (tlbsel == 0) { esel &= vcpu_e500->gtlb_params[0].ways - 1; esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2); } else { esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1; } return esel; } /* Search the guest TLB for a matching entry. */ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t eaddr, int tlbsel, unsigned int pid, int as) { int size = vcpu_e500->gtlb_params[tlbsel].entries; unsigned int set_base, offset; int i; if (tlbsel == 0) { set_base = gtlb0_set_base(vcpu_e500, eaddr); size = vcpu_e500->gtlb_params[0].ways; } else { if (eaddr < vcpu_e500->tlb1_min_eaddr || eaddr > vcpu_e500->tlb1_max_eaddr) return -1; set_base = 0; } offset = vcpu_e500->gtlb_offset[tlbsel]; for (i = 0; i < size; i++) { struct kvm_book3e_206_tlb_entry *tlbe = &vcpu_e500->gtlb_arch[offset + set_base + i]; unsigned int tid; if (eaddr < get_tlb_eaddr(tlbe)) continue; if (eaddr > get_tlb_end(tlbe)) continue; tid = get_tlb_tid(tlbe); if (tid && (tid != pid)) continue; if (!get_tlb_v(tlbe)) continue; if (get_tlb_ts(tlbe) != as && as != -1) continue; return set_base + i; } return -1; } static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, gva_t eaddr, int as) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); unsigned int victim, tsized; int tlbsel; /* since we only have two TLBs, only lower bit is used. */ tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); vcpu->arch.shared->mas1 = MAS1_VALID | (as ? 
MAS1_TS : 0) | MAS1_TID(get_tlbmiss_tid(vcpu)) | MAS1_TSIZE(tsized); vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK); vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1) | (get_cur_pid(vcpu) << 16) | (as ? MAS6_SAS : 0); } static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500) { int size = vcpu_e500->gtlb_params[1].entries; unsigned int offset; gva_t eaddr; int i; vcpu_e500->tlb1_min_eaddr = ~0UL; vcpu_e500->tlb1_max_eaddr = 0; offset = vcpu_e500->gtlb_offset[1]; for (i = 0; i < size; i++) { struct kvm_book3e_206_tlb_entry *tlbe = &vcpu_e500->gtlb_arch[offset + i]; if (!get_tlb_v(tlbe)) continue; eaddr = get_tlb_eaddr(tlbe); vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, eaddr); eaddr = get_tlb_end(tlbe); vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, eaddr); } } static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500, struct kvm_book3e_206_tlb_entry *gtlbe) { unsigned long start, end, size; size = get_tlb_bytes(gtlbe); start = get_tlb_eaddr(gtlbe) & ~(size - 1); end = start + size - 1; return vcpu_e500->tlb1_min_eaddr == start || vcpu_e500->tlb1_max_eaddr == end; } /* This function is supposed to be called for a adding a new valid tlb entry */ static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu, struct kvm_book3e_206_tlb_entry *gtlbe) { unsigned long start, end, size; struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); if (!get_tlb_v(gtlbe)) return; size = get_tlb_bytes(gtlbe); start = get_tlb_eaddr(gtlbe) & ~(size - 1); end = start + size - 1; vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start); vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end); } static inline int kvmppc_e500_gtlbe_invalidate( struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int esel) { struct kvm_book3e_206_tlb_entry *gtlbe = get_entry(vcpu_e500, tlbsel, esel); if (unlikely(get_tlb_iprot(gtlbe))) return -1; if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) kvmppc_recalc_tlb1map_range(vcpu_e500); gtlbe->mas1 = 0; return 0; } int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) { int esel; if (value & MMUCSR0_TLB0FI) for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++) kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel); if (value & MMUCSR0_TLB1FI) for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++) kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); /* Invalidate all host shadow mappings */ kvmppc_core_flush_tlb(&vcpu_e500->vcpu); return EMULATE_DONE; } int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); unsigned int ia; int esel, tlbsel; ia = (ea >> 2) & 0x1; /* since we only have two TLBs, only lower bit is used. 
*/ tlbsel = (ea >> 3) & 0x1; if (ia) { /* invalidate all entries */ for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); } else { ea &= 0xfffff000; esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, get_cur_pid(vcpu), -1); if (esel >= 0) kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); } /* Invalidate all host shadow mappings */ kvmppc_core_flush_tlb(&vcpu_e500->vcpu); return EMULATE_DONE; } static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int pid, int type) { struct kvm_book3e_206_tlb_entry *tlbe; int tid, esel; /* invalidate all entries */ for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) { tlbe = get_entry(vcpu_e500, tlbsel, esel); tid = get_tlb_tid(tlbe); if (type == 0 || tid == pid) { inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); } } } static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid, gva_t ea) { int tlbsel, esel; for (tlbsel = 0; tlbsel < 2; tlbsel++) { esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1); if (esel >= 0) { inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); break; } } } int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int pid = get_cur_spid(vcpu); if (type == 0 || type == 1) { tlbilx_all(vcpu_e500, 0, pid, type); tlbilx_all(vcpu_e500, 1, pid, type); } else if (type == 3) { tlbilx_one(vcpu_e500, pid, ea); } return EMULATE_DONE; } int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int tlbsel, esel; struct kvm_book3e_206_tlb_entry *gtlbe; tlbsel = get_tlb_tlbsel(vcpu); esel = get_tlb_esel(vcpu, tlbsel); gtlbe = get_entry(vcpu_e500, tlbsel, esel); vcpu->arch.shared->mas0 &= ~MAS0_NV(~0); vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); vcpu->arch.shared->mas1 = gtlbe->mas1; vcpu->arch.shared->mas2 = gtlbe->mas2; vcpu->arch.shared->mas7_3 = gtlbe->mas7_3; return EMULATE_DONE; } int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int as = !!get_cur_sas(vcpu); unsigned int pid = get_cur_spid(vcpu); int esel, tlbsel; struct kvm_book3e_206_tlb_entry *gtlbe = NULL; for (tlbsel = 0; tlbsel < 2; tlbsel++) { esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); if (esel >= 0) { gtlbe = get_entry(vcpu_e500, tlbsel, esel); break; } } if (gtlbe) { esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1; vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel) | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); vcpu->arch.shared->mas1 = gtlbe->mas1; vcpu->arch.shared->mas2 = gtlbe->mas2; vcpu->arch.shared->mas7_3 = gtlbe->mas7_3; } else { int victim; /* since we only have two TLBs, only lower bit is used. */ tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1; victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); vcpu->arch.shared->mas1 = (vcpu->arch.shared->mas6 & MAS6_SPID0) | ((vcpu->arch.shared->mas6 & MAS6_SAS) ? 
MAS1_TS : 0) | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0)); vcpu->arch.shared->mas2 &= MAS2_EPN; vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK; vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; } kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); return EMULATE_DONE; } int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct kvm_book3e_206_tlb_entry *gtlbe; int tlbsel, esel; int recal = 0; int idx; tlbsel = get_tlb_tlbsel(vcpu); esel = get_tlb_esel(vcpu, tlbsel); gtlbe = get_entry(vcpu_e500, tlbsel, esel); if (get_tlb_v(gtlbe)) { inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); if ((tlbsel == 1) && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) recal = 1; } gtlbe->mas1 = vcpu->arch.shared->mas1; gtlbe->mas2 = vcpu->arch.shared->mas2; if (!(vcpu->arch.shared->msr & MSR_CM)) gtlbe->mas2 &= 0xffffffffUL; gtlbe->mas7_3 = vcpu->arch.shared->mas7_3; trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, gtlbe->mas2, gtlbe->mas7_3); if (tlbsel == 1) { /* * If a valid tlb1 entry is overwritten then recalculate the * min/max TLB1 map address range otherwise no need to look * in tlb1 array. */ if (recal) kvmppc_recalc_tlb1map_range(vcpu_e500); else kvmppc_set_tlb1map_range(vcpu, gtlbe); } idx = srcu_read_lock(&vcpu->kvm->srcu); /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ if (tlbe_is_host_safe(vcpu, gtlbe)) { u64 eaddr = get_tlb_eaddr(gtlbe); u64 raddr = get_tlb_raddr(gtlbe); if (tlbsel == 0) { gtlbe->mas1 &= ~MAS1_TSIZE(~0); gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K); } /* Premap the faulting page */ kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel)); } srcu_read_unlock(&vcpu->kvm->srcu, idx); kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); return EMULATE_DONE; } static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, int as) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int esel, tlbsel; for (tlbsel = 0; tlbsel < 2; tlbsel++) { esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as); if (esel >= 0) return index_of(tlbsel, esel); } return -1; } /* 'linear_address' is actually an encoding of AS|PID|EADDR . */ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { int index; gva_t eaddr; u8 pid; u8 as; eaddr = tr->linear_address; pid = (tr->linear_address >> 32) & 0xff; as = (tr->linear_address >> 40) & 0x1; index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as); if (index < 0) { tr->valid = 0; return 0; } tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); /* XXX what does "writeable" and "usermode" even mean? 
*/ tr->valid = 1; return 0; } int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) { unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); } int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) { unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); } void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) { unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as); } void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) { unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as); } gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index, gva_t eaddr) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct kvm_book3e_206_tlb_entry *gtlbe; u64 pgmask; gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index)); pgmask = get_tlb_bytes(gtlbe) - 1; return get_tlb_raddr(gtlbe) | (eaddr & pgmask); } /*****************************************/ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) { int i; kvmppc_core_flush_tlb(&vcpu_e500->vcpu); kfree(vcpu_e500->g2h_tlb1_map); kfree(vcpu_e500->gtlb_priv[0]); kfree(vcpu_e500->gtlb_priv[1]); if (vcpu_e500->shared_tlb_pages) { vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch, PAGE_SIZE))); for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) { set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]); put_page(vcpu_e500->shared_tlb_pages[i]); } vcpu_e500->num_shared_tlb_pages = 0; kfree(vcpu_e500->shared_tlb_pages); vcpu_e500->shared_tlb_pages = NULL; } else { kfree(vcpu_e500->gtlb_arch); } vcpu_e500->gtlb_arch = NULL; } void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { sregs->u.e.mas0 = vcpu->arch.shared->mas0; sregs->u.e.mas1 = vcpu->arch.shared->mas1; sregs->u.e.mas2 = vcpu->arch.shared->mas2; sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3; sregs->u.e.mas4 = vcpu->arch.shared->mas4; sregs->u.e.mas6 = vcpu->arch.shared->mas6; sregs->u.e.mmucfg = vcpu->arch.mmucfg; sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0]; sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1]; sregs->u.e.tlbcfg[2] = 0; sregs->u.e.tlbcfg[3] = 0; } int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { vcpu->arch.shared->mas0 = sregs->u.e.mas0; vcpu->arch.shared->mas1 = sregs->u.e.mas1; vcpu->arch.shared->mas2 = sregs->u.e.mas2; vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3; vcpu->arch.shared->mas4 = sregs->u.e.mas4; vcpu->arch.shared->mas6 = sregs->u.e.mas6; } return 0; } int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; long int i; switch (id) { case KVM_REG_PPC_MAS0: *val = get_reg_val(id, vcpu->arch.shared->mas0); break; case KVM_REG_PPC_MAS1: *val = get_reg_val(id, vcpu->arch.shared->mas1); break; case KVM_REG_PPC_MAS2: *val = get_reg_val(id, vcpu->arch.shared->mas2); break; case KVM_REG_PPC_MAS7_3: *val = get_reg_val(id, vcpu->arch.shared->mas7_3); break; case KVM_REG_PPC_MAS4: *val = get_reg_val(id, vcpu->arch.shared->mas4); break; case KVM_REG_PPC_MAS6: *val = get_reg_val(id, vcpu->arch.shared->mas6); break; case KVM_REG_PPC_MMUCFG: *val = get_reg_val(id, vcpu->arch.mmucfg); break; case KVM_REG_PPC_EPTCFG: *val = get_reg_val(id, vcpu->arch.eptcfg); break; case KVM_REG_PPC_TLB0CFG: case KVM_REG_PPC_TLB1CFG: case KVM_REG_PPC_TLB2CFG: case 
KVM_REG_PPC_TLB3CFG: i = id - KVM_REG_PPC_TLB0CFG; *val = get_reg_val(id, vcpu->arch.tlbcfg[i]); break; case KVM_REG_PPC_TLB0PS: case KVM_REG_PPC_TLB1PS: case KVM_REG_PPC_TLB2PS: case KVM_REG_PPC_TLB3PS: i = id - KVM_REG_PPC_TLB0PS; *val = get_reg_val(id, vcpu->arch.tlbps[i]); break; default: r = -EINVAL; break; } return r; } int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; long int i; switch (id) { case KVM_REG_PPC_MAS0: vcpu->arch.shared->mas0 = set_reg_val(id, *val); break; case KVM_REG_PPC_MAS1: vcpu->arch.shared->mas1 = set_reg_val(id, *val); break; case KVM_REG_PPC_MAS2: vcpu->arch.shared->mas2 = set_reg_val(id, *val); break; case KVM_REG_PPC_MAS7_3: vcpu->arch.shared->mas7_3 = set_reg_val(id, *val); break; case KVM_REG_PPC_MAS4: vcpu->arch.shared->mas4 = set_reg_val(id, *val); break; case KVM_REG_PPC_MAS6: vcpu->arch.shared->mas6 = set_reg_val(id, *val); break; /* Only allow MMU registers to be set to the config supported by KVM */ case KVM_REG_PPC_MMUCFG: { u32 reg = set_reg_val(id, *val); if (reg != vcpu->arch.mmucfg) r = -EINVAL; break; } case KVM_REG_PPC_EPTCFG: { u32 reg = set_reg_val(id, *val); if (reg != vcpu->arch.eptcfg) r = -EINVAL; break; } case KVM_REG_PPC_TLB0CFG: case KVM_REG_PPC_TLB1CFG: case KVM_REG_PPC_TLB2CFG: case KVM_REG_PPC_TLB3CFG: { /* MMU geometry (N_ENTRY/ASSOC) can be set only using SW_TLB */ u32 reg = set_reg_val(id, *val); i = id - KVM_REG_PPC_TLB0CFG; if (reg != vcpu->arch.tlbcfg[i]) r = -EINVAL; break; } case KVM_REG_PPC_TLB0PS: case KVM_REG_PPC_TLB1PS: case KVM_REG_PPC_TLB2PS: case KVM_REG_PPC_TLB3PS: { u32 reg = set_reg_val(id, *val); i = id - KVM_REG_PPC_TLB0PS; if (reg != vcpu->arch.tlbps[i]) r = -EINVAL; break; } default: r = -EINVAL; break; } return r; } static int vcpu_mmu_geometry_update(struct kvm_vcpu *vcpu, struct kvm_book3e_206_tlb_params *params) { vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); if (params->tlb_sizes[0] <= 2048) vcpu->arch.tlbcfg[0] |= params->tlb_sizes[0]; vcpu->arch.tlbcfg[0] |= params->tlb_ways[0] << TLBnCFG_ASSOC_SHIFT; vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); vcpu->arch.tlbcfg[1] |= params->tlb_sizes[1]; vcpu->arch.tlbcfg[1] |= params->tlb_ways[1] << TLBnCFG_ASSOC_SHIFT; return 0; } int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, struct kvm_config_tlb *cfg) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct kvm_book3e_206_tlb_params params; char *virt; struct page **pages; struct tlbe_priv *privs[2] = {}; u64 *g2h_bitmap; size_t array_len; u32 sets; int num_pages, ret, i; if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV) return -EINVAL; if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params, sizeof(params))) return -EFAULT; if (params.tlb_sizes[1] > 64) return -EINVAL; if (params.tlb_ways[1] != params.tlb_sizes[1]) return -EINVAL; if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0) return -EINVAL; if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0) return -EINVAL; if (!is_power_of_2(params.tlb_ways[0])) return -EINVAL; sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]); if (!is_power_of_2(sets)) return -EINVAL; array_len = params.tlb_sizes[0] + params.tlb_sizes[1]; array_len *= sizeof(struct kvm_book3e_206_tlb_entry); if (cfg->array_len < array_len) return -EINVAL; num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) - cfg->array / PAGE_SIZE; pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL); if (!pages) return -ENOMEM; ret = get_user_pages_fast(cfg->array, num_pages, 
FOLL_WRITE, pages); if (ret < 0) goto free_pages; if (ret != num_pages) { num_pages = ret; ret = -EFAULT; goto put_pages; } virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL); if (!virt) { ret = -ENOMEM; goto put_pages; } privs[0] = kcalloc(params.tlb_sizes[0], sizeof(*privs[0]), GFP_KERNEL); if (!privs[0]) { ret = -ENOMEM; goto put_pages; } privs[1] = kcalloc(params.tlb_sizes[1], sizeof(*privs[1]), GFP_KERNEL); if (!privs[1]) { ret = -ENOMEM; goto free_privs_first; } g2h_bitmap = kcalloc(params.tlb_sizes[1], sizeof(*g2h_bitmap), GFP_KERNEL); if (!g2h_bitmap) { ret = -ENOMEM; goto free_privs_second; } free_gtlb(vcpu_e500); vcpu_e500->gtlb_priv[0] = privs[0]; vcpu_e500->gtlb_priv[1] = privs[1]; vcpu_e500->g2h_tlb1_map = g2h_bitmap; vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *) (virt + (cfg->array & (PAGE_SIZE - 1))); vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0]; vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1]; vcpu_e500->gtlb_offset[0] = 0; vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; /* Update vcpu's MMU geometry based on SW_TLB input */ vcpu_mmu_geometry_update(vcpu, &params); vcpu_e500->shared_tlb_pages = pages; vcpu_e500->num_shared_tlb_pages = num_pages; vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0]; vcpu_e500->gtlb_params[0].sets = sets; vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; vcpu_e500->gtlb_params[1].sets = 1; kvmppc_recalc_tlb1map_range(vcpu_e500); return 0; free_privs_second: kfree(privs[1]); free_privs_first: kfree(privs[0]); put_pages: for (i = 0; i < num_pages; i++) put_page(pages[i]); free_pages: kfree(pages); return ret; } int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, struct kvm_dirty_tlb *dirty) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); kvmppc_recalc_tlb1map_range(vcpu_e500); kvmppc_core_flush_tlb(vcpu); return 0; } /* Vcpu's MMU default configuration */ static int vcpu_mmu_init(struct kvm_vcpu *vcpu, struct kvmppc_e500_tlb_params *params) { /* Initialize RASIZE, PIDSIZE, NTLBS and MAVN fields with host values*/ vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE; /* Initialize TLBnCFG fields with host values and SW_TLB geometry*/ vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) & ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); vcpu->arch.tlbcfg[0] |= params[0].entries; vcpu->arch.tlbcfg[0] |= params[0].ways << TLBnCFG_ASSOC_SHIFT; vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) & ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); vcpu->arch.tlbcfg[1] |= params[1].entries; vcpu->arch.tlbcfg[1] |= params[1].ways << TLBnCFG_ASSOC_SHIFT; if (has_feature(vcpu, VCPU_FTR_MMU_V2)) { vcpu->arch.tlbps[0] = mfspr(SPRN_TLB0PS); vcpu->arch.tlbps[1] = mfspr(SPRN_TLB1PS); vcpu->arch.mmucfg &= ~MMUCFG_LRAT; /* Guest mmu emulation currently doesn't handle E.PT */ vcpu->arch.eptcfg = 0; vcpu->arch.tlbcfg[0] &= ~TLBnCFG_PT; vcpu->arch.tlbcfg[1] &= ~TLBnCFG_IND; } return 0; } int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) { struct kvm_vcpu *vcpu = &vcpu_e500->vcpu; if (e500_mmu_host_init(vcpu_e500)) goto free_vcpu; vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE; vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE; vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM; vcpu_e500->gtlb_params[0].sets = KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM; vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE; vcpu_e500->gtlb_params[1].sets = 1; vcpu_e500->gtlb_arch = kmalloc_array(KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE, sizeof(*vcpu_e500->gtlb_arch), GFP_KERNEL); if (!vcpu_e500->gtlb_arch) return -ENOMEM; vcpu_e500->gtlb_offset[0] = 0; 
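	/* TLB1 entries are stored right after the TLB0 entries in gtlb_arch */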
vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE; vcpu_e500->gtlb_priv[0] = kcalloc(vcpu_e500->gtlb_params[0].entries, sizeof(struct tlbe_ref), GFP_KERNEL); if (!vcpu_e500->gtlb_priv[0]) goto free_vcpu; vcpu_e500->gtlb_priv[1] = kcalloc(vcpu_e500->gtlb_params[1].entries, sizeof(struct tlbe_ref), GFP_KERNEL); if (!vcpu_e500->gtlb_priv[1]) goto free_vcpu; vcpu_e500->g2h_tlb1_map = kcalloc(vcpu_e500->gtlb_params[1].entries, sizeof(*vcpu_e500->g2h_tlb1_map), GFP_KERNEL); if (!vcpu_e500->g2h_tlb1_map) goto free_vcpu; vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params); kvmppc_recalc_tlb1map_range(vcpu_e500); return 0; free_vcpu: free_gtlb(vcpu_e500); return -1; } void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) { free_gtlb(vcpu_e500); e500_mmu_host_uninit(vcpu_e500); }
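/*
 * Editor's note (illustrative, not part of the upstream file): the guest
 * TLB0 is modelled as a set-associative array, and tlb0_set_base() above
 * returns the index of the first entry of the set an address hashes to:
 *
 *	set      = (addr >> PAGE_SHIFT) & (sets - 1);
 *	set_base = set * ways;
 *
 * With a hypothetical geometry of 512 entries in 4 ways (128 sets), the
 * address 0x10003000 falls in set (0x10003 & 127) = 3, so
 * kvmppc_e500_tlb_index() scans TLB0 entries 12..15 for it.
 */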
linux-master
arch/powerpc/kvm/e500_mmu.c