Dataset columns: python_code (string, lengths 0-1.8M), repo_name (string, 7 classes), file_path (string, lengths 5-99)
// SPDX-License-Identifier: GPL-2.0-or-later /* * Intel CPU Microcode Update Driver for Linux * * Copyright (C) 2000-2006 Tigran Aivazian <[email protected]> * 2006 Shaohua Li <[email protected]> * * Intel CPU microcode early update for Linux * * Copyright (C) 2012 Fenghua Yu <[email protected]> * H Peter Anvin" <[email protected]> */ #define pr_fmt(fmt) "microcode: " fmt #include <linux/earlycpio.h> #include <linux/firmware.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/initrd.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/uio.h> #include <linux/mm.h> #include <asm/intel-family.h> #include <asm/processor.h> #include <asm/tlbflush.h> #include <asm/setup.h> #include <asm/msr.h> #include "internal.h" static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; /* Current microcode patch used in early patching on the APs. */ static struct microcode_intel *intel_ucode_patch; /* last level cache size per core */ static int llc_size_per_core; /* microcode format is extended from prescott processors */ struct extended_signature { unsigned int sig; unsigned int pf; unsigned int cksum; }; struct extended_sigtable { unsigned int count; unsigned int cksum; unsigned int reserved[3]; struct extended_signature sigs[]; }; #define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) #define EXT_HEADER_SIZE (sizeof(struct extended_sigtable)) #define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature)) static inline unsigned int get_totalsize(struct microcode_header_intel *hdr) { return hdr->datasize ? hdr->totalsize : DEFAULT_UCODE_TOTALSIZE; } static inline unsigned int exttable_size(struct extended_sigtable *et) { return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE; } int intel_cpu_collect_info(struct ucode_cpu_info *uci) { unsigned int val[2]; unsigned int family, model; struct cpu_signature csig = { 0 }; unsigned int eax, ebx, ecx, edx; memset(uci, 0, sizeof(*uci)); eax = 0x00000001; ecx = 0; native_cpuid(&eax, &ebx, &ecx, &edx); csig.sig = eax; family = x86_family(eax); model = x86_model(eax); if (model >= 5 || family > 6) { /* get processor flags from MSR 0x17 */ native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); csig.pf = 1 << ((val[1] >> 18) & 7); } csig.rev = intel_get_microcode_revision(); uci->cpu_sig = csig; return 0; } EXPORT_SYMBOL_GPL(intel_cpu_collect_info); /* * Returns 1 if update has been found, 0 otherwise. */ int intel_find_matching_signature(void *mc, unsigned int csig, int cpf) { struct microcode_header_intel *mc_hdr = mc; struct extended_sigtable *ext_hdr; struct extended_signature *ext_sig; int i; if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf)) return 1; /* Look for ext. headers: */ if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE) return 0; ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE; ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE; for (i = 0; i < ext_hdr->count; i++) { if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf)) return 1; ext_sig++; } return 0; } EXPORT_SYMBOL_GPL(intel_find_matching_signature); /** * intel_microcode_sanity_check() - Sanity check microcode file. * @mc: Pointer to the microcode file contents. * @print_err: Display failure reason if true, silent if false. * @hdr_type: Type of file, i.e. normal microcode file or In Field Scan file. * Validate if the microcode header type matches with the type * specified here. 
* * Validate certain header fields and verify if computed checksum matches * with the one specified in the header. * * Return: 0 if the file passes all the checks, -EINVAL if any of the checks * fail. */ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type) { unsigned long total_size, data_size, ext_table_size; struct microcode_header_intel *mc_header = mc; struct extended_sigtable *ext_header = NULL; u32 sum, orig_sum, ext_sigcount = 0, i; struct extended_signature *ext_sig; total_size = get_totalsize(mc_header); data_size = intel_microcode_get_datasize(mc_header); if (data_size + MC_HEADER_SIZE > total_size) { if (print_err) pr_err("Error: bad microcode data file size.\n"); return -EINVAL; } if (mc_header->ldrver != 1 || mc_header->hdrver != hdr_type) { if (print_err) pr_err("Error: invalid/unknown microcode update format. Header type %d\n", mc_header->hdrver); return -EINVAL; } ext_table_size = total_size - (MC_HEADER_SIZE + data_size); if (ext_table_size) { u32 ext_table_sum = 0; u32 *ext_tablep; if (ext_table_size < EXT_HEADER_SIZE || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) { if (print_err) pr_err("Error: truncated extended signature table.\n"); return -EINVAL; } ext_header = mc + MC_HEADER_SIZE + data_size; if (ext_table_size != exttable_size(ext_header)) { if (print_err) pr_err("Error: extended signature table size mismatch.\n"); return -EFAULT; } ext_sigcount = ext_header->count; /* * Check extended table checksum: the sum of all dwords that * comprise a valid table must be 0. */ ext_tablep = (u32 *)ext_header; i = ext_table_size / sizeof(u32); while (i--) ext_table_sum += ext_tablep[i]; if (ext_table_sum) { if (print_err) pr_warn("Bad extended signature table checksum, aborting.\n"); return -EINVAL; } } /* * Calculate the checksum of update data and header. The checksum of * valid update data and header including the extended signature table * must be 0. */ orig_sum = 0; i = (MC_HEADER_SIZE + data_size) / sizeof(u32); while (i--) orig_sum += ((u32 *)mc)[i]; if (orig_sum) { if (print_err) pr_err("Bad microcode data checksum, aborting.\n"); return -EINVAL; } if (!ext_table_size) return 0; /* * Check extended signature checksum: 0 => valid. */ for (i = 0; i < ext_sigcount; i++) { ext_sig = (void *)ext_header + EXT_HEADER_SIZE + EXT_SIGNATURE_SIZE * i; sum = (mc_header->sig + mc_header->pf + mc_header->cksum) - (ext_sig->sig + ext_sig->pf + ext_sig->cksum); if (sum) { if (print_err) pr_err("Bad extended signature checksum, aborting.\n"); return -EINVAL; } } return 0; } EXPORT_SYMBOL_GPL(intel_microcode_sanity_check); /* * Returns 1 if update has been found, 0 otherwise. 
*/ static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev) { struct microcode_header_intel *mc_hdr = mc; if (mc_hdr->rev <= new_rev) return 0; return intel_find_matching_signature(mc, csig, cpf); } static struct ucode_patch *memdup_patch(void *data, unsigned int size) { struct ucode_patch *p; p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL); if (!p) return NULL; p->data = kmemdup(data, size, GFP_KERNEL); if (!p->data) { kfree(p); return NULL; } return p; } static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size) { struct microcode_header_intel *mc_hdr, *mc_saved_hdr; struct ucode_patch *iter, *tmp, *p = NULL; bool prev_found = false; unsigned int sig, pf; mc_hdr = (struct microcode_header_intel *)data; list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) { mc_saved_hdr = (struct microcode_header_intel *)iter->data; sig = mc_saved_hdr->sig; pf = mc_saved_hdr->pf; if (intel_find_matching_signature(data, sig, pf)) { prev_found = true; if (mc_hdr->rev <= mc_saved_hdr->rev) continue; p = memdup_patch(data, size); if (!p) pr_err("Error allocating buffer %p\n", data); else { list_replace(&iter->plist, &p->plist); kfree(iter->data); kfree(iter); } } } /* * There weren't any previous patches found in the list cache; save the * newly found. */ if (!prev_found) { p = memdup_patch(data, size); if (!p) pr_err("Error allocating buffer for %p\n", data); else list_add_tail(&p->plist, &microcode_cache); } if (!p) return; if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf)) return; /* * Save for early loading. On 32-bit, that needs to be a physical * address as the APs are running from physical addresses, before * paging has been enabled. */ if (IS_ENABLED(CONFIG_X86_32)) intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data); else intel_ucode_patch = p->data; } /* * Get microcode matching with BSP's model. Only CPUs with the same model as * BSP can stay in the platform. */ static struct microcode_intel * scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) { struct microcode_header_intel *mc_header; struct microcode_intel *patch = NULL; unsigned int mc_size; while (size) { if (size < sizeof(struct microcode_header_intel)) break; mc_header = (struct microcode_header_intel *)data; mc_size = get_totalsize(mc_header); if (!mc_size || mc_size > size || intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0) break; size -= mc_size; if (!intel_find_matching_signature(data, uci->cpu_sig.sig, uci->cpu_sig.pf)) { data += mc_size; continue; } if (save) { save_microcode_patch(uci, data, mc_size); goto next; } if (!patch) { if (!has_newer_microcode(data, uci->cpu_sig.sig, uci->cpu_sig.pf, uci->cpu_sig.rev)) goto next; } else { struct microcode_header_intel *phdr = &patch->hdr; if (!has_newer_microcode(data, phdr->sig, phdr->pf, phdr->rev)) goto next; } /* We have a newer patch, save it. 
*/ patch = data; next: data += mc_size; } if (size) return NULL; return patch; } static bool load_builtin_intel_microcode(struct cpio_data *cp) { unsigned int eax = 1, ebx, ecx = 0, edx; struct firmware fw; char name[30]; if (IS_ENABLED(CONFIG_X86_32)) return false; native_cpuid(&eax, &ebx, &ecx, &edx); sprintf(name, "intel-ucode/%02x-%02x-%02x", x86_family(eax), x86_model(eax), x86_stepping(eax)); if (firmware_request_builtin(&fw, name)) { cp->size = fw.size; cp->data = (void *)fw.data; return true; } return false; } static void print_ucode_info(int old_rev, int new_rev, unsigned int date) { pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", old_rev, new_rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); } #ifdef CONFIG_X86_32 static int delay_ucode_info; static int current_mc_date; static int early_old_rev; /* * Print early updated ucode info after printk works. This is delayed info dump. */ void show_ucode_info_early(void) { struct ucode_cpu_info uci; if (delay_ucode_info) { intel_cpu_collect_info(&uci); print_ucode_info(early_old_rev, uci.cpu_sig.rev, current_mc_date); delay_ucode_info = 0; } } /* * At this point, we can not call printk() yet. Delay printing microcode info in * show_ucode_info_early() until printk() works. */ static void print_ucode(int old_rev, int new_rev, int date) { int *delay_ucode_info_p; int *current_mc_date_p; int *early_old_rev_p; delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info); current_mc_date_p = (int *)__pa_nodebug(&current_mc_date); early_old_rev_p = (int *)__pa_nodebug(&early_old_rev); *delay_ucode_info_p = 1; *current_mc_date_p = date; *early_old_rev_p = old_rev; } #else static inline void print_ucode(int old_rev, int new_rev, int date) { print_ucode_info(old_rev, new_rev, date); } #endif static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) { struct microcode_intel *mc; u32 rev, old_rev; mc = uci->mc; if (!mc) return 0; /* * Save us the MSR write below - which is a particular expensive * operation - when the other hyperthread has updated the microcode * already. */ rev = intel_get_microcode_revision(); if (rev >= mc->hdr.rev) { uci->cpu_sig.rev = rev; return UCODE_OK; } old_rev = rev; /* * Writeback and invalidate caches before updating microcode to avoid * internal issues depending on what the microcode is updating. */ native_wbinvd(); /* write microcode via MSR 0x79 */ native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); rev = intel_get_microcode_revision(); if (rev != mc->hdr.rev) return -1; uci->cpu_sig.rev = rev; if (early) print_ucode(old_rev, uci->cpu_sig.rev, mc->hdr.date); else print_ucode_info(old_rev, uci->cpu_sig.rev, mc->hdr.date); return 0; } int __init save_microcode_in_initrd_intel(void) { struct ucode_cpu_info uci; struct cpio_data cp; /* * initrd is going away, clear patch ptr. We will scan the microcode one * last time before jettisoning and save a patch, if found. Then we will * update that pointer too, with a stable patch address to use when * resuming the cores. */ intel_ucode_patch = NULL; if (!load_builtin_intel_microcode(&cp)) cp = find_microcode_in_initrd(ucode_path, false); if (!(cp.data && cp.size)) return 0; intel_cpu_collect_info(&uci); scan_microcode(cp.data, cp.size, &uci, true); return 0; } /* * @res_patch, output: a pointer to the patch we found. 
*/ static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci) { static const char *path; struct cpio_data cp; bool use_pa; if (IS_ENABLED(CONFIG_X86_32)) { path = (const char *)__pa_nodebug(ucode_path); use_pa = true; } else { path = ucode_path; use_pa = false; } /* try built-in microcode first */ if (!load_builtin_intel_microcode(&cp)) cp = find_microcode_in_initrd(path, use_pa); if (!(cp.data && cp.size)) return NULL; intel_cpu_collect_info(uci); return scan_microcode(cp.data, cp.size, uci, false); } void __init load_ucode_intel_bsp(void) { struct microcode_intel *patch; struct ucode_cpu_info uci; patch = __load_ucode_intel(&uci); if (!patch) return; uci.mc = patch; apply_microcode_early(&uci, true); } void load_ucode_intel_ap(void) { struct microcode_intel *patch, **iup; struct ucode_cpu_info uci; if (IS_ENABLED(CONFIG_X86_32)) iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch); else iup = &intel_ucode_patch; if (!*iup) { patch = __load_ucode_intel(&uci); if (!patch) return; *iup = patch; } uci.mc = *iup; apply_microcode_early(&uci, true); } static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) { struct microcode_header_intel *phdr; struct ucode_patch *iter, *tmp; list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) { phdr = (struct microcode_header_intel *)iter->data; if (phdr->rev <= uci->cpu_sig.rev) continue; if (!intel_find_matching_signature(phdr, uci->cpu_sig.sig, uci->cpu_sig.pf)) continue; return iter->data; } return NULL; } void reload_ucode_intel(void) { struct microcode_intel *p; struct ucode_cpu_info uci; intel_cpu_collect_info(&uci); p = find_patch(&uci); if (!p) return; uci.mc = p; apply_microcode_early(&uci, false); } static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) { struct cpuinfo_x86 *c = &cpu_data(cpu_num); unsigned int val[2]; memset(csig, 0, sizeof(*csig)); csig->sig = cpuid_eax(0x00000001); if ((c->x86_model >= 5) || (c->x86 > 6)) { /* get processor flags from MSR 0x17 */ rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); csig->pf = 1 << ((val[1] >> 18) & 7); } csig->rev = c->microcode; return 0; } static enum ucode_state apply_microcode_intel(int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct cpuinfo_x86 *c = &cpu_data(cpu); bool bsp = c->cpu_index == boot_cpu_data.cpu_index; struct microcode_intel *mc; enum ucode_state ret; static int prev_rev; u32 rev; /* We should bind the task to the CPU */ if (WARN_ON(raw_smp_processor_id() != cpu)) return UCODE_ERROR; /* Look for a newer patch in our cache: */ mc = find_patch(uci); if (!mc) { mc = uci->mc; if (!mc) return UCODE_NFOUND; } /* * Save us the MSR write below - which is a particular expensive * operation - when the other hyperthread has updated the microcode * already. */ rev = intel_get_microcode_revision(); if (rev >= mc->hdr.rev) { ret = UCODE_OK; goto out; } /* * Writeback and invalidate caches before updating microcode to avoid * internal issues depending on what the microcode is updating. 
*/ native_wbinvd(); /* write microcode via MSR 0x79 */ wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); rev = intel_get_microcode_revision(); if (rev != mc->hdr.rev) { pr_err("CPU%d update to revision 0x%x failed\n", cpu, mc->hdr.rev); return UCODE_ERROR; } if (bsp && rev != prev_rev) { pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n", rev, mc->hdr.date & 0xffff, mc->hdr.date >> 24, (mc->hdr.date >> 16) & 0xff); prev_rev = rev; } ret = UCODE_UPDATED; out: uci->cpu_sig.rev = rev; c->microcode = rev; /* Update boot_cpu_data's revision too, if we're on the BSP: */ if (bsp) boot_cpu_data.microcode = rev; return ret; } static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; unsigned int curr_mc_size = 0, new_mc_size = 0; enum ucode_state ret = UCODE_OK; int new_rev = uci->cpu_sig.rev; u8 *new_mc = NULL, *mc = NULL; unsigned int csig, cpf; while (iov_iter_count(iter)) { struct microcode_header_intel mc_header; unsigned int mc_size, data_size; u8 *data; if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) { pr_err("error! Truncated or inaccessible header in microcode data file\n"); break; } mc_size = get_totalsize(&mc_header); if (mc_size < sizeof(mc_header)) { pr_err("error! Bad data in microcode data file (totalsize too small)\n"); break; } data_size = mc_size - sizeof(mc_header); if (data_size > iov_iter_count(iter)) { pr_err("error! Bad data in microcode data file (truncated file?)\n"); break; } /* For performance reasons, reuse mc area when possible */ if (!mc || mc_size > curr_mc_size) { vfree(mc); mc = vmalloc(mc_size); if (!mc) break; curr_mc_size = mc_size; } memcpy(mc, &mc_header, sizeof(mc_header)); data = mc + sizeof(mc_header); if (!copy_from_iter_full(data, data_size, iter) || intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) { break; } csig = uci->cpu_sig.sig; cpf = uci->cpu_sig.pf; if (has_newer_microcode(mc, csig, cpf, new_rev)) { vfree(new_mc); new_rev = mc_header.rev; new_mc = mc; new_mc_size = mc_size; mc = NULL; /* trigger new vmalloc */ ret = UCODE_NEW; } } vfree(mc); if (iov_iter_count(iter)) { vfree(new_mc); return UCODE_ERROR; } if (!new_mc) return UCODE_NFOUND; vfree(uci->mc); uci->mc = (struct microcode_intel *)new_mc; /* Save for CPU hotplug */ save_microcode_patch(uci, new_mc, new_mc_size); pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", cpu, new_rev, uci->cpu_sig.rev); return ret; } static bool is_blacklisted(unsigned int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); /* * Late loading on model 79 with microcode revision less than 0x0b000021 * and LLC size per core bigger than 2.5MB may result in a system hang. * This behavior is documented in item BDF90, #334165 (Intel Xeon * Processor E7-8800/4800 v4 Product Family). 
*/ if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X && c->x86_stepping == 0x01 && llc_size_per_core > 2621440 && c->microcode < 0x0b000021) { pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); return true; } return false; } static enum ucode_state request_microcode_fw(int cpu, struct device *device) { struct cpuinfo_x86 *c = &cpu_data(cpu); const struct firmware *firmware; struct iov_iter iter; enum ucode_state ret; struct kvec kvec; char name[30]; if (is_blacklisted(cpu)) return UCODE_NFOUND; sprintf(name, "intel-ucode/%02x-%02x-%02x", c->x86, c->x86_model, c->x86_stepping); if (request_firmware_direct(&firmware, name, device)) { pr_debug("data file %s load failed\n", name); return UCODE_NFOUND; } kvec.iov_base = (void *)firmware->data; kvec.iov_len = firmware->size; iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size); ret = generic_load_microcode(cpu, &iter); release_firmware(firmware); return ret; } static struct microcode_ops microcode_intel_ops = { .request_microcode_fw = request_microcode_fw, .collect_cpu_info = collect_cpu_info, .apply_microcode = apply_microcode_intel, }; static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c) { u64 llc_size = c->x86_cache_size * 1024ULL; do_div(llc_size, c->x86_max_cores); return (int)llc_size; } struct microcode_ops * __init init_intel_microcode(void) { struct cpuinfo_x86 *c = &boot_cpu_data; if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || cpu_has(c, X86_FEATURE_IA64)) { pr_err("Intel CPU family 0x%x not supported\n", c->x86); return NULL; } llc_size_per_core = calc_llc_size_per_core(c); return &microcode_intel_ops; }
linux-master
arch/x86/kernel/cpu/microcode/intel.c
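The checksum rules enforced by intel_microcode_sanity_check() above boil down to one invariant: the update body (MC_HEADER_SIZE + data_size) and the extended signature table must each sum to zero when added up as 32-bit dwords, and each extended signature's checksum is chosen so the same invariant holds per signature. A minimal user-space sketch of that dword-sum rule, assuming a dword-aligned region; the helper name is illustrative and not part of the driver:

#include <stddef.h>
#include <stdint.h>

/*
 * Sketch of the dword-sum invariant used by intel_microcode_sanity_check():
 * a valid region (header + data, or the extended signature table) sums to
 * zero modulo 2^32. Hypothetical stand-alone helper, not a kernel API.
 */
static int ucode_region_sum_is_zero(const void *region, size_t bytes)
{
	const uint32_t *p = region;
	uint32_t sum = 0;
	size_t i;

	if (bytes % sizeof(uint32_t))
		return 0;		/* valid regions are dword multiples */

	for (i = 0; i < bytes / sizeof(uint32_t); i++)
		sum += p[i];		/* unsigned wraparound is intentional */

	return sum == 0;
}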
// SPDX-License-Identifier: GPL-2.0-only /* * AMD CPU Microcode Update Driver for Linux * * This driver allows to upgrade microcode on F10h AMD * CPUs and later. * * Copyright (C) 2008-2011 Advanced Micro Devices Inc. * 2013-2018 Borislav Petkov <[email protected]> * * Author: Peter Oruba <[email protected]> * * Based on work by: * Tigran Aivazian <[email protected]> * * early loader: * Copyright (C) 2013 Advanced Micro Devices, Inc. * * Author: Jacob Shin <[email protected]> * Fixes: Borislav Petkov <[email protected]> */ #define pr_fmt(fmt) "microcode: " fmt #include <linux/earlycpio.h> #include <linux/firmware.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/initrd.h> #include <linux/kernel.h> #include <linux/pci.h> #include <asm/microcode.h> #include <asm/processor.h> #include <asm/setup.h> #include <asm/cpu.h> #include <asm/msr.h> #include "internal.h" #define UCODE_MAGIC 0x00414d44 #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 #define UCODE_UCODE_TYPE 0x00000001 #define SECTION_HDR_SIZE 8 #define CONTAINER_HDR_SZ 12 struct equiv_cpu_entry { u32 installed_cpu; u32 fixed_errata_mask; u32 fixed_errata_compare; u16 equiv_cpu; u16 res; } __packed; struct microcode_header_amd { u32 data_code; u32 patch_id; u16 mc_patch_data_id; u8 mc_patch_data_len; u8 init_flag; u32 mc_patch_data_checksum; u32 nb_dev_id; u32 sb_dev_id; u16 processor_rev_id; u8 nb_rev_id; u8 sb_rev_id; u8 bios_api_rev; u8 reserved1[3]; u32 match_reg[8]; } __packed; struct microcode_amd { struct microcode_header_amd hdr; unsigned int mpb[]; }; #define PATCH_MAX_SIZE (3 * PAGE_SIZE) static struct equiv_cpu_table { unsigned int num_entries; struct equiv_cpu_entry *entry; } equiv_table; /* * This points to the current valid container of microcode patches which we will * save from the initrd/builtin before jettisoning its contents. @mc is the * microcode patch we found to match. */ struct cont_desc { struct microcode_amd *mc; u32 cpuid_1_eax; u32 psize; u8 *data; size_t size; }; static u32 ucode_new_rev; /* * Microcode patch container file is prepended to the initrd in cpio * format. See Documentation/arch/x86/microcode.rst */ static const char ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin"; static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig) { unsigned int i; if (!et || !et->num_entries) return 0; for (i = 0; i < et->num_entries; i++) { struct equiv_cpu_entry *e = &et->entry[i]; if (sig == e->installed_cpu) return e->equiv_cpu; } return 0; } /* * Check whether there is a valid microcode container file at the beginning * of @buf of size @buf_size. Set @early to use this function in the early path. */ static bool verify_container(const u8 *buf, size_t buf_size, bool early) { u32 cont_magic; if (buf_size <= CONTAINER_HDR_SZ) { if (!early) pr_debug("Truncated microcode container header.\n"); return false; } cont_magic = *(const u32 *)buf; if (cont_magic != UCODE_MAGIC) { if (!early) pr_debug("Invalid magic value (0x%08x).\n", cont_magic); return false; } return true; } /* * Check whether there is a valid, non-truncated CPU equivalence table at the * beginning of @buf of size @buf_size. Set @early to use this function in the * early path. 
*/ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) { const u32 *hdr = (const u32 *)buf; u32 cont_type, equiv_tbl_len; if (!verify_container(buf, buf_size, early)) return false; cont_type = hdr[1]; if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) { if (!early) pr_debug("Wrong microcode container equivalence table type: %u.\n", cont_type); return false; } buf_size -= CONTAINER_HDR_SZ; equiv_tbl_len = hdr[2]; if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) || buf_size < equiv_tbl_len) { if (!early) pr_debug("Truncated equivalence table.\n"); return false; } return true; } /* * Check whether there is a valid, non-truncated microcode patch section at the * beginning of @buf of size @buf_size. Set @early to use this function in the * early path. * * On success, @sh_psize returns the patch size according to the section header, * to the caller. */ static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early) { u32 p_type, p_size; const u32 *hdr; if (buf_size < SECTION_HDR_SIZE) { if (!early) pr_debug("Truncated patch section.\n"); return false; } hdr = (const u32 *)buf; p_type = hdr[0]; p_size = hdr[1]; if (p_type != UCODE_UCODE_TYPE) { if (!early) pr_debug("Invalid type field (0x%x) in container file section header.\n", p_type); return false; } if (p_size < sizeof(struct microcode_header_amd)) { if (!early) pr_debug("Patch of size %u too short.\n", p_size); return false; } *sh_psize = p_size; return true; } /* * Check whether the passed remaining file @buf_size is large enough to contain * a patch of the indicated @sh_psize (and also whether this size does not * exceed the per-family maximum). @sh_psize is the size read from the section * header. */ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size) { u32 max_size; if (family >= 0x15) return min_t(u32, sh_psize, buf_size); #define F1XH_MPB_MAX_SIZE 2048 #define F14H_MPB_MAX_SIZE 1824 switch (family) { case 0x10 ... 0x12: max_size = F1XH_MPB_MAX_SIZE; break; case 0x14: max_size = F14H_MPB_MAX_SIZE; break; default: WARN(1, "%s: WTF family: 0x%x\n", __func__, family); return 0; } if (sh_psize > min_t(u32, buf_size, max_size)) return 0; return sh_psize; } /* * Verify the patch in @buf. * * Returns: * negative: on error * positive: patch is not for this family, skip it * 0: success */ static int verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early) { struct microcode_header_amd *mc_hdr; unsigned int ret; u32 sh_psize; u16 proc_id; u8 patch_fam; if (!__verify_patch_section(buf, buf_size, &sh_psize, early)) return -1; /* * The section header length is not included in this indicated size * but is present in the leftover file length so we need to subtract * it before passing this value to the function below. */ buf_size -= SECTION_HDR_SIZE; /* * Check if the remaining buffer is big enough to contain a patch of * size sh_psize, as the section claims. 
*/ if (buf_size < sh_psize) { if (!early) pr_debug("Patch of size %u truncated.\n", sh_psize); return -1; } ret = __verify_patch_size(family, sh_psize, buf_size); if (!ret) { if (!early) pr_debug("Per-family patch size mismatch.\n"); return -1; } *patch_size = sh_psize; mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE); if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { if (!early) pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id); return -1; } proc_id = mc_hdr->processor_rev_id; patch_fam = 0xf + (proc_id >> 12); if (patch_fam != family) return 1; return 0; } /* * This scans the ucode blob for the proper container as we can have multiple * containers glued together. Returns the equivalence ID from the equivalence * table or 0 if none found. * Returns the amount of bytes consumed while scanning. @desc contains all the * data we're going to use in later stages of the application. */ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) { struct equiv_cpu_table table; size_t orig_size = size; u32 *hdr = (u32 *)ucode; u16 eq_id; u8 *buf; if (!verify_equivalence_table(ucode, size, true)) return 0; buf = ucode; table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ); table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry); /* * Find the equivalence ID of our CPU in this table. Even if this table * doesn't contain a patch for the CPU, scan through the whole container * so that it can be skipped in case there are other containers appended. */ eq_id = find_equiv_id(&table, desc->cpuid_1_eax); buf += hdr[2] + CONTAINER_HDR_SZ; size -= hdr[2] + CONTAINER_HDR_SZ; /* * Scan through the rest of the container to find where it ends. We do * some basic sanity-checking too. */ while (size > 0) { struct microcode_amd *mc; u32 patch_size; int ret; ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true); if (ret < 0) { /* * Patch verification failed, skip to the next container, if * there is one. Before exit, check whether that container has * found a patch already. If so, use it. */ goto out; } else if (ret > 0) { goto skip; } mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE); if (eq_id == mc->hdr.processor_rev_id) { desc->psize = patch_size; desc->mc = mc; } skip: /* Skip patch section header too: */ buf += patch_size + SECTION_HDR_SIZE; size -= patch_size + SECTION_HDR_SIZE; } out: /* * If we have found a patch (desc->mc), it means we're looking at the * container which has a patch for this CPU so return 0 to mean, @ucode * already points to the proper container. Otherwise, we return the size * we scanned so that we can advance to the next container in the * buffer. */ if (desc->mc) { desc->data = ucode; desc->size = orig_size - size; return 0; } return orig_size - size; } /* * Scan the ucode blob for the proper container as we can have multiple * containers glued together. */ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc) { while (size) { size_t s = parse_container(ucode, size, desc); if (!s) return; /* catch wraparound */ if (size >= s) { ucode += s; size -= s; } else { return; } } } static int __apply_microcode_amd(struct microcode_amd *mc) { u32 rev, dummy; native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code); /* verify patch application was successful */ native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); if (rev != mc->hdr.patch_id) return -1; return 0; } /* * Early load occurs before we can vmalloc(). 
So we look for the microcode * patch container file in initrd, traverse equivalent cpu table, look for a * matching microcode patch, and update, all in initrd memory in place. * When vmalloc() is available for use later -- on 64-bit during first AP load, * and on 32-bit during save_microcode_in_initrd_amd() -- we can call * load_microcode_amd() to save equivalent cpu table and microcode patches in * kernel heap memory. * * Returns true if container found (sets @desc), false otherwise. */ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) { struct cont_desc desc = { 0 }; struct microcode_amd *mc; u32 rev, dummy, *new_rev; bool ret = false; #ifdef CONFIG_X86_32 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); #else new_rev = &ucode_new_rev; #endif desc.cpuid_1_eax = cpuid_1_eax; scan_containers(ucode, size, &desc); mc = desc.mc; if (!mc) return ret; native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); /* * Allow application of the same revision to pick up SMT-specific * changes even if the revision of the other SMT thread is already * up-to-date. */ if (rev > mc->hdr.patch_id) return ret; if (!__apply_microcode_amd(mc)) { *new_rev = mc->hdr.patch_id; ret = true; } return ret; } static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) { char fw_name[36] = "amd-ucode/microcode_amd.bin"; struct firmware fw; if (IS_ENABLED(CONFIG_X86_32)) return false; if (family >= 0x15) snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", family); if (firmware_request_builtin(&fw, fw_name)) { cp->size = fw.size; cp->data = (void *)fw.data; return true; } return false; } static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret) { struct ucode_cpu_info *uci; struct cpio_data cp; const char *path; bool use_pa; if (IS_ENABLED(CONFIG_X86_32)) { uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info); path = (const char *)__pa_nodebug(ucode_path); use_pa = true; } else { uci = ucode_cpu_info; path = ucode_path; use_pa = false; } if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) cp = find_microcode_in_initrd(path, use_pa); /* Needed in load_microcode_amd() */ uci->cpu_sig.sig = cpuid_1_eax; *ret = cp; } static void apply_ucode_from_containers(unsigned int cpuid_1_eax) { struct cpio_data cp = { }; find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) return; early_apply_microcode(cpuid_1_eax, cp.data, cp.size); } void load_ucode_amd_early(unsigned int cpuid_1_eax) { return apply_ucode_from_containers(cpuid_1_eax); } static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) { struct cont_desc desc = { 0 }; enum ucode_state ret; struct cpio_data cp; cp = find_microcode_in_initrd(ucode_path, false); if (!(cp.data && cp.size)) return -EINVAL; desc.cpuid_1_eax = cpuid_1_eax; scan_containers(cp.data, cp.size, &desc); if (!desc.mc) return -EINVAL; ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size); if (ret > UCODE_UPDATED) return -EINVAL; return 0; } /* * a small, trivial cache of per-family ucode patches */ static struct ucode_patch *cache_find_patch(u16 equiv_cpu) { struct ucode_patch *p; list_for_each_entry(p, &microcode_cache, plist) if (p->equiv_cpu == equiv_cpu) return p; return NULL; } static void update_cache(struct ucode_patch *new_patch) { struct ucode_patch *p; list_for_each_entry(p, &microcode_cache, plist) { if (p->equiv_cpu == new_patch->equiv_cpu) { if (p->patch_id >= 
new_patch->patch_id) { /* we already have the latest patch */ kfree(new_patch->data); kfree(new_patch); return; } list_replace(&p->plist, &new_patch->plist); kfree(p->data); kfree(p); return; } } /* no patch found, add it */ list_add_tail(&new_patch->plist, &microcode_cache); } static void free_cache(void) { struct ucode_patch *p, *tmp; list_for_each_entry_safe(p, tmp, &microcode_cache, plist) { __list_del(p->plist.prev, p->plist.next); kfree(p->data); kfree(p); } } static struct ucode_patch *find_patch(unsigned int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; u16 equiv_id; equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig); if (!equiv_id) return NULL; return cache_find_patch(equiv_id); } void reload_ucode_amd(unsigned int cpu) { u32 rev, dummy __always_unused; struct microcode_amd *mc; struct ucode_patch *p; p = find_patch(cpu); if (!p) return; mc = p->data; rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); if (rev < mc->hdr.patch_id) { if (!__apply_microcode_amd(mc)) { ucode_new_rev = mc->hdr.patch_id; pr_info("reload patch_level=0x%08x\n", ucode_new_rev); } } } static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) { struct cpuinfo_x86 *c = &cpu_data(cpu); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct ucode_patch *p; csig->sig = cpuid_eax(0x00000001); csig->rev = c->microcode; /* * a patch could have been loaded early, set uci->mc so that * mc_bp_resume() can call apply_microcode() */ p = find_patch(cpu); if (p && (p->patch_id == csig->rev)) uci->mc = p->data; pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); return 0; } static enum ucode_state apply_microcode_amd(int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); struct microcode_amd *mc_amd; struct ucode_cpu_info *uci; struct ucode_patch *p; enum ucode_state ret; u32 rev, dummy __always_unused; BUG_ON(raw_smp_processor_id() != cpu); uci = ucode_cpu_info + cpu; p = find_patch(cpu); if (!p) return UCODE_NFOUND; mc_amd = p->data; uci->mc = p->data; rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); /* need to apply patch? */ if (rev > mc_amd->hdr.patch_id) { ret = UCODE_OK; goto out; } if (__apply_microcode_amd(mc_amd)) { pr_err("CPU%d: update failed for patch_level=0x%08x\n", cpu, mc_amd->hdr.patch_id); return UCODE_ERROR; } rev = mc_amd->hdr.patch_id; ret = UCODE_UPDATED; pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); out: uci->cpu_sig.rev = rev; c->microcode = rev; /* Update boot_cpu_data's revision too, if we're on the BSP: */ if (c->cpu_index == boot_cpu_data.cpu_index) boot_cpu_data.microcode = rev; return ret; } static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size) { u32 equiv_tbl_len; const u32 *hdr; if (!verify_equivalence_table(buf, buf_size, false)) return 0; hdr = (const u32 *)buf; equiv_tbl_len = hdr[2]; equiv_table.entry = vmalloc(equiv_tbl_len); if (!equiv_table.entry) { pr_err("failed to allocate equivalent CPU table\n"); return 0; } memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len); equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry); /* add header length */ return equiv_tbl_len + CONTAINER_HDR_SZ; } static void free_equiv_cpu_table(void) { vfree(equiv_table.entry); memset(&equiv_table, 0, sizeof(equiv_table)); } static void cleanup(void) { free_equiv_cpu_table(); free_cache(); } /* * Return a non-negative value even if some of the checks failed so that * we can skip over the next patch. 
If we return a negative value, we * signal a grave error like a memory allocation has failed and the * driver cannot continue functioning normally. In such cases, we tear * down everything we've used up so far and exit. */ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, unsigned int *patch_size) { struct microcode_header_amd *mc_hdr; struct ucode_patch *patch; u16 proc_id; int ret; ret = verify_patch(family, fw, leftover, patch_size, false); if (ret) return ret; patch = kzalloc(sizeof(*patch), GFP_KERNEL); if (!patch) { pr_err("Patch allocation failure.\n"); return -EINVAL; } patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL); if (!patch->data) { pr_err("Patch data allocation failure.\n"); kfree(patch); return -EINVAL; } patch->size = *patch_size; mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE); proc_id = mc_hdr->processor_rev_id; INIT_LIST_HEAD(&patch->plist); patch->patch_id = mc_hdr->patch_id; patch->equiv_cpu = proc_id; pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n", __func__, patch->patch_id, proc_id); /* ... and add to cache. */ update_cache(patch); return 0; } /* Scan the blob in @data and add microcode patches to the cache. */ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size) { u8 *fw = (u8 *)data; size_t offset; offset = install_equiv_cpu_table(data, size); if (!offset) return UCODE_ERROR; fw += offset; size -= offset; if (*(u32 *)fw != UCODE_UCODE_TYPE) { pr_err("invalid type field in container file section header\n"); free_equiv_cpu_table(); return UCODE_ERROR; } while (size > 0) { unsigned int crnt_size = 0; int ret; ret = verify_and_add_patch(family, fw, size, &crnt_size); if (ret < 0) return UCODE_ERROR; fw += crnt_size + SECTION_HDR_SIZE; size -= (crnt_size + SECTION_HDR_SIZE); } return UCODE_OK; } static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) { struct cpuinfo_x86 *c; unsigned int nid, cpu; struct ucode_patch *p; enum ucode_state ret; /* free old equiv table */ free_equiv_cpu_table(); ret = __load_microcode_amd(family, data, size); if (ret != UCODE_OK) { cleanup(); return ret; } for_each_node(nid) { cpu = cpumask_first(cpumask_of_node(nid)); c = &cpu_data(cpu); p = find_patch(cpu); if (!p) continue; if (c->microcode >= p->patch_id) continue; ret = UCODE_NEW; } return ret; } /* * AMD microcode firmware naming convention, up to family 15h they are in * the legacy file: * * amd-ucode/microcode_amd.bin * * This legacy file is always smaller than 2K in size. * * Beginning with family 15h, they are in family-specific firmware files: * * amd-ucode/microcode_amd_fam15h.bin * amd-ucode/microcode_amd_fam16h.bin * ... * * These might be larger than 2K. 
*/ static enum ucode_state request_microcode_amd(int cpu, struct device *device) { char fw_name[36] = "amd-ucode/microcode_amd.bin"; struct cpuinfo_x86 *c = &cpu_data(cpu); enum ucode_state ret = UCODE_NFOUND; const struct firmware *fw; if (c->x86 >= 0x15) snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); if (request_firmware_direct(&fw, (const char *)fw_name, device)) { pr_debug("failed to load file %s\n", fw_name); goto out; } ret = UCODE_ERROR; if (!verify_container(fw->data, fw->size, false)) goto fw_release; ret = load_microcode_amd(c->x86, fw->data, fw->size); fw_release: release_firmware(fw); out: return ret; } static void microcode_fini_cpu_amd(int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; uci->mc = NULL; } static struct microcode_ops microcode_amd_ops = { .request_microcode_fw = request_microcode_amd, .collect_cpu_info = collect_cpu_info_amd, .apply_microcode = apply_microcode_amd, .microcode_fini_cpu = microcode_fini_cpu_amd, }; struct microcode_ops * __init init_amd_microcode(void) { struct cpuinfo_x86 *c = &boot_cpu_data; if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { pr_warn("AMD CPU family 0x%x not supported\n", c->x86); return NULL; } if (ucode_new_rev) pr_info_once("microcode updated early to new patch_level=0x%08x\n", ucode_new_rev); return &microcode_amd_ops; } void __exit exit_amd_microcode(void) { cleanup(); }
linux-master
arch/x86/kernel/cpu/microcode/amd.c
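parse_container() and __load_microcode_amd() above both walk the same on-disk layout: a 12-byte container header (UCODE_MAGIC, table type, equivalence-table length) followed by the equivalence table, then a sequence of patch sections that each begin with an 8-byte (type, size) header. A rough user-space walker under those assumptions is sketched below; it only locates patch sections and performs none of the kernel's validation (checksums, per-family size limits, chipset checks):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Offsets mirror CONTAINER_HDR_SZ and SECTION_HDR_SIZE in amd.c. */
#define UCODE_MAGIC		0x00414d44
#define CONTAINER_HDR_SZ	12
#define SECTION_HDR_SIZE	8
#define UCODE_UCODE_TYPE	0x00000001

/*
 * Illustrative walker over one AMD microcode container: skip the header
 * and equivalence table, then iterate (type, size)-prefixed patch
 * sections. Not a kernel interface.
 */
static void walk_container(const uint8_t *buf, size_t size)
{
	const uint32_t *hdr = (const uint32_t *)buf;
	size_t off;

	if (size < CONTAINER_HDR_SZ || hdr[0] != UCODE_MAGIC)
		return;

	/* container header plus equivalence table (length in hdr[2]) */
	off = CONTAINER_HDR_SZ + hdr[2];

	while (off + SECTION_HDR_SIZE <= size) {
		const uint32_t *sec = (const uint32_t *)(buf + off);
		uint32_t type = sec[0], psize = sec[1];

		if (type != UCODE_UCODE_TYPE)
			break;
		printf("patch section at offset %zu, %u bytes\n",
		       off, (unsigned int)psize);
		off += SECTION_HDR_SIZE + psize;
	}
}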
// SPDX-License-Identifier: GPL-2.0-or-later /* * CPU Microcode Update Driver for Linux * * Copyright (C) 2000-2006 Tigran Aivazian <[email protected]> * 2006 Shaohua Li <[email protected]> * 2013-2016 Borislav Petkov <[email protected]> * * X86 CPU microcode early update for Linux: * * Copyright (C) 2012 Fenghua Yu <[email protected]> * H Peter Anvin" <[email protected]> * (C) 2015 Borislav Petkov <[email protected]> * * This driver allows to upgrade microcode on x86 processors. */ #define pr_fmt(fmt) "microcode: " fmt #include <linux/platform_device.h> #include <linux/stop_machine.h> #include <linux/syscore_ops.h> #include <linux/miscdevice.h> #include <linux/capability.h> #include <linux/firmware.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/cpu.h> #include <linux/nmi.h> #include <linux/fs.h> #include <linux/mm.h> #include <asm/cpu_device_id.h> #include <asm/perf_event.h> #include <asm/processor.h> #include <asm/cmdline.h> #include <asm/setup.h> #include "internal.h" #define DRIVER_VERSION "2.2" static struct microcode_ops *microcode_ops; static bool dis_ucode_ldr = true; bool initrd_gone; LIST_HEAD(microcode_cache); /* * Synchronization. * * All non cpu-hotplug-callback call sites use: * * - cpus_read_lock/unlock() to synchronize with * the cpu-hotplug-callback call sites. * * We guarantee that only a single cpu is being * updated at any particular moment of time. */ struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; struct cpu_info_ctx { struct cpu_signature *cpu_sig; int err; }; /* * Those patch levels cannot be updated to newer ones and thus should be final. */ static u32 final_levels[] = { 0x01000098, 0x0100009f, 0x010000af, 0, /* T-101 terminator */ }; /* * Check the current patch level on this CPU. * * Returns: * - true: if update should stop * - false: otherwise */ static bool amd_check_current_patch_level(void) { u32 lvl, dummy, i; u32 *levels; native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy); if (IS_ENABLED(CONFIG_X86_32)) levels = (u32 *)__pa_nodebug(&final_levels); else levels = final_levels; for (i = 0; levels[i]; i++) { if (lvl == levels[i]) return true; } return false; } static bool __init check_loader_disabled_bsp(void) { static const char *__dis_opt_str = "dis_ucode_ldr"; #ifdef CONFIG_X86_32 const char *cmdline = (const char *)__pa_nodebug(boot_command_line); const char *option = (const char *)__pa_nodebug(__dis_opt_str); bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr); #else /* CONFIG_X86_64 */ const char *cmdline = boot_command_line; const char *option = __dis_opt_str; bool *res = &dis_ucode_ldr; #endif /* * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not * completely accurate as xen pv guests don't see that CPUID bit set but * that's good enough as they don't land on the BSP path anyway. 
*/ if (native_cpuid_ecx(1) & BIT(31)) return *res; if (x86_cpuid_vendor() == X86_VENDOR_AMD) { if (amd_check_current_patch_level()) return *res; } if (cmdline_find_option_bool(cmdline, option) <= 0) *res = false; return *res; } void __init load_ucode_bsp(void) { unsigned int cpuid_1_eax; bool intel = true; if (!have_cpuid_p()) return; cpuid_1_eax = native_cpuid_eax(1); switch (x86_cpuid_vendor()) { case X86_VENDOR_INTEL: if (x86_family(cpuid_1_eax) < 6) return; break; case X86_VENDOR_AMD: if (x86_family(cpuid_1_eax) < 0x10) return; intel = false; break; default: return; } if (check_loader_disabled_bsp()) return; if (intel) load_ucode_intel_bsp(); else load_ucode_amd_early(cpuid_1_eax); } static bool check_loader_disabled_ap(void) { #ifdef CONFIG_X86_32 return *((bool *)__pa_nodebug(&dis_ucode_ldr)); #else return dis_ucode_ldr; #endif } void load_ucode_ap(void) { unsigned int cpuid_1_eax; if (check_loader_disabled_ap()) return; cpuid_1_eax = native_cpuid_eax(1); switch (x86_cpuid_vendor()) { case X86_VENDOR_INTEL: if (x86_family(cpuid_1_eax) >= 6) load_ucode_intel_ap(); break; case X86_VENDOR_AMD: if (x86_family(cpuid_1_eax) >= 0x10) load_ucode_amd_early(cpuid_1_eax); break; default: break; } } static int __init save_microcode_in_initrd(void) { struct cpuinfo_x86 *c = &boot_cpu_data; int ret = -EINVAL; switch (c->x86_vendor) { case X86_VENDOR_INTEL: if (c->x86 >= 6) ret = save_microcode_in_initrd_intel(); break; case X86_VENDOR_AMD: if (c->x86 >= 0x10) ret = save_microcode_in_initrd_amd(cpuid_eax(1)); break; default: break; } initrd_gone = true; return ret; } struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) { #ifdef CONFIG_BLK_DEV_INITRD unsigned long start = 0; size_t size; #ifdef CONFIG_X86_32 struct boot_params *params; if (use_pa) params = (struct boot_params *)__pa_nodebug(&boot_params); else params = &boot_params; size = params->hdr.ramdisk_size; /* * Set start only if we have an initrd image. We cannot use initrd_start * because it is not set that early yet. */ if (size) start = params->hdr.ramdisk_image; # else /* CONFIG_X86_64 */ size = (unsigned long)boot_params.ext_ramdisk_size << 32; size |= boot_params.hdr.ramdisk_size; if (size) { start = (unsigned long)boot_params.ext_ramdisk_image << 32; start |= boot_params.hdr.ramdisk_image; start += PAGE_OFFSET; } # endif /* * Fixup the start address: after reserve_initrd() runs, initrd_start * has the virtual address of the beginning of the initrd. It also * possibly relocates the ramdisk. In either case, initrd_start contains * the updated address so use that instead. * * initrd_gone is for the hotplug case where we've thrown out initrd * already. */ if (!use_pa) { if (initrd_gone) return (struct cpio_data){ NULL, 0, "" }; if (initrd_start) start = initrd_start; } else { /* * The picture with physical addresses is a bit different: we * need to get the *physical* address to which the ramdisk was * relocated, i.e., relocated_ramdisk (not initrd_start) and * since we're running from physical addresses, we need to access * relocated_ramdisk through its *physical* address too. 
*/ u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk); if (*rr) start = *rr; } return find_cpio_data(path, (void *)start, size, NULL); #else /* !CONFIG_BLK_DEV_INITRD */ return (struct cpio_data){ NULL, 0, "" }; #endif } static void reload_early_microcode(unsigned int cpu) { int vendor, family; vendor = x86_cpuid_vendor(); family = x86_cpuid_family(); switch (vendor) { case X86_VENDOR_INTEL: if (family >= 6) reload_ucode_intel(); break; case X86_VENDOR_AMD: if (family >= 0x10) reload_ucode_amd(cpu); break; default: break; } } /* fake device for request_firmware */ static struct platform_device *microcode_pdev; #ifdef CONFIG_MICROCODE_LATE_LOADING /* * Late loading dance. Why the heavy-handed stomp_machine effort? * * - HT siblings must be idle and not execute other code while the other sibling * is loading microcode in order to avoid any negative interactions caused by * the loading. * * - In addition, microcode update on the cores must be serialized until this * requirement can be relaxed in the future. Right now, this is conservative * and good. */ #define SPINUNIT 100 /* 100 nsec */ static int check_online_cpus(void) { unsigned int cpu; /* * Make sure all CPUs are online. It's fine for SMT to be disabled if * all the primary threads are still online. */ for_each_present_cpu(cpu) { if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) { pr_err("Not all CPUs online, aborting microcode update.\n"); return -EINVAL; } } return 0; } static atomic_t late_cpus_in; static atomic_t late_cpus_out; static int __wait_for_cpus(atomic_t *t, long long timeout) { int all_cpus = num_online_cpus(); atomic_inc(t); while (atomic_read(t) < all_cpus) { if (timeout < SPINUNIT) { pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n", all_cpus - atomic_read(t)); return 1; } ndelay(SPINUNIT); timeout -= SPINUNIT; touch_nmi_watchdog(); } return 0; } /* * Returns: * < 0 - on error * 0 - success (no update done or microcode was updated) */ static int __reload_late(void *info) { int cpu = smp_processor_id(); enum ucode_state err; int ret = 0; /* * Wait for all CPUs to arrive. A load will not be attempted unless all * CPUs show up. * */ if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) return -1; /* * On an SMT system, it suffices to load the microcode on one sibling of * the core because the microcode engine is shared between the threads. * Synchronization still needs to take place so that no concurrent * loading attempts happen on multiple threads of an SMT core. See * below. */ if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu) err = microcode_ops->apply_microcode(cpu); else goto wait_for_siblings; if (err >= UCODE_NFOUND) { if (err == UCODE_ERROR) { pr_warn("Error reloading microcode on CPU %d\n", cpu); ret = -1; } } wait_for_siblings: if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC)) panic("Timeout during microcode update!\n"); /* * At least one thread has completed update on each core. * For others, simply call the update to make sure the * per-cpu cpuinfo can be updated with right microcode * revision. */ if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu) err = microcode_ops->apply_microcode(cpu); return ret; } /* * Reload microcode late on all CPUs. Wait for a sec until they * all gather together. 
*/ static int microcode_reload_late(void) { int old = boot_cpu_data.microcode, ret; struct cpuinfo_x86 prev_info; pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n"); pr_err("You should switch to early loading, if possible.\n"); atomic_set(&late_cpus_in, 0); atomic_set(&late_cpus_out, 0); /* * Take a snapshot before the microcode update in order to compare and * check whether any bits changed after an update. */ store_cpu_caps(&prev_info); ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); if (!ret) { pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n", old, boot_cpu_data.microcode); microcode_check(&prev_info); } else { pr_info("Reload failed, current microcode revision: 0x%x\n", boot_cpu_data.microcode); } return ret; } static ssize_t reload_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { enum ucode_state tmp_ret = UCODE_OK; int bsp = boot_cpu_data.cpu_index; unsigned long val; ssize_t ret = 0; ret = kstrtoul(buf, 0, &val); if (ret || val != 1) return -EINVAL; cpus_read_lock(); ret = check_online_cpus(); if (ret) goto put; tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev); if (tmp_ret != UCODE_NEW) goto put; ret = microcode_reload_late(); put: cpus_read_unlock(); if (ret == 0) ret = size; add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); return ret; } static DEVICE_ATTR_WO(reload); #endif static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; return sprintf(buf, "0x%x\n", uci->cpu_sig.rev); } static ssize_t processor_flags_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; return sprintf(buf, "0x%x\n", uci->cpu_sig.pf); } static DEVICE_ATTR_RO(version); static DEVICE_ATTR_RO(processor_flags); static struct attribute *mc_default_attrs[] = { &dev_attr_version.attr, &dev_attr_processor_flags.attr, NULL }; static const struct attribute_group mc_attr_group = { .attrs = mc_default_attrs, .name = "microcode", }; static void microcode_fini_cpu(int cpu) { if (microcode_ops->microcode_fini_cpu) microcode_ops->microcode_fini_cpu(cpu); } static enum ucode_state microcode_init_cpu(int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; memset(uci, 0, sizeof(*uci)); microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); return microcode_ops->apply_microcode(cpu); } /** * microcode_bsp_resume - Update boot CPU microcode during resume. 
*/ void microcode_bsp_resume(void) { int cpu = smp_processor_id(); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; if (uci->mc) microcode_ops->apply_microcode(cpu); else reload_early_microcode(cpu); } static struct syscore_ops mc_syscore_ops = { .resume = microcode_bsp_resume, }; static int mc_cpu_starting(unsigned int cpu) { enum ucode_state err = microcode_ops->apply_microcode(cpu); pr_debug("%s: CPU%d, err: %d\n", __func__, cpu, err); return err == UCODE_ERROR; } static int mc_cpu_online(unsigned int cpu) { struct device *dev = get_cpu_device(cpu); if (sysfs_create_group(&dev->kobj, &mc_attr_group)) pr_err("Failed to create group for CPU%d\n", cpu); return 0; } static int mc_cpu_down_prep(unsigned int cpu) { struct device *dev; dev = get_cpu_device(cpu); microcode_fini_cpu(cpu); /* Suspend is in progress, only remove the interface */ sysfs_remove_group(&dev->kobj, &mc_attr_group); pr_debug("%s: CPU%d\n", __func__, cpu); return 0; } static void setup_online_cpu(struct work_struct *work) { int cpu = smp_processor_id(); enum ucode_state err; err = microcode_init_cpu(cpu); if (err == UCODE_ERROR) { pr_err("Error applying microcode on CPU%d\n", cpu); return; } mc_cpu_online(cpu); } static struct attribute *cpu_root_microcode_attrs[] = { #ifdef CONFIG_MICROCODE_LATE_LOADING &dev_attr_reload.attr, #endif NULL }; static const struct attribute_group cpu_root_microcode_group = { .name = "microcode", .attrs = cpu_root_microcode_attrs, }; static int __init microcode_init(void) { struct device *dev_root; struct cpuinfo_x86 *c = &boot_cpu_data; int error; if (dis_ucode_ldr) return -EINVAL; if (c->x86_vendor == X86_VENDOR_INTEL) microcode_ops = init_intel_microcode(); else if (c->x86_vendor == X86_VENDOR_AMD) microcode_ops = init_amd_microcode(); else pr_err("no support for this CPU vendor\n"); if (!microcode_ops) return -ENODEV; microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0); if (IS_ERR(microcode_pdev)) return PTR_ERR(microcode_pdev); dev_root = bus_get_dev_root(&cpu_subsys); if (dev_root) { error = sysfs_create_group(&dev_root->kobj, &cpu_root_microcode_group); put_device(dev_root); if (error) { pr_err("Error creating microcode group!\n"); goto out_pdev; } } /* Do per-CPU setup */ schedule_on_each_cpu(setup_online_cpu); register_syscore_ops(&mc_syscore_ops); cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting", mc_cpu_starting, NULL); cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", mc_cpu_online, mc_cpu_down_prep); pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); return 0; out_pdev: platform_device_unregister(microcode_pdev); return error; } fs_initcall(save_microcode_in_initrd); late_initcall(microcode_init);
linux-master
arch/x86/kernel/cpu/microcode/core.c
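The late-loading path in core.c serializes the update with a simple rendezvous: every online CPU increments a shared counter in __wait_for_cpus() and spins until all CPUs have arrived or a timeout expires, once on entry (late_cpus_in) and again on exit (late_cpus_out). Below is a stand-alone sketch of that barrier pattern in C11 atomics; the spin_wait_ns callback stands in for the kernel's ndelay()/touch_nmi_watchdog() loop and is not a kernel interface:

#include <stdatomic.h>
#include <stdbool.h>

#define SPINUNIT_NS 100	/* mirrors SPINUNIT in core.c */

/*
 * Sketch of the __wait_for_cpus() rendezvous: check in by bumping the
 * shared counter, then spin until every participant has checked in or
 * the timeout budget runs out. Illustrative only.
 */
static bool rendezvous(atomic_int *counter, int total_cpus,
		       long long timeout_ns,
		       void (*spin_wait_ns)(long long))
{
	atomic_fetch_add(counter, 1);

	while (atomic_load(counter) < total_cpus) {
		if (timeout_ns < SPINUNIT_NS)
			return false;	/* some CPUs never arrived */
		spin_wait_ns(SPINUNIT_NS);
		timeout_ns -= SPINUNIT_NS;
	}
	return true;
}

As in __reload_late() above, a failure on the entry barrier makes a CPU bail out before touching the microcode, while a timeout on the exit barrier is treated as fatal (the kernel panics).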
// SPDX-License-Identifier: GPL-2.0-only /* * Resource Director Technology(RDT) * - Monitoring code * * Copyright (C) 2017 Intel Corporation * * Author: * Vikas Shivappa <[email protected]> * * This replaces the cqm.c based on perf but we reuse a lot of * code and datastructures originally from Peter Zijlstra and Matt Fleming. * * More information about RDT be found in the Intel (R) x86 Architecture * Software Developer Manual June 2016, volume 3, section 17.17. */ #include <linux/module.h> #include <linux/sizes.h> #include <linux/slab.h> #include <asm/cpu_device_id.h> #include <asm/resctrl.h> #include "internal.h" struct rmid_entry { u32 rmid; int busy; struct list_head list; }; /** * @rmid_free_lru A least recently used list of free RMIDs * These RMIDs are guaranteed to have an occupancy less than the * threshold occupancy */ static LIST_HEAD(rmid_free_lru); /** * @rmid_limbo_count count of currently unused but (potentially) * dirty RMIDs. * This counts RMIDs that no one is currently using but that * may have a occupancy value > resctrl_rmid_realloc_threshold. User can * change the threshold occupancy value. */ static unsigned int rmid_limbo_count; /** * @rmid_entry - The entry in the limbo and free lists. */ static struct rmid_entry *rmid_ptrs; /* * Global boolean for rdt_monitor which is true if any * resource monitoring is enabled. */ bool rdt_mon_capable; /* * Global to indicate which monitoring events are enabled. */ unsigned int rdt_mon_features; /* * This is the threshold cache occupancy in bytes at which we will consider an * RMID available for re-allocation. */ unsigned int resctrl_rmid_realloc_threshold; /* * This is the maximum value for the reallocation threshold, in bytes. */ unsigned int resctrl_rmid_realloc_limit; #define CF(cf) ((unsigned long)(1048576 * (cf) + 0.5)) /* * The correction factor table is documented in Documentation/arch/x86/resctrl.rst. * If rmid > rmid threshold, MBM total and local values should be multiplied * by the correction factor. * * The original table is modified for better code: * * 1. The threshold 0 is changed to rmid count - 1 so don't do correction * for the case. * 2. MBM total and local correction table indexed by core counter which is * equal to (x86_cache_max_rmid + 1) / 8 - 1 and is from 0 up to 27. * 3. The correction factor is normalized to 2^20 (1048576) so it's faster * to calculate corrected value by shifting: * corrected_value = (original_value * correction_factor) >> 20 */ static const struct mbm_correction_factor_table { u32 rmidthreshold; u64 cf; } mbm_cf_table[] __initconst = { {7, CF(1.000000)}, {15, CF(1.000000)}, {15, CF(0.969650)}, {31, CF(1.000000)}, {31, CF(1.066667)}, {31, CF(0.969650)}, {47, CF(1.142857)}, {63, CF(1.000000)}, {63, CF(1.185115)}, {63, CF(1.066553)}, {79, CF(1.454545)}, {95, CF(1.000000)}, {95, CF(1.230769)}, {95, CF(1.142857)}, {95, CF(1.066667)}, {127, CF(1.000000)}, {127, CF(1.254863)}, {127, CF(1.185255)}, {151, CF(1.000000)}, {127, CF(1.066667)}, {167, CF(1.000000)}, {159, CF(1.454334)}, {183, CF(1.000000)}, {127, CF(0.969744)}, {191, CF(1.280246)}, {191, CF(1.230921)}, {215, CF(1.000000)}, {191, CF(1.143118)}, }; static u32 mbm_cf_rmidthreshold __read_mostly = UINT_MAX; static u64 mbm_cf __read_mostly; static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val) { /* Correct MBM value. 
*/ if (rmid > mbm_cf_rmidthreshold) val = (val * mbm_cf) >> 20; return val; } static inline struct rmid_entry *__rmid_entry(u32 rmid) { struct rmid_entry *entry; entry = &rmid_ptrs[rmid]; WARN_ON(entry->rmid != rmid); return entry; } static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val) { u64 msr_val; /* * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured * with a valid event code for supported resource type and the bits * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID, * IA32_QM_CTR.data (bits 61:0) reports the monitored data. * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62) * are error bits. */ wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid); rdmsrl(MSR_IA32_QM_CTR, msr_val); if (msr_val & RMID_VAL_ERROR) return -EIO; if (msr_val & RMID_VAL_UNAVAIL) return -EINVAL; *val = msr_val; return 0; } static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom, u32 rmid, enum resctrl_event_id eventid) { switch (eventid) { case QOS_L3_OCCUP_EVENT_ID: return NULL; case QOS_L3_MBM_TOTAL_EVENT_ID: return &hw_dom->arch_mbm_total[rmid]; case QOS_L3_MBM_LOCAL_EVENT_ID: return &hw_dom->arch_mbm_local[rmid]; } /* Never expect to get here */ WARN_ON_ONCE(1); return NULL; } void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, u32 rmid, enum resctrl_event_id eventid) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); struct arch_mbm_state *am; am = get_arch_mbm_state(hw_dom, rmid, eventid); if (am) { memset(am, 0, sizeof(*am)); /* Record any initial, non-zero count value. */ __rmid_read(rmid, eventid, &am->prev_msr); } } /* * Assumes that hardware counters are also reset and thus that there is * no need to record initial non-zero counts. */ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); if (is_mbm_total_enabled()) memset(hw_dom->arch_mbm_total, 0, sizeof(*hw_dom->arch_mbm_total) * r->num_rmid); if (is_mbm_local_enabled()) memset(hw_dom->arch_mbm_local, 0, sizeof(*hw_dom->arch_mbm_local) * r->num_rmid); } static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width) { u64 shift = 64 - width, chunks; chunks = (cur_msr << shift) - (prev_msr << shift); return chunks >> shift; } int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u32 rmid, enum resctrl_event_id eventid, u64 *val) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); struct arch_mbm_state *am; u64 msr_val, chunks; int ret; if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) return -EINVAL; ret = __rmid_read(rmid, eventid, &msr_val); if (ret) return ret; am = get_arch_mbm_state(hw_dom, rmid, eventid); if (am) { am->chunks += mbm_overflow_count(am->prev_msr, msr_val, hw_res->mbm_width); chunks = get_corrected_mbm_count(rmid, am->chunks); am->prev_msr = msr_val; } else { chunks = msr_val; } *val = chunks * hw_res->mon_scale; return 0; } /* * Check the RMIDs that are marked as busy for this domain. If the * reported LLC occupancy is below the threshold clear the busy bit and * decrement the count. 
If the busy count gets to zero on an RMID, we * free the RMID */ void __check_limbo(struct rdt_domain *d, bool force_free) { struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; struct rmid_entry *entry; u32 crmid = 1, nrmid; bool rmid_dirty; u64 val = 0; /* * Skip RMID 0 and start from RMID 1 and check all the RMIDs that * are marked as busy for occupancy < threshold. If the occupancy * is less than the threshold decrement the busy counter of the * RMID and move it to the free list when the counter reaches 0. */ for (;;) { nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid); if (nrmid >= r->num_rmid) break; entry = __rmid_entry(nrmid); if (resctrl_arch_rmid_read(r, d, entry->rmid, QOS_L3_OCCUP_EVENT_ID, &val)) { rmid_dirty = true; } else { rmid_dirty = (val >= resctrl_rmid_realloc_threshold); } if (force_free || !rmid_dirty) { clear_bit(entry->rmid, d->rmid_busy_llc); if (!--entry->busy) { rmid_limbo_count--; list_add_tail(&entry->list, &rmid_free_lru); } } crmid = nrmid + 1; } } bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d) { return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid; } /* * As of now the RMIDs allocation is global. * However we keep track of which packages the RMIDs * are used to optimize the limbo list management. */ int alloc_rmid(void) { struct rmid_entry *entry; lockdep_assert_held(&rdtgroup_mutex); if (list_empty(&rmid_free_lru)) return rmid_limbo_count ? -EBUSY : -ENOSPC; entry = list_first_entry(&rmid_free_lru, struct rmid_entry, list); list_del(&entry->list); return entry->rmid; } static void add_rmid_to_limbo(struct rmid_entry *entry) { struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; struct rdt_domain *d; int cpu, err; u64 val = 0; entry->busy = 0; cpu = get_cpu(); list_for_each_entry(d, &r->domains, list) { if (cpumask_test_cpu(cpu, &d->cpu_mask)) { err = resctrl_arch_rmid_read(r, d, entry->rmid, QOS_L3_OCCUP_EVENT_ID, &val); if (err || val <= resctrl_rmid_realloc_threshold) continue; } /* * For the first limbo RMID in the domain, * setup up the limbo worker. */ if (!has_busy_rmid(r, d)) cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL); set_bit(entry->rmid, d->rmid_busy_llc); entry->busy++; } put_cpu(); if (entry->busy) rmid_limbo_count++; else list_add_tail(&entry->list, &rmid_free_lru); } void free_rmid(u32 rmid) { struct rmid_entry *entry; if (!rmid) return; lockdep_assert_held(&rdtgroup_mutex); entry = __rmid_entry(rmid); if (is_llc_occupancy_enabled()) add_rmid_to_limbo(entry); else list_add_tail(&entry->list, &rmid_free_lru); } static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 rmid, enum resctrl_event_id evtid) { switch (evtid) { case QOS_L3_MBM_TOTAL_EVENT_ID: return &d->mbm_total[rmid]; case QOS_L3_MBM_LOCAL_EVENT_ID: return &d->mbm_local[rmid]; default: return NULL; } } static int __mon_event_count(u32 rmid, struct rmid_read *rr) { struct mbm_state *m; u64 tval = 0; if (rr->first) { resctrl_arch_reset_rmid(rr->r, rr->d, rmid, rr->evtid); m = get_mbm_state(rr->d, rmid, rr->evtid); if (m) memset(m, 0, sizeof(struct mbm_state)); return 0; } rr->err = resctrl_arch_rmid_read(rr->r, rr->d, rmid, rr->evtid, &tval); if (rr->err) return rr->err; rr->val += tval; return 0; } /* * mbm_bw_count() - Update bw count from values previously read by * __mon_event_count(). * @rmid: The rmid used to identify the cached mbm_state. * @rr: The struct rmid_read populated by __mon_event_count(). 
* * Supporting function to calculate the memory bandwidth * and delta bandwidth in MBps. The chunks value previously read by * __mon_event_count() is compared with the chunks value from the previous * invocation. This must be called once per second to maintain values in MBps. */ static void mbm_bw_count(u32 rmid, struct rmid_read *rr) { struct mbm_state *m = &rr->d->mbm_local[rmid]; u64 cur_bw, bytes, cur_bytes; cur_bytes = rr->val; bytes = cur_bytes - m->prev_bw_bytes; m->prev_bw_bytes = cur_bytes; cur_bw = bytes / SZ_1M; if (m->delta_comp) m->delta_bw = abs(cur_bw - m->prev_bw); m->delta_comp = false; m->prev_bw = cur_bw; } /* * This is called via IPI to read the CQM/MBM counters * on a domain. */ void mon_event_count(void *info) { struct rdtgroup *rdtgrp, *entry; struct rmid_read *rr = info; struct list_head *head; int ret; rdtgrp = rr->rgrp; ret = __mon_event_count(rdtgrp->mon.rmid, rr); /* * For Ctrl groups read data from child monitor groups and * add them together. Count events which are read successfully. * Discard the rmid_read's reporting errors. */ head = &rdtgrp->mon.crdtgrp_list; if (rdtgrp->type == RDTCTRL_GROUP) { list_for_each_entry(entry, head, mon.crdtgrp_list) { if (__mon_event_count(entry->mon.rmid, rr) == 0) ret = 0; } } /* * __mon_event_count() calls for newly created monitor groups may * report -EINVAL/Unavailable if the monitor hasn't seen any traffic. * Discard error if any of the monitor event reads succeeded. */ if (ret == 0) rr->err = 0; } /* * Feedback loop for MBA software controller (mba_sc) * * mba_sc is a feedback loop where we periodically read MBM counters and * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so * that: * * current bandwidth(cur_bw) < user specified bandwidth(user_bw) * * This uses the MBM counters to measure the bandwidth and MBA throttle * MSRs to control the bandwidth for a particular rdtgrp. It builds on the * fact that resctrl rdtgroups have both monitoring and control. * * The frequency of the checks is 1s and we just tag along the MBM overflow * timer. Having 1s interval makes the calculation of bandwidth simpler. * * Although MBA's goal is to restrict the bandwidth to a maximum, there may * be a need to increase the bandwidth to avoid unnecessarily restricting * the L2 <-> L3 traffic. * * Since MBA controls the L2 external bandwidth where as MBM measures the * L3 external bandwidth the following sequence could lead to such a * situation. * * Consider an rdtgroup which had high L3 <-> memory traffic in initial * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but * after some time rdtgroup has mostly L2 <-> L3 traffic. * * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its * throttle MSRs already have low percentage values. To avoid * unnecessarily restricting such rdtgroups, we also increase the bandwidth. 
*/ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) { u32 closid, rmid, cur_msr_val, new_msr_val; struct mbm_state *pmbm_data, *cmbm_data; u32 cur_bw, delta_bw, user_bw; struct rdt_resource *r_mba; struct rdt_domain *dom_mba; struct list_head *head; struct rdtgroup *entry; if (!is_mbm_local_enabled()) return; r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; closid = rgrp->closid; rmid = rgrp->mon.rmid; pmbm_data = &dom_mbm->mbm_local[rmid]; dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba); if (!dom_mba) { pr_warn_once("Failure to get domain for MBA update\n"); return; } cur_bw = pmbm_data->prev_bw; user_bw = dom_mba->mbps_val[closid]; delta_bw = pmbm_data->delta_bw; /* MBA resource doesn't support CDP */ cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE); /* * For Ctrl groups read data from child monitor groups. */ head = &rgrp->mon.crdtgrp_list; list_for_each_entry(entry, head, mon.crdtgrp_list) { cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; cur_bw += cmbm_data->prev_bw; delta_bw += cmbm_data->delta_bw; } /* * Scale up/down the bandwidth linearly for the ctrl group. The * bandwidth step is the bandwidth granularity specified by the * hardware. * * The delta_bw is used when increasing the bandwidth so that we * dont alternately increase and decrease the control values * continuously. * * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep * switching between 90 and 110 continuously if we only check * cur_bw < user_bw. */ if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) { new_msr_val = cur_msr_val - r_mba->membw.bw_gran; } else if (cur_msr_val < MAX_MBA_BW && (user_bw > (cur_bw + delta_bw))) { new_msr_val = cur_msr_val + r_mba->membw.bw_gran; } else { return; } resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val); /* * Delta values are updated dynamically package wise for each * rdtgrp every time the throttle MSR changes value. * * This is because (1)the increase in bandwidth is not perfectly * linear and only "approximately" linear even when the hardware * says it is linear.(2)Also since MBA is a core specific * mechanism, the delta values vary based on number of cores used * by the rdtgrp. */ pmbm_data->delta_comp = true; list_for_each_entry(entry, head, mon.crdtgrp_list) { cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; cmbm_data->delta_comp = true; } } static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid) { struct rmid_read rr; rr.first = false; rr.r = r; rr.d = d; /* * This is protected from concurrent reads from user * as both the user and we hold the global mutex. */ if (is_mbm_total_enabled()) { rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; rr.val = 0; __mon_event_count(rmid, &rr); } if (is_mbm_local_enabled()) { rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; rr.val = 0; __mon_event_count(rmid, &rr); /* * Call the MBA software controller only for the * control groups and when user has enabled * the software controller explicitly. */ if (is_mba_sc(NULL)) mbm_bw_count(rmid, &rr); } } /* * Handler to scan the limbo list and move the RMIDs * to free list whose occupancy < threshold_occupancy. 
*/ void cqm_handle_limbo(struct work_struct *work) { unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); int cpu = smp_processor_id(); struct rdt_resource *r; struct rdt_domain *d; mutex_lock(&rdtgroup_mutex); r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; d = container_of(work, struct rdt_domain, cqm_limbo.work); __check_limbo(d, false); if (has_busy_rmid(r, d)) schedule_delayed_work_on(cpu, &d->cqm_limbo, delay); mutex_unlock(&rdtgroup_mutex); } void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms) { unsigned long delay = msecs_to_jiffies(delay_ms); int cpu; cpu = cpumask_any(&dom->cpu_mask); dom->cqm_work_cpu = cpu; schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); } void mbm_handle_overflow(struct work_struct *work) { unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL); struct rdtgroup *prgrp, *crgrp; int cpu = smp_processor_id(); struct list_head *head; struct rdt_resource *r; struct rdt_domain *d; mutex_lock(&rdtgroup_mutex); if (!static_branch_likely(&rdt_mon_enable_key)) goto out_unlock; r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; d = container_of(work, struct rdt_domain, mbm_over.work); list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { mbm_update(r, d, prgrp->mon.rmid); head = &prgrp->mon.crdtgrp_list; list_for_each_entry(crgrp, head, mon.crdtgrp_list) mbm_update(r, d, crgrp->mon.rmid); if (is_mba_sc(NULL)) update_mba_bw(prgrp, d); } schedule_delayed_work_on(cpu, &d->mbm_over, delay); out_unlock: mutex_unlock(&rdtgroup_mutex); } void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) { unsigned long delay = msecs_to_jiffies(delay_ms); int cpu; if (!static_branch_likely(&rdt_mon_enable_key)) return; cpu = cpumask_any(&dom->cpu_mask); dom->mbm_work_cpu = cpu; schedule_delayed_work_on(cpu, &dom->mbm_over, delay); } static int dom_data_init(struct rdt_resource *r) { struct rmid_entry *entry = NULL; int i, nr_rmids; nr_rmids = r->num_rmid; rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL); if (!rmid_ptrs) return -ENOMEM; for (i = 0; i < nr_rmids; i++) { entry = &rmid_ptrs[i]; INIT_LIST_HEAD(&entry->list); entry->rmid = i; list_add_tail(&entry->list, &rmid_free_lru); } /* * RMID 0 is special and is always allocated. It's used for all * tasks that are not monitored. */ entry = __rmid_entry(0); list_del(&entry->list); return 0; } static struct mon_evt llc_occupancy_event = { .name = "llc_occupancy", .evtid = QOS_L3_OCCUP_EVENT_ID, }; static struct mon_evt mbm_total_event = { .name = "mbm_total_bytes", .evtid = QOS_L3_MBM_TOTAL_EVENT_ID, }; static struct mon_evt mbm_local_event = { .name = "mbm_local_bytes", .evtid = QOS_L3_MBM_LOCAL_EVENT_ID, }; /* * Initialize the event list for the resource. * * Note that MBM events are also part of RDT_RESOURCE_L3 resource * because as per the SDM the total and local memory bandwidth * are enumerated as part of L3 monitoring. 
*/ static void l3_mon_evt_init(struct rdt_resource *r) { INIT_LIST_HEAD(&r->evt_list); if (is_llc_occupancy_enabled()) list_add_tail(&llc_occupancy_event.list, &r->evt_list); if (is_mbm_total_enabled()) list_add_tail(&mbm_total_event.list, &r->evt_list); if (is_mbm_local_enabled()) list_add_tail(&mbm_local_event.list, &r->evt_list); } int __init rdt_get_mon_l3_config(struct rdt_resource *r) { unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset; struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); unsigned int threshold; int ret; resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024; hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale; r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1; hw_res->mbm_width = MBM_CNTR_WIDTH_BASE; if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX) hw_res->mbm_width += mbm_offset; else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX) pr_warn("Ignoring impossible MBM counter offset\n"); /* * A reasonable upper limit on the max threshold is the number * of lines tagged per RMID if all RMIDs have the same number of * lines tagged in the LLC. * * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC. */ threshold = resctrl_rmid_realloc_limit / r->num_rmid; /* * Because num_rmid may not be a power of two, round the value * to the nearest multiple of hw_res->mon_scale so it matches a * value the hardware will measure. mon_scale may not be a power of 2. */ resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold); ret = dom_data_init(r); if (ret) return ret; if (rdt_cpu_has(X86_FEATURE_BMEC)) { if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) { mbm_total_event.configurable = true; mbm_config_rftype_init("mbm_total_bytes_config"); } if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) { mbm_local_event.configurable = true; mbm_config_rftype_init("mbm_local_bytes_config"); } } l3_mon_evt_init(r); r->mon_capable = true; return 0; } void __init intel_rdt_mbm_apply_quirk(void) { int cf_index; cf_index = (boot_cpu_data.x86_cache_max_rmid + 1) / 8 - 1; if (cf_index >= ARRAY_SIZE(mbm_cf_table)) { pr_info("No MBM correction factor available\n"); return; } mbm_cf_rmidthreshold = mbm_cf_table[cf_index].rmidthreshold; mbm_cf = mbm_cf_table[cf_index].cf; }
linux-master
arch/x86/kernel/cpu/resctrl/monitor.c
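Two calculations documented in the monitor.c record above are worth seeing in isolation: the width-limited counter delta computed by mbm_overflow_count(), and the 2^20 fixed-point scaling behind the CF() macro and get_corrected_mbm_count(). The standalone sketch below reproduces only that arithmetic, with no MSR access; the 24-bit width and the factor 1198372 (roughly CF(1.142857)) are example values chosen for illustration.

#include <stdint.h>
#include <stdio.h>

/* Width-limited delta, as in mbm_overflow_count(): shifting both samples up
 * by (64 - width) makes the subtraction wrap at the hardware counter width,
 * so one rollover between reads is still counted correctly. */
static uint64_t overflow_count(uint64_t prev, uint64_t cur, unsigned int width)
{
        uint64_t shift = 64 - width;
        uint64_t chunks = (cur << shift) - (prev << shift);

        return chunks >> shift;
}

/* Fixed-point correction, as in get_corrected_mbm_count(): the factor is
 * pre-scaled by 2^20, so a multiply and a right shift replace floating point. */
static uint64_t corrected(uint64_t val, uint64_t cf_q20)
{
        return (val * cf_q20) >> 20;
}

int main(void)
{
        /* A 24-bit counter that wrapped once between the two reads. */
        printf("delta = %llu\n",
               (unsigned long long)overflow_count(0xFFFFF0, 0x000010, 24)); /* prints 32 */

        /* 1198372 ~= 1.142857 * 2^20, cf. the CF() macro in monitor.c. */
        printf("corrected = %llu\n",
               (unsigned long long)corrected(1000000, 1198372));            /* prints 1142856 */
        return 0;
}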
// SPDX-License-Identifier: GPL-2.0-only /* * Resource Director Technology(RDT) * - Cache Allocation code. * * Copyright (C) 2016 Intel Corporation * * Authors: * Fenghua Yu <[email protected]> * Tony Luck <[email protected]> * * More information about RDT be found in the Intel (R) x86 Architecture * Software Developer Manual June 2016, volume 3, section 17.17. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cpu.h> #include <linux/kernfs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include "internal.h" /* * Check whether MBA bandwidth percentage value is correct. The value is * checked against the minimum and max bandwidth values specified by the * hardware. The allocated bandwidth percentage is rounded to the next * control step available on the hardware. */ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) { unsigned long bw; int ret; /* * Only linear delay values is supported for current Intel SKUs. */ if (!r->membw.delay_linear && r->membw.arch_needs_linear) { rdt_last_cmd_puts("No support for non-linear MB domains\n"); return false; } ret = kstrtoul(buf, 10, &bw); if (ret) { rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf); return false; } if ((bw < r->membw.min_bw || bw > r->default_ctrl) && !is_mba_sc(r)) { rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw, r->membw.min_bw, r->default_ctrl); return false; } *data = roundup(bw, (unsigned long)r->membw.bw_gran); return true; } int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, struct rdt_domain *d) { struct resctrl_staged_config *cfg; u32 closid = data->rdtgrp->closid; struct rdt_resource *r = s->res; unsigned long bw_val; cfg = &d->staged_config[s->conf_type]; if (cfg->have_new_ctrl) { rdt_last_cmd_printf("Duplicate domain %d\n", d->id); return -EINVAL; } if (!bw_validate(data->buf, &bw_val, r)) return -EINVAL; if (is_mba_sc(r)) { d->mbps_val[closid] = bw_val; return 0; } cfg->new_ctrl = bw_val; cfg->have_new_ctrl = true; return 0; } /* * Check whether a cache bit mask is valid. * For Intel the SDM says: * Please note that all (and only) contiguous '1' combinations * are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.). * Additionally Haswell requires at least two bits set. * AMD allows non-contiguous bitmasks. */ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) { unsigned long first_bit, zero_bit, val; unsigned int cbm_len = r->cache.cbm_len; int ret; ret = kstrtoul(buf, 16, &val); if (ret) { rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); return false; } if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) { rdt_last_cmd_puts("Mask out of range\n"); return false; } first_bit = find_first_bit(&val, cbm_len); zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); /* Are non-contiguous bitmaps allowed? */ if (!r->cache.arch_has_sparse_bitmaps && (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) { rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val); return false; } if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { rdt_last_cmd_printf("Need at least %d bits in the mask\n", r->cache.min_cbm_bits); return false; } *data = val; return true; } /* * Read one cache bit mask (hex). Check that it is valid for the current * resource type. 
*/ int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, struct rdt_domain *d) { struct rdtgroup *rdtgrp = data->rdtgrp; struct resctrl_staged_config *cfg; struct rdt_resource *r = s->res; u32 cbm_val; cfg = &d->staged_config[s->conf_type]; if (cfg->have_new_ctrl) { rdt_last_cmd_printf("Duplicate domain %d\n", d->id); return -EINVAL; } /* * Cannot set up more than one pseudo-locked region in a cache * hierarchy. */ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && rdtgroup_pseudo_locked_in_hierarchy(d)) { rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n"); return -EINVAL; } if (!cbm_validate(data->buf, &cbm_val, r)) return -EINVAL; if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE || rdtgrp->mode == RDT_MODE_SHAREABLE) && rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) { rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n"); return -EINVAL; } /* * The CBM may not overlap with the CBM of another closid if * either is exclusive. */ if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) { rdt_last_cmd_puts("Overlaps with exclusive group\n"); return -EINVAL; } if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) { if (rdtgrp->mode == RDT_MODE_EXCLUSIVE || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { rdt_last_cmd_puts("Overlaps with other group\n"); return -EINVAL; } } cfg->new_ctrl = cbm_val; cfg->have_new_ctrl = true; return 0; } /* * For each domain in this resource we expect to find a series of: * id=mask * separated by ";". The "id" is in decimal, and must match one of * the "id"s for this resource. */ static int parse_line(char *line, struct resctrl_schema *s, struct rdtgroup *rdtgrp) { enum resctrl_conf_type t = s->conf_type; struct resctrl_staged_config *cfg; struct rdt_resource *r = s->res; struct rdt_parse_data data; char *dom = NULL, *id; struct rdt_domain *d; unsigned long dom_id; if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) { rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); return -EINVAL; } next: if (!line || line[0] == '\0') return 0; dom = strsep(&line, ";"); id = strsep(&dom, "="); if (!dom || kstrtoul(id, 10, &dom_id)) { rdt_last_cmd_puts("Missing '=' or non-numeric domain\n"); return -EINVAL; } dom = strim(dom); list_for_each_entry(d, &r->domains, list) { if (d->id == dom_id) { data.buf = dom; data.rdtgrp = rdtgrp; if (r->parse_ctrlval(&data, s, d)) return -EINVAL; if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { cfg = &d->staged_config[t]; /* * In pseudo-locking setup mode and just * parsed a valid CBM that should be * pseudo-locked. Only one locked region per * resource group and domain so just do * the required initialization for single * region and return. 
*/ rdtgrp->plr->s = s; rdtgrp->plr->d = d; rdtgrp->plr->cbm = cfg->new_ctrl; d->plr = rdtgrp->plr; return 0; } goto next; } } return -EINVAL; } static u32 get_config_index(u32 closid, enum resctrl_conf_type type) { switch (type) { default: case CDP_NONE: return closid; case CDP_CODE: return closid * 2 + 1; case CDP_DATA: return closid * 2; } } static bool apply_config(struct rdt_hw_domain *hw_dom, struct resctrl_staged_config *cfg, u32 idx, cpumask_var_t cpu_mask) { struct rdt_domain *dom = &hw_dom->d_resctrl; if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) { cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask); hw_dom->ctrl_val[idx] = cfg->new_ctrl; return true; } return false; } int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type t, u32 cfg_val) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); u32 idx = get_config_index(closid, t); struct msr_param msr_param; if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) return -EINVAL; hw_dom->ctrl_val[idx] = cfg_val; msr_param.res = r; msr_param.low = idx; msr_param.high = idx + 1; hw_res->msr_update(d, &msr_param, r); return 0; } int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) { struct resctrl_staged_config *cfg; struct rdt_hw_domain *hw_dom; struct msr_param msr_param; enum resctrl_conf_type t; cpumask_var_t cpu_mask; struct rdt_domain *d; u32 idx; if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) return -ENOMEM; msr_param.res = NULL; list_for_each_entry(d, &r->domains, list) { hw_dom = resctrl_to_arch_dom(d); for (t = 0; t < CDP_NUM_TYPES; t++) { cfg = &hw_dom->d_resctrl.staged_config[t]; if (!cfg->have_new_ctrl) continue; idx = get_config_index(closid, t); if (!apply_config(hw_dom, cfg, idx, cpu_mask)) continue; if (!msr_param.res) { msr_param.low = idx; msr_param.high = msr_param.low + 1; msr_param.res = r; } else { msr_param.low = min(msr_param.low, idx); msr_param.high = max(msr_param.high, idx + 1); } } } if (cpumask_empty(cpu_mask)) goto done; /* Update resource control msr on all the CPUs. */ on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1); done: free_cpumask_var(cpu_mask); return 0; } static int rdtgroup_parse_resource(char *resname, char *tok, struct rdtgroup *rdtgrp) { struct resctrl_schema *s; list_for_each_entry(s, &resctrl_schema_all, list) { if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid) return parse_line(tok, s, rdtgrp); } rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname); return -EINVAL; } ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct resctrl_schema *s; struct rdtgroup *rdtgrp; struct rdt_resource *r; char *tok, *resname; int ret = 0; /* Valid input requires a trailing newline */ if (nbytes == 0 || buf[nbytes - 1] != '\n') return -EINVAL; buf[nbytes - 1] = '\0'; cpus_read_lock(); rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); cpus_read_unlock(); return -ENOENT; } rdt_last_cmd_clear(); /* * No changes to pseudo-locked region allowed. It has to be removed * and re-created instead. 
*/ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { ret = -EINVAL; rdt_last_cmd_puts("Resource group is pseudo-locked\n"); goto out; } rdt_staged_configs_clear(); while ((tok = strsep(&buf, "\n")) != NULL) { resname = strim(strsep(&tok, ":")); if (!tok) { rdt_last_cmd_puts("Missing ':'\n"); ret = -EINVAL; goto out; } if (tok[0] == '\0') { rdt_last_cmd_printf("Missing '%s' value\n", resname); ret = -EINVAL; goto out; } ret = rdtgroup_parse_resource(resname, tok, rdtgrp); if (ret) goto out; } list_for_each_entry(s, &resctrl_schema_all, list) { r = s->res; /* * Writes to mba_sc resources update the software controller, * not the control MSR. */ if (is_mba_sc(r)) continue; ret = resctrl_arch_update_domains(r, rdtgrp->closid); if (ret) goto out; } if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { /* * If pseudo-locking fails we keep the resource group in * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service * active and updated for just the domain the pseudo-locked * region was requested for. */ ret = rdtgroup_pseudo_lock_create(rdtgrp); } out: rdt_staged_configs_clear(); rdtgroup_kn_unlock(of->kn); cpus_read_unlock(); return ret ?: nbytes; } u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); u32 idx = get_config_index(closid, type); return hw_dom->ctrl_val[idx]; } static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid) { struct rdt_resource *r = schema->res; struct rdt_domain *dom; bool sep = false; u32 ctrl_val; seq_printf(s, "%*s:", max_name_width, schema->name); list_for_each_entry(dom, &r->domains, list) { if (sep) seq_puts(s, ";"); if (is_mba_sc(r)) ctrl_val = dom->mbps_val[closid]; else ctrl_val = resctrl_arch_get_config(r, dom, closid, schema->conf_type); seq_printf(s, r->format_str, dom->id, max_data_width, ctrl_val); sep = true; } seq_puts(s, "\n"); } int rdtgroup_schemata_show(struct kernfs_open_file *of, struct seq_file *s, void *v) { struct resctrl_schema *schema; struct rdtgroup *rdtgrp; int ret = 0; u32 closid; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (rdtgrp) { if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { list_for_each_entry(schema, &resctrl_schema_all, list) { seq_printf(s, "%s:uninitialized\n", schema->name); } } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { if (!rdtgrp->plr->d) { rdt_last_cmd_clear(); rdt_last_cmd_puts("Cache domain offline\n"); ret = -ENODEV; } else { seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->s->res->name, rdtgrp->plr->d->id, rdtgrp->plr->cbm); } } else { closid = rdtgrp->closid; list_for_each_entry(schema, &resctrl_schema_all, list) { if (closid < schema->num_closid) show_doms(s, schema, closid); } } } else { ret = -ENOENT; } rdtgroup_kn_unlock(of->kn); return ret; } void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, struct rdt_domain *d, struct rdtgroup *rdtgrp, int evtid, int first) { /* * setup the parameters to send to the IPI to read the data. 
*/ rr->rgrp = rdtgrp; rr->evtid = evtid; rr->r = r; rr->d = d; rr->val = 0; rr->first = first; smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); } int rdtgroup_mondata_show(struct seq_file *m, void *arg) { struct kernfs_open_file *of = m->private; u32 resid, evtid, domid; struct rdtgroup *rdtgrp; struct rdt_resource *r; union mon_data_bits md; struct rdt_domain *d; struct rmid_read rr; int ret = 0; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { ret = -ENOENT; goto out; } md.priv = of->kn->priv; resid = md.u.rid; domid = md.u.domid; evtid = md.u.evtid; r = &rdt_resources_all[resid].r_resctrl; d = rdt_find_domain(r, domid, NULL); if (IS_ERR_OR_NULL(d)) { ret = -ENOENT; goto out; } mon_event_read(&rr, r, d, rdtgrp, evtid, false); if (rr.err == -EIO) seq_puts(m, "Error\n"); else if (rr.err == -EINVAL) seq_puts(m, "Unavailable\n"); else seq_printf(m, "%llu\n", rr.val); out: rdtgroup_kn_unlock(of->kn); return ret; }
linux-master
arch/x86/kernel/cpu/resctrl/ctrlmondata.c
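cbm_validate() in the ctrlmondata.c record above accepts only a single contiguous run of 1s of at least min_cbm_bits when sparse bitmasks are not supported. The user-space sketch below mirrors its three steps (first set bit, next zero bit, any later set bit) with plain loops instead of the kernel's find_*_bit() helpers; it assumes cbm_len is smaller than the width of unsigned long, and the parameters in main() are made up for the example.

#include <stdbool.h>
#include <stdio.h>

static bool cbm_ok(unsigned long val, unsigned int cbm_len, unsigned int min_bits)
{
        unsigned int first_bit, zero_bit, i;

        if (val == 0 || val >= (1UL << cbm_len))
                return false;                           /* "Mask out of range" */

        for (first_bit = 0; !(val & (1UL << first_bit)); first_bit++)
                ;                                       /* find_first_bit() */

        for (zero_bit = first_bit; zero_bit < cbm_len && (val & (1UL << zero_bit)); zero_bit++)
                ;                                       /* find_next_zero_bit() */

        for (i = zero_bit; i < cbm_len; i++)            /* find_next_bit() */
                if (val & (1UL << i))
                        return false;                   /* non-consecutive 1-bits */

        return (zero_bit - first_bit) >= min_bits;      /* "Need at least N bits" */
}

int main(void)
{
        printf("0x3c -> %d\n", cbm_ok(0x3c, 20, 2));    /* contiguous run: prints 1 */
        printf("0xc3 -> %d\n", cbm_ok(0xc3, 20, 2));    /* hole in the mask: prints 0 */
        return 0;
}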
// SPDX-License-Identifier: GPL-2.0 /* * Resource Director Technology (RDT) * * Pseudo-locking support built on top of Cache Allocation Technology (CAT) * * Copyright (C) 2018 Intel Corporation * * Author: Reinette Chatre <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cacheinfo.h> #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/debugfs.h> #include <linux/kthread.h> #include <linux/mman.h> #include <linux/perf_event.h> #include <linux/pm_qos.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/intel-family.h> #include <asm/resctrl.h> #include <asm/perf_event.h> #include "../../events/perf_event.h" /* For X86_CONFIG() */ #include "internal.h" #define CREATE_TRACE_POINTS #include "pseudo_lock_event.h" /* * The bits needed to disable hardware prefetching varies based on the * platform. During initialization we will discover which bits to use. */ static u64 prefetch_disable_bits; /* * Major number assigned to and shared by all devices exposing * pseudo-locked regions. */ static unsigned int pseudo_lock_major; static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0); static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode) { const struct rdtgroup *rdtgrp; rdtgrp = dev_get_drvdata(dev); if (mode) *mode = 0600; return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name); } static const struct class pseudo_lock_class = { .name = "pseudo_lock", .devnode = pseudo_lock_devnode, }; /** * get_prefetch_disable_bits - prefetch disable bits of supported platforms * @void: It takes no parameters. * * Capture the list of platforms that have been validated to support * pseudo-locking. This includes testing to ensure pseudo-locked regions * with low cache miss rates can be created under variety of load conditions * as well as that these pseudo-locked regions can maintain their low cache * miss rates under variety of load conditions for significant lengths of time. * * After a platform has been validated to support pseudo-locking its * hardware prefetch disable bits are included here as they are documented * in the SDM. * * When adding a platform here also add support for its cache events to * measure_cycles_perf_fn() * * Return: * If platform is supported, the bits to disable hardware prefetchers, 0 * if platform is not supported. */ static u64 get_prefetch_disable_bits(void) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || boot_cpu_data.x86 != 6) return 0; switch (boot_cpu_data.x86_model) { case INTEL_FAM6_BROADWELL_X: /* * SDM defines bits of MSR_MISC_FEATURE_CONTROL register * as: * 0 L2 Hardware Prefetcher Disable (R/W) * 1 L2 Adjacent Cache Line Prefetcher Disable (R/W) * 2 DCU Hardware Prefetcher Disable (R/W) * 3 DCU IP Prefetcher Disable (R/W) * 63:4 Reserved */ return 0xF; case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_FAM6_ATOM_GOLDMONT_PLUS: /* * SDM defines bits of MSR_MISC_FEATURE_CONTROL register * as: * 0 L2 Hardware Prefetcher Disable (R/W) * 1 Reserved * 2 DCU Hardware Prefetcher Disable (R/W) * 63:3 Reserved */ return 0x5; } return 0; } /** * pseudo_lock_minor_get - Obtain available minor number * @minor: Pointer to where new minor number will be stored * * A bitmask is used to track available minor numbers. Here the next free * minor number is marked as unavailable and returned. * * Return: 0 on success, <0 on failure. 
*/ static int pseudo_lock_minor_get(unsigned int *minor) { unsigned long first_bit; first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS); if (first_bit == MINORBITS) return -ENOSPC; __clear_bit(first_bit, &pseudo_lock_minor_avail); *minor = first_bit; return 0; } /** * pseudo_lock_minor_release - Return minor number to available * @minor: The minor number made available */ static void pseudo_lock_minor_release(unsigned int minor) { __set_bit(minor, &pseudo_lock_minor_avail); } /** * region_find_by_minor - Locate a pseudo-lock region by inode minor number * @minor: The minor number of the device representing pseudo-locked region * * When the character device is accessed we need to determine which * pseudo-locked region it belongs to. This is done by matching the minor * number of the device to the pseudo-locked region it belongs. * * Minor numbers are assigned at the time a pseudo-locked region is associated * with a cache instance. * * Return: On success return pointer to resource group owning the pseudo-locked * region, NULL on failure. */ static struct rdtgroup *region_find_by_minor(unsigned int minor) { struct rdtgroup *rdtgrp, *rdtgrp_match = NULL; list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { if (rdtgrp->plr && rdtgrp->plr->minor == minor) { rdtgrp_match = rdtgrp; break; } } return rdtgrp_match; } /** * struct pseudo_lock_pm_req - A power management QoS request list entry * @list: Entry within the @pm_reqs list for a pseudo-locked region * @req: PM QoS request */ struct pseudo_lock_pm_req { struct list_head list; struct dev_pm_qos_request req; }; static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) { struct pseudo_lock_pm_req *pm_req, *next; list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { dev_pm_qos_remove_request(&pm_req->req); list_del(&pm_req->list); kfree(pm_req); } } /** * pseudo_lock_cstates_constrain - Restrict cores from entering C6 * @plr: Pseudo-locked region * * To prevent the cache from being affected by power management entering * C6 has to be avoided. This is accomplished by requesting a latency * requirement lower than lowest C6 exit latency of all supported * platforms as found in the cpuidle state tables in the intel_idle driver. * At this time it is possible to do so with a single latency requirement * for all supported platforms. * * Since Goldmont is supported, which is affected by X86_BUG_MONITOR, * the ACPI latencies need to be considered while keeping in mind that C2 * may be set to map to deeper sleep states. In this case the latency * requirement needs to prevent entering C2 also. * * Return: 0 on success, <0 on failure */ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) { struct pseudo_lock_pm_req *pm_req; int cpu; int ret; for_each_cpu(cpu, &plr->d->cpu_mask) { pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); if (!pm_req) { rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n"); ret = -ENOMEM; goto out_err; } ret = dev_pm_qos_add_request(get_cpu_device(cpu), &pm_req->req, DEV_PM_QOS_RESUME_LATENCY, 30); if (ret < 0) { rdt_last_cmd_printf("Failed to add latency req CPU%d\n", cpu); kfree(pm_req); ret = -1; goto out_err; } list_add(&pm_req->list, &plr->pm_reqs); } return 0; out_err: pseudo_lock_cstates_relax(plr); return ret; } /** * pseudo_lock_region_clear - Reset pseudo-lock region data * @plr: pseudo-lock region * * All content of the pseudo-locked region is reset - any memory allocated * freed. 
* * Return: void */ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) { plr->size = 0; plr->line_size = 0; kfree(plr->kmem); plr->kmem = NULL; plr->s = NULL; if (plr->d) plr->d->plr = NULL; plr->d = NULL; plr->cbm = 0; plr->debugfs_dir = NULL; } /** * pseudo_lock_region_init - Initialize pseudo-lock region information * @plr: pseudo-lock region * * Called after user provided a schemata to be pseudo-locked. From the * schemata the &struct pseudo_lock_region is on entry already initialized * with the resource, domain, and capacity bitmask. Here the information * required for pseudo-locking is deduced from this data and &struct * pseudo_lock_region initialized further. This information includes: * - size in bytes of the region to be pseudo-locked * - cache line size to know the stride with which data needs to be accessed * to be pseudo-locked * - a cpu associated with the cache instance on which the pseudo-locking * flow can be executed * * Return: 0 on success, <0 on failure. Descriptive error will be written * to last_cmd_status buffer. */ static int pseudo_lock_region_init(struct pseudo_lock_region *plr) { struct cpu_cacheinfo *ci; int ret; int i; /* Pick the first cpu we find that is associated with the cache. */ plr->cpu = cpumask_first(&plr->d->cpu_mask); if (!cpu_online(plr->cpu)) { rdt_last_cmd_printf("CPU %u associated with cache not online\n", plr->cpu); ret = -ENODEV; goto out_region; } ci = get_cpu_cacheinfo(plr->cpu); plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm); for (i = 0; i < ci->num_leaves; i++) { if (ci->info_list[i].level == plr->s->res->cache_level) { plr->line_size = ci->info_list[i].coherency_line_size; return 0; } } ret = -1; rdt_last_cmd_puts("Unable to determine cache line size\n"); out_region: pseudo_lock_region_clear(plr); return ret; } /** * pseudo_lock_init - Initialize a pseudo-lock region * @rdtgrp: resource group to which new pseudo-locked region will belong * * A pseudo-locked region is associated with a resource group. When this * association is created the pseudo-locked region is initialized. The * details of the pseudo-locked region are not known at this time so only * allocation is done and association established. * * Return: 0 on success, <0 on failure */ static int pseudo_lock_init(struct rdtgroup *rdtgrp) { struct pseudo_lock_region *plr; plr = kzalloc(sizeof(*plr), GFP_KERNEL); if (!plr) return -ENOMEM; init_waitqueue_head(&plr->lock_thread_wq); INIT_LIST_HEAD(&plr->pm_reqs); rdtgrp->plr = plr; return 0; } /** * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked * @plr: pseudo-lock region * * Initialize the details required to set up the pseudo-locked region and * allocate the contiguous memory that will be pseudo-locked to the cache. * * Return: 0 on success, <0 on failure. Descriptive error will be written * to last_cmd_status buffer. */ static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) { int ret; ret = pseudo_lock_region_init(plr); if (ret < 0) return ret; /* * We do not yet support contiguous regions larger than * KMALLOC_MAX_SIZE. 
*/ if (plr->size > KMALLOC_MAX_SIZE) { rdt_last_cmd_puts("Requested region exceeds maximum size\n"); ret = -E2BIG; goto out_region; } plr->kmem = kzalloc(plr->size, GFP_KERNEL); if (!plr->kmem) { rdt_last_cmd_puts("Unable to allocate memory\n"); ret = -ENOMEM; goto out_region; } ret = 0; goto out; out_region: pseudo_lock_region_clear(plr); out: return ret; } /** * pseudo_lock_free - Free a pseudo-locked region * @rdtgrp: resource group to which pseudo-locked region belonged * * The pseudo-locked region's resources have already been released, or not * yet created at this point. Now it can be freed and disassociated from the * resource group. * * Return: void */ static void pseudo_lock_free(struct rdtgroup *rdtgrp) { pseudo_lock_region_clear(rdtgrp->plr); kfree(rdtgrp->plr); rdtgrp->plr = NULL; } /** * pseudo_lock_fn - Load kernel memory into cache * @_rdtgrp: resource group to which pseudo-lock region belongs * * This is the core pseudo-locking flow. * * First we ensure that the kernel memory cannot be found in the cache. * Then, while taking care that there will be as little interference as * possible, the memory to be loaded is accessed while core is running * with class of service set to the bitmask of the pseudo-locked region. * After this is complete no future CAT allocations will be allowed to * overlap with this bitmask. * * Local register variables are utilized to ensure that the memory region * to be locked is the only memory access made during the critical locking * loop. * * Return: 0. Waiter on waitqueue will be woken on completion. */ static int pseudo_lock_fn(void *_rdtgrp) { struct rdtgroup *rdtgrp = _rdtgrp; struct pseudo_lock_region *plr = rdtgrp->plr; u32 rmid_p, closid_p; unsigned long i; u64 saved_msr; #ifdef CONFIG_KASAN /* * The registers used for local register variables are also used * when KASAN is active. When KASAN is active we use a regular * variable to ensure we always use a valid pointer, but the cost * is that this variable will enter the cache through evicting the * memory we are trying to lock into the cache. Thus expect lower * pseudo-locking success rate when KASAN is active. */ unsigned int line_size; unsigned int size; void *mem_r; #else register unsigned int line_size asm("esi"); register unsigned int size asm("edi"); register void *mem_r asm(_ASM_BX); #endif /* CONFIG_KASAN */ /* * Make sure none of the allocated memory is cached. If it is we * will get a cache hit in below loop from outside of pseudo-locked * region. * wbinvd (as opposed to clflush/clflushopt) is required to * increase likelihood that allocated cache portion will be filled * with associated memory. */ native_wbinvd(); /* * Always called with interrupts enabled. By disabling interrupts * ensure that we will not be preempted during this critical section. */ local_irq_disable(); /* * Call wrmsr and rdmsr as directly as possible to avoid tracing * clobbering local register variables or affecting cache accesses. * * Disable the hardware prefetcher so that when the end of the memory * being pseudo-locked is reached the hardware will not read beyond * the buffer and evict pseudo-locked memory read earlier from the * cache. 
*/ saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL); __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); closid_p = this_cpu_read(pqr_state.cur_closid); rmid_p = this_cpu_read(pqr_state.cur_rmid); mem_r = plr->kmem; size = plr->size; line_size = plr->line_size; /* * Critical section begin: start by writing the closid associated * with the capacity bitmask of the cache region being * pseudo-locked followed by reading of kernel memory to load it * into the cache. */ __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, rdtgrp->closid); /* * Cache was flushed earlier. Now access kernel memory to read it * into cache region associated with just activated plr->closid. * Loop over data twice: * - In first loop the cache region is shared with the page walker * as it populates the paging structure caches (including TLB). * - In the second loop the paging structure caches are used and * cache region is populated with the memory being referenced. */ for (i = 0; i < size; i += PAGE_SIZE) { /* * Add a barrier to prevent speculative execution of this * loop reading beyond the end of the buffer. */ rmb(); asm volatile("mov (%0,%1,1), %%eax\n\t" : : "r" (mem_r), "r" (i) : "%eax", "memory"); } for (i = 0; i < size; i += line_size) { /* * Add a barrier to prevent speculative execution of this * loop reading beyond the end of the buffer. */ rmb(); asm volatile("mov (%0,%1,1), %%eax\n\t" : : "r" (mem_r), "r" (i) : "%eax", "memory"); } /* * Critical section end: restore closid with capacity bitmask that * does not overlap with pseudo-locked region. */ __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p); /* Re-enable the hardware prefetcher(s) */ wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr); local_irq_enable(); plr->thread_done = 1; wake_up_interruptible(&plr->lock_thread_wq); return 0; } /** * rdtgroup_monitor_in_progress - Test if monitoring in progress * @rdtgrp: resource group being queried * * Return: 1 if monitor groups have been created for this resource * group, 0 otherwise. */ static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp) { return !list_empty(&rdtgrp->mon.crdtgrp_list); } /** * rdtgroup_locksetup_user_restrict - Restrict user access to group * @rdtgrp: resource group needing access restricted * * A resource group used for cache pseudo-locking cannot have cpus or tasks * assigned to it. This is communicated to the user by restricting access * to all the files that can be used to make such changes. * * Permissions restored with rdtgroup_locksetup_user_restore() * * Return: 0 on success, <0 on failure. If a failure occurs during the * restriction of access an attempt will be made to restore permissions but * the state of the mode of these files will be uncertain when a failure * occurs. 
*/ static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp) { int ret; ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); if (ret) return ret; ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); if (ret) goto err_tasks; ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); if (ret) goto err_cpus; if (rdt_mon_capable) { ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups"); if (ret) goto err_cpus_list; } ret = 0; goto out; err_cpus_list: rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); err_cpus: rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); err_tasks: rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); out: return ret; } /** * rdtgroup_locksetup_user_restore - Restore user access to group * @rdtgrp: resource group needing access restored * * Restore all file access previously removed using * rdtgroup_locksetup_user_restrict() * * Return: 0 on success, <0 on failure. If a failure occurs during the * restoration of access an attempt will be made to restrict permissions * again but the state of the mode of these files will be uncertain when * a failure occurs. */ static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp) { int ret; ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); if (ret) return ret; ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); if (ret) goto err_tasks; ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); if (ret) goto err_cpus; if (rdt_mon_capable) { ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777); if (ret) goto err_cpus_list; } ret = 0; goto out; err_cpus_list: rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); err_cpus: rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); err_tasks: rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); out: return ret; } /** * rdtgroup_locksetup_enter - Resource group enters locksetup mode * @rdtgrp: resource group requested to enter locksetup mode * * A resource group enters locksetup mode to reflect that it would be used * to represent a pseudo-locked region and is in the process of being set * up to do so. A resource group used for a pseudo-locked region would * lose the closid associated with it so we cannot allow it to have any * tasks or cpus assigned nor permit tasks or cpus to be assigned in the * future. Monitoring of a pseudo-locked region is not allowed either. * * The above and more restrictions on a pseudo-locked region are checked * for and enforced before the resource group enters the locksetup mode. * * Returns: 0 if the resource group successfully entered locksetup mode, <0 * on failure. On failure the last_cmd_status buffer is updated with text to * communicate details of failure to the user. */ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) { int ret; /* * The default resource group can neither be removed nor lose the * default closid associated with it. */ if (rdtgrp == &rdtgroup_default) { rdt_last_cmd_puts("Cannot pseudo-lock default group\n"); return -EINVAL; } /* * Cache Pseudo-locking not supported when CDP is enabled. * * Some things to consider if you would like to enable this * support (using L3 CDP as example): * - When CDP is enabled two separate resources are exposed, * L3DATA and L3CODE, but they are actually on the same cache. * The implication for pseudo-locking is that if a * pseudo-locked region is created on a domain of one * resource (eg. L3CODE), then a pseudo-locked region cannot * be created on that same domain of the other resource * (eg. L3DATA). 
This is because the creation of a * pseudo-locked region involves a call to wbinvd that will * affect all cache allocations on particular domain. * - Considering the previous, it may be possible to only * expose one of the CDP resources to pseudo-locking and * hide the other. For example, we could consider to only * expose L3DATA and since the L3 cache is unified it is * still possible to place instructions there are execute it. * - If only one region is exposed to pseudo-locking we should * still keep in mind that availability of a portion of cache * for pseudo-locking should take into account both resources. * Similarly, if a pseudo-locked region is created in one * resource, the portion of cache used by it should be made * unavailable to all future allocations from both resources. */ if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) || resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) { rdt_last_cmd_puts("CDP enabled\n"); return -EINVAL; } /* * Not knowing the bits to disable prefetching implies that this * platform does not support Cache Pseudo-Locking. */ prefetch_disable_bits = get_prefetch_disable_bits(); if (prefetch_disable_bits == 0) { rdt_last_cmd_puts("Pseudo-locking not supported\n"); return -EINVAL; } if (rdtgroup_monitor_in_progress(rdtgrp)) { rdt_last_cmd_puts("Monitoring in progress\n"); return -EINVAL; } if (rdtgroup_tasks_assigned(rdtgrp)) { rdt_last_cmd_puts("Tasks assigned to resource group\n"); return -EINVAL; } if (!cpumask_empty(&rdtgrp->cpu_mask)) { rdt_last_cmd_puts("CPUs assigned to resource group\n"); return -EINVAL; } if (rdtgroup_locksetup_user_restrict(rdtgrp)) { rdt_last_cmd_puts("Unable to modify resctrl permissions\n"); return -EIO; } ret = pseudo_lock_init(rdtgrp); if (ret) { rdt_last_cmd_puts("Unable to init pseudo-lock region\n"); goto out_release; } /* * If this system is capable of monitoring a rmid would have been * allocated when the control group was created. This is not needed * anymore when this group would be used for pseudo-locking. This * is safe to call on platforms not capable of monitoring. */ free_rmid(rdtgrp->mon.rmid); ret = 0; goto out; out_release: rdtgroup_locksetup_user_restore(rdtgrp); out: return ret; } /** * rdtgroup_locksetup_exit - resource group exist locksetup mode * @rdtgrp: resource group * * When a resource group exits locksetup mode the earlier restrictions are * lifted. * * Return: 0 on success, <0 on failure */ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) { int ret; if (rdt_mon_capable) { ret = alloc_rmid(); if (ret < 0) { rdt_last_cmd_puts("Out of RMIDs\n"); return ret; } rdtgrp->mon.rmid = ret; } ret = rdtgroup_locksetup_user_restore(rdtgrp); if (ret) { free_rmid(rdtgrp->mon.rmid); return ret; } pseudo_lock_free(rdtgrp); return 0; } /** * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked * @d: RDT domain * @cbm: CBM to test * * @d represents a cache instance and @cbm a capacity bitmask that is * considered for it. Determine if @cbm overlaps with any existing * pseudo-locked region on @d. * * @cbm is unsigned long, even if only 32 bits are used, to make the * bitmap functions work correctly. * * Return: true if @cbm overlaps with pseudo-locked region on @d, false * otherwise. 
*/ bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) { unsigned int cbm_len; unsigned long cbm_b; if (d->plr) { cbm_len = d->plr->s->res->cache.cbm_len; cbm_b = d->plr->cbm; if (bitmap_intersects(&cbm, &cbm_b, cbm_len)) return true; } return false; } /** * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy * @d: RDT domain under test * * The setup of a pseudo-locked region affects all cache instances within * the hierarchy of the region. It is thus essential to know if any * pseudo-locked regions exist within a cache hierarchy to prevent any * attempts to create new pseudo-locked regions in the same hierarchy. * * Return: true if a pseudo-locked region exists in the hierarchy of @d or * if it is not possible to test due to memory allocation issue, * false otherwise. */ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) { cpumask_var_t cpu_with_psl; struct rdt_resource *r; struct rdt_domain *d_i; bool ret = false; if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL)) return true; /* * First determine which cpus have pseudo-locked regions * associated with them. */ for_each_alloc_capable_rdt_resource(r) { list_for_each_entry(d_i, &r->domains, list) { if (d_i->plr) cpumask_or(cpu_with_psl, cpu_with_psl, &d_i->cpu_mask); } } /* * Next test if new pseudo-locked region would intersect with * existing region. */ if (cpumask_intersects(&d->cpu_mask, cpu_with_psl)) ret = true; free_cpumask_var(cpu_with_psl); return ret; } /** * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory * @_plr: pseudo-lock region to measure * * There is no deterministic way to test if a memory region is cached. One * way is to measure how long it takes to read the memory, the speed of * access is a good way to learn how close to the cpu the data was. Even * more, if the prefetcher is disabled and the memory is read at a stride * of half the cache line, then a cache miss will be easy to spot since the * read of the first half would be significantly slower than the read of * the second half. * * Return: 0. Waiter on waitqueue will be woken on completion. */ static int measure_cycles_lat_fn(void *_plr) { struct pseudo_lock_region *plr = _plr; u32 saved_low, saved_high; unsigned long i; u64 start, end; void *mem_r; local_irq_disable(); /* * Disable hardware prefetchers. */ rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); mem_r = READ_ONCE(plr->kmem); /* * Dummy execute of the time measurement to load the needed * instructions into the L1 instruction cache. */ start = rdtsc_ordered(); for (i = 0; i < plr->size; i += 32) { start = rdtsc_ordered(); asm volatile("mov (%0,%1,1), %%eax\n\t" : : "r" (mem_r), "r" (i) : "%eax", "memory"); end = rdtsc_ordered(); trace_pseudo_lock_mem_latency((u32)(end - start)); } wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); local_irq_enable(); plr->thread_done = 1; wake_up_interruptible(&plr->lock_thread_wq); return 0; } /* * Create a perf_event_attr for the hit and miss perf events that will * be used during the performance measurement. A perf_event maintains * a pointer to its perf_event_attr so a unique attribute structure is * created for each perf_event. * * The actual configuration of the event is set right before use in order * to use the X86_CONFIG macro. 
*/ static struct perf_event_attr perf_miss_attr = { .type = PERF_TYPE_RAW, .size = sizeof(struct perf_event_attr), .pinned = 1, .disabled = 0, .exclude_user = 1, }; static struct perf_event_attr perf_hit_attr = { .type = PERF_TYPE_RAW, .size = sizeof(struct perf_event_attr), .pinned = 1, .disabled = 0, .exclude_user = 1, }; struct residency_counts { u64 miss_before, hits_before; u64 miss_after, hits_after; }; static int measure_residency_fn(struct perf_event_attr *miss_attr, struct perf_event_attr *hit_attr, struct pseudo_lock_region *plr, struct residency_counts *counts) { u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0; struct perf_event *miss_event, *hit_event; int hit_pmcnum, miss_pmcnum; u32 saved_low, saved_high; unsigned int line_size; unsigned int size; unsigned long i; void *mem_r; u64 tmp; miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu, NULL, NULL, NULL); if (IS_ERR(miss_event)) goto out; hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu, NULL, NULL, NULL); if (IS_ERR(hit_event)) goto out_miss; local_irq_disable(); /* * Check any possible error state of events used by performing * one local read. */ if (perf_event_read_local(miss_event, &tmp, NULL, NULL)) { local_irq_enable(); goto out_hit; } if (perf_event_read_local(hit_event, &tmp, NULL, NULL)) { local_irq_enable(); goto out_hit; } /* * Disable hardware prefetchers. */ rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); /* Initialize rest of local variables */ /* * Performance event has been validated right before this with * interrupts disabled - it is thus safe to read the counter index. */ miss_pmcnum = x86_perf_rdpmc_index(miss_event); hit_pmcnum = x86_perf_rdpmc_index(hit_event); line_size = READ_ONCE(plr->line_size); mem_r = READ_ONCE(plr->kmem); size = READ_ONCE(plr->size); /* * Read counter variables twice - first to load the instructions * used in L1 cache, second to capture accurate value that does not * include cache misses incurred because of instruction loads. */ rdpmcl(hit_pmcnum, hits_before); rdpmcl(miss_pmcnum, miss_before); /* * From SDM: Performing back-to-back fast reads are not guaranteed * to be monotonic. * Use LFENCE to ensure all previous instructions are retired * before proceeding. */ rmb(); rdpmcl(hit_pmcnum, hits_before); rdpmcl(miss_pmcnum, miss_before); /* * Use LFENCE to ensure all previous instructions are retired * before proceeding. */ rmb(); for (i = 0; i < size; i += line_size) { /* * Add a barrier to prevent speculative execution of this * loop reading beyond the end of the buffer. */ rmb(); asm volatile("mov (%0,%1,1), %%eax\n\t" : : "r" (mem_r), "r" (i) : "%eax", "memory"); } /* * Use LFENCE to ensure all previous instructions are retired * before proceeding. */ rmb(); rdpmcl(hit_pmcnum, hits_after); rdpmcl(miss_pmcnum, miss_after); /* * Use LFENCE to ensure all previous instructions are retired * before proceeding. */ rmb(); /* Re-enable hardware prefetchers */ wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); local_irq_enable(); out_hit: perf_event_release_kernel(hit_event); out_miss: perf_event_release_kernel(miss_event); out: /* * All counts will be zero on failure. 
*/ counts->miss_before = miss_before; counts->hits_before = hits_before; counts->miss_after = miss_after; counts->hits_after = hits_after; return 0; } static int measure_l2_residency(void *_plr) { struct pseudo_lock_region *plr = _plr; struct residency_counts counts = {0}; /* * Non-architectural event for the Goldmont Microarchitecture * from Intel x86 Architecture Software Developer Manual (SDM): * MEM_LOAD_UOPS_RETIRED D1H (event number) * Umask values: * L2_HIT 02H * L2_MISS 10H */ switch (boot_cpu_data.x86_model) { case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_FAM6_ATOM_GOLDMONT_PLUS: perf_miss_attr.config = X86_CONFIG(.event = 0xd1, .umask = 0x10); perf_hit_attr.config = X86_CONFIG(.event = 0xd1, .umask = 0x2); break; default: goto out; } measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts); /* * If a failure prevented the measurements from succeeding * tracepoints will still be written and all counts will be zero. */ trace_pseudo_lock_l2(counts.hits_after - counts.hits_before, counts.miss_after - counts.miss_before); out: plr->thread_done = 1; wake_up_interruptible(&plr->lock_thread_wq); return 0; } static int measure_l3_residency(void *_plr) { struct pseudo_lock_region *plr = _plr; struct residency_counts counts = {0}; /* * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event * has two "no fix" errata associated with it: BDM35 and BDM100. On * this platform the following events are used instead: * LONGEST_LAT_CACHE 2EH (Documented in SDM) * REFERENCE 4FH * MISS 41H */ switch (boot_cpu_data.x86_model) { case INTEL_FAM6_BROADWELL_X: /* On BDW the hit event counts references, not hits */ perf_hit_attr.config = X86_CONFIG(.event = 0x2e, .umask = 0x4f); perf_miss_attr.config = X86_CONFIG(.event = 0x2e, .umask = 0x41); break; default: goto out; } measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts); /* * If a failure prevented the measurements from succeeding * tracepoints will still be written and all counts will be zero. */ counts.miss_after -= counts.miss_before; if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X) { /* * On BDW references and misses are counted, need to adjust. * Sometimes the "hits" counter is a bit more than the * references, for example, x references but x + 1 hits. * To not report invalid hit values in this case we treat * that as misses equal to references. */ /* First compute the number of cache references measured */ counts.hits_after -= counts.hits_before; /* Next convert references to cache hits */ counts.hits_after -= min(counts.miss_after, counts.hits_after); } else { counts.hits_after -= counts.hits_before; } trace_pseudo_lock_l3(counts.hits_after, counts.miss_after); out: plr->thread_done = 1; wake_up_interruptible(&plr->lock_thread_wq); return 0; } /** * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region * @rdtgrp: Resource group to which the pseudo-locked region belongs. * @sel: Selector of which measurement to perform on a pseudo-locked region. * * The measurement of latency to access a pseudo-locked region should be * done from a cpu that is associated with that pseudo-locked region. * Determine which cpu is associated with this region and start a thread on * that cpu to perform the measurement, wait for that thread to complete. 
* * Return: 0 on success, <0 on failure */ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) { struct pseudo_lock_region *plr = rdtgrp->plr; struct task_struct *thread; unsigned int cpu; int ret = -1; cpus_read_lock(); mutex_lock(&rdtgroup_mutex); if (rdtgrp->flags & RDT_DELETED) { ret = -ENODEV; goto out; } if (!plr->d) { ret = -ENODEV; goto out; } plr->thread_done = 0; cpu = cpumask_first(&plr->d->cpu_mask); if (!cpu_online(cpu)) { ret = -ENODEV; goto out; } plr->cpu = cpu; if (sel == 1) thread = kthread_create_on_node(measure_cycles_lat_fn, plr, cpu_to_node(cpu), "pseudo_lock_measure/%u", cpu); else if (sel == 2) thread = kthread_create_on_node(measure_l2_residency, plr, cpu_to_node(cpu), "pseudo_lock_measure/%u", cpu); else if (sel == 3) thread = kthread_create_on_node(measure_l3_residency, plr, cpu_to_node(cpu), "pseudo_lock_measure/%u", cpu); else goto out; if (IS_ERR(thread)) { ret = PTR_ERR(thread); goto out; } kthread_bind(thread, cpu); wake_up_process(thread); ret = wait_event_interruptible(plr->lock_thread_wq, plr->thread_done == 1); if (ret < 0) goto out; ret = 0; out: mutex_unlock(&rdtgroup_mutex); cpus_read_unlock(); return ret; } static ssize_t pseudo_lock_measure_trigger(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct rdtgroup *rdtgrp = file->private_data; size_t buf_size; char buf[32]; int ret; int sel; buf_size = min(count, (sizeof(buf) - 1)); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; buf[buf_size] = '\0'; ret = kstrtoint(buf, 10, &sel); if (ret == 0) { if (sel != 1 && sel != 2 && sel != 3) return -EINVAL; ret = debugfs_file_get(file->f_path.dentry); if (ret) return ret; ret = pseudo_lock_measure_cycles(rdtgrp, sel); if (ret == 0) ret = count; debugfs_file_put(file->f_path.dentry); } return ret; } static const struct file_operations pseudo_measure_fops = { .write = pseudo_lock_measure_trigger, .open = simple_open, .llseek = default_llseek, }; /** * rdtgroup_pseudo_lock_create - Create a pseudo-locked region * @rdtgrp: resource group to which pseudo-lock region belongs * * Called when a resource group in the pseudo-locksetup mode receives a * valid schemata that should be pseudo-locked. Since the resource group is * in pseudo-locksetup mode the &struct pseudo_lock_region has already been * allocated and initialized with the essential information. If a failure * occurs the resource group remains in the pseudo-locksetup mode with the * &struct pseudo_lock_region associated with it, but cleared from all * information and ready for the user to re-attempt pseudo-locking by * writing the schemata again. * * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0 * on failure. Descriptive error will be written to last_cmd_status buffer. 
*/ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) { struct pseudo_lock_region *plr = rdtgrp->plr; struct task_struct *thread; unsigned int new_minor; struct device *dev; int ret; ret = pseudo_lock_region_alloc(plr); if (ret < 0) return ret; ret = pseudo_lock_cstates_constrain(plr); if (ret < 0) { ret = -EINVAL; goto out_region; } plr->thread_done = 0; thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp, cpu_to_node(plr->cpu), "pseudo_lock/%u", plr->cpu); if (IS_ERR(thread)) { ret = PTR_ERR(thread); rdt_last_cmd_printf("Locking thread returned error %d\n", ret); goto out_cstates; } kthread_bind(thread, plr->cpu); wake_up_process(thread); ret = wait_event_interruptible(plr->lock_thread_wq, plr->thread_done == 1); if (ret < 0) { /* * If the thread does not get on the CPU for whatever * reason and the process which sets up the region is * interrupted then this will leave the thread in runnable * state and once it gets on the CPU it will dereference * the cleared, but not freed, plr struct resulting in an * empty pseudo-locking loop. */ rdt_last_cmd_puts("Locking thread interrupted\n"); goto out_cstates; } ret = pseudo_lock_minor_get(&new_minor); if (ret < 0) { rdt_last_cmd_puts("Unable to obtain a new minor number\n"); goto out_cstates; } /* * Unlock access but do not release the reference. The * pseudo-locked region will still be here on return. * * The mutex has to be released temporarily to avoid a potential * deadlock with the mm->mmap_lock which is obtained in the * device_create() and debugfs_create_dir() callpath below as well as * before the mmap() callback is called. */ mutex_unlock(&rdtgroup_mutex); if (!IS_ERR_OR_NULL(debugfs_resctrl)) { plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name, debugfs_resctrl); if (!IS_ERR_OR_NULL(plr->debugfs_dir)) debugfs_create_file("pseudo_lock_measure", 0200, plr->debugfs_dir, rdtgrp, &pseudo_measure_fops); } dev = device_create(&pseudo_lock_class, NULL, MKDEV(pseudo_lock_major, new_minor), rdtgrp, "%s", rdtgrp->kn->name); mutex_lock(&rdtgroup_mutex); if (IS_ERR(dev)) { ret = PTR_ERR(dev); rdt_last_cmd_printf("Failed to create character device: %d\n", ret); goto out_debugfs; } /* We released the mutex - check if group was removed while we did so */ if (rdtgrp->flags & RDT_DELETED) { ret = -ENODEV; goto out_device; } plr->minor = new_minor; rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED; closid_free(rdtgrp->closid); rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444); rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444); ret = 0; goto out; out_device: device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor)); out_debugfs: debugfs_remove_recursive(plr->debugfs_dir); pseudo_lock_minor_release(new_minor); out_cstates: pseudo_lock_cstates_relax(plr); out_region: pseudo_lock_region_clear(plr); out: return ret; } /** * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region * @rdtgrp: resource group to which the pseudo-locked region belongs * * The removal of a pseudo-locked region can be initiated when the resource * group is removed from user space via a "rmdir" from userspace or the * unmount of the resctrl filesystem. On removal the resource group does * not go back to pseudo-locksetup mode before it is removed, instead it is * removed directly. There is thus asymmetry with the creation where the * &struct pseudo_lock_region is removed here while it was not created in * rdtgroup_pseudo_lock_create(). 
* * Return: void */ void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { struct pseudo_lock_region *plr = rdtgrp->plr; if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { /* * Default group cannot be a pseudo-locked region so we can * free closid here. */ closid_free(rdtgrp->closid); goto free; } pseudo_lock_cstates_relax(plr); debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); pseudo_lock_minor_release(plr->minor); free: pseudo_lock_free(rdtgrp); } static int pseudo_lock_dev_open(struct inode *inode, struct file *filp) { struct rdtgroup *rdtgrp; mutex_lock(&rdtgroup_mutex); rdtgrp = region_find_by_minor(iminor(inode)); if (!rdtgrp) { mutex_unlock(&rdtgroup_mutex); return -ENODEV; } filp->private_data = rdtgrp; atomic_inc(&rdtgrp->waitcount); /* Perform a non-seekable open - llseek is not supported */ filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); mutex_unlock(&rdtgroup_mutex); return 0; } static int pseudo_lock_dev_release(struct inode *inode, struct file *filp) { struct rdtgroup *rdtgrp; mutex_lock(&rdtgroup_mutex); rdtgrp = filp->private_data; WARN_ON(!rdtgrp); if (!rdtgrp) { mutex_unlock(&rdtgroup_mutex); return -ENODEV; } filp->private_data = NULL; atomic_dec(&rdtgrp->waitcount); mutex_unlock(&rdtgroup_mutex); return 0; } static int pseudo_lock_dev_mremap(struct vm_area_struct *area) { /* Not supported */ return -EINVAL; } static const struct vm_operations_struct pseudo_mmap_ops = { .mremap = pseudo_lock_dev_mremap, }; static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) { unsigned long vsize = vma->vm_end - vma->vm_start; unsigned long off = vma->vm_pgoff << PAGE_SHIFT; struct pseudo_lock_region *plr; struct rdtgroup *rdtgrp; unsigned long physical; unsigned long psize; mutex_lock(&rdtgroup_mutex); rdtgrp = filp->private_data; WARN_ON(!rdtgrp); if (!rdtgrp) { mutex_unlock(&rdtgroup_mutex); return -ENODEV; } plr = rdtgrp->plr; if (!plr->d) { mutex_unlock(&rdtgroup_mutex); return -ENODEV; } /* * Task is required to run with affinity to the cpus associated * with the pseudo-locked region. If this is not the case the task * may be scheduled elsewhere and invalidate entries in the * pseudo-locked region. */ if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { mutex_unlock(&rdtgroup_mutex); return -EINVAL; } physical = __pa(plr->kmem) >> PAGE_SHIFT; psize = plr->size - off; if (off > plr->size) { mutex_unlock(&rdtgroup_mutex); return -ENOSPC; } /* * Ensure changes are carried directly to the memory being mapped, * do not allow copy-on-write mapping. 
*/ if (!(vma->vm_flags & VM_SHARED)) { mutex_unlock(&rdtgroup_mutex); return -EINVAL; } if (vsize > psize) { mutex_unlock(&rdtgroup_mutex); return -ENOSPC; } memset(plr->kmem + off, 0, vsize); if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff, vsize, vma->vm_page_prot)) { mutex_unlock(&rdtgroup_mutex); return -EAGAIN; } vma->vm_ops = &pseudo_mmap_ops; mutex_unlock(&rdtgroup_mutex); return 0; } static const struct file_operations pseudo_lock_dev_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = NULL, .write = NULL, .open = pseudo_lock_dev_open, .release = pseudo_lock_dev_release, .mmap = pseudo_lock_dev_mmap, }; int rdt_pseudo_lock_init(void) { int ret; ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops); if (ret < 0) return ret; pseudo_lock_major = ret; ret = class_register(&pseudo_lock_class); if (ret) { unregister_chrdev(pseudo_lock_major, "pseudo_lock"); return ret; } return 0; } void rdt_pseudo_lock_release(void) { class_unregister(&pseudo_lock_class); unregister_chrdev(pseudo_lock_major, "pseudo_lock"); pseudo_lock_major = 0; }
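/*
 * Illustrative user-space sketch (not part of the kernel file above):
 * mapping a pseudo-locked region through the character device that
 * rdtgroup_pseudo_lock_create() registers.  pseudo_lock_dev_mmap()
 * requires the caller's CPU affinity to be a subset of the region's
 * cache domain and rejects copy-on-write mappings, so MAP_SHARED is
 * mandatory.  The device path, CPU number and size below are
 * assumptions for a hypothetical group named "newlock".
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *dev = "/dev/pseudo_lock/newlock";	/* assumed path */
	size_t size = 256 * 1024;			/* assumed region size */
	cpu_set_t cpus;
	char *mem;
	int fd;

	/* Run only on a CPU belonging to the pseudo-locked cache domain. */
	CPU_ZERO(&cpus);
	CPU_SET(2, &cpus);				/* assumed CPU */
	if (sched_setaffinity(0, sizeof(cpus), &cpus)) {
		perror("sched_setaffinity");
		return 1;
	}

	fd = open(dev, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* MAP_PRIVATE would fail the VM_SHARED check in the driver. */
	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (mem == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* Data written here is backed by the pseudo-locked kernel memory. */
	memset(mem, 0x5a, size);

	munmap(mem, size);
	close(fd);
	return 0;
}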
linux-master
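/*
 * Sketch of driving the measurements implemented by
 * pseudo_lock_measure_trigger() in the file above: writing "1", "2" or
 * "3" to the group's pseudo_lock_measure debugfs file runs
 * measure_cycles_lat_fn(), measure_l2_residency() or
 * measure_l3_residency() respectively, with results emitted as
 * tracepoints.  The debugfs path assumes the default debugfs mount
 * point and a hypothetical group named "newlock".
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int trigger_measurement(const char *group, int sel)
{
	char path[256], buf[4];
	int fd, len;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/resctrl/%s/pseudo_lock_measure", group);

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;

	len = snprintf(buf, sizeof(buf), "%d", sel);
	if (write(fd, buf, len) != len) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}

int main(void)
{
	/* Selector 1: cycle latency; read the results from the trace buffer. */
	if (trigger_measurement("newlock", 1))
		perror("pseudo_lock_measure");
	return 0;
}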
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
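/*
 * User-space approximation of the probing loop in
 * measure_cycles_lat_fn() from pseudo_lock.c above: read a buffer at a
 * stride of half a cache line and timestamp each access with RDTSC.
 * Unlike the kernel thread it cannot disable interrupts or the
 * hardware prefetchers, so the numbers are only indicative; the buffer
 * size and the 64-byte line size are assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <x86intrin.h>

#define BUF_SIZE	(256 * 1024)
#define STRIDE		32		/* half of an assumed 64-byte line */

int main(void)
{
	uint8_t *mem = calloc(BUF_SIZE, 1);
	uint64_t start, end, total = 0;
	unsigned long i, samples = 0;

	if (!mem)
		return 1;

	for (i = 0; i < BUF_SIZE; i += STRIDE) {
		_mm_lfence();			/* order like rdtsc_ordered() */
		start = __rdtsc();
		(void)*(volatile uint8_t *)&mem[i];	/* one read per sample */
		_mm_lfence();
		end = __rdtsc();
		total += end - start;
		samples++;
	}

	printf("average cycles per read: %llu\n",
	       (unsigned long long)(total / samples));
	free(mem);
	return 0;
}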
// SPDX-License-Identifier: GPL-2.0-only /* * Resource Director Technology(RDT) * - Cache Allocation code. * * Copyright (C) 2016 Intel Corporation * * Authors: * Fenghua Yu <[email protected]> * Tony Luck <[email protected]> * Vikas Shivappa <[email protected]> * * More information about RDT be found in the Intel (R) x86 Architecture * Software Developer Manual June 2016, volume 3, section 17.17. */ #define pr_fmt(fmt) "resctrl: " fmt #include <linux/slab.h> #include <linux/err.h> #include <linux/cacheinfo.h> #include <linux/cpuhotplug.h> #include <asm/intel-family.h> #include <asm/resctrl.h> #include "internal.h" /* Mutex to protect rdtgroup access. */ DEFINE_MUTEX(rdtgroup_mutex); /* * The cached resctrl_pqr_state is strictly per CPU and can never be * updated from a remote CPU. Functions which modify the state * are called with interrupts disabled and no preemption, which * is sufficient for the protection. */ DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state); /* * Used to store the max resource name width and max resource data width * to display the schemata in a tabular format */ int max_name_width, max_data_width; /* * Global boolean for rdt_alloc which is true if any * resource allocation is enabled. */ bool rdt_alloc_capable; static void mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r); static void cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r); static void mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r); #define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains) struct rdt_hw_resource rdt_resources_all[] = { [RDT_RESOURCE_L3] = { .r_resctrl = { .rid = RDT_RESOURCE_L3, .name = "L3", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_L3), .parse_ctrlval = parse_cbm, .format_str = "%d=%0*x", .fflags = RFTYPE_RES_CACHE, }, .msr_base = MSR_IA32_L3_CBM_BASE, .msr_update = cat_wrmsr, }, [RDT_RESOURCE_L2] = { .r_resctrl = { .rid = RDT_RESOURCE_L2, .name = "L2", .cache_level = 2, .domains = domain_init(RDT_RESOURCE_L2), .parse_ctrlval = parse_cbm, .format_str = "%d=%0*x", .fflags = RFTYPE_RES_CACHE, }, .msr_base = MSR_IA32_L2_CBM_BASE, .msr_update = cat_wrmsr, }, [RDT_RESOURCE_MBA] = { .r_resctrl = { .rid = RDT_RESOURCE_MBA, .name = "MB", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_MBA), .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, }, [RDT_RESOURCE_SMBA] = { .r_resctrl = { .rid = RDT_RESOURCE_SMBA, .name = "SMBA", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_SMBA), .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, }, }; /* * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs * as they do not have CPUID enumeration support for Cache allocation. * The check for Vendor/Family/Model is not enough to guarantee that * the MSRs won't #GP fault because only the following SKUs support * CAT: * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz * Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz * Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz * Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz * Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz * Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz * * Probe by trying to write the first of the L3 cache mask registers * and checking that the bits stick. Max CLOSids is always 4 and max cbm length * is always 20 on hsw server parts. The minimum cache bitmask length * allowed for HSW server is always 2 bits. Hardcode all of them. 
*/ static inline void cache_alloc_hsw_probe(void) { struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3]; struct rdt_resource *r = &hw_res->r_resctrl; u32 l, h, max_cbm = BIT_MASK(20) - 1; if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0)) return; rdmsr(MSR_IA32_L3_CBM_BASE, l, h); /* If all the bits were set in MSR, return success */ if (l != max_cbm) return; hw_res->num_closid = 4; r->default_ctrl = max_cbm; r->cache.cbm_len = 20; r->cache.shareable_bits = 0xc0000; r->cache.min_cbm_bits = 2; r->alloc_capable = true; rdt_alloc_capable = true; } bool is_mba_sc(struct rdt_resource *r) { if (!r) return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc; /* * The software controller support is only applicable to MBA resource. * Make sure to check for resource type. */ if (r->rid != RDT_RESOURCE_MBA) return false; return r->membw.mba_sc; } /* * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values * exposed to user interface and the h/w understandable delay values. * * The non-linear delay values have the granularity of power of two * and also the h/w does not guarantee a curve for configured delay * values vs. actual b/w enforced. * Hence we need a mapping that is pre calibrated so the user can * express the memory b/w as a percentage value. */ static inline bool rdt_get_mb_table(struct rdt_resource *r) { /* * There are no Intel SKUs as of now to support non-linear delay. */ pr_info("MBA b/w map not implemented for cpu:%d, model:%d", boot_cpu_data.x86, boot_cpu_data.x86_model); return false; } static bool __get_mem_config_intel(struct rdt_resource *r) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); union cpuid_0x10_3_eax eax; union cpuid_0x10_x_edx edx; u32 ebx, ecx, max_delay; cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full); hw_res->num_closid = edx.split.cos_max + 1; max_delay = eax.split.max_delay + 1; r->default_ctrl = MAX_MBA_BW; r->membw.arch_needs_linear = true; if (ecx & MBA_IS_LINEAR) { r->membw.delay_linear = true; r->membw.min_bw = MAX_MBA_BW - max_delay; r->membw.bw_gran = MAX_MBA_BW - max_delay; } else { if (!rdt_get_mb_table(r)) return false; r->membw.arch_needs_linear = false; } r->data_width = 3; if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA)) r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD; else r->membw.throttle_mode = THREAD_THROTTLE_MAX; thread_throttle_mode_init(); r->alloc_capable = true; return true; } static bool __rdt_get_mem_config_amd(struct rdt_resource *r) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); union cpuid_0x10_3_eax eax; union cpuid_0x10_x_edx edx; u32 ebx, ecx, subleaf; /* * Query CPUID_Fn80000020_EDX_x01 for MBA and * CPUID_Fn80000020_EDX_x02 for SMBA */ subleaf = (r->rid == RDT_RESOURCE_SMBA) ? 2 : 1; cpuid_count(0x80000020, subleaf, &eax.full, &ebx, &ecx, &edx.full); hw_res->num_closid = edx.split.cos_max + 1; r->default_ctrl = MAX_MBA_BW_AMD; /* AMD does not use delay */ r->membw.delay_linear = false; r->membw.arch_needs_linear = false; /* * AMD does not use memory delay throttle model to control * the allocation like Intel does. 
*/ r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED; r->membw.min_bw = 0; r->membw.bw_gran = 1; /* Max value is 2048, Data width should be 4 in decimal */ r->data_width = 4; r->alloc_capable = true; return true; } static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); union cpuid_0x10_1_eax eax; union cpuid_0x10_x_edx edx; u32 ebx, ecx; cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full); hw_res->num_closid = edx.split.cos_max + 1; r->cache.cbm_len = eax.split.cbm_len + 1; r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1; r->cache.shareable_bits = ebx & r->default_ctrl; r->data_width = (r->cache.cbm_len + 3) / 4; r->alloc_capable = true; } static void rdt_get_cdp_config(int level) { /* * By default, CDP is disabled. CDP can be enabled by mount parameter * "cdp" during resctrl file system mount time. */ rdt_resources_all[level].cdp_enabled = false; rdt_resources_all[level].r_resctrl.cdp_capable = true; } static void rdt_get_cdp_l3_config(void) { rdt_get_cdp_config(RDT_RESOURCE_L3); } static void rdt_get_cdp_l2_config(void) { rdt_get_cdp_config(RDT_RESOURCE_L2); } static void mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) { unsigned int i; struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); for (i = m->low; i < m->high; i++) wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]); } /* * Map the memory b/w percentage value to delay values * that can be written to QOS_MSRs. * There are currently no SKUs which support non linear delay values. */ static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r) { if (r->membw.delay_linear) return MAX_MBA_BW - bw; pr_warn_once("Non Linear delay-bw map not supported but queried\n"); return r->default_ctrl; } static void mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) { unsigned int i; struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); /* Write the delay values for mba. */ for (i = m->low; i < m->high; i++) wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r)); } static void cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) { unsigned int i; struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); for (i = m->low; i < m->high; i++) wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]); } struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r) { struct rdt_domain *d; list_for_each_entry(d, &r->domains, list) { /* Find the domain that contains this CPU */ if (cpumask_test_cpu(cpu, &d->cpu_mask)) return d; } return NULL; } u32 resctrl_arch_get_num_closid(struct rdt_resource *r) { return resctrl_to_arch_res(r)->num_closid; } void rdt_ctrl_update(void *arg) { struct msr_param *m = arg; struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res); struct rdt_resource *r = m->res; int cpu = smp_processor_id(); struct rdt_domain *d; d = get_domain_from_cpu(cpu, r); if (d) { hw_res->msr_update(d, m, r); return; } pr_warn_once("cpu %d not found in any domain for resource %s\n", cpu, r->name); } /* * rdt_find_domain - Find a domain in a resource that matches input resource id * * Search resource r's domain list to find the resource id. If the resource * id is found in a domain, return the domain. 
Otherwise, if requested by * caller, return the first domain whose id is bigger than the input id. * The domain list is sorted by id in ascending order. */ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, struct list_head **pos) { struct rdt_domain *d; struct list_head *l; if (id < 0) return ERR_PTR(-ENODEV); list_for_each(l, &r->domains) { d = list_entry(l, struct rdt_domain, list); /* When id is found, return its domain. */ if (id == d->id) return d; /* Stop searching when finding id's position in sorted list. */ if (id < d->id) break; } if (pos) *pos = l; return NULL; } static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); int i; /* * Initialize the Control MSRs to having no control. * For Cache Allocation: Set all bits in cbm * For Memory Allocation: Set b/w requested to 100% */ for (i = 0; i < hw_res->num_closid; i++, dc++) *dc = r->default_ctrl; } static void domain_free(struct rdt_hw_domain *hw_dom) { kfree(hw_dom->arch_mbm_total); kfree(hw_dom->arch_mbm_local); kfree(hw_dom->ctrl_val); kfree(hw_dom); } static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); struct msr_param m; u32 *dc; dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val), GFP_KERNEL); if (!dc) return -ENOMEM; hw_dom->ctrl_val = dc; setup_default_ctrlval(r, dc); m.low = 0; m.high = hw_res->num_closid; hw_res->msr_update(d, &m, r); return 0; } /** * arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters * @num_rmid: The size of the MBM counter array * @hw_dom: The domain that owns the allocated arrays */ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom) { size_t tsize; if (is_mbm_total_enabled()) { tsize = sizeof(*hw_dom->arch_mbm_total); hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL); if (!hw_dom->arch_mbm_total) return -ENOMEM; } if (is_mbm_local_enabled()) { tsize = sizeof(*hw_dom->arch_mbm_local); hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL); if (!hw_dom->arch_mbm_local) { kfree(hw_dom->arch_mbm_total); hw_dom->arch_mbm_total = NULL; return -ENOMEM; } } return 0; } /* * domain_add_cpu - Add a cpu to a resource's domain list. * * If an existing domain in the resource r's domain list matches the cpu's * resource id, add the cpu in the domain. * * Otherwise, a new domain is allocated and inserted into the right position * in the domain list sorted by id in ascending order. * * The order in the domain list is visible to users when we print entries * in the schemata file and schemata input is validated to have the same order * as this list. 
*/ static void domain_add_cpu(int cpu, struct rdt_resource *r) { int id = get_cpu_cacheinfo_id(cpu, r->cache_level); struct list_head *add_pos = NULL; struct rdt_hw_domain *hw_dom; struct rdt_domain *d; int err; d = rdt_find_domain(r, id, &add_pos); if (IS_ERR(d)) { pr_warn("Couldn't find cache id for CPU %d\n", cpu); return; } if (d) { cpumask_set_cpu(cpu, &d->cpu_mask); if (r->cache.arch_has_per_cpu_cfg) rdt_domain_reconfigure_cdp(r); return; } hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu)); if (!hw_dom) return; d = &hw_dom->d_resctrl; d->id = id; cpumask_set_cpu(cpu, &d->cpu_mask); rdt_domain_reconfigure_cdp(r); if (r->alloc_capable && domain_setup_ctrlval(r, d)) { domain_free(hw_dom); return; } if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) { domain_free(hw_dom); return; } list_add_tail(&d->list, add_pos); err = resctrl_online_domain(r, d); if (err) { list_del(&d->list); domain_free(hw_dom); } } static void domain_remove_cpu(int cpu, struct rdt_resource *r) { int id = get_cpu_cacheinfo_id(cpu, r->cache_level); struct rdt_hw_domain *hw_dom; struct rdt_domain *d; d = rdt_find_domain(r, id, NULL); if (IS_ERR_OR_NULL(d)) { pr_warn("Couldn't find cache id for CPU %d\n", cpu); return; } hw_dom = resctrl_to_arch_dom(d); cpumask_clear_cpu(cpu, &d->cpu_mask); if (cpumask_empty(&d->cpu_mask)) { resctrl_offline_domain(r, d); list_del(&d->list); /* * rdt_domain "d" is going to be freed below, so clear * its pointer from pseudo_lock_region struct. */ if (d->plr) d->plr->d = NULL; domain_free(hw_dom); return; } if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) { if (is_mbm_enabled() && cpu == d->mbm_work_cpu) { cancel_delayed_work(&d->mbm_over); mbm_setup_overflow_handler(d, 0); } if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && has_busy_rmid(r, d)) { cancel_delayed_work(&d->cqm_limbo); cqm_setup_limbo_handler(d, 0); } } } static void clear_closid_rmid(int cpu) { struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); state->default_closid = 0; state->default_rmid = 0; state->cur_closid = 0; state->cur_rmid = 0; wrmsr(MSR_IA32_PQR_ASSOC, 0, 0); } static int resctrl_online_cpu(unsigned int cpu) { struct rdt_resource *r; mutex_lock(&rdtgroup_mutex); for_each_capable_rdt_resource(r) domain_add_cpu(cpu, r); /* The cpu is set in default rdtgroup after online. */ cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); clear_closid_rmid(cpu); mutex_unlock(&rdtgroup_mutex); return 0; } static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) { struct rdtgroup *cr; list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) { break; } } } static int resctrl_offline_cpu(unsigned int cpu) { struct rdtgroup *rdtgrp; struct rdt_resource *r; mutex_lock(&rdtgroup_mutex); for_each_capable_rdt_resource(r) domain_remove_cpu(cpu, r); list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { clear_childcpus(rdtgrp, cpu); break; } } clear_closid_rmid(cpu); mutex_unlock(&rdtgroup_mutex); return 0; } /* * Choose a width for the resource name and resource data based on the * resource that has widest name and cbm. 
*/ static __init void rdt_init_padding(void) { struct rdt_resource *r; for_each_alloc_capable_rdt_resource(r) { if (r->data_width > max_data_width) max_data_width = r->data_width; } } enum { RDT_FLAG_CMT, RDT_FLAG_MBM_TOTAL, RDT_FLAG_MBM_LOCAL, RDT_FLAG_L3_CAT, RDT_FLAG_L3_CDP, RDT_FLAG_L2_CAT, RDT_FLAG_L2_CDP, RDT_FLAG_MBA, RDT_FLAG_SMBA, RDT_FLAG_BMEC, }; #define RDT_OPT(idx, n, f) \ [idx] = { \ .name = n, \ .flag = f \ } struct rdt_options { char *name; int flag; bool force_off, force_on; }; static struct rdt_options rdt_options[] __initdata = { RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC), RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL), RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL), RDT_OPT(RDT_FLAG_L3_CAT, "l3cat", X86_FEATURE_CAT_L3), RDT_OPT(RDT_FLAG_L3_CDP, "l3cdp", X86_FEATURE_CDP_L3), RDT_OPT(RDT_FLAG_L2_CAT, "l2cat", X86_FEATURE_CAT_L2), RDT_OPT(RDT_FLAG_L2_CDP, "l2cdp", X86_FEATURE_CDP_L2), RDT_OPT(RDT_FLAG_MBA, "mba", X86_FEATURE_MBA), RDT_OPT(RDT_FLAG_SMBA, "smba", X86_FEATURE_SMBA), RDT_OPT(RDT_FLAG_BMEC, "bmec", X86_FEATURE_BMEC), }; #define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options) static int __init set_rdt_options(char *str) { struct rdt_options *o; bool force_off; char *tok; if (*str == '=') str++; while ((tok = strsep(&str, ",")) != NULL) { force_off = *tok == '!'; if (force_off) tok++; for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) { if (strcmp(tok, o->name) == 0) { if (force_off) o->force_off = true; else o->force_on = true; break; } } } return 1; } __setup("rdt", set_rdt_options); bool __init rdt_cpu_has(int flag) { bool ret = boot_cpu_has(flag); struct rdt_options *o; if (!ret) return ret; for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) { if (flag == o->flag) { if (o->force_off) ret = false; if (o->force_on) ret = true; break; } } return ret; } static __init bool get_mem_config(void) { struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA]; if (!rdt_cpu_has(X86_FEATURE_MBA)) return false; if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) return __get_mem_config_intel(&hw_res->r_resctrl); else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) return __rdt_get_mem_config_amd(&hw_res->r_resctrl); return false; } static __init bool get_slow_mem_config(void) { struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_SMBA]; if (!rdt_cpu_has(X86_FEATURE_SMBA)) return false; if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) return __rdt_get_mem_config_amd(&hw_res->r_resctrl); return false; } static __init bool get_rdt_alloc_resources(void) { struct rdt_resource *r; bool ret = false; if (rdt_alloc_capable) return true; if (!boot_cpu_has(X86_FEATURE_RDT_A)) return false; if (rdt_cpu_has(X86_FEATURE_CAT_L3)) { r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; rdt_get_cache_alloc_cfg(1, r); if (rdt_cpu_has(X86_FEATURE_CDP_L3)) rdt_get_cdp_l3_config(); ret = true; } if (rdt_cpu_has(X86_FEATURE_CAT_L2)) { /* CPUID 0x10.2 fields are same format at 0x10.1 */ r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl; rdt_get_cache_alloc_cfg(2, r); if (rdt_cpu_has(X86_FEATURE_CDP_L2)) rdt_get_cdp_l2_config(); ret = true; } if (get_mem_config()) ret = true; if (get_slow_mem_config()) ret = true; return ret; } static __init bool get_rdt_mon_resources(void) { struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC)) rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID); if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) rdt_mon_features |= (1 << 
QOS_L3_MBM_TOTAL_EVENT_ID); if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID); if (!rdt_mon_features) return false; return !rdt_get_mon_l3_config(r); } static __init void __check_quirks_intel(void) { switch (boot_cpu_data.x86_model) { case INTEL_FAM6_HASWELL_X: if (!rdt_options[RDT_FLAG_L3_CAT].force_off) cache_alloc_hsw_probe(); break; case INTEL_FAM6_SKYLAKE_X: if (boot_cpu_data.x86_stepping <= 4) set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat"); else set_rdt_options("!l3cat"); fallthrough; case INTEL_FAM6_BROADWELL_X: intel_rdt_mbm_apply_quirk(); break; } } static __init void check_quirks(void) { if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) __check_quirks_intel(); } static __init bool get_rdt_resources(void) { rdt_alloc_capable = get_rdt_alloc_resources(); rdt_mon_capable = get_rdt_mon_resources(); return (rdt_mon_capable || rdt_alloc_capable); } static __init void rdt_init_res_defs_intel(void) { struct rdt_hw_resource *hw_res; struct rdt_resource *r; for_each_rdt_resource(r) { hw_res = resctrl_to_arch_res(r); if (r->rid == RDT_RESOURCE_L3 || r->rid == RDT_RESOURCE_L2) { r->cache.arch_has_sparse_bitmaps = false; r->cache.arch_has_per_cpu_cfg = false; r->cache.min_cbm_bits = 1; } else if (r->rid == RDT_RESOURCE_MBA) { hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE; hw_res->msr_update = mba_wrmsr_intel; } } } static __init void rdt_init_res_defs_amd(void) { struct rdt_hw_resource *hw_res; struct rdt_resource *r; for_each_rdt_resource(r) { hw_res = resctrl_to_arch_res(r); if (r->rid == RDT_RESOURCE_L3 || r->rid == RDT_RESOURCE_L2) { r->cache.arch_has_sparse_bitmaps = true; r->cache.arch_has_per_cpu_cfg = true; r->cache.min_cbm_bits = 0; } else if (r->rid == RDT_RESOURCE_MBA) { hw_res->msr_base = MSR_IA32_MBA_BW_BASE; hw_res->msr_update = mba_wrmsr_amd; } else if (r->rid == RDT_RESOURCE_SMBA) { hw_res->msr_base = MSR_IA32_SMBA_BW_BASE; hw_res->msr_update = mba_wrmsr_amd; } } } static __init void rdt_init_res_defs(void) { if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) rdt_init_res_defs_intel(); else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) rdt_init_res_defs_amd(); } static enum cpuhp_state rdt_online; /* Runs once on the BSP during boot. */ void resctrl_cpu_detect(struct cpuinfo_x86 *c) { if (!cpu_has(c, X86_FEATURE_CQM_LLC)) { c->x86_cache_max_rmid = -1; c->x86_cache_occ_scale = -1; c->x86_cache_mbm_width_offset = -1; return; } /* will be overridden if occupancy monitoring exists */ c->x86_cache_max_rmid = cpuid_ebx(0xf); if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) || cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) || cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) { u32 eax, ebx, ecx, edx; /* QoS sub-leaf, EAX=0Fh, ECX=1 */ cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx); c->x86_cache_max_rmid = ecx; c->x86_cache_occ_scale = ebx; c->x86_cache_mbm_width_offset = eax & 0xff; if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset) c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD; } } static int __init resctrl_late_init(void) { struct rdt_resource *r; int state, ret; /* * Initialize functions(or definitions) that are different * between vendors here. 
*/ rdt_init_res_defs(); check_quirks(); if (!get_rdt_resources()) return -ENODEV; rdt_init_padding(); state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/resctrl/cat:online:", resctrl_online_cpu, resctrl_offline_cpu); if (state < 0) return state; ret = rdtgroup_init(); if (ret) { cpuhp_remove_state(state); return ret; } rdt_online = state; for_each_alloc_capable_rdt_resource(r) pr_info("%s allocation detected\n", r->name); for_each_mon_capable_rdt_resource(r) pr_info("%s monitoring detected\n", r->name); return 0; } late_initcall(resctrl_late_init); static void __exit resctrl_exit(void) { cpuhp_remove_state(rdt_online); rdtgroup_exit(); } __exitcall(resctrl_exit);
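/*
 * Sketch of the enumeration that rdt_get_cache_alloc_cfg() in the file
 * above performs inside the kernel, done here from user space with
 * __get_cpuid_count() (<cpuid.h>).  CPUID leaf 0x10 subleaf 0 reports
 * which allocation resources exist; subleaf 1 (L3) and subleaf 2 (L2)
 * report EAX[4:0]+1 as the capacity bitmask length, EBX as the
 * shareable bits and EDX[15:0]+1 as the number of CLOSIDs.
 */
#include <cpuid.h>
#include <stdio.h>

static void show_cat(unsigned int subleaf, const char *name)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x10, subleaf, &eax, &ebx, &ecx, &edx))
		return;

	printf("%s: cbm_len=%u shareable_bits=0x%x num_closid=%u\n",
	       name, (eax & 0x1f) + 1, ebx, (edx & 0xffff) + 1);
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x10, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x10 not supported");
		return 0;
	}

	if (ebx & (1 << 1))		/* L3 cache allocation */
		show_cat(1, "L3");
	if (ebx & (1 << 2))		/* L2 cache allocation */
		show_cat(2, "L2");
	if (ebx & (1 << 3))		/* memory bandwidth allocation */
		puts("MBA present: see subleaf 3 for delay parameters");

	return 0;
}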
linux-master
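/*
 * Stand-alone sketch of the "rdt=" command-line parsing done by
 * set_rdt_options() in core.c above: options are comma separated and a
 * leading '!' forces a feature off (e.g. "rdt=cmt,!l3cat").  This uses
 * only standard strsep()/strcmp() and a reduced, hypothetical option
 * table for illustration.
 */
#define _GNU_SOURCE
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct option {
	const char *name;
	bool force_off, force_on;
};

static struct option options[] = {
	{ "cmt" }, { "mbmtotal" }, { "mbmlocal" }, { "l3cat" }, { "mba" },
};

#define NUM_OPTIONS (sizeof(options) / sizeof(options[0]))

static void parse_rdt_options(char *str)
{
	bool force_off;
	char *tok;
	size_t i;

	if (*str == '=')
		str++;

	while ((tok = strsep(&str, ",")) != NULL) {
		force_off = *tok == '!';
		if (force_off)
			tok++;
		for (i = 0; i < NUM_OPTIONS; i++) {
			if (strcmp(tok, options[i].name) == 0) {
				if (force_off)
					options[i].force_off = true;
				else
					options[i].force_on = true;
				break;
			}
		}
	}
}

int main(void)
{
	char cmdline[] = "=cmt,!l3cat";	/* as passed after "rdt" */
	size_t i;

	parse_rdt_options(cmdline);
	for (i = 0; i < NUM_OPTIONS; i++)
		printf("%-10s force_on=%d force_off=%d\n", options[i].name,
		       options[i].force_on, options[i].force_off);
	return 0;
}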
arch/x86/kernel/cpu/resctrl/core.c
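/*
 * The rdtgroup.c code that follows keeps the pool of free CLOSIDs in a
 * single integer bitmap (closid_alloc()/closid_free(), with CLOSID 0
 * reserved for the default group).  A minimal stand-alone sketch of
 * the same scheme, using ffs() from <strings.h>; the CLOSID count used
 * here is an assumed value.
 */
#include <stdio.h>
#include <strings.h>

static unsigned int closid_free_map;

static void closid_init(unsigned int num_closid)
{
	closid_free_map = (1u << num_closid) - 1;
	closid_free_map &= ~1u;		/* CLOSID 0: default group */
}

static int closid_alloc(void)
{
	int closid = ffs(closid_free_map);

	if (closid == 0)
		return -1;		/* no free CLOSID */
	closid--;
	closid_free_map &= ~(1u << closid);
	return closid;
}

static void closid_free(int closid)
{
	closid_free_map |= 1u << closid;
}

int main(void)
{
	int a, b;

	closid_init(8);			/* assume 8 CLOSIDs */
	a = closid_alloc();		/* -> 1 */
	b = closid_alloc();		/* -> 2 */
	printf("allocated %d and %d\n", a, b);
	closid_free(a);
	printf("next allocation reuses: %d\n", closid_alloc());
	return 0;
}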
// SPDX-License-Identifier: GPL-2.0-only /* * User interface for Resource Allocation in Resource Director Technology(RDT) * * Copyright (C) 2016 Intel Corporation * * Author: Fenghua Yu <[email protected]> * * More information about RDT be found in the Intel (R) x86 Architecture * Software Developer Manual. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cacheinfo.h> #include <linux/cpu.h> #include <linux/debugfs.h> #include <linux/fs.h> #include <linux/fs_parser.h> #include <linux/sysfs.h> #include <linux/kernfs.h> #include <linux/seq_buf.h> #include <linux/seq_file.h> #include <linux/sched/signal.h> #include <linux/sched/task.h> #include <linux/slab.h> #include <linux/task_work.h> #include <linux/user_namespace.h> #include <uapi/linux/magic.h> #include <asm/resctrl.h> #include "internal.h" DEFINE_STATIC_KEY_FALSE(rdt_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key); static struct kernfs_root *rdt_root; struct rdtgroup rdtgroup_default; LIST_HEAD(rdt_all_groups); /* list of entries for the schemata file */ LIST_HEAD(resctrl_schema_all); /* Kernel fs node for "info" directory under root */ static struct kernfs_node *kn_info; /* Kernel fs node for "mon_groups" directory under root */ static struct kernfs_node *kn_mongrp; /* Kernel fs node for "mon_data" directory under root */ static struct kernfs_node *kn_mondata; static struct seq_buf last_cmd_status; static char last_cmd_status_buf[512]; struct dentry *debugfs_resctrl; void rdt_last_cmd_clear(void) { lockdep_assert_held(&rdtgroup_mutex); seq_buf_clear(&last_cmd_status); } void rdt_last_cmd_puts(const char *s) { lockdep_assert_held(&rdtgroup_mutex); seq_buf_puts(&last_cmd_status, s); } void rdt_last_cmd_printf(const char *fmt, ...) { va_list ap; va_start(ap, fmt); lockdep_assert_held(&rdtgroup_mutex); seq_buf_vprintf(&last_cmd_status, fmt, ap); va_end(ap); } void rdt_staged_configs_clear(void) { struct rdt_resource *r; struct rdt_domain *dom; lockdep_assert_held(&rdtgroup_mutex); for_each_alloc_capable_rdt_resource(r) { list_for_each_entry(dom, &r->domains, list) memset(dom->staged_config, 0, sizeof(dom->staged_config)); } } /* * Trivial allocator for CLOSIDs. Since h/w only supports a small number, * we can keep a bitmap of free CLOSIDs in a single integer. * * Using a global CLOSID across all resources has some advantages and * some drawbacks: * + We can simply set "current->closid" to assign a task to a resource * group. * + Context switch code can avoid extra memory references deciding which * CLOSID to load into the PQR_ASSOC MSR * - We give up some options in configuring resource groups across multi-socket * systems. * - Our choices on how to configure each resource become progressively more * limited as the number of resources grows. 
*/ static int closid_free_map; static int closid_free_map_len; int closids_supported(void) { return closid_free_map_len; } static void closid_init(void) { struct resctrl_schema *s; u32 rdt_min_closid = 32; /* Compute rdt_min_closid across all resources */ list_for_each_entry(s, &resctrl_schema_all, list) rdt_min_closid = min(rdt_min_closid, s->num_closid); closid_free_map = BIT_MASK(rdt_min_closid) - 1; /* CLOSID 0 is always reserved for the default group */ closid_free_map &= ~1; closid_free_map_len = rdt_min_closid; } static int closid_alloc(void) { u32 closid = ffs(closid_free_map); if (closid == 0) return -ENOSPC; closid--; closid_free_map &= ~(1 << closid); return closid; } void closid_free(int closid) { closid_free_map |= 1 << closid; } /** * closid_allocated - test if provided closid is in use * @closid: closid to be tested * * Return: true if @closid is currently associated with a resource group, * false if @closid is free */ static bool closid_allocated(unsigned int closid) { return (closid_free_map & (1 << closid)) == 0; } /** * rdtgroup_mode_by_closid - Return mode of resource group with closid * @closid: closid if the resource group * * Each resource group is associated with a @closid. Here the mode * of a resource group can be queried by searching for it using its closid. * * Return: mode as &enum rdtgrp_mode of resource group with closid @closid */ enum rdtgrp_mode rdtgroup_mode_by_closid(int closid) { struct rdtgroup *rdtgrp; list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { if (rdtgrp->closid == closid) return rdtgrp->mode; } return RDT_NUM_MODES; } static const char * const rdt_mode_str[] = { [RDT_MODE_SHAREABLE] = "shareable", [RDT_MODE_EXCLUSIVE] = "exclusive", [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup", [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked", }; /** * rdtgroup_mode_str - Return the string representation of mode * @mode: the resource group mode as &enum rdtgroup_mode * * Return: string representation of valid mode, "unknown" otherwise */ static const char *rdtgroup_mode_str(enum rdtgrp_mode mode) { if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES) return "unknown"; return rdt_mode_str[mode]; } /* set uid and gid of rdtgroup dirs and files to that of the creator */ static int rdtgroup_kn_set_ugid(struct kernfs_node *kn) { struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, .ia_uid = current_fsuid(), .ia_gid = current_fsgid(), }; if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) return 0; return kernfs_setattr(kn, &iattr); } static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft) { struct kernfs_node *kn; int ret; kn = __kernfs_create_file(parent_kn, rft->name, rft->mode, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, rft->kf_ops, rft, NULL, NULL); if (IS_ERR(kn)) return PTR_ERR(kn); ret = rdtgroup_kn_set_ugid(kn); if (ret) { kernfs_remove(kn); return ret; } return 0; } static int rdtgroup_seqfile_show(struct seq_file *m, void *arg) { struct kernfs_open_file *of = m->private; struct rftype *rft = of->kn->priv; if (rft->seq_show) return rft->seq_show(of, m, arg); return 0; } static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct rftype *rft = of->kn->priv; if (rft->write) return rft->write(of, buf, nbytes, off); return -EINVAL; } static const struct kernfs_ops rdtgroup_kf_single_ops = { .atomic_write_len = PAGE_SIZE, .write = rdtgroup_file_write, .seq_show = rdtgroup_seqfile_show, }; static const struct kernfs_ops kf_mondata_ops = { 
.atomic_write_len = PAGE_SIZE, .seq_show = rdtgroup_mondata_show, }; static bool is_cpu_list(struct kernfs_open_file *of) { struct rftype *rft = of->kn->priv; return rft->flags & RFTYPE_FLAGS_CPUS_LIST; } static int rdtgroup_cpus_show(struct kernfs_open_file *of, struct seq_file *s, void *v) { struct rdtgroup *rdtgrp; struct cpumask *mask; int ret = 0; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (rdtgrp) { if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { if (!rdtgrp->plr->d) { rdt_last_cmd_clear(); rdt_last_cmd_puts("Cache domain offline\n"); ret = -ENODEV; } else { mask = &rdtgrp->plr->d->cpu_mask; seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", cpumask_pr_args(mask)); } } else { seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", cpumask_pr_args(&rdtgrp->cpu_mask)); } } else { ret = -ENOENT; } rdtgroup_kn_unlock(of->kn); return ret; } /* * This is safe against resctrl_sched_in() called from __switch_to() * because __switch_to() is executed with interrupts disabled. A local call * from update_closid_rmid() is protected against __switch_to() because * preemption is disabled. */ static void update_cpu_closid_rmid(void *info) { struct rdtgroup *r = info; if (r) { this_cpu_write(pqr_state.default_closid, r->closid); this_cpu_write(pqr_state.default_rmid, r->mon.rmid); } /* * We cannot unconditionally write the MSR because the current * executing task might have its own closid selected. Just reuse * the context switch code. */ resctrl_sched_in(current); } /* * Update the PGR_ASSOC MSR on all cpus in @cpu_mask, * * Per task closids/rmids must have been set up before calling this function. */ static void update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) { on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1); } static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, cpumask_var_t tmpmask) { struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; struct list_head *head; /* Check whether cpus belong to parent ctrl group */ cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); if (!cpumask_empty(tmpmask)) { rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); return -EINVAL; } /* Check whether cpus are dropped from this group */ cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); if (!cpumask_empty(tmpmask)) { /* Give any dropped cpus to parent rdtgroup */ cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); update_closid_rmid(tmpmask, prgrp); } /* * If we added cpus, remove them from previous group that owned them * and update per-cpu rmid */ cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); if (!cpumask_empty(tmpmask)) { head = &prgrp->mon.crdtgrp_list; list_for_each_entry(crgrp, head, mon.crdtgrp_list) { if (crgrp == rdtgrp) continue; cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, tmpmask); } update_closid_rmid(tmpmask, rdtgrp); } /* Done pushing/pulling - update this group with new mask */ cpumask_copy(&rdtgrp->cpu_mask, newmask); return 0; } static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) { struct rdtgroup *crgrp; cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); /* update the child mon group masks as well*/ list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); } static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, cpumask_var_t tmpmask, cpumask_var_t tmpmask1) { struct rdtgroup *r, *crgrp; struct list_head *head; /* Check whether cpus are dropped from this group */ cpumask_andnot(tmpmask, 
&rdtgrp->cpu_mask, newmask); if (!cpumask_empty(tmpmask)) { /* Can't drop from default group */ if (rdtgrp == &rdtgroup_default) { rdt_last_cmd_puts("Can't drop CPUs from default group\n"); return -EINVAL; } /* Give any dropped cpus to rdtgroup_default */ cpumask_or(&rdtgroup_default.cpu_mask, &rdtgroup_default.cpu_mask, tmpmask); update_closid_rmid(tmpmask, &rdtgroup_default); } /* * If we added cpus, remove them from previous group and * the prev group's child groups that owned them * and update per-cpu closid/rmid. */ cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); if (!cpumask_empty(tmpmask)) { list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { if (r == rdtgrp) continue; cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); if (!cpumask_empty(tmpmask1)) cpumask_rdtgrp_clear(r, tmpmask1); } update_closid_rmid(tmpmask, rdtgrp); } /* Done pushing/pulling - update this group with new mask */ cpumask_copy(&rdtgrp->cpu_mask, newmask); /* * Clear child mon group masks since there is a new parent mask * now and update the rmid for the cpus the child lost. */ head = &rdtgrp->mon.crdtgrp_list; list_for_each_entry(crgrp, head, mon.crdtgrp_list) { cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); update_closid_rmid(tmpmask, rdtgrp); cpumask_clear(&crgrp->cpu_mask); } return 0; } static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { cpumask_var_t tmpmask, newmask, tmpmask1; struct rdtgroup *rdtgrp; int ret; if (!buf) return -EINVAL; if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) return -ENOMEM; if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { free_cpumask_var(tmpmask); return -ENOMEM; } if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { free_cpumask_var(tmpmask); free_cpumask_var(newmask); return -ENOMEM; } rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { ret = -ENOENT; goto unlock; } if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { ret = -EINVAL; rdt_last_cmd_puts("Pseudo-locking in progress\n"); goto unlock; } if (is_cpu_list(of)) ret = cpulist_parse(buf, newmask); else ret = cpumask_parse(buf, newmask); if (ret) { rdt_last_cmd_puts("Bad CPU list/mask\n"); goto unlock; } /* check that user didn't specify any offline cpus */ cpumask_andnot(tmpmask, newmask, cpu_online_mask); if (!cpumask_empty(tmpmask)) { ret = -EINVAL; rdt_last_cmd_puts("Can only assign online CPUs\n"); goto unlock; } if (rdtgrp->type == RDTCTRL_GROUP) ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); else if (rdtgrp->type == RDTMON_GROUP) ret = cpus_mon_write(rdtgrp, newmask, tmpmask); else ret = -EINVAL; unlock: rdtgroup_kn_unlock(of->kn); free_cpumask_var(tmpmask); free_cpumask_var(newmask); free_cpumask_var(tmpmask1); return ret ?: nbytes; } /** * rdtgroup_remove - the helper to remove resource group safely * @rdtgrp: resource group to remove * * On resource group creation via a mkdir, an extra kernfs_node reference is * taken to ensure that the rdtgroup structure remains accessible for the * rdtgroup_kn_unlock() calls where it is removed. * * Drop the extra reference here, then free the rdtgroup structure. * * Return: void */ static void rdtgroup_remove(struct rdtgroup *rdtgrp) { kernfs_put(rdtgrp->kn); kfree(rdtgrp); } static void _update_task_closid_rmid(void *task) { /* * If the task is still current on this CPU, update PQR_ASSOC MSR. * Otherwise, the MSR is updated when the task is scheduled in. 
*/ if (task == current) resctrl_sched_in(task); } static void update_task_closid_rmid(struct task_struct *t) { if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); else _update_task_closid_rmid(t); } static int __rdtgroup_move_task(struct task_struct *tsk, struct rdtgroup *rdtgrp) { /* If the task is already in rdtgrp, no need to move the task. */ if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid && tsk->rmid == rdtgrp->mon.rmid) || (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid && tsk->closid == rdtgrp->mon.parent->closid)) return 0; /* * Set the task's closid/rmid before the PQR_ASSOC MSR can be * updated by them. * * For ctrl_mon groups, move both closid and rmid. * For monitor groups, can move the tasks only from * their parent CTRL group. */ if (rdtgrp->type == RDTCTRL_GROUP) { WRITE_ONCE(tsk->closid, rdtgrp->closid); WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid); } else if (rdtgrp->type == RDTMON_GROUP) { if (rdtgrp->mon.parent->closid == tsk->closid) { WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid); } else { rdt_last_cmd_puts("Can't move task to different control group\n"); return -EINVAL; } } /* * Ensure the task's closid and rmid are written before determining if * the task is current that will decide if it will be interrupted. * This pairs with the full barrier between the rq->curr update and * resctrl_sched_in() during context switch. */ smp_mb(); /* * By now, the task's closid and rmid are set. If the task is current * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource * group go into effect. If the task is not current, the MSR will be * updated when the task is scheduled in. */ update_task_closid_rmid(tsk); return 0; } static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) { return (rdt_alloc_capable && (r->type == RDTCTRL_GROUP) && (t->closid == r->closid)); } static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) { return (rdt_mon_capable && (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid)); } /** * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group * @r: Resource group * * Return: 1 if tasks have been assigned to @r, 0 otherwise */ int rdtgroup_tasks_assigned(struct rdtgroup *r) { struct task_struct *p, *t; int ret = 0; lockdep_assert_held(&rdtgroup_mutex); rcu_read_lock(); for_each_process_thread(p, t) { if (is_closid_match(t, r) || is_rmid_match(t, r)) { ret = 1; break; } } rcu_read_unlock(); return ret; } static int rdtgroup_task_write_permission(struct task_struct *task, struct kernfs_open_file *of) { const struct cred *tcred = get_task_cred(task); const struct cred *cred = current_cred(); int ret = 0; /* * Even if we're attaching all tasks in the thread group, we only * need to check permissions on one of them. 
*/ if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && !uid_eq(cred->euid, tcred->uid) && !uid_eq(cred->euid, tcred->suid)) { rdt_last_cmd_printf("No permission to move task %d\n", task->pid); ret = -EPERM; } put_cred(tcred); return ret; } static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp, struct kernfs_open_file *of) { struct task_struct *tsk; int ret; rcu_read_lock(); if (pid) { tsk = find_task_by_vpid(pid); if (!tsk) { rcu_read_unlock(); rdt_last_cmd_printf("No task %d\n", pid); return -ESRCH; } } else { tsk = current; } get_task_struct(tsk); rcu_read_unlock(); ret = rdtgroup_task_write_permission(tsk, of); if (!ret) ret = __rdtgroup_move_task(tsk, rdtgrp); put_task_struct(tsk); return ret; } static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct rdtgroup *rdtgrp; int ret = 0; pid_t pid; if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) return -EINVAL; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); return -ENOENT; } rdt_last_cmd_clear(); if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { ret = -EINVAL; rdt_last_cmd_puts("Pseudo-locking in progress\n"); goto unlock; } ret = rdtgroup_move_task(pid, rdtgrp, of); unlock: rdtgroup_kn_unlock(of->kn); return ret ?: nbytes; } static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) { struct task_struct *p, *t; pid_t pid; rcu_read_lock(); for_each_process_thread(p, t) { if (is_closid_match(t, r) || is_rmid_match(t, r)) { pid = task_pid_vnr(t); if (pid) seq_printf(s, "%d\n", pid); } } rcu_read_unlock(); } static int rdtgroup_tasks_show(struct kernfs_open_file *of, struct seq_file *s, void *v) { struct rdtgroup *rdtgrp; int ret = 0; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (rdtgrp) show_rdt_tasks(rdtgrp, s); else ret = -ENOENT; rdtgroup_kn_unlock(of->kn); return ret; } #ifdef CONFIG_PROC_CPU_RESCTRL /* * A task can only be part of one resctrl control group and of one monitor * group which is associated to that control group. * * 1) res: * mon: * * resctrl is not available. * * 2) res:/ * mon: * * Task is part of the root resctrl control group, and it is not associated * to any monitor group. * * 3) res:/ * mon:mon0 * * Task is part of the root resctrl control group and monitor group mon0. * * 4) res:group0 * mon: * * Task is part of resctrl control group group0, and it is not associated * to any monitor group. * * 5) res:group0 * mon:mon1 * * Task is part of resctrl control group group0 and monitor group mon1. */ int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk) { struct rdtgroup *rdtg; int ret = 0; mutex_lock(&rdtgroup_mutex); /* Return empty if resctrl has not been mounted. */ if (!static_branch_unlikely(&rdt_enable_key)) { seq_puts(s, "res:\nmon:\n"); goto unlock; } list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) { struct rdtgroup *crg; /* * Task information is only relevant for shareable * and exclusive groups. */ if (rdtg->mode != RDT_MODE_SHAREABLE && rdtg->mode != RDT_MODE_EXCLUSIVE) continue; if (rdtg->closid != tsk->closid) continue; seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "", rdtg->kn->name); seq_puts(s, "mon:"); list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, mon.crdtgrp_list) { if (tsk->rmid != crg->mon.rmid) continue; seq_printf(s, "%s", crg->kn->name); break; } seq_putc(s, '\n'); goto unlock; } /* * The above search should succeed. Otherwise return * with an error. 
*/ ret = -ENOENT; unlock: mutex_unlock(&rdtgroup_mutex); return ret; } #endif static int rdt_last_cmd_status_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { int len; mutex_lock(&rdtgroup_mutex); len = seq_buf_used(&last_cmd_status); if (len) seq_printf(seq, "%.*s", len, last_cmd_status_buf); else seq_puts(seq, "ok\n"); mutex_unlock(&rdtgroup_mutex); return 0; } static int rdt_num_closids_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct resctrl_schema *s = of->kn->parent->priv; seq_printf(seq, "%u\n", s->num_closid); return 0; } static int rdt_default_ctrl_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct resctrl_schema *s = of->kn->parent->priv; struct rdt_resource *r = s->res; seq_printf(seq, "%x\n", r->default_ctrl); return 0; } static int rdt_min_cbm_bits_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct resctrl_schema *s = of->kn->parent->priv; struct rdt_resource *r = s->res; seq_printf(seq, "%u\n", r->cache.min_cbm_bits); return 0; } static int rdt_shareable_bits_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct resctrl_schema *s = of->kn->parent->priv; struct rdt_resource *r = s->res; seq_printf(seq, "%x\n", r->cache.shareable_bits); return 0; } /** * rdt_bit_usage_show - Display current usage of resources * * A domain is a shared resource that can now be allocated differently. Here * we display the current regions of the domain as an annotated bitmask. * For each domain of this resource its allocation bitmask * is annotated as below to indicate the current usage of the corresponding bit: * 0 - currently unused * X - currently available for sharing and used by software and hardware * H - currently used by hardware only but available for software use * S - currently used and shareable by software only * E - currently used exclusively by one resource group * P - currently pseudo-locked by one resource group */ static int rdt_bit_usage_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct resctrl_schema *s = of->kn->parent->priv; /* * Use unsigned long even though only 32 bits are used to ensure * test_bit() is used safely. */ unsigned long sw_shareable = 0, hw_shareable = 0; unsigned long exclusive = 0, pseudo_locked = 0; struct rdt_resource *r = s->res; struct rdt_domain *dom; int i, hwb, swb, excl, psl; enum rdtgrp_mode mode; bool sep = false; u32 ctrl_val; mutex_lock(&rdtgroup_mutex); hw_shareable = r->cache.shareable_bits; list_for_each_entry(dom, &r->domains, list) { if (sep) seq_putc(seq, ';'); sw_shareable = 0; exclusive = 0; seq_printf(seq, "%d=", dom->id); for (i = 0; i < closids_supported(); i++) { if (!closid_allocated(i)) continue; ctrl_val = resctrl_arch_get_config(r, dom, i, s->conf_type); mode = rdtgroup_mode_by_closid(i); switch (mode) { case RDT_MODE_SHAREABLE: sw_shareable |= ctrl_val; break; case RDT_MODE_EXCLUSIVE: exclusive |= ctrl_val; break; case RDT_MODE_PSEUDO_LOCKSETUP: /* * RDT_MODE_PSEUDO_LOCKSETUP is possible * here but not included since the CBM * associated with this CLOSID in this mode * is not initialized and no task or cpu can be * assigned this CLOSID. */ break; case RDT_MODE_PSEUDO_LOCKED: case RDT_NUM_MODES: WARN(1, "invalid mode for closid %d\n", i); break; } } for (i = r->cache.cbm_len - 1; i >= 0; i--) { pseudo_locked = dom->plr ? 
dom->plr->cbm : 0; hwb = test_bit(i, &hw_shareable); swb = test_bit(i, &sw_shareable); excl = test_bit(i, &exclusive); psl = test_bit(i, &pseudo_locked); if (hwb && swb) seq_putc(seq, 'X'); else if (hwb && !swb) seq_putc(seq, 'H'); else if (!hwb && swb) seq_putc(seq, 'S'); else if (excl) seq_putc(seq, 'E'); else if (psl) seq_putc(seq, 'P'); else /* Unused bits remain */ seq_putc(seq, '0'); } sep = true; } seq_putc(seq, '\n'); mutex_unlock(&rdtgroup_mutex); return 0; } static int rdt_min_bw_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct resctrl_schema *s = of->kn->parent->priv; struct rdt_resource *r = s->res; seq_printf(seq, "%u\n", r->membw.min_bw); return 0; } static int rdt_num_rmids_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct rdt_resource *r = of->kn->parent->priv; seq_printf(seq, "%d\n", r->num_rmid); return 0; } static int rdt_mon_features_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct rdt_resource *r = of->kn->parent->priv; struct mon_evt *mevt; list_for_each_entry(mevt, &r->evt_list, list) { seq_printf(seq, "%s\n", mevt->name); if (mevt->configurable) seq_printf(seq, "%s_config\n", mevt->name); } return 0; } static int rdt_bw_gran_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct resctrl_schema *s = of->kn->parent->priv; struct rdt_resource *r = s->res; seq_printf(seq, "%u\n", r->membw.bw_gran); return 0; } static int rdt_delay_linear_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct resctrl_schema *s = of->kn->parent->priv; struct rdt_resource *r = s->res; seq_printf(seq, "%u\n", r->membw.delay_linear); return 0; } static int max_threshold_occ_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold); return 0; } static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct resctrl_schema *s = of->kn->parent->priv; struct rdt_resource *r = s->res; if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD) seq_puts(seq, "per-thread\n"); else seq_puts(seq, "max\n"); return 0; } static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { unsigned int bytes; int ret; ret = kstrtouint(buf, 0, &bytes); if (ret) return ret; if (bytes > resctrl_rmid_realloc_limit) return -EINVAL; resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes); return nbytes; } /* * rdtgroup_mode_show - Display mode of this resource group */ static int rdtgroup_mode_show(struct kernfs_open_file *of, struct seq_file *s, void *v) { struct rdtgroup *rdtgrp; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); return -ENOENT; } seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); rdtgroup_kn_unlock(of->kn); return 0; } static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type) { switch (my_type) { case CDP_CODE: return CDP_DATA; case CDP_DATA: return CDP_CODE; default: case CDP_NONE: return CDP_NONE; } } /** * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other * @r: Resource to which domain instance @d belongs. * @d: The domain instance for which @closid is being tested. * @cbm: Capacity bitmask being tested. * @closid: Intended closid for @cbm. 
* @exclusive: Only check if overlaps with exclusive resource groups * * Checks if provided @cbm intended to be used for @closid on domain * @d overlaps with any other closids or other hardware usage associated * with this domain. If @exclusive is true then only overlaps with * resource groups in exclusive mode will be considered. If @exclusive * is false then overlaps with any resource group or hardware entities * will be considered. * * @cbm is unsigned long, even if only 32 bits are used, to make the * bitmap functions work correctly. * * Return: false if CBM does not overlap, true if it does. */ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, unsigned long cbm, int closid, enum resctrl_conf_type type, bool exclusive) { enum rdtgrp_mode mode; unsigned long ctrl_b; int i; /* Check for any overlap with regions used by hardware directly */ if (!exclusive) { ctrl_b = r->cache.shareable_bits; if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) return true; } /* Check for overlap with other resource groups */ for (i = 0; i < closids_supported(); i++) { ctrl_b = resctrl_arch_get_config(r, d, i, type); mode = rdtgroup_mode_by_closid(i); if (closid_allocated(i) && i != closid && mode != RDT_MODE_PSEUDO_LOCKSETUP) { if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) { if (exclusive) { if (mode == RDT_MODE_EXCLUSIVE) return true; continue; } return true; } } } return false; } /** * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware * @s: Schema for the resource to which domain instance @d belongs. * @d: The domain instance for which @closid is being tested. * @cbm: Capacity bitmask being tested. * @closid: Intended closid for @cbm. * @exclusive: Only check if overlaps with exclusive resource groups * * Resources that can be allocated using a CBM can use the CBM to control * the overlap of these allocations. rdtgroup_cmb_overlaps() is the test * for overlap. Overlap test is not limited to the specific resource for * which the CBM is intended though - when dealing with CDP resources that * share the underlying hardware the overlap check should be performed on * the CDP resource sharing the hardware also. * * Refer to description of __rdtgroup_cbm_overlaps() for the details of the * overlap test. * * Return: true if CBM overlap detected, false if there is no overlap */ bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, unsigned long cbm, int closid, bool exclusive) { enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); struct rdt_resource *r = s->res; if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type, exclusive)) return true; if (!resctrl_arch_get_cdp_enabled(r->rid)) return false; return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive); } /** * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive * * An exclusive resource group implies that there should be no sharing of * its allocated resources. At the time this group is considered to be * exclusive this test can determine if its current schemata supports this * setting by testing for overlap with all other resource groups. * * Return: true if resource group can be exclusive, false if there is overlap * with allocations of other resource groups and thus this resource group * cannot be exclusive. 
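 *
 * For example, if this group's cache bitmask shares bits with another
 * resource group's bitmask in any domain, the switch to exclusive mode
 * is rejected until the schemata no longer overlap.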
*/ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) { int closid = rdtgrp->closid; struct resctrl_schema *s; struct rdt_resource *r; bool has_cache = false; struct rdt_domain *d; u32 ctrl; list_for_each_entry(s, &resctrl_schema_all, list) { r = s->res; if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) continue; has_cache = true; list_for_each_entry(d, &r->domains, list) { ctrl = resctrl_arch_get_config(r, d, closid, s->conf_type); if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) { rdt_last_cmd_puts("Schemata overlaps\n"); return false; } } } if (!has_cache) { rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); return false; } return true; } /** * rdtgroup_mode_write - Modify the resource group's mode * */ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct rdtgroup *rdtgrp; enum rdtgrp_mode mode; int ret = 0; /* Valid input requires a trailing newline */ if (nbytes == 0 || buf[nbytes - 1] != '\n') return -EINVAL; buf[nbytes - 1] = '\0'; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); return -ENOENT; } rdt_last_cmd_clear(); mode = rdtgrp->mode; if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || (!strcmp(buf, "pseudo-locksetup") && mode == RDT_MODE_PSEUDO_LOCKSETUP) || (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) goto out; if (mode == RDT_MODE_PSEUDO_LOCKED) { rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); ret = -EINVAL; goto out; } if (!strcmp(buf, "shareable")) { if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { ret = rdtgroup_locksetup_exit(rdtgrp); if (ret) goto out; } rdtgrp->mode = RDT_MODE_SHAREABLE; } else if (!strcmp(buf, "exclusive")) { if (!rdtgroup_mode_test_exclusive(rdtgrp)) { ret = -EINVAL; goto out; } if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { ret = rdtgroup_locksetup_exit(rdtgrp); if (ret) goto out; } rdtgrp->mode = RDT_MODE_EXCLUSIVE; } else if (!strcmp(buf, "pseudo-locksetup")) { ret = rdtgroup_locksetup_enter(rdtgrp); if (ret) goto out; rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; } else { rdt_last_cmd_puts("Unknown or unsupported mode\n"); ret = -EINVAL; } out: rdtgroup_kn_unlock(of->kn); return ret ?: nbytes; } /** * rdtgroup_cbm_to_size - Translate CBM to size in bytes * @r: RDT resource to which @d belongs. * @d: RDT domain instance. * @cbm: bitmask for which the size should be computed. * * The bitmask provided associated with the RDT domain instance @d will be * translated into how many bytes it represents. The size in bytes is * computed by first dividing the total cache size by the CBM length to * determine how many bytes each bit in the bitmask represents. The result * is multiplied with the number of bits set in the bitmask. * * @cbm is unsigned long, even if only 32 bits are used to make the * bitmap functions work correctly. 
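 *
 * Worked example with hypothetical numbers: a 16 MiB cache with a
 * 16-bit CBM gives 1 MiB per bit, so a CBM with 4 bits set translates
 * to 4 MiB.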
*/ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, unsigned long cbm) { struct cpu_cacheinfo *ci; unsigned int size = 0; int num_b, i; num_b = bitmap_weight(&cbm, r->cache.cbm_len); ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); for (i = 0; i < ci->num_leaves; i++) { if (ci->info_list[i].level == r->cache_level) { size = ci->info_list[i].size / r->cache.cbm_len * num_b; break; } } return size; } /** * rdtgroup_size_show - Display size in bytes of allocated regions * * The "size" file mirrors the layout of the "schemata" file, printing the * size in bytes of each region instead of the capacity bitmask. * */ static int rdtgroup_size_show(struct kernfs_open_file *of, struct seq_file *s, void *v) { struct resctrl_schema *schema; enum resctrl_conf_type type; struct rdtgroup *rdtgrp; struct rdt_resource *r; struct rdt_domain *d; unsigned int size; int ret = 0; u32 closid; bool sep; u32 ctrl; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); return -ENOENT; } if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { if (!rdtgrp->plr->d) { rdt_last_cmd_clear(); rdt_last_cmd_puts("Cache domain offline\n"); ret = -ENODEV; } else { seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->s->name); size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res, rdtgrp->plr->d, rdtgrp->plr->cbm); seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); } goto out; } closid = rdtgrp->closid; list_for_each_entry(schema, &resctrl_schema_all, list) { r = schema->res; type = schema->conf_type; sep = false; seq_printf(s, "%*s:", max_name_width, schema->name); list_for_each_entry(d, &r->domains, list) { if (sep) seq_putc(s, ';'); if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { size = 0; } else { if (is_mba_sc(r)) ctrl = d->mbps_val[closid]; else ctrl = resctrl_arch_get_config(r, d, closid, type); if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) size = ctrl; else size = rdtgroup_cbm_to_size(r, d, ctrl); } seq_printf(s, "%d=%u", d->id, size); sep = true; } seq_putc(s, '\n'); } out: rdtgroup_kn_unlock(of->kn); return ret; } struct mon_config_info { u32 evtid; u32 mon_config; }; #define INVALID_CONFIG_INDEX UINT_MAX /** * mon_event_config_index_get - get the hardware index for the * configurable event * @evtid: event id. 
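 *
 * Each configurable event has a fixed hardware index used as the offset
 * from MSR_IA32_EVT_CFG_BASE when its configuration is read or written
 * (see mon_event_config_read() and mon_event_config_write()).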
* * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID * 1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID * INVALID_CONFIG_INDEX for invalid evtid */ static inline unsigned int mon_event_config_index_get(u32 evtid) { switch (evtid) { case QOS_L3_MBM_TOTAL_EVENT_ID: return 0; case QOS_L3_MBM_LOCAL_EVENT_ID: return 1; default: /* Should never reach here */ return INVALID_CONFIG_INDEX; } } static void mon_event_config_read(void *info) { struct mon_config_info *mon_info = info; unsigned int index; u64 msrval; index = mon_event_config_index_get(mon_info->evtid); if (index == INVALID_CONFIG_INDEX) { pr_warn_once("Invalid event id %d\n", mon_info->evtid); return; } rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval); /* Report only the valid event configuration bits */ mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS; } static void mondata_config_read(struct rdt_domain *d, struct mon_config_info *mon_info) { smp_call_function_any(&d->cpu_mask, mon_event_config_read, mon_info, 1); } static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid) { struct mon_config_info mon_info = {0}; struct rdt_domain *dom; bool sep = false; mutex_lock(&rdtgroup_mutex); list_for_each_entry(dom, &r->domains, list) { if (sep) seq_puts(s, ";"); memset(&mon_info, 0, sizeof(struct mon_config_info)); mon_info.evtid = evtid; mondata_config_read(dom, &mon_info); seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config); sep = true; } seq_puts(s, "\n"); mutex_unlock(&rdtgroup_mutex); return 0; } static int mbm_total_bytes_config_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct rdt_resource *r = of->kn->parent->priv; mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID); return 0; } static int mbm_local_bytes_config_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct rdt_resource *r = of->kn->parent->priv; mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID); return 0; } static void mon_event_config_write(void *info) { struct mon_config_info *mon_info = info; unsigned int index; index = mon_event_config_index_get(mon_info->evtid); if (index == INVALID_CONFIG_INDEX) { pr_warn_once("Invalid event id %d\n", mon_info->evtid); return; } wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); } static int mbm_config_write_domain(struct rdt_resource *r, struct rdt_domain *d, u32 evtid, u32 val) { struct mon_config_info mon_info = {0}; int ret = 0; /* mon_config cannot be more than the supported set of events */ if (val > MAX_EVT_CONFIG_BITS) { rdt_last_cmd_puts("Invalid event configuration\n"); return -EINVAL; } /* * Read the current config value first. If both are the same then * no need to write it again. */ mon_info.evtid = evtid; mondata_config_read(d, &mon_info); if (mon_info.mon_config == val) goto out; mon_info.mon_config = val; /* * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the * domain. The MSRs offset from MSR MSR_IA32_EVT_CFG_BASE * are scoped at the domain level. Writing any of these MSRs * on one CPU is observed by all the CPUs in the domain. */ smp_call_function_any(&d->cpu_mask, mon_event_config_write, &mon_info, 1); /* * When an Event Configuration is changed, the bandwidth counters * for all RMIDs and Events will be cleared by the hardware. The * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for * every RMID on the next read to any event for every RMID. * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62) * cleared while it is tracked by the hardware. Clear the * mbm_local and mbm_total counts for all the RMIDs. 
*/ resctrl_arch_reset_rmid_all(r, d); out: return ret; } static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) { char *dom_str = NULL, *id_str; unsigned long dom_id, val; struct rdt_domain *d; int ret = 0; next: if (!tok || tok[0] == '\0') return 0; /* Start processing the strings for each domain */ dom_str = strim(strsep(&tok, ";")); id_str = strsep(&dom_str, "="); if (!id_str || kstrtoul(id_str, 10, &dom_id)) { rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n"); return -EINVAL; } if (!dom_str || kstrtoul(dom_str, 16, &val)) { rdt_last_cmd_puts("Non-numeric event configuration value\n"); return -EINVAL; } list_for_each_entry(d, &r->domains, list) { if (d->id == dom_id) { ret = mbm_config_write_domain(r, d, evtid, val); if (ret) return -EINVAL; goto next; } } return -EINVAL; } static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct rdt_resource *r = of->kn->parent->priv; int ret; /* Valid input requires a trailing newline */ if (nbytes == 0 || buf[nbytes - 1] != '\n') return -EINVAL; mutex_lock(&rdtgroup_mutex); rdt_last_cmd_clear(); buf[nbytes - 1] = '\0'; ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID); mutex_unlock(&rdtgroup_mutex); return ret ?: nbytes; } static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct rdt_resource *r = of->kn->parent->priv; int ret; /* Valid input requires a trailing newline */ if (nbytes == 0 || buf[nbytes - 1] != '\n') return -EINVAL; mutex_lock(&rdtgroup_mutex); rdt_last_cmd_clear(); buf[nbytes - 1] = '\0'; ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID); mutex_unlock(&rdtgroup_mutex); return ret ?: nbytes; } /* rdtgroup information files for one cache resource. */ static struct rftype res_common_files[] = { { .name = "last_cmd_status", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_last_cmd_status_show, .fflags = RF_TOP_INFO, }, { .name = "num_closids", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_num_closids_show, .fflags = RF_CTRL_INFO, }, { .name = "mon_features", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_mon_features_show, .fflags = RF_MON_INFO, }, { .name = "num_rmids", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_num_rmids_show, .fflags = RF_MON_INFO, }, { .name = "cbm_mask", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_default_ctrl_show, .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, }, { .name = "min_cbm_bits", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_min_cbm_bits_show, .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, }, { .name = "shareable_bits", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_shareable_bits_show, .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, }, { .name = "bit_usage", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_bit_usage_show, .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, }, { .name = "min_bandwidth", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_min_bw_show, .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, }, { .name = "bandwidth_gran", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_bw_gran_show, .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, }, { .name = "delay_linear", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_delay_linear_show, .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, }, /* * Platform specific which (if any) capabilities are provided by * thread_throttle_mode. 
Defer "fflags" initialization to platform * discovery. */ { .name = "thread_throttle_mode", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_thread_throttle_mode_show, }, { .name = "max_threshold_occupancy", .mode = 0644, .kf_ops = &rdtgroup_kf_single_ops, .write = max_threshold_occ_write, .seq_show = max_threshold_occ_show, .fflags = RF_MON_INFO | RFTYPE_RES_CACHE, }, { .name = "mbm_total_bytes_config", .mode = 0644, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = mbm_total_bytes_config_show, .write = mbm_total_bytes_config_write, }, { .name = "mbm_local_bytes_config", .mode = 0644, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = mbm_local_bytes_config_show, .write = mbm_local_bytes_config_write, }, { .name = "cpus", .mode = 0644, .kf_ops = &rdtgroup_kf_single_ops, .write = rdtgroup_cpus_write, .seq_show = rdtgroup_cpus_show, .fflags = RFTYPE_BASE, }, { .name = "cpus_list", .mode = 0644, .kf_ops = &rdtgroup_kf_single_ops, .write = rdtgroup_cpus_write, .seq_show = rdtgroup_cpus_show, .flags = RFTYPE_FLAGS_CPUS_LIST, .fflags = RFTYPE_BASE, }, { .name = "tasks", .mode = 0644, .kf_ops = &rdtgroup_kf_single_ops, .write = rdtgroup_tasks_write, .seq_show = rdtgroup_tasks_show, .fflags = RFTYPE_BASE, }, { .name = "schemata", .mode = 0644, .kf_ops = &rdtgroup_kf_single_ops, .write = rdtgroup_schemata_write, .seq_show = rdtgroup_schemata_show, .fflags = RF_CTRL_BASE, }, { .name = "mode", .mode = 0644, .kf_ops = &rdtgroup_kf_single_ops, .write = rdtgroup_mode_write, .seq_show = rdtgroup_mode_show, .fflags = RF_CTRL_BASE, }, { .name = "size", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdtgroup_size_show, .fflags = RF_CTRL_BASE, }, }; static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) { struct rftype *rfts, *rft; int ret, len; rfts = res_common_files; len = ARRAY_SIZE(res_common_files); lockdep_assert_held(&rdtgroup_mutex); for (rft = rfts; rft < rfts + len; rft++) { if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { ret = rdtgroup_add_file(kn, rft); if (ret) goto error; } } return 0; error: pr_warn("Failed to add %s, err=%d\n", rft->name, ret); while (--rft >= rfts) { if ((fflags & rft->fflags) == rft->fflags) kernfs_remove_by_name(kn, rft->name); } return ret; } static struct rftype *rdtgroup_get_rftype_by_name(const char *name) { struct rftype *rfts, *rft; int len; rfts = res_common_files; len = ARRAY_SIZE(res_common_files); for (rft = rfts; rft < rfts + len; rft++) { if (!strcmp(rft->name, name)) return rft; } return NULL; } void __init thread_throttle_mode_init(void) { struct rftype *rft; rft = rdtgroup_get_rftype_by_name("thread_throttle_mode"); if (!rft) return; rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB; } void __init mbm_config_rftype_init(const char *config) { struct rftype *rft; rft = rdtgroup_get_rftype_by_name(config); if (rft) rft->fflags = RF_MON_INFO | RFTYPE_RES_CACHE; } /** * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file * @r: The resource group with which the file is associated. * @name: Name of the file * * The permissions of named resctrl file, directory, or link are modified * to not allow read, write, or execute by any user. * * WARNING: This function is intended to communicate to the user that the * resctrl file has been locked down - that it is not relevant to the * particular state the system finds itself in. 
It should not be relied * on to protect from user access because after the file's permissions * are restricted the user can still change the permissions using chmod * from the command line. * * Return: 0 on success, <0 on failure. */ int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) { struct iattr iattr = {.ia_valid = ATTR_MODE,}; struct kernfs_node *kn; int ret = 0; kn = kernfs_find_and_get_ns(r->kn, name, NULL); if (!kn) return -ENOENT; switch (kernfs_type(kn)) { case KERNFS_DIR: iattr.ia_mode = S_IFDIR; break; case KERNFS_FILE: iattr.ia_mode = S_IFREG; break; case KERNFS_LINK: iattr.ia_mode = S_IFLNK; break; } ret = kernfs_setattr(kn, &iattr); kernfs_put(kn); return ret; } /** * rdtgroup_kn_mode_restore - Restore user access to named resctrl file * @r: The resource group with which the file is associated. * @name: Name of the file * @mask: Mask of permissions that should be restored * * Restore the permissions of the named file. If @name is a directory the * permissions of its parent will be used. * * Return: 0 on success, <0 on failure. */ int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, umode_t mask) { struct iattr iattr = {.ia_valid = ATTR_MODE,}; struct kernfs_node *kn, *parent; struct rftype *rfts, *rft; int ret, len; rfts = res_common_files; len = ARRAY_SIZE(res_common_files); for (rft = rfts; rft < rfts + len; rft++) { if (!strcmp(rft->name, name)) iattr.ia_mode = rft->mode & mask; } kn = kernfs_find_and_get_ns(r->kn, name, NULL); if (!kn) return -ENOENT; switch (kernfs_type(kn)) { case KERNFS_DIR: parent = kernfs_get_parent(kn); if (parent) { iattr.ia_mode |= parent->mode; kernfs_put(parent); } iattr.ia_mode |= S_IFDIR; break; case KERNFS_FILE: iattr.ia_mode |= S_IFREG; break; case KERNFS_LINK: iattr.ia_mode |= S_IFLNK; break; } ret = kernfs_setattr(kn, &iattr); kernfs_put(kn); return ret; } static int rdtgroup_mkdir_info_resdir(void *priv, char *name, unsigned long fflags) { struct kernfs_node *kn_subdir; int ret; kn_subdir = kernfs_create_dir(kn_info, name, kn_info->mode, priv); if (IS_ERR(kn_subdir)) return PTR_ERR(kn_subdir); ret = rdtgroup_kn_set_ugid(kn_subdir); if (ret) return ret; ret = rdtgroup_add_files(kn_subdir, fflags); if (!ret) kernfs_activate(kn_subdir); return ret; } static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) { struct resctrl_schema *s; struct rdt_resource *r; unsigned long fflags; char name[32]; int ret; /* create the directory */ kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); if (IS_ERR(kn_info)) return PTR_ERR(kn_info); ret = rdtgroup_add_files(kn_info, RF_TOP_INFO); if (ret) goto out_destroy; /* loop over enabled controls, these are all alloc_capable */ list_for_each_entry(s, &resctrl_schema_all, list) { r = s->res; fflags = r->fflags | RF_CTRL_INFO; ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); if (ret) goto out_destroy; } for_each_mon_capable_rdt_resource(r) { fflags = r->fflags | RF_MON_INFO; sprintf(name, "%s_MON", r->name); ret = rdtgroup_mkdir_info_resdir(r, name, fflags); if (ret) goto out_destroy; } ret = rdtgroup_kn_set_ugid(kn_info); if (ret) goto out_destroy; kernfs_activate(kn_info); return 0; out_destroy: kernfs_remove(kn_info); return ret; } static int mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, char *name, struct kernfs_node **dest_kn) { struct kernfs_node *kn; int ret; /* create the directory */ kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); if (IS_ERR(kn)) return PTR_ERR(kn); if (dest_kn) *dest_kn = kn; 
ret = rdtgroup_kn_set_ugid(kn); if (ret) goto out_destroy; kernfs_activate(kn); return 0; out_destroy: kernfs_remove(kn); return ret; } static void l3_qos_cfg_update(void *arg) { bool *enable = arg; wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); } static void l2_qos_cfg_update(void *arg) { bool *enable = arg; wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); } static inline bool is_mba_linear(void) { return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear; } static int set_cache_qos_cfg(int level, bool enable) { void (*update)(void *arg); struct rdt_resource *r_l; cpumask_var_t cpu_mask; struct rdt_domain *d; int cpu; if (level == RDT_RESOURCE_L3) update = l3_qos_cfg_update; else if (level == RDT_RESOURCE_L2) update = l2_qos_cfg_update; else return -EINVAL; if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) return -ENOMEM; r_l = &rdt_resources_all[level].r_resctrl; list_for_each_entry(d, &r_l->domains, list) { if (r_l->cache.arch_has_per_cpu_cfg) /* Pick all the CPUs in the domain instance */ for_each_cpu(cpu, &d->cpu_mask) cpumask_set_cpu(cpu, cpu_mask); else /* Pick one CPU from each domain instance to update MSR */ cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); } /* Update QOS_CFG MSR on all the CPUs in cpu_mask */ on_each_cpu_mask(cpu_mask, update, &enable, 1); free_cpumask_var(cpu_mask); return 0; } /* Restore the qos cfg state when a domain comes online */ void rdt_domain_reconfigure_cdp(struct rdt_resource *r) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); if (!r->cdp_capable) return; if (r->rid == RDT_RESOURCE_L2) l2_qos_cfg_update(&hw_res->cdp_enabled); if (r->rid == RDT_RESOURCE_L3) l3_qos_cfg_update(&hw_res->cdp_enabled); } static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d) { u32 num_closid = resctrl_arch_get_num_closid(r); int cpu = cpumask_any(&d->cpu_mask); int i; d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val), GFP_KERNEL, cpu_to_node(cpu)); if (!d->mbps_val) return -ENOMEM; for (i = 0; i < num_closid; i++) d->mbps_val[i] = MBA_MAX_MBPS; return 0; } static void mba_sc_domain_destroy(struct rdt_resource *r, struct rdt_domain *d) { kfree(d->mbps_val); d->mbps_val = NULL; } /* * MBA software controller is supported only if * MBM is supported and MBA is in linear scale. */ static bool supports_mba_mbps(void) { struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; return (is_mbm_local_enabled() && r->alloc_capable && is_mba_linear()); } /* * Enable or disable the MBA software controller * which helps user specify bandwidth in MBps. 
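 *
 * For example, mounting resctrl with the "mba_MBps" option (parsed in
 * rdt_parse_param() below) results in rdt_enable_ctx() calling
 * set_mba_sc(true).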
*/ static int set_mba_sc(bool mba_sc) { struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; u32 num_closid = resctrl_arch_get_num_closid(r); struct rdt_domain *d; int i; if (!supports_mba_mbps() || mba_sc == is_mba_sc(r)) return -EINVAL; r->membw.mba_sc = mba_sc; list_for_each_entry(d, &r->domains, list) { for (i = 0; i < num_closid; i++) d->mbps_val[i] = MBA_MAX_MBPS; } return 0; } static int cdp_enable(int level) { struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl; int ret; if (!r_l->alloc_capable) return -EINVAL; ret = set_cache_qos_cfg(level, true); if (!ret) rdt_resources_all[level].cdp_enabled = true; return ret; } static void cdp_disable(int level) { struct rdt_hw_resource *r_hw = &rdt_resources_all[level]; if (r_hw->cdp_enabled) { set_cache_qos_cfg(level, false); r_hw->cdp_enabled = false; } } int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable) { struct rdt_hw_resource *hw_res = &rdt_resources_all[l]; if (!hw_res->r_resctrl.cdp_capable) return -EINVAL; if (enable) return cdp_enable(l); cdp_disable(l); return 0; } static void cdp_disable_all(void) { if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); } /* * We don't allow rdtgroup directories to be created anywhere * except the root directory. Thus when looking for the rdtgroup * structure for a kernfs node we are either looking at a directory, * in which case the rdtgroup structure is pointed at by the "priv" * field, otherwise we have a file, and need only look to the parent * to find the rdtgroup. */ static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) { if (kernfs_type(kn) == KERNFS_DIR) { /* * All the resource directories use "kn->priv" * to point to the "struct rdtgroup" for the * resource. "info" and its subdirectories don't * have rdtgroup structures, so return NULL here. */ if (kn == kn_info || kn->parent == kn_info) return NULL; else return kn->priv; } else { return kn->parent->priv; } } static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn) { atomic_inc(&rdtgrp->waitcount); kernfs_break_active_protection(kn); } static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn) { if (atomic_dec_and_test(&rdtgrp->waitcount) && (rdtgrp->flags & RDT_DELETED)) { if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) rdtgroup_pseudo_lock_remove(rdtgrp); kernfs_unbreak_active_protection(kn); rdtgroup_remove(rdtgrp); } else { kernfs_unbreak_active_protection(kn); } } struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) { struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); if (!rdtgrp) return NULL; rdtgroup_kn_get(rdtgrp, kn); mutex_lock(&rdtgroup_mutex); /* Was this group deleted while we waited? 
*/ if (rdtgrp->flags & RDT_DELETED) return NULL; return rdtgrp; } void rdtgroup_kn_unlock(struct kernfs_node *kn) { struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); if (!rdtgrp) return; mutex_unlock(&rdtgroup_mutex); rdtgroup_kn_put(rdtgrp, kn); } static int mkdir_mondata_all(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, struct kernfs_node **mon_data_kn); static int rdt_enable_ctx(struct rdt_fs_context *ctx) { int ret = 0; if (ctx->enable_cdpl2) ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); if (!ret && ctx->enable_cdpl3) ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); if (!ret && ctx->enable_mba_mbps) ret = set_mba_sc(true); return ret; } static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type) { struct resctrl_schema *s; const char *suffix = ""; int ret, cl; s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; s->res = r; s->num_closid = resctrl_arch_get_num_closid(r); if (resctrl_arch_get_cdp_enabled(r->rid)) s->num_closid /= 2; s->conf_type = type; switch (type) { case CDP_CODE: suffix = "CODE"; break; case CDP_DATA: suffix = "DATA"; break; case CDP_NONE: suffix = ""; break; } ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); if (ret >= sizeof(s->name)) { kfree(s); return -EINVAL; } cl = strlen(s->name); /* * If CDP is supported by this resource, but not enabled, * include the suffix. This ensures the tabular format of the * schemata file does not change between mounts of the filesystem. */ if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid)) cl += 4; if (cl > max_name_width) max_name_width = cl; INIT_LIST_HEAD(&s->list); list_add(&s->list, &resctrl_schema_all); return 0; } static int schemata_list_create(void) { struct rdt_resource *r; int ret = 0; for_each_alloc_capable_rdt_resource(r) { if (resctrl_arch_get_cdp_enabled(r->rid)) { ret = schemata_list_add(r, CDP_CODE); if (ret) break; ret = schemata_list_add(r, CDP_DATA); } else { ret = schemata_list_add(r, CDP_NONE); } if (ret) break; } return ret; } static void schemata_list_destroy(void) { struct resctrl_schema *s, *tmp; list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) { list_del(&s->list); kfree(s); } } static int rdt_get_tree(struct fs_context *fc) { struct rdt_fs_context *ctx = rdt_fc2context(fc); struct rdt_domain *dom; struct rdt_resource *r; int ret; cpus_read_lock(); mutex_lock(&rdtgroup_mutex); /* * resctrl file system can only be mounted once. 
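 * A second mount attempt while it is already mounted fails with -EBUSY
 * below; the filesystem is conventionally mounted at /sys/fs/resctrl.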
*/ if (static_branch_unlikely(&rdt_enable_key)) { ret = -EBUSY; goto out; } ret = rdt_enable_ctx(ctx); if (ret < 0) goto out_cdp; ret = schemata_list_create(); if (ret) { schemata_list_destroy(); goto out_mba; } closid_init(); ret = rdtgroup_create_info_dir(rdtgroup_default.kn); if (ret < 0) goto out_schemata_free; if (rdt_mon_capable) { ret = mongroup_create_dir(rdtgroup_default.kn, &rdtgroup_default, "mon_groups", &kn_mongrp); if (ret < 0) goto out_info; ret = mkdir_mondata_all(rdtgroup_default.kn, &rdtgroup_default, &kn_mondata); if (ret < 0) goto out_mongrp; rdtgroup_default.mon.mon_data_kn = kn_mondata; } ret = rdt_pseudo_lock_init(); if (ret) goto out_mondata; ret = kernfs_get_tree(fc); if (ret < 0) goto out_psl; if (rdt_alloc_capable) static_branch_enable_cpuslocked(&rdt_alloc_enable_key); if (rdt_mon_capable) static_branch_enable_cpuslocked(&rdt_mon_enable_key); if (rdt_alloc_capable || rdt_mon_capable) static_branch_enable_cpuslocked(&rdt_enable_key); if (is_mbm_enabled()) { r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; list_for_each_entry(dom, &r->domains, list) mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL); } goto out; out_psl: rdt_pseudo_lock_release(); out_mondata: if (rdt_mon_capable) kernfs_remove(kn_mondata); out_mongrp: if (rdt_mon_capable) kernfs_remove(kn_mongrp); out_info: kernfs_remove(kn_info); out_schemata_free: schemata_list_destroy(); out_mba: if (ctx->enable_mba_mbps) set_mba_sc(false); out_cdp: cdp_disable_all(); out: rdt_last_cmd_clear(); mutex_unlock(&rdtgroup_mutex); cpus_read_unlock(); return ret; } enum rdt_param { Opt_cdp, Opt_cdpl2, Opt_mba_mbps, nr__rdt_params }; static const struct fs_parameter_spec rdt_fs_parameters[] = { fsparam_flag("cdp", Opt_cdp), fsparam_flag("cdpl2", Opt_cdpl2), fsparam_flag("mba_MBps", Opt_mba_mbps), {} }; static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct rdt_fs_context *ctx = rdt_fc2context(fc); struct fs_parse_result result; int opt; opt = fs_parse(fc, rdt_fs_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_cdp: ctx->enable_cdpl3 = true; return 0; case Opt_cdpl2: ctx->enable_cdpl2 = true; return 0; case Opt_mba_mbps: if (!supports_mba_mbps()) return -EINVAL; ctx->enable_mba_mbps = true; return 0; } return -EINVAL; } static void rdt_fs_context_free(struct fs_context *fc) { struct rdt_fs_context *ctx = rdt_fc2context(fc); kernfs_free_fs_context(fc); kfree(ctx); } static const struct fs_context_operations rdt_fs_context_ops = { .free = rdt_fs_context_free, .parse_param = rdt_parse_param, .get_tree = rdt_get_tree, }; static int rdt_init_fs_context(struct fs_context *fc) { struct rdt_fs_context *ctx; ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->kfc.root = rdt_root; ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; fc->fs_private = &ctx->kfc; fc->ops = &rdt_fs_context_ops; put_user_ns(fc->user_ns); fc->user_ns = get_user_ns(&init_user_ns); fc->global = true; return 0; } static int reset_all_ctrls(struct rdt_resource *r) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom; struct msr_param msr_param; cpumask_var_t cpu_mask; struct rdt_domain *d; int i; if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) return -ENOMEM; msr_param.res = r; msr_param.low = 0; msr_param.high = hw_res->num_closid; /* * Disable resource control for this resource by setting all * CBMs in all domains to the maximum mask value. Pick one CPU * from each domain to update the MSRs below. 
*/ list_for_each_entry(d, &r->domains, list) { hw_dom = resctrl_to_arch_dom(d); cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); for (i = 0; i < hw_res->num_closid; i++) hw_dom->ctrl_val[i] = r->default_ctrl; } /* Update CBM on all the CPUs in cpu_mask */ on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1); free_cpumask_var(cpu_mask); return 0; } /* * Move tasks from one to the other group. If @from is NULL, then all tasks * in the systems are moved unconditionally (used for teardown). * * If @mask is not NULL the cpus on which moved tasks are running are set * in that mask so the update smp function call is restricted to affected * cpus. */ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, struct cpumask *mask) { struct task_struct *p, *t; read_lock(&tasklist_lock); for_each_process_thread(p, t) { if (!from || is_closid_match(t, from) || is_rmid_match(t, from)) { WRITE_ONCE(t->closid, to->closid); WRITE_ONCE(t->rmid, to->mon.rmid); /* * Order the closid/rmid stores above before the loads * in task_curr(). This pairs with the full barrier * between the rq->curr update and resctrl_sched_in() * during context switch. */ smp_mb(); /* * If the task is on a CPU, set the CPU in the mask. * The detection is inaccurate as tasks might move or * schedule before the smp function call takes place. * In such a case the function call is pointless, but * there is no other side effect. */ if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t)) cpumask_set_cpu(task_cpu(t), mask); } } read_unlock(&tasklist_lock); } static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) { struct rdtgroup *sentry, *stmp; struct list_head *head; head = &rdtgrp->mon.crdtgrp_list; list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { free_rmid(sentry->mon.rmid); list_del(&sentry->mon.crdtgrp_list); if (atomic_read(&sentry->waitcount) != 0) sentry->flags = RDT_DELETED; else rdtgroup_remove(sentry); } } /* * Forcibly remove all of subdirectories under root. */ static void rmdir_all_sub(void) { struct rdtgroup *rdtgrp, *tmp; /* Move all tasks to the default resource group */ rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { /* Free any child rmids */ free_all_child_rdtgrp(rdtgrp); /* Remove each rdtgroup other than root */ if (rdtgrp == &rdtgroup_default) continue; if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) rdtgroup_pseudo_lock_remove(rdtgrp); /* * Give any CPUs back to the default group. We cannot copy * cpu_online_mask because a CPU might have executed the * offline callback already, but is still marked online. */ cpumask_or(&rdtgroup_default.cpu_mask, &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); free_rmid(rdtgrp->mon.rmid); kernfs_remove(rdtgrp->kn); list_del(&rdtgrp->rdtgroup_list); if (atomic_read(&rdtgrp->waitcount) != 0) rdtgrp->flags = RDT_DELETED; else rdtgroup_remove(rdtgrp); } /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ update_closid_rmid(cpu_online_mask, &rdtgroup_default); kernfs_remove(kn_info); kernfs_remove(kn_mongrp); kernfs_remove(kn_mondata); } static void rdt_kill_sb(struct super_block *sb) { struct rdt_resource *r; cpus_read_lock(); mutex_lock(&rdtgroup_mutex); set_mba_sc(false); /*Put everything back to default values. 
*/ for_each_alloc_capable_rdt_resource(r) reset_all_ctrls(r); cdp_disable_all(); rmdir_all_sub(); rdt_pseudo_lock_release(); rdtgroup_default.mode = RDT_MODE_SHAREABLE; schemata_list_destroy(); static_branch_disable_cpuslocked(&rdt_alloc_enable_key); static_branch_disable_cpuslocked(&rdt_mon_enable_key); static_branch_disable_cpuslocked(&rdt_enable_key); kernfs_kill_sb(sb); mutex_unlock(&rdtgroup_mutex); cpus_read_unlock(); } static struct file_system_type rdt_fs_type = { .name = "resctrl", .init_fs_context = rdt_init_fs_context, .parameters = rdt_fs_parameters, .kill_sb = rdt_kill_sb, }; static int mon_addfile(struct kernfs_node *parent_kn, const char *name, void *priv) { struct kernfs_node *kn; int ret = 0; kn = __kernfs_create_file(parent_kn, name, 0444, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, &kf_mondata_ops, priv, NULL, NULL); if (IS_ERR(kn)) return PTR_ERR(kn); ret = rdtgroup_kn_set_ugid(kn); if (ret) { kernfs_remove(kn); return ret; } return ret; } /* * Remove all subdirectories of mon_data of ctrl_mon groups * and monitor groups with given domain id. */ static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id) { struct rdtgroup *prgrp, *crgrp; char name[32]; list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { sprintf(name, "mon_%s_%02d", r->name, dom_id); kernfs_remove_by_name(prgrp->mon.mon_data_kn, name); list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) kernfs_remove_by_name(crgrp->mon.mon_data_kn, name); } } static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, struct rdt_domain *d, struct rdt_resource *r, struct rdtgroup *prgrp) { union mon_data_bits priv; struct kernfs_node *kn; struct mon_evt *mevt; struct rmid_read rr; char name[32]; int ret; sprintf(name, "mon_%s_%02d", r->name, d->id); /* create the directory */ kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); if (IS_ERR(kn)) return PTR_ERR(kn); ret = rdtgroup_kn_set_ugid(kn); if (ret) goto out_destroy; if (WARN_ON(list_empty(&r->evt_list))) { ret = -EPERM; goto out_destroy; } priv.u.rid = r->rid; priv.u.domid = d->id; list_for_each_entry(mevt, &r->evt_list, list) { priv.u.evtid = mevt->evtid; ret = mon_addfile(kn, mevt->name, priv.priv); if (ret) goto out_destroy; if (is_mbm_event(mevt->evtid)) mon_event_read(&rr, r, d, prgrp, mevt->evtid, true); } kernfs_activate(kn); return 0; out_destroy: kernfs_remove(kn); return ret; } /* * Add all subdirectories of mon_data for "ctrl_mon" groups * and "monitor" groups with given domain id. */ static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, struct rdt_domain *d) { struct kernfs_node *parent_kn; struct rdtgroup *prgrp, *crgrp; struct list_head *head; list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { parent_kn = prgrp->mon.mon_data_kn; mkdir_mondata_subdir(parent_kn, d, r, prgrp); head = &prgrp->mon.crdtgrp_list; list_for_each_entry(crgrp, head, mon.crdtgrp_list) { parent_kn = crgrp->mon.mon_data_kn; mkdir_mondata_subdir(parent_kn, d, r, crgrp); } } } static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, struct rdt_resource *r, struct rdtgroup *prgrp) { struct rdt_domain *dom; int ret; list_for_each_entry(dom, &r->domains, list) { ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); if (ret) return ret; } return 0; } /* * This creates a directory mon_data which contains the monitored data. * * mon_data has one directory for each domain which are named * in the format mon_<domain_name>_<domain_id>. 
For ex: A mon_data * with L3 domain looks as below: * ./mon_data: * mon_L3_00 * mon_L3_01 * mon_L3_02 * ... * * Each domain directory has one file per event: * ./mon_L3_00/: * llc_occupancy * */ static int mkdir_mondata_all(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, struct kernfs_node **dest_kn) { struct rdt_resource *r; struct kernfs_node *kn; int ret; /* * Create the mon_data directory first. */ ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn); if (ret) return ret; if (dest_kn) *dest_kn = kn; /* * Create the subdirectories for each domain. Note that all events * in a domain like L3 are grouped into a resource whose domain is L3 */ for_each_mon_capable_rdt_resource(r) { ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); if (ret) goto out_destroy; } return 0; out_destroy: kernfs_remove(kn); return ret; } /** * cbm_ensure_valid - Enforce validity on provided CBM * @_val: Candidate CBM * @r: RDT resource to which the CBM belongs * * The provided CBM represents all cache portions available for use. This * may be represented by a bitmap that does not consist of contiguous ones * and thus be an invalid CBM. * Here the provided CBM is forced to be a valid CBM by only considering * the first set of contiguous bits as valid and clearing all bits. * The intention here is to provide a valid default CBM with which a new * resource group is initialized. The user can follow this with a * modification to the CBM if the default does not satisfy the * requirements. */ static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) { unsigned int cbm_len = r->cache.cbm_len; unsigned long first_bit, zero_bit; unsigned long val = _val; if (!val) return 0; first_bit = find_first_bit(&val, cbm_len); zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); /* Clear any remaining bits to ensure contiguous region */ bitmap_clear(&val, zero_bit, cbm_len - zero_bit); return (u32)val; } /* * Initialize cache resources per RDT domain * * Set the RDT domain up to start off with all usable allocations. That is, * all shareable and unused bits. All-zero CBM is invalid. */ static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s, u32 closid) { enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); enum resctrl_conf_type t = s->conf_type; struct resctrl_staged_config *cfg; struct rdt_resource *r = s->res; u32 used_b = 0, unused_b = 0; unsigned long tmp_cbm; enum rdtgrp_mode mode; u32 peer_ctl, ctrl_val; int i; cfg = &d->staged_config[t]; cfg->have_new_ctrl = false; cfg->new_ctrl = r->cache.shareable_bits; used_b = r->cache.shareable_bits; for (i = 0; i < closids_supported(); i++) { if (closid_allocated(i) && i != closid) { mode = rdtgroup_mode_by_closid(i); if (mode == RDT_MODE_PSEUDO_LOCKSETUP) /* * ctrl values for locksetup aren't relevant * until the schemata is written, and the mode * becomes RDT_MODE_PSEUDO_LOCKED. */ continue; /* * If CDP is active include peer domain's * usage to ensure there is no overlap * with an exclusive group. 
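 * For example, with CDP enabled on L3, a new CODE configuration must
 * also avoid bits owned by an exclusive group in the corresponding DATA
 * configuration (the "peer" type from resctrl_peer_type()), and vice
 * versa.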
*/ if (resctrl_arch_get_cdp_enabled(r->rid)) peer_ctl = resctrl_arch_get_config(r, d, i, peer_type); else peer_ctl = 0; ctrl_val = resctrl_arch_get_config(r, d, i, s->conf_type); used_b |= ctrl_val | peer_ctl; if (mode == RDT_MODE_SHAREABLE) cfg->new_ctrl |= ctrl_val | peer_ctl; } } if (d->plr && d->plr->cbm > 0) used_b |= d->plr->cbm; unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); unused_b &= BIT_MASK(r->cache.cbm_len) - 1; cfg->new_ctrl |= unused_b; /* * Force the initial CBM to be valid, user can * modify the CBM based on system availability. */ cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r); /* * Assign the u32 CBM to an unsigned long to ensure that * bitmap_weight() does not access out-of-bound memory. */ tmp_cbm = cfg->new_ctrl; if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id); return -ENOSPC; } cfg->have_new_ctrl = true; return 0; } /* * Initialize cache resources with default values. * * A new RDT group is being created on an allocation capable (CAT) * supporting system. Set this group up to start off with all usable * allocations. * * If there are no more shareable bits available on any domain then * the entire allocation will fail. */ static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) { struct rdt_domain *d; int ret; list_for_each_entry(d, &s->res->domains, list) { ret = __init_one_rdt_domain(d, s, closid); if (ret < 0) return ret; } return 0; } /* Initialize MBA resource with default values. */ static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid) { struct resctrl_staged_config *cfg; struct rdt_domain *d; list_for_each_entry(d, &r->domains, list) { if (is_mba_sc(r)) { d->mbps_val[closid] = MBA_MAX_MBPS; continue; } cfg = &d->staged_config[CDP_NONE]; cfg->new_ctrl = r->default_ctrl; cfg->have_new_ctrl = true; } } /* Initialize the RDT group's allocations. */ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) { struct resctrl_schema *s; struct rdt_resource *r; int ret = 0; rdt_staged_configs_clear(); list_for_each_entry(s, &resctrl_schema_all, list) { r = s->res; if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) { rdtgroup_init_mba(r, rdtgrp->closid); if (is_mba_sc(r)) continue; } else { ret = rdtgroup_init_cat(s, rdtgrp->closid); if (ret < 0) goto out; } ret = resctrl_arch_update_domains(r, rdtgrp->closid); if (ret < 0) { rdt_last_cmd_puts("Failed to initialize allocations\n"); goto out; } } rdtgrp->mode = RDT_MODE_SHAREABLE; out: rdt_staged_configs_clear(); return ret; } static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, const char *name, umode_t mode, enum rdt_group_type rtype, struct rdtgroup **r) { struct rdtgroup *prdtgrp, *rdtgrp; struct kernfs_node *kn; uint files = 0; int ret; prdtgrp = rdtgroup_kn_lock_live(parent_kn); if (!prdtgrp) { ret = -ENODEV; goto out_unlock; } if (rtype == RDTMON_GROUP && (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { ret = -EINVAL; rdt_last_cmd_puts("Pseudo-locking in progress\n"); goto out_unlock; } /* allocate the rdtgroup. 
*/ rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); if (!rdtgrp) { ret = -ENOSPC; rdt_last_cmd_puts("Kernel out of memory\n"); goto out_unlock; } *r = rdtgrp; rdtgrp->mon.parent = prdtgrp; rdtgrp->type = rtype; INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); /* kernfs creates the directory for rdtgrp */ kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); if (IS_ERR(kn)) { ret = PTR_ERR(kn); rdt_last_cmd_puts("kernfs create error\n"); goto out_free_rgrp; } rdtgrp->kn = kn; /* * kernfs_remove() will drop the reference count on "kn" which * will free it. But we still need it to stick around for the * rdtgroup_kn_unlock(kn) call. Take one extra reference here, * which will be dropped by kernfs_put() in rdtgroup_remove(). */ kernfs_get(kn); ret = rdtgroup_kn_set_ugid(kn); if (ret) { rdt_last_cmd_puts("kernfs perm error\n"); goto out_destroy; } files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype); ret = rdtgroup_add_files(kn, files); if (ret) { rdt_last_cmd_puts("kernfs fill error\n"); goto out_destroy; } if (rdt_mon_capable) { ret = alloc_rmid(); if (ret < 0) { rdt_last_cmd_puts("Out of RMIDs\n"); goto out_destroy; } rdtgrp->mon.rmid = ret; ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn); if (ret) { rdt_last_cmd_puts("kernfs subdir error\n"); goto out_idfree; } } kernfs_activate(kn); /* * The caller unlocks the parent_kn upon success. */ return 0; out_idfree: free_rmid(rdtgrp->mon.rmid); out_destroy: kernfs_put(rdtgrp->kn); kernfs_remove(rdtgrp->kn); out_free_rgrp: kfree(rdtgrp); out_unlock: rdtgroup_kn_unlock(parent_kn); return ret; } static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) { kernfs_remove(rgrp->kn); free_rmid(rgrp->mon.rmid); rdtgroup_remove(rgrp); } /* * Create a monitor group under "mon_groups" directory of a control * and monitor group(ctrl_mon). This is a resource group * to monitor a subset of tasks and cpus in its parent ctrl_mon group. */ static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, const char *name, umode_t mode) { struct rdtgroup *rdtgrp, *prgrp; int ret; ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); if (ret) return ret; prgrp = rdtgrp->mon.parent; rdtgrp->closid = prgrp->closid; /* * Add the rdtgrp to the list of rdtgrps the parent * ctrl_mon group has to track. */ list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); rdtgroup_kn_unlock(parent_kn); return ret; } /* * These are rdtgroups created under the root directory. Can be used * to allocate and monitor resources. */ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, const char *name, umode_t mode) { struct rdtgroup *rdtgrp; struct kernfs_node *kn; u32 closid; int ret; ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); if (ret) return ret; kn = rdtgrp->kn; ret = closid_alloc(); if (ret < 0) { rdt_last_cmd_puts("Out of CLOSIDs\n"); goto out_common_fail; } closid = ret; ret = 0; rdtgrp->closid = closid; ret = rdtgroup_init_alloc(rdtgrp); if (ret < 0) goto out_id_free; list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); if (rdt_mon_capable) { /* * Create an empty mon_groups directory to hold the subset * of tasks and cpus to monitor. 
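 * Monitor groups are then created by making directories inside
 * "mon_groups"; rdtgroup_mkdir() routes those requests to
 * rdtgroup_mkdir_mon().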
*/ ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); if (ret) { rdt_last_cmd_puts("kernfs subdir error\n"); goto out_del_list; } } goto out_unlock; out_del_list: list_del(&rdtgrp->rdtgroup_list); out_id_free: closid_free(closid); out_common_fail: mkdir_rdt_prepare_clean(rdtgrp); out_unlock: rdtgroup_kn_unlock(parent_kn); return ret; } /* * We allow creating mon groups only with in a directory called "mon_groups" * which is present in every ctrl_mon group. Check if this is a valid * "mon_groups" directory. * * 1. The directory should be named "mon_groups". * 2. The mon group itself should "not" be named "mon_groups". * This makes sure "mon_groups" directory always has a ctrl_mon group * as parent. */ static bool is_mon_groups(struct kernfs_node *kn, const char *name) { return (!strcmp(kn->name, "mon_groups") && strcmp(name, "mon_groups")); } static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode) { /* Do not accept '\n' to avoid unparsable situation. */ if (strchr(name, '\n')) return -EINVAL; /* * If the parent directory is the root directory and RDT * allocation is supported, add a control and monitoring * subdirectory */ if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn) return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); /* * If RDT monitoring is supported and the parent directory is a valid * "mon_groups" directory, add a monitoring subdirectory. */ if (rdt_mon_capable && is_mon_groups(parent_kn, name)) return rdtgroup_mkdir_mon(parent_kn, name, mode); return -EPERM; } static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) { struct rdtgroup *prdtgrp = rdtgrp->mon.parent; int cpu; /* Give any tasks back to the parent group */ rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); /* Update per cpu rmid of the moved CPUs first */ for_each_cpu(cpu, &rdtgrp->cpu_mask) per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid; /* * Update the MSR on moved CPUs and CPUs which have moved * task running on them. */ cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); update_closid_rmid(tmpmask, NULL); rdtgrp->flags = RDT_DELETED; free_rmid(rdtgrp->mon.rmid); /* * Remove the rdtgrp from the parent ctrl_mon group's list */ WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); list_del(&rdtgrp->mon.crdtgrp_list); kernfs_remove(rdtgrp->kn); return 0; } static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) { rdtgrp->flags = RDT_DELETED; list_del(&rdtgrp->rdtgroup_list); kernfs_remove(rdtgrp->kn); return 0; } static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) { int cpu; /* Give any tasks back to the default group */ rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); /* Give any CPUs back to the default group */ cpumask_or(&rdtgroup_default.cpu_mask, &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); /* Update per cpu closid and rmid of the moved CPUs first */ for_each_cpu(cpu, &rdtgrp->cpu_mask) { per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid; per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid; } /* * Update the MSR on moved CPUs and CPUs which have moved * task running on them. */ cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); update_closid_rmid(tmpmask, NULL); closid_free(rdtgrp->closid); free_rmid(rdtgrp->mon.rmid); rdtgroup_ctrl_remove(rdtgrp); /* * Free all the child monitor group rmids. 
*/ free_all_child_rdtgrp(rdtgrp); return 0; } static int rdtgroup_rmdir(struct kernfs_node *kn) { struct kernfs_node *parent_kn = kn->parent; struct rdtgroup *rdtgrp; cpumask_var_t tmpmask; int ret = 0; if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) return -ENOMEM; rdtgrp = rdtgroup_kn_lock_live(kn); if (!rdtgrp) { ret = -EPERM; goto out; } /* * If the rdtgroup is a ctrl_mon group and parent directory * is the root directory, remove the ctrl_mon group. * * If the rdtgroup is a mon group and parent directory * is a valid "mon_groups" directory, remove the mon group. */ if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && rdtgrp != &rdtgroup_default) { if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { ret = rdtgroup_ctrl_remove(rdtgrp); } else { ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask); } } else if (rdtgrp->type == RDTMON_GROUP && is_mon_groups(parent_kn, kn->name)) { ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask); } else { ret = -EPERM; } out: rdtgroup_kn_unlock(kn); free_cpumask_var(tmpmask); return ret; } /** * mongrp_reparent() - replace parent CTRL_MON group of a MON group * @rdtgrp: the MON group whose parent should be replaced * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp * @cpus: cpumask provided by the caller for use during this call * * Replaces the parent CTRL_MON group for a MON group, resulting in all member * tasks' CLOSID immediately changing to that of the new parent group. * Monitoring data for the group is unaffected by this operation. */ static void mongrp_reparent(struct rdtgroup *rdtgrp, struct rdtgroup *new_prdtgrp, cpumask_var_t cpus) { struct rdtgroup *prdtgrp = rdtgrp->mon.parent; WARN_ON(rdtgrp->type != RDTMON_GROUP); WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP); /* Nothing to do when simply renaming a MON group. */ if (prdtgrp == new_prdtgrp) return; WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); list_move_tail(&rdtgrp->mon.crdtgrp_list, &new_prdtgrp->mon.crdtgrp_list); rdtgrp->mon.parent = new_prdtgrp; rdtgrp->closid = new_prdtgrp->closid; /* Propagate updated closid to all tasks in this group. */ rdt_move_group_tasks(rdtgrp, rdtgrp, cpus); update_closid_rmid(cpus, NULL); } static int rdtgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, const char *new_name) { struct rdtgroup *new_prdtgrp; struct rdtgroup *rdtgrp; cpumask_var_t tmpmask; int ret; rdtgrp = kernfs_to_rdtgroup(kn); new_prdtgrp = kernfs_to_rdtgroup(new_parent); if (!rdtgrp || !new_prdtgrp) return -ENOENT; /* Release both kernfs active_refs before obtaining rdtgroup mutex. */ rdtgroup_kn_get(rdtgrp, kn); rdtgroup_kn_get(new_prdtgrp, new_parent); mutex_lock(&rdtgroup_mutex); rdt_last_cmd_clear(); /* * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if * either kernfs_node is a file. 
*/ if (kernfs_type(kn) != KERNFS_DIR || kernfs_type(new_parent) != KERNFS_DIR) { rdt_last_cmd_puts("Source and destination must be directories"); ret = -EPERM; goto out; } if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) { ret = -ENOENT; goto out; } if (rdtgrp->type != RDTMON_GROUP || !kn->parent || !is_mon_groups(kn->parent, kn->name)) { rdt_last_cmd_puts("Source must be a MON group\n"); ret = -EPERM; goto out; } if (!is_mon_groups(new_parent, new_name)) { rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n"); ret = -EPERM; goto out; } /* * If the MON group is monitoring CPUs, the CPUs must be assigned to the * current parent CTRL_MON group and therefore cannot be assigned to * the new parent, making the move illegal. */ if (!cpumask_empty(&rdtgrp->cpu_mask) && rdtgrp->mon.parent != new_prdtgrp) { rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n"); ret = -EPERM; goto out; } /* * Allocate the cpumask for use in mongrp_reparent() to avoid the * possibility of failing to allocate it after kernfs_rename() has * succeeded. */ if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) { ret = -ENOMEM; goto out; } /* * Perform all input validation and allocations needed to ensure * mongrp_reparent() will succeed before calling kernfs_rename(), * otherwise it would be necessary to revert this call if * mongrp_reparent() failed. */ ret = kernfs_rename(kn, new_parent, new_name); if (!ret) mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask); free_cpumask_var(tmpmask); out: mutex_unlock(&rdtgroup_mutex); rdtgroup_kn_put(rdtgrp, kn); rdtgroup_kn_put(new_prdtgrp, new_parent); return ret; } static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) { if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) seq_puts(seq, ",cdp"); if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) seq_puts(seq, ",cdpl2"); if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl)) seq_puts(seq, ",mba_MBps"); return 0; } static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { .mkdir = rdtgroup_mkdir, .rmdir = rdtgroup_rmdir, .rename = rdtgroup_rename, .show_options = rdtgroup_show_options, }; static int __init rdtgroup_setup_root(void) { int ret; rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, KERNFS_ROOT_CREATE_DEACTIVATED | KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, &rdtgroup_default); if (IS_ERR(rdt_root)) return PTR_ERR(rdt_root); mutex_lock(&rdtgroup_mutex); rdtgroup_default.closid = 0; rdtgroup_default.mon.rmid = 0; rdtgroup_default.type = RDTCTRL_GROUP; INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RF_CTRL_BASE); if (ret) { kernfs_destroy_root(rdt_root); goto out; } rdtgroup_default.kn = kernfs_root_to_node(rdt_root); kernfs_activate(rdtgroup_default.kn); out: mutex_unlock(&rdtgroup_mutex); return ret; } static void domain_destroy_mon_state(struct rdt_domain *d) { bitmap_free(d->rmid_busy_llc); kfree(d->mbm_total); kfree(d->mbm_local); } void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) { lockdep_assert_held(&rdtgroup_mutex); if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) mba_sc_domain_destroy(r, d); if (!r->mon_capable) return; /* * If resctrl is mounted, remove all the * per domain monitor data directories. 
*/ if (static_branch_unlikely(&rdt_mon_enable_key)) rmdir_mondata_subdir_allrdtgrp(r, d->id); if (is_mbm_enabled()) cancel_delayed_work(&d->mbm_over); if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) { /* * When a package is going down, forcefully * decrement rmid->ebusy. There is no way to know * that the L3 was flushed and hence may lead to * incorrect counts in rare scenarios, but leaving * the RMID as busy creates RMID leaks if the * package never comes back. */ __check_limbo(d, true); cancel_delayed_work(&d->cqm_limbo); } domain_destroy_mon_state(d); } static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) { size_t tsize; if (is_llc_occupancy_enabled()) { d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL); if (!d->rmid_busy_llc) return -ENOMEM; } if (is_mbm_total_enabled()) { tsize = sizeof(*d->mbm_total); d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL); if (!d->mbm_total) { bitmap_free(d->rmid_busy_llc); return -ENOMEM; } } if (is_mbm_local_enabled()) { tsize = sizeof(*d->mbm_local); d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL); if (!d->mbm_local) { bitmap_free(d->rmid_busy_llc); kfree(d->mbm_total); return -ENOMEM; } } return 0; } int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) { int err; lockdep_assert_held(&rdtgroup_mutex); if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) /* RDT_RESOURCE_MBA is never mon_capable */ return mba_sc_domain_allocate(r, d); if (!r->mon_capable) return 0; err = domain_setup_mon_state(r, d); if (err) return err; if (is_mbm_enabled()) { INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL); } if (is_llc_occupancy_enabled()) INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); /* If resctrl is mounted, add per domain monitor data directories. */ if (static_branch_unlikely(&rdt_mon_enable_key)) mkdir_mondata_subdir_allrdtgrp(r, d); return 0; } /* * rdtgroup_init - rdtgroup initialization * * Setup resctrl file system including set up root, create mount point, * register rdtgroup filesystem, and initialize files under root directory. * * Return: 0 on success or -errno */ int __init rdtgroup_init(void) { int ret = 0; seq_buf_init(&last_cmd_status, last_cmd_status_buf, sizeof(last_cmd_status_buf)); ret = rdtgroup_setup_root(); if (ret) return ret; ret = sysfs_create_mount_point(fs_kobj, "resctrl"); if (ret) goto cleanup_root; ret = register_filesystem(&rdt_fs_type); if (ret) goto cleanup_mountpoint; /* * Adding the resctrl debugfs directory here may not be ideal since * it would let the resctrl debugfs directory appear on the debugfs * filesystem before the resctrl filesystem is mounted. * It may also be ok since that would enable debugging of RDT before * resctrl is mounted. * The reason why the debugfs directory is created here and not in * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and * during the debugfs directory creation also &sb->s_type->i_mutex_key * (the lockdep class of inode->i_rwsem). Other filesystem * interactions (eg. SyS_getdents) have the lock ordering: * &sb->s_type->i_mutex_key --> &mm->mmap_lock * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex * is taken, thus creating dependency: * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause * issues considering the other two lock dependencies. 
* By creating the debugfs directory here we avoid a dependency * that may cause deadlock (even though file operations cannot * occur until the filesystem is mounted, but I do not know how to * tell lockdep that). */ debugfs_resctrl = debugfs_create_dir("resctrl", NULL); return 0; cleanup_mountpoint: sysfs_remove_mount_point(fs_kobj, "resctrl"); cleanup_root: kernfs_destroy_root(rdt_root); return ret; } void __exit rdtgroup_exit(void) { debugfs_remove_recursive(debugfs_resctrl); unregister_filesystem(&rdt_fs_type); sysfs_remove_mount_point(fs_kobj, "resctrl"); kernfs_destroy_root(rdt_root); }
linux-master
arch/x86/kernel/cpu/resctrl/rdtgroup.c
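/*
 * Illustrative sketch, not part of the kernel sources: a user-space
 * approximation of the bitmask arithmetic used above when a new resctrl
 * group gets its initial cache bitmask (CBM).  Bits already claimed by
 * other groups are OR-ed into "used_b", the complement within cbm_len
 * bits is the candidate mask, and the result is reduced to one
 * contiguous run of set bits, roughly what cbm_ensure_valid() enforces.
 * cbm_len and the example CBMs below are invented values.
 */
#include <stdio.h>

/* Keep the contiguous run of set bits that starts at the lowest set bit. */
static unsigned int make_contiguous(unsigned int val, unsigned int cbm_len)
{
	unsigned int mask = 0, i = 0;

	while (i < cbm_len && !(val & (1u << i)))
		i++;			/* skip cleared low bits */
	while (i < cbm_len && (val & (1u << i)))
		mask |= 1u << i++;	/* absorb the first run of set bits */

	return mask;
}

int main(void)
{
	const unsigned int cbm_len = 11;		/* hypothetical 11-way cache */
	const unsigned int used[] = { 0x00f, 0x0f0 };	/* CBMs of existing groups */
	unsigned int used_b = 0, unused_b, new_ctrl, i;

	for (i = 0; i < sizeof(used) / sizeof(used[0]); i++)
		used_b |= used[i];

	unused_b = ~used_b & ((1u << cbm_len) - 1);
	new_ctrl = make_contiguous(unused_b, cbm_len);

	printf("used=%#x unused=%#x new_ctrl=%#x\n", used_b, unused_b, new_ctrl);
	return 0;
}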
// SPDX-License-Identifier: GPL-2.0-or-later /* * Dynamic Ftrace based Kprobes Optimization * * Copyright (C) Hitachi Ltd., 2012 */ #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/hardirq.h> #include <linux/preempt.h> #include <linux/ftrace.h> #include "common.h" /* Ftrace callback handler for kprobes -- called under preempt disabled */ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct ftrace_regs *fregs) { struct pt_regs *regs = ftrace_get_regs(fregs); struct kprobe *p; struct kprobe_ctlblk *kcb; int bit; bit = ftrace_test_recursion_trylock(ip, parent_ip); if (bit < 0) return; p = get_kprobe((kprobe_opcode_t *)ip); if (unlikely(!p) || kprobe_disabled(p)) goto out; kcb = get_kprobe_ctlblk(); if (kprobe_running()) { kprobes_inc_nmissed_count(p); } else { unsigned long orig_ip = regs->ip; /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ regs->ip = ip + sizeof(kprobe_opcode_t); __this_cpu_write(current_kprobe, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; if (!p->pre_handler || !p->pre_handler(p, regs)) { /* * Emulate singlestep (and also recover regs->ip) * as if there is a 5byte nop */ regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; if (unlikely(p->post_handler)) { kcb->kprobe_status = KPROBE_HIT_SSDONE; p->post_handler(p, regs, 0); } regs->ip = orig_ip; } /* * If pre_handler returns !0, it changes regs->ip. We have to * skip emulating post_handler. */ __this_cpu_write(current_kprobe, NULL); } out: ftrace_test_recursion_unlock(bit); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); int arch_prepare_kprobe_ftrace(struct kprobe *p) { p->ainsn.insn = NULL; p->ainsn.boostable = false; return 0; }
linux-master
arch/x86/kernel/kprobes/ftrace.c
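/*
 * Illustrative sketch, not kernel code: the instruction-pointer
 * bookkeeping that kprobe_ftrace_handler() above performs, modelled with
 * a plain struct instead of pt_regs.  The pre-handler is shown the
 * address just past the "breakpoint" (ip + one opcode byte), the
 * single-step is faked by skipping the 5-byte fentry NOP, and the
 * original ip is restored before returning to ftrace.  The struct, the
 * constants and the address below are assumptions for the example only.
 */
#include <stdio.h>

#define KPROBE_OPCODE_SIZE 1ULL	/* sizeof(kprobe_opcode_t) on x86 */
#define MCOUNT_INSN_SIZE   5ULL	/* width of the call/NOP at the fentry site */

struct fake_regs {
	unsigned long long ip;
};

static void simulate_ftrace_kprobe_hit(struct fake_regs *regs, unsigned long long probe_ip)
{
	unsigned long long orig_ip = regs->ip;

	/* Pre-handler runs as if an int3 at probe_ip had just fired. */
	regs->ip = probe_ip + KPROBE_OPCODE_SIZE;
	printf("pre-handler sees ip  = %#llx\n", regs->ip);

	/* "Single-step" is emulated by stepping over the 5-byte NOP. */
	regs->ip = probe_ip + MCOUNT_INSN_SIZE;
	printf("post-handler sees ip = %#llx\n", regs->ip);

	/* Hand control back to ftrace with the original ip in place. */
	regs->ip = orig_ip;
}

int main(void)
{
	struct fake_regs regs = { .ip = 0xffffffff81000000ULL };

	simulate_ftrace_kprobe_hit(&regs, regs.ip);
	printf("restored ip          = %#llx\n", regs.ip);
	return 0;
}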
// SPDX-License-Identifier: GPL-2.0-or-later /* * Kernel Probes Jump Optimization (Optprobes) * * Copyright (C) IBM Corporation, 2002, 2004 * Copyright (C) Hitachi Ltd., 2012 */ #include <linux/kprobes.h> #include <linux/perf_event.h> #include <linux/ptrace.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/hardirq.h> #include <linux/preempt.h> #include <linux/extable.h> #include <linux/kdebug.h> #include <linux/kallsyms.h> #include <linux/kgdb.h> #include <linux/ftrace.h> #include <linux/objtool.h> #include <linux/pgtable.h> #include <linux/static_call.h> #include <asm/text-patching.h> #include <asm/cacheflush.h> #include <asm/desc.h> #include <linux/uaccess.h> #include <asm/alternative.h> #include <asm/insn.h> #include <asm/debugreg.h> #include <asm/set_memory.h> #include <asm/sections.h> #include <asm/nospec-branch.h> #include "common.h" unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) { struct optimized_kprobe *op; struct kprobe *kp; long offs; int i; for (i = 0; i < JMP32_INSN_SIZE; i++) { kp = get_kprobe((void *)addr - i); /* This function only handles jump-optimized kprobe */ if (kp && kprobe_optimized(kp)) { op = container_of(kp, struct optimized_kprobe, kp); /* If op is optimized or under unoptimizing */ if (list_empty(&op->list) || optprobe_queued_unopt(op)) goto found; } } return addr; found: /* * If the kprobe can be optimized, original bytes which can be * overwritten by jump destination address. In this case, original * bytes must be recovered from op->optinsn.copied_insn buffer. */ if (copy_from_kernel_nofault(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) return 0UL; if (addr == (unsigned long)kp->addr) { buf[0] = kp->opcode; memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE); } else { offs = addr - (unsigned long)kp->addr - 1; memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs); } return (unsigned long)buf; } static void synthesize_clac(kprobe_opcode_t *addr) { /* * Can't be static_cpu_has() due to how objtool treats this feature bit. * This isn't a fast path anyway. */ if (!boot_cpu_has(X86_FEATURE_SMAP)) return; /* Replace the NOP3 with CLAC */ addr[0] = 0x0f; addr[1] = 0x01; addr[2] = 0xca; } /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) { #ifdef CONFIG_X86_64 *addr++ = 0x48; *addr++ = 0xbf; #else *addr++ = 0xb8; #endif *(unsigned long *)addr = val; } asm ( ".pushsection .rodata\n" "optprobe_template_func:\n" ".global optprobe_template_entry\n" "optprobe_template_entry:\n" #ifdef CONFIG_X86_64 " pushq $" __stringify(__KERNEL_DS) "\n" /* Save the 'sp - 8', this will be fixed later. */ " pushq %rsp\n" " pushfq\n" ".global optprobe_template_clac\n" "optprobe_template_clac:\n" ASM_NOP3 SAVE_REGS_STRING " movq %rsp, %rsi\n" ".global optprobe_template_val\n" "optprobe_template_val:\n" ASM_NOP5 ASM_NOP5 ".global optprobe_template_call\n" "optprobe_template_call:\n" ASM_NOP5 /* Copy 'regs->flags' into 'regs->ss'. */ " movq 18*8(%rsp), %rdx\n" " movq %rdx, 20*8(%rsp)\n" RESTORE_REGS_STRING /* Skip 'regs->flags' and 'regs->sp'. */ " addq $16, %rsp\n" /* And pop flags register from 'regs->ss'. */ " popfq\n" #else /* CONFIG_X86_32 */ " pushl %ss\n" /* Save the 'sp - 4', this will be fixed later. 
*/ " pushl %esp\n" " pushfl\n" ".global optprobe_template_clac\n" "optprobe_template_clac:\n" ASM_NOP3 SAVE_REGS_STRING " movl %esp, %edx\n" ".global optprobe_template_val\n" "optprobe_template_val:\n" ASM_NOP5 ".global optprobe_template_call\n" "optprobe_template_call:\n" ASM_NOP5 /* Copy 'regs->flags' into 'regs->ss'. */ " movl 14*4(%esp), %edx\n" " movl %edx, 16*4(%esp)\n" RESTORE_REGS_STRING /* Skip 'regs->flags' and 'regs->sp'. */ " addl $8, %esp\n" /* And pop flags register from 'regs->ss'. */ " popfl\n" #endif ".global optprobe_template_end\n" "optprobe_template_end:\n" ".popsection\n"); void optprobe_template_func(void); STACK_FRAME_NON_STANDARD(optprobe_template_func); #define TMPL_CLAC_IDX \ ((long)optprobe_template_clac - (long)optprobe_template_entry) #define TMPL_MOVE_IDX \ ((long)optprobe_template_val - (long)optprobe_template_entry) #define TMPL_CALL_IDX \ ((long)optprobe_template_call - (long)optprobe_template_entry) #define TMPL_END_IDX \ ((long)optprobe_template_end - (long)optprobe_template_entry) /* Optimized kprobe call back function: called from optinsn */ static void optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) { /* This is possible if op is under delayed unoptimizing */ if (kprobe_disabled(&op->kp)) return; preempt_disable(); if (kprobe_running()) { kprobes_inc_nmissed_count(&op->kp); } else { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); /* Adjust stack pointer */ regs->sp += sizeof(long); /* Save skipped registers */ regs->cs = __KERNEL_CS; #ifdef CONFIG_X86_32 regs->gs = 0; #endif regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE; regs->orig_ax = ~0UL; __this_cpu_write(current_kprobe, &op->kp); kcb->kprobe_status = KPROBE_HIT_ACTIVE; opt_pre_handler(&op->kp, regs); __this_cpu_write(current_kprobe, NULL); } preempt_enable(); } NOKPROBE_SYMBOL(optimized_callback); static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real) { struct insn insn; int len = 0, ret; while (len < JMP32_INSN_SIZE) { ret = __copy_instruction(dest + len, src + len, real + len, &insn); if (!ret || !can_boost(&insn, src + len)) return -EINVAL; len += ret; } /* Check whether the address range is reserved */ if (ftrace_text_reserved(src, src + len - 1) || alternatives_text_reserved(src, src + len - 1) || jump_label_text_reserved(src, src + len - 1) || static_call_text_reserved(src, src + len - 1)) return -EBUSY; return len; } /* Check whether insn is indirect jump */ static int insn_is_indirect_jump(struct insn *insn) { return ((insn->opcode.bytes[0] == 0xff && (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ insn->opcode.bytes[0] == 0xea); /* Segment based jump */ } /* Check whether insn jumps into specified address range */ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) { unsigned long target = 0; switch (insn->opcode.bytes[0]) { case 0xe0: /* loopne */ case 0xe1: /* loope */ case 0xe2: /* loop */ case 0xe3: /* jcxz */ case 0xe9: /* near relative jump */ case 0xeb: /* short relative jump */ break; case 0x0f: if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */ break; return 0; default: if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */ break; return 0; } target = (unsigned long)insn->next_byte + insn->immediate.value; return (start <= target && target <= start + len); } /* Decode whole function to ensure any instructions don't jump into target */ static int can_optimize(unsigned long paddr) { unsigned long addr, size = 0, offset = 0; struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; /* 
Lookup symbol including addr */ if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) return 0; /* * Do not optimize in the entry code due to the unstable * stack handling and registers setup. */ if (((paddr >= (unsigned long)__entry_text_start) && (paddr < (unsigned long)__entry_text_end))) return 0; /* Check there is enough space for a relative jump. */ if (size - offset < JMP32_INSN_SIZE) return 0; /* Decode instructions */ addr = paddr - offset; while (addr < paddr - offset + size) { /* Decode until function end */ unsigned long recovered_insn; int ret; if (search_exception_tables(addr)) /* * Since some fixup code will jumps into this function, * we can't optimize kprobe in this function. */ return 0; recovered_insn = recover_probed_instruction(buf, addr); if (!recovered_insn) return 0; ret = insn_decode_kernel(&insn, (void *)recovered_insn); if (ret < 0) return 0; #ifdef CONFIG_KGDB /* * If there is a dynamically installed kgdb sw breakpoint, * this function should not be probed. */ if (insn.opcode.bytes[0] == INT3_INSN_OPCODE && kgdb_has_hit_break(addr)) return 0; #endif /* Recover address */ insn.kaddr = (void *)addr; insn.next_byte = (void *)(addr + insn.length); /* * Check any instructions don't jump into target, indirectly or * directly. * * The indirect case is present to handle a code with jump * tables. When the kernel uses retpolines, the check should in * theory additionally look for jumps to indirect thunks. * However, the kernel built with retpolines or IBT has jump * tables disabled so the check can be skipped altogether. */ if (!IS_ENABLED(CONFIG_RETPOLINE) && !IS_ENABLED(CONFIG_X86_KERNEL_IBT) && insn_is_indirect_jump(&insn)) return 0; if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, DISP32_SIZE)) return 0; addr += insn.length; } return 1; } /* Check optimized_kprobe can actually be optimized. */ int arch_check_optimized_kprobe(struct optimized_kprobe *op) { int i; struct kprobe *p; for (i = 1; i < op->optinsn.size; i++) { p = get_kprobe(op->kp.addr + i); if (p && !kprobe_disarmed(p)) return -EEXIST; } return 0; } /* Check the addr is within the optimized instructions. */ int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr) { return (op->kp.addr <= addr && op->kp.addr + op->optinsn.size > addr); } /* Free optimized instruction slot */ static void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) { u8 *slot = op->optinsn.insn; if (slot) { int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE; /* Record the perf event before freeing the slot */ if (dirty) perf_event_text_poke(slot, slot, len, NULL, 0); free_optinsn_slot(slot, dirty); op->optinsn.insn = NULL; op->optinsn.size = 0; } } void arch_remove_optimized_kprobe(struct optimized_kprobe *op) { __arch_remove_optimized_kprobe(op, 1); } /* * Copy replacing target instructions * Target instructions MUST be relocatable (checked inside) * This is called when new aggr(opt)probe is allocated or reused. */ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *__unused) { u8 *buf = NULL, *slot; int ret, len; long rel; if (!can_optimize((unsigned long)op->kp.addr)) return -EILSEQ; buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; op->optinsn.insn = slot = get_optinsn_slot(); if (!slot) { ret = -ENOMEM; goto out; } /* * Verify if the address gap is in 2GB range, because this uses * a relative jump. 
*/ rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE; if (abs(rel) > 0x7fffffff) { ret = -ERANGE; goto err; } /* Copy arch-dep-instance from template */ memcpy(buf, optprobe_template_entry, TMPL_END_IDX); /* Copy instructions into the out-of-line buffer */ ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr, slot + TMPL_END_IDX); if (ret < 0) goto err; op->optinsn.size = ret; len = TMPL_END_IDX + op->optinsn.size; synthesize_clac(buf + TMPL_CLAC_IDX); /* Set probe information */ synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); /* Set probe function call */ synthesize_relcall(buf + TMPL_CALL_IDX, slot + TMPL_CALL_IDX, optimized_callback); /* Set returning jmp instruction at the tail of out-of-line buffer */ synthesize_reljump(buf + len, slot + len, (u8 *)op->kp.addr + op->optinsn.size); len += JMP32_INSN_SIZE; /* * Note len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is also * used in __arch_remove_optimized_kprobe(). */ /* We have to use text_poke() for instruction buffer because it is RO */ perf_event_text_poke(slot, NULL, 0, buf, len); text_poke(slot, buf, len); ret = 0; out: kfree(buf); return ret; err: __arch_remove_optimized_kprobe(op, 0); goto out; } /* * Replace breakpoints (INT3) with relative jumps (JMP.d32). * Caller must call with locking kprobe_mutex and text_mutex. * * The caller will have installed a regular kprobe and after that issued * syncrhonize_rcu_tasks(), this ensures that the instruction(s) that live in * the 4 bytes after the INT3 are unused and can now be overwritten. */ void arch_optimize_kprobes(struct list_head *oplist) { struct optimized_kprobe *op, *tmp; u8 insn_buff[JMP32_INSN_SIZE]; list_for_each_entry_safe(op, tmp, oplist, list) { s32 rel = (s32)((long)op->optinsn.insn - ((long)op->kp.addr + JMP32_INSN_SIZE)); WARN_ON(kprobe_disabled(&op->kp)); /* Backup instructions which will be replaced by jump address */ memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE, DISP32_SIZE); insn_buff[0] = JMP32_INSN_OPCODE; *(s32 *)(&insn_buff[1]) = rel; text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL); list_del_init(&op->list); } } /* * Replace a relative jump (JMP.d32) with a breakpoint (INT3). * * After that, we can restore the 4 bytes after the INT3 to undo what * arch_optimize_kprobes() scribbled. This is safe since those bytes will be * unused once the INT3 lands. */ void arch_unoptimize_kprobe(struct optimized_kprobe *op) { u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, }; u8 old[JMP32_INSN_SIZE]; u8 *addr = op->kp.addr; memcpy(old, op->kp.addr, JMP32_INSN_SIZE); memcpy(new + INT3_INSN_SIZE, op->optinsn.copied_insn, JMP32_INSN_SIZE - INT3_INSN_SIZE); text_poke(addr, new, INT3_INSN_SIZE); text_poke_sync(); text_poke(addr + INT3_INSN_SIZE, new + INT3_INSN_SIZE, JMP32_INSN_SIZE - INT3_INSN_SIZE); text_poke_sync(); perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE); } /* * Recover original instructions and breakpoints from relative jumps. * Caller must call with locking kprobe_mutex. */ extern void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list) { struct optimized_kprobe *op, *tmp; list_for_each_entry_safe(op, tmp, oplist, list) { arch_unoptimize_kprobe(op); list_move(&op->list, done_list); } } int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) { struct optimized_kprobe *op; if (p->flags & KPROBE_FLAG_OPTIMIZED) { /* This kprobe is really able to run optimized path. 
*/ op = container_of(p, struct optimized_kprobe, kp); /* Detour through copied instructions */ regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; if (!reenter) reset_current_kprobe(); return 1; } return 0; } NOKPROBE_SYMBOL(setup_detour_execution);
linux-master
arch/x86/kernel/kprobes/opt.c
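/*
 * Illustrative sketch, not kernel code: how the 5-byte JMP.d32 used by
 * the optprobe path above is encoded.  The 32-bit displacement is taken
 * relative to the end of the jump instruction and must fit in a signed
 * 32 bits, which is the same +/-2GB constraint that makes
 * arch_prepare_optimized_kprobe() bail out with -ERANGE.  The opcode
 * value is real x86; the addresses are invented for the example.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define JMP32_INSN_OPCODE 0xe9
#define JMP32_INSN_SIZE   5

/* Encode "jmp rel32" placed at 'from' and targeting 'to'; 0 on success. */
static int synthesize_jmp32(uint8_t *buf, uint64_t from, uint64_t to)
{
	int64_t rel = (int64_t)(to - (from + JMP32_INSN_SIZE));
	int32_t rel32;

	if (rel != (int64_t)(int32_t)rel)
		return -1;		/* out of +/-2GB reach */

	rel32 = (int32_t)rel;
	buf[0] = JMP32_INSN_OPCODE;
	memcpy(buf + 1, &rel32, sizeof(rel32));	/* x86 is little-endian */
	return 0;
}

int main(void)
{
	uint8_t insn[JMP32_INSN_SIZE];
	uint64_t probe_addr = 0xffffffff81234560ULL;	/* hypothetical kp->addr */
	uint64_t slot_addr  = 0xffffffffa0001000ULL;	/* hypothetical insn slot */
	int i;

	if (synthesize_jmp32(insn, probe_addr, slot_addr)) {
		fprintf(stderr, "target out of range\n");
		return 1;
	}

	printf("jmp encoding:");
	for (i = 0; i < JMP32_INSN_SIZE; i++)
		printf(" %02x", insn[i]);
	printf("\n");
	return 0;
}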
// SPDX-License-Identifier: GPL-2.0-or-later /* * Kernel Probes (KProbes) * * Copyright (C) IBM Corporation, 2002, 2004 * * 2002-Oct Created by Vamsi Krishna S <[email protected]> Kernel * Probes initial implementation ( includes contributions from * Rusty Russell). * 2004-July Suparna Bhattacharya <[email protected]> added jumper probes * interface to access function arguments. * 2004-Oct Jim Keniston <[email protected]> and Prasanna S Panchamukhi * <[email protected]> adapted for x86_64 from i386. * 2005-Mar Roland McGrath <[email protected]> * Fixed to handle %rip-relative addressing mode correctly. * 2005-May Hien Nguyen <[email protected]>, Jim Keniston * <[email protected]> and Prasanna S Panchamukhi * <[email protected]> added function-return probes. * 2005-May Rusty Lynch <[email protected]> * Added function return probes functionality * 2006-Feb Masami Hiramatsu <[email protected]> added * kprobe-booster and kretprobe-booster for i386. * 2007-Dec Masami Hiramatsu <[email protected]> added kprobe-booster * and kretprobe-booster for x86-64 * 2007-Dec Masami Hiramatsu <[email protected]>, Arjan van de Ven * <[email protected]> and Jim Keniston <[email protected]> * unified x86 kprobes code. */ #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/hardirq.h> #include <linux/preempt.h> #include <linux/sched/debug.h> #include <linux/perf_event.h> #include <linux/extable.h> #include <linux/kdebug.h> #include <linux/kallsyms.h> #include <linux/kgdb.h> #include <linux/ftrace.h> #include <linux/kasan.h> #include <linux/moduleloader.h> #include <linux/objtool.h> #include <linux/vmalloc.h> #include <linux/pgtable.h> #include <linux/set_memory.h> #include <linux/cfi.h> #include <asm/text-patching.h> #include <asm/cacheflush.h> #include <asm/desc.h> #include <linux/uaccess.h> #include <asm/alternative.h> #include <asm/insn.h> #include <asm/debugreg.h> #include <asm/ibt.h> #include "common.h" DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\ (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \ (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \ (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \ (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \ << (row % 32)) /* * Undefined/reserved opcodes, conditional jump, Opcode Extension * Groups, and some special opcodes can not boost. * This is non-const and volatile to keep gcc from statically * optimizing it out, as variable_test_bit makes gcc think only * *(unsigned long*) is used. 
*/ static volatile u32 twobyte_is_boostable[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */ W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */ W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */ W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */ W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */ W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */ W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */ W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */ W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */ W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */ W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */ W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */ W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0) /* f0 */ /* ----------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; #undef W struct kretprobe_blackpoint kretprobe_blacklist[] = { {"__switch_to", }, /* This function switches only current task, but doesn't switch kernel stack.*/ {NULL, NULL} /* Terminator */ }; const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); static nokprobe_inline void __synthesize_relative_insn(void *dest, void *from, void *to, u8 op) { struct __arch_relative_insn { u8 op; s32 raddr; } __packed *insn; insn = (struct __arch_relative_insn *)dest; insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); insn->op = op; } /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ void synthesize_reljump(void *dest, void *from, void *to) { __synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE); } NOKPROBE_SYMBOL(synthesize_reljump); /* Insert a call instruction at address 'from', which calls address 'to'.*/ void synthesize_relcall(void *dest, void *from, void *to) { __synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE); } NOKPROBE_SYMBOL(synthesize_relcall); /* * Returns non-zero if INSN is boostable. * RIP relative instructions are adjusted at copying time in 64 bits mode */ int can_boost(struct insn *insn, void *addr) { kprobe_opcode_t opcode; insn_byte_t prefix; int i; if (search_exception_tables((unsigned long)addr)) return 0; /* Page fault may occur on this address. */ /* 2nd-byte opcode */ if (insn->opcode.nbytes == 2) return test_bit(insn->opcode.bytes[1], (unsigned long *)twobyte_is_boostable); if (insn->opcode.nbytes != 1) return 0; for_each_insn_prefix(insn, i, prefix) { insn_attr_t attr; attr = inat_get_opcode_attribute(prefix); /* Can't boost Address-size override prefix and CS override prefix */ if (prefix == 0x2e || inat_is_address_size_prefix(attr)) return 0; } opcode = insn->opcode.bytes[0]; switch (opcode) { case 0x62: /* bound */ case 0x70 ... 0x7f: /* Conditional jumps */ case 0x9a: /* Call far */ case 0xc0 ... 0xc1: /* Grp2 */ case 0xcc ... 0xce: /* software exceptions */ case 0xd0 ... 0xd3: /* Grp2 */ case 0xd6: /* (UD) */ case 0xd8 ... 0xdf: /* ESC */ case 0xe0 ... 0xe3: /* LOOP*, JCXZ */ case 0xe8 ... 0xe9: /* near Call, JMP */ case 0xeb: /* Short JMP */ case 0xf0 ... 0xf4: /* LOCK/REP, HLT */ case 0xf6 ... 
0xf7: /* Grp3 */ case 0xfe: /* Grp4 */ /* ... are not boostable */ return 0; case 0xff: /* Grp5 */ /* Only indirect jmp is boostable */ return X86_MODRM_REG(insn->modrm.bytes[0]) == 4; default: return 1; } } static unsigned long __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) { struct kprobe *kp; bool faddr; kp = get_kprobe((void *)addr); faddr = ftrace_location(addr) == addr; /* * Use the current code if it is not modified by Kprobe * and it cannot be modified by ftrace. */ if (!kp && !faddr) return addr; /* * Basically, kp->ainsn.insn has an original instruction. * However, RIP-relative instruction can not do single-stepping * at different place, __copy_instruction() tweaks the displacement of * that instruction. In that case, we can't recover the instruction * from the kp->ainsn.insn. * * On the other hand, in case on normal Kprobe, kp->opcode has a copy * of the first byte of the probed instruction, which is overwritten * by int3. And the instruction at kp->addr is not modified by kprobes * except for the first byte, we can recover the original instruction * from it and kp->opcode. * * In case of Kprobes using ftrace, we do not have a copy of * the original instruction. In fact, the ftrace location might * be modified at anytime and even could be in an inconsistent state. * Fortunately, we know that the original code is the ideal 5-byte * long NOP. */ if (copy_from_kernel_nofault(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) return 0UL; if (faddr) memcpy(buf, x86_nops[5], 5); else buf[0] = kp->opcode; return (unsigned long)buf; } /* * Recover the probed instruction at addr for further analysis. * Caller must lock kprobes by kprobe_mutex, or disable preemption * for preventing to release referencing kprobes. * Returns zero if the instruction can not get recovered (or access failed). */ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) { unsigned long __addr; __addr = __recover_optprobed_insn(buf, addr); if (__addr != addr) return __addr; return __recover_probed_insn(buf, addr); } /* Check if paddr is at an instruction boundary */ static int can_probe(unsigned long paddr) { unsigned long addr, __addr, offset = 0; struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; if (!kallsyms_lookup_size_offset(paddr, NULL, &offset)) return 0; /* Decode instructions */ addr = paddr - offset; while (addr < paddr) { int ret; /* * Check if the instruction has been modified by another * kprobe, in which case we replace the breakpoint by the * original instruction in our buffer. * Also, jump optimization will change the breakpoint to * relative-jump. Since the relative-jump itself is * normally used, we just go through if there is no kprobe. */ __addr = recover_probed_instruction(buf, addr); if (!__addr) return 0; ret = insn_decode_kernel(&insn, (void *)__addr); if (ret < 0) return 0; #ifdef CONFIG_KGDB /* * If there is a dynamically installed kgdb sw breakpoint, * this function should not be probed. */ if (insn.opcode.bytes[0] == INT3_INSN_OPCODE && kgdb_has_hit_break(addr)) return 0; #endif addr += insn.length; } if (IS_ENABLED(CONFIG_CFI_CLANG)) { /* * The compiler generates the following instruction sequence * for indirect call checks and cfi.c decodes this; * *  movl -<id>, %r10d ; 6 bytes * addl -4(%reg), %r10d ; 4 bytes * je .Ltmp1 ; 2 bytes * ud2 ; <- regs->ip * .Ltmp1: * * Also, these movl and addl are used for showing expected * type. So those must not be touched. 
*/ __addr = recover_probed_instruction(buf, addr); if (!__addr) return 0; if (insn_decode_kernel(&insn, (void *)__addr) < 0) return 0; if (insn.opcode.value == 0xBA) offset = 12; else if (insn.opcode.value == 0x3) offset = 6; else goto out; /* This movl/addl is used for decoding CFI. */ if (is_cfi_trap(addr + offset)) return 0; } out: return (addr == paddr); } /* If x86 supports IBT (ENDBR) it must be skipped. */ kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry) { if (is_endbr(*(u32 *)addr)) { *on_func_entry = !offset || offset == 4; if (*on_func_entry) offset = 4; } else { *on_func_entry = !offset; } return (kprobe_opcode_t *)(addr + offset); } /* * Copy an instruction with recovering modified instruction by kprobes * and adjust the displacement if the instruction uses the %rip-relative * addressing mode. Note that since @real will be the final place of copied * instruction, displacement must be adjust by @real, not @dest. * This returns the length of copied instruction, or 0 if it has an error. */ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn) { kprobe_opcode_t buf[MAX_INSN_SIZE]; unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src); int ret; if (!recovered_insn || !insn) return 0; /* This can access kernel text if given address is not recovered */ if (copy_from_kernel_nofault(dest, (void *)recovered_insn, MAX_INSN_SIZE)) return 0; ret = insn_decode_kernel(insn, dest); if (ret < 0) return 0; /* We can not probe force emulate prefixed instruction */ if (insn_has_emulate_prefix(insn)) return 0; /* Another subsystem puts a breakpoint, failed to recover */ if (insn->opcode.bytes[0] == INT3_INSN_OPCODE) return 0; /* We should not singlestep on the exception masking instructions */ if (insn_masking_exception(insn)) return 0; #ifdef CONFIG_X86_64 /* Only x86_64 has RIP relative instructions */ if (insn_rip_relative(insn)) { s64 newdisp; u8 *disp; /* * The copied instruction uses the %rip-relative addressing * mode. Adjust the displacement for the difference between * the original location of this instruction and the location * of the copy that will actually be run. The tricky bit here * is making sure that the sign extension happens correctly in * this calculation, since we need a signed 32-bit result to * be sign-extended to 64 bits when it's added to the %rip * value and yield the same 64-bit result that the sign- * extension of the original signed 32-bit displacement would * have given. */ newdisp = (u8 *) src + (s64) insn->displacement.value - (u8 *) real; if ((s64) (s32) newdisp != newdisp) { pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp); return 0; } disp = (u8 *) dest + insn_offset_displacement(insn); *(s32 *) disp = (s32) newdisp; } #endif return insn->length; } /* Prepare reljump or int3 right after instruction */ static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p, struct insn *insn) { int len = insn->length; if (!IS_ENABLED(CONFIG_PREEMPTION) && !p->post_handler && can_boost(insn, p->addr) && MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) { /* * These instructions can be executed directly if it * jumps back to correct address. 
*/ synthesize_reljump(buf + len, p->ainsn.insn + len, p->addr + insn->length); len += JMP32_INSN_SIZE; p->ainsn.boostable = 1; } else { /* Otherwise, put an int3 for trapping singlestep */ if (MAX_INSN_SIZE - len < INT3_INSN_SIZE) return -ENOSPC; buf[len] = INT3_INSN_OPCODE; len += INT3_INSN_SIZE; } return len; } /* Make page to RO mode when allocate it */ void *alloc_insn_page(void) { void *page; page = module_alloc(PAGE_SIZE); if (!page) return NULL; /* * TODO: Once additional kernel code protection mechanisms are set, ensure * that the page was not maliciously altered and it is still zeroed. */ set_memory_rox((unsigned long)page, 1); return page; } /* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */ static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs) { switch (p->ainsn.opcode) { case 0xfa: /* cli */ regs->flags &= ~(X86_EFLAGS_IF); break; case 0xfb: /* sti */ regs->flags |= X86_EFLAGS_IF; break; case 0x9c: /* pushf */ int3_emulate_push(regs, regs->flags); break; case 0x9d: /* popf */ regs->flags = int3_emulate_pop(regs); break; } regs->ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size; } NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers); static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs) { int3_emulate_ret(regs); } NOKPROBE_SYMBOL(kprobe_emulate_ret); static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs) { unsigned long func = regs->ip - INT3_INSN_SIZE + p->ainsn.size; func += p->ainsn.rel32; int3_emulate_call(regs, func); } NOKPROBE_SYMBOL(kprobe_emulate_call); static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs) { unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size; ip += p->ainsn.rel32; int3_emulate_jmp(regs, ip); } NOKPROBE_SYMBOL(kprobe_emulate_jmp); static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs) { unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size; int3_emulate_jcc(regs, p->ainsn.jcc.type, ip, p->ainsn.rel32); } NOKPROBE_SYMBOL(kprobe_emulate_jcc); static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs) { unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size; bool match; if (p->ainsn.loop.type != 3) { /* LOOP* */ if (p->ainsn.loop.asize == 32) match = ((*(u32 *)&regs->cx)--) != 0; #ifdef CONFIG_X86_64 else if (p->ainsn.loop.asize == 64) match = ((*(u64 *)&regs->cx)--) != 0; #endif else match = ((*(u16 *)&regs->cx)--) != 0; } else { /* JCXZ */ if (p->ainsn.loop.asize == 32) match = *(u32 *)(&regs->cx) == 0; #ifdef CONFIG_X86_64 else if (p->ainsn.loop.asize == 64) match = *(u64 *)(&regs->cx) == 0; #endif else match = *(u16 *)(&regs->cx) == 0; } if (p->ainsn.loop.type == 0) /* LOOPNE */ match = match && !(regs->flags & X86_EFLAGS_ZF); else if (p->ainsn.loop.type == 1) /* LOOPE */ match = match && (regs->flags & X86_EFLAGS_ZF); if (match) ip += p->ainsn.rel32; int3_emulate_jmp(regs, ip); } NOKPROBE_SYMBOL(kprobe_emulate_loop); static const int addrmode_regoffs[] = { offsetof(struct pt_regs, ax), offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, sp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), #ifdef CONFIG_X86_64 offsetof(struct pt_regs, r8), offsetof(struct pt_regs, r9), offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13), offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15), #endif }; static void kprobe_emulate_call_indirect(struct 
kprobe *p, struct pt_regs *regs) { unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg]; int3_emulate_call(regs, regs_get_register(regs, offs)); } NOKPROBE_SYMBOL(kprobe_emulate_call_indirect); static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs) { unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg]; int3_emulate_jmp(regs, regs_get_register(regs, offs)); } NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect); static int prepare_emulation(struct kprobe *p, struct insn *insn) { insn_byte_t opcode = insn->opcode.bytes[0]; switch (opcode) { case 0xfa: /* cli */ case 0xfb: /* sti */ case 0x9c: /* pushfl */ case 0x9d: /* popf/popfd */ /* * IF modifiers must be emulated since it will enable interrupt while * int3 single stepping. */ p->ainsn.emulate_op = kprobe_emulate_ifmodifiers; p->ainsn.opcode = opcode; break; case 0xc2: /* ret/lret */ case 0xc3: case 0xca: case 0xcb: p->ainsn.emulate_op = kprobe_emulate_ret; break; case 0x9a: /* far call absolute -- segment is not supported */ case 0xea: /* far jmp absolute -- segment is not supported */ case 0xcc: /* int3 */ case 0xcf: /* iret -- in-kernel IRET is not supported */ return -EOPNOTSUPP; break; case 0xe8: /* near call relative */ p->ainsn.emulate_op = kprobe_emulate_call; if (insn->immediate.nbytes == 2) p->ainsn.rel32 = *(s16 *)&insn->immediate.value; else p->ainsn.rel32 = *(s32 *)&insn->immediate.value; break; case 0xeb: /* short jump relative */ case 0xe9: /* near jump relative */ p->ainsn.emulate_op = kprobe_emulate_jmp; if (insn->immediate.nbytes == 1) p->ainsn.rel32 = *(s8 *)&insn->immediate.value; else if (insn->immediate.nbytes == 2) p->ainsn.rel32 = *(s16 *)&insn->immediate.value; else p->ainsn.rel32 = *(s32 *)&insn->immediate.value; break; case 0x70 ... 0x7f: /* 1 byte conditional jump */ p->ainsn.emulate_op = kprobe_emulate_jcc; p->ainsn.jcc.type = opcode & 0xf; p->ainsn.rel32 = insn->immediate.value; break; case 0x0f: opcode = insn->opcode.bytes[1]; if ((opcode & 0xf0) == 0x80) { /* 2 bytes Conditional Jump */ p->ainsn.emulate_op = kprobe_emulate_jcc; p->ainsn.jcc.type = opcode & 0xf; if (insn->immediate.nbytes == 2) p->ainsn.rel32 = *(s16 *)&insn->immediate.value; else p->ainsn.rel32 = *(s32 *)&insn->immediate.value; } else if (opcode == 0x01 && X86_MODRM_REG(insn->modrm.bytes[0]) == 0 && X86_MODRM_MOD(insn->modrm.bytes[0]) == 3) { /* VM extensions - not supported */ return -EOPNOTSUPP; } break; case 0xe0: /* Loop NZ */ case 0xe1: /* Loop */ case 0xe2: /* Loop */ case 0xe3: /* J*CXZ */ p->ainsn.emulate_op = kprobe_emulate_loop; p->ainsn.loop.type = opcode & 0x3; p->ainsn.loop.asize = insn->addr_bytes * 8; p->ainsn.rel32 = *(s8 *)&insn->immediate.value; break; case 0xff: /* * Since the 0xff is an extended group opcode, the instruction * is determined by the MOD/RM byte. 
*/ opcode = insn->modrm.bytes[0]; switch (X86_MODRM_REG(opcode)) { case 0b010: /* FF /2, call near, absolute indirect */ p->ainsn.emulate_op = kprobe_emulate_call_indirect; break; case 0b100: /* FF /4, jmp near, absolute indirect */ p->ainsn.emulate_op = kprobe_emulate_jmp_indirect; break; case 0b011: /* FF /3, call far, absolute indirect */ case 0b101: /* FF /5, jmp far, absolute indirect */ return -EOPNOTSUPP; } if (!p->ainsn.emulate_op) break; if (insn->addr_bytes != sizeof(unsigned long)) return -EOPNOTSUPP; /* Don't support different size */ if (X86_MODRM_MOD(opcode) != 3) return -EOPNOTSUPP; /* TODO: support memory addressing */ p->ainsn.indirect.reg = X86_MODRM_RM(opcode); #ifdef CONFIG_X86_64 if (X86_REX_B(insn->rex_prefix.value)) p->ainsn.indirect.reg += 8; #endif break; default: break; } p->ainsn.size = insn->length; return 0; } static int arch_copy_kprobe(struct kprobe *p) { struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; int ret, len; /* Copy an instruction with recovering if other optprobe modifies it.*/ len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn); if (!len) return -EINVAL; /* Analyze the opcode and setup emulate functions */ ret = prepare_emulation(p, &insn); if (ret < 0) return ret; /* Add int3 for single-step or booster jmp */ len = prepare_singlestep(buf, p, &insn); if (len < 0) return len; /* Also, displacement change doesn't affect the first byte */ p->opcode = buf[0]; p->ainsn.tp_len = len; perf_event_text_poke(p->ainsn.insn, NULL, 0, buf, len); /* OK, write back the instruction(s) into ROX insn buffer */ text_poke(p->ainsn.insn, buf, len); return 0; } int arch_prepare_kprobe(struct kprobe *p) { int ret; if (alternatives_text_reserved(p->addr, p->addr)) return -EINVAL; if (!can_probe((unsigned long)p->addr)) return -EILSEQ; memset(&p->ainsn, 0, sizeof(p->ainsn)); /* insn: must be on special executable page on x86. 
*/ p->ainsn.insn = get_insn_slot(); if (!p->ainsn.insn) return -ENOMEM; ret = arch_copy_kprobe(p); if (ret) { free_insn_slot(p->ainsn.insn, 0); p->ainsn.insn = NULL; } return ret; } void arch_arm_kprobe(struct kprobe *p) { u8 int3 = INT3_INSN_OPCODE; text_poke(p->addr, &int3, 1); text_poke_sync(); perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1); } void arch_disarm_kprobe(struct kprobe *p) { u8 int3 = INT3_INSN_OPCODE; perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1); text_poke(p->addr, &p->opcode, 1); text_poke_sync(); } void arch_remove_kprobe(struct kprobe *p) { if (p->ainsn.insn) { /* Record the perf event before freeing the slot */ perf_event_text_poke(p->ainsn.insn, p->ainsn.insn, p->ainsn.tp_len, NULL, 0); free_insn_slot(p->ainsn.insn, p->ainsn.boostable); p->ainsn.insn = NULL; } } static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) { kcb->prev_kprobe.kp = kprobe_running(); kcb->prev_kprobe.status = kcb->kprobe_status; kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags; kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags; } static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb) { __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); kcb->kprobe_status = kcb->prev_kprobe.status; kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags; kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags; } static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { __this_cpu_write(current_kprobe, p); kcb->kprobe_saved_flags = kcb->kprobe_old_flags = (regs->flags & X86_EFLAGS_IF); } static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { /* Restore back the original saved kprobes variables and continue. */ if (kcb->kprobe_status == KPROBE_REENTER) { /* This will restore both kcb and current_kprobe */ restore_previous_kprobe(kcb); } else { /* * Always update the kcb status because * reset_curent_kprobe() doesn't update kcb. */ kcb->kprobe_status = KPROBE_HIT_SSDONE; if (cur->post_handler) cur->post_handler(cur, regs, 0); reset_current_kprobe(); } } NOKPROBE_SYMBOL(kprobe_post_process); static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) { if (setup_detour_execution(p, regs, reenter)) return; #if !defined(CONFIG_PREEMPTION) if (p->ainsn.boostable) { /* Boost up -- we can execute copied instructions directly */ if (!reenter) reset_current_kprobe(); /* * Reentering boosted probe doesn't reset current_kprobe, * nor set current_kprobe, because it doesn't use single * stepping. */ regs->ip = (unsigned long)p->ainsn.insn; return; } #endif if (reenter) { save_previous_kprobe(kcb); set_current_kprobe(p, regs, kcb); kcb->kprobe_status = KPROBE_REENTER; } else kcb->kprobe_status = KPROBE_HIT_SS; if (p->ainsn.emulate_op) { p->ainsn.emulate_op(p, regs); kprobe_post_process(p, regs, kcb); return; } /* Disable interrupt, and set ip register on trampoline */ regs->flags &= ~X86_EFLAGS_IF; regs->ip = (unsigned long)p->ainsn.insn; } NOKPROBE_SYMBOL(setup_singlestep); /* * Called after single-stepping. p->addr is the address of the * instruction whose first byte has been replaced by the "int3" * instruction. To avoid the SMP problems that can occur when we * temporarily put back the original opcode to single-step, we * single-stepped a copy of the instruction. The address of this * copy is p->ainsn.insn. We also doesn't use trap, but "int3" again * right after the copied instruction. 
* Different from the trap single-step, "int3" single-step can not * handle the instruction which changes the ip register, e.g. jmp, * call, conditional jmp, and the instructions which changes the IF * flags because interrupt must be disabled around the single-stepping. * Such instructions are software emulated, but others are single-stepped * using "int3". * * When the 2nd "int3" handled, the regs->ip and regs->flags needs to * be adjusted, so that we can resume execution on correct code. */ static void resume_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { unsigned long copy_ip = (unsigned long)p->ainsn.insn; unsigned long orig_ip = (unsigned long)p->addr; /* Restore saved interrupt flag and ip register */ regs->flags |= kcb->kprobe_saved_flags; /* Note that regs->ip is executed int3 so must be a step back */ regs->ip += (orig_ip - copy_ip) - INT3_INSN_SIZE; } NOKPROBE_SYMBOL(resume_singlestep); /* * We have reentered the kprobe_handler(), since another probe was hit while * within the handler. We save the original kprobes variables and just single * step on the instruction of the new probe without calling any user handlers. */ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { switch (kcb->kprobe_status) { case KPROBE_HIT_SSDONE: case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SS: kprobes_inc_nmissed_count(p); setup_singlestep(p, regs, kcb, 1); break; case KPROBE_REENTER: /* A probe has been hit in the codepath leading up to, or just * after, single-stepping of a probed instruction. This entire * codepath should strictly reside in .kprobes.text section. * Raise a BUG or we'll continue in an endless reentering loop * and eventually a stack overflow. */ pr_err("Unrecoverable kprobe detected.\n"); dump_kprobe(p); BUG(); default: /* impossible cases */ WARN_ON(1); return 0; } return 1; } NOKPROBE_SYMBOL(reenter_kprobe); static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb) { return (kcb->kprobe_status == KPROBE_HIT_SS || kcb->kprobe_status == KPROBE_REENTER); } /* * Interrupts are disabled on entry as trap3 is an interrupt gate and they * remain disabled throughout this function. */ int kprobe_int3_handler(struct pt_regs *regs) { kprobe_opcode_t *addr; struct kprobe *p; struct kprobe_ctlblk *kcb; if (user_mode(regs)) return 0; addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); /* * We don't want to be preempted for the entire duration of kprobe * processing. Since int3 and debug trap disables irqs and we clear * IF while singlestepping, it must be no preemptible. */ kcb = get_kprobe_ctlblk(); p = get_kprobe(addr); if (p) { if (kprobe_running()) { if (reenter_kprobe(p, regs, kcb)) return 1; } else { set_current_kprobe(p, regs, kcb); kcb->kprobe_status = KPROBE_HIT_ACTIVE; /* * If we have no pre-handler or it returned 0, we * continue with normal processing. If we have a * pre-handler and it returned non-zero, that means * user handler setup registers to exit to another * instruction, we must skip the single stepping. 
*/ if (!p->pre_handler || !p->pre_handler(p, regs)) setup_singlestep(p, regs, kcb, 0); else reset_current_kprobe(); return 1; } } else if (kprobe_is_ss(kcb)) { p = kprobe_running(); if ((unsigned long)p->ainsn.insn < regs->ip && (unsigned long)p->ainsn.insn + MAX_INSN_SIZE > regs->ip) { /* Most provably this is the second int3 for singlestep */ resume_singlestep(p, regs, kcb); kprobe_post_process(p, regs, kcb); return 1; } } /* else: not a kprobe fault; let the kernel handle it */ return 0; } NOKPROBE_SYMBOL(kprobe_int3_handler); int kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) { /* This must happen on single-stepping */ WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS && kcb->kprobe_status != KPROBE_REENTER); /* * We are here because the instruction being single * stepped caused a page fault. We reset the current * kprobe and the ip points back to the probe address * and allow the page fault handler to continue as a * normal page fault. */ regs->ip = (unsigned long)cur->addr; /* * If the IF flag was set before the kprobe hit, * don't touch it: */ regs->flags |= kcb->kprobe_old_flags; if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); else reset_current_kprobe(); } return 0; } NOKPROBE_SYMBOL(kprobe_fault_handler); int __init arch_populate_kprobe_blacklist(void) { return kprobe_add_area_blacklist((unsigned long)__entry_text_start, (unsigned long)__entry_text_end); } int __init arch_init_kprobes(void) { return 0; } int arch_trampoline_kprobe(struct kprobe *p) { return 0; }
linux-master
arch/x86/kernel/kprobes/core.c
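/*
 * Illustrative sketch, not part of arch/x86/kernel/kprobes/core.c above:
 * a minimal kprobes client module showing the handler convention that
 * kprobe_int3_handler() relies on. A pre_handler returning 0 lets the
 * core single-step or emulate the probed instruction (setup_singlestep());
 * a non-zero return means the handler already redirected regs->ip, so
 * single-stepping is skipped and current_kprobe is reset. The probed
 * symbol name below is an arbitrary example, not mandated by the code
 * above; build it as an ordinary out-of-tree module to experiment.
 */
#include <linux/kprobes.h>
#include <linux/module.h>

static struct kprobe kp = {
	.symbol_name = "kernel_clone",	/* example symbol; any kprobe-able function works */
};

/* Runs from the int3 trap context, before the probed instruction executes. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre:  addr=%px ip=%lx flags=%lx\n", p->addr, regs->ip, regs->flags);
	return 0;	/* 0: let the kprobes core execute the original instruction */
}

/* Runs after the single-stepped/emulated instruction has completed. */
static void handler_post(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
	pr_info("post: addr=%px flags=%lx\n", p->addr, regs->flags);
}

static int __init kprobe_sketch_init(void)
{
	kp.pre_handler = handler_pre;
	kp.post_handler = handler_post;
	return register_kprobe(&kp);	/* arms the probe: int3 is poked over the first byte */
}

static void __exit kprobe_sketch_exit(void)
{
	unregister_kprobe(&kp);		/* disarms and frees the instruction slot */
}

module_init(kprobe_sketch_init);
module_exit(kprobe_sketch_exit);
MODULE_LICENSE("GPL");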
// SPDX-License-Identifier: GPL-2.0 /* * Intel IO-APIC support for multi-Pentium hosts. * * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo * * Many thanks to Stig Venaas for trying out countless experimental * patches and reporting/debugging problems patiently! * * (c) 1999, Multiple IO-APIC support, developed by * Ken-ichi Yaku <[email protected]> and * Hidemi Kishimoto <[email protected]>, * further tested and cleaned up by Zach Brown <[email protected]> * and Ingo Molnar <[email protected]> * * Fixes * Maciej W. Rozycki : Bits for genuine 82489DX APICs; * thanks to Eric Gilmore * and Rolf G. Tews * for testing these extensively * Paul Diefenbaugh : Added full ACPI support * * Historical information which is worth to be preserved: * * - SiS APIC rmw bug: * * We used to have a workaround for a bug in SiS chips which * required to rewrite the index register for a read-modify-write * operation as the chip lost the index information which was * setup for the read already. We cache the data now, so that * workaround has been removed. */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/mc146818rtc.h> #include <linux/compiler.h> #include <linux/acpi.h> #include <linux/export.h> #include <linux/syscore_ops.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/jiffies.h> /* time_after() */ #include <linux/slab.h> #include <linux/memblock.h> #include <linux/msi.h> #include <asm/irqdomain.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/cpu.h> #include <asm/desc.h> #include <asm/proto.h> #include <asm/acpi.h> #include <asm/dma.h> #include <asm/timer.h> #include <asm/time.h> #include <asm/i8259.h> #include <asm/setup.h> #include <asm/irq_remapping.h> #include <asm/hw_irq.h> #include <asm/apic.h> #include <asm/pgtable.h> #include <asm/x86_init.h> #define for_each_ioapic(idx) \ for ((idx) = 0; (idx) < nr_ioapics; (idx)++) #define for_each_ioapic_reverse(idx) \ for ((idx) = nr_ioapics - 1; (idx) >= 0; (idx)--) #define for_each_pin(idx, pin) \ for ((pin) = 0; (pin) < ioapics[(idx)].nr_registers; (pin)++) #define for_each_ioapic_pin(idx, pin) \ for_each_ioapic((idx)) \ for_each_pin((idx), (pin)) #define for_each_irq_pin(entry, head) \ list_for_each_entry(entry, &head, list) static DEFINE_RAW_SPINLOCK(ioapic_lock); static DEFINE_MUTEX(ioapic_mutex); static unsigned int ioapic_dynirq_base; static int ioapic_initialized; struct irq_pin_list { struct list_head list; int apic, pin; }; struct mp_chip_data { struct list_head irq_2_pin; struct IO_APIC_route_entry entry; bool is_level; bool active_low; bool isa_irq; u32 count; }; struct mp_ioapic_gsi { u32 gsi_base; u32 gsi_end; }; static struct ioapic { /* * # of IRQ routing registers */ int nr_registers; /* * Saved state during suspend/resume, or while enabling intr-remap. 
*/ struct IO_APIC_route_entry *saved_registers; /* I/O APIC config */ struct mpc_ioapic mp_config; /* IO APIC gsi routing info */ struct mp_ioapic_gsi gsi_config; struct ioapic_domain_cfg irqdomain_cfg; struct irq_domain *irqdomain; struct resource *iomem_res; } ioapics[MAX_IO_APICS]; #define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver int mpc_ioapic_id(int ioapic_idx) { return ioapics[ioapic_idx].mp_config.apicid; } unsigned int mpc_ioapic_addr(int ioapic_idx) { return ioapics[ioapic_idx].mp_config.apicaddr; } static inline struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx) { return &ioapics[ioapic_idx].gsi_config; } static inline int mp_ioapic_pin_count(int ioapic) { struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic); return gsi_cfg->gsi_end - gsi_cfg->gsi_base + 1; } static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return mp_ioapic_gsi_routing(ioapic)->gsi_base + pin; } static inline bool mp_is_legacy_irq(int irq) { return irq >= 0 && irq < nr_legacy_irqs(); } static inline struct irq_domain *mp_ioapic_irqdomain(int ioapic) { return ioapics[ioapic].irqdomain; } int nr_ioapics; /* The one past the highest gsi number used */ u32 gsi_top; /* MP IRQ source entries */ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; /* # of MP IRQ source entries */ int mp_irq_entries; #ifdef CONFIG_EISA int mp_bus_id_to_type[MAX_MP_BUSSES]; #endif DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); bool ioapic_is_disabled __ro_after_init; /** * disable_ioapic_support() - disables ioapic support at runtime */ void disable_ioapic_support(void) { #ifdef CONFIG_PCI noioapicquirk = 1; noioapicreroute = -1; #endif ioapic_is_disabled = true; } static int __init parse_noapic(char *str) { /* disable IO-APIC */ disable_ioapic_support(); return 0; } early_param("noapic", parse_noapic); /* Will be called in mpparse/ACPI codes for saving IRQ info */ void mp_save_irq(struct mpc_intsrc *m) { int i; apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC INT %02x\n", m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus, m->srcbusirq, m->dstapic, m->dstirq); for (i = 0; i < mp_irq_entries; i++) { if (!memcmp(&mp_irqs[i], m, sizeof(*m))) return; } memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m)); if (++mp_irq_entries == MAX_IRQ_SOURCES) panic("Max # of irq sources exceeded!!\n"); } static void alloc_ioapic_saved_registers(int idx) { size_t size; if (ioapics[idx].saved_registers) return; size = sizeof(struct IO_APIC_route_entry) * ioapics[idx].nr_registers; ioapics[idx].saved_registers = kzalloc(size, GFP_KERNEL); if (!ioapics[idx].saved_registers) pr_err("IOAPIC %d: suspend/resume impossible!\n", idx); } static void free_ioapic_saved_registers(int idx) { kfree(ioapics[idx].saved_registers); ioapics[idx].saved_registers = NULL; } int __init arch_early_ioapic_init(void) { int i; if (!nr_legacy_irqs()) io_apic_irqs = ~0UL; for_each_ioapic(i) alloc_ioapic_saved_registers(i); return 0; } struct io_apic { unsigned int index; unsigned int unused[3]; unsigned int data; unsigned int unused2[11]; unsigned int eoi; }; static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) { return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) + (mpc_ioapic_addr(idx) & ~PAGE_MASK); } static inline void io_apic_eoi(unsigned int apic, unsigned int vector) { struct io_apic __iomem *io_apic = io_apic_base(apic); writel(vector, &io_apic->eoi); } unsigned int native_io_apic_read(unsigned int apic, unsigned int reg) { struct io_apic __iomem 
*io_apic = io_apic_base(apic); writel(reg, &io_apic->index); return readl(&io_apic->data); } static void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) { struct io_apic __iomem *io_apic = io_apic_base(apic); writel(reg, &io_apic->index); writel(value, &io_apic->data); } static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin) { struct IO_APIC_route_entry entry; entry.w1 = io_apic_read(apic, 0x10 + 2 * pin); entry.w2 = io_apic_read(apic, 0x11 + 2 * pin); return entry; } static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) { struct IO_APIC_route_entry entry; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); entry = __ioapic_read_entry(apic, pin); raw_spin_unlock_irqrestore(&ioapic_lock, flags); return entry; } /* * When we write a new IO APIC routing entry, we need to write the high * word first! If the mask bit in the low word is clear, we will enable * the interrupt, and we need to make sure the entry is fully populated * before that happens. */ static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) { io_apic_write(apic, 0x11 + 2*pin, e.w2); io_apic_write(apic, 0x10 + 2*pin, e.w1); } static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) { unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); __ioapic_write_entry(apic, pin, e); raw_spin_unlock_irqrestore(&ioapic_lock, flags); } /* * When we mask an IO APIC routing entry, we need to write the low * word first, in order to set the mask bit before we change the * high bits! */ static void ioapic_mask_entry(int apic, int pin) { struct IO_APIC_route_entry e = { .masked = true }; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(apic, 0x10 + 2*pin, e.w1); io_apic_write(apic, 0x11 + 2*pin, e.w2); raw_spin_unlock_irqrestore(&ioapic_lock, flags); } /* * The common case is 1:1 IRQ<->pin mappings. Sometimes there are * shared ISA-space IRQs, so we have to support them. We are super * fast in the common case, and fast for shared ISA-space IRQs. */ static int __add_pin_to_irq_node(struct mp_chip_data *data, int node, int apic, int pin) { struct irq_pin_list *entry; /* don't allow duplicates */ for_each_irq_pin(entry, data->irq_2_pin) if (entry->apic == apic && entry->pin == pin) return 0; entry = kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node); if (!entry) { pr_err("can not alloc irq_pin_list (%d,%d,%d)\n", node, apic, pin); return -ENOMEM; } entry->apic = apic; entry->pin = pin; list_add_tail(&entry->list, &data->irq_2_pin); return 0; } static void __remove_pin_from_irq(struct mp_chip_data *data, int apic, int pin) { struct irq_pin_list *tmp, *entry; list_for_each_entry_safe(entry, tmp, &data->irq_2_pin, list) if (entry->apic == apic && entry->pin == pin) { list_del(&entry->list); kfree(entry); return; } } static void add_pin_to_irq_node(struct mp_chip_data *data, int node, int apic, int pin) { if (__add_pin_to_irq_node(data, node, apic, pin)) panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); } /* * Reroute an IRQ to a different pin. */ static void __init replace_pin_at_irq_node(struct mp_chip_data *data, int node, int oldapic, int oldpin, int newapic, int newpin) { struct irq_pin_list *entry; for_each_irq_pin(entry, data->irq_2_pin) { if (entry->apic == oldapic && entry->pin == oldpin) { entry->apic = newapic; entry->pin = newpin; /* every one is different, right? 
*/ return; } } /* old apic/pin didn't exist, so just add new ones */ add_pin_to_irq_node(data, node, newapic, newpin); } static void io_apic_modify_irq(struct mp_chip_data *data, bool masked, void (*final)(struct irq_pin_list *entry)) { struct irq_pin_list *entry; data->entry.masked = masked; for_each_irq_pin(entry, data->irq_2_pin) { io_apic_write(entry->apic, 0x10 + 2 * entry->pin, data->entry.w1); if (final) final(entry); } } static void io_apic_sync(struct irq_pin_list *entry) { /* * Synchronize the IO-APIC and the CPU by doing * a dummy read from the IO-APIC */ struct io_apic __iomem *io_apic; io_apic = io_apic_base(entry->apic); readl(&io_apic->data); } static void mask_ioapic_irq(struct irq_data *irq_data) { struct mp_chip_data *data = irq_data->chip_data; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); io_apic_modify_irq(data, true, &io_apic_sync); raw_spin_unlock_irqrestore(&ioapic_lock, flags); } static void __unmask_ioapic(struct mp_chip_data *data) { io_apic_modify_irq(data, false, NULL); } static void unmask_ioapic_irq(struct irq_data *irq_data) { struct mp_chip_data *data = irq_data->chip_data; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); __unmask_ioapic(data); raw_spin_unlock_irqrestore(&ioapic_lock, flags); } /* * IO-APIC versions below 0x20 don't support EOI register. * For the record, here is the information about various versions: * 0Xh 82489DX * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant * 2Xh I/O(x)APIC which is PCI 2.2 Compliant * 30h-FFh Reserved * * Some of the Intel ICH Specs (ICH2 to ICH5) documents the io-apic * version as 0x2. This is an error with documentation and these ICH chips * use io-apic's of version 0x20. * * For IO-APIC's with EOI register, we use that to do an explicit EOI. * Otherwise, we simulate the EOI message manually by changing the trigger * mode to edge and then back to level, with RTE being masked during this. */ static void __eoi_ioapic_pin(int apic, int pin, int vector) { if (mpc_ioapic_ver(apic) >= 0x20) { io_apic_eoi(apic, vector); } else { struct IO_APIC_route_entry entry, entry1; entry = entry1 = __ioapic_read_entry(apic, pin); /* * Mask the entry and change the trigger mode to edge. */ entry1.masked = true; entry1.is_level = false; __ioapic_write_entry(apic, pin, entry1); /* * Restore the previous level triggered entry. */ __ioapic_write_entry(apic, pin, entry); } } static void eoi_ioapic_pin(int vector, struct mp_chip_data *data) { unsigned long flags; struct irq_pin_list *entry; raw_spin_lock_irqsave(&ioapic_lock, flags); for_each_irq_pin(entry, data->irq_2_pin) __eoi_ioapic_pin(entry->apic, entry->pin, vector); raw_spin_unlock_irqrestore(&ioapic_lock, flags); } static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) { struct IO_APIC_route_entry entry; /* Check delivery_mode to be sure we're not clearing an SMI pin */ entry = ioapic_read_entry(apic, pin); if (entry.delivery_mode == APIC_DELIVERY_MODE_SMI) return; /* * Make sure the entry is masked and re-read the contents to check * if it is a level triggered pin and if the remote-IRR is set. */ if (!entry.masked) { entry.masked = true; ioapic_write_entry(apic, pin, entry); entry = ioapic_read_entry(apic, pin); } if (entry.irr) { unsigned long flags; /* * Make sure the trigger mode is set to level. Explicit EOI * doesn't clear the remote-IRR if the trigger mode is not * set to level. 
*/ if (!entry.is_level) { entry.is_level = true; ioapic_write_entry(apic, pin, entry); } raw_spin_lock_irqsave(&ioapic_lock, flags); __eoi_ioapic_pin(apic, pin, entry.vector); raw_spin_unlock_irqrestore(&ioapic_lock, flags); } /* * Clear the rest of the bits in the IO-APIC RTE except for the mask * bit. */ ioapic_mask_entry(apic, pin); entry = ioapic_read_entry(apic, pin); if (entry.irr) pr_err("Unable to reset IRR for apic: %d, pin :%d\n", mpc_ioapic_id(apic), pin); } void clear_IO_APIC (void) { int apic, pin; for_each_ioapic_pin(apic, pin) clear_IO_APIC_pin(apic, pin); } #ifdef CONFIG_X86_32 /* * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to * specific CPU-side IRQs. */ #define MAX_PIRQS 8 static int pirq_entries[MAX_PIRQS] = { [0 ... MAX_PIRQS - 1] = -1 }; static int __init ioapic_pirq_setup(char *str) { int i, max; int ints[MAX_PIRQS+1]; get_options(str, ARRAY_SIZE(ints), ints); apic_printk(APIC_VERBOSE, KERN_INFO "PIRQ redirection, working around broken MP-BIOS.\n"); max = MAX_PIRQS; if (ints[0] < MAX_PIRQS) max = ints[0]; for (i = 0; i < max; i++) { apic_printk(APIC_VERBOSE, KERN_DEBUG "... PIRQ%d -> IRQ %d\n", i, ints[i+1]); /* * PIRQs are mapped upside down, usually. */ pirq_entries[MAX_PIRQS-i-1] = ints[i+1]; } return 1; } __setup("pirq=", ioapic_pirq_setup); #endif /* CONFIG_X86_32 */ /* * Saves all the IO-APIC RTE's */ int save_ioapic_entries(void) { int apic, pin; int err = 0; for_each_ioapic(apic) { if (!ioapics[apic].saved_registers) { err = -ENOMEM; continue; } for_each_pin(apic, pin) ioapics[apic].saved_registers[pin] = ioapic_read_entry(apic, pin); } return err; } /* * Mask all IO APIC entries. */ void mask_ioapic_entries(void) { int apic, pin; for_each_ioapic(apic) { if (!ioapics[apic].saved_registers) continue; for_each_pin(apic, pin) { struct IO_APIC_route_entry entry; entry = ioapics[apic].saved_registers[pin]; if (!entry.masked) { entry.masked = true; ioapic_write_entry(apic, pin, entry); } } } } /* * Restore IO APIC entries which was saved in the ioapic structure. */ int restore_ioapic_entries(void) { int apic, pin; for_each_ioapic(apic) { if (!ioapics[apic].saved_registers) continue; for_each_pin(apic, pin) ioapic_write_entry(apic, pin, ioapics[apic].saved_registers[pin]); } return 0; } /* * Find the IRQ entry number of a certain pin. 
*/ static int find_irq_entry(int ioapic_idx, int pin, int type) { int i; for (i = 0; i < mp_irq_entries; i++) if (mp_irqs[i].irqtype == type && (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) || mp_irqs[i].dstapic == MP_APIC_ALL) && mp_irqs[i].dstirq == pin) return i; return -1; } /* * Find the pin to which IRQ[irq] (ISA) is connected */ static int __init find_isa_irq_pin(int irq, int type) { int i; for (i = 0; i < mp_irq_entries; i++) { int lbus = mp_irqs[i].srcbus; if (test_bit(lbus, mp_bus_not_pci) && (mp_irqs[i].irqtype == type) && (mp_irqs[i].srcbusirq == irq)) return mp_irqs[i].dstirq; } return -1; } static int __init find_isa_irq_apic(int irq, int type) { int i; for (i = 0; i < mp_irq_entries; i++) { int lbus = mp_irqs[i].srcbus; if (test_bit(lbus, mp_bus_not_pci) && (mp_irqs[i].irqtype == type) && (mp_irqs[i].srcbusirq == irq)) break; } if (i < mp_irq_entries) { int ioapic_idx; for_each_ioapic(ioapic_idx) if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic) return ioapic_idx; } return -1; } static bool irq_active_low(int idx) { int bus = mp_irqs[idx].srcbus; /* * Determine IRQ line polarity (high active or low active): */ switch (mp_irqs[idx].irqflag & MP_IRQPOL_MASK) { case MP_IRQPOL_DEFAULT: /* * Conforms to spec, ie. bus-type dependent polarity. PCI * defaults to low active. [E]ISA defaults to high active. */ return !test_bit(bus, mp_bus_not_pci); case MP_IRQPOL_ACTIVE_HIGH: return false; case MP_IRQPOL_RESERVED: pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n"); fallthrough; case MP_IRQPOL_ACTIVE_LOW: default: /* Pointless default required due to do gcc stupidity */ return true; } } #ifdef CONFIG_EISA /* * EISA Edge/Level control register, ELCR */ static bool EISA_ELCR(unsigned int irq) { if (irq < nr_legacy_irqs()) { unsigned int port = PIC_ELCR1 + (irq >> 3); return (inb(port) >> (irq & 7)) & 1; } apic_printk(APIC_VERBOSE, KERN_INFO "Broken MPtable reports ISA irq %d\n", irq); return false; } /* * EISA interrupts are always active high and can be edge or level * triggered depending on the ELCR value. If an interrupt is listed as * EISA conforming in the MP table, that means its trigger type must be * read in from the ELCR. */ static bool eisa_irq_is_level(int idx, int bus, bool level) { switch (mp_bus_id_to_type[bus]) { case MP_BUS_PCI: case MP_BUS_ISA: return level; case MP_BUS_EISA: return EISA_ELCR(mp_irqs[idx].srcbusirq); } pr_warn("IOAPIC: Invalid srcbus: %d defaulting to level\n", bus); return true; } #else static inline int eisa_irq_is_level(int idx, int bus, bool level) { return level; } #endif static bool irq_is_level(int idx) { int bus = mp_irqs[idx].srcbus; bool level; /* * Determine IRQ trigger mode (edge or level sensitive): */ switch (mp_irqs[idx].irqflag & MP_IRQTRIG_MASK) { case MP_IRQTRIG_DEFAULT: /* * Conforms to spec, ie. bus-type dependent trigger * mode. PCI defaults to level, ISA to edge. 
*/ level = !test_bit(bus, mp_bus_not_pci); /* Take EISA into account */ return eisa_irq_is_level(idx, bus, level); case MP_IRQTRIG_EDGE: return false; case MP_IRQTRIG_RESERVED: pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n"); fallthrough; case MP_IRQTRIG_LEVEL: default: /* Pointless default required due to do gcc stupidity */ return true; } } static int __acpi_get_override_irq(u32 gsi, bool *trigger, bool *polarity) { int ioapic, pin, idx; if (ioapic_is_disabled) return -1; ioapic = mp_find_ioapic(gsi); if (ioapic < 0) return -1; pin = mp_find_ioapic_pin(ioapic, gsi); if (pin < 0) return -1; idx = find_irq_entry(ioapic, pin, mp_INT); if (idx < 0) return -1; *trigger = irq_is_level(idx); *polarity = irq_active_low(idx); return 0; } #ifdef CONFIG_ACPI int acpi_get_override_irq(u32 gsi, int *is_level, int *active_low) { *is_level = *active_low = 0; return __acpi_get_override_irq(gsi, (bool *)is_level, (bool *)active_low); } #endif void ioapic_set_alloc_attr(struct irq_alloc_info *info, int node, int trigger, int polarity) { init_irq_alloc_info(info, NULL); info->type = X86_IRQ_ALLOC_TYPE_IOAPIC; info->ioapic.node = node; info->ioapic.is_level = trigger; info->ioapic.active_low = polarity; info->ioapic.valid = 1; } static void ioapic_copy_alloc_attr(struct irq_alloc_info *dst, struct irq_alloc_info *src, u32 gsi, int ioapic_idx, int pin) { bool level, pol_low; copy_irq_alloc_info(dst, src); dst->type = X86_IRQ_ALLOC_TYPE_IOAPIC; dst->devid = mpc_ioapic_id(ioapic_idx); dst->ioapic.pin = pin; dst->ioapic.valid = 1; if (src && src->ioapic.valid) { dst->ioapic.node = src->ioapic.node; dst->ioapic.is_level = src->ioapic.is_level; dst->ioapic.active_low = src->ioapic.active_low; } else { dst->ioapic.node = NUMA_NO_NODE; if (__acpi_get_override_irq(gsi, &level, &pol_low) >= 0) { dst->ioapic.is_level = level; dst->ioapic.active_low = pol_low; } else { /* * PCI interrupts are always active low level * triggered. */ dst->ioapic.is_level = true; dst->ioapic.active_low = true; } } } static int ioapic_alloc_attr_node(struct irq_alloc_info *info) { return (info && info->ioapic.valid) ? info->ioapic.node : NUMA_NO_NODE; } static void mp_register_handler(unsigned int irq, bool level) { irq_flow_handler_t hdl; bool fasteoi; if (level) { irq_set_status_flags(irq, IRQ_LEVEL); fasteoi = true; } else { irq_clear_status_flags(irq, IRQ_LEVEL); fasteoi = false; } hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; __irq_set_handler(irq, hdl, 0, fasteoi ? "fasteoi" : "edge"); } static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info) { struct mp_chip_data *data = irq_get_chip_data(irq); /* * setup_IO_APIC_irqs() programs all legacy IRQs with default trigger * and polarity attributes. So allow the first user to reprogram the * pin with real trigger and polarity attributes. */ if (irq < nr_legacy_irqs() && data->count == 1) { if (info->ioapic.is_level != data->is_level) mp_register_handler(irq, info->ioapic.is_level); data->entry.is_level = data->is_level = info->ioapic.is_level; data->entry.active_low = data->active_low = info->ioapic.active_low; } return data->is_level == info->ioapic.is_level && data->active_low == info->ioapic.active_low; } static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi, struct irq_alloc_info *info) { bool legacy = false; int irq = -1; int type = ioapics[ioapic].irqdomain_cfg.type; switch (type) { case IOAPIC_DOMAIN_LEGACY: /* * Dynamically allocate IRQ number for non-ISA IRQs in the first * 16 GSIs on some weird platforms. 
*/ if (!ioapic_initialized || gsi >= nr_legacy_irqs()) irq = gsi; legacy = mp_is_legacy_irq(irq); break; case IOAPIC_DOMAIN_STRICT: irq = gsi; break; case IOAPIC_DOMAIN_DYNAMIC: break; default: WARN(1, "ioapic: unknown irqdomain type %d\n", type); return -1; } return __irq_domain_alloc_irqs(domain, irq, 1, ioapic_alloc_attr_node(info), info, legacy, NULL); } /* * Need special handling for ISA IRQs because there may be multiple IOAPIC pins * sharing the same ISA IRQ number and irqdomain only supports 1:1 mapping * between IOAPIC pin and IRQ number. A typical IOAPIC has 24 pins, pin 0-15 are * used for legacy IRQs and pin 16-23 are used for PCI IRQs (PIRQ A-H). * When ACPI is disabled, only legacy IRQ numbers (IRQ0-15) are available, and * some BIOSes may use MP Interrupt Source records to override IRQ numbers for * PIRQs instead of reprogramming the interrupt routing logic. Thus there may be * multiple pins sharing the same legacy IRQ number when ACPI is disabled. */ static int alloc_isa_irq_from_domain(struct irq_domain *domain, int irq, int ioapic, int pin, struct irq_alloc_info *info) { struct mp_chip_data *data; struct irq_data *irq_data = irq_get_irq_data(irq); int node = ioapic_alloc_attr_node(info); /* * Legacy ISA IRQ has already been allocated, just add pin to * the pin list associated with this IRQ and program the IOAPIC * entry. The IOAPIC entry */ if (irq_data && irq_data->parent_data) { if (!mp_check_pin_attr(irq, info)) return -EBUSY; if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic, info->ioapic.pin)) return -ENOMEM; } else { info->flags |= X86_IRQ_ALLOC_LEGACY; irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true, NULL); if (irq >= 0) { irq_data = irq_domain_get_irq_data(domain, irq); data = irq_data->chip_data; data->isa_irq = true; } } return irq; } static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin, unsigned int flags, struct irq_alloc_info *info) { int irq; bool legacy = false; struct irq_alloc_info tmp; struct mp_chip_data *data; struct irq_domain *domain = mp_ioapic_irqdomain(ioapic); if (!domain) return -ENOSYS; if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) { irq = mp_irqs[idx].srcbusirq; legacy = mp_is_legacy_irq(irq); /* * IRQ2 is unusable for historical reasons on systems which * have a legacy PIC. See the comment vs. IRQ2 further down. * * If this gets removed at some point then the related code * in lapic_assign_system_vectors() needs to be adjusted as * well. */ if (legacy && irq == PIC_CASCADE_IR) return -EINVAL; } mutex_lock(&ioapic_mutex); if (!(flags & IOAPIC_MAP_ALLOC)) { if (!legacy) { irq = irq_find_mapping(domain, pin); if (irq == 0) irq = -ENOENT; } } else { ioapic_copy_alloc_attr(&tmp, info, gsi, ioapic, pin); if (legacy) irq = alloc_isa_irq_from_domain(domain, irq, ioapic, pin, &tmp); else if ((irq = irq_find_mapping(domain, pin)) == 0) irq = alloc_irq_from_domain(domain, ioapic, gsi, &tmp); else if (!mp_check_pin_attr(irq, &tmp)) irq = -EBUSY; if (irq >= 0) { data = irq_get_chip_data(irq); data->count++; } } mutex_unlock(&ioapic_mutex); return irq; } static int pin_2_irq(int idx, int ioapic, int pin, unsigned int flags) { u32 gsi = mp_pin_to_gsi(ioapic, pin); /* * Debugging check, we are in big trouble if this message pops up! */ if (mp_irqs[idx].dstirq != pin) pr_err("broken BIOS or MPTABLE parser, ayiee!!\n"); #ifdef CONFIG_X86_32 /* * PCI IRQ command line redirection. Yes, limits are hardcoded. 
*/ if ((pin >= 16) && (pin <= 23)) { if (pirq_entries[pin-16] != -1) { if (!pirq_entries[pin-16]) { apic_printk(APIC_VERBOSE, KERN_DEBUG "disabling PIRQ%d\n", pin-16); } else { int irq = pirq_entries[pin-16]; apic_printk(APIC_VERBOSE, KERN_DEBUG "using PIRQ%d -> IRQ %d\n", pin-16, irq); return irq; } } } #endif return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, NULL); } int mp_map_gsi_to_irq(u32 gsi, unsigned int flags, struct irq_alloc_info *info) { int ioapic, pin, idx; ioapic = mp_find_ioapic(gsi); if (ioapic < 0) return -ENODEV; pin = mp_find_ioapic_pin(ioapic, gsi); idx = find_irq_entry(ioapic, pin, mp_INT); if ((flags & IOAPIC_MAP_CHECK) && idx < 0) return -ENODEV; return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, info); } void mp_unmap_irq(int irq) { struct irq_data *irq_data = irq_get_irq_data(irq); struct mp_chip_data *data; if (!irq_data || !irq_data->domain) return; data = irq_data->chip_data; if (!data || data->isa_irq) return; mutex_lock(&ioapic_mutex); if (--data->count == 0) irq_domain_free_irqs(irq, 1); mutex_unlock(&ioapic_mutex); } /* * Find a specific PCI IRQ entry. * Not an __init, possibly needed by modules */ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) { int irq, i, best_ioapic = -1, best_idx = -1; apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", bus, slot, pin); if (test_bit(bus, mp_bus_not_pci)) { apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus); return -1; } for (i = 0; i < mp_irq_entries; i++) { int lbus = mp_irqs[i].srcbus; int ioapic_idx, found = 0; if (bus != lbus || mp_irqs[i].irqtype != mp_INT || slot != ((mp_irqs[i].srcbusirq >> 2) & 0x1f)) continue; for_each_ioapic(ioapic_idx) if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic || mp_irqs[i].dstapic == MP_APIC_ALL) { found = 1; break; } if (!found) continue; /* Skip ISA IRQs */ irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq, 0); if (irq > 0 && !IO_APIC_IRQ(irq)) continue; if (pin == (mp_irqs[i].srcbusirq & 3)) { best_idx = i; best_ioapic = ioapic_idx; goto out; } /* * Use the first all-but-pin matching entry as a * best-guess fuzzy result for broken mptables. */ if (best_idx < 0) { best_idx = i; best_ioapic = ioapic_idx; } } if (best_idx < 0) return -1; out: return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq, IOAPIC_MAP_ALLOC); } EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); static struct irq_chip ioapic_chip, ioapic_ir_chip; static void __init setup_IO_APIC_irqs(void) { unsigned int ioapic, pin; int idx; apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); for_each_ioapic_pin(ioapic, pin) { idx = find_irq_entry(ioapic, pin, mp_INT); if (idx < 0) apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n", mpc_ioapic_id(ioapic), pin); else pin_2_irq(idx, ioapic, pin, ioapic ? 0 : IOAPIC_MAP_ALLOC); } } void ioapic_zap_locks(void) { raw_spin_lock_init(&ioapic_lock); } static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries) { struct IO_APIC_route_entry entry; char buf[256]; int i; printk(KERN_DEBUG "IOAPIC %d:\n", apic); for (i = 0; i <= nr_entries; i++) { entry = ioapic_read_entry(apic, i); snprintf(buf, sizeof(buf), " pin%02x, %s, %s, %s, V(%02X), IRR(%1d), S(%1d)", i, entry.masked ? "disabled" : "enabled ", entry.is_level ? "level" : "edge ", entry.active_low ? 
"low " : "high", entry.vector, entry.irr, entry.delivery_status); if (entry.ir_format) { printk(KERN_DEBUG "%s, remapped, I(%04X), Z(%X)\n", buf, (entry.ir_index_15 << 15) | entry.ir_index_0_14, entry.ir_zero); } else { printk(KERN_DEBUG "%s, %s, D(%02X%02X), M(%1d)\n", buf, entry.dest_mode_logical ? "logical " : "physical", entry.virt_destid_8_14, entry.destid_0_7, entry.delivery_mode); } } } static void __init print_IO_APIC(int ioapic_idx) { union IO_APIC_reg_00 reg_00; union IO_APIC_reg_01 reg_01; union IO_APIC_reg_02 reg_02; union IO_APIC_reg_03 reg_03; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(ioapic_idx, 0); reg_01.raw = io_apic_read(ioapic_idx, 1); if (reg_01.bits.version >= 0x10) reg_02.raw = io_apic_read(ioapic_idx, 2); if (reg_01.bits.version >= 0x20) reg_03.raw = io_apic_read(ioapic_idx, 3); raw_spin_unlock_irqrestore(&ioapic_lock, flags); printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx)); printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01); printk(KERN_DEBUG "....... : max redirection entries: %02X\n", reg_01.bits.entries); printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); printk(KERN_DEBUG "....... : IO APIC version: %02X\n", reg_01.bits.version); /* * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, * but the value of reg_02 is read as the previous read register * value, so ignore it if reg_02 == reg_01. */ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); } /* * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02 * or reg_03, but the value of reg_0[23] is read as the previous read * register value, so ignore it if reg_03 == reg_0[12]. */ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && reg_03.raw != reg_01.raw) { printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT); } printk(KERN_DEBUG ".... IRQ redirection table:\n"); io_apic_print_entries(ioapic_idx, reg_01.bits.entries); } void __init print_IO_APICs(void) { int ioapic_idx; unsigned int irq; printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); for_each_ioapic(ioapic_idx) printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", mpc_ioapic_id(ioapic_idx), ioapics[ioapic_idx].nr_registers); /* * We are a bit conservative about what we expect. We have to * know about every hardware change ASAP. */ printk(KERN_INFO "testing the IO APIC.......................\n"); for_each_ioapic(ioapic_idx) print_IO_APIC(ioapic_idx); printk(KERN_DEBUG "IRQ to pin mappings:\n"); for_each_active_irq(irq) { struct irq_pin_list *entry; struct irq_chip *chip; struct mp_chip_data *data; chip = irq_get_chip(irq); if (chip != &ioapic_chip && chip != &ioapic_ir_chip) continue; data = irq_get_chip_data(irq); if (!data) continue; if (list_empty(&data->irq_2_pin)) continue; printk(KERN_DEBUG "IRQ%d ", irq); for_each_irq_pin(entry, data->irq_2_pin) pr_cont("-> %d:%d", entry->apic, entry->pin); pr_cont("\n"); } printk(KERN_INFO ".................................... 
done.\n"); } /* Where if anywhere is the i8259 connect in external int mode */ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; void __init enable_IO_APIC(void) { int i8259_apic, i8259_pin; int apic, pin; if (ioapic_is_disabled) nr_ioapics = 0; if (!nr_legacy_irqs() || !nr_ioapics) return; for_each_ioapic_pin(apic, pin) { /* See if any of the pins is in ExtINT mode */ struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin); /* If the interrupt line is enabled and in ExtInt mode * I have found the pin where the i8259 is connected. */ if (!entry.masked && entry.delivery_mode == APIC_DELIVERY_MODE_EXTINT) { ioapic_i8259.apic = apic; ioapic_i8259.pin = pin; goto found_i8259; } } found_i8259: /* Look to see what if the MP table has reported the ExtINT */ /* If we could not find the appropriate pin by looking at the ioapic * the i8259 probably is not connected the ioapic but give the * mptable a chance anyway. */ i8259_pin = find_isa_irq_pin(0, mp_ExtINT); i8259_apic = find_isa_irq_apic(0, mp_ExtINT); /* Trust the MP table if nothing is setup in the hardware */ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); ioapic_i8259.pin = i8259_pin; ioapic_i8259.apic = i8259_apic; } /* Complain if the MP table and the hardware disagree */ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) { printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); } /* * Do not trust the IO-APIC being empty at bootup */ clear_IO_APIC(); } void native_restore_boot_irq_mode(void) { /* * If the i8259 is routed through an IOAPIC * Put that IOAPIC in virtual wire mode * so legacy interrupts can be delivered. */ if (ioapic_i8259.pin != -1) { struct IO_APIC_route_entry entry; u32 apic_id = read_apic_id(); memset(&entry, 0, sizeof(entry)); entry.masked = false; entry.is_level = false; entry.active_low = false; entry.dest_mode_logical = false; entry.delivery_mode = APIC_DELIVERY_MODE_EXTINT; entry.destid_0_7 = apic_id & 0xFF; entry.virt_destid_8_14 = apic_id >> 8; /* * Add it to the IO-APIC irq-routing table: */ ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); } if (boot_cpu_has(X86_FEATURE_APIC) || apic_from_smp_config()) disconnect_bsp_APIC(ioapic_i8259.pin != -1); } void restore_boot_irq_mode(void) { if (!nr_legacy_irqs()) return; x86_apic_ops.restore(); } #ifdef CONFIG_X86_32 /* * function to set the IO-APIC physical IDs based on the * values stored in the MPC table. * * by Matt Domsch <[email protected]> Tue Dec 21 12:25:05 CST 1999 */ void __init setup_ioapic_ids_from_mpc_nocheck(void) { union IO_APIC_reg_00 reg_00; physid_mask_t phys_id_present_map; int ioapic_idx; int i; unsigned char old_id; unsigned long flags; /* * This is broken; anything with a real cpu count has to * circumvent this idiocy regardless. */ apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map); /* * Set the IOAPIC ID to the value stored in the MPC table. */ for_each_ioapic(ioapic_idx) { /* Read the register 0 value */ raw_spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(ioapic_idx, 0); raw_spin_unlock_irqrestore(&ioapic_lock, flags); old_id = mpc_ioapic_id(ioapic_idx); if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", ioapic_idx, mpc_ioapic_id(ioapic_idx)); printk(KERN_ERR "... fixing up to %d. 
(tell your hw vendor)\n", reg_00.bits.ID); ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID; } /* * Sanity check, is the ID really free? Every APIC in a * system must have a unique ID or we get lots of nice * 'stuck on smp_invalidate_needed IPI wait' messages. */ if (apic->check_apicid_used(&phys_id_present_map, mpc_ioapic_id(ioapic_idx))) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", ioapic_idx, mpc_ioapic_id(ioapic_idx)); for (i = 0; i < get_physical_broadcast(); i++) if (!physid_isset(i, phys_id_present_map)) break; if (i >= get_physical_broadcast()) panic("Max APIC ID exceeded!\n"); printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", i); physid_set(i, phys_id_present_map); ioapics[ioapic_idx].mp_config.apicid = i; } else { apic_printk(APIC_VERBOSE, "Setting %d in the phys_id_present_map\n", mpc_ioapic_id(ioapic_idx)); physid_set(mpc_ioapic_id(ioapic_idx), phys_id_present_map); } /* * We need to adjust the IRQ routing table * if the ID changed. */ if (old_id != mpc_ioapic_id(ioapic_idx)) for (i = 0; i < mp_irq_entries; i++) if (mp_irqs[i].dstapic == old_id) mp_irqs[i].dstapic = mpc_ioapic_id(ioapic_idx); /* * Update the ID register according to the right value * from the MPC table if they are different. */ if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID) continue; apic_printk(APIC_VERBOSE, KERN_INFO "...changing IO-APIC physical APIC ID to %d ...", mpc_ioapic_id(ioapic_idx)); reg_00.bits.ID = mpc_ioapic_id(ioapic_idx); raw_spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(ioapic_idx, 0, reg_00.raw); raw_spin_unlock_irqrestore(&ioapic_lock, flags); /* * Sanity check */ raw_spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(ioapic_idx, 0); raw_spin_unlock_irqrestore(&ioapic_lock, flags); if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) pr_cont("could not set ID!\n"); else apic_printk(APIC_VERBOSE, " ok.\n"); } } void __init setup_ioapic_ids_from_mpc(void) { if (acpi_ioapic) return; /* * Don't check I/O APIC IDs for xAPIC systems. They have * no meaning without the serial APIC bus. */ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) || APIC_XAPIC(boot_cpu_apic_version)) return; setup_ioapic_ids_from_mpc_nocheck(); } #endif int no_timer_check __initdata; static int __init notimercheck(char *s) { no_timer_check = 1; return 1; } __setup("no_timer_check", notimercheck); static void __init delay_with_tsc(void) { unsigned long long start, now; unsigned long end = jiffies + 4; start = rdtsc(); /* * We don't know the TSC frequency yet, but waiting for * 40000000000/HZ TSC cycles is safe: * 4 GHz == 10 jiffies * 1 GHz == 40 jiffies */ do { rep_nop(); now = rdtsc(); } while ((now - start) < 40000000000ULL / HZ && time_before_eq(jiffies, end)); } static void __init delay_without_tsc(void) { unsigned long end = jiffies + 4; int band = 1; /* * We don't know any frequency yet, but waiting for * 40940000000/HZ cycles is safe: * 4 GHz == 10 jiffies * 1 GHz == 40 jiffies * 1 << 1 + 1 << 2 +...+ 1 << 11 = 4094 */ do { __delay(((1U << band++) * 10000000UL) / HZ); } while (band < 12 && time_before_eq(jiffies, end)); } /* * There is a nasty bug in some older SMP boards, their mptable lies * about the timer IRQ. 
We do the following to work around the situation: * * - timer IRQ defaults to IO-APIC IRQ * - if this function detects that timer IRQs are defunct, then we fall * back to ISA timer IRQs */ static int __init timer_irq_works(void) { unsigned long t1 = jiffies; if (no_timer_check) return 1; local_irq_enable(); if (boot_cpu_has(X86_FEATURE_TSC)) delay_with_tsc(); else delay_without_tsc(); /* * Expect a few ticks at least, to be sure some possible * glue logic does not lock up after one or two first * ticks in a non-ExtINT mode. Also the local APIC * might have cached one ExtINT interrupt. Finally, at * least one tick may be lost due to delays. */ local_irq_disable(); /* Did jiffies advance? */ return time_after(jiffies, t1 + 4); } /* * In the SMP+IOAPIC case it might happen that there are an unspecified * number of pending IRQ events unhandled. These cases are very rare, * so we 'resend' these IRQs via IPIs, to the same CPU. It's much * better to do it this way as thus we do not have to be aware of * 'pending' interrupts in the IRQ path, except at this point. */ /* * Edge triggered needs to resend any interrupt * that was delayed but this is now handled in the device * independent code. */ /* * Starting up a edge-triggered IO-APIC interrupt is * nasty - we need to make sure that we get the edge. * If it is already asserted for some reason, we need * return 1 to indicate that is was pending. * * This is not complete - we should be able to fake * an edge even if it isn't on the 8259A... */ static unsigned int startup_ioapic_irq(struct irq_data *data) { int was_pending = 0, irq = data->irq; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); if (irq < nr_legacy_irqs()) { legacy_pic->mask(irq); if (legacy_pic->irq_pending(irq)) was_pending = 1; } __unmask_ioapic(data->chip_data); raw_spin_unlock_irqrestore(&ioapic_lock, flags); return was_pending; } atomic_t irq_mis_count; #ifdef CONFIG_GENERIC_PENDING_IRQ static bool io_apic_level_ack_pending(struct mp_chip_data *data) { struct irq_pin_list *entry; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); for_each_irq_pin(entry, data->irq_2_pin) { struct IO_APIC_route_entry e; int pin; pin = entry->pin; e.w1 = io_apic_read(entry->apic, 0x10 + pin*2); /* Is the remote IRR bit set? */ if (e.irr) { raw_spin_unlock_irqrestore(&ioapic_lock, flags); return true; } } raw_spin_unlock_irqrestore(&ioapic_lock, flags); return false; } static inline bool ioapic_prepare_move(struct irq_data *data) { /* If we are moving the IRQ we need to mask it */ if (unlikely(irqd_is_setaffinity_pending(data))) { if (!irqd_irq_masked(data)) mask_ioapic_irq(data); return true; } return false; } static inline void ioapic_finish_move(struct irq_data *data, bool moveit) { if (unlikely(moveit)) { /* Only migrate the irq if the ack has been received. * * On rare occasions the broadcast level triggered ack gets * delayed going to ioapics, and if we reprogram the * vector while Remote IRR is still set the irq will never * fire again. * * To prevent this scenario we read the Remote IRR bit * of the ioapic. This has two effects. * - On any sane system the read of the ioapic will * flush writes (and acks) going to the ioapic from * this cpu. * - We get to see if the ACK has actually been delivered. 
* * Based on failed experiments of reprogramming the * ioapic entry from outside of irq context starting * with masking the ioapic entry and then polling until * Remote IRR was clear before reprogramming the * ioapic I don't trust the Remote IRR bit to be * completely accurate. * * However there appears to be no other way to plug * this race, so if the Remote IRR bit is not * accurate and is causing problems then it is a hardware bug * and you can go talk to the chipset vendor about it. */ if (!io_apic_level_ack_pending(data->chip_data)) irq_move_masked_irq(data); /* If the IRQ is masked in the core, leave it: */ if (!irqd_irq_masked(data)) unmask_ioapic_irq(data); } } #else static inline bool ioapic_prepare_move(struct irq_data *data) { return false; } static inline void ioapic_finish_move(struct irq_data *data, bool moveit) { } #endif static void ioapic_ack_level(struct irq_data *irq_data) { struct irq_cfg *cfg = irqd_cfg(irq_data); unsigned long v; bool moveit; int i; irq_complete_move(cfg); moveit = ioapic_prepare_move(irq_data); /* * It appears there is an erratum which affects at least version 0x11 * of I/O APIC (that's the 82093AA and cores integrated into various * chipsets). Under certain conditions a level-triggered interrupt is * erroneously delivered as edge-triggered one but the respective IRR * bit gets set nevertheless. As a result the I/O unit expects an EOI * message but it will never arrive and further interrupts are blocked * from the source. The exact reason is so far unknown, but the * phenomenon was observed when two consecutive interrupt requests * from a given source get delivered to the same CPU and the source is * temporarily disabled in between. * * A workaround is to simulate an EOI message manually. We achieve it * by setting the trigger mode to edge and then to level when the edge * trigger mode gets detected in the TMR of a local APIC for a * level-triggered interrupt. We mask the source for the time of the * operation to prevent an edge-triggered interrupt escaping meanwhile. * The idea is from Manfred Spraul. --macro * * Also in the case when cpu goes offline, fixup_irqs() will forward * any unhandled interrupt on the offlined cpu to the new cpu * destination that is handling the corresponding interrupt. This * interrupt forwarding is done via IPI's. Hence, in this case also * level-triggered io-apic interrupt will be seen as an edge * interrupt in the IRR. And we can't rely on the cpu's EOI * to be broadcasted to the IO-APIC's which will clear the remoteIRR * corresponding to the level-triggered interrupt. Hence on IO-APIC's * supporting EOI register, we do an explicit EOI to clear the * remote IRR and on IO-APIC's which don't have an EOI register, * we use the above logic (mask+edge followed by unmask+level) from * Manfred Spraul to clear the remote IRR. */ i = cfg->vector; v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); /* * We must acknowledge the irq before we move it or the acknowledge will * not propagate properly. */ apic_eoi(); /* * Tail end of clearing remote IRR bit (either by delivering the EOI * message via io-apic EOI register write or simulating it using * mask+edge followed by unmask+level logic) manually when the * level triggered interrupt is seen as the edge triggered interrupt * at the cpu. 
*/ if (!(v & (1 << (i & 0x1f)))) { atomic_inc(&irq_mis_count); eoi_ioapic_pin(cfg->vector, irq_data->chip_data); } ioapic_finish_move(irq_data, moveit); } static void ioapic_ir_ack_level(struct irq_data *irq_data) { struct mp_chip_data *data = irq_data->chip_data; /* * Intr-remapping uses pin number as the virtual vector * in the RTE. Actual vector is programmed in * intr-remapping table entry. Hence for the io-apic * EOI we use the pin number. */ apic_ack_irq(irq_data); eoi_ioapic_pin(data->entry.vector, data); } /* * The I/OAPIC is just a device for generating MSI messages from legacy * interrupt pins. Various fields of the RTE translate into bits of the * resulting MSI which had a historical meaning. * * With interrupt remapping, many of those bits have different meanings * in the underlying MSI, but the way that the I/OAPIC transforms them * from its RTE to the MSI message is the same. This function allows * the parent IRQ domain to compose the MSI message, then takes the * relevant bits to put them in the appropriate places in the RTE in * order to generate that message when the IRQ happens. * * The setup here relies on a preconfigured route entry (is_level, * active_low, masked) because the parent domain is merely composing the * generic message routing information which is used for the MSI. */ static void ioapic_setup_msg_from_msi(struct irq_data *irq_data, struct IO_APIC_route_entry *entry) { struct msi_msg msg; /* Let the parent domain compose the MSI message */ irq_chip_compose_msi_msg(irq_data, &msg); /* * - Real vector * - DMAR/IR: 8bit subhandle (ioapic.pin) * - AMD/IR: 8bit IRTE index */ entry->vector = msg.arch_data.vector; /* Delivery mode (for DMAR/IR all 0) */ entry->delivery_mode = msg.arch_data.delivery_mode; /* Destination mode or DMAR/IR index bit 15 */ entry->dest_mode_logical = msg.arch_addr_lo.dest_mode_logical; /* DMAR/IR: 1, 0 for all other modes */ entry->ir_format = msg.arch_addr_lo.dmar_format; /* * - DMAR/IR: index bit 0-14. * * - Virt: If the host supports x2apic without a virtualized IR * unit then bit 0-6 of dmar_index_0_14 are providing bit * 8-14 of the destination id. * * All other modes have bit 0-6 of dmar_index_0_14 cleared and the * topmost 8 bits are destination id bit 0-7 (entry::destid_0_7). */ entry->ir_index_0_14 = msg.arch_addr_lo.dmar_index_0_14; } static void ioapic_configure_entry(struct irq_data *irqd) { struct mp_chip_data *mpd = irqd->chip_data; struct irq_pin_list *entry; ioapic_setup_msg_from_msi(irqd, &mpd->entry); for_each_irq_pin(entry, mpd->irq_2_pin) __ioapic_write_entry(entry->apic, entry->pin, mpd->entry); } static int ioapic_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force) { struct irq_data *parent = irq_data->parent_data; unsigned long flags; int ret; ret = parent->chip->irq_set_affinity(parent, mask, force); raw_spin_lock_irqsave(&ioapic_lock, flags); if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) ioapic_configure_entry(irq_data); raw_spin_unlock_irqrestore(&ioapic_lock, flags); return ret; } /* * Interrupt shutdown masks the ioapic pin, but the interrupt might already * be in flight, but not yet serviced by the target CPU. That means * __synchronize_hardirq() would return and claim that everything is calmed * down. So free_irq() would proceed and deactivate the interrupt and free * resources. * * Once the target CPU comes around to service it it will find a cleared * vector and complain. 
While the spurious interrupt is harmless, the full * release of resources might prevent the interrupt from being acknowledged * which keeps the hardware in a weird state. * * Verify that the corresponding Remote-IRR bits are clear. */ static int ioapic_irq_get_chip_state(struct irq_data *irqd, enum irqchip_irq_state which, bool *state) { struct mp_chip_data *mcd = irqd->chip_data; struct IO_APIC_route_entry rentry; struct irq_pin_list *p; if (which != IRQCHIP_STATE_ACTIVE) return -EINVAL; *state = false; raw_spin_lock(&ioapic_lock); for_each_irq_pin(p, mcd->irq_2_pin) { rentry = __ioapic_read_entry(p->apic, p->pin); /* * The remote IRR is only valid in level trigger mode. It's * meaning is undefined for edge triggered interrupts and * irrelevant because the IO-APIC treats them as fire and * forget. */ if (rentry.irr && rentry.is_level) { *state = true; break; } } raw_spin_unlock(&ioapic_lock); return 0; } static struct irq_chip ioapic_chip __read_mostly = { .name = "IO-APIC", .irq_startup = startup_ioapic_irq, .irq_mask = mask_ioapic_irq, .irq_unmask = unmask_ioapic_irq, .irq_ack = irq_chip_ack_parent, .irq_eoi = ioapic_ack_level, .irq_set_affinity = ioapic_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_get_irqchip_state = ioapic_irq_get_chip_state, .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP, }; static struct irq_chip ioapic_ir_chip __read_mostly = { .name = "IR-IO-APIC", .irq_startup = startup_ioapic_irq, .irq_mask = mask_ioapic_irq, .irq_unmask = unmask_ioapic_irq, .irq_ack = irq_chip_ack_parent, .irq_eoi = ioapic_ir_ack_level, .irq_set_affinity = ioapic_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_get_irqchip_state = ioapic_irq_get_chip_state, .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP, }; static inline void init_IO_APIC_traps(void) { struct irq_cfg *cfg; unsigned int irq; for_each_active_irq(irq) { cfg = irq_cfg(irq); if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { /* * Hmm.. We don't have an entry for this, * so default to an old-fashioned 8259 * interrupt if we can.. */ if (irq < nr_legacy_irqs()) legacy_pic->make_irq(irq); else /* Strange. Oh, well.. */ irq_set_chip(irq, &no_irq_chip); } } } /* * The local APIC irq-chip implementation: */ static void mask_lapic_irq(struct irq_data *data) { unsigned long v; v = apic_read(APIC_LVT0); apic_write(APIC_LVT0, v | APIC_LVT_MASKED); } static void unmask_lapic_irq(struct irq_data *data) { unsigned long v; v = apic_read(APIC_LVT0); apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); } static void ack_lapic_irq(struct irq_data *data) { apic_eoi(); } static struct irq_chip lapic_chip __read_mostly = { .name = "local-APIC", .irq_mask = mask_lapic_irq, .irq_unmask = unmask_lapic_irq, .irq_ack = ack_lapic_irq, }; static void lapic_register_intr(int irq) { irq_clear_status_flags(irq, IRQ_LEVEL); irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, "edge"); } /* * This looks a bit hackish but it's about the only one way of sending * a few INTA cycles to 8259As and any associated glue logic. ICR does * not support the ExtINT mode, unfortunately. We need to send these * cycles as some i82489DX-based boards have glue logic that keeps the * 8259A interrupt line asserted until INTA. 
--macro */ static inline void __init unlock_ExtINT_logic(void) { int apic, pin, i; struct IO_APIC_route_entry entry0, entry1; unsigned char save_control, save_freq_select; u32 apic_id; pin = find_isa_irq_pin(8, mp_INT); if (pin == -1) { WARN_ON_ONCE(1); return; } apic = find_isa_irq_apic(8, mp_INT); if (apic == -1) { WARN_ON_ONCE(1); return; } entry0 = ioapic_read_entry(apic, pin); clear_IO_APIC_pin(apic, pin); apic_id = read_apic_id(); memset(&entry1, 0, sizeof(entry1)); entry1.dest_mode_logical = true; entry1.masked = false; entry1.destid_0_7 = apic_id & 0xFF; entry1.virt_destid_8_14 = apic_id >> 8; entry1.delivery_mode = APIC_DELIVERY_MODE_EXTINT; entry1.active_low = entry0.active_low; entry1.is_level = false; entry1.vector = 0; ioapic_write_entry(apic, pin, entry1); save_control = CMOS_READ(RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, RTC_FREQ_SELECT); CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); i = 100; while (i-- > 0) { mdelay(10); if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) i -= 10; } CMOS_WRITE(save_control, RTC_CONTROL); CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); clear_IO_APIC_pin(apic, pin); ioapic_write_entry(apic, pin, entry0); } static int disable_timer_pin_1 __initdata; /* Actually the next is obsolete, but keep it for paranoid reasons -AK */ static int __init disable_timer_pin_setup(char *arg) { disable_timer_pin_1 = 1; return 0; } early_param("disable_timer_pin_1", disable_timer_pin_setup); static int mp_alloc_timer_irq(int ioapic, int pin) { int irq = -1; struct irq_domain *domain = mp_ioapic_irqdomain(ioapic); if (domain) { struct irq_alloc_info info; ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 0, 0); info.devid = mpc_ioapic_id(ioapic); info.ioapic.pin = pin; mutex_lock(&ioapic_mutex); irq = alloc_isa_irq_from_domain(domain, 0, ioapic, pin, &info); mutex_unlock(&ioapic_mutex); } return irq; } /* * This code may look a bit paranoid, but it's supposed to cooperate with * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast * fanatically on his truly buggy board. * * FIXME: really need to revamp this for all platforms. */ static inline void __init check_timer(void) { struct irq_data *irq_data = irq_get_irq_data(0); struct mp_chip_data *data = irq_data->chip_data; struct irq_cfg *cfg = irqd_cfg(irq_data); int node = cpu_to_node(0); int apic1, pin1, apic2, pin2; int no_pin1 = 0; if (!global_clock_event) return; local_irq_disable(); /* * get/set the timer IRQ vector: */ legacy_pic->mask(0); /* * As IRQ0 is to be enabled in the 8259A, the virtual * wire has to be disabled in the local APIC. Also * timer interrupts need to be acknowledged manually in * the 8259A for the i82489DX when using the NMI * watchdog as that APIC treats NMIs as level-triggered. * The AEOI mode will finish them in the 8259A * automatically. */ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); legacy_pic->init(1); pin1 = find_isa_irq_pin(0, mp_INT); apic1 = find_isa_irq_apic(0, mp_INT); pin2 = ioapic_i8259.pin; apic2 = ioapic_i8259.apic; apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " "apic1=%d pin1=%d apic2=%d pin2=%d\n", cfg->vector, apic1, pin1, apic2, pin2); /* * Some BIOS writers are clueless and report the ExtINTA * I/O APIC input from the cascaded 8259A as the timer * interrupt input. So just in case, if only one pin * was found above, try it both directly and through the * 8259A. 
*/ if (pin1 == -1) { panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC"); pin1 = pin2; apic1 = apic2; no_pin1 = 1; } else if (pin2 == -1) { pin2 = pin1; apic2 = apic1; } if (pin1 != -1) { /* Ok, does IRQ0 through the IOAPIC work? */ if (no_pin1) { mp_alloc_timer_irq(apic1, pin1); } else { /* * for edge trigger, it's already unmasked, * so only need to unmask if it is level-trigger * do we really have level trigger timer? */ int idx = find_irq_entry(apic1, pin1, mp_INT); if (idx != -1 && irq_is_level(idx)) unmask_ioapic_irq(irq_get_irq_data(0)); } irq_domain_deactivate_irq(irq_data); irq_domain_activate_irq(irq_data, false); if (timer_irq_works()) { if (disable_timer_pin_1 > 0) clear_IO_APIC_pin(0, pin1); goto out; } panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC"); clear_IO_APIC_pin(apic1, pin1); if (!no_pin1) apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " "8254 timer not connected to IO-APIC\n"); apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " "(IRQ0) through the 8259A ...\n"); apic_printk(APIC_QUIET, KERN_INFO "..... (found apic %d pin %d) ...\n", apic2, pin2); /* * legacy devices should be connected to IO APIC #0 */ replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2); irq_domain_deactivate_irq(irq_data); irq_domain_activate_irq(irq_data, false); legacy_pic->unmask(0); if (timer_irq_works()) { apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); goto out; } /* * Cleanup, just in case ... */ legacy_pic->mask(0); clear_IO_APIC_pin(apic2, pin2); apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); } apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...\n"); lapic_register_intr(0); apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ legacy_pic->unmask(0); if (timer_irq_works()) { apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); goto out; } legacy_pic->mask(0); apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer as ExtINT IRQ...\n"); legacy_pic->init(0); legacy_pic->make_irq(0); apic_write(APIC_LVT0, APIC_DM_EXTINT); legacy_pic->unmask(0); unlock_ExtINT_logic(); if (timer_irq_works()) { apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); goto out; } apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); if (apic_is_x2apic_enabled()) apic_printk(APIC_QUIET, KERN_INFO "Perhaps problem with the pre-enabled x2apic mode\n" "Try booting with x2apic and interrupt-remapping disabled in the bios.\n"); panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " "report. Then try booting with the 'noapic' option.\n"); out: local_irq_enable(); } /* * Traditionally ISA IRQ2 is the cascade IRQ, and is not available * to devices. However there may be an I/O APIC pin available for * this interrupt regardless. The pin may be left unconnected, but * typically it will be reused as an ExtINT cascade interrupt for * the master 8259A. In the MPS case such a pin will normally be * reported as an ExtINT interrupt in the MP table. With ACPI * there is no provision for ExtINT interrupts, and in the absence * of an override it would be treated as an ordinary ISA I/O APIC * interrupt, that is edge-triggered and unmasked by default. We * used to do this, but it caused problems on some systems because * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using * the same ExtINT cascade interrupt to drive the local APIC of the * bootstrap processor. 
Therefore we refrain from routing IRQ2 to * the I/O APIC in all cases now. No actual device should request * it anyway. --macro */ #define PIC_IRQS (1UL << PIC_CASCADE_IR) static int mp_irqdomain_create(int ioapic) { struct irq_domain *parent; int hwirqs = mp_ioapic_pin_count(ioapic); struct ioapic *ip = &ioapics[ioapic]; struct ioapic_domain_cfg *cfg = &ip->irqdomain_cfg; struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic); struct fwnode_handle *fn; struct irq_fwspec fwspec; if (cfg->type == IOAPIC_DOMAIN_INVALID) return 0; /* Handle device tree enumerated APICs proper */ if (cfg->dev) { fn = of_node_to_fwnode(cfg->dev); } else { fn = irq_domain_alloc_named_id_fwnode("IO-APIC", mpc_ioapic_id(ioapic)); if (!fn) return -ENOMEM; } fwspec.fwnode = fn; fwspec.param_count = 1; fwspec.param[0] = mpc_ioapic_id(ioapic); parent = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_ANY); if (!parent) { if (!cfg->dev) irq_domain_free_fwnode(fn); return -ENODEV; } ip->irqdomain = irq_domain_create_hierarchy(parent, 0, hwirqs, fn, cfg->ops, (void *)(long)ioapic); if (!ip->irqdomain) { /* Release fw handle if it was allocated above */ if (!cfg->dev) irq_domain_free_fwnode(fn); return -ENOMEM; } if (cfg->type == IOAPIC_DOMAIN_LEGACY || cfg->type == IOAPIC_DOMAIN_STRICT) ioapic_dynirq_base = max(ioapic_dynirq_base, gsi_cfg->gsi_end + 1); return 0; } static void ioapic_destroy_irqdomain(int idx) { struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg; struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode; if (ioapics[idx].irqdomain) { irq_domain_remove(ioapics[idx].irqdomain); if (!cfg->dev) irq_domain_free_fwnode(fn); ioapics[idx].irqdomain = NULL; } } void __init setup_IO_APIC(void) { int ioapic; if (ioapic_is_disabled || !nr_ioapics) return; io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL; apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); for_each_ioapic(ioapic) BUG_ON(mp_irqdomain_create(ioapic)); /* * Set up IO-APIC IRQ routing. */ x86_init.mpparse.setup_ioapic_ids(); sync_Arb_IDs(); setup_IO_APIC_irqs(); init_IO_APIC_traps(); if (nr_legacy_irqs()) check_timer(); ioapic_initialized = 1; } static void resume_ioapic_id(int ioapic_idx) { unsigned long flags; union IO_APIC_reg_00 reg_00; raw_spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(ioapic_idx, 0); if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) { reg_00.bits.ID = mpc_ioapic_id(ioapic_idx); io_apic_write(ioapic_idx, 0, reg_00.raw); } raw_spin_unlock_irqrestore(&ioapic_lock, flags); } static void ioapic_resume(void) { int ioapic_idx; for_each_ioapic_reverse(ioapic_idx) resume_ioapic_id(ioapic_idx); restore_ioapic_entries(); } static struct syscore_ops ioapic_syscore_ops = { .suspend = save_ioapic_entries, .resume = ioapic_resume, }; static int __init ioapic_init_ops(void) { register_syscore_ops(&ioapic_syscore_ops); return 0; } device_initcall(ioapic_init_ops); static int io_apic_get_redir_entries(int ioapic) { union IO_APIC_reg_01 reg_01; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); reg_01.raw = io_apic_read(ioapic, 1); raw_spin_unlock_irqrestore(&ioapic_lock, flags); /* The register returns the maximum index redir index * supported, which is one less than the total number of redir * entries. */ return reg_01.bits.entries + 1; } unsigned int arch_dynirq_lower_bound(unsigned int from) { unsigned int ret; /* * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use * gsi_top if ioapic_dynirq_base hasn't been initialized yet. */ ret = ioapic_dynirq_base ? 
: gsi_top; /* * For DT enabled machines ioapic_dynirq_base is irrelevant and * always 0. gsi_top can be 0 if there is no IO/APIC registered. * 0 is an invalid interrupt number for dynamic allocations. Return * @from instead. */ return ret ? : from; } #ifdef CONFIG_X86_32 static int io_apic_get_unique_id(int ioapic, int apic_id) { union IO_APIC_reg_00 reg_00; static physid_mask_t apic_id_map = PHYSID_MASK_NONE; physid_mask_t tmp; unsigned long flags; int i = 0; /* * The P4 platform supports up to 256 APIC IDs on two separate APIC * buses (one for LAPICs, one for IOAPICs), where predecessors only * supports up to 16 on one shared APIC bus. * * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full * advantage of new APIC bus architecture. */ if (physids_empty(apic_id_map)) apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); raw_spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(ioapic, 0); raw_spin_unlock_irqrestore(&ioapic_lock, flags); if (apic_id >= get_physical_broadcast()) { printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " "%d\n", ioapic, apic_id, reg_00.bits.ID); apic_id = reg_00.bits.ID; } /* * Every APIC in a system must have a unique ID or we get lots of nice * 'stuck on smp_invalidate_needed IPI wait' messages. */ if (apic->check_apicid_used(&apic_id_map, apic_id)) { for (i = 0; i < get_physical_broadcast(); i++) { if (!apic->check_apicid_used(&apic_id_map, i)) break; } if (i == get_physical_broadcast()) panic("Max apic_id exceeded!\n"); printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " "trying %d\n", ioapic, apic_id, i); apic_id = i; } physid_set_mask_of_physid(apic_id, &tmp); physids_or(apic_id_map, apic_id_map, tmp); if (reg_00.bits.ID != apic_id) { reg_00.bits.ID = apic_id; raw_spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(ioapic, 0, reg_00.raw); reg_00.raw = io_apic_read(ioapic, 0); raw_spin_unlock_irqrestore(&ioapic_lock, flags); /* Sanity check */ if (reg_00.bits.ID != apic_id) { pr_err("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); return -1; } } apic_printk(APIC_VERBOSE, KERN_INFO "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); return apic_id; } static u8 io_apic_unique_id(int idx, u8 id) { if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && !APIC_XAPIC(boot_cpu_apic_version)) return io_apic_get_unique_id(idx, id); else return id; } #else static u8 io_apic_unique_id(int idx, u8 id) { union IO_APIC_reg_00 reg_00; DECLARE_BITMAP(used, 256); unsigned long flags; u8 new_id; int i; bitmap_zero(used, 256); for_each_ioapic(i) __set_bit(mpc_ioapic_id(i), used); /* Hand out the requested id if available */ if (!test_bit(id, used)) return id; /* * Read the current id from the ioapic and keep it if * available. */ raw_spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(idx, 0); raw_spin_unlock_irqrestore(&ioapic_lock, flags); new_id = reg_00.bits.ID; if (!test_bit(new_id, used)) { apic_printk(APIC_VERBOSE, KERN_INFO "IOAPIC[%d]: Using reg apic_id %d instead of %d\n", idx, new_id, id); return new_id; } /* * Get the next free id and write it to the ioapic. 
*/ new_id = find_first_zero_bit(used, 256); reg_00.bits.ID = new_id; raw_spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(idx, 0, reg_00.raw); reg_00.raw = io_apic_read(idx, 0); raw_spin_unlock_irqrestore(&ioapic_lock, flags); /* Sanity check */ BUG_ON(reg_00.bits.ID != new_id); return new_id; } #endif static int io_apic_get_version(int ioapic) { union IO_APIC_reg_01 reg_01; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); reg_01.raw = io_apic_read(ioapic, 1); raw_spin_unlock_irqrestore(&ioapic_lock, flags); return reg_01.bits.version; } /* * This function updates target affinity of IOAPIC interrupts to include * the CPUs which came online during SMP bringup. */ #define IOAPIC_RESOURCE_NAME_SIZE 11 static struct resource *ioapic_resources; static struct resource * __init ioapic_setup_resources(void) { unsigned long n; struct resource *res; char *mem; int i; if (nr_ioapics == 0) return NULL; n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); n *= nr_ioapics; mem = memblock_alloc(n, SMP_CACHE_BYTES); if (!mem) panic("%s: Failed to allocate %lu bytes\n", __func__, n); res = (void *)mem; mem += sizeof(struct resource) * nr_ioapics; for_each_ioapic(i) { res[i].name = mem; res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); mem += IOAPIC_RESOURCE_NAME_SIZE; ioapics[i].iomem_res = &res[i]; } ioapic_resources = res; return res; } static void io_apic_set_fixmap(enum fixed_addresses idx, phys_addr_t phys) { pgprot_t flags = FIXMAP_PAGE_NOCACHE; /* * Ensure fixmaps for IO-APIC MMIO respect memory encryption pgprot * bits, just like normal ioremap(): */ if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { if (x86_platform.hyper.is_private_mmio(phys)) flags = pgprot_encrypted(flags); else flags = pgprot_decrypted(flags); } __set_fixmap(idx, phys, flags); } void __init io_apic_init_mappings(void) { unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; struct resource *ioapic_res; int i; ioapic_res = ioapic_setup_resources(); for_each_ioapic(i) { if (smp_found_config) { ioapic_phys = mpc_ioapic_addr(i); #ifdef CONFIG_X86_32 if (!ioapic_phys) { printk(KERN_ERR "WARNING: bogus zero IO-APIC " "address found in MPTABLE, " "disabling IO/APIC support!\n"); smp_found_config = 0; ioapic_is_disabled = true; goto fake_ioapic_page; } #endif } else { #ifdef CONFIG_X86_32 fake_ioapic_page: #endif ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!ioapic_phys) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, PAGE_SIZE, PAGE_SIZE); ioapic_phys = __pa(ioapic_phys); } io_apic_set_fixmap(idx, ioapic_phys); apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), ioapic_phys); idx++; ioapic_res->start = ioapic_phys; ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; ioapic_res++; } } void __init ioapic_insert_resources(void) { int i; struct resource *r = ioapic_resources; if (!r) { if (nr_ioapics > 0) printk(KERN_ERR "IO APIC resources couldn't be allocated.\n"); return; } for_each_ioapic(i) { insert_resource(&iomem_resource, r); r++; } } int mp_find_ioapic(u32 gsi) { int i; if (nr_ioapics == 0) return -1; /* Find the IOAPIC that manages this GSI. 
*/ for_each_ioapic(i) { struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i); if (gsi >= gsi_cfg->gsi_base && gsi <= gsi_cfg->gsi_end) return i; } printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); return -1; } int mp_find_ioapic_pin(int ioapic, u32 gsi) { struct mp_ioapic_gsi *gsi_cfg; if (WARN_ON(ioapic < 0)) return -1; gsi_cfg = mp_ioapic_gsi_routing(ioapic); if (WARN_ON(gsi > gsi_cfg->gsi_end)) return -1; return gsi - gsi_cfg->gsi_base; } static int bad_ioapic_register(int idx) { union IO_APIC_reg_00 reg_00; union IO_APIC_reg_01 reg_01; union IO_APIC_reg_02 reg_02; reg_00.raw = io_apic_read(idx, 0); reg_01.raw = io_apic_read(idx, 1); reg_02.raw = io_apic_read(idx, 2); if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) { pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n", mpc_ioapic_addr(idx)); return 1; } return 0; } static int find_free_ioapic_entry(void) { int idx; for (idx = 0; idx < MAX_IO_APICS; idx++) if (ioapics[idx].nr_registers == 0) return idx; return MAX_IO_APICS; } /** * mp_register_ioapic - Register an IOAPIC device * @id: hardware IOAPIC ID * @address: physical address of IOAPIC register area * @gsi_base: base of GSI associated with the IOAPIC * @cfg: configuration information for the IOAPIC */ int mp_register_ioapic(int id, u32 address, u32 gsi_base, struct ioapic_domain_cfg *cfg) { bool hotplug = !!ioapic_initialized; struct mp_ioapic_gsi *gsi_cfg; int idx, ioapic, entries; u32 gsi_end; if (!address) { pr_warn("Bogus (zero) I/O APIC address found, skipping!\n"); return -EINVAL; } for_each_ioapic(ioapic) if (ioapics[ioapic].mp_config.apicaddr == address) { pr_warn("address 0x%x conflicts with IOAPIC%d\n", address, ioapic); return -EEXIST; } idx = find_free_ioapic_entry(); if (idx >= MAX_IO_APICS) { pr_warn("Max # of I/O APICs (%d) exceeded (found %d), skipping\n", MAX_IO_APICS, idx); return -ENOSPC; } ioapics[idx].mp_config.type = MP_IOAPIC; ioapics[idx].mp_config.flags = MPC_APIC_USABLE; ioapics[idx].mp_config.apicaddr = address; io_apic_set_fixmap(FIX_IO_APIC_BASE_0 + idx, address); if (bad_ioapic_register(idx)) { clear_fixmap(FIX_IO_APIC_BASE_0 + idx); return -ENODEV; } ioapics[idx].mp_config.apicid = io_apic_unique_id(idx, id); ioapics[idx].mp_config.apicver = io_apic_get_version(idx); /* * Build basic GSI lookup table to facilitate gsi->io_apic lookups * and to prevent reprogramming of IOAPIC pins (PCI GSIs). */ entries = io_apic_get_redir_entries(idx); gsi_end = gsi_base + entries - 1; for_each_ioapic(ioapic) { gsi_cfg = mp_ioapic_gsi_routing(ioapic); if ((gsi_base >= gsi_cfg->gsi_base && gsi_base <= gsi_cfg->gsi_end) || (gsi_end >= gsi_cfg->gsi_base && gsi_end <= gsi_cfg->gsi_end)) { pr_warn("GSI range [%u-%u] for new IOAPIC conflicts with GSI[%u-%u]\n", gsi_base, gsi_end, gsi_cfg->gsi_base, gsi_cfg->gsi_end); clear_fixmap(FIX_IO_APIC_BASE_0 + idx); return -ENOSPC; } } gsi_cfg = mp_ioapic_gsi_routing(idx); gsi_cfg->gsi_base = gsi_base; gsi_cfg->gsi_end = gsi_end; ioapics[idx].irqdomain = NULL; ioapics[idx].irqdomain_cfg = *cfg; /* * If mp_register_ioapic() is called during early boot stage when * walking ACPI/DT tables, it's too early to create irqdomain, * we are still using bootmem allocator. So delay it to setup_IO_APIC(). 
*/ if (hotplug) { if (mp_irqdomain_create(idx)) { clear_fixmap(FIX_IO_APIC_BASE_0 + idx); return -ENOMEM; } alloc_ioapic_saved_registers(idx); } if (gsi_cfg->gsi_end >= gsi_top) gsi_top = gsi_cfg->gsi_end + 1; if (nr_ioapics <= idx) nr_ioapics = idx + 1; /* Set nr_registers to mark entry present */ ioapics[idx].nr_registers = entries; pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n", idx, mpc_ioapic_id(idx), mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), gsi_cfg->gsi_base, gsi_cfg->gsi_end); return 0; } int mp_unregister_ioapic(u32 gsi_base) { int ioapic, pin; int found = 0; for_each_ioapic(ioapic) if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) { found = 1; break; } if (!found) { pr_warn("can't find IOAPIC for GSI %d\n", gsi_base); return -ENODEV; } for_each_pin(ioapic, pin) { u32 gsi = mp_pin_to_gsi(ioapic, pin); int irq = mp_map_gsi_to_irq(gsi, 0, NULL); struct mp_chip_data *data; if (irq >= 0) { data = irq_get_chip_data(irq); if (data && data->count) { pr_warn("pin%d on IOAPIC%d is still in use.\n", pin, ioapic); return -EBUSY; } } } /* Mark entry not present */ ioapics[ioapic].nr_registers = 0; ioapic_destroy_irqdomain(ioapic); free_ioapic_saved_registers(ioapic); if (ioapics[ioapic].iomem_res) release_resource(ioapics[ioapic].iomem_res); clear_fixmap(FIX_IO_APIC_BASE_0 + ioapic); memset(&ioapics[ioapic], 0, sizeof(ioapics[ioapic])); return 0; } int mp_ioapic_registered(u32 gsi_base) { int ioapic; for_each_ioapic(ioapic) if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) return 1; return 0; } static void mp_irqdomain_get_attr(u32 gsi, struct mp_chip_data *data, struct irq_alloc_info *info) { if (info && info->ioapic.valid) { data->is_level = info->ioapic.is_level; data->active_low = info->ioapic.active_low; } else if (__acpi_get_override_irq(gsi, &data->is_level, &data->active_low) < 0) { /* PCI interrupts are always active low level triggered. */ data->is_level = true; data->active_low = true; } } /* * Configure the I/O-APIC specific fields in the routing entry. * * This is important to setup the I/O-APIC specific bits (is_level, * active_low, masked) because the underlying parent domain will only * provide the routing information and is oblivious of the I/O-APIC * specific bits. * * The entry is just preconfigured at this point and not written into the * RTE. This happens later during activation which will fill in the actual * routing information. */ static void mp_preconfigure_entry(struct mp_chip_data *data) { struct IO_APIC_route_entry *entry = &data->entry; memset(entry, 0, sizeof(*entry)); entry->is_level = data->is_level; entry->active_low = data->active_low; /* * Mask level triggered irqs. Edge triggered irqs are masked * by the irq core code in case they fire. 
*/ entry->masked = data->is_level; } int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { struct irq_alloc_info *info = arg; struct mp_chip_data *data; struct irq_data *irq_data; int ret, ioapic, pin; unsigned long flags; if (!info || nr_irqs > 1) return -EINVAL; irq_data = irq_domain_get_irq_data(domain, virq); if (!irq_data) return -EINVAL; ioapic = mp_irqdomain_ioapic_idx(domain); pin = info->ioapic.pin; if (irq_find_mapping(domain, (irq_hw_number_t)pin) > 0) return -EEXIST; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info); if (ret < 0) { kfree(data); return ret; } INIT_LIST_HEAD(&data->irq_2_pin); irq_data->hwirq = info->ioapic.pin; irq_data->chip = (domain->parent == x86_vector_domain) ? &ioapic_chip : &ioapic_ir_chip; irq_data->chip_data = data; mp_irqdomain_get_attr(mp_pin_to_gsi(ioapic, pin), data, info); add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin); mp_preconfigure_entry(data); mp_register_handler(virq, data->is_level); local_irq_save(flags); if (virq < nr_legacy_irqs()) legacy_pic->mask(virq); local_irq_restore(flags); apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Preconfigured routing entry (%d-%d -> IRQ %d Level:%i ActiveLow:%i)\n", ioapic, mpc_ioapic_id(ioapic), pin, virq, data->is_level, data->active_low); return 0; } void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *irq_data; struct mp_chip_data *data; BUG_ON(nr_irqs != 1); irq_data = irq_domain_get_irq_data(domain, virq); if (irq_data && irq_data->chip_data) { data = irq_data->chip_data; __remove_pin_from_irq(data, mp_irqdomain_ioapic_idx(domain), (int)irq_data->hwirq); WARN_ON(!list_empty(&data->irq_2_pin)); kfree(irq_data->chip_data); } irq_domain_free_irqs_top(domain, virq, nr_irqs); } int mp_irqdomain_activate(struct irq_domain *domain, struct irq_data *irq_data, bool reserve) { unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); ioapic_configure_entry(irq_data); raw_spin_unlock_irqrestore(&ioapic_lock, flags); return 0; } void mp_irqdomain_deactivate(struct irq_domain *domain, struct irq_data *irq_data) { /* It won't be called for IRQ with multiple IOAPIC pins associated */ ioapic_mask_entry(mp_irqdomain_ioapic_idx(domain), (int)irq_data->hwirq); } int mp_irqdomain_ioapic_idx(struct irq_domain *domain) { return (int)(long)domain->host_data; } const struct irq_domain_ops mp_ioapic_irqdomain_ops = { .alloc = mp_irqdomain_alloc, .free = mp_irqdomain_free, .activate = mp_irqdomain_activate, .deactivate = mp_irqdomain_deactivate, };
linux-master
arch/x86/kernel/apic/io_apic.c
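The mp_register_ioapic() path in the file above rejects a new IO-APIC whose GSI window overlaps an already registered one. The standalone sketch below reproduces just that interval-overlap test in user space; it is only an illustration, and the gsi_range struct plus the sample windows are assumptions invented for the example, not kernel definitions.

/* Illustrative user-space sketch of the GSI-range conflict test performed
 * when registering a new IO-APIC. It mirrors the endpoint check used in
 * mp_register_ioapic(); types and sample data are made up. */
#include <stdbool.h>
#include <stdio.h>

struct gsi_range { unsigned int base, end; };	/* inclusive [base, end] */

/* A candidate range conflicts when either of its endpoints falls inside an
 * already registered window. */
static bool gsi_conflict(struct gsi_range newr, struct gsi_range oldr)
{
	return (newr.base >= oldr.base && newr.base <= oldr.end) ||
	       (newr.end  >= oldr.base && newr.end  <= oldr.end);
}

int main(void)
{
	struct gsi_range registered[] = { { 0, 23 }, { 24, 55 } };
	struct gsi_range candidate = { 48, 71 };	/* overlaps the second window */
	unsigned int i;

	for (i = 0; i < sizeof(registered) / sizeof(registered[0]); i++) {
		if (gsi_conflict(candidate, registered[i])) {
			printf("GSI [%u-%u] conflicts with [%u-%u]\n",
			       candidate.base, candidate.end,
			       registered[i].base, registered[i].end);
			return 1;
		}
	}
	printf("GSI [%u-%u] is free\n", candidate.base, candidate.end);
	return 0;
}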
// SPDX-License-Identifier: GPL-2.0-only /* * Support of MSI, HPET and DMAR interrupts. * * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo * Moved from arch/x86/kernel/apic/io_apic.c. * Jiang Liu <[email protected]> * Convert to hierarchical irqdomain */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/dmar.h> #include <linux/hpet.h> #include <linux/msi.h> #include <asm/irqdomain.h> #include <asm/hpet.h> #include <asm/hw_irq.h> #include <asm/apic.h> #include <asm/irq_remapping.h> #include <asm/xen/hypervisor.h> struct irq_domain *x86_pci_msi_default_domain __ro_after_init; static void irq_msi_update_msg(struct irq_data *irqd, struct irq_cfg *cfg) { struct msi_msg msg[2] = { [1] = { }, }; __irq_msi_compose_msg(cfg, msg, false); irq_data_get_irq_chip(irqd)->irq_write_msi_msg(irqd, msg); } static int msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force) { struct irq_cfg old_cfg, *cfg = irqd_cfg(irqd); struct irq_data *parent = irqd->parent_data; unsigned int cpu; int ret; /* Save the current configuration */ cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd)); old_cfg = *cfg; /* Allocate a new target vector */ ret = parent->chip->irq_set_affinity(parent, mask, force); if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) return ret; /* * For non-maskable and non-remapped MSI interrupts the migration * to a different destination CPU and a different vector has to be * done careful to handle the possible stray interrupt which can be * caused by the non-atomic update of the address/data pair. * * Direct update is possible when: * - The MSI is maskable (remapped MSI does not use this code path)). * The quirk bit is not set in this case. * - The new vector is the same as the old vector * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up) * - The interrupt is not yet started up * - The new destination CPU is the same as the old destination CPU */ if (!irqd_msi_nomask_quirk(irqd) || cfg->vector == old_cfg.vector || old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR || !irqd_is_started(irqd) || cfg->dest_apicid == old_cfg.dest_apicid) { irq_msi_update_msg(irqd, cfg); return ret; } /* * Paranoia: Validate that the interrupt target is the local * CPU. */ if (WARN_ON_ONCE(cpu != smp_processor_id())) { irq_msi_update_msg(irqd, cfg); return ret; } /* * Redirect the interrupt to the new vector on the current CPU * first. This might cause a spurious interrupt on this vector if * the device raises an interrupt right between this update and the * update to the final destination CPU. * * If the vector is in use then the installed device handler will * denote it as spurious which is no harm as this is a rare event * and interrupt handlers have to cope with spurious interrupts * anyway. If the vector is unused, then it is marked so it won't * trigger the 'No irq handler for vector' warning in * common_interrupt(). * * This requires to hold vector lock to prevent concurrent updates to * the affected vector. */ lock_vector_lock(); /* * Mark the new target vector on the local CPU if it is currently * unused. Reuse the VECTOR_RETRIGGERED state which is also used in * the CPU hotplug path for a similar purpose. This cannot be * undone here as the current CPU has interrupts disabled and * cannot handle the interrupt before the whole set_affinity() * section is done. In the CPU unplug case, the current CPU is * about to vanish and will not handle any interrupts anymore. 
The * vector is cleaned up when the CPU comes online again. */ if (IS_ERR_OR_NULL(this_cpu_read(vector_irq[cfg->vector]))) this_cpu_write(vector_irq[cfg->vector], VECTOR_RETRIGGERED); /* Redirect it to the new vector on the local CPU temporarily */ old_cfg.vector = cfg->vector; irq_msi_update_msg(irqd, &old_cfg); /* Now transition it to the target CPU */ irq_msi_update_msg(irqd, cfg); /* * All interrupts after this point are now targeted at the new * vector/CPU. * * Drop vector lock before testing whether the temporary assignment * to the local CPU was hit by an interrupt raised in the device, * because the retrigger function acquires vector lock again. */ unlock_vector_lock(); /* * Check whether the transition raced with a device interrupt and * is pending in the local APICs IRR. It is safe to do this outside * of vector lock as the irq_desc::lock of this interrupt is still * held and interrupts are disabled: The check is not accessing the * underlying vector store. It's just checking the local APIC's * IRR. */ if (lapic_vector_set_in_irr(cfg->vector)) irq_data_get_irq_chip(irqd)->irq_retrigger(irqd); return ret; } /** * pci_dev_has_default_msi_parent_domain - Check whether the device has the default * MSI parent domain associated * @dev: Pointer to the PCI device */ bool pci_dev_has_default_msi_parent_domain(struct pci_dev *dev) { struct irq_domain *domain = dev_get_msi_domain(&dev->dev); if (!domain) domain = dev_get_msi_domain(&dev->bus->dev); if (!domain) return false; return domain == x86_vector_domain; } /** * x86_msi_prepare - Setup of msi_alloc_info_t for allocations * @domain: The domain for which this setup happens * @dev: The device for which interrupts are allocated * @nvec: The number of vectors to allocate * @alloc: The allocation info structure to initialize * * This function is to be used for all types of MSI domains above the x86 * vector domain and any intermediates. It is always invoked from the * top level interrupt domain. The domain specific allocation * functionality is determined via the @domain's bus token which allows to * map the X86 specific allocation type. */ static int x86_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *alloc) { struct msi_domain_info *info = domain->host_data; init_irq_alloc_info(alloc, NULL); switch (info->bus_token) { case DOMAIN_BUS_PCI_DEVICE_MSI: alloc->type = X86_IRQ_ALLOC_TYPE_PCI_MSI; return 0; case DOMAIN_BUS_PCI_DEVICE_MSIX: case DOMAIN_BUS_PCI_DEVICE_IMS: alloc->type = X86_IRQ_ALLOC_TYPE_PCI_MSIX; return 0; default: return -EINVAL; } } /** * x86_init_dev_msi_info - Domain info setup for MSI domains * @dev: The device for which the domain should be created * @domain: The (root) domain providing this callback * @real_parent: The real parent domain of the to initialize domain * @info: The domain info for the to initialize domain * * This function is to be used for all types of MSI domains above the x86 * vector domain and any intermediates. The domain specific functionality * is determined via the @real_parent. 
*/ static bool x86_init_dev_msi_info(struct device *dev, struct irq_domain *domain, struct irq_domain *real_parent, struct msi_domain_info *info) { const struct msi_parent_ops *pops = real_parent->msi_parent_ops; /* MSI parent domain specific settings */ switch (real_parent->bus_token) { case DOMAIN_BUS_ANY: /* Only the vector domain can have the ANY token */ if (WARN_ON_ONCE(domain != real_parent)) return false; info->chip->irq_set_affinity = msi_set_affinity; /* See msi_set_affinity() for the gory details */ info->flags |= MSI_FLAG_NOMASK_QUIRK; break; case DOMAIN_BUS_DMAR: case DOMAIN_BUS_AMDVI: break; default: WARN_ON_ONCE(1); return false; } /* Is the target supported? */ switch(info->bus_token) { case DOMAIN_BUS_PCI_DEVICE_MSI: case DOMAIN_BUS_PCI_DEVICE_MSIX: break; case DOMAIN_BUS_PCI_DEVICE_IMS: if (!(pops->supported_flags & MSI_FLAG_PCI_IMS)) return false; break; default: WARN_ON_ONCE(1); return false; } /* * Mask out the domain specific MSI feature flags which are not * supported by the real parent. */ info->flags &= pops->supported_flags; /* Enforce the required flags */ info->flags |= X86_VECTOR_MSI_FLAGS_REQUIRED; /* This is always invoked from the top level MSI domain! */ info->ops->msi_prepare = x86_msi_prepare; info->chip->irq_ack = irq_chip_ack_parent; info->chip->irq_retrigger = irq_chip_retrigger_hierarchy; info->chip->flags |= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP; info->handler = handle_edge_irq; info->handler_name = "edge"; return true; } static const struct msi_parent_ops x86_vector_msi_parent_ops = { .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED, .init_dev_msi_info = x86_init_dev_msi_info, }; struct irq_domain * __init native_create_pci_msi_domain(void) { if (apic_is_disabled) return NULL; x86_vector_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; x86_vector_domain->msi_parent_ops = &x86_vector_msi_parent_ops; return x86_vector_domain; } void __init x86_create_pci_msi_domain(void) { x86_pci_msi_default_domain = x86_init.irqs.create_pci_msi_domain(); } /* Keep around for hyperV */ int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *arg) { init_irq_alloc_info(arg, NULL); if (to_pci_dev(dev)->msix_enabled) arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSIX; else arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSI; return 0; } EXPORT_SYMBOL_GPL(pci_msi_prepare); #ifdef CONFIG_DMAR_TABLE /* * The Intel IOMMU (ab)uses the high bits of the MSI address to contain the * high bits of the destination APIC ID. This can't be done in the general * case for MSIs as it would be targeting real memory above 4GiB not the * APIC. 
*/ static void dmar_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { __irq_msi_compose_msg(irqd_cfg(data), msg, true); } static void dmar_msi_write_msg(struct irq_data *data, struct msi_msg *msg) { dmar_msi_write(data->irq, msg); } static struct irq_chip dmar_msi_controller = { .name = "DMAR-MSI", .irq_unmask = dmar_msi_unmask, .irq_mask = dmar_msi_mask, .irq_ack = irq_chip_ack_parent, .irq_set_affinity = msi_domain_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_compose_msi_msg = dmar_msi_compose_msg, .irq_write_msi_msg = dmar_msi_write_msg, .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP, }; static int dmar_msi_init(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg) { irq_domain_set_info(domain, virq, arg->devid, info->chip, NULL, handle_edge_irq, arg->data, "edge"); return 0; } static struct msi_domain_ops dmar_msi_domain_ops = { .msi_init = dmar_msi_init, }; static struct msi_domain_info dmar_msi_domain_info = { .ops = &dmar_msi_domain_ops, .chip = &dmar_msi_controller, .flags = MSI_FLAG_USE_DEF_DOM_OPS, }; static struct irq_domain *dmar_get_irq_domain(void) { static struct irq_domain *dmar_domain; static DEFINE_MUTEX(dmar_lock); struct fwnode_handle *fn; mutex_lock(&dmar_lock); if (dmar_domain) goto out; fn = irq_domain_alloc_named_fwnode("DMAR-MSI"); if (fn) { dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info, x86_vector_domain); if (!dmar_domain) irq_domain_free_fwnode(fn); } out: mutex_unlock(&dmar_lock); return dmar_domain; } int dmar_alloc_hwirq(int id, int node, void *arg) { struct irq_domain *domain = dmar_get_irq_domain(); struct irq_alloc_info info; if (!domain) return -1; init_irq_alloc_info(&info, NULL); info.type = X86_IRQ_ALLOC_TYPE_DMAR; info.devid = id; info.hwirq = id; info.data = arg; return irq_domain_alloc_irqs(domain, 1, node, &info); } void dmar_free_hwirq(int irq) { irq_domain_free_irqs(irq, 1); } #endif bool arch_restore_msi_irqs(struct pci_dev *dev) { return xen_initdom_restore_msi(dev); }
linux-master
arch/x86/kernel/apic/msi.c
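msi_set_affinity() above only takes the careful two-step retarget path when none of its documented fast-path conditions hold. The sketch below lifts that decision into a small standalone predicate so the ordering of the checks is easier to read; the msi_move struct, its field names, and the placeholder vector value are assumptions for the example, not kernel structures.

/* Standalone sketch of the "can we update the MSI message directly?" test
 * from msi_set_affinity(). Purely illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define MANAGED_IRQ_SHUTDOWN_VECTOR 0xef	/* placeholder value for the sketch */

struct msi_move {
	bool maskable;			/* MSI can be masked, no nomask quirk */
	bool started;			/* interrupt has been started up */
	unsigned int old_vector, new_vector;
	unsigned int old_cpu, new_cpu;
};

static bool msi_direct_update_ok(const struct msi_move *m)
{
	/* Mirrors the conditions listed in the msi_set_affinity() comment:
	 * any single one of them makes the non-atomic address/data update
	 * safe to perform in place. */
	return m->maskable ||
	       m->new_vector == m->old_vector ||
	       m->old_vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
	       !m->started ||
	       m->new_cpu == m->old_cpu;
}

int main(void)
{
	struct msi_move m = {
		.maskable = false, .started = true,
		.old_vector = 0x30, .new_vector = 0x41,
		.old_cpu = 2, .new_cpu = 5,
	};

	printf("direct update %s\n",
	       msi_direct_update_ok(&m) ? "possible" : "needs two-step retarget");
	return 0;
}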
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2004 James Cleverdon, IBM. * * Generic APIC sub-arch probe layer. * * Hacked for x86-64 by James Cleverdon from i386 architecture code by * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. */ #include <linux/thread_info.h> #include <asm/apic.h> #include "local.h" /* Select the appropriate APIC driver */ void __init x86_64_probe_apic(void) { struct apic **drv; enable_IR_x2apic(); for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) { if ((*drv)->probe && (*drv)->probe()) { apic_install_driver(*drv); break; } } } int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { struct apic **drv; for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) { if ((*drv)->acpi_madt_oem_check(oem_id, oem_table_id)) { apic_install_driver(*drv); return 1; } } return 0; }
linux-master
arch/x86/kernel/apic/probe_64.c
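x86_64_probe_apic() above walks a table of driver descriptors and installs the first one whose probe() hook succeeds. The fragment below shows the same first-match pattern with plain function pointers; the driver names and probe results are invented for the illustration.

/* Minimal illustration of the first-match probe loop used by
 * x86_64_probe_apic(); the drivers here are fictitious. */
#include <stdbool.h>
#include <stdio.h>

struct fake_apic_driver {
	const char *name;
	bool (*probe)(void);
};

static bool probe_no(void)  { return false; }
static bool probe_yes(void) { return true; }

static const struct fake_apic_driver drivers[] = {
	{ "x2apic cluster", probe_no },
	{ "x2apic phys",    probe_no },
	{ "flat",           probe_yes },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(drivers) / sizeof(drivers[0]); i++) {
		if (drivers[i].probe && drivers[i].probe()) {
			printf("installing APIC driver: %s\n", drivers[i].name);
			break;	/* first successful probe wins */
		}
	}
	return 0;
}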
// SPDX-License-Identifier: GPL-2.0 /* * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs. * * Drives the local APIC in "clustered mode". */ #include <linux/cpumask.h> #include <linux/dmi.h> #include <linux/smp.h> #include <asm/apic.h> #include <asm/io_apic.h> #include "local.h" static unsigned bigsmp_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; } static bool bigsmp_check_apicid_used(physid_mask_t *map, int apicid) { return false; } static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) { /* For clustered we don't have a good way to do this yet - hack */ physids_promote(0xFFL, retmap); } static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } static void bigsmp_send_IPI_allbutself(int vector) { default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); } static void bigsmp_send_IPI_all(int vector) { default_send_IPI_mask_sequence_phys(cpu_online_mask, vector); } static int dmi_bigsmp; /* can be set by dmi scanners */ static int hp_ht_bigsmp(const struct dmi_system_id *d) { printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); dmi_bigsmp = 1; return 0; } static const struct dmi_system_id bigsmp_dmi_table[] = { { hp_ht_bigsmp, "HP ProLiant DL760 G2", { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), DMI_MATCH(DMI_BIOS_VERSION, "P44-"), } }, { hp_ht_bigsmp, "HP ProLiant DL740", { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), DMI_MATCH(DMI_BIOS_VERSION, "P47-"), } }, { } /* NULL entry stops DMI scanning */ }; static int probe_bigsmp(void) { return dmi_check_system(bigsmp_dmi_table); } static struct apic apic_bigsmp __ro_after_init = { .name = "bigsmp", .probe = probe_bigsmp, .delivery_mode = APIC_DELIVERY_MODE_FIXED, .dest_mode_logical = false, .disable_esr = 1, .check_apicid_used = bigsmp_check_apicid_used, .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, .cpu_present_to_apicid = default_cpu_present_to_apicid, .phys_pkg_id = bigsmp_phys_pkg_id, .max_apic_id = 0xFE, .get_apic_id = bigsmp_get_apic_id, .set_apic_id = NULL, .calc_dest_apicid = apic_default_calc_apicid, .send_IPI = default_send_IPI_single_phys, .send_IPI_mask = default_send_IPI_mask_sequence_phys, .send_IPI_mask_allbutself = NULL, .send_IPI_allbutself = bigsmp_send_IPI_allbutself, .send_IPI_all = bigsmp_send_IPI_all, .send_IPI_self = default_send_IPI_self, .read = native_apic_mem_read, .write = native_apic_mem_write, .eoi = native_apic_mem_eoi, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, .wait_icr_idle = apic_mem_wait_icr_idle, .safe_wait_icr_idle = apic_mem_wait_icr_idle_timeout, }; bool __init apic_bigsmp_possible(bool cmdline_override) { return apic == &apic_bigsmp || !cmdline_override; } void __init apic_bigsmp_force(void) { if (apic != &apic_bigsmp) apic_install_driver(&apic_bigsmp); } apic_driver(apic_bigsmp);
linux-master
arch/x86/kernel/apic/bigsmp_32.c
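The bigsmp driver reads the physical APIC ID out of bits 31:24 of the APIC ID register value and derives a package ID by shifting off the low topology bits. The snippet below only demonstrates those two bit operations; the register value and the index_msb of 3 are arbitrary example inputs.

/* Demonstration of the xAPIC ID extraction and package-ID shift used by the
 * bigsmp driver. Example values are made up. */
#include <stdio.h>

static unsigned int get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFF;	/* xAPIC keeps the ID in bits 31:24 */
}

static int phys_pkg_id(int cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;	/* drop SMT/core bits, keep package */
}

int main(void)
{
	unsigned long apic_id_reg = 0x17000000;	/* example: APIC ID 0x17 */
	unsigned int id = get_apic_id(apic_id_reg);

	/* With 3 low bits used for cores/threads, APIC ID 0x17 -> package 2 */
	printf("apic id %#x, package %d\n", id, phys_pkg_id((int)id, 3));
	return 0;
}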
// SPDX-License-Identifier: GPL-2.0 #include <linux/cpuhotplug.h> #include <linux/cpumask.h> #include <linux/slab.h> #include <linux/mm.h> #include <asm/apic.h> #include "local.h" #define apic_cluster(apicid) ((apicid) >> 4) /* * __x2apic_send_IPI_mask() possibly needs to read * x86_cpu_to_logical_apicid for all online cpus in a sequential way. * Using per cpu variable would cost one cache line per cpu. */ static u32 *x86_cpu_to_logical_apicid __read_mostly; static DEFINE_PER_CPU(cpumask_var_t, ipi_mask); static DEFINE_PER_CPU_READ_MOSTLY(struct cpumask *, cluster_masks); static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return x2apic_enabled(); } static void x2apic_send_IPI(int cpu, int vector) { u32 dest = x86_cpu_to_logical_apicid[cpu]; /* x2apic MSRs are special and need a special fence: */ weak_wrmsr_fence(); __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL); } static void __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) { unsigned int cpu, clustercpu; struct cpumask *tmpmsk; unsigned long flags; u32 dest; /* x2apic MSRs are special and need a special fence: */ weak_wrmsr_fence(); local_irq_save(flags); tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask); cpumask_copy(tmpmsk, mask); /* If IPI should not be sent to self, clear current CPU */ if (apic_dest != APIC_DEST_ALLINC) __cpumask_clear_cpu(smp_processor_id(), tmpmsk); /* Collapse cpus in a cluster so a single IPI per cluster is sent */ for_each_cpu(cpu, tmpmsk) { struct cpumask *cmsk = per_cpu(cluster_masks, cpu); dest = 0; for_each_cpu_and(clustercpu, tmpmsk, cmsk) dest |= x86_cpu_to_logical_apicid[clustercpu]; if (!dest) continue; __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL); /* Remove cluster CPUs from tmpmask */ cpumask_andnot(tmpmsk, tmpmsk, cmsk); } local_irq_restore(flags); } static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) { __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC); } static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT); } static u32 x2apic_calc_apicid(unsigned int cpu) { return x86_cpu_to_logical_apicid[cpu]; } static void init_x2apic_ldr(void) { struct cpumask *cmsk = this_cpu_read(cluster_masks); BUG_ON(!cmsk); cpumask_set_cpu(smp_processor_id(), cmsk); } /* * As an optimisation during boot, set the cluster_mask for all present * CPUs at once, to prevent each of them having to iterate over the others * to find the existing cluster_mask. */ static void prefill_clustermask(struct cpumask *cmsk, unsigned int cpu, u32 cluster) { int cpu_i; for_each_present_cpu(cpu_i) { struct cpumask **cpu_cmsk = &per_cpu(cluster_masks, cpu_i); u32 apicid = apic->cpu_present_to_apicid(cpu_i); if (apicid == BAD_APICID || cpu_i == cpu || apic_cluster(apicid) != cluster) continue; if (WARN_ON_ONCE(*cpu_cmsk == cmsk)) continue; BUG_ON(*cpu_cmsk); *cpu_cmsk = cmsk; } } static int alloc_clustermask(unsigned int cpu, u32 cluster, int node) { struct cpumask *cmsk = NULL; unsigned int cpu_i; /* * At boot time, the CPU present mask is stable. The cluster mask is * allocated for the first CPU in the cluster and propagated to all * present siblings in the cluster. If the cluster mask is already set * on entry to this function for a given CPU, there is nothing to do. 
*/ if (per_cpu(cluster_masks, cpu)) return 0; if (system_state < SYSTEM_RUNNING) goto alloc; /* * On post boot hotplug for a CPU which was not present at boot time, * iterate over all possible CPUs (even those which are not present * any more) to find any existing cluster mask. */ for_each_possible_cpu(cpu_i) { u32 apicid = apic->cpu_present_to_apicid(cpu_i); if (apicid != BAD_APICID && apic_cluster(apicid) == cluster) { cmsk = per_cpu(cluster_masks, cpu_i); /* * If the cluster is already initialized, just store * the mask and return. There's no need to propagate. */ if (cmsk) { per_cpu(cluster_masks, cpu) = cmsk; return 0; } } } /* * No CPU in the cluster has ever been initialized, so fall through to * the boot time code which will also populate the cluster mask for any * other CPU in the cluster which is (now) present. */ alloc: cmsk = kzalloc_node(sizeof(*cmsk), GFP_KERNEL, node); if (!cmsk) return -ENOMEM; per_cpu(cluster_masks, cpu) = cmsk; prefill_clustermask(cmsk, cpu, cluster); return 0; } static int x2apic_prepare_cpu(unsigned int cpu) { u32 phys_apicid = apic->cpu_present_to_apicid(cpu); u32 cluster = apic_cluster(phys_apicid); u32 logical_apicid = (cluster << 16) | (1 << (phys_apicid & 0xf)); x86_cpu_to_logical_apicid[cpu] = logical_apicid; if (alloc_clustermask(cpu, cluster, cpu_to_node(cpu)) < 0) return -ENOMEM; if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) return -ENOMEM; return 0; } static int x2apic_dead_cpu(unsigned int dead_cpu) { struct cpumask *cmsk = per_cpu(cluster_masks, dead_cpu); if (cmsk) cpumask_clear_cpu(dead_cpu, cmsk); free_cpumask_var(per_cpu(ipi_mask, dead_cpu)); return 0; } static int x2apic_cluster_probe(void) { u32 slots; if (!x2apic_mode) return 0; slots = max_t(u32, L1_CACHE_BYTES/sizeof(u32), nr_cpu_ids); x86_cpu_to_logical_apicid = kcalloc(slots, sizeof(u32), GFP_KERNEL); if (!x86_cpu_to_logical_apicid) return 0; if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare", x2apic_prepare_cpu, x2apic_dead_cpu) < 0) { pr_err("Failed to register X2APIC_PREPARE\n"); kfree(x86_cpu_to_logical_apicid); x86_cpu_to_logical_apicid = NULL; return 0; } init_x2apic_ldr(); return 1; } static struct apic apic_x2apic_cluster __ro_after_init = { .name = "cluster x2apic", .probe = x2apic_cluster_probe, .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, .delivery_mode = APIC_DELIVERY_MODE_FIXED, .dest_mode_logical = true, .disable_esr = 0, .check_apicid_used = NULL, .init_apic_ldr = init_x2apic_ldr, .ioapic_phys_id_map = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .phys_pkg_id = x2apic_phys_pkg_id, .max_apic_id = UINT_MAX, .x2apic_set_max_apicid = true, .get_apic_id = x2apic_get_apic_id, .set_apic_id = x2apic_set_apic_id, .calc_dest_apicid = x2apic_calc_apicid, .send_IPI = x2apic_send_IPI, .send_IPI_mask = x2apic_send_IPI_mask, .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, .send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, .read = native_apic_msr_read, .write = native_apic_msr_write, .eoi = native_apic_msr_eoi, .icr_read = native_x2apic_icr_read, .icr_write = native_x2apic_icr_write, }; apic_driver(apic_x2apic_cluster);
linux-master
arch/x86/kernel/apic/x2apic_cluster.c
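x2apic_prepare_cpu() above builds each CPU's logical ID as (cluster << 16) | (1 << (apicid & 0xf)), and __x2apic_send_IPI_mask() then ORs those IDs together so a whole cluster can be covered by a single IPI. The sketch below reproduces both steps for a handful of invented physical APIC IDs; the pending[] array stands in for the kernel's cpumask handling and is an assumption of the example.

/* User-space sketch of the x2apic cluster logical-ID encoding and of
 * collapsing several targets in one cluster into a single destination word. */
#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4

static unsigned int logical_apicid(unsigned int phys)
{
	unsigned int cluster = phys >> 4;		/* 16 CPUs per cluster */

	return (cluster << 16) | (1u << (phys & 0xf));
}

int main(void)
{
	/* Three targets in cluster 2 (phys 0x20..0x2f) and one in cluster 3 */
	unsigned int phys[NCPUS] = { 0x21, 0x23, 0x2e, 0x31 };
	bool pending[NCPUS] = { true, true, true, true };
	unsigned int i, j;

	for (i = 0; i < NCPUS; i++) {
		unsigned int cluster, dest = 0;

		if (!pending[i])
			continue;
		cluster = phys[i] >> 4;

		/* OR all still-pending targets of this cluster into one word
		 * and drop them from the pending set (cpumask_andnot() in the
		 * kernel), so each cluster gets exactly one IPI. */
		for (j = 0; j < NCPUS; j++) {
			if (pending[j] && (phys[j] >> 4) == cluster) {
				dest |= logical_apicid(phys[j]);
				pending[j] = false;
			}
		}
		printf("cluster %u: send one IPI to dest %#x\n", cluster, dest);
	}
	return 0;
}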
/* * Common functions shared between the various APIC flavours * * SPDX-License-Identifier: GPL-2.0 */ #include <linux/irq.h> #include <asm/apic.h> #include "local.h" u32 apic_default_calc_apicid(unsigned int cpu) { return per_cpu(x86_cpu_to_apicid, cpu); } u32 apic_flat_calc_apicid(unsigned int cpu) { return 1U << cpu; } bool default_check_apicid_used(physid_mask_t *map, int apicid) { return physid_isset(apicid, *map); } void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) { *retmap = *phys_map; } int default_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) return (int)per_cpu(x86_cpu_to_apicid, mps_cpu); else return BAD_APICID; } EXPORT_SYMBOL_GPL(default_cpu_present_to_apicid); bool default_apic_id_registered(void) { return physid_isset(read_apic_id(), phys_cpu_present_map); } /* * Set up the logical destination ID when the APIC operates in logical * destination mode. */ void default_init_apic_ldr(void) { unsigned long val; apic_write(APIC_DFR, APIC_DFR_FLAT); val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); apic_write(APIC_LDR, val); }
linux-master
arch/x86/kernel/apic/apic_common.c
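In flat logical mode each CPU's destination ID is a single bit, 1U << cpu, as apic_flat_calc_apicid() shows, which is why that mode is limited to eight CPUs. The toy program below merely prints the bit-per-CPU encoding and the combined mask under that assumption.

/* Illustration of the one-bit-per-CPU logical destination encoding used by
 * apic_flat_calc_apicid(); purely a demonstration of the bit layout. */
#include <stdio.h>

int main(void)
{
	unsigned int cpu, all = 0;

	for (cpu = 0; cpu < 8; cpu++) {	/* flat logical mode: at most 8 CPUs */
		unsigned int dest = 1u << cpu;

		all |= dest;
		printf("cpu %u -> logical dest %#04x\n", cpu, dest);
	}
	printf("mask covering all 8 CPUs: %#04x\n", all);
	return 0;
}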
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Numascale NumaConnect-Specific APIC Code * * Copyright (C) 2011 Numascale AS. All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/types.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/numachip/numachip.h> #include <asm/numachip/numachip_csr.h> #include "local.h" u8 numachip_system __read_mostly; static const struct apic apic_numachip1; static const struct apic apic_numachip2; static void (*numachip_apic_icr_write)(int apicid, unsigned int val) __read_mostly; static unsigned int numachip1_get_apic_id(unsigned long x) { unsigned long value; unsigned int id = (x >> 24) & 0xff; if (static_cpu_has(X86_FEATURE_NODEID_MSR)) { rdmsrl(MSR_FAM10H_NODE_ID, value); id |= (value << 2) & 0xff00; } return id; } static u32 numachip1_set_apic_id(unsigned int id) { return (id & 0xff) << 24; } static unsigned int numachip2_get_apic_id(unsigned long x) { u64 mcfg; rdmsrl(MSR_FAM10H_MMIO_CONF_BASE, mcfg); return ((mcfg >> (28 - 8)) & 0xfff00) | (x >> 24); } static u32 numachip2_set_apic_id(unsigned int id) { return id << 24; } static int numachip_phys_pkg_id(int initial_apic_id, int index_msb) { return initial_apic_id >> index_msb; } static void numachip1_apic_icr_write(int apicid, unsigned int val) { write_lcsr(CSR_G3_EXT_IRQ_GEN, (apicid << 16) | val); } static void numachip2_apic_icr_write(int apicid, unsigned int val) { numachip2_write32_lcsr(NUMACHIP2_APIC_ICR, (apicid << 12) | val); } static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip) { numachip_apic_icr_write(phys_apicid, APIC_DM_INIT); numachip_apic_icr_write(phys_apicid, APIC_DM_STARTUP | (start_rip >> 12)); return 0; } static void numachip_send_IPI_one(int cpu, int vector) { int local_apicid, apicid = per_cpu(x86_cpu_to_apicid, cpu); unsigned int dmode; preempt_disable(); local_apicid = __this_cpu_read(x86_cpu_to_apicid); /* Send via local APIC where non-local part matches */ if (!((apicid ^ local_apicid) >> NUMACHIP_LAPIC_BITS)) { unsigned long flags; local_irq_save(flags); __default_send_IPI_dest_field(apicid, vector, APIC_DEST_PHYSICAL); local_irq_restore(flags); preempt_enable(); return; } preempt_enable(); dmode = (vector == NMI_VECTOR) ? 
APIC_DM_NMI : APIC_DM_FIXED; numachip_apic_icr_write(apicid, dmode | vector); } static void numachip_send_IPI_mask(const struct cpumask *mask, int vector) { unsigned int cpu; for_each_cpu(cpu, mask) numachip_send_IPI_one(cpu, vector); } static void numachip_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { unsigned int this_cpu = smp_processor_id(); unsigned int cpu; for_each_cpu(cpu, mask) { if (cpu != this_cpu) numachip_send_IPI_one(cpu, vector); } } static void numachip_send_IPI_allbutself(int vector) { unsigned int this_cpu = smp_processor_id(); unsigned int cpu; for_each_online_cpu(cpu) { if (cpu != this_cpu) numachip_send_IPI_one(cpu, vector); } } static void numachip_send_IPI_all(int vector) { numachip_send_IPI_mask(cpu_online_mask, vector); } static void numachip_send_IPI_self(int vector) { apic_write(APIC_SELF_IPI, vector); } static int __init numachip1_probe(void) { return apic == &apic_numachip1; } static int __init numachip2_probe(void) { return apic == &apic_numachip2; } static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) { u64 val; u32 nodes = 1; this_cpu_write(cpu_llc_id, node); /* Account for nodes per socket in multi-core-module processors */ if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) { rdmsrl(MSR_FAM10H_NODE_ID, val); nodes = ((val >> 3) & 7) + 1; } c->phys_proc_id = node / nodes; } static int __init numachip_system_init(void) { /* Map the LCSR area and set up the apic_icr_write function */ switch (numachip_system) { case 1: init_extra_mapping_uc(NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_SIZE); numachip_apic_icr_write = numachip1_apic_icr_write; break; case 2: init_extra_mapping_uc(NUMACHIP2_LCSR_BASE, NUMACHIP2_LCSR_SIZE); numachip_apic_icr_write = numachip2_apic_icr_write; break; default: return 0; } x86_cpuinit.fixup_cpu_id = fixup_cpu_id; x86_init.pci.arch_init = pci_numachip_init; return 0; } early_initcall(numachip_system_init); static int numachip1_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { if ((strncmp(oem_id, "NUMASC", 6) != 0) || (strncmp(oem_table_id, "NCONNECT", 8) != 0)) return 0; numachip_system = 1; return 1; } static int numachip2_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { if ((strncmp(oem_id, "NUMASC", 6) != 0) || (strncmp(oem_table_id, "NCONECT2", 8) != 0)) return 0; numachip_system = 2; return 1; } static const struct apic apic_numachip1 __refconst = { .name = "NumaConnect system", .probe = numachip1_probe, .acpi_madt_oem_check = numachip1_acpi_madt_oem_check, .delivery_mode = APIC_DELIVERY_MODE_FIXED, .dest_mode_logical = false, .disable_esr = 0, .cpu_present_to_apicid = default_cpu_present_to_apicid, .phys_pkg_id = numachip_phys_pkg_id, .max_apic_id = UINT_MAX, .get_apic_id = numachip1_get_apic_id, .set_apic_id = numachip1_set_apic_id, .calc_dest_apicid = apic_default_calc_apicid, .send_IPI = numachip_send_IPI_one, .send_IPI_mask = numachip_send_IPI_mask, .send_IPI_mask_allbutself = numachip_send_IPI_mask_allbutself, .send_IPI_allbutself = numachip_send_IPI_allbutself, .send_IPI_all = numachip_send_IPI_all, .send_IPI_self = numachip_send_IPI_self, .wakeup_secondary_cpu = numachip_wakeup_secondary, .read = native_apic_mem_read, .write = native_apic_mem_write, .eoi = native_apic_mem_eoi, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, }; apic_driver(apic_numachip1); static const struct apic apic_numachip2 __refconst = { .name = "NumaConnect2 system", .probe = numachip2_probe, .acpi_madt_oem_check = numachip2_acpi_madt_oem_check, .delivery_mode = APIC_DELIVERY_MODE_FIXED, .dest_mode_logical = 
false, .disable_esr = 0, .cpu_present_to_apicid = default_cpu_present_to_apicid, .phys_pkg_id = numachip_phys_pkg_id, .max_apic_id = UINT_MAX, .get_apic_id = numachip2_get_apic_id, .set_apic_id = numachip2_set_apic_id, .calc_dest_apicid = apic_default_calc_apicid, .send_IPI = numachip_send_IPI_one, .send_IPI_mask = numachip_send_IPI_mask, .send_IPI_mask_allbutself = numachip_send_IPI_mask_allbutself, .send_IPI_allbutself = numachip_send_IPI_allbutself, .send_IPI_all = numachip_send_IPI_all, .send_IPI_self = numachip_send_IPI_self, .wakeup_secondary_cpu = numachip_wakeup_secondary, .read = native_apic_mem_read, .write = native_apic_mem_write, .eoi = native_apic_mem_eoi, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, }; apic_driver(apic_numachip2);
linux-master
arch/x86/kernel/apic/apic_numachip.c
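numachip_send_IPI_one() above compares the high (node) bits of the target APIC ID with the local CPU's ID and only goes through the NumaConnect fabric when they differ. The sketch below isolates that XOR-and-shift test; the 8-bit field width and the sample IDs are assumptions made for the example, not the driver's actual constants.

/* Sketch of the local-vs-remote decision in numachip_send_IPI_one(): if the
 * bits above the local-APIC field match, the target sits on the same node
 * and the plain local APIC path can be used. Field width is assumed. */
#include <stdbool.h>
#include <stdio.h>

#define LAPIC_BITS 8	/* assumed width of the per-node APIC ID field */

static bool same_node(unsigned int apicid, unsigned int local_apicid)
{
	/* XOR cancels matching low bits; anything left above LAPIC_BITS
	 * means the two IDs belong to different nodes. */
	return !((apicid ^ local_apicid) >> LAPIC_BITS);
}

int main(void)
{
	unsigned int local = 0x0304;	/* node 3, local APIC 4 (made up) */

	printf("0x0307: %s\n", same_node(0x0307, local) ? "local node" : "remote node");
	printf("0x0504: %s\n", same_node(0x0504, local) ? "local node" : "remote node");
	return 0;
}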
// SPDX-License-Identifier: GPL-2.0-only #define pr_fmt(fmt) "APIC: " fmt #include <asm/apic.h> #include "local.h" /* * Use DEFINE_STATIC_CALL_NULL() to avoid having to provide stub functions * for each callback. The callbacks are setup during boot and all except * wait_icr_idle() must be initialized before usage. The IPI wrappers * use static_call() and not static_call_cond() to catch any fails. */ #define DEFINE_APIC_CALL(__cb) \ DEFINE_STATIC_CALL_NULL(apic_call_##__cb, *apic->__cb) DEFINE_APIC_CALL(eoi); DEFINE_APIC_CALL(native_eoi); DEFINE_APIC_CALL(icr_read); DEFINE_APIC_CALL(icr_write); DEFINE_APIC_CALL(read); DEFINE_APIC_CALL(send_IPI); DEFINE_APIC_CALL(send_IPI_mask); DEFINE_APIC_CALL(send_IPI_mask_allbutself); DEFINE_APIC_CALL(send_IPI_allbutself); DEFINE_APIC_CALL(send_IPI_all); DEFINE_APIC_CALL(send_IPI_self); DEFINE_APIC_CALL(wait_icr_idle); DEFINE_APIC_CALL(wakeup_secondary_cpu); DEFINE_APIC_CALL(wakeup_secondary_cpu_64); DEFINE_APIC_CALL(write); EXPORT_STATIC_CALL_TRAMP_GPL(apic_call_send_IPI_mask); EXPORT_STATIC_CALL_TRAMP_GPL(apic_call_send_IPI_self); /* The container for function call overrides */ struct apic_override __x86_apic_override __initdata; #define apply_override(__cb) \ if (__x86_apic_override.__cb) \ apic->__cb = __x86_apic_override.__cb static __init void restore_override_callbacks(void) { apply_override(eoi); apply_override(native_eoi); apply_override(write); apply_override(read); apply_override(send_IPI); apply_override(send_IPI_mask); apply_override(send_IPI_mask_allbutself); apply_override(send_IPI_allbutself); apply_override(send_IPI_all); apply_override(send_IPI_self); apply_override(icr_read); apply_override(icr_write); apply_override(wakeup_secondary_cpu); apply_override(wakeup_secondary_cpu_64); } #define update_call(__cb) \ static_call_update(apic_call_##__cb, *apic->__cb) static __init void update_static_calls(void) { update_call(eoi); update_call(native_eoi); update_call(write); update_call(read); update_call(send_IPI); update_call(send_IPI_mask); update_call(send_IPI_mask_allbutself); update_call(send_IPI_allbutself); update_call(send_IPI_all); update_call(send_IPI_self); update_call(icr_read); update_call(icr_write); update_call(wait_icr_idle); update_call(wakeup_secondary_cpu); update_call(wakeup_secondary_cpu_64); } void __init apic_setup_apic_calls(void) { /* Ensure that the default APIC has native_eoi populated */ apic->native_eoi = apic->eoi; update_static_calls(); pr_info("Static calls initialized\n"); } void __init apic_install_driver(struct apic *driver) { if (apic == driver) return; apic = driver; if (IS_ENABLED(CONFIG_X86_X2APIC) && apic->x2apic_set_max_apicid) apic->max_apic_id = x2apic_max_apicid; /* Copy the original eoi() callback as KVM/HyperV might overwrite it */ if (!apic->native_eoi) apic->native_eoi = apic->eoi; /* Apply any already installed callback overrides */ restore_override_callbacks(); update_static_calls(); pr_info("Switched APIC routing to: %s\n", driver->name); }
linux-master
arch/x86/kernel/apic/init.c
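apic_install_driver() above copies any callbacks recorded in __x86_apic_override over the freshly installed driver before the static calls are updated. The fragment below models that "apply non-NULL overrides onto defaults" step with ordinary function pointers; the ops layout and hooks are invented for the illustration and are not the kernel's apic structure.

/* Toy model of the override-application step in apic_install_driver():
 * any non-NULL hook in the override struct replaces the driver default. */
#include <stdio.h>

struct toy_ops {
	void (*eoi)(void);
	void (*write)(unsigned int reg, unsigned int val);
};

static void default_eoi(void) { puts("default eoi"); }
static void default_write(unsigned int r, unsigned int v) { printf("write %#x = %#x\n", r, v); }
static void hv_eoi(void)      { puts("hypervisor-assisted eoi"); }

static void apply_overrides(struct toy_ops *ops, const struct toy_ops *ovr)
{
	/* Mirror the apply_override() macro: only replace what is set */
	if (ovr->eoi)
		ops->eoi = ovr->eoi;
	if (ovr->write)
		ops->write = ovr->write;
}

int main(void)
{
	struct toy_ops ops = { .eoi = default_eoi, .write = default_write };
	struct toy_ops override = { .eoi = hv_eoi };	/* only eoi is overridden */

	apply_overrides(&ops, &override);
	ops.eoi();
	ops.write(0x80, 0);
	return 0;
}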
// SPDX-License-Identifier: GPL-2.0-only /* * Local APIC related interfaces to support IOAPIC, MSI, etc. * * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo * Moved from arch/x86/kernel/apic/io_apic.c. * Jiang Liu <[email protected]> * Enable support of hierarchical irqdomains */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/compiler.h> #include <linux/slab.h> #include <asm/irqdomain.h> #include <asm/hw_irq.h> #include <asm/traps.h> #include <asm/apic.h> #include <asm/i8259.h> #include <asm/desc.h> #include <asm/irq_remapping.h> #include <asm/trace/irq_vectors.h> struct apic_chip_data { struct irq_cfg hw_irq_cfg; unsigned int vector; unsigned int prev_vector; unsigned int cpu; unsigned int prev_cpu; unsigned int irq; struct hlist_node clist; unsigned int move_in_progress : 1, is_managed : 1, can_reserve : 1, has_reserved : 1; }; struct irq_domain *x86_vector_domain; EXPORT_SYMBOL_GPL(x86_vector_domain); static DEFINE_RAW_SPINLOCK(vector_lock); static cpumask_var_t vector_searchmask; static struct irq_chip lapic_controller; static struct irq_matrix *vector_matrix; #ifdef CONFIG_SMP static void vector_cleanup_callback(struct timer_list *tmr); struct vector_cleanup { struct hlist_head head; struct timer_list timer; }; static DEFINE_PER_CPU(struct vector_cleanup, vector_cleanup) = { .head = HLIST_HEAD_INIT, .timer = __TIMER_INITIALIZER(vector_cleanup_callback, TIMER_PINNED), }; #endif void lock_vector_lock(void) { /* Used to the online set of cpus does not change * during assign_irq_vector. */ raw_spin_lock(&vector_lock); } void unlock_vector_lock(void) { raw_spin_unlock(&vector_lock); } void init_irq_alloc_info(struct irq_alloc_info *info, const struct cpumask *mask) { memset(info, 0, sizeof(*info)); info->mask = mask; } void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src) { if (src) *dst = *src; else memset(dst, 0, sizeof(*dst)); } static struct apic_chip_data *apic_chip_data(struct irq_data *irqd) { if (!irqd) return NULL; while (irqd->parent_data) irqd = irqd->parent_data; return irqd->chip_data; } struct irq_cfg *irqd_cfg(struct irq_data *irqd) { struct apic_chip_data *apicd = apic_chip_data(irqd); return apicd ? 
&apicd->hw_irq_cfg : NULL; } EXPORT_SYMBOL_GPL(irqd_cfg); struct irq_cfg *irq_cfg(unsigned int irq) { return irqd_cfg(irq_get_irq_data(irq)); } static struct apic_chip_data *alloc_apic_chip_data(int node) { struct apic_chip_data *apicd; apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node); if (apicd) INIT_HLIST_NODE(&apicd->clist); return apicd; } static void free_apic_chip_data(struct apic_chip_data *apicd) { kfree(apicd); } static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector, unsigned int cpu) { struct apic_chip_data *apicd = apic_chip_data(irqd); lockdep_assert_held(&vector_lock); apicd->hw_irq_cfg.vector = vector; apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu); irq_data_update_effective_affinity(irqd, cpumask_of(cpu)); trace_vector_config(irqd->irq, vector, cpu, apicd->hw_irq_cfg.dest_apicid); } static void apic_update_vector(struct irq_data *irqd, unsigned int newvec, unsigned int newcpu) { struct apic_chip_data *apicd = apic_chip_data(irqd); struct irq_desc *desc = irq_data_to_desc(irqd); bool managed = irqd_affinity_is_managed(irqd); lockdep_assert_held(&vector_lock); trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector, apicd->cpu); /* * If there is no vector associated or if the associated vector is * the shutdown vector, which is associated to make PCI/MSI * shutdown mode work, then there is nothing to release. Clear out * prev_vector for this and the offlined target case. */ apicd->prev_vector = 0; if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR) goto setnew; /* * If the target CPU of the previous vector is online, then mark * the vector as move in progress and store it for cleanup when the * first interrupt on the new vector arrives. If the target CPU is * offline then the regular release mechanism via the cleanup * vector is not possible and the vector can be immediately freed * in the underlying matrix allocator. 
*/ if (cpu_online(apicd->cpu)) { apicd->move_in_progress = true; apicd->prev_vector = apicd->vector; apicd->prev_cpu = apicd->cpu; WARN_ON_ONCE(apicd->cpu == newcpu); } else { irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector, managed); } setnew: apicd->vector = newvec; apicd->cpu = newcpu; BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec])); per_cpu(vector_irq, newcpu)[newvec] = desc; } static void vector_assign_managed_shutdown(struct irq_data *irqd) { unsigned int cpu = cpumask_first(cpu_online_mask); apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu); } static int reserve_managed_vector(struct irq_data *irqd) { const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd); struct apic_chip_data *apicd = apic_chip_data(irqd); unsigned long flags; int ret; raw_spin_lock_irqsave(&vector_lock, flags); apicd->is_managed = true; ret = irq_matrix_reserve_managed(vector_matrix, affmsk); raw_spin_unlock_irqrestore(&vector_lock, flags); trace_vector_reserve_managed(irqd->irq, ret); return ret; } static void reserve_irq_vector_locked(struct irq_data *irqd) { struct apic_chip_data *apicd = apic_chip_data(irqd); irq_matrix_reserve(vector_matrix); apicd->can_reserve = true; apicd->has_reserved = true; irqd_set_can_reserve(irqd); trace_vector_reserve(irqd->irq, 0); vector_assign_managed_shutdown(irqd); } static int reserve_irq_vector(struct irq_data *irqd) { unsigned long flags; raw_spin_lock_irqsave(&vector_lock, flags); reserve_irq_vector_locked(irqd); raw_spin_unlock_irqrestore(&vector_lock, flags); return 0; } static int assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest) { struct apic_chip_data *apicd = apic_chip_data(irqd); bool resvd = apicd->has_reserved; unsigned int cpu = apicd->cpu; int vector = apicd->vector; lockdep_assert_held(&vector_lock); /* * If the current target CPU is online and in the new requested * affinity mask, there is no point in moving the interrupt from * one CPU to another. */ if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest)) return 0; /* * Careful here. @apicd might either have move_in_progress set or * be enqueued for cleanup. Assigning a new vector would either * leave a stale vector on some CPU around or in case of a pending * cleanup corrupt the hlist. 
*/ if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist)) return -EBUSY; vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu); trace_vector_alloc(irqd->irq, vector, resvd, vector); if (vector < 0) return vector; apic_update_vector(irqd, vector, cpu); apic_update_irq_cfg(irqd, vector, cpu); return 0; } static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest) { unsigned long flags; int ret; raw_spin_lock_irqsave(&vector_lock, flags); cpumask_and(vector_searchmask, dest, cpu_online_mask); ret = assign_vector_locked(irqd, vector_searchmask); raw_spin_unlock_irqrestore(&vector_lock, flags); return ret; } static int assign_irq_vector_any_locked(struct irq_data *irqd) { /* Get the affinity mask - either irq_default_affinity or (user) set */ const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd); int node = irq_data_get_node(irqd); if (node != NUMA_NO_NODE) { /* Try the intersection of @affmsk and node mask */ cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk); if (!assign_vector_locked(irqd, vector_searchmask)) return 0; } /* Try the full affinity mask */ cpumask_and(vector_searchmask, affmsk, cpu_online_mask); if (!assign_vector_locked(irqd, vector_searchmask)) return 0; if (node != NUMA_NO_NODE) { /* Try the node mask */ if (!assign_vector_locked(irqd, cpumask_of_node(node))) return 0; } /* Try the full online mask */ return assign_vector_locked(irqd, cpu_online_mask); } static int assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info) { if (irqd_affinity_is_managed(irqd)) return reserve_managed_vector(irqd); if (info->mask) return assign_irq_vector(irqd, info->mask); /* * Make only a global reservation with no guarantee. A real vector * is associated at activation time. */ return reserve_irq_vector(irqd); } static int assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest) { const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd); struct apic_chip_data *apicd = apic_chip_data(irqd); int vector, cpu; cpumask_and(vector_searchmask, dest, affmsk); /* set_affinity might call here for nothing */ if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask)) return 0; vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask, &cpu); trace_vector_alloc_managed(irqd->irq, vector, vector); if (vector < 0) return vector; apic_update_vector(irqd, vector, cpu); apic_update_irq_cfg(irqd, vector, cpu); return 0; } static void clear_irq_vector(struct irq_data *irqd) { struct apic_chip_data *apicd = apic_chip_data(irqd); bool managed = irqd_affinity_is_managed(irqd); unsigned int vector = apicd->vector; lockdep_assert_held(&vector_lock); if (!vector) return; trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector, apicd->prev_cpu); per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN; irq_matrix_free(vector_matrix, apicd->cpu, vector, managed); apicd->vector = 0; /* Clean up move in progress */ vector = apicd->prev_vector; if (!vector) return; per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN; irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed); apicd->prev_vector = 0; apicd->move_in_progress = 0; hlist_del_init(&apicd->clist); } static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd) { struct apic_chip_data *apicd = apic_chip_data(irqd); unsigned long flags; trace_vector_deactivate(irqd->irq, apicd->is_managed, apicd->can_reserve, false); /* Regular fixed assigned interrupt */ if (!apicd->is_managed && 
!apicd->can_reserve) return; /* If the interrupt has a global reservation, nothing to do */ if (apicd->has_reserved) return; raw_spin_lock_irqsave(&vector_lock, flags); clear_irq_vector(irqd); if (apicd->can_reserve) reserve_irq_vector_locked(irqd); else vector_assign_managed_shutdown(irqd); raw_spin_unlock_irqrestore(&vector_lock, flags); } static int activate_reserved(struct irq_data *irqd) { struct apic_chip_data *apicd = apic_chip_data(irqd); int ret; ret = assign_irq_vector_any_locked(irqd); if (!ret) { apicd->has_reserved = false; /* * Core might have disabled reservation mode after * allocating the irq descriptor. Ideally this should * happen before allocation time, but that would require * completely convoluted ways of transporting that * information. */ if (!irqd_can_reserve(irqd)) apicd->can_reserve = false; } /* * Check to ensure that the effective affinity mask is a subset * the user supplied affinity mask, and warn the user if it is not */ if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd), irq_data_get_affinity_mask(irqd))) { pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n", irqd->irq); } return ret; } static int activate_managed(struct irq_data *irqd) { const struct cpumask *dest = irq_data_get_affinity_mask(irqd); int ret; cpumask_and(vector_searchmask, dest, cpu_online_mask); if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) { /* Something in the core code broke! Survive gracefully */ pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq); return -EINVAL; } ret = assign_managed_vector(irqd, vector_searchmask); /* * This should not happen. The vector reservation got buggered. Handle * it gracefully. */ if (WARN_ON_ONCE(ret < 0)) { pr_err("Managed startup irq %u, no vector available\n", irqd->irq); } return ret; } static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd, bool reserve) { struct apic_chip_data *apicd = apic_chip_data(irqd); unsigned long flags; int ret = 0; trace_vector_activate(irqd->irq, apicd->is_managed, apicd->can_reserve, reserve); raw_spin_lock_irqsave(&vector_lock, flags); if (!apicd->can_reserve && !apicd->is_managed) assign_irq_vector_any_locked(irqd); else if (reserve || irqd_is_managed_and_shutdown(irqd)) vector_assign_managed_shutdown(irqd); else if (apicd->is_managed) ret = activate_managed(irqd); else if (apicd->has_reserved) ret = activate_reserved(irqd); raw_spin_unlock_irqrestore(&vector_lock, flags); return ret; } static void vector_free_reserved_and_managed(struct irq_data *irqd) { const struct cpumask *dest = irq_data_get_affinity_mask(irqd); struct apic_chip_data *apicd = apic_chip_data(irqd); trace_vector_teardown(irqd->irq, apicd->is_managed, apicd->has_reserved); if (apicd->has_reserved) irq_matrix_remove_reserved(vector_matrix); if (apicd->is_managed) irq_matrix_remove_managed(vector_matrix, dest); } static void x86_vector_free_irqs(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct apic_chip_data *apicd; struct irq_data *irqd; unsigned long flags; int i; for (i = 0; i < nr_irqs; i++) { irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i); if (irqd && irqd->chip_data) { raw_spin_lock_irqsave(&vector_lock, flags); clear_irq_vector(irqd); vector_free_reserved_and_managed(irqd); apicd = irqd->chip_data; irq_domain_reset_irq_data(irqd); raw_spin_unlock_irqrestore(&vector_lock, flags); free_apic_chip_data(apicd); } } } static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd, struct apic_chip_data *apicd) { unsigned long flags; 
bool realloc = false; apicd->vector = ISA_IRQ_VECTOR(virq); apicd->cpu = 0; raw_spin_lock_irqsave(&vector_lock, flags); /* * If the interrupt is activated, then it must stay at this vector * position. That's usually the timer interrupt (0). */ if (irqd_is_activated(irqd)) { trace_vector_setup(virq, true, 0); apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu); } else { /* Release the vector */ apicd->can_reserve = true; irqd_set_can_reserve(irqd); clear_irq_vector(irqd); realloc = true; } raw_spin_unlock_irqrestore(&vector_lock, flags); return realloc; } static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { struct irq_alloc_info *info = arg; struct apic_chip_data *apicd; struct irq_data *irqd; int i, err, node; if (apic_is_disabled) return -ENXIO; /* * Catch any attempt to touch the cascade interrupt on a PIC * equipped system. */ if (WARN_ON_ONCE(info->flags & X86_IRQ_ALLOC_LEGACY && virq == PIC_CASCADE_IR)) return -EINVAL; for (i = 0; i < nr_irqs; i++) { irqd = irq_domain_get_irq_data(domain, virq + i); BUG_ON(!irqd); node = irq_data_get_node(irqd); WARN_ON_ONCE(irqd->chip_data); apicd = alloc_apic_chip_data(node); if (!apicd) { err = -ENOMEM; goto error; } apicd->irq = virq + i; irqd->chip = &lapic_controller; irqd->chip_data = apicd; irqd->hwirq = virq + i; irqd_set_single_target(irqd); /* * Prevent that any of these interrupts is invoked in * non interrupt context via e.g. generic_handle_irq() * as that can corrupt the affinity move state. */ irqd_set_handle_enforce_irqctx(irqd); /* Don't invoke affinity setter on deactivated interrupts */ irqd_set_affinity_on_activate(irqd); /* * Legacy vectors are already assigned when the IOAPIC * takes them over. They stay on the same vector. This is * required for check_timer() to work correctly as it might * switch back to legacy mode. Only update the hardware * config. */ if (info->flags & X86_IRQ_ALLOC_LEGACY) { if (!vector_configure_legacy(virq + i, irqd, apicd)) continue; } err = assign_irq_vector_policy(irqd, info); trace_vector_setup(virq + i, false, err); if (err) { irqd->chip_data = NULL; free_apic_chip_data(apicd); goto error; } } return 0; error: x86_vector_free_irqs(domain, virq, i); return err; } #ifdef CONFIG_GENERIC_IRQ_DEBUGFS static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d, struct irq_data *irqd, int ind) { struct apic_chip_data apicd; unsigned long flags; int irq; if (!irqd) { irq_matrix_debug_show(m, vector_matrix, ind); return; } irq = irqd->irq; if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) { seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq)); seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, ""); return; } if (!irqd->chip_data) { seq_printf(m, "%*sVector: Not assigned\n", ind, ""); return; } raw_spin_lock_irqsave(&vector_lock, flags); memcpy(&apicd, irqd->chip_data, sizeof(apicd)); raw_spin_unlock_irqrestore(&vector_lock, flags); seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector); seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu); if (apicd.prev_vector) { seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector); seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu); } seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0); seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0); seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0); seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 
1 : 0); seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist)); } #endif int x86_fwspec_is_ioapic(struct irq_fwspec *fwspec) { if (fwspec->param_count != 1) return 0; if (is_fwnode_irqchip(fwspec->fwnode)) { const char *fwname = fwnode_get_name(fwspec->fwnode); return fwname && !strncmp(fwname, "IO-APIC-", 8) && simple_strtol(fwname+8, NULL, 10) == fwspec->param[0]; } return to_of_node(fwspec->fwnode) && of_device_is_compatible(to_of_node(fwspec->fwnode), "intel,ce4100-ioapic"); } int x86_fwspec_is_hpet(struct irq_fwspec *fwspec) { if (fwspec->param_count != 1) return 0; if (is_fwnode_irqchip(fwspec->fwnode)) { const char *fwname = fwnode_get_name(fwspec->fwnode); return fwname && !strncmp(fwname, "HPET-MSI-", 9) && simple_strtol(fwname+9, NULL, 10) == fwspec->param[0]; } return 0; } static int x86_vector_select(struct irq_domain *d, struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token) { /* * HPET and I/OAPIC cannot be parented in the vector domain * if IRQ remapping is enabled. APIC IDs above 15 bits are * only permitted if IRQ remapping is enabled, so check that. */ if (apic_id_valid(32768)) return 0; return x86_fwspec_is_ioapic(fwspec) || x86_fwspec_is_hpet(fwspec); } static const struct irq_domain_ops x86_vector_domain_ops = { .select = x86_vector_select, .alloc = x86_vector_alloc_irqs, .free = x86_vector_free_irqs, .activate = x86_vector_activate, .deactivate = x86_vector_deactivate, #ifdef CONFIG_GENERIC_IRQ_DEBUGFS .debug_show = x86_vector_debug_show, #endif }; int __init arch_probe_nr_irqs(void) { int nr; if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) nr_irqs = NR_VECTORS * nr_cpu_ids; nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids; #if defined(CONFIG_PCI_MSI) /* * for MSI and HT dyn irq */ if (gsi_top <= NR_IRQS_LEGACY) nr += 8 * nr_cpu_ids; else nr += gsi_top * 16; #endif if (nr < nr_irqs) nr_irqs = nr; /* * We don't know if PIC is present at this point so we need to do * probe() to get the right number of legacy IRQs. */ return legacy_pic->probe(); } void lapic_assign_legacy_vector(unsigned int irq, bool replace) { /* * Use assign system here so it wont get accounted as allocated * and moveable in the cpu hotplug check and it prevents managed * irq reservation from touching it. */ irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace); } void __init lapic_update_legacy_vectors(void) { unsigned int i; if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0) return; /* * If the IO/APIC is disabled via config, kernel command line or * lack of enumeration then all legacy interrupts are routed * through the PIC. Make sure that they are marked as legacy * vectors. PIC_CASCADE_IRQ has already been marked in * lapic_assign_system_vectors(). */ for (i = 0; i < nr_legacy_irqs(); i++) { if (i != PIC_CASCADE_IR) lapic_assign_legacy_vector(i, true); } } void __init lapic_assign_system_vectors(void) { unsigned int i, vector; for_each_set_bit(vector, system_vectors, NR_VECTORS) irq_matrix_assign_system(vector_matrix, vector, false); if (nr_legacy_irqs() > 1) lapic_assign_legacy_vector(PIC_CASCADE_IR, false); /* System vectors are reserved, online it */ irq_matrix_online(vector_matrix); /* Mark the preallocated legacy interrupts */ for (i = 0; i < nr_legacy_irqs(); i++) { /* * Don't touch the cascade interrupt. It's unusable * on PIC equipped machines. See the large comment * in the IO/APIC code. 
*/ if (i != PIC_CASCADE_IR) irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i)); } } int __init arch_early_irq_init(void) { struct fwnode_handle *fn; fn = irq_domain_alloc_named_fwnode("VECTOR"); BUG_ON(!fn); x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops, NULL); BUG_ON(x86_vector_domain == NULL); irq_set_default_host(x86_vector_domain); BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL)); /* * Allocate the vector matrix allocator data structure and limit the * search area. */ vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR, FIRST_SYSTEM_VECTOR); BUG_ON(!vector_matrix); return arch_early_ioapic_init(); } #ifdef CONFIG_SMP static struct irq_desc *__setup_vector_irq(int vector) { int isairq = vector - ISA_IRQ_VECTOR(0); /* Check whether the irq is in the legacy space */ if (isairq < 0 || isairq >= nr_legacy_irqs()) return VECTOR_UNUSED; /* Check whether the irq is handled by the IOAPIC */ if (test_bit(isairq, &io_apic_irqs)) return VECTOR_UNUSED; return irq_to_desc(isairq); } /* Online the local APIC infrastructure and initialize the vectors */ void lapic_online(void) { unsigned int vector; lockdep_assert_held(&vector_lock); /* Online the vector matrix array for this CPU */ irq_matrix_online(vector_matrix); /* * The interrupt affinity logic never targets interrupts to offline * CPUs. The exception are the legacy PIC interrupts. In general * they are only targeted to CPU0, but depending on the platform * they can be distributed to any online CPU in hardware. The * kernel has no influence on that. So all active legacy vectors * must be installed on all CPUs. All non legacy interrupts can be * cleared. */ for (vector = 0; vector < NR_VECTORS; vector++) this_cpu_write(vector_irq[vector], __setup_vector_irq(vector)); } static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr); void lapic_offline(void) { struct vector_cleanup *cl = this_cpu_ptr(&vector_cleanup); lock_vector_lock(); /* In case the vector cleanup timer has not expired */ __vector_cleanup(cl, false); irq_matrix_offline(vector_matrix); WARN_ON_ONCE(try_to_del_timer_sync(&cl->timer) < 0); WARN_ON_ONCE(!hlist_empty(&cl->head)); unlock_vector_lock(); } static int apic_set_affinity(struct irq_data *irqd, const struct cpumask *dest, bool force) { int err; if (WARN_ON_ONCE(!irqd_is_activated(irqd))) return -EIO; raw_spin_lock(&vector_lock); cpumask_and(vector_searchmask, dest, cpu_online_mask); if (irqd_affinity_is_managed(irqd)) err = assign_managed_vector(irqd, vector_searchmask); else err = assign_vector_locked(irqd, vector_searchmask); raw_spin_unlock(&vector_lock); return err ? 
err : IRQ_SET_MASK_OK; } #else # define apic_set_affinity NULL #endif static int apic_retrigger_irq(struct irq_data *irqd) { struct apic_chip_data *apicd = apic_chip_data(irqd); unsigned long flags; raw_spin_lock_irqsave(&vector_lock, flags); __apic_send_IPI(apicd->cpu, apicd->vector); raw_spin_unlock_irqrestore(&vector_lock, flags); return 1; } void apic_ack_irq(struct irq_data *irqd) { irq_move_irq(irqd); apic_eoi(); } void apic_ack_edge(struct irq_data *irqd) { irq_complete_move(irqd_cfg(irqd)); apic_ack_irq(irqd); } static void x86_vector_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { __irq_msi_compose_msg(irqd_cfg(data), msg, false); } static struct irq_chip lapic_controller = { .name = "APIC", .irq_ack = apic_ack_edge, .irq_set_affinity = apic_set_affinity, .irq_compose_msi_msg = x86_vector_msi_compose_msg, .irq_retrigger = apic_retrigger_irq, }; #ifdef CONFIG_SMP static void free_moved_vector(struct apic_chip_data *apicd) { unsigned int vector = apicd->prev_vector; unsigned int cpu = apicd->prev_cpu; bool managed = apicd->is_managed; /* * Managed interrupts are usually not migrated away * from an online CPU, but CPU isolation 'managed_irq' * can make that happen. * 1) Activation does not take the isolation into account * to keep the code simple * 2) Migration away from an isolated CPU can happen when * a non-isolated CPU which is in the calculated * affinity mask comes online. */ trace_vector_free_moved(apicd->irq, cpu, vector, managed); irq_matrix_free(vector_matrix, cpu, vector, managed); per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; hlist_del_init(&apicd->clist); apicd->prev_vector = 0; apicd->move_in_progress = 0; } static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr) { struct apic_chip_data *apicd; struct hlist_node *tmp; bool rearm = false; lockdep_assert_held(&vector_lock); hlist_for_each_entry_safe(apicd, tmp, &cl->head, clist) { unsigned int irr, vector = apicd->prev_vector; /* * Paranoia: Check if the vector that needs to be cleaned * up is registered at the APICs IRR. That's clearly a * hardware issue if the vector arrived on the old target * _after_ interrupts were disabled above. Keep @apicd * on the list and schedule the timer again to give the CPU * a chance to handle the pending interrupt. * * Do not check IRR when called from lapic_offline(), because * fixup_irqs() was just called to scan IRR for set bits and * forward them to new destination CPUs via IPIs. */ irr = check_irr ? apic_read(APIC_IRR + (vector / 32 * 0x10)) : 0; if (irr & (1U << (vector % 32))) { pr_warn_once("Moved interrupt pending in old target APIC %u\n", apicd->irq); rearm = true; continue; } free_moved_vector(apicd); } /* * Must happen under vector_lock to make the timer_pending() check * in __vector_schedule_cleanup() race free against the rearm here. */ if (rearm) mod_timer(&cl->timer, jiffies + 1); } static void vector_cleanup_callback(struct timer_list *tmr) { struct vector_cleanup *cl = container_of(tmr, typeof(*cl), timer); /* Prevent vectors vanishing under us */ raw_spin_lock_irq(&vector_lock); __vector_cleanup(cl, true); raw_spin_unlock_irq(&vector_lock); } static void __vector_schedule_cleanup(struct apic_chip_data *apicd) { unsigned int cpu = apicd->prev_cpu; raw_spin_lock(&vector_lock); apicd->move_in_progress = 0; if (cpu_online(cpu)) { struct vector_cleanup *cl = per_cpu_ptr(&vector_cleanup, cpu); hlist_add_head(&apicd->clist, &cl->head); /* * The lockless timer_pending() check is safe here. 
If it * returns true, then the callback will observe this new * apic data in the hlist as everything is serialized by * vector lock. * * If it returns false then the timer is either not armed * or the other CPU executes the callback, which again * would be blocked on vector lock. Rearming it in the * latter case makes it fire for nothing. * * This is also safe against the callback rearming the timer * because that's serialized via vector lock too. */ if (!timer_pending(&cl->timer)) { cl->timer.expires = jiffies + 1; add_timer_on(&cl->timer, cpu); } } else { apicd->prev_vector = 0; } raw_spin_unlock(&vector_lock); } void vector_schedule_cleanup(struct irq_cfg *cfg) { struct apic_chip_data *apicd; apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg); if (apicd->move_in_progress) __vector_schedule_cleanup(apicd); } void irq_complete_move(struct irq_cfg *cfg) { struct apic_chip_data *apicd; apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg); if (likely(!apicd->move_in_progress)) return; /* * If the interrupt arrived on the new target CPU, cleanup the * vector on the old target CPU. A vector check is not required * because an interrupt can never move from one vector to another * on the same CPU. */ if (apicd->cpu == smp_processor_id()) __vector_schedule_cleanup(apicd); } /* * Called from fixup_irqs() with @desc->lock held and interrupts disabled. */ void irq_force_complete_move(struct irq_desc *desc) { struct apic_chip_data *apicd; struct irq_data *irqd; unsigned int vector; /* * The function is called for all descriptors regardless of which * irqdomain they belong to. For example if an IRQ is provided by * an irq_chip as part of a GPIO driver, the chip data for that * descriptor is specific to the irq_chip in question. * * Check first that the chip_data is what we expect * (apic_chip_data) before touching it any further. */ irqd = irq_domain_get_irq_data(x86_vector_domain, irq_desc_get_irq(desc)); if (!irqd) return; raw_spin_lock(&vector_lock); apicd = apic_chip_data(irqd); if (!apicd) goto unlock; /* * If prev_vector is empty, no action required. */ vector = apicd->prev_vector; if (!vector) goto unlock; /* * This is tricky. If the cleanup of the old vector has not been * done yet, then the following setaffinity call will fail with * -EBUSY. This can leave the interrupt in a stale state. * * All CPUs are stuck in stop machine with interrupts disabled so * calling __irq_complete_move() would be completely pointless. * * 1) The interrupt is in move_in_progress state. That means that we * have not seen an interrupt since the io_apic was reprogrammed to * the new vector. * * 2) The interrupt has fired on the new vector, but the cleanup IPIs * have not been processed yet. */ if (apicd->move_in_progress) { /* * In theory there is a race: * * set_ioapic(new_vector) <-- Interrupt is raised before update * is effective, i.e. it's raised on * the old vector. * * So if the target cpu cannot handle that interrupt before * the old vector is cleaned up, we get a spurious interrupt * and in the worst case the ioapic irq line becomes stale. * * But in case of cpu hotplug this should be a non issue * because if the affinity update happens right before all * cpus rendezvous in stop machine, there is no way that the * interrupt can be blocked on the target cpu because all cpus * loops first with interrupts enabled in stop machine, so the * old vector is not yet cleaned up when the interrupt fires. 
* * So the only way to run into this issue is if the delivery * of the interrupt on the apic/system bus would be delayed * beyond the point where the target cpu disables interrupts * in stop machine. I doubt that it can happen, but at least * there is a theoretical chance. Virtualization might be * able to expose this, but AFAICT the IOAPIC emulation is not * as stupid as the real hardware. * * Anyway, there is nothing we can do about that at this point * w/o refactoring the whole fixup_irq() business completely. * We print at least the irq number and the old vector number, * so we have the necessary information when a problem in that * area arises. */ pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n", irqd->irq, vector); } free_moved_vector(apicd); unlock: raw_spin_unlock(&vector_lock); } #ifdef CONFIG_HOTPLUG_CPU /* * Note, this is not accurate accounting, but at least good enough to * prevent that the actual interrupt move will run out of vectors. */ int lapic_can_unplug_cpu(void) { unsigned int rsvd, avl, tomove, cpu = smp_processor_id(); int ret = 0; raw_spin_lock(&vector_lock); tomove = irq_matrix_allocated(vector_matrix); avl = irq_matrix_available(vector_matrix, true); if (avl < tomove) { pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n", cpu, tomove, avl); ret = -ENOSPC; goto out; } rsvd = irq_matrix_reserved(vector_matrix); if (avl < rsvd) { pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n", rsvd, avl); } out: raw_spin_unlock(&vector_lock); return ret; } #endif /* HOTPLUG_CPU */ #endif /* SMP */ static void __init print_APIC_field(int base) { int i; printk(KERN_DEBUG); for (i = 0; i < 8; i++) pr_cont("%08x", apic_read(base + i*0x10)); pr_cont("\n"); } static void __init print_local_APIC(void *dummy) { unsigned int i, v, ver, maxlvt; u64 icr; pr_debug("printing local APIC contents on CPU#%d/%d:\n", smp_processor_id(), read_apic_id()); v = apic_read(APIC_ID); pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id()); v = apic_read(APIC_LVR); pr_info("... APIC VERSION: %08x\n", v); ver = GET_APIC_VERSION(v); maxlvt = lapic_get_maxlvt(); v = apic_read(APIC_TASKPRI); pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); /* !82489DX */ if (APIC_INTEGRATED(ver)) { if (!APIC_XAPIC(ver)) { v = apic_read(APIC_ARBPRI); pr_debug("... APIC ARBPRI: %08x (%02x)\n", v, v & APIC_ARBPRI_MASK); } v = apic_read(APIC_PROCPRI); pr_debug("... APIC PROCPRI: %08x\n", v); } /* * Remote read supported only in the 82489DX and local APIC for * Pentium processors. */ if (!APIC_INTEGRATED(ver) || maxlvt == 3) { v = apic_read(APIC_RRR); pr_debug("... APIC RRR: %08x\n", v); } v = apic_read(APIC_LDR); pr_debug("... APIC LDR: %08x\n", v); if (!x2apic_enabled()) { v = apic_read(APIC_DFR); pr_debug("... APIC DFR: %08x\n", v); } v = apic_read(APIC_SPIV); pr_debug("... APIC SPIV: %08x\n", v); pr_debug("... APIC ISR field:\n"); print_APIC_field(APIC_ISR); pr_debug("... APIC TMR field:\n"); print_APIC_field(APIC_TMR); pr_debug("... APIC IRR field:\n"); print_APIC_field(APIC_IRR); /* !82489DX */ if (APIC_INTEGRATED(ver)) { /* Due to the Pentium erratum 3AP. */ if (maxlvt > 3) apic_write(APIC_ESR, 0); v = apic_read(APIC_ESR); pr_debug("... APIC ESR: %08x\n", v); } icr = apic_icr_read(); pr_debug("... APIC ICR: %08x\n", (u32)icr); pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32)); v = apic_read(APIC_LVTT); pr_debug("... APIC LVTT: %08x\n", v); if (maxlvt > 3) { /* PC is LVT#4. */ v = apic_read(APIC_LVTPC); pr_debug("... 
APIC LVTPC: %08x\n", v); } v = apic_read(APIC_LVT0); pr_debug("... APIC LVT0: %08x\n", v); v = apic_read(APIC_LVT1); pr_debug("... APIC LVT1: %08x\n", v); if (maxlvt > 2) { /* ERR is LVT#3. */ v = apic_read(APIC_LVTERR); pr_debug("... APIC LVTERR: %08x\n", v); } v = apic_read(APIC_TMICT); pr_debug("... APIC TMICT: %08x\n", v); v = apic_read(APIC_TMCCT); pr_debug("... APIC TMCCT: %08x\n", v); v = apic_read(APIC_TDCR); pr_debug("... APIC TDCR: %08x\n", v); if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { v = apic_read(APIC_EFEAT); maxlvt = (v >> 16) & 0xff; pr_debug("... APIC EFEAT: %08x\n", v); v = apic_read(APIC_ECTRL); pr_debug("... APIC ECTRL: %08x\n", v); for (i = 0; i < maxlvt; i++) { v = apic_read(APIC_EILVTn(i)); pr_debug("... APIC EILVT%d: %08x\n", i, v); } } pr_cont("\n"); } static void __init print_local_APICs(int maxcpu) { int cpu; if (!maxcpu) return; preempt_disable(); for_each_online_cpu(cpu) { if (cpu >= maxcpu) break; smp_call_function_single(cpu, print_local_APIC, NULL, 1); } preempt_enable(); } static void __init print_PIC(void) { unsigned int v; unsigned long flags; if (!nr_legacy_irqs()) return; pr_debug("\nprinting PIC contents\n"); raw_spin_lock_irqsave(&i8259A_lock, flags); v = inb(0xa1) << 8 | inb(0x21); pr_debug("... PIC IMR: %04x\n", v); v = inb(0xa0) << 8 | inb(0x20); pr_debug("... PIC IRR: %04x\n", v); outb(0x0b, 0xa0); outb(0x0b, 0x20); v = inb(0xa0) << 8 | inb(0x20); outb(0x0a, 0xa0); outb(0x0a, 0x20); raw_spin_unlock_irqrestore(&i8259A_lock, flags); pr_debug("... PIC ISR: %04x\n", v); v = inb(PIC_ELCR2) << 8 | inb(PIC_ELCR1); pr_debug("... PIC ELCR: %04x\n", v); } static int show_lapic __initdata = 1; static __init int setup_show_lapic(char *arg) { int num = -1; if (strcmp(arg, "all") == 0) { show_lapic = CONFIG_NR_CPUS; } else { get_option(&arg, &num); if (num >= 0) show_lapic = num; } return 1; } __setup("show_lapic=", setup_show_lapic); static int __init print_ICs(void) { if (apic_verbosity == APIC_QUIET) return 0; print_PIC(); /* don't print out if apic is not there */ if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config()) return 0; print_local_APICs(show_lapic); print_IO_APICs(); return 0; } late_initcall(print_ICs);
linux-master
arch/x86/kernel/apic/vector.c
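Editor's note: assign_irq_vector_any_locked() in the vector.c entry above tries progressively wider CPU masks: node mask intersected with the affinity mask, then affinity intersected with the online mask, then the bare node mask, then all online CPUs. The userspace sketch below models that fallback order with plain bitmasks; try_alloc() merely stands in for the irq matrix allocator and is an assumption of this example.

/* Sketch of the widening search order; one bit per CPU. */
#include <stdio.h>

static int try_alloc(unsigned long mask, unsigned long free, int *cpu)
{
	unsigned long hit = mask & free;	/* CPUs in the mask with a spare vector */
	if (!hit)
		return -1;
	*cpu = __builtin_ctzl(hit);		/* pick the lowest-numbered candidate */
	return 0;
}

static int assign_any(unsigned long affinity, unsigned long node_mask,
		      unsigned long online, unsigned long free, int *cpu)
{
	if (!try_alloc(node_mask & affinity, free, cpu))	/* node ∩ affinity */
		return 0;
	if (!try_alloc(affinity & online, free, cpu))		/* full affinity */
		return 0;
	if (!try_alloc(node_mask, free, cpu))			/* node mask */
		return 0;
	return try_alloc(online, free, cpu);			/* any online CPU */
}

int main(void)
{
	int cpu;
	/* CPUs 0-3 online, node covers 0-1, user affinity is 2-3, only CPU 1 free */
	if (!assign_any(0xCUL, 0x3UL, 0xFUL, 0x2UL, &cpu))
		printf("fell back to CPU %d\n", cpu);	/* -> CPU 1 via the node mask */
	return 0;
}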
// SPDX-License-Identifier: GPL-2.0 /* * NOOP APIC driver. * * Does almost nothing and should be substituted by a real apic driver via * probe routine. * * Though in case if apic is disabled (for some reason) we try * to not uglify the caller's code and allow to call (some) apic routines * like self-ipi, etc... * * FIXME: Remove this gunk. The above argument which was intentionally left * in place is silly to begin with because none of the callbacks except for * APIC::read/write() have a WARN_ON_ONCE() in them. Sigh... */ #include <linux/cpumask.h> #include <linux/thread_info.h> #include <asm/apic.h> static void noop_send_IPI(int cpu, int vector) { } static void noop_send_IPI_mask(const struct cpumask *cpumask, int vector) { } static void noop_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { } static void noop_send_IPI_allbutself(int vector) { } static void noop_send_IPI_all(int vector) { } static void noop_send_IPI_self(int vector) { } static void noop_apic_icr_write(u32 low, u32 id) { } static int noop_wakeup_secondary_cpu(int apicid, unsigned long start_eip) { return -1; } static u64 noop_apic_icr_read(void) { return 0; } static int noop_phys_pkg_id(int cpuid_apic, int index_msb) { return 0; } static unsigned int noop_get_apic_id(unsigned long x) { return 0; } static void noop_apic_eoi(void) { } static u32 noop_apic_read(u32 reg) { WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !apic_is_disabled); return 0; } static void noop_apic_write(u32 reg, u32 val) { WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !apic_is_disabled); } struct apic apic_noop __ro_after_init = { .name = "noop", .delivery_mode = APIC_DELIVERY_MODE_FIXED, .dest_mode_logical = true, .disable_esr = 0, .check_apicid_used = default_check_apicid_used, .ioapic_phys_id_map = default_ioapic_phys_id_map, .cpu_present_to_apicid = default_cpu_present_to_apicid, .phys_pkg_id = noop_phys_pkg_id, .max_apic_id = 0xFE, .get_apic_id = noop_get_apic_id, .calc_dest_apicid = apic_flat_calc_apicid, .send_IPI = noop_send_IPI, .send_IPI_mask = noop_send_IPI_mask, .send_IPI_mask_allbutself = noop_send_IPI_mask_allbutself, .send_IPI_allbutself = noop_send_IPI_allbutself, .send_IPI_all = noop_send_IPI_all, .send_IPI_self = noop_send_IPI_self, .wakeup_secondary_cpu = noop_wakeup_secondary_cpu, .read = noop_apic_read, .write = noop_apic_write, .eoi = noop_apic_eoi, .icr_read = noop_apic_icr_read, .icr_write = noop_apic_icr_write, };
linux-master
arch/x86/kernel/apic/apic_noop.c
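Editor's note: apic_noop above is a placeholder driver whose callbacks deliberately do nothing so callers never need NULL checks before a real driver is probed. The sketch below shows that "null object" idea in plain userspace C; ipi_ops, noop_send and real_send are illustrative names, not kernel APIs.

#include <stdio.h>

struct ipi_ops {
	void (*send)(int cpu, int vector);
};

static void noop_send(int cpu, int vector)
{
	/* Deliberately does nothing; a real driver replaces this at probe time. */
	(void)cpu; (void)vector;
}

static void real_send(int cpu, int vector)
{
	printf("IPI vector %#x -> CPU %d\n", vector, cpu);
}

static struct ipi_ops ops = { .send = noop_send };

int main(void)
{
	ops.send(1, 0xfd);		/* safe even before any driver is installed */
	ops.send = real_send;		/* "probe" swaps in the real implementation */
	ops.send(1, 0xfd);
	return 0;
}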
// SPDX-License-Identifier: GPL-2.0 /* * HW NMI watchdog support * * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. * * Arch specific calls to support NMI watchdog * * Bits copied from original nmi.c file * */ #include <linux/thread_info.h> #include <asm/apic.h> #include <asm/nmi.h> #include <linux/cpumask.h> #include <linux/kdebug.h> #include <linux/notifier.h> #include <linux/kprobes.h> #include <linux/nmi.h> #include <linux/init.h> #include <linux/delay.h> #include "local.h" #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF u64 hw_nmi_get_sample_period(int watchdog_thresh) { return (u64)(cpu_khz) * 1000 * watchdog_thresh; } #endif #ifdef arch_trigger_cpumask_backtrace static void nmi_raise_cpu_backtrace(cpumask_t *mask) { __apic_send_IPI_mask(mask, NMI_VECTOR); } void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu) { nmi_trigger_cpumask_backtrace(mask, exclude_cpu, nmi_raise_cpu_backtrace); } static int nmi_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs) { if (nmi_cpu_backtrace(regs)) return NMI_HANDLED; return NMI_DONE; } NOKPROBE_SYMBOL(nmi_cpu_backtrace_handler); static int __init register_nmi_cpu_backtrace_handler(void) { register_nmi_handler(NMI_LOCAL, nmi_cpu_backtrace_handler, 0, "arch_bt"); return 0; } early_initcall(register_nmi_cpu_backtrace_handler); #endif
linux-master
arch/x86/kernel/apic/hw_nmi.c
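Editor's note: hw_nmi_get_sample_period() in the hw_nmi.c entry above programs the perf hardlockup detector with watchdog_thresh seconds worth of CPU cycles, i.e. cpu_khz * 1000 cycles per second times the threshold. The sketch below just reproduces that arithmetic; the 3,000,000 kHz figure is a made-up example and 10 s is the usual default threshold.

#include <stdio.h>
#include <stdint.h>

static uint64_t sample_period(uint64_t cpu_khz, int watchdog_thresh)
{
	/* cycles per second = cpu_khz * 1000; multiply by the threshold in seconds */
	return cpu_khz * 1000u * (uint64_t)watchdog_thresh;
}

int main(void)
{
	/* e.g. a 3.0 GHz CPU with a 10 second threshold */
	printf("%llu cycles\n",
	       (unsigned long long)sample_period(3000000, 10));
	/* -> 30000000000 cycles between NMI watchdog samples */
	return 0;
}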
// SPDX-License-Identifier: GPL-2.0-only /* * Default generic APIC driver. This handles up to 8 CPUs. * * Copyright 2003 Andi Kleen, SuSE Labs. * * Generic x86 APIC driver probe layer. */ #include <linux/export.h> #include <linux/errno.h> #include <linux/smp.h> #include <xen/xen.h> #include <asm/io_apic.h> #include <asm/apic.h> #include <asm/acpi.h> #include "local.h" static int default_phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } /* should be called last. */ static int probe_default(void) { return 1; } static struct apic apic_default __ro_after_init = { .name = "default", .probe = probe_default, .apic_id_registered = default_apic_id_registered, .delivery_mode = APIC_DELIVERY_MODE_FIXED, .dest_mode_logical = true, .disable_esr = 0, .check_apicid_used = default_check_apicid_used, .init_apic_ldr = default_init_apic_ldr, .ioapic_phys_id_map = default_ioapic_phys_id_map, .cpu_present_to_apicid = default_cpu_present_to_apicid, .phys_pkg_id = default_phys_pkg_id, .max_apic_id = 0xFE, .get_apic_id = default_get_apic_id, .calc_dest_apicid = apic_flat_calc_apicid, .send_IPI = default_send_IPI_single, .send_IPI_mask = default_send_IPI_mask_logical, .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical, .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, .read = native_apic_mem_read, .write = native_apic_mem_write, .eoi = native_apic_mem_eoi, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, .wait_icr_idle = apic_mem_wait_icr_idle, .safe_wait_icr_idle = apic_mem_wait_icr_idle_timeout, }; apic_driver(apic_default); struct apic *apic __ro_after_init = &apic_default; EXPORT_SYMBOL_GPL(apic); static int cmdline_apic __initdata; static int __init parse_apic(char *arg) { struct apic **drv; if (!arg) return -EINVAL; for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) { if (!strcmp((*drv)->name, arg)) { apic_install_driver(*drv); cmdline_apic = 1; return 0; } } /* Parsed again by __setup for debug/verbose */ return 0; } early_param("apic", parse_apic); void __init x86_32_probe_bigsmp_early(void) { if (nr_cpu_ids <= 8 || xen_pv_domain()) return; if (IS_ENABLED(CONFIG_X86_BIGSMP)) { switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: if (!APIC_XAPIC(boot_cpu_apic_version)) break; /* P4 and above */ fallthrough; case X86_VENDOR_HYGON: case X86_VENDOR_AMD: if (apic_bigsmp_possible(cmdline_apic)) return; break; } } pr_info("Limiting to 8 possible CPUs\n"); set_nr_cpu_ids(8); } void __init x86_32_install_bigsmp(void) { if (nr_cpu_ids > 8 && !xen_pv_domain()) apic_bigsmp_force(); } void __init x86_32_probe_apic(void) { if (!cmdline_apic) { struct apic **drv; for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) { if ((*drv)->probe()) { apic_install_driver(*drv); break; } } /* Not visible without early console */ if (drv == __apicdrivers_end) panic("Didn't find an APIC driver"); } }
linux-master
arch/x86/kernel/apic/probe_32.c
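Editor's note: x86_32_probe_apic() above walks the registered APIC drivers in order and installs the first one whose probe() succeeds, with the "default" driver as the always-matching last resort. The userspace sketch below mirrors that probe loop; the driver table and probe functions are illustrative only.

#include <stdio.h>

struct drv {
	const char *name;
	int (*probe)(void);
};

static int probe_fancy(void)   { return 0; }	/* hardware not present */
static int probe_default(void) { return 1; }	/* always matches, placed last */

static const struct drv drivers[] = {
	{ "fancy",   probe_fancy   },
	{ "default", probe_default },
};

int main(void)
{
	const struct drv *chosen = NULL;

	for (size_t i = 0; i < sizeof(drivers) / sizeof(drivers[0]); i++) {
		if (drivers[i].probe()) {	/* first successful probe wins */
			chosen = &drivers[i];
			break;
		}
	}
	printf("installed driver: %s\n", chosen ? chosen->name : "none");
	return 0;
}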
// SPDX-License-Identifier: GPL-2.0 #include <linux/cpumask.h> #include <linux/acpi.h> #include "local.h" int x2apic_phys; static struct apic apic_x2apic_phys; u32 x2apic_max_apicid __ro_after_init = UINT_MAX; void __init x2apic_set_max_apicid(u32 apicid) { x2apic_max_apicid = apicid; if (apic->x2apic_set_max_apicid) apic->max_apic_id = apicid; } static int __init set_x2apic_phys_mode(char *arg) { x2apic_phys = 1; return 0; } early_param("x2apic_phys", set_x2apic_phys_mode); static bool x2apic_fadt_phys(void) { #ifdef CONFIG_ACPI if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { printk(KERN_DEBUG "System requires x2apic physical mode\n"); return true; } #endif return false; } static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys()); } static void x2apic_send_IPI(int cpu, int vector) { u32 dest = per_cpu(x86_cpu_to_apicid, cpu); /* x2apic MSRs are special and need a special fence: */ weak_wrmsr_fence(); __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL); } static void __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) { unsigned long query_cpu; unsigned long this_cpu; unsigned long flags; /* x2apic MSRs are special and need a special fence: */ weak_wrmsr_fence(); local_irq_save(flags); this_cpu = smp_processor_id(); for_each_cpu(query_cpu, mask) { if (apic_dest == APIC_DEST_ALLBUT && this_cpu == query_cpu) continue; __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), vector, APIC_DEST_PHYSICAL); } local_irq_restore(flags); } static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) { __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC); } static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT); } static void __x2apic_send_IPI_shorthand(int vector, u32 which) { unsigned long cfg = __prepare_ICR(which, vector, 0); /* x2apic MSRs are special and need a special fence: */ weak_wrmsr_fence(); native_x2apic_icr_write(cfg, 0); } void x2apic_send_IPI_allbutself(int vector) { __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLBUT); } void x2apic_send_IPI_all(int vector) { __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLINC); } void x2apic_send_IPI_self(int vector) { apic_write(APIC_SELF_IPI, vector); } void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) { unsigned long cfg = __prepare_ICR(0, vector, dest); native_x2apic_icr_write(cfg, apicid); } static int x2apic_phys_probe(void) { if (!x2apic_mode) return 0; if (x2apic_phys || x2apic_fadt_phys()) return 1; return apic == &apic_x2apic_phys; } unsigned int x2apic_get_apic_id(unsigned long id) { return id; } u32 x2apic_set_apic_id(unsigned int id) { return id; } int x2apic_phys_pkg_id(int initial_apicid, int index_msb) { return initial_apicid >> index_msb; } static struct apic apic_x2apic_phys __ro_after_init = { .name = "physical x2apic", .probe = x2apic_phys_probe, .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, .delivery_mode = APIC_DELIVERY_MODE_FIXED, .dest_mode_logical = false, .disable_esr = 0, .cpu_present_to_apicid = default_cpu_present_to_apicid, .phys_pkg_id = x2apic_phys_pkg_id, .max_apic_id = UINT_MAX, .x2apic_set_max_apicid = true, .get_apic_id = x2apic_get_apic_id, .set_apic_id = x2apic_set_apic_id, .calc_dest_apicid = apic_default_calc_apicid, .send_IPI = x2apic_send_IPI, .send_IPI_mask = x2apic_send_IPI_mask, 
.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, .send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, .read = native_apic_msr_read, .write = native_apic_msr_write, .eoi = native_apic_msr_eoi, .icr_read = native_x2apic_icr_read, .icr_write = native_x2apic_icr_write, }; apic_driver(apic_x2apic_phys);
linux-master
arch/x86/kernel/apic/x2apic_phys.c
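Editor's note: the x2apic_phys.c entry above sends IPIs by writing a single 64-bit ICR MSR, with the destination APIC ID in the upper 32 bits and the vector plus mode bits in the lower 32. The sketch below only illustrates that packing for the fixed-delivery, physical-destination case; the printf stands in for the actual MSR write and the values are examples.

#include <stdio.h>
#include <stdint.h>

static uint64_t compose_icr(uint32_t apicid, uint8_t vector)
{
	uint32_t low = vector;			/* fixed delivery, physical destination */
	return ((uint64_t)apicid << 32) | low;	/* destination ID in the high half */
}

int main(void)
{
	uint64_t icr = compose_icr(0x21, 0xfd);
	printf("ICR = %#018llx (dest=%#x, vector=%#x)\n",
	       (unsigned long long)icr,
	       (unsigned)(icr >> 32), (unsigned)(icr & 0xff));
	return 0;
}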
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2004 James Cleverdon, IBM. * * Flat APIC subarch code. * * Hacked for x86-64 by James Cleverdon from i386 architecture code by * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. */ #include <linux/cpumask.h> #include <linux/export.h> #include <linux/acpi.h> #include <asm/jailhouse_para.h> #include <asm/apic.h> #include "local.h" static struct apic apic_physflat; static struct apic apic_flat; struct apic *apic __ro_after_init = &apic_flat; EXPORT_SYMBOL_GPL(apic); static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 1; } static void _flat_send_IPI_mask(unsigned long mask, int vector) { unsigned long flags; local_irq_save(flags); __default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL); local_irq_restore(flags); } static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) { unsigned long mask = cpumask_bits(cpumask)[0]; _flat_send_IPI_mask(mask, vector); } static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { unsigned long mask = cpumask_bits(cpumask)[0]; int cpu = smp_processor_id(); if (cpu < BITS_PER_LONG) __clear_bit(cpu, &mask); _flat_send_IPI_mask(mask, vector); } static unsigned int flat_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; } static u32 set_apic_id(unsigned int id) { return (id & 0xFF) << 24; } static int flat_phys_pkg_id(int initial_apic_id, int index_msb) { return initial_apic_id >> index_msb; } static int flat_probe(void) { return 1; } static struct apic apic_flat __ro_after_init = { .name = "flat", .probe = flat_probe, .acpi_madt_oem_check = flat_acpi_madt_oem_check, .apic_id_registered = default_apic_id_registered, .delivery_mode = APIC_DELIVERY_MODE_FIXED, .dest_mode_logical = true, .disable_esr = 0, .init_apic_ldr = default_init_apic_ldr, .cpu_present_to_apicid = default_cpu_present_to_apicid, .phys_pkg_id = flat_phys_pkg_id, .max_apic_id = 0xFE, .get_apic_id = flat_get_apic_id, .set_apic_id = set_apic_id, .calc_dest_apicid = apic_flat_calc_apicid, .send_IPI = default_send_IPI_single, .send_IPI_mask = flat_send_IPI_mask, .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, .read = native_apic_mem_read, .write = native_apic_mem_write, .eoi = native_apic_mem_eoi, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, .wait_icr_idle = apic_mem_wait_icr_idle, .safe_wait_icr_idle = apic_mem_wait_icr_idle_timeout, }; /* * Physflat mode is used when there are more than 8 CPUs on a system. * We cannot use logical delivery in this case because the mask * overflows, so use physical mode. */ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { #ifdef CONFIG_ACPI /* * Quirk: some x86_64 machines can only use physical APIC mode * regardless of how many processors are present (x86_64 ES7000 * is an example). 
*/ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { printk(KERN_DEBUG "system APIC only can use physical flat"); return 1; } if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) { printk(KERN_DEBUG "IBM Summit detected, will use apic physical"); return 1; } #endif return 0; } static int physflat_probe(void) { return apic == &apic_physflat || num_possible_cpus() > 8 || jailhouse_paravirt(); } static struct apic apic_physflat __ro_after_init = { .name = "physical flat", .probe = physflat_probe, .acpi_madt_oem_check = physflat_acpi_madt_oem_check, .apic_id_registered = default_apic_id_registered, .delivery_mode = APIC_DELIVERY_MODE_FIXED, .dest_mode_logical = false, .disable_esr = 0, .check_apicid_used = NULL, .ioapic_phys_id_map = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .phys_pkg_id = flat_phys_pkg_id, .max_apic_id = 0xFE, .get_apic_id = flat_get_apic_id, .set_apic_id = set_apic_id, .calc_dest_apicid = apic_default_calc_apicid, .send_IPI = default_send_IPI_single_phys, .send_IPI_mask = default_send_IPI_mask_sequence_phys, .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_phys, .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, .read = native_apic_mem_read, .write = native_apic_mem_write, .eoi = native_apic_mem_eoi, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, .wait_icr_idle = apic_mem_wait_icr_idle, .safe_wait_icr_idle = apic_mem_wait_icr_idle_timeout, }; /* * We need to check for physflat first, so this order is important. */ apic_drivers(apic_physflat, apic_flat);
linux-master
arch/x86/kernel/apic/apic_flat_64.c
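Editor's note: as the comments in apic_flat_64.c above explain, flat logical mode gives each CPU one bit of the 8-bit logical destination field, which is why systems with more than 8 CPUs fall back to physflat. The sketch below shows the resulting "all but self" masking with a plain byte; the printf replaces the ICR2/ICR writes.

#include <stdio.h>

static void flat_send_ipi_mask_allbutself(unsigned char mask, int self_cpu,
					  int vector)
{
	if (self_cpu < 8)
		mask &= (unsigned char)~(1u << self_cpu);	/* drop the sender's bit */
	printf("logical dest %#04x, vector %#x\n", mask, vector);
}

int main(void)
{
	/* CPUs 0-3 targeted, sender is CPU 2 -> destination bits 0, 1 and 3 remain */
	flat_send_ipi_mask_allbutself(0x0f, 2, 0xfb);
	return 0;
}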
// SPDX-License-Identifier: GPL-2.0 #include <linux/cpumask.h> #include <linux/delay.h> #include <linux/smp.h> #include <asm/io_apic.h> #include "local.h" DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand); #ifdef CONFIG_SMP static int apic_ipi_shorthand_off __ro_after_init; static __init int apic_ipi_shorthand(char *str) { get_option(&str, &apic_ipi_shorthand_off); return 1; } __setup("no_ipi_broadcast=", apic_ipi_shorthand); static int __init print_ipi_mode(void) { pr_info("IPI shorthand broadcast: %s\n", apic_ipi_shorthand_off ? "disabled" : "enabled"); return 0; } late_initcall(print_ipi_mode); void apic_smt_update(void) { /* * Do not switch to broadcast mode if: * - Disabled on the command line * - Only a single CPU is online * - Not all present CPUs have been at least booted once * * The latter is important as the local APIC might be in some * random state and a broadcast might cause havoc. That's * especially true for NMI broadcasting. */ if (apic_ipi_shorthand_off || num_online_cpus() == 1 || !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) { static_branch_disable(&apic_use_ipi_shorthand); } else { static_branch_enable(&apic_use_ipi_shorthand); } } void apic_send_IPI_allbutself(unsigned int vector) { if (num_online_cpus() < 2) return; if (static_branch_likely(&apic_use_ipi_shorthand)) __apic_send_IPI_allbutself(vector); else __apic_send_IPI_mask_allbutself(cpu_online_mask, vector); } /* * Send a 'reschedule' IPI to another CPU. It goes straight through and * wastes no time serializing anything. Worst case is that we lose a * reschedule ... */ void native_smp_send_reschedule(int cpu) { if (unlikely(cpu_is_offline(cpu))) { WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu); return; } __apic_send_IPI(cpu, RESCHEDULE_VECTOR); } void native_send_call_func_single_ipi(int cpu) { __apic_send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR); } void native_send_call_func_ipi(const struct cpumask *mask) { if (static_branch_likely(&apic_use_ipi_shorthand)) { unsigned int cpu = smp_processor_id(); if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask)) goto sendmask; if (cpumask_test_cpu(cpu, mask)) __apic_send_IPI_all(CALL_FUNCTION_VECTOR); else if (num_online_cpus() > 1) __apic_send_IPI_allbutself(CALL_FUNCTION_VECTOR); return; } sendmask: __apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR); } #endif /* CONFIG_SMP */ static inline int __prepare_ICR2(unsigned int mask) { return SET_XAPIC_DEST_FIELD(mask); } u32 apic_mem_wait_icr_idle_timeout(void) { int cnt; for (cnt = 0; cnt < 1000; cnt++) { if (!(apic_read(APIC_ICR) & APIC_ICR_BUSY)) return 0; inc_irq_stat(icr_read_retry_count); udelay(100); } return APIC_ICR_BUSY; } void apic_mem_wait_icr_idle(void) { while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY) cpu_relax(); } /* * This is safe against interruption because it only writes the lower 32 * bits of the APIC_ICR register. The destination field is ignored for * short hand IPIs. * * wait_icr_idle() * write(ICR2, dest) * NMI * wait_icr_idle() * write(ICR) * wait_icr_idle() * write(ICR) * * This function does not need to disable interrupts as there is no ICR2 * interaction. The memory write is direct except when the machine is * affected by the 11AP Pentium erratum, which turns the plain write into * an XCHG operation. */ static void __default_send_IPI_shortcut(unsigned int shortcut, int vector) { /* * Wait for the previous ICR command to complete. 
Use * safe_apic_wait_icr_idle() for the NMI vector as there have been * issues where otherwise the system hangs when the panic CPU tries * to stop the others before launching the kdump kernel. */ if (unlikely(vector == NMI_VECTOR)) apic_mem_wait_icr_idle_timeout(); else apic_mem_wait_icr_idle(); /* Destination field (ICR2) and the destination mode are ignored */ native_apic_mem_write(APIC_ICR, __prepare_ICR(shortcut, vector, 0)); } /* * This is used to send an IPI with no shorthand notation (the destination is * specified in bits 56 to 63 of the ICR). */ void __default_send_IPI_dest_field(unsigned int dest_mask, int vector, unsigned int dest_mode) { /* See comment in __default_send_IPI_shortcut() */ if (unlikely(vector == NMI_VECTOR)) apic_mem_wait_icr_idle_timeout(); else apic_mem_wait_icr_idle(); /* Set the IPI destination field in the ICR */ native_apic_mem_write(APIC_ICR2, __prepare_ICR2(dest_mask)); /* Send it with the proper destination mode */ native_apic_mem_write(APIC_ICR, __prepare_ICR(0, vector, dest_mode)); } void default_send_IPI_single_phys(int cpu, int vector) { unsigned long flags; local_irq_save(flags); __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu), vector, APIC_DEST_PHYSICAL); local_irq_restore(flags); } void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector) { unsigned long flags; unsigned long cpu; local_irq_save(flags); for_each_cpu(cpu, mask) { __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu), vector, APIC_DEST_PHYSICAL); } local_irq_restore(flags); } void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector) { unsigned int cpu, this_cpu = smp_processor_id(); unsigned long flags; local_irq_save(flags); for_each_cpu(cpu, mask) { if (cpu == this_cpu) continue; __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu), vector, APIC_DEST_PHYSICAL); } local_irq_restore(flags); } /* * Helper function for APICs which insist on cpumasks */ void default_send_IPI_single(int cpu, int vector) { __apic_send_IPI_mask(cpumask_of(cpu), vector); } void default_send_IPI_allbutself(int vector) { __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector); } void default_send_IPI_all(int vector) { __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector); } void default_send_IPI_self(int vector) { __default_send_IPI_shortcut(APIC_DEST_SELF, vector); } #ifdef CONFIG_X86_32 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector) { unsigned long flags; unsigned int cpu; local_irq_save(flags); for_each_cpu(cpu, mask) __default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL); local_irq_restore(flags); } void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, int vector) { unsigned int cpu, this_cpu = smp_processor_id(); unsigned long flags; local_irq_save(flags); for_each_cpu(cpu, mask) { if (cpu == this_cpu) continue; __default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL); } local_irq_restore(flags); } void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) { unsigned long mask = cpumask_bits(cpumask)[0]; unsigned long flags; if (!mask) return; local_irq_save(flags); WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); __default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL); local_irq_restore(flags); } #ifdef CONFIG_SMP static int convert_apicid_to_cpu(int apic_id) { int i; for_each_possible_cpu(i) { if (per_cpu(x86_cpu_to_apicid, i) == apic_id) return i; } return -1; } int safe_smp_processor_id(void) { int apicid, cpuid; if 
(!boot_cpu_has(X86_FEATURE_APIC)) return 0; apicid = read_apic_id(); if (apicid == BAD_APICID) return 0; cpuid = convert_apicid_to_cpu(apicid); return cpuid >= 0 ? cpuid : 0; } #endif #endif
linux-master
arch/x86/kernel/apic/ipi.c
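Editor's note: apic_smt_update() in the ipi.c entry above only enables IPI shorthand broadcasts when they are not disabled on the command line, more than one CPU is online, and every present CPU has been booted at least once (so no local APIC is in an unknown state). The sketch below condenses that decision into plain booleans standing in for the cpumask comparisons.

#include <stdbool.h>
#include <stdio.h>

static bool use_ipi_shorthand(bool cmdline_off, int online_cpus,
			      bool all_present_booted_once)
{
	if (cmdline_off || online_cpus == 1 || !all_present_booted_once)
		return false;	/* fall back to targeted IPI masks */
	return true;		/* broadcast shorthands are safe */
}

int main(void)
{
	printf("%d\n", use_ipi_shorthand(false, 8, true));	/* 1: broadcast OK */
	printf("%d\n", use_ipi_shorthand(false, 8, false));	/* 0: a CPU never booted */
	return 0;
}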
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * SGI UV APIC functions (note: not an Intel compatible APIC) * * (C) Copyright 2020 Hewlett Packard Enterprise Development LP * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved. */ #include <linux/crash_dump.h> #include <linux/cpuhotplug.h> #include <linux/cpumask.h> #include <linux/proc_fs.h> #include <linux/memory.h> #include <linux/export.h> #include <linux/pci.h> #include <linux/acpi.h> #include <linux/efi.h> #include <asm/e820/api.h> #include <asm/uv/uv_mmrs.h> #include <asm/uv/uv_hub.h> #include <asm/uv/bios.h> #include <asm/uv/uv.h> #include <asm/apic.h> #include "local.h" static enum uv_system_type uv_system_type; static int uv_hubbed_system; static int uv_hubless_system; static u64 gru_start_paddr, gru_end_paddr; static union uvh_apicid uvh_apicid; static int uv_node_id; /* Unpack AT/OEM/TABLE ID's to be NULL terminated strings */ static u8 uv_archtype[UV_AT_SIZE + 1]; static u8 oem_id[ACPI_OEM_ID_SIZE + 1]; static u8 oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; /* Information derived from CPUID and some UV MMRs */ static struct { unsigned int apicid_shift; unsigned int apicid_mask; unsigned int socketid_shift; /* aka pnode_shift for UV2/3 */ unsigned int pnode_mask; unsigned int nasid_shift; unsigned int gpa_shift; unsigned int gnode_shift; unsigned int m_skt; unsigned int n_skt; } uv_cpuid; static int uv_min_hub_revision_id; static struct apic apic_x2apic_uv_x; static struct uv_hub_info_s uv_hub_info_node0; /* Set this to use hardware error handler instead of kernel panic: */ static int disable_uv_undefined_panic = 1; unsigned long uv_undefined(char *str) { if (likely(!disable_uv_undefined_panic)) panic("UV: error: undefined MMR: %s\n", str); else pr_crit("UV: error: undefined MMR: %s\n", str); /* Cause a machine fault: */ return ~0ul; } EXPORT_SYMBOL(uv_undefined); static unsigned long __init uv_early_read_mmr(unsigned long addr) { unsigned long val, *mmr; mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr)); val = *mmr; early_iounmap(mmr, sizeof(*mmr)); return val; } static inline bool is_GRU_range(u64 start, u64 end) { if (!gru_start_paddr) return false; return start >= gru_start_paddr && end <= gru_end_paddr; } static bool uv_is_untracked_pat_range(u64 start, u64 end) { return is_ISA_range(start, end) || is_GRU_range(start, end); } static void __init early_get_pnodeid(void) { int pnode; uv_cpuid.m_skt = 0; if (UVH_RH10_GAM_ADDR_MAP_CONFIG) { union uvh_rh10_gam_addr_map_config_u m_n_config; m_n_config.v = uv_early_read_mmr(UVH_RH10_GAM_ADDR_MAP_CONFIG); uv_cpuid.n_skt = m_n_config.s.n_skt; uv_cpuid.nasid_shift = 0; } else if (UVH_RH_GAM_ADDR_MAP_CONFIG) { union uvh_rh_gam_addr_map_config_u m_n_config; m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_ADDR_MAP_CONFIG); uv_cpuid.n_skt = m_n_config.s.n_skt; if (is_uv(UV3)) uv_cpuid.m_skt = m_n_config.s3.m_skt; if (is_uv(UV2)) uv_cpuid.m_skt = m_n_config.s2.m_skt; uv_cpuid.nasid_shift = 1; } else { unsigned long GAM_ADDR_MAP_CONFIG = 0; WARN(GAM_ADDR_MAP_CONFIG == 0, "UV: WARN: GAM_ADDR_MAP_CONFIG is not available\n"); uv_cpuid.n_skt = 0; uv_cpuid.nasid_shift = 0; } if (is_uv(UV4|UVY)) uv_cpuid.gnode_shift = 2; /* min partition is 4 sockets */ uv_cpuid.pnode_mask = (1 << uv_cpuid.n_skt) - 1; pnode = (uv_node_id >> uv_cpuid.nasid_shift) & uv_cpuid.pnode_mask; uv_cpuid.gpa_shift = 46; /* Default unless changed */ pr_info("UV: n_skt:%d pnmsk:%x 
pn:%x\n", uv_cpuid.n_skt, uv_cpuid.pnode_mask, pnode); } /* Running on a UV Hubbed system, determine which UV Hub Type it is */ static int __init early_set_hub_type(void) { union uvh_node_id_u node_id; /* * The NODE_ID MMR is always at offset 0. * Contains the chip part # + revision. * Node_id field started with 15 bits, * ... now 7 but upper 8 are masked to 0. * All blades/nodes have the same part # and hub revision. */ node_id.v = uv_early_read_mmr(UVH_NODE_ID); uv_node_id = node_id.sx.node_id; switch (node_id.s.part_number) { case UV5_HUB_PART_NUMBER: uv_min_hub_revision_id = node_id.s.revision + UV5_HUB_REVISION_BASE; uv_hub_type_set(UV5); break; /* UV4/4A only have a revision difference */ case UV4_HUB_PART_NUMBER: uv_min_hub_revision_id = node_id.s.revision + UV4_HUB_REVISION_BASE - 1; uv_hub_type_set(UV4); if (uv_min_hub_revision_id == UV4A_HUB_REVISION_BASE) uv_hub_type_set(UV4|UV4A); break; case UV3_HUB_PART_NUMBER: case UV3_HUB_PART_NUMBER_X: uv_min_hub_revision_id = node_id.s.revision + UV3_HUB_REVISION_BASE; uv_hub_type_set(UV3); break; case UV2_HUB_PART_NUMBER: case UV2_HUB_PART_NUMBER_X: uv_min_hub_revision_id = node_id.s.revision + UV2_HUB_REVISION_BASE - 1; uv_hub_type_set(UV2); break; default: return 0; } pr_info("UV: part#:%x rev:%d rev_id:%d UVtype:0x%x\n", node_id.s.part_number, node_id.s.revision, uv_min_hub_revision_id, is_uv(~0)); return 1; } static void __init uv_tsc_check_sync(void) { u64 mmr; int sync_state; int mmr_shift; char *state; /* UV5 guarantees synced TSCs; do not zero TSC_ADJUST */ if (!is_uv(UV2|UV3|UV4)) { mark_tsc_async_resets("UV5+"); return; } /* UV2,3,4, UV BIOS TSC sync state available */ mmr = uv_early_read_mmr(UVH_TSC_SYNC_MMR); mmr_shift = is_uv2_hub() ? UVH_TSC_SYNC_SHIFT_UV2K : UVH_TSC_SYNC_SHIFT; sync_state = (mmr >> mmr_shift) & UVH_TSC_SYNC_MASK; /* Check if TSC is valid for all sockets */ switch (sync_state) { case UVH_TSC_SYNC_VALID: state = "in sync"; mark_tsc_async_resets("UV BIOS"); break; /* If BIOS state unknown, don't do anything */ case UVH_TSC_SYNC_UNKNOWN: state = "unknown"; break; /* Otherwise, BIOS indicates problem with TSC */ default: state = "unstable"; mark_tsc_unstable("UV BIOS"); break; } pr_info("UV: TSC sync state from BIOS:0%d(%s)\n", sync_state, state); } /* Selector for (4|4A|5) structs */ #define uvxy_field(sname, field, undef) ( \ is_uv(UV4A) ? sname.s4a.field : \ is_uv(UV4) ? sname.s4.field : \ is_uv(UV3) ? 
sname.s3.field : \ undef) /* [Copied from arch/x86/kernel/cpu/topology.c:detect_extended_topology()] */ #define SMT_LEVEL 0 /* Leaf 0xb SMT level */ #define INVALID_TYPE 0 /* Leaf 0xb sub-leaf types */ #define SMT_TYPE 1 #define CORE_TYPE 2 #define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff) #define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f) static void set_x2apic_bits(void) { unsigned int eax, ebx, ecx, edx, sub_index; unsigned int sid_shift; cpuid(0, &eax, &ebx, &ecx, &edx); if (eax < 0xb) { pr_info("UV: CPU does not have CPUID.11\n"); return; } cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) { pr_info("UV: CPUID.11 not implemented\n"); return; } sid_shift = BITS_SHIFT_NEXT_LEVEL(eax); sub_index = 1; do { cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx); if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) { sid_shift = BITS_SHIFT_NEXT_LEVEL(eax); break; } sub_index++; } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE); uv_cpuid.apicid_shift = 0; uv_cpuid.apicid_mask = (~(-1 << sid_shift)); uv_cpuid.socketid_shift = sid_shift; } static void __init early_get_apic_socketid_shift(void) { if (is_uv2_hub() || is_uv3_hub()) uvh_apicid.v = uv_early_read_mmr(UVH_APICID); set_x2apic_bits(); pr_info("UV: apicid_shift:%d apicid_mask:0x%x\n", uv_cpuid.apicid_shift, uv_cpuid.apicid_mask); pr_info("UV: socketid_shift:%d pnode_mask:0x%x\n", uv_cpuid.socketid_shift, uv_cpuid.pnode_mask); } static void __init uv_stringify(int len, char *to, char *from) { strscpy(to, from, len); /* Trim trailing spaces */ (void)strim(to); } /* Find UV arch type entry in UVsystab */ static unsigned long __init early_find_archtype(struct uv_systab *st) { int i; for (i = 0; st->entry[i].type != UV_SYSTAB_TYPE_UNUSED; i++) { unsigned long ptr = st->entry[i].offset; if (!ptr) continue; ptr += (unsigned long)st; if (st->entry[i].type == UV_SYSTAB_TYPE_ARCH_TYPE) return ptr; } return 0; } /* Validate UV arch type field in UVsystab */ static int __init decode_arch_type(unsigned long ptr) { struct uv_arch_type_entry *uv_ate = (struct uv_arch_type_entry *)ptr; int n = strlen(uv_ate->archtype); if (n > 0 && n < sizeof(uv_ate->archtype)) { pr_info("UV: UVarchtype received from BIOS\n"); uv_stringify(sizeof(uv_archtype), uv_archtype, uv_ate->archtype); return 1; } return 0; } /* Determine if UV arch type entry might exist in UVsystab */ static int __init early_get_arch_type(void) { unsigned long uvst_physaddr, uvst_size, ptr; struct uv_systab *st; u32 rev; int ret; uvst_physaddr = get_uv_systab_phys(0); if (!uvst_physaddr) return 0; st = early_memremap_ro(uvst_physaddr, sizeof(struct uv_systab)); if (!st) { pr_err("UV: Cannot access UVsystab, remap failed\n"); return 0; } rev = st->revision; if (rev < UV_SYSTAB_VERSION_UV5) { early_memunmap(st, sizeof(struct uv_systab)); return 0; } uvst_size = st->size; early_memunmap(st, sizeof(struct uv_systab)); st = early_memremap_ro(uvst_physaddr, uvst_size); if (!st) { pr_err("UV: Cannot access UVarchtype, remap failed\n"); return 0; } ptr = early_find_archtype(st); if (!ptr) { early_memunmap(st, uvst_size); return 0; } ret = decode_arch_type(ptr); early_memunmap(st, uvst_size); return ret; } /* UV system found, check which APIC MODE BIOS already selected */ static void __init early_set_apic_mode(void) { if (x2apic_enabled()) uv_system_type = UV_X2APIC; else uv_system_type = UV_LEGACY_APIC; } static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id) { /* Save OEM_ID passed from ACPI MADT */ uv_stringify(sizeof(oem_id), oem_id, _oem_id); /* Check if 
BIOS sent us a UVarchtype */ if (!early_get_arch_type()) /* If not use OEM ID for UVarchtype */ uv_stringify(sizeof(uv_archtype), uv_archtype, oem_id); /* Check if not hubbed */ if (strncmp(uv_archtype, "SGI", 3) != 0) { /* (Not hubbed), check if not hubless */ if (strncmp(uv_archtype, "NSGI", 4) != 0) /* (Not hubless), not a UV */ return 0; /* Is UV hubless system */ uv_hubless_system = 0x01; /* UV5 Hubless */ if (strncmp(uv_archtype, "NSGI5", 5) == 0) uv_hubless_system |= 0x20; /* UV4 Hubless: CH */ else if (strncmp(uv_archtype, "NSGI4", 5) == 0) uv_hubless_system |= 0x10; /* UV3 Hubless: UV300/MC990X w/o hub */ else uv_hubless_system |= 0x8; /* Copy OEM Table ID */ uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id); pr_info("UV: OEM IDs %s/%s, SystemType %d, HUBLESS ID %x\n", oem_id, oem_table_id, uv_system_type, uv_hubless_system); return 0; } if (numa_off) { pr_err("UV: NUMA is off, disabling UV support\n"); return 0; } /* Set hubbed type if true */ uv_hub_info->hub_revision = !strncmp(uv_archtype, "SGI5", 4) ? UV5_HUB_REVISION_BASE : !strncmp(uv_archtype, "SGI4", 4) ? UV4_HUB_REVISION_BASE : !strncmp(uv_archtype, "SGI3", 4) ? UV3_HUB_REVISION_BASE : !strcmp(uv_archtype, "SGI2") ? UV2_HUB_REVISION_BASE : 0; switch (uv_hub_info->hub_revision) { case UV5_HUB_REVISION_BASE: uv_hubbed_system = 0x21; uv_hub_type_set(UV5); break; case UV4_HUB_REVISION_BASE: uv_hubbed_system = 0x11; uv_hub_type_set(UV4); break; case UV3_HUB_REVISION_BASE: uv_hubbed_system = 0x9; uv_hub_type_set(UV3); break; case UV2_HUB_REVISION_BASE: uv_hubbed_system = 0x5; uv_hub_type_set(UV2); break; default: return 0; } /* Get UV hub chip part number & revision */ early_set_hub_type(); /* Other UV setup functions */ early_set_apic_mode(); early_get_pnodeid(); early_get_apic_socketid_shift(); x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range; x86_platform.nmi_init = uv_nmi_init; uv_tsc_check_sync(); return 1; } /* Called early to probe for the correct APIC driver */ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id) { /* Set up early hub info fields for Node 0 */ uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0; /* If not UV, return. 
*/ if (uv_set_system_type(_oem_id, _oem_table_id) == 0) return 0; /* Save for display of the OEM Table ID */ uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id); pr_info("UV: OEM IDs %s/%s, System/UVType %d/0x%x, HUB RevID %d\n", oem_id, oem_table_id, uv_system_type, is_uv(UV_ANY), uv_min_hub_revision_id); return 0; } enum uv_system_type get_uv_system_type(void) { return uv_system_type; } int uv_get_hubless_system(void) { return uv_hubless_system; } EXPORT_SYMBOL_GPL(uv_get_hubless_system); ssize_t uv_get_archtype(char *buf, int len) { return scnprintf(buf, len, "%s/%s", uv_archtype, oem_table_id); } EXPORT_SYMBOL_GPL(uv_get_archtype); int is_uv_system(void) { return uv_system_type != UV_NONE; } EXPORT_SYMBOL_GPL(is_uv_system); int is_uv_hubbed(int uvtype) { return (uv_hubbed_system & uvtype); } EXPORT_SYMBOL_GPL(is_uv_hubbed); static int is_uv_hubless(int uvtype) { return (uv_hubless_system & uvtype); } void **__uv_hub_info_list; EXPORT_SYMBOL_GPL(__uv_hub_info_list); DEFINE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info); EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_info); short uv_possible_blades; EXPORT_SYMBOL_GPL(uv_possible_blades); unsigned long sn_rtc_cycles_per_second; EXPORT_SYMBOL(sn_rtc_cycles_per_second); /* The following values are used for the per node hub info struct */ static __initdata unsigned short _min_socket, _max_socket; static __initdata unsigned short _min_pnode, _max_pnode, _gr_table_len; static __initdata struct uv_gam_range_entry *uv_gre_table; static __initdata struct uv_gam_parameters *uv_gp_table; static __initdata unsigned short *_socket_to_node; static __initdata unsigned short *_socket_to_pnode; static __initdata unsigned short *_pnode_to_socket; static __initdata unsigned short *_node_to_socket; static __initdata struct uv_gam_range_s *_gr_table; #define SOCK_EMPTY ((unsigned short)~0) /* Default UV memory block size is 2GB */ static unsigned long mem_block_size __initdata = (2UL << 30); /* Kernel parameter to specify UV mem block size */ static int __init parse_mem_block_size(char *ptr) { unsigned long size = memparse(ptr, NULL); /* Size will be rounded down by set_block_size() below */ mem_block_size = size; return 0; } early_param("uv_memblksize", parse_mem_block_size); static __init int adj_blksize(u32 lgre) { unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT; unsigned long size; for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1) if (IS_ALIGNED(base, size)) break; if (size >= mem_block_size) return 0; mem_block_size = size; return 1; } static __init void set_block_size(void) { unsigned int order = ffs(mem_block_size); if (order) { /* adjust for ffs return of 1..64 */ set_memory_block_size_order(order - 1); pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size); } else { /* bad or zero value, default to 1UL << 31 (2GB) */ pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size); set_memory_block_size_order(31); } } /* Build GAM range lookup table: */ static __init void build_uv_gr_table(void) { struct uv_gam_range_entry *gre = uv_gre_table; struct uv_gam_range_s *grt; unsigned long last_limit = 0, ram_limit = 0; int bytes, i, sid, lsid = -1, indx = 0, lindx = -1; if (!gre) return; bytes = _gr_table_len * sizeof(struct uv_gam_range_s); grt = kzalloc(bytes, GFP_KERNEL); if (WARN_ON_ONCE(!grt)) return; _gr_table = grt; for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { if (gre->type == UV_GAM_RANGE_TYPE_HOLE) { if (!ram_limit) { /* Mark hole between RAM/non-RAM: */ ram_limit = last_limit; last_limit = 
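/* first hole found: the hole entry's limit becomes the new running limit */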
gre->limit; lsid++; continue; } last_limit = gre->limit; pr_info("UV: extra hole in GAM RE table @%d\n", (int)(gre - uv_gre_table)); continue; } if (_max_socket < gre->sockid) { pr_err("UV: GAM table sockid(%d) too large(>%d) @%d\n", gre->sockid, _max_socket, (int)(gre - uv_gre_table)); continue; } sid = gre->sockid - _min_socket; if (lsid < sid) { /* New range: */ grt = &_gr_table[indx]; grt->base = lindx; grt->nasid = gre->nasid; grt->limit = last_limit = gre->limit; lsid = sid; lindx = indx++; continue; } /* Update range: */ if (lsid == sid && !ram_limit) { /* .. if contiguous: */ if (grt->limit == last_limit) { grt->limit = last_limit = gre->limit; continue; } } /* Non-contiguous RAM range: */ if (!ram_limit) { grt++; grt->base = lindx; grt->nasid = gre->nasid; grt->limit = last_limit = gre->limit; continue; } /* Non-contiguous/non-RAM: */ grt++; /* base is this entry */ grt->base = grt - _gr_table; grt->nasid = gre->nasid; grt->limit = last_limit = gre->limit; lsid++; } /* Shorten table if possible */ grt++; i = grt - _gr_table; if (i < _gr_table_len) { void *ret; bytes = i * sizeof(struct uv_gam_range_s); ret = krealloc(_gr_table, bytes, GFP_KERNEL); if (ret) { _gr_table = ret; _gr_table_len = i; } } /* Display resultant GAM range table: */ for (i = 0, grt = _gr_table; i < _gr_table_len; i++, grt++) { unsigned long start, end; int gb = grt->base; start = gb < 0 ? 0 : (unsigned long)_gr_table[gb].limit << UV_GAM_RANGE_SHFT; end = (unsigned long)grt->limit << UV_GAM_RANGE_SHFT; pr_info("UV: GAM Range %2d %04x 0x%013lx-0x%013lx (%d)\n", i, grt->nasid, start, end, gb); } } static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) { unsigned long val; int pnode; pnode = uv_apicid_to_pnode(phys_apicid); val = (1UL << UVH_IPI_INT_SEND_SHFT) | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | APIC_DM_INIT; uv_write_global_mmr64(pnode, UVH_IPI_INT, val); val = (1UL << UVH_IPI_INT_SEND_SHFT) | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | APIC_DM_STARTUP; uv_write_global_mmr64(pnode, UVH_IPI_INT, val); return 0; } static void uv_send_IPI_one(int cpu, int vector) { unsigned long apicid = per_cpu(x86_cpu_to_apicid, cpu); int pnode = uv_apicid_to_pnode(apicid); unsigned long dmode, val; if (vector == NMI_VECTOR) dmode = APIC_DELIVERY_MODE_NMI; else dmode = APIC_DELIVERY_MODE_FIXED; val = (1UL << UVH_IPI_INT_SEND_SHFT) | (apicid << UVH_IPI_INT_APIC_ID_SHFT) | (dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) | (vector << UVH_IPI_INT_VECTOR_SHFT); uv_write_global_mmr64(pnode, UVH_IPI_INT, val); } static void uv_send_IPI_mask(const struct cpumask *mask, int vector) { unsigned int cpu; for_each_cpu(cpu, mask) uv_send_IPI_one(cpu, vector); } static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { unsigned int this_cpu = smp_processor_id(); unsigned int cpu; for_each_cpu(cpu, mask) { if (cpu != this_cpu) uv_send_IPI_one(cpu, vector); } } static void uv_send_IPI_allbutself(int vector) { unsigned int this_cpu = smp_processor_id(); unsigned int cpu; for_each_online_cpu(cpu) { if (cpu != this_cpu) uv_send_IPI_one(cpu, vector); } } static void uv_send_IPI_all(int vector) { uv_send_IPI_mask(cpu_online_mask, vector); } static u32 set_apic_id(unsigned int id) { return id; } static unsigned int uv_read_apic_id(void) { return x2apic_get_apic_id(apic_read(APIC_ID)); } static int uv_phys_pkg_id(int initial_apicid, int index_msb) { return uv_read_apic_id() >> index_msb; } static int 
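/* APIC driver probe: nonzero only when the UV x2APIC driver ends up selected */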
uv_probe(void) { return apic == &apic_x2apic_uv_x; } static struct apic apic_x2apic_uv_x __ro_after_init = { .name = "UV large system", .probe = uv_probe, .acpi_madt_oem_check = uv_acpi_madt_oem_check, .delivery_mode = APIC_DELIVERY_MODE_FIXED, .dest_mode_logical = false, .disable_esr = 0, .cpu_present_to_apicid = default_cpu_present_to_apicid, .phys_pkg_id = uv_phys_pkg_id, .max_apic_id = UINT_MAX, .get_apic_id = x2apic_get_apic_id, .set_apic_id = set_apic_id, .calc_dest_apicid = apic_default_calc_apicid, .send_IPI = uv_send_IPI_one, .send_IPI_mask = uv_send_IPI_mask, .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, .send_IPI_allbutself = uv_send_IPI_allbutself, .send_IPI_all = uv_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, .wakeup_secondary_cpu = uv_wakeup_secondary, .read = native_apic_msr_read, .write = native_apic_msr_write, .eoi = native_apic_msr_eoi, .icr_read = native_x2apic_icr_read, .icr_write = native_x2apic_icr_write, }; #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH 3 #define DEST_SHIFT UVXH_RH_GAM_ALIAS_0_REDIRECT_CONFIG_DEST_BASE_SHFT static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) { union uvh_rh_gam_alias_2_overlay_config_u alias; union uvh_rh_gam_alias_2_redirect_config_u redirect; unsigned long m_redirect; unsigned long m_overlay; int i; for (i = 0; i < UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH; i++) { switch (i) { case 0: m_redirect = UVH_RH_GAM_ALIAS_0_REDIRECT_CONFIG; m_overlay = UVH_RH_GAM_ALIAS_0_OVERLAY_CONFIG; break; case 1: m_redirect = UVH_RH_GAM_ALIAS_1_REDIRECT_CONFIG; m_overlay = UVH_RH_GAM_ALIAS_1_OVERLAY_CONFIG; break; case 2: m_redirect = UVH_RH_GAM_ALIAS_2_REDIRECT_CONFIG; m_overlay = UVH_RH_GAM_ALIAS_2_OVERLAY_CONFIG; break; } alias.v = uv_read_local_mmr(m_overlay); if (alias.s.enable && alias.s.base == 0) { *size = (1UL << alias.s.m_alias); redirect.v = uv_read_local_mmr(m_redirect); *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT; return; } } *base = *size = 0; } enum map_type {map_wb, map_uc}; static const char * const mt[] = { "WB", "UC" }; static __init void map_high(char *id, unsigned long base, int pshift, int bshift, int max_pnode, enum map_type map_type) { unsigned long bytes, paddr; paddr = base << pshift; bytes = (1UL << bshift) * (max_pnode + 1); if (!paddr) { pr_info("UV: Map %s_HI base address NULL\n", id); return; } if (map_type == map_uc) init_extra_mapping_uc(paddr, bytes); else init_extra_mapping_wb(paddr, bytes); pr_info("UV: Map %s_HI 0x%lx - 0x%lx %s (%d segments)\n", id, paddr, paddr + bytes, mt[map_type], max_pnode + 1); } static __init void map_gru_high(int max_pnode) { union uvh_rh_gam_gru_overlay_config_u gru; unsigned long mask, base; int shift; if (UVH_RH_GAM_GRU_OVERLAY_CONFIG) { gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG); shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT; mask = UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK; } else if (UVH_RH10_GAM_GRU_OVERLAY_CONFIG) { gru.v = uv_read_local_mmr(UVH_RH10_GAM_GRU_OVERLAY_CONFIG); shift = UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT; mask = UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_MASK; } else { pr_err("UV: GRU unavailable (no MMR)\n"); return; } if (!gru.s.enable) { pr_info("UV: GRU disabled (by BIOS)\n"); return; } base = (gru.v & mask) >> shift; map_high("GRU", base, shift, shift, max_pnode, map_wb); gru_start_paddr = ((u64)base << shift); gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1); } static __init void map_mmr_high(int max_pnode) { unsigned long base; int shift; bool enable; if 
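/* prefer the RH10 (UVY-class hub) flavor of the MMR overlay register when present */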
(UVH_RH10_GAM_MMR_OVERLAY_CONFIG) { union uvh_rh10_gam_mmr_overlay_config_u mmr; mmr.v = uv_read_local_mmr(UVH_RH10_GAM_MMR_OVERLAY_CONFIG); enable = mmr.s.enable; base = mmr.s.base; shift = UVH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT; } else if (UVH_RH_GAM_MMR_OVERLAY_CONFIG) { union uvh_rh_gam_mmr_overlay_config_u mmr; mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG); enable = mmr.s.enable; base = mmr.s.base; shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT; } else { pr_err("UV:%s:RH_GAM_MMR_OVERLAY_CONFIG MMR undefined?\n", __func__); return; } if (enable) map_high("MMR", base, shift, shift, max_pnode, map_uc); else pr_info("UV: MMR disabled\n"); } /* Arch specific ENUM cases */ enum mmioh_arch { UV2_MMIOH = -1, UVY_MMIOH0, UVY_MMIOH1, UVX_MMIOH0, UVX_MMIOH1, }; /* Calculate and Map MMIOH Regions */ static void __init calc_mmioh_map(enum mmioh_arch index, int min_pnode, int max_pnode, int shift, unsigned long base, int m_io, int n_io) { unsigned long mmr, nasid_mask; int nasid, min_nasid, max_nasid, lnasid, mapped; int i, fi, li, n, max_io; char id[8]; /* One (UV2) mapping */ if (index == UV2_MMIOH) { strscpy(id, "MMIOH", sizeof(id)); max_io = max_pnode; mapped = 0; goto map_exit; } /* small and large MMIOH mappings */ switch (index) { case UVY_MMIOH0: mmr = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0; nasid_mask = UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK; n = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH; min_nasid = min_pnode; max_nasid = max_pnode; mapped = 1; break; case UVY_MMIOH1: mmr = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1; nasid_mask = UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK; n = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH; min_nasid = min_pnode; max_nasid = max_pnode; mapped = 1; break; case UVX_MMIOH0: mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0; nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK; n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH; min_nasid = min_pnode * 2; max_nasid = max_pnode * 2; mapped = 1; break; case UVX_MMIOH1: mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1; nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK; n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH; min_nasid = min_pnode * 2; max_nasid = max_pnode * 2; mapped = 1; break; default: pr_err("UV:%s:Invalid mapping type:%d\n", __func__, index); return; } /* enum values chosen so (index mod 2) is MMIOH 0/1 (low/high) */ snprintf(id, sizeof(id), "MMIOH%d", index%2); max_io = lnasid = fi = li = -1; for (i = 0; i < n; i++) { unsigned long m_redirect = mmr + i * 8; unsigned long redirect = uv_read_local_mmr(m_redirect); nasid = redirect & nasid_mask; if (i == 0) pr_info("UV: %s redirect base 0x%lx(@0x%lx) 0x%04x\n", id, redirect, m_redirect, nasid); /* Invalid NASID check */ if (nasid < min_nasid || max_nasid < nasid) { /* Not an error: unused table entries get "poison" values */ pr_debug("UV:%s:Invalid NASID(%x):%x (range:%x..%x)\n", __func__, index, nasid, min_nasid, max_nasid); nasid = -1; } if (nasid == lnasid) { li = i; /* Last entry check: */ if (i != n-1) continue; } /* Check if we have a cached (or last) redirect to print: */ if (lnasid != -1 || (i == n-1 && nasid != -1)) { unsigned long addr1, addr2; int f, l; if (lnasid == -1) { f = l = i; lnasid = nasid; } else { f = fi; l = li; } addr1 = (base << shift) + f * (1ULL << m_io); addr2 = (base << shift) + (l + 1) * (1ULL << m_io); pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n", id, fi, li, lnasid, addr1, addr2); if (max_io < l) max_io = l; } fi = li = i; lnasid = nasid; } map_exit: pr_info("UV: %s base:0x%lx shift:%d m_io:%d 
max_io:%d max_pnode:0x%x\n", id, base, shift, m_io, max_io, max_pnode); if (max_io >= 0 && !mapped) map_high(id, base, shift, m_io, max_io, map_uc); } static __init void map_mmioh_high(int min_pnode, int max_pnode) { /* UVY flavor */ if (UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0) { union uvh_rh10_gam_mmioh_overlay_config0_u mmioh0; union uvh_rh10_gam_mmioh_overlay_config1_u mmioh1; mmioh0.v = uv_read_local_mmr(UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0); if (unlikely(mmioh0.s.enable == 0)) pr_info("UV: MMIOH0 disabled\n"); else calc_mmioh_map(UVY_MMIOH0, min_pnode, max_pnode, UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT, mmioh0.s.base, mmioh0.s.m_io, mmioh0.s.n_io); mmioh1.v = uv_read_local_mmr(UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1); if (unlikely(mmioh1.s.enable == 0)) pr_info("UV: MMIOH1 disabled\n"); else calc_mmioh_map(UVY_MMIOH1, min_pnode, max_pnode, UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT, mmioh1.s.base, mmioh1.s.m_io, mmioh1.s.n_io); return; } /* UVX flavor */ if (UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0) { union uvh_rh_gam_mmioh_overlay_config0_u mmioh0; union uvh_rh_gam_mmioh_overlay_config1_u mmioh1; mmioh0.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0); if (unlikely(mmioh0.s.enable == 0)) pr_info("UV: MMIOH0 disabled\n"); else { unsigned long base = uvxy_field(mmioh0, base, 0); int m_io = uvxy_field(mmioh0, m_io, 0); int n_io = uvxy_field(mmioh0, n_io, 0); calc_mmioh_map(UVX_MMIOH0, min_pnode, max_pnode, UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT, base, m_io, n_io); } mmioh1.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1); if (unlikely(mmioh1.s.enable == 0)) pr_info("UV: MMIOH1 disabled\n"); else { unsigned long base = uvxy_field(mmioh1, base, 0); int m_io = uvxy_field(mmioh1, m_io, 0); int n_io = uvxy_field(mmioh1, n_io, 0); calc_mmioh_map(UVX_MMIOH1, min_pnode, max_pnode, UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT, base, m_io, n_io); } return; } /* UV2 flavor */ if (UVH_RH_GAM_MMIOH_OVERLAY_CONFIG) { union uvh_rh_gam_mmioh_overlay_config_u mmioh; mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG); if (unlikely(mmioh.s2.enable == 0)) pr_info("UV: MMIOH disabled\n"); else calc_mmioh_map(UV2_MMIOH, min_pnode, max_pnode, UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT, mmioh.s2.base, mmioh.s2.m_io, mmioh.s2.n_io); return; } } static __init void map_low_mmrs(void) { if (UV_GLOBAL_MMR32_BASE) init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE); if (UV_LOCAL_MMR_BASE) init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE); } static __init void uv_rtc_init(void) { long status; u64 ticks_per_sec; status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec); if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) { pr_warn("UV: unable to determine platform RTC clock frequency, guessing.\n"); /* BIOS gives wrong value for clock frequency, so guess: */ sn_rtc_cycles_per_second = 1000000000000UL / 30000UL; } else { sn_rtc_cycles_per_second = ticks_per_sec; } } /* Direct Legacy VGA I/O traffic to designated IOH */ static int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags) { int domain, bus, rc; if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE)) return 0; if ((command_bits & PCI_COMMAND_IO) == 0) return 0; domain = pci_domain_nr(pdev->bus); bus = pdev->bus->number; rc = uv_bios_set_legacy_vga_target(decode, domain, bus); return rc; } /* * Called on each CPU to initialize the per_cpu UV data area. * FIXME: hotplug not supported yet */ void uv_cpu_init(void) { /* CPU 0 initialization will be done via uv_system_init. 
*/ if (smp_processor_id() == 0) return; uv_hub_info->nr_online_cpus++; } struct mn { unsigned char m_val; unsigned char n_val; unsigned char m_shift; unsigned char n_lshift; }; /* Initialize caller's MN struct and fill in values */ static void get_mn(struct mn *mnp) { memset(mnp, 0, sizeof(*mnp)); mnp->n_val = uv_cpuid.n_skt; if (is_uv(UV4|UVY)) { mnp->m_val = 0; mnp->n_lshift = 0; } else if (is_uv3_hub()) { union uvyh_gr0_gam_gr_config_u m_gr_config; mnp->m_val = uv_cpuid.m_skt; m_gr_config.v = uv_read_local_mmr(UVH_GR0_GAM_GR_CONFIG); mnp->n_lshift = m_gr_config.s3.m_skt; } else if (is_uv2_hub()) { mnp->m_val = uv_cpuid.m_skt; mnp->n_lshift = mnp->m_val == 40 ? 40 : 39; } mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0; } static void __init uv_init_hub_info(struct uv_hub_info_s *hi) { struct mn mn; get_mn(&mn); hi->gpa_mask = mn.m_val ? (1UL << (mn.m_val + mn.n_val)) - 1 : (1UL << uv_cpuid.gpa_shift) - 1; hi->m_val = mn.m_val; hi->n_val = mn.n_val; hi->m_shift = mn.m_shift; hi->n_lshift = mn.n_lshift ? mn.n_lshift : 0; hi->hub_revision = uv_hub_info->hub_revision; hi->hub_type = uv_hub_info->hub_type; hi->pnode_mask = uv_cpuid.pnode_mask; hi->nasid_shift = uv_cpuid.nasid_shift; hi->min_pnode = _min_pnode; hi->min_socket = _min_socket; hi->node_to_socket = _node_to_socket; hi->pnode_to_socket = _pnode_to_socket; hi->socket_to_node = _socket_to_node; hi->socket_to_pnode = _socket_to_pnode; hi->gr_table_len = _gr_table_len; hi->gr_table = _gr_table; uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val); hi->gnode_extra = (uv_node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1; if (mn.m_val) hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val; if (uv_gp_table) { hi->global_mmr_base = uv_gp_table->mmr_base; hi->global_mmr_shift = uv_gp_table->mmr_shift; hi->global_gru_base = uv_gp_table->gru_base; hi->global_gru_shift = uv_gp_table->gru_shift; hi->gpa_shift = uv_gp_table->gpa_shift; hi->gpa_mask = (1UL << hi->gpa_shift) - 1; } else { hi->global_mmr_base = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG) & ~UV_MMR_ENABLE; hi->global_mmr_shift = _UV_GLOBAL_MMR64_PNODE_SHIFT; } get_lowmem_redirect(&hi->lowmem_remap_base, &hi->lowmem_remap_top); hi->apic_pnode_shift = uv_cpuid.socketid_shift; /* Show system specific info: */ pr_info("UV: N:%d M:%d m_shift:%d n_lshift:%d\n", hi->n_val, hi->m_val, hi->m_shift, hi->n_lshift); pr_info("UV: gpa_mask/shift:0x%lx/%d pnode_mask:0x%x apic_pns:%d\n", hi->gpa_mask, hi->gpa_shift, hi->pnode_mask, hi->apic_pnode_shift); pr_info("UV: mmr_base/shift:0x%lx/%ld\n", hi->global_mmr_base, hi->global_mmr_shift); if (hi->global_gru_base) pr_info("UV: gru_base/shift:0x%lx/%ld\n", hi->global_gru_base, hi->global_gru_shift); pr_info("UV: gnode_upper:0x%lx gnode_extra:0x%x\n", hi->gnode_upper, hi->gnode_extra); } static void __init decode_gam_params(unsigned long ptr) { uv_gp_table = (struct uv_gam_parameters *)ptr; pr_info("UV: GAM Params...\n"); pr_info("UV: mmr_base/shift:0x%llx/%d gru_base/shift:0x%llx/%d gpa_shift:%d\n", uv_gp_table->mmr_base, uv_gp_table->mmr_shift, uv_gp_table->gru_base, uv_gp_table->gru_shift, uv_gp_table->gpa_shift); } static void __init decode_gam_rng_tbl(unsigned long ptr) { struct uv_gam_range_entry *gre = (struct uv_gam_range_entry *)ptr; unsigned long lgre = 0, gend = 0; int index = 0; int sock_min = INT_MAX, pnode_min = INT_MAX; int sock_max = -1, pnode_max = -1; uv_gre_table = gre; for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { unsigned long size = ((unsigned long)(gre->limit - lgre) << UV_GAM_RANGE_SHFT); int 
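/* scale the range size down by powers of 1024; 'order' indexes the " KMGTPE" suffix */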
order = 0; char suffix[] = " KMGTPE"; int flag = ' '; while (size > 9999 && order < sizeof(suffix)) { size /= 1024; order++; } /* adjust max block size to current range start */ if (gre->type == 1 || gre->type == 2) if (adj_blksize(lgre)) flag = '*'; if (!index) { pr_info("UV: GAM Range Table...\n"); pr_info("UV: # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN"); } pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d %04x %02x %02x\n", index++, (unsigned long)lgre << UV_GAM_RANGE_SHFT, (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, flag, size, suffix[order], gre->type, gre->nasid, gre->sockid, gre->pnode); if (gre->type == UV_GAM_RANGE_TYPE_HOLE) gend = (unsigned long)gre->limit << UV_GAM_RANGE_SHFT; /* update to next range start */ lgre = gre->limit; if (sock_min > gre->sockid) sock_min = gre->sockid; if (sock_max < gre->sockid) sock_max = gre->sockid; if (pnode_min > gre->pnode) pnode_min = gre->pnode; if (pnode_max < gre->pnode) pnode_max = gre->pnode; } _min_socket = sock_min; _max_socket = sock_max; _min_pnode = pnode_min; _max_pnode = pnode_max; _gr_table_len = index; pr_info("UV: GRT: %d entries, sockets(min:%x,max:%x), pnodes(min:%x,max:%x), gap_end(%d)\n", index, _min_socket, _max_socket, _min_pnode, _max_pnode, fls64(gend)); } /* Walk through UVsystab decoding the fields */ static int __init decode_uv_systab(void) { struct uv_systab *st; int i; /* Get mapped UVsystab pointer */ st = uv_systab; /* If UVsystab is version 1, there is no extended UVsystab */ if (st && st->revision == UV_SYSTAB_VERSION_1) return 0; if ((!st) || (st->revision < UV_SYSTAB_VERSION_UV4_LATEST)) { int rev = st ? st->revision : 0; pr_err("UV: BIOS UVsystab mismatch, (%x < %x)\n", rev, UV_SYSTAB_VERSION_UV4_LATEST); pr_err("UV: Does not support UV, switch to non-UV x86_64\n"); uv_system_type = UV_NONE; return -EINVAL; } for (i = 0; st->entry[i].type != UV_SYSTAB_TYPE_UNUSED; i++) { unsigned long ptr = st->entry[i].offset; if (!ptr) continue; /* point to payload */ ptr += (unsigned long)st; switch (st->entry[i].type) { case UV_SYSTAB_TYPE_GAM_PARAMS: decode_gam_params(ptr); break; case UV_SYSTAB_TYPE_GAM_RNG_TBL: decode_gam_rng_tbl(ptr); break; case UV_SYSTAB_TYPE_ARCH_TYPE: /* already processed in early startup */ break; default: pr_err("UV:%s:Unrecognized UV_SYSTAB_TYPE:%d, skipped\n", __func__, st->entry[i].type); break; } } return 0; } /* * Given a bitmask 'bits' representing presnt blades, numbered * starting at 'base', masking off unused high bits of blade number * with 'mask', update the minimum and maximum blade numbers that we * have found. (Masking with 'mask' necessary because of BIOS * treatment of system partitioning when creating this table we are * interpreting.) 
*/ static inline void blade_update_min_max(unsigned long bits, int base, int mask, int *min, int *max) { int first, last; if (!bits) return; first = (base + __ffs(bits)) & mask; last = (base + __fls(bits)) & mask; if (*min > first) *min = first; if (*max < last) *max = last; } /* Set up physical blade translations from UVH_NODE_PRESENT_TABLE */ static __init void boot_init_possible_blades(struct uv_hub_info_s *hub_info) { unsigned long np; int i, uv_pb = 0; int sock_min = INT_MAX, sock_max = -1, s_mask; s_mask = (1 << uv_cpuid.n_skt) - 1; if (UVH_NODE_PRESENT_TABLE) { pr_info("UV: NODE_PRESENT_DEPTH = %d\n", UVH_NODE_PRESENT_TABLE_DEPTH); for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) { np = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8); pr_info("UV: NODE_PRESENT(%d) = 0x%016lx\n", i, np); blade_update_min_max(np, i * 64, s_mask, &sock_min, &sock_max); } } if (UVH_NODE_PRESENT_0) { np = uv_read_local_mmr(UVH_NODE_PRESENT_0); pr_info("UV: NODE_PRESENT_0 = 0x%016lx\n", np); blade_update_min_max(np, 0, s_mask, &sock_min, &sock_max); } if (UVH_NODE_PRESENT_1) { np = uv_read_local_mmr(UVH_NODE_PRESENT_1); pr_info("UV: NODE_PRESENT_1 = 0x%016lx\n", np); blade_update_min_max(np, 64, s_mask, &sock_min, &sock_max); } /* Only update if we actually found some bits indicating blades present */ if (sock_max >= sock_min) { _min_socket = sock_min; _max_socket = sock_max; uv_pb = sock_max - sock_min + 1; } if (uv_possible_blades != uv_pb) uv_possible_blades = uv_pb; pr_info("UV: number nodes/possible blades %d (%d - %d)\n", uv_pb, sock_min, sock_max); } static int __init alloc_conv_table(int num_elem, unsigned short **table) { int i; size_t bytes; bytes = num_elem * sizeof(*table[0]); *table = kmalloc(bytes, GFP_KERNEL); if (WARN_ON_ONCE(!*table)) return -ENOMEM; for (i = 0; i < num_elem; i++) ((unsigned short *)*table)[i] = SOCK_EMPTY; return 0; } /* Remove conversion table if it's 1:1 */ #define FREE_1_TO_1_TABLE(tbl, min, max, max2) free_1_to_1_table(&tbl, #tbl, min, max, max2) static void __init free_1_to_1_table(unsigned short **tp, char *tname, int min, int max, int max2) { int i; unsigned short *table = *tp; if (table == NULL) return; if (max != max2) return; for (i = 0; i < max; i++) { if (i != table[i]) return; } kfree(table); *tp = NULL; pr_info("UV: %s is 1:1, conversion table removed\n", tname); } /* * Build Socket Tables * If the number of nodes is >1 per socket, socket to node table will * contain lowest node number on that socket. 
*/ static void __init build_socket_tables(void) { struct uv_gam_range_entry *gre = uv_gre_table; int nums, numn, nump; int i, lnid, apicid; int minsock = _min_socket; int maxsock = _max_socket; int minpnode = _min_pnode; int maxpnode = _max_pnode; if (!gre) { if (is_uv2_hub() || is_uv3_hub()) { pr_info("UV: No UVsystab socket table, ignoring\n"); return; } pr_err("UV: Error: UVsystab address translations not available!\n"); WARN_ON_ONCE(!gre); return; } numn = num_possible_nodes(); nump = maxpnode - minpnode + 1; nums = maxsock - minsock + 1; /* Allocate and clear tables */ if ((alloc_conv_table(nump, &_pnode_to_socket) < 0) || (alloc_conv_table(nums, &_socket_to_pnode) < 0) || (alloc_conv_table(numn, &_node_to_socket) < 0) || (alloc_conv_table(nums, &_socket_to_node) < 0)) { kfree(_pnode_to_socket); kfree(_socket_to_pnode); kfree(_node_to_socket); return; } /* Fill in pnode/node/addr conversion list values: */ for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { if (gre->type == UV_GAM_RANGE_TYPE_HOLE) continue; i = gre->sockid - minsock; if (_socket_to_pnode[i] == SOCK_EMPTY) _socket_to_pnode[i] = gre->pnode; i = gre->pnode - minpnode; if (_pnode_to_socket[i] == SOCK_EMPTY) _pnode_to_socket[i] = gre->sockid; pr_info("UV: sid:%02x type:%d nasid:%04x pn:%02x pn2s:%2x\n", gre->sockid, gre->type, gre->nasid, _socket_to_pnode[gre->sockid - minsock], _pnode_to_socket[gre->pnode - minpnode]); } /* Set socket -> node values: */ lnid = NUMA_NO_NODE; for (apicid = 0; apicid < ARRAY_SIZE(__apicid_to_node); apicid++) { int nid = __apicid_to_node[apicid]; int sockid; if ((nid == NUMA_NO_NODE) || (lnid == nid)) continue; lnid = nid; sockid = apicid >> uv_cpuid.socketid_shift; if (_socket_to_node[sockid - minsock] == SOCK_EMPTY) _socket_to_node[sockid - minsock] = nid; if (_node_to_socket[nid] == SOCK_EMPTY) _node_to_socket[nid] = sockid; pr_info("UV: sid:%02x: apicid:%04x socket:%02d node:%03x s2n:%03x\n", sockid, apicid, _node_to_socket[nid], nid, _socket_to_node[sockid - minsock]); } /* * If e.g. socket id == pnode for all pnodes, * system runs faster by removing corresponding conversion table. */ FREE_1_TO_1_TABLE(_socket_to_node, _min_socket, nums, numn); FREE_1_TO_1_TABLE(_node_to_socket, _min_socket, nums, numn); FREE_1_TO_1_TABLE(_socket_to_pnode, _min_pnode, nums, nump); FREE_1_TO_1_TABLE(_pnode_to_socket, _min_pnode, nums, nump); } /* Check which reboot to use */ static void check_efi_reboot(void) { /* If EFI reboot not available, use ACPI reboot */ if (!efi_enabled(EFI_BOOT)) reboot_type = BOOT_ACPI; } /* * User proc fs file handling now deprecated. * Recommend using /sys/firmware/sgi_uv/... instead. 
*/ static int __maybe_unused proc_hubbed_show(struct seq_file *file, void *data) { pr_notice_once("%s: using deprecated /proc/sgi_uv/hubbed, use /sys/firmware/sgi_uv/hub_type\n", current->comm); seq_printf(file, "0x%x\n", uv_hubbed_system); return 0; } static int __maybe_unused proc_hubless_show(struct seq_file *file, void *data) { pr_notice_once("%s: using deprecated /proc/sgi_uv/hubless, use /sys/firmware/sgi_uv/hubless\n", current->comm); seq_printf(file, "0x%x\n", uv_hubless_system); return 0; } static int __maybe_unused proc_archtype_show(struct seq_file *file, void *data) { pr_notice_once("%s: using deprecated /proc/sgi_uv/archtype, use /sys/firmware/sgi_uv/archtype\n", current->comm); seq_printf(file, "%s/%s\n", uv_archtype, oem_table_id); return 0; } static __init void uv_setup_proc_files(int hubless) { struct proc_dir_entry *pde; pde = proc_mkdir(UV_PROC_NODE, NULL); proc_create_single("archtype", 0, pde, proc_archtype_show); if (hubless) proc_create_single("hubless", 0, pde, proc_hubless_show); else proc_create_single("hubbed", 0, pde, proc_hubbed_show); } /* Initialize UV hubless systems */ static __init int uv_system_init_hubless(void) { int rc; /* Setup PCH NMI handler */ uv_nmi_setup_hubless(); /* Init kernel/BIOS interface */ rc = uv_bios_init(); if (rc < 0) return rc; /* Process UVsystab */ rc = decode_uv_systab(); if (rc < 0) return rc; /* Set section block size for current node memory */ set_block_size(); /* Create user access node */ if (rc >= 0) uv_setup_proc_files(1); check_efi_reboot(); return rc; } static void __init uv_system_init_hub(void) { struct uv_hub_info_s hub_info = {0}; int bytes, cpu, nodeid, bid; unsigned short min_pnode = USHRT_MAX, max_pnode = 0; char *hub = is_uv5_hub() ? "UV500" : is_uv4_hub() ? "UV400" : is_uv3_hub() ? "UV300" : is_uv2_hub() ? "UV2000/3000" : NULL; struct uv_hub_info_s **uv_hub_info_list_blade; if (!hub) { pr_err("UV: Unknown/unsupported UV hub\n"); return; } pr_info("UV: Found %s hub\n", hub); map_low_mmrs(); /* Get uv_systab for decoding, setup UV BIOS calls */ uv_bios_init(); /* If there's an UVsystab problem then abort UV init: */ if (decode_uv_systab() < 0) { pr_err("UV: Mangled UVsystab format\n"); return; } build_socket_tables(); build_uv_gr_table(); set_block_size(); uv_init_hub_info(&hub_info); /* If UV2 or UV3 may need to get # blades from HW */ if (is_uv(UV2|UV3) && !uv_gre_table) boot_init_possible_blades(&hub_info); else /* min/max sockets set in decode_gam_rng_tbl */ uv_possible_blades = (_max_socket - _min_socket) + 1; /* uv_num_possible_blades() is really the hub count: */ pr_info("UV: Found %d hubs, %d nodes, %d CPUs\n", uv_num_possible_blades(), num_possible_nodes(), num_possible_cpus()); uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id, &sn_region_size, &system_serial_number); hub_info.coherency_domain_number = sn_coherency_id; uv_rtc_init(); /* * __uv_hub_info_list[] is indexed by node, but there is only * one hub_info structure per blade. First, allocate one * structure per blade. Further down we create a per-node * table (__uv_hub_info_list[]) pointing to hub_info * structures for the correct blade. */ bytes = sizeof(void *) * uv_num_possible_blades(); uv_hub_info_list_blade = kzalloc(bytes, GFP_KERNEL); if (WARN_ON_ONCE(!uv_hub_info_list_blade)) return; bytes = sizeof(struct uv_hub_info_s); for_each_possible_blade(bid) { struct uv_hub_info_s *new_hub; /* Allocate & fill new per hub info list */ new_hub = (bid == 0) ? 
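/* blade 0 reuses the statically allocated uv_hub_info_node0; other blades get a node-local allocation */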
&uv_hub_info_node0 : kzalloc_node(bytes, GFP_KERNEL, uv_blade_to_node(bid)); if (WARN_ON_ONCE(!new_hub)) { /* do not kfree() bid 0, which is statically allocated */ while (--bid > 0) kfree(uv_hub_info_list_blade[bid]); kfree(uv_hub_info_list_blade); return; } uv_hub_info_list_blade[bid] = new_hub; *new_hub = hub_info; /* Use information from GAM table if available: */ if (uv_gre_table) new_hub->pnode = uv_blade_to_pnode(bid); else /* Or fill in during CPU loop: */ new_hub->pnode = 0xffff; new_hub->numa_blade_id = bid; new_hub->memory_nid = NUMA_NO_NODE; new_hub->nr_possible_cpus = 0; new_hub->nr_online_cpus = 0; } /* * Now populate __uv_hub_info_list[] for each node with the * pointer to the struct for the blade it resides on. */ bytes = sizeof(void *) * num_possible_nodes(); __uv_hub_info_list = kzalloc(bytes, GFP_KERNEL); if (WARN_ON_ONCE(!__uv_hub_info_list)) { for_each_possible_blade(bid) /* bid 0 is statically allocated */ if (bid != 0) kfree(uv_hub_info_list_blade[bid]); kfree(uv_hub_info_list_blade); return; } for_each_node(nodeid) __uv_hub_info_list[nodeid] = uv_hub_info_list_blade[uv_node_to_blade_id(nodeid)]; /* Initialize per CPU info: */ for_each_possible_cpu(cpu) { int apicid = per_cpu(x86_cpu_to_apicid, cpu); unsigned short bid; unsigned short pnode; pnode = uv_apicid_to_pnode(apicid); bid = uv_pnode_to_socket(pnode) - _min_socket; uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list_blade[bid]; uv_cpu_info_per(cpu)->blade_cpu_id = uv_cpu_hub_info(cpu)->nr_possible_cpus++; if (uv_cpu_hub_info(cpu)->memory_nid == NUMA_NO_NODE) uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu); if (uv_cpu_hub_info(cpu)->pnode == 0xffff) uv_cpu_hub_info(cpu)->pnode = pnode; } for_each_possible_blade(bid) { unsigned short pnode = uv_hub_info_list_blade[bid]->pnode; if (pnode == 0xffff) continue; min_pnode = min(pnode, min_pnode); max_pnode = max(pnode, max_pnode); pr_info("UV: HUB:%2d pn:%02x nrcpus:%d\n", bid, uv_hub_info_list_blade[bid]->pnode, uv_hub_info_list_blade[bid]->nr_possible_cpus); } pr_info("UV: min_pnode:%02x max_pnode:%02x\n", min_pnode, max_pnode); map_gru_high(max_pnode); map_mmr_high(max_pnode); map_mmioh_high(min_pnode, max_pnode); kfree(uv_hub_info_list_blade); uv_hub_info_list_blade = NULL; uv_nmi_setup(); uv_cpu_init(); uv_setup_proc_files(0); /* Register Legacy VGA I/O redirection handler: */ pci_register_set_vga_state(uv_set_vga_state); check_efi_reboot(); } /* * There is a different code path needed to initialize a UV system that does * not have a "UV HUB" (referred to as "hubless"). */ void __init uv_system_init(void) { if (likely(!is_uv_system() && !is_uv_hubless(1))) return; if (is_uv_system()) uv_system_init_hub(); else uv_system_init_hubless(); } apic_driver(apic_x2apic_uv_x);
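/*
 * Editor's sketch -- not part of the kernel file above. It illustrates, in
 * plain user-space C, the min/max computation that blade_update_min_max()
 * performs on the NODE_PRESENT bitmask words read by
 * boot_init_possible_blades(). The kernel's __ffs()/__fls() are modeled
 * with GCC builtins, a 64-bit unsigned long is assumed, and all sketch_*
 * names and example values are illustrative only.
 */
#include <limits.h>
#include <stdio.h>

static void sketch_update_min_max(unsigned long bits, int base, int mask,
				  int *min, int *max)
{
	int first, last;

	if (!bits)
		return;
	first = (base + __builtin_ctzl(bits)) & mask;		/* lowest set bit */
	last  = (base + (63 - __builtin_clzl(bits))) & mask;	/* highest set bit */
	if (*min > first)
		*min = first;
	if (*max < last)
		*max = last;
}

int main(void)
{
	int min = INT_MAX, max = -1;

	/* Two NODE_PRESENT words: sockets 1 and 3 in the first, socket 64+2 in the second */
	sketch_update_min_max(0x0a, 0, 0x7f, &min, &max);
	sketch_update_min_max(0x04, 64, 0x7f, &min, &max);
	printf("min:%d max:%d possible blades:%d\n", min, max, max - min + 1);
	return 0;
}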
linux-master
arch/x86/kernel/apic/x2apic_uv_x.c
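/*
 * Editor's sketch -- not part of the kernel file below. It shows the
 * rescaling that calibrate_by_pmtimer() (further down in this file) applies
 * when the measured calibration window did not last exactly 100 ms: the
 * observed APIC count is multiplied by pm_100ms and divided by the measured
 * PM-timer delta. The 3.579545 MHz ACPI PM timer rate is a platform
 * constant; the SKETCH_* macro and all sketch_* names and example values
 * are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PMTMR_TICKS_PER_SEC	3579545UL	/* ACPI PM timer, 3.579545 MHz */

static long sketch_rescale_to_100ms(long apic_delta, long pm_delta)
{
	const uint64_t pm_100ms = SKETCH_PMTMR_TICKS_PER_SEC / 10;

	/* apic_delta ticks were observed over pm_delta PM-timer ticks;
	 * return the count expected over a true 100 ms window. */
	return (long)(((uint64_t)apic_delta * pm_100ms) / pm_delta);
}

int main(void)
{
	/* Example: the nominal 100 ms calibration window actually lasted ~110 ms */
	long measured_pm   = SKETCH_PMTMR_TICKS_PER_SEC / 10 * 11 / 10;
	long measured_apic = 1100000;

	printf("corrected APIC count: %ld\n",
	       sketch_rescale_to_100ms(measured_apic, measured_pm));
	return 0;
}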
// SPDX-License-Identifier: GPL-2.0-only /* * Local APIC handling, local APIC timers * * (c) 1999, 2000, 2009 Ingo Molnar <[email protected]> * * Fixes * Maciej W. Rozycki : Bits for genuine 82489DX APICs; * thanks to Eric Gilmore * and Rolf G. Tews * for testing these extensively. * Maciej W. Rozycki : Various updates and fixes. * Mikael Pettersson : Power Management for UP-APIC. * Pavel Machek and * Mikael Pettersson : PM converted to driver model. */ #include <linux/perf_event.h> #include <linux/kernel_stat.h> #include <linux/mc146818rtc.h> #include <linux/acpi_pmtmr.h> #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/memblock.h> #include <linux/ftrace.h> #include <linux/ioport.h> #include <linux/export.h> #include <linux/syscore_ops.h> #include <linux/delay.h> #include <linux/timex.h> #include <linux/i8253.h> #include <linux/dmar.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/dmi.h> #include <linux/smp.h> #include <linux/mm.h> #include <asm/trace/irq_vectors.h> #include <asm/irq_remapping.h> #include <asm/pc-conf-reg.h> #include <asm/perf_event.h> #include <asm/x86_init.h> #include <linux/atomic.h> #include <asm/barrier.h> #include <asm/mpspec.h> #include <asm/i8259.h> #include <asm/proto.h> #include <asm/traps.h> #include <asm/apic.h> #include <asm/acpi.h> #include <asm/io_apic.h> #include <asm/desc.h> #include <asm/hpet.h> #include <asm/mtrr.h> #include <asm/time.h> #include <asm/smp.h> #include <asm/mce.h> #include <asm/tsc.h> #include <asm/hypervisor.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/irq_regs.h> #include <asm/cpu.h> #include "local.h" unsigned int num_processors; unsigned disabled_cpus; /* Processor that is doing the boot up */ unsigned int boot_cpu_physical_apicid __ro_after_init = -1U; EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid); u8 boot_cpu_apic_version __ro_after_init; /* * Bitmask of physically existing CPUs: */ physid_mask_t phys_cpu_present_map; /* * Processor to be disabled specified by kernel parameter * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to * avoid undefined behaviour caused by sending INIT from AP to BSP. */ static unsigned int disabled_cpu_apicid __ro_after_init = BAD_APICID; /* * This variable controls which CPUs receive external NMIs. By default, * external NMIs are delivered only to the BSP. */ static int apic_extnmi __ro_after_init = APIC_EXTNMI_BSP; /* * Hypervisor supports 15 bits of APIC ID in MSI Extended Destination ID */ static bool virt_ext_dest_id __ro_after_init; /* For parallel bootup. */ unsigned long apic_mmio_base __ro_after_init; static inline bool apic_accessible(void) { return x2apic_mode || apic_mmio_base; } /* * Map cpu index to physical APIC ID */ DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID); DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, U32_MAX); EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid); #ifdef CONFIG_X86_32 /* Local APIC was disabled by the BIOS and enabled by the kernel */ static int enabled_via_apicbase __ro_after_init; /* * Handle interrupt mode configuration register (IMCR). * This register controls whether the interrupt signals * that reach the BSP come from the master PIC or from the * local APIC. Before entering Symmetric I/O Mode, either * the BIOS or the operating system must switch out of * PIC Mode by changing the IMCR. 
*/ static inline void imcr_pic_to_apic(void) { /* NMI and 8259 INTR go through APIC */ pc_conf_set(PC_CONF_MPS_IMCR, 0x01); } static inline void imcr_apic_to_pic(void) { /* NMI and 8259 INTR go directly to BSP */ pc_conf_set(PC_CONF_MPS_IMCR, 0x00); } #endif /* * Knob to control our willingness to enable the local APIC. * * +1=force-enable */ static int force_enable_local_apic __initdata; /* * APIC command line parameters */ static int __init parse_lapic(char *arg) { if (IS_ENABLED(CONFIG_X86_32) && !arg) force_enable_local_apic = 1; else if (arg && !strncmp(arg, "notscdeadline", 13)) setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); return 0; } early_param("lapic", parse_lapic); #ifdef CONFIG_X86_64 static int apic_calibrate_pmtmr __initdata; static __init int setup_apicpmtimer(char *s) { apic_calibrate_pmtmr = 1; notsc_setup(NULL); return 1; } __setup("apicpmtimer", setup_apicpmtimer); #endif static unsigned long mp_lapic_addr __ro_after_init; bool apic_is_disabled __ro_after_init; /* Disable local APIC timer from the kernel commandline or via dmi quirk */ static int disable_apic_timer __initdata; /* Local APIC timer works in C2 */ int local_apic_timer_c2_ok __ro_after_init; EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); /* * Debug level, exported for io_apic.c */ int apic_verbosity __ro_after_init; int pic_mode __ro_after_init; /* Have we found an MP table */ int smp_found_config __ro_after_init; static struct resource lapic_resource = { .name = "Local APIC", .flags = IORESOURCE_MEM | IORESOURCE_BUSY, }; unsigned int lapic_timer_period = 0; static void apic_pm_activate(void); /* * Get the LAPIC version */ static inline int lapic_get_version(void) { return GET_APIC_VERSION(apic_read(APIC_LVR)); } /* * Check, if the APIC is integrated or a separate chip */ static inline int lapic_is_integrated(void) { return APIC_INTEGRATED(lapic_get_version()); } /* * Check, whether this is a modern or a first generation APIC */ static int modern_apic(void) { /* AMD systems use old APIC versions, so check the CPU */ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && boot_cpu_data.x86 >= 0xf) return 1; /* Hygon systems use modern APIC */ if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) return 1; return lapic_get_version() >= 0x14; } /* * right after this call apic become NOOP driven * so apic->write/read doesn't do anything */ static void __init apic_disable(void) { apic_install_driver(&apic_noop); } void native_apic_icr_write(u32 low, u32 id) { unsigned long flags; local_irq_save(flags); apic_write(APIC_ICR2, SET_XAPIC_DEST_FIELD(id)); apic_write(APIC_ICR, low); local_irq_restore(flags); } u64 native_apic_icr_read(void) { u32 icr1, icr2; icr2 = apic_read(APIC_ICR2); icr1 = apic_read(APIC_ICR); return icr1 | ((u64)icr2 << 32); } #ifdef CONFIG_X86_32 /** * get_physical_broadcast - Get number of physical broadcast IDs */ int get_physical_broadcast(void) { return modern_apic() ? 0xff : 0xf; } #endif /** * lapic_get_maxlvt - get the maximum number of local vector table entries */ int lapic_get_maxlvt(void) { /* * - we always have APIC integrated on 64bit mode * - 82489DXs do not report # of LVT entries */ return lapic_is_integrated() ? GET_APIC_MAXLVT(apic_read(APIC_LVR)) : 2; } /* * Local APIC timer */ /* Clock divisor */ #define APIC_DIVISOR 16 #define TSC_DIVISOR 8 /* i82489DX specific */ #define I82489DX_BASE_DIVIDER (((0x2) << 18)) /* * This function sets up the local APIC timer, with a timeout of * 'clocks' APIC bus clock. 
During calibration we actually call * this function twice on the boot CPU, once with a bogus timeout * value, second time for real. The other (noncalibrating) CPUs * call this function only once, with the real, calibrated value. * * We do reads before writes even if unnecessary, to get around the * P5 APIC double write bug. */ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) { unsigned int lvtt_value, tmp_value; lvtt_value = LOCAL_TIMER_VECTOR; if (!oneshot) lvtt_value |= APIC_LVT_TIMER_PERIODIC; else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE; /* * The i82489DX APIC uses bit 18 and 19 for the base divider. This * overlaps with bit 18 on integrated APICs, but is not documented * in the SDM. No problem though. i82489DX equipped systems do not * have TSC deadline timer. */ if (!lapic_is_integrated()) lvtt_value |= I82489DX_BASE_DIVIDER; if (!irqen) lvtt_value |= APIC_LVT_MASKED; apic_write(APIC_LVTT, lvtt_value); if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) { /* * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode, * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized. * According to Intel, MFENCE can do the serialization here. */ asm volatile("mfence" : : : "memory"); return; } /* * Divide PICLK by 16 */ tmp_value = apic_read(APIC_TDCR); apic_write(APIC_TDCR, (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | APIC_TDR_DIV_16); if (!oneshot) apic_write(APIC_TMICT, clocks / APIC_DIVISOR); } /* * Setup extended LVT, AMD specific * * Software should use the LVT offsets the BIOS provides. The offsets * are determined by the subsystems using it like those for MCE * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts * are supported. Beginning with family 10h at least 4 offsets are * available. * * Since the offsets must be consistent for all cores, we keep track * of the LVT offsets in software and reserve the offset for the same * vector also to be used on other cores. An offset is freed by * setting the entry to APIC_EILVT_MASKED. * * If the BIOS is right, there should be no conflicts. Otherwise a * "[Firmware Bug]: ..." error message is generated. However, if * software does not properly determines the offsets, it is not * necessarily a BIOS bug. */ static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX]; static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new) { return (old & APIC_EILVT_MASKED) || (new == APIC_EILVT_MASKED) || ((new & ~APIC_EILVT_MASKED) == old); } static unsigned int reserve_eilvt_offset(int offset, unsigned int new) { unsigned int rsvd, vector; if (offset >= APIC_EILVT_NR_MAX) return ~0; rsvd = atomic_read(&eilvt_offsets[offset]); do { vector = rsvd & ~APIC_EILVT_MASKED; /* 0: unassigned */ if (vector && !eilvt_entry_is_changeable(vector, new)) /* may not change if vectors are different */ return rsvd; } while (!atomic_try_cmpxchg(&eilvt_offsets[offset], &rsvd, new)); rsvd = new & ~APIC_EILVT_MASKED; if (rsvd && rsvd != vector) pr_info("LVT offset %d assigned for vector 0x%02x\n", offset, rsvd); return new; } /* * If mask=1, the LVT entry does not generate interrupts while mask=0 * enables the vector. See also the BKDGs. Must be called with * preemption disabled. 
*/ int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) { unsigned long reg = APIC_EILVTn(offset); unsigned int new, old, reserved; new = (mask << 16) | (msg_type << 8) | vector; old = apic_read(reg); reserved = reserve_eilvt_offset(offset, new); if (reserved != new) { pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for " "vector 0x%x, but the register is already in use for " "vector 0x%x on another cpu\n", smp_processor_id(), reg, offset, new, reserved); return -EINVAL; } if (!eilvt_entry_is_changeable(old, new)) { pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for " "vector 0x%x, but the register is already in use for " "vector 0x%x on this cpu\n", smp_processor_id(), reg, offset, new, old); return -EBUSY; } apic_write(reg, new); return 0; } EXPORT_SYMBOL_GPL(setup_APIC_eilvt); /* * Program the next event, relative to now */ static int lapic_next_event(unsigned long delta, struct clock_event_device *evt) { apic_write(APIC_TMICT, delta); return 0; } static int lapic_next_deadline(unsigned long delta, struct clock_event_device *evt) { u64 tsc; /* This MSR is special and need a special fence: */ weak_wrmsr_fence(); tsc = rdtsc(); wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); return 0; } static int lapic_timer_shutdown(struct clock_event_device *evt) { unsigned int v; /* Lapic used as dummy for broadcast ? */ if (evt->features & CLOCK_EVT_FEAT_DUMMY) return 0; v = apic_read(APIC_LVTT); v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); apic_write(APIC_LVTT, v); apic_write(APIC_TMICT, 0); return 0; } static inline int lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot) { /* Lapic used as dummy for broadcast ? */ if (evt->features & CLOCK_EVT_FEAT_DUMMY) return 0; __setup_APIC_LVTT(lapic_timer_period, oneshot, 1); return 0; } static int lapic_timer_set_periodic(struct clock_event_device *evt) { return lapic_timer_set_periodic_oneshot(evt, false); } static int lapic_timer_set_oneshot(struct clock_event_device *evt) { return lapic_timer_set_periodic_oneshot(evt, true); } /* * Local APIC timer broadcast function */ static void lapic_timer_broadcast(const struct cpumask *mask) { #ifdef CONFIG_SMP __apic_send_IPI_mask(mask, LOCAL_TIMER_VECTOR); #endif } /* * The local apic timer can be used for any function which is CPU local. 
*/ static struct clock_event_device lapic_clockevent = { .name = "lapic", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY, .shift = 32, .set_state_shutdown = lapic_timer_shutdown, .set_state_periodic = lapic_timer_set_periodic, .set_state_oneshot = lapic_timer_set_oneshot, .set_state_oneshot_stopped = lapic_timer_shutdown, .set_next_event = lapic_next_event, .broadcast = lapic_timer_broadcast, .rating = 100, .irq = -1, }; static DEFINE_PER_CPU(struct clock_event_device, lapic_events); static const struct x86_cpu_id deadline_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */ X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X, 0x0b000020), X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011), X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e), X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c), X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003), X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136), X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014), X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0), X86_MATCH_INTEL_FAM6_MODEL( HASWELL, 0x22), X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L, 0x20), X86_MATCH_INTEL_FAM6_MODEL( HASWELL_G, 0x17), X86_MATCH_INTEL_FAM6_MODEL( BROADWELL, 0x25), X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_G, 0x17), X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_L, 0xb2), X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE, 0xb2), X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE_L, 0x52), X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE, 0x52), {}, }; static __init bool apic_validate_deadline_timer(void) { const struct x86_cpu_id *m; u32 rev; if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) return false; if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) return true; m = x86_match_cpu(deadline_match); if (!m) return true; rev = (u32)m->driver_data; if (boot_cpu_data.microcode >= rev) return true; setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; " "please update microcode to version: 0x%x (or later)\n", rev); return false; } /* * Setup the local APIC timer for this CPU. Copy the initialized values * of the boot CPU and register the clock event in the framework. */ static void setup_APIC_timer(void) { struct clock_event_device *levt = this_cpu_ptr(&lapic_events); if (this_cpu_has(X86_FEATURE_ARAT)) { lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; /* Make LAPIC timer preferable over percpu HPET */ lapic_clockevent.rating = 150; } memcpy(levt, &lapic_clockevent, sizeof(*levt)); levt->cpumask = cpumask_of(smp_processor_id()); if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) { levt->name = "lapic-deadline"; levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_DUMMY); levt->set_next_event = lapic_next_deadline; clockevents_config_and_register(levt, tsc_khz * (1000 / TSC_DIVISOR), 0xF, ~0UL); } else clockevents_register_device(levt); } /* * Install the updated TSC frequency from recalibration at the TSC * deadline clockevent devices. 
*/ static void __lapic_update_tsc_freq(void *info) { struct clock_event_device *levt = this_cpu_ptr(&lapic_events); if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) return; clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR)); } void lapic_update_tsc_freq(void) { /* * The clockevent device's ->mult and ->shift can both be * changed. In order to avoid races, schedule the frequency * update code on each CPU. */ on_each_cpu(__lapic_update_tsc_freq, NULL, 0); } /* * In this functions we calibrate APIC bus clocks to the external timer. * * We want to do the calibration only once since we want to have local timer * irqs synchronous. CPUs connected by the same APIC bus have the very same bus * frequency. * * This was previously done by reading the PIT/HPET and waiting for a wrap * around to find out, that a tick has elapsed. I have a box, where the PIT * readout is broken, so it never gets out of the wait loop again. This was * also reported by others. * * Monitoring the jiffies value is inaccurate and the clockevents * infrastructure allows us to do a simple substitution of the interrupt * handler. * * The calibration routine also uses the pm_timer when possible, as the PIT * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes * back to normal later in the boot process). */ #define LAPIC_CAL_LOOPS (HZ/10) static __initdata int lapic_cal_loops = -1; static __initdata long lapic_cal_t1, lapic_cal_t2; static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2; static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2; static __initdata unsigned long lapic_cal_j1, lapic_cal_j2; /* * Temporary interrupt handler and polled calibration function. */ static void __init lapic_cal_handler(struct clock_event_device *dev) { unsigned long long tsc = 0; long tapic = apic_read(APIC_TMCCT); unsigned long pm = acpi_pm_read_early(); if (boot_cpu_has(X86_FEATURE_TSC)) tsc = rdtsc(); switch (lapic_cal_loops++) { case 0: lapic_cal_t1 = tapic; lapic_cal_tsc1 = tsc; lapic_cal_pm1 = pm; lapic_cal_j1 = jiffies; break; case LAPIC_CAL_LOOPS: lapic_cal_t2 = tapic; lapic_cal_tsc2 = tsc; if (pm < lapic_cal_pm1) pm += ACPI_PM_OVRRUN; lapic_cal_pm2 = pm; lapic_cal_j2 = jiffies; break; } } static int __init calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc) { const long pm_100ms = PMTMR_TICKS_PER_SEC / 10; const long pm_thresh = pm_100ms / 100; unsigned long mult; u64 res; #ifndef CONFIG_X86_PM_TIMER return -1; #endif apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm); /* Check, if the PM timer is available */ if (!deltapm) return -1; mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22); if (deltapm > (pm_100ms - pm_thresh) && deltapm < (pm_100ms + pm_thresh)) { apic_printk(APIC_VERBOSE, "... 
PM-Timer result ok\n"); return 0; } res = (((u64)deltapm) * mult) >> 22; do_div(res, 1000000); pr_warn("APIC calibration not consistent " "with PM-Timer: %ldms instead of 100ms\n", (long)res); /* Correct the lapic counter value */ res = (((u64)(*delta)) * pm_100ms); do_div(res, deltapm); pr_info("APIC delta adjusted to PM-Timer: " "%lu (%ld)\n", (unsigned long)res, *delta); *delta = (long)res; /* Correct the tsc counter value */ if (boot_cpu_has(X86_FEATURE_TSC)) { res = (((u64)(*deltatsc)) * pm_100ms); do_div(res, deltapm); apic_printk(APIC_VERBOSE, "TSC delta adjusted to " "PM-Timer: %lu (%ld)\n", (unsigned long)res, *deltatsc); *deltatsc = (long)res; } return 0; } static int __init lapic_init_clockevent(void) { if (!lapic_timer_period) return -1; /* Calculate the scaled math multiplication factor */ lapic_clockevent.mult = div_sc(lapic_timer_period/APIC_DIVISOR, TICK_NSEC, lapic_clockevent.shift); lapic_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent); lapic_clockevent.max_delta_ticks = 0x7FFFFFFF; lapic_clockevent.min_delta_ns = clockevent_delta2ns(0xF, &lapic_clockevent); lapic_clockevent.min_delta_ticks = 0xF; return 0; } bool __init apic_needs_pit(void) { /* * If the frequencies are not known, PIT is required for both TSC * and apic timer calibration. */ if (!tsc_khz || !cpu_khz) return true; /* Is there an APIC at all or is it disabled? */ if (!boot_cpu_has(X86_FEATURE_APIC) || apic_is_disabled) return true; /* * If interrupt delivery mode is legacy PIC or virtual wire without * configuration, the local APIC timer wont be set up. Make sure * that the PIT is initialized. */ if (apic_intr_mode == APIC_PIC || apic_intr_mode == APIC_VIRTUAL_WIRE_NO_CONFIG) return true; /* Virt guests may lack ARAT, but still have DEADLINE */ if (!boot_cpu_has(X86_FEATURE_ARAT)) return true; /* Deadline timer is based on TSC so no further PIT action required */ if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) return false; /* APIC timer disabled? */ if (disable_apic_timer) return true; /* * The APIC timer frequency is known already, no PIT calibration * required. If unknown, let the PIT be initialized. */ return lapic_timer_period == 0; } static int __init calibrate_APIC_clock(void) { struct clock_event_device *levt = this_cpu_ptr(&lapic_events); u64 tsc_perj = 0, tsc_start = 0; unsigned long jif_start; unsigned long deltaj; long delta, deltatsc; int pm_referenced = 0; if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) return 0; /* * Check if lapic timer has already been calibrated by platform * specific routine, such as tsc calibration code. If so just fill * in the clockevent structure and return. */ if (!lapic_init_clockevent()) { apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n", lapic_timer_period); /* * Direct calibration methods must have an always running * local APIC timer, no need for broadcast timer. */ lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; return 0; } apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n" "calibrating APIC timer ...\n"); /* * There are platforms w/o global clockevent devices. Instead of * making the calibration conditional on that, use a polling based * approach everywhere. */ local_irq_disable(); /* * Setup the APIC counter to maximum. 
There is no way the lapic * can underflow in the 100ms detection time frame */ __setup_APIC_LVTT(0xffffffff, 0, 0); /* * Methods to terminate the calibration loop: * 1) Global clockevent if available (jiffies) * 2) TSC if available and frequency is known */ jif_start = READ_ONCE(jiffies); if (tsc_khz) { tsc_start = rdtsc(); tsc_perj = div_u64((u64)tsc_khz * 1000, HZ); } /* * Enable interrupts so the tick can fire, if a global * clockevent device is available */ local_irq_enable(); while (lapic_cal_loops <= LAPIC_CAL_LOOPS) { /* Wait for a tick to elapse */ while (1) { if (tsc_khz) { u64 tsc_now = rdtsc(); if ((tsc_now - tsc_start) >= tsc_perj) { tsc_start += tsc_perj; break; } } else { unsigned long jif_now = READ_ONCE(jiffies); if (time_after(jif_now, jif_start)) { jif_start = jif_now; break; } } cpu_relax(); } /* Invoke the calibration routine */ local_irq_disable(); lapic_cal_handler(NULL); local_irq_enable(); } local_irq_disable(); /* Build delta t1-t2 as apic timer counts down */ delta = lapic_cal_t1 - lapic_cal_t2; apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta); deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1); /* we trust the PM based calibration if possible */ pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1, &delta, &deltatsc); lapic_timer_period = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS; lapic_init_clockevent(); apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta); apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult); apic_printk(APIC_VERBOSE, "..... calibration result: %u\n", lapic_timer_period); if (boot_cpu_has(X86_FEATURE_TSC)) { apic_printk(APIC_VERBOSE, "..... CPU clock speed is " "%ld.%04ld MHz.\n", (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ), (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ)); } apic_printk(APIC_VERBOSE, "..... host bus clock speed is " "%u.%04u MHz.\n", lapic_timer_period / (1000000 / HZ), lapic_timer_period % (1000000 / HZ)); /* * Do a sanity check on the APIC calibration result */ if (lapic_timer_period < (1000000 / HZ)) { local_irq_enable(); pr_warn("APIC frequency too slow, disabling apic timer\n"); return -1; } levt->features &= ~CLOCK_EVT_FEAT_DUMMY; /* * PM timer calibration failed or not turned on so lets try APIC * timer based calibration, if a global clockevent device is * available. */ if (!pm_referenced && global_clock_event) { apic_printk(APIC_VERBOSE, "... verify APIC timer\n"); /* * Setup the apic timer manually */ levt->event_handler = lapic_cal_handler; lapic_timer_set_periodic(levt); lapic_cal_loops = -1; /* Let the interrupts run */ local_irq_enable(); while (lapic_cal_loops <= LAPIC_CAL_LOOPS) cpu_relax(); /* Stop the lapic timer */ local_irq_disable(); lapic_timer_shutdown(levt); /* Jiffies delta */ deltaj = lapic_cal_j2 - lapic_cal_j1; apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj); /* Check, if the jiffies result is consistent */ if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2) apic_printk(APIC_VERBOSE, "... jiffies result ok\n"); else levt->features |= CLOCK_EVT_FEAT_DUMMY; } local_irq_enable(); if (levt->features & CLOCK_EVT_FEAT_DUMMY) { pr_warn("APIC timer disabled due to verification failure\n"); return -1; } return 0; } /* * Setup the boot APIC * * Calibrate and verify the result. */ void __init setup_boot_APIC_clock(void) { /* * The local apic timer can be disabled via the kernel * commandline or from the CPU detection code. Register the lapic * timer as a dummy clock event source on SMP systems, so the * broadcast mechanism is used. 
On UP systems simply ignore it. */ if (disable_apic_timer) { pr_info("Disabling APIC timer\n"); /* No broadcast on UP ! */ if (num_possible_cpus() > 1) { lapic_clockevent.mult = 1; setup_APIC_timer(); } return; } if (calibrate_APIC_clock()) { /* No broadcast on UP ! */ if (num_possible_cpus() > 1) setup_APIC_timer(); return; } /* * If nmi_watchdog is set to IO_APIC, we need the * PIT/HPET going. Otherwise register lapic as a dummy * device. */ lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; /* Setup the lapic or request the broadcast */ setup_APIC_timer(); amd_e400_c1e_apic_setup(); } void setup_secondary_APIC_clock(void) { setup_APIC_timer(); amd_e400_c1e_apic_setup(); } /* * The guts of the apic timer interrupt */ static void local_apic_timer_interrupt(void) { struct clock_event_device *evt = this_cpu_ptr(&lapic_events); /* * Normally we should not be here till LAPIC has been initialized but * in some cases like kdump, its possible that there is a pending LAPIC * timer interrupt from previous kernel's context and is delivered in * new kernel the moment interrupts are enabled. * * Interrupts are enabled early and LAPIC is setup much later, hence * its possible that when we get here evt->event_handler is NULL. * Check for event_handler being NULL and discard the interrupt as * spurious. */ if (!evt->event_handler) { pr_warn("Spurious LAPIC timer interrupt on cpu %d\n", smp_processor_id()); /* Switch it off */ lapic_timer_shutdown(evt); return; } /* * the NMI deadlock-detector uses this. */ inc_irq_stat(apic_timer_irqs); evt->event_handler(evt); } /* * Local APIC timer interrupt. This is the most natural way for doing * local interrupts, but local timer interrupts can be emulated by * broadcast interrupts too. [in case the hw doesn't support APIC timers] * * [ if a single-CPU system runs an SMP kernel then we call the local * interrupt as well. Thus we cannot inline the local irq ... ] */ DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt) { struct pt_regs *old_regs = set_irq_regs(regs); apic_eoi(); trace_local_timer_entry(LOCAL_TIMER_VECTOR); local_apic_timer_interrupt(); trace_local_timer_exit(LOCAL_TIMER_VECTOR); set_irq_regs(old_regs); } /* * Local APIC start and shutdown */ /** * clear_local_APIC - shutdown the local APIC * * This is called, when a CPU is disabled and before rebooting, so the state of * the local APIC has no dangling leftovers. Also used to cleanout any BIOS * leftovers during boot. */ void clear_local_APIC(void) { int maxlvt; u32 v; if (!apic_accessible()) return; maxlvt = lapic_get_maxlvt(); /* * Masking an LVT entry can trigger a local APIC error * if the vector is zero. Mask LVTERR first to prevent this. */ if (maxlvt >= 3) { v = ERROR_APIC_VECTOR; /* any non-zero vector will do */ apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); } /* * Careful: we have to set masks only first to deassert * any level-triggered sources. 
*/ v = apic_read(APIC_LVTT); apic_write(APIC_LVTT, v | APIC_LVT_MASKED); v = apic_read(APIC_LVT0); apic_write(APIC_LVT0, v | APIC_LVT_MASKED); v = apic_read(APIC_LVT1); apic_write(APIC_LVT1, v | APIC_LVT_MASKED); if (maxlvt >= 4) { v = apic_read(APIC_LVTPC); apic_write(APIC_LVTPC, v | APIC_LVT_MASKED); } /* lets not touch this if we didn't frob it */ #ifdef CONFIG_X86_THERMAL_VECTOR if (maxlvt >= 5) { v = apic_read(APIC_LVTTHMR); apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); } #endif #ifdef CONFIG_X86_MCE_INTEL if (maxlvt >= 6) { v = apic_read(APIC_LVTCMCI); if (!(v & APIC_LVT_MASKED)) apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED); } #endif /* * Clean APIC state for other OSs: */ apic_write(APIC_LVTT, APIC_LVT_MASKED); apic_write(APIC_LVT0, APIC_LVT_MASKED); apic_write(APIC_LVT1, APIC_LVT_MASKED); if (maxlvt >= 3) apic_write(APIC_LVTERR, APIC_LVT_MASKED); if (maxlvt >= 4) apic_write(APIC_LVTPC, APIC_LVT_MASKED); /* Integrated APIC (!82489DX) ? */ if (lapic_is_integrated()) { if (maxlvt > 3) /* Clear ESR due to Pentium errata 3AP and 11AP */ apic_write(APIC_ESR, 0); apic_read(APIC_ESR); } } /** * apic_soft_disable - Clears and software disables the local APIC on hotplug * * Contrary to disable_local_APIC() this does not touch the enable bit in * MSR_IA32_APICBASE. Clearing that bit on systems based on the 3 wire APIC * bus would require a hardware reset as the APIC would lose track of bus * arbitration. On systems with FSB delivery APICBASE could be disabled, * but it has to be guaranteed that no interrupt is sent to the APIC while * in that state and it's not clear from the SDM whether it still responds * to INIT/SIPI messages. Stay on the safe side and use software disable. */ void apic_soft_disable(void) { u32 value; clear_local_APIC(); /* Soft disable APIC (implies clearing of registers for 82489DX!). */ value = apic_read(APIC_SPIV); value &= ~APIC_SPIV_APIC_ENABLED; apic_write(APIC_SPIV, value); } /** * disable_local_APIC - clear and disable the local APIC */ void disable_local_APIC(void) { if (!apic_accessible()) return; apic_soft_disable(); #ifdef CONFIG_X86_32 /* * When LAPIC was disabled by the BIOS and enabled by the kernel, * restore the disabled state. */ if (enabled_via_apicbase) { unsigned int l, h; rdmsr(MSR_IA32_APICBASE, l, h); l &= ~MSR_IA32_APICBASE_ENABLE; wrmsr(MSR_IA32_APICBASE, l, h); } #endif } /* * If Linux enabled the LAPIC against the BIOS default disable it down before * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and * not power-off. Additionally clear all LVT entries before disable_local_APIC * for the case where Linux didn't enable the LAPIC. */ void lapic_shutdown(void) { unsigned long flags; if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config()) return; local_irq_save(flags); #ifdef CONFIG_X86_32 if (!enabled_via_apicbase) clear_local_APIC(); else #endif disable_local_APIC(); local_irq_restore(flags); } /** * sync_Arb_IDs - synchronize APIC bus arbitration IDs */ void __init sync_Arb_IDs(void) { /* * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not * needed on AMD. */ if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD) return; /* * Wait for idle. 
*/ apic_wait_icr_idle(); apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n"); apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG | APIC_DM_INIT); } enum apic_intr_mode_id apic_intr_mode __ro_after_init; static int __init __apic_intr_mode_select(void) { /* Check kernel option */ if (apic_is_disabled) { pr_info("APIC disabled via kernel command line\n"); return APIC_PIC; } /* Check BIOS */ #ifdef CONFIG_X86_64 /* On 64-bit, the APIC must be integrated, Check local APIC only */ if (!boot_cpu_has(X86_FEATURE_APIC)) { apic_is_disabled = true; pr_info("APIC disabled by BIOS\n"); return APIC_PIC; } #else /* On 32-bit, the APIC may be integrated APIC or 82489DX */ /* Neither 82489DX nor integrated APIC ? */ if (!boot_cpu_has(X86_FEATURE_APIC) && !smp_found_config) { apic_is_disabled = true; return APIC_PIC; } /* If the BIOS pretends there is an integrated APIC ? */ if (!boot_cpu_has(X86_FEATURE_APIC) && APIC_INTEGRATED(boot_cpu_apic_version)) { apic_is_disabled = true; pr_err(FW_BUG "Local APIC not detected, force emulation\n"); return APIC_PIC; } #endif /* Check MP table or ACPI MADT configuration */ if (!smp_found_config) { disable_ioapic_support(); if (!acpi_lapic) { pr_info("APIC: ACPI MADT or MP tables are not detected\n"); return APIC_VIRTUAL_WIRE_NO_CONFIG; } return APIC_VIRTUAL_WIRE; } #ifdef CONFIG_SMP /* If SMP should be disabled, then really disable it! */ if (!setup_max_cpus) { pr_info("APIC: SMP mode deactivated\n"); return APIC_SYMMETRIC_IO_NO_ROUTING; } #endif return APIC_SYMMETRIC_IO; } /* Select the interrupt delivery mode for the BSP */ void __init apic_intr_mode_select(void) { apic_intr_mode = __apic_intr_mode_select(); } /* * An initial setup of the virtual wire mode. */ void __init init_bsp_APIC(void) { unsigned int value; /* * Don't do the setup now if we have a SMP BIOS as the * through-I/O-APIC virtual wire mode might be active. */ if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC)) return; /* * Do not trust the local APIC being empty at bootup. */ clear_local_APIC(); /* * Enable APIC. */ value = apic_read(APIC_SPIV); value &= ~APIC_VECTOR_MASK; value |= APIC_SPIV_APIC_ENABLED; #ifdef CONFIG_X86_32 /* This bit is reserved on P4/Xeon and should be cleared */ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15)) value &= ~APIC_SPIV_FOCUS_DISABLED; else #endif value |= APIC_SPIV_FOCUS_DISABLED; value |= SPURIOUS_APIC_VECTOR; apic_write(APIC_SPIV, value); /* * Set up the virtual wire mode. 
*/ apic_write(APIC_LVT0, APIC_DM_EXTINT); value = APIC_DM_NMI; if (!lapic_is_integrated()) /* 82489DX */ value |= APIC_LVT_LEVEL_TRIGGER; if (apic_extnmi == APIC_EXTNMI_NONE) value |= APIC_LVT_MASKED; apic_write(APIC_LVT1, value); } static void __init apic_bsp_setup(bool upmode); /* Init the interrupt delivery mode for the BSP */ void __init apic_intr_mode_init(void) { bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT); switch (apic_intr_mode) { case APIC_PIC: pr_info("APIC: Keep in PIC mode(8259)\n"); return; case APIC_VIRTUAL_WIRE: pr_info("APIC: Switch to virtual wire mode setup\n"); break; case APIC_VIRTUAL_WIRE_NO_CONFIG: pr_info("APIC: Switch to virtual wire mode setup with no configuration\n"); upmode = true; break; case APIC_SYMMETRIC_IO: pr_info("APIC: Switch to symmetric I/O mode setup\n"); break; case APIC_SYMMETRIC_IO_NO_ROUTING: pr_info("APIC: Switch to symmetric I/O mode setup in no SMP routine\n"); break; } x86_64_probe_apic(); x86_32_install_bigsmp(); if (x86_platform.apic_post_init) x86_platform.apic_post_init(); apic_bsp_setup(upmode); } static void lapic_setup_esr(void) { unsigned int oldvalue, value, maxlvt; if (!lapic_is_integrated()) { pr_info("No ESR for 82489DX.\n"); return; } if (apic->disable_esr) { /* * Something untraceable is creating bad interrupts on * secondary quads ... for the moment, just leave the * ESR disabled - we can't do anything useful with the * errors anyway - mbligh */ pr_info("Leaving ESR disabled.\n"); return; } maxlvt = lapic_get_maxlvt(); if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ apic_write(APIC_ESR, 0); oldvalue = apic_read(APIC_ESR); /* enables sending errors */ value = ERROR_APIC_VECTOR; apic_write(APIC_LVTERR, value); /* * spec says clear errors after enabling vector. */ if (maxlvt > 3) apic_write(APIC_ESR, 0); value = apic_read(APIC_ESR); if (value != oldvalue) apic_printk(APIC_VERBOSE, "ESR value before enabling " "vector: 0x%08x after: 0x%08x\n", oldvalue, value); } #define APIC_IR_REGS APIC_ISR_NR #define APIC_IR_BITS (APIC_IR_REGS * 32) #define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG) union apic_ir { unsigned long map[APIC_IR_MAPSIZE]; u32 regs[APIC_IR_REGS]; }; static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr) { int i, bit; /* Read the IRRs */ for (i = 0; i < APIC_IR_REGS; i++) irr->regs[i] = apic_read(APIC_IRR + i * 0x10); /* Read the ISRs */ for (i = 0; i < APIC_IR_REGS; i++) isr->regs[i] = apic_read(APIC_ISR + i * 0x10); /* * If the ISR map is not empty. ACK the APIC and run another round * to verify whether a pending IRR has been unblocked and turned * into a ISR. */ if (!bitmap_empty(isr->map, APIC_IR_BITS)) { /* * There can be multiple ISR bits set when a high priority * interrupt preempted a lower priority one. Issue an ACK * per set bit. */ for_each_set_bit(bit, isr->map, APIC_IR_BITS) apic_eoi(); return true; } return !bitmap_empty(irr->map, APIC_IR_BITS); } /* * After a crash, we no longer service the interrupts and a pending * interrupt from previous kernel might still have ISR bit set. * * Most probably by now the CPU has serviced that pending interrupt and it * might not have done the apic_eoi() because it thought, interrupt * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear * the ISR bit and cpu thinks it has already serviced the interrupt. Hence * a vector might get locked. It was noticed for timer irq (vector * 0x31). Issue an extra EOI to clear ISR. * * If there are pending IRR bits they turn into ISR bits after a higher * priority ISR bit has been acked. 
*/ static void apic_pending_intr_clear(void) { union apic_ir irr, isr; unsigned int i; /* 512 loops are way oversized and give the APIC a chance to obey. */ for (i = 0; i < 512; i++) { if (!apic_check_and_ack(&irr, &isr)) return; } /* Dump the IRR/ISR content if that failed */ pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map); } /** * setup_local_APIC - setup the local APIC * * Used to setup local APIC while initializing BSP or bringing up APs. * Always called with preemption disabled. */ static void setup_local_APIC(void) { int cpu = smp_processor_id(); unsigned int value; if (apic_is_disabled) { disable_ioapic_support(); return; } /* * If this comes from kexec/kcrash the APIC might be enabled in * SPIV. Soft disable it before doing further initialization. */ value = apic_read(APIC_SPIV); value &= ~APIC_SPIV_APIC_ENABLED; apic_write(APIC_SPIV, value); #ifdef CONFIG_X86_32 /* Pound the ESR really hard over the head with a big hammer - mbligh */ if (lapic_is_integrated() && apic->disable_esr) { apic_write(APIC_ESR, 0); apic_write(APIC_ESR, 0); apic_write(APIC_ESR, 0); apic_write(APIC_ESR, 0); } #endif /* Validate that the APIC is registered if required */ BUG_ON(apic->apic_id_registered && !apic->apic_id_registered()); /* * Intel recommends to set DFR, LDR and TPR before enabling * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). * * Except for APICs which operate in physical destination mode. */ if (apic->init_apic_ldr) apic->init_apic_ldr(); /* * Set Task Priority to 'accept all except vectors 0-31'. An APIC * vector in the 16-31 range could be delivered if TPR == 0, but we * would think it's an exception and terrible things will happen. We * never change this later on. */ value = apic_read(APIC_TASKPRI); value &= ~APIC_TPRI_MASK; value |= 0x10; apic_write(APIC_TASKPRI, value); /* Clear eventually stale ISR/IRR bits */ apic_pending_intr_clear(); /* * Now that we are all set up, enable the APIC */ value = apic_read(APIC_SPIV); value &= ~APIC_VECTOR_MASK; /* * Enable APIC */ value |= APIC_SPIV_APIC_ENABLED; #ifdef CONFIG_X86_32 /* * Some unknown Intel IO/APIC (or APIC) errata is biting us with * certain networking cards. If high frequency interrupts are * happening on a particular IOAPIC pin, plus the IOAPIC routing * entry is masked/unmasked at a high rate as well then sooner or * later IOAPIC line gets 'stuck', no more interrupts are received * from the device. If focus CPU is disabled then the hang goes * away, oh well :-( * * [ This bug can be reproduced easily with a level-triggered * PCI Ne2000 networking cards and PII/PIII processors, dual * BX chipset. ] */ /* * Actually disabling the focus CPU check just makes the hang less * frequent as it makes the interrupt distribution model be more * like LRU than MRU (the short-term load is more even across CPUs). */ /* * - enable focus processor (bit==0) * - 64bit mode always use processor focus * so no need to set it */ value &= ~APIC_SPIV_FOCUS_DISABLED; #endif /* * Set spurious IRQ vector */ value |= SPURIOUS_APIC_VECTOR; apic_write(APIC_SPIV, value); perf_events_lapic_init(); /* * Set up LVT0, LVT1: * * set up through-local-APIC on the boot CPU's LINT0. This is not * strictly necessary in pure symmetric-IO mode, but sometimes * we delegate interrupts to the 8259A. */ /* * TODO: set up through-local-APIC from through-I/O-APIC? 
--macro */ value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; if (!cpu && (pic_mode || !value || ioapic_is_disabled)) { value = APIC_DM_EXTINT; apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu); } else { value = APIC_DM_EXTINT | APIC_LVT_MASKED; apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu); } apic_write(APIC_LVT0, value); /* * Only the BSP sees the LINT1 NMI signal by default. This can be * modified by apic_extnmi= boot option. */ if ((!cpu && apic_extnmi != APIC_EXTNMI_NONE) || apic_extnmi == APIC_EXTNMI_ALL) value = APIC_DM_NMI; else value = APIC_DM_NMI | APIC_LVT_MASKED; /* Is 82489DX ? */ if (!lapic_is_integrated()) value |= APIC_LVT_LEVEL_TRIGGER; apic_write(APIC_LVT1, value); #ifdef CONFIG_X86_MCE_INTEL /* Recheck CMCI information after local APIC is up on CPU #0 */ if (!cpu) cmci_recheck(); #endif } static void end_local_APIC_setup(void) { lapic_setup_esr(); #ifdef CONFIG_X86_32 { unsigned int value; /* Disable the local apic timer */ value = apic_read(APIC_LVTT); value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); apic_write(APIC_LVTT, value); } #endif apic_pm_activate(); } /* * APIC setup function for application processors. Called from smpboot.c */ void apic_ap_setup(void) { setup_local_APIC(); end_local_APIC_setup(); } static __init void cpu_set_boot_apic(void); static __init void apic_read_boot_cpu_id(bool x2apic) { /* * This can be invoked from check_x2apic() before the APIC has been * selected. But that code knows for sure that the BIOS enabled * X2APIC. */ if (x2apic) { boot_cpu_physical_apicid = native_apic_msr_read(APIC_ID); boot_cpu_apic_version = GET_APIC_VERSION(native_apic_msr_read(APIC_LVR)); } else { boot_cpu_physical_apicid = read_apic_id(); boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR)); } cpu_set_boot_apic(); } #ifdef CONFIG_X86_X2APIC int x2apic_mode; EXPORT_SYMBOL_GPL(x2apic_mode); enum { X2APIC_OFF, X2APIC_DISABLED, /* All states below here have X2APIC enabled */ X2APIC_ON, X2APIC_ON_LOCKED }; static int x2apic_state; static bool x2apic_hw_locked(void) { u64 ia32_cap; u64 msr; ia32_cap = x86_read_arch_cap_msr(); if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) { rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr); return (msr & LEGACY_XAPIC_DISABLED); } return false; } static void __x2apic_disable(void) { u64 msr; if (!boot_cpu_has(X86_FEATURE_APIC)) return; rdmsrl(MSR_IA32_APICBASE, msr); if (!(msr & X2APIC_ENABLE)) return; /* Disable xapic and x2apic first and then reenable xapic mode */ wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE)); wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE); printk_once(KERN_INFO "x2apic disabled\n"); } static void __x2apic_enable(void) { u64 msr; rdmsrl(MSR_IA32_APICBASE, msr); if (msr & X2APIC_ENABLE) return; wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE); printk_once(KERN_INFO "x2apic enabled\n"); } static int __init setup_nox2apic(char *str) { if (x2apic_enabled()) { int apicid = native_apic_msr_read(APIC_ID); if (apicid >= 255) { pr_warn("Apicid: %08x, cannot enforce nox2apic\n", apicid); return 0; } if (x2apic_hw_locked()) { pr_warn("APIC locked in x2apic mode, can't disable\n"); return 0; } pr_warn("x2apic already enabled.\n"); __x2apic_disable(); } setup_clear_cpu_cap(X86_FEATURE_X2APIC); x2apic_state = X2APIC_DISABLED; x2apic_mode = 0; return 0; } early_param("nox2apic", setup_nox2apic); /* Called from cpu_init() to enable x2apic on (secondary) cpus */ void x2apic_setup(void) { /* * Try to make the AP's APIC state match that of the BSP, but if the * BSP is unlocked and the AP is locked then 
there is a state mismatch. * Warn about the mismatch in case a GP fault occurs due to a locked AP * trying to be turned off. */ if (x2apic_state != X2APIC_ON_LOCKED && x2apic_hw_locked()) pr_warn("x2apic lock mismatch between BSP and AP.\n"); /* * If x2apic is not in ON or LOCKED state, disable it if already enabled * from BIOS. */ if (x2apic_state < X2APIC_ON) { __x2apic_disable(); return; } __x2apic_enable(); } static __init void apic_set_fixmap(void); static __init void x2apic_disable(void) { u32 x2apic_id, state = x2apic_state; x2apic_mode = 0; x2apic_state = X2APIC_DISABLED; if (state != X2APIC_ON) return; x2apic_id = read_apic_id(); if (x2apic_id >= 255) panic("Cannot disable x2apic, id: %08x\n", x2apic_id); if (x2apic_hw_locked()) { pr_warn("Cannot disable locked x2apic, id: %08x\n", x2apic_id); return; } __x2apic_disable(); apic_set_fixmap(); } static __init void x2apic_enable(void) { if (x2apic_state != X2APIC_OFF) return; x2apic_mode = 1; x2apic_state = X2APIC_ON; __x2apic_enable(); } static __init void try_to_enable_x2apic(int remap_mode) { if (x2apic_state == X2APIC_DISABLED) return; if (remap_mode != IRQ_REMAP_X2APIC_MODE) { u32 apic_limit = 255; /* * Using X2APIC without IR is not architecturally supported * on bare metal but may be supported in guests. */ if (!x86_init.hyper.x2apic_available()) { pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n"); x2apic_disable(); return; } /* * If the hypervisor supports extended destination ID in * MSI, that increases the maximum APIC ID that can be * used for non-remapped IRQ domains. */ if (x86_init.hyper.msi_ext_dest_id()) { virt_ext_dest_id = 1; apic_limit = 32767; } /* * Without IR, all CPUs can be addressed by IOAPIC/MSI only * in physical mode, and CPUs with an APIC ID that cannot * be addressed must not be brought online. */ x2apic_set_max_apicid(apic_limit); x2apic_phys = 1; } x2apic_enable(); } void __init check_x2apic(void) { if (x2apic_enabled()) { pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n"); x2apic_mode = 1; if (x2apic_hw_locked()) x2apic_state = X2APIC_ON_LOCKED; else x2apic_state = X2APIC_ON; apic_read_boot_cpu_id(true); } else if (!boot_cpu_has(X86_FEATURE_X2APIC)) { x2apic_state = X2APIC_DISABLED; } } #else /* CONFIG_X86_X2APIC */ void __init check_x2apic(void) { if (!apic_is_x2apic_enabled()) return; /* * Checkme: Can we simply turn off x2APIC here instead of disabling the APIC? 
*/ pr_err("Kernel does not support x2APIC, please recompile with CONFIG_X86_X2APIC.\n"); pr_err("Disabling APIC, expect reduced performance and functionality.\n"); apic_is_disabled = true; setup_clear_cpu_cap(X86_FEATURE_APIC); } static inline void try_to_enable_x2apic(int remap_mode) { } static inline void __x2apic_enable(void) { } #endif /* !CONFIG_X86_X2APIC */ void __init enable_IR_x2apic(void) { unsigned long flags; int ret, ir_stat; if (ioapic_is_disabled) { pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n"); return; } ir_stat = irq_remapping_prepare(); if (ir_stat < 0 && !x2apic_supported()) return; ret = save_ioapic_entries(); if (ret) { pr_info("Saving IO-APIC state failed: %d\n", ret); return; } local_irq_save(flags); legacy_pic->mask_all(); mask_ioapic_entries(); /* If irq_remapping_prepare() succeeded, try to enable it */ if (ir_stat >= 0) ir_stat = irq_remapping_enable(); /* ir_stat contains the remap mode or an error code */ try_to_enable_x2apic(ir_stat); if (ir_stat < 0) restore_ioapic_entries(); legacy_pic->restore_mask(); local_irq_restore(flags); } #ifdef CONFIG_X86_64 /* * Detect and enable local APICs on non-SMP boards. * Original code written by Keir Fraser. * On AMD64 we trust the BIOS - if it says no APIC it is likely * not correctly set up (usually the APIC timer won't work etc.) */ static bool __init detect_init_APIC(void) { if (!boot_cpu_has(X86_FEATURE_APIC)) { pr_info("No local APIC present\n"); return false; } register_lapic_address(APIC_DEFAULT_PHYS_BASE); return true; } #else static bool __init apic_verify(unsigned long addr) { u32 features, h, l; /* * The APIC feature bit should now be enabled * in `cpuid' */ features = cpuid_edx(1); if (!(features & (1 << X86_FEATURE_APIC))) { pr_warn("Could not enable APIC!\n"); return false; } set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); /* The BIOS may have set up the APIC at some other address */ if (boot_cpu_data.x86 >= 6) { rdmsr(MSR_IA32_APICBASE, l, h); if (l & MSR_IA32_APICBASE_ENABLE) addr = l & MSR_IA32_APICBASE_BASE; } register_lapic_address(addr); pr_info("Found and enabled local APIC!\n"); return true; } bool __init apic_force_enable(unsigned long addr) { u32 h, l; if (apic_is_disabled) return false; /* * Some BIOSes disable the local APIC in the APIC_BASE * MSR. This can only be done in software for Intel P6 or later * and AMD K7 (Model > 1) or later. */ if (boot_cpu_data.x86 >= 6) { rdmsr(MSR_IA32_APICBASE, l, h); if (!(l & MSR_IA32_APICBASE_ENABLE)) { pr_info("Local APIC disabled by BIOS -- reenabling.\n"); l &= ~MSR_IA32_APICBASE_BASE; l |= MSR_IA32_APICBASE_ENABLE | addr; wrmsr(MSR_IA32_APICBASE, l, h); enabled_via_apicbase = 1; } } return apic_verify(addr); } /* * Detect and initialize APIC */ static bool __init detect_init_APIC(void) { /* Disabled by kernel option? */ if (apic_is_disabled) return false; switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) || (boot_cpu_data.x86 >= 15)) break; goto no_apic; case X86_VENDOR_HYGON: break; case X86_VENDOR_INTEL: if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 || (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC))) break; goto no_apic; default: goto no_apic; } if (!boot_cpu_has(X86_FEATURE_APIC)) { /* * Over-ride BIOS and try to enable the local APIC only if * "lapic" specified. 
*/ if (!force_enable_local_apic) { pr_info("Local APIC disabled by BIOS -- " "you can enable it with \"lapic\"\n"); return false; } if (!apic_force_enable(APIC_DEFAULT_PHYS_BASE)) return false; } else { if (!apic_verify(APIC_DEFAULT_PHYS_BASE)) return false; } apic_pm_activate(); return true; no_apic: pr_info("No local APIC present or hardware disabled\n"); return false; } #endif /** * init_apic_mappings - initialize APIC mappings */ void __init init_apic_mappings(void) { if (apic_validate_deadline_timer()) pr_info("TSC deadline timer available\n"); if (x2apic_mode) return; if (!smp_found_config) { if (!detect_init_APIC()) { pr_info("APIC: disable apic facility\n"); apic_disable(); } num_processors = 1; } } static __init void apic_set_fixmap(void) { set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); apic_mmio_base = APIC_BASE; apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", apic_mmio_base, mp_lapic_addr); apic_read_boot_cpu_id(false); } void __init register_lapic_address(unsigned long address) { /* This should only happen once */ WARN_ON_ONCE(mp_lapic_addr); mp_lapic_addr = address; if (!x2apic_mode) apic_set_fixmap(); } /* * Local APIC interrupts */ /* * Common handling code for spurious_interrupt and spurious_vector entry * points below. No point in allowing the compiler to inline it twice. */ static noinline void handle_spurious_interrupt(u8 vector) { u32 v; trace_spurious_apic_entry(vector); inc_irq_stat(irq_spurious_count); /* * If this is a spurious interrupt then do not acknowledge */ if (vector == SPURIOUS_APIC_VECTOR) { /* See SDM vol 3 */ pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n", smp_processor_id()); goto out; } /* * If it is a vectored one, verify it's set in the ISR. If set, * acknowledge it. */ v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1)); if (v & (1 << (vector & 0x1f))) { pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n", vector, smp_processor_id()); apic_eoi(); } else { pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n", vector, smp_processor_id()); } out: trace_spurious_apic_exit(vector); } /** * spurious_interrupt - Catch all for interrupts raised on unused vectors * @regs: Pointer to pt_regs on stack * @vector: The vector number * * This is invoked from ASM entry code to catch all interrupts which * trigger on an entry which is routed to the common_spurious idtentry * point. */ DEFINE_IDTENTRY_IRQ(spurious_interrupt) { handle_spurious_interrupt(vector); } DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt) { handle_spurious_interrupt(SPURIOUS_APIC_VECTOR); } /* * This interrupt should never happen with our APIC/SMP architecture */ DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt) { static const char * const error_interrupt_reason[] = { "Send CS error", /* APIC Error Bit 0 */ "Receive CS error", /* APIC Error Bit 1 */ "Send accept error", /* APIC Error Bit 2 */ "Receive accept error", /* APIC Error Bit 3 */ "Redirectable IPI", /* APIC Error Bit 4 */ "Send illegal vector", /* APIC Error Bit 5 */ "Received illegal vector", /* APIC Error Bit 6 */ "Illegal register address", /* APIC Error Bit 7 */ }; u32 v, i = 0; trace_error_apic_entry(ERROR_APIC_VECTOR); /* First tickle the hardware, only then report what went on. -- REW */ if (lapic_get_maxlvt() > 3) /* Due to the Pentium erratum 3AP. 
*/ apic_write(APIC_ESR, 0); v = apic_read(APIC_ESR); apic_eoi(); atomic_inc(&irq_err_count); apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x", smp_processor_id(), v); v &= 0xff; while (v) { if (v & 0x1) apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]); i++; v >>= 1; } apic_printk(APIC_DEBUG, KERN_CONT "\n"); trace_error_apic_exit(ERROR_APIC_VECTOR); } /** * connect_bsp_APIC - attach the APIC to the interrupt system */ static void __init connect_bsp_APIC(void) { #ifdef CONFIG_X86_32 if (pic_mode) { /* * Do not trust the local APIC being empty at bootup. */ clear_local_APIC(); /* * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's * local APIC to INT and NMI lines. */ apic_printk(APIC_VERBOSE, "leaving PIC mode, " "enabling APIC mode.\n"); imcr_pic_to_apic(); } #endif } /** * disconnect_bsp_APIC - detach the APIC from the interrupt system * @virt_wire_setup: indicates, whether virtual wire mode is selected * * Virtual wire mode is necessary to deliver legacy interrupts even when the * APIC is disabled. */ void disconnect_bsp_APIC(int virt_wire_setup) { unsigned int value; #ifdef CONFIG_X86_32 if (pic_mode) { /* * Put the board back into PIC mode (has an effect only on * certain older boards). Note that APIC interrupts, including * IPIs, won't work beyond this point! The only exception are * INIT IPIs. */ apic_printk(APIC_VERBOSE, "disabling APIC mode, " "entering PIC mode.\n"); imcr_apic_to_pic(); return; } #endif /* Go back to Virtual Wire compatibility mode */ /* For the spurious interrupt use vector F, and enable it */ value = apic_read(APIC_SPIV); value &= ~APIC_VECTOR_MASK; value |= APIC_SPIV_APIC_ENABLED; value |= 0xf; apic_write(APIC_SPIV, value); if (!virt_wire_setup) { /* * For LVT0 make it edge triggered, active high, * external and enabled */ value = apic_read(APIC_LVT0); value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT); apic_write(APIC_LVT0, value); } else { /* Disable LVT0 */ apic_write(APIC_LVT0, APIC_LVT_MASKED); } /* * For LVT1 make it edge triggered, active high, * nmi and enabled */ value = apic_read(APIC_LVT1); value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); apic_write(APIC_LVT1, value); } /* * The number of allocated logical CPU IDs. Since logical CPU IDs are allocated * contiguously, it equals to current allocated max logical CPU ID plus 1. * All allocated CPU IDs should be in the [0, nr_logical_cpuids) range, * so the maximum of nr_logical_cpuids is nr_cpu_ids. * * NOTE: Reserve 0 for BSP. */ static int nr_logical_cpuids = 1; /* * Used to store mapping between logical CPU IDs and APIC IDs. */ int cpuid_to_apicid[] = { [0 ... NR_CPUS - 1] = -1, }; bool arch_match_cpu_phys_id(int cpu, u64 phys_id) { return phys_id == cpuid_to_apicid[cpu]; } #ifdef CONFIG_SMP static void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid) { /* Isolate the SMT bit(s) in the APICID and check for 0 */ u32 mask = (1U << (fls(smp_num_siblings) - 1)) - 1; if (smp_num_siblings == 1 || !(apicid & mask)) cpumask_set_cpu(cpu, &__cpu_primary_thread_mask); } /* * Due to the utter mess of CPUID evaluation smp_num_siblings is not valid * during early boot. 
Initialize the primary thread mask before SMP * bringup. */ static int __init smp_init_primary_thread_mask(void) { unsigned int cpu; for (cpu = 0; cpu < nr_logical_cpuids; cpu++) cpu_mark_primary_thread(cpu, cpuid_to_apicid[cpu]); return 0; } early_initcall(smp_init_primary_thread_mask); #else static inline void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid) { } #endif /* * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids * and cpuid_to_apicid[] synchronized. */ static int allocate_logical_cpuid(int apicid) { int i; /* * cpuid <-> apicid mapping is persistent, so when a cpu is up, * check if the kernel has allocated a cpuid for it. */ for (i = 0; i < nr_logical_cpuids; i++) { if (cpuid_to_apicid[i] == apicid) return i; } /* Allocate a new cpuid. */ if (nr_logical_cpuids >= nr_cpu_ids) { WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %u reached. " "Processor %d/0x%x and the rest are ignored.\n", nr_cpu_ids, nr_logical_cpuids, apicid); return -EINVAL; } cpuid_to_apicid[nr_logical_cpuids] = apicid; return nr_logical_cpuids++; } static void cpu_update_apic(int cpu, int apicid) { #if defined(CONFIG_SMP) || defined(CONFIG_X86_64) early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; #endif set_cpu_possible(cpu, true); physid_set(apicid, phys_cpu_present_map); set_cpu_present(cpu, true); num_processors++; if (system_state != SYSTEM_BOOTING) cpu_mark_primary_thread(cpu, apicid); } static __init void cpu_set_boot_apic(void) { cpuid_to_apicid[0] = boot_cpu_physical_apicid; cpu_update_apic(0, boot_cpu_physical_apicid); x86_32_probe_bigsmp_early(); } int generic_processor_info(int apicid) { int cpu, max = nr_cpu_ids; /* The boot CPU must be set before MADT/MPTABLE parsing happens */ if (cpuid_to_apicid[0] == BAD_APICID) panic("Boot CPU APIC not registered yet\n"); if (apicid == boot_cpu_physical_apicid) return 0; if (disabled_cpu_apicid == apicid) { int thiscpu = num_processors + disabled_cpus; pr_warn("APIC: Disabling requested cpu. Processor %d/0x%x ignored.\n", thiscpu, apicid); disabled_cpus++; return -ENODEV; } if (num_processors >= nr_cpu_ids) { int thiscpu = max + disabled_cpus; pr_warn("APIC: NR_CPUS/possible_cpus limit of %i reached. " "Processor %d/0x%x ignored.\n", max, thiscpu, apicid); disabled_cpus++; return -EINVAL; } cpu = allocate_logical_cpuid(apicid); if (cpu < 0) { disabled_cpus++; return -EINVAL; } cpu_update_apic(cpu, apicid); return cpu; } void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg, bool dmar) { memset(msg, 0, sizeof(*msg)); msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW; msg->arch_addr_lo.dest_mode_logical = apic->dest_mode_logical; msg->arch_addr_lo.destid_0_7 = cfg->dest_apicid & 0xFF; msg->arch_data.delivery_mode = APIC_DELIVERY_MODE_FIXED; msg->arch_data.vector = cfg->vector; msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH; /* * Only the IOMMU itself can use the trick of putting destination * APIC ID into the high bits of the address. Anything else would * just be writing to memory if it tried that, and needs IR to * address APICs which can't be addressed in the normal 32-bit * address range at 0xFFExxxxx. That is typically just 8 bits, but * some hypervisors allow the extended destination ID field in bits * 5-11 to be used, giving support for 15 bits of APIC IDs in total. 
*/ if (dmar) msg->arch_addr_hi.destid_8_31 = cfg->dest_apicid >> 8; else if (virt_ext_dest_id && cfg->dest_apicid < 0x8000) msg->arch_addr_lo.virt_destid_8_14 = cfg->dest_apicid >> 8; else WARN_ON_ONCE(cfg->dest_apicid > 0xFF); } u32 x86_msi_msg_get_destid(struct msi_msg *msg, bool extid) { u32 dest = msg->arch_addr_lo.destid_0_7; if (extid) dest |= msg->arch_addr_hi.destid_8_31 << 8; return dest; } EXPORT_SYMBOL_GPL(x86_msi_msg_get_destid); static void __init apic_bsp_up_setup(void) { #ifdef CONFIG_X86_64 apic_write(APIC_ID, apic->set_apic_id(boot_cpu_physical_apicid)); #endif physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); } /** * apic_bsp_setup - Setup function for local apic and io-apic * @upmode: Force UP mode (for APIC_init_uniprocessor) */ static void __init apic_bsp_setup(bool upmode) { connect_bsp_APIC(); if (upmode) apic_bsp_up_setup(); setup_local_APIC(); enable_IO_APIC(); end_local_APIC_setup(); irq_remap_enable_fault_handling(); setup_IO_APIC(); lapic_update_legacy_vectors(); } #ifdef CONFIG_UP_LATE_INIT void __init up_late_init(void) { if (apic_intr_mode == APIC_PIC) return; /* Setup local timer */ x86_init.timers.setup_percpu_clockev(); } #endif /* * Power management */ #ifdef CONFIG_PM static struct { /* * 'active' is true if the local APIC was enabled by us and * not the BIOS; this signifies that we are also responsible * for disabling it before entering apm/acpi suspend */ int active; /* r/w apic fields */ unsigned int apic_id; unsigned int apic_taskpri; unsigned int apic_ldr; unsigned int apic_dfr; unsigned int apic_spiv; unsigned int apic_lvtt; unsigned int apic_lvtpc; unsigned int apic_lvt0; unsigned int apic_lvt1; unsigned int apic_lvterr; unsigned int apic_tmict; unsigned int apic_tdcr; unsigned int apic_thmr; unsigned int apic_cmci; } apic_pm_state; static int lapic_suspend(void) { unsigned long flags; int maxlvt; if (!apic_pm_state.active) return 0; maxlvt = lapic_get_maxlvt(); apic_pm_state.apic_id = apic_read(APIC_ID); apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI); apic_pm_state.apic_ldr = apic_read(APIC_LDR); apic_pm_state.apic_dfr = apic_read(APIC_DFR); apic_pm_state.apic_spiv = apic_read(APIC_SPIV); apic_pm_state.apic_lvtt = apic_read(APIC_LVTT); if (maxlvt >= 4) apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC); apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0); apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1); apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); apic_pm_state.apic_tmict = apic_read(APIC_TMICT); apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); #ifdef CONFIG_X86_THERMAL_VECTOR if (maxlvt >= 5) apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); #endif #ifdef CONFIG_X86_MCE_INTEL if (maxlvt >= 6) apic_pm_state.apic_cmci = apic_read(APIC_LVTCMCI); #endif local_irq_save(flags); /* * Mask IOAPIC before disabling the local APIC to prevent stale IRR * entries on some implementations. */ mask_ioapic_entries(); disable_local_APIC(); irq_remapping_disable(); local_irq_restore(flags); return 0; } static void lapic_resume(void) { unsigned int l, h; unsigned long flags; int maxlvt; if (!apic_pm_state.active) return; local_irq_save(flags); /* * IO-APIC and PIC have their own resume routines. * We just mask them here to make sure the interrupt * subsystem is completely quiet while we enable x2apic * and interrupt-remapping. */ mask_ioapic_entries(); legacy_pic->mask_all(); if (x2apic_mode) { __x2apic_enable(); } else { /* * Make sure the APICBASE points to the right address * * FIXME! 
This will be wrong if we ever support suspend on * SMP! We'll need to do this as part of the CPU restore! */ if (boot_cpu_data.x86 >= 6) { rdmsr(MSR_IA32_APICBASE, l, h); l &= ~MSR_IA32_APICBASE_BASE; l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; wrmsr(MSR_IA32_APICBASE, l, h); } } maxlvt = lapic_get_maxlvt(); apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); apic_write(APIC_ID, apic_pm_state.apic_id); apic_write(APIC_DFR, apic_pm_state.apic_dfr); apic_write(APIC_LDR, apic_pm_state.apic_ldr); apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri); apic_write(APIC_SPIV, apic_pm_state.apic_spiv); apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); #ifdef CONFIG_X86_THERMAL_VECTOR if (maxlvt >= 5) apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); #endif #ifdef CONFIG_X86_MCE_INTEL if (maxlvt >= 6) apic_write(APIC_LVTCMCI, apic_pm_state.apic_cmci); #endif if (maxlvt >= 4) apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc); apic_write(APIC_LVTT, apic_pm_state.apic_lvtt); apic_write(APIC_TDCR, apic_pm_state.apic_tdcr); apic_write(APIC_TMICT, apic_pm_state.apic_tmict); apic_write(APIC_ESR, 0); apic_read(APIC_ESR); apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr); apic_write(APIC_ESR, 0); apic_read(APIC_ESR); irq_remapping_reenable(x2apic_mode); local_irq_restore(flags); } /* * This device has no shutdown method - fully functioning local APICs * are needed on every CPU up until machine_halt/restart/poweroff. */ static struct syscore_ops lapic_syscore_ops = { .resume = lapic_resume, .suspend = lapic_suspend, }; static void apic_pm_activate(void) { apic_pm_state.active = 1; } static int __init init_lapic_sysfs(void) { /* XXX: remove suspend/resume procs if !apic_pm_state.active? */ if (boot_cpu_has(X86_FEATURE_APIC)) register_syscore_ops(&lapic_syscore_ops); return 0; } /* local apic needs to resume before other devices access its registers. */ core_initcall(init_lapic_sysfs); #else /* CONFIG_PM */ static void apic_pm_activate(void) { } #endif /* CONFIG_PM */ #ifdef CONFIG_X86_64 static int multi_checked; static int multi; static int set_multi(const struct dmi_system_id *d) { if (multi) return 0; pr_info("APIC: %s detected, Multi Chassis\n", d->ident); multi = 1; return 0; } static const struct dmi_system_id multi_dmi_table[] = { { .callback = set_multi, .ident = "IBM System Summit2", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"), }, }, {} }; static void dmi_check_multi(void) { if (multi_checked) return; dmi_check_system(multi_dmi_table); multi_checked = 1; } /* * apic_is_clustered_box() -- Check if we can expect good TSC * * Thus far, the major user of this is IBM's Summit2 series: * Clustered boxes may have unsynced TSC problems if they are * multi-chassis. 
* Use DMI to check them */ int apic_is_clustered_box(void) { dmi_check_multi(); return multi; } #endif /* * APIC command line parameters */ static int __init setup_disableapic(char *arg) { apic_is_disabled = true; setup_clear_cpu_cap(X86_FEATURE_APIC); return 0; } early_param("disableapic", setup_disableapic); /* same as disableapic, for compatibility */ static int __init setup_nolapic(char *arg) { return setup_disableapic(arg); } early_param("nolapic", setup_nolapic); static int __init parse_lapic_timer_c2_ok(char *arg) { local_apic_timer_c2_ok = 1; return 0; } early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); static int __init parse_disable_apic_timer(char *arg) { disable_apic_timer = 1; return 0; } early_param("noapictimer", parse_disable_apic_timer); static int __init parse_nolapic_timer(char *arg) { disable_apic_timer = 1; return 0; } early_param("nolapic_timer", parse_nolapic_timer); static int __init apic_set_verbosity(char *arg) { if (!arg) { if (IS_ENABLED(CONFIG_X86_32)) return -EINVAL; ioapic_is_disabled = false; return 0; } if (strcmp("debug", arg) == 0) apic_verbosity = APIC_DEBUG; else if (strcmp("verbose", arg) == 0) apic_verbosity = APIC_VERBOSE; #ifdef CONFIG_X86_64 else { pr_warn("APIC Verbosity level %s not recognised" " use apic=verbose or apic=debug\n", arg); return -EINVAL; } #endif return 0; } early_param("apic", apic_set_verbosity); static int __init lapic_insert_resource(void) { if (!apic_mmio_base) return -1; /* Put local APIC into the resource map. */ lapic_resource.start = apic_mmio_base; lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1; insert_resource(&iomem_resource, &lapic_resource); return 0; } /* * need call insert after e820__reserve_resources() * that is using request_resource */ late_initcall(lapic_insert_resource); static int __init apic_set_disabled_cpu_apicid(char *arg) { if (!arg || !get_option(&arg, &disabled_cpu_apicid)) return -EINVAL; return 0; } early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid); static int __init apic_set_extnmi(char *arg) { if (!arg) return -EINVAL; if (!strncmp("all", arg, 3)) apic_extnmi = APIC_EXTNMI_ALL; else if (!strncmp("none", arg, 4)) apic_extnmi = APIC_EXTNMI_NONE; else if (!strncmp("bsp", arg, 3)) apic_extnmi = APIC_EXTNMI_BSP; else { pr_warn("Unknown external NMI delivery mode `%s' ignored\n", arg); return -EINVAL; } return 0; } early_param("apic_extnmi", apic_set_extnmi);
linux-master
arch/x86/kernel/apic/apic.c
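Illustrative aside (not part of the kernel listing above): calibrate_APIC_clock() in apic.c derives lapic_timer_period from the APIC counts consumed across LAPIC_CAL_LOOPS ticks as (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS, then reports the bus clock as lapic_timer_period / (1000000 / HZ) MHz. The standalone sketch below reproduces only that arithmetic under assumed values (HZ = 250, APIC_DIVISOR = 16, and a made-up measured delta corresponding to a 100 MHz bus clock); it is a sanity check of the math, not kernel code.

/* Standalone sketch of the APIC timer calibration arithmetic (assumed values). */
#include <stdio.h>

#define HZ              250            /* assumed kernel tick rate */
#define APIC_DIVISOR    16             /* divide value assumed during calibration */
#define LAPIC_CAL_LOOPS (HZ / 10)      /* ~100ms worth of ticks, as in apic.c */

int main(void)
{
	/*
	 * Hypothetical APIC count consumed in the 100ms window (t1 - t2, the
	 * timer counts down). For a 100 MHz bus clock divided by 16:
	 * 100000000 / 16 * 0.1s = 625000 counts.
	 */
	long delta = 625000;

	unsigned int lapic_timer_period = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;

	printf("calibration result: %u counts per tick\n", lapic_timer_period);
	printf("host bus clock speed is %u.%04u MHz\n",
	       lapic_timer_period / (1000000 / HZ),
	       lapic_timer_period % (1000000 / HZ));
	return 0;
}

With these inputs the sketch prints 400000 counts per tick and a bus clock of 100.0000 MHz, mirroring the apic_printk() output format used in calibrate_APIC_clock().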
// SPDX-License-Identifier: GPL-2.0-only /* * xsave/xrstor support. * * Author: Suresh Siddha <[email protected]> */ #include <linux/bitops.h> #include <linux/compat.h> #include <linux/cpu.h> #include <linux/mman.h> #include <linux/nospec.h> #include <linux/pkeys.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/vmalloc.h> #include <asm/fpu/api.h> #include <asm/fpu/regset.h> #include <asm/fpu/signal.h> #include <asm/fpu/xcr.h> #include <asm/tlbflush.h> #include <asm/prctl.h> #include <asm/elf.h> #include "context.h" #include "internal.h" #include "legacy.h" #include "xstate.h" #define for_each_extended_xfeature(bit, mask) \ (bit) = FIRST_EXTENDED_XFEATURE; \ for_each_set_bit_from(bit, (unsigned long *)&(mask), 8 * sizeof(mask)) /* * Although we spell it out in here, the Processor Trace * xfeature is completely unused. We use other mechanisms * to save/restore PT state in Linux. */ static const char *xfeature_names[] = { "x87 floating point registers", "SSE registers", "AVX registers", "MPX bounds registers", "MPX CSR", "AVX-512 opmask", "AVX-512 Hi256", "AVX-512 ZMM_Hi256", "Processor Trace (unused)", "Protection Keys User registers", "PASID state", "Control-flow User registers", "Control-flow Kernel registers (unused)", "unknown xstate feature", "unknown xstate feature", "unknown xstate feature", "unknown xstate feature", "AMX Tile config", "AMX Tile data", "unknown xstate feature", }; static unsigned short xsave_cpuid_features[] __initdata = { [XFEATURE_FP] = X86_FEATURE_FPU, [XFEATURE_SSE] = X86_FEATURE_XMM, [XFEATURE_YMM] = X86_FEATURE_AVX, [XFEATURE_BNDREGS] = X86_FEATURE_MPX, [XFEATURE_BNDCSR] = X86_FEATURE_MPX, [XFEATURE_OPMASK] = X86_FEATURE_AVX512F, [XFEATURE_ZMM_Hi256] = X86_FEATURE_AVX512F, [XFEATURE_Hi16_ZMM] = X86_FEATURE_AVX512F, [XFEATURE_PT_UNIMPLEMENTED_SO_FAR] = X86_FEATURE_INTEL_PT, [XFEATURE_PKRU] = X86_FEATURE_OSPKE, [XFEATURE_PASID] = X86_FEATURE_ENQCMD, [XFEATURE_CET_USER] = X86_FEATURE_SHSTK, [XFEATURE_XTILE_CFG] = X86_FEATURE_AMX_TILE, [XFEATURE_XTILE_DATA] = X86_FEATURE_AMX_TILE, }; static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init = { [ 0 ... XFEATURE_MAX - 1] = -1}; static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init = { [ 0 ... XFEATURE_MAX - 1] = -1}; static unsigned int xstate_flags[XFEATURE_MAX] __ro_after_init; #define XSTATE_FLAG_SUPERVISOR BIT(0) #define XSTATE_FLAG_ALIGNED64 BIT(1) /* * Return whether the system supports a given xfeature. * * Also return the name of the (most advanced) feature that the caller requested: */ int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name) { u64 xfeatures_missing = xfeatures_needed & ~fpu_kernel_cfg.max_features; if (unlikely(feature_name)) { long xfeature_idx, max_idx; u64 xfeatures_print; /* * So we use FLS here to be able to print the most advanced * feature that was requested but is missing. 
So if a driver * asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we'll print the * missing AVX feature - this is the most informative message * to users: */ if (xfeatures_missing) xfeatures_print = xfeatures_missing; else xfeatures_print = xfeatures_needed; xfeature_idx = fls64(xfeatures_print)-1; max_idx = ARRAY_SIZE(xfeature_names)-1; xfeature_idx = min(xfeature_idx, max_idx); *feature_name = xfeature_names[xfeature_idx]; } if (xfeatures_missing) return 0; return 1; } EXPORT_SYMBOL_GPL(cpu_has_xfeatures); static bool xfeature_is_aligned64(int xfeature_nr) { return xstate_flags[xfeature_nr] & XSTATE_FLAG_ALIGNED64; } static bool xfeature_is_supervisor(int xfeature_nr) { return xstate_flags[xfeature_nr] & XSTATE_FLAG_SUPERVISOR; } static unsigned int xfeature_get_offset(u64 xcomp_bv, int xfeature) { unsigned int offs, i; /* * Non-compacted format and legacy features use the cached fixed * offsets. */ if (!cpu_feature_enabled(X86_FEATURE_XCOMPACTED) || xfeature <= XFEATURE_SSE) return xstate_offsets[xfeature]; /* * Compacted format offsets depend on the actual content of the * compacted xsave area which is determined by the xcomp_bv header * field. */ offs = FXSAVE_SIZE + XSAVE_HDR_SIZE; for_each_extended_xfeature(i, xcomp_bv) { if (xfeature_is_aligned64(i)) offs = ALIGN(offs, 64); if (i == xfeature) break; offs += xstate_sizes[i]; } return offs; } /* * Enable the extended processor state save/restore feature. * Called once per CPU onlining. */ void fpu__init_cpu_xstate(void) { if (!boot_cpu_has(X86_FEATURE_XSAVE) || !fpu_kernel_cfg.max_features) return; cr4_set_bits(X86_CR4_OSXSAVE); /* * Must happen after CR4 setup and before xsetbv() to allow KVM * lazy passthrough. Write independent of the dynamic state static * key as that does not work on the boot CPU. This also ensures * that any stale state is wiped out from XFD. */ if (cpu_feature_enabled(X86_FEATURE_XFD)) wrmsrl(MSR_IA32_XFD, init_fpstate.xfd); /* * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features * managed by XSAVE{C, OPT, S} and XRSTOR{S}. Only XSAVE user * states can be set here. */ xsetbv(XCR_XFEATURE_ENABLED_MASK, fpu_user_cfg.max_features); /* * MSR_IA32_XSS sets supervisor states managed by XSAVES. */ if (boot_cpu_has(X86_FEATURE_XSAVES)) { wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | xfeatures_mask_independent()); } } static bool xfeature_enabled(enum xfeature xfeature) { return fpu_kernel_cfg.max_features & BIT_ULL(xfeature); } /* * Record the offsets and sizes of various xstates contained * in the XSAVE state memory layout. */ static void __init setup_xstate_cache(void) { u32 eax, ebx, ecx, edx, i; /* start at the beginning of the "extended state" */ unsigned int last_good_offset = offsetof(struct xregs_state, extended_state_area); /* * The FP xstates and SSE xstates are legacy states. They are always * in the fixed offsets in the xsave area in either compacted form * or standard form. */ xstate_offsets[XFEATURE_FP] = 0; xstate_sizes[XFEATURE_FP] = offsetof(struct fxregs_state, xmm_space); xstate_offsets[XFEATURE_SSE] = xstate_sizes[XFEATURE_FP]; xstate_sizes[XFEATURE_SSE] = sizeof_field(struct fxregs_state, xmm_space); for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) { cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx); xstate_sizes[i] = eax; xstate_flags[i] = ecx; /* * If an xfeature is supervisor state, the offset in EBX is * invalid, leave it to -1. 
*/ if (xfeature_is_supervisor(i)) continue; xstate_offsets[i] = ebx; /* * In our xstate size checks, we assume that the highest-numbered * xstate feature has the highest offset in the buffer. Ensure * it does. */ WARN_ONCE(last_good_offset > xstate_offsets[i], "x86/fpu: misordered xstate at %d\n", last_good_offset); last_good_offset = xstate_offsets[i]; } } static void __init print_xstate_feature(u64 xstate_mask) { const char *feature_name; if (cpu_has_xfeatures(xstate_mask, &feature_name)) pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", xstate_mask, feature_name); } /* * Print out all the supported xstate features: */ static void __init print_xstate_features(void) { print_xstate_feature(XFEATURE_MASK_FP); print_xstate_feature(XFEATURE_MASK_SSE); print_xstate_feature(XFEATURE_MASK_YMM); print_xstate_feature(XFEATURE_MASK_BNDREGS); print_xstate_feature(XFEATURE_MASK_BNDCSR); print_xstate_feature(XFEATURE_MASK_OPMASK); print_xstate_feature(XFEATURE_MASK_ZMM_Hi256); print_xstate_feature(XFEATURE_MASK_Hi16_ZMM); print_xstate_feature(XFEATURE_MASK_PKRU); print_xstate_feature(XFEATURE_MASK_PASID); print_xstate_feature(XFEATURE_MASK_CET_USER); print_xstate_feature(XFEATURE_MASK_XTILE_CFG); print_xstate_feature(XFEATURE_MASK_XTILE_DATA); } /* * This check is important because it is easy to get XSTATE_* * confused with XSTATE_BIT_*. */ #define CHECK_XFEATURE(nr) do { \ WARN_ON(nr < FIRST_EXTENDED_XFEATURE); \ WARN_ON(nr >= XFEATURE_MAX); \ } while (0) /* * Print out xstate component offsets and sizes */ static void __init print_xstate_offset_size(void) { int i; for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) { pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n", i, xfeature_get_offset(fpu_kernel_cfg.max_features, i), i, xstate_sizes[i]); } } /* * This function is called only during boot time when x86 caps are not set * up and alternative can not be used yet. */ static __init void os_xrstor_booting(struct xregs_state *xstate) { u64 mask = fpu_kernel_cfg.max_features & XFEATURE_MASK_FPSTATE; u32 lmask = mask; u32 hmask = mask >> 32; int err; if (cpu_feature_enabled(X86_FEATURE_XSAVES)) XSTATE_OP(XRSTORS, xstate, lmask, hmask, err); else XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); /* * We should never fault when copying from a kernel buffer, and the FPU * state we set at boot time should be valid. */ WARN_ON_FPU(err); } /* * All supported features have either init state all zeros or are * handled in setup_init_fpu() individually. This is an explicit * feature list and does not use XFEATURE_MASK*SUPPORTED to catch * newly added supported features at build time and make people * actually look at the init state for the new feature. 
*/ #define XFEATURES_INIT_FPSTATE_HANDLED \ (XFEATURE_MASK_FP | \ XFEATURE_MASK_SSE | \ XFEATURE_MASK_YMM | \ XFEATURE_MASK_OPMASK | \ XFEATURE_MASK_ZMM_Hi256 | \ XFEATURE_MASK_Hi16_ZMM | \ XFEATURE_MASK_PKRU | \ XFEATURE_MASK_BNDREGS | \ XFEATURE_MASK_BNDCSR | \ XFEATURE_MASK_PASID | \ XFEATURE_MASK_CET_USER | \ XFEATURE_MASK_XTILE) /* * setup the xstate image representing the init state */ static void __init setup_init_fpu_buf(void) { BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED | XFEATURE_MASK_SUPERVISOR_SUPPORTED) != XFEATURES_INIT_FPSTATE_HANDLED); if (!boot_cpu_has(X86_FEATURE_XSAVE)) return; print_xstate_features(); xstate_init_xcomp_bv(&init_fpstate.regs.xsave, init_fpstate.xfeatures); /* * Init all the features state with header.xfeatures being 0x0 */ os_xrstor_booting(&init_fpstate.regs.xsave); /* * All components are now in init state. Read the state back so * that init_fpstate contains all non-zero init state. This only * works with XSAVE, but not with XSAVEOPT and XSAVEC/S because * those use the init optimization which skips writing data for * components in init state. * * XSAVE could be used, but that would require to reshuffle the * data when XSAVEC/S is available because XSAVEC/S uses xstate * compaction. But doing so is a pointless exercise because most * components have an all zeros init state except for the legacy * ones (FP and SSE). Those can be saved with FXSAVE into the * legacy area. Adding new features requires to ensure that init * state is all zeroes or if not to add the necessary handling * here. */ fxsave(&init_fpstate.regs.fxsave); } int xfeature_size(int xfeature_nr) { u32 eax, ebx, ecx, edx; CHECK_XFEATURE(xfeature_nr); cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx); return eax; } /* Validate an xstate header supplied by userspace (ptrace or sigreturn) */ static int validate_user_xstate_header(const struct xstate_header *hdr, struct fpstate *fpstate) { /* No unknown or supervisor features may be set */ if (hdr->xfeatures & ~fpstate->user_xfeatures) return -EINVAL; /* Userspace must use the uncompacted format */ if (hdr->xcomp_bv) return -EINVAL; /* * If 'reserved' is shrunken to add a new field, make sure to validate * that new field here! */ BUILD_BUG_ON(sizeof(hdr->reserved) != 48); /* No reserved bits may be set */ if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved))) return -EINVAL; return 0; } static void __init __xstate_dump_leaves(void) { int i; u32 eax, ebx, ecx, edx; static int should_dump = 1; if (!should_dump) return; should_dump = 0; /* * Dump out a few leaves past the ones that we support * just in case there are some goodies up there */ for (i = 0; i < XFEATURE_MAX + 10; i++) { cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx); pr_warn("CPUID[%02x, %02x]: eax=%08x ebx=%08x ecx=%08x edx=%08x\n", XSTATE_CPUID, i, eax, ebx, ecx, edx); } } #define XSTATE_WARN_ON(x, fmt, ...) do { \ if (WARN_ONCE(x, "XSAVE consistency problem: " fmt, ##__VA_ARGS__)) { \ __xstate_dump_leaves(); \ } \ } while (0) #define XCHECK_SZ(sz, nr, __struct) ({ \ if (WARN_ONCE(sz != sizeof(__struct), \ "[%s]: struct is %zu bytes, cpu state %d bytes\n", \ xfeature_names[nr], sizeof(__struct), sz)) { \ __xstate_dump_leaves(); \ } \ true; \ }) /** * check_xtile_data_against_struct - Check tile data state size. * * Calculate the state size by multiplying the single tile size which is * recorded in a C struct, and the number of tiles that the CPU informs. * Compare the provided size with the calculation. 
* * @size: The tile data state size * * Returns: 0 on success, -EINVAL on mismatch. */ static int __init check_xtile_data_against_struct(int size) { u32 max_palid, palid, state_size; u32 eax, ebx, ecx, edx; u16 max_tile; /* * Check the maximum palette id: * eax: the highest numbered palette subleaf. */ cpuid_count(TILE_CPUID, 0, &max_palid, &ebx, &ecx, &edx); /* * Cross-check each tile size and find the maximum number of * supported tiles. */ for (palid = 1, max_tile = 0; palid <= max_palid; palid++) { u16 tile_size, max; /* * Check the tile size info: * eax[31:16]: bytes per tile * ebx[31:16]: the max names (or max number of tiles) */ cpuid_count(TILE_CPUID, palid, &eax, &ebx, &ecx, &edx); tile_size = eax >> 16; max = ebx >> 16; if (tile_size != sizeof(struct xtile_data)) { pr_err("%s: struct is %zu bytes, cpu xtile %d bytes\n", __stringify(XFEATURE_XTILE_DATA), sizeof(struct xtile_data), tile_size); __xstate_dump_leaves(); return -EINVAL; } if (max > max_tile) max_tile = max; } state_size = sizeof(struct xtile_data) * max_tile; if (size != state_size) { pr_err("%s: calculated size is %u bytes, cpu state %d bytes\n", __stringify(XFEATURE_XTILE_DATA), state_size, size); __xstate_dump_leaves(); return -EINVAL; } return 0; } /* * We have a C struct for each 'xstate'. We need to ensure * that our software representation matches what the CPU * tells us about the state's size. */ static bool __init check_xstate_against_struct(int nr) { /* * Ask the CPU for the size of the state. */ int sz = xfeature_size(nr); /* * Match each CPU state with the corresponding software * structure. */ switch (nr) { case XFEATURE_YMM: return XCHECK_SZ(sz, nr, struct ymmh_struct); case XFEATURE_BNDREGS: return XCHECK_SZ(sz, nr, struct mpx_bndreg_state); case XFEATURE_BNDCSR: return XCHECK_SZ(sz, nr, struct mpx_bndcsr_state); case XFEATURE_OPMASK: return XCHECK_SZ(sz, nr, struct avx_512_opmask_state); case XFEATURE_ZMM_Hi256: return XCHECK_SZ(sz, nr, struct avx_512_zmm_uppers_state); case XFEATURE_Hi16_ZMM: return XCHECK_SZ(sz, nr, struct avx_512_hi16_state); case XFEATURE_PKRU: return XCHECK_SZ(sz, nr, struct pkru_state); case XFEATURE_PASID: return XCHECK_SZ(sz, nr, struct ia32_pasid_state); case XFEATURE_XTILE_CFG: return XCHECK_SZ(sz, nr, struct xtile_cfg); case XFEATURE_CET_USER: return XCHECK_SZ(sz, nr, struct cet_user_state); case XFEATURE_XTILE_DATA: check_xtile_data_against_struct(sz); return true; default: XSTATE_WARN_ON(1, "No structure for xstate: %d\n", nr); return false; } return true; } static unsigned int xstate_calculate_size(u64 xfeatures, bool compacted) { unsigned int topmost = fls64(xfeatures) - 1; unsigned int offset = xstate_offsets[topmost]; if (topmost <= XFEATURE_SSE) return sizeof(struct xregs_state); if (compacted) offset = xfeature_get_offset(xfeatures, topmost); return offset + xstate_sizes[topmost]; } /* * This essentially double-checks what the cpu told us about * how large the XSAVE buffer needs to be. We are recalculating * it to be safe. * * Independent XSAVE features allocate their own buffers and are not * covered by these checks. Only the size of the buffer for task->fpu * is checked here.
*/ static bool __init paranoid_xstate_size_valid(unsigned int kernel_size) { bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED); bool xsaves = cpu_feature_enabled(X86_FEATURE_XSAVES); unsigned int size = FXSAVE_SIZE + XSAVE_HDR_SIZE; int i; for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) { if (!check_xstate_against_struct(i)) return false; /* * Supervisor state components can be managed only by * XSAVES. */ if (!xsaves && xfeature_is_supervisor(i)) { XSTATE_WARN_ON(1, "Got supervisor feature %d, but XSAVES not advertised\n", i); return false; } } size = xstate_calculate_size(fpu_kernel_cfg.max_features, compacted); XSTATE_WARN_ON(size != kernel_size, "size %u != kernel_size %u\n", size, kernel_size); return size == kernel_size; } /* * Get total size of enabled xstates in XCR0 | IA32_XSS. * * Note the SDM's wording here. "sub-function 0" only enumerates * the size of the *user* states. If we use it to size a buffer * that we use 'XSAVES' on, we could potentially overflow the * buffer because 'XSAVES' saves system states too. * * This also takes compaction into account. So this works for * XSAVEC as well. */ static unsigned int __init get_compacted_size(void) { unsigned int eax, ebx, ecx, edx; /* * - CPUID function 0DH, sub-function 1: * EBX enumerates the size (in bytes) required by * the XSAVES instruction for an XSAVE area * containing all the state components * corresponding to bits currently set in * XCR0 | IA32_XSS. * * When XSAVES is not available but XSAVEC is (virt), then there * are no supervisor states, but XSAVEC still uses compacted * format. */ cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx); return ebx; } /* * Get the total size of the enabled xstates without the independent supervisor * features. */ static unsigned int __init get_xsave_compacted_size(void) { u64 mask = xfeatures_mask_independent(); unsigned int size; if (!mask) return get_compacted_size(); /* Disable independent features. */ wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor()); /* * Ask the hardware what size is required of the buffer. * This is the size required for the task->fpu buffer. */ size = get_compacted_size(); /* Re-enable independent features so XSAVES will work on them again. */ wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask); return size; } static unsigned int __init get_xsave_size_user(void) { unsigned int eax, ebx, ecx, edx; /* * - CPUID function 0DH, sub-function 0: * EBX enumerates the size (in bytes) required by * the XSAVE instruction for an XSAVE area * containing all the *user* state components * corresponding to bits currently set in XCR0. */ cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx); return ebx; } static int __init init_xstate_size(void) { /* Recompute the context size for enabled features: */ unsigned int user_size, kernel_size, kernel_default_size; bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED); /* Uncompacted user space size */ user_size = get_xsave_size_user(); /* * XSAVES kernel size includes supervisor states and uses compacted * format. XSAVEC uses compacted format, but does not save * supervisor states. * * XSAVE[OPT] do not support supervisor states so kernel and user * size is identical. 
*/ if (compacted) kernel_size = get_xsave_compacted_size(); else kernel_size = user_size; kernel_default_size = xstate_calculate_size(fpu_kernel_cfg.default_features, compacted); if (!paranoid_xstate_size_valid(kernel_size)) return -EINVAL; fpu_kernel_cfg.max_size = kernel_size; fpu_user_cfg.max_size = user_size; fpu_kernel_cfg.default_size = kernel_default_size; fpu_user_cfg.default_size = xstate_calculate_size(fpu_user_cfg.default_features, false); return 0; } /* * We enabled the XSAVE hardware, but something went wrong and * we can not use it. Disable it. */ static void __init fpu__init_disable_system_xstate(unsigned int legacy_size) { fpu_kernel_cfg.max_features = 0; cr4_clear_bits(X86_CR4_OSXSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVE); /* Restore the legacy size.*/ fpu_kernel_cfg.max_size = legacy_size; fpu_kernel_cfg.default_size = legacy_size; fpu_user_cfg.max_size = legacy_size; fpu_user_cfg.default_size = legacy_size; /* * Prevent enabling the static branch which enables writes to the * XFD MSR. */ init_fpstate.xfd = 0; fpstate_reset(&current->thread.fpu); } /* * Enable and initialize the xsave feature. * Called once per system bootup. */ void __init fpu__init_system_xstate(unsigned int legacy_size) { unsigned int eax, ebx, ecx, edx; u64 xfeatures; int err; int i; if (!boot_cpu_has(X86_FEATURE_FPU)) { pr_info("x86/fpu: No FPU detected\n"); return; } if (!boot_cpu_has(X86_FEATURE_XSAVE)) { pr_info("x86/fpu: x87 FPU will use %s\n", boot_cpu_has(X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE"); return; } if (boot_cpu_data.cpuid_level < XSTATE_CPUID) { WARN_ON_FPU(1); return; } /* * Find user xstates supported by the processor. */ cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx); fpu_kernel_cfg.max_features = eax + ((u64)edx << 32); /* * Find supervisor xstates supported by the processor. */ cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx); fpu_kernel_cfg.max_features |= ecx + ((u64)edx << 32); if ((fpu_kernel_cfg.max_features & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) { /* * This indicates that something really unexpected happened * with the enumeration. Disable XSAVE and try to continue * booting without it. This is too early to BUG(). */ pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", fpu_kernel_cfg.max_features); goto out_disable; } /* * Clear XSAVE features that are disabled in the normal CPUID. */ for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) { unsigned short cid = xsave_cpuid_features[i]; /* Careful: X86_FEATURE_FPU is 0! 
*/ if ((i != XFEATURE_FP && !cid) || !boot_cpu_has(cid)) fpu_kernel_cfg.max_features &= ~BIT_ULL(i); } if (!cpu_feature_enabled(X86_FEATURE_XFD)) fpu_kernel_cfg.max_features &= ~XFEATURE_MASK_USER_DYNAMIC; if (!cpu_feature_enabled(X86_FEATURE_XSAVES)) fpu_kernel_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED; else fpu_kernel_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED | XFEATURE_MASK_SUPERVISOR_SUPPORTED; fpu_user_cfg.max_features = fpu_kernel_cfg.max_features; fpu_user_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED; /* Clean out dynamic features from default */ fpu_kernel_cfg.default_features = fpu_kernel_cfg.max_features; fpu_kernel_cfg.default_features &= ~XFEATURE_MASK_USER_DYNAMIC; fpu_user_cfg.default_features = fpu_user_cfg.max_features; fpu_user_cfg.default_features &= ~XFEATURE_MASK_USER_DYNAMIC; /* Store it for paranoia check at the end */ xfeatures = fpu_kernel_cfg.max_features; /* * Initialize the default XFD state in initfp_state and enable the * dynamic sizing mechanism if dynamic states are available. The * static key cannot be enabled here because this runs before * jump_label_init(). This is delayed to an initcall. */ init_fpstate.xfd = fpu_user_cfg.max_features & XFEATURE_MASK_USER_DYNAMIC; /* Set up compaction feature bit */ if (cpu_feature_enabled(X86_FEATURE_XSAVEC) || cpu_feature_enabled(X86_FEATURE_XSAVES)) setup_force_cpu_cap(X86_FEATURE_XCOMPACTED); /* Enable xstate instructions to be able to continue with initialization: */ fpu__init_cpu_xstate(); /* Cache size, offset and flags for initialization */ setup_xstate_cache(); err = init_xstate_size(); if (err) goto out_disable; /* Reset the state for the current task */ fpstate_reset(&current->thread.fpu); /* * Update info used for ptrace frames; use standard-format size and no * supervisor xstates: */ update_regset_xstate_info(fpu_user_cfg.max_size, fpu_user_cfg.max_features); /* * init_fpstate excludes dynamic states as they are large but init * state is zero. */ init_fpstate.size = fpu_kernel_cfg.default_size; init_fpstate.xfeatures = fpu_kernel_cfg.default_features; if (init_fpstate.size > sizeof(init_fpstate.regs)) { pr_warn("x86/fpu: init_fpstate buffer too small (%zu < %d), disabling XSAVE\n", sizeof(init_fpstate.regs), init_fpstate.size); goto out_disable; } setup_init_fpu_buf(); /* * Paranoia check whether something in the setup modified the * xfeatures mask. */ if (xfeatures != fpu_kernel_cfg.max_features) { pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init, disabling XSAVE\n", xfeatures, fpu_kernel_cfg.max_features); goto out_disable; } /* * CPU capabilities initialization runs before FPU init. So * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely * functional, set the feature bit so depending code works. */ setup_force_cpu_cap(X86_FEATURE_OSXSAVE); print_xstate_offset_size(); pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n", fpu_kernel_cfg.max_features, fpu_kernel_cfg.max_size, boot_cpu_has(X86_FEATURE_XCOMPACTED) ? "compacted" : "standard"); return; out_disable: /* something went wrong, try to boot without any XSAVE support */ fpu__init_disable_system_xstate(legacy_size); } /* * Restore minimal FPU state after suspend: */ void fpu__resume_cpu(void) { /* * Restore XCR0 on xsave capable CPUs: */ if (cpu_feature_enabled(X86_FEATURE_XSAVE)) xsetbv(XCR_XFEATURE_ENABLED_MASK, fpu_user_cfg.max_features); /* * Restore IA32_XSS. The same CPUID bit enumerates support * of XSAVES and MSR_IA32_XSS. 
*/ if (cpu_feature_enabled(X86_FEATURE_XSAVES)) { wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | xfeatures_mask_independent()); } if (fpu_state_size_dynamic()) wrmsrl(MSR_IA32_XFD, current->thread.fpu.fpstate->xfd); } /* * Given an xstate feature nr, calculate where in the xsave * buffer the state is. Callers should ensure that the buffer * is valid. */ static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr) { u64 xcomp_bv = xsave->header.xcomp_bv; if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr))) return NULL; if (cpu_feature_enabled(X86_FEATURE_XCOMPACTED)) { if (WARN_ON_ONCE(!(xcomp_bv & BIT_ULL(xfeature_nr)))) return NULL; } return (void *)xsave + xfeature_get_offset(xcomp_bv, xfeature_nr); } /* * Given the xsave area and a state inside, this function returns the * address of the state. * * This is the API that is called to get xstate address in either * standard format or compacted format of xsave area. * * Note that if there is no data for the field in the xsave buffer * this will return NULL. * * Inputs: * xstate: the thread's storage area for all FPU data * xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP, * XFEATURE_SSE, etc...) * Output: * address of the state in the xsave area, or NULL if the * field is not present in the xsave buffer. */ void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr) { /* * Do we even *have* xsave state? */ if (!boot_cpu_has(X86_FEATURE_XSAVE)) return NULL; /* * We should not ever be requesting features that we * have not enabled. */ if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr))) return NULL; /* * This assumes the last 'xsave*' instruction to * have requested that 'xfeature_nr' be saved. * If it did not, we might be seeing and old value * of the field in the buffer. * * This can happen because the last 'xsave' did not * request that this feature be saved (unlikely) * or because the "init optimization" caused it * to not be saved. */ if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr))) return NULL; return __raw_xsave_addr(xsave, xfeature_nr); } #ifdef CONFIG_ARCH_HAS_PKEYS /* * This will go out and modify PKRU register to set the access * rights for @pkey to @init_val. */ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val) { u32 old_pkru, new_pkru_bits = 0; int pkey_shift; /* * This check implies XSAVE support. OSPKE only gets * set if we enable XSAVE and we enable PKU in XCR0. */ if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) return -EINVAL; /* * This code should only be called with valid 'pkey' * values originating from in-kernel users. Complain * if a bad value is observed. */ if (WARN_ON_ONCE(pkey >= arch_max_pkey())) return -EINVAL; /* Set the bits we need in PKRU: */ if (init_val & PKEY_DISABLE_ACCESS) new_pkru_bits |= PKRU_AD_BIT; if (init_val & PKEY_DISABLE_WRITE) new_pkru_bits |= PKRU_WD_BIT; /* Shift the bits in to the correct place in PKRU for pkey: */ pkey_shift = pkey * PKRU_BITS_PER_PKEY; new_pkru_bits <<= pkey_shift; /* Get old PKRU and mask off any old bits in place: */ old_pkru = read_pkru(); old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift); /* Write old part along with new part: */ write_pkru(old_pkru | new_pkru_bits); return 0; } #endif /* ! CONFIG_ARCH_HAS_PKEYS */ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate, void *init_xstate, unsigned int size) { membuf_write(to, from_xstate ? 
xstate : init_xstate, size); } /** * __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer * @to: membuf descriptor * @fpstate: The fpstate buffer from which to copy * @pkru_val: The PKRU value to store in the PKRU component * @copy_mode: The requested copy mode * * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming * format, i.e. from the kernel internal hardware dependent storage format * to the requested @mode. UABI XSTATE is always uncompacted! * * It supports partial copy but @to.pos always starts from zero. */ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate, u32 pkru_val, enum xstate_copy_mode copy_mode) { const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr); struct xregs_state *xinit = &init_fpstate.regs.xsave; struct xregs_state *xsave = &fpstate->regs.xsave; struct xstate_header header; unsigned int zerofrom; u64 mask; int i; memset(&header, 0, sizeof(header)); header.xfeatures = xsave->header.xfeatures; /* Mask out the feature bits depending on copy mode */ switch (copy_mode) { case XSTATE_COPY_FP: header.xfeatures &= XFEATURE_MASK_FP; break; case XSTATE_COPY_FX: header.xfeatures &= XFEATURE_MASK_FP | XFEATURE_MASK_SSE; break; case XSTATE_COPY_XSAVE: header.xfeatures &= fpstate->user_xfeatures; break; } /* Copy FP state up to MXCSR */ copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387, &xinit->i387, off_mxcsr); /* Copy MXCSR when SSE or YMM are set in the feature mask */ copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM), &to, &xsave->i387.mxcsr, &xinit->i387.mxcsr, MXCSR_AND_FLAGS_SIZE); /* Copy the remaining FP state */ copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387.st_space, &xinit->i387.st_space, sizeof(xsave->i387.st_space)); /* Copy the SSE state - shared with YMM, but independently managed */ copy_feature(header.xfeatures & XFEATURE_MASK_SSE, &to, &xsave->i387.xmm_space, &xinit->i387.xmm_space, sizeof(xsave->i387.xmm_space)); if (copy_mode != XSTATE_COPY_XSAVE) goto out; /* Zero the padding area */ membuf_zero(&to, sizeof(xsave->i387.padding)); /* Copy xsave->i387.sw_reserved */ membuf_write(&to, xstate_fx_sw_bytes, sizeof(xsave->i387.sw_reserved)); /* Copy the user space relevant state of @xsave->header */ membuf_write(&to, &header, sizeof(header)); zerofrom = offsetof(struct xregs_state, extended_state_area); /* * This 'mask' indicates which states to copy from fpstate. * Those extended states that are not present in fpstate are * either disabled or initialized: * * In non-compacted format, disabled features still occupy * state space but there is no state to copy from in the * compacted init_fpstate. The gap tracking will zero these * states. * * The extended features have an all zeroes init state. Thus, * remove them from 'mask' to zero those features in the user * buffer instead of retrieving them from init_fpstate. */ mask = header.xfeatures; for_each_extended_xfeature(i, mask) { /* * If there was a feature or alignment gap, zero the space * in the destination buffer. */ if (zerofrom < xstate_offsets[i]) membuf_zero(&to, xstate_offsets[i] - zerofrom); if (i == XFEATURE_PKRU) { struct pkru_state pkru = {0}; /* * PKRU is not necessarily up to date in the * XSAVE buffer. Use the provided value. */ pkru.pkru = pkru_val; membuf_write(&to, &pkru, sizeof(pkru)); } else { membuf_write(&to, __raw_xsave_addr(xsave, i), xstate_sizes[i]); } /* * Keep track of the last copied state in the non-compacted * target buffer for gap zeroing. 
*/ zerofrom = xstate_offsets[i] + xstate_sizes[i]; } out: if (to.left) membuf_zero(&to, to.left); } /** * copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer * @to: membuf descriptor * @tsk: The task from which to copy the saved xstate * @copy_mode: The requested copy mode * * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming * format, i.e. from the kernel internal hardware dependent storage format * to the requested @mode. UABI XSTATE is always uncompacted! * * It supports partial copy but @to.pos always starts from zero. */ void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk, enum xstate_copy_mode copy_mode) { __copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate, tsk->thread.pkru, copy_mode); } static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size, const void *kbuf, const void __user *ubuf) { if (kbuf) { memcpy(dst, kbuf + offset, size); } else { if (copy_from_user(dst, ubuf + offset, size)) return -EFAULT; } return 0; } /** * copy_uabi_to_xstate - Copy a UABI format buffer to the kernel xstate * @fpstate: The fpstate buffer to copy to * @kbuf: The UABI format buffer, if it comes from the kernel * @ubuf: The UABI format buffer, if it comes from userspace * @pkru: The location to write the PKRU value to * * Converts from the UABI format into the kernel internal hardware * dependent format. * * This function ultimately has three different callers with distinct PKRU * behavior. * 1. When called from sigreturn the PKRU register will be restored from * @fpstate via an XRSTOR. Correctly copying the UABI format buffer to * @fpstate is sufficient to cover this case, but the caller will also * pass a pointer to the thread_struct's pkru field in @pkru and updating * it is harmless. * 2. When called from ptrace the PKRU register will be restored from the * thread_struct's pkru field. A pointer to that is passed in @pkru. * The kernel will restore it manually, so the XRSTOR behavior that resets * the PKRU register to the hardware init value (0) if the corresponding * xfeatures bit is not set is emulated here. * 3. When called from KVM the PKRU register will be restored from the vcpu's * pkru field. A pointer to that is passed in @pkru. KVM hasn't used * XRSTOR and hasn't had the PKRU resetting behavior described above. To * preserve that KVM behavior, it passes NULL for @pkru if the xfeatures * bit is not set. */ static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf, const void __user *ubuf, u32 *pkru) { struct xregs_state *xsave = &fpstate->regs.xsave; unsigned int offset, size; struct xstate_header hdr; u64 mask; int i; offset = offsetof(struct xregs_state, header); if (copy_from_buffer(&hdr, offset, sizeof(hdr), kbuf, ubuf)) return -EFAULT; if (validate_user_xstate_header(&hdr, fpstate)) return -EINVAL; /* Validate MXCSR when any of the related features is in use */ mask = XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM; if (hdr.xfeatures & mask) { u32 mxcsr[2]; offset = offsetof(struct fxregs_state, mxcsr); if (copy_from_buffer(mxcsr, offset, sizeof(mxcsr), kbuf, ubuf)) return -EFAULT; /* Reserved bits in MXCSR must be zero. */ if (mxcsr[0] & ~mxcsr_feature_mask) return -EINVAL; /* SSE and YMM require MXCSR even when FP is not in use. 
*/ if (!(hdr.xfeatures & XFEATURE_MASK_FP)) { xsave->i387.mxcsr = mxcsr[0]; xsave->i387.mxcsr_mask = mxcsr[1]; } } for (i = 0; i < XFEATURE_MAX; i++) { mask = BIT_ULL(i); if (hdr.xfeatures & mask) { void *dst = __raw_xsave_addr(xsave, i); offset = xstate_offsets[i]; size = xstate_sizes[i]; if (copy_from_buffer(dst, offset, size, kbuf, ubuf)) return -EFAULT; } } if (hdr.xfeatures & XFEATURE_MASK_PKRU) { struct pkru_state *xpkru; xpkru = __raw_xsave_addr(xsave, XFEATURE_PKRU); *pkru = xpkru->pkru; } else { /* * KVM may pass NULL here to indicate that it does not need * PKRU updated. */ if (pkru) *pkru = 0; } /* * The state that came in from userspace was user-state only. * Mask all the user states out of 'xfeatures': */ xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL; /* * Add back in the features that came in from userspace: */ xsave->header.xfeatures |= hdr.xfeatures; return 0; } /* * Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S] * format and copy to the target thread. Used by ptrace and KVM. */ int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru) { return copy_uabi_to_xstate(fpstate, kbuf, NULL, pkru); } /* * Convert from a sigreturn standard-format user-space buffer to kernel * XSAVE[S] format and copy to the target thread. This is called from the * sigreturn() and rt_sigreturn() system calls. */ int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void __user *ubuf) { return copy_uabi_to_xstate(tsk->thread.fpu.fpstate, NULL, ubuf, &tsk->thread.pkru); } static bool validate_independent_components(u64 mask) { u64 xchk; if (WARN_ON_FPU(!cpu_feature_enabled(X86_FEATURE_XSAVES))) return false; xchk = ~xfeatures_mask_independent(); if (WARN_ON_ONCE(!mask || mask & xchk)) return false; return true; } /** * xsaves - Save selected components to a kernel xstate buffer * @xstate: Pointer to the buffer * @mask: Feature mask to select the components to save * * The @xstate buffer must be 64 byte aligned and correctly initialized as * XSAVES does not write the full xstate header. Before first use the * buffer should be zeroed otherwise a consecutive XRSTORS from that buffer * can #GP. * * The feature mask must be a subset of the independent features. */ void xsaves(struct xregs_state *xstate, u64 mask) { int err; if (!validate_independent_components(mask)) return; XSTATE_OP(XSAVES, xstate, (u32)mask, (u32)(mask >> 32), err); WARN_ON_ONCE(err); } /** * xrstors - Restore selected components from a kernel xstate buffer * @xstate: Pointer to the buffer * @mask: Feature mask to select the components to restore * * The @xstate buffer must be 64 byte aligned and correctly initialized * otherwise XRSTORS from that buffer can #GP. * * Proper usage is to restore the state which was saved with * xsaves() into @xstate. * * The feature mask must be a subset of the independent features. */ void xrstors(struct xregs_state *xstate, u64 mask) { int err; if (!validate_independent_components(mask)) return; XSTATE_OP(XRSTORS, xstate, (u32)mask, (u32)(mask >> 32), err); WARN_ON_ONCE(err); } #if IS_ENABLED(CONFIG_KVM) void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature) { void *addr = get_xsave_addr(&fps->regs.xsave, xfeature); if (addr) memset(addr, 0, xstate_sizes[xfeature]); } EXPORT_SYMBOL_GPL(fpstate_clear_xstate_component); #endif #ifdef CONFIG_X86_64 #ifdef CONFIG_X86_DEBUG_FPU /* * Ensure that a subsequent XSAVE* or XRSTOR* instruction with RFBM=@mask * can safely operate on the @fpstate buffer. 
*/ static bool xstate_op_valid(struct fpstate *fpstate, u64 mask, bool rstor) { u64 xfd = __this_cpu_read(xfd_state); if (fpstate->xfd == xfd) return true; /* * The XFD MSR does not match fpstate->xfd. That's invalid when * the passed in fpstate is current's fpstate. */ if (fpstate->xfd == current->thread.fpu.fpstate->xfd) return false; /* * XRSTOR(S) from init_fpstate are always correct as it will just * bring all components into init state and not read from the * buffer. XSAVE(S) raises #PF after init. */ if (fpstate == &init_fpstate) return rstor; /* * XSAVE(S): clone(), fpu_swap_kvm_fpu() * XRSTORS(S): fpu_swap_kvm_fpu() */ /* * No XSAVE/XRSTOR instructions (except XSAVE itself) touch * the buffer area for XFD-disabled state components. */ mask &= ~xfd; /* * Remove features which are valid in fpstate. They * have space allocated in fpstate. */ mask &= ~fpstate->xfeatures; /* * Any remaining state components in 'mask' might be written * by XSAVE/XRSTOR. Fail validation it found. */ return !mask; } void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor) { WARN_ON_ONCE(!xstate_op_valid(fpstate, mask, rstor)); } #endif /* CONFIG_X86_DEBUG_FPU */ static int __init xfd_update_static_branch(void) { /* * If init_fpstate.xfd has bits set then dynamic features are * available and the dynamic sizing must be enabled. */ if (init_fpstate.xfd) static_branch_enable(&__fpu_state_size_dynamic); return 0; } arch_initcall(xfd_update_static_branch) void fpstate_free(struct fpu *fpu) { if (fpu->fpstate && fpu->fpstate != &fpu->__fpstate) vfree(fpu->fpstate); } /** * fpstate_realloc - Reallocate struct fpstate for the requested new features * * @xfeatures: A bitmap of xstate features which extend the enabled features * of that task * @ksize: The required size for the kernel buffer * @usize: The required size for user space buffers * @guest_fpu: Pointer to a guest FPU container. NULL for host allocations * * Note vs. vmalloc(): If the task with a vzalloc()-allocated buffer * terminates quickly, vfree()-induced IPIs may be a concern, but tasks * with large states are likely to live longer. * * Returns: 0 on success, -ENOMEM on allocation error. */ static int fpstate_realloc(u64 xfeatures, unsigned int ksize, unsigned int usize, struct fpu_guest *guest_fpu) { struct fpu *fpu = &current->thread.fpu; struct fpstate *curfps, *newfps = NULL; unsigned int fpsize; bool in_use; fpsize = ksize + ALIGN(offsetof(struct fpstate, regs), 64); newfps = vzalloc(fpsize); if (!newfps) return -ENOMEM; newfps->size = ksize; newfps->user_size = usize; newfps->is_valloc = true; /* * When a guest FPU is supplied, use @guest_fpu->fpstate * as reference independent whether it is in use or not. */ curfps = guest_fpu ? guest_fpu->fpstate : fpu->fpstate; /* Determine whether @curfps is the active fpstate */ in_use = fpu->fpstate == curfps; if (guest_fpu) { newfps->is_guest = true; newfps->is_confidential = curfps->is_confidential; newfps->in_use = curfps->in_use; guest_fpu->xfeatures |= xfeatures; guest_fpu->uabi_size = usize; } fpregs_lock(); /* * If @curfps is in use, ensure that the current state is in the * registers before swapping fpstate as that might invalidate it * due to layout changes. 
*/ if (in_use && test_thread_flag(TIF_NEED_FPU_LOAD)) fpregs_restore_userregs(); newfps->xfeatures = curfps->xfeatures | xfeatures; if (!guest_fpu) newfps->user_xfeatures = curfps->user_xfeatures | xfeatures; newfps->xfd = curfps->xfd & ~xfeatures; /* Do the final updates within the locked region */ xstate_init_xcomp_bv(&newfps->regs.xsave, newfps->xfeatures); if (guest_fpu) { guest_fpu->fpstate = newfps; /* If curfps is active, update the FPU fpstate pointer */ if (in_use) fpu->fpstate = newfps; } else { fpu->fpstate = newfps; } if (in_use) xfd_update_state(fpu->fpstate); fpregs_unlock(); /* Only free valloc'ed state */ if (curfps && curfps->is_valloc) vfree(curfps); return 0; } static int validate_sigaltstack(unsigned int usize) { struct task_struct *thread, *leader = current->group_leader; unsigned long framesize = get_sigframe_size(); lockdep_assert_held(&current->sighand->siglock); /* get_sigframe_size() is based on fpu_user_cfg.max_size */ framesize -= fpu_user_cfg.max_size; framesize += usize; for_each_thread(leader, thread) { if (thread->sas_ss_size && thread->sas_ss_size < framesize) return -ENOSPC; } return 0; } static int __xstate_request_perm(u64 permitted, u64 requested, bool guest) { /* * This deliberately does not exclude !XSAVES as we still might * decide to optionally context switch XCR0 or talk the silicon * vendors into extending XFD for the pre AMX states, especially * AVX512. */ bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED); struct fpu *fpu = &current->group_leader->thread.fpu; struct fpu_state_perm *perm; unsigned int ksize, usize; u64 mask; int ret = 0; /* Check whether fully enabled */ if ((permitted & requested) == requested) return 0; /* Calculate the resulting kernel state size */ mask = permitted | requested; /* Take supervisor states into account on the host */ if (!guest) mask |= xfeatures_mask_supervisor(); ksize = xstate_calculate_size(mask, compacted); /* Calculate the resulting user state size */ mask &= XFEATURE_MASK_USER_SUPPORTED; usize = xstate_calculate_size(mask, false); if (!guest) { ret = validate_sigaltstack(usize); if (ret) return ret; } perm = guest ? &fpu->guest_perm : &fpu->perm; /* Pairs with the READ_ONCE() in xstate_get_group_perm() */ WRITE_ONCE(perm->__state_perm, mask); /* Protected by sighand lock */ perm->__state_size = ksize; perm->__user_state_size = usize; return ret; } /* * Permissions array to map facilities with more than one component */ static const u64 xstate_prctl_req[XFEATURE_MAX] = { [XFEATURE_XTILE_DATA] = XFEATURE_MASK_XTILE_DATA, }; static int xstate_request_perm(unsigned long idx, bool guest) { u64 permitted, requested; int ret; if (idx >= XFEATURE_MAX) return -EINVAL; /* * Look up the facility mask which can require more than * one xstate component. */ idx = array_index_nospec(idx, ARRAY_SIZE(xstate_prctl_req)); requested = xstate_prctl_req[idx]; if (!requested) return -EOPNOTSUPP; if ((fpu_user_cfg.max_features & requested) != requested) return -EOPNOTSUPP; /* Lockless quick check */ permitted = xstate_get_group_perm(guest); if ((permitted & requested) == requested) return 0; /* Protect against concurrent modifications */ spin_lock_irq(&current->sighand->siglock); permitted = xstate_get_group_perm(guest); /* First vCPU allocation locks the permissions. 
*/ if (guest && (permitted & FPU_GUEST_PERM_LOCKED)) ret = -EBUSY; else ret = __xstate_request_perm(permitted, requested, guest); spin_unlock_irq(&current->sighand->siglock); return ret; } int __xfd_enable_feature(u64 xfd_err, struct fpu_guest *guest_fpu) { u64 xfd_event = xfd_err & XFEATURE_MASK_USER_DYNAMIC; struct fpu_state_perm *perm; unsigned int ksize, usize; struct fpu *fpu; if (!xfd_event) { if (!guest_fpu) pr_err_once("XFD: Invalid xfd error: %016llx\n", xfd_err); return 0; } /* Protect against concurrent modifications */ spin_lock_irq(&current->sighand->siglock); /* If not permitted let it die */ if ((xstate_get_group_perm(!!guest_fpu) & xfd_event) != xfd_event) { spin_unlock_irq(&current->sighand->siglock); return -EPERM; } fpu = &current->group_leader->thread.fpu; perm = guest_fpu ? &fpu->guest_perm : &fpu->perm; ksize = perm->__state_size; usize = perm->__user_state_size; /* * The feature is permitted. State size is sufficient. Dropping * the lock is safe here even if more features are added from * another task, the retrieved buffer sizes are valid for the * currently requested feature(s). */ spin_unlock_irq(&current->sighand->siglock); /* * Try to allocate a new fpstate. If that fails there is no way * out. */ if (fpstate_realloc(xfd_event, ksize, usize, guest_fpu)) return -EFAULT; return 0; } int xfd_enable_feature(u64 xfd_err) { return __xfd_enable_feature(xfd_err, NULL); } #else /* CONFIG_X86_64 */ static inline int xstate_request_perm(unsigned long idx, bool guest) { return -EPERM; } #endif /* !CONFIG_X86_64 */ u64 xstate_get_guest_group_perm(void) { return xstate_get_group_perm(true); } EXPORT_SYMBOL_GPL(xstate_get_guest_group_perm); /** * fpu_xstate_prctl - xstate permission operations * @tsk: Redundant pointer to current * @option: A subfunction of arch_prctl() * @arg2: option argument * Return: 0 if successful; otherwise, an error code * * Option arguments: * * ARCH_GET_XCOMP_SUPP: Pointer to user space u64 to store the info * ARCH_GET_XCOMP_PERM: Pointer to user space u64 to store the info * ARCH_REQ_XCOMP_PERM: Facility number requested * * For facilities which require more than one XSTATE component, the request * must be the highest state component number related to that facility, * e.g. for AMX which requires XFEATURE_XTILE_CFG(17) and * XFEATURE_XTILE_DATA(18) this would be XFEATURE_XTILE_DATA(18). */ long fpu_xstate_prctl(int option, unsigned long arg2) { u64 __user *uptr = (u64 __user *)arg2; u64 permitted, supported; unsigned long idx = arg2; bool guest = false; switch (option) { case ARCH_GET_XCOMP_SUPP: supported = fpu_user_cfg.max_features | fpu_user_cfg.legacy_features; return put_user(supported, uptr); case ARCH_GET_XCOMP_PERM: /* * Lockless snapshot as it can also change right after the * dropping the lock. */ permitted = xstate_get_host_group_perm(); permitted &= XFEATURE_MASK_USER_SUPPORTED; return put_user(permitted, uptr); case ARCH_GET_XCOMP_GUEST_PERM: permitted = xstate_get_guest_group_perm(); permitted &= XFEATURE_MASK_USER_SUPPORTED; return put_user(permitted, uptr); case ARCH_REQ_XCOMP_GUEST_PERM: guest = true; fallthrough; case ARCH_REQ_XCOMP_PERM: if (!IS_ENABLED(CONFIG_X86_64)) return -EOPNOTSUPP; return xstate_request_perm(idx, guest); default: return -EINVAL; } } #ifdef CONFIG_PROC_PID_ARCH_STATUS /* * Report the amount of time elapsed in millisecond since last AVX512 * use in the task. 
*/ static void avx512_status(struct seq_file *m, struct task_struct *task) { unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp); long delta; if (!timestamp) { /* * Report -1 if no AVX512 usage */ delta = -1; } else { delta = (long)(jiffies - timestamp); /* * Cap to LONG_MAX if time difference > LONG_MAX */ if (delta < 0) delta = LONG_MAX; delta = jiffies_to_msecs(delta); } seq_put_decimal_ll(m, "AVX512_elapsed_ms:\t", delta); seq_putc(m, '\n'); } /* * Report architecture-specific information */ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { /* * Report AVX512 state if the processor and the build option support it. */ if (cpu_feature_enabled(X86_FEATURE_AVX512F)) avx512_status(m, task); return 0; } #endif /* CONFIG_PROC_PID_ARCH_STATUS */
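/*
 * Illustrative userspace sketch (not part of the kernel file above): reads
 * back the AVX512_elapsed_ms value that proc_pid_arch_status() exposes.
 * Assumes a kernel built with CONFIG_PROC_PID_ARCH_STATUS on an AVX-512
 * capable CPU; on other systems the file or the field is simply absent.
 * Build hint (assumption): gcc -O2 -o arch_status arch_status.c
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/arch_status", "r");

	if (!f) {
		perror("open /proc/self/arch_status");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		/* A value of -1 means "no AVX-512 use recorded", see avx512_status() */
		if (!strncmp(line, "AVX512_elapsed_ms:", 18))
			fputs(line, stdout);
	}

	fclose(f);
	return 0;
}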
linux-master
arch/x86/kernel/fpu/xstate.c
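/*
 * Illustrative userspace sketch (not part of the kernel sources above): how a
 * task requests AMX tile data permission through the arch_prctl() interface
 * implemented by fpu_xstate_prctl() in xstate.c. The ARCH_* and
 * XFEATURE_XTILE_DATA values mirror the x86 UAPI headers and are defined here
 * only as fallbacks for older libcs; x86-64 is assumed.
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef ARCH_GET_XCOMP_SUPP
#define ARCH_GET_XCOMP_SUPP	0x1021
#define ARCH_GET_XCOMP_PERM	0x1022
#define ARCH_REQ_XCOMP_PERM	0x1023
#endif

#define XFEATURE_XTILE_DATA	18	/* highest component number of the AMX facility */

int main(void)
{
	uint64_t supported = 0, permitted = 0;

	if (syscall(SYS_arch_prctl, ARCH_GET_XCOMP_SUPP, &supported)) {
		perror("ARCH_GET_XCOMP_SUPP");
		return 1;
	}
	printf("supported user xfeatures: %#llx\n", (unsigned long long)supported);

	if (!(supported & (1ULL << XFEATURE_XTILE_DATA))) {
		puts("AMX tile data is not supported here");
		return 0;
	}

	/*
	 * Request permission for the whole facility. The fpstate buffer is
	 * only enlarged later, on first use, via the XFD #NM path
	 * (xfd_enable_feature() -> fpstate_realloc()).
	 */
	if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILE_DATA)) {
		perror("ARCH_REQ_XCOMP_PERM");
		return 1;
	}

	syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &permitted);
	printf("permitted user xfeatures: %#llx\n", (unsigned long long)permitted);
	return 0;
}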
// SPDX-License-Identifier: GPL-2.0-only /* * x86 FPU boot time init code: */ #include <asm/fpu/api.h> #include <asm/tlbflush.h> #include <asm/setup.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/init.h> #include "internal.h" #include "legacy.h" #include "xstate.h" /* * Initialize the registers found in all CPUs, CR0 and CR4: */ static void fpu__init_cpu_generic(void) { unsigned long cr0; unsigned long cr4_mask = 0; if (boot_cpu_has(X86_FEATURE_FXSR)) cr4_mask |= X86_CR4_OSFXSR; if (boot_cpu_has(X86_FEATURE_XMM)) cr4_mask |= X86_CR4_OSXMMEXCPT; if (cr4_mask) cr4_set_bits(cr4_mask); cr0 = read_cr0(); cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */ if (!boot_cpu_has(X86_FEATURE_FPU)) cr0 |= X86_CR0_EM; write_cr0(cr0); /* Flush out any pending x87 state: */ #ifdef CONFIG_MATH_EMULATION if (!boot_cpu_has(X86_FEATURE_FPU)) fpstate_init_soft(&current->thread.fpu.fpstate->regs.soft); else #endif asm volatile ("fninit"); } /* * Enable all supported FPU features. Called when a CPU is brought online: */ void fpu__init_cpu(void) { fpu__init_cpu_generic(); fpu__init_cpu_xstate(); } static bool __init fpu__probe_without_cpuid(void) { unsigned long cr0; u16 fsw, fcw; fsw = fcw = 0xffff; cr0 = read_cr0(); cr0 &= ~(X86_CR0_TS | X86_CR0_EM); write_cr0(cr0); asm volatile("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw)); pr_info("x86/fpu: Probing for FPU: FSW=0x%04hx FCW=0x%04hx\n", fsw, fcw); return fsw == 0 && (fcw & 0x103f) == 0x003f; } static void __init fpu__init_system_early_generic(void) { if (!boot_cpu_has(X86_FEATURE_CPUID) && !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) { if (fpu__probe_without_cpuid()) setup_force_cpu_cap(X86_FEATURE_FPU); else setup_clear_cpu_cap(X86_FEATURE_FPU); } #ifndef CONFIG_MATH_EMULATION if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_FPU)) { pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n"); for (;;) asm volatile("hlt"); } #endif } /* * Boot time FPU feature detection code: */ unsigned int mxcsr_feature_mask __ro_after_init = 0xffffffffu; EXPORT_SYMBOL_GPL(mxcsr_feature_mask); static void __init fpu__init_system_mxcsr(void) { unsigned int mask = 0; if (boot_cpu_has(X86_FEATURE_FXSR)) { /* Static because GCC does not get 16-byte stack alignment right: */ static struct fxregs_state fxregs __initdata; asm volatile("fxsave %0" : "+m" (fxregs)); mask = fxregs.mxcsr_mask; /* * If zero then use the default features mask, * which has all features set, except the * denormals-are-zero feature bit: */ if (mask == 0) mask = 0x0000ffbf; } mxcsr_feature_mask &= mask; } /* * Once per bootup FPU initialization sequences that will run on most x86 CPUs: */ static void __init fpu__init_system_generic(void) { /* * Set up the legacy init FPU context. Will be updated when the * CPU supports XSAVE[S]. */ fpstate_init_user(&init_fpstate); fpu__init_system_mxcsr(); } /* * Enforce that 'MEMBER' is the last field of 'TYPE'. * * Align the computed size with alignment of the TYPE, * because that's how C aligns structs. */ #define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \ BUILD_BUG_ON(sizeof(TYPE) != \ ALIGN(offsetofend(TYPE, MEMBER), _Alignof(TYPE))) /* * We append the 'struct fpu' to the task_struct: */ static void __init fpu__init_task_struct_size(void) { int task_size = sizeof(struct task_struct); /* * Subtract off the static size of the register state. * It potentially has a bunch of padding. 
*/ task_size -= sizeof(current->thread.fpu.__fpstate.regs); /* * Add back the dynamically-calculated register state * size. */ task_size += fpu_kernel_cfg.default_size; /* * We dynamically size 'struct fpu', so we require that * it be at the end of 'thread_struct' and that * 'thread_struct' be at the end of 'task_struct'. If * you hit a compile error here, check the structure to * see if something got added to the end. */ CHECK_MEMBER_AT_END_OF(struct fpu, __fpstate); CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu); CHECK_MEMBER_AT_END_OF(struct task_struct, thread); arch_task_struct_size = task_size; } /* * Set up the user and kernel xstate sizes based on the legacy FPU context size. * * We set this up first, and later it will be overwritten by * fpu__init_system_xstate() if the CPU knows about xstates. */ static void __init fpu__init_system_xstate_size_legacy(void) { unsigned int size; /* * Note that the size configuration might be overwritten later * during fpu__init_system_xstate(). */ if (!cpu_feature_enabled(X86_FEATURE_FPU)) { size = sizeof(struct swregs_state); } else if (cpu_feature_enabled(X86_FEATURE_FXSR)) { size = sizeof(struct fxregs_state); fpu_user_cfg.legacy_features = XFEATURE_MASK_FPSSE; } else { size = sizeof(struct fregs_state); fpu_user_cfg.legacy_features = XFEATURE_MASK_FP; } fpu_kernel_cfg.max_size = size; fpu_kernel_cfg.default_size = size; fpu_user_cfg.max_size = size; fpu_user_cfg.default_size = size; fpstate_reset(&current->thread.fpu); } /* * Called on the boot CPU once per system bootup, to set up the initial * FPU state that is later cloned into all processes: */ void __init fpu__init_system(void) { fpstate_reset(&current->thread.fpu); fpu__init_system_early_generic(); /* * The FPU has to be operational for some of the * later FPU init activities: */ fpu__init_cpu(); fpu__init_system_generic(); fpu__init_system_xstate_size_legacy(); fpu__init_system_xstate(fpu_kernel_cfg.max_size); fpu__init_task_struct_size(); }
linux-master
arch/x86/kernel/fpu/init.c
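/*
 * Illustrative userspace sketch (not part of the kernel sources above):
 * repeats the MXCSR_MASK probe performed by fpu__init_system_mxcsr() in
 * init.c. The FXSAVE image is 512 bytes, must be 16-byte aligned, and
 * carries MXCSR_MASK at byte offset 28; a value of zero means "use the
 * default mask 0x0000ffbf" (everything except denormals-are-zero).
 * Assumes an x86 CPU with FXSR support.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct fxsave_area {
		uint8_t bytes[512];
	} __attribute__((aligned(16))) fx;
	uint32_t mxcsr_mask;

	memset(&fx, 0, sizeof(fx));
	asm volatile("fxsave %0" : "+m" (fx));

	memcpy(&mxcsr_mask, fx.bytes + 28, sizeof(mxcsr_mask));
	if (!mxcsr_mask)
		mxcsr_mask = 0x0000ffbf;	/* default used by the kernel */

	printf("MXCSR feature mask: %#x\n", mxcsr_mask);
	return 0;
}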
// SPDX-License-Identifier: GPL-2.0 /* * FPU register's regset abstraction, for ptrace, core dumps, etc. */ #include <linux/sched/task_stack.h> #include <linux/vmalloc.h> #include <asm/fpu/api.h> #include <asm/fpu/signal.h> #include <asm/fpu/regset.h> #include <asm/prctl.h> #include "context.h" #include "internal.h" #include "legacy.h" #include "xstate.h" /* * The xstateregs_active() routine is the same as the regset_fpregs_active() routine, * as the "regset->n" for the xstate regset will be updated based on the feature * capabilities supported by the xsave. */ int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset) { return regset->n; } int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset) { if (boot_cpu_has(X86_FEATURE_FXSR)) return regset->n; else return 0; } /* * The regset get() functions are invoked from: * * - coredump to dump the current task's fpstate. If the current task * owns the FPU then the memory state has to be synchronized and the * FPU register state preserved. Otherwise fpstate is already in sync. * * - ptrace to dump fpstate of a stopped task, in which case the registers * have already been saved to fpstate on context switch. */ static void sync_fpstate(struct fpu *fpu) { if (fpu == &current->thread.fpu) fpu_sync_fpstate(fpu); } /* * Invalidate cached FPU registers before modifying the stopped target * task's fpstate. * * This forces the target task on resume to restore the FPU registers from * modified fpstate. Otherwise the task might skip the restore and operate * with the cached FPU registers which discards the modifications. */ static void fpu_force_restore(struct fpu *fpu) { /* * Only stopped child tasks can be used to modify the FPU * state in the fpstate buffer: */ WARN_ON_FPU(fpu == &current->thread.fpu); __fpu_invalidate_fpregs_state(fpu); } int xfpregs_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { struct fpu *fpu = &target->thread.fpu; if (!cpu_feature_enabled(X86_FEATURE_FXSR)) return -ENODEV; sync_fpstate(fpu); if (!use_xsave()) { return membuf_write(&to, &fpu->fpstate->regs.fxsave, sizeof(fpu->fpstate->regs.fxsave)); } copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_FX); return 0; } int xfpregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct fpu *fpu = &target->thread.fpu; struct fxregs_state newstate; int ret; if (!cpu_feature_enabled(X86_FEATURE_FXSR)) return -ENODEV; /* No funny business with partial or oversized writes is permitted. */ if (pos != 0 || count != sizeof(newstate)) return -EINVAL; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); if (ret) return ret; /* Do not allow an invalid MXCSR value. 
*/ if (newstate.mxcsr & ~mxcsr_feature_mask) return -EINVAL; fpu_force_restore(fpu); /* Copy the state */ memcpy(&fpu->fpstate->regs.fxsave, &newstate, sizeof(newstate)); /* Clear xmm8..15 for 32-bit callers */ BUILD_BUG_ON(sizeof(fpu->__fpstate.regs.fxsave.xmm_space) != 16 * 16); if (in_ia32_syscall()) memset(&fpu->fpstate->regs.fxsave.xmm_space[8*4], 0, 8 * 16); /* Mark FP and SSE as in use when XSAVE is enabled */ if (use_xsave()) fpu->fpstate->regs.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE; return 0; } int xstateregs_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) return -ENODEV; sync_fpstate(&target->thread.fpu); copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_XSAVE); return 0; } int xstateregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct fpu *fpu = &target->thread.fpu; struct xregs_state *tmpbuf = NULL; int ret; if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) return -ENODEV; /* * A whole standard-format XSAVE buffer is needed: */ if (pos != 0 || count != fpu_user_cfg.max_size) return -EFAULT; if (!kbuf) { tmpbuf = vmalloc(count); if (!tmpbuf) return -ENOMEM; if (copy_from_user(tmpbuf, ubuf, count)) { ret = -EFAULT; goto out; } } fpu_force_restore(fpu); ret = copy_uabi_from_kernel_to_xstate(fpu->fpstate, kbuf ?: tmpbuf, &target->thread.pkru); out: vfree(tmpbuf); return ret; } #ifdef CONFIG_X86_USER_SHADOW_STACK int ssp_active(struct task_struct *target, const struct user_regset *regset) { if (target->thread.features & ARCH_SHSTK_SHSTK) return regset->n; return 0; } int ssp_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { struct fpu *fpu = &target->thread.fpu; struct cet_user_state *cetregs; if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK)) return -ENODEV; sync_fpstate(fpu); cetregs = get_xsave_addr(&fpu->fpstate->regs.xsave, XFEATURE_CET_USER); if (WARN_ON(!cetregs)) { /* * This shouldn't ever be NULL because shadow stack was * verified to be enabled above. This means * MSR_IA32_U_CET.CET_SHSTK_EN should be 1 and so * XFEATURE_CET_USER should not be in the init state. */ return -ENODEV; } return membuf_write(&to, (unsigned long *)&cetregs->user_ssp, sizeof(cetregs->user_ssp)); } int ssp_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct fpu *fpu = &target->thread.fpu; struct xregs_state *xsave = &fpu->fpstate->regs.xsave; struct cet_user_state *cetregs; unsigned long user_ssp; int r; if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) || !ssp_active(target, regset)) return -ENODEV; if (pos != 0 || count != sizeof(user_ssp)) return -EINVAL; r = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_ssp, 0, -1); if (r) return r; /* * Some kernel instructions (IRET, etc) can cause exceptions in the case * of disallowed CET register values. Just prevent invalid values. */ if (user_ssp >= TASK_SIZE_MAX || !IS_ALIGNED(user_ssp, 8)) return -EINVAL; fpu_force_restore(fpu); cetregs = get_xsave_addr(xsave, XFEATURE_CET_USER); if (WARN_ON(!cetregs)) { /* * This shouldn't ever be NULL because shadow stack was * verified to be enabled above. This means * MSR_IA32_U_CET.CET_SHSTK_EN should be 1 and so * XFEATURE_CET_USER should not be in the init state. 
*/ return -ENODEV; } cetregs->user_ssp = user_ssp; return 0; } #endif /* CONFIG_X86_USER_SHADOW_STACK */ #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION /* * FPU tag word conversions. */ static inline unsigned short twd_i387_to_fxsr(unsigned short twd) { unsigned int tmp; /* to avoid 16 bit prefixes in the code */ /* Transform each pair of bits into 01 (valid) or 00 (empty) */ tmp = ~twd; tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */ /* and move the valid bits to the lower byte. */ tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ return tmp; } #define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16) #define FP_EXP_TAG_VALID 0 #define FP_EXP_TAG_ZERO 1 #define FP_EXP_TAG_SPECIAL 2 #define FP_EXP_TAG_EMPTY 3 static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave) { struct _fpxreg *st; u32 tos = (fxsave->swd >> 11) & 7; u32 twd = (unsigned long) fxsave->twd; u32 tag; u32 ret = 0xffff0000u; int i; for (i = 0; i < 8; i++, twd >>= 1) { if (twd & 0x1) { st = FPREG_ADDR(fxsave, (i - tos) & 7); switch (st->exponent & 0x7fff) { case 0x7fff: tag = FP_EXP_TAG_SPECIAL; break; case 0x0000: if (!st->significand[0] && !st->significand[1] && !st->significand[2] && !st->significand[3]) tag = FP_EXP_TAG_ZERO; else tag = FP_EXP_TAG_SPECIAL; break; default: if (st->significand[3] & 0x8000) tag = FP_EXP_TAG_VALID; else tag = FP_EXP_TAG_SPECIAL; break; } } else { tag = FP_EXP_TAG_EMPTY; } ret |= tag << (2 * i); } return ret; } /* * FXSR floating point environment conversions. */ static void __convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk, struct fxregs_state *fxsave) { struct _fpreg *to = (struct _fpreg *) &env->st_space[0]; struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0]; int i; env->cwd = fxsave->cwd | 0xffff0000u; env->swd = fxsave->swd | 0xffff0000u; env->twd = twd_fxsr_to_i387(fxsave); #ifdef CONFIG_X86_64 env->fip = fxsave->rip; env->foo = fxsave->rdp; /* * should be actually ds/cs at fpu exception time, but * that information is not available in 64bit mode. 
*/ env->fcs = task_pt_regs(tsk)->cs; if (tsk == current) { savesegment(ds, env->fos); } else { env->fos = tsk->thread.ds; } env->fos |= 0xffff0000; #else env->fip = fxsave->fip; env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16); env->foo = fxsave->foo; env->fos = fxsave->fos; #endif for (i = 0; i < 8; ++i) memcpy(&to[i], &from[i], sizeof(to[0])); } void convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) { __convert_from_fxsr(env, tsk, &tsk->thread.fpu.fpstate->regs.fxsave); } void convert_to_fxsr(struct fxregs_state *fxsave, const struct user_i387_ia32_struct *env) { struct _fpreg *from = (struct _fpreg *) &env->st_space[0]; struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0]; int i; fxsave->cwd = env->cwd; fxsave->swd = env->swd; fxsave->twd = twd_i387_to_fxsr(env->twd); fxsave->fop = (u16) ((u32) env->fcs >> 16); #ifdef CONFIG_X86_64 fxsave->rip = env->fip; fxsave->rdp = env->foo; /* cs and ds ignored */ #else fxsave->fip = env->fip; fxsave->fcs = (env->fcs & 0xffff); fxsave->foo = env->foo; fxsave->fos = env->fos; #endif for (i = 0; i < 8; ++i) memcpy(&to[i], &from[i], sizeof(from[0])); } int fpregs_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { struct fpu *fpu = &target->thread.fpu; struct user_i387_ia32_struct env; struct fxregs_state fxsave, *fx; sync_fpstate(fpu); if (!cpu_feature_enabled(X86_FEATURE_FPU)) return fpregs_soft_get(target, regset, to); if (!cpu_feature_enabled(X86_FEATURE_FXSR)) { return membuf_write(&to, &fpu->fpstate->regs.fsave, sizeof(struct fregs_state)); } if (use_xsave()) { struct membuf mb = { .p = &fxsave, .left = sizeof(fxsave) }; /* Handle init state optimized xstate correctly */ copy_xstate_to_uabi_buf(mb, target, XSTATE_COPY_FP); fx = &fxsave; } else { fx = &fpu->fpstate->regs.fxsave; } __convert_from_fxsr(&env, target, fx); return membuf_write(&to, &env, sizeof(env)); } int fpregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct fpu *fpu = &target->thread.fpu; struct user_i387_ia32_struct env; int ret; /* No funny business with partial or oversized writes is permitted. */ if (pos != 0 || count != sizeof(struct user_i387_ia32_struct)) return -EINVAL; if (!cpu_feature_enabled(X86_FEATURE_FPU)) return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1); if (ret) return ret; fpu_force_restore(fpu); if (cpu_feature_enabled(X86_FEATURE_FXSR)) convert_to_fxsr(&fpu->fpstate->regs.fxsave, &env); else memcpy(&fpu->fpstate->regs.fsave, &env, sizeof(env)); /* * Update the header bit in the xsave header, indicating the * presence of FP. */ if (cpu_feature_enabled(X86_FEATURE_XSAVE)) fpu->fpstate->regs.xsave.header.xfeatures |= XFEATURE_MASK_FP; return 0; } #endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
linux-master
arch/x86/kernel/fpu/regset.c
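The regset handlers in the file above (xstateregs_get()/xstateregs_set()) back the NT_X86_XSTATE ptrace regset. The following is a hedged user-space sketch, not part of the kernel sources, showing how a tracer might read a stopped child's XSAVE area through that interface; the 4096-byte buffer size is an assumption made for illustration.

/* Hedged user-space sketch (not kernel code): read a traced child's
 * XSAVE area via PTRACE_GETREGSET/NT_X86_XSTATE, which is served by
 * xstateregs_get() above. The 4096-byte buffer is an assumption; a
 * real tool would size it from CPUID leaf 0xD.
 */
#include <elf.h>		/* NT_X86_XSTATE */
#include <signal.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	static uint8_t xsave_buf[4096];
	struct iovec iov = { .iov_base = xsave_buf, .iov_len = sizeof(xsave_buf) };
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* wait for the parent to inspect us */
		_exit(0);
	}
	waitpid(child, NULL, 0);	/* child is now stopped */
	if (ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov) == 0)
		printf("read %zu bytes of XSAVE state\n", iov.iov_len);
	ptrace(PTRACE_DETACH, child, NULL, NULL);
	return 0;
}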
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1994 Linus Torvalds * * Pentium III FXSR, SSE support * General FPU state handling cleanups * Gareth Hughes <[email protected]>, May 2000 */ #include <asm/fpu/api.h> #include <asm/fpu/regset.h> #include <asm/fpu/sched.h> #include <asm/fpu/signal.h> #include <asm/fpu/types.h> #include <asm/traps.h> #include <asm/irq_regs.h> #include <uapi/asm/kvm.h> #include <linux/hardirq.h> #include <linux/pkeys.h> #include <linux/vmalloc.h> #include "context.h" #include "internal.h" #include "legacy.h" #include "xstate.h" #define CREATE_TRACE_POINTS #include <asm/trace/fpu.h> #ifdef CONFIG_X86_64 DEFINE_STATIC_KEY_FALSE(__fpu_state_size_dynamic); DEFINE_PER_CPU(u64, xfd_state); #endif /* The FPU state configuration data for kernel and user space */ struct fpu_state_config fpu_kernel_cfg __ro_after_init; struct fpu_state_config fpu_user_cfg __ro_after_init; /* * Represents the initial FPU state. It's mostly (but not completely) zeroes, * depending on the FPU hardware format: */ struct fpstate init_fpstate __ro_after_init; /* Track in-kernel FPU usage */ static DEFINE_PER_CPU(bool, in_kernel_fpu); /* * Track which context is using the FPU on the CPU: */ DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx); /* * Can we use the FPU in kernel mode with the * whole "kernel_fpu_begin/end()" sequence? */ bool irq_fpu_usable(void) { if (WARN_ON_ONCE(in_nmi())) return false; /* In kernel FPU usage already active? */ if (this_cpu_read(in_kernel_fpu)) return false; /* * When not in NMI or hard interrupt context, FPU can be used in: * * - Task context except from within fpregs_lock()'ed critical * regions. * * - Soft interrupt processing context which cannot happen * while in a fpregs_lock()'ed critical region. */ if (!in_hardirq()) return true; /* * In hard interrupt context it's safe when soft interrupts * are enabled, which means the interrupt did not hit in * a fpregs_lock()'ed critical region. */ return !softirq_count(); } EXPORT_SYMBOL(irq_fpu_usable); /* * Track AVX512 state use because it is known to slow the max clock * speed of the core. */ static void update_avx_timestamp(struct fpu *fpu) { #define AVX512_TRACKING_MASK (XFEATURE_MASK_ZMM_Hi256 | XFEATURE_MASK_Hi16_ZMM) if (fpu->fpstate->regs.xsave.header.xfeatures & AVX512_TRACKING_MASK) fpu->avx512_timestamp = jiffies; } /* * Save the FPU register state in fpu->fpstate->regs. The register state is * preserved. * * Must be called with fpregs_lock() held. * * The legacy FNSAVE instruction clears all FPU state unconditionally, so * register state has to be reloaded. That might be a pointless exercise * when the FPU is going to be used by another task right after that. But * this only affects 20+ years old 32bit systems and avoids conditionals all * over the place. * * FXSAVE and all XSAVE variants preserve the FPU register state. */ void save_fpregs_to_fpstate(struct fpu *fpu) { if (likely(use_xsave())) { os_xsave(fpu->fpstate); update_avx_timestamp(fpu); return; } if (likely(use_fxsr())) { fxsave(&fpu->fpstate->regs.fxsave); return; } /* * Legacy FPU register saving, FNSAVE always clears FPU registers, * so we have to reload them from the memory state. */ asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->fpstate->regs.fsave)); frstor(&fpu->fpstate->regs.fsave); } void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask) { /* * AMD K7/K8 and later CPUs up to Zen don't save/restore * FDP/FIP/FOP unless an exception is pending. Clear the x87 state * here by setting it to fixed values. 
"m" is a random variable * that should be in L1. */ if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) { asm volatile( "fnclex\n\t" "emms\n\t" "fildl %P[addr]" /* set F?P to defined value */ : : [addr] "m" (fpstate)); } if (use_xsave()) { /* * Dynamically enabled features are enabled in XCR0, but * usage requires also that the corresponding bits in XFD * are cleared. If the bits are set then using a related * instruction will raise #NM. This allows to do the * allocation of the larger FPU buffer lazy from #NM or if * the task has no permission to kill it which would happen * via #UD if the feature is disabled in XCR0. * * XFD state is following the same life time rules as * XSTATE and to restore state correctly XFD has to be * updated before XRSTORS otherwise the component would * stay in or go into init state even if the bits are set * in fpstate::regs::xsave::xfeatures. */ xfd_update_state(fpstate); /* * Restoring state always needs to modify all features * which are in @mask even if the current task cannot use * extended features. * * So fpstate->xfeatures cannot be used here, because then * a feature for which the task has no permission but was * used by the previous task would not go into init state. */ mask = fpu_kernel_cfg.max_features & mask; os_xrstor(fpstate, mask); } else { if (use_fxsr()) fxrstor(&fpstate->regs.fxsave); else frstor(&fpstate->regs.fsave); } } void fpu_reset_from_exception_fixup(void) { restore_fpregs_from_fpstate(&init_fpstate, XFEATURE_MASK_FPSTATE); } #if IS_ENABLED(CONFIG_KVM) static void __fpstate_reset(struct fpstate *fpstate, u64 xfd); static void fpu_init_guest_permissions(struct fpu_guest *gfpu) { struct fpu_state_perm *fpuperm; u64 perm; if (!IS_ENABLED(CONFIG_X86_64)) return; spin_lock_irq(&current->sighand->siglock); fpuperm = &current->group_leader->thread.fpu.guest_perm; perm = fpuperm->__state_perm; /* First fpstate allocation locks down permissions. */ WRITE_ONCE(fpuperm->__state_perm, perm | FPU_GUEST_PERM_LOCKED); spin_unlock_irq(&current->sighand->siglock); gfpu->perm = perm & ~FPU_GUEST_PERM_LOCKED; } bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) { struct fpstate *fpstate; unsigned int size; size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64); fpstate = vzalloc(size); if (!fpstate) return false; /* Leave xfd to 0 (the reset value defined by spec) */ __fpstate_reset(fpstate, 0); fpstate_init_user(fpstate); fpstate->is_valloc = true; fpstate->is_guest = true; gfpu->fpstate = fpstate; gfpu->xfeatures = fpu_user_cfg.default_features; gfpu->perm = fpu_user_cfg.default_features; /* * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state * to userspace, even when XSAVE is unsupported, so that restoring FPU * state on a different CPU that does support XSAVE can cleanly load * the incoming state using its natural XSAVE. In other words, KVM's * uABI size may be larger than this host's default size. Conversely, * the default size should never be larger than KVM's base uABI size; * all features that can expand the uABI size must be opt-in. 
*/ gfpu->uabi_size = sizeof(struct kvm_xsave); if (WARN_ON_ONCE(fpu_user_cfg.default_size > gfpu->uabi_size)) gfpu->uabi_size = fpu_user_cfg.default_size; fpu_init_guest_permissions(gfpu); return true; } EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate); void fpu_free_guest_fpstate(struct fpu_guest *gfpu) { struct fpstate *fps = gfpu->fpstate; if (!fps) return; if (WARN_ON_ONCE(!fps->is_valloc || !fps->is_guest || fps->in_use)) return; gfpu->fpstate = NULL; vfree(fps); } EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate); /* * fpu_enable_guest_xfd_features - Check xfeatures against guest perm and enable * @guest_fpu: Pointer to the guest FPU container * @xfeatures: Features requested by guest CPUID * * Enable all dynamic xfeatures according to guest perm and requested CPUID. * * Return: 0 on success, error code otherwise */ int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures) { lockdep_assert_preemption_enabled(); /* Nothing to do if all requested features are already enabled. */ xfeatures &= ~guest_fpu->xfeatures; if (!xfeatures) return 0; return __xfd_enable_feature(xfeatures, guest_fpu); } EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features); #ifdef CONFIG_X86_64 void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) { fpregs_lock(); guest_fpu->fpstate->xfd = xfd; if (guest_fpu->fpstate->in_use) xfd_update_state(guest_fpu->fpstate); fpregs_unlock(); } EXPORT_SYMBOL_GPL(fpu_update_guest_xfd); /** * fpu_sync_guest_vmexit_xfd_state - Synchronize XFD MSR and software state * * Must be invoked from KVM after a VMEXIT before enabling interrupts when * XFD write emulation is disabled. This is required because the guest can * freely modify XFD and the state at VMEXIT is not guaranteed to be the * same as the state on VMENTER. So software state has to be udpated before * any operation which depends on it can take place. * * Note: It can be invoked unconditionally even when write emulation is * enabled for the price of a then pointless MSR read. */ void fpu_sync_guest_vmexit_xfd_state(void) { struct fpstate *fps = current->thread.fpu.fpstate; lockdep_assert_irqs_disabled(); if (fpu_state_size_dynamic()) { rdmsrl(MSR_IA32_XFD, fps->xfd); __this_cpu_write(xfd_state, fps->xfd); } } EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state); #endif /* CONFIG_X86_64 */ int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest) { struct fpstate *guest_fps = guest_fpu->fpstate; struct fpu *fpu = &current->thread.fpu; struct fpstate *cur_fps = fpu->fpstate; fpregs_lock(); if (!cur_fps->is_confidential && !test_thread_flag(TIF_NEED_FPU_LOAD)) save_fpregs_to_fpstate(fpu); /* Swap fpstate */ if (enter_guest) { fpu->__task_fpstate = cur_fps; fpu->fpstate = guest_fps; guest_fps->in_use = true; } else { guest_fps->in_use = false; fpu->fpstate = fpu->__task_fpstate; fpu->__task_fpstate = NULL; } cur_fps = fpu->fpstate; if (!cur_fps->is_confidential) { /* Includes XFD update */ restore_fpregs_from_fpstate(cur_fps, XFEATURE_MASK_FPSTATE); } else { /* * XSTATE is restored by firmware from encrypted * memory. 
Make sure XFD state is correct while * running with guest fpstate */ xfd_update_state(cur_fps); } fpregs_mark_activate(); fpregs_unlock(); return 0; } EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate); void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru) { struct fpstate *kstate = gfpu->fpstate; union fpregs_state *ustate = buf; struct membuf mb = { .p = buf, .left = size }; if (cpu_feature_enabled(X86_FEATURE_XSAVE)) { __copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE); } else { memcpy(&ustate->fxsave, &kstate->regs.fxsave, sizeof(ustate->fxsave)); /* Make it restorable on a XSAVE enabled host */ ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE; } } EXPORT_SYMBOL_GPL(fpu_copy_guest_fpstate_to_uabi); int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru) { struct fpstate *kstate = gfpu->fpstate; const union fpregs_state *ustate = buf; if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) { if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE) return -EINVAL; if (ustate->fxsave.mxcsr & ~mxcsr_feature_mask) return -EINVAL; memcpy(&kstate->regs.fxsave, &ustate->fxsave, sizeof(ustate->fxsave)); return 0; } if (ustate->xsave.header.xfeatures & ~xcr0) return -EINVAL; /* * Nullify @vpkru to preserve its current value if PKRU's bit isn't set * in the header. KVM's odd ABI is to leave PKRU untouched in this * case (all other components are eventually re-initialized). */ if (!(ustate->xsave.header.xfeatures & XFEATURE_MASK_PKRU)) vpkru = NULL; return copy_uabi_from_kernel_to_xstate(kstate, ustate, vpkru); } EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate); #endif /* CONFIG_KVM */ void kernel_fpu_begin_mask(unsigned int kfpu_mask) { preempt_disable(); WARN_ON_FPU(!irq_fpu_usable()); WARN_ON_FPU(this_cpu_read(in_kernel_fpu)); this_cpu_write(in_kernel_fpu, true); if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) && !test_thread_flag(TIF_NEED_FPU_LOAD)) { set_thread_flag(TIF_NEED_FPU_LOAD); save_fpregs_to_fpstate(&current->thread.fpu); } __cpu_invalidate_fpregs_state(); /* Put sane initial values into the control registers. */ if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM)) ldmxcsr(MXCSR_DEFAULT); if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU)) asm volatile ("fninit"); } EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask); void kernel_fpu_end(void) { WARN_ON_FPU(!this_cpu_read(in_kernel_fpu)); this_cpu_write(in_kernel_fpu, false); preempt_enable(); } EXPORT_SYMBOL_GPL(kernel_fpu_end); /* * Sync the FPU register state to current's memory register state when the * current task owns the FPU. The hardware register state is preserved. 
*/ void fpu_sync_fpstate(struct fpu *fpu) { WARN_ON_FPU(fpu != &current->thread.fpu); fpregs_lock(); trace_x86_fpu_before_save(fpu); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) save_fpregs_to_fpstate(fpu); trace_x86_fpu_after_save(fpu); fpregs_unlock(); } static inline unsigned int init_fpstate_copy_size(void) { if (!use_xsave()) return fpu_kernel_cfg.default_size; /* XSAVE(S) just needs the legacy and the xstate header part */ return sizeof(init_fpstate.regs.xsave); } static inline void fpstate_init_fxstate(struct fpstate *fpstate) { fpstate->regs.fxsave.cwd = 0x37f; fpstate->regs.fxsave.mxcsr = MXCSR_DEFAULT; } /* * Legacy x87 fpstate state init: */ static inline void fpstate_init_fstate(struct fpstate *fpstate) { fpstate->regs.fsave.cwd = 0xffff037fu; fpstate->regs.fsave.swd = 0xffff0000u; fpstate->regs.fsave.twd = 0xffffffffu; fpstate->regs.fsave.fos = 0xffff0000u; } /* * Used in two places: * 1) Early boot to setup init_fpstate for non XSAVE systems * 2) fpu_init_fpstate_user() which is invoked from KVM */ void fpstate_init_user(struct fpstate *fpstate) { if (!cpu_feature_enabled(X86_FEATURE_FPU)) { fpstate_init_soft(&fpstate->regs.soft); return; } xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures); if (cpu_feature_enabled(X86_FEATURE_FXSR)) fpstate_init_fxstate(fpstate); else fpstate_init_fstate(fpstate); } static void __fpstate_reset(struct fpstate *fpstate, u64 xfd) { /* Initialize sizes and feature masks */ fpstate->size = fpu_kernel_cfg.default_size; fpstate->user_size = fpu_user_cfg.default_size; fpstate->xfeatures = fpu_kernel_cfg.default_features; fpstate->user_xfeatures = fpu_user_cfg.default_features; fpstate->xfd = xfd; } void fpstate_reset(struct fpu *fpu) { /* Set the fpstate pointer to the default fpstate */ fpu->fpstate = &fpu->__fpstate; __fpstate_reset(fpu->fpstate, init_fpstate.xfd); /* Initialize the permission related info in fpu */ fpu->perm.__state_perm = fpu_kernel_cfg.default_features; fpu->perm.__state_size = fpu_kernel_cfg.default_size; fpu->perm.__user_state_size = fpu_user_cfg.default_size; /* Same defaults for guests */ fpu->guest_perm = fpu->perm; } static inline void fpu_inherit_perms(struct fpu *dst_fpu) { if (fpu_state_size_dynamic()) { struct fpu *src_fpu = &current->group_leader->thread.fpu; spin_lock_irq(&current->sighand->siglock); /* Fork also inherits the permissions of the parent */ dst_fpu->perm = src_fpu->perm; dst_fpu->guest_perm = src_fpu->guest_perm; spin_unlock_irq(&current->sighand->siglock); } } /* A passed ssp of zero will not cause any update */ static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp) { #ifdef CONFIG_X86_USER_SHADOW_STACK struct cet_user_state *xstate; /* If ssp update is not needed. */ if (!ssp) return 0; xstate = get_xsave_addr(&dst->thread.fpu.fpstate->regs.xsave, XFEATURE_CET_USER); /* * If there is a non-zero ssp, then 'dst' must be configured with a shadow * stack and the fpu state should be up to date since it was just copied * from the parent in fpu_clone(). So there must be a valid non-init CET * state location in the buffer. */ if (WARN_ON_ONCE(!xstate)) return 1; xstate->user_ssp = (u64)ssp; #endif return 0; } /* Clone current's FPU state on fork */ int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal, unsigned long ssp) { struct fpu *src_fpu = &current->thread.fpu; struct fpu *dst_fpu = &dst->thread.fpu; /* The new task's FPU state cannot be valid in the hardware. 
*/ dst_fpu->last_cpu = -1; fpstate_reset(dst_fpu); if (!cpu_feature_enabled(X86_FEATURE_FPU)) return 0; /* * Enforce reload for user space tasks and prevent kernel threads * from trying to save the FPU registers on context switch. */ set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD); /* * No FPU state inheritance for kernel threads and IO * worker threads. */ if (minimal) { /* Clear out the minimal state */ memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs, init_fpstate_copy_size()); return 0; } /* * If a new feature is added, ensure all dynamic features are * caller-saved from here! */ BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA); /* * Save the default portion of the current FPU state into the * clone. Assume all dynamic features to be defined as caller- * saved, which enables skipping both the expansion of fpstate * and the copying of any dynamic state. * * Do not use memcpy() when TIF_NEED_FPU_LOAD is set because * copying is not valid when current uses non-default states. */ fpregs_lock(); if (test_thread_flag(TIF_NEED_FPU_LOAD)) fpregs_restore_userregs(); save_fpregs_to_fpstate(dst_fpu); fpregs_unlock(); if (!(clone_flags & CLONE_THREAD)) fpu_inherit_perms(dst_fpu); /* * Children never inherit PASID state. * Force it to have its init value: */ if (use_xsave()) dst_fpu->fpstate->regs.xsave.header.xfeatures &= ~XFEATURE_MASK_PASID; /* * Update shadow stack pointer, in case it changed during clone. */ if (update_fpu_shstk(dst, ssp)) return 1; trace_x86_fpu_copy_src(src_fpu); trace_x86_fpu_copy_dst(dst_fpu); return 0; } /* * Whitelist the FPU register state embedded into task_struct for hardened * usercopy. */ void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size) { *offset = offsetof(struct thread_struct, fpu.__fpstate.regs); *size = fpu_kernel_cfg.default_size; } /* * Drops current FPU state: deactivates the fpregs and * the fpstate. NOTE: it still leaves previous contents * in the fpregs in the eager-FPU case. * * This function can be used in cases where we know that * a state-restore is coming: either an explicit one, * or a reschedule. */ void fpu__drop(struct fpu *fpu) { preempt_disable(); if (fpu == &current->thread.fpu) { /* Ignore delayed exceptions from user space */ asm volatile("1: fwait\n" "2:\n" _ASM_EXTABLE(1b, 2b)); fpregs_deactivate(fpu); } trace_x86_fpu_dropped(fpu); preempt_enable(); } /* * Clear FPU registers by setting them up from the init fpstate. * Caller must do fpregs_[un]lock() around it. */ static inline void restore_fpregs_from_init_fpstate(u64 features_mask) { if (use_xsave()) os_xrstor(&init_fpstate, features_mask); else if (use_fxsr()) fxrstor(&init_fpstate.regs.fxsave); else frstor(&init_fpstate.regs.fsave); pkru_write_default(); } /* * Reset current->fpu memory state to the init values. */ static void fpu_reset_fpregs(void) { struct fpu *fpu = &current->thread.fpu; fpregs_lock(); __fpu_invalidate_fpregs_state(fpu); /* * This does not change the actual hardware registers. It just * resets the memory image and sets TIF_NEED_FPU_LOAD so a * subsequent return to usermode will reload the registers from the * task's memory image. * * Do not use fpstate_init() here. Just copy init_fpstate which has * the correct content already except for PKRU. * * PKRU handling does not rely on the xstate when restoring for * user space as PKRU is eagerly written in switch_to() and * flush_thread(). 
*/ memcpy(&fpu->fpstate->regs, &init_fpstate.regs, init_fpstate_copy_size()); set_thread_flag(TIF_NEED_FPU_LOAD); fpregs_unlock(); } /* * Reset current's user FPU states to the init states. current's * supervisor states, if any, are not modified by this function. The * caller guarantees that the XSTATE header in memory is intact. */ void fpu__clear_user_states(struct fpu *fpu) { WARN_ON_FPU(fpu != &current->thread.fpu); fpregs_lock(); if (!cpu_feature_enabled(X86_FEATURE_FPU)) { fpu_reset_fpregs(); fpregs_unlock(); return; } /* * Ensure that current's supervisor states are loaded into their * corresponding registers. */ if (xfeatures_mask_supervisor() && !fpregs_state_valid(fpu, smp_processor_id())) os_xrstor_supervisor(fpu->fpstate); /* Reset user states in registers. */ restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE); /* * Now all FPU registers have their desired values. Inform the FPU * state machine that current's FPU registers are in the hardware * registers. The memory image does not need to be updated because * any operation relying on it has to save the registers first when * current's FPU is marked active. */ fpregs_mark_activate(); fpregs_unlock(); } void fpu_flush_thread(void) { fpstate_reset(&current->thread.fpu); fpu_reset_fpregs(); } /* * Load FPU context before returning to userspace. */ void switch_fpu_return(void) { if (!static_cpu_has(X86_FEATURE_FPU)) return; fpregs_restore_userregs(); } EXPORT_SYMBOL_GPL(switch_fpu_return); void fpregs_lock_and_load(void) { /* * fpregs_lock() only disables preemption (mostly). So modifying state * in an interrupt could screw up some in progress fpregs operation. * Warn about it. */ WARN_ON_ONCE(!irq_fpu_usable()); WARN_ON_ONCE(current->flags & PF_KTHREAD); fpregs_lock(); fpregs_assert_state_consistent(); if (test_thread_flag(TIF_NEED_FPU_LOAD)) fpregs_restore_userregs(); } #ifdef CONFIG_X86_DEBUG_FPU /* * If current FPU state according to its tracking (loaded FPU context on this * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is * loaded on return to userland. */ void fpregs_assert_state_consistent(void) { struct fpu *fpu = &current->thread.fpu; if (test_thread_flag(TIF_NEED_FPU_LOAD)) return; WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id())); } EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent); #endif void fpregs_mark_activate(void) { struct fpu *fpu = &current->thread.fpu; fpregs_activate(fpu); fpu->last_cpu = smp_processor_id(); clear_thread_flag(TIF_NEED_FPU_LOAD); } /* * x87 math exception handling: */ int fpu__exception_code(struct fpu *fpu, int trap_nr) { int err; if (trap_nr == X86_TRAP_MF) { unsigned short cwd, swd; /* * (~cwd & swd) will mask out exceptions that are not set to unmasked * status. 0x3f is the exception bits in these regs, 0x200 is the * C1 reg you need in case of a stack fault, 0x040 is the stack * fault bit. We should only be taking one exception at a time, * so if this combination doesn't produce any single exception, * then we have a bad program that isn't synchronizing its FPU usage * and it will suffer the consequences since we won't be able to * fully reproduce the context of the exception. */ if (boot_cpu_has(X86_FEATURE_FXSR)) { cwd = fpu->fpstate->regs.fxsave.cwd; swd = fpu->fpstate->regs.fxsave.swd; } else { cwd = (unsigned short)fpu->fpstate->regs.fsave.cwd; swd = (unsigned short)fpu->fpstate->regs.fsave.swd; } err = swd & ~cwd; } else { /* * The SIMD FPU exceptions are handled a little differently, as there * is only a single status/control register. 
Thus, to determine which * unmasked exception was caught we must mask the exception mask bits * at 0x1f80, and then use these to mask the exception bits at 0x3f. */ unsigned short mxcsr = MXCSR_DEFAULT; if (boot_cpu_has(X86_FEATURE_XMM)) mxcsr = fpu->fpstate->regs.fxsave.mxcsr; err = ~(mxcsr >> 7) & mxcsr; } if (err & 0x001) { /* Invalid op */ /* * swd & 0x240 == 0x040: Stack Underflow * swd & 0x240 == 0x240: Stack Overflow * User must clear the SF bit (0x40) if set */ return FPE_FLTINV; } else if (err & 0x004) { /* Divide by Zero */ return FPE_FLTDIV; } else if (err & 0x008) { /* Overflow */ return FPE_FLTOVF; } else if (err & 0x012) { /* Denormal, Underflow */ return FPE_FLTUND; } else if (err & 0x020) { /* Precision */ return FPE_FLTRES; } /* * If we're using IRQ 13, or supposedly even some trap * X86_TRAP_MF implementations, it's possible * we get a spurious trap, which is not an error. */ return 0; } /* * Initialize register state that may prevent from entering low-power idle. * This function will be invoked from the cpuidle driver only when needed. */ noinstr void fpu_idle_fpregs(void) { /* Note: AMX_TILE being enabled implies XGETBV1 support */ if (cpu_feature_enabled(X86_FEATURE_AMX_TILE) && (xfeatures_in_use() & XFEATURE_MASK_XTILE)) { tile_release(); __this_cpu_write(fpu_fpregs_owner_ctx, NULL); } }
linux-master
arch/x86/kernel/fpu/core.c
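kernel_fpu_begin()/kernel_fpu_end() defined in the file above must bracket any in-kernel SIMD or x87 use. Below is a hedged sketch of a hypothetical module (the "fpu_demo" names are invented here) that shows only the required bracketing; it is not a definitive implementation of anything in this file.

/* Hedged sketch of a hypothetical module ("fpu_demo", name invented for
 * this example) showing the required bracketing for in-kernel FPU use.
 * It relies only on kernel_fpu_begin()/kernel_fpu_end()/irq_fpu_usable()
 * from <asm/fpu/api.h>, which are exported by the code above.
 */
#include <linux/module.h>
#include <asm/fpu/api.h>

static int __init fpu_demo_init(void)
{
	if (!irq_fpu_usable())
		return -EBUSY;

	kernel_fpu_begin();
	/*
	 * SSE/AVX instructions are only legal inside this region; the
	 * user register state was saved by kernel_fpu_begin() and is
	 * lazily restored on the next return to user space.
	 */
	kernel_fpu_end();
	return 0;
}

static void __exit fpu_demo_exit(void)
{
}

module_init(fpu_demo_init);
module_exit(fpu_demo_exit);
MODULE_LICENSE("GPL");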
// SPDX-License-Identifier: GPL-2.0 /* * FPU signal frame handling routines. */ #include <linux/compat.h> #include <linux/cpu.h> #include <linux/pagemap.h> #include <asm/fpu/signal.h> #include <asm/fpu/regset.h> #include <asm/fpu/xstate.h> #include <asm/sigframe.h> #include <asm/trapnr.h> #include <asm/trace/fpu.h> #include "context.h" #include "internal.h" #include "legacy.h" #include "xstate.h" /* * Check for the presence of extended state information in the * user fpstate pointer in the sigcontext. */ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf, struct _fpx_sw_bytes *fx_sw) { int min_xstate_size = sizeof(struct fxregs_state) + sizeof(struct xstate_header); void __user *fpstate = fxbuf; unsigned int magic2; if (__copy_from_user(fx_sw, &fxbuf->sw_reserved[0], sizeof(*fx_sw))) return false; /* Check for the first magic field and other error scenarios. */ if (fx_sw->magic1 != FP_XSTATE_MAGIC1 || fx_sw->xstate_size < min_xstate_size || fx_sw->xstate_size > current->thread.fpu.fpstate->user_size || fx_sw->xstate_size > fx_sw->extended_size) goto setfx; /* * Check for the presence of second magic word at the end of memory * layout. This detects the case where the user just copied the legacy * fpstate layout with out copying the extended state information * in the memory layout. */ if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))) return false; if (likely(magic2 == FP_XSTATE_MAGIC2)) return true; setfx: trace_x86_fpu_xstate_check_failed(&current->thread.fpu); /* Set the parameters for fx only state */ fx_sw->magic1 = 0; fx_sw->xstate_size = sizeof(struct fxregs_state); fx_sw->xfeatures = XFEATURE_MASK_FPSSE; return true; } /* * Signal frame handlers. */ static inline bool save_fsave_header(struct task_struct *tsk, void __user *buf) { if (use_fxsr()) { struct xregs_state *xsave = &tsk->thread.fpu.fpstate->regs.xsave; struct user_i387_ia32_struct env; struct _fpstate_32 __user *fp = buf; fpregs_lock(); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) fxsave(&tsk->thread.fpu.fpstate->regs.fxsave); fpregs_unlock(); convert_from_fxsr(&env, tsk); if (__copy_to_user(buf, &env, sizeof(env)) || __put_user(xsave->i387.swd, &fp->status) || __put_user(X86_FXSR_MAGIC, &fp->magic)) return false; } else { struct fregs_state __user *fp = buf; u32 swd; if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status)) return false; } return true; } /* * Prepare the SW reserved portion of the fxsave memory layout, indicating * the presence of the extended state information in the memory layout * pointed to by the fpstate pointer in the sigcontext. * This is saved when ever the FP and extended state context is * saved on the user stack during the signal handler delivery to the user. */ static inline void save_sw_bytes(struct _fpx_sw_bytes *sw_bytes, bool ia32_frame, struct fpstate *fpstate) { sw_bytes->magic1 = FP_XSTATE_MAGIC1; sw_bytes->extended_size = fpstate->user_size + FP_XSTATE_MAGIC2_SIZE; sw_bytes->xfeatures = fpstate->user_xfeatures; sw_bytes->xstate_size = fpstate->user_size; if (ia32_frame) sw_bytes->extended_size += sizeof(struct fregs_state); } static inline bool save_xstate_epilog(void __user *buf, int ia32_frame, struct fpstate *fpstate) { struct xregs_state __user *x = buf; struct _fpx_sw_bytes sw_bytes = {}; u32 xfeatures; int err; /* Setup the bytes not touched by the [f]xsave and reserved for SW. 
*/ save_sw_bytes(&sw_bytes, ia32_frame, fpstate); err = __copy_to_user(&x->i387.sw_reserved, &sw_bytes, sizeof(sw_bytes)); if (!use_xsave()) return !err; err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + fpstate->user_size)); /* * Read the xfeatures which we copied (directly from the cpu or * from the state in task struct) to the user buffers. */ err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures); /* * For legacy compatible, we always set FP/SSE bits in the bit * vector while saving the state to the user context. This will * enable us capturing any changes(during sigreturn) to * the FP/SSE bits by the legacy applications which don't touch * xfeatures in the xsave header. * * xsave aware apps can change the xfeatures in the xsave * header as well as change any contents in the memory layout. * xrestore as part of sigreturn will capture all the changes. */ xfeatures |= XFEATURE_MASK_FPSSE; err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures); return !err; } static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf) { if (use_xsave()) return xsave_to_user_sigframe(buf); if (use_fxsr()) return fxsave_to_user_sigframe((struct fxregs_state __user *) buf); else return fnsave_to_user_sigframe((struct fregs_state __user *) buf); } /* * Save the fpu, extended register state to the user signal frame. * * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save * state is copied. * 'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'. * * buf == buf_fx for 64-bit frames and 32-bit fsave frame. * buf != buf_fx for 32-bit frames with fxstate. * * Save it directly to the user frame with disabled page fault handler. If * that faults, try to clear the frame which handles the page fault. * * If this is a 32-bit frame with fxstate, put a fsave header before * the aligned state at 'buf_fx'. * * For [f]xsave state, update the SW reserved fields in the [f]xsave frame * indicating the absence/presence of the extended state to the user. */ bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) { struct task_struct *tsk = current; struct fpstate *fpstate = tsk->thread.fpu.fpstate; bool ia32_fxstate = (buf != buf_fx); int ret; ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) || IS_ENABLED(CONFIG_IA32_EMULATION)); if (!static_cpu_has(X86_FEATURE_FPU)) { struct user_i387_ia32_struct fp; fpregs_soft_get(current, NULL, (struct membuf){.p = &fp, .left = sizeof(fp)}); return !copy_to_user(buf, &fp, sizeof(fp)); } if (!access_ok(buf, size)) return false; if (use_xsave()) { struct xregs_state __user *xbuf = buf_fx; /* * Clear the xsave header first, so that reserved fields are * initialized to zero. */ if (__clear_user(&xbuf->header, sizeof(xbuf->header))) return false; } retry: /* * Load the FPU registers if they are not valid for the current task. * With a valid FPU state we can attempt to save the state directly to * userland's stack frame which will likely succeed. If it does not, * resolve the fault in the user memory and try again. */ fpregs_lock(); if (test_thread_flag(TIF_NEED_FPU_LOAD)) fpregs_restore_userregs(); pagefault_disable(); ret = copy_fpregs_to_sigframe(buf_fx); pagefault_enable(); fpregs_unlock(); if (ret) { if (!__clear_user(buf_fx, fpstate->user_size)) goto retry; return false; } /* Save the fsave header for the 32-bit frames. 
*/ if ((ia32_fxstate || !use_fxsr()) && !save_fsave_header(tsk, buf)) return false; if (use_fxsr() && !save_xstate_epilog(buf_fx, ia32_fxstate, fpstate)) return false; return true; } static int __restore_fpregs_from_user(void __user *buf, u64 ufeatures, u64 xrestore, bool fx_only) { if (use_xsave()) { u64 init_bv = ufeatures & ~xrestore; int ret; if (likely(!fx_only)) ret = xrstor_from_user_sigframe(buf, xrestore); else ret = fxrstor_from_user_sigframe(buf); if (!ret && unlikely(init_bv)) os_xrstor(&init_fpstate, init_bv); return ret; } else if (use_fxsr()) { return fxrstor_from_user_sigframe(buf); } else { return frstor_from_user_sigframe(buf); } } /* * Attempt to restore the FPU registers directly from user memory. * Pagefaults are handled and any errors returned are fatal. */ static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only, unsigned int size) { struct fpu *fpu = &current->thread.fpu; int ret; retry: fpregs_lock(); /* Ensure that XFD is up to date */ xfd_update_state(fpu->fpstate); pagefault_disable(); ret = __restore_fpregs_from_user(buf, fpu->fpstate->user_xfeatures, xrestore, fx_only); pagefault_enable(); if (unlikely(ret)) { /* * The above did an FPU restore operation, restricted to * the user portion of the registers, and failed, but the * microcode might have modified the FPU registers * nevertheless. * * If the FPU registers do not belong to current, then * invalidate the FPU register state otherwise the task * might preempt current and return to user space with * corrupted FPU registers. */ if (test_thread_flag(TIF_NEED_FPU_LOAD)) __cpu_invalidate_fpregs_state(); fpregs_unlock(); /* Try to handle #PF, but anything else is fatal. */ if (ret != X86_TRAP_PF) return false; if (!fault_in_readable(buf, size)) goto retry; return false; } /* * Restore supervisor states: previous context switch etc has done * XSAVES and saved the supervisor states in the kernel buffer from * which they can be restored now. * * It would be optimal to handle this with a single XRSTORS, but * this does not work because the rest of the FPU registers have * been restored from a user buffer directly. */ if (test_thread_flag(TIF_NEED_FPU_LOAD) && xfeatures_mask_supervisor()) os_xrstor_supervisor(fpu->fpstate); fpregs_mark_activate(); fpregs_unlock(); return true; } static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx, bool ia32_fxstate) { struct task_struct *tsk = current; struct fpu *fpu = &tsk->thread.fpu; struct user_i387_ia32_struct env; bool success, fx_only = false; union fpregs_state *fpregs; unsigned int state_size; u64 user_xfeatures = 0; if (use_xsave()) { struct _fpx_sw_bytes fx_sw_user; if (!check_xstate_in_sigframe(buf_fx, &fx_sw_user)) return false; fx_only = !fx_sw_user.magic1; state_size = fx_sw_user.xstate_size; user_xfeatures = fx_sw_user.xfeatures; } else { user_xfeatures = XFEATURE_MASK_FPSSE; state_size = fpu->fpstate->user_size; } if (likely(!ia32_fxstate)) { /* Restore the FPU registers directly from user memory. */ return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only, state_size); } /* * Copy the legacy state because the FP portion of the FX frame has * to be ignored for histerical raisins. The legacy state is folded * in once the larger state has been copied. 
*/ if (__copy_from_user(&env, buf, sizeof(env))) return false; /* * By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is * not modified on context switch and that the xstate is considered * to be loaded again on return to userland (overriding last_cpu avoids * the optimisation). */ fpregs_lock(); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) { /* * If supervisor states are available then save the * hardware state in current's fpstate so that the * supervisor state is preserved. Save the full state for * simplicity. There is no point in optimizing this by only * saving the supervisor states and then shuffle them to * the right place in memory. It's ia32 mode. Shrug. */ if (xfeatures_mask_supervisor()) os_xsave(fpu->fpstate); set_thread_flag(TIF_NEED_FPU_LOAD); } __fpu_invalidate_fpregs_state(fpu); __cpu_invalidate_fpregs_state(); fpregs_unlock(); fpregs = &fpu->fpstate->regs; if (use_xsave() && !fx_only) { if (copy_sigframe_from_user_to_xstate(tsk, buf_fx)) return false; } else { if (__copy_from_user(&fpregs->fxsave, buf_fx, sizeof(fpregs->fxsave))) return false; if (IS_ENABLED(CONFIG_X86_64)) { /* Reject invalid MXCSR values. */ if (fpregs->fxsave.mxcsr & ~mxcsr_feature_mask) return false; } else { /* Mask invalid bits out for historical reasons (broken hardware). */ fpregs->fxsave.mxcsr &= mxcsr_feature_mask; } /* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */ if (use_xsave()) fpregs->xsave.header.xfeatures |= XFEATURE_MASK_FPSSE; } /* Fold the legacy FP storage */ convert_to_fxsr(&fpregs->fxsave, &env); fpregs_lock(); if (use_xsave()) { /* * Remove all UABI feature bits not set in user_xfeatures * from the memory xstate header which makes the full * restore below bring them into init state. This works for * fx_only mode as well because that has only FP and SSE * set in user_xfeatures. * * Preserve supervisor states! */ u64 mask = user_xfeatures | xfeatures_mask_supervisor(); fpregs->xsave.header.xfeatures &= mask; success = !os_xrstor_safe(fpu->fpstate, fpu_kernel_cfg.max_features); } else { success = !fxrstor_safe(&fpregs->fxsave); } if (likely(success)) fpregs_mark_activate(); fpregs_unlock(); return success; } static inline unsigned int xstate_sigframe_size(struct fpstate *fpstate) { unsigned int size = fpstate->user_size; return use_xsave() ? size + FP_XSTATE_MAGIC2_SIZE : size; } /* * Restore FPU state from a sigframe: */ bool fpu__restore_sig(void __user *buf, int ia32_frame) { struct fpu *fpu = &current->thread.fpu; void __user *buf_fx = buf; bool ia32_fxstate = false; bool success = false; unsigned int size; if (unlikely(!buf)) { fpu__clear_user_states(fpu); return true; } size = xstate_sigframe_size(fpu->fpstate); ia32_frame &= (IS_ENABLED(CONFIG_X86_32) || IS_ENABLED(CONFIG_IA32_EMULATION)); /* * Only FXSR enabled systems need the FX state quirk. * FRSTOR does not need it and can use the fast path. 
*/ if (ia32_frame && use_fxsr()) { buf_fx = buf + sizeof(struct fregs_state); size += sizeof(struct fregs_state); ia32_fxstate = true; } if (!access_ok(buf, size)) goto out; if (!IS_ENABLED(CONFIG_X86_64) && !cpu_feature_enabled(X86_FEATURE_FPU)) { success = !fpregs_soft_set(current, NULL, 0, sizeof(struct user_i387_ia32_struct), NULL, buf); } else { success = __fpu_restore_sig(buf, buf_fx, ia32_fxstate); } out: if (unlikely(!success)) fpu__clear_user_states(fpu); return success; } unsigned long fpu__alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx, unsigned long *size) { unsigned long frame_size = xstate_sigframe_size(current->thread.fpu.fpstate); *buf_fx = sp = round_down(sp - frame_size, 64); if (ia32_frame && use_fxsr()) { frame_size += sizeof(struct fregs_state); sp -= sizeof(struct fregs_state); } *size = frame_size; return sp; } unsigned long __init fpu__get_fpstate_size(void) { unsigned long ret = fpu_user_cfg.max_size; if (use_xsave()) ret += FP_XSTATE_MAGIC2_SIZE; /* * This space is needed on (most) 32-bit kernels, or when a 32-bit * app is running on a 64-bit kernel. To keep things simple, just * assume the worst case and always include space for 'freg_state', * even for 64-bit apps on 64-bit kernels. This wastes a bit of * space, but keeps the code simple. */ if ((IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) && use_fxsr()) ret += sizeof(struct fregs_state); return ret; }
linux-master
arch/x86/kernel/fpu/signal.c
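The FPU area that copy_fpstate_to_sigframe() above stores on the signal stack is visible to user space through the ucontext passed to a handler. The sketch below is a hedged user-space illustration that assumes the x86-64 glibc layout (uc_mcontext.fpregs->mxcsr); printf() in a signal handler is not async-signal-safe and is used only for the demo.

/* Hedged user-space sketch (x86-64 glibc layout assumed, not kernel code):
 * print MXCSR from the FPU state the kernel placed on the signal frame,
 * after raising SIGFPE with an unmasked SSE divide-by-zero.
 */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <ucontext.h>
#include <unistd.h>
#include <xmmintrin.h>

static void fpe_handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;

	if (uc->uc_mcontext.fpregs)
		printf("SIGFPE, MXCSR=%#x\n", uc->uc_mcontext.fpregs->mxcsr);
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = fpe_handler, .sa_flags = SA_SIGINFO };

	sigaction(SIGFPE, &sa, NULL);
	_MM_SET_EXCEPTION_MASK(0);		/* unmask all SSE exceptions */
	volatile float zero = 0.0f, one = 1.0f;
	volatile float r = one / zero;		/* #XF -> SIGFPE */
	(void)r;
	return 1;				/* not reached on success */
}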
// SPDX-License-Identifier: GPL-2.0 /* * x86 FPU bug checks: */ #include <asm/fpu/api.h> /* * Boot time CPU/FPU FDIV bug detection code: */ static double __initdata x = 4195835.0; static double __initdata y = 3145727.0; /* * This used to check for exceptions.. * However, it turns out that to support that, * the XMM trap handlers basically had to * be buggy. So let's have a correct XMM trap * handler, and forget about printing out * some status at boot. * * We should really only care about bugs here * anyway. Not features. */ void __init fpu__init_check_bugs(void) { s32 fdiv_bug; /* kernel_fpu_begin/end() relies on patched alternative instructions. */ if (!boot_cpu_has(X86_FEATURE_FPU)) return; kernel_fpu_begin(); /* * trap_init() enabled FXSR and company _before_ testing for FP * problems here. * * Test for the divl bug: http://en.wikipedia.org/wiki/Fdiv_bug */ __asm__("fninit\n\t" "fldl %1\n\t" "fdivl %2\n\t" "fmull %2\n\t" "fldl %1\n\t" "fsubp %%st,%%st(1)\n\t" "fistpl %0\n\t" "fwait\n\t" "fninit" : "=m" (*&fdiv_bug) : "m" (*&x), "m" (*&y)); kernel_fpu_end(); if (fdiv_bug) { set_cpu_bug(&boot_cpu_data, X86_BUG_FDIV); pr_warn("Hmm, FPU with FDIV bug\n"); } }
linux-master
arch/x86/kernel/fpu/bugs.c
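The FDIV check above computes x - (x/y)*y with x = 4195835 and y = 3145727: rounded to an integer, the residue is 0 on a correct FPU and roughly 256 on the flawed original Pentium. The following is a hedged, purely illustrative user-space restatement of the same arithmetic; compiled for a modern target it exercises SSE rather than the x87 unit the kernel test targets.

/* Hedged user-space restatement of the FDIV test above. The residue is
 * only rounding noise (far below 0.5) on a correct FPU, while the buggy
 * Pentium FDIV produces roughly 256. No kernel interfaces are used.
 */
#include <stdio.h>

int main(void)
{
	volatile double x = 4195835.0, y = 3145727.0;
	double r = x - (x / y) * y;

	printf("fdiv residue: %.12g (%s)\n", r,
	       (r > 0.5 || r < -0.5) ? "FDIV bug suspected" : "FPU ok");
	return 0;
}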
// SPDX-License-Identifier: GPL-2.0-only /* * purgatory: Runs between two kernels * * Copyright (C) 2014 Red Hat Inc. * * Author: * Vivek Goyal <[email protected]> */ #include <linux/bug.h> #include <linux/kernel.h> #include <linux/types.h> #include <crypto/sha2.h> #include <asm/purgatory.h> #include "../boot/compressed/error.h" #include "../boot/string.h" u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(".kexec-purgatory"); struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(".kexec-purgatory"); static int verify_sha256_digest(void) { struct kexec_sha_region *ptr, *end; u8 digest[SHA256_DIGEST_SIZE]; struct sha256_state sctx; sha256_init(&sctx); end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions); for (ptr = purgatory_sha_regions; ptr < end; ptr++) sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); sha256_final(&sctx, digest); if (memcmp(digest, purgatory_sha256_digest, sizeof(digest))) return 1; return 0; } void purgatory(void) { int ret; ret = verify_sha256_digest(); if (ret) { /* loop forever */ for (;;) ; } } /* * Defined in order to reuse memcpy() and memset() from * arch/x86/boot/compressed/string.c */ void warn(const char *msg) {}
linux-master
arch/x86/purgatory/purgatory.c
// SPDX-License-Identifier: GPL-2.0 /* * ppc64 "iomap" interface implementation. * * (C) Copyright 2004 Linus Torvalds */ #include <linux/init.h> #include <linux/pci.h> #include <linux/mm.h> #include <linux/export.h> #include <linux/io.h> #include <asm/pci-bridge.h> static DEFINE_SPINLOCK(hose_spinlock); LIST_HEAD(hose_list); unsigned long isa_io_base; EXPORT_SYMBOL(isa_io_base); static resource_size_t pcibios_io_size(const struct pci_controller *hose) { return resource_size(&hose->io_resource); } int pcibios_vaddr_is_ioport(void __iomem *address) { int ret = 0; struct pci_controller *hose; resource_size_t size; spin_lock(&hose_spinlock); list_for_each_entry(hose, &hose_list, list_node) { size = pcibios_io_size(hose); if (address >= hose->io_base_virt && address < (hose->io_base_virt + size)) { ret = 1; break; } } spin_unlock(&hose_spinlock); return ret; } /* Display the domain number in /proc */ int pci_proc_domain(struct pci_bus *bus) { return pci_domain_nr(bus); } void pci_iounmap(struct pci_dev *dev, void __iomem *addr) { if (isa_vaddr_is_ioport(addr)) return; if (pcibios_vaddr_is_ioport(addr)) return; iounmap(addr); } EXPORT_SYMBOL(pci_iounmap);
linux-master
arch/microblaze/pci/iomap.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/export.h> #include "libgcc.h" long long __lshrdi3(long long u, word_type b) { DWunion uu, w; word_type bm; if (b == 0) return u; uu.ll = u; bm = 32 - b; if (bm <= 0) { w.s.high = 0; w.s.low = (unsigned int) uu.s.high >> -bm; } else { const unsigned int carries = (unsigned int) uu.s.high << bm; w.s.high = (unsigned int) uu.s.high >> b; w.s.low = ((unsigned int) uu.s.low >> b) | carries; } return w.ll; } EXPORT_SYMBOL(__lshrdi3);
linux-master
arch/microblaze/lib/lshrdi3.c
/* * Copyright (C) 2008-2009 Michal Simek <[email protected]> * Copyright (C) 2008-2009 PetaLogix * Copyright (C) 2007 John Williams * * Reasonably optimised generic C-code for memcpy on Microblaze * This is generic C code to do efficient, alignment-aware memcpy. * * It is based on demo code originally Copyright 2001 by Intel Corp, taken from * http://www.embedded.com/showArticle.jhtml?articleID=19205567 * * Attempts were made, unsuccessfully, to contact the original * author of this code (Michael Morrow, Intel). Below is the original * copyright notice. * * This software has been developed by Intel Corporation. * Intel specifically disclaims all warranties, express or * implied, and all liability, including consequential and * other indirect damages, for the use of this program, including * liability for infringement of any proprietary rights, * and including the warranties of merchantability and fitness * for a particular purpose. Intel does not assume any * responsibility for and errors which may appear in this program * not any responsibility to update it. */ #include <linux/export.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/compiler.h> #include <linux/string.h> #ifdef CONFIG_OPT_LIB_FUNCTION void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) { const char *src = v_src; char *dst = v_dst; /* The following code tries to optimize the copy by using unsigned * alignment. This will work fine if both source and destination are * aligned on the same boundary. However, if they are aligned on * different boundaries shifts will be necessary. This might result in * bad performance on MicroBlaze systems without a barrel shifter. */ const uint32_t *i_src; uint32_t *i_dst; if (likely(c >= 4)) { unsigned value, buf_hold; /* Align the destination to a word boundary. */ /* This is done in an endian independent manner. */ switch ((unsigned long)dst & 3) { case 1: *dst++ = *src++; --c; fallthrough; case 2: *dst++ = *src++; --c; fallthrough; case 3: *dst++ = *src++; --c; } i_dst = (void *)dst; /* Choose a copy scheme based on the source */ /* alignment relative to destination. 
*/ switch ((unsigned long)src & 3) { case 0x0: /* Both byte offsets are aligned */ i_src = (const void *)src; for (; c >= 4; c -= 4) *i_dst++ = *i_src++; src = (const void *)i_src; break; case 0x1: /* Unaligned - Off by 1 */ /* Word align the source */ i_src = (const void *) ((unsigned)src & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *i_src++ << 8; for (; c >= 4; c -= 4) { value = *i_src++; *i_dst++ = buf_hold | value >> 24; buf_hold = value << 8; } #else /* Load the holding buffer */ buf_hold = (*i_src++ & 0xFFFFFF00) >> 8; for (; c >= 4; c -= 4) { value = *i_src++; *i_dst++ = buf_hold | ((value & 0xFF) << 24); buf_hold = (value & 0xFFFFFF00) >> 8; } #endif /* Realign the source */ src = (const void *)i_src; src -= 3; break; case 0x2: /* Unaligned - Off by 2 */ /* Word align the source */ i_src = (const void *) ((unsigned)src & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *i_src++ << 16; for (; c >= 4; c -= 4) { value = *i_src++; *i_dst++ = buf_hold | value >> 16; buf_hold = value << 16; } #else /* Load the holding buffer */ buf_hold = (*i_src++ & 0xFFFF0000) >> 16; for (; c >= 4; c -= 4) { value = *i_src++; *i_dst++ = buf_hold | ((value & 0xFFFF) << 16); buf_hold = (value & 0xFFFF0000) >> 16; } #endif /* Realign the source */ src = (const void *)i_src; src -= 2; break; case 0x3: /* Unaligned - Off by 3 */ /* Word align the source */ i_src = (const void *) ((unsigned)src & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *i_src++ << 24; for (; c >= 4; c -= 4) { value = *i_src++; *i_dst++ = buf_hold | value >> 8; buf_hold = value << 24; } #else /* Load the holding buffer */ buf_hold = (*i_src++ & 0xFF000000) >> 24; for (; c >= 4; c -= 4) { value = *i_src++; *i_dst++ = buf_hold | ((value & 0xFFFFFF) << 8); buf_hold = (value & 0xFF000000) >> 24; } #endif /* Realign the source */ src = (const void *)i_src; src -= 1; break; } dst = (void *)i_dst; } /* Finish off any remaining bytes */ /* simple fast copy, ... unless a cache boundary is crossed */ switch (c) { case 3: *dst++ = *src++; fallthrough; case 2: *dst++ = *src++; fallthrough; case 1: *dst++ = *src++; } return v_dst; } EXPORT_SYMBOL(memcpy); #endif /* CONFIG_OPT_LIB_FUNCTION */
linux-master
arch/microblaze/lib/memcpy.c
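The copy routine above special-cases every source/destination alignment combination, so a harness that sweeps all offsets modulo 4 is a cheap way to exercise each holding-buffer path. Below is a hedged, hypothetical user-space sketch; as written it calls the libc memcpy() as a stand-in, and would be linked against the routine under test instead.

/* Hedged user-space harness (hypothetical, not part of the kernel build)
 * for an alignment-aware memcpy like the one above: sweep every
 * source/destination offset modulo 4 and compare against a trivial
 * byte-by-byte reference copy.
 */
#include <stdio.h>
#include <string.h>

#define LEN 64

static void byte_copy(unsigned char *d, const unsigned char *s, size_t n)
{
	while (n--)
		*d++ = *s++;
}

int main(void)
{
	unsigned char src[LEN + 8], dst[LEN + 8], ref[LEN + 8];
	int fail = 0;

	for (size_t i = 0; i < sizeof(src); i++)
		src[i] = (unsigned char)(i * 7 + 1);

	for (int so = 0; so < 4; so++) {
		for (int dofs = 0; dofs < 4; dofs++) {
			memset(dst, 0, sizeof(dst));
			memset(ref, 0, sizeof(ref));
			memcpy(dst + dofs, src + so, LEN);	/* routine under test */
			byte_copy(ref + dofs, src + so, LEN);	/* reference */
			if (memcmp(dst, ref, sizeof(dst)) != 0) {
				printf("mismatch: src%%4=%d dst%%4=%d\n", so, dofs);
				fail = 1;
			}
		}
	}
	puts(fail ? "FAIL" : "all alignment combinations OK");
	return fail;
}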
// SPDX-License-Identifier: GPL-2.0 #include <linux/export.h> #include "libgcc.h" word_type __cmpdi2(long long a, long long b) { const DWunion au = { .ll = a }; const DWunion bu = { .ll = b }; if (au.s.high < bu.s.high) return 0; else if (au.s.high > bu.s.high) return 2; if ((unsigned int) au.s.low < (unsigned int) bu.s.low) return 0; else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) return 2; return 1; } EXPORT_SYMBOL(__cmpdi2);
linux-master
arch/microblaze/lib/cmpdi2.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/export.h> #include "libgcc.h" #define W_TYPE_SIZE 32 #define __ll_B ((unsigned long) 1 << (W_TYPE_SIZE / 2)) #define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1)) #define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2)) /* If we still don't have umul_ppmm, define it using plain C. */ #if !defined(umul_ppmm) #define umul_ppmm(w1, w0, u, v) \ do { \ unsigned long __x0, __x1, __x2, __x3; \ unsigned short __ul, __vl, __uh, __vh; \ \ __ul = __ll_lowpart(u); \ __uh = __ll_highpart(u); \ __vl = __ll_lowpart(v); \ __vh = __ll_highpart(v); \ \ __x0 = (unsigned long) __ul * __vl; \ __x1 = (unsigned long) __ul * __vh; \ __x2 = (unsigned long) __uh * __vl; \ __x3 = (unsigned long) __uh * __vh; \ \ __x1 += __ll_highpart(__x0); /* this can't give carry */\ __x1 += __x2; /* but this indeed can */ \ if (__x1 < __x2) /* did we get it? */ \ __x3 += __ll_B; /* yes, add it in the proper pos */ \ \ (w1) = __x3 + __ll_highpart(__x1); \ (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\ } while (0) #endif #if !defined(__umulsidi3) #define __umulsidi3(u, v) ({ \ DWunion __w; \ umul_ppmm(__w.s.high, __w.s.low, u, v); \ __w.ll; \ }) #endif long long __muldi3(long long u, long long v) { const DWunion uu = {.ll = u}; const DWunion vv = {.ll = v}; DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)}; w.s.high += ((unsigned long) uu.s.low * (unsigned long) vv.s.high + (unsigned long) uu.s.high * (unsigned long) vv.s.low); return w.ll; } EXPORT_SYMBOL(__muldi3);
linux-master
arch/microblaze/lib/muldi3.c
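umul_ppmm() above builds a 64-bit product from four 16x16-bit partial products plus an explicit carry fix-up. A quick cross-check against the compiler's native 64-bit multiply validates that carry handling; the sketch below is a hedged user-space reimplementation of the same decomposition (mul32x32() is a local name invented for the sketch, not a kernel symbol).

/* Hedged user-space cross-check of the umul_ppmm() decomposition used
 * above: the same four partial products and carry fix-up, compared
 * against the compiler's native 64-bit multiplication.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t mul32x32(uint32_t u, uint32_t v)
{
	uint32_t ul = u & 0xffff, uh = u >> 16;
	uint32_t vl = v & 0xffff, vh = v >> 16;
	uint32_t x0 = ul * vl, x1 = ul * vh, x2 = uh * vl, x3 = uh * vh;
	uint32_t w1, w0;

	x1 += x0 >> 16;		/* cannot carry */
	x1 += x2;		/* but this can */
	if (x1 < x2)		/* carry into the high word */
		x3 += 0x10000;

	w1 = x3 + (x1 >> 16);
	w0 = (x1 << 16) + (x0 & 0xffff);
	return ((uint64_t)w1 << 32) | w0;
}

int main(void)
{
	const uint32_t samples[] = { 0, 1, 3, 0xffff, 0x10000, 0xdeadbeef, 0xffffffff };
	const unsigned n = sizeof(samples) / sizeof(samples[0]);
	int fail = 0;

	for (unsigned i = 0; i < n; i++)
		for (unsigned j = 0; j < n; j++)
			if (mul32x32(samples[i], samples[j]) !=
			    (uint64_t)samples[i] * samples[j])
				fail = 1;
	puts(fail ? "umul_ppmm decomposition: FAIL"
		  : "umul_ppmm decomposition matches native multiply");
	return fail;
}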
/* * Copyright (C) 2008-2009 Michal Simek <[email protected]> * Copyright (C) 2008-2009 PetaLogix * Copyright (C) 2007 John Williams * * Reasonably optimised generic C-code for memcpy on Microblaze * This is generic C code to do efficient, alignment-aware memmove. * * It is based on demo code originally Copyright 2001 by Intel Corp, taken from * http://www.embedded.com/showArticle.jhtml?articleID=19205567 * * Attempts were made, unsuccessfully, to contact the original * author of this code (Michael Morrow, Intel). Below is the original * copyright notice. * * This software has been developed by Intel Corporation. * Intel specifically disclaims all warranties, express or * implied, and all liability, including consequential and * other indirect damages, for the use of this program, including * liability for infringement of any proprietary rights, * and including the warranties of merchantability and fitness * for a particular purpose. Intel does not assume any * responsibility for and errors which may appear in this program * not any responsibility to update it. */ #include <linux/export.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/compiler.h> #include <linux/string.h> #ifdef CONFIG_OPT_LIB_FUNCTION void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) { const char *src = v_src; char *dst = v_dst; const uint32_t *i_src; uint32_t *i_dst; if (!c) return v_dst; /* Use memcpy when source is higher than dest */ if (v_dst <= v_src) return memcpy(v_dst, v_src, c); /* The following code tries to optimize the copy by using unsigned * alignment. This will work fine if both source and destination are * aligned on the same boundary. However, if they are aligned on * different boundaries shifts will be necessary. This might result in * bad performance on MicroBlaze systems without a barrel shifter. */ /* FIXME this part needs more test */ /* Do a descending copy - this is a bit trickier! */ dst += c; src += c; if (c >= 4) { unsigned value, buf_hold; /* Align the destination to a word boundary. */ /* This is done in an endian independent manner. */ switch ((unsigned long)dst & 3) { case 3: *--dst = *--src; --c; fallthrough; case 2: *--dst = *--src; --c; fallthrough; case 1: *--dst = *--src; --c; } i_dst = (void *)dst; /* Choose a copy scheme based on the source */ /* alignment relative to destination. 
*/ switch ((unsigned long)src & 3) { case 0x0: /* Both byte offsets are aligned */ i_src = (const void *)src; for (; c >= 4; c -= 4) *--i_dst = *--i_src; src = (const void *)i_src; break; case 0x1: /* Unaligned - Off by 1 */ /* Word align the source */ i_src = (const void *) (((unsigned)src + 4) & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *--i_src >> 24; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold << 8 | value; buf_hold = value >> 24; } #else /* Load the holding buffer */ buf_hold = (*--i_src & 0xFF) << 24; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold | ((value & 0xFFFFFF00) >> 8); buf_hold = (value & 0xFF) << 24; } #endif /* Realign the source */ src = (const void *)i_src; src += 1; break; case 0x2: /* Unaligned - Off by 2 */ /* Word align the source */ i_src = (const void *) (((unsigned)src + 4) & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *--i_src >> 16; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold << 16 | value; buf_hold = value >> 16; } #else /* Load the holding buffer */ buf_hold = (*--i_src & 0xFFFF) << 16; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold | ((value & 0xFFFF0000) >> 16); buf_hold = (value & 0xFFFF) << 16; } #endif /* Realign the source */ src = (const void *)i_src; src += 2; break; case 0x3: /* Unaligned - Off by 3 */ /* Word align the source */ i_src = (const void *) (((unsigned)src + 4) & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *--i_src >> 8; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold << 24 | value; buf_hold = value >> 8; } #else /* Load the holding buffer */ buf_hold = (*--i_src & 0xFFFFFF) << 8; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold | ((value & 0xFF000000) >> 24); buf_hold = (value & 0xFFFFFF) << 8; } #endif /* Realign the source */ src = (const void *)i_src; src += 3; break; } dst = (void *)i_dst; } /* simple fast copy, ... unless a cache boundary is crossed */ /* Finish off any remaining bytes */ switch (c) { case 4: *--dst = *--src; fallthrough; case 3: *--dst = *--src; fallthrough; case 2: *--dst = *--src; fallthrough; case 1: *--dst = *--src; } return v_dst; } EXPORT_SYMBOL(memmove); #endif /* CONFIG_OPT_LIB_FUNCTION */
linux-master
arch/microblaze/lib/memmove.c
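The memmove() above copies descending, word at a time, with a holding buffer when source and destination disagree on alignment; the whole scheme rests on the overlap rule that a forward copy is safe when the destination is at or below the source, and otherwise the copy must run backwards. Below is a minimal userspace sketch of just that rule, not the kernel routine itself; simple_memmove and the test string are illustrative names and values.

/* Userspace sketch (not kernel code): byte-wise memmove that applies the
 * same overlap rule as the Microblaze routine above: copy forward when
 * dst <= src, copy descending otherwise. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static void *simple_memmove(void *v_dst, const void *v_src, size_t c)
{
	char *dst = v_dst;
	const char *src = v_src;

	if (dst <= src) {		/* forward copy cannot clobber unread bytes */
		while (c--)
			*dst++ = *src++;
	} else {			/* descending copy for a dst > src overlap */
		dst += c;
		src += c;
		while (c--)
			*--dst = *--src;
	}
	return v_dst;
}

int main(void)
{
	char buf[16] = "abcdefghij";

	simple_memmove(buf + 2, buf, 8);	/* overlapping, dst > src */
	assert(memcmp(buf, "ababcdefgh", 10) == 0);
	puts("overlap handled");
	return 0;
}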
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include "libgcc.h"

long long __ashldi3(long long u, word_type b)
{
	DWunion uu, w;
	word_type bm;

	if (b == 0)
		return u;

	uu.ll = u;
	bm = 32 - b;

	if (bm <= 0) {
		w.s.low = 0;
		w.s.high = (unsigned int) uu.s.low << -bm;
	} else {
		const unsigned int carries = (unsigned int) uu.s.low >> bm;

		w.s.low = (unsigned int) uu.s.low << b;
		w.s.high = ((unsigned int) uu.s.high << b) | carries;
	}

	return w.ll;
}
EXPORT_SYMBOL(__ashldi3);
linux-master
arch/microblaze/lib/ashldi3.c
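__ashldi3() builds a 64-bit left shift from two 32-bit halves: for shift counts of 32 or more the low word moves entirely into the high word, otherwise the bits that spill out of the low word are OR-ed into the high word. The userspace sketch below replays that decomposition and checks it against the native 64-bit shift; shift_left_64 is an illustrative name, and the union layout assumes a little-endian host with 32-bit int, unlike the kernel's DWunion which covers both byte orders.

/* Userspace sketch (assumes little-endian host, 32-bit unsigned int):
 * reimplement the half-word decomposition of __ashldi3 and compare it
 * with the compiler's native 64-bit shift. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static long long shift_left_64(long long u, int b)
{
	union { long long ll; struct { uint32_t low, high; } s; } uu, w;
	int bm;

	if (b == 0)
		return u;

	uu.ll = u;
	bm = 32 - b;

	if (bm <= 0) {			/* shifting by 32..63 bits */
		w.s.low = 0;
		w.s.high = uu.s.low << -bm;
	} else {			/* shifting by 1..31 bits */
		uint32_t carries = uu.s.low >> bm;

		w.s.low = uu.s.low << b;
		w.s.high = (uu.s.high << b) | carries;
	}
	return w.ll;
}

int main(void)
{
	long long v = 0x0123456789abcdefLL;

	for (int b = 0; b < 64; b++)
		assert(shift_left_64(v, b) ==
		       (long long)((unsigned long long)v << b));
	puts("all 64 shift counts match");
	return 0;
}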
/* * Copyright (C) 2008-2009 Michal Simek <[email protected]> * Copyright (C) 2008-2009 PetaLogix * Copyright (C) 2007 John Williams * * Reasonably optimised generic C-code for memset on Microblaze * This is generic C code to do efficient, alignment-aware memcpy. * * It is based on demo code originally Copyright 2001 by Intel Corp, taken from * http://www.embedded.com/showArticle.jhtml?articleID=19205567 * * Attempts were made, unsuccessfully, to contact the original * author of this code (Michael Morrow, Intel). Below is the original * copyright notice. * * This software has been developed by Intel Corporation. * Intel specifically disclaims all warranties, express or * implied, and all liability, including consequential and * other indirect damages, for the use of this program, including * liability for infringement of any proprietary rights, * and including the warranties of merchantability and fitness * for a particular purpose. Intel does not assume any * responsibility for and errors which may appear in this program * not any responsibility to update it. */ #include <linux/export.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/compiler.h> #include <linux/string.h> #ifdef CONFIG_OPT_LIB_FUNCTION void *memset(void *v_src, int c, __kernel_size_t n) { char *src = v_src; uint32_t *i_src; uint32_t w32 = 0; /* Truncate c to 8 bits */ c = (c & 0xFF); if (unlikely(c)) { /* Make a repeating word out of it */ w32 = c; w32 |= w32 << 8; w32 |= w32 << 16; } if (likely(n >= 4)) { /* Align the destination to a word boundary */ /* This is done in an endian independent manner */ switch ((unsigned) src & 3) { case 1: *src++ = c; --n; fallthrough; case 2: *src++ = c; --n; fallthrough; case 3: *src++ = c; --n; } i_src = (void *)src; /* Do as many full-word copies as we can */ for (; n >= 4; n -= 4) *i_src++ = w32; src = (void *)i_src; } /* Simple, byte oriented memset or the rest of count. */ switch (n) { case 3: *src++ = c; fallthrough; case 2: *src++ = c; fallthrough; case 1: *src++ = c; break; default: break; } return v_src; } EXPORT_SYMBOL(memset); #endif /* CONFIG_OPT_LIB_FUNCTION */
linux-master
arch/microblaze/lib/memset.c
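The memset() above widens the fill byte into a 32-bit word (w32) so the inner loop can store four bytes per iteration once the destination is word aligned. Here is a small hedged sketch of just that word-building step, verified against a plain byte fill; fill_words is an illustrative name, not a kernel symbol.

/* Userspace sketch: replicate a fill byte into a 32-bit word the way the
 * Microblaze memset does (c | c<<8 | c<<16 | c<<24) and verify that the
 * word-wise fill matches ordinary byte stores. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void fill_words(uint32_t *dst, int c, size_t nwords)
{
	uint32_t w32 = (uint8_t)c;

	w32 |= w32 << 8;		/* 0x000000cc -> 0x0000cccc */
	w32 |= w32 << 16;		/* 0x0000cccc -> 0xcccccccc */

	while (nwords--)
		*dst++ = w32;
}

int main(void)
{
	uint32_t a[8];
	uint8_t b[sizeof(a)];

	fill_words(a, 0xA5, 8);
	memset(b, 0xA5, sizeof(b));
	assert(memcmp(a, b, sizeof(a)) == 0);
	puts("word fill matches byte fill");
	return 0;
}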
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include "libgcc.h"

long long __ashrdi3(long long u, word_type b)
{
	DWunion uu, w;
	word_type bm;

	if (b == 0)
		return u;

	uu.ll = u;
	bm = 32 - b;

	if (bm <= 0) {
		/* w.s.high = 1..1 or 0..0 */
		w.s.high = uu.s.high >> 31;
		w.s.low = uu.s.high >> -bm;
	} else {
		const unsigned int carries = (unsigned int) uu.s.high << bm;

		w.s.high = uu.s.high >> b;
		w.s.low = ((unsigned int) uu.s.low >> b) | carries;
	}

	return w.ll;
}
EXPORT_SYMBOL(__ashrdi3);
linux-master
arch/microblaze/lib/ashrdi3.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include "libgcc.h"

word_type __ucmpdi2(unsigned long long a, unsigned long long b)
{
	const DWunion au = {.ll = a};
	const DWunion bu = {.ll = b};

	if ((unsigned int) au.s.high < (unsigned int) bu.s.high)
		return 0;
	else if ((unsigned int) au.s.high > (unsigned int) bu.s.high)
		return 2;
	if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
		return 0;
	else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
		return 2;
	return 1;
}
EXPORT_SYMBOL(__ucmpdi2);
linux-master
arch/microblaze/lib/ucmpdi2.c
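__ucmpdi2() compares two unsigned 64-bit values using only 32-bit comparisons, encoding the result as 0 (less), 1 (equal) or 2 (greater), the three-way convention libgcc expects. A hedged userspace sketch of that convention follows; ucmp64 is an illustrative name and uses explicit shifts instead of the kernel's DWunion.

/* Userspace sketch of the libgcc-style 0/1/2 comparison convention used
 * by __ucmpdi2: compare the high 32-bit halves first, then the low ones. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int ucmp64(uint64_t a, uint64_t b)
{
	uint32_t ah = a >> 32, bh = b >> 32;
	uint32_t al = (uint32_t)a, bl = (uint32_t)b;

	if (ah < bh) return 0;		/* a < b  */
	if (ah > bh) return 2;		/* a > b  */
	if (al < bl) return 0;
	if (al > bl) return 2;
	return 1;			/* a == b */
}

int main(void)
{
	assert(ucmp64(1, 2) == 0);
	assert(ucmp64(5, 5) == 1);
	assert(ucmp64(1ULL << 40, 1) == 2);
	puts("comparison convention ok");
	return 0;
}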
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file contains the routines for handling the MMU. * * Copyright (C) 2007 Xilinx, Inc. All rights reserved. * * Derived from arch/ppc/mm/4xx_mmu.c: * -- paulus * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * Amiga/APUS changes by Jesper Skov ([email protected]). * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ #include <linux/mm.h> #include <linux/init.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> mm_context_t next_mmu_context; unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1]; atomic_t nr_free_contexts; struct mm_struct *context_mm[LAST_CONTEXT+1]; /* * Initialize the context management stuff. */ void __init mmu_context_init(void) { /* * The use of context zero is reserved for the kernel. * This code assumes FIRST_CONTEXT < 32. */ context_map[0] = (1 << FIRST_CONTEXT) - 1; next_mmu_context = FIRST_CONTEXT; atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1); } /* * Steal a context from a task that has one at the moment. * * This isn't an LRU system, it just frees up each context in * turn (sort-of pseudo-random replacement :). This would be the * place to implement an LRU scheme if anyone were motivated to do it. */ void steal_context(void) { struct mm_struct *mm; /* free up context `next_mmu_context' */ /* if we shouldn't free context 0, don't... */ if (next_mmu_context < FIRST_CONTEXT) next_mmu_context = FIRST_CONTEXT; mm = context_mm[next_mmu_context]; flush_tlb_mm(mm); destroy_context(mm); }
linux-master
arch/microblaze/mm/mmu_context.c
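mmu_context_init() above reserves the kernel's contexts by pre-setting the low bits of context_map, so the allocator never hands out a context below FIRST_CONTEXT. The sketch below reproduces only that bitmap bookkeeping in userspace; the FIRST_CONTEXT and LAST_CONTEXT values and the context_in_use helper are illustrative, not taken from the kernel headers.

/* Userspace sketch of the context bitmap idea: contexts below
 * FIRST_CONTEXT are permanently reserved by pre-setting their bits.
 * The constants here are illustrative values only. */
#include <assert.h>
#include <stdio.h>

#define FIRST_CONTEXT	1
#define LAST_CONTEXT	255
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];

static int context_in_use(unsigned int ctx)
{
	return (context_map[ctx / BITS_PER_LONG] >> (ctx % BITS_PER_LONG)) & 1;
}

int main(void)
{
	/* reserve contexts 0 .. FIRST_CONTEXT-1, as mmu_context_init() does */
	context_map[0] = (1UL << FIRST_CONTEXT) - 1;

	assert(context_in_use(0));		/* kernel context is taken      */
	assert(!context_in_use(FIRST_CONTEXT));	/* first user context is free   */
	printf("%d contexts available\n", LAST_CONTEXT - FIRST_CONTEXT + 1);
	return 0;
}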
/* * Copyright (C) 2007-2008 Michal Simek <[email protected]> * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/dma-map-ops.h> #include <linux/memblock.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> /* mem_init */ #include <linux/initrd.h> #include <linux/of_fdt.h> #include <linux/pagemap.h> #include <linux/pfn.h> #include <linux/slab.h> #include <linux/swap.h> #include <linux/export.h> #include <asm/page.h> #include <asm/mmu_context.h> #include <asm/pgalloc.h> #include <asm/sections.h> #include <asm/tlb.h> #include <asm/fixmap.h> /* Use for MMU and noMMU because of PCI generic code */ int mem_init_done; char *klimit = _end; /* * Initialize the bootmem system and give it all the memory we * have available. */ unsigned long memory_start; EXPORT_SYMBOL(memory_start); unsigned long memory_size; EXPORT_SYMBOL(memory_size); unsigned long lowmem_size; EXPORT_SYMBOL(min_low_pfn); EXPORT_SYMBOL(max_low_pfn); #ifdef CONFIG_HIGHMEM static void __init highmem_init(void) { pr_debug("%x\n", (u32)PKMAP_BASE); map_page(PKMAP_BASE, 0, 0); /* XXX gross */ pkmap_page_table = virt_to_kpte(PKMAP_BASE); } static void __meminit highmem_setup(void) { unsigned long pfn; for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) { struct page *page = pfn_to_page(pfn); /* FIXME not sure about */ if (!memblock_is_reserved(pfn << PAGE_SHIFT)) free_highmem_page(page); } } #endif /* CONFIG_HIGHMEM */ /* * paging_init() sets up the page tables - in fact we've already done this. */ static void __init paging_init(void) { unsigned long zones_size[MAX_NR_ZONES]; int idx; /* Setup fixmaps */ for (idx = 0; idx < __end_of_fixed_addresses; idx++) clear_fixmap(idx); /* Clean every zones */ memset(zones_size, 0, sizeof(zones_size)); #ifdef CONFIG_HIGHMEM highmem_init(); zones_size[ZONE_DMA] = max_low_pfn; zones_size[ZONE_HIGHMEM] = max_pfn; #else zones_size[ZONE_DMA] = max_pfn; #endif /* We don't have holes in memory map */ free_area_init(zones_size); } void __init setup_memory(void) { /* * Kernel: * start: base phys address of kernel - page align * end: base phys address of kernel - page align * * min_low_pfn - the first page (mm/bootmem.c - node_boot_start) * max_low_pfn * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn) */ /* memory start is from the kernel end (aligned) to higher addr */ min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ /* RAM is assumed contiguous */ max_mapnr = memory_size >> PAGE_SHIFT; max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT; max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT; pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr); pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn); paging_init(); } void __init mem_init(void) { high_memory = (void *)__va(memory_start + lowmem_size - 1); /* this will put all memory onto the freelists */ memblock_free_all(); #ifdef CONFIG_HIGHMEM highmem_setup(); #endif mem_init_done = 1; } int page_is_ram(unsigned long pfn) { return pfn < max_low_pfn; } /* * Check for command-line options that affect what MMU_init will do. 
*/ static void mm_cmdline_setup(void) { unsigned long maxmem = 0; char *p = cmd_line; /* Look for mem= option on command line */ p = strstr(cmd_line, "mem="); if (p) { p += 4; maxmem = memparse(p, &p); if (maxmem && memory_size > maxmem) { memory_size = maxmem; memblock.memory.regions[0].size = memory_size; } } } /* * MMU_init_hw does the chip-specific initialization of the MMU hardware. */ static void __init mmu_init_hw(void) { /* * The Zone Protection Register (ZPR) defines how protection will * be applied to every page which is a member of a given zone. At * present, we utilize only two of the zones. * The zone index bits (of ZSEL) in the PTE are used for software * indicators, except the LSB. For user access, zone 1 is used, * for kernel access, zone 0 is used. We set all but zone 1 * to zero, allowing only kernel access as indicated in the PTE. * For zone 1, we set a 01 binary (a value of 10 will not work) * to allow user access as indicated in the PTE. This also allows * kernel access as indicated in the PTE. */ __asm__ __volatile__ ("ori r11, r0, 0x10000000;" \ "mts rzpr, r11;" : : : "r11"); } /* * MMU_init sets up the basic memory mappings for the kernel, * including both RAM and possibly some I/O regions, * and sets up the page tables and the MMU hardware ready to go. */ /* called from head.S */ asmlinkage void __init mmu_init(void) { unsigned int kstart, ksize; if (!memblock.reserved.cnt) { pr_emerg("Error memory count\n"); machine_restart(NULL); } if ((u32) memblock.memory.regions[0].size < 0x400000) { pr_emerg("Memory must be greater than 4MB\n"); machine_restart(NULL); } if ((u32) memblock.memory.regions[0].size < kernel_tlb) { pr_emerg("Kernel size is greater than memory node\n"); machine_restart(NULL); } /* Find main memory where the kernel is */ memory_start = (u32) memblock.memory.regions[0].base; lowmem_size = memory_size = (u32) memblock.memory.regions[0].size; if (lowmem_size > CONFIG_LOWMEM_SIZE) { lowmem_size = CONFIG_LOWMEM_SIZE; #ifndef CONFIG_HIGHMEM memory_size = lowmem_size; #endif } mm_cmdline_setup(); /* FIXME parse args from command line - not used */ /* * Map out the kernel text/data/bss from the available physical * memory. */ kstart = __pa(CONFIG_KERNEL_START); /* kernel start */ /* kernel size */ ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START)); memblock_reserve(kstart, ksize); #if defined(CONFIG_BLK_DEV_INITRD) /* Remove the init RAM disk from the available memory. 
*/ if (initrd_start) { unsigned long size; size = initrd_end - initrd_start; memblock_reserve(__virt_to_phys(initrd_start), size); } #endif /* CONFIG_BLK_DEV_INITRD */ /* Initialize the MMU hardware */ mmu_init_hw(); /* Map in all of RAM starting at CONFIG_KERNEL_START */ mapin_ram(); /* Extend vmalloc and ioremap area as big as possible */ #ifdef CONFIG_HIGHMEM ioremap_base = ioremap_bot = PKMAP_BASE; #else ioremap_base = ioremap_bot = FIXADDR_START; #endif /* Initialize the context management stuff */ mmu_context_init(); /* Shortly after that, the entire linear mapping will be available */ /* This will also cause that unflatten device tree will be allocated * inside 768MB limit */ memblock_set_current_limit(memory_start + lowmem_size - 1); parse_early_param(); early_init_fdt_scan_reserved_mem(); /* CMA initialization */ dma_contiguous_reserve(memory_start + lowmem_size - 1); memblock_dump_all(); } static const pgprot_t protection_map[16] = { [VM_NONE] = PAGE_NONE, [VM_READ] = PAGE_READONLY_X, [VM_WRITE] = PAGE_COPY, [VM_WRITE | VM_READ] = PAGE_COPY_X, [VM_EXEC] = PAGE_READONLY, [VM_EXEC | VM_READ] = PAGE_READONLY_X, [VM_EXEC | VM_WRITE] = PAGE_COPY, [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X, [VM_SHARED] = PAGE_NONE, [VM_SHARED | VM_READ] = PAGE_READONLY_X, [VM_SHARED | VM_WRITE] = PAGE_SHARED, [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X, [VM_SHARED | VM_EXEC] = PAGE_READONLY, [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X, [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X }; DECLARE_VM_GET_PAGE_PROT
linux-master
arch/microblaze/mm/init.c
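mm_cmdline_setup() in init.c clips memory_size when a mem= option is present on the command line, relying on the kernel's memparse() to accept suffixes such as K, M and G. The userspace sketch below mirrors that clipping logic under stated assumptions: parse_mem_opt is an illustrative stand-in for memparse, and the command line and sizes are made up.

/* Userspace sketch: find "mem=" in a command line and clip a memory size,
 * mimicking what mm_cmdline_setup() does with memparse().  parse_mem_opt()
 * is an illustrative replacement for the kernel helper. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long parse_mem_opt(const char *s)
{
	char *end;
	unsigned long val = strtoul(s, &end, 0);

	switch (*end) {			/* accept the usual size suffixes */
	case 'G': case 'g': val <<= 10;	/* fall through */
	case 'M': case 'm': val <<= 10;	/* fall through */
	case 'K': case 'k': val <<= 10;
	}
	return val;
}

int main(void)
{
	const char *cmd_line = "console=ttyUL0 mem=64M root=/dev/ram";
	unsigned long memory_size = 128UL << 20;	/* pretend 128 MB of RAM */
	const char *p = strstr(cmd_line, "mem=");

	if (p) {
		unsigned long maxmem = parse_mem_opt(p + 4);

		if (maxmem && memory_size > maxmem)
			memory_size = maxmem;
	}
	assert(memory_size == (64UL << 20));
	printf("memory_size clipped to %lu bytes\n", memory_size);
	return 0;
}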
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <[email protected]>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-map-ops.h>
#include <asm/cpuinfo.h>
#include <asm/cacheflush.h>

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	phys_addr_t paddr = page_to_phys(page);

	flush_dcache_range(paddr, paddr + size);
}
linux-master
arch/microblaze/mm/consistent.c
/* * This file contains the routines setting up the linux page tables. * * Copyright (C) 2008 Michal Simek * Copyright (C) 2008 PetaLogix * * Copyright (C) 2007 Xilinx, Inc. All rights reserved. * * Derived from arch/ppc/mm/pgtable.c: * -- paulus * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Modifications by Paul Mackerras (PowerMac) ([email protected]) * and Cort Dougan (PReP) ([email protected]) * Copyright (C) 1996 Paul Mackerras * Amiga/APUS changes by Jesper Skov ([email protected]). * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. * */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/mm_types.h> #include <linux/pgtable.h> #include <linux/memblock.h> #include <linux/kallsyms.h> #include <asm/pgalloc.h> #include <linux/io.h> #include <asm/mmu.h> #include <asm/sections.h> #include <asm/fixmap.h> unsigned long ioremap_base; unsigned long ioremap_bot; EXPORT_SYMBOL(ioremap_bot); static void __iomem *__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) { unsigned long v, i; phys_addr_t p; int err; /* * Choose an address to map it to. * Once the vmalloc system is running, we use it. * Before then, we use space going down from ioremap_base * (ioremap_bot records where we're up to). */ p = addr & PAGE_MASK; size = PAGE_ALIGN(addr + size) - p; /* * Don't allow anybody to remap normal RAM that we're using. * mem_init() sets high_memory so only do the check after that. * * However, allow remap of rootfs: TBD */ if (mem_init_done && p >= memory_start && p < virt_to_phys(high_memory) && !(p >= __virt_to_phys((phys_addr_t)__bss_stop) && p < __virt_to_phys((phys_addr_t)__bss_stop))) { pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %ps\n", (unsigned long)p, __builtin_return_address(0)); return NULL; } if (size == 0) return NULL; /* * Is it already mapped? If the whole area is mapped then we're * done, otherwise remap it since we want to keep the virt addrs for * each request contiguous. * * We make the assumption here that if the bottom and top * of the range we want are mapped then it's mapped to the * same virt address (and this is contiguous). 
* -- Cort */ if (mem_init_done) { struct vm_struct *area; area = get_vm_area(size, VM_IOREMAP); if (area == NULL) return NULL; v = (unsigned long) area->addr; } else { v = (ioremap_bot -= size); } if ((flags & _PAGE_PRESENT) == 0) flags |= _PAGE_KERNEL; if (flags & _PAGE_NO_CACHE) flags |= _PAGE_GUARDED; err = 0; for (i = 0; i < size && err == 0; i += PAGE_SIZE) err = map_page(v + i, p + i, flags); if (err) { if (mem_init_done) vfree((void *)v); return NULL; } return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK)); } void __iomem *ioremap(phys_addr_t addr, unsigned long size) { return __ioremap(addr, size, _PAGE_NO_CACHE); } EXPORT_SYMBOL(ioremap); void iounmap(volatile void __iomem *addr) { if ((__force void *)addr > high_memory && (unsigned long) addr < ioremap_bot) vfree((void *) (PAGE_MASK & (unsigned long) addr)); } EXPORT_SYMBOL(iounmap); int map_page(unsigned long va, phys_addr_t pa, int flags) { p4d_t *p4d; pud_t *pud; pmd_t *pd; pte_t *pg; int err = -ENOMEM; /* Use upper 10 bits of VA to index the first level map */ p4d = p4d_offset(pgd_offset_k(va), va); pud = pud_offset(p4d, va); pd = pmd_offset(pud, va); /* Use middle 10 bits of VA to index the second-level map */ pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */ /* pg = pte_alloc_kernel(&init_mm, pd, va); */ if (pg != NULL) { err = 0; set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags))); if (unlikely(mem_init_done)) _tlbie(va); } return err; } /* * Map in all of physical memory starting at CONFIG_KERNEL_START. */ void __init mapin_ram(void) { unsigned long v, p, s, f; v = CONFIG_KERNEL_START; p = memory_start; for (s = 0; s < lowmem_size; s += PAGE_SIZE) { f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED | _PAGE_HWEXEC; if (!is_kernel_text(v)) f |= _PAGE_WRENABLE; else /* On the MicroBlaze, no user access forces R/W kernel access */ f |= _PAGE_USER; map_page(v, p, f); v += PAGE_SIZE; p += PAGE_SIZE; } } /* is x a power of 2? */ #define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0)) /* Scan the real Linux page tables and return a PTE pointer for * a virtual address in a context. * Returns true (1) if PTE was found, zero otherwise. The pointer to * the PTE pointer is unmodified if PTE is not found. */ static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; int retval = 0; pgd = pgd_offset(mm, addr & PAGE_MASK); if (pgd) { p4d = p4d_offset(pgd, addr & PAGE_MASK); pud = pud_offset(p4d, addr & PAGE_MASK); pmd = pmd_offset(pud, addr & PAGE_MASK); if (pmd_present(*pmd)) { pte = pte_offset_kernel(pmd, addr & PAGE_MASK); if (pte) { retval = 1; *ptep = pte; } } } return retval; } /* Find physical address for this virtual address. Normally used by * I/O functions, but anyone can call it. */ unsigned long iopa(unsigned long addr) { unsigned long pa; pte_t *pte; struct mm_struct *mm; /* Allow mapping of user addresses (within the thread) * for DMA if necessary. 
*/ if (addr < TASK_SIZE) mm = current->mm; else mm = &init_mm; pa = 0; if (get_pteptr(mm, addr, &pte)) pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK); return pa; } __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { if (mem_init_done) return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); else return memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, MEMBLOCK_LOW_LIMIT, memory_start + kernel_tlb, NUMA_NO_NODE); } void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) { unsigned long address = __fix_to_virt(idx); if (idx >= __end_of_fixed_addresses) BUG(); map_page(address, phys, pgprot_val(flags)); }
linux-master
arch/microblaze/mm/pgtable.c
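__ioremap() above first rounds the request to whole pages: the physical base is aligned down, the length is extended so the mapping covers addr through addr+size, and the in-page offset is added back to the returned virtual address. The sketch below shows only that arithmetic; the 4 KB PAGE_SIZE and the sample device address are illustrative assumptions.

/* Userspace sketch of the page-rounding arithmetic in __ioremap():
 * align the base down, round the end up, keep the in-page offset.
 * PAGE_SIZE is fixed at 4 KB here for illustration. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	uint64_t addr = 0x84001234;		/* unaligned device address    */
	uint64_t size = 0x300;			/* request fits in one page    */

	uint64_t p = addr & PAGE_MASK;		/* aligned physical base       */
	uint64_t len = PAGE_ALIGN(addr + size) - p;
	uint64_t offset = addr & ~PAGE_MASK;	/* offset re-added to the vaddr */

	assert(p == 0x84001000);
	assert(len == PAGE_SIZE);
	assert(offset == 0x234);
	printf("map %#llx + %#llx, return base + %#llx\n",
	       (unsigned long long)p, (unsigned long long)len,
	       (unsigned long long)offset);
	return 0;
}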
/* * arch/microblaze/mm/fault.c * * Copyright (C) 2007 Xilinx, Inc. All rights reserved. * * Derived from "arch/ppc/mm/fault.c" * Copyright (C) 1995-1996 Gary Thomas ([email protected]) * * Derived from "arch/i386/mm/fault.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Modified by Cort Dougan and Paul Mackerras. * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. * */ #include <linux/extable.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/perf_event.h> #include <asm/page.h> #include <asm/mmu.h> #include <linux/mmu_context.h> #include <linux/uaccess.h> #include <asm/exceptions.h> static unsigned long pte_misses; /* updated by do_page_fault() */ static unsigned long pte_errors; /* updated by do_page_fault() */ /* * Check whether the instruction at regs->pc is a store using * an update addressing form which will update r1. */ static int store_updates_sp(struct pt_regs *regs) { unsigned int inst; if (get_user(inst, (unsigned int __user *)regs->pc)) return 0; /* check for 1 in the rD field */ if (((inst >> 21) & 0x1f) != 1) return 0; /* check for store opcodes */ if ((inst & 0xd0000000) == 0xd0000000) return 1; return 0; } /* * bad_page_fault is called when we have a bad access from the kernel. * It is called from do_page_fault above and from some of the procedures * in traps.c. */ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) { const struct exception_table_entry *fixup; /* MS: no context */ /* Are we prepared to handle this fault? */ fixup = search_exception_tables(regs->pc); if (fixup) { regs->pc = fixup->fixup; return; } /* kernel has accessed a bad area */ die("kernel access of bad area", regs, sig); } /* * The error_code parameter is ESR for a data fault, * 0 for an instruction fault. */ void do_page_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; int code = SEGV_MAPERR; int is_write = error_code & ESR_S; vm_fault_t fault; unsigned int flags = FAULT_FLAG_DEFAULT; regs->ear = address; regs->esr = error_code; /* On a kernel SLB miss we can only check for a valid exception entry */ if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) { pr_warn("kernel task_size exceed"); _exception(SIGSEGV, regs, code, address); } /* for instr TLB miss and instr storage exception ESR_S is undefined */ if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) is_write = 0; if (unlikely(faulthandler_disabled() || !mm)) { if (kernel_mode(regs)) goto bad_area_nosemaphore; /* faulthandler_disabled() in user mode is really bad, as is current->mm == NULL. */ pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n", mm); pr_emerg("r15 = %lx MSR = %lx\n", regs->r15, regs->msr); die("Weird page fault", regs, SIGSEGV); } if (user_mode(regs)) flags |= FAULT_FLAG_USER; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the * kernel and should generate an OOPS. 
Unfortunately, in the case of an * erroneous fault occurring in a code path which already holds mmap_lock * we will deadlock attempting to validate the fault against the * address space. Luckily the kernel only validly references user * space from well defined areas of code, which are listed in the * exceptions table. * * As the vast majority of faults will be valid we will only perform * the source reference check when there is a possibility of a deadlock. * Attempt to lock the address space, if we cannot we then validate the * source. If this is invalid we can skip the address space check, * thus avoiding the deadlock. */ if (unlikely(!mmap_read_trylock(mm))) { if (kernel_mode(regs) && !search_exception_tables(regs->pc)) goto bad_area_nosemaphore; retry: mmap_read_lock(mm); } vma = find_vma(mm, address); if (unlikely(!vma)) goto bad_area; if (vma->vm_start <= address) goto good_area; if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) goto bad_area; if (unlikely(!is_write)) goto bad_area; /* * N.B. The ABI allows programs to access up to * a few hundred bytes below the stack pointer (TBD). * The kernel signal delivery code writes up to about 1.5kB * below the stack pointer (r1) before decrementing it. * The exec code can write slightly over 640kB to the stack * before setting the user r1. Thus we allow the stack to * expand to 1MB without further checks. */ if (unlikely(address + 0x100000 < vma->vm_end)) { /* get user regs even if this fault is in kernel mode */ struct pt_regs *uregs = current->thread.regs; if (uregs == NULL) goto bad_area; /* * A user-mode access to an address a long way below * the stack pointer is only valid if the instruction * is one which would update the stack pointer to the * address accessed if the instruction completed, * i.e. either stwu rs,n(r1) or stwux rs,r1,rb * (or the byte, halfword, float or double forms). * * If we don't check this then any write to the area * between the last mapped region and the stack will * expand the stack rather than segfaulting. */ if (address + 2048 < uregs->r1 && (kernel_mode(regs) || !store_updates_sp(regs))) goto bad_area; } vma = expand_stack(mm, address); if (!vma) goto bad_area_nosemaphore; good_area: code = SEGV_ACCERR; /* a write */ if (unlikely(is_write)) { if (unlikely(!(vma->vm_flags & VM_WRITE))) goto bad_area; flags |= FAULT_FLAG_WRITE; /* a read */ } else { /* protection fault */ if (unlikely(error_code & 0x08000000)) goto bad_area; if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC)))) goto bad_area; } /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) { if (!user_mode(regs)) bad_page_fault(regs, address, SIGBUS); return; } /* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) return; if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGSEGV) goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; /* * No need to mmap_read_unlock(mm) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c. 
*/ goto retry; } mmap_read_unlock(mm); /* * keep track of tlb+htab misses that are good addrs but * just need pte's created via handle_mm_fault() * -- Cort */ pte_misses++; return; bad_area: mmap_read_unlock(mm); bad_area_nosemaphore: pte_errors++; /* User mode accesses cause a SIGSEGV */ if (user_mode(regs)) { _exception(SIGSEGV, regs, code, address); return; } bad_page_fault(regs, address, SIGSEGV); return; /* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. */ out_of_memory: mmap_read_unlock(mm); if (!user_mode(regs)) bad_page_fault(regs, address, SIGKILL); else pagefault_out_of_memory(); return; do_sigbus: mmap_read_unlock(mm); if (user_mode(regs)) { force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address); return; } bad_page_fault(regs, address, SIGBUS); }
linux-master
arch/microblaze/mm/fault.c
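store_updates_sp() in the fault handler decides whether a faulting access far below the stack pointer looks like a stack-pointer-updating store: the destination register field in bits 25:21 must name r1, and the major opcode bits must match the store pattern 0xd0000000. The sketch below applies that predicate to synthetic instruction words; looks_like_sp_store is an illustrative name and the sample words are constructed only to exercise the two masks, not taken from a real disassembly.

/* Userspace sketch of the bit tests in store_updates_sp(): the rD field in
 * bits 25:21 must be r1, and the top bits must match the store pattern. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int looks_like_sp_store(uint32_t inst)
{
	if (((inst >> 21) & 0x1f) != 1)		/* destination register is r1? */
		return 0;
	if ((inst & 0xd0000000) == 0xd0000000)	/* store-class major opcode?   */
		return 1;
	return 0;
}

int main(void)
{
	uint32_t store_to_r1 = 0xd0000000 | (1u << 21);	/* passes both checks */
	uint32_t store_to_r5 = 0xd0000000 | (5u << 21);	/* wrong rD field     */
	uint32_t not_a_store = 0x80000000 | (1u << 21);	/* opcode bits differ */

	assert(looks_like_sp_store(store_to_r1));
	assert(!looks_like_sp_store(store_to_r5));
	assert(!looks_like_sp_store(not_a_store));
	puts("predicate behaves as described");
	return 0;
}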
/* * Ftrace support for Microblaze. * * Copyright (C) 2009 Michal Simek <[email protected]> * Copyright (C) 2009 PetaLogix * * Based on MIPS and PowerPC ftrace code * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <asm/cacheflush.h> #include <linux/ftrace.h> #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* * Hook the return address and push it in the stack of return addrs * in current thread info. */ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) { unsigned long old; int faulted; unsigned long return_hooker = (unsigned long) &return_to_handler; if (unlikely(ftrace_graph_is_dead())) return; if (unlikely(atomic_read(&current->tracing_graph_pause))) return; /* * Protect against fault, even if it shouldn't * happen. This tool is too much intrusive to * ignore such a protection. */ asm volatile(" 1: lwi %0, %2, 0;" \ "2: swi %3, %2, 0;" \ " addik %1, r0, 0;" \ "3:" \ " .section .fixup, \"ax\";" \ "4: brid 3b;" \ " addik %1, r0, 1;" \ " .previous;" \ " .section __ex_table,\"a\";" \ " .word 1b,4b;" \ " .word 2b,4b;" \ " .previous;" \ : "=&r" (old), "=r" (faulted) : "r" (parent), "r" (return_hooker) ); flush_dcache_range((u32)parent, (u32)parent + 4); flush_icache_range((u32)parent, (u32)parent + 4); if (unlikely(faulted)) { ftrace_graph_stop(); WARN_ON(1); return; } if (function_graph_enter(old, self_addr, 0, NULL)) *parent = old; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_DYNAMIC_FTRACE /* save value to addr - it is save to do it in asm */ static int ftrace_modify_code(unsigned long addr, unsigned int value) { int faulted = 0; __asm__ __volatile__(" 1: swi %2, %1, 0;" \ " addik %0, r0, 0;" \ "2:" \ " .section .fixup, \"ax\";" \ "3: brid 2b;" \ " addik %0, r0, 1;" \ " .previous;" \ " .section __ex_table,\"a\";" \ " .word 1b,3b;" \ " .previous;" \ : "=r" (faulted) : "r" (addr), "r" (value) ); if (unlikely(faulted)) return -EFAULT; flush_dcache_range(addr, addr + 4); flush_icache_range(addr, addr + 4); return 0; } #define MICROBLAZE_NOP 0x80000000 #define MICROBLAZE_BRI 0xb800000C static unsigned int recorded; /* if save was or not */ static unsigned int imm; /* saving whole imm instruction */ /* There are two approaches howto solve ftrace_make nop function - look below */ #undef USE_FTRACE_NOP #ifdef USE_FTRACE_NOP static unsigned int bralid; /* saving whole bralid instruction */ #endif int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { /* we have this part of code which we are working with * b000c000 imm -16384 * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> * 80000000 or r0, r0, r0 * * The first solution (!USE_FTRACE_NOP-could be called branch solution) * b000c000 bri 12 (0xC - jump to any other instruction) * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> * 80000000 or r0, r0, r0 * any other instruction * * The second solution (USE_FTRACE_NOP) - no jump just nops * 80000000 or r0, r0, r0 * 80000000 or r0, r0, r0 * 80000000 or r0, r0, r0 */ int ret = 0; if (recorded == 0) { recorded = 1; imm = *(unsigned int *)rec->ip; pr_debug("%s: imm:0x%x\n", __func__, imm); #ifdef USE_FTRACE_NOP bralid = *(unsigned int *)(rec->ip + 4); pr_debug("%s: bralid 0x%x\n", __func__, bralid); #endif /* USE_FTRACE_NOP */ } #ifdef USE_FTRACE_NOP ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP); ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP); #else /* USE_FTRACE_NOP */ ret = ftrace_modify_code(rec->ip, 
MICROBLAZE_BRI); #endif /* USE_FTRACE_NOP */ return ret; } /* I believe that first is called ftrace_make_nop before this function */ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { int ret; pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n", __func__, (unsigned int)addr, (unsigned int)rec->ip, imm); ret = ftrace_modify_code(rec->ip, imm); #ifdef USE_FTRACE_NOP pr_debug("%s: bralid:0x%x\n", __func__, bralid); ret += ftrace_modify_code(rec->ip + 4, bralid); #endif /* USE_FTRACE_NOP */ return ret; } int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); unsigned int upper = (unsigned int)func; unsigned int lower = (unsigned int)func; int ret = 0; /* create proper saving to ftrace_call poll */ upper = 0xb0000000 + (upper >> 16); /* imm func_upper */ lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */ pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n", __func__, (unsigned int)func, (unsigned int)ip, upper, lower); /* save upper and lower code */ ret = ftrace_modify_code(ip, upper); ret += ftrace_modify_code(ip + 4, lower); /* We just need to replace the rtsd r15, 8 with NOP */ ret += ftrace_modify_code((unsigned long)&ftrace_caller, MICROBLAZE_NOP); return ret; } #ifdef CONFIG_FUNCTION_GRAPH_TRACER unsigned int old_jump; /* saving place for jump instruction */ int ftrace_enable_ftrace_graph_caller(void) { unsigned int ret; unsigned long ip = (unsigned long)(&ftrace_call_graph); old_jump = *(unsigned int *)ip; /* save jump over instruction */ ret = ftrace_modify_code(ip, MICROBLAZE_NOP); pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump); return ret; } int ftrace_disable_ftrace_graph_caller(void) { unsigned int ret; unsigned long ip = (unsigned long)(&ftrace_call_graph); ret = ftrace_modify_code(ip, old_jump); pr_debug("%s\n", __func__); return ret; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_DYNAMIC_FTRACE */
linux-master
arch/microblaze/kernel/ftrace.c
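ftrace_update_ftrace_func() above encodes a 32-bit function address as two MicroBlaze instructions: an imm carrying the upper 16 bits (0xb0000000 plus the high half) and an addik r20, r0 carrying the lower 16 bits (0x32800000 plus the low half). The sketch below performs the same split and shows the address recombining from the two immediates; the sample address 0xc0008e30 is the _mcount address quoted in the file's own comments, used here only as an example.

/* Userspace sketch of the imm/addik split used when patching ftrace_call:
 * the upper 16 bits ride in an imm word, the lower 16 in an addik word,
 * and together they reconstruct the original 32-bit address. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t func = 0xc0008e30;			/* sample target address      */

	uint32_t upper = 0xb0000000 + (func >> 16);	/* imm   func_upper           */
	uint32_t lower = 0x32800000 + (func & 0xFFFF);	/* addik r20, r0, func_lower  */

	/* the 16-bit immediates sit in the low halves of both words */
	uint32_t rebuilt = ((upper & 0xFFFF) << 16) | (lower & 0xFFFF);

	assert(rebuilt == func);
	printf("imm=%08x addik=%08x -> %08x\n", upper, lower, rebuilt);
	return 0;
}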
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2009 Michal Simek <[email protected]>
 * Copyright (C) 2008-2009 PetaLogix
 */

#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>

#include <asm/checksum.h>
#include <asm/cacheflush.h>
#include <linux/io.h>
#include <asm/page.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>

#ifdef CONFIG_FUNCTION_TRACER
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
#endif

/*
 * Assembly functions that may be used (directly or indirectly) by modules
 */
EXPORT_SYMBOL(__copy_tofrom_user);

#ifdef CONFIG_OPT_LIB_ASM
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
#endif

EXPORT_SYMBOL(empty_zero_page);

EXPORT_SYMBOL(mbc);

extern void __divsi3(void);
EXPORT_SYMBOL(__divsi3);
extern void __modsi3(void);
EXPORT_SYMBOL(__modsi3);
extern void __mulsi3(void);
EXPORT_SYMBOL(__mulsi3);
extern void __udivsi3(void);
EXPORT_SYMBOL(__udivsi3);
extern void __umodsi3(void);
EXPORT_SYMBOL(__umodsi3);
linux-master
arch/microblaze/kernel/microblaze_ksyms.c
/* * Copyright (C) 2007-2013 Michal Simek <[email protected]> * Copyright (C) 2012-2013 Xilinx, Inc. * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <linux/clk.h> #include <linux/clockchips.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/timecounter.h> #include <asm/cpuinfo.h> static void __iomem *timer_baseaddr; static unsigned int freq_div_hz; static unsigned int timer_clock_freq; #define TCSR0 (0x00) #define TLR0 (0x04) #define TCR0 (0x08) #define TCSR1 (0x10) #define TLR1 (0x14) #define TCR1 (0x18) #define TCSR_MDT (1<<0) #define TCSR_UDT (1<<1) #define TCSR_GENT (1<<2) #define TCSR_CAPT (1<<3) #define TCSR_ARHT (1<<4) #define TCSR_LOAD (1<<5) #define TCSR_ENIT (1<<6) #define TCSR_ENT (1<<7) #define TCSR_TINT (1<<8) #define TCSR_PWMA (1<<9) #define TCSR_ENALL (1<<10) static unsigned int (*read_fn)(void __iomem *); static void (*write_fn)(u32, void __iomem *); static void timer_write32(u32 val, void __iomem *addr) { iowrite32(val, addr); } static unsigned int timer_read32(void __iomem *addr) { return ioread32(addr); } static void timer_write32_be(u32 val, void __iomem *addr) { iowrite32be(val, addr); } static unsigned int timer_read32_be(void __iomem *addr) { return ioread32be(addr); } static inline void xilinx_timer0_stop(void) { write_fn(read_fn(timer_baseaddr + TCSR0) & ~TCSR_ENT, timer_baseaddr + TCSR0); } static inline void xilinx_timer0_start_periodic(unsigned long load_val) { if (!load_val) load_val = 1; /* loading value to timer reg */ write_fn(load_val, timer_baseaddr + TLR0); /* load the initial value */ write_fn(TCSR_LOAD, timer_baseaddr + TCSR0); /* see timer data sheet for detail * !ENALL - don't enable 'em all * !PWMA - disable pwm * TINT - clear interrupt status * ENT- enable timer itself * ENIT - enable interrupt * !LOAD - clear the bit to let go * ARHT - auto reload * !CAPT - no external trigger * !GENT - no external signal * UDT - set the timer as down counter * !MDT0 - generate mode */ write_fn(TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT, timer_baseaddr + TCSR0); } static inline void xilinx_timer0_start_oneshot(unsigned long load_val) { if (!load_val) load_val = 1; /* loading value to timer reg */ write_fn(load_val, timer_baseaddr + TLR0); /* load the initial value */ write_fn(TCSR_LOAD, timer_baseaddr + TCSR0); write_fn(TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT, timer_baseaddr + TCSR0); } static int xilinx_timer_set_next_event(unsigned long delta, struct clock_event_device *dev) { pr_debug("%s: next event, delta %x\n", __func__, (u32)delta); xilinx_timer0_start_oneshot(delta); return 0; } static int xilinx_timer_shutdown(struct clock_event_device *evt) { pr_info("%s\n", __func__); xilinx_timer0_stop(); return 0; } static int xilinx_timer_set_periodic(struct clock_event_device *evt) { pr_info("%s\n", __func__); xilinx_timer0_start_periodic(freq_div_hz); return 0; } static struct clock_event_device clockevent_xilinx_timer = { .name = "xilinx_clockevent", .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, .shift = 8, .rating = 300, .set_next_event = xilinx_timer_set_next_event, .set_state_shutdown = xilinx_timer_shutdown, .set_state_periodic = xilinx_timer_set_periodic, }; 
static inline void timer_ack(void) { write_fn(read_fn(timer_baseaddr + TCSR0), timer_baseaddr + TCSR0); } static irqreturn_t timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = &clockevent_xilinx_timer; timer_ack(); evt->event_handler(evt); return IRQ_HANDLED; } static __init int xilinx_clockevent_init(void) { clockevent_xilinx_timer.mult = div_sc(timer_clock_freq, NSEC_PER_SEC, clockevent_xilinx_timer.shift); clockevent_xilinx_timer.max_delta_ns = clockevent_delta2ns((u32)~0, &clockevent_xilinx_timer); clockevent_xilinx_timer.max_delta_ticks = (u32)~0; clockevent_xilinx_timer.min_delta_ns = clockevent_delta2ns(1, &clockevent_xilinx_timer); clockevent_xilinx_timer.min_delta_ticks = 1; clockevent_xilinx_timer.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_xilinx_timer); return 0; } static u64 xilinx_clock_read(void) { return read_fn(timer_baseaddr + TCR1); } static u64 xilinx_read(struct clocksource *cs) { /* reading actual value of timer 1 */ return (u64)xilinx_clock_read(); } static struct timecounter xilinx_tc = { .cc = NULL, }; static u64 xilinx_cc_read(const struct cyclecounter *cc) { return xilinx_read(NULL); } static struct cyclecounter xilinx_cc = { .read = xilinx_cc_read, .mask = CLOCKSOURCE_MASK(32), .shift = 8, }; static int __init init_xilinx_timecounter(void) { xilinx_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC, xilinx_cc.shift); timecounter_init(&xilinx_tc, &xilinx_cc, sched_clock()); return 0; } static struct clocksource clocksource_microblaze = { .name = "xilinx_clocksource", .rating = 300, .read = xilinx_read, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static int __init xilinx_clocksource_init(void) { int ret; ret = clocksource_register_hz(&clocksource_microblaze, timer_clock_freq); if (ret) { pr_err("failed to register clocksource"); return ret; } /* stop timer1 */ write_fn(read_fn(timer_baseaddr + TCSR1) & ~TCSR_ENT, timer_baseaddr + TCSR1); /* start timer1 - up counting without interrupt */ write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, timer_baseaddr + TCSR1); /* register timecounter - for ftrace support */ return init_xilinx_timecounter(); } static int __init xilinx_timer_init(struct device_node *timer) { struct clk *clk; static int initialized; u32 irq; u32 timer_num = 1; int ret; /* If this property is present, the device is a PWM and not a timer */ if (of_property_read_bool(timer, "#pwm-cells")) return 0; if (initialized) return -EINVAL; initialized = 1; timer_baseaddr = of_iomap(timer, 0); if (!timer_baseaddr) { pr_err("ERROR: invalid timer base address\n"); return -ENXIO; } write_fn = timer_write32; read_fn = timer_read32; write_fn(TCSR_MDT, timer_baseaddr + TCSR0); if (!(read_fn(timer_baseaddr + TCSR0) & TCSR_MDT)) { write_fn = timer_write32_be; read_fn = timer_read32_be; } irq = irq_of_parse_and_map(timer, 0); if (irq <= 0) { pr_err("Failed to parse and map irq"); return -EINVAL; } of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num); if (timer_num) { pr_err("Please enable two timers in HW\n"); return -EINVAL; } pr_info("%pOF: irq=%d\n", timer, irq); clk = of_clk_get(timer, 0); if (IS_ERR(clk)) { pr_err("ERROR: timer CCF input clock not found\n"); /* If there is clock-frequency property than use it */ of_property_read_u32(timer, "clock-frequency", &timer_clock_freq); } else { timer_clock_freq = clk_get_rate(clk); } if (!timer_clock_freq) { pr_err("ERROR: Using CPU clock frequency\n"); timer_clock_freq = cpuinfo.cpu_clock_freq; } freq_div_hz = timer_clock_freq / HZ; ret = request_irq(irq, 
timer_interrupt, IRQF_TIMER, "timer", &clockevent_xilinx_timer); if (ret) { pr_err("Failed to setup IRQ"); return ret; } ret = xilinx_clocksource_init(); if (ret) return ret; ret = xilinx_clockevent_init(); if (ret) return ret; sched_clock_register(xilinx_clock_read, 32, timer_clock_freq); return 0; } TIMER_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a", xilinx_timer_init);
linux-master
arch/microblaze/kernel/timer.c
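The timer driver above derives two quantities from the timer input clock: the periodic reload value freq_div_hz = timer_clock_freq / HZ, and a fixed-point mult computed as (freq << shift) / NSEC_PER_SEC via div_sc() with the clockevent's shift of 8. The sketch below evaluates both for illustrative numbers (a 100 MHz timer clock and HZ of 100); those values and the arithmetic being done in userspace are assumptions, not part of the driver.

/* Userspace sketch of two conversions from the timer driver: the periodic
 * reload value timer_clock_freq / HZ and the fixed-point mult used by the
 * clockevent (roughly div_sc(freq, NSEC_PER_SEC, shift)). */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define HZ		100

int main(void)
{
	uint64_t timer_clock_freq = 100000000;	/* illustrative 100 MHz clock */
	unsigned int shift = 8;

	uint64_t freq_div_hz = timer_clock_freq / HZ;	/* ticks per periodic tick */
	uint64_t mult = (timer_clock_freq << shift) / NSEC_PER_SEC;

	/* one period of the periodic timer, converted back to nanoseconds */
	uint64_t period_ns = freq_div_hz * NSEC_PER_SEC / timer_clock_freq;

	printf("reload=%llu ticks, period=%llu ns, mult=%llu (shift=%u)\n",
	       (unsigned long long)freq_div_hz, (unsigned long long)period_ns,
	       (unsigned long long)mult, shift);
	return 0;
}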
/* * Copyright (C) 2008-2009 Michal Simek <[email protected]> * Copyright (C) 2008-2009 PetaLogix * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/cpu.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/pm.h> #include <linux/tick.h> #include <linux/bitops.h> #include <linux/ptrace.h> #include <asm/cacheflush.h> void show_regs(struct pt_regs *regs) { show_regs_print_info(KERN_INFO); pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode); pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n", regs->r1, regs->r2, regs->r3, regs->r4); pr_info(" r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n", regs->r5, regs->r6, regs->r7, regs->r8); pr_info(" r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n", regs->r9, regs->r10, regs->r11, regs->r12); pr_info(" r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n", regs->r13, regs->r14, regs->r15, regs->r16); pr_info(" r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n", regs->r17, regs->r18, regs->r19, regs->r20); pr_info(" r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n", regs->r21, regs->r22, regs->r23, regs->r24); pr_info(" r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n", regs->r25, regs->r26, regs->r27, regs->r28); pr_info(" r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n", regs->r29, regs->r30, regs->r31, regs->pc); pr_info(" msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n", regs->msr, regs->ear, regs->esr, regs->fsr); } void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); void flush_thread(void) { } int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { unsigned long clone_flags = args->flags; unsigned long usp = args->stack; unsigned long tls = args->tls; struct pt_regs *childregs = task_pt_regs(p); struct thread_info *ti = task_thread_info(p); if (unlikely(args->fn)) { /* if we're creating a new kernel thread then just zeroing all * the registers. That's OK for a brand new thread.*/ memset(childregs, 0, sizeof(struct pt_regs)); memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); ti->cpu_context.r1 = (unsigned long)childregs; ti->cpu_context.r20 = (unsigned long)args->fn; ti->cpu_context.r19 = (unsigned long)args->fn_arg; childregs->pt_mode = 1; local_save_flags(childregs->msr); ti->cpu_context.msr = childregs->msr & ~MSR_IE; ti->cpu_context.r15 = (unsigned long)ret_from_kernel_thread - 8; return 0; } *childregs = *current_pt_regs(); if (usp) childregs->r1 = usp; memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); ti->cpu_context.r1 = (unsigned long)childregs; childregs->msr |= MSR_UMS; /* we should consider the fact that childregs is a copy of the parent * regs which were saved immediately after entering the kernel state * before enabling VM. This MSR will be restored in switch_to and * RETURN() and we want to have the right machine state there * specifically this state must have INTs disabled before and enabled * after performing rtbd * compose the right MSR for RETURN(). 
It will work for switch_to also * excepting for VM and UMS * don't touch UMS , CARRY and cache bits * right now MSR is a copy of parent one */ childregs->msr &= ~MSR_EIP; childregs->msr |= MSR_IE; childregs->msr &= ~MSR_VM; childregs->msr |= MSR_VMS; childregs->msr |= MSR_EE; /* exceptions will be enabled*/ ti->cpu_context.msr = (childregs->msr|MSR_VM); ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */ ti->cpu_context.msr &= ~MSR_IE; ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8; /* * r21 is the thread reg, r10 is 6th arg to clone * which contains TLS area */ if (clone_flags & CLONE_SETTLS) childregs->r21 = tls; return 0; } unsigned long __get_wchan(struct task_struct *p) { /* TBD (used by procfs) */ return 0; } /* Set up a thread for executing a new program */ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp) { regs->pc = pc; regs->r1 = usp; regs->pt_mode = 0; regs->msr |= MSR_UMS; regs->msr &= ~MSR_VM; } #include <linux/elfcore.h> /* * Set up a thread for executing a new program */ int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu) { return 0; /* MicroBlaze has no separate FPU registers */ } void arch_cpu_idle(void) { }
linux-master
arch/microblaze/kernel/process.c
/*
 * Copyright (C) 2007-2009 Michal Simek <[email protected]>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/seq_file.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_irq.h>

void __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	trace_hardirqs_off();

	irq_enter();
	handle_arch_irq(regs);
	irq_exit();
	set_irq_regs(old_regs);
	trace_hardirqs_on();
}

void __init init_IRQ(void)
{
	/* process the entire interrupt tree in one go */
	irqchip_init();
}
linux-master
arch/microblaze/kernel/irq.c
/* * `ptrace' system call * * Copyright (C) 2008-2009 Michal Simek <[email protected]> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2004-2007 John Williams <[email protected]> * * derived from arch/v850/kernel/ptrace.c * * Copyright (C) 2002,03 NEC Electronics Corporation * Copyright (C) 2002,03 Miles Bader <[email protected]> * * Derived from arch/mips/kernel/ptrace.c: * * Copyright (C) 1992 Ross Biro * Copyright (C) Linus Torvalds * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle * Copyright (C) 1996 David S. Miller * Kevin D. Kissell, [email protected] and Carsten Langgaard, [email protected] * Copyright (C) 1999 MIPS Technologies, Inc. * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/ptrace.h> #include <linux/signal.h> #include <linux/elf.h> #include <linux/audit.h> #include <linux/seccomp.h> #include <linux/errno.h> #include <asm/processor.h> #include <linux/uaccess.h> #include <asm/asm-offsets.h> #include <asm/cacheflush.h> #include <asm/syscall.h> #include <linux/io.h> /* Returns the address where the register at REG_OFFS in P is stashed away. */ static microblaze_reg_t *reg_save_addr(unsigned reg_offs, struct task_struct *t) { struct pt_regs *regs; /* * Three basic cases: * * (1) A register normally saved before calling the scheduler, is * available in the kernel entry pt_regs structure at the top * of the kernel stack. The kernel trap/irq exit path takes * care to save/restore almost all registers for ptrace'd * processes. * * (2) A call-clobbered register, where the process P entered the * kernel via [syscall] trap, is not stored anywhere; that's * OK, because such registers are not expected to be preserved * when the trap returns anyway (so we don't actually bother to * test for this case). * * (3) A few registers not used at all by the kernel, and so * normally never saved except by context-switches, are in the * context switch state. */ /* Register saved during kernel entry (or not available). */ regs = task_pt_regs(t); return (microblaze_reg_t *)((char *)regs + reg_offs); } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int rval; unsigned long val = 0; switch (request) { /* Read/write the word at location ADDR in the registers. */ case PTRACE_PEEKUSR: case PTRACE_POKEUSR: pr_debug("PEEKUSR/POKEUSR : 0x%08lx\n", addr); rval = 0; if (addr >= PT_SIZE && request == PTRACE_PEEKUSR) { /* * Special requests that don't actually correspond * to offsets in struct pt_regs. */ if (addr == PT_TEXT_ADDR) { val = child->mm->start_code; } else if (addr == PT_DATA_ADDR) { val = child->mm->start_data; } else if (addr == PT_TEXT_LEN) { val = child->mm->end_code - child->mm->start_code; } else { rval = -EIO; } } else if (addr < PT_SIZE && (addr & 0x3) == 0) { microblaze_reg_t *reg_addr = reg_save_addr(addr, child); if (request == PTRACE_PEEKUSR) val = *reg_addr; else { #if 1 *reg_addr = data; #else /* MS potential problem on WB system * Be aware that reg_addr is virtual address * virt_to_phys conversion is necessary. * This could be sensible solution. 
*/ u32 paddr = virt_to_phys((u32)reg_addr); invalidate_icache_range(paddr, paddr + 4); *reg_addr = data; flush_dcache_range(paddr, paddr + 4); #endif } } else rval = -EIO; if (rval == 0 && request == PTRACE_PEEKUSR) rval = put_user(val, (unsigned long __user *)data); break; default: rval = ptrace_request(child, request, addr, data); } return rval; } asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs) { unsigned long ret = 0; secure_computing_strict(regs->r12); if (test_thread_flag(TIF_SYSCALL_TRACE) && ptrace_report_syscall_entry(regs)) /* * Tracing decided this syscall should not happen. * We'll return a bogus call number to get an ENOSYS * error, but leave the original number in regs->regs[0]. */ ret = -1L; audit_syscall_entry(regs->r12, regs->r5, regs->r6, regs->r7, regs->r8); return ret ?: regs->r12; } asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) { int step; audit_syscall_exit(regs); step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) ptrace_report_syscall_exit(regs, step); } void ptrace_disable(struct task_struct *child) { /* nothing to do */ }
linux-master
arch/microblaze/kernel/ptrace.c
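reg_save_addr() above turns a PTRACE_PEEKUSR offset into a pointer by adding the byte offset to the saved pt_regs, which works because the PT_* constants exported from asm-offsets.c are offsetof() values into that structure. The sketch below shows the same offset-to-pointer trick on a small made-up structure; fake_regs, reg_addr and the register values are illustrative, not the kernel's pt_regs layout.

/* Userspace sketch of the reg_save_addr() idea: a register is addressed
 * as base-of-struct plus an exported byte offset. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct fake_regs {
	unsigned long r1;
	unsigned long pc;
	unsigned long msr;
};

static unsigned long *reg_addr(struct fake_regs *regs, size_t reg_offs)
{
	return (unsigned long *)((char *)regs + reg_offs);
}

int main(void)
{
	struct fake_regs regs = { .r1 = 0x1000, .pc = 0x2000, .msr = 0x4 };

	/* these play the role of the PT_* constants from asm-offsets.c */
	size_t PT_R1 = offsetof(struct fake_regs, r1);
	size_t PT_PC = offsetof(struct fake_regs, pc);

	assert(*reg_addr(&regs, PT_R1) == 0x1000);	/* PEEKUSR-style read  */
	*reg_addr(&regs, PT_PC) = 0x3000;		/* POKEUSR-style write */
	assert(regs.pc == 0x3000);
	puts("offset-addressed register access works");
	return 0;
}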
/* * Copyright (C) 2007-2009 Michal Simek <[email protected]> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/kallsyms.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/debug_locks.h> #include <asm/exceptions.h> #include <asm/unwind.h> void trap_init(void) { __enable_hw_exceptions(); } static unsigned long kstack_depth_to_print; /* 0 == entire stack */ static int __init kstack_setup(char *s) { return !kstrtoul(s, 0, &kstack_depth_to_print); } __setup("kstack=", kstack_setup); void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { unsigned long words_to_show; u32 fp = (u32) sp; if (fp == 0) { if (task) { fp = ((struct thread_info *) (task->stack))->cpu_context.r1; } else { /* Pick up caller of dump_stack() */ fp = (u32)&sp - 8; } } words_to_show = (THREAD_SIZE - (fp & (THREAD_SIZE - 1))) >> 2; if (kstack_depth_to_print && (words_to_show > kstack_depth_to_print)) words_to_show = kstack_depth_to_print; printk("%sKernel Stack:\n", loglvl); /* * Make the first line an 'odd' size if necessary to get * remaining lines to start at an address multiple of 0x10 */ if (fp & 0xF) { unsigned long line1_words = (0x10 - (fp & 0xF)) >> 2; if (line1_words < words_to_show) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp, line1_words << 2, 0); fp += line1_words << 2; words_to_show -= line1_words; } } print_hex_dump(loglvl, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp, words_to_show << 2, 0); printk("%s\n\nCall Trace:\n", loglvl); microblaze_unwind(task, NULL, loglvl); printk("%s\n", loglvl); if (!task) task = current; debug_show_held_locks(task); }
linux-master
arch/microblaze/kernel/traps.c
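show_stack() above sizes its hex dump so that, after a possibly short first line, every following line starts on a 16-byte boundary: it counts the words remaining up to the end of the THREAD_SIZE-aligned stack area and peels off (0x10 - (fp & 0xF)) / 4 words first. The sketch below reproduces only that arithmetic; the 8 KB THREAD_SIZE and the sample frame pointer are illustrative assumptions.

/* Userspace sketch of the alignment math in show_stack(): how many words
 * remain in the stack area, and how many go on the first (odd-sized) line
 * so later lines start at a multiple of 0x10. */
#include <assert.h>
#include <stdio.h>

#define THREAD_SIZE	8192UL		/* illustrative stack size */

int main(void)
{
	unsigned long fp = 0x8003f0a4;	/* sample frame pointer, not 16-byte aligned */

	unsigned long words_to_show = (THREAD_SIZE - (fp & (THREAD_SIZE - 1))) >> 2;
	unsigned long line1_words = 0;

	if (fp & 0xF)
		line1_words = (0x10 - (fp & 0xF)) >> 2;

	assert((fp + (line1_words << 2)) % 0x10 == 0);	/* rest of dump is aligned */
	printf("%lu words total, %lu on the first line\n",
	       words_to_show, line1_words);
	return 0;
}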
/* * Copyright (C) 2007-2009 Michal Simek <[email protected]> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/stddef.h> #include <linux/sched.h> #include <linux/kernel_stat.h> #include <linux/ptrace.h> #include <linux/hardirq.h> #include <linux/thread_info.h> #include <linux/kbuild.h> #include <asm/cpuinfo.h> int main(int argc, char *argv[]) { /* struct pt_regs */ DEFINE(PT_SIZE, sizeof(struct pt_regs)); DEFINE(PT_MSR, offsetof(struct pt_regs, msr)); DEFINE(PT_EAR, offsetof(struct pt_regs, ear)); DEFINE(PT_ESR, offsetof(struct pt_regs, esr)); DEFINE(PT_FSR, offsetof(struct pt_regs, fsr)); DEFINE(PT_PC, offsetof(struct pt_regs, pc)); DEFINE(PT_R0, offsetof(struct pt_regs, r0)); DEFINE(PT_R1, offsetof(struct pt_regs, r1)); DEFINE(PT_R2, offsetof(struct pt_regs, r2)); DEFINE(PT_R3, offsetof(struct pt_regs, r3)); DEFINE(PT_R4, offsetof(struct pt_regs, r4)); DEFINE(PT_R5, offsetof(struct pt_regs, r5)); DEFINE(PT_R6, offsetof(struct pt_regs, r6)); DEFINE(PT_R7, offsetof(struct pt_regs, r7)); DEFINE(PT_R8, offsetof(struct pt_regs, r8)); DEFINE(PT_R9, offsetof(struct pt_regs, r9)); DEFINE(PT_R10, offsetof(struct pt_regs, r10)); DEFINE(PT_R11, offsetof(struct pt_regs, r11)); DEFINE(PT_R12, offsetof(struct pt_regs, r12)); DEFINE(PT_R13, offsetof(struct pt_regs, r13)); DEFINE(PT_R14, offsetof(struct pt_regs, r14)); DEFINE(PT_R15, offsetof(struct pt_regs, r15)); DEFINE(PT_R16, offsetof(struct pt_regs, r16)); DEFINE(PT_R17, offsetof(struct pt_regs, r17)); DEFINE(PT_R18, offsetof(struct pt_regs, r18)); DEFINE(PT_R19, offsetof(struct pt_regs, r19)); DEFINE(PT_R20, offsetof(struct pt_regs, r20)); DEFINE(PT_R21, offsetof(struct pt_regs, r21)); DEFINE(PT_R22, offsetof(struct pt_regs, r22)); DEFINE(PT_R23, offsetof(struct pt_regs, r23)); DEFINE(PT_R24, offsetof(struct pt_regs, r24)); DEFINE(PT_R25, offsetof(struct pt_regs, r25)); DEFINE(PT_R26, offsetof(struct pt_regs, r26)); DEFINE(PT_R27, offsetof(struct pt_regs, r27)); DEFINE(PT_R28, offsetof(struct pt_regs, r28)); DEFINE(PT_R29, offsetof(struct pt_regs, r29)); DEFINE(PT_R30, offsetof(struct pt_regs, r30)); DEFINE(PT_R31, offsetof(struct pt_regs, r31)); DEFINE(PT_MODE, offsetof(struct pt_regs, pt_mode)); BLANK(); /* Magic offsets for PTRACE PEEK/POKE etc */ DEFINE(PT_TEXT_ADDR, sizeof(struct pt_regs) + 1); DEFINE(PT_TEXT_LEN, sizeof(struct pt_regs) + 2); DEFINE(PT_DATA_ADDR, sizeof(struct pt_regs) + 3); BLANK(); /* struct task_struct */ DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack)); DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); DEFINE(TASK_MM, offsetof(struct task_struct, mm)); DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); DEFINE(TASK_PID, offsetof(struct task_struct, pid)); DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); BLANK(); DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); BLANK(); /* struct thread_info */ DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count)); BLANK(); /* struct cpu_context 
*/ DEFINE(CC_R1, offsetof(struct cpu_context, r1)); /* r1 */ DEFINE(CC_R2, offsetof(struct cpu_context, r2)); /* dedicated registers */ DEFINE(CC_R13, offsetof(struct cpu_context, r13)); DEFINE(CC_R14, offsetof(struct cpu_context, r14)); DEFINE(CC_R15, offsetof(struct cpu_context, r15)); DEFINE(CC_R16, offsetof(struct cpu_context, r16)); DEFINE(CC_R17, offsetof(struct cpu_context, r17)); DEFINE(CC_R18, offsetof(struct cpu_context, r18)); /* non-volatile registers */ DEFINE(CC_R19, offsetof(struct cpu_context, r19)); DEFINE(CC_R20, offsetof(struct cpu_context, r20)); DEFINE(CC_R21, offsetof(struct cpu_context, r21)); DEFINE(CC_R22, offsetof(struct cpu_context, r22)); DEFINE(CC_R23, offsetof(struct cpu_context, r23)); DEFINE(CC_R24, offsetof(struct cpu_context, r24)); DEFINE(CC_R25, offsetof(struct cpu_context, r25)); DEFINE(CC_R26, offsetof(struct cpu_context, r26)); DEFINE(CC_R27, offsetof(struct cpu_context, r27)); DEFINE(CC_R28, offsetof(struct cpu_context, r28)); DEFINE(CC_R29, offsetof(struct cpu_context, r29)); DEFINE(CC_R30, offsetof(struct cpu_context, r30)); /* special purpose registers */ DEFINE(CC_MSR, offsetof(struct cpu_context, msr)); DEFINE(CC_EAR, offsetof(struct cpu_context, ear)); DEFINE(CC_ESR, offsetof(struct cpu_context, esr)); DEFINE(CC_FSR, offsetof(struct cpu_context, fsr)); BLANK(); /* struct cpuinfo */ DEFINE(CI_DCS, offsetof(struct cpuinfo, dcache_size)); DEFINE(CI_DCL, offsetof(struct cpuinfo, dcache_line_length)); DEFINE(CI_ICS, offsetof(struct cpuinfo, icache_size)); DEFINE(CI_ICL, offsetof(struct cpuinfo, icache_line_length)); BLANK(); return 0; }
linux-master
arch/microblaze/kernel/asm-offsets.c
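The asm-offsets.c file above is never linked into the kernel; it is only compiled to assembly so the DEFINE()/BLANK() markers can be rewritten into a generated header of #define constants for use from entry.S. The following is a minimal stand-alone sketch of that pattern, assuming the usual kbuild DEFINE() trick; the struct and symbol names are made up for illustration, and the file is meant to be compiled with "gcc -S" and inspected, not executed.

/* Minimal sketch of the kbuild asm-offsets pattern: offsetof() values are
 * emitted as ".ascii" markers in the generated assembly, which a later
 * script rewrites into #defines. Build with "gcc -S" and read the .s file. */
#include <stddef.h>

struct example_regs {           /* illustrative stand-in for struct pt_regs */
        unsigned long r1;
        unsigned long pc;
};

#define DEFINE(sym, val) \
        asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

int main(void)
{
        DEFINE(EX_R1, offsetof(struct example_regs, r1));
        DEFINE(EX_PC, offsetof(struct example_regs, pc));
        return 0;
}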
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2007-2009 Michal Simek <[email protected]> * Copyright (C) 2007-2009 PetaLogix */ #include <linux/export.h> #include <linux/moduleloader.h> #include <linux/kernel.h> #include <linux/elf.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/pgtable.h> #include <asm/cacheflush.h> int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *module) { unsigned int i; Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; Elf32_Sym *sym; unsigned long int *location; unsigned long int value; pr_debug("Applying add relocation section %u to %u\n", relsec, sechdrs[relsec].sh_info); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rela[i].r_offset; sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + ELF32_R_SYM(rela[i].r_info); value = sym->st_value + rela[i].r_addend; switch (ELF32_R_TYPE(rela[i].r_info)) { /* * Be careful! mb-gcc / mb-ld splits the relocs between the * text and the reloc table. In general this means we must * read the current contents of (*location), add any offset * then store the result back in */ case R_MICROBLAZE_32: *location = value; break; case R_MICROBLAZE_64: location[0] = (location[0] & 0xFFFF0000) | (value >> 16); location[1] = (location[1] & 0xFFFF0000) | (value & 0xFFFF); break; case R_MICROBLAZE_64_PCREL: value -= (unsigned long int)(location) + 4; location[0] = (location[0] & 0xFFFF0000) | (value >> 16); location[1] = (location[1] & 0xFFFF0000) | (value & 0xFFFF); pr_debug("R_MICROBLAZE_64_PCREL (%08lx)\n", value); break; case R_MICROBLAZE_32_PCREL_LO: pr_debug("R_MICROBLAZE_32_PCREL_LO\n"); break; case R_MICROBLAZE_64_NONE: pr_debug("R_MICROBLAZE_64_NONE\n"); break; case R_MICROBLAZE_NONE: pr_debug("R_MICROBLAZE_NONE\n"); break; default: pr_err("module %s: Unknown relocation: %u\n", module->name, ELF32_R_TYPE(rela[i].r_info)); return -ENOEXEC; } } return 0; } int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *module) { flush_dcache(); return 0; }
linux-master
arch/microblaze/kernel/module.c
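For the R_MICROBLAZE_64 and R_MICROBLAZE_64_PCREL cases above, the relocated 32-bit value is split across the 16-bit immediates of an imm/addik instruction pair. A stand-alone sketch of that patching step follows; the instruction encodings are illustrative, not lifted from the module loader.

/* Sketch of R_MICROBLAZE_64-style patching: keep the opcode halves of the
 * two instruction words and drop the value's high and low 16 bits into
 * their immediate fields. */
#include <stdint.h>
#include <stdio.h>

static void patch_imm_pair(uint32_t insn[2], uint32_t value)
{
        insn[0] = (insn[0] & 0xFFFF0000u) | (value >> 16);      /* imm: high half */
        insn[1] = (insn[1] & 0xFFFF0000u) | (value & 0xFFFFu);  /* addik: low half */
}

int main(void)
{
        /* "imm 0" followed by "addik r3, r0, 0" (illustrative encodings) */
        uint32_t pair[2] = { 0xB0000000u, 0x30600000u };

        patch_imm_pair(pair, 0x12345678u);
        printf("imm -> %08X, addik -> %08X\n",
               (unsigned int)pair[0], (unsigned int)pair[1]);
        return 0;
}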
/*
 * Copyright (C) 2009 Michal Simek <[email protected]>
 * Copyright (C) 2009 PetaLogix
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>
#include <linux/delay.h>
#include <linux/reboot.h>

void machine_shutdown(void)
{
	pr_notice("Machine shutdown...\n");
	while (1)
		;
}

void machine_halt(void)
{
	pr_notice("Machine halt...\n");
	while (1)
		;
}

void machine_power_off(void)
{
	pr_notice("Machine power off...\n");
	while (1)
		;
}

void machine_restart(char *cmd)
{
	do_kernel_restart(cmd);
	/* Give the restart hook 1 s to take us down */
	mdelay(1000);
	pr_emerg("Reboot failed -- System halted\n");
	while (1);
}
linux-master
arch/microblaze/kernel/reset.c
/* * Backtrace support for Microblaze * * Copyright (C) 2010 Digital Design Corporation * * Based on arch/sh/kernel/cpu/sh5/unwind.c code which is: * Copyright (C) 2004 Paul Mundt * Copyright (C) 2004 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ /* #define DEBUG 1 */ #include <linux/export.h> #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/io.h> #include <asm/sections.h> #include <asm/exceptions.h> #include <asm/unwind.h> #include <asm/switch_to.h> struct stack_trace; /* * On Microblaze, finding the previous stack frame is a little tricky. * At this writing (3/2010), Microblaze does not support CONFIG_FRAME_POINTERS, * and even if it did, gcc (4.1.2) does not store the frame pointer at * a consistent offset within each frame. To determine frame size, it is * necessary to search for the assembly instruction that creates or reclaims * the frame and extract the size from it. * * Microblaze stores the stack pointer in r1, and creates a frame via * * addik r1, r1, -FRAME_SIZE * * The frame is reclaimed via * * addik r1, r1, FRAME_SIZE * * Frame creation occurs at or near the top of a function. * Depending on the compiler, reclaim may occur at the end, or before * a mid-function return. * * A stack frame is usually not created in a leaf function. * */ /** * get_frame_size - Extract the stack adjustment from an * "addik r1, r1, adjust" instruction * @instr : Microblaze instruction * * Return - Number of stack bytes the instruction reserves or reclaims */ static inline long get_frame_size(unsigned long instr) { return abs((s16)(instr & 0xFFFF)); } /** * find_frame_creation - Search backward to find the instruction that creates * the stack frame (hopefully, for the same function the * initial PC is in). * @pc : Program counter at which to begin the search * * Return - PC at which stack frame creation occurs * NULL if this cannot be found, i.e. a leaf function */ static unsigned long *find_frame_creation(unsigned long *pc) { int i; /* NOTE: Distance to search is arbitrary * 250 works well for most things, * 750 picks up things like tcp_recvmsg(), * 1000 needed for fat_fill_super() */ for (i = 0; i < 1000; i++, pc--) { unsigned long instr; s16 frame_size; if (!kernel_text_address((unsigned long) pc)) return NULL; instr = *pc; /* addik r1, r1, foo ? */ if ((instr & 0xFFFF0000) != 0x30210000) continue; /* No */ frame_size = get_frame_size(instr); if ((frame_size < 8) || (frame_size & 3)) { pr_debug(" Invalid frame size %d at 0x%p\n", frame_size, pc); return NULL; } pr_debug(" Found frame creation at 0x%p, size %d\n", pc, frame_size); return pc; } return NULL; } /** * lookup_prev_stack_frame - Find the stack frame of the previous function. * @fp : Frame (stack) pointer for current function * @pc : Program counter within current function * @leaf_return : r15 value within current function. If the current function * is a leaf, this is the caller's return address. 
* @pprev_fp : On exit, set to frame (stack) pointer for previous function * @pprev_pc : On exit, set to current function caller's return address * * Return - 0 on success, -EINVAL if the previous frame cannot be found */ static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc, unsigned long leaf_return, unsigned long *pprev_fp, unsigned long *pprev_pc) { unsigned long *prologue = NULL; /* _switch_to is a special leaf function */ if (pc != (unsigned long) &_switch_to) prologue = find_frame_creation((unsigned long *)pc); if (prologue) { long frame_size = get_frame_size(*prologue); *pprev_fp = fp + frame_size; *pprev_pc = *(unsigned long *)fp; } else { if (!leaf_return) return -EINVAL; *pprev_pc = leaf_return; *pprev_fp = fp; } /* NOTE: don't check kernel_text_address here, to allow display * of userland return address */ return (!*pprev_pc || (*pprev_pc & 3)) ? -EINVAL : 0; } static void microblaze_unwind_inner(struct task_struct *task, unsigned long pc, unsigned long fp, unsigned long leaf_return, struct stack_trace *trace, const char *loglvl); /** * unwind_trap - Unwind through a system trap, that stored previous state * on the stack. */ static inline void unwind_trap(struct task_struct *task, unsigned long pc, unsigned long fp, struct stack_trace *trace, const char *loglvl) { /* To be implemented */ } /** * microblaze_unwind_inner - Unwind the stack from the specified point * @task : Task whose stack we are to unwind (may be NULL) * @pc : Program counter from which we start unwinding * @fp : Frame (stack) pointer from which we start unwinding * @leaf_return : Value of r15 at pc. If the function is a leaf, this is * the caller's return address. * @trace : Where to store stack backtrace (PC values). * NULL == print backtrace to kernel log * @loglvl : Used for printk log level if (trace == NULL). */ static void microblaze_unwind_inner(struct task_struct *task, unsigned long pc, unsigned long fp, unsigned long leaf_return, struct stack_trace *trace, const char *loglvl) { int ofs = 0; pr_debug(" Unwinding with PC=%p, FP=%p\n", (void *)pc, (void *)fp); if (!pc || !fp || (pc & 3) || (fp & 3)) { pr_debug(" Invalid state for unwind, aborting\n"); return; } for (; pc != 0;) { unsigned long next_fp, next_pc = 0; unsigned long return_to = pc + 2 * sizeof(unsigned long); const struct trap_handler_info *handler = &microblaze_trap_handlers; /* Is previous function the HW exception handler? */ if ((return_to >= (unsigned long)&_hw_exception_handler) &&(return_to < (unsigned long)&ex_handler_unhandled)) { /* * HW exception handler doesn't save all registers, * so we open-code a special case of unwind_trap() */ printk("%sHW EXCEPTION\n", loglvl); return; } /* Is previous function a trap handler? */ for (; handler->start_addr; ++handler) { if ((return_to >= handler->start_addr) && (return_to <= handler->end_addr)) { if (!trace) printk("%s%s\n", loglvl, handler->trap_name); unwind_trap(task, pc, fp, trace, loglvl); return; } } pc -= ofs; if (trace) { #ifdef CONFIG_STACKTRACE if (trace->skip > 0) trace->skip--; else trace->entries[trace->nr_entries++] = pc; if (trace->nr_entries >= trace->max_entries) break; #endif } else { /* Have we reached userland? 
*/ if (unlikely(pc == task_pt_regs(task)->pc)) { printk("%s[<%p>] PID %lu [%s]\n", loglvl, (void *) pc, (unsigned long) task->pid, task->comm); break; } else print_ip_sym(loglvl, pc); } /* Stop when we reach anything not part of the kernel */ if (!kernel_text_address(pc)) break; if (lookup_prev_stack_frame(fp, pc, leaf_return, &next_fp, &next_pc) == 0) { ofs = sizeof(unsigned long); pc = next_pc & ~3; fp = next_fp; leaf_return = 0; } else { pr_debug(" Failed to find previous stack frame\n"); break; } pr_debug(" Next PC=%p, next FP=%p\n", (void *)next_pc, (void *)next_fp); } } /** * microblaze_unwind - Stack unwinder for Microblaze (external entry point) * @task : Task whose stack we are to unwind (NULL == current) * @trace : Where to store stack backtrace (PC values). * NULL == print backtrace to kernel log * @loglvl : Used for printk log level if (trace == NULL). */ void microblaze_unwind(struct task_struct *task, struct stack_trace *trace, const char *loglvl) { if (task) { if (task == current) { const struct pt_regs *regs = task_pt_regs(task); microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace, loglvl); } else { struct thread_info *thread_info = (struct thread_info *)(task->stack); const struct cpu_context *cpu_context = &thread_info->cpu_context; microblaze_unwind_inner(task, (unsigned long) &_switch_to, cpu_context->r1, cpu_context->r15, trace, loglvl); } } else { unsigned long pc, fp; __asm__ __volatile__ ("or %0, r1, r0" : "=r" (fp)); __asm__ __volatile__ ( "brlid %0, 0f;" "nop;" "0:" : "=r" (pc) ); /* Since we are not a leaf function, use leaf_return = 0 */ microblaze_unwind_inner(current, pc, fp, 0, trace, loglvl); } }
linux-master
arch/microblaze/kernel/unwind.c
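The key heuristic in unwind.c is recognising the "addik r1, r1, imm" prologue/epilogue instruction (opcode pattern 0x3021xxxx) and taking the absolute value of its signed 16-bit immediate as the frame size. A small stand-alone sketch of that decode step, mirroring find_frame_creation() and get_frame_size():

/* Stand-alone sketch of the frame-size decode: match "addik r1, r1, imm"
 * and return |imm|, or -1 if the word is some other instruction. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static long frame_size_from_insn(uint32_t insn)
{
        if ((insn & 0xFFFF0000u) != 0x30210000u)
                return -1;                      /* not "addik r1, r1, imm" */
        return labs((long)(int16_t)(insn & 0xFFFFu));
}

int main(void)
{
        /* "addik r1, r1, -40": the 16-bit immediate is 0xFFD8 */
        printf("frame size: %ld bytes\n", frame_size_from_insn(0x3021FFD8u));
        return 0;
}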
/* * Copyright (C) 2007-2009 Michal Simek <[email protected]> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/of_clk.h> #include <linux/clocksource.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/cpu.h> #include <linux/initrd.h> #include <linux/console.h> #include <linux/debugfs.h> #include <linux/of_fdt.h> #include <linux/pgtable.h> #include <asm/setup.h> #include <asm/sections.h> #include <asm/page.h> #include <linux/io.h> #include <linux/bug.h> #include <linux/param.h> #include <linux/pci.h> #include <linux/cache.h> #include <linux/of.h> #include <linux/dma-mapping.h> #include <asm/cacheflush.h> #include <asm/entry.h> #include <asm/cpuinfo.h> DEFINE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */ DEFINE_PER_CPU(unsigned int, KM); /* Kernel/user mode */ DEFINE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */ DEFINE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */ DEFINE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */ /* * Placed cmd_line to .data section because can be initialized from * ASM code. Default position is BSS section which is cleared * in machine_early_init(). */ char cmd_line[COMMAND_LINE_SIZE] __section(".data"); void __init setup_arch(char **cmdline_p) { *cmdline_p = boot_command_line; setup_memory(); console_verbose(); unflatten_device_tree(); setup_cpuinfo(); microblaze_cache_init(); xilinx_pci_init(); } #ifdef CONFIG_MTD_UCLINUX /* Handle both romfs and cramfs types, without generating unnecessary code (ie no point checking for CRAMFS if it's not even enabled) */ inline unsigned get_romfs_len(unsigned *addr) { #ifdef CONFIG_ROMFS_FS if (memcmp(&addr[0], "-rom1fs-", 8) == 0) /* romfs */ return be32_to_cpu(addr[2]); #endif #ifdef CONFIG_CRAMFS if (addr[0] == le32_to_cpu(0x28cd3d45)) /* cramfs */ return le32_to_cpu(addr[1]); #endif return 0; } #endif /* CONFIG_MTD_UCLINUX_EBSS */ unsigned long kernel_tlb; void __init machine_early_init(const char *cmdline, unsigned int ram, unsigned int fdt, unsigned int msr, unsigned int tlb0, unsigned int tlb1) { unsigned long *src, *dst; unsigned int offset = 0; /* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the * end of kernel. There are two position which we want to check. * The first is __init_end and the second __bss_start. */ #ifdef CONFIG_MTD_UCLINUX int romfs_size; unsigned int romfs_base; char *old_klimit = klimit; romfs_base = (ram ? 
ram : (unsigned int)&__init_end); romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); if (!romfs_size) { romfs_base = (unsigned int)&__bss_start; romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); } /* Move ROMFS out of BSS before clearing it */ if (romfs_size > 0) { memmove(&__bss_stop, (int *)romfs_base, romfs_size); klimit += romfs_size; } #endif /* clearing bss section */ memset(__bss_start, 0, __bss_stop-__bss_start); memset(_ssbss, 0, _esbss-_ssbss); /* initialize device tree for usage in early_printk */ early_init_devtree(_fdt_start); /* setup kernel_tlb after BSS cleaning * Maybe worth to move to asm code */ kernel_tlb = tlb0 + tlb1; /* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0, tlb1, kernel_tlb); */ pr_info("Ramdisk addr 0x%08x, ", ram); if (fdt) pr_info("FDT at 0x%08x\n", fdt); else pr_info("Compiled-in FDT at %p\n", _fdt_start); #ifdef CONFIG_MTD_UCLINUX pr_info("Found romfs @ 0x%08x (0x%08x)\n", romfs_base, romfs_size); pr_info("#### klimit %p ####\n", old_klimit); BUG_ON(romfs_size < 0); /* What else can we do? */ pr_info("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", romfs_size, romfs_base, (unsigned)&__bss_stop); pr_info("New klimit: 0x%08x\n", (unsigned)klimit); #endif #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR if (msr) { pr_info("!!!Your kernel has setup MSR instruction but "); pr_cont("CPU don't have it %x\n", msr); } #else if (!msr) { pr_info("!!!Your kernel not setup MSR instruction but "); pr_cont("CPU have it %x\n", msr); } #endif /* Do not copy reset vectors. offset = 0x2 means skip the first * two instructions. dst is pointer to MB vectors which are placed * in block ram. If you want to copy reset vector setup offset to 0x0 */ #if !CONFIG_MANUAL_RESET_VECTOR offset = 0x2; #endif dst = (unsigned long *) (offset * sizeof(u32)); for (src = __ivt_start + offset; src < __ivt_end; src++, dst++) *dst = *src; /* Initialize global data */ per_cpu(KM, 0) = 0x1; /* We start in kernel mode */ per_cpu(CURRENT_SAVE, 0) = (unsigned long)current; } void __init time_init(void) { of_clk_init(NULL); setup_cpuinfo_clk(); timer_probe(); } #ifdef CONFIG_DEBUG_FS struct dentry *of_debugfs_root; static int microblaze_debugfs_init(void) { of_debugfs_root = debugfs_create_dir("microblaze", NULL); return 0; } arch_initcall(microblaze_debugfs_init); static int __init debugfs_tlb(void) { debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip); return 0; } device_initcall(debugfs_tlb); #endif
linux-master
arch/microblaze/kernel/setup.c
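get_romfs_len() in setup.c probes for a ROMFS image appended to the kernel by checking the "-rom1fs-" magic and reading the big-endian image size from the third 32-bit word of the superblock. Below is a user-space sketch of the same probe, run against a fabricated superblock purely for illustration.

/* User-space sketch of the ROMFS length probe: 8-byte "-rom1fs-" magic
 * followed by the big-endian full image size. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static uint32_t romfs_len(const uint32_t *addr)
{
        if (memcmp(addr, "-rom1fs-", 8) != 0)
                return 0;                       /* no ROMFS magic */
        return ntohl(addr[2]);                  /* full size, stored big-endian */
}

int main(void)
{
        uint32_t sb[4] = { 0 };

        memcpy(sb, "-rom1fs-", 8);
        sb[2] = htonl(4096);                    /* pretend the image is 4 KiB */
        printf("romfs length: %u bytes\n", romfs_len(sb));
        return 0;
}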
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <asm/cacheflush.h>

static void __dma_sync(phys_addr_t paddr, size_t size,
		       enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		flush_dcache_range(paddr, paddr + size);
		break;
	case DMA_FROM_DEVICE:
		invalidate_dcache_range(paddr, paddr + size);
		break;
	default:
		BUG();
	}
}

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	__dma_sync(paddr, size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	__dma_sync(paddr, size, dir);
}
linux-master
arch/microblaze/kernel/dma.c
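__dma_sync() above encodes the usual non-coherent DMA rule: buffers heading to the device are written back (flushed) so the device sees the CPU's data, while buffers filled by the device are invalidated so the CPU does not read stale cache lines. A tiny stand-alone sketch of that mapping, with the cache operations stubbed out:

/* Sketch of the direction-to-cache-op mapping in __dma_sync(); the cache
 * maintenance calls are replaced by printf for illustration. */
#include <stdio.h>
#include <stdlib.h>

enum dma_dir { TO_DEVICE, FROM_DEVICE, BIDIRECTIONAL };

static void dma_sync_sketch(unsigned long paddr, unsigned long size,
                            enum dma_dir dir)
{
        switch (dir) {
        case TO_DEVICE:
        case BIDIRECTIONAL:
                printf("flush      [%#lx, %#lx)\n", paddr, paddr + size);
                break;
        case FROM_DEVICE:
                printf("invalidate [%#lx, %#lx)\n", paddr, paddr + size);
                break;
        default:
                abort();
        }
}

int main(void)
{
        dma_sync_sketch(0x10000, 0x100, TO_DEVICE);
        dma_sync_sketch(0x20000, 0x100, FROM_DEVICE);
        return 0;
}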
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *  {engebret|bergner}@us.ibm.com
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

void __init early_init_devtree(void *params)
{
	pr_debug(" -> early_init_devtree(%p)\n", params);

	early_init_dt_scan(params);
	if (!strlen(boot_command_line))
		strscpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);

	memblock_allow_resize();

	pr_debug("Phys. mem: %lx\n",
		 (unsigned long) memblock_phys_mem_size());

	pr_debug(" <- early_init_devtree()\n");
}
linux-master
arch/microblaze/kernel/prom.c
/*
 * Stack trace support for Microblaze.
 *
 * Copyright (C) 2009 Michal Simek <[email protected]>
 * Copyright (C) 2009 PetaLogix
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/ptrace.h>
#include <asm/unwind.h>

void save_stack_trace(struct stack_trace *trace)
{
	/* Exclude our helper functions from the trace */
	trace->skip += 2;
	microblaze_unwind(NULL, trace, "");
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	microblaze_unwind(tsk, trace, "");
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
linux-master
arch/microblaze/kernel/stacktrace.c
/* * Signal handling * * Copyright (C) 2008-2009 Michal Simek <[email protected]> * Copyright (C) 2008-2009 PetaLogix * Copyright (C) 2003,2004 John Williams <[email protected]> * Copyright (C) 2001 NEC Corporation * Copyright (C) 2001 Miles Bader <[email protected]> * Copyright (C) 1999,2000 Niibe Yutaka & Kaz Kojima * Copyright (C) 1991,1992 Linus Torvalds * * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * * This file was derived from the sh version, arch/sh/kernel/signal.c * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/personality.h> #include <linux/percpu.h> #include <linux/linkage.h> #include <linux/resume_user_mode.h> #include <asm/entry.h> #include <asm/ucontext.h> #include <linux/uaccess.h> #include <linux/syscalls.h> #include <asm/cacheflush.h> #include <asm/syscalls.h> /* * Do a signal return; undo the signal stack. */ struct sigframe { struct sigcontext sc; unsigned long extramask[_NSIG_WORDS-1]; unsigned long tramp[2]; /* signal trampoline */ }; struct rt_sigframe { struct siginfo info; struct ucontext uc; unsigned long tramp[2]; /* signal trampoline */ }; static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *rval_p) { unsigned int err = 0; #define COPY(x) {err |= __get_user(regs->x, &sc->regs.x); } COPY(r0); COPY(r1); COPY(r2); COPY(r3); COPY(r4); COPY(r5); COPY(r6); COPY(r7); COPY(r8); COPY(r9); COPY(r10); COPY(r11); COPY(r12); COPY(r13); COPY(r14); COPY(r15); COPY(r16); COPY(r17); COPY(r18); COPY(r19); COPY(r20); COPY(r21); COPY(r22); COPY(r23); COPY(r24); COPY(r25); COPY(r26); COPY(r27); COPY(r28); COPY(r29); COPY(r30); COPY(r31); COPY(pc); COPY(ear); COPY(esr); COPY(fsr); #undef COPY *rval_p = regs->r3; return err; } asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->r1); sigset_t set; int rval; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval)) goto badframe; if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return rval; badframe: force_sig(SIGSEGV); return 0; } /* * Set up a signal frame. */ static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask) { int err = 0; #define COPY(x) {err |= __put_user(regs->x, &sc->regs.x); } COPY(r0); COPY(r1); COPY(r2); COPY(r3); COPY(r4); COPY(r5); COPY(r6); COPY(r7); COPY(r8); COPY(r9); COPY(r10); COPY(r11); COPY(r12); COPY(r13); COPY(r14); COPY(r15); COPY(r16); COPY(r17); COPY(r18); COPY(r19); COPY(r20); COPY(r21); COPY(r22); COPY(r23); COPY(r24); COPY(r25); COPY(r26); COPY(r27); COPY(r28); COPY(r29); COPY(r30); COPY(r31); COPY(pc); COPY(ear); COPY(esr); COPY(fsr); #undef COPY err |= __put_user(mask, &sc->oldmask); return err; } /* * Determine which stack to use.. 
*/ static inline void __user * get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size) { /* Default to using normal stack */ unsigned long sp = sigsp(regs->r1, ksig); return (void __user *)((sp - frame_size) & -8UL); } static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; int err = 0, sig = ksig->sig; unsigned long address = 0; pmd_t *pmdp; pte_t *ptep; frame = get_sigframe(ksig, regs, sizeof(*frame)); if (!access_ok(frame, sizeof(*frame))) return -EFAULT; if (ksig->ka.sa.sa_flags & SA_SIGINFO) err |= copy_siginfo_to_user(&frame->info, &ksig->info); /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); err |= __save_altstack(&frame->uc.uc_stack, regs->r1); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); /* Set up to return from userspace. If provided, use a stub already in userspace. */ /* minus 8 is offset to cater for "rtsd r15,8" */ /* addi r12, r0, __NR_sigreturn */ err |= __put_user(0x31800000 | __NR_rt_sigreturn , frame->tramp + 0); /* brki r14, 0x8 */ err |= __put_user(0xb9cc0008, frame->tramp + 1); /* Return from sighandler will jump to the tramp. Negative 8 offset because return is rtsd r15, 8 */ regs->r15 = ((unsigned long)frame->tramp)-8; address = ((unsigned long)frame->tramp); pmdp = pmd_off(current->mm, address); preempt_disable(); ptep = pte_offset_map(pmdp, address); if (ptep && pte_present(*ptep)) { address = (unsigned long) page_address(pte_page(*ptep)); /* MS: I need add offset in page */ address += ((unsigned long)frame->tramp) & ~PAGE_MASK; /* MS address is virtual */ address = __virt_to_phys(address); invalidate_icache_range(address, address + 8); flush_dcache_range(address, address + 8); } if (ptep) pte_unmap(ptep); preempt_enable(); if (err) return -EFAULT; /* Set up registers for signal handler */ regs->r1 = (unsigned long) frame; /* Signal handler args: */ regs->r5 = sig; /* arg 0: signum */ regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */ regs->r7 = (unsigned long) &frame->uc; /* arg2: ucontext */ /* Offset to handle microblaze rtid r14, 0 */ regs->pc = (unsigned long)ksig->ka.sa.sa_handler; #ifdef DEBUG_SIG pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n", current->comm, current->pid, frame, regs->pc); #endif return 0; } /* Handle restarting system calls */ static inline void handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) { switch (regs->r3) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: if (!has_handler) goto do_restart; regs->r3 = -EINTR; break; case -ERESTARTSYS: if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) { regs->r3 = -EINTR; break; } fallthrough; case -ERESTARTNOINTR: do_restart: /* offset of 4 bytes to re-execute trap (brki) instruction */ regs->pc -= 4; break; } } /* * OK, we're invoking a handler */ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); int ret; /* Set up the stack frame */ ret = setup_rt_frame(ksig, oldset, regs); signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. 
* * Note that we go through the signals twice: once to check the signals that * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. */ static void do_signal(struct pt_regs *regs, int in_syscall) { struct ksignal ksig; #ifdef DEBUG_SIG pr_info("do signal: %p %d\n", regs, in_syscall); pr_info("do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1, regs->r12, read_thread_flags()); #endif if (get_signal(&ksig)) { /* Whee! Actually deliver the signal. */ if (in_syscall) handle_restart(regs, &ksig.ka, 1); handle_signal(&ksig, regs); return; } if (in_syscall) handle_restart(regs, NULL, 0); /* * If there's no signal to deliver, we just put the saved sigmask * back. */ restore_saved_sigmask(); } asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall) { if (test_thread_flag(TIF_SIGPENDING) || test_thread_flag(TIF_NOTIFY_SIGNAL)) do_signal(regs, in_syscall); if (test_thread_flag(TIF_NOTIFY_RESUME)) resume_user_mode_work(regs); }
linux-master
arch/microblaze/kernel/signal.c
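setup_rt_frame() above writes a two-instruction return trampoline onto the user stack: an "addik r12, r0, __NR_rt_sigreturn" to load the syscall number and a "brki r14, 0x8" to trap back into the kernel. The sketch below reproduces that encoding; the syscall number is a placeholder, since the real value is not quoted in the source above.

/* Sketch of the two-word rt_sigreturn trampoline built in setup_rt_frame().
 * EXAMPLE_NR_RT_SIGRETURN is a placeholder, not the real syscall number. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_NR_RT_SIGRETURN 173u    /* placeholder for illustration */

int main(void)
{
        uint32_t tramp[2];

        tramp[0] = 0x31800000u | EXAMPLE_NR_RT_SIGRETURN; /* addik r12, r0, nr */
        tramp[1] = 0xb9cc0008u;                           /* brki r14, 0x8    */

        printf("trampoline: %08X %08X\n",
               (unsigned int)tramp[0], (unsigned int)tramp[1]);
        return 0;
}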
/* * Microblaze KGDB support * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kgdb.h> #include <linux/kdebug.h> #include <linux/irq.h> #include <linux/io.h> #include <asm/cacheflush.h> #include <asm/asm-offsets.h> #include <asm/kgdb.h> #include <asm/pvr.h> #define GDB_REG 0 #define GDB_PC 32 #define GDB_MSR 33 #define GDB_EAR 34 #define GDB_ESR 35 #define GDB_FSR 36 #define GDB_BTR 37 #define GDB_PVR 38 #define GDB_REDR 50 #define GDB_RPID 51 #define GDB_RZPR 52 #define GDB_RTLBX 53 #define GDB_RTLBSX 54 /* mfs can't read it */ #define GDB_RTLBLO 55 #define GDB_RTLBHI 56 /* keep pvr separately because it is unchangeable */ static struct pvr_s pvr; void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) { unsigned int i; unsigned long *pt_regb = (unsigned long *)regs; int temp; /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */ for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++) gdb_regs[i] = pt_regb[i]; /* Branch target register can't be changed */ __asm__ __volatile__ ("mfs %0, rbtr;" : "=r"(temp) : ); gdb_regs[GDB_BTR] = temp; /* pvr part - we have 11 pvr regs */ for (i = 0; i < sizeof(struct pvr_s)/4; i++) gdb_regs[GDB_PVR + i] = pvr.pvr[i]; /* read special registers - can't be changed */ __asm__ __volatile__ ("mfs %0, redr;" : "=r"(temp) : ); gdb_regs[GDB_REDR] = temp; __asm__ __volatile__ ("mfs %0, rpid;" : "=r"(temp) : ); gdb_regs[GDB_RPID] = temp; __asm__ __volatile__ ("mfs %0, rzpr;" : "=r"(temp) : ); gdb_regs[GDB_RZPR] = temp; __asm__ __volatile__ ("mfs %0, rtlbx;" : "=r"(temp) : ); gdb_regs[GDB_RTLBX] = temp; __asm__ __volatile__ ("mfs %0, rtlblo;" : "=r"(temp) : ); gdb_regs[GDB_RTLBLO] = temp; __asm__ __volatile__ ("mfs %0, rtlbhi;" : "=r"(temp) : ); gdb_regs[GDB_RTLBHI] = temp; } void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) { unsigned int i; unsigned long *pt_regb = (unsigned long *)regs; /* pt_regs and gdb_regs have the same 37 values. * The rest of gdb_regs are unused and can't be changed. * r0 register value can't be changed too. */ for (i = 1; i < (sizeof(struct pt_regs) / 4) - 1; i++) pt_regb[i] = gdb_regs[i]; } asmlinkage void microblaze_kgdb_break(struct pt_regs *regs) { if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0) return; /* Jump over the first arch_kgdb_breakpoint which is barrier to * get kgdb work. 
The same solution is used for powerpc */ if (*(u32 *) (regs->pc) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr)) regs->pc += BREAK_INSTR_SIZE; } /* untested */ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) { unsigned int i; unsigned long *pt_regb = (unsigned long *)(p->thread.regs); /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */ for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++) gdb_regs[i] = pt_regb[i]; /* pvr part - we have 11 pvr regs */ for (i = 0; i < sizeof(struct pvr_s)/4; i++) gdb_regs[GDB_PVR + i] = pvr.pvr[i]; } void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) { regs->pc = ip; } int kgdb_arch_handle_exception(int vector, int signo, int err_code, char *remcom_in_buffer, char *remcom_out_buffer, struct pt_regs *regs) { char *ptr; unsigned long address; switch (remcom_in_buffer[0]) { case 'c': /* handle the optional parameter */ ptr = &remcom_in_buffer[1]; if (kgdb_hex2long(&ptr, &address)) regs->pc = address; return 0; } return -1; /* this means that we do not want to exit from the handler */ } int kgdb_arch_init(void) { get_pvr(&pvr); /* Fill PVR structure */ return 0; } void kgdb_arch_exit(void) { /* Nothing to do */ } /* * Global data */ const struct kgdb_arch arch_kgdb_ops = { #ifdef __MICROBLAZEEL__ .gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */ #else .gdb_bpt_instr = {0xba, 0x0c, 0x00, 0x18}, /* brki r16, 0x18 */ #endif };
linux-master
arch/microblaze/kernel/kgdb.c
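microblaze_kgdb_break() above steps over the compiled-in breakpoint by comparing the instruction word at the trapping PC with arch_kgdb_ops.gdb_bpt_instr and, on a match, advancing the PC by one instruction. A stand-alone sketch of that check; the breakpoint bytes are the big-endian variant shown in the initializer above and are used here only as sample data.

/* Sketch of the "skip our own breakpoint" check in microblaze_kgdb_break(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BREAK_INSTR_SIZE_SKETCH 4

static unsigned long maybe_skip_breakpoint(unsigned long pc,
                                           const uint8_t *text,
                                           const uint8_t *bpt_instr)
{
        if (memcmp(text, bpt_instr, BREAK_INSTR_SIZE_SKETCH) == 0)
                return pc + BREAK_INSTR_SIZE_SKETCH;    /* step over it */
        return pc;
}

int main(void)
{
        const uint8_t bpt[4]  = { 0xba, 0x0c, 0x00, 0x18 }; /* brki r16, 0x18 (BE) */
        const uint8_t code[4] = { 0xba, 0x0c, 0x00, 0x18 };

        printf("pc: %#lx\n", maybe_skip_breakpoint(0x1000, code, bpt));
        return 0;
}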
/* * HW exception handling * * Copyright (C) 2008-2009 Michal Simek <[email protected]> * Copyright (C) 2008 PetaLogix * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. */ /* * This file handles the architecture-dependent parts of hardware exceptions */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/kallsyms.h> #include <asm/exceptions.h> #include <asm/entry.h> /* For KM CPU var */ #include <linux/uaccess.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <asm/current.h> #include <asm/cacheflush.h> #define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02 #define MICROBLAZE_IBUS_EXCEPTION 0x03 #define MICROBLAZE_DBUS_EXCEPTION 0x04 #define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05 #define MICROBLAZE_FPU_EXCEPTION 0x06 #define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07 static DEFINE_SPINLOCK(die_lock); void die(const char *str, struct pt_regs *fp, long err) { console_verbose(); spin_lock_irq(&die_lock); pr_warn("Oops: %s, sig: %ld\n", str, err); show_regs(fp); spin_unlock_irq(&die_lock); /* make_task_dead() should take care of panic'ing from an interrupt * context so we don't handle it here */ make_task_dead(err); } /* for user application debugging */ asmlinkage void sw_exception(struct pt_regs *regs) { _exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16); flush_dcache_range(regs->r16, regs->r16 + 0x4); flush_icache_range(regs->r16, regs->r16 + 0x4); } void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) { if (kernel_mode(regs)) die("Exception in kernel mode", regs, signr); force_sig_fault(signr, code, (void __user *)addr); } asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, int fsr, int addr) { addr = regs->pc; #if 0 pr_warn("Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n", type, user_mode(regs) ? 
"user" : "kernel", fsr, (unsigned int) regs->pc, (unsigned int) regs->esr); #endif switch (type & 0x1F) { case MICROBLAZE_ILL_OPCODE_EXCEPTION: if (user_mode(regs)) { pr_debug("Illegal opcode exception in user mode\n"); _exception(SIGILL, regs, ILL_ILLOPC, addr); return; } pr_warn("Illegal opcode exception in kernel mode.\n"); die("opcode exception", regs, SIGBUS); break; case MICROBLAZE_IBUS_EXCEPTION: if (user_mode(regs)) { pr_debug("Instruction bus error exception in user mode\n"); _exception(SIGBUS, regs, BUS_ADRERR, addr); return; } pr_warn("Instruction bus error exception in kernel mode.\n"); die("bus exception", regs, SIGBUS); break; case MICROBLAZE_DBUS_EXCEPTION: if (user_mode(regs)) { pr_debug("Data bus error exception in user mode\n"); _exception(SIGBUS, regs, BUS_ADRERR, addr); return; } pr_warn("Data bus error exception in kernel mode.\n"); die("bus exception", regs, SIGBUS); break; case MICROBLAZE_DIV_ZERO_EXCEPTION: if (user_mode(regs)) { pr_debug("Divide by zero exception in user mode\n"); _exception(SIGFPE, regs, FPE_INTDIV, addr); return; } pr_warn("Divide by zero exception in kernel mode.\n"); die("Divide by zero exception", regs, SIGBUS); break; case MICROBLAZE_FPU_EXCEPTION: pr_debug("FPU exception\n"); /* IEEE FP exception */ /* I removed fsr variable and use code var for storing fsr */ if (fsr & FSR_IO) fsr = FPE_FLTINV; else if (fsr & FSR_OF) fsr = FPE_FLTOVF; else if (fsr & FSR_UF) fsr = FPE_FLTUND; else if (fsr & FSR_DZ) fsr = FPE_FLTDIV; else if (fsr & FSR_DO) fsr = FPE_FLTRES; _exception(SIGFPE, regs, fsr, addr); break; case MICROBLAZE_PRIVILEGED_EXCEPTION: pr_debug("Privileged exception\n"); _exception(SIGILL, regs, ILL_PRVOPC, addr); break; default: /* FIXME what to do in unexpected exception */ pr_warn("Unexpected exception %02x PC=%08x in %s mode\n", type, (unsigned int) addr, kernel_mode(regs) ? "kernel" : "user"); } return; }
linux-master
arch/microblaze/kernel/exceptions.c
/* * Copyright (C) 2007-2009 Michal Simek <[email protected]> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2007 John Williams <[email protected]> * * Copyright (C) 2006 Atmark Techno, Inc. * Yasushi SHOJI <[email protected]> * Tetsuya OHKAWA <[email protected]> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/errno.h> #include <linux/export.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/syscalls.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/mman.h> #include <linux/sys.h> #include <linux/ipc.h> #include <linux/file.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/semaphore.h> #include <linux/uaccess.h> #include <linux/unistd.h> #include <linux/slab.h> #include <asm/syscalls.h> SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, off_t, pgoff) { if (pgoff & ~PAGE_MASK) return -EINVAL; return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); } SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) { if (pgoff & (~PAGE_MASK >> 12)) return -EINVAL; return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12)); }
linux-master
arch/microblaze/kernel/sys_microblaze.c
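sys_mmap2() above takes the file offset in fixed 4096-byte units and converts it to PAGE_SIZE pages with pgoff >> (PAGE_SHIFT - 12), rejecting offsets that are not page aligned. A small sketch of that conversion; a 16 KiB page (shift 14) is assumed here only so the alignment check is visible.

/* Sketch of the sys_mmap2() offset handling: pgoff arrives in 4096-byte
 * units and must land on a PAGE_SIZE boundary. EXAMPLE_PAGE_SHIFT is an
 * assumption for illustration, not the Microblaze configuration. */
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 14

static long mmap2_pgoff_to_pages(unsigned long pgoff)
{
        unsigned long unaligned_mask = (1UL << (EXAMPLE_PAGE_SHIFT - 12)) - 1;

        if (pgoff & unaligned_mask)
                return -1;                              /* would be -EINVAL */
        return (long)(pgoff >> (EXAMPLE_PAGE_SHIFT - 12));
}

int main(void)
{
        printf("pgoff 8 -> %ld pages\n", mmap2_pgoff_to_pages(8));      /* 2  */
        printf("pgoff 3 -> %ld\n", mmap2_pgoff_to_pages(3));            /* -1 */
        return 0;
}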
/* * Copyright (C) 2007-2009 Michal Simek <[email protected]> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2007 John Williams <[email protected]> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <asm/cpuinfo.h> #include <asm/pvr.h> static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY; static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER; #define err_printk(x) \ early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n"); void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu) { u32 i = 0; ci->use_instr = (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) | (fcpu(cpu, "xlnx,use-msr-instr") ? PVR2_USE_MSR_INSTR : 0) | (fcpu(cpu, "xlnx,use-pcmp-instr") ? PVR2_USE_PCMP_INSTR : 0) | (fcpu(cpu, "xlnx,use-div") ? PVR0_USE_DIV_MASK : 0); if (CONFIG_XILINX_MICROBLAZE0_USE_BARREL) i |= PVR0_USE_BARREL_MASK; if (CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR) i |= PVR2_USE_MSR_INSTR; if (CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) i |= PVR2_USE_PCMP_INSTR; if (CONFIG_XILINX_MICROBLAZE0_USE_DIV) i |= PVR0_USE_DIV_MASK; if (ci->use_instr != i) err_printk("BARREL, MSR, PCMP or DIV"); ci->use_mult = fcpu(cpu, "xlnx,use-hw-mul"); if (ci->use_mult != CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL) err_printk("HW_MUL"); ci->use_mult = (ci->use_mult > 1 ? (PVR2_USE_MUL64_MASK | PVR0_USE_HW_MUL_MASK) : (ci->use_mult == 1 ? PVR0_USE_HW_MUL_MASK : 0)); ci->use_fpu = fcpu(cpu, "xlnx,use-fpu"); if (ci->use_fpu != CONFIG_XILINX_MICROBLAZE0_USE_FPU) err_printk("HW_FPU"); ci->use_fpu = (ci->use_fpu > 1 ? (PVR2_USE_FPU2_MASK | PVR0_USE_FPU_MASK) : (ci->use_fpu == 1 ? PVR0_USE_FPU_MASK : 0)); ci->use_exc = (fcpu(cpu, "xlnx,unaligned-exceptions") ? PVR2_UNALIGNED_EXC_MASK : 0) | (fcpu(cpu, "xlnx,ill-opcode-exception") ? PVR2_ILL_OPCODE_EXC_MASK : 0) | (fcpu(cpu, "xlnx,iopb-bus-exception") ? PVR2_IOPB_BUS_EXC_MASK : 0) | (fcpu(cpu, "xlnx,dopb-bus-exception") ? PVR2_DOPB_BUS_EXC_MASK : 0) | (fcpu(cpu, "xlnx,div-zero-exception") ? PVR2_DIV_ZERO_EXC_MASK : 0) | (fcpu(cpu, "xlnx,fpu-exception") ? PVR2_FPU_EXC_MASK : 0) | (fcpu(cpu, "xlnx,fsl-exception") ? 
PVR2_USE_EXTEND_FSL : 0); ci->use_icache = fcpu(cpu, "xlnx,use-icache"); ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits"); ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr"); ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2; if (!ci->icache_line_length) { if (fcpu(cpu, "xlnx,icache-use-fsl")) ci->icache_line_length = 4 << 2; else ci->icache_line_length = 1 << 2; } ci->icache_size = fcpu(cpu, "i-cache-size"); ci->icache_base = fcpu(cpu, "i-cache-baseaddr"); ci->icache_high = fcpu(cpu, "i-cache-highaddr"); ci->use_dcache = fcpu(cpu, "xlnx,use-dcache"); ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag"); ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr"); ci->dcache_line_length = fcpu(cpu, "xlnx,dcache-line-len") << 2; if (!ci->dcache_line_length) { if (fcpu(cpu, "xlnx,dcache-use-fsl")) ci->dcache_line_length = 4 << 2; else ci->dcache_line_length = 1 << 2; } ci->dcache_size = fcpu(cpu, "d-cache-size"); ci->dcache_base = fcpu(cpu, "d-cache-baseaddr"); ci->dcache_high = fcpu(cpu, "d-cache-highaddr"); ci->dcache_wb = fcpu(cpu, "xlnx,dcache-use-writeback"); ci->use_dopb = fcpu(cpu, "xlnx,d-opb"); ci->use_iopb = fcpu(cpu, "xlnx,i-opb"); ci->use_dlmb = fcpu(cpu, "xlnx,d-lmb"); ci->use_ilmb = fcpu(cpu, "xlnx,i-lmb"); ci->num_fsl = fcpu(cpu, "xlnx,fsl-links"); ci->irq_edge = fcpu(cpu, "xlnx,interrupt-is-edge"); ci->irq_positive = fcpu(cpu, "xlnx,edge-is-positive"); ci->area_optimised = 0; ci->hw_debug = fcpu(cpu, "xlnx,debug-enabled"); ci->num_pc_brk = fcpu(cpu, "xlnx,number-of-pc-brk"); ci->num_rd_brk = fcpu(cpu, "xlnx,number-of-rd-addr-brk"); ci->num_wr_brk = fcpu(cpu, "xlnx,number-of-wr-addr-brk"); ci->pvr_user1 = fcpu(cpu, "xlnx,pvr-user1"); ci->pvr_user2 = fcpu(cpu, "xlnx,pvr-user2"); ci->mmu = fcpu(cpu, "xlnx,use-mmu"); ci->mmu_privins = fcpu(cpu, "xlnx,mmu-privileged-instr"); ci->endian = fcpu(cpu, "xlnx,endianness"); ci->ver_code = 0; ci->fpga_family_code = 0; /* Do various fixups based on CPU version and FPGA family strings */ /* Resolved the CPU version code */ for (i = 0; cpu_ver_lookup[i].s != NULL; i++) { if (strcmp(cpu_ver_lookup[i].s, cpu_ver_string) == 0) ci->ver_code = cpu_ver_lookup[i].k; } /* Resolved the fpga family code */ for (i = 0; family_string_lookup[i].s != NULL; i++) { if (strcmp(family_string_lookup[i].s, family_string) == 0) ci->fpga_family_code = family_string_lookup[i].k; } /* FIXME - mb3 and spartan2 do not exist in PVR */ /* This is mb3 and on a non Spartan2 */ if (ci->ver_code == 0x20 && ci->fpga_family_code != 0xf0) /* Hardware Multiplier in use */ ci->use_mult = 1; }
linux-master
arch/microblaze/kernel/cpu/cpuinfo-static.c
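set_cpuinfo_static() converts the device-tree cache-line length from 32-bit words to bytes ("<< 2") and, when the property is absent, falls back to 4 words for FSL-backed caches and 1 word otherwise. A small sketch of that defaulting logic:

/* Sketch of the cache-line-length defaulting in set_cpuinfo_static():
 * the DT property is in words; zero means "not given". */
#include <stdio.h>

static unsigned int cache_line_bytes(unsigned int dt_words, int uses_fsl)
{
        if (dt_words)
                return dt_words << 2;           /* words -> bytes */
        return (uses_fsl ? 4 : 1) << 2;
}

int main(void)
{
        printf("DT=8 words      -> %u bytes\n", cache_line_bytes(8, 0));
        printf("missing, FSL    -> %u bytes\n", cache_line_bytes(0, 1));
        printf("missing, no FSL -> %u bytes\n", cache_line_bytes(0, 0));
        return 0;
}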
/* * Support for MicroBlaze PVR (processor version register) * * Copyright (C) 2007-2009 Michal Simek <[email protected]> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2007 John Williams <[email protected]> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/string.h> #include <asm/pvr.h> #include <asm/cpuinfo.h> /* * Helper macro to map between fields in our struct cpuinfo, and * the PVR macros in pvr.h. */ #define CI(c, p) { ci->c = PVR_##p(pvr); } #define err_printk(x) \ pr_err("ERROR: Microblaze " x "-different for PVR and DTS\n"); void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) { struct pvr_s pvr; u32 temp; /* for saving temp value */ get_pvr(&pvr); CI(ver_code, VERSION); if (!ci->ver_code) { pr_err("ERROR: MB has broken PVR regs -> use DTS setting\n"); return; } temp = PVR_USE_BARREL(pvr) | PVR_USE_MSR_INSTR(pvr) | PVR_USE_PCMP_INSTR(pvr) | PVR_USE_DIV(pvr); if (ci->use_instr != temp) err_printk("BARREL, MSR, PCMP or DIV"); ci->use_instr = temp; temp = PVR_USE_HW_MUL(pvr) | PVR_USE_MUL64(pvr); if (ci->use_mult != temp) err_printk("HW_MUL"); ci->use_mult = temp; temp = PVR_USE_FPU(pvr) | PVR_USE_FPU2(pvr); if (ci->use_fpu != temp) err_printk("HW_FPU"); ci->use_fpu = temp; ci->use_exc = PVR_OPCODE_0x0_ILLEGAL(pvr) | PVR_UNALIGNED_EXCEPTION(pvr) | PVR_ILL_OPCODE_EXCEPTION(pvr) | PVR_IOPB_BUS_EXCEPTION(pvr) | PVR_DOPB_BUS_EXCEPTION(pvr) | PVR_DIV_ZERO_EXCEPTION(pvr) | PVR_FPU_EXCEPTION(pvr) | PVR_FSL_EXCEPTION(pvr); CI(pvr_user1, USER1); CI(pvr_user2, USER2); CI(mmu, USE_MMU); CI(mmu_privins, MMU_PRIVINS); CI(endian, ENDIAN); CI(use_icache, USE_ICACHE); CI(icache_tagbits, ICACHE_ADDR_TAG_BITS); CI(icache_write, ICACHE_ALLOW_WR); ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2; CI(icache_size, ICACHE_BYTE_SIZE); CI(icache_base, ICACHE_BASEADDR); CI(icache_high, ICACHE_HIGHADDR); CI(use_dcache, USE_DCACHE); CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS); CI(dcache_write, DCACHE_ALLOW_WR); ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2; CI(dcache_size, DCACHE_BYTE_SIZE); CI(dcache_base, DCACHE_BASEADDR); CI(dcache_high, DCACHE_HIGHADDR); temp = PVR_DCACHE_USE_WRITEBACK(pvr); if (ci->dcache_wb != temp) err_printk("DCACHE WB"); ci->dcache_wb = temp; CI(use_dopb, D_OPB); CI(use_iopb, I_OPB); CI(use_dlmb, D_LMB); CI(use_ilmb, I_LMB); CI(num_fsl, FSL_LINKS); CI(irq_edge, INTERRUPT_IS_EDGE); CI(irq_positive, EDGE_IS_POSITIVE); CI(area_optimised, AREA_OPTIMISED); CI(hw_debug, DEBUG_ENABLED); CI(num_pc_brk, NUMBER_OF_PC_BRK); CI(num_rd_brk, NUMBER_OF_RD_ADDR_BRK); CI(num_wr_brk, NUMBER_OF_WR_ADDR_BRK); CI(fpga_family_code, TARGET_FAMILY); }
linux-master
arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
/* * CPU-version specific code * * Copyright (C) 2007-2009 Michal Simek <[email protected]> * Copyright (C) 2006-2009 PetaLogix * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/cpu.h> #include <linux/initrd.h> #include <linux/bug.h> #include <asm/cpuinfo.h> #include <linux/delay.h> #include <linux/io.h> #include <asm/page.h> #include <linux/param.h> #include <asm/pvr.h> #include <asm/sections.h> #include <asm/setup.h> static int show_cpuinfo(struct seq_file *m, void *v) { char *fpga_family = "Unknown"; char *cpu_ver = "Unknown"; int i; /* Denormalised to get the fpga family string */ for (i = 0; family_string_lookup[i].s != NULL; i++) { if (cpuinfo.fpga_family_code == family_string_lookup[i].k) { fpga_family = (char *)family_string_lookup[i].s; break; } } /* Denormalised to get the hw version string */ for (i = 0; cpu_ver_lookup[i].s != NULL; i++) { if (cpuinfo.ver_code == cpu_ver_lookup[i].k) { cpu_ver = (char *)cpu_ver_lookup[i].s; break; } } seq_printf(m, "CPU-Family: MicroBlaze\n" "FPGA-Arch: %s\n" "CPU-Ver: %s, %s endian\n" "CPU-MHz: %d.%02d\n" "BogoMips: %lu.%02lu\n", fpga_family, cpu_ver, cpuinfo.endian ? "little" : "big", cpuinfo.cpu_clock_freq / 1000000, cpuinfo.cpu_clock_freq % 1000000, loops_per_jiffy / (500000 / HZ), (loops_per_jiffy / (5000 / HZ)) % 100); seq_printf(m, "HW:\n Shift:\t\t%s\n" " MSR:\t\t%s\n" " PCMP:\t\t%s\n" " DIV:\t\t%s\n", (cpuinfo.use_instr & PVR0_USE_BARREL_MASK) ? "yes" : "no", (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) ? "yes" : "no", (cpuinfo.use_instr & PVR2_USE_PCMP_INSTR) ? "yes" : "no", (cpuinfo.use_instr & PVR0_USE_DIV_MASK) ? "yes" : "no"); seq_printf(m, " MMU:\t\t%x\n", cpuinfo.mmu); seq_printf(m, " MUL:\t\t%s\n" " FPU:\t\t%s\n", (cpuinfo.use_mult & PVR2_USE_MUL64_MASK) ? "v2" : (cpuinfo.use_mult & PVR0_USE_HW_MUL_MASK) ? "v1" : "no", (cpuinfo.use_fpu & PVR2_USE_FPU2_MASK) ? "v2" : (cpuinfo.use_fpu & PVR0_USE_FPU_MASK) ? "v1" : "no"); seq_printf(m, " Exc:\t\t%s%s%s%s%s%s%s%s\n", (cpuinfo.use_exc & PVR2_OPCODE_0x0_ILL_MASK) ? "op0x0 " : "", (cpuinfo.use_exc & PVR2_UNALIGNED_EXC_MASK) ? "unal " : "", (cpuinfo.use_exc & PVR2_ILL_OPCODE_EXC_MASK) ? "ill " : "", (cpuinfo.use_exc & PVR2_IOPB_BUS_EXC_MASK) ? "iopb " : "", (cpuinfo.use_exc & PVR2_DOPB_BUS_EXC_MASK) ? "dopb " : "", (cpuinfo.use_exc & PVR2_DIV_ZERO_EXC_MASK) ? "zero " : "", (cpuinfo.use_exc & PVR2_FPU_EXC_MASK) ? "fpu " : "", (cpuinfo.use_exc & PVR2_USE_FSL_EXC) ? "fsl " : ""); seq_printf(m, "Stream-insns:\t%sprivileged\n", cpuinfo.mmu_privins ? "un" : ""); if (cpuinfo.use_icache) seq_printf(m, "Icache:\t\t%ukB\tline length:\t%dB\n", cpuinfo.icache_size >> 10, cpuinfo.icache_line_length); else seq_puts(m, "Icache:\t\tno\n"); if (cpuinfo.use_dcache) { seq_printf(m, "Dcache:\t\t%ukB\tline length:\t%dB\n", cpuinfo.dcache_size >> 10, cpuinfo.dcache_line_length); seq_puts(m, "Dcache-Policy:\t"); if (cpuinfo.dcache_wb) seq_puts(m, "write-back\n"); else seq_puts(m, "write-through\n"); } else { seq_puts(m, "Dcache:\t\tno\n"); } seq_printf(m, "HW-Debug:\t%s\n", cpuinfo.hw_debug ? "yes" : "no"); seq_printf(m, "PVR-USR1:\t%02x\n" "PVR-USR2:\t%08x\n", cpuinfo.pvr_user1, cpuinfo.pvr_user2); seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE); return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { int i = *pos; return i < NR_CPUS ? 
(void *) (i + 1) : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { ++*pos; return c_start(m, pos); } static void c_stop(struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_cpuinfo, };
linux-master
arch/microblaze/kernel/cpu/mb.c
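show_cpuinfo() above derives the BogoMips figure from loops_per_jiffy with the conventional lpj / (500000 / HZ) formula, printing the fractional part via lpj / (5000 / HZ) modulo 100. A stand-alone sketch of that arithmetic; HZ and loops_per_jiffy are made-up values for illustration.

/* Sketch of the BogoMips arithmetic used in show_cpuinfo(). */
#include <stdio.h>

#define EXAMPLE_HZ 100

int main(void)
{
        unsigned long loops_per_jiffy = 250000; /* illustrative */
        unsigned long whole = loops_per_jiffy / (500000 / EXAMPLE_HZ);
        unsigned long frac  = (loops_per_jiffy / (5000 / EXAMPLE_HZ)) % 100;

        printf("BogoMips: %lu.%02lu\n", whole, frac);
        return 0;
}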
/* * Support for MicroBlaze PVR (processor version register) * * Copyright (C) 2007-2009 Michal Simek <[email protected]> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2007 John Williams <[email protected]> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/compiler.h> #include <asm/exceptions.h> #include <asm/pvr.h> #include <linux/irqflags.h> /* * Until we get an assembler that knows about the pvr registers, * this horrible cruft will have to do. * That hardcoded opcode is mfs r3, rpvrNN */ #define get_single_pvr(pvrid, val) \ { \ register unsigned tmp __asm__("r3"); \ tmp = 0x0; /* Prevent warning about unused */ \ __asm__ __volatile__ ( \ "mfs %0, rpvr" #pvrid ";" \ : "=r" (tmp) : : "memory"); \ val = tmp; \ } /* * Does the CPU support the PVR register? * return value: * 0: no PVR * 1: simple PVR * 2: full PVR * * This must work on all CPU versions, including those before the * PVR was even an option. */ int cpu_has_pvr(void) { unsigned long flags; unsigned pvr0; local_save_flags(flags); /* PVR bit in MSR tells us if there is any support */ if (!(flags & PVR_MSR_BIT)) return 0; get_single_pvr(0, pvr0); pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0); if (pvr0 & PVR0_PVR_FULL_MASK) return 1; /* for partial PVR use static cpuinfo */ return 2; } void get_pvr(struct pvr_s *p) { get_single_pvr(0, p->pvr[0]); get_single_pvr(1, p->pvr[1]); get_single_pvr(2, p->pvr[2]); get_single_pvr(3, p->pvr[3]); get_single_pvr(4, p->pvr[4]); get_single_pvr(5, p->pvr[5]); get_single_pvr(6, p->pvr[6]); get_single_pvr(7, p->pvr[7]); get_single_pvr(8, p->pvr[8]); get_single_pvr(9, p->pvr[9]); get_single_pvr(10, p->pvr[10]); get_single_pvr(11, p->pvr[11]); }
linux-master
arch/microblaze/kernel/cpu/pvr.c
/* * Cache control for MicroBlaze cache memories * * Copyright (C) 2007-2009 Michal Simek <[email protected]> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2007-2009 John Williams <[email protected]> * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. */ #include <asm/cacheflush.h> #include <linux/cache.h> #include <asm/cpuinfo.h> #include <asm/pvr.h> static inline void __enable_icache_msr(void) { __asm__ __volatile__ (" msrset r0, %0;" \ "nop;" \ : : "i" (MSR_ICE) : "memory"); } static inline void __disable_icache_msr(void) { __asm__ __volatile__ (" msrclr r0, %0;" \ "nop;" \ : : "i" (MSR_ICE) : "memory"); } static inline void __enable_dcache_msr(void) { __asm__ __volatile__ (" msrset r0, %0;" \ "nop;" \ : : "i" (MSR_DCE) : "memory"); } static inline void __disable_dcache_msr(void) { __asm__ __volatile__ (" msrclr r0, %0;" \ "nop; " \ : : "i" (MSR_DCE) : "memory"); } static inline void __enable_icache_nomsr(void) { __asm__ __volatile__ (" mfs r12, rmsr;" \ "nop;" \ "ori r12, r12, %0;" \ "mts rmsr, r12;" \ "nop;" \ : : "i" (MSR_ICE) : "memory", "r12"); } static inline void __disable_icache_nomsr(void) { __asm__ __volatile__ (" mfs r12, rmsr;" \ "nop;" \ "andi r12, r12, ~%0;" \ "mts rmsr, r12;" \ "nop;" \ : : "i" (MSR_ICE) : "memory", "r12"); } static inline void __enable_dcache_nomsr(void) { __asm__ __volatile__ (" mfs r12, rmsr;" \ "nop;" \ "ori r12, r12, %0;" \ "mts rmsr, r12;" \ "nop;" \ : : "i" (MSR_DCE) : "memory", "r12"); } static inline void __disable_dcache_nomsr(void) { __asm__ __volatile__ (" mfs r12, rmsr;" \ "nop;" \ "andi r12, r12, ~%0;" \ "mts rmsr, r12;" \ "nop;" \ : : "i" (MSR_DCE) : "memory", "r12"); } /* Helper macro for computing the limits of cache range loops * * End address can be unaligned which is OK for C implementation. * ASM implementation align it in ASM macros */ #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \ do { \ int align = ~(cache_line_length - 1); \ if (start < UINT_MAX - cache_size) \ end = min(start + cache_size, end); \ start &= align; \ } while (0) /* * Helper macro to loop over the specified cache_size/line_length and * execute 'op' on that cacheline */ #define CACHE_ALL_LOOP(cache_size, line_length, op) \ do { \ unsigned int len = cache_size - line_length; \ int step = -line_length; \ WARN_ON(step >= 0); \ \ __asm__ __volatile__ (" 1: " #op " %0, r0;" \ "bgtid %0, 1b;" \ "addk %0, %0, %1;" \ : : "r" (len), "r" (step) \ : "memory"); \ } while (0) /* Used for wdc.flush/clear which can use rB for offset which is not possible * to use for simple wdc or wic. * * start address is cache aligned * end address is not aligned, if end is aligned then I have to subtract * cacheline length because I can't flush/invalidate the next cacheline. * If is not, I align it because I will flush/invalidate whole line. */ #define CACHE_RANGE_LOOP_2(start, end, line_length, op) \ do { \ int step = -line_length; \ int align = ~(line_length - 1); \ int count; \ end = ((end & align) == end) ? 
end - line_length : end & align; \ count = end - start; \ WARN_ON(count < 0); \ \ __asm__ __volatile__ (" 1: " #op " %0, %1;" \ "bgtid %1, 1b;" \ "addk %1, %1, %2;" \ : : "r" (start), "r" (count), \ "r" (step) : "memory"); \ } while (0) /* It is used only first parameter for OP - for wic, wdc */ #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ do { \ unsigned int volatile temp = 0; \ unsigned int align = ~(line_length - 1); \ end = ((end & align) == end) ? end - line_length : end & align; \ WARN_ON(end < start); \ \ __asm__ __volatile__ (" 1: " #op " %1, r0;" \ "cmpu %0, %1, %2;" \ "bgtid %0, 1b;" \ "addk %1, %1, %3;" \ : : "r" (temp), "r" (start), "r" (end), \ "r" (line_length) : "memory"); \ } while (0) #define ASM_LOOP static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) { unsigned long flags; #ifndef ASM_LOOP int i; #endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, cpuinfo.icache_line_length, cpuinfo.icache_size); local_irq_save(flags); __disable_icache_msr(); #ifdef ASM_LOOP CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); #else for (i = start; i < end; i += cpuinfo.icache_line_length) __asm__ __volatile__ ("wic %0, r0;" \ : : "r" (i)); #endif __enable_icache_msr(); local_irq_restore(flags); } static void __flush_icache_range_nomsr_irq(unsigned long start, unsigned long end) { unsigned long flags; #ifndef ASM_LOOP int i; #endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, cpuinfo.icache_line_length, cpuinfo.icache_size); local_irq_save(flags); __disable_icache_nomsr(); #ifdef ASM_LOOP CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); #else for (i = start; i < end; i += cpuinfo.icache_line_length) __asm__ __volatile__ ("wic %0, r0;" \ : : "r" (i)); #endif __enable_icache_nomsr(); local_irq_restore(flags); } static void __flush_icache_range_noirq(unsigned long start, unsigned long end) { #ifndef ASM_LOOP int i; #endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, cpuinfo.icache_line_length, cpuinfo.icache_size); #ifdef ASM_LOOP CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); #else for (i = start; i < end; i += cpuinfo.icache_line_length) __asm__ __volatile__ ("wic %0, r0;" \ : : "r" (i)); #endif } static void __flush_icache_all_msr_irq(void) { unsigned long flags; #ifndef ASM_LOOP int i; #endif pr_debug("%s\n", __func__); local_irq_save(flags); __disable_icache_msr(); #ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); #else for (i = 0; i < cpuinfo.icache_size; i += cpuinfo.icache_line_length) __asm__ __volatile__ ("wic %0, r0;" \ : : "r" (i)); #endif __enable_icache_msr(); local_irq_restore(flags); } static void __flush_icache_all_nomsr_irq(void) { unsigned long flags; #ifndef ASM_LOOP int i; #endif pr_debug("%s\n", __func__); local_irq_save(flags); __disable_icache_nomsr(); #ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); #else for (i = 0; i < cpuinfo.icache_size; i += cpuinfo.icache_line_length) __asm__ __volatile__ ("wic %0, r0;" \ : : "r" (i)); #endif __enable_icache_nomsr(); local_irq_restore(flags); } static void __flush_icache_all_noirq(void) { #ifndef ASM_LOOP int i; #endif pr_debug("%s\n", __func__); #ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); #else for (i = 
0; i < cpuinfo.icache_size; i += cpuinfo.icache_line_length) __asm__ __volatile__ ("wic %0, r0;" \ : : "r" (i)); #endif } static void __invalidate_dcache_all_msr_irq(void) { unsigned long flags; #ifndef ASM_LOOP int i; #endif pr_debug("%s\n", __func__); local_irq_save(flags); __disable_dcache_msr(); #ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); #else for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length) __asm__ __volatile__ ("wdc %0, r0;" \ : : "r" (i)); #endif __enable_dcache_msr(); local_irq_restore(flags); } static void __invalidate_dcache_all_nomsr_irq(void) { unsigned long flags; #ifndef ASM_LOOP int i; #endif pr_debug("%s\n", __func__); local_irq_save(flags); __disable_dcache_nomsr(); #ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); #else for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length) __asm__ __volatile__ ("wdc %0, r0;" \ : : "r" (i)); #endif __enable_dcache_nomsr(); local_irq_restore(flags); } static void __invalidate_dcache_all_noirq_wt(void) { #ifndef ASM_LOOP int i; #endif pr_debug("%s\n", __func__); #ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); #else for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length) __asm__ __volatile__ ("wdc %0, r0;" \ : : "r" (i)); #endif } /* * FIXME It is blindly invalidation as is expected * but can't be called on noMMU in microblaze_cache_init below * * MS: noMMU kernel won't boot if simple wdc is used * The reason should be that there are discared data which kernel needs */ static void __invalidate_dcache_all_wb(void) { #ifndef ASM_LOOP int i; #endif pr_debug("%s\n", __func__); #ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); #else for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length) __asm__ __volatile__ ("wdc %0, r0;" \ : : "r" (i)); #endif } static void __invalidate_dcache_range_wb(unsigned long start, unsigned long end) { #ifndef ASM_LOOP int i; #endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, cpuinfo.dcache_line_length, cpuinfo.dcache_size); #ifdef ASM_LOOP CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); #else for (i = start; i < end; i += cpuinfo.dcache_line_length) __asm__ __volatile__ ("wdc.clear %0, r0;" \ : : "r" (i)); #endif } static void __invalidate_dcache_range_nomsr_wt(unsigned long start, unsigned long end) { #ifndef ASM_LOOP int i; #endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, cpuinfo.dcache_line_length, cpuinfo.dcache_size); #ifdef ASM_LOOP CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); #else for (i = start; i < end; i += cpuinfo.dcache_line_length) __asm__ __volatile__ ("wdc %0, r0;" \ : : "r" (i)); #endif } static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, unsigned long end) { unsigned long flags; #ifndef ASM_LOOP int i; #endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, cpuinfo.dcache_line_length, cpuinfo.dcache_size); local_irq_save(flags); __disable_dcache_msr(); #ifdef ASM_LOOP CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); #else for (i = start; i < end; i += cpuinfo.dcache_line_length) __asm__ __volatile__ ("wdc %0, r0;" \ : : "r" (i)); #endif __enable_dcache_msr(); local_irq_restore(flags); } 
static void __invalidate_dcache_range_nomsr_irq(unsigned long start, unsigned long end) { unsigned long flags; #ifndef ASM_LOOP int i; #endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, cpuinfo.dcache_line_length, cpuinfo.dcache_size); local_irq_save(flags); __disable_dcache_nomsr(); #ifdef ASM_LOOP CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); #else for (i = start; i < end; i += cpuinfo.dcache_line_length) __asm__ __volatile__ ("wdc %0, r0;" \ : : "r" (i)); #endif __enable_dcache_nomsr(); local_irq_restore(flags); } static void __flush_dcache_all_wb(void) { #ifndef ASM_LOOP int i; #endif pr_debug("%s\n", __func__); #ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc.flush); #else for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length) __asm__ __volatile__ ("wdc.flush %0, r0;" \ : : "r" (i)); #endif } static void __flush_dcache_range_wb(unsigned long start, unsigned long end) { #ifndef ASM_LOOP int i; #endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, cpuinfo.dcache_line_length, cpuinfo.dcache_size); #ifdef ASM_LOOP CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); #else for (i = start; i < end; i += cpuinfo.dcache_line_length) __asm__ __volatile__ ("wdc.flush %0, r0;" \ : : "r" (i)); #endif } /* struct for wb caches and for wt caches */ struct scache *mbc; /* new wb cache model */ static const struct scache wb_msr = { .ie = __enable_icache_msr, .id = __disable_icache_msr, .ifl = __flush_icache_all_noirq, .iflr = __flush_icache_range_noirq, .iin = __flush_icache_all_noirq, .iinr = __flush_icache_range_noirq, .de = __enable_dcache_msr, .dd = __disable_dcache_msr, .dfl = __flush_dcache_all_wb, .dflr = __flush_dcache_range_wb, .din = __invalidate_dcache_all_wb, .dinr = __invalidate_dcache_range_wb, }; /* There is only difference in ie, id, de, dd functions */ static const struct scache wb_nomsr = { .ie = __enable_icache_nomsr, .id = __disable_icache_nomsr, .ifl = __flush_icache_all_noirq, .iflr = __flush_icache_range_noirq, .iin = __flush_icache_all_noirq, .iinr = __flush_icache_range_noirq, .de = __enable_dcache_nomsr, .dd = __disable_dcache_nomsr, .dfl = __flush_dcache_all_wb, .dflr = __flush_dcache_range_wb, .din = __invalidate_dcache_all_wb, .dinr = __invalidate_dcache_range_wb, }; /* Old wt cache model with disabling irq and turn off cache */ static const struct scache wt_msr = { .ie = __enable_icache_msr, .id = __disable_icache_msr, .ifl = __flush_icache_all_msr_irq, .iflr = __flush_icache_range_msr_irq, .iin = __flush_icache_all_msr_irq, .iinr = __flush_icache_range_msr_irq, .de = __enable_dcache_msr, .dd = __disable_dcache_msr, .dfl = __invalidate_dcache_all_msr_irq, .dflr = __invalidate_dcache_range_msr_irq_wt, .din = __invalidate_dcache_all_msr_irq, .dinr = __invalidate_dcache_range_msr_irq_wt, }; static const struct scache wt_nomsr = { .ie = __enable_icache_nomsr, .id = __disable_icache_nomsr, .ifl = __flush_icache_all_nomsr_irq, .iflr = __flush_icache_range_nomsr_irq, .iin = __flush_icache_all_nomsr_irq, .iinr = __flush_icache_range_nomsr_irq, .de = __enable_dcache_nomsr, .dd = __disable_dcache_nomsr, .dfl = __invalidate_dcache_all_nomsr_irq, .dflr = __invalidate_dcache_range_nomsr_irq, .din = __invalidate_dcache_all_nomsr_irq, .dinr = __invalidate_dcache_range_nomsr_irq, }; /* New wt cache model for newer Microblaze versions */ static const 
struct scache wt_msr_noirq = { .ie = __enable_icache_msr, .id = __disable_icache_msr, .ifl = __flush_icache_all_noirq, .iflr = __flush_icache_range_noirq, .iin = __flush_icache_all_noirq, .iinr = __flush_icache_range_noirq, .de = __enable_dcache_msr, .dd = __disable_dcache_msr, .dfl = __invalidate_dcache_all_noirq_wt, .dflr = __invalidate_dcache_range_nomsr_wt, .din = __invalidate_dcache_all_noirq_wt, .dinr = __invalidate_dcache_range_nomsr_wt, }; static const struct scache wt_nomsr_noirq = { .ie = __enable_icache_nomsr, .id = __disable_icache_nomsr, .ifl = __flush_icache_all_noirq, .iflr = __flush_icache_range_noirq, .iin = __flush_icache_all_noirq, .iinr = __flush_icache_range_noirq, .de = __enable_dcache_nomsr, .dd = __disable_dcache_nomsr, .dfl = __invalidate_dcache_all_noirq_wt, .dflr = __invalidate_dcache_range_nomsr_wt, .din = __invalidate_dcache_all_noirq_wt, .dinr = __invalidate_dcache_range_nomsr_wt, }; /* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */ #define CPUVER_7_20_A 0x0c #define CPUVER_7_20_D 0x0f void microblaze_cache_init(void) { if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) { if (cpuinfo.dcache_wb) { pr_info("wb_msr\n"); mbc = (struct scache *)&wb_msr; if (cpuinfo.ver_code <= CPUVER_7_20_D) { /* MS: problem with signal handling - hw bug */ pr_info("WB won't work properly\n"); } } else { if (cpuinfo.ver_code >= CPUVER_7_20_A) { pr_info("wt_msr_noirq\n"); mbc = (struct scache *)&wt_msr_noirq; } else { pr_info("wt_msr\n"); mbc = (struct scache *)&wt_msr; } } } else { if (cpuinfo.dcache_wb) { pr_info("wb_nomsr\n"); mbc = (struct scache *)&wb_nomsr; if (cpuinfo.ver_code <= CPUVER_7_20_D) { /* MS: problem with signal handling - hw bug */ pr_info("WB won't work properly\n"); } } else { if (cpuinfo.ver_code >= CPUVER_7_20_A) { pr_info("wt_nomsr_noirq\n"); mbc = (struct scache *)&wt_nomsr_noirq; } else { pr_info("wt_nomsr\n"); mbc = (struct scache *)&wt_nomsr; } } } /* * FIXME Invalidation is done in U-BOOT * WT cache: Data is already written to main memory * WB cache: Discard data on noMMU which caused that kernel doesn't boot */ /* invalidate_dcache(); */ enable_dcache(); invalidate_icache(); enable_icache(); }
linux-master
arch/microblaze/kernel/cpu/cache.c
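/*
 * Editor's note (not part of the kernel source): a minimal user-space sketch
 * of the end-address rounding used by CACHE_RANGE_LOOP_1/_2 in the file above.
 * The macro rounds `end` down to the start of the last cache line that still
 * lies inside the range, so the loop walks every line from `start` up to and
 * including that one. The helper name and the 32-byte line size below are
 * assumptions made only for this illustration.
 */
#include <assert.h>

static unsigned long last_line_in_range(unsigned long end,
					unsigned long line_length)
{
	unsigned long align = ~(line_length - 1);

	/* A line-aligned end is exclusive, so step back one full line;
	 * otherwise mask down to the line containing end - 1. */
	return ((end & align) == end) ? end - line_length : end & align;
}

int main(void)
{
	assert(last_line_in_range(0x1040, 32) == 0x1020);
	assert(last_line_in_range(0x1041, 32) == 0x1040);
	return 0;
}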
/* * Copyright (C) 2007-2009 Michal Simek <[email protected]> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2007 John Williams <[email protected]> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/clk.h> #include <linux/init.h> #include <asm/cpuinfo.h> #include <asm/pvr.h> const struct cpu_ver_key cpu_ver_lookup[] = { /* These key value are as per MBV field in PVR0 */ {"5.00.a", 0x01}, {"5.00.b", 0x02}, {"5.00.c", 0x03}, {"6.00.a", 0x04}, {"6.00.b", 0x06}, {"7.00.a", 0x05}, {"7.00.b", 0x07}, {"7.10.a", 0x08}, {"7.10.b", 0x09}, {"7.10.c", 0x0a}, {"7.10.d", 0x0b}, {"7.20.a", 0x0c}, {"7.20.b", 0x0d}, {"7.20.c", 0x0e}, {"7.20.d", 0x0f}, {"7.30.a", 0x10}, {"7.30.b", 0x11}, {"8.00.a", 0x12}, {"8.00.b", 0x13}, {"8.10.a", 0x14}, {"8.20.a", 0x15}, {"8.20.b", 0x16}, {"8.30.a", 0x17}, {"8.40.a", 0x18}, {"8.40.b", 0x19}, {"8.50.a", 0x1a}, {"8.50.b", 0x1c}, {"8.50.c", 0x1e}, {"9.0", 0x1b}, {"9.1", 0x1d}, {"9.2", 0x1f}, {"9.3", 0x20}, {"9.4", 0x21}, {"9.5", 0x22}, {"9.6", 0x23}, {"10.0", 0x24}, {"11.0", 0x25}, {NULL, 0}, }; /* * FIXME Not sure if the actual key is defined by Xilinx in the PVR */ const struct family_string_key family_string_lookup[] = { {"virtex2", 0x4}, {"virtex2pro", 0x5}, {"spartan3", 0x6}, {"virtex4", 0x7}, {"virtex5", 0x8}, {"spartan3e", 0x9}, {"spartan3a", 0xa}, {"spartan3an", 0xb}, {"spartan3adsp", 0xc}, {"spartan6", 0xd}, {"virtex6", 0xe}, {"virtex7", 0xf}, /* FIXME There is no key code defined for spartan2 */ {"spartan2", 0xf0}, {"kintex7", 0x10}, {"artix7", 0x11}, {"zynq7000", 0x12}, {"UltraScale Virtex", 0x13}, {"UltraScale Kintex", 0x14}, {"UltraScale+ Zynq", 0x15}, {"UltraScale+ Virtex", 0x16}, {"UltraScale+ Kintex", 0x17}, {"Spartan7", 0x18}, {NULL, 0}, }; struct cpuinfo cpuinfo; static struct device_node *cpu; void __init setup_cpuinfo(void) { cpu = of_get_cpu_node(0, NULL); if (!cpu) pr_err("You don't have cpu or are missing cpu reg property!!!\n"); pr_info("%s: initialising\n", __func__); switch (cpu_has_pvr()) { case 0: pr_warn("%s: No PVR support. Using static CPU info from FDT\n", __func__); set_cpuinfo_static(&cpuinfo, cpu); break; /* FIXME I found weird behavior with MB 7.00.a/b 7.10.a * please do not use FULL PVR with MMU */ case 1: pr_info("%s: Using full CPU PVR support\n", __func__); set_cpuinfo_static(&cpuinfo, cpu); set_cpuinfo_pvr_full(&cpuinfo, cpu); break; default: pr_warn("%s: Unsupported PVR setting\n", __func__); set_cpuinfo_static(&cpuinfo, cpu); } if (cpuinfo.mmu_privins) pr_warn("%s: Stream instructions enabled" " - USERSPACE CAN LOCK THIS KERNEL!\n", __func__); of_node_put(cpu); } void __init setup_cpuinfo_clk(void) { struct clk *clk; clk = of_clk_get(cpu, 0); if (IS_ERR(clk)) { pr_err("ERROR: CPU CCF input clock not found\n"); /* take timebase-frequency from DTS */ cpuinfo.cpu_clock_freq = fcpu(cpu, "timebase-frequency"); } else { cpuinfo.cpu_clock_freq = clk_get_rate(clk); } if (!cpuinfo.cpu_clock_freq) { pr_err("ERROR: CPU clock frequency not setup\n"); BUG(); } }
linux-master
arch/microblaze/kernel/cpu/cpuinfo.c
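/*
 * Editor's note (not part of the kernel source): a self-contained sketch of
 * the lookup pattern used by cpu_ver_lookup[] above, which maps the MBV field
 * of PVR0 to a MicroBlaze version string. The struct, table and function names
 * here are hypothetical; only the table shape and the NULL-terminated walk
 * mirror the file above.
 */
#include <stdio.h>

struct demo_ver_key { const char *name; unsigned int key; };

static const struct demo_ver_key demo_ver_lookup[] = {
	{"7.20.a", 0x0c}, {"7.20.d", 0x0f}, {"8.00.a", 0x12}, {NULL, 0},
};

static const char *demo_ver_string(unsigned int mbv)
{
	const struct demo_ver_key *v;

	for (v = demo_ver_lookup; v->name; v++)
		if (v->key == mbv)
			return v->name;
	return "unknown";
}

int main(void)
{
	printf("MBV 0x0f -> %s\n", demo_ver_string(0x0f)); /* prints 7.20.d */
	return 0;
}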
// SPDX-License-Identifier: GPL-2.0 /* * Device Tree support for MStar/Sigmastar Armv7 SoCs * * Copyright (c) 2020 thingy.jp * Author: Daniel Palmer <[email protected]> */ #include <linux/init.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/io.h> /* * In the u-boot code the area these registers are in is * called "L3 bridge" and there are register descriptions * for something in the same area called "AXI". * * It's not exactly known what this is but the vendor code * for both u-boot and linux share calls to "flush the miu pipe". * This seems to be to force pending CPU writes to memory so that * the state is right before DMA capable devices try to read * descriptors and data the CPU has prepared. Without doing this * ethernet doesn't work reliably for example. */ #define MSTARV7_L3BRIDGE_FLUSH 0x14 #define MSTARV7_L3BRIDGE_STATUS 0x40 #define MSTARV7_L3BRIDGE_FLUSH_TRIGGER BIT(0) #define MSTARV7_L3BRIDGE_STATUS_DONE BIT(12) #ifdef CONFIG_SMP #define MSTARV7_CPU1_BOOT_ADDR_HIGH 0x4c #define MSTARV7_CPU1_BOOT_ADDR_LOW 0x50 #define MSTARV7_CPU1_UNLOCK 0x58 #define MSTARV7_CPU1_UNLOCK_MAGIC 0xbabe #endif static void __iomem *l3bridge; static const char * const mstarv7_board_dt_compat[] __initconst = { "mstar,infinity", "mstar,infinity2m", "mstar,infinity3", "mstar,mercury5", NULL, }; /* * This may need locking to deal with situations where an interrupt * happens while we are in here and mb() gets called by the interrupt handler. * * The vendor code did have a spin lock but it doesn't seem to be needed and * removing it hasn't caused any side effects so far. * * [writel|readl]_relaxed have to be used here because otherwise * we'd end up right back in here. */ static void mstarv7_mb(void) { /* toggle the flush miu pipe fire bit */ writel_relaxed(0, l3bridge + MSTARV7_L3BRIDGE_FLUSH); writel_relaxed(MSTARV7_L3BRIDGE_FLUSH_TRIGGER, l3bridge + MSTARV7_L3BRIDGE_FLUSH); while (!(readl_relaxed(l3bridge + MSTARV7_L3BRIDGE_STATUS) & MSTARV7_L3BRIDGE_STATUS_DONE)) { /* wait for flush to complete */ } } #ifdef CONFIG_SMP static int mstarv7_boot_secondary(unsigned int cpu, struct task_struct *idle) { struct device_node *np; u32 bootaddr = (u32) __pa_symbol(secondary_startup_arm); void __iomem *smpctrl; /* * right now we don't know how to boot anything except * cpu 1. */ if (cpu != 1) return -EINVAL; np = of_find_compatible_node(NULL, NULL, "mstar,smpctrl"); smpctrl = of_iomap(np, 0); if (!smpctrl) return -ENODEV; /* set the boot address for the second cpu */ writew(bootaddr & 0xffff, smpctrl + MSTARV7_CPU1_BOOT_ADDR_LOW); writew((bootaddr >> 16) & 0xffff, smpctrl + MSTARV7_CPU1_BOOT_ADDR_HIGH); /* unlock the second cpu */ writew(MSTARV7_CPU1_UNLOCK_MAGIC, smpctrl + MSTARV7_CPU1_UNLOCK); /* and away we go...*/ arch_send_wakeup_ipi_mask(cpumask_of(cpu)); iounmap(smpctrl); return 0; } static const struct smp_operations __initdata mstarv7_smp_ops = { .smp_boot_secondary = mstarv7_boot_secondary, }; #endif static void __init mstarv7_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "mstar,l3bridge"); l3bridge = of_iomap(np, 0); if (l3bridge) soc_mb = mstarv7_mb; else pr_warn("Failed to install memory barrier, DMA will be broken!\n"); } DT_MACHINE_START(MSTARV7_DT, "MStar/Sigmastar Armv7 (Device Tree)") .dt_compat = mstarv7_board_dt_compat, .init_machine = mstarv7_init, .smp = smp_ops(mstarv7_smp_ops), MACHINE_END
linux-master
arch/arm/mach-mstar/mstarv7.c
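/*
 * Editor's note (not part of the kernel source): mstarv7_mb() above implements
 * the "flush the MIU pipe" barrier as trigger-then-poll: clear the fire bit,
 * set it, then spin until the status register reports completion. The snippet
 * below simulates that sequence against a fake register block so the control
 * flow can be run in user space; the register offsets and bits are copied from
 * the file above, everything else is a stand-in.
 */
#include <stdint.h>
#include <stdio.h>

#define L3BRIDGE_FLUSH		0x14
#define L3BRIDGE_STATUS		0x40
#define FLUSH_TRIGGER		(1u << 0)
#define STATUS_DONE		(1u << 12)

static uint32_t fake_regs[0x100 / 4];

static void reg_write(unsigned int off, uint32_t val)
{
	fake_regs[off / 4] = val;
	/* Fake hardware: a trigger write completes the flush immediately. */
	if (off == L3BRIDGE_FLUSH && (val & FLUSH_TRIGGER))
		fake_regs[L3BRIDGE_STATUS / 4] |= STATUS_DONE;
}

static uint32_t reg_read(unsigned int off)
{
	return fake_regs[off / 4];
}

static void demo_mb(void)
{
	reg_write(L3BRIDGE_FLUSH, 0);			/* toggle the fire bit low... */
	reg_write(L3BRIDGE_FLUSH, FLUSH_TRIGGER);	/* ...then high to start the flush */
	while (!(reg_read(L3BRIDGE_STATUS) & STATUS_DONE))
		;					/* wait for flush to complete */
}

int main(void)
{
	demo_mb();
	puts("flush done");
	return 0;
}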
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/arm/mach-ep93xx/edb93xx.c * Cirrus Logic EDB93xx Development Board support. * * EDB93XX, EDB9301, EDB9307A * Copyright (C) 2008-2009 H Hartley Sweeten <[email protected]> * * EDB9302 * Copyright (C) 2006 George Kashperko <[email protected]> * * EDB9302A, EDB9315, EDB9315A * Copyright (C) 2006 Lennert Buytenhek <[email protected]> * * EDB9307 * Copyright (C) 2007 Herbert Valerio Riedel <[email protected]> * * EDB9312 * Copyright (C) 2006 Infosys Technologies Limited * Toufeeq Hussain <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/gpio/machine.h> #include <sound/cs4271.h> #include "hardware.h" #include <linux/platform_data/video-ep93xx.h> #include <linux/platform_data/spi-ep93xx.h> #include "gpio-ep93xx.h" #include <asm/mach-types.h> #include <asm/mach/arch.h> #include "soc.h" static void __init edb93xx_register_flash(void) { if (machine_is_edb9307() || machine_is_edb9312() || machine_is_edb9315()) { ep93xx_register_flash(4, EP93XX_CS6_PHYS_BASE, SZ_32M); } else { ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_16M); } } static struct ep93xx_eth_data __initdata edb93xx_eth_data = { .phy_id = 1, }; /************************************************************************* * EDB93xx i2c peripheral handling *************************************************************************/ static struct i2c_board_info __initdata edb93xxa_i2c_board_info[] = { { I2C_BOARD_INFO("isl1208", 0x6f), }, }; static struct i2c_board_info __initdata edb93xx_i2c_board_info[] = { { I2C_BOARD_INFO("ds1337", 0x68), }, }; static void __init edb93xx_register_i2c(void) { if (machine_is_edb9302a() || machine_is_edb9307a() || machine_is_edb9315a()) { ep93xx_register_i2c(edb93xxa_i2c_board_info, ARRAY_SIZE(edb93xxa_i2c_board_info)); } else if (machine_is_edb9302() || machine_is_edb9307() || machine_is_edb9312() || machine_is_edb9315()) { ep93xx_register_i2c(edb93xx_i2c_board_info, ARRAY_SIZE(edb93xx_i2c_board_info)); } } /************************************************************************* * EDB93xx SPI peripheral handling *************************************************************************/ static struct cs4271_platform_data edb93xx_cs4271_data = { .gpio_nreset = -EINVAL, /* filled in later */ }; static struct spi_board_info edb93xx_spi_board_info[] __initdata = { { .modalias = "cs4271", .platform_data = &edb93xx_cs4271_data, .max_speed_hz = 6000000, .bus_num = 0, .chip_select = 0, .mode = SPI_MODE_3, }, }; static struct gpiod_lookup_table edb93xx_spi_cs_gpio_table = { .dev_id = "spi0", .table = { GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW), { }, }, }; static struct ep93xx_spi_info edb93xx_spi_info __initdata = { /* Intentionally left blank */ }; static void __init edb93xx_register_spi(void) { if (machine_is_edb9301() || machine_is_edb9302()) edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO1; else if (machine_is_edb9302a() || machine_is_edb9307a()) edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_H(2); else if (machine_is_edb9315a()) edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO14; gpiod_add_lookup_table(&edb93xx_spi_cs_gpio_table); ep93xx_register_spi(&edb93xx_spi_info, edb93xx_spi_board_info, ARRAY_SIZE(edb93xx_spi_board_info)); } /************************************************************************* * EDB93xx I2S *************************************************************************/ static struct 
platform_device edb93xx_audio_device = { .name = "edb93xx-audio", .id = -1, }; static int __init edb93xx_has_audio(void) { return (machine_is_edb9301() || machine_is_edb9302() || machine_is_edb9302a() || machine_is_edb9307a() || machine_is_edb9315a()); } static void __init edb93xx_register_i2s(void) { if (edb93xx_has_audio()) { ep93xx_register_i2s(); platform_device_register(&edb93xx_audio_device); } } /************************************************************************* * EDB93xx pwm *************************************************************************/ static void __init edb93xx_register_pwm(void) { if (machine_is_edb9301() || machine_is_edb9302() || machine_is_edb9302a()) { /* EP9301 and EP9302 only have pwm.1 (EGPIO14) */ ep93xx_register_pwm(0, 1); } else if (machine_is_edb9307() || machine_is_edb9307a()) { /* EP9307 only has pwm.0 (PWMOUT) */ ep93xx_register_pwm(1, 0); } else { /* EP9312 and EP9315 have both */ ep93xx_register_pwm(1, 1); } } /************************************************************************* * EDB93xx framebuffer *************************************************************************/ static struct ep93xxfb_mach_info __initdata edb93xxfb_info = { .flags = 0, }; static int __init edb93xx_has_fb(void) { /* These platforms have an ep93xx with video capability */ return machine_is_edb9307() || machine_is_edb9307a() || machine_is_edb9312() || machine_is_edb9315() || machine_is_edb9315a(); } static void __init edb93xx_register_fb(void) { if (!edb93xx_has_fb()) return; if (machine_is_edb9307a() || machine_is_edb9315a()) edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN0; else edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN3; ep93xx_register_fb(&edb93xxfb_info); } /************************************************************************* * EDB93xx IDE *************************************************************************/ static int __init edb93xx_has_ide(void) { /* * Although EDB9312 and EDB9315 do have IDE capability, they have * INTRQ line wired as pull-up, which makes using IDE interface * problematic. 
*/ return machine_is_edb9312() || machine_is_edb9315() || machine_is_edb9315a(); } static void __init edb93xx_register_ide(void) { if (!edb93xx_has_ide()) return; ep93xx_register_ide(); } static void __init edb93xx_init_machine(void) { ep93xx_init_devices(); edb93xx_register_flash(); ep93xx_register_eth(&edb93xx_eth_data, 1); edb93xx_register_i2c(); edb93xx_register_spi(); edb93xx_register_i2s(); edb93xx_register_pwm(); edb93xx_register_fb(); edb93xx_register_ide(); ep93xx_register_adc(); } #ifdef CONFIG_MACH_EDB9301 MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board") /* Maintainer: H Hartley Sweeten <[email protected]> */ .atag_offset = 0x100, .nr_irqs = NR_EP93XX_IRQS, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = edb93xx_init_machine, .restart = ep93xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_EDB9302 MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board") /* Maintainer: George Kashperko <[email protected]> */ .atag_offset = 0x100, .nr_irqs = NR_EP93XX_IRQS, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = edb93xx_init_machine, .restart = ep93xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_EDB9302A MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board") /* Maintainer: Lennert Buytenhek <[email protected]> */ .atag_offset = 0x100, .nr_irqs = NR_EP93XX_IRQS, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = edb93xx_init_machine, .restart = ep93xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_EDB9307 MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board") /* Maintainer: Herbert Valerio Riedel <[email protected]> */ .atag_offset = 0x100, .nr_irqs = NR_EP93XX_IRQS, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = edb93xx_init_machine, .restart = ep93xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_EDB9307A MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board") /* Maintainer: H Hartley Sweeten <[email protected]> */ .atag_offset = 0x100, .nr_irqs = NR_EP93XX_IRQS, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = edb93xx_init_machine, .restart = ep93xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_EDB9312 MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board") /* Maintainer: Toufeeq Hussain <[email protected]> */ .atag_offset = 0x100, .nr_irqs = NR_EP93XX_IRQS, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = edb93xx_init_machine, .restart = ep93xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_EDB9315 MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board") /* Maintainer: Lennert Buytenhek <[email protected]> */ .atag_offset = 0x100, .nr_irqs = NR_EP93XX_IRQS, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = edb93xx_init_machine, .restart = ep93xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_EDB9315A MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board") /* Maintainer: Lennert Buytenhek <[email protected]> */ .atag_offset = 0x100, .nr_irqs = NR_EP93XX_IRQS, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = edb93xx_init_machine, .restart = ep93xx_restart, MACHINE_END #endif
linux-master
arch/arm/mach-ep93xx/edb93xx.c
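/*
 * Editor's note (not part of the kernel source): edb93xx_register_i2c() above
 * picks the RTC chip by board revision: the "A" boards carry an ISL1208 while
 * the older boards carry a DS1337, and the EDB9301 registers neither. The
 * sketch below models that dispatch with a plain enum in place of the
 * machine_is_*() helpers; all names here are hypothetical.
 */
#include <stdio.h>

enum demo_board {
	DEMO_EDB9301, DEMO_EDB9302, DEMO_EDB9302A, DEMO_EDB9307,
	DEMO_EDB9307A, DEMO_EDB9312, DEMO_EDB9315, DEMO_EDB9315A,
};

static const char *demo_rtc_for_board(enum demo_board b)
{
	switch (b) {
	case DEMO_EDB9302A:
	case DEMO_EDB9307A:
	case DEMO_EDB9315A:
		return "isl1208";	/* at 0x6f on the "A" boards */
	case DEMO_EDB9302:
	case DEMO_EDB9307:
	case DEMO_EDB9312:
	case DEMO_EDB9315:
		return "ds1337";	/* at 0x68 on the older boards */
	default:
		return "none";		/* EDB9301: no I2C RTC registered */
	}
}

int main(void)
{
	printf("EDB9307A RTC: %s\n", demo_rtc_for_board(DEMO_EDB9307A));
	return 0;
}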
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/arm/mach-ep93xx/clock.c * Clock control for Cirrus EP93xx chips. * * Copyright (C) 2006 Lennert Buytenhek <[email protected]> */ #define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/module.h> #include <linux/string.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/clkdev.h> #include <linux/clk-provider.h> #include <linux/soc/cirrus/ep93xx.h> #include "hardware.h" #include <asm/div64.h> #include "soc.h" static DEFINE_SPINLOCK(clk_lock); static char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 }; static char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 }; static char pclk_divisors[] = { 1, 2, 4, 8 }; static char adc_divisors[] = { 16, 4 }; static char sclk_divisors[] = { 2, 4 }; static char lrclk_divisors[] = { 32, 64, 128 }; static const char * const mux_parents[] = { "xtali", "pll1", "pll2" }; /* * PLL rate = 14.7456 MHz * (X1FBD + 1) * (X2FBD + 1) / (X2IPD + 1) / 2^PS */ static unsigned long calc_pll_rate(unsigned long long rate, u32 config_word) { int i; rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */ rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */ do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */ for (i = 0; i < ((config_word >> 16) & 3); i++) /* PS */ rate >>= 1; return (unsigned long)rate; } struct clk_psc { struct clk_hw hw; void __iomem *reg; u8 bit_idx; u32 mask; u8 shift; u8 width; char *div; u8 num_div; spinlock_t *lock; }; #define to_clk_psc(_hw) container_of(_hw, struct clk_psc, hw) static int ep93xx_clk_is_enabled(struct clk_hw *hw) { struct clk_psc *psc = to_clk_psc(hw); u32 val = readl(psc->reg); return (val & BIT(psc->bit_idx)) ? 1 : 0; } static int ep93xx_clk_enable(struct clk_hw *hw) { struct clk_psc *psc = to_clk_psc(hw); unsigned long flags = 0; u32 val; if (psc->lock) spin_lock_irqsave(psc->lock, flags); val = __raw_readl(psc->reg); val |= BIT(psc->bit_idx); ep93xx_syscon_swlocked_write(val, psc->reg); if (psc->lock) spin_unlock_irqrestore(psc->lock, flags); return 0; } static void ep93xx_clk_disable(struct clk_hw *hw) { struct clk_psc *psc = to_clk_psc(hw); unsigned long flags = 0; u32 val; if (psc->lock) spin_lock_irqsave(psc->lock, flags); val = __raw_readl(psc->reg); val &= ~BIT(psc->bit_idx); ep93xx_syscon_swlocked_write(val, psc->reg); if (psc->lock) spin_unlock_irqrestore(psc->lock, flags); } static const struct clk_ops clk_ep93xx_gate_ops = { .enable = ep93xx_clk_enable, .disable = ep93xx_clk_disable, .is_enabled = ep93xx_clk_is_enabled, }; static struct clk_hw *ep93xx_clk_register_gate(const char *name, const char *parent_name, void __iomem *reg, u8 bit_idx) { struct clk_init_data init; struct clk_psc *psc; struct clk *clk; psc = kzalloc(sizeof(*psc), GFP_KERNEL); if (!psc) return ERR_PTR(-ENOMEM); init.name = name; init.ops = &clk_ep93xx_gate_ops; init.flags = CLK_SET_RATE_PARENT; init.parent_names = (parent_name ? &parent_name : NULL); init.num_parents = (parent_name ? 
1 : 0); psc->reg = reg; psc->bit_idx = bit_idx; psc->hw.init = &init; psc->lock = &clk_lock; clk = clk_register(NULL, &psc->hw); if (IS_ERR(clk)) { kfree(psc); return ERR_CAST(clk); } return &psc->hw; } static u8 ep93xx_mux_get_parent(struct clk_hw *hw) { struct clk_psc *psc = to_clk_psc(hw); u32 val = __raw_readl(psc->reg); if (!(val & EP93XX_SYSCON_CLKDIV_ESEL)) return 0; if (!(val & EP93XX_SYSCON_CLKDIV_PSEL)) return 1; return 2; } static int ep93xx_mux_set_parent_lock(struct clk_hw *hw, u8 index) { struct clk_psc *psc = to_clk_psc(hw); unsigned long flags = 0; u32 val; if (index >= ARRAY_SIZE(mux_parents)) return -EINVAL; if (psc->lock) spin_lock_irqsave(psc->lock, flags); val = __raw_readl(psc->reg); val &= ~(EP93XX_SYSCON_CLKDIV_ESEL | EP93XX_SYSCON_CLKDIV_PSEL); if (index != 0) { val |= EP93XX_SYSCON_CLKDIV_ESEL; val |= (index - 1) ? EP93XX_SYSCON_CLKDIV_PSEL : 0; } ep93xx_syscon_swlocked_write(val, psc->reg); if (psc->lock) spin_unlock_irqrestore(psc->lock, flags); return 0; } static bool is_best(unsigned long rate, unsigned long now, unsigned long best) { return abs(rate - now) < abs(rate - best); } static int ep93xx_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { unsigned long rate = req->rate; struct clk *best_parent = NULL; unsigned long __parent_rate; unsigned long best_rate = 0, actual_rate, mclk_rate; unsigned long best_parent_rate; int __div = 0, __pdiv = 0; int i; /* * Try the two pll's and the external clock * Because the valid predividers are 2, 2.5 and 3, we multiply * all the clocks by 2 to avoid floating point math. * * This is based on the algorithm in the ep93xx raster guide: * http://be-a-maverick.com/en/pubs/appNote/AN269REV1.pdf * */ for (i = 0; i < ARRAY_SIZE(mux_parents); i++) { struct clk *parent = clk_get_sys(mux_parents[i], NULL); __parent_rate = clk_get_rate(parent); mclk_rate = __parent_rate * 2; /* Try each predivider value */ for (__pdiv = 4; __pdiv <= 6; __pdiv++) { __div = mclk_rate / (rate * __pdiv); if (__div < 2 || __div > 127) continue; actual_rate = mclk_rate / (__pdiv * __div); if (is_best(rate, actual_rate, best_rate)) { best_rate = actual_rate; best_parent_rate = __parent_rate; best_parent = parent; } } } if (!best_parent) return -EINVAL; req->best_parent_rate = best_parent_rate; req->best_parent_hw = __clk_get_hw(best_parent); req->rate = best_rate; return 0; } static unsigned long ep93xx_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_psc *psc = to_clk_psc(hw); unsigned long rate = 0; u32 val = __raw_readl(psc->reg); int __pdiv = ((val >> EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) & 0x03); int __div = val & 0x7f; if (__div > 0) rate = (parent_rate * 2) / ((__pdiv + 3) * __div); return rate; } static int ep93xx_ddiv_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_psc *psc = to_clk_psc(hw); int pdiv = 0, div = 0; unsigned long best_rate = 0, actual_rate, mclk_rate; int __div = 0, __pdiv = 0; u32 val; mclk_rate = parent_rate * 2; for (__pdiv = 4; __pdiv <= 6; __pdiv++) { __div = mclk_rate / (rate * __pdiv); if (__div < 2 || __div > 127) continue; actual_rate = mclk_rate / (__pdiv * __div); if (is_best(rate, actual_rate, best_rate)) { pdiv = __pdiv - 3; div = __div; best_rate = actual_rate; } } if (!best_rate) return -EINVAL; val = __raw_readl(psc->reg); /* Clear old dividers */ val &= ~0x37f; /* Set the new pdiv and div bits for the new clock rate */ val |= (pdiv << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | div; ep93xx_syscon_swlocked_write(val, psc->reg); return 0; } 
static const struct clk_ops clk_ddiv_ops = { .enable = ep93xx_clk_enable, .disable = ep93xx_clk_disable, .is_enabled = ep93xx_clk_is_enabled, .get_parent = ep93xx_mux_get_parent, .set_parent = ep93xx_mux_set_parent_lock, .determine_rate = ep93xx_mux_determine_rate, .recalc_rate = ep93xx_ddiv_recalc_rate, .set_rate = ep93xx_ddiv_set_rate, }; static struct clk_hw *clk_hw_register_ddiv(const char *name, void __iomem *reg, u8 bit_idx) { struct clk_init_data init; struct clk_psc *psc; struct clk *clk; psc = kzalloc(sizeof(*psc), GFP_KERNEL); if (!psc) return ERR_PTR(-ENOMEM); init.name = name; init.ops = &clk_ddiv_ops; init.flags = 0; init.parent_names = mux_parents; init.num_parents = ARRAY_SIZE(mux_parents); psc->reg = reg; psc->bit_idx = bit_idx; psc->lock = &clk_lock; psc->hw.init = &init; clk = clk_register(NULL, &psc->hw); if (IS_ERR(clk)) { kfree(psc); return ERR_CAST(clk); } return &psc->hw; } static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_psc *psc = to_clk_psc(hw); u32 val = __raw_readl(psc->reg); u8 index = (val & psc->mask) >> psc->shift; if (index > psc->num_div) return 0; return DIV_ROUND_UP_ULL(parent_rate, psc->div[index]); } static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { struct clk_psc *psc = to_clk_psc(hw); unsigned long best = 0, now, maxdiv; int i; maxdiv = psc->div[psc->num_div - 1]; for (i = 0; i < psc->num_div; i++) { if ((rate * psc->div[i]) == *parent_rate) return DIV_ROUND_UP_ULL((u64)*parent_rate, psc->div[i]); now = DIV_ROUND_UP_ULL((u64)*parent_rate, psc->div[i]); if (is_best(rate, now, best)) best = now; } if (!best) best = DIV_ROUND_UP_ULL(*parent_rate, maxdiv); return best; } static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_psc *psc = to_clk_psc(hw); u32 val = __raw_readl(psc->reg) & ~psc->mask; int i; for (i = 0; i < psc->num_div; i++) if (rate == parent_rate / psc->div[i]) { val |= i << psc->shift; break; } if (i == psc->num_div) return -EINVAL; ep93xx_syscon_swlocked_write(val, psc->reg); return 0; } static const struct clk_ops ep93xx_div_ops = { .enable = ep93xx_clk_enable, .disable = ep93xx_clk_disable, .is_enabled = ep93xx_clk_is_enabled, .recalc_rate = ep93xx_div_recalc_rate, .round_rate = ep93xx_div_round_rate, .set_rate = ep93xx_div_set_rate, }; static struct clk_hw *clk_hw_register_div(const char *name, const char *parent_name, void __iomem *reg, u8 enable_bit, u8 shift, u8 width, char *clk_divisors, u8 num_div) { struct clk_init_data init; struct clk_psc *psc; struct clk *clk; psc = kzalloc(sizeof(*psc), GFP_KERNEL); if (!psc) return ERR_PTR(-ENOMEM); init.name = name; init.ops = &ep93xx_div_ops; init.flags = 0; init.parent_names = (parent_name ? 
&parent_name : NULL); init.num_parents = 1; psc->reg = reg; psc->bit_idx = enable_bit; psc->mask = GENMASK(shift + width - 1, shift); psc->shift = shift; psc->div = clk_divisors; psc->num_div = num_div; psc->lock = &clk_lock; psc->hw.init = &init; clk = clk_register(NULL, &psc->hw); if (IS_ERR(clk)) { kfree(psc); return ERR_CAST(clk); } return &psc->hw; } struct ep93xx_gate { unsigned int bit; const char *dev_id; const char *con_id; }; static struct ep93xx_gate ep93xx_uarts[] = { {EP93XX_SYSCON_DEVCFG_U1EN, "apb:uart1", NULL}, {EP93XX_SYSCON_DEVCFG_U2EN, "apb:uart2", NULL}, {EP93XX_SYSCON_DEVCFG_U3EN, "apb:uart3", NULL}, }; static void __init ep93xx_uart_clock_init(void) { unsigned int i; struct clk_hw *hw; u32 value; unsigned int clk_uart_div; value = __raw_readl(EP93XX_SYSCON_PWRCNT); if (value & EP93XX_SYSCON_PWRCNT_UARTBAUD) clk_uart_div = 1; else clk_uart_div = 2; hw = clk_hw_register_fixed_factor(NULL, "uart", "xtali", 0, 1, clk_uart_div); /* parenting uart gate clocks to uart clock */ for (i = 0; i < ARRAY_SIZE(ep93xx_uarts); i++) { hw = ep93xx_clk_register_gate(ep93xx_uarts[i].dev_id, "uart", EP93XX_SYSCON_DEVCFG, ep93xx_uarts[i].bit); clk_hw_register_clkdev(hw, NULL, ep93xx_uarts[i].dev_id); } } static struct ep93xx_gate ep93xx_dmas[] = { {EP93XX_SYSCON_PWRCNT_DMA_M2P0, NULL, "m2p0"}, {EP93XX_SYSCON_PWRCNT_DMA_M2P1, NULL, "m2p1"}, {EP93XX_SYSCON_PWRCNT_DMA_M2P2, NULL, "m2p2"}, {EP93XX_SYSCON_PWRCNT_DMA_M2P3, NULL, "m2p3"}, {EP93XX_SYSCON_PWRCNT_DMA_M2P4, NULL, "m2p4"}, {EP93XX_SYSCON_PWRCNT_DMA_M2P5, NULL, "m2p5"}, {EP93XX_SYSCON_PWRCNT_DMA_M2P6, NULL, "m2p6"}, {EP93XX_SYSCON_PWRCNT_DMA_M2P7, NULL, "m2p7"}, {EP93XX_SYSCON_PWRCNT_DMA_M2P8, NULL, "m2p8"}, {EP93XX_SYSCON_PWRCNT_DMA_M2P9, NULL, "m2p9"}, {EP93XX_SYSCON_PWRCNT_DMA_M2M0, NULL, "m2m0"}, {EP93XX_SYSCON_PWRCNT_DMA_M2M1, NULL, "m2m1"}, }; static void __init ep93xx_dma_clock_init(void) { unsigned int i; struct clk_hw *hw; int ret; for (i = 0; i < ARRAY_SIZE(ep93xx_dmas); i++) { hw = clk_hw_register_gate(NULL, ep93xx_dmas[i].con_id, "hclk", 0, EP93XX_SYSCON_PWRCNT, ep93xx_dmas[i].bit, 0, &clk_lock); ret = clk_hw_register_clkdev(hw, ep93xx_dmas[i].con_id, NULL); if (ret) pr_err("%s: failed to register lookup %s\n", __func__, ep93xx_dmas[i].con_id); } } static int __init ep93xx_clock_init(void) { u32 value; struct clk_hw *hw; unsigned long clk_pll1_rate; unsigned long clk_f_rate; unsigned long clk_h_rate; unsigned long clk_p_rate; unsigned long clk_pll2_rate; unsigned int clk_f_div; unsigned int clk_h_div; unsigned int clk_p_div; unsigned int clk_usb_div; unsigned long clk_spi_div; hw = clk_hw_register_fixed_rate(NULL, "xtali", NULL, 0, EP93XX_EXT_CLK_RATE); clk_hw_register_clkdev(hw, NULL, "xtali"); /* Determine the bootloader configured pll1 rate */ value = __raw_readl(EP93XX_SYSCON_CLKSET1); if (!(value & EP93XX_SYSCON_CLKSET1_NBYP1)) clk_pll1_rate = EP93XX_EXT_CLK_RATE; else clk_pll1_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value); hw = clk_hw_register_fixed_rate(NULL, "pll1", "xtali", 0, clk_pll1_rate); clk_hw_register_clkdev(hw, NULL, "pll1"); /* Initialize the pll1 derived clocks */ clk_f_div = fclk_divisors[(value >> 25) & 0x7]; clk_h_div = hclk_divisors[(value >> 20) & 0x7]; clk_p_div = pclk_divisors[(value >> 18) & 0x3]; hw = clk_hw_register_fixed_factor(NULL, "fclk", "pll1", 0, 1, clk_f_div); clk_f_rate = clk_get_rate(hw->clk); hw = clk_hw_register_fixed_factor(NULL, "hclk", "pll1", 0, 1, clk_h_div); clk_h_rate = clk_get_rate(hw->clk); hw = clk_hw_register_fixed_factor(NULL, "pclk", "hclk", 0, 1, clk_p_div); 
clk_p_rate = clk_get_rate(hw->clk); clk_hw_register_clkdev(hw, "apb_pclk", NULL); ep93xx_dma_clock_init(); /* Determine the bootloader configured pll2 rate */ value = __raw_readl(EP93XX_SYSCON_CLKSET2); if (!(value & EP93XX_SYSCON_CLKSET2_NBYP2)) clk_pll2_rate = EP93XX_EXT_CLK_RATE; else if (value & EP93XX_SYSCON_CLKSET2_PLL2_EN) clk_pll2_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value); else clk_pll2_rate = 0; hw = clk_hw_register_fixed_rate(NULL, "pll2", "xtali", 0, clk_pll2_rate); clk_hw_register_clkdev(hw, NULL, "pll2"); /* Initialize the pll2 derived clocks */ /* * These four bits set the divide ratio between the PLL2 * output and the USB clock. * 0000 - Divide by 1 * 0001 - Divide by 2 * 0010 - Divide by 3 * 0011 - Divide by 4 * 0100 - Divide by 5 * 0101 - Divide by 6 * 0110 - Divide by 7 * 0111 - Divide by 8 * 1000 - Divide by 9 * 1001 - Divide by 10 * 1010 - Divide by 11 * 1011 - Divide by 12 * 1100 - Divide by 13 * 1101 - Divide by 14 * 1110 - Divide by 15 * 1111 - Divide by 1 * On power-on-reset these bits are reset to 0000b. */ clk_usb_div = (((value >> 28) & 0xf) + 1); hw = clk_hw_register_fixed_factor(NULL, "usb_clk", "pll2", 0, 1, clk_usb_div); hw = clk_hw_register_gate(NULL, "ohci-platform", "usb_clk", 0, EP93XX_SYSCON_PWRCNT, EP93XX_SYSCON_PWRCNT_USH_EN, 0, &clk_lock); clk_hw_register_clkdev(hw, NULL, "ohci-platform"); /* * EP93xx SSP clock rate was doubled in version E2. For more information * see: * http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf */ clk_spi_div = 1; if (ep93xx_chip_revision() < EP93XX_CHIP_REV_E2) clk_spi_div = 2; hw = clk_hw_register_fixed_factor(NULL, "ep93xx-spi.0", "xtali", 0, 1, clk_spi_div); clk_hw_register_clkdev(hw, NULL, "ep93xx-spi.0"); /* pwm clock */ hw = clk_hw_register_fixed_factor(NULL, "pwm_clk", "xtali", 0, 1, 1); clk_hw_register_clkdev(hw, "pwm_clk", NULL); pr_info("PLL1 running at %ld MHz, PLL2 at %ld MHz\n", clk_pll1_rate / 1000000, clk_pll2_rate / 1000000); pr_info("FCLK %ld MHz, HCLK %ld MHz, PCLK %ld MHz\n", clk_f_rate / 1000000, clk_h_rate / 1000000, clk_p_rate / 1000000); ep93xx_uart_clock_init(); /* touchscreen/adc clock */ hw = clk_hw_register_div("ep93xx-adc", "xtali", EP93XX_SYSCON_KEYTCHCLKDIV, EP93XX_SYSCON_KEYTCHCLKDIV_TSEN, EP93XX_SYSCON_KEYTCHCLKDIV_ADIV, 1, adc_divisors, ARRAY_SIZE(adc_divisors)); clk_hw_register_clkdev(hw, NULL, "ep93xx-adc"); /* keypad clock */ hw = clk_hw_register_div("ep93xx-keypad", "xtali", EP93XX_SYSCON_KEYTCHCLKDIV, EP93XX_SYSCON_KEYTCHCLKDIV_KEN, EP93XX_SYSCON_KEYTCHCLKDIV_KDIV, 1, adc_divisors, ARRAY_SIZE(adc_divisors)); clk_hw_register_clkdev(hw, NULL, "ep93xx-keypad"); /* On reset PDIV and VDIV is set to zero, while PDIV zero * means clock disable, VDIV shouldn't be zero. * So i set both dividers to minimum. */ /* ENA - Enable CLK divider. 
*/ /* PDIV - 00 - Disable clock */ /* VDIV - at least 2 */ /* Check and enable video clk registers */ value = __raw_readl(EP93XX_SYSCON_VIDCLKDIV); value |= (1 << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2; ep93xx_syscon_swlocked_write(value, EP93XX_SYSCON_VIDCLKDIV); /* check and enable i2s clk registers */ value = __raw_readl(EP93XX_SYSCON_I2SCLKDIV); value |= (1 << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2; ep93xx_syscon_swlocked_write(value, EP93XX_SYSCON_I2SCLKDIV); /* video clk */ hw = clk_hw_register_ddiv("ep93xx-fb", EP93XX_SYSCON_VIDCLKDIV, EP93XX_SYSCON_CLKDIV_ENABLE); clk_hw_register_clkdev(hw, NULL, "ep93xx-fb"); /* i2s clk */ hw = clk_hw_register_ddiv("mclk", EP93XX_SYSCON_I2SCLKDIV, EP93XX_SYSCON_CLKDIV_ENABLE); clk_hw_register_clkdev(hw, "mclk", "ep93xx-i2s"); /* i2s sclk */ #define EP93XX_I2SCLKDIV_SDIV_SHIFT 16 #define EP93XX_I2SCLKDIV_SDIV_WIDTH 1 hw = clk_hw_register_div("sclk", "mclk", EP93XX_SYSCON_I2SCLKDIV, EP93XX_SYSCON_I2SCLKDIV_SENA, EP93XX_I2SCLKDIV_SDIV_SHIFT, EP93XX_I2SCLKDIV_SDIV_WIDTH, sclk_divisors, ARRAY_SIZE(sclk_divisors)); clk_hw_register_clkdev(hw, "sclk", "ep93xx-i2s"); /* i2s lrclk */ #define EP93XX_I2SCLKDIV_LRDIV32_SHIFT 17 #define EP93XX_I2SCLKDIV_LRDIV32_WIDTH 3 hw = clk_hw_register_div("lrclk", "sclk", EP93XX_SYSCON_I2SCLKDIV, EP93XX_SYSCON_I2SCLKDIV_SENA, EP93XX_I2SCLKDIV_LRDIV32_SHIFT, EP93XX_I2SCLKDIV_LRDIV32_WIDTH, lrclk_divisors, ARRAY_SIZE(lrclk_divisors)); clk_hw_register_clkdev(hw, "lrclk", "ep93xx-i2s"); return 0; } postcore_initcall(ep93xx_clock_init);
linux-master
arch/arm/mach-ep93xx/clock.c
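/*
 * Editor's note (not part of the kernel source): a user-space re-statement of
 * calc_pll_rate() above, i.e. PLL = 14.7456 MHz * (X1FBD + 1) * (X2FBD + 1)
 * / (X2IPD + 1) / 2^PS, using the same bit positions read from CLKSET1/2.
 * The example config word below is made up purely to exercise the arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_EXT_CLK_RATE	14745600ULL	/* 14.7456 MHz crystal */

static unsigned long demo_pll_rate(uint32_t config_word)
{
	uint64_t rate = DEMO_EXT_CLK_RATE;
	int i;

	rate *= ((config_word >> 11) & 0x1f) + 1;		/* X1FBD */
	rate *= ((config_word >> 5) & 0x3f) + 1;		/* X2FBD */
	rate /= (config_word & 0x1f) + 1;			/* X2IPD */
	for (i = 0; i < (int)((config_word >> 16) & 3); i++)	/* PS */
		rate >>= 1;
	return (unsigned long)rate;
}

int main(void)
{
	/* Hypothetical word: X1FBD = 0, X2FBD = 53, X2IPD = 1, PS = 1 */
	uint32_t cfg = (0u << 11) | (53u << 5) | 1u | (1u << 16);

	printf("PLL rate: %lu Hz\n", demo_pll_rate(cfg));	/* 199065600 */
	return 0;
}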
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/arm/mach-ep93xx/ts72xx.c * Technologic Systems TS72xx SBC support. * * Copyright (C) 2006 Lennert Buytenhek <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/mtd/platnand.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/spi/mmc_spi.h> #include <linux/mmc/host.h> #include <linux/platform_data/spi-ep93xx.h> #include <linux/gpio/machine.h> #include "gpio-ep93xx.h" #include "hardware.h" #include <asm/mach-types.h> #include <asm/mach/map.h> #include <asm/mach/arch.h> #include "soc.h" #include "ts72xx.h" /************************************************************************* * IO map *************************************************************************/ static struct map_desc ts72xx_io_desc[] __initdata = { { .virtual = (unsigned long)TS72XX_MODEL_VIRT_BASE, .pfn = __phys_to_pfn(TS72XX_MODEL_PHYS_BASE), .length = TS72XX_MODEL_SIZE, .type = MT_DEVICE, }, { .virtual = (unsigned long)TS72XX_OPTIONS_VIRT_BASE, .pfn = __phys_to_pfn(TS72XX_OPTIONS_PHYS_BASE), .length = TS72XX_OPTIONS_SIZE, .type = MT_DEVICE, }, { .virtual = (unsigned long)TS72XX_OPTIONS2_VIRT_BASE, .pfn = __phys_to_pfn(TS72XX_OPTIONS2_PHYS_BASE), .length = TS72XX_OPTIONS2_SIZE, .type = MT_DEVICE, }, { .virtual = (unsigned long)TS72XX_CPLDVER_VIRT_BASE, .pfn = __phys_to_pfn(TS72XX_CPLDVER_PHYS_BASE), .length = TS72XX_CPLDVER_SIZE, .type = MT_DEVICE, } }; static void __init ts72xx_map_io(void) { ep93xx_map_io(); iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc)); } /************************************************************************* * NAND flash *************************************************************************/ #define TS72XX_NAND_CONTROL_ADDR_LINE 22 /* 0xN0400000 */ #define TS72XX_NAND_BUSY_ADDR_LINE 23 /* 0xN0800000 */ static void ts72xx_nand_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl) { if (ctrl & NAND_CTRL_CHANGE) { void __iomem *addr = chip->legacy.IO_ADDR_R; unsigned char bits; addr += (1 << TS72XX_NAND_CONTROL_ADDR_LINE); bits = __raw_readb(addr) & ~0x07; bits |= (ctrl & NAND_NCE) << 2; /* bit 0 -> bit 2 */ bits |= (ctrl & NAND_CLE); /* bit 1 -> bit 1 */ bits |= (ctrl & NAND_ALE) >> 2; /* bit 2 -> bit 0 */ __raw_writeb(bits, addr); } if (cmd != NAND_CMD_NONE) __raw_writeb(cmd, chip->legacy.IO_ADDR_W); } static int ts72xx_nand_device_ready(struct nand_chip *chip) { void __iomem *addr = chip->legacy.IO_ADDR_R; addr += (1 << TS72XX_NAND_BUSY_ADDR_LINE); return !!(__raw_readb(addr) & 0x20); } #define TS72XX_BOOTROM_PART_SIZE (SZ_16K) #define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M) static struct mtd_partition ts72xx_nand_parts[] = { { .name = "TS-BOOTROM", .offset = 0, .size = TS72XX_BOOTROM_PART_SIZE, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "Linux", .offset = MTDPART_OFS_RETAIN, .size = TS72XX_REDBOOT_PART_SIZE, /* leave so much for last partition */ }, { .name = "RedBoot", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, }; static struct platform_nand_data ts72xx_nand_data = { .chip = { .nr_chips = 1, .chip_offset = 0, .chip_delay = 15, }, .ctrl = { .cmd_ctrl = ts72xx_nand_hwcontrol, .dev_ready = ts72xx_nand_device_ready, }, }; static struct resource ts72xx_nand_resource[] = { { .start = 0, /* filled in later */ .end = 0, /* filled in later */ .flags = IORESOURCE_MEM, }, }; static 
struct platform_device ts72xx_nand_flash = { .name = "gen_nand", .id = -1, .dev.platform_data = &ts72xx_nand_data, .resource = ts72xx_nand_resource, .num_resources = ARRAY_SIZE(ts72xx_nand_resource), }; static void __init ts72xx_register_flash(struct mtd_partition *parts, int n, resource_size_t start) { /* * TS7200 has NOR flash all other TS72xx board have NAND flash. */ if (board_is_ts7200()) { ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_16M); } else { ts72xx_nand_resource[0].start = start; ts72xx_nand_resource[0].end = start + SZ_16M - 1; ts72xx_nand_data.chip.partitions = parts; ts72xx_nand_data.chip.nr_partitions = n; platform_device_register(&ts72xx_nand_flash); } } /************************************************************************* * RTC M48T86 *************************************************************************/ #define TS72XX_RTC_INDEX_PHYS_BASE (EP93XX_CS1_PHYS_BASE + 0x00800000) #define TS72XX_RTC_DATA_PHYS_BASE (EP93XX_CS1_PHYS_BASE + 0x01700000) static struct resource ts72xx_rtc_resources[] = { DEFINE_RES_MEM(TS72XX_RTC_INDEX_PHYS_BASE, 0x01), DEFINE_RES_MEM(TS72XX_RTC_DATA_PHYS_BASE, 0x01), }; static struct platform_device ts72xx_rtc_device = { .name = "rtc-m48t86", .id = -1, .resource = ts72xx_rtc_resources, .num_resources = ARRAY_SIZE(ts72xx_rtc_resources), }; /************************************************************************* * Watchdog (in CPLD) *************************************************************************/ #define TS72XX_WDT_CONTROL_PHYS_BASE (EP93XX_CS2_PHYS_BASE + 0x03800000) #define TS72XX_WDT_FEED_PHYS_BASE (EP93XX_CS2_PHYS_BASE + 0x03c00000) static struct resource ts72xx_wdt_resources[] = { DEFINE_RES_MEM(TS72XX_WDT_CONTROL_PHYS_BASE, 0x01), DEFINE_RES_MEM(TS72XX_WDT_FEED_PHYS_BASE, 0x01), }; static struct platform_device ts72xx_wdt_device = { .name = "ts72xx-wdt", .id = -1, .resource = ts72xx_wdt_resources, .num_resources = ARRAY_SIZE(ts72xx_wdt_resources), }; /************************************************************************* * ETH *************************************************************************/ static struct ep93xx_eth_data __initdata ts72xx_eth_data = { .phy_id = 1, }; /************************************************************************* * SPI SD/MMC host *************************************************************************/ #define BK3_EN_SDCARD_PHYS_BASE 0x12400000 #define BK3_EN_SDCARD_PWR 0x0 #define BK3_DIS_SDCARD_PWR 0x0C static void bk3_mmc_spi_setpower(struct device *dev, unsigned int vdd) { void __iomem *pwr_sd = ioremap(BK3_EN_SDCARD_PHYS_BASE, SZ_4K); if (!pwr_sd) { pr_err("Failed to enable SD card power!"); return; } pr_debug("%s: SD card pwr %s VDD:0x%x\n", __func__, !!vdd ? 
"ON" : "OFF", vdd); if (!!vdd) __raw_writeb(BK3_EN_SDCARD_PWR, pwr_sd); else __raw_writeb(BK3_DIS_SDCARD_PWR, pwr_sd); iounmap(pwr_sd); } static struct mmc_spi_platform_data bk3_spi_mmc_data = { .detect_delay = 500, .powerup_msecs = 100, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, .caps = MMC_CAP_NONREMOVABLE, .setpower = bk3_mmc_spi_setpower, }; /************************************************************************* * SPI Bus - SD card access *************************************************************************/ static struct spi_board_info bk3_spi_board_info[] __initdata = { { .modalias = "mmc_spi", .platform_data = &bk3_spi_mmc_data, .max_speed_hz = 7.4E6, .bus_num = 0, .chip_select = 0, .mode = SPI_MODE_0, }, }; /* * This is a stub -> the FGPIO[3] pin is not connected on the schematic * The all work is performed automatically by !SPI_FRAME (SFRM1) and * goes through CPLD */ static struct gpiod_lookup_table bk3_spi_cs_gpio_table = { .dev_id = "spi0", .table = { GPIO_LOOKUP("F", 3, "cs", GPIO_ACTIVE_LOW), { }, }, }; static struct ep93xx_spi_info bk3_spi_master __initdata = { .use_dma = 1, }; /************************************************************************* * TS72XX support code *************************************************************************/ #if IS_ENABLED(CONFIG_FPGA_MGR_TS73XX) /* Relative to EP93XX_CS1_PHYS_BASE */ #define TS73XX_FPGA_LOADER_BASE 0x03c00000 static struct resource ts73xx_fpga_resources[] = { { .start = EP93XX_CS1_PHYS_BASE + TS73XX_FPGA_LOADER_BASE, .end = EP93XX_CS1_PHYS_BASE + TS73XX_FPGA_LOADER_BASE + 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device ts73xx_fpga_device = { .name = "ts73xx-fpga-mgr", .id = -1, .resource = ts73xx_fpga_resources, .num_resources = ARRAY_SIZE(ts73xx_fpga_resources), }; #endif /************************************************************************* * SPI Bus *************************************************************************/ static struct spi_board_info ts72xx_spi_devices[] __initdata = { { .modalias = "tmp122", .max_speed_hz = 2 * 1000 * 1000, .bus_num = 0, .chip_select = 0, }, }; static struct gpiod_lookup_table ts72xx_spi_cs_gpio_table = { .dev_id = "spi0", .table = { /* DIO_17 */ GPIO_LOOKUP("F", 2, "cs", GPIO_ACTIVE_LOW), { }, }, }; static struct ep93xx_spi_info ts72xx_spi_info __initdata = { /* Intentionally left blank */ }; static void __init ts72xx_init_machine(void) { ep93xx_init_devices(); ts72xx_register_flash(ts72xx_nand_parts, ARRAY_SIZE(ts72xx_nand_parts), is_ts9420_installed() ? 
EP93XX_CS7_PHYS_BASE : EP93XX_CS6_PHYS_BASE); platform_device_register(&ts72xx_rtc_device); platform_device_register(&ts72xx_wdt_device); ep93xx_register_eth(&ts72xx_eth_data, 1); #if IS_ENABLED(CONFIG_FPGA_MGR_TS73XX) if (board_is_ts7300()) platform_device_register(&ts73xx_fpga_device); #endif gpiod_add_lookup_table(&ts72xx_spi_cs_gpio_table); ep93xx_register_spi(&ts72xx_spi_info, ts72xx_spi_devices, ARRAY_SIZE(ts72xx_spi_devices)); } MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC") /* Maintainer: Lennert Buytenhek <[email protected]> */ .atag_offset = 0x100, .nr_irqs = NR_EP93XX_IRQS, .map_io = ts72xx_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = ts72xx_init_machine, .restart = ep93xx_restart, MACHINE_END /************************************************************************* * EP93xx I2S audio peripheral handling *************************************************************************/ static struct resource ep93xx_i2s_resource[] = { DEFINE_RES_MEM(EP93XX_I2S_PHYS_BASE, 0x100), DEFINE_RES_IRQ_NAMED(IRQ_EP93XX_SAI, "spilink i2s slave"), }; static struct platform_device ep93xx_i2s_device = { .name = "ep93xx-spilink-i2s", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_i2s_resource), .resource = ep93xx_i2s_resource, }; /************************************************************************* * BK3 support code *************************************************************************/ static struct mtd_partition bk3_nand_parts[] = { { .name = "System", .offset = 0x00000000, .size = 0x01e00000, }, { .name = "Data", .offset = 0x01e00000, .size = 0x05f20000 }, { .name = "RedBoot", .offset = 0x07d20000, .size = 0x002e0000, .mask_flags = MTD_WRITEABLE, /* force RO */ }, }; static void __init bk3_init_machine(void) { ep93xx_init_devices(); ts72xx_register_flash(bk3_nand_parts, ARRAY_SIZE(bk3_nand_parts), EP93XX_CS6_PHYS_BASE); ep93xx_register_eth(&ts72xx_eth_data, 1); gpiod_add_lookup_table(&bk3_spi_cs_gpio_table); ep93xx_register_spi(&bk3_spi_master, bk3_spi_board_info, ARRAY_SIZE(bk3_spi_board_info)); /* Configure ep93xx's I2S to use AC97 pins */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_I2SONAC97); platform_device_register(&ep93xx_i2s_device); } MACHINE_START(BK3, "Liebherr controller BK3.1") /* Maintainer: Lukasz Majewski <[email protected]> */ .atag_offset = 0x100, .nr_irqs = NR_EP93XX_IRQS, .map_io = ts72xx_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = bk3_init_machine, .restart = ep93xx_restart, MACHINE_END
linux-master
arch/arm/mach-ep93xx/ts72xx.c
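/*
 * Editor's note (not part of the kernel source): ts72xx_nand_hwcontrol() above
 * reshuffles the NAND control bits on their way to the CPLD latch: NCE moves
 * from bit 0 to bit 2, CLE stays on bit 1, ALE moves from bit 2 to bit 0. The
 * sketch below re-states just that bit shuffle so it can be checked in user
 * space; the NAND_* values match the usual kernel definitions, and the helper
 * name is an assumption for this illustration.
 */
#include <assert.h>

#define NAND_NCE	0x01
#define NAND_CLE	0x02
#define NAND_ALE	0x04

static unsigned char demo_ts72xx_ctrl_bits(unsigned int ctrl)
{
	unsigned char bits = 0;

	bits |= (ctrl & NAND_NCE) << 2;	/* bit 0 -> bit 2 */
	bits |= (ctrl & NAND_CLE);	/* bit 1 -> bit 1 */
	bits |= (ctrl & NAND_ALE) >> 2;	/* bit 2 -> bit 0 */
	return bits;
}

int main(void)
{
	assert(demo_ts72xx_ctrl_bits(NAND_NCE | NAND_CLE) == 0x06);
	assert(demo_ts72xx_ctrl_bits(NAND_NCE | NAND_ALE) == 0x05);
	return 0;
}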
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/arm/mach-ep93xx/core.c * Core routines for Cirrus EP93xx chips. * * Copyright (C) 2006 Lennert Buytenhek <[email protected]> * Copyright (C) 2007 Herbert Valerio Riedel <[email protected]> * * Thanks go to Michael Burian and Ray Lehtiniemi for their key * role in the ep93xx linux community. */ #define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/sys_soc.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/uaccess.h> #include <linux/termios.h> #include <linux/amba/bus.h> #include <linux/amba/serial.h> #include <linux/mtd/physmap.h> #include <linux/i2c.h> #include <linux/gpio/machine.h> #include <linux/spi/spi.h> #include <linux/export.h> #include <linux/irqchip/arm-vic.h> #include <linux/reboot.h> #include <linux/usb/ohci_pdriver.h> #include <linux/random.h> #include "hardware.h" #include <linux/platform_data/video-ep93xx.h> #include <linux/platform_data/keypad-ep93xx.h> #include <linux/platform_data/spi-ep93xx.h> #include <linux/soc/cirrus/ep93xx.h> #include "gpio-ep93xx.h" #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "soc.h" #include "irqs.h" /************************************************************************* * Static I/O mappings that are needed for all EP93xx platforms *************************************************************************/ static struct map_desc ep93xx_io_desc[] __initdata = { { .virtual = EP93XX_AHB_VIRT_BASE, .pfn = __phys_to_pfn(EP93XX_AHB_PHYS_BASE), .length = EP93XX_AHB_SIZE, .type = MT_DEVICE, }, { .virtual = EP93XX_APB_VIRT_BASE, .pfn = __phys_to_pfn(EP93XX_APB_PHYS_BASE), .length = EP93XX_APB_SIZE, .type = MT_DEVICE, }, }; void __init ep93xx_map_io(void) { iotable_init(ep93xx_io_desc, ARRAY_SIZE(ep93xx_io_desc)); } /************************************************************************* * EP93xx IRQ handling *************************************************************************/ void __init ep93xx_init_irq(void) { vic_init(EP93XX_VIC1_BASE, IRQ_EP93XX_VIC0, EP93XX_VIC1_VALID_IRQ_MASK, 0); vic_init(EP93XX_VIC2_BASE, IRQ_EP93XX_VIC1, EP93XX_VIC2_VALID_IRQ_MASK, 0); } /************************************************************************* * EP93xx System Controller Software Locked register handling *************************************************************************/ /* * syscon_swlock prevents anything else from writing to the syscon * block while a software locked register is being written. */ static DEFINE_SPINLOCK(syscon_swlock); void ep93xx_syscon_swlocked_write(unsigned int val, void __iomem *reg) { unsigned long flags; spin_lock_irqsave(&syscon_swlock, flags); __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); __raw_writel(val, reg); spin_unlock_irqrestore(&syscon_swlock, flags); } void ep93xx_devcfg_set_clear(unsigned int set_bits, unsigned int clear_bits) { unsigned long flags; unsigned int val; spin_lock_irqsave(&syscon_swlock, flags); val = __raw_readl(EP93XX_SYSCON_DEVCFG); val &= ~clear_bits; val |= set_bits; __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); __raw_writel(val, EP93XX_SYSCON_DEVCFG); spin_unlock_irqrestore(&syscon_swlock, flags); } /** * ep93xx_chip_revision() - returns the EP93xx chip revision * * See "platform.h" for more information. 
*/ unsigned int ep93xx_chip_revision(void) { unsigned int v; v = __raw_readl(EP93XX_SYSCON_SYSCFG); v &= EP93XX_SYSCON_SYSCFG_REV_MASK; v >>= EP93XX_SYSCON_SYSCFG_REV_SHIFT; return v; } EXPORT_SYMBOL_GPL(ep93xx_chip_revision); /************************************************************************* * EP93xx GPIO *************************************************************************/ static struct resource ep93xx_gpio_resource[] = { DEFINE_RES_MEM(EP93XX_GPIO_PHYS_BASE, 0xcc), DEFINE_RES_IRQ(IRQ_EP93XX_GPIO_AB), DEFINE_RES_IRQ(IRQ_EP93XX_GPIO0MUX), DEFINE_RES_IRQ(IRQ_EP93XX_GPIO1MUX), DEFINE_RES_IRQ(IRQ_EP93XX_GPIO2MUX), DEFINE_RES_IRQ(IRQ_EP93XX_GPIO3MUX), DEFINE_RES_IRQ(IRQ_EP93XX_GPIO4MUX), DEFINE_RES_IRQ(IRQ_EP93XX_GPIO5MUX), DEFINE_RES_IRQ(IRQ_EP93XX_GPIO6MUX), DEFINE_RES_IRQ(IRQ_EP93XX_GPIO7MUX), }; static struct platform_device ep93xx_gpio_device = { .name = "gpio-ep93xx", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_gpio_resource), .resource = ep93xx_gpio_resource, }; /************************************************************************* * EP93xx peripheral handling *************************************************************************/ #define EP93XX_UART_MCR_OFFSET (0x0100) static void ep93xx_uart_set_mctrl(struct amba_device *dev, void __iomem *base, unsigned int mctrl) { unsigned int mcr; mcr = 0; if (mctrl & TIOCM_RTS) mcr |= 2; if (mctrl & TIOCM_DTR) mcr |= 1; __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET); } static struct amba_pl010_data ep93xx_uart_data = { .set_mctrl = ep93xx_uart_set_mctrl, }; static AMBA_APB_DEVICE(uart1, "apb:uart1", 0x00041010, EP93XX_UART1_PHYS_BASE, { IRQ_EP93XX_UART1 }, &ep93xx_uart_data); static AMBA_APB_DEVICE(uart2, "apb:uart2", 0x00041010, EP93XX_UART2_PHYS_BASE, { IRQ_EP93XX_UART2 }, NULL); static AMBA_APB_DEVICE(uart3, "apb:uart3", 0x00041010, EP93XX_UART3_PHYS_BASE, { IRQ_EP93XX_UART3 }, &ep93xx_uart_data); static struct resource ep93xx_rtc_resource[] = { DEFINE_RES_MEM(EP93XX_RTC_PHYS_BASE, 0x10c), }; static struct platform_device ep93xx_rtc_device = { .name = "ep93xx-rtc", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_rtc_resource), .resource = ep93xx_rtc_resource, }; /************************************************************************* * EP93xx OHCI USB Host *************************************************************************/ static struct clk *ep93xx_ohci_host_clock; static int ep93xx_ohci_power_on(struct platform_device *pdev) { if (!ep93xx_ohci_host_clock) { ep93xx_ohci_host_clock = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(ep93xx_ohci_host_clock)) return PTR_ERR(ep93xx_ohci_host_clock); } return clk_prepare_enable(ep93xx_ohci_host_clock); } static void ep93xx_ohci_power_off(struct platform_device *pdev) { clk_disable(ep93xx_ohci_host_clock); } static struct usb_ohci_pdata ep93xx_ohci_pdata = { .power_on = ep93xx_ohci_power_on, .power_off = ep93xx_ohci_power_off, .power_suspend = ep93xx_ohci_power_off, }; static struct resource ep93xx_ohci_resources[] = { DEFINE_RES_MEM(EP93XX_USB_PHYS_BASE, 0x1000), DEFINE_RES_IRQ(IRQ_EP93XX_USB), }; static u64 ep93xx_ohci_dma_mask = DMA_BIT_MASK(32); static struct platform_device ep93xx_ohci_device = { .name = "ohci-platform", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_ohci_resources), .resource = ep93xx_ohci_resources, .dev = { .dma_mask = &ep93xx_ohci_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &ep93xx_ohci_pdata, }, }; /************************************************************************* * EP93xx physmap'ed flash 
*************************************************************************/ static struct physmap_flash_data ep93xx_flash_data; static struct resource ep93xx_flash_resource = { .flags = IORESOURCE_MEM, }; static struct platform_device ep93xx_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &ep93xx_flash_data, }, .num_resources = 1, .resource = &ep93xx_flash_resource, }; /** * ep93xx_register_flash() - Register the external flash device. * @width: bank width in octets * @start: resource start address * @size: resource size */ void __init ep93xx_register_flash(unsigned int width, resource_size_t start, resource_size_t size) { ep93xx_flash_data.width = width; ep93xx_flash_resource.start = start; ep93xx_flash_resource.end = start + size - 1; platform_device_register(&ep93xx_flash); } /************************************************************************* * EP93xx ethernet peripheral handling *************************************************************************/ static struct ep93xx_eth_data ep93xx_eth_data; static struct resource ep93xx_eth_resource[] = { DEFINE_RES_MEM(EP93XX_ETHERNET_PHYS_BASE, 0x10000), DEFINE_RES_IRQ(IRQ_EP93XX_ETHERNET), }; static u64 ep93xx_eth_dma_mask = DMA_BIT_MASK(32); static struct platform_device ep93xx_eth_device = { .name = "ep93xx-eth", .id = -1, .dev = { .platform_data = &ep93xx_eth_data, .coherent_dma_mask = DMA_BIT_MASK(32), .dma_mask = &ep93xx_eth_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_eth_resource), .resource = ep93xx_eth_resource, }; /** * ep93xx_register_eth - Register the built-in ethernet platform device. * @data: platform specific ethernet configuration (__initdata) * @copy_addr: flag indicating that the MAC address should be copied * from the IndAd registers (as programmed by the bootloader) */ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr) { if (copy_addr) memcpy_fromio(data->dev_addr, EP93XX_ETHERNET_BASE + 0x50, 6); ep93xx_eth_data = *data; platform_device_register(&ep93xx_eth_device); } /************************************************************************* * EP93xx i2c peripheral handling *************************************************************************/ /* All EP93xx devices use the same two GPIO pins for I2C bit-banging */ static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = { .dev_id = "i2c-gpio.0", .table = { /* Use local offsets on gpiochip/port "G" */ GPIO_LOOKUP_IDX("G", 1, NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), GPIO_LOOKUP_IDX("G", 0, NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), }, }; static struct platform_device ep93xx_i2c_device = { .name = "i2c-gpio", .id = 0, .dev = { .platform_data = NULL, }, }; /** * ep93xx_register_i2c - Register the i2c platform device. * @devices: platform specific i2c bus device information (__initdata) * @num: the number of devices on the i2c bus */ void __init ep93xx_register_i2c(struct i2c_board_info *devices, int num) { /* * FIXME: this just sets the two pins as non-opendrain, as no * platforms tries to do that anyway. Flag the applicable lines * as open drain in the GPIO_LOOKUP above and the driver or * gpiolib will handle open drain/open drain emulation as need * be. Right now i2c-gpio emulates open drain which is not * optimal. 
*/ __raw_writel((0 << 1) | (0 << 0), EP93XX_GPIO_EEDRIVE); i2c_register_board_info(0, devices, num); gpiod_add_lookup_table(&ep93xx_i2c_gpiod_table); platform_device_register(&ep93xx_i2c_device); } /************************************************************************* * EP93xx SPI peripheral handling *************************************************************************/ static struct ep93xx_spi_info ep93xx_spi_master_data; static struct resource ep93xx_spi_resources[] = { DEFINE_RES_MEM(EP93XX_SPI_PHYS_BASE, 0x18), DEFINE_RES_IRQ(IRQ_EP93XX_SSP), }; static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32); static struct platform_device ep93xx_spi_device = { .name = "ep93xx-spi", .id = 0, .dev = { .platform_data = &ep93xx_spi_master_data, .coherent_dma_mask = DMA_BIT_MASK(32), .dma_mask = &ep93xx_spi_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_spi_resources), .resource = ep93xx_spi_resources, }; /** * ep93xx_register_spi() - registers spi platform device * @info: ep93xx board specific spi master info (__initdata) * @devices: SPI devices to register (__initdata) * @num: number of SPI devices to register * * This function registers platform device for the EP93xx SPI controller and * also makes sure that SPI pins are muxed so that I2S is not using those pins. */ void __init ep93xx_register_spi(struct ep93xx_spi_info *info, struct spi_board_info *devices, int num) { /* * When SPI is used, we need to make sure that I2S is muxed off from * SPI pins. */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONSSP); ep93xx_spi_master_data = *info; spi_register_board_info(devices, num); platform_device_register(&ep93xx_spi_device); } /************************************************************************* * EP93xx LEDs *************************************************************************/ static const struct gpio_led ep93xx_led_pins[] __initconst = { { .name = "platform:grled", }, { .name = "platform:rdled", }, }; static const struct gpio_led_platform_data ep93xx_led_data __initconst = { .num_leds = ARRAY_SIZE(ep93xx_led_pins), .leds = ep93xx_led_pins, }; static struct gpiod_lookup_table ep93xx_leds_gpio_table = { .dev_id = "leds-gpio", .table = { /* Use local offsets on gpiochip/port "E" */ GPIO_LOOKUP_IDX("E", 0, NULL, 0, GPIO_ACTIVE_HIGH), GPIO_LOOKUP_IDX("E", 1, NULL, 1, GPIO_ACTIVE_HIGH), { } }, }; /************************************************************************* * EP93xx pwm peripheral handling *************************************************************************/ static struct resource ep93xx_pwm0_resource[] = { DEFINE_RES_MEM(EP93XX_PWM_PHYS_BASE, 0x10), }; static struct platform_device ep93xx_pwm0_device = { .name = "ep93xx-pwm", .id = 0, .num_resources = ARRAY_SIZE(ep93xx_pwm0_resource), .resource = ep93xx_pwm0_resource, }; static struct resource ep93xx_pwm1_resource[] = { DEFINE_RES_MEM(EP93XX_PWM_PHYS_BASE + 0x20, 0x10), }; static struct platform_device ep93xx_pwm1_device = { .name = "ep93xx-pwm", .id = 1, .num_resources = ARRAY_SIZE(ep93xx_pwm1_resource), .resource = ep93xx_pwm1_resource, }; void __init ep93xx_register_pwm(int pwm0, int pwm1) { if (pwm0) platform_device_register(&ep93xx_pwm0_device); /* NOTE: EP9307 does not have PWMOUT1 (pin EGPIO14) */ if (pwm1) platform_device_register(&ep93xx_pwm1_device); } int ep93xx_pwm_acquire_gpio(struct platform_device *pdev) { int err; if (pdev->id == 0) { err = 0; } else if (pdev->id == 1) { err = gpio_request(EP93XX_GPIO_LINE_EGPIO14, dev_name(&pdev->dev)); if (err) return err; err = 
gpio_direction_output(EP93XX_GPIO_LINE_EGPIO14, 0); if (err) goto fail; /* PWM 1 output on EGPIO[14] */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_PONG); } else { err = -ENODEV; } return err; fail: gpio_free(EP93XX_GPIO_LINE_EGPIO14); return err; } EXPORT_SYMBOL(ep93xx_pwm_acquire_gpio); void ep93xx_pwm_release_gpio(struct platform_device *pdev) { if (pdev->id == 1) { gpio_direction_input(EP93XX_GPIO_LINE_EGPIO14); gpio_free(EP93XX_GPIO_LINE_EGPIO14); /* EGPIO[14] used for GPIO */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_PONG); } } EXPORT_SYMBOL(ep93xx_pwm_release_gpio); /************************************************************************* * EP93xx video peripheral handling *************************************************************************/ static struct ep93xxfb_mach_info ep93xxfb_data; static struct resource ep93xx_fb_resource[] = { DEFINE_RES_MEM(EP93XX_RASTER_PHYS_BASE, 0x800), }; static struct platform_device ep93xx_fb_device = { .name = "ep93xx-fb", .id = -1, .dev = { .platform_data = &ep93xxfb_data, .coherent_dma_mask = DMA_BIT_MASK(32), .dma_mask = &ep93xx_fb_device.dev.coherent_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_fb_resource), .resource = ep93xx_fb_resource, }; /* The backlight use a single register in the framebuffer's register space */ #define EP93XX_RASTER_REG_BRIGHTNESS 0x20 static struct resource ep93xx_bl_resources[] = { DEFINE_RES_MEM(EP93XX_RASTER_PHYS_BASE + EP93XX_RASTER_REG_BRIGHTNESS, 0x04), }; static struct platform_device ep93xx_bl_device = { .name = "ep93xx-bl", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_bl_resources), .resource = ep93xx_bl_resources, }; /** * ep93xx_register_fb - Register the framebuffer platform device. * @data: platform specific framebuffer configuration (__initdata) */ void __init ep93xx_register_fb(struct ep93xxfb_mach_info *data) { ep93xxfb_data = *data; platform_device_register(&ep93xx_fb_device); platform_device_register(&ep93xx_bl_device); } /************************************************************************* * EP93xx matrix keypad peripheral handling *************************************************************************/ static struct ep93xx_keypad_platform_data ep93xx_keypad_data; static struct resource ep93xx_keypad_resource[] = { DEFINE_RES_MEM(EP93XX_KEY_MATRIX_PHYS_BASE, 0x0c), DEFINE_RES_IRQ(IRQ_EP93XX_KEY), }; static struct platform_device ep93xx_keypad_device = { .name = "ep93xx-keypad", .id = -1, .dev = { .platform_data = &ep93xx_keypad_data, }, .num_resources = ARRAY_SIZE(ep93xx_keypad_resource), .resource = ep93xx_keypad_resource, }; /** * ep93xx_register_keypad - Register the keypad platform device. 
* @data: platform specific keypad configuration (__initdata) */ void __init ep93xx_register_keypad(struct ep93xx_keypad_platform_data *data) { ep93xx_keypad_data = *data; platform_device_register(&ep93xx_keypad_device); } int ep93xx_keypad_acquire_gpio(struct platform_device *pdev) { int err; int i; for (i = 0; i < 8; i++) { err = gpio_request(EP93XX_GPIO_LINE_C(i), dev_name(&pdev->dev)); if (err) goto fail_gpio_c; err = gpio_request(EP93XX_GPIO_LINE_D(i), dev_name(&pdev->dev)); if (err) goto fail_gpio_d; } /* Enable the keypad controller; GPIO ports C and D used for keypad */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_KEYS | EP93XX_SYSCON_DEVCFG_GONK); return 0; fail_gpio_d: gpio_free(EP93XX_GPIO_LINE_C(i)); fail_gpio_c: for (--i; i >= 0; --i) { gpio_free(EP93XX_GPIO_LINE_C(i)); gpio_free(EP93XX_GPIO_LINE_D(i)); } return err; } EXPORT_SYMBOL(ep93xx_keypad_acquire_gpio); void ep93xx_keypad_release_gpio(struct platform_device *pdev) { int i; for (i = 0; i < 8; i++) { gpio_free(EP93XX_GPIO_LINE_C(i)); gpio_free(EP93XX_GPIO_LINE_D(i)); } /* Disable the keypad controller; GPIO ports C and D used for GPIO */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS | EP93XX_SYSCON_DEVCFG_GONK); } EXPORT_SYMBOL(ep93xx_keypad_release_gpio); /************************************************************************* * EP93xx I2S audio peripheral handling *************************************************************************/ static struct resource ep93xx_i2s_resource[] = { DEFINE_RES_MEM(EP93XX_I2S_PHYS_BASE, 0x100), DEFINE_RES_IRQ(IRQ_EP93XX_SAI), }; static struct platform_device ep93xx_i2s_device = { .name = "ep93xx-i2s", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_i2s_resource), .resource = ep93xx_i2s_resource, }; static struct platform_device ep93xx_pcm_device = { .name = "ep93xx-pcm-audio", .id = -1, }; void __init ep93xx_register_i2s(void) { platform_device_register(&ep93xx_i2s_device); platform_device_register(&ep93xx_pcm_device); } #define EP93XX_SYSCON_DEVCFG_I2S_MASK (EP93XX_SYSCON_DEVCFG_I2SONSSP | \ EP93XX_SYSCON_DEVCFG_I2SONAC97) #define EP93XX_I2SCLKDIV_MASK (EP93XX_SYSCON_I2SCLKDIV_ORIDE | \ EP93XX_SYSCON_I2SCLKDIV_SPOL) int ep93xx_i2s_acquire(void) { unsigned val; ep93xx_devcfg_set_clear(EP93XX_SYSCON_DEVCFG_I2SONAC97, EP93XX_SYSCON_DEVCFG_I2S_MASK); /* * This is potentially racy with the clock api for i2s_mclk, sclk and * lrclk. Since the i2s driver is the only user of those clocks we * rely on it to prevent parallel use of this function and the * clock api for the i2s clocks. */ val = __raw_readl(EP93XX_SYSCON_I2SCLKDIV); val &= ~EP93XX_I2SCLKDIV_MASK; val |= EP93XX_SYSCON_I2SCLKDIV_ORIDE | EP93XX_SYSCON_I2SCLKDIV_SPOL; ep93xx_syscon_swlocked_write(val, EP93XX_SYSCON_I2SCLKDIV); return 0; } EXPORT_SYMBOL(ep93xx_i2s_acquire); void ep93xx_i2s_release(void) { ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2S_MASK); } EXPORT_SYMBOL(ep93xx_i2s_release); /************************************************************************* * EP93xx AC97 audio peripheral handling *************************************************************************/ static struct resource ep93xx_ac97_resources[] = { DEFINE_RES_MEM(EP93XX_AAC_PHYS_BASE, 0xac), DEFINE_RES_IRQ(IRQ_EP93XX_AACINTR), }; static struct platform_device ep93xx_ac97_device = { .name = "ep93xx-ac97", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_ac97_resources), .resource = ep93xx_ac97_resources, }; void __init ep93xx_register_ac97(void) { /* * Make sure that the AC97 pins are not used by I2S. 
*/ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONAC97); platform_device_register(&ep93xx_ac97_device); platform_device_register(&ep93xx_pcm_device); } /************************************************************************* * EP93xx Watchdog *************************************************************************/ static struct resource ep93xx_wdt_resources[] = { DEFINE_RES_MEM(EP93XX_WATCHDOG_PHYS_BASE, 0x08), }; static struct platform_device ep93xx_wdt_device = { .name = "ep93xx-wdt", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_wdt_resources), .resource = ep93xx_wdt_resources, }; /************************************************************************* * EP93xx IDE *************************************************************************/ static struct resource ep93xx_ide_resources[] = { DEFINE_RES_MEM(EP93XX_IDE_PHYS_BASE, 0x38), DEFINE_RES_IRQ(IRQ_EP93XX_EXT3), }; static struct platform_device ep93xx_ide_device = { .name = "ep93xx-ide", .id = -1, .dev = { .dma_mask = &ep93xx_ide_device.dev.coherent_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(ep93xx_ide_resources), .resource = ep93xx_ide_resources, }; void __init ep93xx_register_ide(void) { platform_device_register(&ep93xx_ide_device); } int ep93xx_ide_acquire_gpio(struct platform_device *pdev) { int err; int i; err = gpio_request(EP93XX_GPIO_LINE_EGPIO2, dev_name(&pdev->dev)); if (err) return err; err = gpio_request(EP93XX_GPIO_LINE_EGPIO15, dev_name(&pdev->dev)); if (err) goto fail_egpio15; for (i = 2; i < 8; i++) { err = gpio_request(EP93XX_GPIO_LINE_E(i), dev_name(&pdev->dev)); if (err) goto fail_gpio_e; } for (i = 4; i < 8; i++) { err = gpio_request(EP93XX_GPIO_LINE_G(i), dev_name(&pdev->dev)); if (err) goto fail_gpio_g; } for (i = 0; i < 8; i++) { err = gpio_request(EP93XX_GPIO_LINE_H(i), dev_name(&pdev->dev)); if (err) goto fail_gpio_h; } /* GPIO ports E[7:2], G[7:4] and H used by IDE */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_EONIDE | EP93XX_SYSCON_DEVCFG_GONIDE | EP93XX_SYSCON_DEVCFG_HONIDE); return 0; fail_gpio_h: for (--i; i >= 0; --i) gpio_free(EP93XX_GPIO_LINE_H(i)); i = 8; fail_gpio_g: for (--i; i >= 4; --i) gpio_free(EP93XX_GPIO_LINE_G(i)); i = 8; fail_gpio_e: for (--i; i >= 2; --i) gpio_free(EP93XX_GPIO_LINE_E(i)); gpio_free(EP93XX_GPIO_LINE_EGPIO15); fail_egpio15: gpio_free(EP93XX_GPIO_LINE_EGPIO2); return err; } EXPORT_SYMBOL(ep93xx_ide_acquire_gpio); void ep93xx_ide_release_gpio(struct platform_device *pdev) { int i; for (i = 2; i < 8; i++) gpio_free(EP93XX_GPIO_LINE_E(i)); for (i = 4; i < 8; i++) gpio_free(EP93XX_GPIO_LINE_G(i)); for (i = 0; i < 8; i++) gpio_free(EP93XX_GPIO_LINE_H(i)); gpio_free(EP93XX_GPIO_LINE_EGPIO15); gpio_free(EP93XX_GPIO_LINE_EGPIO2); /* GPIO ports E[7:2], G[7:4] and H used by GPIO */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_EONIDE | EP93XX_SYSCON_DEVCFG_GONIDE | EP93XX_SYSCON_DEVCFG_HONIDE); } EXPORT_SYMBOL(ep93xx_ide_release_gpio); /************************************************************************* * EP93xx ADC *************************************************************************/ static struct resource ep93xx_adc_resources[] = { DEFINE_RES_MEM(EP93XX_ADC_PHYS_BASE, 0x28), DEFINE_RES_IRQ(IRQ_EP93XX_TOUCH), }; static struct platform_device ep93xx_adc_device = { .name = "ep93xx-adc", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_adc_resources), .resource = ep93xx_adc_resources, }; void __init ep93xx_register_adc(void) { /* Power up ADC, deactivate Touch Screen Controller */ ep93xx_devcfg_set_clear(EP93XX_SYSCON_DEVCFG_TIN, 
EP93XX_SYSCON_DEVCFG_ADCPD); platform_device_register(&ep93xx_adc_device); } /************************************************************************* * EP93xx Security peripheral *************************************************************************/ /* * The Maverick Key is 256 bits of micro fuses blown at the factory during * manufacturing to uniquely identify a part. * * See: http://arm.cirrus.com/forum/viewtopic.php?t=486&highlight=maverick+key */ #define EP93XX_SECURITY_REG(x) (EP93XX_SECURITY_BASE + (x)) #define EP93XX_SECURITY_SECFLG EP93XX_SECURITY_REG(0x2400) #define EP93XX_SECURITY_FUSEFLG EP93XX_SECURITY_REG(0x2410) #define EP93XX_SECURITY_UNIQID EP93XX_SECURITY_REG(0x2440) #define EP93XX_SECURITY_UNIQCHK EP93XX_SECURITY_REG(0x2450) #define EP93XX_SECURITY_UNIQVAL EP93XX_SECURITY_REG(0x2460) #define EP93XX_SECURITY_SECID1 EP93XX_SECURITY_REG(0x2500) #define EP93XX_SECURITY_SECID2 EP93XX_SECURITY_REG(0x2504) #define EP93XX_SECURITY_SECCHK1 EP93XX_SECURITY_REG(0x2520) #define EP93XX_SECURITY_SECCHK2 EP93XX_SECURITY_REG(0x2524) #define EP93XX_SECURITY_UNIQID2 EP93XX_SECURITY_REG(0x2700) #define EP93XX_SECURITY_UNIQID3 EP93XX_SECURITY_REG(0x2704) #define EP93XX_SECURITY_UNIQID4 EP93XX_SECURITY_REG(0x2708) #define EP93XX_SECURITY_UNIQID5 EP93XX_SECURITY_REG(0x270c) static char ep93xx_soc_id[33]; static const char __init *ep93xx_get_soc_id(void) { unsigned int id, id2, id3, id4, id5; if (__raw_readl(EP93XX_SECURITY_UNIQVAL) != 1) return "bad Hamming code"; id = __raw_readl(EP93XX_SECURITY_UNIQID); id2 = __raw_readl(EP93XX_SECURITY_UNIQID2); id3 = __raw_readl(EP93XX_SECURITY_UNIQID3); id4 = __raw_readl(EP93XX_SECURITY_UNIQID4); id5 = __raw_readl(EP93XX_SECURITY_UNIQID5); if (id != id2) return "invalid"; /* Toss the unique ID into the entropy pool */ add_device_randomness(&id2, 4); add_device_randomness(&id3, 4); add_device_randomness(&id4, 4); add_device_randomness(&id5, 4); snprintf(ep93xx_soc_id, sizeof(ep93xx_soc_id), "%08x%08x%08x%08x", id2, id3, id4, id5); return ep93xx_soc_id; } static const char __init *ep93xx_get_soc_rev(void) { int rev = ep93xx_chip_revision(); switch (rev) { case EP93XX_CHIP_REV_D0: return "D0"; case EP93XX_CHIP_REV_D1: return "D1"; case EP93XX_CHIP_REV_E0: return "E0"; case EP93XX_CHIP_REV_E1: return "E1"; case EP93XX_CHIP_REV_E2: return "E2"; default: return "unknown"; } } static const char __init *ep93xx_get_machine_name(void) { return kasprintf(GFP_KERNEL,"%s", machine_desc->name); } static struct device __init *ep93xx_init_soc(void) { struct soc_device_attribute *soc_dev_attr; struct soc_device *soc_dev; soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return NULL; soc_dev_attr->machine = ep93xx_get_machine_name(); soc_dev_attr->family = "Cirrus Logic EP93xx"; soc_dev_attr->revision = ep93xx_get_soc_rev(); soc_dev_attr->soc_id = ep93xx_get_soc_id(); soc_dev = soc_device_register(soc_dev_attr); if (IS_ERR(soc_dev)) { kfree(soc_dev_attr->machine); kfree(soc_dev_attr); return NULL; } return soc_device_to_device(soc_dev); } struct device __init *ep93xx_init_devices(void) { struct device *parent; /* Disallow access to MaverickCrunch initially */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA); /* Default all ports to GPIO */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS | EP93XX_SYSCON_DEVCFG_GONK | EP93XX_SYSCON_DEVCFG_EONIDE | EP93XX_SYSCON_DEVCFG_GONIDE | EP93XX_SYSCON_DEVCFG_HONIDE); parent = ep93xx_init_soc(); /* Get the GPIO working early, other devices need it */ platform_device_register(&ep93xx_gpio_device); 
amba_device_register(&uart1_device, &iomem_resource); amba_device_register(&uart2_device, &iomem_resource); amba_device_register(&uart3_device, &iomem_resource); platform_device_register(&ep93xx_rtc_device); platform_device_register(&ep93xx_ohci_device); platform_device_register(&ep93xx_wdt_device); gpiod_add_lookup_table(&ep93xx_leds_gpio_table); gpio_led_register_device(-1, &ep93xx_led_data); return parent; } void ep93xx_restart(enum reboot_mode mode, const char *cmd) { /* * Set then clear the SWRST bit to initiate a software reset */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_SWRST); ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_SWRST); while (1) ; }
linux-master
arch/arm/mach-ep93xx/core.c
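core.c above implements the EP93xx "software locked" register convention: every write to a locked syscon register must be immediately preceded by writing 0xaa to the SWLOCK register, and ep93xx_syscon_swlocked_write()/ep93xx_devcfg_set_clear() serialize the whole unlock-then-write (or read-modify-write) sequence under one spinlock. A minimal user-space model of that pattern, assuming plain variables in place of MMIO registers and a pthread mutex in place of the kernel spinlock (the bit values in main() are arbitrary examples, not real DEVCFG bits):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t swlock_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t reg_swlock;   /* stands in for EP93XX_SYSCON_SWLOCK */
static uint32_t reg_devcfg;   /* stands in for EP93XX_SYSCON_DEVCFG */

static void swlocked_write(uint32_t val, uint32_t *reg)
{
    pthread_mutex_lock(&swlock_mutex);
    reg_swlock = 0xaa;        /* unlock must immediately precede the write */
    *reg = val;
    pthread_mutex_unlock(&swlock_mutex);
}

static void devcfg_set_clear(uint32_t set_bits, uint32_t clear_bits)
{
    pthread_mutex_lock(&swlock_mutex);
    uint32_t val = reg_devcfg;    /* read-modify-write under the same lock */
    val &= ~clear_bits;
    val |= set_bits;
    reg_swlock = 0xaa;
    reg_devcfg = val;
    pthread_mutex_unlock(&swlock_mutex);
}

int main(void)
{
    devcfg_set_clear(1u << 6, 1u << 3);   /* arbitrary example bits */
    swlocked_write(0x10003, &reg_devcfg); /* arbitrary example value */
    printf("DEVCFG = 0x%08x\n", reg_devcfg);
    return 0;
}

Holding the lock across both the unlock write and the data write is the point of the design: another writer slipping in between the two would re-arm the hardware lock and silently drop the update.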
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/arm/mach-ep93xx/dma.c * * Platform support code for the EP93xx dmaengine driver. * * Copyright (C) 2011 Mika Westerberg * * This work is based on the original dma-m2p implementation with * following copyrights: * * Copyright (C) 2006 Lennert Buytenhek <[email protected]> * Copyright (C) 2006 Applied Data Systems * Copyright (C) 2009 Ryan Mallon <[email protected]> */ #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/platform_data/dma-ep93xx.h> #include "hardware.h" #include "soc.h" #define DMA_CHANNEL(_name, _base, _irq) \ { .name = (_name), .base = (_base), .irq = (_irq) } /* * DMA M2P channels. * * On the EP93xx chip the following peripherals my be allocated to the 10 * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive). * * I2S contains 3 Tx and 3 Rx DMA Channels * AAC contains 3 Tx and 3 Rx DMA Channels * UART1 contains 1 Tx and 1 Rx DMA Channels * UART2 contains 1 Tx and 1 Rx DMA Channels * UART3 contains 1 Tx and 1 Rx DMA Channels * IrDA contains 1 Tx and 1 Rx DMA Channels * * Registers are mapped statically in ep93xx_map_io(). */ static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = { DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0), DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1), DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2), DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3), DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4), DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5), DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6), DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7), DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8), DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9), }; static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = { .channels = ep93xx_dma_m2p_channels, .num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels), }; static u64 ep93xx_dma_m2p_mask = DMA_BIT_MASK(32); static struct platform_device ep93xx_dma_m2p_device = { .name = "ep93xx-dma-m2p", .id = -1, .dev = { .platform_data = &ep93xx_dma_m2p_data, .dma_mask = &ep93xx_dma_m2p_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, }; /* * DMA M2M channels. * * There are 2 M2M channels which support memcpy/memset and in addition simple * hardware requests from/to SSP and IDE. We do not implement an external * hardware requests. * * Registers are mapped statically in ep93xx_map_io(). */ static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = { DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0), DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1), }; static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = { .channels = ep93xx_dma_m2m_channels, .num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels), }; static u64 ep93xx_dma_m2m_mask = DMA_BIT_MASK(32); static struct platform_device ep93xx_dma_m2m_device = { .name = "ep93xx-dma-m2m", .id = -1, .dev = { .platform_data = &ep93xx_dma_m2m_data, .dma_mask = &ep93xx_dma_m2m_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, }; static int __init ep93xx_dma_init(void) { platform_device_register(&ep93xx_dma_m2p_device); platform_device_register(&ep93xx_dma_m2m_device); return 0; } arch_initcall(ep93xx_dma_init);
linux-master
arch/arm/mach-ep93xx/dma.c
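dma.c above describes the channel layout as a static descriptor table built with the DMA_CHANNEL() macro, mapping a channel name to its register base and IRQ. The sketch below shows the same table-of-descriptors idea with a by-name lookup; it is not the dmaengine API, and the base addresses and IRQ numbers are placeholders rather than the real EP93xx values:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct chan_data { const char *name; uint32_t base; int irq; };

#define DMA_CHANNEL(_name, _base, _irq) \
    { .name = (_name), .base = (_base), .irq = (_irq) }

/* Placeholder base/irq values, for illustration only. */
static const struct chan_data m2p_channels[] = {
    DMA_CHANNEL("m2p0", 0x80000000, 17),
    DMA_CHANNEL("m2p1", 0x80000040, 18),
    DMA_CHANNEL("m2p2", 0x80000080, 19),
};

static const struct chan_data *find_channel(const char *name)
{
    for (size_t i = 0; i < sizeof(m2p_channels) / sizeof(m2p_channels[0]); i++)
        if (!strcmp(m2p_channels[i].name, name))
            return &m2p_channels[i];
    return NULL;
}

int main(void)
{
    const struct chan_data *c = find_channel("m2p1");

    if (c)
        printf("%s: base=0x%08x irq=%d\n", c->name, c->base, c->irq);
    return 0;
}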
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/init.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/sched_clock.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <asm/mach/time.h> #include "soc.h" #include "platform.h" /************************************************************************* * Timer handling for EP93xx ************************************************************************* * The ep93xx has four internal timers. Timers 1, 2 (both 16 bit) and * 3 (32 bit) count down at 508 kHz, are self-reloading, and can generate * an interrupt on underflow. Timer 4 (40 bit) counts down at 983.04 kHz, * is free-running, and can't generate interrupts. * * The 508 kHz timers are ideal for use for the timer interrupt, as the * most common values of HZ divide 508 kHz nicely. We pick the 32 bit * timer (timer 3) to get as long sleep intervals as possible when using * CONFIG_NO_HZ. * * The higher clock rate of timer 4 makes it a better choice than the * other timers for use as clock source and for sched_clock(), providing * a stable 40 bit time base. ************************************************************************* */ #define EP93XX_TIMER_REG(x) (EP93XX_TIMER_BASE + (x)) #define EP93XX_TIMER1_LOAD EP93XX_TIMER_REG(0x00) #define EP93XX_TIMER1_VALUE EP93XX_TIMER_REG(0x04) #define EP93XX_TIMER1_CONTROL EP93XX_TIMER_REG(0x08) #define EP93XX_TIMER123_CONTROL_ENABLE (1 << 7) #define EP93XX_TIMER123_CONTROL_MODE (1 << 6) #define EP93XX_TIMER123_CONTROL_CLKSEL (1 << 3) #define EP93XX_TIMER1_CLEAR EP93XX_TIMER_REG(0x0c) #define EP93XX_TIMER2_LOAD EP93XX_TIMER_REG(0x20) #define EP93XX_TIMER2_VALUE EP93XX_TIMER_REG(0x24) #define EP93XX_TIMER2_CONTROL EP93XX_TIMER_REG(0x28) #define EP93XX_TIMER2_CLEAR EP93XX_TIMER_REG(0x2c) #define EP93XX_TIMER4_VALUE_LOW EP93XX_TIMER_REG(0x60) #define EP93XX_TIMER4_VALUE_HIGH EP93XX_TIMER_REG(0x64) #define EP93XX_TIMER4_VALUE_HIGH_ENABLE (1 << 8) #define EP93XX_TIMER3_LOAD EP93XX_TIMER_REG(0x80) #define EP93XX_TIMER3_VALUE EP93XX_TIMER_REG(0x84) #define EP93XX_TIMER3_CONTROL EP93XX_TIMER_REG(0x88) #define EP93XX_TIMER3_CLEAR EP93XX_TIMER_REG(0x8c) #define EP93XX_TIMER123_RATE 508469 #define EP93XX_TIMER4_RATE 983040 static u64 notrace ep93xx_read_sched_clock(void) { u64 ret; ret = readl(EP93XX_TIMER4_VALUE_LOW); ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32); return ret; } static u64 ep93xx_clocksource_read(struct clocksource *c) { u64 ret; ret = readl(EP93XX_TIMER4_VALUE_LOW); ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32); return (u64) ret; } static int ep93xx_clkevt_set_next_event(unsigned long next, struct clock_event_device *evt) { /* Default mode: periodic, off, 508 kHz */ u32 tmode = EP93XX_TIMER123_CONTROL_MODE | EP93XX_TIMER123_CONTROL_CLKSEL; /* Clear timer */ writel(tmode, EP93XX_TIMER3_CONTROL); /* Set next event */ writel(next, EP93XX_TIMER3_LOAD); writel(tmode | EP93XX_TIMER123_CONTROL_ENABLE, EP93XX_TIMER3_CONTROL); return 0; } static int ep93xx_clkevt_shutdown(struct clock_event_device *evt) { /* Disable timer */ writel(0, EP93XX_TIMER3_CONTROL); return 0; } static struct clock_event_device ep93xx_clockevent = { .name = "timer1", .features = CLOCK_EVT_FEAT_ONESHOT, .set_state_shutdown = ep93xx_clkevt_shutdown, .set_state_oneshot = ep93xx_clkevt_shutdown, .tick_resume = ep93xx_clkevt_shutdown, .set_next_event = ep93xx_clkevt_set_next_event, .rating = 300, }; static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id) 
{ struct clock_event_device *evt = dev_id; /* Writing any value clears the timer interrupt */ writel(1, EP93XX_TIMER3_CLEAR); evt->event_handler(evt); return IRQ_HANDLED; } void __init ep93xx_timer_init(void) { int irq = IRQ_EP93XX_TIMER3; unsigned long flags = IRQF_TIMER | IRQF_IRQPOLL; /* Enable and register clocksource and sched_clock on timer 4 */ writel(EP93XX_TIMER4_VALUE_HIGH_ENABLE, EP93XX_TIMER4_VALUE_HIGH); clocksource_mmio_init(NULL, "timer4", EP93XX_TIMER4_RATE, 200, 40, ep93xx_clocksource_read); sched_clock_register(ep93xx_read_sched_clock, 40, EP93XX_TIMER4_RATE); /* Set up clockevent on timer 3 */ if (request_irq(irq, ep93xx_timer_interrupt, flags, "ep93xx timer", &ep93xx_clockevent)) pr_err("Failed to request irq %d (ep93xx timer)\n", irq); clockevents_config_and_register(&ep93xx_clockevent, EP93XX_TIMER123_RATE, 1, 0xffffffffU); }
linux-master
arch/arm/mach-ep93xx/timer-ep93xx.c
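The timer comment block above explains that timer 4 is a free-running 40-bit counter at 983.04 kHz, and the clocksource read combines a full 32-bit low word with only the low 8 bits of the high word. A minimal sketch of that combine plus an overflow-safe ticks-to-nanoseconds conversion, assuming plain variables in place of the MMIO reads (the kernel itself converts via a mult/shift pair, not a division; the split division here is just to keep a 64-bit illustration from overflowing):

#include <stdio.h>
#include <stdint.h>

#define TIMER4_RATE 983040u            /* Hz, as EP93XX_TIMER4_RATE above */

static uint32_t fake_value_low  = 0x12345678;  /* stand-in for VALUE_LOW */
static uint32_t fake_value_high = 0x0000010f;  /* only bits [7:0] are count bits */

static uint64_t timer4_read(void)
{
    uint64_t v = fake_value_low;

    v |= (uint64_t)(fake_value_high & 0xff) << 32;   /* 40-bit counter */
    return v;
}

int main(void)
{
    uint64_t ticks = timer4_read();
    /* One tick at 983.04 kHz is 1e9 / 983040 ns, roughly 1017.25 ns.
     * Split the conversion so ticks * 1e9 cannot overflow 64 bits. */
    uint64_t ns = (ticks / TIMER4_RATE) * 1000000000ull
                + (ticks % TIMER4_RATE) * 1000000000ull / TIMER4_RATE;

    printf("ticks=%llu ns=%llu\n",
           (unsigned long long)ticks, (unsigned long long)ns);
    return 0;
}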
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/arm/mach-ep93xx/vision_ep9307.c * Vision Engraving Systems EP9307 SoM support. * * Copyright (C) 2008-2011 Vision Engraving Systems * H Hartley Sweeten <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/gpio/machine.h> #include <linux/fb.h> #include <linux/io.h> #include <linux/mtd/partitions.h> #include <linux/i2c.h> #include <linux/platform_data/pca953x.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/spi/mmc_spi.h> #include <linux/mmc/host.h> #include <sound/cs4271.h> #include "hardware.h" #include <linux/platform_data/video-ep93xx.h> #include <linux/platform_data/spi-ep93xx.h> #include "gpio-ep93xx.h" #include <asm/mach-types.h> #include <asm/mach/map.h> #include <asm/mach/arch.h> #include "soc.h" /************************************************************************* * Static I/O mappings for the FPGA *************************************************************************/ #define VISION_PHYS_BASE EP93XX_CS7_PHYS_BASE #define VISION_VIRT_BASE 0xfebff000 static struct map_desc vision_io_desc[] __initdata = { { .virtual = VISION_VIRT_BASE, .pfn = __phys_to_pfn(VISION_PHYS_BASE), .length = SZ_4K, .type = MT_DEVICE, }, }; static void __init vision_map_io(void) { ep93xx_map_io(); iotable_init(vision_io_desc, ARRAY_SIZE(vision_io_desc)); } /************************************************************************* * Ethernet *************************************************************************/ static struct ep93xx_eth_data vision_eth_data __initdata = { .phy_id = 1, }; /************************************************************************* * Framebuffer *************************************************************************/ #define VISION_LCD_ENABLE EP93XX_GPIO_LINE_EGPIO1 static int vision_lcd_setup(struct platform_device *pdev) { int err; err = gpio_request_one(VISION_LCD_ENABLE, GPIOF_INIT_HIGH, dev_name(&pdev->dev)); if (err) return err; ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_RAS | EP93XX_SYSCON_DEVCFG_RASONP3 | EP93XX_SYSCON_DEVCFG_EXVC); return 0; } static void vision_lcd_teardown(struct platform_device *pdev) { gpio_free(VISION_LCD_ENABLE); } static void vision_lcd_blank(int blank_mode, struct fb_info *info) { if (blank_mode) gpio_set_value(VISION_LCD_ENABLE, 0); else gpio_set_value(VISION_LCD_ENABLE, 1); } static struct ep93xxfb_mach_info ep93xxfb_info __initdata = { .flags = EP93XXFB_USE_SDCSN0 | EP93XXFB_PCLK_FALLING, .setup = vision_lcd_setup, .teardown = vision_lcd_teardown, .blank = vision_lcd_blank, }; /************************************************************************* * GPIO Expanders *************************************************************************/ #define PCA9539_74_GPIO_BASE (EP93XX_GPIO_LINE_MAX + 1) #define PCA9539_75_GPIO_BASE (PCA9539_74_GPIO_BASE + 16) #define PCA9539_76_GPIO_BASE (PCA9539_75_GPIO_BASE + 16) #define PCA9539_77_GPIO_BASE (PCA9539_76_GPIO_BASE + 16) static struct pca953x_platform_data pca953x_74_gpio_data = { .gpio_base = PCA9539_74_GPIO_BASE, .irq_base = EP93XX_BOARD_IRQ(0), }; static struct pca953x_platform_data pca953x_75_gpio_data = { .gpio_base = PCA9539_75_GPIO_BASE, .irq_base = -1, }; static struct pca953x_platform_data pca953x_76_gpio_data = { .gpio_base = PCA9539_76_GPIO_BASE, .irq_base = -1, }; static struct pca953x_platform_data pca953x_77_gpio_data = { .gpio_base 
= PCA9539_77_GPIO_BASE, .irq_base = -1, }; /************************************************************************* * I2C Bus *************************************************************************/ static struct i2c_board_info vision_i2c_info[] __initdata = { { I2C_BOARD_INFO("isl1208", 0x6f), .irq = IRQ_EP93XX_EXT1, }, { I2C_BOARD_INFO("pca9539", 0x74), .platform_data = &pca953x_74_gpio_data, }, { I2C_BOARD_INFO("pca9539", 0x75), .platform_data = &pca953x_75_gpio_data, }, { I2C_BOARD_INFO("pca9539", 0x76), .platform_data = &pca953x_76_gpio_data, }, { I2C_BOARD_INFO("pca9539", 0x77), .platform_data = &pca953x_77_gpio_data, }, }; /************************************************************************* * SPI CS4271 Audio Codec *************************************************************************/ static struct cs4271_platform_data vision_cs4271_data = { .gpio_nreset = EP93XX_GPIO_LINE_H(2), }; /************************************************************************* * SPI Flash *************************************************************************/ static struct mtd_partition vision_spi_flash_partitions[] = { { .name = "SPI bootstrap", .offset = 0, .size = SZ_4K, }, { .name = "Bootstrap config", .offset = MTDPART_OFS_APPEND, .size = SZ_4K, }, { .name = "System config", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct flash_platform_data vision_spi_flash_data = { .name = "SPI Flash", .parts = vision_spi_flash_partitions, .nr_parts = ARRAY_SIZE(vision_spi_flash_partitions), }; /************************************************************************* * SPI SD/MMC host *************************************************************************/ static struct mmc_spi_platform_data vision_spi_mmc_data = { .detect_delay = 100, .powerup_msecs = 100, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, .caps2 = MMC_CAP2_RO_ACTIVE_HIGH, }; static struct gpiod_lookup_table vision_spi_mmc_gpio_table = { .dev_id = "mmc_spi.2", /* "mmc_spi @ CS2 */ .table = { /* Card detect */ GPIO_LOOKUP_IDX("B", 7, NULL, 0, GPIO_ACTIVE_LOW), /* Write protect */ GPIO_LOOKUP_IDX("F", 0, NULL, 1, GPIO_ACTIVE_HIGH), { }, }, }; /************************************************************************* * SPI Bus *************************************************************************/ static struct spi_board_info vision_spi_board_info[] __initdata = { { .modalias = "cs4271", .platform_data = &vision_cs4271_data, .max_speed_hz = 6000000, .bus_num = 0, .chip_select = 0, .mode = SPI_MODE_3, }, { .modalias = "sst25l", .platform_data = &vision_spi_flash_data, .max_speed_hz = 20000000, .bus_num = 0, .chip_select = 1, .mode = SPI_MODE_3, }, { .modalias = "mmc_spi", .platform_data = &vision_spi_mmc_data, .max_speed_hz = 20000000, .bus_num = 0, .chip_select = 2, .mode = SPI_MODE_3, }, }; static struct gpiod_lookup_table vision_spi_cs_gpio_table = { .dev_id = "spi0", .table = { GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW), GPIO_LOOKUP_IDX("A", 7, "cs", 1, GPIO_ACTIVE_LOW), GPIO_LOOKUP_IDX("G", 2, "cs", 2, GPIO_ACTIVE_LOW), { }, }, }; static struct ep93xx_spi_info vision_spi_master __initdata = { .use_dma = 1, }; /************************************************************************* * I2S Audio *************************************************************************/ static struct platform_device vision_audio_device = { .name = "edb93xx-audio", .id = -1, }; static void __init vision_register_i2s(void) { ep93xx_register_i2s(); platform_device_register(&vision_audio_device); } 
/************************************************************************* * Machine Initialization *************************************************************************/ static void __init vision_init_machine(void) { ep93xx_init_devices(); ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_64M); ep93xx_register_eth(&vision_eth_data, 1); ep93xx_register_fb(&ep93xxfb_info); ep93xx_register_pwm(1, 0); /* * Request the gpio expander's interrupt gpio line now to prevent * the kernel from doing a WARN in gpiolib:gpio_ensure_requested(). */ if (gpio_request_one(EP93XX_GPIO_LINE_F(7), GPIOF_DIR_IN, "pca9539:74")) pr_warn("cannot request interrupt gpio for pca9539:74\n"); vision_i2c_info[1].irq = gpio_to_irq(EP93XX_GPIO_LINE_F(7)); ep93xx_register_i2c(vision_i2c_info, ARRAY_SIZE(vision_i2c_info)); gpiod_add_lookup_table(&vision_spi_mmc_gpio_table); gpiod_add_lookup_table(&vision_spi_cs_gpio_table); ep93xx_register_spi(&vision_spi_master, vision_spi_board_info, ARRAY_SIZE(vision_spi_board_info)); vision_register_i2s(); } MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307") /* Maintainer: H Hartley Sweeten <[email protected]> */ .atag_offset = 0x100, .nr_irqs = NR_EP93XX_IRQS + EP93XX_BOARD_IRQS, .map_io = vision_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = vision_init_machine, .restart = ep93xx_restart, MACHINE_END
linux-master
arch/arm/mach-ep93xx/vision_ep9307.c
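vision_ep9307.c above assigns each PCA9539 expander a legacy global-GPIO window starting right after the SoC's own lines: the first expander base is EP93XX_GPIO_LINE_MAX + 1 and each subsequent 16-line expander adds 16. A tiny sketch of that numbering scheme; the SOC_GPIO_LINE_MAX value below is an assumption for illustration, since the real EP93XX_GPIO_LINE_MAX is defined elsewhere:

#include <stdio.h>

#define SOC_GPIO_LINE_MAX 63               /* assumed last on-SoC GPIO number */
#define EXPANDER_BASE(n)  (SOC_GPIO_LINE_MAX + 1 + (n) * 16)

static int expander_gpio(int expander, int pin)
{
    return EXPANDER_BASE(expander) + pin;  /* pin 0..15 on expander 0..3 */
}

int main(void)
{
    /* e.g. pin 5 on the second PCA9539 (i2c address 0x75) */
    printf("global gpio = %d\n", expander_gpio(1, 5));
    return 0;
}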
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * Copyright (c) 2010, Code Aurora Forum. All rights reserved. * Copyright (c) 2014 The Linux Foundation. All rights reserved. */ #include <linux/init.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/smp.h> #include <linux/io.h> #include <linux/firmware/qcom/qcom_scm.h> #include <asm/smp_plat.h> #define VDD_SC1_ARRAY_CLAMP_GFS_CTL 0x35a0 #define SCSS_CPU1CORE_RESET 0x2d80 #define SCSS_DBG_STATUS_CORE_PWRDUP 0x2e64 #define APCS_CPU_PWR_CTL 0x04 #define PLL_CLAMP BIT(8) #define CORE_PWRD_UP BIT(7) #define COREPOR_RST BIT(5) #define CORE_RST BIT(4) #define L2DT_SLP BIT(3) #define CORE_MEM_CLAMP BIT(1) #define CLAMP BIT(0) #define APC_PWR_GATE_CTL 0x14 #define BHS_CNT_SHIFT 24 #define LDO_PWR_DWN_SHIFT 16 #define LDO_BYP_SHIFT 8 #define BHS_SEG_SHIFT 1 #define BHS_EN BIT(0) #define APCS_SAW2_VCTL 0x14 #define APCS_SAW2_2_VCTL 0x1c extern void secondary_startup_arm(void); #ifdef CONFIG_HOTPLUG_CPU static void qcom_cpu_die(unsigned int cpu) { wfi(); } #endif static int scss_release_secondary(unsigned int cpu) { struct device_node *node; void __iomem *base; node = of_find_compatible_node(NULL, NULL, "qcom,gcc-msm8660"); if (!node) { pr_err("%s: can't find node\n", __func__); return -ENXIO; } base = of_iomap(node, 0); of_node_put(node); if (!base) return -ENOMEM; writel_relaxed(0, base + VDD_SC1_ARRAY_CLAMP_GFS_CTL); writel_relaxed(0, base + SCSS_CPU1CORE_RESET); writel_relaxed(3, base + SCSS_DBG_STATUS_CORE_PWRDUP); mb(); iounmap(base); return 0; } static int cortex_a7_release_secondary(unsigned int cpu) { int ret = 0; void __iomem *reg; struct device_node *cpu_node, *acc_node; u32 reg_val; cpu_node = of_get_cpu_node(cpu, NULL); if (!cpu_node) return -ENODEV; acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0); if (!acc_node) { ret = -ENODEV; goto out_acc; } reg = of_iomap(acc_node, 0); if (!reg) { ret = -ENOMEM; goto out_acc_map; } /* Put the CPU into reset. */ reg_val = CORE_RST | COREPOR_RST | CLAMP | CORE_MEM_CLAMP; writel(reg_val, reg + APCS_CPU_PWR_CTL); /* Turn on the BHS and set the BHS_CNT to 16 XO clock cycles */ writel(BHS_EN | (0x10 << BHS_CNT_SHIFT), reg + APC_PWR_GATE_CTL); /* Wait for the BHS to settle */ udelay(2); reg_val &= ~CORE_MEM_CLAMP; writel(reg_val, reg + APCS_CPU_PWR_CTL); reg_val |= L2DT_SLP; writel(reg_val, reg + APCS_CPU_PWR_CTL); udelay(2); reg_val = (reg_val | BIT(17)) & ~CLAMP; writel(reg_val, reg + APCS_CPU_PWR_CTL); udelay(2); /* Release CPU out of reset and bring it to life. 
*/ reg_val &= ~(CORE_RST | COREPOR_RST); writel(reg_val, reg + APCS_CPU_PWR_CTL); reg_val |= CORE_PWRD_UP; writel(reg_val, reg + APCS_CPU_PWR_CTL); iounmap(reg); out_acc_map: of_node_put(acc_node); out_acc: of_node_put(cpu_node); return ret; } static int kpssv1_release_secondary(unsigned int cpu) { int ret = 0; void __iomem *reg, *saw_reg; struct device_node *cpu_node, *acc_node, *saw_node; u32 val; cpu_node = of_get_cpu_node(cpu, NULL); if (!cpu_node) return -ENODEV; acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0); if (!acc_node) { ret = -ENODEV; goto out_acc; } saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0); if (!saw_node) { ret = -ENODEV; goto out_saw; } reg = of_iomap(acc_node, 0); if (!reg) { ret = -ENOMEM; goto out_acc_map; } saw_reg = of_iomap(saw_node, 0); if (!saw_reg) { ret = -ENOMEM; goto out_saw_map; } /* Turn on CPU rail */ writel_relaxed(0xA4, saw_reg + APCS_SAW2_VCTL); mb(); udelay(512); /* Krait bring-up sequence */ val = PLL_CLAMP | L2DT_SLP | CLAMP; writel_relaxed(val, reg + APCS_CPU_PWR_CTL); val &= ~L2DT_SLP; writel_relaxed(val, reg + APCS_CPU_PWR_CTL); mb(); ndelay(300); val |= COREPOR_RST; writel_relaxed(val, reg + APCS_CPU_PWR_CTL); mb(); udelay(2); val &= ~CLAMP; writel_relaxed(val, reg + APCS_CPU_PWR_CTL); mb(); udelay(2); val &= ~COREPOR_RST; writel_relaxed(val, reg + APCS_CPU_PWR_CTL); mb(); udelay(100); val |= CORE_PWRD_UP; writel_relaxed(val, reg + APCS_CPU_PWR_CTL); mb(); iounmap(saw_reg); out_saw_map: iounmap(reg); out_acc_map: of_node_put(saw_node); out_saw: of_node_put(acc_node); out_acc: of_node_put(cpu_node); return ret; } static int kpssv2_release_secondary(unsigned int cpu) { void __iomem *reg; struct device_node *cpu_node, *l2_node, *acc_node, *saw_node; void __iomem *l2_saw_base; unsigned reg_val; int ret; cpu_node = of_get_cpu_node(cpu, NULL); if (!cpu_node) return -ENODEV; acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0); if (!acc_node) { ret = -ENODEV; goto out_acc; } l2_node = of_parse_phandle(cpu_node, "next-level-cache", 0); if (!l2_node) { ret = -ENODEV; goto out_l2; } saw_node = of_parse_phandle(l2_node, "qcom,saw", 0); if (!saw_node) { ret = -ENODEV; goto out_saw; } reg = of_iomap(acc_node, 0); if (!reg) { ret = -ENOMEM; goto out_map; } l2_saw_base = of_iomap(saw_node, 0); if (!l2_saw_base) { ret = -ENOMEM; goto out_saw_map; } /* Turn on the BHS, turn off LDO Bypass and power down LDO */ reg_val = (64 << BHS_CNT_SHIFT) | (0x3f << LDO_PWR_DWN_SHIFT) | BHS_EN; writel_relaxed(reg_val, reg + APC_PWR_GATE_CTL); mb(); /* wait for the BHS to settle */ udelay(1); /* Turn on BHS segments */ reg_val |= 0x3f << BHS_SEG_SHIFT; writel_relaxed(reg_val, reg + APC_PWR_GATE_CTL); mb(); /* wait for the BHS to settle */ udelay(1); /* Finally turn on the bypass so that BHS supplies power */ reg_val |= 0x3f << LDO_BYP_SHIFT; writel_relaxed(reg_val, reg + APC_PWR_GATE_CTL); /* enable max phases */ writel_relaxed(0x10003, l2_saw_base + APCS_SAW2_2_VCTL); mb(); udelay(50); reg_val = COREPOR_RST | CLAMP; writel_relaxed(reg_val, reg + APCS_CPU_PWR_CTL); mb(); udelay(2); reg_val &= ~CLAMP; writel_relaxed(reg_val, reg + APCS_CPU_PWR_CTL); mb(); udelay(2); reg_val &= ~COREPOR_RST; writel_relaxed(reg_val, reg + APCS_CPU_PWR_CTL); mb(); reg_val |= CORE_PWRD_UP; writel_relaxed(reg_val, reg + APCS_CPU_PWR_CTL); mb(); ret = 0; iounmap(l2_saw_base); out_saw_map: iounmap(reg); out_map: of_node_put(saw_node); out_saw: of_node_put(l2_node); out_l2: of_node_put(acc_node); out_acc: of_node_put(cpu_node); return ret; } static DEFINE_PER_CPU(int, cold_boot_done); 
static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int)) { int ret = 0; if (!per_cpu(cold_boot_done, cpu)) { ret = func(cpu); if (!ret) per_cpu(cold_boot_done, cpu) = true; } /* * Send the secondary CPU a soft interrupt, thereby causing * the boot monitor to read the system wide flags register, * and branch to the address found there. */ arch_send_wakeup_ipi_mask(cpumask_of(cpu)); return ret; } static int msm8660_boot_secondary(unsigned int cpu, struct task_struct *idle) { return qcom_boot_secondary(cpu, scss_release_secondary); } static int cortex_a7_boot_secondary(unsigned int cpu, struct task_struct *idle) { return qcom_boot_secondary(cpu, cortex_a7_release_secondary); } static int kpssv1_boot_secondary(unsigned int cpu, struct task_struct *idle) { return qcom_boot_secondary(cpu, kpssv1_release_secondary); } static int kpssv2_boot_secondary(unsigned int cpu, struct task_struct *idle) { return qcom_boot_secondary(cpu, kpssv2_release_secondary); } static void __init qcom_smp_prepare_cpus(unsigned int max_cpus) { int cpu; if (qcom_scm_set_cold_boot_addr(secondary_startup_arm)) { for_each_present_cpu(cpu) { if (cpu == smp_processor_id()) continue; set_cpu_present(cpu, false); } pr_warn("Failed to set CPU boot address, disabling SMP\n"); } } static const struct smp_operations smp_msm8660_ops __initconst = { .smp_prepare_cpus = qcom_smp_prepare_cpus, .smp_boot_secondary = msm8660_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = qcom_cpu_die, #endif }; CPU_METHOD_OF_DECLARE(qcom_smp, "qcom,gcc-msm8660", &smp_msm8660_ops); static const struct smp_operations qcom_smp_cortex_a7_ops __initconst = { .smp_prepare_cpus = qcom_smp_prepare_cpus, .smp_boot_secondary = cortex_a7_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = qcom_cpu_die, #endif }; CPU_METHOD_OF_DECLARE(qcom_smp_msm8226, "qcom,msm8226-smp", &qcom_smp_cortex_a7_ops); CPU_METHOD_OF_DECLARE(qcom_smp_msm8909, "qcom,msm8909-smp", &qcom_smp_cortex_a7_ops); CPU_METHOD_OF_DECLARE(qcom_smp_msm8916, "qcom,msm8916-smp", &qcom_smp_cortex_a7_ops); static const struct smp_operations qcom_smp_kpssv1_ops __initconst = { .smp_prepare_cpus = qcom_smp_prepare_cpus, .smp_boot_secondary = kpssv1_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = qcom_cpu_die, #endif }; CPU_METHOD_OF_DECLARE(qcom_smp_kpssv1, "qcom,kpss-acc-v1", &qcom_smp_kpssv1_ops); static const struct smp_operations qcom_smp_kpssv2_ops __initconst = { .smp_prepare_cpus = qcom_smp_prepare_cpus, .smp_boot_secondary = kpssv2_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = qcom_cpu_die, #endif }; CPU_METHOD_OF_DECLARE(qcom_smp_kpssv2, "qcom,kpss-acc-v2", &qcom_smp_kpssv2_ops);
linux-master
arch/arm/mach-qcom/platsmp.c
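The secondary-CPU release paths in platsmp.c above are staged read-modify-write sequences on a single power-control register: assert resets and clamps, enable the power switches, drop the clamps with settle delays, deassert the resets, and finally flag the core as powered up. The sketch below models only the clamp/reset ordering of cortex_a7_release_secondary() on a plain variable, with usleep() standing in for udelay(); the BHS/LDO programming and the extra bits the real sequence sets are deliberately omitted:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#define CORE_PWRD_UP   (1u << 7)
#define COREPOR_RST    (1u << 5)
#define CORE_RST       (1u << 4)
#define CORE_MEM_CLAMP (1u << 1)
#define CLAMP          (1u << 0)

static uint32_t pwr_ctl;   /* stands in for APCS_CPU_PWR_CTL */

int main(void)
{
    /* 1. hold the core in reset with memory and logic clamps applied */
    pwr_ctl = CORE_RST | COREPOR_RST | CLAMP | CORE_MEM_CLAMP;

    /* 2. release the memory clamp, then the logic clamp, with settle time */
    pwr_ctl &= ~CORE_MEM_CLAMP;
    usleep(2);
    pwr_ctl &= ~CLAMP;
    usleep(2);

    /* 3. deassert the resets, then flag the core as powered up */
    pwr_ctl &= ~(CORE_RST | COREPOR_RST);
    pwr_ctl |= CORE_PWRD_UP;

    printf("final PWR_CTL = 0x%02x\n", pwr_ctl);
    return 0;
}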
/******************************************************************************
 * grant_table.c
 * ARM specific part
 *
 * Granting foreign access to our memory reservation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>

int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   void **__shared)
{
	return -ENOSYS;
}

void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
{
	return;
}

int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   grant_status_t **__shared)
{
	return -ENOSYS;
}

int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
{
	return 0;
}
linux-master
arch/arm/xen/grant-table.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/memblock.h> #include <linux/gfp.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/dma-mapping.h> #include <linux/vmalloc.h> #include <linux/swiotlb.h> #include <xen/xen.h> #include <xen/interface/memory.h> #include <xen/grant_table.h> #include <xen/page.h> #include <xen/swiotlb-xen.h> #include <asm/cacheflush.h> #include <asm/xen/hypercall.h> #include <asm/xen/interface.h> struct xen_p2m_entry { unsigned long pfn; unsigned long mfn; unsigned long nr_pages; struct rb_node rbnode_phys; }; static rwlock_t p2m_lock; struct rb_root phys_to_mach = RB_ROOT; EXPORT_SYMBOL_GPL(phys_to_mach); static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) { struct rb_node **link = &phys_to_mach.rb_node; struct rb_node *parent = NULL; struct xen_p2m_entry *entry; int rc = 0; while (*link) { parent = *link; entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys); if (new->pfn == entry->pfn) goto err_out; if (new->pfn < entry->pfn) link = &(*link)->rb_left; else link = &(*link)->rb_right; } rb_link_node(&new->rbnode_phys, parent, link); rb_insert_color(&new->rbnode_phys, &phys_to_mach); goto out; err_out: rc = -EINVAL; pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n", __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn); out: return rc; } unsigned long __pfn_to_mfn(unsigned long pfn) { struct rb_node *n; struct xen_p2m_entry *entry; unsigned long irqflags; read_lock_irqsave(&p2m_lock, irqflags); n = phys_to_mach.rb_node; while (n) { entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); if (entry->pfn <= pfn && entry->pfn + entry->nr_pages > pfn) { unsigned long mfn = entry->mfn + (pfn - entry->pfn); read_unlock_irqrestore(&p2m_lock, irqflags); return mfn; } if (pfn < entry->pfn) n = n->rb_left; else n = n->rb_right; } read_unlock_irqrestore(&p2m_lock, irqflags); return INVALID_P2M_ENTRY; } EXPORT_SYMBOL_GPL(__pfn_to_mfn); int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, struct gnttab_map_grant_ref *kmap_ops, struct page **pages, unsigned int count) { int i; for (i = 0; i < count; i++) { struct gnttab_unmap_grant_ref unmap; int rc; if (map_ops[i].status) continue; if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) continue; /* * Signal an error for this slot. This in turn requires * immediate unmapping. */ map_ops[i].status = GNTST_general_error; unmap.host_addr = map_ops[i].host_addr, unmap.handle = map_ops[i].handle; map_ops[i].handle = INVALID_GRANT_HANDLE; if (map_ops[i].flags & GNTMAP_device_map) unmap.dev_bus_addr = map_ops[i].dev_bus_addr; else unmap.dev_bus_addr = 0; /* * Pre-populate the status field, to be recognizable in * the log message below. 
*/ unmap.status = 1; rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap, 1); if (rc || unmap.status != GNTST_okay) pr_err_once("gnttab unmap failed: rc=%d st=%d\n", rc, unmap.status); } return 0; } int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, struct gnttab_unmap_grant_ref *kunmap_ops, struct page **pages, unsigned int count) { int i; for (i = 0; i < count; i++) { set_phys_to_machine(unmap_ops[i].host_addr >> XEN_PAGE_SHIFT, INVALID_P2M_ENTRY); } return 0; } bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn, unsigned long nr_pages) { int rc; unsigned long irqflags; struct xen_p2m_entry *p2m_entry; struct rb_node *n; if (mfn == INVALID_P2M_ENTRY) { write_lock_irqsave(&p2m_lock, irqflags); n = phys_to_mach.rb_node; while (n) { p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); if (p2m_entry->pfn <= pfn && p2m_entry->pfn + p2m_entry->nr_pages > pfn) { rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach); write_unlock_irqrestore(&p2m_lock, irqflags); kfree(p2m_entry); return true; } if (pfn < p2m_entry->pfn) n = n->rb_left; else n = n->rb_right; } write_unlock_irqrestore(&p2m_lock, irqflags); return true; } p2m_entry = kzalloc(sizeof(*p2m_entry), GFP_NOWAIT); if (!p2m_entry) return false; p2m_entry->pfn = pfn; p2m_entry->nr_pages = nr_pages; p2m_entry->mfn = mfn; write_lock_irqsave(&p2m_lock, irqflags); rc = xen_add_phys_to_mach_entry(p2m_entry); if (rc < 0) { write_unlock_irqrestore(&p2m_lock, irqflags); kfree(p2m_entry); return false; } write_unlock_irqrestore(&p2m_lock, irqflags); return true; } EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi); bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) { return __set_phys_to_machine_multi(pfn, mfn, 1); } EXPORT_SYMBOL_GPL(__set_phys_to_machine); static int p2m_init(void) { rwlock_init(&p2m_lock); return 0; } arch_initcall(p2m_init);
linux-master
arch/arm/xen/p2m.c
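p2m.c above keeps pfn-to-mfn translations as ranges: each entry covers [pfn, pfn + nr_pages), the entries are kept ordered by pfn in an rb-tree, and __pfn_to_mfn() returns entry->mfn plus the offset of the requested pfn inside its range. The sketch below performs the same range lookup over a sorted array with a binary search instead of the kernel rb-tree, with made-up pfn/mfn values and no locking:

#include <stdio.h>
#include <stdint.h>

#define INVALID_ENTRY (~0ul)

struct p2m_range { unsigned long pfn, mfn, nr_pages; };

/* Must be sorted by pfn and non-overlapping; the values are made up. */
static const struct p2m_range ranges[] = {
    { 0x1000, 0x80000, 16 },
    { 0x2000, 0x90000,  4 },
};

static unsigned long pfn_to_mfn(unsigned long pfn)
{
    int lo = 0, hi = (int)(sizeof(ranges) / sizeof(ranges[0])) - 1;

    while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;
        const struct p2m_range *r = &ranges[mid];

        if (pfn < r->pfn)
            hi = mid - 1;
        else if (pfn >= r->pfn + r->nr_pages)
            lo = mid + 1;
        else
            return r->mfn + (pfn - r->pfn);  /* offset within the range */
    }
    return INVALID_ENTRY;
}

int main(void)
{
    printf("0x1004 -> 0x%lx\n", pfn_to_mfn(0x1004)); /* 0x80004 */
    printf("0x3000 -> 0x%lx\n", pfn_to_mfn(0x3000)); /* not mapped */
    return 0;
}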
// SPDX-License-Identifier: GPL-2.0-only #include <linux/cpu.h> #include <linux/dma-direct.h> #include <linux/dma-map-ops.h> #include <linux/gfp.h> #include <linux/highmem.h> #include <linux/export.h> #include <linux/memblock.h> #include <linux/of_address.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/vmalloc.h> #include <linux/swiotlb.h> #include <xen/xen.h> #include <xen/interface/grant_table.h> #include <xen/interface/memory.h> #include <xen/page.h> #include <xen/xen-ops.h> #include <xen/swiotlb-xen.h> #include <asm/cacheflush.h> #include <asm/xen/hypercall.h> #include <asm/xen/interface.h> static gfp_t xen_swiotlb_gfp(void) { phys_addr_t base; u64 i; for_each_mem_range(i, &base, NULL) { if (base < (phys_addr_t)0xffffffff) { if (IS_ENABLED(CONFIG_ZONE_DMA32)) return __GFP_DMA32; return __GFP_DMA; } } return GFP_KERNEL; } static bool hypercall_cflush = false; /* buffers in highmem or foreign pages cannot cross page boundaries */ static void dma_cache_maint(struct device *dev, dma_addr_t handle, size_t size, u32 op) { struct gnttab_cache_flush cflush; cflush.offset = xen_offset_in_page(handle); cflush.op = op; handle &= XEN_PAGE_MASK; do { cflush.a.dev_bus_addr = dma_to_phys(dev, handle); if (size + cflush.offset > XEN_PAGE_SIZE) cflush.length = XEN_PAGE_SIZE - cflush.offset; else cflush.length = size; HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1); cflush.offset = 0; handle += cflush.length; size -= cflush.length; } while (size); } /* * Dom0 is mapped 1:1, and while the Linux page can span across multiple Xen * pages, it is not possible for it to contain a mix of local and foreign Xen * pages. Calling pfn_valid on a foreign mfn will always return false, so if * pfn_valid returns true the pages is local and we can use the native * dma-direct functions, otherwise we call the Xen specific version. */ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { if (dir != DMA_TO_DEVICE) dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL); } void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { if (dir == DMA_FROM_DEVICE) dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL); else dma_cache_maint(dev, handle, size, GNTTAB_CACHE_CLEAN); } bool xen_arch_need_swiotlb(struct device *dev, phys_addr_t phys, dma_addr_t dev_addr) { unsigned int xen_pfn = XEN_PFN_DOWN(phys); unsigned int bfn = XEN_PFN_DOWN(dma_to_phys(dev, dev_addr)); /* * The swiotlb buffer should be used if * - Xen doesn't have the cache flush hypercall * - The Linux page refers to foreign memory * - The device doesn't support coherent DMA request * * The Linux page may be spanned acrros multiple Xen page, although * it's not possible to have a mix of local and foreign Xen page. * Furthermore, range_straddles_page_boundary is already checking * if buffer is physically contiguous in the host RAM. * * Therefore we only need to check the first Xen page to know if we * require a bounce buffer because the device doesn't support coherent * memory and we are not able to flush the cache. 
*/ return (!hypercall_cflush && (xen_pfn != bfn) && !dev_is_dma_coherent(dev)); } static int __init xen_mm_init(void) { struct gnttab_cache_flush cflush; int rc; if (!xen_swiotlb_detect()) return 0; /* we can work with the default swiotlb */ rc = swiotlb_init_late(swiotlb_size_or_default(), xen_swiotlb_gfp(), NULL); if (rc < 0) return rc; cflush.op = 0; cflush.a.dev_bus_addr = 0; cflush.offset = 0; cflush.length = 0; if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS) hypercall_cflush = true; return 0; } arch_initcall(xen_mm_init);
linux-master
arch/arm/xen/mm.c
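/*
 * A minimal userspace sketch of the per-page chunking that dma_cache_maint()
 * in arch/arm/xen/mm.c performs before each GNTTABOP_cache_flush call: the
 * first chunk is clamped to the end of the 4 KiB Xen page that the handle
 * falls in, later chunks cover whole pages or the remaining tail.  4096 and
 * the demo_* names stand in for the kernel's XEN_PAGE_SIZE and internals;
 * the printf marks where the hypercall would be issued.
 */
#include <stdio.h>
#include <stddef.h>

#define DEMO_XEN_PAGE_SIZE 4096UL

static void walk_cache_chunks(unsigned long handle, size_t size)
{
        unsigned long offset = handle & (DEMO_XEN_PAGE_SIZE - 1);
        unsigned long cursor = handle;          /* first byte still to maintain */

        while (size) {
                size_t len;

                if (size + offset > DEMO_XEN_PAGE_SIZE)
                        len = DEMO_XEN_PAGE_SIZE - offset;      /* up to page end */
                else
                        len = size;                             /* final tail */

                printf("flush page %#lx offset %4lu length %zu\n",
                       cursor & ~(DEMO_XEN_PAGE_SIZE - 1), offset, len);

                cursor += len;
                size -= len;
                offset = 0;     /* later chunks start on a page boundary */
        }
}

int main(void)
{
        walk_cache_chunks(0x10f80, 6000);       /* spans three Xen pages */
        return 0;
}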
// SPDX-License-Identifier: GPL-2.0-only #include <xen/xen.h> #include <xen/events.h> #include <xen/grant_table.h> #include <xen/hvm.h> #include <xen/interface/vcpu.h> #include <xen/interface/xen.h> #include <xen/interface/memory.h> #include <xen/interface/hvm/params.h> #include <xen/features.h> #include <xen/platform_pci.h> #include <xen/xenbus.h> #include <xen/page.h> #include <xen/interface/sched.h> #include <xen/xen-ops.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> #include <asm/system_misc.h> #include <asm/efi.h> #include <linux/interrupt.h> #include <linux/irqreturn.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/cpuidle.h> #include <linux/cpufreq.h> #include <linux/cpu.h> #include <linux/console.h> #include <linux/pvclock_gtod.h> #include <linux/reboot.h> #include <linux/time64.h> #include <linux/timekeeping.h> #include <linux/timekeeper_internal.h> #include <linux/acpi.h> #include <linux/virtio_anchor.h> #include <linux/mm.h> static struct start_info _xen_start_info; struct start_info *xen_start_info = &_xen_start_info; EXPORT_SYMBOL(xen_start_info); enum xen_domain_type xen_domain_type = XEN_NATIVE; EXPORT_SYMBOL(xen_domain_type); struct shared_info xen_dummy_shared_info; struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info; DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); static struct vcpu_info __percpu *xen_vcpu_info; /* Linux <-> Xen vCPU id mapping */ DEFINE_PER_CPU(uint32_t, xen_vcpu_id); EXPORT_PER_CPU_SYMBOL(xen_vcpu_id); /* These are unused until we support booting "pre-ballooned" */ unsigned long xen_released_pages; struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; static __read_mostly unsigned int xen_events_irq; static __read_mostly phys_addr_t xen_grant_frames; #define GRANT_TABLE_INDEX 0 #define EXT_REGION_INDEX 1 uint32_t xen_start_flags; EXPORT_SYMBOL(xen_start_flags); int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, int nr, struct page **pages) { return xen_xlate_unmap_gfn_range(vma, nr, pages); } EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range); static void xen_read_wallclock(struct timespec64 *ts) { u32 version; struct timespec64 now, ts_monotonic; struct shared_info *s = HYPERVISOR_shared_info; struct pvclock_wall_clock *wall_clock = &(s->wc); /* get wallclock at system boot */ do { version = wall_clock->version; rmb(); /* fetch version before time */ now.tv_sec = ((uint64_t)wall_clock->sec_hi << 32) | wall_clock->sec; now.tv_nsec = wall_clock->nsec; rmb(); /* fetch time before checking version */ } while ((wall_clock->version & 1) || (version != wall_clock->version)); /* time since system boot */ ktime_get_ts64(&ts_monotonic); *ts = timespec64_add(now, ts_monotonic); } static int xen_pvclock_gtod_notify(struct notifier_block *nb, unsigned long was_set, void *priv) { /* Protected by the calling core code serialization */ static struct timespec64 next_sync; struct xen_platform_op op; struct timespec64 now, system_time; struct timekeeper *tk = priv; now.tv_sec = tk->xtime_sec; now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); system_time = timespec64_add(now, tk->wall_to_monotonic); /* * We only take the expensive HV call when the clock was set * or when the 11 minutes RTC synchronization time elapsed. 
*/ if (!was_set && timespec64_compare(&now, &next_sync) < 0) return NOTIFY_OK; op.cmd = XENPF_settime64; op.u.settime64.mbz = 0; op.u.settime64.secs = now.tv_sec; op.u.settime64.nsecs = now.tv_nsec; op.u.settime64.system_time = timespec64_to_ns(&system_time); (void)HYPERVISOR_platform_op(&op); /* * Move the next drift compensation time 11 minutes * ahead. That's emulating the sync_cmos_clock() update for * the hardware RTC. */ next_sync = now; next_sync.tv_sec += 11 * 60; return NOTIFY_OK; } static struct notifier_block xen_pvclock_gtod_notifier = { .notifier_call = xen_pvclock_gtod_notify, }; static int xen_starting_cpu(unsigned int cpu) { struct vcpu_register_vcpu_info info; struct vcpu_info *vcpup; int err; /* * VCPUOP_register_vcpu_info cannot be called twice for the same * vcpu, so if vcpu_info is already registered, just get out. This * can happen with cpu-hotplug. */ if (per_cpu(xen_vcpu, cpu) != NULL) goto after_register_vcpu_info; pr_info("Xen: initializing cpu%d\n", cpu); vcpup = per_cpu_ptr(xen_vcpu_info, cpu); info.mfn = percpu_to_gfn(vcpup); info.offset = xen_offset_in_page(vcpup); err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu), &info); BUG_ON(err); per_cpu(xen_vcpu, cpu) = vcpup; if (!xen_kernel_unmapped_at_usr()) xen_setup_runstate_info(cpu); after_register_vcpu_info: enable_percpu_irq(xen_events_irq, 0); return 0; } static int xen_dying_cpu(unsigned int cpu) { disable_percpu_irq(xen_events_irq); return 0; } void xen_reboot(int reason) { struct sched_shutdown r = { .reason = reason }; int rc; rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); BUG_ON(rc); } static int xen_restart(struct notifier_block *nb, unsigned long action, void *data) { xen_reboot(SHUTDOWN_reboot); return NOTIFY_DONE; } static struct notifier_block xen_restart_nb = { .notifier_call = xen_restart, .priority = 192, }; static void xen_power_off(void) { xen_reboot(SHUTDOWN_poweroff); } static irqreturn_t xen_arm_callback(int irq, void *arg) { xen_evtchn_do_upcall(); return IRQ_HANDLED; } static __initdata struct { const char *compat; const char *prefix; const char *version; bool found; } hyper_node = {"xen,xen", "xen,xen-", NULL, false}; static int __init fdt_find_hyper_node(unsigned long node, const char *uname, int depth, void *data) { const void *s = NULL; int len; if (depth != 1 || strcmp(uname, "hypervisor") != 0) return 0; if (of_flat_dt_is_compatible(node, hyper_node.compat)) hyper_node.found = true; s = of_get_flat_dt_prop(node, "compatible", &len); if (strlen(hyper_node.prefix) + 3 < len && !strncmp(hyper_node.prefix, s, strlen(hyper_node.prefix))) hyper_node.version = s + strlen(hyper_node.prefix); /* * Check if Xen supports EFI by checking whether there is the * "/hypervisor/uefi" node in DT. If so, runtime services are available * through proxy functions (e.g. in case of Xen dom0 EFI implementation * they call special hypercall which executes relevant EFI functions) * and that is why they are always enabled. */ if (IS_ENABLED(CONFIG_XEN_EFI)) { if ((of_get_flat_dt_subnode_by_name(node, "uefi") > 0) && !efi_runtime_disabled()) set_bit(EFI_RUNTIME_SERVICES, &efi.flags); } return 0; } /* * see Documentation/devicetree/bindings/arm/xen.txt for the * documentation of the Xen Device Tree format. 
*/ void __init xen_early_init(void) { of_scan_flat_dt(fdt_find_hyper_node, NULL); if (!hyper_node.found) { pr_debug("No Xen support\n"); return; } if (hyper_node.version == NULL) { pr_debug("Xen version not found\n"); return; } pr_info("Xen %s support found\n", hyper_node.version); xen_domain_type = XEN_HVM_DOMAIN; xen_setup_features(); if (xen_feature(XENFEAT_dom0)) xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED; if (!console_set_on_cmdline && !xen_initial_domain()) add_preferred_console("hvc", 0, NULL); } static void __init xen_acpi_guest_init(void) { #ifdef CONFIG_ACPI struct xen_hvm_param a; int interrupt, trigger, polarity; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; if (HYPERVISOR_hvm_op(HVMOP_get_param, &a) || (a.value >> 56) != HVM_PARAM_CALLBACK_TYPE_PPI) { xen_events_irq = 0; return; } interrupt = a.value & 0xff; trigger = ((a.value >> 8) & 0x1) ? ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; polarity = ((a.value >> 8) & 0x2) ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; xen_events_irq = acpi_register_gsi(NULL, interrupt, trigger, polarity); #endif } #ifdef CONFIG_XEN_UNPOPULATED_ALLOC /* * A type-less specific Xen resource which contains extended regions * (unused regions of guest physical address space provided by the hypervisor). */ static struct resource xen_resource = { .name = "Xen unused space", }; int __init arch_xen_unpopulated_init(struct resource **res) { struct device_node *np; struct resource *regs, *tmp_res; uint64_t min_gpaddr = -1, max_gpaddr = 0; unsigned int i, nr_reg = 0; int rc; if (!xen_domain()) return -ENODEV; if (!acpi_disabled) return -ENODEV; np = of_find_compatible_node(NULL, NULL, "xen,xen"); if (WARN_ON(!np)) return -ENODEV; /* Skip region 0 which is reserved for grant table space */ while (of_get_address(np, nr_reg + EXT_REGION_INDEX, NULL, NULL)) nr_reg++; if (!nr_reg) { pr_err("No extended regions are found\n"); of_node_put(np); return -EINVAL; } regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL); if (!regs) { of_node_put(np); return -ENOMEM; } /* * Create resource from extended regions provided by the hypervisor to be * used as unused address space for Xen scratch pages. */ for (i = 0; i < nr_reg; i++) { rc = of_address_to_resource(np, i + EXT_REGION_INDEX, &regs[i]); if (rc) goto err; if (max_gpaddr < regs[i].end) max_gpaddr = regs[i].end; if (min_gpaddr > regs[i].start) min_gpaddr = regs[i].start; } xen_resource.start = min_gpaddr; xen_resource.end = max_gpaddr; /* * Mark holes between extended regions as unavailable. The rest of that * address space will be available for the allocation. 
*/ for (i = 1; i < nr_reg; i++) { resource_size_t start, end; /* There is an overlap between regions */ if (regs[i - 1].end + 1 > regs[i].start) { rc = -EINVAL; goto err; } /* There is no hole between regions */ if (regs[i - 1].end + 1 == regs[i].start) continue; start = regs[i - 1].end + 1; end = regs[i].start - 1; tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL); if (!tmp_res) { rc = -ENOMEM; goto err; } tmp_res->name = "Unavailable space"; tmp_res->start = start; tmp_res->end = end; rc = insert_resource(&xen_resource, tmp_res); if (rc) { pr_err("Cannot insert resource %pR (%d)\n", tmp_res, rc); kfree(tmp_res); goto err; } } *res = &xen_resource; err: of_node_put(np); kfree(regs); return rc; } #endif static void __init xen_dt_guest_init(void) { struct device_node *xen_node; struct resource res; xen_node = of_find_compatible_node(NULL, NULL, "xen,xen"); if (!xen_node) { pr_err("Xen support was detected before, but it has disappeared\n"); return; } xen_events_irq = irq_of_parse_and_map(xen_node, 0); if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) { pr_err("Xen grant table region is not found\n"); of_node_put(xen_node); return; } of_node_put(xen_node); xen_grant_frames = res.start; } static int __init xen_guest_init(void) { struct xen_add_to_physmap xatp; struct shared_info *shared_info_page = NULL; int rc, cpu; if (!xen_domain()) return 0; if (IS_ENABLED(CONFIG_XEN_VIRTIO)) virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc); if (!acpi_disabled) xen_acpi_guest_init(); else xen_dt_guest_init(); if (!xen_events_irq) { pr_err("Xen event channel interrupt not found\n"); return -ENODEV; } /* * The fdt parsing codes have set EFI_RUNTIME_SERVICES if Xen EFI * parameters are found. Force enable runtime services. */ if (efi_enabled(EFI_RUNTIME_SERVICES)) xen_efi_runtime_setup(); shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL); if (!shared_info_page) { pr_err("not enough memory\n"); return -ENOMEM; } xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = virt_to_gfn(shared_info_page); if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); HYPERVISOR_shared_info = (struct shared_info *)shared_info_page; /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info * page, we use it in the event channel upcall and in some pvclock * related functions. * The shared info contains exactly 1 CPU (the boot CPU). The guest * is required to use VCPUOP_register_vcpu_info to place vcpu info * for secondary CPUs as they are brought up. * For uniformity we use VCPUOP_register_vcpu_info even on cpu0. */ xen_vcpu_info = alloc_percpu(struct vcpu_info); if (xen_vcpu_info == NULL) return -ENOMEM; /* Direct vCPU id mapping for ARM guests. */ for_each_possible_cpu(cpu) per_cpu(xen_vcpu_id, cpu) = cpu; if (!xen_grant_frames) { xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames(); rc = xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn, &xen_auto_xlat_grant_frames.vaddr, xen_auto_xlat_grant_frames.count); } else rc = gnttab_setup_auto_xlat_frames(xen_grant_frames); if (rc) { free_percpu(xen_vcpu_info); return rc; } gnttab_init(); /* * Making sure board specific code will not set up ops for * cpu idle and cpu freq. 
*/ disable_cpuidle(); disable_cpufreq(); xen_init_IRQ(); if (request_percpu_irq(xen_events_irq, xen_arm_callback, "events", &xen_vcpu)) { pr_err("Error request IRQ %d\n", xen_events_irq); return -EINVAL; } if (!xen_kernel_unmapped_at_usr()) xen_time_setup_guest(); if (xen_initial_domain()) pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING, "arm/xen:starting", xen_starting_cpu, xen_dying_cpu); } early_initcall(xen_guest_init); static int __init xen_pm_init(void) { if (!xen_domain()) return -ENODEV; pm_power_off = xen_power_off; register_restart_handler(&xen_restart_nb); if (!xen_initial_domain()) { struct timespec64 ts; xen_read_wallclock(&ts); do_settimeofday64(&ts); } return 0; } late_initcall(xen_pm_init); /* empty stubs */ void xen_arch_pre_suspend(void) { } void xen_arch_post_suspend(int suspend_cancelled) { } void xen_timer_resume(void) { } void xen_arch_resume(void) { } void xen_arch_suspend(void) { } /* In the hypercall.S file. */ EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op); EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op); EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version); EXPORT_SYMBOL_GPL(HYPERVISOR_console_io); EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op); EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op); EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op); EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op); EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op); EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw); EXPORT_SYMBOL_GPL(HYPERVISOR_multicall); EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist); EXPORT_SYMBOL_GPL(HYPERVISOR_dm_op); EXPORT_SYMBOL_GPL(privcmd_call);
linux-master
arch/arm/xen/enlighten.c
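/*
 * A small userspace sketch of the gap computation in
 * arch_xen_unpopulated_init() above: given extended regions in increasing
 * address order, regions that touch (prev.end + 1 == next.start) need no
 * filler, an overlap is treated as an error, and anything else leaves a hole
 * [prev.end + 1, next.start - 1] that the kernel marks "Unavailable space".
 * The struct and helper names below are illustrative only.
 */
#include <stdio.h>

struct demo_region {
        unsigned long long start, end;  /* inclusive, like struct resource */
};

static int report_holes(const struct demo_region *regs, unsigned int nr)
{
        unsigned int i;

        for (i = 1; i < nr; i++) {
                if (regs[i - 1].end + 1 > regs[i].start) {
                        printf("overlap between region %u and %u\n", i - 1, i);
                        return -1;
                }
                if (regs[i - 1].end + 1 == regs[i].start)
                        continue;       /* contiguous, nothing to mark */

                printf("hole: [%#llx, %#llx]\n",
                       regs[i - 1].end + 1, regs[i].start - 1);
        }
        return 0;
}

int main(void)
{
        static const struct demo_region regs[] = {
                { 0x40000000, 0x4fffffff },
                { 0x50000000, 0x5fffffff },     /* contiguous with previous */
                { 0x80000000, 0x8fffffff },     /* leaves a hole before it  */
        };

        return report_holes(regs, 3) ? 1 : 0;
}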
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/ecard.c * * Copyright 1995-2001 Russell King * * Find all installed expansion cards, and handle interrupts from them. * * Created from information from Acorns RiscOS3 PRMs * * 08-Dec-1996 RMK Added code for the 9'th expansion card - the ether * podule slot. * 06-May-1997 RMK Added blacklist for cards whose loader doesn't work. * 12-Sep-1997 RMK Created new handling of interrupt enables/disables * - cards can now register their own routine to control * interrupts (recommended). * 29-Sep-1997 RMK Expansion card interrupt hardware not being re-enabled * on reset from Linux. (Caused cards not to respond * under RiscOS without hard reset). * 15-Feb-1998 RMK Added DMA support * 12-Sep-1998 RMK Added EASI support * 10-Jan-1999 RMK Run loaders in a simulated RISC OS environment. * 17-Apr-1999 RMK Support for EASI Type C cycles. */ #define ECARD_C #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/reboot.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/device.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/irq.h> #include <linux/io.h> #include <asm/dma.h> #include <asm/ecard.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/mach/irq.h> #include <asm/tlbflush.h> #include "ecard.h" struct ecard_request { void (*fn)(struct ecard_request *); ecard_t *ec; unsigned int address; unsigned int length; unsigned int use_loader; void *buffer; struct completion *complete; }; struct expcard_quirklist { unsigned short manufacturer; unsigned short product; const char *type; void (*init)(ecard_t *ec); }; static ecard_t *cards; static ecard_t *slot_to_expcard[MAX_ECARDS]; static unsigned int ectcr; static void atomwide_3p_quirk(ecard_t *ec); /* List of descriptions of cards which don't have an extended * identification, or chunk directories containing a description. */ static struct expcard_quirklist quirklist[] __initdata = { { MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" }, { MANU_ATOMWIDE, PROD_ATOMWIDE_3PSERIAL, NULL, atomwide_3p_quirk }, }; asmlinkage extern int ecard_loader_reset(unsigned long base, loader_t loader); asmlinkage extern int ecard_loader_read(int off, unsigned long base, loader_t loader); static inline unsigned short ecard_getu16(unsigned char *v) { return v[0] | v[1] << 8; } static inline signed long ecard_gets24(unsigned char *v) { return v[0] | v[1] << 8 | v[2] << 16 | ((v[2] & 0x80) ? 0xff000000 : 0); } static inline ecard_t *slot_to_ecard(unsigned int slot) { return slot < MAX_ECARDS ? slot_to_expcard[slot] : NULL; } /* ===================== Expansion card daemon ======================== */ /* * Since the loader programs on the expansion cards need to be run * in a specific environment, create a separate task with this * environment up, and pass requests to this task as and when we * need to. * * This should allow 99% of loaders to be called from Linux. * * From a security standpoint, we trust the card vendors. This * may be a misplaced trust. */ static void ecard_task_reset(struct ecard_request *req) { struct expansion_card *ec = req->ec; struct resource *res; res = ec->slot_no == 8 ? &ec->resource[ECARD_RES_MEMC] : ec->easi ? 
&ec->resource[ECARD_RES_EASI] : &ec->resource[ECARD_RES_IOCSYNC]; ecard_loader_reset(res->start, ec->loader); } static void ecard_task_readbytes(struct ecard_request *req) { struct expansion_card *ec = req->ec; unsigned char *buf = req->buffer; unsigned int len = req->length; unsigned int off = req->address; if (ec->slot_no == 8) { void __iomem *base = (void __iomem *) ec->resource[ECARD_RES_MEMC].start; /* * The card maintains an index which increments the address * into a 4096-byte page on each access. We need to keep * track of the counter. */ static unsigned int index; unsigned int page; page = (off >> 12) * 4; if (page > 256 * 4) return; off &= 4095; /* * If we are reading offset 0, or our current index is * greater than the offset, reset the hardware index counter. */ if (off == 0 || index > off) { writeb(0, base); index = 0; } /* * Increment the hardware index counter until we get to the * required offset. The read bytes are discarded. */ while (index < off) { readb(base + page); index += 1; } while (len--) { *buf++ = readb(base + page); index += 1; } } else { unsigned long base = (ec->easi ? &ec->resource[ECARD_RES_EASI] : &ec->resource[ECARD_RES_IOCSYNC])->start; void __iomem *pbase = (void __iomem *)base; if (!req->use_loader || !ec->loader) { off *= 4; while (len--) { *buf++ = readb(pbase + off); off += 4; } } else { while(len--) { /* * The following is required by some * expansion card loader programs. */ *(unsigned long *)0x108 = 0; *buf++ = ecard_loader_read(off++, base, ec->loader); } } } } static DECLARE_WAIT_QUEUE_HEAD(ecard_wait); static struct ecard_request *ecard_req; static DEFINE_MUTEX(ecard_mutex); /* * Set up the expansion card daemon's page tables. */ static void ecard_init_pgtables(struct mm_struct *mm) { struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC); /* We want to set up the page tables for the following mapping: * Virtual Physical * 0x03000000 0x03000000 * 0x03010000 unmapped * 0x03210000 0x03210000 * 0x03400000 unmapped * 0x08000000 0x08000000 * 0x10000000 unmapped * * FIXME: we don't follow this 100% yet. */ pgd_t *src_pgd, *dst_pgd; src_pgd = pgd_offset(mm, (unsigned long)IO_BASE); dst_pgd = pgd_offset(mm, IO_START); memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (IO_SIZE / PGDIR_SIZE)); src_pgd = pgd_offset(mm, (unsigned long)EASI_BASE); dst_pgd = pgd_offset(mm, EASI_START); memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE)); flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE); flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE); } static int ecard_init_mm(void) { struct mm_struct * mm = mm_alloc(); struct mm_struct *active_mm = current->active_mm; if (!mm) return -ENOMEM; current->mm = mm; current->active_mm = mm; activate_mm(active_mm, mm); mmdrop_lazy_tlb(active_mm); ecard_init_pgtables(mm); return 0; } static int ecard_task(void * unused) { /* * Allocate a mm. We're not a lazy-TLB kernel task since we need * to set page table entries where the user space would be. Note * that this also creates the page tables. Failure is not an * option here. */ if (ecard_init_mm()) panic("kecardd: unable to alloc mm\n"); while (1) { struct ecard_request *req; wait_event_interruptible(ecard_wait, ecard_req != NULL); req = xchg(&ecard_req, NULL); if (req != NULL) { req->fn(req); complete(req->complete); } } } /* * Wake the expansion card daemon to action our request. * * FIXME: The test here is not sufficient to detect if the * kcardd is running. 
*/ static void ecard_call(struct ecard_request *req) { DECLARE_COMPLETION_ONSTACK(completion); req->complete = &completion; mutex_lock(&ecard_mutex); ecard_req = req; wake_up(&ecard_wait); /* * Now wait for kecardd to run. */ wait_for_completion(&completion); mutex_unlock(&ecard_mutex); } /* ======================= Mid-level card control ===================== */ static void ecard_readbytes(void *addr, ecard_t *ec, int off, int len, int useld) { struct ecard_request req; req.fn = ecard_task_readbytes; req.ec = ec; req.address = off; req.length = len; req.use_loader = useld; req.buffer = addr; ecard_call(&req); } int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num) { struct ex_chunk_dir excd; int index = 16; int useld = 0; if (!ec->cid.cd) return 0; while(1) { ecard_readbytes(&excd, ec, index, 8, useld); index += 8; if (c_id(&excd) == 0) { if (!useld && ec->loader) { useld = 1; index = 0; continue; } return 0; } if (c_id(&excd) == 0xf0) { /* link */ index = c_start(&excd); continue; } if (c_id(&excd) == 0x80) { /* loader */ if (!ec->loader) { ec->loader = kmalloc(c_len(&excd), GFP_KERNEL); if (ec->loader) ecard_readbytes(ec->loader, ec, (int)c_start(&excd), c_len(&excd), useld); else return 0; } continue; } if (c_id(&excd) == id && num-- == 0) break; } if (c_id(&excd) & 0x80) { switch (c_id(&excd) & 0x70) { case 0x70: ecard_readbytes((unsigned char *)excd.d.string, ec, (int)c_start(&excd), c_len(&excd), useld); break; case 0x00: break; } } cd->start_offset = c_start(&excd); memcpy(cd->d.string, excd.d.string, 256); return 1; } /* ======================= Interrupt control ============================ */ static void ecard_def_irq_enable(ecard_t *ec, int irqnr) { } static void ecard_def_irq_disable(ecard_t *ec, int irqnr) { } static int ecard_def_irq_pending(ecard_t *ec) { return !ec->irqmask || readb(ec->irqaddr) & ec->irqmask; } static void ecard_def_fiq_enable(ecard_t *ec, int fiqnr) { panic("ecard_def_fiq_enable called - impossible"); } static void ecard_def_fiq_disable(ecard_t *ec, int fiqnr) { panic("ecard_def_fiq_disable called - impossible"); } static int ecard_def_fiq_pending(ecard_t *ec) { return !ec->fiqmask || readb(ec->fiqaddr) & ec->fiqmask; } static expansioncard_ops_t ecard_default_ops = { ecard_def_irq_enable, ecard_def_irq_disable, ecard_def_irq_pending, ecard_def_fiq_enable, ecard_def_fiq_disable, ecard_def_fiq_pending }; /* * Enable and disable interrupts from expansion cards. * (interrupts are disabled for these functions). * * They are not meant to be called directly, but via enable/disable_irq. 
*/ static void ecard_irq_unmask(struct irq_data *d) { ecard_t *ec = irq_data_get_irq_chip_data(d); if (ec) { if (!ec->ops) ec->ops = &ecard_default_ops; if (ec->claimed && ec->ops->irqenable) ec->ops->irqenable(ec, d->irq); else printk(KERN_ERR "ecard: rejecting request to " "enable IRQs for %d\n", d->irq); } } static void ecard_irq_mask(struct irq_data *d) { ecard_t *ec = irq_data_get_irq_chip_data(d); if (ec) { if (!ec->ops) ec->ops = &ecard_default_ops; if (ec->ops && ec->ops->irqdisable) ec->ops->irqdisable(ec, d->irq); } } static struct irq_chip ecard_chip = { .name = "ECARD", .irq_ack = ecard_irq_mask, .irq_mask = ecard_irq_mask, .irq_unmask = ecard_irq_unmask, }; void ecard_enablefiq(unsigned int fiqnr) { ecard_t *ec = slot_to_ecard(fiqnr); if (ec) { if (!ec->ops) ec->ops = &ecard_default_ops; if (ec->claimed && ec->ops->fiqenable) ec->ops->fiqenable(ec, fiqnr); else printk(KERN_ERR "ecard: rejecting request to " "enable FIQs for %d\n", fiqnr); } } void ecard_disablefiq(unsigned int fiqnr) { ecard_t *ec = slot_to_ecard(fiqnr); if (ec) { if (!ec->ops) ec->ops = &ecard_default_ops; if (ec->ops->fiqdisable) ec->ops->fiqdisable(ec, fiqnr); } } static void ecard_dump_irq_state(void) { ecard_t *ec; printk("Expansion card IRQ state:\n"); for (ec = cards; ec; ec = ec->next) { const char *claimed; if (ec->slot_no == 8) continue; claimed = ec->claimed ? "" : "not "; if (ec->ops && ec->ops->irqpending && ec->ops != &ecard_default_ops) printk(" %d: %sclaimed irq %spending\n", ec->slot_no, claimed, ec->ops->irqpending(ec) ? "" : "not "); else printk(" %d: %sclaimed irqaddr %p, mask = %02X, status = %02X\n", ec->slot_no, claimed, ec->irqaddr, ec->irqmask, readb(ec->irqaddr)); } } static void ecard_check_lockup(struct irq_desc *desc) { static unsigned long last; static int lockup; /* * If the timer interrupt has not run since the last million * unrecognised expansion card interrupts, then there is * something seriously wrong. Disable the expansion card * interrupts so at least we can continue. * * Maybe we ought to start a timer to re-enable them some time * later? */ if (last == jiffies) { lockup += 1; if (lockup > 1000000) { printk(KERN_ERR "\nInterrupt lockup detected - " "disabling all expansion card interrupts\n"); desc->irq_data.chip->irq_mask(&desc->irq_data); ecard_dump_irq_state(); } } else lockup = 0; /* * If we did not recognise the source of this interrupt, * warn the user, but don't flood the user with these messages. 
*/ if (!last || time_after(jiffies, last + 5*HZ)) { last = jiffies; printk(KERN_WARNING "Unrecognised interrupt from backplane\n"); ecard_dump_irq_state(); } } static void ecard_irq_handler(struct irq_desc *desc) { ecard_t *ec; int called = 0; desc->irq_data.chip->irq_mask(&desc->irq_data); for (ec = cards; ec; ec = ec->next) { int pending; if (!ec->claimed || !ec->irq || ec->slot_no == 8) continue; if (ec->ops && ec->ops->irqpending) pending = ec->ops->irqpending(ec); else pending = ecard_default_ops.irqpending(ec); if (pending) { generic_handle_irq(ec->irq); called ++; } } desc->irq_data.chip->irq_unmask(&desc->irq_data); if (called == 0) ecard_check_lockup(desc); } static void __iomem *__ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed) { void __iomem *address = NULL; int slot = ec->slot_no; if (ec->slot_no == 8) return ECARD_MEMC8_BASE; ectcr &= ~(1 << slot); switch (type) { case ECARD_MEMC: if (slot < 4) address = ECARD_MEMC_BASE + (slot << 14); break; case ECARD_IOC: if (slot < 4) address = ECARD_IOC_BASE + (slot << 14); else address = ECARD_IOC4_BASE + ((slot - 4) << 14); if (address) address += speed << 19; break; case ECARD_EASI: address = ECARD_EASI_BASE + (slot << 24); if (speed == ECARD_FAST) ectcr |= 1 << slot; break; default: break; } #ifdef IOMD_ECTCR iomd_writeb(ectcr, IOMD_ECTCR); #endif return address; } static int ecard_prints(struct seq_file *m, ecard_t *ec) { seq_printf(m, " %d: %s ", ec->slot_no, ec->easi ? "EASI" : " "); if (ec->cid.id == 0) { struct in_chunk_dir incd; seq_printf(m, "[%04X:%04X] ", ec->cid.manufacturer, ec->cid.product); if (!ec->card_desc && ec->cid.cd && ecard_readchunk(&incd, ec, 0xf5, 0)) { ec->card_desc = kmalloc(strlen(incd.d.string)+1, GFP_KERNEL); if (ec->card_desc) strcpy((char *)ec->card_desc, incd.d.string); } seq_printf(m, "%s\n", ec->card_desc ? 
ec->card_desc : "*unknown*"); } else seq_printf(m, "Simple card %d\n", ec->cid.id); return 0; } static int ecard_devices_proc_show(struct seq_file *m, void *v) { ecard_t *ec = cards; while (ec) { ecard_prints(m, ec); ec = ec->next; } return 0; } static struct proc_dir_entry *proc_bus_ecard_dir = NULL; static void ecard_proc_init(void) { proc_bus_ecard_dir = proc_mkdir("bus/ecard", NULL); proc_create_single("devices", 0, proc_bus_ecard_dir, ecard_devices_proc_show); } #define ec_set_resource(ec,nr,st,sz) \ do { \ (ec)->resource[nr].name = dev_name(&ec->dev); \ (ec)->resource[nr].start = st; \ (ec)->resource[nr].end = (st) + (sz) - 1; \ (ec)->resource[nr].flags = IORESOURCE_MEM; \ } while (0) static void __init ecard_free_card(struct expansion_card *ec) { int i; for (i = 0; i < ECARD_NUM_RESOURCES; i++) if (ec->resource[i].flags) release_resource(&ec->resource[i]); kfree(ec); } static struct expansion_card *__init ecard_alloc_card(int type, int slot) { struct expansion_card *ec; unsigned long base; int i; ec = kzalloc(sizeof(ecard_t), GFP_KERNEL); if (!ec) { ec = ERR_PTR(-ENOMEM); goto nomem; } ec->slot_no = slot; ec->easi = type == ECARD_EASI; ec->irq = 0; ec->fiq = 0; ec->dma = NO_DMA; ec->ops = &ecard_default_ops; dev_set_name(&ec->dev, "ecard%d", slot); ec->dev.parent = NULL; ec->dev.bus = &ecard_bus_type; ec->dev.dma_mask = &ec->dma_mask; ec->dma_mask = (u64)0xffffffff; ec->dev.coherent_dma_mask = ec->dma_mask; if (slot < 4) { ec_set_resource(ec, ECARD_RES_MEMC, PODSLOT_MEMC_BASE + (slot << 14), PODSLOT_MEMC_SIZE); base = PODSLOT_IOC0_BASE + (slot << 14); } else base = PODSLOT_IOC4_BASE + ((slot - 4) << 14); #ifdef CONFIG_ARCH_RPC if (slot < 8) { ec_set_resource(ec, ECARD_RES_EASI, PODSLOT_EASI_BASE + (slot << 24), PODSLOT_EASI_SIZE); } if (slot == 8) { ec_set_resource(ec, ECARD_RES_MEMC, NETSLOT_BASE, NETSLOT_SIZE); } else #endif for (i = 0; i <= ECARD_RES_IOCSYNC - ECARD_RES_IOCSLOW; i++) ec_set_resource(ec, i + ECARD_RES_IOCSLOW, base + (i << 19), PODSLOT_IOC_SIZE); for (i = 0; i < ECARD_NUM_RESOURCES; i++) { if (ec->resource[i].flags && request_resource(&iomem_resource, &ec->resource[i])) { dev_err(&ec->dev, "resource(s) not available\n"); ec->resource[i].end -= ec->resource[i].start; ec->resource[i].start = 0; ec->resource[i].flags = 0; } } nomem: return ec; } static ssize_t irq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%u\n", ec->irq); } static DEVICE_ATTR_RO(irq); static ssize_t dma_show(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%u\n", ec->dma); } static DEVICE_ATTR_RO(dma); static ssize_t resource_show(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); char *str = buf; int i; for (i = 0; i < ECARD_NUM_RESOURCES; i++) str += sprintf(str, "%08x %08x %08lx\n", ec->resource[i].start, ec->resource[i].end, ec->resource[i].flags); return str - buf; } static DEVICE_ATTR_RO(resource); static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%u\n", ec->cid.manufacturer); } static DEVICE_ATTR_RO(vendor); static ssize_t device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%u\n", ec->cid.product); } static DEVICE_ATTR_RO(device); static ssize_t type_show(struct device *dev, 
struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%s\n", ec->easi ? "EASI" : "IOC"); } static DEVICE_ATTR_RO(type); static struct attribute *ecard_dev_attrs[] = { &dev_attr_device.attr, &dev_attr_dma.attr, &dev_attr_irq.attr, &dev_attr_resource.attr, &dev_attr_type.attr, &dev_attr_vendor.attr, NULL, }; ATTRIBUTE_GROUPS(ecard_dev); int ecard_request_resources(struct expansion_card *ec) { int i, err = 0; for (i = 0; i < ECARD_NUM_RESOURCES; i++) { if (ecard_resource_end(ec, i) && !request_mem_region(ecard_resource_start(ec, i), ecard_resource_len(ec, i), ec->dev.driver->name)) { err = -EBUSY; break; } } if (err) { while (i--) if (ecard_resource_end(ec, i)) release_mem_region(ecard_resource_start(ec, i), ecard_resource_len(ec, i)); } return err; } EXPORT_SYMBOL(ecard_request_resources); void ecard_release_resources(struct expansion_card *ec) { int i; for (i = 0; i < ECARD_NUM_RESOURCES; i++) if (ecard_resource_end(ec, i)) release_mem_region(ecard_resource_start(ec, i), ecard_resource_len(ec, i)); } EXPORT_SYMBOL(ecard_release_resources); void ecard_setirq(struct expansion_card *ec, const struct expansion_card_ops *ops, void *irq_data) { ec->irq_data = irq_data; barrier(); ec->ops = ops; } EXPORT_SYMBOL(ecard_setirq); void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res, unsigned long offset, unsigned long maxsize) { unsigned long start = ecard_resource_start(ec, res); unsigned long end = ecard_resource_end(ec, res); if (offset > (end - start)) return NULL; start += offset; if (maxsize && end - start > maxsize) end = start + maxsize; return devm_ioremap(&ec->dev, start, end - start); } EXPORT_SYMBOL(ecardm_iomap); static void atomwide_3p_quirk(ecard_t *ec) { void __iomem *addr = __ecard_address(ec, ECARD_IOC, ECARD_SYNC); unsigned int i; /* Disable interrupts on each port */ for (i = 0x2000; i <= 0x2800; i += 0x0400) writeb(0, addr + i + 4); } /* * Probe for an expansion card. * * If bit 1 of the first byte of the card is set, then the * card does not exist. 
*/ static int __init ecard_probe(int slot, unsigned irq, card_type_t type) { ecard_t **ecp; ecard_t *ec; struct ex_ecid cid; void __iomem *addr; int i, rc; ec = ecard_alloc_card(type, slot); if (IS_ERR(ec)) { rc = PTR_ERR(ec); goto nomem; } rc = -ENODEV; if ((addr = __ecard_address(ec, type, ECARD_SYNC)) == NULL) goto nodev; cid.r_zero = 1; ecard_readbytes(&cid, ec, 0, 16, 0); if (cid.r_zero) goto nodev; ec->cid.id = cid.r_id; ec->cid.cd = cid.r_cd; ec->cid.is = cid.r_is; ec->cid.w = cid.r_w; ec->cid.manufacturer = ecard_getu16(cid.r_manu); ec->cid.product = ecard_getu16(cid.r_prod); ec->cid.country = cid.r_country; ec->cid.irqmask = cid.r_irqmask; ec->cid.irqoff = ecard_gets24(cid.r_irqoff); ec->cid.fiqmask = cid.r_fiqmask; ec->cid.fiqoff = ecard_gets24(cid.r_fiqoff); ec->fiqaddr = ec->irqaddr = addr; if (ec->cid.is) { ec->irqmask = ec->cid.irqmask; ec->irqaddr += ec->cid.irqoff; ec->fiqmask = ec->cid.fiqmask; ec->fiqaddr += ec->cid.fiqoff; } else { ec->irqmask = 1; ec->fiqmask = 4; } for (i = 0; i < ARRAY_SIZE(quirklist); i++) if (quirklist[i].manufacturer == ec->cid.manufacturer && quirklist[i].product == ec->cid.product) { if (quirklist[i].type) ec->card_desc = quirklist[i].type; if (quirklist[i].init) quirklist[i].init(ec); break; } ec->irq = irq; /* * hook the interrupt handlers */ if (slot < 8) { irq_set_chip_and_handler(ec->irq, &ecard_chip, handle_level_irq); irq_set_chip_data(ec->irq, ec); irq_clear_status_flags(ec->irq, IRQ_NOREQUEST); } #ifdef CONFIG_ARCH_RPC /* On RiscPC, only first two slots have DMA capability */ if (slot < 2) ec->dma = 2 + slot; #endif for (ecp = &cards; *ecp; ecp = &(*ecp)->next); *ecp = ec; slot_to_expcard[slot] = ec; rc = device_register(&ec->dev); if (rc) goto nodev; return 0; nodev: ecard_free_card(ec); nomem: return rc; } /* * Initialise the expansion card system. * Locate all hardware - interrupt management and * actual cards. */ static int __init ecard_init(void) { struct task_struct *task; int slot, irqbase; irqbase = irq_alloc_descs(-1, 0, 8, -1); if (irqbase < 0) return irqbase; task = kthread_run(ecard_task, NULL, "kecardd"); if (IS_ERR(task)) { printk(KERN_ERR "Ecard: unable to create kernel thread: %ld\n", PTR_ERR(task)); irq_free_descs(irqbase, 8); return PTR_ERR(task); } printk("Probing expansion cards\n"); for (slot = 0; slot < 8; slot ++) { if (ecard_probe(slot, irqbase + slot, ECARD_EASI) == -ENODEV) ecard_probe(slot, irqbase + slot, ECARD_IOC); } ecard_probe(8, 11, ECARD_IOC); irq_set_chained_handler(IRQ_EXPANSIONCARD, ecard_irq_handler); ecard_proc_init(); return 0; } subsys_initcall(ecard_init); /* * ECARD "bus" */ static const struct ecard_id * ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec) { int i; for (i = 0; ids[i].manufacturer != 65535; i++) if (ec->cid.manufacturer == ids[i].manufacturer && ec->cid.product == ids[i].product) return ids + i; return NULL; } static int ecard_drv_probe(struct device *dev) { struct expansion_card *ec = ECARD_DEV(dev); struct ecard_driver *drv = ECARD_DRV(dev->driver); const struct ecard_id *id; int ret; id = ecard_match_device(drv->id_table, ec); ec->claimed = 1; ret = drv->probe(ec, id); if (ret) ec->claimed = 0; return ret; } static void ecard_drv_remove(struct device *dev) { struct expansion_card *ec = ECARD_DEV(dev); struct ecard_driver *drv = ECARD_DRV(dev->driver); drv->remove(ec); ec->claimed = 0; /* * Restore the default operations. We ensure that the * ops are set before we change the data. 
*/ ec->ops = &ecard_default_ops; barrier(); ec->irq_data = NULL; } /* * Before rebooting, we must make sure that the expansion card is in a * sensible state, so it can be re-detected. This means that the first * page of the ROM must be visible. We call the expansion cards reset * handler, if any. */ static void ecard_drv_shutdown(struct device *dev) { struct expansion_card *ec = ECARD_DEV(dev); struct ecard_driver *drv = ECARD_DRV(dev->driver); struct ecard_request req; if (dev->driver) { if (drv->shutdown) drv->shutdown(ec); ec->claimed = 0; } /* * If this card has a loader, call the reset handler. */ if (ec->loader) { req.fn = ecard_task_reset; req.ec = ec; ecard_call(&req); } } int ecard_register_driver(struct ecard_driver *drv) { drv->drv.bus = &ecard_bus_type; return driver_register(&drv->drv); } void ecard_remove_driver(struct ecard_driver *drv) { driver_unregister(&drv->drv); } static int ecard_match(struct device *_dev, struct device_driver *_drv) { struct expansion_card *ec = ECARD_DEV(_dev); struct ecard_driver *drv = ECARD_DRV(_drv); int ret; if (drv->id_table) { ret = ecard_match_device(drv->id_table, ec) != NULL; } else { ret = ec->cid.id == drv->id; } return ret; } struct bus_type ecard_bus_type = { .name = "ecard", .dev_groups = ecard_dev_groups, .match = ecard_match, .probe = ecard_drv_probe, .remove = ecard_drv_remove, .shutdown = ecard_drv_shutdown, }; static int ecard_bus_init(void) { return bus_register(&ecard_bus_type); } postcore_initcall(ecard_bus_init); EXPORT_SYMBOL(ecard_readchunk); EXPORT_SYMBOL(ecard_register_driver); EXPORT_SYMBOL(ecard_remove_driver); EXPORT_SYMBOL(ecard_bus_type);
linux-master
arch/arm/mach-rpc/ecard.c
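/*
 * A minimal userspace sketch of the 24-bit sign extension done by
 * ecard_gets24() above: the expansion card CID stores the IRQ/FIQ register
 * offsets as little-endian 24-bit signed values, and bit 7 of the third byte
 * decides whether the top byte of the 32-bit result is filled with ones.
 * demo_gets24() is a hypothetical stand-in, not the kernel helper itself.
 */
#include <stdio.h>
#include <stdint.h>

static int32_t demo_gets24(const uint8_t v[3])
{
        uint32_t raw = v[0] | (uint32_t)v[1] << 8 | (uint32_t)v[2] << 16;

        if (v[2] & 0x80)                /* sign bit of the 24-bit value set */
                raw |= 0xff000000u;     /* extend into the top byte */

        return (int32_t)raw;
}

int main(void)
{
        const uint8_t pos[3] = { 0x34, 0x12, 0x00 };    /* 0x001234   */
        const uint8_t neg[3] = { 0xff, 0xff, 0xff };    /* 24-bit -1  */

        printf("%d\n", (int)demo_gets24(pos));  /* 4660 */
        printf("%d\n", (int)demo_gets24(neg));  /* -1   */
        return 0;
}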
// SPDX-License-Identifier: GPL-2.0 #include <linux/init.h> #include <linux/list.h> #include <linux/io.h> #include <asm/mach/irq.h> #include <asm/hardware/iomd.h> #include <asm/irq.h> #include <asm/fiq.h> // These are offsets from the stat register for each IRQ bank #define STAT 0x00 #define REQ 0x04 #define CLR 0x04 #define MASK 0x08 static const u8 irq_prio_h[256] = { 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10, 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10, 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10, 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10, 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10, 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10, 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10, 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10, 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10, 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10, 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10, 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10, 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10, 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10, 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10, 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10, }; static const u8 irq_prio_d[256] = { 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16, }; static const u8 irq_prio_l[256] = { 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, }; static int iomd_get_irq_nr(void) { int irq; u8 reg; /* get highest priority first */ reg = readb(IOC_BASE + IOMD_IRQREQB); irq = irq_prio_h[reg]; if (irq) return irq; /* get DMA */ reg = readb(IOC_BASE + IOMD_DMAREQ); irq = irq_prio_d[reg]; if (irq) return irq; /* get low priority */ reg = readb(IOC_BASE + IOMD_IRQREQA); irq = irq_prio_l[reg]; if (irq) return irq; return 0; } static void iomd_handle_irq(struct pt_regs *regs) { int irq; do { irq = iomd_get_irq_nr(); if (irq) generic_handle_irq(irq); } while (irq); } static void __iomem *iomd_get_base(struct irq_data *d) { void *cd = irq_data_get_irq_chip_data(d); return (void __iomem *)(unsigned long)cd; } static void iomd_set_base_mask(unsigned int irq, void __iomem *base, u32 mask) { struct irq_data *d = 
irq_get_irq_data(irq); d->mask = mask; irq_set_chip_data(irq, (void *)(unsigned long)base); } static void iomd_irq_mask_ack(struct irq_data *d) { void __iomem *base = iomd_get_base(d); unsigned int val, mask = d->mask; val = readb(base + MASK); writeb(val & ~mask, base + MASK); writeb(mask, base + CLR); } static void iomd_irq_mask(struct irq_data *d) { void __iomem *base = iomd_get_base(d); unsigned int val, mask = d->mask; val = readb(base + MASK); writeb(val & ~mask, base + MASK); } static void iomd_irq_unmask(struct irq_data *d) { void __iomem *base = iomd_get_base(d); unsigned int val, mask = d->mask; val = readb(base + MASK); writeb(val | mask, base + MASK); } static struct irq_chip iomd_chip_clr = { .irq_mask_ack = iomd_irq_mask_ack, .irq_mask = iomd_irq_mask, .irq_unmask = iomd_irq_unmask, }; static struct irq_chip iomd_chip_noclr = { .irq_mask = iomd_irq_mask, .irq_unmask = iomd_irq_unmask, }; extern unsigned char rpc_default_fiq_start, rpc_default_fiq_end; void __init rpc_init_irq(void) { unsigned int irq, clr, set; iomd_writeb(0, IOMD_IRQMASKA); iomd_writeb(0, IOMD_IRQMASKB); iomd_writeb(0, IOMD_FIQMASK); iomd_writeb(0, IOMD_DMAMASK); set_fiq_handler(&rpc_default_fiq_start, &rpc_default_fiq_end - &rpc_default_fiq_start); set_handle_irq(iomd_handle_irq); for (irq = 0; irq < NR_IRQS; irq++) { clr = IRQ_NOREQUEST; set = 0; if (irq <= 6 || (irq >= 9 && irq <= 15)) clr |= IRQ_NOPROBE; if (irq == 21 || (irq >= 16 && irq <= 19) || irq == IRQ_KEYBOARDTX) set |= IRQ_NOAUTOEN; switch (irq) { case 0 ... 7: irq_set_chip_and_handler(irq, &iomd_chip_clr, handle_level_irq); irq_modify_status(irq, clr, set); iomd_set_base_mask(irq, IOMD_BASE + IOMD_IRQSTATA, BIT(irq)); break; case 8 ... 15: irq_set_chip_and_handler(irq, &iomd_chip_noclr, handle_level_irq); irq_modify_status(irq, clr, set); iomd_set_base_mask(irq, IOMD_BASE + IOMD_IRQSTATB, BIT(irq - 8)); break; case 16 ... 21: irq_set_chip_and_handler(irq, &iomd_chip_noclr, handle_level_irq); irq_modify_status(irq, clr, set); iomd_set_base_mask(irq, IOMD_BASE + IOMD_DMASTAT, BIT(irq - 16)); break; case 64 ... 71: irq_set_chip(irq, &iomd_chip_noclr); irq_modify_status(irq, clr, set); iomd_set_base_mask(irq, IOMD_BASE + IOMD_FIQSTAT, BIT(irq - 64)); break; } } init_FIQ(FIQ_START); }
linux-master
arch/arm/mach-rpc/irq.c
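/*
 * A minimal userspace sketch of the table-driven decode used by
 * arch/arm/mach-rpc/irq.c above: iomd_get_irq_nr() indexes a 256-entry table
 * with the raw request register so the highest-priority pending source is
 * found with a single load.  The generator below builds such a table for one
 * bank (bits 0..7 reported as IRQs 8..15, echoing irq_prio_h); the priority
 * order chosen here is made up and does not reproduce the real irq_prio_h/d/l
 * contents.
 */
#include <stdio.h>

/* Bit positions listed most important first -- illustrative order only. */
static const unsigned char prio_bits[8] = { 4, 0, 1, 2, 3, 5, 6, 7 };

static unsigned char prio_table[256];

static void build_table(void)
{
        unsigned int status, i;

        for (status = 1; status < 256; status++) {
                for (i = 0; i < 8; i++) {
                        if (status & (1u << prio_bits[i])) {
                                prio_table[status] = 8 + prio_bits[i];
                                break;
                        }
                }
        }
        prio_table[0] = 0;      /* "nothing pending", as in the kernel tables */
}

int main(void)
{
        build_table();
        /* bits 1 and 4 pending: bit 4 wins under this ordering -> IRQ 12 */
        printf("status 0x12 -> IRQ %u\n", (unsigned int)prio_table[0x12]);
        printf("status 0x02 -> IRQ %u\n", (unsigned int)prio_table[0x02]);
        return 0;
}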
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-rpc/dma.c * * Copyright (C) 1998 Russell King * * DMA functions specific to RiscPC architecture */ #include <linux/mman.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <asm/page.h> #include <asm/dma.h> #include <asm/fiq.h> #include <asm/irq.h> #include <mach/hardware.h> #include <linux/uaccess.h> #include <asm/mach/dma.h> #include <asm/hardware/iomd.h> struct iomd_dma { struct dma_struct dma; void __iomem *base; /* Controller base address */ int irq; /* Controller IRQ */ unsigned int state; dma_addr_t cur_addr; unsigned int cur_len; dma_addr_t dma_addr; unsigned int dma_len; }; #if 0 typedef enum { dma_size_8 = 1, dma_size_16 = 2, dma_size_32 = 4, dma_size_128 = 16 } dma_size_t; #endif #define TRANSFER_SIZE 2 #define CURA (0) #define ENDA (IOMD_IO0ENDA - IOMD_IO0CURA) #define CURB (IOMD_IO0CURB - IOMD_IO0CURA) #define ENDB (IOMD_IO0ENDB - IOMD_IO0CURA) #define CR (IOMD_IO0CR - IOMD_IO0CURA) #define ST (IOMD_IO0ST - IOMD_IO0CURA) static void iomd_get_next_sg(struct iomd_dma *idma) { unsigned long end, offset, flags = 0; if (idma->dma.sg) { idma->cur_addr = idma->dma_addr; offset = idma->cur_addr & ~PAGE_MASK; end = offset + idma->dma_len; if (end > PAGE_SIZE) end = PAGE_SIZE; if (offset + TRANSFER_SIZE >= end) flags |= DMA_END_L; idma->cur_len = end - TRANSFER_SIZE; idma->dma_len -= end - offset; idma->dma_addr += end - offset; if (idma->dma_len == 0) { if (idma->dma.sgcount > 1) { idma->dma.sg = sg_next(idma->dma.sg); idma->dma_addr = idma->dma.sg->dma_address; idma->dma_len = idma->dma.sg->length; idma->dma.sgcount--; } else { idma->dma.sg = NULL; flags |= DMA_END_S; } } } else { flags = DMA_END_S | DMA_END_L; idma->cur_addr = 0; idma->cur_len = 0; } idma->cur_len |= flags; } static irqreturn_t iomd_dma_handle(int irq, void *dev_id) { struct iomd_dma *idma = dev_id; void __iomem *base = idma->base; unsigned int state = idma->state; unsigned int status, cur, end; do { status = readb(base + ST); if (!(status & DMA_ST_INT)) goto out; if ((state ^ status) & DMA_ST_AB) iomd_get_next_sg(idma); // This efficiently implements state = OFL != AB ? AB : 0 state = ((status >> 2) ^ status) & DMA_ST_AB; if (state) { cur = CURA; end = ENDA; } else { cur = CURB; end = ENDB; } writel(idma->cur_addr, base + cur); writel(idma->cur_len, base + end); if (status & DMA_ST_OFL && idma->cur_len == (DMA_END_S|DMA_END_L)) break; } while (1); state = ~DMA_ST_AB; disable_irq_nosync(irq); out: idma->state = state; return IRQ_HANDLED; } static int iomd_request_dma(unsigned int chan, dma_t *dma) { struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma); return request_irq(idma->irq, iomd_dma_handle, 0, idma->dma.device_id, idma); } static void iomd_free_dma(unsigned int chan, dma_t *dma) { struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma); free_irq(idma->irq, idma); } static struct device isa_dma_dev = { .init_name = "fallback device", .coherent_dma_mask = ~(dma_addr_t)0, .dma_mask = &isa_dma_dev.coherent_dma_mask, }; static void iomd_enable_dma(unsigned int chan, dma_t *dma) { struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma); void __iomem *base = idma->base; unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E; if (idma->dma.invalid) { idma->dma.invalid = 0; /* * Cope with ISA-style drivers which expect cache * coherence. 
*/ if (!idma->dma.sg) { idma->dma.sg = &idma->dma.buf; idma->dma.sgcount = 1; idma->dma.buf.length = idma->dma.count; idma->dma.buf.dma_address = dma_map_single(&isa_dma_dev, idma->dma.addr, idma->dma.count, idma->dma.dma_mode == DMA_MODE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } idma->dma_addr = idma->dma.sg->dma_address; idma->dma_len = idma->dma.sg->length; writeb(DMA_CR_C, base + CR); idma->state = DMA_ST_AB; } if (idma->dma.dma_mode == DMA_MODE_READ) ctrl |= DMA_CR_D; writeb(ctrl, base + CR); enable_irq(idma->irq); } static void iomd_disable_dma(unsigned int chan, dma_t *dma) { struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma); void __iomem *base = idma->base; unsigned long flags; local_irq_save(flags); if (idma->state != ~DMA_ST_AB) disable_irq(idma->irq); writeb(0, base + CR); local_irq_restore(flags); } static int iomd_set_dma_speed(unsigned int chan, dma_t *dma, int cycle) { int tcr, speed; if (cycle < 188) speed = 3; else if (cycle <= 250) speed = 2; else if (cycle < 438) speed = 1; else speed = 0; tcr = iomd_readb(IOMD_DMATCR); speed &= 3; switch (chan) { case DMA_0: tcr = (tcr & ~0x03) | speed; break; case DMA_1: tcr = (tcr & ~0x0c) | (speed << 2); break; case DMA_2: tcr = (tcr & ~0x30) | (speed << 4); break; case DMA_3: tcr = (tcr & ~0xc0) | (speed << 6); break; default: break; } iomd_writeb(tcr, IOMD_DMATCR); return speed; } static struct dma_ops iomd_dma_ops = { .type = "IOMD", .request = iomd_request_dma, .free = iomd_free_dma, .enable = iomd_enable_dma, .disable = iomd_disable_dma, .setspeed = iomd_set_dma_speed, }; static struct fiq_handler fh = { .name = "floppydma" }; struct floppy_dma { struct dma_struct dma; unsigned int fiq; }; static void floppy_enable_dma(unsigned int chan, dma_t *dma) { struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma); void *fiqhandler_start; unsigned int fiqhandler_length; struct pt_regs regs; if (fdma->dma.sg) BUG(); if (fdma->dma.dma_mode == DMA_MODE_READ) { extern unsigned char floppy_fiqin_start, floppy_fiqin_end; fiqhandler_start = &floppy_fiqin_start; fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start; } else { extern unsigned char floppy_fiqout_start, floppy_fiqout_end; fiqhandler_start = &floppy_fiqout_start; fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start; } regs.ARM_r9 = fdma->dma.count; regs.ARM_r10 = (unsigned long)fdma->dma.addr; regs.ARM_fp = (unsigned long)FLOPPYDMA_BASE; if (claim_fiq(&fh)) { printk("floppydma: couldn't claim FIQ.\n"); return; } set_fiq_handler(fiqhandler_start, fiqhandler_length); set_fiq_regs(&regs); enable_fiq(fdma->fiq); } static void floppy_disable_dma(unsigned int chan, dma_t *dma) { struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma); disable_fiq(fdma->fiq); release_fiq(&fh); } static int floppy_get_residue(unsigned int chan, dma_t *dma) { struct pt_regs regs; get_fiq_regs(&regs); return regs.ARM_r9; } static struct dma_ops floppy_dma_ops = { .type = "FIQDMA", .enable = floppy_enable_dma, .disable = floppy_disable_dma, .residue = floppy_get_residue, }; /* * This is virtual DMA - we don't need anything here. 
*/ static void sound_enable_disable_dma(unsigned int chan, dma_t *dma) { } static struct dma_ops sound_dma_ops = { .type = "VIRTUAL", .enable = sound_enable_disable_dma, .disable = sound_enable_disable_dma, }; static struct iomd_dma iomd_dma[6]; static struct floppy_dma floppy_dma = { .dma = { .d_ops = &floppy_dma_ops, }, .fiq = FIQ_FLOPPYDATA, }; static dma_t sound_dma = { .d_ops = &sound_dma_ops, }; static int __init rpc_dma_init(void) { unsigned int i; int ret; iomd_writeb(0, IOMD_IO0CR); iomd_writeb(0, IOMD_IO1CR); iomd_writeb(0, IOMD_IO2CR); iomd_writeb(0, IOMD_IO3CR); iomd_writeb(0xa0, IOMD_DMATCR); /* * Setup DMA channels 2,3 to be for podules * and channels 0,1 for internal devices */ iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT); iomd_dma[DMA_0].base = IOMD_BASE + IOMD_IO0CURA; iomd_dma[DMA_0].irq = IRQ_DMA0; iomd_dma[DMA_1].base = IOMD_BASE + IOMD_IO1CURA; iomd_dma[DMA_1].irq = IRQ_DMA1; iomd_dma[DMA_2].base = IOMD_BASE + IOMD_IO2CURA; iomd_dma[DMA_2].irq = IRQ_DMA2; iomd_dma[DMA_3].base = IOMD_BASE + IOMD_IO3CURA; iomd_dma[DMA_3].irq = IRQ_DMA3; iomd_dma[DMA_S0].base = IOMD_BASE + IOMD_SD0CURA; iomd_dma[DMA_S0].irq = IRQ_DMAS0; iomd_dma[DMA_S1].base = IOMD_BASE + IOMD_SD1CURA; iomd_dma[DMA_S1].irq = IRQ_DMAS1; for (i = DMA_0; i <= DMA_S1; i++) { iomd_dma[i].dma.d_ops = &iomd_dma_ops; ret = isa_dma_add(i, &iomd_dma[i].dma); if (ret) printk("IOMDDMA%u: unable to register: %d\n", i, ret); } ret = isa_dma_add(DMA_VIRTUAL_FLOPPY, &floppy_dma.dma); if (ret) printk("IOMDFLOPPY: unable to register: %d\n", ret); ret = isa_dma_add(DMA_VIRTUAL_SOUND, &sound_dma); if (ret) printk("IOMDSOUND: unable to register: %d\n", ret); return 0; } core_initcall(rpc_dma_init);
linux-master
arch/arm/mach-rpc/dma.c
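/*
 * A minimal userspace sketch of what iomd_set_dma_speed() above does with
 * IOMD_DMATCR: the requested cycle time is mapped to a 2-bit speed code, and
 * that code is read-modify-written into the channel's 2-bit field.  Assuming
 * channel numbers 0..3 (as the iomd_dma[] indexing above suggests), the
 * kernel's per-channel switch amounts to the generic mask/shift used here;
 * the helper names are illustrative only.
 */
#include <stdio.h>

static int cycle_to_speed(int cycle)
{
        if (cycle < 188)
                return 3;
        else if (cycle <= 250)
                return 2;
        else if (cycle < 438)
                return 1;
        return 0;
}

static unsigned int update_tcr(unsigned int tcr, unsigned int chan, int speed)
{
        unsigned int shift = chan * 2;  /* channel n occupies bits 2n+1:2n */

        return (tcr & ~(0x3u << shift)) | ((unsigned int)(speed & 3) << shift);
}

int main(void)
{
        unsigned int tcr = 0xa0;        /* reset value written by rpc_dma_init() */

        tcr = update_tcr(tcr, 1, cycle_to_speed(200));  /* channel 1, code 2 */
        tcr = update_tcr(tcr, 3, cycle_to_speed(500));  /* channel 3, code 0 */
        printf("DMATCR = %#04x\n", tcr);                /* expect 0x28 */
        return 0;
}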
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/common/time-acorn.c * * Copyright (c) 1996-2000 Russell King. * * Changelog: * 24-Sep-1996 RMK Created * 10-Oct-1996 RMK Brought up to date with arch-sa110eval * 04-Dec-1997 RMK Updated for new arch/arm/time.c * 13=Jun-2004 DS Moved to arch/arm/common b/c shared w/CLPS7500 */ #include <linux/clocksource.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/hardware/ioc.h> #include <asm/mach/time.h> #define RPC_CLOCK_FREQ 2000000 #define RPC_LATCH DIV_ROUND_CLOSEST(RPC_CLOCK_FREQ, HZ) static u32 ioc_time; static u64 ioc_timer_read(struct clocksource *cs) { unsigned int count1, count2, status; unsigned long flags; u32 ticks; local_irq_save(flags); ioc_writeb (0, IOC_T0LATCH); barrier (); count1 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8); barrier (); status = ioc_readb(IOC_IRQREQA); barrier (); ioc_writeb (0, IOC_T0LATCH); barrier (); count2 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8); ticks = ioc_time + RPC_LATCH - count2; local_irq_restore(flags); if (count2 < count1) { /* * The timer has not reloaded between reading count1 and * count2, check whether an interrupt was actually pending. */ if (status & (1 << 5)) ticks += RPC_LATCH; } else if (count2 > count1) { /* * The timer has reloaded, so count2 indicates the new * count since the wrap. The interrupt would not have * been processed, so add the missed ticks. */ ticks += RPC_LATCH; } return ticks; } static struct clocksource ioctime_clocksource = { .read = ioc_timer_read, .mask = CLOCKSOURCE_MASK(32), .rating = 100, }; void __init ioctime_init(void) { ioc_writeb(RPC_LATCH & 255, IOC_T0LTCHL); ioc_writeb(RPC_LATCH >> 8, IOC_T0LTCHH); ioc_writeb(0, IOC_T0GO); } static irqreturn_t ioc_timer_interrupt(int irq, void *dev_id) { ioc_time += RPC_LATCH; legacy_timer_tick(1); return IRQ_HANDLED; } /* * Set up timer interrupt. */ void __init ioc_timer_init(void) { WARN_ON(clocksource_register_hz(&ioctime_clocksource, RPC_CLOCK_FREQ)); ioctime_init(); if (request_irq(IRQ_TIMER0, ioc_timer_interrupt, 0, "timer", NULL)) pr_err("Failed to request irq %d (timer)\n", IRQ_TIMER0); }
linux-master
arch/arm/mach-rpc/time.c
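/*
 * A minimal userspace sketch of the clocksource arithmetic in
 * ioc_timer_read() above: the IOC timer is a 2 MHz down-counter reloaded from
 * RPC_LATCH on every timer interrupt, ioc_time accumulates RPC_LATCH per
 * interrupt, and a reading is ioc_time + (RPC_LATCH - count).  The double-read
 * and pending-interrupt fixups of the real function are deliberately left out;
 * the DEMO_* names are stand-ins for the kernel constants.
 */
#include <stdio.h>

#define DEMO_CLOCK_FREQ 2000000u
#define DEMO_HZ 100u
/* DIV_ROUND_CLOSEST(RPC_CLOCK_FREQ, HZ) from the kernel source */
#define DEMO_LATCH ((DEMO_CLOCK_FREQ + DEMO_HZ / 2) / DEMO_HZ)

static unsigned int clocksource_read(unsigned int ioc_time, unsigned int count)
{
        return ioc_time + DEMO_LATCH - count;
}

int main(void)
{
        unsigned int ioc_time = 3 * DEMO_LATCH; /* three tick interrupts so far */

        printf("latch = %u (one tick every %u us)\n",
               DEMO_LATCH, 1000000u / DEMO_HZ);
        /* counter freshly reloaded: the reading equals the accumulated time */
        printf("count=%u -> %u\n", DEMO_LATCH,
               clocksource_read(ioc_time, DEMO_LATCH));
        /* counter almost expired: nearly one full latch period has elapsed */
        printf("count=%u -> %u\n", 1u, clocksource_read(ioc_time, 1));
        return 0;
}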
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-rpc/riscpc.c * * Copyright (C) 1998-2001 Russell King * * Architecture specific fixups. */ #include <linux/kernel.h> #include <linux/tty.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/serial_8250.h> #include <linux/ata_platform.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/reboot.h> #include <asm/elf.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <asm/hardware/iomd.h> #include <asm/page.h> #include <asm/domain.h> #include <asm/setup.h> #include <asm/system_misc.h> #include <asm/mach/map.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> extern void rpc_init_irq(void); unsigned int vram_size; unsigned int memc_ctrl_reg; unsigned int number_mfm_drives; static int __init parse_tag_acorn(const struct tag *tag) { memc_ctrl_reg = tag->u.acorn.memc_control_reg; number_mfm_drives = tag->u.acorn.adfsdrives; switch (tag->u.acorn.vram_pages) { case 512: vram_size += PAGE_SIZE * 256; fallthrough; /* ??? */ case 256: vram_size += PAGE_SIZE * 256; break; default: break; } #if 0 if (vram_size) { desc->video_start = 0x02000000; desc->video_end = 0x02000000 + vram_size; } #endif return 0; } __tagtable(ATAG_ACORN, parse_tag_acorn); static struct map_desc rpc_io_desc[] __initdata = { { /* VRAM */ .virtual = SCREEN_BASE, .pfn = __phys_to_pfn(SCREEN_START), .length = 2*1048576, .type = MT_DEVICE }, { /* IO space */ .virtual = (u32)IO_BASE, .pfn = __phys_to_pfn(IO_START), .length = IO_SIZE , .type = MT_DEVICE }, { /* EASI space */ .virtual = (unsigned long)EASI_BASE, .pfn = __phys_to_pfn(EASI_START), .length = EASI_SIZE, .type = MT_DEVICE } }; static void __init rpc_map_io(void) { iotable_init(rpc_io_desc, ARRAY_SIZE(rpc_io_desc)); /* * Turn off floppy. 
*/ writeb(0xc, PCIO_BASE + (0x3f2 << 2)); /* * RiscPC can't handle half-word loads and stores */ elf_hwcap &= ~HWCAP_HALF; } static struct resource acornfb_resources[] = { /* VIDC */ DEFINE_RES_MEM(0x03400000, 0x00200000), DEFINE_RES_IRQ(IRQ_VSYNCPULSE), }; static struct platform_device acornfb_device = { .name = "acornfb", .id = -1, .dev = { .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(acornfb_resources), .resource = acornfb_resources, }; static struct resource iomd_resources[] = { DEFINE_RES_MEM(0x03200000, 0x10000), }; static struct platform_device iomd_device = { .name = "iomd", .id = -1, .num_resources = ARRAY_SIZE(iomd_resources), .resource = iomd_resources, }; static struct resource iomd_kart_resources[] = { DEFINE_RES_IRQ(IRQ_KEYBOARDRX), DEFINE_RES_IRQ(IRQ_KEYBOARDTX), }; static struct platform_device kbd_device = { .name = "kart", .id = -1, .dev = { .parent = &iomd_device.dev, }, .num_resources = ARRAY_SIZE(iomd_kart_resources), .resource = iomd_kart_resources, }; static struct plat_serial8250_port serial_platform_data[] = { { .mapbase = 0x03010fe0, .irq = IRQ_SERIALPORT, .uartclk = 1843200, .regshift = 2, .iotype = UPIO_MEM, .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_SKIP_TEST, }, { }, }; static struct platform_device serial_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = serial_platform_data, }, }; static struct pata_platform_info pata_platform_data = { .ioport_shift = 2, }; static struct resource pata_resources[] = { DEFINE_RES_MEM(0x030107c0, 0x20), DEFINE_RES_MEM(0x03010fd8, 0x04), DEFINE_RES_IRQ(IRQ_HARDDISK), }; static struct platform_device pata_device = { .name = "pata_platform", .id = -1, .num_resources = ARRAY_SIZE(pata_resources), .resource = pata_resources, .dev = { .platform_data = &pata_platform_data, .coherent_dma_mask = ~0, /* grumble */ }, }; static struct platform_device *devs[] __initdata = { &iomd_device, &kbd_device, &serial_device, &acornfb_device, &pata_device, }; static struct i2c_board_info i2c_rtc = { I2C_BOARD_INFO("pcf8583", 0x50) }; static int __init rpc_init(void) { i2c_register_board_info(0, &i2c_rtc, 1); return platform_add_devices(devs, ARRAY_SIZE(devs)); } arch_initcall(rpc_init); static void rpc_restart(enum reboot_mode mode, const char *cmd) { iomd_writeb(0, IOMD_ROMCR0); /* * Jump into the ROM */ soft_restart(0); } void ioc_timer_init(void); MACHINE_START(RISCPC, "Acorn-RiscPC") /* Maintainer: Russell King */ .atag_offset = 0x100, .reserve_lp0 = 1, .reserve_lp1 = 1, .map_io = rpc_map_io, .init_irq = rpc_init_irq, .init_time = ioc_timer_init, .restart = rpc_restart, MACHINE_END
linux-master
arch/arm/mach-rpc/riscpc.c
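
As a side note on parse_tag_acorn() above: the vram_pages switch deliberately falls through so that 512 pages yields twice the increment of 256 pages. A stand-alone sketch of the resulting mapping (the 4 KiB PAGE_SIZE value and the helper name are assumptions):

#include <stdio.h>

#define PAGE_SIZE 4096U		/* assumed 4 KiB pages, as on RiscPC */

/* 512 VRAM pages -> 2 MiB, 256 pages -> 1 MiB, anything else -> no VRAM */
static unsigned int acorn_vram_bytes(unsigned int vram_pages)
{
	unsigned int vram_size = 0;

	switch (vram_pages) {
	case 512:
		vram_size += PAGE_SIZE * 256;
		/* fall through, as in the tag parser */
	case 256:
		vram_size += PAGE_SIZE * 256;
		break;
	default:
		break;
	}
	return vram_size;
}

int main(void)
{
	printf("%u %u %u\n", acorn_vram_bytes(512), acorn_vram_bytes(256),
	       acorn_vram_bytes(0));
	return 0;
}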
// SPDX-License-Identifier: GPL-2.0-only /* * Bit sliced AES using NEON instructions * * Copyright (C) 2017 Linaro Ltd <[email protected]> */ #include <asm/neon.h> #include <asm/simd.h> #include <crypto/aes.h> #include <crypto/ctr.h> #include <crypto/internal/cipher.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <crypto/xts.h> #include <linux/module.h> MODULE_AUTHOR("Ard Biesheuvel <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("ecb(aes)"); MODULE_ALIAS_CRYPTO("cbc(aes)-all"); MODULE_ALIAS_CRYPTO("ctr(aes)"); MODULE_ALIAS_CRYPTO("xts(aes)"); MODULE_IMPORT_NS(CRYPTO_INTERNAL); asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds); asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks); asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks); asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 iv[]); asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 ctr[]); asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 iv[], int); asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 iv[], int); struct aesbs_ctx { int rounds; u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE); }; struct aesbs_cbc_ctx { struct aesbs_ctx key; struct crypto_skcipher *enc_tfm; }; struct aesbs_xts_ctx { struct aesbs_ctx key; struct crypto_cipher *cts_tfm; struct crypto_cipher *tweak_tfm; }; struct aesbs_ctr_ctx { struct aesbs_ctx key; /* must be first member */ struct crypto_aes_ctx fallback; }; static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_aes_ctx rk; int err; err = aes_expandkey(&rk, in_key, key_len); if (err) return err; ctx->rounds = 6 + key_len / 4; kernel_neon_begin(); aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds); kernel_neon_end(); return 0; } static int __ecb_crypt(struct skcipher_request *req, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks)) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; int err; err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes >= AES_BLOCK_SIZE) { unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; if (walk.nbytes < walk.total) blocks = round_down(blocks, walk.stride / AES_BLOCK_SIZE); kernel_neon_begin(); fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk, ctx->rounds, blocks); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes - blocks * AES_BLOCK_SIZE); } return err; } static int ecb_encrypt(struct skcipher_request *req) { return __ecb_crypt(req, aesbs_ecb_encrypt); } static int ecb_decrypt(struct skcipher_request *req) { return __ecb_crypt(req, aesbs_ecb_decrypt); } static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_aes_ctx rk; int err; err = aes_expandkey(&rk, in_key, key_len); if (err) return err; ctx->key.rounds = 6 + key_len / 4; kernel_neon_begin(); aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds); kernel_neon_end(); memzero_explicit(&rk, sizeof(rk)); return crypto_skcipher_setkey(ctx->enc_tfm, in_key, 
key_len); } static int cbc_encrypt(struct skcipher_request *req) { struct skcipher_request *subreq = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); skcipher_request_set_tfm(subreq, ctx->enc_tfm); skcipher_request_set_callback(subreq, skcipher_request_flags(req), NULL, NULL); skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv); return crypto_skcipher_encrypt(subreq); } static int cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; int err; err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes >= AES_BLOCK_SIZE) { unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; if (walk.nbytes < walk.total) blocks = round_down(blocks, walk.stride / AES_BLOCK_SIZE); kernel_neon_begin(); aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk, ctx->key.rounds, blocks, walk.iv); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes - blocks * AES_BLOCK_SIZE); } return err; } static int cbc_init(struct crypto_skcipher *tfm) { struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); unsigned int reqsize; ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->enc_tfm)) return PTR_ERR(ctx->enc_tfm); reqsize = sizeof(struct skcipher_request); reqsize += crypto_skcipher_reqsize(ctx->enc_tfm); crypto_skcipher_set_reqsize(tfm, reqsize); return 0; } static void cbc_exit(struct crypto_skcipher *tfm) { struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); crypto_free_skcipher(ctx->enc_tfm); } static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); int err; err = aes_expandkey(&ctx->fallback, in_key, key_len); if (err) return err; ctx->key.rounds = 6 + key_len / 4; kernel_neon_begin(); aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds); kernel_neon_end(); return 0; } static int ctr_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; u8 buf[AES_BLOCK_SIZE]; int err; err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes > 0) { const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; int bytes = walk.nbytes; if (unlikely(bytes < AES_BLOCK_SIZE)) src = dst = memcpy(buf + sizeof(buf) - bytes, src, bytes); else if (walk.nbytes < walk.total) bytes &= ~(8 * AES_BLOCK_SIZE - 1); kernel_neon_begin(); aesbs_ctr_encrypt(dst, src, ctx->rk, ctx->rounds, bytes, walk.iv); kernel_neon_end(); if (unlikely(bytes < AES_BLOCK_SIZE)) memcpy(walk.dst.virt.addr, buf + sizeof(buf) - bytes, bytes); err = skcipher_walk_done(&walk, walk.nbytes - bytes); } return err; } static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst) { struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); unsigned long flags; /* * Temporarily disable interrupts to avoid races where * cachelines are evicted when the CPU is interrupted * to do something else. 
*/ local_irq_save(flags); aes_encrypt(&ctx->fallback, dst, src); local_irq_restore(flags); } static int ctr_encrypt_sync(struct skcipher_request *req) { if (!crypto_simd_usable()) return crypto_ctr_encrypt_walk(req, ctr_encrypt_one); return ctr_encrypt(req); } static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); int err; err = xts_verify_key(tfm, in_key, key_len); if (err) return err; key_len /= 2; err = crypto_cipher_setkey(ctx->cts_tfm, in_key, key_len); if (err) return err; err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len); if (err) return err; return aesbs_setkey(tfm, in_key, key_len); } static int xts_init(struct crypto_skcipher *tfm) { struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0); if (IS_ERR(ctx->cts_tfm)) return PTR_ERR(ctx->cts_tfm); ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0); if (IS_ERR(ctx->tweak_tfm)) crypto_free_cipher(ctx->cts_tfm); return PTR_ERR_OR_ZERO(ctx->tweak_tfm); } static void xts_exit(struct crypto_skcipher *tfm) { struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); crypto_free_cipher(ctx->tweak_tfm); crypto_free_cipher(ctx->cts_tfm); } static int __xts_crypt(struct skcipher_request *req, bool encrypt, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 iv[], int)) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); int tail = req->cryptlen % AES_BLOCK_SIZE; struct skcipher_request subreq; u8 buf[2 * AES_BLOCK_SIZE]; struct skcipher_walk walk; int err; if (req->cryptlen < AES_BLOCK_SIZE) return -EINVAL; if (unlikely(tail)) { skcipher_request_set_tfm(&subreq, tfm); skcipher_request_set_callback(&subreq, skcipher_request_flags(req), NULL, NULL); skcipher_request_set_crypt(&subreq, req->src, req->dst, req->cryptlen - tail, req->iv); req = &subreq; } err = skcipher_walk_virt(&walk, req, true); if (err) return err; crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv); while (walk.nbytes >= AES_BLOCK_SIZE) { unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; int reorder_last_tweak = !encrypt && tail > 0; if (walk.nbytes < walk.total) { blocks = round_down(blocks, walk.stride / AES_BLOCK_SIZE); reorder_last_tweak = 0; } kernel_neon_begin(); fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk, ctx->key.rounds, blocks, walk.iv, reorder_last_tweak); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes - blocks * AES_BLOCK_SIZE); } if (err || likely(!tail)) return err; /* handle ciphertext stealing */ scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE, AES_BLOCK_SIZE, 0); memcpy(buf + AES_BLOCK_SIZE, buf, tail); scatterwalk_map_and_copy(buf, req->src, req->cryptlen, tail, 0); crypto_xor(buf, req->iv, AES_BLOCK_SIZE); if (encrypt) crypto_cipher_encrypt_one(ctx->cts_tfm, buf, buf); else crypto_cipher_decrypt_one(ctx->cts_tfm, buf, buf); crypto_xor(buf, req->iv, AES_BLOCK_SIZE); scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE, AES_BLOCK_SIZE + tail, 1); return 0; } static int xts_encrypt(struct skcipher_request *req) { return __xts_crypt(req, true, aesbs_xts_encrypt); } static int xts_decrypt(struct skcipher_request *req) { return __xts_crypt(req, false, aesbs_xts_decrypt); } static struct skcipher_alg aes_algs[] = { { .base.cra_name = "__ecb(aes)", .base.cra_driver_name = "__ecb-aes-neonbs", .base.cra_priority = 250, .base.cra_blocksize = 
AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct aesbs_ctx), .base.cra_module = THIS_MODULE, .base.cra_flags = CRYPTO_ALG_INTERNAL, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .walksize = 8 * AES_BLOCK_SIZE, .setkey = aesbs_setkey, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { .base.cra_name = "__cbc(aes)", .base.cra_driver_name = "__cbc-aes-neonbs", .base.cra_priority = 250, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx), .base.cra_module = THIS_MODULE, .base.cra_flags = CRYPTO_ALG_INTERNAL | CRYPTO_ALG_NEED_FALLBACK, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .walksize = 8 * AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = aesbs_cbc_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, .init = cbc_init, .exit = cbc_exit, }, { .base.cra_name = "__ctr(aes)", .base.cra_driver_name = "__ctr-aes-neonbs", .base.cra_priority = 250, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct aesbs_ctx), .base.cra_module = THIS_MODULE, .base.cra_flags = CRYPTO_ALG_INTERNAL, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .chunksize = AES_BLOCK_SIZE, .walksize = 8 * AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = aesbs_setkey, .encrypt = ctr_encrypt, .decrypt = ctr_encrypt, }, { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "ctr-aes-neonbs-sync", .base.cra_priority = 250 - 1, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct aesbs_ctr_ctx), .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .chunksize = AES_BLOCK_SIZE, .walksize = 8 * AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = aesbs_ctr_setkey_sync, .encrypt = ctr_encrypt_sync, .decrypt = ctr_encrypt_sync, }, { .base.cra_name = "__xts(aes)", .base.cra_driver_name = "__xts-aes-neonbs", .base.cra_priority = 250, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct aesbs_xts_ctx), .base.cra_module = THIS_MODULE, .base.cra_flags = CRYPTO_ALG_INTERNAL, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .walksize = 8 * AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = aesbs_xts_setkey, .encrypt = xts_encrypt, .decrypt = xts_decrypt, .init = xts_init, .exit = xts_exit, } }; static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)]; static void aes_exit(void) { int i; for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++) if (aes_simd_algs[i]) simd_skcipher_free(aes_simd_algs[i]); crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); } static int __init aes_init(void) { struct simd_skcipher_alg *simd; const char *basename; const char *algname; const char *drvname; int err; int i; if (!(elf_hwcap & HWCAP_NEON)) return -ENODEV; err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); if (err) return err; for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL)) continue; algname = aes_algs[i].base.cra_name + 2; drvname = aes_algs[i].base.cra_driver_name + 2; basename = aes_algs[i].base.cra_driver_name; simd = simd_skcipher_create_compat(algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto unregister_simds; aes_simd_algs[i] = simd; } return 0; unregister_simds: aes_exit(); return err; } late_initcall(aes_init); module_exit(aes_exit);
linux-master
arch/arm/crypto/aes-neonbs-glue.c
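
For context (not part of the driver above): the "ctr(aes)" instances this glue registers are reachable through the generic skcipher API. The sketch below is one plausible way to drive them from other kernel code; the helper name and the assumption that buf is a kmalloc'd linear buffer are mine, and the name "ctr(aes)" simply resolves to whichever registered implementation currently has the highest priority.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Encrypt len bytes of a kmalloc'd buffer in place with ctr(aes).  The
 * crypto_wait helpers cope with both synchronous and asynchronous
 * implementations, so it does not matter which driver wins the lookup.
 */
static int ctr_aes_demo(const u8 *key, unsigned int keylen,
			u8 *buf, unsigned int len, u8 iv[16])
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}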
// SPDX-License-Identifier: GPL-2.0-or-later /* * Glue code for the SHA256 Secure Hash Algorithm assembly implementation * using optimized ARM assembler and NEON instructions. * * Copyright © 2015 Google Inc. * * This file is based on sha256_ssse3_glue.c: * Copyright (C) 2013 Intel Corporation * Author: Tim Chen <[email protected]> */ #include <crypto/internal/hash.h> #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <linux/string.h> #include <crypto/sha2.h> #include <crypto/sha256_base.h> #include <asm/simd.h> #include <asm/neon.h> #include "sha256_glue.h" asmlinkage void sha256_block_data_order(u32 *digest, const void *data, unsigned int num_blks); int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data, unsigned int len) { /* make sure casting to sha256_block_fn() is safe */ BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0); return sha256_base_do_update(desc, data, len, (sha256_block_fn *)sha256_block_data_order); } EXPORT_SYMBOL(crypto_sha256_arm_update); static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out) { sha256_base_do_finalize(desc, (sha256_block_fn *)sha256_block_data_order); return sha256_base_finish(desc, out); } int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { sha256_base_do_update(desc, data, len, (sha256_block_fn *)sha256_block_data_order); return crypto_sha256_arm_final(desc, out); } EXPORT_SYMBOL(crypto_sha256_arm_finup); static struct shash_alg algs[] = { { .digestsize = SHA256_DIGEST_SIZE, .init = sha256_base_init, .update = crypto_sha256_arm_update, .final = crypto_sha256_arm_final, .finup = crypto_sha256_arm_finup, .descsize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-asm", .cra_priority = 150, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, } }, { .digestsize = SHA224_DIGEST_SIZE, .init = sha224_base_init, .update = crypto_sha256_arm_update, .final = crypto_sha256_arm_final, .finup = crypto_sha256_arm_finup, .descsize = sizeof(struct sha256_state), .base = { .cra_name = "sha224", .cra_driver_name = "sha224-asm", .cra_priority = 150, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_module = THIS_MODULE, } } }; static int __init sha256_mod_init(void) { int res = crypto_register_shashes(algs, ARRAY_SIZE(algs)); if (res < 0) return res; if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) { res = crypto_register_shashes(sha256_neon_algs, ARRAY_SIZE(sha256_neon_algs)); if (res < 0) crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } return res; } static void __exit sha256_mod_fini(void) { crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) crypto_unregister_shashes(sha256_neon_algs, ARRAY_SIZE(sha256_neon_algs)); } module_init(sha256_mod_init); module_exit(sha256_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm (ARM), including NEON"); MODULE_ALIAS_CRYPTO("sha256");
linux-master
arch/arm/crypto/sha256_glue.c
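
A small usage sketch for the hashes registered above: a one-shot SHA-256 of a linear buffer via the shash API. The helper name is an assumption; "sha256" resolves to "sha256-asm" or the NEON variant by priority and CPU support.

#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/err.h>

/* One-shot SHA-256 of a linear buffer via the shash API. */
static int sha256_arm_demo(const u8 *data, unsigned int len,
			   u8 digest[SHA256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, digest);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}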
// SPDX-License-Identifier: GPL-2.0 /* * ARM NEON accelerated ChaCha and XChaCha stream ciphers, * including ChaCha20 (RFC7539) * * Copyright (C) 2016-2019 Linaro, Ltd. <[email protected]> * Copyright (C) 2015 Martin Willi */ #include <crypto/algapi.h> #include <crypto/internal/chacha.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/module.h> #include <asm/cputype.h> #include <asm/hwcap.h> #include <asm/neon.h> #include <asm/simd.h> asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src, int nrounds); asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src, int nrounds, unsigned int nbytes); asmlinkage void hchacha_block_arm(const u32 *state, u32 *out, int nrounds); asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds); asmlinkage void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes, const u32 *state, int nrounds); static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_neon); static inline bool neon_usable(void) { return static_branch_likely(&use_neon) && crypto_simd_usable(); } static void chacha_doneon(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { u8 buf[CHACHA_BLOCK_SIZE]; while (bytes > CHACHA_BLOCK_SIZE) { unsigned int l = min(bytes, CHACHA_BLOCK_SIZE * 4U); chacha_4block_xor_neon(state, dst, src, nrounds, l); bytes -= l; src += l; dst += l; state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE); } if (bytes) { const u8 *s = src; u8 *d = dst; if (bytes != CHACHA_BLOCK_SIZE) s = d = memcpy(buf, src, bytes); chacha_block_xor_neon(state, d, s, nrounds); if (d != dst) memcpy(dst, buf, bytes); state[12]++; } } void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) { if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) { hchacha_block_arm(state, stream, nrounds); } else { kernel_neon_begin(); hchacha_block_neon(state, stream, nrounds); kernel_neon_end(); } } EXPORT_SYMBOL(hchacha_block_arch); void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) { chacha_init_generic(state, key, iv); } EXPORT_SYMBOL(chacha_init_arch); void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable() || bytes <= CHACHA_BLOCK_SIZE) { chacha_doarm(dst, src, bytes, state, nrounds); state[12] += DIV_ROUND_UP(bytes, CHACHA_BLOCK_SIZE); return; } do { unsigned int todo = min_t(unsigned int, bytes, SZ_4K); kernel_neon_begin(); chacha_doneon(state, dst, src, todo, nrounds); kernel_neon_end(); bytes -= todo; src += todo; dst += todo; } while (bytes); } EXPORT_SYMBOL(chacha_crypt_arch); static int chacha_stream_xor(struct skcipher_request *req, const struct chacha_ctx *ctx, const u8 *iv, bool neon) { struct skcipher_walk walk; u32 state[16]; int err; err = skcipher_walk_virt(&walk, req, false); chacha_init_generic(state, ctx->key, iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; if (nbytes < walk.total) nbytes = round_down(nbytes, walk.stride); if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) { chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr, nbytes, state, ctx->nrounds); state[12] += DIV_ROUND_UP(nbytes, CHACHA_BLOCK_SIZE); } else { kernel_neon_begin(); chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr, nbytes, ctx->nrounds); kernel_neon_end(); } err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } return err; } static int do_chacha(struct skcipher_request *req, bool neon) { 
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); return chacha_stream_xor(req, ctx, req->iv, neon); } static int chacha_arm(struct skcipher_request *req) { return do_chacha(req, false); } static int chacha_neon(struct skcipher_request *req) { return do_chacha(req, neon_usable()); } static int do_xchacha(struct skcipher_request *req, bool neon) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); struct chacha_ctx subctx; u32 state[16]; u8 real_iv[16]; chacha_init_generic(state, ctx->key, req->iv); if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) { hchacha_block_arm(state, subctx.key, ctx->nrounds); } else { kernel_neon_begin(); hchacha_block_neon(state, subctx.key, ctx->nrounds); kernel_neon_end(); } subctx.nrounds = ctx->nrounds; memcpy(&real_iv[0], req->iv + 24, 8); memcpy(&real_iv[8], req->iv + 16, 8); return chacha_stream_xor(req, &subctx, real_iv, neon); } static int xchacha_arm(struct skcipher_request *req) { return do_xchacha(req, false); } static int xchacha_neon(struct skcipher_request *req) { return do_xchacha(req, neon_usable()); } static struct skcipher_alg arm_algs[] = { { .base.cra_name = "chacha20", .base.cra_driver_name = "chacha20-arm", .base.cra_priority = 200, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CHACHA_KEY_SIZE, .max_keysize = CHACHA_KEY_SIZE, .ivsize = CHACHA_IV_SIZE, .chunksize = CHACHA_BLOCK_SIZE, .setkey = chacha20_setkey, .encrypt = chacha_arm, .decrypt = chacha_arm, }, { .base.cra_name = "xchacha20", .base.cra_driver_name = "xchacha20-arm", .base.cra_priority = 200, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CHACHA_KEY_SIZE, .max_keysize = CHACHA_KEY_SIZE, .ivsize = XCHACHA_IV_SIZE, .chunksize = CHACHA_BLOCK_SIZE, .setkey = chacha20_setkey, .encrypt = xchacha_arm, .decrypt = xchacha_arm, }, { .base.cra_name = "xchacha12", .base.cra_driver_name = "xchacha12-arm", .base.cra_priority = 200, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CHACHA_KEY_SIZE, .max_keysize = CHACHA_KEY_SIZE, .ivsize = XCHACHA_IV_SIZE, .chunksize = CHACHA_BLOCK_SIZE, .setkey = chacha12_setkey, .encrypt = xchacha_arm, .decrypt = xchacha_arm, }, }; static struct skcipher_alg neon_algs[] = { { .base.cra_name = "chacha20", .base.cra_driver_name = "chacha20-neon", .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CHACHA_KEY_SIZE, .max_keysize = CHACHA_KEY_SIZE, .ivsize = CHACHA_IV_SIZE, .chunksize = CHACHA_BLOCK_SIZE, .walksize = 4 * CHACHA_BLOCK_SIZE, .setkey = chacha20_setkey, .encrypt = chacha_neon, .decrypt = chacha_neon, }, { .base.cra_name = "xchacha20", .base.cra_driver_name = "xchacha20-neon", .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CHACHA_KEY_SIZE, .max_keysize = CHACHA_KEY_SIZE, .ivsize = XCHACHA_IV_SIZE, .chunksize = CHACHA_BLOCK_SIZE, .walksize = 4 * CHACHA_BLOCK_SIZE, .setkey = chacha20_setkey, .encrypt = xchacha_neon, .decrypt = xchacha_neon, }, { .base.cra_name = "xchacha12", .base.cra_driver_name = "xchacha12-neon", .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = 
THIS_MODULE, .min_keysize = CHACHA_KEY_SIZE, .max_keysize = CHACHA_KEY_SIZE, .ivsize = XCHACHA_IV_SIZE, .chunksize = CHACHA_BLOCK_SIZE, .walksize = 4 * CHACHA_BLOCK_SIZE, .setkey = chacha12_setkey, .encrypt = xchacha_neon, .decrypt = xchacha_neon, } }; static int __init chacha_simd_mod_init(void) { int err = 0; if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) { err = crypto_register_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); if (err) return err; } if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) { int i; switch (read_cpuid_part()) { case ARM_CPU_PART_CORTEX_A7: case ARM_CPU_PART_CORTEX_A5: /* * The Cortex-A7 and Cortex-A5 do not perform well with * the NEON implementation but do incredibly well with the * scalar one and use less power. */ for (i = 0; i < ARRAY_SIZE(neon_algs); i++) neon_algs[i].base.cra_priority = 0; break; default: static_branch_enable(&use_neon); } if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) { err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs)); if (err) crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); } } return err; } static void __exit chacha_simd_mod_fini(void) { if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) { crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) crypto_unregister_skciphers(neon_algs, ARRAY_SIZE(neon_algs)); } } module_init(chacha_simd_mod_init); module_exit(chacha_simd_mod_fini); MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (scalar and NEON accelerated)"); MODULE_AUTHOR("Ard Biesheuvel <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("chacha20"); MODULE_ALIAS_CRYPTO("chacha20-arm"); MODULE_ALIAS_CRYPTO("xchacha20"); MODULE_ALIAS_CRYPTO("xchacha20-arm"); MODULE_ALIAS_CRYPTO("xchacha12"); MODULE_ALIAS_CRYPTO("xchacha12-arm"); #ifdef CONFIG_KERNEL_MODE_NEON MODULE_ALIAS_CRYPTO("chacha20-neon"); MODULE_ALIAS_CRYPTO("xchacha20-neon"); MODULE_ALIAS_CRYPTO("xchacha12-neon"); #endif
linux-master
arch/arm/crypto/chacha-glue.c
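
The library interface exported above (chacha_crypt_arch() together with chacha_init_generic() from <crypto/chacha.h>) can be used directly, without the skcipher wrappers. A minimal sketch, with the helper name being my own assumption:

#include <crypto/chacha.h>
#include <linux/types.h>

/*
 * XOR a buffer in place with the ChaCha20 keystream using the arch entry
 * points exported above; encryption and decryption are the same operation.
 * key is eight little-endian 32-bit words, iv is the 16-byte counter+nonce.
 */
static void chacha20_arm_demo(const u32 key[8], const u8 iv[16],
			      u8 *buf, unsigned int len)
{
	u32 state[16];

	chacha_init_generic(state, key, iv);
	chacha_crypt_arch(state, buf, buf, len, 20);
}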
// SPDX-License-Identifier: GPL-2.0-only /* * Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions * * Copyright (C) 2016 Linaro Ltd <[email protected]> */ #include <linux/crc-t10dif.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <asm/neon.h> #include <asm/simd.h> #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len); static int crct10dif_init(struct shash_desc *desc) { u16 *crc = shash_desc_ctx(desc); *crc = 0; return 0; } static int crct10dif_update(struct shash_desc *desc, const u8 *data, unsigned int length) { u16 *crc = shash_desc_ctx(desc); if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) { kernel_neon_begin(); *crc = crc_t10dif_pmull(*crc, data, length); kernel_neon_end(); } else { *crc = crc_t10dif_generic(*crc, data, length); } return 0; } static int crct10dif_final(struct shash_desc *desc, u8 *out) { u16 *crc = shash_desc_ctx(desc); *(u16 *)out = *crc; return 0; } static struct shash_alg crc_t10dif_alg = { .digestsize = CRC_T10DIF_DIGEST_SIZE, .init = crct10dif_init, .update = crct10dif_update, .final = crct10dif_final, .descsize = CRC_T10DIF_DIGEST_SIZE, .base.cra_name = "crct10dif", .base.cra_driver_name = "crct10dif-arm-ce", .base.cra_priority = 200, .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }; static int __init crc_t10dif_mod_init(void) { if (!(elf_hwcap2 & HWCAP2_PMULL)) return -ENODEV; return crypto_register_shash(&crc_t10dif_alg); } static void __exit crc_t10dif_mod_exit(void) { crypto_unregister_shash(&crc_t10dif_alg); } module_init(crc_t10dif_mod_init); module_exit(crc_t10dif_mod_exit); MODULE_AUTHOR("Ard Biesheuvel <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("crct10dif");
linux-master
arch/arm/crypto/crct10dif-ce-glue.c
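
Most in-kernel users do not call the shash above directly; they go through the crc-t10dif library, which (in its shash-backed form) selects the highest-priority "crct10dif" implementation. Assuming that setup, a call like the sketch below ends up in crc_t10dif_pmull() for sufficiently large buffers. The helper name is mine.

#include <linux/crc-t10dif.h>
#include <linux/types.h>

/*
 * Library entry point used by e.g. the SCSI DIF code.  With the driver
 * above loaded and a buffer of at least CRC_T10DIF_PMULL_CHUNK_SIZE bytes,
 * this is expected to hit the PMULL path.
 */
static u16 t10dif_demo(const u8 *buf, size_t len)
{
	return crc_t10dif(buf, len);
}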
// SPDX-License-Identifier: GPL-2.0-only /* * Scalar AES core transform * * Copyright (C) 2017 Linaro Ltd. * Author: Ard Biesheuvel <[email protected]> */ #include <crypto/aes.h> #include <crypto/algapi.h> #include <linux/module.h> asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out); asmlinkage void __aes_arm_decrypt(u32 *rk, int rounds, const u8 *in, u8 *out); static void aes_arm_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); int rounds = 6 + ctx->key_length / 4; __aes_arm_encrypt(ctx->key_enc, rounds, in, out); } static void aes_arm_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); int rounds = 6 + ctx->key_length / 4; __aes_arm_decrypt(ctx->key_dec, rounds, in, out); } static struct crypto_alg aes_alg = { .cra_name = "aes", .cra_driver_name = "aes-arm", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx), .cra_module = THIS_MODULE, .cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE, .cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE, .cra_cipher.cia_setkey = crypto_aes_set_key, .cra_cipher.cia_encrypt = aes_arm_encrypt, .cra_cipher.cia_decrypt = aes_arm_decrypt, #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS .cra_alignmask = 3, #endif }; static int __init aes_init(void) { return crypto_register_alg(&aes_alg); } static void __exit aes_fini(void) { crypto_unregister_alg(&aes_alg); } module_init(aes_init); module_exit(aes_fini); MODULE_DESCRIPTION("Scalar AES cipher for ARM"); MODULE_AUTHOR("Ard Biesheuvel <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("aes");
linux-master
arch/arm/crypto/aes-cipher-glue.c
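
A usage sketch for the scalar cipher above: encrypting one 16-byte block through the single-block cipher API. That API is internal in current kernels, so a real module needs <crypto/internal/cipher.h> and MODULE_IMPORT_NS(CRYPTO_INTERNAL), as the bit-sliced AES glue earlier in this set does. The helper name is mine, and "aes" resolves to the highest-priority cipher (typically "aes-arm" here).

#include <crypto/aes.h>
#include <crypto/internal/cipher.h>
#include <linux/err.h>

/* Encrypt a single 16-byte AES block with whatever "aes" resolves to. */
static int aes_arm_demo(const u8 *key, unsigned int keylen,
			const u8 in[AES_BLOCK_SIZE], u8 out[AES_BLOCK_SIZE])
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return err;
}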
// SPDX-License-Identifier: GPL-2.0-only /* * Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions * * Copyright (C) 2016 Linaro Ltd <[email protected]> */ #include <linux/cpufeature.h> #include <linux/crc32.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <asm/hwcap.h> #include <asm/neon.h> #include <asm/simd.h> #include <asm/unaligned.h> #define PMULL_MIN_LEN 64L /* minimum size of buffer * for crc32_pmull_le_16 */ #define SCALE_F 16L /* size of NEON register */ asmlinkage u32 crc32_pmull_le(const u8 buf[], u32 len, u32 init_crc); asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], u32 len); asmlinkage u32 crc32c_pmull_le(const u8 buf[], u32 len, u32 init_crc); asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], u32 len); static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], u32 len); static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], u32 len); static int crc32_cra_init(struct crypto_tfm *tfm) { u32 *key = crypto_tfm_ctx(tfm); *key = 0; return 0; } static int crc32c_cra_init(struct crypto_tfm *tfm) { u32 *key = crypto_tfm_ctx(tfm); *key = ~0; return 0; } static int crc32_setkey(struct crypto_shash *hash, const u8 *key, unsigned int keylen) { u32 *mctx = crypto_shash_ctx(hash); if (keylen != sizeof(u32)) return -EINVAL; *mctx = le32_to_cpup((__le32 *)key); return 0; } static int crc32_init(struct shash_desc *desc) { u32 *mctx = crypto_shash_ctx(desc->tfm); u32 *crc = shash_desc_ctx(desc); *crc = *mctx; return 0; } static int crc32_update(struct shash_desc *desc, const u8 *data, unsigned int length) { u32 *crc = shash_desc_ctx(desc); *crc = crc32_armv8_le(*crc, data, length); return 0; } static int crc32c_update(struct shash_desc *desc, const u8 *data, unsigned int length) { u32 *crc = shash_desc_ctx(desc); *crc = crc32c_armv8_le(*crc, data, length); return 0; } static int crc32_final(struct shash_desc *desc, u8 *out) { u32 *crc = shash_desc_ctx(desc); put_unaligned_le32(*crc, out); return 0; } static int crc32c_final(struct shash_desc *desc, u8 *out) { u32 *crc = shash_desc_ctx(desc); put_unaligned_le32(~*crc, out); return 0; } static int crc32_pmull_update(struct shash_desc *desc, const u8 *data, unsigned int length) { u32 *crc = shash_desc_ctx(desc); unsigned int l; if (crypto_simd_usable()) { if ((u32)data % SCALE_F) { l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F)); *crc = fallback_crc32(*crc, data, l); data += l; length -= l; } if (length >= PMULL_MIN_LEN) { l = round_down(length, SCALE_F); kernel_neon_begin(); *crc = crc32_pmull_le(data, l, *crc); kernel_neon_end(); data += l; length -= l; } } if (length > 0) *crc = fallback_crc32(*crc, data, length); return 0; } static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data, unsigned int length) { u32 *crc = shash_desc_ctx(desc); unsigned int l; if (crypto_simd_usable()) { if ((u32)data % SCALE_F) { l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F)); *crc = fallback_crc32c(*crc, data, l); data += l; length -= l; } if (length >= PMULL_MIN_LEN) { l = round_down(length, SCALE_F); kernel_neon_begin(); *crc = crc32c_pmull_le(data, l, *crc); kernel_neon_end(); data += l; length -= l; } } if (length > 0) *crc = fallback_crc32c(*crc, data, length); return 0; } static struct shash_alg crc32_pmull_algs[] = { { .setkey = crc32_setkey, .init = crc32_init, .update = crc32_update, .final = crc32_final, .descsize = sizeof(u32), .digestsize = 
sizeof(u32), .base.cra_ctxsize = sizeof(u32), .base.cra_init = crc32_cra_init, .base.cra_name = "crc32", .base.cra_driver_name = "crc32-arm-ce", .base.cra_priority = 200, .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, }, { .setkey = crc32_setkey, .init = crc32_init, .update = crc32c_update, .final = crc32c_final, .descsize = sizeof(u32), .digestsize = sizeof(u32), .base.cra_ctxsize = sizeof(u32), .base.cra_init = crc32c_cra_init, .base.cra_name = "crc32c", .base.cra_driver_name = "crc32c-arm-ce", .base.cra_priority = 200, .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, } }; static int __init crc32_pmull_mod_init(void) { if (elf_hwcap2 & HWCAP2_PMULL) { crc32_pmull_algs[0].update = crc32_pmull_update; crc32_pmull_algs[1].update = crc32c_pmull_update; if (elf_hwcap2 & HWCAP2_CRC32) { fallback_crc32 = crc32_armv8_le; fallback_crc32c = crc32c_armv8_le; } else { fallback_crc32 = crc32_le; fallback_crc32c = __crc32c_le; } } else if (!(elf_hwcap2 & HWCAP2_CRC32)) { return -ENODEV; } return crypto_register_shashes(crc32_pmull_algs, ARRAY_SIZE(crc32_pmull_algs)); } static void __exit crc32_pmull_mod_exit(void) { crypto_unregister_shashes(crc32_pmull_algs, ARRAY_SIZE(crc32_pmull_algs)); } static const struct cpu_feature __maybe_unused crc32_cpu_feature[] = { { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { } }; MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature); module_init(crc32_pmull_mod_init); module_exit(crc32_pmull_mod_exit); MODULE_AUTHOR("Ard Biesheuvel <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("crc32"); MODULE_ALIAS_CRYPTO("crc32c");
linux-master
arch/arm/crypto/crc32-ce-glue.c
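
The driver above accelerates the "crc32" and "crc32c" shashes; the generic library fallbacks it names (crc32_le() and __crc32c_le()) can also be called directly. The sketch below mirrors the seed and finalisation conventions of the shash callbacks above; the helper names are mine.

#include <linux/crc32.h>
#include <linux/types.h>

/*
 * Generic library fallbacks named by the driver above.  The seed and
 * finalisation mirror the shash conventions: CRC-32 starts at 0 and is
 * emitted as-is, CRC-32C starts at ~0 and is inverted on output.
 */
static u32 crc32_demo(const u8 *buf, size_t len)
{
	return crc32_le(0, buf, len);
}

static u32 crc32c_demo(const u8 *buf, size_t len)
{
	return ~__crc32c_le(~0, buf, len);
}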
// SPDX-License-Identifier: GPL-2.0 OR MIT /* * Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved. * * Based on public domain code from Daniel J. Bernstein and Peter Schwabe. This * began from SUPERCOP's curve25519/neon2/scalarmult.s, but has subsequently been * manually reworked for use in kernel space. */ #include <asm/hwcap.h> #include <asm/neon.h> #include <asm/simd.h> #include <crypto/internal/kpp.h> #include <crypto/internal/simd.h> #include <linux/types.h> #include <linux/module.h> #include <linux/init.h> #include <linux/jump_label.h> #include <linux/scatterlist.h> #include <crypto/curve25519.h> asmlinkage void curve25519_neon(u8 mypublic[CURVE25519_KEY_SIZE], const u8 secret[CURVE25519_KEY_SIZE], const u8 basepoint[CURVE25519_KEY_SIZE]); static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); void curve25519_arch(u8 out[CURVE25519_KEY_SIZE], const u8 scalar[CURVE25519_KEY_SIZE], const u8 point[CURVE25519_KEY_SIZE]) { if (static_branch_likely(&have_neon) && crypto_simd_usable()) { kernel_neon_begin(); curve25519_neon(out, scalar, point); kernel_neon_end(); } else { curve25519_generic(out, scalar, point); } } EXPORT_SYMBOL(curve25519_arch); void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], const u8 secret[CURVE25519_KEY_SIZE]) { return curve25519_arch(pub, secret, curve25519_base_point); } EXPORT_SYMBOL(curve25519_base_arch); static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf, unsigned int len) { u8 *secret = kpp_tfm_ctx(tfm); if (!len) curve25519_generate_secret(secret); else if (len == CURVE25519_KEY_SIZE && crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) memcpy(secret, buf, CURVE25519_KEY_SIZE); else return -EINVAL; return 0; } static int curve25519_compute_value(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); const u8 *secret = kpp_tfm_ctx(tfm); u8 public_key[CURVE25519_KEY_SIZE]; u8 buf[CURVE25519_KEY_SIZE]; int copied, nbytes; u8 const *bp; if (req->src) { copied = sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, CURVE25519_KEY_SIZE), public_key, CURVE25519_KEY_SIZE); if (copied != CURVE25519_KEY_SIZE) return -EINVAL; bp = public_key; } else { bp = curve25519_base_point; } curve25519_arch(buf, secret, bp); /* might want less than we've got */ nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len); copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, nbytes), buf, nbytes); if (copied != nbytes) return -EINVAL; return 0; } static unsigned int curve25519_max_size(struct crypto_kpp *tfm) { return CURVE25519_KEY_SIZE; } static struct kpp_alg curve25519_alg = { .base.cra_name = "curve25519", .base.cra_driver_name = "curve25519-neon", .base.cra_priority = 200, .base.cra_module = THIS_MODULE, .base.cra_ctxsize = CURVE25519_KEY_SIZE, .set_secret = curve25519_set_secret, .generate_public_key = curve25519_compute_value, .compute_shared_secret = curve25519_compute_value, .max_size = curve25519_max_size, }; static int __init arm_curve25519_init(void) { if (elf_hwcap & HWCAP_NEON) { static_branch_enable(&have_neon); return IS_REACHABLE(CONFIG_CRYPTO_KPP) ? crypto_register_kpp(&curve25519_alg) : 0; } return 0; } static void __exit arm_curve25519_exit(void) { if (IS_REACHABLE(CONFIG_CRYPTO_KPP) && elf_hwcap & HWCAP_NEON) crypto_unregister_kpp(&curve25519_alg); } module_init(arm_curve25519_init); module_exit(arm_curve25519_exit); MODULE_ALIAS_CRYPTO("curve25519"); MODULE_ALIAS_CRYPTO("curve25519-neon"); MODULE_LICENSE("GPL v2");
linux-master
arch/arm/crypto/curve25519-glue.c
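
Finally, a sketch of how the exported curve25519_arch()/curve25519_base_arch() routines above fit into an ECDH exchange. The helper name and the -EINVAL policy are illustrative assumptions; the all-zero output check is the usual guard against low-order peer points.

#include <crypto/algapi.h>
#include <crypto/curve25519.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Minimal ECDH flow on top of the arch routines exported above: derive our
 * public key, then the shared secret with the peer's public key, and reject
 * an all-zero result.
 */
static int curve25519_demo(const u8 my_secret[CURVE25519_KEY_SIZE],
			   const u8 their_public[CURVE25519_KEY_SIZE],
			   u8 my_public[CURVE25519_KEY_SIZE],
			   u8 shared[CURVE25519_KEY_SIZE])
{
	curve25519_base_arch(my_public, my_secret);
	curve25519_arch(shared, my_secret, their_public);

	return crypto_memneq(shared, curve25519_null_point,
			     CURVE25519_KEY_SIZE) ? 0 : -EINVAL;
}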