// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2019 Madhavan Srinivasan, IBM Corporation.
#define pr_fmt(fmt) "generic-compat-pmu: " fmt
#include "isa207-common.h"
/*
 * Raw event encoding:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *                                 [ pmc ]                       [    pmcxsel    ]
 */
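/*
 * Worked example (illustrative only): the raw code 0x200f4 (PM_RUN_CYC)
 * decodes as pmc = (0x200f4 >> 16) & 0xf = 2 and pmcxsel = 0x200f4 & 0xff =
 * 0xf4, i.e. event selector 0xf4 counted on PMC2.
 */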
/*
* Event codes defined in ISA v3.0B
*/
#define EVENT(_name, _code) _name = _code,
enum {
/* Cycles, alternate code */
EVENT(PM_CYC_ALT, 0x100f0)
/* One or more instructions completed in a cycle */
EVENT(PM_CYC_INST_CMPL, 0x100f2)
/* Floating-point instruction completed */
EVENT(PM_FLOP_CMPL, 0x100f4)
/* Instruction ERAT/L1-TLB miss */
EVENT(PM_L1_ITLB_MISS, 0x100f6)
/* All instructions completed and none available */
EVENT(PM_NO_INST_AVAIL, 0x100f8)
/* A load-type instruction completed (ISA v3.0+) */
EVENT(PM_LD_CMPL, 0x100fc)
/* Instruction completed, alternate code (ISA v3.0+) */
EVENT(PM_INST_CMPL_ALT, 0x100fe)
/* A store-type instruction completed */
EVENT(PM_ST_CMPL, 0x200f0)
/* Instruction Dispatched */
EVENT(PM_INST_DISP, 0x200f2)
/* Run_cycles */
EVENT(PM_RUN_CYC, 0x200f4)
/* Data ERAT/L1-TLB miss/reload */
EVENT(PM_L1_DTLB_RELOAD, 0x200f6)
/* Taken branch completed */
EVENT(PM_BR_TAKEN_CMPL, 0x200fa)
/* Demand iCache Miss */
EVENT(PM_L1_ICACHE_MISS, 0x200fc)
/* L1 Dcache reload from memory */
EVENT(PM_L1_RELOAD_FROM_MEM, 0x200fe)
/* L1 Dcache store miss */
EVENT(PM_ST_MISS_L1, 0x300f0)
/* Alternate code for PM_INST_DISP */
EVENT(PM_INST_DISP_ALT, 0x300f2)
/* Branch direction or target mispredicted */
EVENT(PM_BR_MISPREDICT, 0x300f6)
/* Data TLB miss/reload */
EVENT(PM_DTLB_MISS, 0x300fc)
/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
EVENT(PM_DATA_FROM_L3MISS, 0x300fe)
/* L1 Dcache load miss */
EVENT(PM_LD_MISS_L1, 0x400f0)
/* Cycle when instruction(s) dispatched */
EVENT(PM_CYC_INST_DISP, 0x400f2)
/* Branch or branch target mispredicted */
EVENT(PM_BR_MPRED_CMPL, 0x400f6)
/* Instructions completed with run latch set */
EVENT(PM_RUN_INST_CMPL, 0x400fa)
/* Instruction TLB miss/reload */
EVENT(PM_ITLB_MISS, 0x400fc)
/* Load data not cached */
EVENT(PM_LD_NOT_CACHED, 0x400fe)
/* Instructions */
EVENT(PM_INST_CMPL, 0x500fa)
/* Cycles */
EVENT(PM_CYC, 0x600f4)
};
#undef EVENT
/* Table of alternatives, sorted in increasing order of column 0 */
/* Note that in each row, column 0 must be the smallest */
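/* e.g. the PM_INST_DISP row lists 0x200f2 (PM_INST_DISP) before 0x300f2 (PM_INST_DISP_ALT) */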
static const unsigned int generic_event_alternatives[][MAX_ALT] = {
{ PM_CYC_ALT, PM_CYC },
{ PM_INST_CMPL_ALT, PM_INST_CMPL },
{ PM_INST_DISP, PM_INST_DISP_ALT },
};
static int generic_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
int num_alt = 0;
num_alt = isa207_get_alternatives(event, alt,
ARRAY_SIZE(generic_event_alternatives), flags,
generic_event_alternatives);
return num_alt;
}
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_NO_INST_AVAIL);
GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
static struct attribute *generic_compat_events_attr[] = {
GENERIC_EVENT_PTR(PM_CYC),
GENERIC_EVENT_PTR(PM_INST_CMPL),
GENERIC_EVENT_PTR(PM_NO_INST_AVAIL),
GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
GENERIC_EVENT_PTR(PM_LD_MISS_L1),
CACHE_EVENT_PTR(PM_LD_MISS_L1),
CACHE_EVENT_PTR(PM_ST_MISS_L1),
CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
CACHE_EVENT_PTR(PM_DTLB_MISS),
CACHE_EVENT_PTR(PM_ITLB_MISS),
NULL
};
static const struct attribute_group generic_compat_pmu_events_group = {
.name = "events",
.attrs = generic_compat_events_attr,
};
PMU_FORMAT_ATTR(event, "config:0-19");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(pmc, "config:16-19");
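/*
 * Example usage (hypothetical command line, not from this file): with the
 * "event" format above, userspace can request a raw event such as
 * PM_RUN_CYC with something like:
 *
 *   perf stat -e <pmu>/event=0x200f4/ ...
 *
 * where <pmu> is the name this PMU is registered under.
 */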
static struct attribute *generic_compat_pmu_format_attr[] = {
&format_attr_event.attr,
&format_attr_pmcxsel.attr,
&format_attr_pmc.attr,
NULL,
};
static const struct attribute_group generic_compat_pmu_format_group = {
.name = "format",
.attrs = generic_compat_pmu_format_attr,
};
static struct attribute *generic_compat_pmu_caps_attrs[] = {
NULL
};
static struct attribute_group generic_compat_pmu_caps_group = {
.name = "caps",
.attrs = generic_compat_pmu_caps_attrs,
};
static const struct attribute_group *generic_compat_pmu_attr_groups[] = {
&generic_compat_pmu_format_group,
&generic_compat_pmu_events_group,
&generic_compat_pmu_caps_group,
NULL,
};
static int compat_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_NO_INST_AVAIL,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
[PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
* Table of generalized cache-related events.
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_LD_MISS_L1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(L1I) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(LL) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_DTLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_ITLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(NODE) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
#undef C
/*
* We set MMCR0[CC5-6RUN] so we can use counters 5 and 6 for
* PM_INST_CMPL and PM_CYC.
*/
static int generic_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
struct perf_event *pevents[], u32 flags)
{
int ret;
ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags);
if (!ret)
mmcr->mmcr0 |= MMCR0_C56RUN;
return ret;
}
static struct power_pmu generic_compat_pmu = {
.name = "ISAv3",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
.test_adder = ISA207_TEST_ADDER,
.compute_mmcr = generic_compute_mmcr,
.get_constraint = isa207_get_constraint,
.get_alternatives = generic_get_alternatives,
.disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(compat_generic_events),
.generic_events = compat_generic_events,
.cache_events = &generic_compat_cache_events,
.attr_groups = generic_compat_pmu_attr_groups,
};
int __init init_generic_compat_pmu(void)
{
int rc = 0;
/*
* From ISA v2.07 on, PMU features are architected;
* we require >= v3.0 because (a) that has PM_LD_CMPL and
* PM_INST_CMPL_ALT, which v2.07 doesn't have, and
* (b) we don't expect any non-IBM Power ISA
* implementations that conform to v2.07 but not v3.0.
*/
if (!cpu_has_feature(CPU_FTR_ARCH_300))
return -ENODEV;
rc = register_power_pmu(&generic_compat_pmu);
if (rc)
return rc;
/* Tell userspace that EBB is supported */
cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
return 0;
}
/* linux-master: arch/powerpc/perf/generic-compat-pmu.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Common Performance counter support functions for PowerISA v2.07 processors.
*
* Copyright 2009 Paul Mackerras, IBM Corporation.
* Copyright 2013 Michael Ellerman, IBM Corporation.
* Copyright 2016 Madhavan Srinivasan, IBM Corporation.
*/
#include "isa207-common.h"
PMU_FORMAT_ATTR(event, "config:0-49");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
PMU_FORMAT_ATTR(combine, "config:11");
PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
PMU_FORMAT_ATTR(cache_sel, "config:20-23");
PMU_FORMAT_ATTR(sample_mode, "config:24-28");
PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
PMU_FORMAT_ATTR(thresh_start, "config:36-39");
PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
static struct attribute *isa207_pmu_format_attr[] = {
&format_attr_event.attr,
&format_attr_pmcxsel.attr,
&format_attr_mark.attr,
&format_attr_combine.attr,
&format_attr_unit.attr,
&format_attr_pmc.attr,
&format_attr_cache_sel.attr,
&format_attr_sample_mode.attr,
&format_attr_thresh_sel.attr,
&format_attr_thresh_stop.attr,
&format_attr_thresh_start.attr,
&format_attr_thresh_cmp.attr,
NULL,
};
const struct attribute_group isa207_pmu_format_group = {
.name = "format",
.attrs = isa207_pmu_format_attr,
};
static inline bool event_is_fab_match(u64 event)
{
/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
event &= 0xff0fe;
/* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
return (event == 0x30056 || event == 0x4f052);
}
static bool is_event_valid(u64 event)
{
u64 valid_mask = EVENT_VALID_MASK;
if (cpu_has_feature(CPU_FTR_ARCH_31))
valid_mask = p10_EVENT_VALID_MASK;
else if (cpu_has_feature(CPU_FTR_ARCH_300))
valid_mask = p9_EVENT_VALID_MASK;
return !(event & ~valid_mask);
}
static inline bool is_event_marked(u64 event)
{
if (event & EVENT_IS_MARKED)
return true;
return false;
}
static unsigned long sdar_mod_val(u64 event)
{
if (cpu_has_feature(CPU_FTR_ARCH_31))
return p10_SDAR_MODE(event);
return p9_SDAR_MODE(event);
}
static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
{
/*
* MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
* continuous sampling mode.
*
	 * In case of Power8:
	 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous sampling
	 * mode and will be unchanged when setting MMCRA[63] (marked events).
	 *
	 * In case of Power9/Power10:
	 * MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates') for a marked event,
	 * or if the group already has any marked events.
	 * For the rest:
	 * MMCRA[SDAR_MODE] will be set from the event code.
	 * If sdar_mode from the event is zero, default to 0b01, since hardware
	 * requires that we set a non-zero value.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
else if (sdar_mod_val(event))
*mmcra |= sdar_mod_val(event) << MMCRA_SDAR_MODE_SHIFT;
else
*mmcra |= MMCRA_SDAR_MODE_DCACHE;
} else
*mmcra |= MMCRA_SDAR_MODE_TLB;
}
static int p10_thresh_cmp_val(u64 value)
{
int exp = 0;
u64 result = value;
if (!value)
return value;
/*
	 * In case of P10, the thresh_cmp value is not part of the raw event
	 * code and is provided via the attr.config1 parameter. To program the
	 * threshold in MMCRA, take an 18-bit number N and repeatedly shift it
	 * right 2 places, incrementing the exponent E by 1, until the upper
	 * 10 bits of N are zero. Write E to the threshold exponent and the
	 * lower 8 bits of N to the threshold mantissa.
	 * The max threshold that can be written is 261120.
*/
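	/*
	 * Worked example (illustrative only): N = 1000 needs 10 bits, so one
	 * iteration gives N = 250 (8 bits) and E = 1; the encoded result is
	 * (1 << 8) | 250 = 0x1fa.
	 */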
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
if (value > 261120)
value = 261120;
while ((64 - __builtin_clzl(value)) > 8) {
exp++;
value >>= 2;
}
/*
* Note that it is invalid to write a mantissa with the
* upper 2 bits of mantissa being zero, unless the
* exponent is also zero.
*/
if (!(value & 0xC0) && exp)
result = -1;
else
result = (exp << 8) | value;
}
return result;
}
static u64 thresh_cmp_val(u64 value)
{
if (cpu_has_feature(CPU_FTR_ARCH_31))
value = p10_thresh_cmp_val(value);
/*
	 * Since the location of the threshold compare bits in MMCRA
	 * differs on p8, use a different shift value.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
return value << p9_MMCRA_THR_CMP_SHIFT;
else
return value << MMCRA_THR_CMP_SHIFT;
}
static unsigned long combine_from_event(u64 event)
{
if (cpu_has_feature(CPU_FTR_ARCH_300))
return p9_EVENT_COMBINE(event);
return EVENT_COMBINE(event);
}
static unsigned long combine_shift(unsigned long pmc)
{
if (cpu_has_feature(CPU_FTR_ARCH_300))
return p9_MMCR1_COMBINE_SHIFT(pmc);
return MMCR1_COMBINE_SHIFT(pmc);
}
static inline bool event_is_threshold(u64 event)
{
return (event >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
}
static bool is_thresh_cmp_valid(u64 event)
{
unsigned int cmp, exp;
if (cpu_has_feature(CPU_FTR_ARCH_31))
return p10_thresh_cmp_val(event) >= 0;
/*
* Check the mantissa upper two bits are not zero, unless the
* exponent is also zero. See the THRESH_CMP_MANTISSA doc.
*/
cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
exp = cmp >> 7;
if (exp && (cmp & 0x60) == 0)
return false;
return true;
}
static unsigned int dc_ic_rld_quad_l1_sel(u64 event)
{
unsigned int cache;
cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK;
return cache;
}
static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
{
u64 ret = PERF_MEM_NA;
switch(idx) {
case 0:
/* Nothing to do */
break;
case 1:
ret = PH(LVL, L1) | LEVEL(L1) | P(SNOOP, HIT);
break;
case 2:
ret = PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT);
break;
case 3:
ret = PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
break;
case 4:
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
ret = P(SNOOP, HIT);
if (sub_idx == 1)
ret |= PH(LVL, LOC_RAM) | LEVEL(RAM);
else if (sub_idx == 2 || sub_idx == 3)
ret |= P(LVL, HIT) | LEVEL(PMEM);
else if (sub_idx == 4)
ret |= PH(LVL, REM_RAM1) | REM | LEVEL(RAM) | P(HOPS, 2);
else if (sub_idx == 5 || sub_idx == 7)
ret |= P(LVL, HIT) | LEVEL(PMEM) | REM;
else if (sub_idx == 6)
ret |= PH(LVL, REM_RAM2) | REM | LEVEL(RAM) | P(HOPS, 3);
} else {
if (sub_idx <= 1)
ret = PH(LVL, LOC_RAM);
else if (sub_idx > 1 && sub_idx <= 2)
ret = PH(LVL, REM_RAM1);
else
ret = PH(LVL, REM_RAM2);
ret |= P(SNOOP, HIT);
}
break;
case 5:
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
ret = REM | P(HOPS, 0);
if (sub_idx == 0 || sub_idx == 4)
ret |= PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT);
else if (sub_idx == 1 || sub_idx == 5)
ret |= PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HITM);
else if (sub_idx == 2 || sub_idx == 6)
ret |= PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
else if (sub_idx == 3 || sub_idx == 7)
ret |= PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
} else {
if (sub_idx == 0)
ret = PH(LVL, L2) | LEVEL(L2) | REM | P(SNOOP, HIT) | P(HOPS, 0);
else if (sub_idx == 1)
ret = PH(LVL, L2) | LEVEL(L2) | REM | P(SNOOP, HITM) | P(HOPS, 0);
else if (sub_idx == 2 || sub_idx == 4)
ret = PH(LVL, L3) | LEVEL(L3) | REM | P(SNOOP, HIT) | P(HOPS, 0);
else if (sub_idx == 3 || sub_idx == 5)
ret = PH(LVL, L3) | LEVEL(L3) | REM | P(SNOOP, HITM) | P(HOPS, 0);
}
break;
case 6:
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
if (sub_idx == 0)
ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM |
P(SNOOP, HIT) | P(HOPS, 2);
else if (sub_idx == 1)
ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM |
P(SNOOP, HITM) | P(HOPS, 2);
else if (sub_idx == 2)
ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM |
P(SNOOP, HIT) | P(HOPS, 3);
else if (sub_idx == 3)
ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM |
P(SNOOP, HITM) | P(HOPS, 3);
} else {
ret = PH(LVL, REM_CCE2);
if (sub_idx == 0 || sub_idx == 2)
ret |= P(SNOOP, HIT);
else if (sub_idx == 1 || sub_idx == 3)
ret |= P(SNOOP, HITM);
}
break;
case 7:
ret = PM(LVL, L1);
break;
}
return ret;
}
void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
struct pt_regs *regs)
{
u64 idx;
u32 sub_idx;
u64 sier;
u64 val;
/* Skip if no SIER support */
if (!(flags & PPMU_HAS_SIER)) {
dsrc->val = 0;
return;
}
sier = mfspr(SPRN_SIER);
val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31)))
return;
idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;
dsrc->val = isa207_find_source(idx, sub_idx);
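	/*
	 * For example, a data-source index of 2 decodes above to an L2 hit;
	 * a SIER type value of 1 then marks the access as a load below.
	 */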
if (val == 7) {
u64 mmcra;
u32 op_type;
/*
		 * Type 0b111 denotes either a larx or an stcx instruction. Use
		 * the MMCRA sampling bits [57:59] along with the type value to
		 * determine the exact instruction type. If the sampling
		 * criterion is neither load nor store, default the type to NA.
*/
mmcra = mfspr(SPRN_MMCRA);
op_type = (mmcra >> MMCRA_SAMP_ELIG_SHIFT) & MMCRA_SAMP_ELIG_MASK;
switch (op_type) {
case 5:
dsrc->val |= P(OP, LOAD);
break;
case 7:
dsrc->val |= P(OP, STORE);
break;
default:
dsrc->val |= P(OP, NA);
break;
}
} else {
dsrc->val |= (val == 1) ? P(OP, LOAD) : P(OP, STORE);
}
}
void isa207_get_mem_weight(u64 *weight, u64 type)
{
union perf_sample_weight *weight_fields;
u64 weight_lat;
u64 mmcra = mfspr(SPRN_MMCRA);
u64 exp = MMCRA_THR_CTR_EXP(mmcra);
u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
u64 sier = mfspr(SPRN_SIER);
u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
if (cpu_has_feature(CPU_FTR_ARCH_31))
mantissa = P10_MMCRA_THR_CTR_MANT(mmcra);
if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31)))
weight_lat = 0;
else
weight_lat = mantissa << (2 * exp);
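	/*
	 * Illustrative example (not from the source): exp = 3 and
	 * mantissa = 100 decode to a latency of 100 << 6 = 6400 cycles.
	 */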
/*
* Use 64 bit weight field (full) if sample type is
* WEIGHT.
*
* if sample type is WEIGHT_STRUCT:
* - store memory latency in the lower 32 bits.
* - For ISA v3.1, use remaining two 16 bit fields of
* perf_sample_weight to store cycle counter values
* from sier2.
*/
weight_fields = (union perf_sample_weight *)weight;
if (type & PERF_SAMPLE_WEIGHT)
weight_fields->full = weight_lat;
else {
weight_fields->var1_dw = (u32)weight_lat;
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
weight_fields->var2_w = P10_SIER2_FINISH_CYC(mfspr(SPRN_SIER2));
weight_fields->var3_w = P10_SIER2_DISPATCH_CYC(mfspr(SPRN_SIER2));
}
}
}
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1)
{
unsigned int unit, pmc, cache, ebb;
unsigned long mask, value;
mask = value = 0;
if (!is_event_valid(event))
return -1;
pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
if (cpu_has_feature(CPU_FTR_ARCH_31))
cache = (event >> EVENT_CACHE_SEL_SHIFT) &
p10_EVENT_CACHE_SEL_MASK;
else
cache = (event >> EVENT_CACHE_SEL_SHIFT) &
EVENT_CACHE_SEL_MASK;
ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK;
if (pmc) {
u64 base_event;
if (pmc > 6)
return -1;
/* Ignore Linux defined bits when checking event below */
base_event = event & ~EVENT_LINUX_MASK;
if (pmc >= 5 && base_event != 0x500fa &&
base_event != 0x600f4)
return -1;
mask |= CNST_PMC_MASK(pmc);
value |= CNST_PMC_VAL(pmc);
/*
* PMC5 and PMC6 are used to count cycles and instructions and
* they do not support most of the constraint bits. Add a check
* to exclude PMC5/6 from most of the constraints except for
* EBB/BHRB.
*/
if (pmc >= 5)
goto ebb_bhrb;
}
if (pmc <= 4) {
/*
* Add to number of counters in use. Note this includes events with
* a PMC of 0 - they still need a PMC, it's just assigned later.
* Don't count events on PMC 5 & 6, there is only one valid event
* on each of those counters, and they are handled above.
*/
mask |= CNST_NC_MASK;
value |= CNST_NC_VAL;
}
if (unit >= 6 && unit <= 9) {
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
if (unit == 6) {
mask |= CNST_L2L3_GROUP_MASK;
value |= CNST_L2L3_GROUP_VAL(event >> p10_L2L3_EVENT_SHIFT);
}
} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
mask |= CNST_CACHE_GROUP_MASK;
value |= CNST_CACHE_GROUP_VAL(event & 0xff);
mask |= CNST_CACHE_PMC4_MASK;
if (pmc == 4)
value |= CNST_CACHE_PMC4_VAL;
} else if (cache & 0x7) {
/*
* L2/L3 events contain a cache selector field, which is
* supposed to be programmed into MMCRC. However MMCRC is only
* HV writable, and there is no API for guest kernels to modify
* it. The solution is for the hypervisor to initialise the
* field to zeroes, and for us to only ever allow events that
* have a cache selector of zero. The bank selector (bit 3) is
* irrelevant, as long as the rest of the value is 0.
*/
return -1;
}
} else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) {
mask |= CNST_L1_QUAL_MASK;
value |= CNST_L1_QUAL_VAL(cache);
}
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
mask |= CNST_RADIX_SCOPE_GROUP_MASK;
value |= CNST_RADIX_SCOPE_GROUP_VAL(event >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT);
}
if (is_event_marked(event)) {
mask |= CNST_SAMPLE_MASK;
value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
}
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
if (event_is_threshold(event) && is_thresh_cmp_valid(event_config1)) {
mask |= CNST_THRESH_CTL_SEL_MASK;
value |= CNST_THRESH_CTL_SEL_VAL(event >> EVENT_THRESH_SHIFT);
mask |= p10_CNST_THRESH_CMP_MASK;
value |= p10_CNST_THRESH_CMP_VAL(p10_thresh_cmp_val(event_config1));
} else if (event_is_threshold(event))
return -1;
} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
mask |= CNST_THRESH_MASK;
value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
} else if (event_is_threshold(event))
return -1;
} else {
/*
* Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
* the threshold control bits are used for the match value.
*/
if (event_is_fab_match(event)) {
mask |= CNST_FAB_MATCH_MASK;
value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
} else {
if (!is_thresh_cmp_valid(event))
return -1;
mask |= CNST_THRESH_MASK;
value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
}
}
ebb_bhrb:
if (!pmc && ebb)
/* EBB events must specify the PMC */
return -1;
if (event & EVENT_WANTS_BHRB) {
if (!ebb)
/* Only EBB events can request BHRB */
return -1;
mask |= CNST_IFM_MASK;
value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
}
/*
* All events must agree on EBB, either all request it or none.
* EBB events are pinned & exclusive, so this should never actually
* hit, but we leave it as a fallback in case.
*/
mask |= CNST_EBB_MASK;
value |= CNST_EBB_VAL(ebb);
*maskp = mask;
*valp = value;
return 0;
}
int isa207_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
struct perf_event *pevents[], u32 flags)
{
unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
unsigned long mmcr3;
unsigned int pmc, pmc_inuse;
int i;
pmc_inuse = 0;
/* First pass to count resource use */
for (i = 0; i < n_ev; ++i) {
pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
if (pmc)
pmc_inuse |= 1 << pmc;
}
mmcra = mmcr1 = mmcr2 = mmcr3 = 0;
/*
	 * Disable BHRB unless it is explicitly requested,
	 * by setting the MMCRA[BHRBRD] bit.
*/
if (cpu_has_feature(CPU_FTR_ARCH_31))
mmcra |= MMCRA_BHRB_DISABLE;
/* Second pass: assign PMCs, set all MMCR1 fields */
for (i = 0; i < n_ev; ++i) {
pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
combine = combine_from_event(event[i]);
psel = event[i] & EVENT_PSEL_MASK;
if (!pmc) {
for (pmc = 1; pmc <= 4; ++pmc) {
if (!(pmc_inuse & (1 << pmc)))
break;
}
pmc_inuse |= 1 << pmc;
}
if (pmc <= 4) {
mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
mmcr1 |= combine << combine_shift(pmc);
mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
}
/* In continuous sampling mode, update SDAR on TLB miss */
mmcra_sdar_mode(event[i], &mmcra);
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
cache = dc_ic_rld_quad_l1_sel(event[i]);
mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
} else {
if (event[i] & EVENT_IS_L1) {
cache = dc_ic_rld_quad_l1_sel(event[i]);
mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
}
}
/* Set RADIX_SCOPE_QUAL bit */
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
val = (event[i] >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT) &
p10_EVENT_RADIX_SCOPE_QUAL_MASK;
mmcr1 |= val << p10_MMCR1_RADIX_SCOPE_QUAL_SHIFT;
}
if (is_event_marked(event[i])) {
mmcra |= MMCRA_SAMPLE_ENABLE;
val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
if (val) {
mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
}
}
/*
		 * For PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
*/
if (!cpu_has_feature(CPU_FTR_ARCH_300) && event_is_fab_match(event[i])) {
mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
} else {
val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
mmcra |= val << MMCRA_THR_CTL_SHIFT;
val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
mmcra |= val << MMCRA_THR_SEL_SHIFT;
if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
val = (event[i] >> EVENT_THR_CMP_SHIFT) &
EVENT_THR_CMP_MASK;
mmcra |= thresh_cmp_val(val);
} else if (flags & PPMU_HAS_ATTR_CONFIG1) {
val = (pevents[i]->attr.config1 >> p10_EVENT_THR_CMP_SHIFT) &
p10_EVENT_THR_CMP_MASK;
mmcra |= thresh_cmp_val(val);
}
}
if (cpu_has_feature(CPU_FTR_ARCH_31) && (unit == 6)) {
val = (event[i] >> p10_L2L3_EVENT_SHIFT) &
p10_EVENT_L2L3_SEL_MASK;
mmcr2 |= val << p10_L2L3_SEL_SHIFT;
}
if (event[i] & EVENT_WANTS_BHRB) {
val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
mmcra |= val << MMCRA_IFM_SHIFT;
}
		/* Set MMCRA[BHRBRD] to 0 if there is a user request for BHRB */
if (cpu_has_feature(CPU_FTR_ARCH_31) &&
(has_branch_stack(pevents[i]) || (event[i] & EVENT_WANTS_BHRB)))
mmcra &= ~MMCRA_BHRB_DISABLE;
if (pevents[i]->attr.exclude_user)
mmcr2 |= MMCR2_FCP(pmc);
if (pevents[i]->attr.exclude_hv)
mmcr2 |= MMCR2_FCH(pmc);
if (pevents[i]->attr.exclude_kernel) {
if (cpu_has_feature(CPU_FTR_HVMODE))
mmcr2 |= MMCR2_FCH(pmc);
else
mmcr2 |= MMCR2_FCS(pmc);
}
if (pevents[i]->attr.exclude_idle)
mmcr2 |= MMCR2_FCWAIT(pmc);
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
if (pmc <= 4) {
val = (event[i] >> p10_EVENT_MMCR3_SHIFT) &
p10_EVENT_MMCR3_MASK;
mmcr3 |= val << MMCR3_SHIFT(pmc);
}
}
hwc[i] = pmc - 1;
}
/* Return MMCRx values */
mmcr->mmcr0 = 0;
	/* pmc_inuse is 1-based: bit 1 = PMC1, 0x7c = PMC2-6, 0x60 = PMC5/6 */
if (pmc_inuse & 2)
mmcr->mmcr0 = MMCR0_PMC1CE;
if (pmc_inuse & 0x7c)
mmcr->mmcr0 |= MMCR0_PMCjCE;
/* If we're not using PMC 5 or 6, freeze them */
if (!(pmc_inuse & 0x60))
mmcr->mmcr0 |= MMCR0_FC56;
/*
	 * Set MMCR0[PMCCEXT] for p10, which restricts access to
	 * group B registers when MMCR0[PMCC] = 0b00.
*/
if (cpu_has_feature(CPU_FTR_ARCH_31))
mmcr->mmcr0 |= MMCR0_PMCCEXT;
mmcr->mmcr1 = mmcr1;
mmcr->mmcra = mmcra;
mmcr->mmcr2 = mmcr2;
mmcr->mmcr3 = mmcr3;
return 0;
}
void isa207_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
if (pmc <= 3)
mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}
static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int size)
{
int i, j;
for (i = 0; i < size; ++i) {
if (event < ev_alt[i][0])
break;
for (j = 0; j < MAX_ALT && ev_alt[i][j]; ++j)
if (event == ev_alt[i][j])
return i;
}
return -1;
}
int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags,
const unsigned int ev_alt[][MAX_ALT])
{
int i, j, num_alt = 0;
u64 alt_event;
alt[num_alt++] = event;
i = find_alternative(event, ev_alt, size);
if (i >= 0) {
/* Filter out the original event, it's already in alt[0] */
for (j = 0; j < MAX_ALT; ++j) {
alt_event = ev_alt[i][j];
if (alt_event && alt_event != event)
alt[num_alt++] = alt_event;
}
}
if (flags & PPMU_ONLY_COUNT_RUN) {
/*
		 * We're only counting in RUN state, so PM_CYC is equivalent to
		 * PM_RUN_CYC and PM_INST_CMPL is equivalent to PM_RUN_INST_CMPL.
*/
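		/*
		 * For example, an alternatives list that contains PM_CYC
		 * (0x1e) also gets PM_RUN_CYC (0x600f4) appended below, and
		 * vice versa; likewise for PM_INST_CMPL / PM_RUN_INST_CMPL.
		 */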
j = num_alt;
for (i = 0; i < num_alt; ++i) {
switch (alt[i]) {
			case 0x1e: /* PM_CYC */
alt[j++] = 0x600f4; /* PM_RUN_CYC */
break;
case 0x600f4:
alt[j++] = 0x1e;
break;
case 0x2: /* PM_INST_CMPL */
alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */
break;
case 0x500fa:
alt[j++] = 0x2;
break;
}
}
num_alt = j;
}
return num_alt;
}
int isa3XX_check_attr_config(struct perf_event *ev)
{
u64 val, sample_mode;
u64 event = ev->attr.config;
val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
sample_mode = val & 0x3;
/*
* MMCRA[61:62] is Random Sampling Mode (SM).
	 * A value of 0b11 is reserved.
*/
if (sample_mode == 0x3)
return -EINVAL;
/*
	 * Check for all reserved values.
* Source: Performance Monitoring Unit User Guide
*/
switch (val) {
case 0x5:
case 0x9:
case 0xD:
case 0x19:
case 0x1D:
case 0x1A:
case 0x1E:
return -EINVAL;
}
/*
	 * MMCRA[48:51]/[52:55] is the Threshold Start/Stop
* Events Selection.
* 0b11110000/0b00001111 is reserved.
*/
val = (event >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
if (((val & 0xF0) == 0xF0) || ((val & 0xF) == 0xF))
return -EINVAL;
return 0;
}
/* linux-master: arch/powerpc/perf/isa207-common.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Performance counter callchain support - powerpc architecture code
*
* Copyright © 2009 Paul Mackerras, IBM Corporation.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/pte-walk.h>
#include "callchain.h"
#ifdef CONFIG_PPC64
#include <asm/syscalls_32.h>
#else /* CONFIG_PPC64 */
#define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE
#define sigcontext32 sigcontext
#define mcontext32 mcontext
#define ucontext32 ucontext
#define compat_siginfo_t struct siginfo
#endif /* CONFIG_PPC64 */
static int read_user_stack_32(const unsigned int __user *ptr, unsigned int *ret)
{
return __read_user_stack(ptr, ret, sizeof(*ret));
}
/*
* Layout for non-RT signal frames
*/
struct signal_frame_32 {
char dummy[__SIGNAL_FRAMESIZE32];
struct sigcontext32 sctx;
struct mcontext32 mctx;
int abigap[56];
};
/*
* Layout for RT signal frames
*/
struct rt_signal_frame_32 {
char dummy[__SIGNAL_FRAMESIZE32 + 16];
compat_siginfo_t info;
struct ucontext32 uc;
int abigap[56];
};
static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
return 1;
if (current->mm->context.vdso &&
nip == VDSO32_SYMBOL(current->mm->context.vdso, sigtramp32))
return 1;
return 0;
}
static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
if (nip == fp + offsetof(struct rt_signal_frame_32,
uc.uc_mcontext.mc_pad))
return 1;
if (current->mm->context.vdso &&
nip == VDSO32_SYMBOL(current->mm->context.vdso, sigtramp_rt32))
return 1;
return 0;
}
static int sane_signal_32_frame(unsigned int sp)
{
struct signal_frame_32 __user *sf;
unsigned int regs;
sf = (struct signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
return 0;
return regs == (unsigned long) &sf->mctx;
}
static int sane_rt_signal_32_frame(unsigned int sp)
{
struct rt_signal_frame_32 __user *sf;
unsigned int regs;
sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
return 0;
return regs == (unsigned long) &sf->uc.uc_mcontext;
}
static unsigned int __user *signal_frame_32_regs(unsigned int sp,
unsigned int next_sp, unsigned int next_ip)
{
struct mcontext32 __user *mctx = NULL;
struct signal_frame_32 __user *sf;
struct rt_signal_frame_32 __user *rt_sf;
/*
* Note: the next_sp - sp >= signal frame size check
* is true when next_sp < sp, for example, when
* transitioning from an alternate signal stack to the
* normal stack.
*/
if (next_sp - sp >= sizeof(struct signal_frame_32) &&
is_sigreturn_32_address(next_ip, sp) &&
sane_signal_32_frame(sp)) {
sf = (struct signal_frame_32 __user *) (unsigned long) sp;
mctx = &sf->mctx;
}
if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
is_rt_sigreturn_32_address(next_ip, sp) &&
sane_rt_signal_32_frame(sp)) {
rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
mctx = &rt_sf->uc.uc_mcontext;
}
if (!mctx)
return NULL;
return mctx->mc_gregs;
}
void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned int sp, next_sp;
unsigned int next_ip;
unsigned int lr;
long level = 0;
unsigned int __user *fp, *uregs;
next_ip = perf_instruction_pointer(regs);
lr = regs->link;
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
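	/*
	 * Walk the user stack. In the 32-bit ABI each frame holds the
	 * caller's back chain at offset 0 (fp[0]) and the LR save word at
	 * offset 4 (fp[1]), which is where the loop below reads next_sp and
	 * next_ip from.
	 */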
while (entry->nr < entry->max_stack) {
fp = (unsigned int __user *) (unsigned long) sp;
if (invalid_user_sp(sp) || read_user_stack_32(fp, &next_sp))
return;
if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
return;
uregs = signal_frame_32_regs(sp, next_sp, next_ip);
if (!uregs && level <= 1)
uregs = signal_frame_32_regs(sp, next_sp, lr);
if (uregs) {
/*
			 * This looks like a signal frame, so restart
* the stack trace with the values in it.
*/
if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
read_user_stack_32(&uregs[PT_LNK], &lr) ||
read_user_stack_32(&uregs[PT_R1], &sp))
return;
level = 0;
perf_callchain_store_context(entry, PERF_CONTEXT_USER);
perf_callchain_store(entry, next_ip);
continue;
}
if (level == 0)
next_ip = lr;
perf_callchain_store(entry, next_ip);
++level;
sp = next_sp;
}
}
/* linux-master: arch/powerpc/perf/callchain_32.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Performance counter support for PPC970-family processors.
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*/
#include <linux/string.h>
#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>
#include "internal.h"
/*
* Bits in event code for PPC970
*/
#define PM_PMC_SH 12 /* PMC number (1-based) for direct events */
#define PM_PMC_MSK 0xf
#define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */
#define PM_UNIT_MSK 0xf
#define PM_SPCSEL_SH 6
#define PM_SPCSEL_MSK 3
#define PM_BYTE_SH 4 /* Byte number of event bus to use */
#define PM_BYTE_MSK 3
#define PM_PMCSEL_MSK 0xf
/* Values in PM_UNIT field */
#define PM_NONE 0
#define PM_FPU 1
#define PM_VPU 2
#define PM_ISU 3
#define PM_IFU 4
#define PM_IDU 5
#define PM_STS 6
#define PM_LSU0 7
#define PM_LSU1U 8
#define PM_LSU1L 9
#define PM_LASTUNIT 9
/*
* Bits in MMCR0 for PPC970
*/
#define MMCR0_PMC1SEL_SH 8
#define MMCR0_PMC2SEL_SH 1
#define MMCR_PMCSEL_MSK 0x1f
/*
* Bits in MMCR1 for PPC970
*/
#define MMCR1_TTM0SEL_SH 62
#define MMCR1_TTM1SEL_SH 59
#define MMCR1_TTM3SEL_SH 53
#define MMCR1_TTMSEL_MSK 3
#define MMCR1_TD_CP_DBG0SEL_SH 50
#define MMCR1_TD_CP_DBG1SEL_SH 48
#define MMCR1_TD_CP_DBG2SEL_SH 46
#define MMCR1_TD_CP_DBG3SEL_SH 44
#define MMCR1_PMC1_ADDER_SEL_SH 39
#define MMCR1_PMC2_ADDER_SEL_SH 38
#define MMCR1_PMC6_ADDER_SEL_SH 37
#define MMCR1_PMC5_ADDER_SEL_SH 36
#define MMCR1_PMC8_ADDER_SEL_SH 35
#define MMCR1_PMC7_ADDER_SEL_SH 34
#define MMCR1_PMC3_ADDER_SEL_SH 33
#define MMCR1_PMC4_ADDER_SEL_SH 32
#define MMCR1_PMC3SEL_SH 27
#define MMCR1_PMC4SEL_SH 22
#define MMCR1_PMC5SEL_SH 17
#define MMCR1_PMC6SEL_SH 12
#define MMCR1_PMC7SEL_SH 7
#define MMCR1_PMC8SEL_SH 2
static short mmcr1_adder_bits[8] = {
MMCR1_PMC1_ADDER_SEL_SH,
MMCR1_PMC2_ADDER_SEL_SH,
MMCR1_PMC3_ADDER_SEL_SH,
MMCR1_PMC4_ADDER_SEL_SH,
MMCR1_PMC5_ADDER_SEL_SH,
MMCR1_PMC6_ADDER_SEL_SH,
MMCR1_PMC7_ADDER_SEL_SH,
MMCR1_PMC8_ADDER_SEL_SH
};
/*
* Layout of constraint bits:
* 6666555555555544444444443333333333222222222211111111110000000000
* 3210987654321098765432109876543210987654321098765432109876543210
 *               <><><>[  >[  >[  ><  ><  ><  ><  ><><><><><><><><>
 *               SPT0T1 UC  PS1 PS2 B0  B1  B2  B3 P1P2P3P4P5P6P7P8
*
* SP - SPCSEL constraint
* 48-49: SPCSEL value 0x3_0000_0000_0000
*
* T0 - TTM0 constraint
* 46-47: TTM0SEL value (0=FPU, 2=IFU, 3=VPU) 0xC000_0000_0000
*
* T1 - TTM1 constraint
* 44-45: TTM1SEL value (0=IDU, 3=STS) 0x3000_0000_0000
*
* UC - unit constraint: can't have all three of FPU|IFU|VPU, ISU, IDU|STS
* 43: UC3 error 0x0800_0000_0000
* 42: FPU|IFU|VPU events needed 0x0400_0000_0000
* 41: ISU events needed 0x0200_0000_0000
* 40: IDU|STS events needed 0x0100_0000_0000
*
* PS1
* 39: PS1 error 0x0080_0000_0000
* 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000
*
* PS2
* 35: PS2 error 0x0008_0000_0000
* 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000
*
* B0
* 28-31: Byte 0 event source 0xf000_0000
* Encoding as for the event code
*
* B1, B2, B3
* 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
*
* P1
* 15: P1 error 0x8000
* 14-15: Count of events needing PMC1
*
* P2..P8
* 0-13: Count of events needing PMC2..PMC8
*/
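/*
 * Worked example (illustrative only): event 0x431 (PM_BR_ISSUED) has no fixed
 * PMC, unit = IFU and byte = 3, so p970_get_constraint() selects the IFU in
 * the T0 field, claims the byte-3 bus lane for the IFU and bumps the
 * PMC3/4/7/8 usage count (PS2).
 */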
static unsigned char direct_marked_event[8] = {
(1<<2) | (1<<3), /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */
(1<<3) | (1<<5), /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */
(1<<3) | (1<<5), /* PMC3: PM_MRK_ST_CMPL_INT, PM_MRK_VMX_FIN */
(1<<4) | (1<<5), /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */
(1<<4) | (1<<5), /* PMC5: PM_GRP_MRK, PM_MRK_GRP_TIMEO */
(1<<3) | (1<<4) | (1<<5),
/* PMC6: PM_MRK_ST_STS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */
(1<<4) | (1<<5), /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */
(1<<4) /* PMC8: PM_MRK_LSU_FIN */
};
/*
* Returns 1 if event counts things relating to marked instructions
* and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
*/
static int p970_marked_instr_event(u64 event)
{
int pmc, psel, unit, byte, bit;
unsigned int mask;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
psel = event & PM_PMCSEL_MSK;
if (pmc) {
if (direct_marked_event[pmc - 1] & (1 << psel))
return 1;
if (psel == 0) /* add events */
bit = (pmc <= 4)? pmc - 1: 8 - pmc;
else if (psel == 7 || psel == 13) /* decode events */
bit = 4;
else
return 0;
} else
bit = psel;
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
mask = 0;
switch (unit) {
case PM_VPU:
mask = 0x4c; /* byte 0 bits 2,3,6 */
break;
case PM_LSU0:
/* byte 2 bits 0,2,3,4,6; all of byte 1 */
mask = 0x085dff00;
break;
case PM_LSU1L:
mask = 0x50 << 24; /* byte 3 bits 4,6 */
break;
}
return (mask >> (byte * 8 + bit)) & 1;
}
/* Masks and values for using events from the various units */
static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
[PM_FPU] = { 0xc80000000000ull, 0x040000000000ull },
[PM_VPU] = { 0xc80000000000ull, 0xc40000000000ull },
[PM_ISU] = { 0x080000000000ull, 0x020000000000ull },
[PM_IFU] = { 0xc80000000000ull, 0x840000000000ull },
[PM_IDU] = { 0x380000000000ull, 0x010000000000ull },
[PM_STS] = { 0x380000000000ull, 0x310000000000ull },
};
static int p970_get_constraint(u64 event, unsigned long *maskp,
unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, byte, unit, sh, spcsel;
unsigned long mask = 0, value = 0;
int grp = -1;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
if (pmc > 8)
return -1;
sh = (pmc - 1) * 2;
mask |= 2 << sh;
value |= 1 << sh;
grp = ((pmc - 1) >> 1) & 1;
}
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
if (unit) {
if (unit > PM_LASTUNIT)
return -1;
mask |= unit_cons[unit][0];
value |= unit_cons[unit][1];
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
/*
* Bus events on bytes 0 and 2 can be counted
* on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8.
*/
if (!pmc)
grp = byte & 1;
/* Set byte lane select field */
mask |= 0xfULL << (28 - 4 * byte);
value |= (unsigned long)unit << (28 - 4 * byte);
}
if (grp == 0) {
/* increment PMC1/2/5/6 field */
mask |= 0x8000000000ull;
value |= 0x1000000000ull;
} else if (grp == 1) {
/* increment PMC3/4/7/8 field */
mask |= 0x800000000ull;
value |= 0x100000000ull;
}
spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK;
if (spcsel) {
mask |= 3ull << 48;
value |= (unsigned long)spcsel << 48;
}
*maskp = mask;
*valp = value;
return 0;
}
static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
alt[0] = event;
/* 2 alternatives for LSU empty */
if (event == 0x2002 || event == 0x3002) {
alt[1] = event ^ 0x1000;
return 2;
}
return 1;
}
static int p970_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
struct perf_event *pevents[],
u32 flags __maybe_unused)
{
unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
unsigned int pmc, unit, byte, psel;
unsigned int ttm, grp;
unsigned int pmc_inuse = 0;
unsigned int pmc_grp_use[2];
unsigned char busbyte[4];
unsigned char unituse[16];
unsigned char unitmap[] = { 0, 0<<3, 3<<3, 1<<3, 2<<3, 0|4, 3|4 };
unsigned char ttmuse[2];
unsigned char pmcsel[8];
int i;
int spcsel;
if (n_ev > 8)
return -1;
/* First pass to count resource use */
pmc_grp_use[0] = pmc_grp_use[1] = 0;
memset(busbyte, 0, sizeof(busbyte));
memset(unituse, 0, sizeof(unituse));
for (i = 0; i < n_ev; ++i) {
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
if (pmc_inuse & (1 << (pmc - 1)))
return -1;
pmc_inuse |= 1 << (pmc - 1);
/* count 1/2/5/6 vs 3/4/7/8 use */
++pmc_grp_use[((pmc - 1) >> 1) & 1];
}
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
if (unit) {
if (unit > PM_LASTUNIT)
return -1;
if (!pmc)
++pmc_grp_use[byte & 1];
if (busbyte[byte] && busbyte[byte] != unit)
return -1;
busbyte[byte] = unit;
unituse[unit] = 1;
}
}
if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4)
return -1;
/*
* Assign resources and set multiplexer selects.
*
* PM_ISU can go either on TTM0 or TTM1, but that's the only
* choice we have to deal with.
*/
if (unituse[PM_ISU] &
(unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_VPU]))
unitmap[PM_ISU] = 2 | 4; /* move ISU to TTM1 */
/* Set TTM[01]SEL fields. */
ttmuse[0] = ttmuse[1] = 0;
for (i = PM_FPU; i <= PM_STS; ++i) {
if (!unituse[i])
continue;
ttm = unitmap[i];
++ttmuse[(ttm >> 2) & 1];
mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH;
}
/* Check only one unit per TTMx */
if (ttmuse[0] > 1 || ttmuse[1] > 1)
return -1;
/* Set byte lane select fields and TTM3SEL. */
for (byte = 0; byte < 4; ++byte) {
unit = busbyte[byte];
if (!unit)
continue;
if (unit <= PM_STS)
ttm = (unitmap[unit] >> 2) & 1;
else if (unit == PM_LSU0)
ttm = 2;
else {
ttm = 3;
if (unit == PM_LSU1L && byte >= 2)
mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
}
mmcr1 |= (unsigned long)ttm
<< (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
memset(pmcsel, 0x8, sizeof(pmcsel)); /* 8 means don't count */
for (i = 0; i < n_ev; ++i) {
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
psel = event[i] & PM_PMCSEL_MSK;
if (!pmc) {
/* Bus event or any-PMC direct event */
if (unit)
psel |= 0x10 | ((byte & 2) << 2);
else
psel |= 8;
for (pmc = 0; pmc < 8; ++pmc) {
if (pmc_inuse & (1 << pmc))
continue;
grp = (pmc >> 1) & 1;
if (unit) {
if (grp == (byte & 1))
break;
} else if (pmc_grp_use[grp] < 4) {
++pmc_grp_use[grp];
break;
}
}
pmc_inuse |= 1 << pmc;
} else {
/* Direct event */
--pmc;
if (psel == 0 && (byte & 2))
/* add events on higher-numbered bus */
mmcr1 |= 1ull << mmcr1_adder_bits[pmc];
}
pmcsel[pmc] = psel;
hwc[i] = pmc;
spcsel = (event[i] >> PM_SPCSEL_SH) & PM_SPCSEL_MSK;
mmcr1 |= spcsel;
if (p970_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
}
for (pmc = 0; pmc < 2; ++pmc)
mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc);
for (; pmc < 8; ++pmc)
mmcr1 |= (unsigned long)pmcsel[pmc]
<< (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
if (pmc_inuse & 1)
mmcr0 |= MMCR0_PMC1CE;
if (pmc_inuse & 0xfe)
mmcr0 |= MMCR0_PMCjCE;
mmcra |= 0x2000; /* mark only one IOP per PPC instruction */
/* Return MMCRx values */
mmcr->mmcr0 = mmcr0;
mmcr->mmcr1 = mmcr1;
mmcr->mmcra = mmcra;
return 0;
}
static void p970_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
int shift;
/*
* Setting the PMCxSEL field to 0x08 disables PMC x.
*/
if (pmc <= 1) {
shift = MMCR0_PMC1SEL_SH - 7 * pmc;
mmcr->mmcr0 = (mmcr->mmcr0 & ~(0x1fUL << shift)) | (0x08UL << shift);
} else {
shift = MMCR1_PMC3SEL_SH - 5 * (pmc - 2);
mmcr->mmcr1 = (mmcr->mmcr1 & ~(0x1fUL << shift)) | (0x08UL << shift);
}
}
static int ppc970_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = 7,
[PERF_COUNT_HW_INSTRUCTIONS] = 1,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x8810, /* PM_LD_REF_L1 */
[PERF_COUNT_HW_CACHE_MISSES] = 0x3810, /* PM_LD_MISS_L1 */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x431, /* PM_BR_ISSUED */
[PERF_COUNT_HW_BRANCH_MISSES] = 0x327, /* PM_GRP_BR_MPRED */
};
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
* Table of generalized cache-related events.
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
static u64 ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x8810, 0x3810 },
[C(OP_WRITE)] = { 0x7810, 0x813 },
[C(OP_PREFETCH)] = { 0x731, 0 },
},
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { 0, 0 },
},
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0 },
[C(OP_WRITE)] = { 0, 0 },
[C(OP_PREFETCH)] = { 0x733, 0 },
},
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x704 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x700 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x431, 0x327 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { -1, -1 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
};
static struct power_pmu ppc970_pmu = {
.name = "PPC970/FX/MP",
.n_counter = 8,
.max_alternatives = 2,
.add_fields = 0x001100005555ull,
.test_adder = 0x013300000000ull,
.compute_mmcr = p970_compute_mmcr,
.get_constraint = p970_get_constraint,
.get_alternatives = p970_get_alternatives,
.disable_pmc = p970_disable_pmc,
.n_generic = ARRAY_SIZE(ppc970_generic_events),
.generic_events = ppc970_generic_events,
.cache_events = &ppc970_cache_events,
.flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
};
int __init init_ppc970_pmu(void)
{
unsigned int pvr = mfspr(SPRN_PVR);
if (PVR_VER(pvr) != PVR_970 && PVR_VER(pvr) != PVR_970MP &&
PVR_VER(pvr) != PVR_970FX && PVR_VER(pvr) != PVR_970GX)
return -ENODEV;
return register_power_pmu(&ppc970_pmu);
}
/* linux-master: arch/powerpc/perf/ppc970-pmu.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Performance counter support for e6500 family processors.
*
* Author: Priyanka Jain, [email protected]
* Based on e500-pmu.c
* Copyright 2013 Freescale Semiconductor, Inc.
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*/
#include <linux/string.h>
#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>
/*
* Map of generic hardware event types to hardware events
* Zero if unsupported
*/
static int e6500_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = 1,
[PERF_COUNT_HW_INSTRUCTIONS] = 2,
[PERF_COUNT_HW_CACHE_MISSES] = 221,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
[PERF_COUNT_HW_BRANCH_MISSES] = 15,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
* Table of generalized cache-related events.
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
static int e6500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = {
/*RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 27, 222 },
[C(OP_WRITE)] = { 28, 223 },
[C(OP_PREFETCH)] = { 29, 0 },
},
[C(L1I)] = {
/*RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 2, 254 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { 37, 0 },
},
/*
* Assuming LL means L2, it's not a good match for this model.
* It does not have separate read/write events (but it does have
* separate instruction/data events).
*/
[C(LL)] = {
/*RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0 },
[C(OP_WRITE)] = { 0, 0 },
[C(OP_PREFETCH)] = { 0, 0 },
},
/*
* There are data/instruction MMU misses, but that's a miss on
* the chip's internal level-one TLB which is probably not
* what the user wants. Instead, unified level-two TLB misses
* are reported here.
*/
[C(DTLB)] = {
/*RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 26, 66 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(BPU)] = {
/*RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 12, 15 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(NODE)] = {
/* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { -1, -1 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
};
static int num_events = 512;
/* Upper half of event id is PMLCb, for threshold events */
static u64 e6500_xlate_event(u64 event_id)
{
u32 event_low = (u32)event_id;
if (event_low >= num_events ||
(event_id & (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)))
return 0;
return FSL_EMB_EVENT_VALID;
}
static struct fsl_emb_pmu e6500_pmu = {
.name = "e6500 family",
.n_counter = 6,
.n_restricted = 0,
.xlate_event = e6500_xlate_event,
.n_generic = ARRAY_SIZE(e6500_generic_events),
.generic_events = e6500_generic_events,
.cache_events = &e6500_cache_events,
};
static int init_e6500_pmu(void)
{
unsigned int pvr = mfspr(SPRN_PVR);
if (PVR_VER(pvr) != PVR_VER_E6500)
return -ENODEV;
return register_fsl_emb_pmu(&e6500_pmu);
}
early_initcall(init_e6500_pmu);
/* linux-master: arch/powerpc/perf/e6500-pmu.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Performance event support - powerpc architecture code
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>
#include <asm/hw_irq.h>
#include <asm/interrupt.h>
#ifdef CONFIG_PPC64
#include "internal.h"
#endif
#define BHRB_MAX_ENTRIES 32
#define BHRB_TARGET 0x0000000000000002
#define BHRB_PREDICTION 0x0000000000000001
#define BHRB_EA 0xFFFFFFFFFFFFFFFCUL
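/*
 * Layout of a raw BHRB entry as used below: bits 2-63 (BHRB_EA) hold the
 * branch effective address, bit 1 (BHRB_TARGET) flags a target-address entry
 * and bit 0 (BHRB_PREDICTION) carries the branch-prediction status.
 */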
struct cpu_hw_events {
int n_events;
int n_percpu;
int disabled;
int n_added;
int n_limited;
u8 pmcs_enabled;
struct perf_event *event[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int flags[MAX_HWEVENTS];
struct mmcr_regs mmcr;
struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned int txn_flags;
int n_txn_start;
/* BHRB bits */
u64 bhrb_filter; /* BHRB HW branch filter */
unsigned int bhrb_users;
void *bhrb_context;
struct perf_branch_stack bhrb_stack;
struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
u64 ic_init;
/* Store the PMC values */
unsigned long pmcs[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static struct power_pmu *ppmu;
/*
* Normally, to ignore kernel events we set the FCS (freeze counters
* in supervisor mode) bit in MMCR0, but if the kernel runs with the
* hypervisor bit set in the MSR, or if we are running on a processor
* where the hypervisor bit is forced to 1 (as on Apple G5 processors),
* then we need to use the FCHV bit to ignore kernel events.
*/
static unsigned int freeze_events_kernel = MMCR0_FCS;
/*
* 32-bit doesn't have MMCRA but does have an MMCR2,
* and a few other names are different.
* Also 32-bit doesn't have MMCR3, SIER2 and SIER3.
* Define them as zero knowing that any code path accessing
* these registers (via mtspr/mfspr) are done under ppmu flag
* check for PPMU_ARCH_31 and we will not enter that code path
* for 32-bit.
*/
#ifdef CONFIG_PPC32
#define MMCR0_FCHV 0
#define MMCR0_PMCjCE MMCR0_PMCnCE
#define MMCR0_FC56 0
#define MMCR0_PMAO 0
#define MMCR0_EBE 0
#define MMCR0_BHRBA 0
#define MMCR0_PMCC 0
#define MMCR0_PMCC_U6 0
#define SPRN_MMCRA SPRN_MMCR2
#define SPRN_MMCR3 0
#define SPRN_SIER2 0
#define SPRN_SIER3 0
#define MMCRA_SAMPLE_ENABLE 0
#define MMCRA_BHRB_DISABLE 0
#define MMCR0_PMCCEXT 0
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
return 0;
}
static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
return 0;
}
static inline void perf_read_regs(struct pt_regs *regs)
{
regs->result = 0;
}
static inline int siar_valid(struct pt_regs *regs)
{
return 1;
}
static bool is_ebb_event(struct perf_event *event) { return false; }
static int ebb_event_check(struct perf_event *event) { return 0; }
static void ebb_event_add(struct perf_event *event) { }
static void ebb_switch_out(unsigned long mmcr0) { }
static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
return cpuhw->mmcr.mmcr0;
}
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */
bool is_sier_available(void)
{
if (!ppmu)
return false;
if (ppmu->flags & PPMU_HAS_SIER)
return true;
return false;
}
/*
* Return PMC value corresponding to the
* index passed.
*/
unsigned long get_pmcs_ext_regs(int idx)
{
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
return cpuhw->pmcs[idx];
}
static bool regs_use_siar(struct pt_regs *regs)
{
/*
* When we take a performance monitor exception the regs are setup
* using perf_read_regs() which overloads some fields, in particular
* regs->result to tell us whether to use SIAR.
*
* However if the regs are from another exception, eg. a syscall, then
* they have not been setup using perf_read_regs() and so regs->result
* is something random.
*/
return ((TRAP(regs) == INTERRUPT_PERFMON) && regs->result);
}
/*
* Things that are specific to 64-bit implementations.
*/
#ifdef CONFIG_PPC64
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
if (slot > 1)
return 4 * (slot - 1);
}
return 0;
}
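/*
 * Illustrative example (not taken from the source): with instruction
 * sampling enabled, a sampled-instruction slot of 3 yields an adjustment of
 * 4 * (3 - 1) = 8 bytes, which the caller adds to the SIAR address to locate
 * the sampled instruction.
 */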
/*
* The user wants a data address recorded.
* If we're not doing instruction sampling, give them the SDAR
* (sampled data address). If we are doing instruction sampling, then
* only give them the SDAR if it corresponds to the instruction
* pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
* [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
*/
static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp)
{
unsigned long mmcra = regs->dsisr;
bool sdar_valid;
if (ppmu->flags & PPMU_HAS_SIER)
sdar_valid = regs->dar & SIER_SDAR_VALID;
else {
unsigned long sdsync;
if (ppmu->flags & PPMU_SIAR_VALID)
sdsync = POWER7P_MMCRA_SDAR_VALID;
else if (ppmu->flags & PPMU_ALT_SIPR)
sdsync = POWER6_MMCRA_SDSYNC;
else if (ppmu->flags & PPMU_NO_SIAR)
sdsync = MMCRA_SAMPLE_ENABLE;
else
sdsync = MMCRA_SDSYNC;
sdar_valid = mmcra & sdsync;
}
if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
*addrp = mfspr(SPRN_SDAR);
if (is_kernel_addr(mfspr(SPRN_SDAR)) && event->attr.exclude_kernel)
*addrp = 0;
}
static bool regs_sihv(struct pt_regs *regs)
{
unsigned long sihv = MMCRA_SIHV;
if (ppmu->flags & PPMU_HAS_SIER)
return !!(regs->dar & SIER_SIHV);
if (ppmu->flags & PPMU_ALT_SIPR)
sihv = POWER6_MMCRA_SIHV;
return !!(regs->dsisr & sihv);
}
static bool regs_sipr(struct pt_regs *regs)
{
unsigned long sipr = MMCRA_SIPR;
if (ppmu->flags & PPMU_HAS_SIER)
return !!(regs->dar & SIER_SIPR);
if (ppmu->flags & PPMU_ALT_SIPR)
sipr = POWER6_MMCRA_SIPR;
return !!(regs->dsisr & sipr);
}
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
if (regs->msr & MSR_PR)
return PERF_RECORD_MISC_USER;
if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
return PERF_RECORD_MISC_HYPERVISOR;
return PERF_RECORD_MISC_KERNEL;
}
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
bool use_siar = regs_use_siar(regs);
unsigned long mmcra = regs->dsisr;
int marked = mmcra & MMCRA_SAMPLE_ENABLE;
if (!use_siar)
return perf_flags_from_msr(regs);
/*
* Check the address in SIAR to identify the
* privilege levels since the SIER[MSR_HV, MSR_PR]
* bits are not set for marked events in power10
* DD1.
*/
if (marked && (ppmu->flags & PPMU_P10_DD1)) {
unsigned long siar = mfspr(SPRN_SIAR);
if (siar) {
if (is_kernel_addr(siar))
return PERF_RECORD_MISC_KERNEL;
return PERF_RECORD_MISC_USER;
} else {
if (is_kernel_addr(regs->nip))
return PERF_RECORD_MISC_KERNEL;
return PERF_RECORD_MISC_USER;
}
}
/*
* If we don't have flags in MMCRA, rather than using
* the MSR, we intuit the flags from the address in
* SIAR which should give slightly more reliable
* results
*/
if (ppmu->flags & PPMU_NO_SIPR) {
unsigned long siar = mfspr(SPRN_SIAR);
if (is_kernel_addr(siar))
return PERF_RECORD_MISC_KERNEL;
return PERF_RECORD_MISC_USER;
}
/* PR has priority over HV, so order below is important */
if (regs_sipr(regs))
return PERF_RECORD_MISC_USER;
if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
return PERF_RECORD_MISC_HYPERVISOR;
return PERF_RECORD_MISC_KERNEL;
}
/*
* Overload regs->dsisr to store MMCRA so we only need to read it once
* on each interrupt.
* Overload regs->dar to store SIER if we have it.
* Overload regs->result to specify whether we should use the MSR (result
* is zero) or the SIAR (result is non zero).
*/
static inline void perf_read_regs(struct pt_regs *regs)
{
unsigned long mmcra = mfspr(SPRN_MMCRA);
int marked = mmcra & MMCRA_SAMPLE_ENABLE;
int use_siar;
regs->dsisr = mmcra;
if (ppmu->flags & PPMU_HAS_SIER)
regs->dar = mfspr(SPRN_SIER);
/*
* If this isn't a PMU exception (eg a software event) the SIAR is
* not valid. Use pt_regs.
*
* If it is a marked event use the SIAR.
*
* If the PMU doesn't update the SIAR for non marked events use
* pt_regs.
*
* If regs is a kernel interrupt, always use SIAR. Some PMUs have an
* issue with regs_sipr not being in sync with SIAR in interrupt entry
* and return sequences, which can result in regs_sipr reporting
* userspace for kernel interrupts; falling back to pt_regs then causes
* samples to pile up at mtmsrd MSR[EE] 0->1 or pending irq replay
* around interrupt entry/exit.
*
* If the PMU has HV/PR flags then check to see if they
* place the exception in userspace. If so, use pt_regs. In
* continuous sampling mode the SIAR and the PMU exception are
* not synchronised, so they may be many instructions apart.
* This can result in confusing backtraces. We still want
* hypervisor samples as well as samples in the kernel with
* interrupts off hence the userspace check.
*/
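/*
 * In priority order: non-PMI regs or a PMU without SIAR never use SIAR;
 * marked samples always do; PMUs that cannot do continuous sampling fall
 * back to pt_regs; kernel interrupts use SIAR; userspace (per SIPR) uses
 * pt_regs; otherwise use SIAR.
 */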
if (TRAP(regs) != INTERRUPT_PERFMON)
use_siar = 0;
else if ((ppmu->flags & PPMU_NO_SIAR))
use_siar = 0;
else if (marked)
use_siar = 1;
else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
use_siar = 0;
else if (!user_mode(regs))
use_siar = 1;
else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
use_siar = 0;
else
use_siar = 1;
regs->result = use_siar;
}
/*
* On processors like P7+ that have the SIAR-Valid bit, marked instructions
* must be sampled only if the SIAR-valid bit is set.
*
* For unmarked instructions and for processors that don't have the SIAR-Valid
* bit, assume that SIAR is valid.
*/
static inline int siar_valid(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
int marked = mmcra & MMCRA_SAMPLE_ENABLE;
if (marked) {
/*
* SIER[SIAR_VALID] is not set for some
* marked events on power10 DD1, so drop
* the check for SIER[SIAR_VALID] and return true.
*/
if (ppmu->flags & PPMU_P10_DD1)
return 0x1;
else if (ppmu->flags & PPMU_HAS_SIER)
return regs->dar & SIER_SIAR_VALID;
if (ppmu->flags & PPMU_SIAR_VALID)
return mmcra & POWER7P_MMCRA_SIAR_VALID;
}
return 1;
}
/* Reset all possible BHRB entries */
static void power_pmu_bhrb_reset(void)
{
asm volatile(PPC_CLRBHRB);
}
static void power_pmu_bhrb_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!ppmu->bhrb_nr)
return;
/* Clear BHRB if we changed task context to avoid data leaks */
if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
power_pmu_bhrb_reset();
cpuhw->bhrb_context = event->ctx;
}
cpuhw->bhrb_users++;
perf_sched_cb_inc(event->pmu);
}
static void power_pmu_bhrb_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!ppmu->bhrb_nr)
return;
WARN_ON_ONCE(!cpuhw->bhrb_users);
cpuhw->bhrb_users--;
perf_sched_cb_dec(event->pmu);
if (!cpuhw->disabled && !cpuhw->bhrb_users) {
/* BHRB cannot be turned off when other
* events are active on the PMU.
*/
/* avoid stale pointer */
cpuhw->bhrb_context = NULL;
}
}
/* Called from ctxsw to prevent one process's branch entries from
* mingling with another process's entries during a context switch.
*/
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
if (!ppmu->bhrb_nr)
return;
if (sched_in)
power_pmu_bhrb_reset();
}
/* Calculate the to address for a branch */
static __u64 power_pmu_bhrb_to(u64 addr)
{
unsigned int instr;
__u64 target;
if (is_kernel_addr(addr)) {
if (copy_from_kernel_nofault(&instr, (void *)addr,
sizeof(instr)))
return 0;
return branch_target(&instr);
}
/* Userspace: need copy instruction here then translate it */
if (copy_from_user_nofault(&instr, (unsigned int __user *)addr,
sizeof(instr)))
return 0;
target = branch_target(&instr);
if ((!target) || (instr & BRANCH_ABSOLUTE))
return target;
/* Translate relative branch target from kernel to user address */
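/*
 * branch_target() computed the target relative to &instr (our local
 * copy), so subtract &instr to recover the branch offset and add the
 * original user address to rebase it.
 */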
return target - (unsigned long)&instr + addr;
}
/* Processing BHRB entries */
static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw)
{
u64 val;
u64 addr;
int r_index, u_index, pred;
r_index = 0;
u_index = 0;
while (r_index < ppmu->bhrb_nr) {
/* Assembly read function */
val = read_bhrb(r_index++);
if (!val)
/* Terminal marker: End of valid BHRB entries */
break;
else {
addr = val & BHRB_EA;
pred = val & BHRB_PREDICTION;
if (!addr)
/* invalid entry */
continue;
/*
* The BHRB rolling buffer could very well contain kernel
* addresses at this point. Check the privileges before
* exporting them to userspace (avoid exposure of regions
* where we could have speculative execution).
* In case of ISA v3.1, BHRB captures only user-space
* addresses, hence the flag check before the filtering code.
*/
if (!(ppmu->flags & PPMU_ARCH_31) &&
is_kernel_addr(addr) && event->attr.exclude_kernel)
continue;
/* Branches are read most recent first (ie. mfbhrb 0 is
* the most recent branch).
* There are two types of valid entries:
* 1) a target entry which is the to address of a
* computed goto like a blr,bctr,btar. The next
* entry read from the bhrb will be branch
* corresponding to this target (ie. the actual
* blr/bctr/btar instruction).
* 2) a from address which is an actual branch. If a
* target entry precedes this, then this is the
* matching branch for that target. If this is not
* following a target entry, then this is a branch
* where the target is given as an immediate field
* in the instruction (ie. an i or b form branch).
* In this case we need to read the instruction from
* memory to determine the target/to address.
*/
if (val & BHRB_TARGET) {
/* Target branches use two entries
* (ie. computed gotos/XL form)
*/
cpuhw->bhrb_entries[u_index].to = addr;
cpuhw->bhrb_entries[u_index].mispred = pred;
cpuhw->bhrb_entries[u_index].predicted = ~pred;
/* Get from address in next entry */
val = read_bhrb(r_index++);
addr = val & BHRB_EA;
if (val & BHRB_TARGET) {
/* Shouldn't have two targets in a
row. Reset index and try again */
r_index--;
addr = 0;
}
cpuhw->bhrb_entries[u_index].from = addr;
} else {
/* Branches to immediate field
(ie I or B form) */
cpuhw->bhrb_entries[u_index].from = addr;
cpuhw->bhrb_entries[u_index].to =
power_pmu_bhrb_to(addr);
cpuhw->bhrb_entries[u_index].mispred = pred;
cpuhw->bhrb_entries[u_index].predicted = ~pred;
}
u_index++;
}
}
cpuhw->bhrb_stack.nr = u_index;
cpuhw->bhrb_stack.hw_idx = -1ULL;
return;
}
static bool is_ebb_event(struct perf_event *event)
{
/*
* This could be a per-PMU callback, but we'd rather avoid the cost. We
* check that the PMU supports EBB, meaning those that don't can still
* use bit 63 of the event code for something else if they wish.
*/
return (ppmu->flags & PPMU_ARCH_207S) &&
((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
}
static int ebb_event_check(struct perf_event *event)
{
struct perf_event *leader = event->group_leader;
/* Event and group leader must agree on EBB */
if (is_ebb_event(leader) != is_ebb_event(event))
return -EINVAL;
if (is_ebb_event(event)) {
if (!(event->attach_state & PERF_ATTACH_TASK))
return -EINVAL;
if (!leader->attr.pinned || !leader->attr.exclusive)
return -EINVAL;
if (event->attr.freq ||
event->attr.inherit ||
event->attr.sample_type ||
event->attr.sample_period ||
event->attr.enable_on_exec)
return -EINVAL;
}
return 0;
}
static void ebb_event_add(struct perf_event *event)
{
if (!is_ebb_event(event) || current->thread.used_ebb)
return;
/*
* IFF this is the first time we've added an EBB event, set
* PMXE in the user MMCR0 so we can detect when it's cleared by
* userspace. We need this so that we can context switch while
* userspace is in the EBB handler (where PMXE is 0).
*/
current->thread.used_ebb = 1;
current->thread.mmcr0 |= MMCR0_PMXE;
}
static void ebb_switch_out(unsigned long mmcr0)
{
if (!(mmcr0 & MMCR0_EBE))
return;
current->thread.siar = mfspr(SPRN_SIAR);
current->thread.sier = mfspr(SPRN_SIER);
current->thread.sdar = mfspr(SPRN_SDAR);
current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
if (ppmu->flags & PPMU_ARCH_31) {
current->thread.mmcr3 = mfspr(SPRN_MMCR3);
current->thread.sier2 = mfspr(SPRN_SIER2);
current->thread.sier3 = mfspr(SPRN_SIER3);
}
}
static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
unsigned long mmcr0 = cpuhw->mmcr.mmcr0;
if (!ebb)
goto out;
/* Enable EBB and read/write to all 6 PMCs and BHRB for userspace */
mmcr0 |= MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC_U6;
/*
* Add any bits from the user MMCR0, FC or PMAO. This is compatible
* with pmao_restore_workaround() because we may add PMAO but we never
* clear it here.
*/
mmcr0 |= current->thread.mmcr0;
/*
* Be careful not to set PMXE if userspace had it cleared. This is also
* compatible with pmao_restore_workaround() because it has already
* cleared PMXE and we leave PMAO alone.
*/
if (!(current->thread.mmcr0 & MMCR0_PMXE))
mmcr0 &= ~MMCR0_PMXE;
mtspr(SPRN_SIAR, current->thread.siar);
mtspr(SPRN_SIER, current->thread.sier);
mtspr(SPRN_SDAR, current->thread.sdar);
/*
* Merge the kernel & user values of MMCR2. The semantics we implement
* are that the user MMCR2 can set bits, ie. cause counters to freeze,
* but not clear bits. If a task wants to be able to clear bits, ie.
* unfreeze counters, it should not set exclude_xxx in its events and
* instead manage the MMCR2 entirely by itself.
*/
mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2 | current->thread.mmcr2);
if (ppmu->flags & PPMU_ARCH_31) {
mtspr(SPRN_MMCR3, current->thread.mmcr3);
mtspr(SPRN_SIER2, current->thread.sier2);
mtspr(SPRN_SIER3, current->thread.sier3);
}
out:
return mmcr0;
}
static void pmao_restore_workaround(bool ebb)
{
unsigned pmcs[6];
if (!cpu_has_feature(CPU_FTR_PMAO_BUG))
return;
/*
* On POWER8E there is a hardware defect which affects the PMU context
* switch logic, ie. power_pmu_disable/enable().
*
* When a counter overflows PMXE is cleared and FC/PMAO is set in MMCR0
* by the hardware. Sometime later the actual PMU exception is
* delivered.
*
* If we context switch, or simply disable/enable, the PMU prior to the
* exception arriving, the exception will be lost when we clear PMAO.
*
* When we reenable the PMU, we will write the saved MMCR0 with PMAO
* set, and this _should_ generate an exception. However because of the
* defect no exception is generated when we write PMAO, and we get
* stuck with no counters counting but no exception delivered.
*
* The workaround is to detect this case and tweak the hardware to
* create another pending PMU exception.
*
* We do that by setting up PMC6 (cycles) for an imminent overflow and
* enabling the PMU. That causes a new exception to be generated in the
* chip, but we don't take it yet because we have interrupts hard
* disabled. We then write back the PMU state as we want it to be seen
* by the exception handler. When we reenable interrupts the exception
* handler will be called and see the correct state.
*
* The logic is the same for EBB, except that the exception is gated by
* us having interrupts hard disabled as well as the fact that we are
* not in userspace. The exception is finally delivered when we return
* to userspace.
*/
/* Only if PMAO is set and PMAO_SYNC is clear */
if ((current->thread.mmcr0 & (MMCR0_PMAO | MMCR0_PMAO_SYNC)) != MMCR0_PMAO)
return;
/* If we're doing EBB, only if BESCR[GE] is set */
if (ebb && !(current->thread.bescr & BESCR_GE))
return;
/*
* We are already soft-disabled in power_pmu_enable(). We need to hard
* disable to actually prevent the PMU exception from firing.
*/
hard_irq_disable();
/*
* This is a bit gross, but we know we're on POWER8E and have 6 PMCs.
* Using read/write_pmc() in a for loop adds 12 function calls and
* almost doubles our code size.
*/
pmcs[0] = mfspr(SPRN_PMC1);
pmcs[1] = mfspr(SPRN_PMC2);
pmcs[2] = mfspr(SPRN_PMC3);
pmcs[3] = mfspr(SPRN_PMC4);
pmcs[4] = mfspr(SPRN_PMC5);
pmcs[5] = mfspr(SPRN_PMC6);
/* Ensure all freeze bits are unset */
mtspr(SPRN_MMCR2, 0);
/* Set up PMC6 to overflow in one cycle */
mtspr(SPRN_PMC6, 0x7FFFFFFE);
/* Enable exceptions and unfreeze PMC6 */
mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_PMCjCE | MMCR0_PMAO);
/* Now we need to refreeze and restore the PMCs */
mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMAO);
mtspr(SPRN_PMC1, pmcs[0]);
mtspr(SPRN_PMC2, pmcs[1]);
mtspr(SPRN_PMC3, pmcs[2]);
mtspr(SPRN_PMC4, pmcs[3]);
mtspr(SPRN_PMC5, pmcs[4]);
mtspr(SPRN_PMC6, pmcs[5]);
}
/*
* If the perf subsystem wants performance monitor interrupts as soon as
* possible (e.g., to sample the instruction address and stack chain),
* this should return true. The IRQ masking code can then enable MSR[EE]
* in some places (e.g., interrupt handlers), which allows PMI interrupts
* through and improves the accuracy of profiles, at the cost of some performance.
*
* The PMU counters can be enabled by other means (e.g., sysfs raw SPR
* access), but in that case there is no need for prompt PMI handling.
*
* This currently returns true if any perf counter is being used. It
* could possibly return false if only events are being counted rather than
* samples being taken, but for now this is good enough.
*/
bool power_pmu_wants_prompt_pmi(void)
{
struct cpu_hw_events *cpuhw;
/*
* This could simply test local_paca->pmcregs_in_use if that were not
* under ifdef KVM.
*/
if (!ppmu)
return false;
cpuhw = this_cpu_ptr(&cpu_hw_events);
return cpuhw->n_events;
}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
/*
* Read one performance monitor counter (PMC).
*/
static unsigned long read_pmc(int idx)
{
unsigned long val;
switch (idx) {
case 1:
val = mfspr(SPRN_PMC1);
break;
case 2:
val = mfspr(SPRN_PMC2);
break;
case 3:
val = mfspr(SPRN_PMC3);
break;
case 4:
val = mfspr(SPRN_PMC4);
break;
case 5:
val = mfspr(SPRN_PMC5);
break;
case 6:
val = mfspr(SPRN_PMC6);
break;
#ifdef CONFIG_PPC64
case 7:
val = mfspr(SPRN_PMC7);
break;
case 8:
val = mfspr(SPRN_PMC8);
break;
#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
}
return val;
}
/*
* Write one PMC.
*/
static void write_pmc(int idx, unsigned long val)
{
switch (idx) {
case 1:
mtspr(SPRN_PMC1, val);
break;
case 2:
mtspr(SPRN_PMC2, val);
break;
case 3:
mtspr(SPRN_PMC3, val);
break;
case 4:
mtspr(SPRN_PMC4, val);
break;
case 5:
mtspr(SPRN_PMC5, val);
break;
case 6:
mtspr(SPRN_PMC6, val);
break;
#ifdef CONFIG_PPC64
case 7:
mtspr(SPRN_PMC7, val);
break;
case 8:
mtspr(SPRN_PMC8, val);
break;
#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
}
static int any_pmc_overflown(struct cpu_hw_events *cpuhw)
{
int i, idx;
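/*
 * The PMCs are 32-bit; an overflowed counter has bit 31 set and so
 * reads as negative when cast to a signed int.
 */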
for (i = 0; i < cpuhw->n_events; i++) {
idx = cpuhw->event[i]->hw.idx;
if ((idx) && ((int)read_pmc(idx) < 0))
return idx;
}
return 0;
}
/* Called from sysrq_handle_showregs() */
void perf_event_print_debug(void)
{
unsigned long sdar, sier, flags;
u32 pmcs[MAX_HWEVENTS];
int i;
if (!ppmu) {
pr_info("Performance monitor hardware not registered.\n");
return;
}
if (!ppmu->n_counter)
return;
local_irq_save(flags);
pr_info("CPU: %d PMU registers, ppmu = %s n_counters = %d",
smp_processor_id(), ppmu->name, ppmu->n_counter);
for (i = 0; i < ppmu->n_counter; i++)
pmcs[i] = read_pmc(i + 1);
for (; i < MAX_HWEVENTS; i++)
pmcs[i] = 0xdeadbeef;
pr_info("PMC1: %08x PMC2: %08x PMC3: %08x PMC4: %08x\n",
pmcs[0], pmcs[1], pmcs[2], pmcs[3]);
if (ppmu->n_counter > 4)
pr_info("PMC5: %08x PMC6: %08x PMC7: %08x PMC8: %08x\n",
pmcs[4], pmcs[5], pmcs[6], pmcs[7]);
pr_info("MMCR0: %016lx MMCR1: %016lx MMCRA: %016lx\n",
mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCRA));
sdar = sier = 0;
#ifdef CONFIG_PPC64
sdar = mfspr(SPRN_SDAR);
if (ppmu->flags & PPMU_HAS_SIER)
sier = mfspr(SPRN_SIER);
if (ppmu->flags & PPMU_ARCH_207S) {
pr_info("MMCR2: %016lx EBBHR: %016lx\n",
mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
pr_info("EBBRR: %016lx BESCR: %016lx\n",
mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
}
if (ppmu->flags & PPMU_ARCH_31) {
pr_info("MMCR3: %016lx SIER2: %016lx SIER3: %016lx\n",
mfspr(SPRN_MMCR3), mfspr(SPRN_SIER2), mfspr(SPRN_SIER3));
}
#endif
pr_info("SIAR: %016lx SDAR: %016lx SIER: %016lx\n",
mfspr(SPRN_SIAR), sdar, sier);
local_irq_restore(flags);
}
/*
* Check if a set of events can all go on the PMU at once.
* If they can't, this will look at alternative codes for the events
* and see if any combination of alternative codes is feasible.
* The feasible set is returned in event_id[].
*/
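/*
 * The first loop below tries each event's primary code. If that fails,
 * the second half performs a depth-first backtracking search over the
 * events' alternative codes: choice[] records the alternative chosen for
 * each event, and svalues[]/smasks[] save the accumulated constraint
 * state so it can be restored when backtracking.
 */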
static int power_check_constraints(struct cpu_hw_events *cpuhw,
u64 event_id[], unsigned int cflags[],
int n_ev, struct perf_event **event)
{
unsigned long mask, value, nv;
unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
int i, j;
unsigned long addf = ppmu->add_fields;
unsigned long tadd = ppmu->test_adder;
unsigned long grp_mask = ppmu->group_constraint_mask;
unsigned long grp_val = ppmu->group_constraint_val;
if (n_ev > ppmu->n_counter)
return -1;
/* First see if the events will go on as-is */
for (i = 0; i < n_ev; ++i) {
if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
&& !ppmu->limited_pmc_event(event_id[i])) {
ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
event_id[i] = cpuhw->alternatives[i][0];
}
if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
&cpuhw->avalues[i][0], event[i]->attr.config1))
return -1;
}
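/*
 * Constraint arithmetic: each event contributes a (mask, value) pair of
 * bit-fields. "Select" fields must hold the same value for every event
 * that sets them, while fields marked in ppmu->add_fields accumulate a
 * count of events; ppmu->test_adder is chosen so that a field which has
 * exceeded its capacity perturbs bits covered by the mask, making the
 * checks below fail.
 */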
value = mask = 0;
for (i = 0; i < n_ev; ++i) {
nv = (value | cpuhw->avalues[i][0]) +
(value & cpuhw->avalues[i][0] & addf);
if (((((nv + tadd) ^ value) & mask) & (~grp_mask)) != 0)
break;
if (((((nv + tadd) ^ cpuhw->avalues[i][0]) & cpuhw->amasks[i][0])
& (~grp_mask)) != 0)
break;
value = nv;
mask |= cpuhw->amasks[i][0];
}
if (i == n_ev) {
if ((value & mask & grp_mask) != (mask & grp_val))
return -1;
else
return 0; /* all OK */
}
/* doesn't work, gather alternatives... */
if (!ppmu->get_alternatives)
return -1;
for (i = 0; i < n_ev; ++i) {
choice[i] = 0;
n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
for (j = 1; j < n_alt[i]; ++j)
ppmu->get_constraint(cpuhw->alternatives[i][j],
&cpuhw->amasks[i][j],
&cpuhw->avalues[i][j],
event[i]->attr.config1);
}
/* enumerate all possibilities and see if any will work */
i = 0;
j = -1;
value = mask = nv = 0;
while (i < n_ev) {
if (j >= 0) {
/* we're backtracking, restore context */
value = svalues[i];
mask = smasks[i];
j = choice[i];
}
/*
* See if any alternative k for event_id i,
* where k > j, will satisfy the constraints.
*/
while (++j < n_alt[i]) {
nv = (value | cpuhw->avalues[i][j]) +
(value & cpuhw->avalues[i][j] & addf);
if ((((nv + tadd) ^ value) & mask) == 0 &&
(((nv + tadd) ^ cpuhw->avalues[i][j])
& cpuhw->amasks[i][j]) == 0)
break;
}
if (j >= n_alt[i]) {
/*
* No feasible alternative, backtrack
* to event_id i-1 and continue enumerating its
* alternatives from where we got up to.
*/
if (--i < 0)
return -1;
} else {
/*
* Found a feasible alternative for event_id i,
* remember where we got up to with this event_id,
* go on to the next event_id, and start with
* the first alternative for it.
*/
choice[i] = j;
svalues[i] = value;
smasks[i] = mask;
value = nv;
mask |= cpuhw->amasks[i][j];
++i;
j = -1;
}
}
/* OK, we have a feasible combination, tell the caller the solution */
for (i = 0; i < n_ev; ++i)
event_id[i] = cpuhw->alternatives[i][choice[i]];
return 0;
}
/*
* Check if newly-added events have consistent settings for
* exclude_{user,kernel,hv} with each other and any previously
* added events.
*/
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
int n_prev, int n_new)
{
int eu = 0, ek = 0, eh = 0;
int i, n, first;
struct perf_event *event;
/*
* If the PMU we're on supports per event exclude settings then we
* don't need to do any of this logic. NB. This assumes no PMU has both
* per event exclude and limited PMCs.
*/
if (ppmu->flags & PPMU_ARCH_207S)
return 0;
n = n_prev + n_new;
if (n <= 1)
return 0;
first = 1;
for (i = 0; i < n; ++i) {
if (cflags[i] & PPMU_LIMITED_PMC_OK) {
cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
continue;
}
event = ctrs[i];
if (first) {
eu = event->attr.exclude_user;
ek = event->attr.exclude_kernel;
eh = event->attr.exclude_hv;
first = 0;
} else if (event->attr.exclude_user != eu ||
event->attr.exclude_kernel != ek ||
event->attr.exclude_hv != eh) {
return -EAGAIN;
}
}
if (eu || ek || eh)
for (i = 0; i < n; ++i)
if (cflags[i] & PPMU_LIMITED_PMC_OK)
cflags[i] |= PPMU_LIMITED_PMC_REQD;
return 0;
}
static u64 check_and_compute_delta(u64 prev, u64 val)
{
u64 delta = (val - prev) & 0xfffffffful;
/*
* POWER7 can roll back counter values; if the new value is smaller
* than the previous value it will cause the delta and the counter to
* have bogus values unless we rolled a counter over. If a counter is
* rolled back, it will be smaller, but within 256, which is the maximum
* number of events to roll back at once. If we detect a rollback
* return 0. This can lead to a small lack of precision in the
* counters.
*/
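/*
 * The subtraction is modulo 2^32: e.g. prev = 0xfffffff0 and val = 0x10
 * (a wrapped counter) gives delta = 0x20.
 */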
if (prev > val && (prev - val) < 256)
delta = 0;
return delta;
}
static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
if (event->hw.state & PERF_HES_STOPPED)
return;
if (!event->hw.idx)
return;
if (is_ebb_event(event)) {
val = read_pmc(event->hw.idx);
local64_set(&event->hw.prev_count, val);
return;
}
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
* Therefore we treat them like NMIs.
*/
do {
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
delta = check_and_compute_delta(prev, val);
if (!delta)
return;
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
local64_add(delta, &event->count);
/*
* A number of places program the PMC with (0x80000000 - period_left).
* We never want period_left to be less than 1 because we will program
* the PMC with a value >= 0x80000000 and an edge detected PMC will
* roll around to 0 before taking an exception. We have seen this
* on POWER8.
*
* To fix this, clamp the minimum value of period_left to 1.
*/
do {
prev = local64_read(&event->hw.period_left);
val = prev - delta;
if (val < 1)
val = 1;
} while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
}
/*
* On some machines, PMC5 and PMC6 can't be written, don't respect
* the freeze conditions, and don't generate interrupts. This tells
* us if `event' is using such a PMC.
*/
static int is_limited_pmc(int pmcnum)
{
return (ppmu->flags & PPMU_LIMITED_PMC5_6)
&& (pmcnum == 5 || pmcnum == 6);
}
static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
u64 val, prev, delta;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
if (!event->hw.idx)
continue;
val = (event->hw.idx == 5) ? pmc5 : pmc6;
prev = local64_read(&event->hw.prev_count);
event->hw.idx = 0;
delta = check_and_compute_delta(prev, val);
if (delta)
local64_add(delta, &event->count);
}
}
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
u64 val, prev;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
event->hw.idx = cpuhw->limited_hwidx[i];
val = (event->hw.idx == 5) ? pmc5 : pmc6;
prev = local64_read(&event->hw.prev_count);
if (check_and_compute_delta(prev, val))
local64_set(&event->hw.prev_count, val);
perf_event_update_userpage(event);
}
}
/*
* Since limited events don't respect the freeze conditions, we
* have to read them immediately after freezing or unfreezing the
* other events. We try to keep the values from the limited
* events as consistent as possible by keeping the delay (in
* cycles and instructions) between freezing/unfreezing and reading
* the limited events as small and consistent as possible.
* Therefore, if any limited events are in use, we read them
* both, and always in the same order, to minimize variability,
* and do it inside the same asm that writes MMCR0.
*/
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
unsigned long pmc5, pmc6;
if (!cpuhw->n_limited) {
mtspr(SPRN_MMCR0, mmcr0);
return;
}
/*
* Write MMCR0, then read PMC5 and PMC6 immediately.
* To ensure we don't get a performance monitor interrupt
* between writing MMCR0 and freezing/thawing the limited
* events, we first write MMCR0 with the event overflow
* interrupt enable bits turned off.
*/
asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
: "=&r" (pmc5), "=&r" (pmc6)
: "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
"i" (SPRN_MMCR0),
"i" (SPRN_PMC5), "i" (SPRN_PMC6));
if (mmcr0 & MMCR0_FC)
freeze_limited_counters(cpuhw, pmc5, pmc6);
else
thaw_limited_counters(cpuhw, pmc5, pmc6);
/*
* Write the full MMCR0 including the event overflow interrupt
* enable bits, if necessary.
*/
if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
mtspr(SPRN_MMCR0, mmcr0);
}
/*
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
static void power_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags, mmcr0, val, mmcra;
if (!ppmu)
return;
local_irq_save(flags);
cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!cpuhw->disabled) {
/*
* Check if we ever enabled the PMU on this cpu.
*/
if (!cpuhw->pmcs_enabled) {
ppc_enable_pmcs();
cpuhw->pmcs_enabled = 1;
}
/*
* Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56
* Also clear PMXE to disable PMIs getting triggered in some
* corner cases during PMU disable.
*/
val = mmcr0 = mfspr(SPRN_MMCR0);
val |= MMCR0_FC;
val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO |
MMCR0_PMXE | MMCR0_FC56);
/* Set mmcr0 PMCCEXT for p10 */
if (ppmu->flags & PPMU_ARCH_31)
val |= MMCR0_PMCCEXT;
/*
* The barrier is to make sure the mtspr has been
* executed and the PMU has frozen the events etc.
* before we return.
*/
write_mmcr0(cpuhw, val);
mb();
isync();
/*
* Some corner cases could clear the PMU counter overflow
* while a masked PMI is pending. One such case is when
* a PMI happens during interrupt replay and perf counter
* values are cleared by PMU callbacks before replay.
*
* Disable the interrupt by clearing the paca bit for PMI
* since we are disabling the PMU now. Otherwise provide a
* warning if there is PMI pending, but no counter is found
* overflown.
*
* Since power_pmu_disable runs under local_irq_save, it
* could happen that code hits a PMC overflow without PMI
* pending in paca. Hence only clear PMI pending if it was
* set.
*
* If a PMI is pending, then MSR[EE] must be disabled (because
* the masked PMI handler disables EE). So it is safe to
* call clear_pmi_irq_pending().
*/
if (pmi_irq_pending())
clear_pmi_irq_pending();
val = mmcra = cpuhw->mmcr.mmcra;
/*
* Disable instruction sampling if it was enabled
*/
if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
val &= ~MMCRA_SAMPLE_ENABLE;
/* Disable BHRB via mmcra (BHRBRD) for p10 */
if (ppmu->flags & PPMU_ARCH_31)
val |= MMCRA_BHRB_DISABLE;
/*
* Write SPRN_MMCRA if mmcra has either disabled
* instruction sampling or BHRB.
*/
if (val != mmcra) {
mtspr(SPRN_MMCRA, mmcra);
mb();
isync();
}
cpuhw->disabled = 1;
cpuhw->n_added = 0;
ebb_switch_out(mmcr0);
#ifdef CONFIG_PPC64
/*
* These are readable by userspace, may contain kernel
* addresses and are not switched by context switch, so clear
* them now to avoid leaking anything to userspace in general
* including to another process.
*/
if (ppmu->flags & PPMU_ARCH_207S) {
mtspr(SPRN_SDAR, 0);
mtspr(SPRN_SIAR, 0);
}
#endif
}
local_irq_restore(flags);
}
/*
* Re-enable all events.
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
static void power_pmu_enable(struct pmu *pmu)
{
struct perf_event *event;
struct cpu_hw_events *cpuhw;
unsigned long flags;
long i;
unsigned long val, mmcr0;
s64 left;
unsigned int hwc_index[MAX_HWEVENTS];
int n_lim;
int idx;
bool ebb;
if (!ppmu)
return;
local_irq_save(flags);
cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!cpuhw->disabled)
goto out;
if (cpuhw->n_events == 0) {
ppc_set_pmu_inuse(0);
goto out;
}
cpuhw->disabled = 0;
/*
* EBB requires an exclusive group and all events must have the EBB
* flag set, or not set, so we can just check a single event. Also we
* know we have at least one event.
*/
ebb = is_ebb_event(cpuhw->event[0]);
/*
* If we didn't change anything, or only removed events,
* no need to recalculate MMCR* settings and reset the PMCs.
* Just reenable the PMU with the current MMCR* settings
* (possibly updated for removal of events).
*/
if (!cpuhw->n_added) {
/*
* If there is any active event with an overflown PMC
* value, set back PACA_IRQ_PMI which would have been
* cleared in power_pmu_disable().
*/
hard_irq_disable();
if (any_pmc_overflown(cpuhw))
set_pmi_irq_pending();
mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
if (ppmu->flags & PPMU_ARCH_31)
mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3);
goto out_enable;
}
/*
* Clear all MMCR settings and recompute them for the new set of events.
*/
memset(&cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));
if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
&cpuhw->mmcr, cpuhw->event, ppmu->flags)) {
/* shouldn't ever get here */
printk(KERN_ERR "oops compute_mmcr failed\n");
goto out;
}
if (!(ppmu->flags & PPMU_ARCH_207S)) {
/*
* Add in MMCR0 freeze bits corresponding to the attr.exclude_*
* bits for the first event. We have already checked that all
* events have the same value for these bits as the first event.
*/
event = cpuhw->event[0];
if (event->attr.exclude_user)
cpuhw->mmcr.mmcr0 |= MMCR0_FCP;
if (event->attr.exclude_kernel)
cpuhw->mmcr.mmcr0 |= freeze_events_kernel;
if (event->attr.exclude_hv)
cpuhw->mmcr.mmcr0 |= MMCR0_FCHV;
}
/*
* Write the new configuration to MMCR* with the freeze
* bit set and set the hardware events to their initial values.
* Then unfreeze the events.
*/
ppc_set_pmu_inuse(1);
mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
mtspr(SPRN_MMCR0, (cpuhw->mmcr.mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
| MMCR0_FC);
if (ppmu->flags & PPMU_ARCH_207S)
mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2);
if (ppmu->flags & PPMU_ARCH_31)
mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3);
/*
* Read off any pre-existing events that need to move
* to another PMC.
*/
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
power_pmu_read(event);
write_pmc(event->hw.idx, 0);
event->hw.idx = 0;
}
}
/*
* Initialize the PMCs for all the new and moved events.
*/
cpuhw->n_limited = n_lim = 0;
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (event->hw.idx)
continue;
idx = hwc_index[i] + 1;
if (is_limited_pmc(idx)) {
cpuhw->limited_counter[n_lim] = event;
cpuhw->limited_hwidx[n_lim] = idx;
++n_lim;
continue;
}
if (ebb)
val = local64_read(&event->hw.prev_count);
else {
val = 0;
if (event->hw.sample_period) {
left = local64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
local64_set(&event->hw.prev_count, val);
}
event->hw.idx = idx;
if (event->hw.state & PERF_HES_STOPPED)
val = 0;
write_pmc(idx, val);
perf_event_update_userpage(event);
}
cpuhw->n_limited = n_lim;
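/* Enable PMU exceptions (PMXE) and freeze counters on an enabled exception condition (FCECE) */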
cpuhw->mmcr.mmcr0 |= MMCR0_PMXE | MMCR0_FCECE;
out_enable:
pmao_restore_workaround(ebb);
mmcr0 = ebb_switch_in(ebb, cpuhw);
mb();
if (cpuhw->bhrb_users)
ppmu->config_bhrb(cpuhw->bhrb_filter);
write_mmcr0(cpuhw, mmcr0);
/*
* Enable instruction sampling if necessary
*/
if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE) {
mb();
mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra);
}
out:
local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *ctrs[], u64 *events,
unsigned int *flags)
{
int n = 0;
struct perf_event *event;
if (group->pmu->task_ctx_nr == perf_hw_context) {
if (n >= max_count)
return -1;
ctrs[n] = group;
flags[n] = group->hw.event_base;
events[n++] = group->hw.config;
}
for_each_sibling_event(event, group) {
if (event->pmu->task_ctx_nr == perf_hw_context &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
ctrs[n] = event;
flags[n] = event->hw.event_base;
events[n++] = event->hw.config;
}
}
return n;
}
/*
* Add an event to the PMU.
* If all events are not already frozen, then we disable and
* re-enable the PMU in order to get power_pmu_enable() to do the
* actual work of reconfiguring the PMU.
*/
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
int n0;
int ret = -EAGAIN;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
/*
* Add the event to the list (if there is room)
* and check whether the total set is still feasible.
*/
cpuhw = this_cpu_ptr(&cpu_hw_events);
n0 = cpuhw->n_events;
if (n0 >= ppmu->n_counter)
goto out;
cpuhw->event[n0] = event;
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;
/*
* This event may have been disabled/stopped in record_and_restart()
* because we exceeded the ->event_limit. If re-starting the event,
* clear the ->hw.state (STOPPED and UPTODATE flags), so the user
* notification is re-enabled.
*/
if (!(ef_flags & PERF_EF_START))
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
else
event->hw.state = 0;
/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be performed
* at commit time(->commit_txn) as a whole
*/
if (cpuhw->txn_flags & PERF_PMU_TXN_ADD)
goto nocheck;
if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
goto out;
if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1, cpuhw->event))
goto out;
event->hw.config = cpuhw->events[n0];
nocheck:
ebb_event_add(event);
++cpuhw->n_events;
++cpuhw->n_added;
ret = 0;
out:
if (has_branch_stack(event)) {
u64 bhrb_filter = -1;
if (ppmu->bhrb_filter_map)
bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
if (bhrb_filter != -1) {
cpuhw->bhrb_filter = bhrb_filter;
power_pmu_bhrb_enable(event);
}
}
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
/*
* Remove an event from the PMU.
*/
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
long i;
unsigned long flags;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
power_pmu_read(event);
cpuhw = this_cpu_ptr(&cpu_hw_events);
for (i = 0; i < cpuhw->n_events; ++i) {
if (event == cpuhw->event[i]) {
while (++i < cpuhw->n_events) {
cpuhw->event[i-1] = cpuhw->event[i];
cpuhw->events[i-1] = cpuhw->events[i];
cpuhw->flags[i-1] = cpuhw->flags[i];
}
--cpuhw->n_events;
ppmu->disable_pmc(event->hw.idx - 1, &cpuhw->mmcr);
if (event->hw.idx) {
write_pmc(event->hw.idx, 0);
event->hw.idx = 0;
}
perf_event_update_userpage(event);
break;
}
}
for (i = 0; i < cpuhw->n_limited; ++i)
if (event == cpuhw->limited_counter[i])
break;
if (i < cpuhw->n_limited) {
while (++i < cpuhw->n_limited) {
cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
}
--cpuhw->n_limited;
}
if (cpuhw->n_events == 0) {
/* disable exceptions if no events are running */
cpuhw->mmcr.mmcr0 &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
if (has_branch_stack(event))
power_pmu_bhrb_disable(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* POWER-PMU does not support disabling individual counters, hence
* program their cycle counter to their max value and ignore the interrupts.
*/
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
s64 left;
unsigned long val;
if (!event->hw.idx || !event->hw.sample_period)
return;
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (ef_flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
local_irq_save(flags);
perf_pmu_disable(event->pmu);
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
val = 0;
if (left < 0x80000000L)
val = 0x80000000L - left;
write_pmc(event->hw.idx, val);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
unsigned long flags;
if (!event->hw.idx || !event->hw.sample_period)
return;
if (event->hw.state & PERF_HES_STOPPED)
return;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
power_pmu_read(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
write_pmc(event->hw.idx, 0);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*
* We only support PERF_PMU_TXN_ADD transactions. Save the
* transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
* transactions.
*/
static void power_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
cpuhw->txn_flags = txn_flags;
if (txn_flags & ~PERF_PMU_TXN_ADD)
return;
perf_pmu_disable(pmu);
cpuhw->n_txn_start = cpuhw->n_events;
}
/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
static void power_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
unsigned int txn_flags;
WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
txn_flags = cpuhw->txn_flags;
cpuhw->txn_flags = 0;
if (txn_flags & ~PERF_PMU_TXN_ADD)
return;
perf_pmu_enable(pmu);
}
/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 if success
*/
static int power_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
long i, n;
if (!ppmu)
return -EAGAIN;
cpuhw = this_cpu_ptr(&cpu_hw_events);
WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
cpuhw->txn_flags = 0;
return 0;
}
n = cpuhw->n_events;
if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
return -EAGAIN;
i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n, cpuhw->event);
if (i < 0)
return -EAGAIN;
for (i = cpuhw->n_txn_start; i < n; ++i)
cpuhw->event[i]->hw.config = cpuhw->events[i];
cpuhw->txn_flags = 0;
perf_pmu_enable(pmu);
return 0;
}
/*
* Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
* An event can only go on a limited PMC if it counts something
* that a limited PMC can count, doesn't require interrupts, and
* doesn't exclude any processor mode.
*/
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
unsigned int flags)
{
int n;
u64 alt[MAX_EVENT_ALTERNATIVES];
if (event->attr.exclude_user
|| event->attr.exclude_kernel
|| event->attr.exclude_hv
|| event->attr.sample_period)
return 0;
if (ppmu->limited_pmc_event(ev))
return 1;
/*
* The requested event_id isn't on a limited PMC already;
* see if any alternative code goes on a limited PMC.
*/
if (!ppmu->get_alternatives)
return 0;
flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
n = ppmu->get_alternatives(ev, flags, alt);
return n > 0;
}
/*
* Find an alternative event_id that goes on a normal PMC, if possible,
* and return the event_id code, or 0 if there is no such alternative.
* (Note: event_id code 0 is "don't count" on all machines.)
*/
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
u64 alt[MAX_EVENT_ALTERNATIVES];
int n;
flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
n = ppmu->get_alternatives(ev, flags, alt);
if (!n)
return 0;
return alt[0];
}
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* Release the PMU if this is the last perf_event.
*/
static void hw_perf_event_destroy(struct perf_event *event)
{
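/*
 * atomic_add_unless() decrements unless the count is 1; the last
 * reference is dropped under pmc_reserve_mutex so the final decrement
 * and release_pmc_hardware() cannot race with a concurrent reserve.
 */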
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
* Translate a generic cache event_id config to a raw event_id code.
*/
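/* config is packed as id | (op << 8) | (result << 16), per the generic perf ABI */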
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
unsigned long type, op, result;
u64 ev;
if (!ppmu->cache_events)
return -EINVAL;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = (*ppmu->cache_events)[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*eventp = ev;
return 0;
}
static bool is_event_blacklisted(u64 ev)
{
int i;
for (i = 0; i < ppmu->n_blacklist_ev; i++) {
if (ppmu->blacklist_ev[i] == ev)
return true;
}
return false;
}
static int power_pmu_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags, irq_flags;
struct perf_event *ctrs[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int cflags[MAX_HWEVENTS];
int n;
int err;
struct cpu_hw_events *cpuhw;
if (!ppmu)
return -ENOENT;
if (has_branch_stack(event)) {
/* BHRB is only available on ISA v2.07 and later PMUs */
if (!(ppmu->flags & PPMU_ARCH_207S))
return -EOPNOTSUPP;
}
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return -EOPNOTSUPP;
if (ppmu->blacklist_ev && is_event_blacklisted(ev))
return -EINVAL;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return err;
if (ppmu->blacklist_ev && is_event_blacklisted(ev))
return -EINVAL;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
if (ppmu->blacklist_ev && is_event_blacklisted(ev))
return -EINVAL;
break;
default:
return -ENOENT;
}
/*
* PMU config registers have fields that are
* reserved and some specific values for bit fields are reserved.
* For example, MMCRA[61:62] is Random Sampling Mode (SM)
* and the value 0b11 for this field is reserved.
* Check for invalid values in attr.config.
*/
if (ppmu->check_attr_config &&
ppmu->check_attr_config(event))
return -EINVAL;
event->hw.config_base = ev;
event->hw.idx = 0;
/*
* If we are not running on a hypervisor, force the
* exclude_hv bit to 0 so that we don't care what
* the user set it to.
*/
if (!firmware_has_feature(FW_FEATURE_LPAR))
event->attr.exclude_hv = 0;
/*
* If this is a per-task event, then we can use
* PM_RUN_* events interchangeably with their non RUN_*
* equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
* XXX we should check if the task is an idle task.
*/
flags = 0;
if (event->attach_state & PERF_ATTACH_TASK)
flags |= PPMU_ONLY_COUNT_RUN;
/*
* If this machine has limited events, check whether this
* event_id could go on a limited event.
*/
if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
if (can_go_on_limited_pmc(event, ev, flags)) {
flags |= PPMU_LIMITED_PMC_OK;
} else if (ppmu->limited_pmc_event(ev)) {
/*
* The requested event_id is on a limited PMC,
* but we can't use a limited PMC; see if any
* alternative goes on a normal PMC.
*/
ev = normal_pmc_alternative(ev, flags);
if (!ev)
return -EINVAL;
}
}
/* Extra checks for EBB */
err = ebb_event_check(event);
if (err)
return err;
/*
* If this is in a group, check if it can go on with all the
* other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader, ppmu->n_counter - 1,
ctrs, events, cflags);
if (n < 0)
return -EINVAL;
}
events[n] = ev;
ctrs[n] = event;
cflags[n] = flags;
if (check_excludes(ctrs, cflags, n, 1))
return -EINVAL;
local_irq_save(irq_flags);
cpuhw = this_cpu_ptr(&cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1, ctrs);
if (has_branch_stack(event)) {
u64 bhrb_filter = -1;
/*
* Currently no PMU supports having multiple branch filters
* at the same time. Branch filters are set via MMCRA IFM[32:33]
* bits for Power8 and above. Return EOPNOTSUPP when multiple
* branch filters are requested in the event attr.
*
* When opening event via perf_event_open(), branch_sample_type
* gets adjusted in perf_copy_attr(). Kernel will automatically
* adjust the branch_sample_type based on the event modifier
* settings to include PERF_SAMPLE_BRANCH_PLM_ALL. Hence drop
* the check for PERF_SAMPLE_BRANCH_PLM_ALL.
*/
if (hweight64(event->attr.branch_sample_type & ~PERF_SAMPLE_BRANCH_PLM_ALL) > 1) {
local_irq_restore(irq_flags);
return -EOPNOTSUPP;
}
if (ppmu->bhrb_filter_map)
bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
if (bhrb_filter == -1) {
local_irq_restore(irq_flags);
return -EOPNOTSUPP;
}
cpuhw->bhrb_filter = bhrb_filter;
}
local_irq_restore(irq_flags);
if (err)
return -EINVAL;
event->hw.config = events[n];
event->hw.event_base = cflags[n];
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
/*
* For EBB events we just context switch the PMC value, we don't do any
* of the sample_period logic. We use hw.prev_count for this.
*/
if (is_ebb_event(event))
local64_set(&event->hw.prev_count, 0);
/*
* See if we need to reserve the PMU.
* If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
}
event->destroy = hw_perf_event_destroy;
return err;
}
static int power_pmu_event_idx(struct perf_event *event)
{
return event->hw.idx;
}
ssize_t power_events_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}
static struct pmu power_pmu = {
.pmu_enable = power_pmu_enable,
.pmu_disable = power_pmu_disable,
.event_init = power_pmu_event_init,
.add = power_pmu_add,
.del = power_pmu_del,
.start = power_pmu_start,
.stop = power_pmu_stop,
.read = power_pmu_read,
.start_txn = power_pmu_start_txn,
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
.event_idx = power_pmu_event_idx,
.sched_task = power_pmu_sched_task,
};
#define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
PERF_SAMPLE_PHYS_ADDR | \
PERF_SAMPLE_DATA_PAGE_SIZE)
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = check_and_compute_delta(prev, val);
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
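/*
 * left is the number of events remaining before the next overflow; the
 * PMC will be programmed with 0x80000000 - left so that bit 31 is set
 * (raising an exception) after that many more events.
 */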
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (delta == 0)
left++;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
/*
* If the instruction pointer is not requested in the sample
* (PERF_SAMPLE_IP), just record the sample irrespective
* of the SIAR-valid check.
*/
if (event->attr.sample_type & PERF_SAMPLE_IP)
record = siar_valid(regs);
else
record = 1;
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Due to a hardware limitation, sometimes SIAR could sample a kernel
* address even when freeze on supervisor state (kernel) is set in
* MMCR2. Check attr.exclude_kernel and address to drop the sample in
* these cases.
*/
if (event->attr.exclude_kernel &&
(event->attr.sample_type & PERF_SAMPLE_IP) &&
is_kernel_addr(mfspr(SPRN_SIAR)))
record = 0;
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
if (event->attr.sample_type & PERF_SAMPLE_ADDR_TYPE)
perf_get_data_addr(event, regs, &data.addr);
if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
struct cpu_hw_events *cpuhw;
cpuhw = this_cpu_ptr(&cpu_hw_events);
power_pmu_bhrb_read(event, cpuhw);
perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack);
}
if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
ppmu->get_mem_data_src) {
ppmu->get_mem_data_src(&data.data_src, ppmu->flags, regs);
data.sample_flags |= PERF_SAMPLE_DATA_SRC;
}
if (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE &&
ppmu->get_mem_weight) {
ppmu->get_mem_weight(&data.weight.full, event->attr.sample_type);
data.sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
}
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
} else if (period) {
/* Account for interrupt in case of invalid SIAR */
if (perf_event_account_interrupt(event))
power_pmu_stop(event, 0);
}
}
/*
* Called from generic code to get the misc flags (i.e. processor mode)
* for an event_id.
*/
unsigned long perf_misc_flags(struct pt_regs *regs)
{
u32 flags = perf_get_misc_flags(regs);
if (flags)
return flags;
return user_mode(regs) ? PERF_RECORD_MISC_USER :
PERF_RECORD_MISC_KERNEL;
}
/*
* Called from generic code to get the instruction pointer
* for an event_id.
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
unsigned long siar = mfspr(SPRN_SIAR);
if (regs_use_siar(regs) && siar_valid(regs) && siar)
return siar + perf_ip_adjust(regs);
else
return regs->nip;
}
static bool pmc_overflow_power7(unsigned long val)
{
/*
* Events on POWER7 can roll back if a speculative event doesn't
* eventually complete. Unfortunately in some rare cases they will
* raise a performance monitor exception. We need to catch this to
* ensure we reset the PMC. In all cases the PMC will be 256 or less
* cycles from overflow.
*
* We only do this if the first pass fails to find any overflowing
* PMCs because a user might set a period of less than 256 and we
* don't want to mistakenly reset them.
*/
if ((0x80000000 - val) <= 256)
return true;
return false;
}
static bool pmc_overflow(unsigned long val)
{
if ((int)val < 0)
return true;
return false;
}
/*
* Performance monitor interrupt stuff
*/
static void __perf_event_interrupt(struct pt_regs *regs)
{
int i, j;
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
int found, active;
if (cpuhw->n_limited)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
perf_read_regs(regs);
/* Read all the PMCs since we'll need them a bunch of times */
for (i = 0; i < ppmu->n_counter; ++i)
cpuhw->pmcs[i] = read_pmc(i + 1);
/* Try to find what caused the IRQ */
found = 0;
for (i = 0; i < ppmu->n_counter; ++i) {
if (!pmc_overflow(cpuhw->pmcs[i]))
continue;
if (is_limited_pmc(i + 1))
continue; /* these won't generate IRQs */
/*
* We've found one that's overflowed. For active
* counters we need to log this. For inactive
* counters, we need to reset them anyway
*/
found = 1;
active = 0;
for (j = 0; j < cpuhw->n_events; ++j) {
event = cpuhw->event[j];
if (event->hw.idx == (i + 1)) {
active = 1;
record_and_restart(event, cpuhw->pmcs[i], regs);
break;
}
}
/*
* Clear PACA_IRQ_PMI in case it was set by
* set_pmi_irq_pending() when PMU was enabled
* after accounting for interrupts.
*/
clear_pmi_irq_pending();
if (!active)
/* reset non active counters that have overflowed */
write_pmc(i + 1, 0);
}
if (!found && pvr_version_is(PVR_POWER7)) {
/* check active counters for special buggy p7 overflow */
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
if (pmc_overflow_power7(cpuhw->pmcs[event->hw.idx - 1])) {
/* event has overflowed in a buggy way */
found = 1;
record_and_restart(event,
cpuhw->pmcs[event->hw.idx - 1],
regs);
}
}
}
/*
* During system wide profiling or while a specific CPU is monitored for an
* event, some corner cases could cause a PMC to overflow in the idle path. This
* will trigger a PMI after waking up from idle. Since counter values are _not_
* saved/restored in the idle path, this can lead to the "Can't find PMC" message below.
*/
if (unlikely(!found) && !arch_irq_disabled_regs(regs))
printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");
/*
* Reset MMCR0 to its normal value. This will set PMXE and
* clear FC (freeze counters) and PMAO (perf mon alert occurred)
* and thus allow interrupts to occur again.
* XXX might want to use MSR.PM to keep the events frozen until
* we get back out of this interrupt.
*/
write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0);
/* Clear the cpuhw->pmcs */
memset(&cpuhw->pmcs, 0, sizeof(cpuhw->pmcs));
}
static void perf_event_interrupt(struct pt_regs *regs)
{
u64 start_clock = sched_clock();
__perf_event_interrupt(regs);
perf_sample_event_took(sched_clock() - start_clock);
}
static int power_pmu_prepare_cpu(unsigned int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
if (ppmu) {
memset(cpuhw, 0, sizeof(*cpuhw));
cpuhw->mmcr.mmcr0 = MMCR0_FC;
}
return 0;
}
static ssize_t pmu_name_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
{
if (ppmu)
return sysfs_emit(buf, "%s", ppmu->name);
return 0;
}
static DEVICE_ATTR_RO(pmu_name);
static struct attribute *pmu_caps_attrs[] = {
&dev_attr_pmu_name.attr,
NULL
};
static const struct attribute_group pmu_caps_group = {
.name = "caps",
.attrs = pmu_caps_attrs,
};
static const struct attribute_group *pmu_caps_groups[] = {
&pmu_caps_group,
NULL,
};
int __init register_power_pmu(struct power_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
ppmu = pmu;
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
power_pmu.attr_groups = ppmu->attr_groups;
if (ppmu->flags & PPMU_ARCH_207S)
power_pmu.attr_update = pmu_caps_groups;
power_pmu.capabilities |= (ppmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS);
#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.
*/
if (mfmsr() & MSR_HV)
freeze_events_kernel = MMCR0_FCHV;
#endif /* MSR_HV */
perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
power_pmu_prepare_cpu, NULL);
return 0;
}
#ifdef CONFIG_PPC64
static bool pmu_override = false;
static unsigned long pmu_override_val;
static void do_pmu_override(void *data)
{
ppc_set_pmu_inuse(1);
if (pmu_override_val)
mtspr(SPRN_MMCR1, pmu_override_val);
mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC);
}
static int __init init_ppc64_pmu(void)
{
if (cpu_has_feature(CPU_FTR_HVMODE) && pmu_override) {
pr_warn("disabling perf due to pmu_override= command line option.\n");
on_each_cpu(do_pmu_override, NULL, 1);
return 0;
}
/* run through all the pmu drivers one at a time */
if (!init_power5_pmu())
return 0;
else if (!init_power5p_pmu())
return 0;
else if (!init_power6_pmu())
return 0;
else if (!init_power7_pmu())
return 0;
else if (!init_power8_pmu())
return 0;
else if (!init_power9_pmu())
return 0;
else if (!init_power10_pmu())
return 0;
else if (!init_ppc970_pmu())
return 0;
else
return init_generic_compat_pmu();
}
early_initcall(init_ppc64_pmu);
static int __init pmu_setup(char *str)
{
unsigned long val;
if (!early_cpu_has_feature(CPU_FTR_HVMODE))
return 0;
pmu_override = true;
if (kstrtoul(str, 0, &val))
val = 0;
pmu_override_val = val;
return 1;
}
__setup("pmu_override=", pmu_setup);
#endif
| linux-master | arch/powerpc/perf/core-book3s.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Performance counter support for POWER8 processors.
*
* Copyright 2009 Paul Mackerras, IBM Corporation.
* Copyright 2013 Michael Ellerman, IBM Corporation.
*/
#define pr_fmt(fmt) "power8-pmu: " fmt
#include "isa207-common.h"
/*
* Some power8 event codes.
*/
#define EVENT(_name, _code) _name = _code,
enum {
#include "power8-events-list.h"
};
#undef EVENT
/* MMCRA IFM bits - POWER8 */
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
#define POWER8_MMCRA_BHRB_MASK 0x00000000C0000000UL
/*
* Raw event encoding for PowerISA v2.07 (Power8):
*
* 60 56 52 48 44 40 36 32
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
* | | [ ] [ thresh_cmp ] [ thresh_ctl ]
* | | | |
* | | *- IFM (Linux) thresh start/stop OR FAB match -*
* | *- BHRB (Linux)
* *- EBB (Linux)
*
* 28 24 20 16 12 8 4 0
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
* [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ]
* | | | | |
* | | | | *- mark
* | | *- L1/L2/L3 cache_sel |
* | | |
* | *- sampling mode for marked events *- combine
* |
* *- thresh_sel
*
* Below uses IBM bit numbering.
*
* MMCR1[x:y] = unit (PMCxUNIT)
* MMCR1[x] = combine (PMCxCOMB)
*
* if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
* # PM_MRK_FAB_RSP_MATCH
* MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
* else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
* # PM_MRK_FAB_RSP_MATCH_CYC
* MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
* else
* MMCRA[48:55] = thresh_ctl (THRESH START/END)
*
* if thresh_sel:
* MMCRA[45:47] = thresh_sel
*
* if thresh_cmp:
* MMCRA[22:24] = thresh_cmp[0:2]
* MMCRA[25:31] = thresh_cmp[3:9]
*
* if unit == 6 or unit == 7
* MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
* else if unit == 8 or unit == 9:
* if cache_sel[0] == 0: # L3 bank
* MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
* else if cache_sel[0] == 1:
* MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
* else if cache_sel[1]: # L1 event
* MMCR1[16] = cache_sel[2]
* MMCR1[17] = cache_sel[3]
*
* if mark:
* MMCRA[63] = 1 (SAMPLE_ENABLE)
* MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
* MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
*
* if EBB and BHRB:
* MMCRA[32:33] = IFM
*
*/
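/*
 * Illustrative sketch of the low word only, using hypothetical field
 * values rather than a real event code: per the layout above, a raw
 * event selecting PMC 2 with pmcxsel 0x3c would encode its low bits as
 * (2 << 16) | 0x3c = 0x2003c. Actual event codes for this PMU come
 * from power8-events-list.h.
 */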
/* PowerISA v2.07 format attribute structure */
extern const struct attribute_group isa207_pmu_format_group;
/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
{ PM_MRK_ST_CMPL, PM_MRK_ST_CMPL_ALT },
{ PM_BR_MRK_2PATH, PM_BR_MRK_2PATH_ALT },
{ PM_L3_CO_MEPF, PM_L3_CO_MEPF_ALT },
{ PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L2MISS_ALT },
{ PM_CMPLU_STALL_ALT, PM_CMPLU_STALL },
{ PM_BR_2PATH, PM_BR_2PATH_ALT },
{ PM_INST_DISP, PM_INST_DISP_ALT },
{ PM_RUN_CYC_ALT, PM_RUN_CYC },
{ PM_MRK_FILT_MATCH, PM_MRK_FILT_MATCH_ALT },
{ PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
{ PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
};
static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
int num_alt = 0;
num_alt = isa207_get_alternatives(event, alt,
ARRAY_SIZE(event_alternatives), flags,
event_alternatives);
return num_alt;
}
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
GENERIC_EVENT_ATTR(mem_access, MEM_ACCESS);
CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads, PM_BRU_FIN);
CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
static struct attribute *power8_events_attr[] = {
GENERIC_EVENT_PTR(PM_CYC),
GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
GENERIC_EVENT_PTR(PM_CMPLU_STALL),
GENERIC_EVENT_PTR(PM_INST_CMPL),
GENERIC_EVENT_PTR(PM_BRU_FIN),
GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
GENERIC_EVENT_PTR(PM_LD_REF_L1),
GENERIC_EVENT_PTR(PM_LD_MISS_L1),
GENERIC_EVENT_PTR(MEM_ACCESS),
CACHE_EVENT_PTR(PM_LD_MISS_L1),
CACHE_EVENT_PTR(PM_LD_REF_L1),
CACHE_EVENT_PTR(PM_L1_PREF),
CACHE_EVENT_PTR(PM_ST_MISS_L1),
CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
CACHE_EVENT_PTR(PM_INST_FROM_L1),
CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
CACHE_EVENT_PTR(PM_DATA_FROM_L3),
CACHE_EVENT_PTR(PM_L3_PREF_ALL),
CACHE_EVENT_PTR(PM_L2_ST_MISS),
CACHE_EVENT_PTR(PM_L2_ST),
CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
CACHE_EVENT_PTR(PM_BRU_FIN),
CACHE_EVENT_PTR(PM_DTLB_MISS),
CACHE_EVENT_PTR(PM_ITLB_MISS),
NULL
};
static const struct attribute_group power8_pmu_events_group = {
.name = "events",
.attrs = power8_events_attr,
};
static struct attribute *power8_pmu_caps_attrs[] = {
NULL
};
static struct attribute_group power8_pmu_caps_group = {
.name = "caps",
.attrs = power8_pmu_caps_attrs,
};
static const struct attribute_group *power8_pmu_attr_groups[] = {
&isa207_pmu_format_group,
&power8_pmu_events_group,
&power8_pmu_caps_group,
NULL,
};
static int power8_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_GCT_NOSLOT_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
[PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
[PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
};
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
u64 pmu_bhrb_filter = 0;
/* BHRB and regular PMU events share the same privilege state
* filter configuration. BHRB is always recorded along with a
* regular PMU event. As the privilege state filter is handled
* in the basic PMC configuration of the accompanying regular
* PMU event, we ignore any separate BHRB specific request.
*/
/* No branch filter requested */
if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
return pmu_bhrb_filter;
/* Invalid branch filter options - HW does not support */
if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
return -1;
if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
return -1;
if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
return -1;
if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
return pmu_bhrb_filter;
}
/* Everything else is unsupported */
return -1;
}
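/*
 * For example, a branch sampling request limited to calls (roughly
 * "perf record -j any_call ...") maps onto POWER8_MMCRA_IFM1 above,
 * while return or indirect-call filtering is rejected with -1 because
 * the hardware filter cannot express it.
 */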
static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK;
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
* Table of generalized cache-related events.
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
static u64 power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
[ C(RESULT_MISS) ] = PM_LD_MISS_L1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = PM_L1_PREF,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(L1I) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(LL) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = PM_L2_ST,
[ C(RESULT_MISS) ] = PM_L2_ST_MISS,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_DTLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_ITLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = PM_BRU_FIN,
[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(NODE) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
#undef C
static struct power_pmu power8_pmu = {
.name = "POWER8",
.n_counter = MAX_PMU_COUNTERS,
.max_alternatives = MAX_ALT + 1,
.add_fields = ISA207_ADD_FIELDS,
.test_adder = ISA207_TEST_ADDER,
.compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power8_config_bhrb,
.bhrb_filter_map = power8_bhrb_filter_map,
.get_constraint = isa207_get_constraint,
.get_alternatives = power8_get_alternatives,
.get_mem_data_src = isa207_get_mem_data_src,
.get_mem_weight = isa207_get_mem_weight,
.disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power8_generic_events),
.generic_events = power8_generic_events,
.cache_events = &power8_cache_events,
.attr_groups = power8_pmu_attr_groups,
.bhrb_nr = 32,
};
int __init init_power8_pmu(void)
{
int rc;
unsigned int pvr = mfspr(SPRN_PVR);
if (PVR_VER(pvr) != PVR_POWER8E && PVR_VER(pvr) != PVR_POWER8NVL &&
PVR_VER(pvr) != PVR_POWER8)
return -ENODEV;
rc = register_power_pmu(&power8_pmu);
if (rc)
return rc;
/* Tell userspace that EBB is supported */
cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
if (cpu_has_feature(CPU_FTR_PMAO_BUG))
pr_info("PMAO restore workaround active.\n");
return 0;
}
| linux-master | arch/powerpc/perf/power8-pmu.c |
// SPDX-License-Identifier: GPL-2.0
#include <asm/io.h>
#include <asm/hvcall.h>
#include "hv-gpci.h"
#include "hv-common.h"
unsigned long hv_perf_caps_get(struct hv_perf_caps *caps)
{
unsigned long r;
struct p {
struct hv_get_perf_counter_info_params params;
struct hv_gpci_system_performance_capabilities caps;
} __packed __aligned(sizeof(uint64_t));
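/*
 * The hcall consumes one physically contiguous buffer, so the request
 * parameters and the returned capabilities structure are laid out back
 * to back, packed and 8-byte aligned, and passed by physical address
 * below.
 */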
struct p arg = {
.params = {
.counter_request = cpu_to_be32(
HV_GPCI_system_performance_capabilities),
.starting_index = cpu_to_be32(-1),
.counter_info_version_in = 0,
}
};
r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
virt_to_phys(&arg), sizeof(arg));
if (r)
return r;
pr_devel("capability_mask: 0x%x\n", arg.caps.capability_mask);
caps->version = arg.params.counter_info_version_out;
caps->collect_privileged = !!arg.caps.perf_collect_privileged;
caps->ga = !!(arg.caps.capability_mask & HV_GPCI_CM_GA);
caps->expanded = !!(arg.caps.capability_mask & HV_GPCI_CM_EXPANDED);
caps->lab = !!(arg.caps.capability_mask & HV_GPCI_CM_LAB);
return r;
}
| linux-master | arch/powerpc/perf/hv-common.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Performance counter support for POWER10 processors.
*
* Copyright 2020 Madhavan Srinivasan, IBM Corporation.
* Copyright 2020 Athira Rajeev, IBM Corporation.
*/
#define pr_fmt(fmt) "power10-pmu: " fmt
#include "isa207-common.h"
/*
* Raw event encoding for Power10:
*
* 60 56 52 48 44 40 36 32
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
* | | [ ] [ src_match ] [ src_mask ] | [ ] [ l2l3_sel ] [ thresh_ctl ]
* | | | | | |
* | | *- IFM (Linux) | | thresh start/stop -*
* | *- BHRB (Linux) | src_sel
* *- EBB (Linux) *invert_bit
*
* 28 24 20 16 12 8 4 0
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
* [ ] [ sample ] [ ] [ ] [ pmc ] [unit ] [ ] | m [ pmcxsel ]
* | | | | | | |
* | | | | | | *- mark
* | | | *- L1/L2/L3 cache_sel | |*-radix_scope_qual
* | | sdar_mode |
* | *- sampling mode for marked events *- combine
* |
* *- thresh_sel
*
* Below uses IBM bit numbering.
*
* MMCR1[x:y] = unit (PMCxUNIT)
* MMCR1[24] = pmc1combine[0]
* MMCR1[25] = pmc1combine[1]
* MMCR1[26] = pmc2combine[0]
* MMCR1[27] = pmc2combine[1]
* MMCR1[28] = pmc3combine[0]
* MMCR1[29] = pmc3combine[1]
* MMCR1[30] = pmc4combine[0]
* MMCR1[31] = pmc4combine[1]
*
* if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
* MMCR1[20:27] = thresh_ctl
* else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
* MMCR1[20:27] = thresh_ctl
* else
* MMCRA[48:55] = thresh_ctl (THRESH START/END)
*
* if thresh_sel:
* MMCRA[45:47] = thresh_sel
*
* if l2l3_sel:
* MMCR2[56:60] = l2l3_sel[0:4]
*
* MMCR1[16] = cache_sel[0]
* MMCR1[17] = cache_sel[1]
* MMCR1[18] = radix_scope_qual
*
* if mark:
* MMCRA[63] = 1 (SAMPLE_ENABLE)
* MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
* MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
*
* if EBB and BHRB:
* MMCRA[32:33] = IFM
*
* MMCRA[SDAR_MODE] = sdar_mode[0:1]
*/
/*
* Some power10 event codes.
*/
#define EVENT(_name, _code) enum{_name = _code}
#include "power10-events-list.h"
#undef EVENT
/* MMCRA IFM bits - POWER10 */
#define POWER10_MMCRA_IFM1 0x0000000040000000UL
#define POWER10_MMCRA_IFM2 0x0000000080000000UL
#define POWER10_MMCRA_IFM3 0x00000000C0000000UL
#define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL
extern u64 PERF_REG_EXTENDED_MASK;
/* Table of alternatives, sorted by column 0 */
static const unsigned int power10_event_alternatives[][MAX_ALT] = {
{ PM_INST_CMPL_ALT, PM_INST_CMPL },
{ PM_CYC_ALT, PM_CYC },
};
static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
int num_alt = 0;
num_alt = isa207_get_alternatives(event, alt,
ARRAY_SIZE(power10_event_alternatives), flags,
power10_event_alternatives);
return num_alt;
}
static int power10_check_attr_config(struct perf_event *ev)
{
u64 val;
u64 event = ev->attr.config;
val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
if (val == 0x10 || isa3XX_check_attr_config(ev))
return -EINVAL;
return 0;
}
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL);
GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
GENERIC_EVENT_ATTR(mem-loads, MEM_LOADS);
GENERIC_EVENT_ATTR(mem-stores, MEM_STORES);
GENERIC_EVENT_ATTR(branch-instructions, PM_BR_FIN);
GENERIC_EVENT_ATTR(branch-misses, PM_MPRED_BR_FIN);
GENERIC_EVENT_ATTR(cache-misses, PM_LD_DEMAND_MISS_L1_FIN);
CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_LD_PREFETCH_CACHE_LINE_MISS);
CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_REQ);
CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PF_MISS_L3);
CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
static struct attribute *power10_events_attr_dd1[] = {
GENERIC_EVENT_PTR(PM_CYC),
GENERIC_EVENT_PTR(PM_INST_CMPL),
GENERIC_EVENT_PTR(PM_BR_CMPL),
GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
GENERIC_EVENT_PTR(PM_LD_REF_L1),
GENERIC_EVENT_PTR(PM_LD_MISS_L1),
GENERIC_EVENT_PTR(MEM_LOADS),
GENERIC_EVENT_PTR(MEM_STORES),
CACHE_EVENT_PTR(PM_LD_MISS_L1),
CACHE_EVENT_PTR(PM_LD_REF_L1),
CACHE_EVENT_PTR(PM_LD_PREFETCH_CACHE_LINE_MISS),
CACHE_EVENT_PTR(PM_ST_MISS_L1),
CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
CACHE_EVENT_PTR(PM_INST_FROM_L1),
CACHE_EVENT_PTR(PM_IC_PREF_REQ),
CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
CACHE_EVENT_PTR(PM_DATA_FROM_L3),
CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
CACHE_EVENT_PTR(PM_BR_CMPL),
CACHE_EVENT_PTR(PM_DTLB_MISS),
CACHE_EVENT_PTR(PM_ITLB_MISS),
NULL
};
static struct attribute *power10_events_attr[] = {
GENERIC_EVENT_PTR(PM_CYC),
GENERIC_EVENT_PTR(PM_INST_CMPL),
GENERIC_EVENT_PTR(PM_BR_FIN),
GENERIC_EVENT_PTR(PM_MPRED_BR_FIN),
GENERIC_EVENT_PTR(PM_LD_REF_L1),
GENERIC_EVENT_PTR(PM_LD_DEMAND_MISS_L1_FIN),
GENERIC_EVENT_PTR(MEM_LOADS),
GENERIC_EVENT_PTR(MEM_STORES),
CACHE_EVENT_PTR(PM_LD_MISS_L1),
CACHE_EVENT_PTR(PM_LD_REF_L1),
CACHE_EVENT_PTR(PM_LD_PREFETCH_CACHE_LINE_MISS),
CACHE_EVENT_PTR(PM_ST_MISS_L1),
CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
CACHE_EVENT_PTR(PM_INST_FROM_L1),
CACHE_EVENT_PTR(PM_IC_PREF_REQ),
CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
CACHE_EVENT_PTR(PM_DATA_FROM_L3),
CACHE_EVENT_PTR(PM_L3_PF_MISS_L3),
CACHE_EVENT_PTR(PM_L2_ST_MISS),
CACHE_EVENT_PTR(PM_L2_ST),
CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
CACHE_EVENT_PTR(PM_BR_CMPL),
CACHE_EVENT_PTR(PM_DTLB_MISS),
CACHE_EVENT_PTR(PM_ITLB_MISS),
NULL
};
static const struct attribute_group power10_pmu_events_group_dd1 = {
.name = "events",
.attrs = power10_events_attr_dd1,
};
static const struct attribute_group power10_pmu_events_group = {
.name = "events",
.attrs = power10_events_attr,
};
PMU_FORMAT_ATTR(event, "config:0-59");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
PMU_FORMAT_ATTR(combine, "config:10-11");
PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
PMU_FORMAT_ATTR(cache_sel, "config:20-21");
PMU_FORMAT_ATTR(sdar_mode, "config:22-23");
PMU_FORMAT_ATTR(sample_mode, "config:24-28");
PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
PMU_FORMAT_ATTR(thresh_start, "config:36-39");
PMU_FORMAT_ATTR(l2l3_sel, "config:40-44");
PMU_FORMAT_ATTR(src_sel, "config:45-46");
PMU_FORMAT_ATTR(invert_bit, "config:47");
PMU_FORMAT_ATTR(src_mask, "config:48-53");
PMU_FORMAT_ATTR(src_match, "config:54-59");
PMU_FORMAT_ATTR(radix_scope, "config:9");
PMU_FORMAT_ATTR(thresh_cmp, "config1:0-17");
static struct attribute *power10_pmu_format_attr[] = {
&format_attr_event.attr,
&format_attr_pmcxsel.attr,
&format_attr_mark.attr,
&format_attr_combine.attr,
&format_attr_unit.attr,
&format_attr_pmc.attr,
&format_attr_cache_sel.attr,
&format_attr_sdar_mode.attr,
&format_attr_sample_mode.attr,
&format_attr_thresh_sel.attr,
&format_attr_thresh_stop.attr,
&format_attr_thresh_start.attr,
&format_attr_l2l3_sel.attr,
&format_attr_src_sel.attr,
&format_attr_invert_bit.attr,
&format_attr_src_mask.attr,
&format_attr_src_match.attr,
&format_attr_radix_scope.attr,
&format_attr_thresh_cmp.attr,
NULL,
};
static const struct attribute_group power10_pmu_format_group = {
.name = "format",
.attrs = power10_pmu_format_attr,
};
static struct attribute *power10_pmu_caps_attrs[] = {
NULL
};
static struct attribute_group power10_pmu_caps_group = {
.name = "caps",
.attrs = power10_pmu_caps_attrs,
};
static const struct attribute_group *power10_pmu_attr_groups_dd1[] = {
&power10_pmu_format_group,
&power10_pmu_events_group_dd1,
&power10_pmu_caps_group,
NULL,
};
static const struct attribute_group *power10_pmu_attr_groups[] = {
&power10_pmu_format_group,
&power10_pmu_events_group,
&power10_pmu_caps_group,
NULL,
};
static int power10_generic_events_dd1[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
[PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
[PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
};
static int power10_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_FIN,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_MPRED_BR_FIN,
[PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
[PERF_COUNT_HW_CACHE_MISSES] = PM_LD_DEMAND_MISS_L1_FIN,
};
static u64 power10_bhrb_filter_map(u64 branch_sample_type)
{
u64 pmu_bhrb_filter = 0;
/* BHRB and regular PMU events share the same privilege state
* filter configuration. BHRB is always recorded along with a
* regular PMU event. As the privilege state filter is handled
* in the basic PMC configuration of the accompanying regular
* PMU event, we ignore any separate BHRB specific request.
*/
/* No branch filter requested */
if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
return pmu_bhrb_filter;
/* Invalid branch filter options - HW does not support */
if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
return -1;
if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL) {
pmu_bhrb_filter |= POWER10_MMCRA_IFM2;
return pmu_bhrb_filter;
}
if (branch_sample_type & PERF_SAMPLE_BRANCH_COND) {
pmu_bhrb_filter |= POWER10_MMCRA_IFM3;
return pmu_bhrb_filter;
}
if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
return -1;
if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
pmu_bhrb_filter |= POWER10_MMCRA_IFM1;
return pmu_bhrb_filter;
}
/* Everything else is unsupported */
return -1;
}
static void power10_config_bhrb(u64 pmu_bhrb_filter)
{
pmu_bhrb_filter &= POWER10_MMCRA_BHRB_MASK;
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
* Table of generalized cache-related events.
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
static u64 power10_cache_events_dd1[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PM_LD_REF_L1,
[C(RESULT_MISS)] = PM_LD_MISS_L1,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = PM_ST_MISS_L1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = PM_LD_PREFETCH_CACHE_LINE_MISS,
[C(RESULT_MISS)] = 0,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PM_INST_FROM_L1,
[C(RESULT_MISS)] = PM_L1_ICACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = PM_INST_FROM_L1MISS,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = PM_IC_PREF_REQ,
[C(RESULT_MISS)] = 0,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PM_DATA_FROM_L3,
[C(RESULT_MISS)] = PM_DATA_FROM_L3MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = 0,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = PM_DTLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = PM_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PM_BR_CMPL,
[C(RESULT_MISS)] = PM_BR_MPRED_CMPL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
};
static u64 power10_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PM_LD_REF_L1,
[C(RESULT_MISS)] = PM_LD_MISS_L1,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = PM_ST_MISS_L1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = PM_LD_PREFETCH_CACHE_LINE_MISS,
[C(RESULT_MISS)] = 0,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PM_INST_FROM_L1,
[C(RESULT_MISS)] = PM_L1_ICACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = PM_INST_FROM_L1MISS,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = PM_IC_PREF_REQ,
[C(RESULT_MISS)] = 0,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PM_DATA_FROM_L3,
[C(RESULT_MISS)] = PM_DATA_FROM_L3MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = PM_L2_ST,
[C(RESULT_MISS)] = PM_L2_ST_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = PM_L3_PF_MISS_L3,
[C(RESULT_MISS)] = 0,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = PM_DTLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = PM_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PM_BR_CMPL,
[C(RESULT_MISS)] = PM_BR_MPRED_CMPL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
};
#undef C
/*
* Set the MMCR0[CC56RUN] bit to enable counting for
* PMC5 and PMC6 regardless of the state of CTRL[RUN],
* so that we can use counters 5 and 6 as PM_INST_CMPL and
* PM_CYC.
*/
static int power10_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
struct perf_event *pevents[], u32 flags)
{
int ret;
ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags);
if (!ret)
mmcr->mmcr0 |= MMCR0_C56RUN;
return ret;
}
static struct power_pmu power10_pmu = {
.name = "POWER10",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
.test_adder = ISA207_TEST_ADDER,
.group_constraint_mask = CNST_CACHE_PMC4_MASK,
.group_constraint_val = CNST_CACHE_PMC4_VAL,
.compute_mmcr = power10_compute_mmcr,
.config_bhrb = power10_config_bhrb,
.bhrb_filter_map = power10_bhrb_filter_map,
.get_constraint = isa207_get_constraint,
.get_alternatives = power10_get_alternatives,
.get_mem_data_src = isa207_get_mem_data_src,
.get_mem_weight = isa207_get_mem_weight,
.disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S |
PPMU_ARCH_31 | PPMU_HAS_ATTR_CONFIG1,
.n_generic = ARRAY_SIZE(power10_generic_events),
.generic_events = power10_generic_events,
.cache_events = &power10_cache_events,
.attr_groups = power10_pmu_attr_groups,
.bhrb_nr = 32,
.capabilities = PERF_PMU_CAP_EXTENDED_REGS,
.check_attr_config = power10_check_attr_config,
};
int __init init_power10_pmu(void)
{
unsigned int pvr;
int rc;
pvr = mfspr(SPRN_PVR);
if (PVR_VER(pvr) != PVR_POWER10)
return -ENODEV;
/* Add the ppmu flag for power10 DD1 */
if ((PVR_CFG(pvr) == 1))
power10_pmu.flags |= PPMU_P10_DD1;
/* Set the PERF_REG_EXTENDED_MASK here */
PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31;
if ((PVR_CFG(pvr) == 1)) {
power10_pmu.generic_events = power10_generic_events_dd1;
power10_pmu.attr_groups = power10_pmu_attr_groups_dd1;
power10_pmu.cache_events = &power10_cache_events_dd1;
}
rc = register_power_pmu(&power10_pmu);
if (rc)
return rc;
/* Tell userspace that EBB is supported */
cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
return 0;
}
| linux-master | arch/powerpc/perf/power10-pmu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* In-Memory Collection (IMC) Performance Monitor counter support.
*
* Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
* (C) 2017 Anju T Sudhakar, IBM Corporation.
* (C) 2017 Hemant K Shaw, IBM Corporation.
*/
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/opal.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>
#include <linux/string.h>
#include <linux/spinlock.h>
/* Nest IMC data structures and variables */
/*
* Used to avoid races in counting the nest-pmu units during hotplug
* register and unregister
*/
static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
static struct imc_pmu_ref *nest_imc_refc;
static int nest_pmus;
/* Core IMC data structures and variables */
static cpumask_t core_imc_cpumask;
static struct imc_pmu_ref *core_imc_refc;
static struct imc_pmu *core_imc_pmu;
/* Thread IMC data structures and variables */
static DEFINE_PER_CPU(u64 *, thread_imc_mem);
static struct imc_pmu *thread_imc_pmu;
static int thread_imc_mem_size;
/* Trace IMC data structures */
static DEFINE_PER_CPU(u64 *, trace_imc_mem);
static struct imc_pmu_ref *trace_imc_refc;
static int trace_imc_mem_size;
/*
* Global data structure used to avoid races between thread,
* core and trace-imc
*/
static struct imc_pmu_ref imc_global_refc = {
.lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
.id = 0,
.refc = 0,
};
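/*
 * Convention used by the event_init paths below: imc_global_refc.id
 * records which IMC domain (core, thread or trace) currently owns the
 * hardware and .refc counts the events in that domain; an event from a
 * different domain is refused with -EBUSY until the count drops back
 * to zero and the id is reset.
 */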
static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
{
return container_of(event->pmu, struct imc_pmu, pmu);
}
PMU_FORMAT_ATTR(event, "config:0-61");
PMU_FORMAT_ATTR(offset, "config:0-31");
PMU_FORMAT_ATTR(rvalue, "config:32");
PMU_FORMAT_ATTR(mode, "config:33-40");
static struct attribute *imc_format_attrs[] = {
&format_attr_event.attr,
&format_attr_offset.attr,
&format_attr_rvalue.attr,
&format_attr_mode.attr,
NULL,
};
static const struct attribute_group imc_format_group = {
.name = "format",
.attrs = imc_format_attrs,
};
/* Format attribute for imc trace-mode */
PMU_FORMAT_ATTR(cpmc_reserved, "config:0-19");
PMU_FORMAT_ATTR(cpmc_event, "config:20-27");
PMU_FORMAT_ATTR(cpmc_samplesel, "config:28-29");
PMU_FORMAT_ATTR(cpmc_load, "config:30-61");
static struct attribute *trace_imc_format_attrs[] = {
&format_attr_event.attr,
&format_attr_cpmc_reserved.attr,
&format_attr_cpmc_event.attr,
&format_attr_cpmc_samplesel.attr,
&format_attr_cpmc_load.attr,
NULL,
};
static const struct attribute_group trace_imc_format_group = {
.name = "format",
.attrs = trace_imc_format_attrs,
};
/* Get the cpumask printed to a buffer "buf" */
static ssize_t imc_pmu_cpumask_get_attr(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pmu *pmu = dev_get_drvdata(dev);
struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);
cpumask_t *active_mask;
switch (imc_pmu->domain) {
case IMC_DOMAIN_NEST:
active_mask = &nest_imc_cpumask;
break;
case IMC_DOMAIN_CORE:
active_mask = &core_imc_cpumask;
break;
default:
return 0;
}
return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, imc_pmu_cpumask_get_attr, NULL);
static struct attribute *imc_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL,
};
static const struct attribute_group imc_pmu_cpumask_attr_group = {
.attrs = imc_pmu_cpumask_attrs,
};
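/*
 * This group has no .name, so the mask is typically exposed directly
 * as a "cpumask" file in the PMU's sysfs directory (for example
 * /sys/bus/event_source/devices/<pmu>/cpumask), which tools can read
 * to find the designated CPU for nest or core counters.
 */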
/* device_str_attr_create: Populate event "name" and string "str" in attribute */
static struct attribute *device_str_attr_create(const char *name, const char *str)
{
struct perf_pmu_events_attr *attr;
attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
return NULL;
sysfs_attr_init(&attr->attr.attr);
attr->event_str = str;
attr->attr.attr.name = name;
attr->attr.attr.mode = 0444;
attr->attr.show = perf_event_sysfs_show;
return &attr->attr.attr;
}
static int imc_parse_event(struct device_node *np, const char *scale,
const char *unit, const char *prefix,
u32 base, struct imc_events *event)
{
const char *s;
u32 reg;
if (of_property_read_u32(np, "reg", &reg))
goto error;
/* Add the base_reg value to the "reg" */
event->value = base + reg;
if (of_property_read_string(np, "event-name", &s))
goto error;
event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s);
if (!event->name)
goto error;
if (of_property_read_string(np, "scale", &s))
s = scale;
if (s) {
event->scale = kstrdup(s, GFP_KERNEL);
if (!event->scale)
goto error;
}
if (of_property_read_string(np, "unit", &s))
s = unit;
if (s) {
event->unit = kstrdup(s, GFP_KERNEL);
if (!event->unit)
goto error;
}
return 0;
error:
kfree(event->unit);
kfree(event->scale);
kfree(event->name);
return -EINVAL;
}
/*
* imc_free_events: Function to cleanup the events list, having
* "nr_entries".
*/
static void imc_free_events(struct imc_events *events, int nr_entries)
{
int i;
/* Nothing to clean, return */
if (!events)
return;
for (i = 0; i < nr_entries; i++) {
kfree(events[i].unit);
kfree(events[i].scale);
kfree(events[i].name);
}
kfree(events);
}
/*
* update_events_in_group: Update the "events" information in an attr_group
* and assign the attr_group to the pmu "pmu".
*/
static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
{
struct attribute_group *attr_group;
struct attribute **attrs, *dev_str;
struct device_node *np, *pmu_events;
u32 handle, base_reg;
int i = 0, j = 0, ct, ret;
const char *prefix, *g_scale, *g_unit;
const char *ev_val_str, *ev_scale_str, *ev_unit_str;
if (!of_property_read_u32(node, "events", &handle))
pmu_events = of_find_node_by_phandle(handle);
else
return 0;
/* Did not find any node with a given phandle */
if (!pmu_events)
return 0;
/* Get a count of number of child nodes */
ct = of_get_child_count(pmu_events);
/* Get the event prefix */
if (of_property_read_string(node, "events-prefix", &prefix)) {
of_node_put(pmu_events);
return 0;
}
/* Get a global unit and scale data if available */
if (of_property_read_string(node, "scale", &g_scale))
g_scale = NULL;
if (of_property_read_string(node, "unit", &g_unit))
g_unit = NULL;
/* "reg" property gives out the base offset of the counters data */
of_property_read_u32(node, "reg", &base_reg);
/* Allocate memory for the events */
pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
if (!pmu->events) {
of_node_put(pmu_events);
return -ENOMEM;
}
ct = 0;
/* Parse the events and update the struct */
for_each_child_of_node(pmu_events, np) {
ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]);
if (!ret)
ct++;
}
of_node_put(pmu_events);
/* Allocate memory for attribute group */
attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL);
if (!attr_group) {
imc_free_events(pmu->events, ct);
return -ENOMEM;
}
/*
* Allocate memory for attributes.
* Since we have count of events for this pmu, we also allocate
* memory for the scale and unit attribute for now.
* "ct" has the total event structs added from the events-parent node.
* So allocate three times the "ct" (this includes event, event_scale and
* event_unit).
*/
attrs = kcalloc(((ct * 3) + 1), sizeof(struct attribute *), GFP_KERNEL);
if (!attrs) {
kfree(attr_group);
imc_free_events(pmu->events, ct);
return -ENOMEM;
}
attr_group->name = "events";
attr_group->attrs = attrs;
do {
ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
if (!dev_str)
continue;
attrs[j++] = dev_str;
if (pmu->events[i].scale) {
ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
if (!dev_str)
continue;
attrs[j++] = dev_str;
}
if (pmu->events[i].unit) {
ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
if (!dev_str)
continue;
attrs[j++] = dev_str;
}
} while (++i < ct);
/* Save the event attribute */
pmu->attr_groups[IMC_EVENT_ATTR] = attr_group;
return 0;
}
/* get_nest_pmu_ref: Return the imc_pmu_ref struct for the given node */
static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
{
return per_cpu(local_nest_imc_refc, cpu);
}
static void nest_change_cpu_context(int old_cpu, int new_cpu)
{
struct imc_pmu **pn = per_nest_pmu_arr;
if (old_cpu < 0 || new_cpu < 0)
return;
while (*pn) {
perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
pn++;
}
}
static int ppc_nest_imc_cpu_offline(unsigned int cpu)
{
int nid, target = -1;
const struct cpumask *l_cpumask;
struct imc_pmu_ref *ref;
/*
* Check in the designated list for this cpu. Don't bother
* if it is not one of them.
*/
if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
return 0;
/*
* Check whether nest_imc is registered. We could end up here if the
* cpuhotplug callback registration fails, i.e. the callback invokes the
* offline path for all successfully registered nodes. At this stage,
* nest_imc pmu will not be registered and we should return here.
*
* We return with a zero since this is not an offline failure. And
* cpuhp_setup_state() returns the actual failure reason to the caller,
* which in turn will call the cleanup routine.
*/
if (!nest_pmus)
return 0;
/*
* Now that this cpu is one of the designated,
* find a next cpu a) which is online and b) in same chip.
*/
nid = cpu_to_node(cpu);
l_cpumask = cpumask_of_node(nid);
target = cpumask_last(l_cpumask);
/*
* If this (target) is the last cpu in the cpumask for this chip,
* check for any possible online cpu in the chip.
*/
if (unlikely(target == cpu))
target = cpumask_any_but(l_cpumask, cpu);
/*
* Update the cpumask with the target cpu and
* migrate the context if needed
*/
if (target >= 0 && target < nr_cpu_ids) {
cpumask_set_cpu(target, &nest_imc_cpumask);
nest_change_cpu_context(cpu, target);
} else {
opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
get_hard_smp_processor_id(cpu));
/*
* If this is the last cpu in this chip, then skip the reference
* count lock and make the reference count on this chip zero.
*/
ref = get_nest_pmu_ref(cpu);
if (!ref)
return -EINVAL;
ref->refc = 0;
}
return 0;
}
static int ppc_nest_imc_cpu_online(unsigned int cpu)
{
const struct cpumask *l_cpumask;
static struct cpumask tmp_mask;
int res;
/* Get the cpumask of this node */
l_cpumask = cpumask_of_node(cpu_to_node(cpu));
/*
* If this is not the first online CPU on this node, then
* just return.
*/
if (cpumask_and(&tmp_mask, l_cpumask, &nest_imc_cpumask))
return 0;
/*
* If this is the first online cpu on this node
* disable the nest counters by making an OPAL call.
*/
res = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
get_hard_smp_processor_id(cpu));
if (res)
return res;
/* Make this CPU the designated target for counter collection */
cpumask_set_cpu(cpu, &nest_imc_cpumask);
return 0;
}
static int nest_pmu_cpumask_init(void)
{
return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
"perf/powerpc/imc:online",
ppc_nest_imc_cpu_online,
ppc_nest_imc_cpu_offline);
}
static void nest_imc_counters_release(struct perf_event *event)
{
int rc, node_id;
struct imc_pmu_ref *ref;
if (event->cpu < 0)
return;
node_id = cpu_to_node(event->cpu);
/*
* See if we need to disable the nest PMU.
* If no events are currently in use, then we have to take a
* lock to ensure that we don't race with another task enabling
* or disabling the nest counters.
*/
ref = get_nest_pmu_ref(event->cpu);
if (!ref)
return;
/* Take the lock for this node and then decrement the reference count */
spin_lock(&ref->lock);
if (ref->refc == 0) {
/*
* The scenario where this is true is when a perf session is
* started, followed by offlining of all cpus in a given node.
*
* In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
* sets ref->refc to zero if the cpu going offline is the last
* cpu in a given node, and makes an OPAL call to disable the
* engine in that node.
*
*/
spin_unlock(&ref->lock);
return;
}
ref->refc--;
if (ref->refc == 0) {
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
get_hard_smp_processor_id(event->cpu));
if (rc) {
spin_unlock(&ref->lock);
pr_err("nest-imc: Unable to stop the counters for node %d\n", node_id);
return;
}
} else if (ref->refc < 0) {
WARN(1, "nest-imc: Invalid event reference count\n");
ref->refc = 0;
}
spin_unlock(&ref->lock);
}
static int nest_imc_event_init(struct perf_event *event)
{
int chip_id, rc, node_id;
u32 l_config, config = event->attr.config;
struct imc_mem_info *pcni;
struct imc_pmu *pmu;
struct imc_pmu_ref *ref;
bool flag = false;
if (event->attr.type != event->pmu->type)
return -ENOENT;
/* Sampling not supported */
if (event->hw.sample_period)
return -EINVAL;
if (event->cpu < 0)
return -EINVAL;
pmu = imc_event_to_pmu(event);
/* Sanity check for config (event offset) */
if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
return -EINVAL;
/*
* Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
* Get the base memory address for this cpu.
*/
chip_id = cpu_to_chip_id(event->cpu);
/* Return, if chip_id is not valid */
if (chip_id < 0)
return -ENODEV;
pcni = pmu->mem_info;
do {
if (pcni->id == chip_id) {
flag = true;
break;
}
pcni++;
} while (pcni->vbase != 0);
if (!flag)
return -ENODEV;
/*
* Add the event offset to the base address.
*/
l_config = config & IMC_EVENT_OFFSET_MASK;
event->hw.event_base = (u64)pcni->vbase + l_config;
node_id = cpu_to_node(event->cpu);
/*
* Get the imc_pmu_ref struct for this node.
* Take the lock and then increment the count of nest pmu events inited.
*/
ref = get_nest_pmu_ref(event->cpu);
if (!ref)
return -EINVAL;
spin_lock(&ref->lock);
if (ref->refc == 0) {
rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
get_hard_smp_processor_id(event->cpu));
if (rc) {
spin_unlock(&ref->lock);
pr_err("nest-imc: Unable to start the counters for node %d\n",
node_id);
return rc;
}
}
++ref->refc;
spin_unlock(&ref->lock);
event->destroy = nest_imc_counters_release;
return 0;
}
/*
* core_imc_mem_init : Initializes memory for the current core.
*
* Uses alloc_pages_node() and uses the returned address as an argument to
* an opal call to configure the pdbar. The address sent as an argument is
* converted to physical address before the opal call is made. This is the
* base address at which the core imc counters are populated.
*/
static int core_imc_mem_init(int cpu, int size)
{
int nid, rc = 0, core_id = (cpu / threads_per_core);
struct imc_mem_info *mem_info;
struct page *page;
/*
* alloc_pages_node() will allocate memory for the core in the
* local node only.
*/
nid = cpu_to_node(cpu);
mem_info = &core_imc_pmu->mem_info[core_id];
mem_info->id = core_id;
/* We need only vbase for core counters */
page = alloc_pages_node(nid,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size));
if (!page)
return -ENOMEM;
mem_info->vbase = page_address(page);
core_imc_refc[core_id].id = core_id;
spin_lock_init(&core_imc_refc[core_id].lock);
rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
__pa((void *)mem_info->vbase),
get_hard_smp_processor_id(cpu));
if (rc) {
free_pages((u64)mem_info->vbase, get_order(size));
mem_info->vbase = NULL;
}
return rc;
}
static bool is_core_imc_mem_inited(int cpu)
{
struct imc_mem_info *mem_info;
int core_id = (cpu / threads_per_core);
mem_info = &core_imc_pmu->mem_info[core_id];
if (!mem_info->vbase)
return false;
return true;
}
static int ppc_core_imc_cpu_online(unsigned int cpu)
{
const struct cpumask *l_cpumask;
static struct cpumask tmp_mask;
int ret = 0;
/* Get the cpumask for this core */
l_cpumask = cpu_sibling_mask(cpu);
/* If a cpu for this core is already set, then don't do anything */
if (cpumask_and(&tmp_mask, l_cpumask, &core_imc_cpumask))
return 0;
if (!is_core_imc_mem_inited(cpu)) {
ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);
if (ret) {
pr_info("core_imc memory allocation for cpu %d failed\n", cpu);
return ret;
}
}
/* set the cpu in the mask */
cpumask_set_cpu(cpu, &core_imc_cpumask);
return 0;
}
static int ppc_core_imc_cpu_offline(unsigned int cpu)
{
unsigned int core_id;
int ncpu;
struct imc_pmu_ref *ref;
/*
* Clear this cpu out of the mask; if it is not present in the
* mask, don't bother doing anything.
*/
if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
return 0;
/*
* Check whether core_imc is registered. We could end up here
* if the cpuhotplug callback registration fails, i.e. the callback
* invokes the offline path for all successfully registered cpus.
* At this stage, core_imc pmu will not be registered and we
* should return here.
*
* We return with a zero since this is not an offline failure.
* And cpuhp_setup_state() returns the actual failure reason
* to the caller, which in turn will call the cleanup routine.
*/
if (!core_imc_pmu->pmu.event_init)
return 0;
/* Find any online cpu in that core except the current "cpu" */
ncpu = cpumask_last(cpu_sibling_mask(cpu));
if (unlikely(ncpu == cpu))
ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);
if (ncpu >= 0 && ncpu < nr_cpu_ids) {
cpumask_set_cpu(ncpu, &core_imc_cpumask);
perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
} else {
/*
* If this is the last cpu in this core, then skip taking the reference
* count lock for this core and directly zero "refc" for this core.
*/
opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
get_hard_smp_processor_id(cpu));
core_id = cpu / threads_per_core;
ref = &core_imc_refc[core_id];
if (!ref)
return -EINVAL;
ref->refc = 0;
/*
* Reduce the global reference count if this is the
* last cpu in this core and a core-imc event is running
* on this cpu.
*/
spin_lock(&imc_global_refc.lock);
if (imc_global_refc.id == IMC_DOMAIN_CORE)
imc_global_refc.refc--;
spin_unlock(&imc_global_refc.lock);
}
return 0;
}
static int core_imc_pmu_cpumask_init(void)
{
return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
"perf/powerpc/imc_core:online",
ppc_core_imc_cpu_online,
ppc_core_imc_cpu_offline);
}
static void reset_global_refc(struct perf_event *event)
{
spin_lock(&imc_global_refc.lock);
imc_global_refc.refc--;
/*
* If no other thread is running any
* event for this domain (thread/core/trace),
* set the global id to zero.
*/
if (imc_global_refc.refc <= 0) {
imc_global_refc.refc = 0;
imc_global_refc.id = 0;
}
spin_unlock(&imc_global_refc.lock);
}
static void core_imc_counters_release(struct perf_event *event)
{
int rc, core_id;
struct imc_pmu_ref *ref;
if (event->cpu < 0)
return;
/*
* See if we need to disable the IMC PMU.
* If no events are currently in use, then we have to take a
* lock to ensure that we don't race with another task enabling
* or disabling the core counters.
*/
core_id = event->cpu / threads_per_core;
/* Take the lock and decrement the reference count for this core */
ref = &core_imc_refc[core_id];
if (!ref)
return;
spin_lock(&ref->lock);
if (ref->refc == 0) {
/*
* The scenario where this is true is when a perf session is
* started, followed by offlining of all cpus in a given core.
*
* In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
* sets ref->refc to zero if the cpu going offline is the last
* cpu in a given core, and makes an OPAL call to disable the
* engine in that core.
*
*/
spin_unlock(&ref->lock);
return;
}
ref->refc--;
if (ref->refc == 0) {
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
get_hard_smp_processor_id(event->cpu));
if (rc) {
spin_unlock(&ref->lock);
pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
return;
}
} else if (ref->refc < 0) {
WARN(1, "core-imc: Invalid event reference count\n");
ref->refc = 0;
}
spin_unlock(&ref->lock);
reset_global_refc(event);
}
static int core_imc_event_init(struct perf_event *event)
{
int core_id, rc;
u64 config = event->attr.config;
struct imc_mem_info *pcmi;
struct imc_pmu *pmu;
struct imc_pmu_ref *ref;
if (event->attr.type != event->pmu->type)
return -ENOENT;
/* Sampling not supported */
if (event->hw.sample_period)
return -EINVAL;
if (event->cpu < 0)
return -EINVAL;
event->hw.idx = -1;
pmu = imc_event_to_pmu(event);
/* Sanity check for config (event offset) */
if (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size))
return -EINVAL;
if (!is_core_imc_mem_inited(event->cpu))
return -ENODEV;
core_id = event->cpu / threads_per_core;
pcmi = &core_imc_pmu->mem_info[core_id];
if ((!pcmi->vbase))
return -ENODEV;
ref = &core_imc_refc[core_id];
if (!ref)
return -EINVAL;
/*
* Core pmu units are enabled only when they are used.
* See if this is triggered for the first time.
* If yes, take the lock and enable the core counters.
* If not, just increment the count in core_imc_refc struct.
*/
spin_lock(&ref->lock);
if (ref->refc == 0) {
rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
get_hard_smp_processor_id(event->cpu));
if (rc) {
spin_unlock(&ref->lock);
pr_err("core-imc: Unable to start the counters for core %d\n",
core_id);
return rc;
}
}
++ref->refc;
spin_unlock(&ref->lock);
/*
* Since the system can run in either accumulation or trace mode
* of IMC at a time, core-imc events are allowed only if no other
* trace/thread imc events are enabled/monitored.
*
* Take the global lock, and check the refc.id
* to know whether any other trace/thread imc
* events are running.
*/
spin_lock(&imc_global_refc.lock);
if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
/*
* No other trace/thread imc events are running in
* the system, so set the refc.id to core-imc.
*/
imc_global_refc.id = IMC_DOMAIN_CORE;
imc_global_refc.refc++;
} else {
spin_unlock(&imc_global_refc.lock);
return -EBUSY;
}
spin_unlock(&imc_global_refc.lock);
event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
event->destroy = core_imc_counters_release;
return 0;
}
/*
* Allocates a page of memory for each of the online cpus, and load
* LDBAR with 0.
* The physical base address of the page allocated for a cpu will be
* written to the LDBAR for that cpu, when the thread-imc event
* is added.
*
* LDBAR Register Layout:
*
* 0 4 8 12 16 20 24 28
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
* | | [ ] [ Counter Address [8:50]
* | * Mode |
* | * PB Scope
* * Enable/Disable
*
* 32 36 40 44 48 52 56 60
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
* Counter Address [8:50] ]
*
*/
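/*
 * Illustrative sketch: when a thread-imc event is added, the value
 * written to LDBAR is assembled as
 *   ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE
 * i.e. the counter address field shown above plus the enable bit (see
 * thread_imc_event_add() below).
 */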
static int thread_imc_mem_alloc(int cpu_id, int size)
{
u64 *local_mem = per_cpu(thread_imc_mem, cpu_id);
int nid = cpu_to_node(cpu_id);
if (!local_mem) {
struct page *page;
/*
* This case could happen only once at start, since we don't
* free the memory in the cpu offline path.
*/
page = alloc_pages_node(nid,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size));
if (!page)
return -ENOMEM;
local_mem = page_address(page);
per_cpu(thread_imc_mem, cpu_id) = local_mem;
}
mtspr(SPRN_LDBAR, 0);
return 0;
}
static int ppc_thread_imc_cpu_online(unsigned int cpu)
{
return thread_imc_mem_alloc(cpu, thread_imc_mem_size);
}
static int ppc_thread_imc_cpu_offline(unsigned int cpu)
{
/*
* Set the bit 0 of LDBAR to zero.
*
* If bit 0 of LDBAR is unset, it will stop posting
* the counter data to memory.
* For thread-imc, bit 0 of LDBAR will be set to 1 in the
* event_add function. So reset this bit here, to stop the updates
* to memory in the cpu_offline path.
*/
mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
/* Reduce the refc if a thread-imc event is running on this cpu */
spin_lock(&imc_global_refc.lock);
if (imc_global_refc.id == IMC_DOMAIN_THREAD)
imc_global_refc.refc--;
spin_unlock(&imc_global_refc.lock);
return 0;
}
static int thread_imc_cpu_init(void)
{
return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
"perf/powerpc/imc_thread:online",
ppc_thread_imc_cpu_online,
ppc_thread_imc_cpu_offline);
}
static int thread_imc_event_init(struct perf_event *event)
{
u32 config = event->attr.config;
struct task_struct *target;
struct imc_pmu *pmu;
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (!perfmon_capable())
return -EACCES;
/* Sampling not supported */
if (event->hw.sample_period)
return -EINVAL;
event->hw.idx = -1;
pmu = imc_event_to_pmu(event);
/* Sanity check for config offset */
if (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size))
return -EINVAL;
target = event->hw.target;
if (!target)
return -EINVAL;
spin_lock(&imc_global_refc.lock);
/*
* Check if any other trace/core imc events are running in the
* system; if not, set the global id to thread-imc.
*/
if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_THREAD) {
imc_global_refc.id = IMC_DOMAIN_THREAD;
imc_global_refc.refc++;
} else {
spin_unlock(&imc_global_refc.lock);
return -EBUSY;
}
spin_unlock(&imc_global_refc.lock);
event->pmu->task_ctx_nr = perf_sw_context;
event->destroy = reset_global_refc;
return 0;
}
static bool is_thread_imc_pmu(struct perf_event *event)
{
if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc")))
return true;
return false;
}
static u64 *get_event_base_addr(struct perf_event *event)
{
u64 addr;
if (is_thread_imc_pmu(event)) {
addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
}
return (u64 *)event->hw.event_base;
}
static void thread_imc_pmu_start_txn(struct pmu *pmu,
unsigned int txn_flags)
{
if (txn_flags & ~PERF_PMU_TXN_ADD)
return;
perf_pmu_disable(pmu);
}
static void thread_imc_pmu_cancel_txn(struct pmu *pmu)
{
perf_pmu_enable(pmu);
}
static int thread_imc_pmu_commit_txn(struct pmu *pmu)
{
perf_pmu_enable(pmu);
return 0;
}
static u64 imc_read_counter(struct perf_event *event)
{
u64 *addr, data;
/*
* In-Memory Collection (IMC) counters are free-flowing counters.
* So we take a snapshot of the counter value on enable and save it
* to calculate the delta at a later stage to present the event
* counter value.
*/
addr = get_event_base_addr(event);
data = be64_to_cpu(READ_ONCE(*addr));
local64_set(&event->hw.prev_count, data);
return data;
}
static void imc_event_update(struct perf_event *event)
{
u64 counter_prev, counter_new, final_count;
counter_prev = local64_read(&event->hw.prev_count);
counter_new = imc_read_counter(event);
final_count = counter_new - counter_prev;
/* Update the delta to the event count */
local64_add(final_count, &event->count);
}
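/*
 * Worked example with hypothetical numbers: if the snapshot taken when
 * the event started read 1000 and the counter in memory now reads
 * 1600, imc_event_update() adds the delta of 600 to event->count, and
 * the fresh snapshot becomes the base for the next update.
 */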
static void imc_event_start(struct perf_event *event, int flags)
{
/*
* In-Memory Collection counters are free-flowing: the hardware or
* microcode keeps updating the counter value in memory. To get the
* event counter value, we take a snapshot here and calculate the
* delta at a later point.
*/
imc_read_counter(event);
}
static void imc_event_stop(struct perf_event *event, int flags)
{
/*
* Take a snapshot and calculate the delta and update
* the event counter values.
*/
imc_event_update(event);
}
static int imc_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
imc_event_start(event, flags);
return 0;
}
static int thread_imc_event_add(struct perf_event *event, int flags)
{
int core_id;
struct imc_pmu_ref *ref;
u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, smp_processor_id());
if (flags & PERF_EF_START)
imc_event_start(event, flags);
if (!is_core_imc_mem_inited(smp_processor_id()))
return -EINVAL;
core_id = smp_processor_id() / threads_per_core;
ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE;
mtspr(SPRN_LDBAR, ldbar_value);
/*
* imc pmus are enabled only when they are used.
* See if this is triggered for the first time.
* If yes, take the lock and enable the counters.
* If not, just increment the count in ref count struct.
*/
ref = &core_imc_refc[core_id];
if (!ref)
return -EINVAL;
spin_lock(&ref->lock);
if (ref->refc == 0) {
if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
get_hard_smp_processor_id(smp_processor_id()))) {
spin_unlock(&ref->lock);
pr_err("thread-imc: Unable to start the counter for core %d\n", core_id);
return -EINVAL;
}
}
++ref->refc;
spin_unlock(&ref->lock);
return 0;
}
static void thread_imc_event_del(struct perf_event *event, int flags)
{
int core_id;
struct imc_pmu_ref *ref;
core_id = smp_processor_id() / threads_per_core;
ref = &core_imc_refc[core_id];
if (!ref) {
pr_debug("imc: Failed to get event reference count\n");
return;
}
spin_lock(&ref->lock);
ref->refc--;
if (ref->refc == 0) {
if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
get_hard_smp_processor_id(smp_processor_id()))) {
spin_unlock(&ref->lock);
pr_err("thread-imc: Unable to stop the counters for core %d\n", core_id);
return;
}
} else if (ref->refc < 0) {
ref->refc = 0;
}
spin_unlock(&ref->lock);
/* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
/*
* Take a snapshot and calculate the delta and update
* the event counter values.
*/
imc_event_update(event);
}
/*
* Allocate a page of memory for each cpu, and load LDBAR with 0.
*/
static int trace_imc_mem_alloc(int cpu_id, int size)
{
u64 *local_mem = per_cpu(trace_imc_mem, cpu_id);
int phys_id = cpu_to_node(cpu_id), rc = 0;
int core_id = (cpu_id / threads_per_core);
if (!local_mem) {
struct page *page;
page = alloc_pages_node(phys_id,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size));
if (!page)
return -ENOMEM;
local_mem = page_address(page);
per_cpu(trace_imc_mem, cpu_id) = local_mem;
/* Initialise the counters for trace mode */
rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_TRACE, __pa((void *)local_mem),
get_hard_smp_processor_id(cpu_id));
if (rc) {
pr_info("IMC:opal init failed for trace imc\n");
return rc;
}
}
trace_imc_refc[core_id].id = core_id;
spin_lock_init(&trace_imc_refc[core_id].lock);
mtspr(SPRN_LDBAR, 0);
return 0;
}
static int ppc_trace_imc_cpu_online(unsigned int cpu)
{
return trace_imc_mem_alloc(cpu, trace_imc_mem_size);
}
static int ppc_trace_imc_cpu_offline(unsigned int cpu)
{
/*
* No need to set bit 0 of LDBAR to zero, as
* it is set to zero for imc trace-mode
*
* Reduce the refc if any trace-imc event running
* on this cpu.
*/
spin_lock(&imc_global_refc.lock);
if (imc_global_refc.id == IMC_DOMAIN_TRACE)
imc_global_refc.refc--;
spin_unlock(&imc_global_refc.lock);
return 0;
}
static int trace_imc_cpu_init(void)
{
return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
"perf/powerpc/imc_trace:online",
ppc_trace_imc_cpu_online,
ppc_trace_imc_cpu_offline);
}
static u64 get_trace_imc_event_base_addr(void)
{
return (u64)per_cpu(trace_imc_mem, smp_processor_id());
}
/*
* Function to parse trace-imc data obtained
* and to prepare the perf sample.
*/
static int trace_imc_prepare_sample(struct trace_imc_data *mem,
struct perf_sample_data *data,
u64 *prev_tb,
struct perf_event_header *header,
struct perf_event *event)
{
/* Sanity checks for a valid record */
if (be64_to_cpu(READ_ONCE(mem->tb1)) > *prev_tb)
*prev_tb = be64_to_cpu(READ_ONCE(mem->tb1));
else
return -EINVAL;
if ((be64_to_cpu(READ_ONCE(mem->tb1)) & IMC_TRACE_RECORD_TB1_MASK) !=
be64_to_cpu(READ_ONCE(mem->tb2)))
return -EINVAL;
/* Prepare perf sample */
data->ip = be64_to_cpu(READ_ONCE(mem->ip));
data->period = event->hw.last_period;
header->type = PERF_RECORD_SAMPLE;
header->size = sizeof(*header) + event->header_size;
header->misc = 0;
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) {
case 0: /* when MSR HV and PR not set in the trace-record */
header->misc |= PERF_RECORD_MISC_GUEST_KERNEL;
break;
case 1: /* MSR HV is 0 and PR is 1 */
header->misc |= PERF_RECORD_MISC_GUEST_USER;
break;
case 2: /* MSR HV is 1 and PR is 0 */
header->misc |= PERF_RECORD_MISC_KERNEL;
break;
case 3: /* MSR HV is 1 and PR is 1 */
header->misc |= PERF_RECORD_MISC_USER;
break;
default:
pr_info("IMC: Unable to set the flag based on MSR bits\n");
break;
}
} else {
if (is_kernel_addr(data->ip))
header->misc |= PERF_RECORD_MISC_KERNEL;
else
header->misc |= PERF_RECORD_MISC_USER;
}
perf_event_header__init_id(header, data, event);
return 0;
}
static void dump_trace_imc_data(struct perf_event *event)
{
struct trace_imc_data *mem;
int i, ret;
u64 prev_tb = 0;
mem = (struct trace_imc_data *)get_trace_imc_event_base_addr();
for (i = 0; i < (trace_imc_mem_size / sizeof(struct trace_imc_data));
i++, mem++) {
struct perf_sample_data data;
struct perf_event_header header;
ret = trace_imc_prepare_sample(mem, &data, &prev_tb, &header, event);
if (ret) /* Exit, if not a valid record */
break;
else {
/* If this is a valid record, create the sample */
struct perf_output_handle handle;
if (perf_output_begin(&handle, &data, event, header.size))
return;
perf_output_sample(&handle, &header, &data, event);
perf_output_end(&handle);
}
}
}
static int trace_imc_event_add(struct perf_event *event, int flags)
{
int core_id = smp_processor_id() / threads_per_core;
struct imc_pmu_ref *ref = NULL;
u64 local_mem, ldbar_value;
/* Set trace-imc bit in ldbar and load ldbar with per-thread memory address */
local_mem = get_trace_imc_event_base_addr();
ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;
/* trace-imc reference count */
if (trace_imc_refc)
ref = &trace_imc_refc[core_id];
if (!ref) {
pr_debug("imc: Failed to get the event reference count\n");
return -EINVAL;
}
mtspr(SPRN_LDBAR, ldbar_value);
spin_lock(&ref->lock);
if (ref->refc == 0) {
if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
get_hard_smp_processor_id(smp_processor_id()))) {
spin_unlock(&ref->lock);
pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
return -EINVAL;
}
}
++ref->refc;
spin_unlock(&ref->lock);
return 0;
}
static void trace_imc_event_read(struct perf_event *event)
{
return;
}
static void trace_imc_event_stop(struct perf_event *event, int flags)
{
u64 local_mem = get_trace_imc_event_base_addr();
dump_trace_imc_data(event);
memset((void *)local_mem, 0, sizeof(u64));
}
static void trace_imc_event_start(struct perf_event *event, int flags)
{
return;
}
static void trace_imc_event_del(struct perf_event *event, int flags)
{
int core_id = smp_processor_id() / threads_per_core;
struct imc_pmu_ref *ref = NULL;
if (trace_imc_refc)
ref = &trace_imc_refc[core_id];
if (!ref) {
pr_debug("imc: Failed to get event reference count\n");
return;
}
spin_lock(&ref->lock);
ref->refc--;
if (ref->refc == 0) {
if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
get_hard_smp_processor_id(smp_processor_id()))) {
spin_unlock(&ref->lock);
pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
return;
}
} else if (ref->refc < 0) {
ref->refc = 0;
}
spin_unlock(&ref->lock);
trace_imc_event_stop(event, flags);
}
static int trace_imc_event_init(struct perf_event *event)
{
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (!perfmon_capable())
return -EACCES;
/* Return if this is a counting event */
if (event->attr.sample_period == 0)
return -ENOENT;
/*
* Take the global lock, and make sure
* no other thread is running any core/thread imc
* events
*/
spin_lock(&imc_global_refc.lock);
if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
/*
* No core/thread imc events are running in the
* system, so set the refc.id to trace-imc.
*/
imc_global_refc.id = IMC_DOMAIN_TRACE;
imc_global_refc.refc++;
} else {
spin_unlock(&imc_global_refc.lock);
return -EBUSY;
}
spin_unlock(&imc_global_refc.lock);
event->hw.idx = -1;
/*
 * There can only be a single PMU for perf_hw_context events, and that is
 * the core PMU. Hence use "perf_sw_context" for trace_imc.
 */
event->pmu->task_ctx_nr = perf_sw_context;
event->destroy = reset_global_refc;
return 0;
}
/* update_pmu_ops : Populate the appropriate operations for "pmu" */
static int update_pmu_ops(struct imc_pmu *pmu)
{
pmu->pmu.task_ctx_nr = perf_invalid_context;
pmu->pmu.add = imc_event_add;
pmu->pmu.del = imc_event_stop;
pmu->pmu.start = imc_event_start;
pmu->pmu.stop = imc_event_stop;
pmu->pmu.read = imc_event_update;
pmu->pmu.attr_groups = pmu->attr_groups;
pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;
switch (pmu->domain) {
case IMC_DOMAIN_NEST:
pmu->pmu.event_init = nest_imc_event_init;
pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
break;
case IMC_DOMAIN_CORE:
pmu->pmu.event_init = core_imc_event_init;
pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
break;
case IMC_DOMAIN_THREAD:
pmu->pmu.event_init = thread_imc_event_init;
pmu->pmu.add = thread_imc_event_add;
pmu->pmu.del = thread_imc_event_del;
pmu->pmu.start_txn = thread_imc_pmu_start_txn;
pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn;
pmu->pmu.commit_txn = thread_imc_pmu_commit_txn;
break;
case IMC_DOMAIN_TRACE:
pmu->pmu.event_init = trace_imc_event_init;
pmu->pmu.add = trace_imc_event_add;
pmu->pmu.del = trace_imc_event_del;
pmu->pmu.start = trace_imc_event_start;
pmu->pmu.stop = trace_imc_event_stop;
pmu->pmu.read = trace_imc_event_read;
pmu->attr_groups[IMC_FORMAT_ATTR] = &trace_imc_format_group;
break;
default:
break;
}
return 0;
}
/* init_nest_pmu_ref: Initialize the imc_pmu_ref struct for all the nodes */
static int init_nest_pmu_ref(void)
{
int nid, i, cpu;
nest_imc_refc = kcalloc(num_possible_nodes(), sizeof(*nest_imc_refc),
GFP_KERNEL);
if (!nest_imc_refc)
return -ENOMEM;
i = 0;
for_each_node(nid) {
/*
 * Initialize the lock, which is taken to avoid races while
 * tracking the number of sessions using the chip's nest pmu units.
 */
spin_lock_init(&nest_imc_refc[i].lock);
/*
 * Init the "id" with the node_id. Variable "i" is initialized to 0
 * and used as an index into the array. "i" will not run off the end
 * of the array, since "for_each_node" loops over at most "N_POSSIBLE"
 * nodes.
 */
nest_imc_refc[i++].id = nid;
}
/*
* Loop to init the per_cpu "local_nest_imc_refc" with the proper
* "nest_imc_refc" index. This makes get_nest_pmu_ref() alot simple.
*/
for_each_possible_cpu(cpu) {
nid = cpu_to_node(cpu);
for (i = 0; i < num_possible_nodes(); i++) {
if (nest_imc_refc[i].id == nid) {
per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
break;
}
}
}
return 0;
}
static void cleanup_all_core_imc_memory(void)
{
int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
struct imc_mem_info *ptr = core_imc_pmu->mem_info;
int size = core_imc_pmu->counter_mem_size;
/* mem_info will never be NULL */
for (i = 0; i < nr_cores; i++) {
if (ptr[i].vbase)
free_pages((u64)ptr[i].vbase, get_order(size));
}
kfree(ptr);
kfree(core_imc_refc);
}
static void thread_imc_ldbar_disable(void *dummy)
{
/*
* By setting 0th bit of LDBAR to zero, we disable thread-imc
* updates to memory.
*/
mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
}
void thread_imc_disable(void)
{
on_each_cpu(thread_imc_ldbar_disable, NULL, 1);
}
static void cleanup_all_thread_imc_memory(void)
{
int i, order = get_order(thread_imc_mem_size);
for_each_online_cpu(i) {
if (per_cpu(thread_imc_mem, i))
free_pages((u64)per_cpu(thread_imc_mem, i), order);
}
}
static void cleanup_all_trace_imc_memory(void)
{
int i, order = get_order(trace_imc_mem_size);
for_each_online_cpu(i) {
if (per_cpu(trace_imc_mem, i))
free_pages((u64)per_cpu(trace_imc_mem, i), order);
}
kfree(trace_imc_refc);
}
/* Function to free the attr_groups which are dynamically allocated */
static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
{
if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
}
/*
* Common function to unregister cpu hotplug callback and
* free the memory.
* TODO: Need to handle pmu unregistering, which will be
* done in followup series.
*/
static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
{
if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
mutex_lock(&nest_init_lock);
if (nest_pmus == 1) {
cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
kfree(nest_imc_refc);
kfree(per_nest_pmu_arr);
per_nest_pmu_arr = NULL;
}
if (nest_pmus > 0)
nest_pmus--;
mutex_unlock(&nest_init_lock);
}
/* Free core_imc memory */
if (pmu_ptr->domain == IMC_DOMAIN_CORE) {
cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE);
cleanup_all_core_imc_memory();
}
/* Free thread_imc memory */
if (pmu_ptr->domain == IMC_DOMAIN_THREAD) {
cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE);
cleanup_all_thread_imc_memory();
}
if (pmu_ptr->domain == IMC_DOMAIN_TRACE) {
cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE);
cleanup_all_trace_imc_memory();
}
}
/*
* Function to unregister thread-imc if core-imc
* is not registered.
*/
void unregister_thread_imc(void)
{
imc_common_cpuhp_mem_free(thread_imc_pmu);
imc_common_mem_free(thread_imc_pmu);
perf_pmu_unregister(&thread_imc_pmu->pmu);
}
/*
 * imc_mem_init : Function to support memory allocation for the
 * nest, core, thread and trace imc pmus.
 */
static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
int pmu_index)
{
const char *s;
int nr_cores, cpu, res = -ENOMEM;
if (of_property_read_string(parent, "name", &s))
return -ENODEV;
switch (pmu_ptr->domain) {
case IMC_DOMAIN_NEST:
/* Update the pmu name */
pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
if (!pmu_ptr->pmu.name)
goto err;
/* Needed for hotplug/migration */
if (!per_nest_pmu_arr) {
per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
sizeof(struct imc_pmu *),
GFP_KERNEL);
if (!per_nest_pmu_arr)
goto err;
}
per_nest_pmu_arr[pmu_index] = pmu_ptr;
break;
case IMC_DOMAIN_CORE:
/* Update the pmu name */
pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
if (!pmu_ptr->pmu.name)
goto err;
nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
GFP_KERNEL);
if (!pmu_ptr->mem_info)
goto err;
core_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
GFP_KERNEL);
if (!core_imc_refc) {
kfree(pmu_ptr->mem_info);
goto err;
}
core_imc_pmu = pmu_ptr;
break;
case IMC_DOMAIN_THREAD:
/* Update the pmu name */
pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
if (!pmu_ptr->pmu.name)
goto err;
thread_imc_mem_size = pmu_ptr->counter_mem_size;
for_each_online_cpu(cpu) {
res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
if (res) {
cleanup_all_thread_imc_memory();
goto err;
}
}
thread_imc_pmu = pmu_ptr;
break;
case IMC_DOMAIN_TRACE:
/* Update the pmu name */
pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
if (!pmu_ptr->pmu.name)
return -ENOMEM;
nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
trace_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
GFP_KERNEL);
if (!trace_imc_refc)
return -ENOMEM;
trace_imc_mem_size = pmu_ptr->counter_mem_size;
for_each_online_cpu(cpu) {
res = trace_imc_mem_alloc(cpu, trace_imc_mem_size);
if (res) {
cleanup_all_trace_imc_memory();
goto err;
}
}
break;
default:
return -EINVAL;
}
return 0;
err:
return res;
}
/*
* init_imc_pmu : Setup and register the IMC pmu device.
*
* @parent: Device tree unit node
* @pmu_ptr: memory allocated for this pmu
* @pmu_idx: Count of nest pmus registered
*
* init_imc_pmu() sets up the pmu cpumask and registers a cpu hotplug callback.
* It handles failure cases and frees memory accordingly.
*/
int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_idx)
{
int ret;
ret = imc_mem_init(pmu_ptr, parent, pmu_idx);
if (ret)
goto err_free_mem;
switch (pmu_ptr->domain) {
case IMC_DOMAIN_NEST:
/*
 * A nest imc pmu needs only one cpu per chip, so we initialize the
 * cpumask for the first nest imc pmu and reuse it for the rest.
 * To handle cpuhotplug callback unregistration, we track the
 * number of nest pmus in "nest_pmus".
 */
mutex_lock(&nest_init_lock);
if (nest_pmus == 0) {
ret = init_nest_pmu_ref();
if (ret) {
mutex_unlock(&nest_init_lock);
kfree(per_nest_pmu_arr);
per_nest_pmu_arr = NULL;
goto err_free_mem;
}
/* Register for cpu hotplug notification. */
ret = nest_pmu_cpumask_init();
if (ret) {
mutex_unlock(&nest_init_lock);
kfree(nest_imc_refc);
kfree(per_nest_pmu_arr);
per_nest_pmu_arr = NULL;
goto err_free_mem;
}
}
nest_pmus++;
mutex_unlock(&nest_init_lock);
break;
case IMC_DOMAIN_CORE:
ret = core_imc_pmu_cpumask_init();
if (ret) {
cleanup_all_core_imc_memory();
goto err_free_mem;
}
break;
case IMC_DOMAIN_THREAD:
ret = thread_imc_cpu_init();
if (ret) {
cleanup_all_thread_imc_memory();
goto err_free_mem;
}
break;
case IMC_DOMAIN_TRACE:
ret = trace_imc_cpu_init();
if (ret) {
cleanup_all_trace_imc_memory();
goto err_free_mem;
}
break;
default:
return -EINVAL; /* Unknown domain */
}
ret = update_events_in_group(parent, pmu_ptr);
if (ret)
goto err_free_cpuhp_mem;
ret = update_pmu_ops(pmu_ptr);
if (ret)
goto err_free_cpuhp_mem;
ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
if (ret)
goto err_free_cpuhp_mem;
pr_debug("%s performance monitor hardware support registered\n",
pmu_ptr->pmu.name);
return 0;
err_free_cpuhp_mem:
imc_common_cpuhp_mem_free(pmu_ptr);
err_free_mem:
imc_common_mem_free(pmu_ptr);
return ret;
}
| linux-master | arch/powerpc/perf/imc-pmu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Performance counter support for MPC7450-family processors.
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*/
#include <linux/string.h>
#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>
#define N_COUNTER 6 /* Number of hardware counters */
#define MAX_ALT 3 /* Maximum number of event alternative codes */
/*
* Bits in event code for MPC7450 family
*/
#define PM_THRMULT_MSKS 0x40000
#define PM_THRESH_SH 12
#define PM_THRESH_MSK 0x3f
#define PM_PMC_SH 8
#define PM_PMC_MSK 7
#define PM_PMCSEL_MSK 0x7f
/*
* Classify events according to how specific their PMC requirements are.
* Result is:
* 0: can go on any PMC
* 1: can go on PMCs 1-4
* 2: can go on PMCs 1,2,4
* 3: can go on PMCs 1 or 2
* 4: can only go on one PMC
* -1: event code is invalid
*/
#define N_CLASSES 5
static int mpc7450_classify_event(u32 event)
{
int pmc;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
if (pmc > N_COUNTER)
return -1;
return 4;
}
event &= PM_PMCSEL_MSK;
if (event <= 1)
return 0;
if (event <= 7)
return 1;
if (event <= 13)
return 2;
if (event <= 22)
return 3;
return -1;
}
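/*
 * Worked examples of the classification above (raw codes used purely to
 * illustrate the arithmetic): event 0x217 has a pmc field of
 * (0x217 >> PM_PMC_SH) & PM_PMC_MSK = 2, so it is tied to PMC2 and
 * classified as 4 (one specific PMC); a hypothetical code 0x005 has a
 * zero pmc field and PMCSEL = 5 <= 7, so it is class 1 and may be
 * scheduled on any of PMC1-4.
 */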
/*
* Events using threshold and possible threshold scale:
* code scale? name
* 11e N PM_INSTQ_EXCEED_CYC
* 11f N PM_ALTV_IQ_EXCEED_CYC
* 128 Y PM_DTLB_SEARCH_EXCEED_CYC
* 12b Y PM_LD_MISS_EXCEED_L1_CYC
* 220 N PM_CQ_EXCEED_CYC
* 30c N PM_GPR_RB_EXCEED_CYC
* 30d ? PM_FPR_IQ_EXCEED_CYC ?
* 311 Y PM_ITLB_SEARCH_EXCEED
* 410 N PM_GPR_IQ_EXCEED_CYC
*/
/*
* Return use of threshold and threshold scale bits:
* 0 = uses neither, 1 = uses threshold, 2 = uses both
*/
static int mpc7450_threshold_use(u32 event)
{
int pmc, sel;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
sel = event & PM_PMCSEL_MSK;
switch (pmc) {
case 1:
if (sel == 0x1e || sel == 0x1f)
return 1;
if (sel == 0x28 || sel == 0x2b)
return 2;
break;
case 2:
if (sel == 0x20)
return 1;
break;
case 3:
if (sel == 0xc || sel == 0xd)
return 1;
if (sel == 0x11)
return 2;
break;
case 4:
if (sel == 0x10)
return 1;
break;
}
return 0;
}
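/*
 * Example, matching the table of threshold events above: an event with
 * pmc = 1 and PMCSEL = 0x28 (raw code 0x128, listed as using both a
 * threshold and a threshold scale) hits the "sel == 0x28" case and
 * returns 2, while pmc = 2 with PMCSEL = 0x20 (raw code 0x220) returns 1,
 * since it uses a threshold but no scale.
 */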
/*
* Layout of constraint bits:
* 33222222222211111111110000000000
* 10987654321098765432109876543210
* |< >< > < > < ><><><><><><>
* TS TV G4 G3 G2P6P5P4P3P2P1
*
* P1 - P6
* 0 - 11: Count of events needing PMC1 .. PMC6
*
* G2
* 12 - 14: Count of events needing PMC1 or PMC2
*
* G3
* 16 - 18: Count of events needing PMC1, PMC2 or PMC4
*
* G4
* 20 - 23: Count of events needing PMC1, PMC2, PMC3 or PMC4
*
* TV
* 24 - 29: Threshold value requested
*
* TS
* 30: Threshold scale value requested
*/
static u32 pmcbits[N_COUNTER][2] = {
{ 0x00844002, 0x00111001 }, /* PMC1 mask, value: P1,G2,G3,G4 */
{ 0x00844008, 0x00111004 }, /* PMC2: P2,G2,G3,G4 */
{ 0x00800020, 0x00100010 }, /* PMC3: P3,G4 */
{ 0x00840080, 0x00110040 }, /* PMC4: P4,G3,G4 */
{ 0x00000200, 0x00000100 }, /* PMC5: P5 */
{ 0x00000800, 0x00000400 } /* PMC6: P6 */
};
static u32 classbits[N_CLASSES - 1][2] = {
{ 0x00000000, 0x00000000 }, /* class 0: no constraint */
{ 0x00800000, 0x00100000 }, /* class 1: G4 */
{ 0x00040000, 0x00010000 }, /* class 2: G3 */
{ 0x00004000, 0x00001000 }, /* class 3: G2 */
};
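/*
 * Decoding examples for the constraint words above (illustration only;
 * the overflow arithmetic itself is done by the generic powerpc perf core
 * using add_fields/test_adder):
 * - pmcbits[2] (PMC3) has value 0x00100010: a count of 1 in the P3 field
 *   (bits 4-5) and a count of 1 in the G4 field (bits 20-23); its mask
 *   0x00800020 selects the top bit of each of those fields, so an
 *   over-committed field is detected when per-event values are summed.
 * - classbits[1] (class 1, usable on PMC1-4) only bumps the G4 count:
 *   value 0x00100000 with mask 0x00800000.
 */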
static int mpc7450_get_constraint(u64 event, unsigned long *maskp,
unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, class;
u32 mask, value;
int thresh, tuse;
class = mpc7450_classify_event(event);
if (class < 0)
return -1;
if (class == 4) {
pmc = ((unsigned int)event >> PM_PMC_SH) & PM_PMC_MSK;
mask = pmcbits[pmc - 1][0];
value = pmcbits[pmc - 1][1];
} else {
mask = classbits[class][0];
value = classbits[class][1];
}
tuse = mpc7450_threshold_use(event);
if (tuse) {
thresh = ((unsigned int)event >> PM_THRESH_SH) & PM_THRESH_MSK;
mask |= 0x3f << 24;
value |= thresh << 24;
if (tuse == 2) {
mask |= 0x40000000;
if ((unsigned int)event & PM_THRMULT_MSKS)
value |= 0x40000000;
}
}
*maskp = mask;
*valp = value;
return 0;
}
static const unsigned int event_alternatives[][MAX_ALT] = {
{ 0x217, 0x317 }, /* PM_L1_DCACHE_MISS */
{ 0x418, 0x50f, 0x60f }, /* PM_SNOOP_RETRY */
{ 0x502, 0x602 }, /* PM_L2_HIT */
{ 0x503, 0x603 }, /* PM_L3_HIT */
{ 0x504, 0x604 }, /* PM_L2_ICACHE_MISS */
{ 0x505, 0x605 }, /* PM_L3_ICACHE_MISS */
{ 0x506, 0x606 }, /* PM_L2_DCACHE_MISS */
{ 0x507, 0x607 }, /* PM_L3_DCACHE_MISS */
{ 0x50a, 0x623 }, /* PM_LD_HIT_L3 */
{ 0x50b, 0x624 }, /* PM_ST_HIT_L3 */
{ 0x50d, 0x60d }, /* PM_L2_TOUCH_HIT */
{ 0x50e, 0x60e }, /* PM_L3_TOUCH_HIT */
{ 0x512, 0x612 }, /* PM_INT_LOCAL */
{ 0x513, 0x61d }, /* PM_L2_MISS */
{ 0x514, 0x61e }, /* PM_L3_MISS */
};
/*
* Scan the alternatives table for a match and return the
* index into the alternatives table if found, else -1.
*/
static int find_alternative(u32 event)
{
int i, j;
for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
if (event < event_alternatives[i][0])
break;
for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
if (event == event_alternatives[i][j])
return i;
}
return -1;
}
static int mpc7450_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
int i, j, nalt = 1;
u32 ae;
alt[0] = event;
nalt = 1;
i = find_alternative((u32)event);
if (i >= 0) {
for (j = 0; j < MAX_ALT; ++j) {
ae = event_alternatives[i][j];
if (ae && ae != (u32)event)
alt[nalt++] = ae;
}
}
return nalt;
}
/*
* Bitmaps of which PMCs each class can use for classes 0 - 3.
* Bit i is set if PMC i+1 is usable.
*/
static const u8 classmap[N_CLASSES] = {
0x3f, 0x0f, 0x0b, 0x03, 0
};
/* Bit position and width of each PMCSEL field */
static const int pmcsel_shift[N_COUNTER] = {
6, 0, 27, 22, 17, 11
};
static const u32 pmcsel_mask[N_COUNTER] = {
0x7f, 0x3f, 0x1f, 0x1f, 0x1f, 0x3f
};
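/*
 * For example, reading the two tables above: PMC1's PMCSEL field is
 * 7 bits wide at shift 6 and PMC2's is 6 bits at shift 0, both living in
 * MMCR0, while PMC3-PMC6 use 5- or 6-bit fields in MMCR1. This is why
 * mpc7450_compute_mmcr() below ORs the shifted selector into mmcr0 for
 * pmc <= 2 and into mmcr1 otherwise.
 */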
/*
* Compute MMCR0/1/2 values for a set of events.
*/
static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[],
struct mmcr_regs *mmcr,
struct perf_event *pevents[],
u32 flags __maybe_unused)
{
u8 event_index[N_CLASSES][N_COUNTER];
int n_classevent[N_CLASSES];
int i, j, class, tuse;
u32 pmc_inuse = 0, pmc_avail;
u32 mmcr0 = 0, mmcr1 = 0, mmcr2 = 0;
u32 ev, pmc, thresh;
if (n_ev > N_COUNTER)
return -1;
/* First pass: count usage in each class */
for (i = 0; i < N_CLASSES; ++i)
n_classevent[i] = 0;
for (i = 0; i < n_ev; ++i) {
class = mpc7450_classify_event(event[i]);
if (class < 0)
return -1;
j = n_classevent[class]++;
event_index[class][j] = i;
}
/* Second pass: allocate PMCs from most specific event to least */
for (class = N_CLASSES - 1; class >= 0; --class) {
for (i = 0; i < n_classevent[class]; ++i) {
ev = event[event_index[class][i]];
if (class == 4) {
pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc_inuse & (1 << (pmc - 1)))
return -1;
} else {
/* Find a suitable PMC */
pmc_avail = classmap[class] & ~pmc_inuse;
if (!pmc_avail)
return -1;
pmc = ffs(pmc_avail);
}
pmc_inuse |= 1 << (pmc - 1);
tuse = mpc7450_threshold_use(ev);
if (tuse) {
thresh = (ev >> PM_THRESH_SH) & PM_THRESH_MSK;
mmcr0 |= thresh << 16;
if (tuse == 2 && (ev & PM_THRMULT_MSKS))
mmcr2 = 0x80000000;
}
ev &= pmcsel_mask[pmc - 1];
ev <<= pmcsel_shift[pmc - 1];
if (pmc <= 2)
mmcr0 |= ev;
else
mmcr1 |= ev;
hwc[event_index[class][i]] = pmc - 1;
}
}
if (pmc_inuse & 1)
mmcr0 |= MMCR0_PMC1CE;
if (pmc_inuse & 0x3e)
mmcr0 |= MMCR0_PMCnCE;
/* Return MMCRx values */
mmcr->mmcr0 = mmcr0;
mmcr->mmcr1 = mmcr1;
mmcr->mmcr2 = mmcr2;
/*
* 32-bit doesn't have an MMCRA and uses SPRN_MMCR2 to define
* SPRN_MMCRA. So assign mmcra of cpu_hw_events with `mmcr2`
* value to ensure that any write to this SPRN_MMCRA will
* use mmcr2 value.
*/
mmcr->mmcra = mmcr2;
return 0;
}
/*
* Disable counting by a PMC.
* Note that the pmc argument is 0-based here, not 1-based.
*/
static void mpc7450_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
if (pmc <= 1)
mmcr->mmcr0 &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
else
mmcr->mmcr1 &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
}
static int mpc7450_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = 1,
[PERF_COUNT_HW_INSTRUCTIONS] = 2,
[PERF_COUNT_HW_CACHE_MISSES] = 0x217, /* PM_L1_DCACHE_MISS */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x122, /* PM_BR_CMPL */
[PERF_COUNT_HW_BRANCH_MISSES] = 0x41c, /* PM_BR_MPRED */
};
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
* Table of generalized cache-related events.
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
static u64 mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x225 },
[C(OP_WRITE)] = { 0, 0x227 },
[C(OP_PREFETCH)] = { 0, 0 },
},
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x129, 0x115 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { 0x634, 0 },
},
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0 },
[C(OP_WRITE)] = { 0, 0 },
[C(OP_PREFETCH)] = { 0, 0 },
},
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x312 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x223 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x122, 0x41c },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { -1, -1 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
};
struct power_pmu mpc7450_pmu = {
.name = "MPC7450 family",
.n_counter = N_COUNTER,
.max_alternatives = MAX_ALT,
.add_fields = 0x00111555ul,
.test_adder = 0x00301000ul,
.compute_mmcr = mpc7450_compute_mmcr,
.get_constraint = mpc7450_get_constraint,
.get_alternatives = mpc7450_get_alternatives,
.disable_pmc = mpc7450_disable_pmc,
.n_generic = ARRAY_SIZE(mpc7450_generic_events),
.generic_events = mpc7450_generic_events,
.cache_events = &mpc7450_cache_events,
};
static int __init init_mpc7450_pmu(void)
{
if (!pvr_version_is(PVR_VER_7450) && !pvr_version_is(PVR_VER_7455) &&
!pvr_version_is(PVR_VER_7447) && !pvr_version_is(PVR_VER_7447A) &&
!pvr_version_is(PVR_VER_7448))
return -ENODEV;
return register_power_pmu(&mpc7450_pmu);
}
early_initcall(init_mpc7450_pmu);
| linux-master | arch/powerpc/perf/mpc7450-pmu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016 Anju T, IBM Corporation.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>
#include <asm/perf_regs.h>
u64 PERF_REG_EXTENDED_MASK;
#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)
#define REG_RESERVED (~(PERF_REG_EXTENDED_MASK | PERF_REG_PMU_MASK))
static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_R0, gpr[0]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R1, gpr[1]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R2, gpr[2]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R3, gpr[3]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R4, gpr[4]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R5, gpr[5]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R6, gpr[6]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R7, gpr[7]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R8, gpr[8]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R9, gpr[9]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R10, gpr[10]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R11, gpr[11]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R12, gpr[12]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R13, gpr[13]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R14, gpr[14]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R15, gpr[15]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R16, gpr[16]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R17, gpr[17]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R18, gpr[18]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R19, gpr[19]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R20, gpr[20]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R21, gpr[21]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R22, gpr[22]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R23, gpr[23]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R24, gpr[24]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R25, gpr[25]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R26, gpr[26]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R27, gpr[27]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R28, gpr[28]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R29, gpr[29]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R30, gpr[30]),
PT_REGS_OFFSET(PERF_REG_POWERPC_R31, gpr[31]),
PT_REGS_OFFSET(PERF_REG_POWERPC_NIP, nip),
PT_REGS_OFFSET(PERF_REG_POWERPC_MSR, msr),
PT_REGS_OFFSET(PERF_REG_POWERPC_ORIG_R3, orig_gpr3),
PT_REGS_OFFSET(PERF_REG_POWERPC_CTR, ctr),
PT_REGS_OFFSET(PERF_REG_POWERPC_LINK, link),
PT_REGS_OFFSET(PERF_REG_POWERPC_XER, xer),
PT_REGS_OFFSET(PERF_REG_POWERPC_CCR, ccr),
#ifdef CONFIG_PPC64
PT_REGS_OFFSET(PERF_REG_POWERPC_SOFTE, softe),
#else
PT_REGS_OFFSET(PERF_REG_POWERPC_SOFTE, mq),
#endif
PT_REGS_OFFSET(PERF_REG_POWERPC_TRAP, trap),
PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};
/* Function to return the extended register values */
static u64 get_ext_regs_value(int idx)
{
switch (idx) {
case PERF_REG_POWERPC_PMC1 ... PERF_REG_POWERPC_PMC6:
return get_pmcs_ext_regs(idx - PERF_REG_POWERPC_PMC1);
case PERF_REG_POWERPC_MMCR0:
return mfspr(SPRN_MMCR0);
case PERF_REG_POWERPC_MMCR1:
return mfspr(SPRN_MMCR1);
case PERF_REG_POWERPC_MMCR2:
return mfspr(SPRN_MMCR2);
#ifdef CONFIG_PPC64
case PERF_REG_POWERPC_MMCR3:
return mfspr(SPRN_MMCR3);
case PERF_REG_POWERPC_SIER2:
return mfspr(SPRN_SIER2);
case PERF_REG_POWERPC_SIER3:
return mfspr(SPRN_SIER3);
case PERF_REG_POWERPC_SDAR:
return mfspr(SPRN_SDAR);
#endif
case PERF_REG_POWERPC_SIAR:
return mfspr(SPRN_SIAR);
default: return 0;
}
}
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
if (idx == PERF_REG_POWERPC_SIER &&
(IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
IS_ENABLED(CONFIG_PPC32) ||
!is_sier_available()))
return 0;
if (idx == PERF_REG_POWERPC_MMCRA &&
(IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
IS_ENABLED(CONFIG_PPC32)))
return 0;
if (idx >= PERF_REG_POWERPC_MAX && idx < PERF_REG_EXTENDED_MAX)
return get_ext_regs_value(idx);
/*
* If the idx is referring to value beyond the
* supported registers, return 0 with a warning
*/
if (WARN_ON_ONCE(idx >= PERF_REG_EXTENDED_MAX))
return 0;
return regs_get_register(regs, pt_regs_offset[idx]);
}
int perf_reg_validate(u64 mask)
{
if (!mask || mask & REG_RESERVED)
return -EINVAL;
return 0;
}
u64 perf_reg_abi(struct task_struct *task)
{
if (is_tsk_32bit_task(task))
return PERF_SAMPLE_REGS_ABI_32;
else
return PERF_SAMPLE_REGS_ABI_64;
}
void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
PERF_SAMPLE_REGS_ABI_NONE;
}
| linux-master | arch/powerpc/perf/perf_regs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Performance counter support for POWER5+/++ (not POWER5) processors.
*
* Copyright 2009 Paul Mackerras, IBM Corporation.
*/
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
#include "internal.h"
/*
* Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3)
*/
#define PM_PMC_SH 20 /* PMC number (1-based) for direct events */
#define PM_PMC_MSK 0xf
#define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH)
#define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */
#define PM_UNIT_MSK 0xf
#define PM_BYTE_SH 12 /* Byte number of event bus to use */
#define PM_BYTE_MSK 7
#define PM_GRS_SH 8 /* Storage subsystem mux select */
#define PM_GRS_MSK 7
#define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */
#define PM_PMCSEL_MSK 0x7f
/* Values in PM_UNIT field */
#define PM_FPU 0
#define PM_ISU0 1
#define PM_IFU 2
#define PM_ISU1 3
#define PM_IDU 4
#define PM_ISU0_ALT 6
#define PM_GRS 7
#define PM_LSU0 8
#define PM_LSU1 0xc
#define PM_LASTUNIT 0xc
/*
* Bits in MMCR1 for POWER5+
*/
#define MMCR1_TTM0SEL_SH 62
#define MMCR1_TTM1SEL_SH 60
#define MMCR1_TTM2SEL_SH 58
#define MMCR1_TTM3SEL_SH 56
#define MMCR1_TTMSEL_MSK 3
#define MMCR1_TD_CP_DBG0SEL_SH 54
#define MMCR1_TD_CP_DBG1SEL_SH 52
#define MMCR1_TD_CP_DBG2SEL_SH 50
#define MMCR1_TD_CP_DBG3SEL_SH 48
#define MMCR1_GRS_L2SEL_SH 46
#define MMCR1_GRS_L2SEL_MSK 3
#define MMCR1_GRS_L3SEL_SH 44
#define MMCR1_GRS_L3SEL_MSK 3
#define MMCR1_GRS_MCSEL_SH 41
#define MMCR1_GRS_MCSEL_MSK 7
#define MMCR1_GRS_FABSEL_SH 39
#define MMCR1_GRS_FABSEL_MSK 3
#define MMCR1_PMC1_ADDER_SEL_SH 35
#define MMCR1_PMC2_ADDER_SEL_SH 34
#define MMCR1_PMC3_ADDER_SEL_SH 33
#define MMCR1_PMC4_ADDER_SEL_SH 32
#define MMCR1_PMC1SEL_SH 25
#define MMCR1_PMC2SEL_SH 17
#define MMCR1_PMC3SEL_SH 9
#define MMCR1_PMC4SEL_SH 1
#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
#define MMCR1_PMCSEL_MSK 0x7f
/*
* Layout of constraint bits:
* 6666555555555544444444443333333333222222222211111111110000000000
* 3210987654321098765432109876543210987654321098765432109876543210
* [ ><><>< ><> <><>[ > < >< >< >< ><><><><><><>
* NC G0G1G2 G3 T0T1 UC B0 B1 B2 B3 P6P5P4P3P2P1
*
* NC - number of counters
* 51: NC error 0x0008_0000_0000_0000
* 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000
*
* G0..G3 - GRS mux constraints
* 46-47: GRS_L2SEL value
* 44-45: GRS_L3SEL value
* 41-43: GRS_MCSEL value
* 39-40: GRS_FABSEL value
* Note that these match up with their bit positions in MMCR1
*
* T0 - TTM0 constraint
* 36-37: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0x30_0000_0000
*
* T1 - TTM1 constraint
* 34-35: TTM1SEL value (0=IDU, 3=GRS) 0x0c_0000_0000
*
* UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS
* 33: UC3 error 0x02_0000_0000
* 32: FPU|IFU|ISU1 events needed 0x01_0000_0000
* 31: ISU0 events needed 0x00_8000_0000
* 30: IDU|GRS events needed 0x00_4000_0000
*
* B0
* 24-27: Byte 0 event source 0x0f00_0000
* Encoding as for the event code
*
* B1, B2, B3
* 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources
*
* P6
* 11: P6 error 0x800
* 10-11: Count of events needing PMC6
*
* P1..P5
* 0-9: Count of events needing PMC1..PMC5
*/
static const int grsel_shift[8] = {
MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH,
MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH,
MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH
};
/* Masks and values for using events from the various units */
static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
[PM_FPU] = { 0x3200000000ul, 0x0100000000ul },
[PM_ISU0] = { 0x0200000000ul, 0x0080000000ul },
[PM_ISU1] = { 0x3200000000ul, 0x3100000000ul },
[PM_IFU] = { 0x3200000000ul, 0x2100000000ul },
[PM_IDU] = { 0x0e00000000ul, 0x0040000000ul },
[PM_GRS] = { 0x0e00000000ul, 0x0c40000000ul },
};
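/*
 * Decoding example for the table above (illustration only): for PM_FPU
 * the value 0x0100000000 sets bit 32 ("FPU|IFU|ISU1 events needed") and
 * leaves the TTM0SEL field (bits 36-37) at 0, which selects the FPU; the
 * mask 0x3200000000 covers TTM0SEL plus the UC3 error bit (33), so
 * conflicting requests for TTM0 are caught. Likewise PM_GRS sets TTM1SEL
 * (bits 34-35) to 3 and bit 30 ("IDU|GRS events needed").
 */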
static int power5p_get_constraint(u64 event, unsigned long *maskp,
unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, byte, unit, sh;
int bit, fmask;
unsigned long mask = 0, value = 0;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
if (pmc > 6)
return -1;
sh = (pmc - 1) * 2;
mask |= 2 << sh;
value |= 1 << sh;
if (pmc >= 5 && !(event == 0x500009 || event == 0x600005))
return -1;
}
if (event & PM_BUSEVENT_MSK) {
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
if (unit > PM_LASTUNIT)
return -1;
if (unit == PM_ISU0_ALT)
unit = PM_ISU0;
mask |= unit_cons[unit][0];
value |= unit_cons[unit][1];
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
if (byte >= 4) {
if (unit != PM_LSU1)
return -1;
/* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */
++unit;
byte &= 3;
}
if (unit == PM_GRS) {
bit = event & 7;
fmask = (bit == 6)? 7: 3;
sh = grsel_shift[bit];
mask |= (unsigned long)fmask << sh;
value |= (unsigned long)((event >> PM_GRS_SH) & fmask)
<< sh;
}
/* Set byte lane select field */
mask |= 0xfUL << (24 - 4 * byte);
value |= (unsigned long)unit << (24 - 4 * byte);
}
if (pmc < 5) {
/* need a counter from PMC1-4 set */
mask |= 0x8000000000000ul;
value |= 0x1000000000000ul;
}
*maskp = mask;
*valp = value;
return 0;
}
static int power5p_limited_pmc_event(u64 event)
{
int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
return pmc == 5 || pmc == 6;
}
#define MAX_ALT 3 /* at most 3 alternatives for any event */
static const unsigned int event_alternatives[][MAX_ALT] = {
{ 0x100c0, 0x40001f }, /* PM_GCT_FULL_CYC */
{ 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */
{ 0x230e2, 0x323087 }, /* PM_BR_PRED_CR */
{ 0x230e3, 0x223087, 0x3230a0 }, /* PM_BR_PRED_TA */
{ 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */
{ 0x800c4, 0xc20e0 }, /* PM_DTLB_MISS */
{ 0xc50c6, 0xc60e0 }, /* PM_MRK_DTLB_MISS */
{ 0x100005, 0x600005 }, /* PM_RUN_CYC */
{ 0x100009, 0x200009 }, /* PM_INST_CMPL */
{ 0x200015, 0x300015 }, /* PM_LSU_LMQ_SRQ_EMPTY_CYC */
{ 0x300009, 0x400009 }, /* PM_INST_DISP */
};
/*
* Scan the alternatives table for a match and return the
* index into the alternatives table if found, else -1.
*/
static int find_alternative(unsigned int event)
{
int i, j;
for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
if (event < event_alternatives[i][0])
break;
for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
if (event == event_alternatives[i][j])
return i;
}
return -1;
}
static const unsigned char bytedecode_alternatives[4][4] = {
/* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 },
/* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e },
/* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 },
/* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e }
};
/*
* Some direct events for decodes of event bus byte 3 have alternative
* PMCSEL values on other counters. This returns the alternative
* event code for those that do, or -1 otherwise. This also handles
* alternative PMCSEL values for add events.
*/
static s64 find_alternative_bdecode(u64 event)
{
int pmc, altpmc, pp, j;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc == 0 || pmc > 4)
return -1;
altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */
pp = event & PM_PMCSEL_MSK;
for (j = 0; j < 4; ++j) {
if (bytedecode_alternatives[pmc - 1][j] == pp) {
return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) |
(altpmc << PM_PMC_SH) |
bytedecode_alternatives[altpmc - 1][j];
}
}
/* new decode alternatives for power5+ */
if (pmc == 1 && (pp == 0x0d || pp == 0x0e))
return event + (2 << PM_PMC_SH) + (0x2e - 0x0d);
if (pmc == 3 && (pp == 0x2e || pp == 0x2f))
return event - (2 << PM_PMC_SH) - (0x2e - 0x0d);
/* alternative add event encodings */
if (pp == 0x10 || pp == 0x28)
return ((event ^ (0x10 ^ 0x28)) & ~PM_PMC_MSKS) |
(altpmc << PM_PMC_SH);
return -1;
}
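/*
 * Worked example (hypothetical event code): 0x100023 has pmc = 1 and
 * PMCSEL = 0x23, which matches bytedecode_alternatives[0][1]; the
 * returned alternative is 0x400017, i.e. the same byte-decode selection
 * moved to PMC4 with PMCSEL 0x17 (bytedecode_alternatives[3][1]).
 */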
static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
int i, j, nalt = 1;
int nlim;
s64 ae;
alt[0] = event;
nalt = 1;
nlim = power5p_limited_pmc_event(event);
i = find_alternative(event);
if (i >= 0) {
for (j = 0; j < MAX_ALT; ++j) {
ae = event_alternatives[i][j];
if (ae && ae != event)
alt[nalt++] = ae;
nlim += power5p_limited_pmc_event(ae);
}
} else {
ae = find_alternative_bdecode(event);
if (ae > 0)
alt[nalt++] = ae;
}
if (flags & PPMU_ONLY_COUNT_RUN) {
/*
* We're only counting in RUN state,
* so PM_CYC is equivalent to PM_RUN_CYC
* and PM_INST_CMPL === PM_RUN_INST_CMPL.
* This doesn't include alternatives that don't provide
* any extra flexibility in assigning PMCs (e.g.
* 0x100005 for PM_RUN_CYC vs. 0xf for PM_CYC).
* Note that even with these additional alternatives
* we never end up with more than 3 alternatives for any event.
*/
j = nalt;
for (i = 0; i < nalt; ++i) {
switch (alt[i]) {
case 0xf: /* PM_CYC */
alt[j++] = 0x600005; /* PM_RUN_CYC */
++nlim;
break;
case 0x600005: /* PM_RUN_CYC */
alt[j++] = 0xf;
break;
case 0x100009: /* PM_INST_CMPL */
alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */
++nlim;
break;
case 0x500009: /* PM_RUN_INST_CMPL */
alt[j++] = 0x100009; /* PM_INST_CMPL */
alt[j++] = 0x200009;
break;
}
}
nalt = j;
}
if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) {
/* remove the limited PMC events */
j = 0;
for (i = 0; i < nalt; ++i) {
if (!power5p_limited_pmc_event(alt[i])) {
alt[j] = alt[i];
++j;
}
}
nalt = j;
} else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) {
/* remove all but the limited PMC events */
j = 0;
for (i = 0; i < nalt; ++i) {
if (power5p_limited_pmc_event(alt[i])) {
alt[j] = alt[i];
++j;
}
}
nalt = j;
}
return nalt;
}
/*
* Map of which direct events on which PMCs are marked instruction events.
* Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event.
* Bit 0 is set if it is marked for all PMCs.
* The 0x80 bit indicates a byte decode PMCSEL value.
*/
static unsigned char direct_event_is_marked[0x28] = {
0, /* 00 */
0x1f, /* 01 PM_IOPS_CMPL */
0x2, /* 02 PM_MRK_GRP_DISP */
0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
0, /* 04 */
0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */
0x80, /* 06 */
0x80, /* 07 */
0, 0, 0,/* 08 - 0a */
0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */
0, /* 0c */
0x80, /* 0d */
0x80, /* 0e */
0, /* 0f */
0, /* 10 */
0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */
0, /* 12 */
0x10, /* 13 PM_MRK_GRP_CMPL */
0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */
0x2, /* 15 PM_MRK_GRP_ISSUED */
0x80, /* 16 */
0x80, /* 17 */
0, 0, 0, 0, 0,
0x80, /* 1d */
0x80, /* 1e */
0, /* 1f */
0x80, /* 20 */
0x80, /* 21 */
0x80, /* 22 */
0x80, /* 23 */
0x80, /* 24 */
0x80, /* 25 */
0x80, /* 26 */
0x80, /* 27 */
};
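/*
 * Reading the table above, for example: entry 0x03 is 0xe (bits 1-3 set),
 * so PMCSEL 0x03 counts a marked-instruction event on PMC1, PMC2 or PMC3;
 * entry 0x05 is 0x1c, i.e. marked on PMC2-4. power5p_marked_instr_event()
 * below tests these bits with the 1-based PMC number taken from the
 * event code.
 */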
/*
* Returns 1 if event counts things relating to marked instructions
* and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
*/
static int power5p_marked_instr_event(u64 event)
{
int pmc, psel;
int bit, byte, unit;
u32 mask;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
psel = event & PM_PMCSEL_MSK;
if (pmc >= 5)
return 0;
bit = -1;
if (psel < sizeof(direct_event_is_marked)) {
if (direct_event_is_marked[psel] & (1 << pmc))
return 1;
if (direct_event_is_marked[psel] & 0x80)
bit = 4;
else if (psel == 0x08)
bit = pmc - 1;
else if (psel == 0x10)
bit = 4 - pmc;
else if (psel == 0x1b && (pmc == 1 || pmc == 3))
bit = 4;
} else if ((psel & 0x48) == 0x40) {
bit = psel & 7;
} else if (psel == 0x28) {
bit = pmc - 1;
} else if (pmc == 3 && (psel == 0x2e || psel == 0x2f)) {
bit = 4;
}
if (!(event & PM_BUSEVENT_MSK) || bit == -1)
return 0;
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
if (unit == PM_LSU0) {
/* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */
mask = 0x5dff00;
} else if (unit == PM_LSU1 && byte >= 4) {
byte -= 4;
/* byte 5 bits 6-7, byte 6 bits 0,4, byte 7 bits 0-4,6 */
mask = 0x5f11c000;
} else
return 0;
return (mask >> (byte * 8 + bit)) & 1;
}
static int power5p_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
struct perf_event *pevents[],
u32 flags __maybe_unused)
{
unsigned long mmcr1 = 0;
unsigned long mmcra = 0;
unsigned int pmc, unit, byte, psel;
unsigned int ttm;
int i, isbus, bit, grsel;
unsigned int pmc_inuse = 0;
unsigned char busbyte[4];
unsigned char unituse[16];
int ttmuse;
if (n_ev > 6)
return -1;
/* First pass to count resource use */
memset(busbyte, 0, sizeof(busbyte));
memset(unituse, 0, sizeof(unituse));
for (i = 0; i < n_ev; ++i) {
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
if (pmc > 6)
return -1;
if (pmc_inuse & (1 << (pmc - 1)))
return -1;
pmc_inuse |= 1 << (pmc - 1);
}
if (event[i] & PM_BUSEVENT_MSK) {
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
if (unit > PM_LASTUNIT)
return -1;
if (unit == PM_ISU0_ALT)
unit = PM_ISU0;
if (byte >= 4) {
if (unit != PM_LSU1)
return -1;
++unit;
byte &= 3;
}
if (busbyte[byte] && busbyte[byte] != unit)
return -1;
busbyte[byte] = unit;
unituse[unit] = 1;
}
}
/*
* Assign resources and set multiplexer selects.
*
* PM_ISU0 can go either on TTM0 or TTM1, but that's the only
* choice we have to deal with.
*/
if (unituse[PM_ISU0] &
(unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) {
unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */
unituse[PM_ISU0] = 0;
}
/* Set TTM[01]SEL fields. */
ttmuse = 0;
for (i = PM_FPU; i <= PM_ISU1; ++i) {
if (!unituse[i])
continue;
if (ttmuse++)
return -1;
mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH;
}
ttmuse = 0;
for (; i <= PM_GRS; ++i) {
if (!unituse[i])
continue;
if (ttmuse++)
return -1;
mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH;
}
if (ttmuse > 1)
return -1;
/* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */
for (byte = 0; byte < 4; ++byte) {
unit = busbyte[byte];
if (!unit)
continue;
if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) {
/* get ISU0 through TTM1 rather than TTM0 */
unit = PM_ISU0_ALT;
} else if (unit == PM_LSU1 + 1) {
/* select lower word of LSU1 for this byte */
mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte);
}
ttm = unit >> 2;
mmcr1 |= (unsigned long)ttm
<< (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
for (i = 0; i < n_ev; ++i) {
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
psel = event[i] & PM_PMCSEL_MSK;
isbus = event[i] & PM_BUSEVENT_MSK;
if (!pmc) {
/* Bus event or any-PMC direct event */
for (pmc = 0; pmc < 4; ++pmc) {
if (!(pmc_inuse & (1 << pmc)))
break;
}
if (pmc >= 4)
return -1;
pmc_inuse |= 1 << pmc;
} else if (pmc <= 4) {
/* Direct event */
--pmc;
if (isbus && (byte & 2) &&
(psel == 8 || psel == 0x10 || psel == 0x28))
/* add events on higher-numbered bus */
mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
} else {
/* Instructions or run cycles on PMC5/6 */
--pmc;
}
if (isbus && unit == PM_GRS) {
bit = psel & 7;
grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
mmcr1 |= (unsigned long)grsel << grsel_shift[bit];
}
if (power5p_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
if ((psel & 0x58) == 0x40 && (byte & 1) != ((pmc >> 1) & 1))
/* select alternate byte lane */
psel |= 0x10;
if (pmc <= 3)
mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
hwc[i] = pmc;
}
/* Return MMCRx values */
mmcr->mmcr0 = 0;
if (pmc_inuse & 1)
mmcr->mmcr0 = MMCR0_PMC1CE;
if (pmc_inuse & 0x3e)
mmcr->mmcr0 |= MMCR0_PMCjCE;
mmcr->mmcr1 = mmcr1;
mmcr->mmcra = mmcra;
return 0;
}
static void power5p_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
if (pmc <= 3)
mmcr->mmcr1 &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
}
static int power5p_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = 0xf,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x100009,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x1c10a8, /* LD_REF_L1 */
[PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */
[PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */
};
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
* Table of generalized cache-related events.
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
static u64 power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x1c10a8, 0x3c1088 },
[C(OP_WRITE)] = { 0x2c10a8, 0xc10c3 },
[C(OP_PREFETCH)] = { 0xc70e7, -1 },
},
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { 0, 0 },
},
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0 },
[C(OP_WRITE)] = { 0, 0 },
[C(OP_PREFETCH)] = { 0xc50c3, 0 },
},
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0xc20e4, 0x800c4 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x800c0 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x230e4, 0x230e5 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { -1, -1 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
};
static struct power_pmu power5p_pmu = {
.name = "POWER5+/++",
.n_counter = 6,
.max_alternatives = MAX_ALT,
.add_fields = 0x7000000000055ul,
.test_adder = 0x3000040000000ul,
.compute_mmcr = power5p_compute_mmcr,
.get_constraint = power5p_get_constraint,
.get_alternatives = power5p_get_alternatives,
.disable_pmc = power5p_disable_pmc,
.limited_pmc_event = power5p_limited_pmc_event,
.flags = PPMU_LIMITED_PMC5_6 | PPMU_HAS_SSLOT,
.n_generic = ARRAY_SIZE(power5p_generic_events),
.generic_events = power5p_generic_events,
.cache_events = &power5p_cache_events,
};
int __init init_power5p_pmu(void)
{
unsigned int pvr = mfspr(SPRN_PVR);
if (PVR_VER(pvr) != PVR_POWER5p)
return -ENODEV;
return register_power_pmu(&power5p_pmu);
}
| linux-master | arch/powerpc/perf/power5+-pmu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Performance counter callchain support - powerpc architecture code
*
* Copyright © 2009 Paul Mackerras, IBM Corporation.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/pte-walk.h>
#include "callchain.h"
static int read_user_stack_64(const unsigned long __user *ptr, unsigned long *ret)
{
return __read_user_stack(ptr, ret, sizeof(*ret));
}
/*
* 64-bit user processes use the same stack frame for RT and non-RT signals.
*/
struct signal_frame_64 {
char dummy[__SIGNAL_FRAMESIZE];
struct ucontext uc;
unsigned long unused[2];
unsigned int tramp[6];
struct siginfo *pinfo;
void *puc;
struct siginfo info;
char abigap[288];
};
static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
if (nip == fp + offsetof(struct signal_frame_64, tramp))
return 1;
if (current->mm->context.vdso &&
nip == VDSO64_SYMBOL(current->mm->context.vdso, sigtramp_rt64))
return 1;
return 0;
}
/*
* Do some sanity checking on the signal frame pointed to by sp.
* We check the pinfo and puc pointers in the frame.
*/
static int sane_signal_64_frame(unsigned long sp)
{
struct signal_frame_64 __user *sf;
unsigned long pinfo, puc;
sf = (struct signal_frame_64 __user *) sp;
if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
return 0;
return pinfo == (unsigned long) &sf->info &&
puc == (unsigned long) &sf->uc;
}
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long sp, next_sp;
unsigned long next_ip;
unsigned long lr;
long level = 0;
struct signal_frame_64 __user *sigframe;
unsigned long __user *fp, *uregs;
next_ip = perf_instruction_pointer(regs);
lr = regs->link;
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
while (entry->nr < entry->max_stack) {
fp = (unsigned long __user *) sp;
if (invalid_user_sp(sp) || read_user_stack_64(fp, &next_sp))
return;
if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
return;
/*
* Note: the next_sp - sp >= signal frame size check
* is true when next_sp < sp, which can happen when
* transitioning from an alternate signal stack to the
* normal stack.
*/
if (next_sp - sp >= sizeof(struct signal_frame_64) &&
(is_sigreturn_64_address(next_ip, sp) ||
(level <= 1 && is_sigreturn_64_address(lr, sp))) &&
sane_signal_64_frame(sp)) {
/*
* This looks like a signal frame
*/
sigframe = (struct signal_frame_64 __user *) sp;
uregs = sigframe->uc.uc_mcontext.gp_regs;
if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
read_user_stack_64(&uregs[PT_LNK], &lr) ||
read_user_stack_64(&uregs[PT_R1], &sp))
return;
level = 0;
perf_callchain_store_context(entry, PERF_CONTEXT_USER);
perf_callchain_store(entry, next_ip);
continue;
}
if (level == 0)
next_ip = lr;
perf_callchain_store(entry, next_ip);
++level;
sp = next_sp;
}
}
| linux-master | arch/powerpc/perf/callchain_64.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Performance counter support for POWER9 processors.
*
* Copyright 2009 Paul Mackerras, IBM Corporation.
* Copyright 2013 Michael Ellerman, IBM Corporation.
* Copyright 2016 Madhavan Srinivasan, IBM Corporation.
*/
#define pr_fmt(fmt) "power9-pmu: " fmt
#include "isa207-common.h"
/*
* Raw event encoding for Power9:
*
* 60 56 52 48 44 40 36 32
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
* | | [ ] [ ] [ thresh_cmp ] [ thresh_ctl ]
* | | | | |
* | | *- IFM (Linux) | thresh start/stop -*
* | *- BHRB (Linux) *sm
* *- EBB (Linux)
*
* 28 24 20 16 12 8 4 0
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
* [ ] [ sample ] [cache] [ pmc ] [unit ] [] m [ pmcxsel ]
* | | | | |
* | | | | *- mark
* | | *- L1/L2/L3 cache_sel |
* | | |
* | *- sampling mode for marked events *- combine
* |
* *- thresh_sel
*
* Below uses IBM bit numbering.
*
* MMCR1[x:y] = unit (PMCxUNIT)
* MMCR1[24] = pmc1combine[0]
* MMCR1[25] = pmc1combine[1]
* MMCR1[26] = pmc2combine[0]
* MMCR1[27] = pmc2combine[1]
* MMCR1[28] = pmc3combine[0]
* MMCR1[29] = pmc3combine[1]
* MMCR1[30] = pmc4combine[0]
* MMCR1[31] = pmc4combine[1]
*
* if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
* MMCR1[20:27] = thresh_ctl
* else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
* MMCR1[20:27] = thresh_ctl
* else
* MMCRA[48:55] = thresh_ctl (THRESH START/END)
*
* if thresh_sel:
* MMCRA[45:47] = thresh_sel
*
* if thresh_cmp:
* MMCRA[9:11] = thresh_cmp[0:2]
* MMCRA[12:18] = thresh_cmp[3:9]
*
* MMCR1[16] = cache_sel[2]
* MMCR1[17] = cache_sel[3]
*
* if mark:
* MMCRA[63] = 1 (SAMPLE_ENABLE)
* MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
* MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
*
* if EBB and BHRB:
* MMCRA[32:33] = IFM
*
* MMCRA[SDAR_MODE] = sm
*/
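/*
 * Decoding example (hypothetical raw value, following the field layout
 * above and the PMU_FORMAT_ATTR definitions below): config = 0x300f0 has
 * pmc = 3 (bits 16-19) and pmcxsel = 0xf0 (bits 0-7), with the mark,
 * combine, unit, cache and sampling fields all zero. Raw events in this
 * form can typically be requested from user space with something like
 * "perf stat -e cpu/event=0x300f0/", where "event" is the config:0-51
 * format attribute exported through sysfs.
 */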
/*
* Some power9 event codes.
*/
#define EVENT(_name, _code) _name = _code,
enum {
#include "power9-events-list.h"
};
#undef EVENT
/* MMCRA IFM bits - POWER9 */
#define POWER9_MMCRA_IFM1 0x0000000040000000UL
#define POWER9_MMCRA_IFM2 0x0000000080000000UL
#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL
extern u64 PERF_REG_EXTENDED_MASK;
/* Nasty Power9 specific hack */
#define PVR_POWER9_CUMULUS 0x00002000
/* PowerISA v2.07 format attribute structure*/
extern const struct attribute_group isa207_pmu_format_group;
static int p9_dd21_bl_ev[] = {
PM_MRK_ST_DONE_L2,
PM_RADIX_PWC_L1_HIT,
PM_FLOP_CMPL,
PM_MRK_NTF_FIN,
PM_RADIX_PWC_L2_HIT,
PM_IFETCH_THROTTLE,
PM_MRK_L2_TM_ST_ABORT_SISTER,
PM_RADIX_PWC_L3_HIT,
PM_RUN_CYC_SMT2_MODE,
PM_TM_TX_PASS_RUN_INST,
PM_DISP_HELD_SYNC_HOLD,
};
static int p9_dd22_bl_ev[] = {
PM_DTLB_MISS_16G,
PM_DERAT_MISS_2M,
PM_DTLB_MISS_2M,
PM_MRK_DTLB_MISS_1G,
PM_DTLB_MISS_4K,
PM_DERAT_MISS_1G,
PM_MRK_DERAT_MISS_2M,
PM_MRK_DTLB_MISS_4K,
PM_MRK_DTLB_MISS_16G,
PM_DTLB_MISS_64K,
PM_MRK_DERAT_MISS_1G,
PM_MRK_DTLB_MISS_64K,
PM_DISP_HELD_SYNC_HOLD,
PM_DTLB_MISS_16M,
PM_DTLB_MISS_1G,
PM_MRK_DTLB_MISS_16M,
};
/* Table of alternatives, sorted by column 0 */
static const unsigned int power9_event_alternatives[][MAX_ALT] = {
{ PM_BR_2PATH, PM_BR_2PATH_ALT },
{ PM_INST_DISP, PM_INST_DISP_ALT },
{ PM_RUN_CYC_ALT, PM_RUN_CYC },
{ PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
{ PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
};
static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
int num_alt = 0;
num_alt = isa207_get_alternatives(event, alt,
ARRAY_SIZE(power9_event_alternatives), flags,
power9_event_alternatives);
return num_alt;
}
static int power9_check_attr_config(struct perf_event *ev)
{
u64 val;
u64 event = ev->attr.config;
val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
if (val == 0xC || isa3XX_check_attr_config(ev))
return -EINVAL;
return 0;
}
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL);
GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1_FIN);
GENERIC_EVENT_ATTR(mem-loads, MEM_LOADS);
GENERIC_EVENT_ATTR(mem-stores, MEM_STORES);
CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1_FIN);
CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
static struct attribute *power9_events_attr[] = {
GENERIC_EVENT_PTR(PM_CYC),
GENERIC_EVENT_PTR(PM_ICT_NOSLOT_CYC),
GENERIC_EVENT_PTR(PM_CMPLU_STALL),
GENERIC_EVENT_PTR(PM_INST_CMPL),
GENERIC_EVENT_PTR(PM_BR_CMPL),
GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
GENERIC_EVENT_PTR(PM_LD_REF_L1),
GENERIC_EVENT_PTR(PM_LD_MISS_L1_FIN),
GENERIC_EVENT_PTR(MEM_LOADS),
GENERIC_EVENT_PTR(MEM_STORES),
CACHE_EVENT_PTR(PM_LD_MISS_L1_FIN),
CACHE_EVENT_PTR(PM_LD_REF_L1),
CACHE_EVENT_PTR(PM_L1_PREF),
CACHE_EVENT_PTR(PM_ST_MISS_L1),
CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
CACHE_EVENT_PTR(PM_INST_FROM_L1),
CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
CACHE_EVENT_PTR(PM_DATA_FROM_L3),
CACHE_EVENT_PTR(PM_L3_PREF_ALL),
CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
CACHE_EVENT_PTR(PM_BR_CMPL),
CACHE_EVENT_PTR(PM_DTLB_MISS),
CACHE_EVENT_PTR(PM_ITLB_MISS),
NULL
};
static const struct attribute_group power9_pmu_events_group = {
.name = "events",
.attrs = power9_events_attr,
};
PMU_FORMAT_ATTR(event, "config:0-51");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
PMU_FORMAT_ATTR(combine, "config:10-11");
PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
PMU_FORMAT_ATTR(cache_sel, "config:20-23");
PMU_FORMAT_ATTR(sample_mode, "config:24-28");
PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
PMU_FORMAT_ATTR(thresh_start, "config:36-39");
PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
PMU_FORMAT_ATTR(sdar_mode, "config:50-51");
static struct attribute *power9_pmu_format_attr[] = {
&format_attr_event.attr,
&format_attr_pmcxsel.attr,
&format_attr_mark.attr,
&format_attr_combine.attr,
&format_attr_unit.attr,
&format_attr_pmc.attr,
&format_attr_cache_sel.attr,
&format_attr_sample_mode.attr,
&format_attr_thresh_sel.attr,
&format_attr_thresh_stop.attr,
&format_attr_thresh_start.attr,
&format_attr_thresh_cmp.attr,
&format_attr_sdar_mode.attr,
NULL,
};
static const struct attribute_group power9_pmu_format_group = {
.name = "format",
.attrs = power9_pmu_format_attr,
};
static struct attribute *power9_pmu_caps_attrs[] = {
NULL
};
static struct attribute_group power9_pmu_caps_group = {
.name = "caps",
.attrs = power9_pmu_caps_attrs,
};
static const struct attribute_group *power9_pmu_attr_groups[] = {
&power9_pmu_format_group,
&power9_pmu_events_group,
&power9_pmu_caps_group,
NULL,
};
static int power9_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
[PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
[PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN,
};
static u64 power9_bhrb_filter_map(u64 branch_sample_type)
{
u64 pmu_bhrb_filter = 0;
/* BHRB and regular PMU events share the same privilege state
* filter configuration. BHRB is always recorded along with a
* regular PMU event. As the privilege state filter is handled
* in the basic PMC configuration of the accompanying regular
* PMU event, we ignore any separate BHRB specific request.
*/
/* No branch filter requested */
if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
return pmu_bhrb_filter;
/* Invalid branch filter options - HW does not support */
if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
return -1;
if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
return -1;
if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
return -1;
if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
pmu_bhrb_filter |= POWER9_MMCRA_IFM1;
return pmu_bhrb_filter;
}
/* Everything else is unsupported */
return -1;
}
static void power9_config_bhrb(u64 pmu_bhrb_filter)
{
pmu_bhrb_filter &= POWER9_MMCRA_BHRB_MASK;
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
* Table of generalized cache-related events.
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
static u64 power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
[ C(RESULT_MISS) ] = PM_LD_MISS_L1_FIN,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = PM_L1_PREF,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(L1I) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(LL) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_DTLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = PM_ITLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = PM_BR_CMPL,
[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(NODE) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
#undef C
static struct power_pmu power9_pmu = {
.name = "POWER9",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
.test_adder = ISA207_TEST_ADDER,
.group_constraint_mask = CNST_CACHE_PMC4_MASK,
.group_constraint_val = CNST_CACHE_PMC4_VAL,
.compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power9_config_bhrb,
.bhrb_filter_map = power9_bhrb_filter_map,
.get_constraint = isa207_get_constraint,
.get_alternatives = power9_get_alternatives,
.get_mem_data_src = isa207_get_mem_data_src,
.get_mem_weight = isa207_get_mem_weight,
.disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power9_generic_events),
.generic_events = power9_generic_events,
.cache_events = &power9_cache_events,
.attr_groups = power9_pmu_attr_groups,
.bhrb_nr = 32,
.capabilities = PERF_PMU_CAP_EXTENDED_REGS,
.check_attr_config = power9_check_attr_config,
};
int __init init_power9_pmu(void)
{
int rc = 0;
unsigned int pvr = mfspr(SPRN_PVR);
if (PVR_VER(pvr) != PVR_POWER9)
return -ENODEV;
/* Blacklist events for non-Cumulus parts at DD2.1 and DD2.2 */
if (!(pvr & PVR_POWER9_CUMULUS)) {
if ((PVR_CFG(pvr) == 2) && (PVR_MIN(pvr) == 1)) {
power9_pmu.blacklist_ev = p9_dd21_bl_ev;
power9_pmu.n_blacklist_ev = ARRAY_SIZE(p9_dd21_bl_ev);
} else if ((PVR_CFG(pvr) == 2) && (PVR_MIN(pvr) == 2)) {
power9_pmu.blacklist_ev = p9_dd22_bl_ev;
power9_pmu.n_blacklist_ev = ARRAY_SIZE(p9_dd22_bl_ev);
}
}
/* Set the PERF_REG_EXTENDED_MASK here */
PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_300;
rc = register_power_pmu(&power9_pmu);
if (rc)
return rc;
/* Tell userspace that EBB is supported */
cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
return 0;
}
| linux-master | arch/powerpc/perf/power9-pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PPC64 code to handle Linux booting another kernel.
*
* Copyright (C) 2004-2005, IBM Corp.
*
* Created by: Milton D Miller II
*/
#include <linux/kexec.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/init_task.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/hardirq.h>
#include <linux/of.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h> /* _end */
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
int machine_kexec_prepare(struct kimage *image)
{
int i;
unsigned long begin, end; /* limits of segment */
unsigned long low, high; /* limits of blocked memory range */
struct device_node *node;
const unsigned long *basep;
const unsigned int *sizep;
/*
* Since we use the kernel fault handlers and paging code to
* handle the virtual mode, we must make sure no destination
* overlaps kernel static data or bss.
*/
for (i = 0; i < image->nr_segments; i++)
if (image->segment[i].mem < __pa(_end))
return -ETXTBSY;
/* We also should not overwrite the tce tables */
for_each_node_by_type(node, "pci") {
basep = of_get_property(node, "linux,tce-base", NULL);
sizep = of_get_property(node, "linux,tce-size", NULL);
if (basep == NULL || sizep == NULL)
continue;
low = *basep;
high = low + (*sizep);
for (i = 0; i < image->nr_segments; i++) {
begin = image->segment[i].mem;
end = begin + image->segment[i].memsz;
if ((begin < high) && (end > low)) {
of_node_put(node);
return -ETXTBSY;
}
}
}
return 0;
}
/* Called during kexec sequence with MMU off */
static notrace void copy_segments(unsigned long ind)
{
unsigned long entry;
unsigned long *ptr;
void *dest;
void *addr;
/*
* We rely on kexec_load to create a list that properly
* initializes these pointers before they are used.
* We will still crash if the list is wrong, but at least
* the compiler will be quiet.
*/
ptr = NULL;
dest = NULL;
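/*
 * Walk the kexec indirection list: each entry is a physical address
 * tagged with IND_* flags that name the next destination page, the
 * next indirection page, or a source page to copy.
 */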
for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
addr = __va(entry & PAGE_MASK);
switch (entry & IND_FLAGS) {
case IND_DESTINATION:
dest = addr;
break;
case IND_INDIRECTION:
ptr = addr;
break;
case IND_SOURCE:
copy_page(dest, addr);
dest += PAGE_SIZE;
}
}
}
/* Called during kexec sequence with MMU off */
notrace void kexec_copy_flush(struct kimage *image)
{
long i, nr_segments = image->nr_segments;
struct kexec_segment ranges[KEXEC_SEGMENT_MAX];
/* save the ranges on the stack to efficiently flush the icache */
memcpy(ranges, image->segment, sizeof(ranges));
/*
* After this call we may not use anything allocated in dynamic
* memory, including *image.
*
* Only globals and the stack are allowed.
*/
copy_segments(image->head);
/*
* we need to clear the icache for all dest pages sometime,
* including ones that were in place on the original copy
*/
for (i = 0; i < nr_segments; i++)
flush_icache_range((unsigned long)__va(ranges[i].mem),
(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}
#ifdef CONFIG_SMP
static int kexec_all_irq_disabled = 0;
static void kexec_smp_down(void *arg)
{
local_irq_disable();
hard_irq_disable();
mb(); /* make sure our irqs are disabled before we say they are */
get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
while(kexec_all_irq_disabled == 0)
cpu_relax();
mb(); /* make sure all irqs are disabled before this */
hw_breakpoint_disable();
/*
* Now every CPU has IRQs off, we can clear out any pending
* IPIs and be sure that no more will come in after this.
*/
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 1);
reset_sprs();
kexec_smp_wait();
/* NOTREACHED */
}
static void kexec_prepare_cpus_wait(int wait_state)
{
int my_cpu, i, notified=-1;
hw_breakpoint_disable();
my_cpu = get_cpu();
/* Make sure each CPU has at least made it to the state we need.
*
* FIXME: There is a (slim) chance of a problem if not all of the CPUs
* are correctly onlined. If somehow we start a CPU on boot with RTAS
* start-cpu, but that CPU doesn't write callin_cpu_map[] in
* time, the boot CPU will time out. If it does eventually execute
* stuff, the secondary will start up (paca_ptrs[]->cpu_start was
* written) and get into a peculiar state.
* If the platform supports smp_ops->take_timebase(), the secondary CPU
* will probably be spinning in there. If not (i.e. pseries), the
* secondary will continue on and try to online itself/idle/etc. If it
* survives that, we need to find these
* possible-but-not-online-but-should-be CPUs and chaperone them into
* kexec_smp_wait().
*/
for_each_online_cpu(i) {
if (i == my_cpu)
continue;
while (paca_ptrs[i]->kexec_state < wait_state) {
barrier();
if (i != notified) {
printk(KERN_INFO "kexec: waiting for cpu %d "
"(physical %d) to enter %i state\n",
i, paca_ptrs[i]->hw_cpu_id, wait_state);
notified = i;
}
}
}
mb();
}
/*
* We need to make sure each present CPU is online. The next kernel will scan
* the device tree and assume primary threads are online and query secondary
* threads via RTAS to online them if required. If we don't online primary
* threads, they will be stuck. However, we also online secondary threads as we
* may be using 'cede offline'. In this case RTAS doesn't see the secondary
* threads as offline -- and again, these CPUs will be stuck.
*
* So, we online all CPUs that should be running, including secondary threads.
*/
static void wake_offline_cpus(void)
{
int cpu = 0;
for_each_present_cpu(cpu) {
if (!cpu_online(cpu)) {
printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
cpu);
WARN_ON(add_cpu(cpu));
}
}
}
static void kexec_prepare_cpus(void)
{
wake_offline_cpus();
smp_call_function(kexec_smp_down, NULL, /* wait */0);
local_irq_disable();
hard_irq_disable();
mb(); /* make sure IRQs are disabled before we say they are */
get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
/* we are sure every CPU has IRQs off at this point */
kexec_all_irq_disabled = 1;
/*
* Before removing MMU mappings make sure all CPUs have entered real
* mode:
*/
kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);
/* after we tell the others to go down */
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 0);
put_cpu();
}
#else /* ! SMP */
static void kexec_prepare_cpus(void)
{
/*
* move the secondaries to us so that we can copy
* the new kernel 0-0x100 safely
*
* do this if kexec in setup.c ?
*
* We need to release the cpus if we are ever going from a
* UP to an SMP kernel.
*/
smp_release_cpus();
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 0);
local_irq_disable();
hard_irq_disable();
}
#endif /* SMP */
/*
* kexec thread structure and stack.
*
* We need to make sure that this is 16384-byte aligned due to the
* way process stacks are handled. It also must be statically allocated
* or allocated as part of the kimage, because everything else may be
* overwritten when we copy the kexec image. We piggyback on the
* "init_task" linker section here to statically allocate a stack.
*
* We could use a smaller stack if we don't care about anything using
* current, but that audit has not been performed.
*/
static union thread_union kexec_stack __init_task_data =
{ };
/*
* For similar reasons to the stack above, the kexecing CPU needs to be on a
* static PACA; we switch to kexec_paca.
*/
static struct paca_struct kexec_paca;
/* Our assembly helper, in misc_64.S */
extern void kexec_sequence(void *newstack, unsigned long start,
void *image, void *control,
void (*clear_all)(void),
bool copy_with_mmu_off) __noreturn;
/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
bool copy_with_mmu_off;
/* prepare control code if any */
/*
* If this is a normal kexec boot, we need to shut down the other
* CPUs into our wait loop and quiesce interrupts.
* Otherwise, in the crash case (crashing_cpu >= 0), the other CPUs
* have already been stopped and their pt_regs collected, using the
* debugger IPI, before we get here.
*/
if (!kdump_in_progress())
kexec_prepare_cpus();
printk("kexec: Starting switchover sequence.\n");
/* switch to a statically allocated stack. Based on irq stack code.
* We set up preempt_count to avoid using VMX in memcpy.
* XXX: the task struct will likely be invalid once we do the copy!
*/
current_thread_info()->flags = 0;
current_thread_info()->preempt_count = HARDIRQ_OFFSET;
/* We need a static PACA, too; copy this CPU's PACA over and switch to
* it. Also poison per_cpu_offset and NULL lppaca to catch anyone using
* non-static data.
*/
memcpy(&kexec_paca, get_paca(), sizeof(struct paca_struct));
kexec_paca.data_offset = 0xedeaddeadeeeeeeeUL;
#ifdef CONFIG_PPC_PSERIES
kexec_paca.lppaca_ptr = NULL;
#endif
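/*
 * A secure (SVM) guest doing a regular kexec unshares all pages it
 * shared with the ultravisor; preserve-context and crash kexecs skip
 * this step.
 */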
if (is_secure_guest() && !(image->preserve_context ||
image->type == KEXEC_TYPE_CRASH)) {
uv_unshare_all_pages();
printk("kexec: Unshared all shared pages.\n");
}
paca_ptrs[kexec_paca.paca_index] = &kexec_paca;
setup_paca(&kexec_paca);
/*
* The lppaca should be unregistered at this point so the HV won't
* touch it. In the case of a crash, none of the lppacas are
* unregistered so there is not much we can do about it here.
*/
/*
* On Book3S, the copy must happen with the MMU off if we are either
* using Radix page tables or we are not in an LPAR since we can
* overwrite the page tables while copying.
*
* In an LPAR, we keep the MMU on otherwise we can't access beyond
* the RMA. On BookE there is no real MMU off mode, so we have to
* keep it enabled as well (but then we have bolted TLB entries).
*/
#ifdef CONFIG_PPC_BOOK3E_64
copy_with_mmu_off = false;
#else
copy_with_mmu_off = radix_enabled() ||
!(firmware_has_feature(FW_FEATURE_LPAR) ||
firmware_has_feature(FW_FEATURE_PS3_LV1));
#endif
/* Some things are best done in assembly. Finding globals with
* a toc is easier in C, so pass in what we can.
*/
kexec_sequence(&kexec_stack, image->start, image,
page_address(image->control_code_page),
mmu_cleanup_all, copy_with_mmu_off);
/* NOTREACHED */
}
#ifdef CONFIG_PPC_64S_HASH_MMU
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;
static unsigned long htab_size;
static struct property htab_base_prop = {
.name = "linux,htab-base",
.length = sizeof(unsigned long),
.value = &htab_base,
};
static struct property htab_size_prop = {
.name = "linux,htab-size",
.length = sizeof(unsigned long),
.value = &htab_size,
};
static int __init export_htab_values(void)
{
struct device_node *node;
/* On machines with no htab htab_address is NULL */
if (!htab_address)
return -ENODEV;
node = of_find_node_by_path("/chosen");
if (!node)
return -ENODEV;
/* remove any stale properties so ours can be found */
of_remove_property(node, of_find_property(node, htab_base_prop.name, NULL));
of_remove_property(node, of_find_property(node, htab_size_prop.name, NULL));
htab_base = cpu_to_be64(__pa(htab_address));
of_add_property(node, &htab_base_prop);
htab_size = cpu_to_be64(htab_size_bytes);
of_add_property(node, &htab_size_prop);
of_node_put(node);
return 0;
}
late_initcall(export_htab_values);
#endif /* CONFIG_PPC_64S_HASH_MMU */
| linux-master | arch/powerpc/kexec/core_64.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* powerpc code to implement the kexec_file_load syscall
*
* Copyright (C) 2004 Adam Litke ([email protected])
* Copyright (C) 2004 IBM Corp.
* Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation
* Copyright (C) 2005 R Sharada ([email protected])
* Copyright (C) 2006 Mohan Kumar M ([email protected])
* Copyright (C) 2016 IBM Corporation
*
* Based on kexec-tools' kexec-elf-ppc64.c, fs2dt.c.
* Heavily modified for the kernel by
* Thiago Jung Bauermann <[email protected]>.
*/
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <asm/setup.h>
#define SLAVE_CODE_SIZE 256 /* First 0x100 bytes */
/**
* setup_kdump_cmdline - Prepend "elfcorehdr=<addr> " to command line
* of kdump kernel for exporting the core.
* @image: Kexec image
* @cmdline: Command line parameters to update.
* @cmdline_len: Length of the cmdline parameters.
*
* kdump segment must be setup before calling this function.
*
* Returns new cmdline buffer for kdump kernel on success, NULL otherwise.
*/
char *setup_kdump_cmdline(struct kimage *image, char *cmdline,
unsigned long cmdline_len)
{
int elfcorehdr_strlen;
char *cmdline_ptr;
cmdline_ptr = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL);
if (!cmdline_ptr)
return NULL;
elfcorehdr_strlen = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ",
image->elf_load_addr);
if (elfcorehdr_strlen + cmdline_len > COMMAND_LINE_SIZE) {
pr_err("Appending elfcorehdr=<addr> exceeds cmdline size\n");
kfree(cmdline_ptr);
return NULL;
}
memcpy(cmdline_ptr + elfcorehdr_strlen, cmdline, cmdline_len);
// Ensure it's nul terminated
cmdline_ptr[COMMAND_LINE_SIZE - 1] = '\0';
return cmdline_ptr;
}
/**
* setup_purgatory - initialize the purgatory's global variables
* @image: kexec image.
* @slave_code: Slave code for the purgatory.
* @fdt: Flattened device tree for the next kernel.
* @kernel_load_addr: Address where the kernel is loaded.
* @fdt_load_addr: Address where the flattened device tree is loaded.
*
* Return: 0 on success, or negative errno on error.
*/
int setup_purgatory(struct kimage *image, const void *slave_code,
const void *fdt, unsigned long kernel_load_addr,
unsigned long fdt_load_addr)
{
unsigned int *slave_code_buf, master_entry;
int ret;
slave_code_buf = kmalloc(SLAVE_CODE_SIZE, GFP_KERNEL);
if (!slave_code_buf)
return -ENOMEM;
/* Get the slave code from the new kernel and put it in purgatory. */
ret = kexec_purgatory_get_set_symbol(image, "purgatory_start",
slave_code_buf, SLAVE_CODE_SIZE,
true);
if (ret) {
kfree(slave_code_buf);
return ret;
}
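/*
 * Keep the purgatory's own first instruction (the master CPU entry)
 * while replacing the rest of its first SLAVE_CODE_SIZE bytes with
 * the slave code taken from the new kernel, then write the patched
 * buffer back into the purgatory.
 */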
master_entry = slave_code_buf[0];
memcpy(slave_code_buf, slave_code, SLAVE_CODE_SIZE);
slave_code_buf[0] = master_entry;
ret = kexec_purgatory_get_set_symbol(image, "purgatory_start",
slave_code_buf, SLAVE_CODE_SIZE,
false);
kfree(slave_code_buf);
ret = kexec_purgatory_get_set_symbol(image, "kernel", &kernel_load_addr,
sizeof(kernel_load_addr), false);
if (ret)
return ret;
ret = kexec_purgatory_get_set_symbol(image, "dt_offset", &fdt_load_addr,
sizeof(fdt_load_addr), false);
if (ret)
return ret;
return 0;
}
| linux-master | arch/powerpc/kexec/file_load.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Architecture specific (PPC64) functions for kexec based crash dumps.
*
* Copyright (C) 2005, IBM Corp.
*
* Created by: Haren Myneni
*/
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/export.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
#include <asm/smp.h>
#include <asm/setjmp.h>
#include <asm/debug.h>
#include <asm/interrupt.h>
/*
* The primary CPU waits a while for all secondary CPUs to enter. This is to
* avoid sending an IPI if the secondary CPUs are entering
* crash_kexec_secondary on their own (eg via a system reset).
*
* The secondary timeout has to be longer than the primary. Both timeouts are
* in milliseconds.
*/
#define PRIMARY_TIMEOUT 500
#define SECONDARY_TIMEOUT 1000
#define IPI_TIMEOUT 10000
#define REAL_MODE_TIMEOUT 10000
static int time_to_dump;
/*
* In case of system reset, secondary CPUs enter crash_kexec_secondary without
* an IPI having to be sent explicitly. So, indicate if the crash is via
* system reset to avoid sending another IPI.
*/
static int is_via_system_reset;
/*
* crash_wake_offline should be set to 1 by platforms that intend to wake
* up offline cpus prior to jumping to a kdump kernel. Currently powernv
* sets it to 1, since we want to avoid things from happening when an
* offline CPU wakes up due to something like an HMI (malfunction error),
* which propagates to all threads.
*/
int crash_wake_offline;
#define CRASH_HANDLER_MAX 3
/* List of shutdown handles */
static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX];
static DEFINE_SPINLOCK(crash_handlers_lock);
static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
static int crash_shutdown_cpu = -1;
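/*
 * Fault/debugger hook installed around the setjmp-protected regions
 * below: if the crashing CPU faults there, longjmp back to the saved
 * context instead of taking the exception.
 */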
static int handle_fault(struct pt_regs *regs)
{
if (crash_shutdown_cpu == smp_processor_id())
longjmp(crash_shutdown_buf, 1);
return 0;
}
#ifdef CONFIG_SMP
static atomic_t cpus_in_crash;
void crash_ipi_callback(struct pt_regs *regs)
{
static cpumask_t cpus_state_saved = CPU_MASK_NONE;
int cpu = smp_processor_id();
hard_irq_disable();
if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
crash_save_cpu(regs, cpu);
cpumask_set_cpu(cpu, &cpus_state_saved);
}
atomic_inc(&cpus_in_crash);
smp_mb__after_atomic();
/*
* Starting the kdump boot.
* This barrier is needed to make sure that all CPUs are stopped.
*/
while (!time_to_dump)
cpu_relax();
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 1);
#ifdef CONFIG_PPC64
kexec_smp_wait();
#else
for (;;); /* FIXME */
#endif
/* NOTREACHED */
}
static void crash_kexec_prepare_cpus(void)
{
unsigned int msecs;
volatile unsigned int ncpus = num_online_cpus() - 1; /* Excluding the panic cpu */
volatile int tries = 0;
int (*old_handler)(struct pt_regs *regs);
printk(KERN_EMERG "Sending IPI to other CPUs\n");
if (crash_wake_offline)
ncpus = num_present_cpus() - 1;
/*
* If we came in via system reset, secondaries enter via crash_kexec_secondary().
* So, wait a while for the secondary CPUs to enter for that case.
* Else, send IPI to all other CPUs.
*/
if (is_via_system_reset)
mdelay(PRIMARY_TIMEOUT);
else
crash_send_ipi(crash_ipi_callback);
smp_wmb();
again:
/*
* FIXME: Until we have a way to stop other CPUs reliably,
* the crash CPU will send an IPI and wait for other CPUs to
* respond.
*/
msecs = IPI_TIMEOUT;
while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0))
mdelay(1);
/* Would it be better to replace the trap vector here? */
if (atomic_read(&cpus_in_crash) >= ncpus) {
printk(KERN_EMERG "IPI complete\n");
return;
}
printk(KERN_EMERG "ERROR: %d cpu(s) not responding\n",
ncpus - atomic_read(&cpus_in_crash));
/*
* If we have a panic timeout set then we can't wait indefinitely
* for someone to activate system reset. We also give up on the
* second time through if system reset fails to work.
*/
if ((panic_timeout > 0) || (tries > 0))
return;
/*
* A system reset will cause all CPUs to take an 0x100 exception.
* The primary CPU returns here via setjmp, and the secondary
* CPUs reexecute the crash_kexec_secondary path.
*/
old_handler = __debugger;
__debugger = handle_fault;
crash_shutdown_cpu = smp_processor_id();
if (setjmp(crash_shutdown_buf) == 0) {
printk(KERN_EMERG "Activate system reset (dumprestart) "
"to stop other cpu(s)\n");
/*
* A system reset will force all CPUs to execute the
* crash code again. We need to reset cpus_in_crash so we
* wait for everyone to do this.
*/
atomic_set(&cpus_in_crash, 0);
smp_mb();
while (atomic_read(&cpus_in_crash) < ncpus)
cpu_relax();
}
crash_shutdown_cpu = -1;
__debugger = old_handler;
tries++;
goto again;
}
/*
* This function will be called by secondary cpus.
*/
void crash_kexec_secondary(struct pt_regs *regs)
{
unsigned long flags;
int msecs = SECONDARY_TIMEOUT;
local_irq_save(flags);
/* Wait for the primary crash CPU to signal its progress */
while (crashing_cpu < 0) {
if (--msecs < 0) {
/* No response, kdump image may not have been loaded */
local_irq_restore(flags);
return;
}
mdelay(1);
}
crash_ipi_callback(regs);
}
#else /* ! CONFIG_SMP */
static void crash_kexec_prepare_cpus(void)
{
/*
* move the secondaries to us so that we can copy
* the new kernel 0-0x100 safely
*
* do this if kexec in setup.c ?
*/
#ifdef CONFIG_PPC64
smp_release_cpus();
#else
/* FIXME */
#endif
}
void crash_kexec_secondary(struct pt_regs *regs)
{
}
#endif /* CONFIG_SMP */
/* wait for all the CPUs to hit real mode but timeout if they don't come in */
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
noinstr static void __maybe_unused crash_kexec_wait_realmode(int cpu)
{
unsigned int msecs;
int i;
msecs = REAL_MODE_TIMEOUT;
for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
if (i == cpu)
continue;
while (paca_ptrs[i]->kexec_state < KEXEC_STATE_REAL_MODE) {
barrier();
if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
break;
msecs--;
mdelay(1);
}
}
mb();
}
#else
static inline void crash_kexec_wait_realmode(int cpu) {}
#endif /* CONFIG_SMP && CONFIG_PPC64 */
void crash_kexec_prepare(void)
{
/* Avoid hardlocking with an unresponsive CPU holding logbuf_lock */
printk_deferred_enter();
/*
* This function is only called after the system
* has panicked or is otherwise in a critical state.
* The minimum amount of code to allow a kexec'd kernel
* to run successfully needs to happen here.
*
* In practice this means stopping other cpus in
* an SMP system.
* The kernel is broken so disable interrupts.
*/
hard_irq_disable();
/*
* Make a note of crashing cpu. Will be used in machine_kexec
* such that another IPI will not be sent.
*/
crashing_cpu = smp_processor_id();
crash_kexec_prepare_cpus();
}
/*
* Register a function to be called on shutdown. Only use this if you
* can't reset your device in the second kernel.
*/
int crash_shutdown_register(crash_shutdown_t handler)
{
unsigned int i, rc;
spin_lock(&crash_handlers_lock);
for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
if (!crash_shutdown_handles[i]) {
/* Insert handle at first empty entry */
crash_shutdown_handles[i] = handler;
rc = 0;
break;
}
if (i == CRASH_HANDLER_MAX) {
printk(KERN_ERR "Crash shutdown handles full, "
"not registered.\n");
rc = 1;
}
spin_unlock(&crash_handlers_lock);
return rc;
}
EXPORT_SYMBOL(crash_shutdown_register);
int crash_shutdown_unregister(crash_shutdown_t handler)
{
unsigned int i, rc;
spin_lock(&crash_handlers_lock);
for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
if (crash_shutdown_handles[i] == handler)
break;
if (i == CRASH_HANDLER_MAX) {
printk(KERN_ERR "Crash shutdown handle not found\n");
rc = 1;
} else {
/* Shift handles down */
for (; i < (CRASH_HANDLER_MAX - 1); i++)
crash_shutdown_handles[i] =
crash_shutdown_handles[i+1];
/*
* Reset last entry to NULL now that it has been shifted down,
* this will allow new handles to be added here.
*/
crash_shutdown_handles[i] = NULL;
rc = 0;
}
spin_unlock(&crash_handlers_lock);
return rc;
}
EXPORT_SYMBOL(crash_shutdown_unregister);
void default_machine_crash_shutdown(struct pt_regs *regs)
{
volatile unsigned int i;
int (*old_handler)(struct pt_regs *regs);
if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
is_via_system_reset = 1;
crash_smp_send_stop();
crash_save_cpu(regs, crashing_cpu);
time_to_dump = 1;
crash_kexec_wait_realmode(crashing_cpu);
machine_kexec_mask_interrupts();
/*
* Call registered shutdown routines safely. Swap out
* __debugger_fault_handler, and replace on exit.
*/
old_handler = __debugger_fault_handler;
__debugger_fault_handler = handle_fault;
crash_shutdown_cpu = smp_processor_id();
for (i = 0; i < CRASH_HANDLER_MAX && crash_shutdown_handles[i]; i++) {
if (setjmp(crash_shutdown_buf) == 0) {
/*
* Insert syncs and delay to ensure
* instructions in the dangerous region don't
* leak away from this protected region.
*/
asm volatile("sync; isync");
/* dangerous region */
crash_shutdown_handles[i]();
asm volatile("sync; isync");
}
}
crash_shutdown_cpu = -1;
__debugger_fault_handler = old_handler;
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
}
| linux-master | arch/powerpc/kexec/crash.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Load ELF vmlinux file for the kexec_file_load syscall.
*
* Copyright (C) 2004 Adam Litke ([email protected])
* Copyright (C) 2004 IBM Corp.
* Copyright (C) 2005 R Sharada ([email protected])
* Copyright (C) 2006 Mohan Kumar M ([email protected])
* Copyright (C) 2016 IBM Corporation
*
* Based on kexec-tools' kexec-elf-exec.c and kexec-elf-ppc64.c.
* Heavily modified for the kernel by
* Thiago Jung Bauermann <[email protected]>.
*/
#define pr_fmt(fmt) "kexec_elf: " fmt
#include <linux/elf.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/types.h>
static void *elf64_load(struct kimage *image, char *kernel_buf,
unsigned long kernel_len, char *initrd,
unsigned long initrd_len, char *cmdline,
unsigned long cmdline_len)
{
int ret;
unsigned long kernel_load_addr;
unsigned long initrd_load_addr = 0, fdt_load_addr;
void *fdt;
const void *slave_code;
struct elfhdr ehdr;
char *modified_cmdline = NULL;
struct kexec_elf_info elf_info;
struct kexec_buf kbuf = { .image = image, .buf_min = 0,
.buf_max = ppc64_rma_size };
struct kexec_buf pbuf = { .image = image, .buf_min = 0,
.buf_max = ppc64_rma_size, .top_down = true,
.mem = KEXEC_BUF_MEM_UNKNOWN };
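/*
 * Both the kexec segments (kbuf) and the purgatory (pbuf) are kept
 * below ppc64_rma_size so they stay within the RMA, which is
 * accessible in real mode during the switchover.
 */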
ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info);
if (ret)
return ERR_PTR(ret);
if (image->type == KEXEC_TYPE_CRASH) {
/* min & max buffer values for kdump case */
kbuf.buf_min = pbuf.buf_min = crashk_res.start;
kbuf.buf_max = pbuf.buf_max =
((crashk_res.end < ppc64_rma_size) ?
crashk_res.end : (ppc64_rma_size - 1));
}
ret = kexec_elf_load(image, &ehdr, &elf_info, &kbuf, &kernel_load_addr);
if (ret)
goto out;
pr_debug("Loaded the kernel at 0x%lx\n", kernel_load_addr);
ret = kexec_load_purgatory(image, &pbuf);
if (ret) {
pr_err("Loading purgatory failed.\n");
goto out;
}
pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);
/* Load additional segments needed for panic kernel */
if (image->type == KEXEC_TYPE_CRASH) {
ret = load_crashdump_segments_ppc64(image, &kbuf);
if (ret) {
pr_err("Failed to load kdump kernel segments\n");
goto out;
}
/* Setup cmdline for kdump kernel case */
modified_cmdline = setup_kdump_cmdline(image, cmdline,
cmdline_len);
if (!modified_cmdline) {
pr_err("Setting up cmdline for kdump kernel failed\n");
ret = -EINVAL;
goto out;
}
cmdline = modified_cmdline;
}
if (initrd != NULL) {
kbuf.buffer = initrd;
kbuf.bufsz = kbuf.memsz = initrd_len;
kbuf.buf_align = PAGE_SIZE;
kbuf.top_down = false;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
goto out;
initrd_load_addr = kbuf.mem;
pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr);
}
fdt = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
initrd_len, cmdline,
kexec_extra_fdt_size_ppc64(image));
if (!fdt) {
pr_err("Error setting up the new device tree.\n");
ret = -EINVAL;
goto out;
}
ret = setup_new_fdt_ppc64(image, fdt, initrd_load_addr,
initrd_len, cmdline);
if (ret)
goto out_free_fdt;
fdt_pack(fdt);
kbuf.buffer = fdt;
kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt);
kbuf.buf_align = PAGE_SIZE;
kbuf.top_down = true;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
goto out_free_fdt;
/* FDT will be freed in arch_kimage_file_post_load_cleanup */
image->arch.fdt = fdt;
fdt_load_addr = kbuf.mem;
pr_debug("Loaded device tree at 0x%lx\n", fdt_load_addr);
slave_code = elf_info.buffer + elf_info.proghdrs[0].p_offset;
ret = setup_purgatory_ppc64(image, slave_code, fdt, kernel_load_addr,
fdt_load_addr);
if (ret)
pr_err("Error setting up the purgatory.\n");
goto out;
out_free_fdt:
kvfree(fdt);
out:
kfree(modified_cmdline);
kexec_free_elf_info(&elf_info);
return ret ? ERR_PTR(ret) : NULL;
}
const struct kexec_file_ops kexec_elf64_ops = {
.probe = kexec_elf_probe,
.load = elf64_load,
};
| linux-master | arch/powerpc/kexec/elf_64.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* powerpc code to implement the kexec_file_load syscall
*
* Copyright (C) 2004 Adam Litke ([email protected])
* Copyright (C) 2004 IBM Corp.
* Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation
* Copyright (C) 2005 R Sharada ([email protected])
* Copyright (C) 2006 Mohan Kumar M ([email protected])
* Copyright (C) 2020 IBM Corporation
*
* Based on kexec-tools' kexec-ppc64.c, fs2dt.c.
* Heavily modified for the kernel by
* Hari Bathini, IBM Corporation.
*/
#define pr_fmt(fmt) "kexec ranges: " fmt
#include <linux/sort.h>
#include <linux/kexec.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/sections.h>
#include <asm/kexec_ranges.h>
/**
* get_max_nr_ranges - Get the max no. of ranges the crash_mem structure
* could hold, given the size allocated for it.
* @size: Allocation size of crash_mem structure.
*
* Returns the maximum no. of ranges.
*/
static inline unsigned int get_max_nr_ranges(size_t size)
{
return ((size - sizeof(struct crash_mem)) /
sizeof(struct range));
}
/**
* get_mem_rngs_size - Get the allocated size of mem_rngs based on
* max_nr_ranges and chunk size.
* @mem_rngs: Memory ranges.
*
* Returns the maximum size of @mem_rngs.
*/
static inline size_t get_mem_rngs_size(struct crash_mem *mem_rngs)
{
size_t size;
if (!mem_rngs)
return 0;
size = (sizeof(struct crash_mem) +
(mem_rngs->max_nr_ranges * sizeof(struct range)));
/*
* Memory is allocated in size multiple of MEM_RANGE_CHUNK_SZ.
* So, align to get the actual length.
*/
return ALIGN(size, MEM_RANGE_CHUNK_SZ);
}
/**
* __add_mem_range - add a memory range to memory ranges list.
* @mem_ranges: Range list to add the memory range to.
* @base: Base address of the range to add.
* @size: Size of the memory range to add.
*
* (Re)allocates memory, if needed.
*
* Returns 0 on success, negative errno on error.
*/
static int __add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
{
struct crash_mem *mem_rngs = *mem_ranges;
if (!mem_rngs || (mem_rngs->nr_ranges == mem_rngs->max_nr_ranges)) {
mem_rngs = realloc_mem_ranges(mem_ranges);
if (!mem_rngs)
return -ENOMEM;
}
mem_rngs->ranges[mem_rngs->nr_ranges].start = base;
mem_rngs->ranges[mem_rngs->nr_ranges].end = base + size - 1;
pr_debug("Added memory range [%#016llx - %#016llx] at index %d\n",
base, base + size - 1, mem_rngs->nr_ranges);
mem_rngs->nr_ranges++;
return 0;
}
/**
* __merge_memory_ranges - Merges the given memory ranges list.
* @mem_rngs: Range list to merge.
*
* Assumes a sorted range list.
*
* Returns nothing.
*/
static void __merge_memory_ranges(struct crash_mem *mem_rngs)
{
struct range *ranges;
int i, idx;
if (!mem_rngs)
return;
idx = 0;
ranges = &(mem_rngs->ranges[0]);
for (i = 1; i < mem_rngs->nr_ranges; i++) {
if (ranges[i].start <= (ranges[i-1].end + 1))
ranges[idx].end = ranges[i].end;
else {
idx++;
if (i == idx)
continue;
ranges[idx] = ranges[i];
}
}
mem_rngs->nr_ranges = idx + 1;
}
/* cmp_func_t callback to sort ranges with sort() */
static int rngcmp(const void *_x, const void *_y)
{
const struct range *x = _x, *y = _y;
if (x->start > y->start)
return 1;
if (x->start < y->start)
return -1;
return 0;
}
/**
* sort_memory_ranges - Sorts the given memory ranges list.
* @mem_rngs: Range list to sort.
* @merge: If true, merge the list after sorting.
*
* Returns nothing.
*/
void sort_memory_ranges(struct crash_mem *mem_rngs, bool merge)
{
int i;
if (!mem_rngs)
return;
/* Sort the ranges in-place */
sort(&(mem_rngs->ranges[0]), mem_rngs->nr_ranges,
sizeof(mem_rngs->ranges[0]), rngcmp, NULL);
if (merge)
__merge_memory_ranges(mem_rngs);
/* For debugging purposes */
pr_debug("Memory ranges:\n");
for (i = 0; i < mem_rngs->nr_ranges; i++) {
pr_debug("\t[%03d][%#016llx - %#016llx]\n", i,
mem_rngs->ranges[i].start,
mem_rngs->ranges[i].end);
}
}
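/*
 * Typical usage (sketch, mirroring callers in the kexec_file code):
 *
 *   struct crash_mem *rngs = NULL;
 *   int ret = add_mem_range(&rngs, base, size);
 *   if (!ret)
 *           sort_memory_ranges(rngs, true);
 */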
/**
* realloc_mem_ranges - reallocate mem_ranges with size incremented
* by MEM_RANGE_CHUNK_SZ. Frees up the old memory,
* if memory allocation fails.
* @mem_ranges: Memory ranges to reallocate.
*
* Returns pointer to reallocated memory on success, NULL otherwise.
*/
struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges)
{
struct crash_mem *mem_rngs = *mem_ranges;
unsigned int nr_ranges;
size_t size;
size = get_mem_rngs_size(mem_rngs);
nr_ranges = mem_rngs ? mem_rngs->nr_ranges : 0;
size += MEM_RANGE_CHUNK_SZ;
mem_rngs = krealloc(*mem_ranges, size, GFP_KERNEL);
if (!mem_rngs) {
kfree(*mem_ranges);
*mem_ranges = NULL;
return NULL;
}
mem_rngs->nr_ranges = nr_ranges;
mem_rngs->max_nr_ranges = get_max_nr_ranges(size);
*mem_ranges = mem_rngs;
return mem_rngs;
}
/**
* add_mem_range - Updates existing memory range, if there is an overlap.
* Else, adds a new memory range.
* @mem_ranges: Range list to add the memory range to.
* @base: Base address of the range to add.
* @size: Size of the memory range to add.
*
* (Re)allocates memory, if needed.
*
* Returns 0 on success, negative errno on error.
*/
int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
{
struct crash_mem *mem_rngs = *mem_ranges;
u64 mstart, mend, end;
unsigned int i;
if (!size)
return 0;
end = base + size - 1;
if (!mem_rngs || !(mem_rngs->nr_ranges))
return __add_mem_range(mem_ranges, base, size);
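/* If the new range overlaps an existing one, grow that entry in place. */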
for (i = 0; i < mem_rngs->nr_ranges; i++) {
mstart = mem_rngs->ranges[i].start;
mend = mem_rngs->ranges[i].end;
if (base < mend && end > mstart) {
if (base < mstart)
mem_rngs->ranges[i].start = base;
if (end > mend)
mem_rngs->ranges[i].end = end;
return 0;
}
}
return __add_mem_range(mem_ranges, base, size);
}
/**
* add_tce_mem_ranges - Adds tce-table range to the given memory ranges list.
* @mem_ranges: Range list to add the memory range(s) to.
*
* Returns 0 on success, negative errno on error.
*/
int add_tce_mem_ranges(struct crash_mem **mem_ranges)
{
struct device_node *dn = NULL;
int ret = 0;
for_each_node_by_type(dn, "pci") {
u64 base;
u32 size;
ret = of_property_read_u64(dn, "linux,tce-base", &base);
ret |= of_property_read_u32(dn, "linux,tce-size", &size);
if (ret) {
/*
* It is ok to have pci nodes without tce. So, ignore the
* error when the property does not exist.
*/
if (ret == -EINVAL) {
ret = 0;
continue;
}
break;
}
ret = add_mem_range(mem_ranges, base, size);
if (ret)
break;
}
of_node_put(dn);
return ret;
}
/**
* add_initrd_mem_range - Adds initrd range to the given memory ranges list,
* if the initrd was retained.
* @mem_ranges: Range list to add the memory range to.
*
* Returns 0 on success, negative errno on error.
*/
int add_initrd_mem_range(struct crash_mem **mem_ranges)
{
u64 base, end;
int ret;
/* This range means something only if the initrd was retained */
if (!strstr(saved_command_line, "retain_initrd"))
return 0;
ret = of_property_read_u64(of_chosen, "linux,initrd-start", &base);
ret |= of_property_read_u64(of_chosen, "linux,initrd-end", &end);
if (!ret)
ret = add_mem_range(mem_ranges, base, end - base + 1);
return ret;
}
#ifdef CONFIG_PPC_64S_HASH_MMU
/**
* add_htab_mem_range - Adds htab range to the given memory ranges list,
* if it exists
* @mem_ranges: Range list to add the memory range to.
*
* Returns 0 on success, negative errno on error.
*/
int add_htab_mem_range(struct crash_mem **mem_ranges)
{
if (!htab_address)
return 0;
return add_mem_range(mem_ranges, __pa(htab_address), htab_size_bytes);
}
#endif
/**
* add_kernel_mem_range - Adds kernel text region to the given
* memory ranges list.
* @mem_ranges: Range list to add the memory range to.
*
* Returns 0 on success, negative errno on error.
*/
int add_kernel_mem_range(struct crash_mem **mem_ranges)
{
return add_mem_range(mem_ranges, 0, __pa(_end));
}
/**
* add_rtas_mem_range - Adds RTAS region to the given memory ranges list.
* @mem_ranges: Range list to add the memory range to.
*
* Returns 0 on success, negative errno on error.
*/
int add_rtas_mem_range(struct crash_mem **mem_ranges)
{
struct device_node *dn;
u32 base, size;
int ret = 0;
dn = of_find_node_by_path("/rtas");
if (!dn)
return 0;
ret = of_property_read_u32(dn, "linux,rtas-base", &base);
ret |= of_property_read_u32(dn, "rtas-size", &size);
if (!ret)
ret = add_mem_range(mem_ranges, base, size);
of_node_put(dn);
return ret;
}
/**
* add_opal_mem_range - Adds OPAL region to the given memory ranges list.
* @mem_ranges: Range list to add the memory range to.
*
* Returns 0 on success, negative errno on error.
*/
int add_opal_mem_range(struct crash_mem **mem_ranges)
{
struct device_node *dn;
u64 base, size;
int ret;
dn = of_find_node_by_path("/ibm,opal");
if (!dn)
return 0;
ret = of_property_read_u64(dn, "opal-base-address", &base);
ret |= of_property_read_u64(dn, "opal-runtime-size", &size);
if (!ret)
ret = add_mem_range(mem_ranges, base, size);
of_node_put(dn);
return ret;
}
/**
* add_reserved_mem_ranges - Adds "/reserved-ranges" regions exported by f/w
* to the given memory ranges list.
* @mem_ranges: Range list to add the memory ranges to.
*
* Returns 0 on success, negative errno on error.
*/
int add_reserved_mem_ranges(struct crash_mem **mem_ranges)
{
int n_mem_addr_cells, n_mem_size_cells, i, len, cells, ret = 0;
const __be32 *prop;
prop = of_get_property(of_root, "reserved-ranges", &len);
if (!prop)
return 0;
n_mem_addr_cells = of_n_addr_cells(of_root);
n_mem_size_cells = of_n_size_cells(of_root);
cells = n_mem_addr_cells + n_mem_size_cells;
/* Each reserved range is an (address,size) pair */
for (i = 0; i < (len / (sizeof(u32) * cells)); i++) {
u64 base, size;
base = of_read_number(prop + (i * cells), n_mem_addr_cells);
size = of_read_number(prop + (i * cells) + n_mem_addr_cells,
n_mem_size_cells);
ret = add_mem_range(mem_ranges, base, size);
if (ret)
break;
}
return ret;
}
| linux-master | arch/powerpc/kexec/ranges.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Code to handle transition of Linux booting another kernel.
*
* Copyright (C) 2002-2003 Eric Biederman <[email protected]>
* GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
* Copyright (C) 2005 IBM Corporation.
*/
#include <linux/kexec.h>
#include <linux/reboot.h>
#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/ftrace.h>
#include <asm/kdump.h>
#include <asm/machdep.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/firmware.h>
void machine_kexec_mask_interrupts(void)
{
unsigned int i;
struct irq_desc *desc;
for_each_irq_desc(i, desc) {
struct irq_chip *chip;
chip = irq_desc_get_chip(desc);
if (!chip)
continue;
if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
chip->irq_eoi(&desc->irq_data);
if (chip->irq_mask)
chip->irq_mask(&desc->irq_data);
if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
chip->irq_disable(&desc->irq_data);
}
}
void machine_crash_shutdown(struct pt_regs *regs)
{
default_machine_crash_shutdown(regs);
}
void machine_kexec_cleanup(struct kimage *image)
{
}
void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_NUMA
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
#ifndef CONFIG_NUMA
VMCOREINFO_SYMBOL(contig_page_data);
#endif
#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
VMCOREINFO_SYMBOL(vmemmap_list);
VMCOREINFO_SYMBOL(mmu_vmemmap_psize);
VMCOREINFO_SYMBOL(mmu_psize_defs);
VMCOREINFO_STRUCT_SIZE(vmemmap_backing);
VMCOREINFO_OFFSET(vmemmap_backing, list);
VMCOREINFO_OFFSET(vmemmap_backing, phys);
VMCOREINFO_OFFSET(vmemmap_backing, virt_addr);
VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
VMCOREINFO_OFFSET(mmu_psize_def, shift);
#endif
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}
/*
* Do not allocate memory (or fail in any way) in machine_kexec().
* We are past the point of no return, committed to rebooting now.
*/
void machine_kexec(struct kimage *image)
{
int save_ftrace_enabled;
save_ftrace_enabled = __ftrace_enabled_save();
this_cpu_disable_ftrace();
if (ppc_md.machine_kexec)
ppc_md.machine_kexec(image);
else
default_machine_kexec(image);
this_cpu_enable_ftrace();
__ftrace_enabled_restore(save_ftrace_enabled);
/* Fall back to normal restart if we're still alive. */
machine_restart(NULL);
for(;;);
}
void __init reserve_crashkernel(void)
{
unsigned long long crash_size, crash_base, total_mem_sz;
int ret;
total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
/* use common parsing */
ret = parse_crashkernel(boot_command_line, total_mem_sz,
&crash_size, &crash_base);
if (ret == 0 && crash_size > 0) {
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
}
if (crashk_res.end == crashk_res.start) {
crashk_res.start = crashk_res.end = 0;
return;
}
/* We might have got these values via the command line or the
* device tree, either way sanitise them now. */
crash_size = resource_size(&crashk_res);
#ifndef CONFIG_NONSTATIC_KERNEL
if (crashk_res.start != KDUMP_KERNELBASE)
printk("Crash kernel location must be 0x%x\n",
KDUMP_KERNELBASE);
crashk_res.start = KDUMP_KERNELBASE;
#else
if (!crashk_res.start) {
#ifdef CONFIG_PPC64
/*
* On the LPAR platform, place the crash kernel at the middle of
* the RMA size (capped at 512MB) so that the crash kernel gets
* enough space for itself and some stack within the first
* segment, while the normal kernel still has enough space to
* allocate essential system resources in the first segment.
* Start the crash kernel at a 128MB offset on other platforms.
*/
if (firmware_has_feature(FW_FEATURE_LPAR))
crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_512M);
else
crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_128M);
#else
crashk_res.start = KDUMP_KERNELBASE;
#endif
}
crash_base = PAGE_ALIGN(crashk_res.start);
if (crash_base != crashk_res.start) {
printk("Crash kernel base must be aligned to 0x%lx\n",
PAGE_SIZE);
crashk_res.start = crash_base;
}
#endif
crash_size = PAGE_ALIGN(crash_size);
crashk_res.end = crashk_res.start + crash_size - 1;
/* The crash region must not overlap the current kernel */
if (overlaps_crashkernel(__pa(_stext), _end - _stext)) {
printk(KERN_WARNING
"Crash kernel can not overlap current kernel\n");
crashk_res.start = crashk_res.end = 0;
return;
}
/* Crash kernel trumps memory limit */
if (memory_limit && memory_limit <= crashk_res.end) {
memory_limit = crashk_res.end + 1;
total_mem_sz = memory_limit;
printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
memory_limit);
}
printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
"for crashkernel (System RAM: %ldMB)\n",
(unsigned long)(crash_size >> 20),
(unsigned long)(crashk_res.start >> 20),
(unsigned long)(total_mem_sz >> 20));
if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
memblock_reserve(crashk_res.start, crash_size)) {
pr_err("Failed to reserve memory for crashkernel!\n");
crashk_res.start = crashk_res.end = 0;
return;
}
}
int __init overlaps_crashkernel(unsigned long start, unsigned long size)
{
return (start + size) > crashk_res.start && start <= crashk_res.end;
}
/* Values we need to export to the second kernel via the device tree. */
static phys_addr_t kernel_end;
static phys_addr_t crashk_base;
static phys_addr_t crashk_size;
static unsigned long long mem_limit;
static struct property kernel_end_prop = {
.name = "linux,kernel-end",
.length = sizeof(phys_addr_t),
.value = &kernel_end,
};
static struct property crashk_base_prop = {
.name = "linux,crashkernel-base",
.length = sizeof(phys_addr_t),
.value = &crashk_base
};
static struct property crashk_size_prop = {
.name = "linux,crashkernel-size",
.length = sizeof(phys_addr_t),
.value = &crashk_size,
};
static struct property memory_limit_prop = {
.name = "linux,memory-limit",
.length = sizeof(unsigned long long),
.value = &mem_limit,
};
#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG)
static void __init export_crashk_values(struct device_node *node)
{
/* There might be existing crash kernel properties, but we can't
* be sure what's in them, so remove them. */
of_remove_property(node, of_find_property(node,
"linux,crashkernel-base", NULL));
of_remove_property(node, of_find_property(node,
"linux,crashkernel-size", NULL));
if (crashk_res.start != 0) {
crashk_base = cpu_to_be_ulong(crashk_res.start);
of_add_property(node, &crashk_base_prop);
crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
of_add_property(node, &crashk_size_prop);
}
/*
* memory_limit is required by the kexec-tools to limit the
* crash regions to the actual memory used.
*/
mem_limit = cpu_to_be_ulong(memory_limit);
of_update_property(node, &memory_limit_prop);
}
static int __init kexec_setup(void)
{
struct device_node *node;
node = of_find_node_by_path("/chosen");
if (!node)
return -ENOENT;
/* remove any stale properties so ours can be found */
of_remove_property(node, of_find_property(node, kernel_end_prop.name, NULL));
/* information needed by userspace when using default_machine_kexec */
kernel_end = cpu_to_be_ulong(__pa(_end));
of_add_property(node, &kernel_end_prop);
export_crashk_values(node);
of_node_put(node);
return 0;
}
late_initcall(kexec_setup);
| linux-master | arch/powerpc/kexec/core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PPC32 code to handle Linux booting another kernel.
*
* Copyright (C) 2002-2003 Eric Biederman <[email protected]>
* GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
* Copyright (C) 2005 IBM Corporation.
*/
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <asm/hw_irq.h>
#include <asm/io.h>
typedef void (*relocate_new_kernel_t)(
unsigned long indirection_page,
unsigned long reboot_code_buffer,
unsigned long start_address) __noreturn;
/*
* This is a generic machine_kexec function suitable at least for
* non-OpenFirmware embedded platforms.
* It merely copies the image relocation code to the control page and
* jumps to it.
* A platform specific function may just call this one.
*/
void default_machine_kexec(struct kimage *image)
{
extern const unsigned int relocate_new_kernel_size;
unsigned long page_list;
unsigned long reboot_code_buffer, reboot_code_buffer_phys;
relocate_new_kernel_t rnk;
/* Interrupts aren't acceptable while we reboot */
local_irq_disable();
/* mask each interrupt so we are in a more sane state for the
* kexec kernel */
machine_kexec_mask_interrupts();
page_list = image->head;
/* we need both effective and real address here */
reboot_code_buffer =
(unsigned long)page_address(image->control_code_page);
reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer);
/* copy our kernel relocation code to the control code page */
memcpy((void *)reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
flush_icache_range(reboot_code_buffer,
reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n");
if (!IS_ENABLED(CONFIG_PPC_85xx) && !IS_ENABLED(CONFIG_44x))
relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start);
/* now call it */
rnk = (relocate_new_kernel_t) reboot_code_buffer;
(*rnk)(page_list, reboot_code_buffer_phys, image->start);
}
int machine_kexec_prepare(struct kimage *image)
{
return 0;
}
| linux-master | arch/powerpc/kexec/core_32.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ppc64 code to implement the kexec_file_load syscall
*
* Copyright (C) 2004 Adam Litke ([email protected])
* Copyright (C) 2004 IBM Corp.
* Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation
* Copyright (C) 2005 R Sharada ([email protected])
* Copyright (C) 2006 Mohan Kumar M ([email protected])
* Copyright (C) 2020 IBM Corporation
*
* Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
* Heavily modified for the kernel by
* Hari Bathini, IBM Corporation.
*/
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/drmem.h>
#include <asm/firmware.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>
#include <asm/mmzone.h>
#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/plpks.h>
struct umem_info {
u64 *buf; /* data buffer for usable-memory property */
u32 size; /* size allocated for the data buffer */
u32 max_entries; /* maximum no. of entries */
u32 idx; /* index of current entry */
/* usable memory ranges to look up */
unsigned int nr_ranges;
const struct range *ranges;
};
const struct kexec_file_ops * const kexec_file_loaders[] = {
&kexec_elf64_ops,
NULL
};
/**
* get_exclude_memory_ranges - Get exclude memory ranges. This list includes
* regions like opal/rtas, tce-table, initrd,
* kernel, htab which should be avoided while
* setting up kexec load segments.
* @mem_ranges: Range list to add the memory ranges to.
*
* Returns 0 on success, negative errno on error.
*/
static int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
{
int ret;
ret = add_tce_mem_ranges(mem_ranges);
if (ret)
goto out;
ret = add_initrd_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_htab_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_kernel_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_rtas_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_opal_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_reserved_mem_ranges(mem_ranges);
if (ret)
goto out;
/* exclude memory ranges should be sorted for easy lookup */
sort_memory_ranges(*mem_ranges, true);
out:
if (ret)
pr_err("Failed to setup exclude memory ranges\n");
return ret;
}
/**
* get_usable_memory_ranges - Get usable memory ranges. This list includes
* regions like crashkernel, opal/rtas & tce-table,
* that kdump kernel could use.
* @mem_ranges: Range list to add the memory ranges to.
*
* Returns 0 on success, negative errno on error.
*/
static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
{
int ret;
/*
* Early boot failure observed on guests when low memory (first memory
* block?) is not added to usable memory. So, add [0, crashk_res.end]
* instead of [crashk_res.start, crashk_res.end] to workaround it.
* Also, the crashed kernel's memory must be added to the reserve map
* to prevent the kdump kernel from using it.
*/
ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
if (ret)
goto out;
ret = add_rtas_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_opal_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_tce_mem_ranges(mem_ranges);
out:
if (ret)
pr_err("Failed to setup usable memory ranges\n");
return ret;
}
/**
* get_crash_memory_ranges - Get crash memory ranges. This list includes
* first/crashing kernel's memory regions that
* would be exported via an elfcore.
* @mem_ranges: Range list to add the memory ranges to.
*
* Returns 0 on success, negative errno on error.
*/
static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
{
phys_addr_t base, end;
struct crash_mem *tmem;
u64 i;
int ret;
for_each_mem_range(i, &base, &end) {
u64 size = end - base;
/* Skip backup memory region, which needs a separate entry */
if (base == BACKUP_SRC_START) {
if (size > BACKUP_SRC_SIZE) {
base = BACKUP_SRC_END + 1;
size -= BACKUP_SRC_SIZE;
} else
continue;
}
ret = add_mem_range(mem_ranges, base, size);
if (ret)
goto out;
/* Try merging adjacent ranges before reallocation attempt */
if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
sort_memory_ranges(*mem_ranges, true);
}
/* Reallocate memory ranges if there is no space to split ranges */
tmem = *mem_ranges;
if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
tmem = realloc_mem_ranges(mem_ranges);
if (!tmem)
goto out;
}
/* Exclude crashkernel region */
ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
if (ret)
goto out;
/*
* FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
* regions are exported to save their context at the time of
* crash, they should actually be backed up just like the
* first 64K bytes of memory.
*/
ret = add_rtas_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_opal_mem_range(mem_ranges);
if (ret)
goto out;
/* create a separate program header for the backup region */
ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
if (ret)
goto out;
sort_memory_ranges(*mem_ranges, false);
out:
if (ret)
pr_err("Failed to setup crash memory ranges\n");
return ret;
}
/**
* get_reserved_memory_ranges - Get reserved memory ranges. This list includes
* memory regions that should be added to the
* memory reserve map to ensure the region is
* protected from any mischief.
* @mem_ranges: Range list to add the memory ranges to.
*
* Returns 0 on success, negative errno on error.
*/
static int get_reserved_memory_ranges(struct crash_mem **mem_ranges)
{
int ret;
ret = add_rtas_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_tce_mem_ranges(mem_ranges);
if (ret)
goto out;
ret = add_reserved_mem_ranges(mem_ranges);
out:
if (ret)
pr_err("Failed to setup reserved memory ranges\n");
return ret;
}
/**
* __locate_mem_hole_top_down - Looks top down for a large enough memory hole
* in the memory regions between buf_min & buf_max
* for the buffer. If found, sets kbuf->mem.
* @kbuf: Buffer contents and memory parameters.
* @buf_min: Minimum address for the buffer.
* @buf_max: Maximum address for the buffer.
*
* Returns 0 on success, negative errno on error.
*/
static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
u64 buf_min, u64 buf_max)
{
int ret = -EADDRNOTAVAIL;
phys_addr_t start, end;
u64 i;
for_each_mem_range_rev(i, &start, &end) {
/*
* memblock uses [start, end) convention while it is
* [start, end] here. Fix the off-by-one to have the
* same convention.
*/
end -= 1;
if (start > buf_max)
continue;
/* Memory hole not found */
if (end < buf_min)
break;
/* Adjust memory region based on the given range */
if (start < buf_min)
start = buf_min;
if (end > buf_max)
end = buf_max;
start = ALIGN(start, kbuf->buf_align);
if (start < end && (end - start + 1) >= kbuf->memsz) {
/* Suitable memory range found. Set kbuf->mem */
kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
kbuf->buf_align);
ret = 0;
break;
}
}
return ret;
}
/**
* locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
* suitable buffer with top down approach.
* @kbuf: Buffer contents and memory parameters.
* @buf_min: Minimum address for the buffer.
* @buf_max: Maximum address for the buffer.
* @emem: Exclude memory ranges.
*
* Returns 0 on success, negative errno on error.
*/
static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
u64 buf_min, u64 buf_max,
const struct crash_mem *emem)
{
int i, ret = 0, err = -EADDRNOTAVAIL;
u64 start, end, tmin, tmax;
tmax = buf_max;
for (i = (emem->nr_ranges - 1); i >= 0; i--) {
start = emem->ranges[i].start;
end = emem->ranges[i].end;
if (start > tmax)
continue;
if (end < tmax) {
tmin = (end < buf_min ? buf_min : end + 1);
ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
if (!ret)
return 0;
}
tmax = start - 1;
if (tmax < buf_min) {
ret = err;
break;
}
ret = 0;
}
if (!ret) {
tmin = buf_min;
ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
}
return ret;
}
/**
* __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
* in the memory regions between buf_min & buf_max
* for the buffer. If found, sets kbuf->mem.
* @kbuf: Buffer contents and memory parameters.
* @buf_min: Minimum address for the buffer.
* @buf_max: Maximum address for the buffer.
*
* Returns 0 on success, negative errno on error.
*/
static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
u64 buf_min, u64 buf_max)
{
int ret = -EADDRNOTAVAIL;
phys_addr_t start, end;
u64 i;
for_each_mem_range(i, &start, &end) {
/*
* memblock uses [start, end) convention while it is
* [start, end] here. Fix the off-by-one to have the
* same convention.
*/
end -= 1;
if (end < buf_min)
continue;
/* Memory hole not found */
if (start > buf_max)
break;
/* Adjust memory region based on the given range */
if (start < buf_min)
start = buf_min;
if (end > buf_max)
end = buf_max;
start = ALIGN(start, kbuf->buf_align);
if (start < end && (end - start + 1) >= kbuf->memsz) {
/* Suitable memory range found. Set kbuf->mem */
kbuf->mem = start;
ret = 0;
break;
}
}
return ret;
}
/**
* locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
* suitable buffer with bottom up approach.
* @kbuf: Buffer contents and memory parameters.
* @buf_min: Minimum address for the buffer.
* @buf_max: Maximum address for the buffer.
* @emem: Exclude memory ranges.
*
* Returns 0 on success, negative errno on error.
*/
static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
u64 buf_min, u64 buf_max,
const struct crash_mem *emem)
{
int i, ret = 0, err = -EADDRNOTAVAIL;
u64 start, end, tmin, tmax;
tmin = buf_min;
for (i = 0; i < emem->nr_ranges; i++) {
start = emem->ranges[i].start;
end = emem->ranges[i].end;
if (end < tmin)
continue;
if (start > tmin) {
tmax = (start > buf_max ? buf_max : start - 1);
ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
if (!ret)
return 0;
}
tmin = end + 1;
if (tmin > buf_max) {
ret = err;
break;
}
ret = 0;
}
if (!ret) {
tmax = buf_max;
ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
}
return ret;
}
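/*
* Illustrative walk-through (not part of the original source): with
* buf_min = 0x10000000, buf_max = 0x4fffffff and a single exclude range
* [0x20000000, 0x2fffffff], the bottom-up variant first probes
* [0x10000000, 0x1fffffff] via __locate_mem_hole_bottom_up() and, if that
* fails, moves tmin past the exclude range and probes
* [0x30000000, 0x4fffffff]. The top-down variant does the mirror image,
* starting from buf_max.
*/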
/**
* check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
* @um_info: Usable memory buffer and ranges info.
* @cnt: No. of entries to accommodate.
*
* Frees up the old buffer if memory reallocation fails.
*
* Returns buffer on success, NULL on error.
*/
static u64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
{
u32 new_size;
u64 *tbuf;
if ((um_info->idx + cnt) <= um_info->max_entries)
return um_info->buf;
new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
if (tbuf) {
um_info->buf = tbuf;
um_info->size = new_size;
um_info->max_entries = (um_info->size / sizeof(u64));
}
return tbuf;
}
/**
* add_usable_mem - Add the usable memory ranges within the given memory range
* to the buffer
* @um_info: Usable memory buffer and ranges info.
* @base: Base address of memory range to look for.
* @end: End address of memory range to look for.
*
* Returns 0 on success, negative errno on error.
*/
static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end)
{
u64 loc_base, loc_end;
bool add;
int i;
for (i = 0; i < um_info->nr_ranges; i++) {
add = false;
loc_base = um_info->ranges[i].start;
loc_end = um_info->ranges[i].end;
if (loc_base >= base && loc_end <= end)
add = true;
else if (base < loc_end && end > loc_base) {
if (loc_base < base)
loc_base = base;
if (loc_end > end)
loc_end = end;
add = true;
}
if (add) {
if (!check_realloc_usable_mem(um_info, 2))
return -ENOMEM;
um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
um_info->buf[um_info->idx++] =
cpu_to_be64(loc_end - loc_base + 1);
}
}
return 0;
}
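/*
* Worked example (illustrative only): with a usable range of
* [0x1000, 0x1fff] that falls inside the node range [0x0, 0x3fff],
* add_usable_mem() appends two big-endian u64 entries to um_info->buf:
* cpu_to_be64(0x1000) followed by cpu_to_be64(0x1000), i.e. the base and
* its size (loc_end - loc_base + 1).
*/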
/**
* kdump_setup_usable_lmb - This is a callback function that gets called by
* walk_drmem_lmbs for every LMB to set its
* usable memory ranges.
* @lmb: LMB info.
* @usm: linux,drconf-usable-memory property value.
* @data: Pointer to usable memory buffer and ranges info.
*
* Returns 0 on success, negative errno on error.
*/
static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
void *data)
{
struct umem_info *um_info;
int tmp_idx, ret;
u64 base, end;
/*
* kdump load isn't supported on kernels already booted with
* linux,drconf-usable-memory property.
*/
if (*usm) {
pr_err("linux,drconf-usable-memory property already exists!");
return -EINVAL;
}
um_info = data;
tmp_idx = um_info->idx;
if (!check_realloc_usable_mem(um_info, 1))
return -ENOMEM;
um_info->idx++;
base = lmb->base_addr;
end = base + drmem_lmb_size() - 1;
ret = add_usable_mem(um_info, base, end);
if (!ret) {
/*
* Update the no. of ranges added. Two entries (base & size)
* for every range added.
*/
um_info->buf[tmp_idx] =
cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
}
return ret;
}
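/*
* Layout note (illustrative, derived from the code above): for each LMB,
* the linux,drconf-usable-memory property gets a leading count word
* reserved at tmp_idx, followed by the (base, size) pairs written by
* add_usable_mem(). E.g. one usable range within an LMB yields three
* u64s: cpu_to_be64(1), cpu_to_be64(base), cpu_to_be64(size); an LMB
* with no usable memory yields just cpu_to_be64(0).
*/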
#define NODE_PATH_LEN 256
/**
* add_usable_mem_property - Add usable memory property for the given
* memory node.
* @fdt: Flattened device tree for the kdump kernel.
* @dn: Memory node.
* @um_info: Usable memory buffer and ranges info.
*
* Returns 0 on success, negative errno on error.
*/
static int add_usable_mem_property(void *fdt, struct device_node *dn,
struct umem_info *um_info)
{
int n_mem_addr_cells, n_mem_size_cells, node;
char path[NODE_PATH_LEN];
int i, len, ranges, ret;
const __be32 *prop;
u64 base, end;
of_node_get(dn);
if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {
pr_err("Buffer (%d) too small for memory node: %pOF\n",
NODE_PATH_LEN, dn);
return -EOVERFLOW;
}
pr_debug("Memory node path: %s\n", path);
/* Now that we know the path, find its offset in kdump kernel's fdt */
node = fdt_path_offset(fdt, path);
if (node < 0) {
pr_err("Malformed device tree: error reading %s\n", path);
ret = -EINVAL;
goto out;
}
/* Get the address & size cells */
n_mem_addr_cells = of_n_addr_cells(dn);
n_mem_size_cells = of_n_size_cells(dn);
pr_debug("address cells: %d, size cells: %d\n", n_mem_addr_cells,
n_mem_size_cells);
um_info->idx = 0;
if (!check_realloc_usable_mem(um_info, 2)) {
ret = -ENOMEM;
goto out;
}
prop = of_get_property(dn, "reg", &len);
if (!prop || len <= 0) {
ret = 0;
goto out;
}
/*
* "reg" property represents sequence of (addr,size) tuples
* each representing a memory range.
*/
ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
for (i = 0; i < ranges; i++) {
base = of_read_number(prop, n_mem_addr_cells);
prop += n_mem_addr_cells;
end = base + of_read_number(prop, n_mem_size_cells) - 1;
prop += n_mem_size_cells;
ret = add_usable_mem(um_info, base, end);
if (ret)
goto out;
}
/*
* No kdump kernel usable memory found in this memory node.
* Write (0,0) tuple in linux,usable-memory property for
* this region to be ignored.
*/
if (um_info->idx == 0) {
um_info->buf[0] = 0;
um_info->buf[1] = 0;
um_info->idx = 2;
}
ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
(um_info->idx * sizeof(u64)));
out:
of_node_put(dn);
return ret;
}
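/*
* Cell arithmetic example (illustrative): with #address-cells = 2 and
* #size-cells = 2, each "reg" tuple occupies 4 cells (16 bytes), so a
* property of len = 32 bytes gives (len >> 2) = 8 cells and
* 8 / (2 + 2) = 2 memory ranges to feed into add_usable_mem().
*/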
/**
* update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
* and linux,drconf-usable-memory DT properties as
* appropriate to restrict its memory usage.
* @fdt: Flattened device tree for the kdump kernel.
* @usable_mem: Usable memory ranges for kdump kernel.
*
* Returns 0 on success, negative errno on error.
*/
static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
{
struct umem_info um_info;
struct device_node *dn;
int node, ret = 0;
if (!usable_mem) {
pr_err("Usable memory ranges for kdump kernel not found\n");
return -ENOENT;
}
node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
if (node == -FDT_ERR_NOTFOUND)
pr_debug("No dynamic reconfiguration memory found\n");
else if (node < 0) {
pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
return -EINVAL;
}
um_info.buf = NULL;
um_info.size = 0;
um_info.max_entries = 0;
um_info.idx = 0;
/* Memory ranges to look up */
um_info.ranges = &(usable_mem->ranges[0]);
um_info.nr_ranges = usable_mem->nr_ranges;
dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (dn) {
ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb);
of_node_put(dn);
if (ret) {
pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");
goto out;
}
ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
um_info.buf, (um_info.idx * sizeof(u64)));
if (ret) {
pr_err("Failed to update fdt with linux,drconf-usable-memory property: %s",
fdt_strerror(ret));
goto out;
}
}
/*
* Walk through each memory node and set linux,usable-memory property
* for the corresponding node in kdump kernel's fdt.
*/
for_each_node_by_type(dn, "memory") {
ret = add_usable_mem_property(fdt, dn, &um_info);
if (ret) {
pr_err("Failed to set linux,usable-memory property for %s node",
dn->full_name);
of_node_put(dn);
goto out;
}
}
out:
kfree(um_info.buf);
return ret;
}
/**
* load_backup_segment - Locate a memory hole to place the backup region.
* @image: Kexec image.
* @kbuf: Buffer contents and memory parameters.
*
* Returns 0 on success, negative errno on error.
*/
static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
{
void *buf;
int ret;
/*
* Setup a source buffer for backup segment.
*
* A source buffer has no meaning for the backup region as its data
* will be copied from the backup source, after the crash, in the
* purgatory. But as the load segment code doesn't recognize such
* segments, set up a dummy source buffer to keep it happy for now.
*/
buf = vzalloc(BACKUP_SRC_SIZE);
if (!buf)
return -ENOMEM;
kbuf->buffer = buf;
kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
kbuf->top_down = false;
ret = kexec_add_buffer(kbuf);
if (ret) {
vfree(buf);
return ret;
}
image->arch.backup_buf = buf;
image->arch.backup_start = kbuf->mem;
return 0;
}
/**
* update_backup_region_phdr - Update backup region's offset for the core to
* export the region appropriately.
* @image: Kexec image.
* @ehdr: ELF core header.
*
* Assumes an exclusive program header is set up for the backup region
* in the ELF headers.
*
* Returns nothing.
*/
static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
{
Elf64_Phdr *phdr;
unsigned int i;
phdr = (Elf64_Phdr *)(ehdr + 1);
for (i = 0; i < ehdr->e_phnum; i++) {
if (phdr->p_paddr == BACKUP_SRC_START) {
phdr->p_offset = image->arch.backup_start;
pr_debug("Backup region offset updated to 0x%lx\n",
image->arch.backup_start);
return;
}
}
}
/**
* load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
* segment needed to load kdump kernel.
* @image: Kexec image.
* @kbuf: Buffer contents and memory parameters.
*
* Returns 0 on success, negative errno on error.
*/
static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
{
struct crash_mem *cmem = NULL;
unsigned long headers_sz;
void *headers = NULL;
int ret;
ret = get_crash_memory_ranges(&cmem);
if (ret)
goto out;
/* Setup elfcorehdr segment */
ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz);
if (ret) {
pr_err("Failed to prepare elf headers for the core\n");
goto out;
}
/* Fix the offset for backup region in the ELF header */
update_backup_region_phdr(image, headers);
kbuf->buffer = headers;
kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
kbuf->bufsz = kbuf->memsz = headers_sz;
kbuf->top_down = false;
ret = kexec_add_buffer(kbuf);
if (ret) {
vfree(headers);
goto out;
}
image->elf_load_addr = kbuf->mem;
image->elf_headers_sz = headers_sz;
image->elf_headers = headers;
out:
kfree(cmem);
return ret;
}
/**
* load_crashdump_segments_ppc64 - Initialize the additional segments needed
* to load kdump kernel.
* @image: Kexec image.
* @kbuf: Buffer contents and memory parameters.
*
* Returns 0 on success, negative errno on error.
*/
int load_crashdump_segments_ppc64(struct kimage *image,
struct kexec_buf *kbuf)
{
int ret;
/* Load backup segment - first 64K bytes of the crashing kernel */
ret = load_backup_segment(image, kbuf);
if (ret) {
pr_err("Failed to load backup segment\n");
return ret;
}
pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem);
/* Load elfcorehdr segment - to export crashing kernel's vmcore */
ret = load_elfcorehdr_segment(image, kbuf);
if (ret) {
pr_err("Failed to load elfcorehdr segment\n");
return ret;
}
pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
image->elf_load_addr, kbuf->bufsz, kbuf->memsz);
return 0;
}
/**
* setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
* variables and call setup_purgatory() to initialize
* common global variables.
* @image: kexec image.
* @slave_code: Slave code for the purgatory.
* @fdt: Flattened device tree for the next kernel.
* @kernel_load_addr: Address where the kernel is loaded.
* @fdt_load_addr: Address where the flattened device tree is loaded.
*
* Returns 0 on success, negative errno on error.
*/
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
const void *fdt, unsigned long kernel_load_addr,
unsigned long fdt_load_addr)
{
struct device_node *dn = NULL;
int ret;
ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
fdt_load_addr);
if (ret)
goto out;
if (image->type == KEXEC_TYPE_CRASH) {
u32 my_run_at_load = 1;
/*
* Tell relocatable kernel to run at load address
* via the word meant for that at 0x5c.
*/
ret = kexec_purgatory_get_set_symbol(image, "run_at_load",
&my_run_at_load,
sizeof(my_run_at_load),
false);
if (ret)
goto out;
}
/* Tell purgatory where to look for backup region */
ret = kexec_purgatory_get_set_symbol(image, "backup_start",
&image->arch.backup_start,
sizeof(image->arch.backup_start),
false);
if (ret)
goto out;
/* Setup OPAL base & entry values */
dn = of_find_node_by_path("/ibm,opal");
if (dn) {
u64 val;
of_property_read_u64(dn, "opal-base-address", &val);
ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
sizeof(val), false);
if (ret)
goto out;
of_property_read_u64(dn, "opal-entry-address", &val);
ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
sizeof(val), false);
}
out:
if (ret)
pr_err("Failed to setup purgatory symbols");
of_node_put(dn);
return ret;
}
/**
* cpu_node_size - Compute the size of a CPU node in the FDT.
* This should be done only once and the value is stored in
* a static variable.
* Returns the max size of a CPU node in the FDT.
*/
static unsigned int cpu_node_size(void)
{
static unsigned int size;
struct device_node *dn;
struct property *pp;
/*
* Don't compute it twice, we are assuming that the per CPU node size
* doesn't change during the system's life.
*/
if (size)
return size;
dn = of_find_node_by_type(NULL, "cpu");
if (WARN_ON_ONCE(!dn)) {
// Unlikely to happen
return 0;
}
/*
* We compute the sub node size for a CPU node, assuming it
* will be the same for all.
*/
size += strlen(dn->name) + 5;
for_each_property_of_node(dn, pp) {
size += strlen(pp->name);
size += pp->length;
}
of_node_put(dn);
return size;
}
/**
* kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
* setup FDT for kexec/kdump kernel.
* @image: kexec image being loaded.
*
* Returns the estimated extra size needed for kexec/kdump kernel FDT.
*/
unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
{
unsigned int cpu_nodes, extra_size = 0;
struct device_node *dn;
u64 usm_entries;
// Budget some space for the password blob. There's already extra space
// for the key name
if (plpks_is_available())
extra_size += (unsigned int)plpks_get_passwordlen();
if (image->type != KEXEC_TYPE_CRASH)
return extra_size;
/*
* For kdump kernel, account for linux,usable-memory and
* linux,drconf-usable-memory properties. Get an approximation of the
* number of usable memory entries and use it for FDT size estimation.
*/
if (drmem_lmb_size()) {
usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) +
(2 * (resource_size(&crashk_res) / drmem_lmb_size())));
extra_size += (unsigned int)(usm_entries * sizeof(u64));
}
/*
* Get the number of CPU nodes in the current DT. This allows reserving
* space for CPU nodes added since boot time.
*/
cpu_nodes = 0;
for_each_node_by_type(dn, "cpu") {
cpu_nodes++;
}
if (cpu_nodes > boot_cpu_node_count)
extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size();
return extra_size;
}
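/*
* Rough numbers (illustrative only): with a 256MB LMB size, 64GB of
* memory_hotplug_max() and a 1GB crashkernel region, the estimate above
* gives 256 + 2 * 4 = 264 usable-memory entries, i.e. 264 * sizeof(u64) =
* 2112 extra bytes budgeted for the FDT, before any extra CPU nodes.
*/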
/**
* add_node_props - Reads node properties from a device node structure and adds
* them to the fdt.
* @fdt: Flattened device tree of the kernel
* @node_offset: offset of the node to add a property at
* @dn: device node pointer
*
* Returns 0 on success, negative errno on error.
*/
static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
{
int ret = 0;
struct property *pp;
if (!dn)
return -EINVAL;
for_each_property_of_node(dn, pp) {
ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
if (ret < 0) {
pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
return ret;
}
}
return ret;
}
/**
* update_cpus_node - Update cpus node of flattened device tree using of_root
* device node.
* @fdt: Flattened device tree of the kernel.
*
* Returns 0 on success, negative errno on error.
*/
static int update_cpus_node(void *fdt)
{
struct device_node *cpus_node, *dn;
int cpus_offset, cpus_subnode_offset, ret = 0;
cpus_offset = fdt_path_offset(fdt, "/cpus");
if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
pr_err("Malformed device tree: error reading /cpus node: %s\n",
fdt_strerror(cpus_offset));
return cpus_offset;
}
if (cpus_offset > 0) {
ret = fdt_del_node(fdt, cpus_offset);
if (ret < 0) {
pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
return -EINVAL;
}
}
/* Add cpus node to fdt */
cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
if (cpus_offset < 0) {
pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
return -EINVAL;
}
/* Add cpus node properties */
cpus_node = of_find_node_by_path("/cpus");
ret = add_node_props(fdt, cpus_offset, cpus_node);
of_node_put(cpus_node);
if (ret < 0)
return ret;
/* Loop through all subnodes of cpus and add them to fdt */
for_each_node_by_type(dn, "cpu") {
cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
if (cpus_subnode_offset < 0) {
pr_err("Unable to add %s subnode: %s\n", dn->full_name,
fdt_strerror(cpus_subnode_offset));
ret = cpus_subnode_offset;
goto out;
}
ret = add_node_props(fdt, cpus_subnode_offset, dn);
if (ret < 0)
goto out;
}
out:
of_node_put(dn);
return ret;
}
static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
const char *propname)
{
const void *prop, *fdtprop;
int len = 0, fdtlen = 0;
prop = of_get_property(dn, propname, &len);
fdtprop = fdt_getprop(fdt, node_offset, propname, &fdtlen);
if (fdtprop && !prop)
return fdt_delprop(fdt, node_offset, propname);
else if (prop)
return fdt_setprop(fdt, node_offset, propname, prop, len);
else
return -FDT_ERR_NOTFOUND;
}
static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
{
struct device_node *dn;
int pci_offset, root_offset, ret = 0;
if (!firmware_has_feature(FW_FEATURE_LPAR))
return 0;
root_offset = fdt_path_offset(fdt, "/");
for_each_node_with_property(dn, dmapropname) {
pci_offset = fdt_subnode_offset(fdt, root_offset, of_node_full_name(dn));
if (pci_offset < 0)
continue;
ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");
if (ret < 0)
break;
ret = copy_property(fdt, pci_offset, dn, dmapropname);
if (ret < 0)
break;
}
return ret;
}
/**
* setup_new_fdt_ppc64 - Update the flattened device tree of the kernel
* being loaded.
* @image: kexec image being loaded.
* @fdt: Flattened device tree for the next kernel.
* @initrd_load_addr: Address where the next initrd will be loaded.
* @initrd_len: Size of the next initrd, or 0 if there will be none.
* @cmdline: Command line for the next kernel, or NULL if there will
* be none.
*
* Returns 0 on success, negative errno on error.
*/
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
unsigned long initrd_load_addr,
unsigned long initrd_len, const char *cmdline)
{
struct crash_mem *umem = NULL, *rmem = NULL;
int i, nr_ranges, ret;
/*
* Restrict memory usage for kdump kernel by setting up
* usable memory ranges and memory reserve map.
*/
if (image->type == KEXEC_TYPE_CRASH) {
ret = get_usable_memory_ranges(&umem);
if (ret)
goto out;
ret = update_usable_mem_fdt(fdt, umem);
if (ret) {
pr_err("Error setting up usable-memory property for kdump kernel\n");
goto out;
}
/*
* Ensure we don't touch crashed kernel's memory except the
* first 64K of RAM, which will be backed up.
*/
ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
crashk_res.start - BACKUP_SRC_SIZE);
if (ret) {
pr_err("Error reserving crash memory: %s\n",
fdt_strerror(ret));
goto out;
}
/* Ensure backup region is not used by kdump/capture kernel */
ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
BACKUP_SRC_SIZE);
if (ret) {
pr_err("Error reserving memory for backup: %s\n",
fdt_strerror(ret));
goto out;
}
}
/* Update cpus nodes information to account hotplug CPUs. */
ret = update_cpus_node(fdt);
if (ret < 0)
goto out;
ret = update_pci_dma_nodes(fdt, DIRECT64_PROPNAME);
if (ret < 0)
goto out;
ret = update_pci_dma_nodes(fdt, DMA64_PROPNAME);
if (ret < 0)
goto out;
/* Update memory reserve map */
ret = get_reserved_memory_ranges(&rmem);
if (ret)
goto out;
nr_ranges = rmem ? rmem->nr_ranges : 0;
for (i = 0; i < nr_ranges; i++) {
u64 base, size;
base = rmem->ranges[i].start;
size = rmem->ranges[i].end - base + 1;
ret = fdt_add_mem_rsv(fdt, base, size);
if (ret) {
pr_err("Error updating memory reserve map: %s\n",
fdt_strerror(ret));
goto out;
}
}
// If we have PLPKS active, we need to provide the password to the new kernel
if (plpks_is_available())
ret = plpks_populate_fdt(fdt);
out:
kfree(rmem);
kfree(umem);
return ret;
}
/**
* arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
* tce-table, reserved-ranges & such (exclude
* memory ranges) as they can't be used for kexec
* segment buffer. Sets kbuf->mem when a suitable
* memory hole is found.
* @kbuf: Buffer contents and memory parameters.
*
* Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
*
* Returns 0 on success, negative errno on error.
*/
int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
struct crash_mem **emem;
u64 buf_min, buf_max;
int ret;
/* Look up the exclude ranges list while locating the memory hole */
emem = &(kbuf->image->arch.exclude_ranges);
if (!(*emem) || ((*emem)->nr_ranges == 0)) {
pr_warn("No exclude range list. Using the default locate mem hole method\n");
return kexec_locate_mem_hole(kbuf);
}
buf_min = kbuf->buf_min;
buf_max = kbuf->buf_max;
/* Segments for kdump kernel should be within crashkernel region */
if (kbuf->image->type == KEXEC_TYPE_CRASH) {
buf_min = (buf_min < crashk_res.start ?
crashk_res.start : buf_min);
buf_max = (buf_max > crashk_res.end ?
crashk_res.end : buf_max);
}
if (buf_min > buf_max) {
pr_err("Invalid buffer min and/or max values\n");
return -EINVAL;
}
if (kbuf->top_down)
ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,
*emem);
else
ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,
*emem);
/* Add the buffer allocated to the exclude list for the next lookup */
if (!ret) {
add_mem_range(emem, kbuf->mem, kbuf->memsz);
sort_memory_ranges(*emem, true);
} else {
pr_err("Failed to locate memory buffer of size %lu\n",
kbuf->memsz);
}
return ret;
}
/**
* arch_kexec_kernel_image_probe - Does additional handling needed to set up
* kexec segments.
* @image: kexec image being loaded.
* @buf: Buffer pointing to elf data.
* @buf_len: Length of the buffer.
*
* Returns 0 on success, negative errno on error.
*/
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
unsigned long buf_len)
{
int ret;
/* Get exclude memory ranges needed for setting up kexec segments */
ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
if (ret) {
pr_err("Failed to setup exclude memory ranges for buffer lookup\n");
return ret;
}
return kexec_image_probe_default(image, buf, buf_len);
}
/**
* arch_kimage_file_post_load_cleanup - Frees up all the allocations done
* while loading the image.
* @image: kexec image being loaded.
*
* Returns 0 on success, negative errno on error.
*/
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
kfree(image->arch.exclude_ranges);
image->arch.exclude_ranges = NULL;
vfree(image->arch.backup_buf);
image->arch.backup_buf = NULL;
vfree(image->elf_headers);
image->elf_headers = NULL;
image->elf_headers_sz = 0;
kvfree(image->arch.fdt);
image->arch.fdt = NULL;
return kexec_image_post_load_cleanup_default(image);
}
| linux-master | arch/powerpc/kexec/file_load_64.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Machine check exception handling.
*
* Copyright 2013 IBM Corporation
* Author: Mahesh Salgaonkar <[email protected]>
*/
#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include "setup.h"
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);
static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);
int mce_register_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_register_notifier);
int mce_unregister_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_notifier);
static void mce_set_error_info(struct machine_check_event *mce,
struct mce_error_info *mce_err)
{
mce->error_type = mce_err->error_type;
switch (mce_err->error_type) {
case MCE_ERROR_TYPE_UE:
mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
break;
case MCE_ERROR_TYPE_SLB:
mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
break;
case MCE_ERROR_TYPE_ERAT:
mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
break;
case MCE_ERROR_TYPE_TLB:
mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
break;
case MCE_ERROR_TYPE_USER:
mce->u.user_error.user_error_type = mce_err->u.user_error_type;
break;
case MCE_ERROR_TYPE_RA:
mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
break;
case MCE_ERROR_TYPE_LINK:
mce->u.link_error.link_error_type = mce_err->u.link_error_type;
break;
case MCE_ERROR_TYPE_UNKNOWN:
default:
break;
}
}
void mce_irq_work_queue(void)
{
/* Raise decrementer interrupt */
arch_irq_work_raise();
set_mce_pending_irq_work();
}
/*
* Decode and save high level MCE information into the per-CPU buffer which
* is an array of machine_check_event structures.
*/
void save_mce_event(struct pt_regs *regs, long handled,
struct mce_error_info *mce_err,
uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
int index = local_paca->mce_info->mce_nest_count++;
struct machine_check_event *mce;
mce = &local_paca->mce_info->mce_event[index];
/*
* Return if we don't have enough space to log mce event.
* mce_nest_count may go beyond MAX_MC_EVT but that's ok,
* the check below will stop buffer overrun.
*/
if (index >= MAX_MC_EVT)
return;
/* Populate generic machine check info */
mce->version = MCE_V1;
mce->srr0 = nip;
mce->srr1 = regs->msr;
mce->gpr3 = regs->gpr[3];
mce->in_use = 1;
mce->cpu = get_paca()->paca_index;
/* Mark it recovered if we have handled it and MSR(RI=1). */
if (handled && (regs->msr & MSR_RI))
mce->disposition = MCE_DISPOSITION_RECOVERED;
else
mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
mce->initiator = mce_err->initiator;
mce->severity = mce_err->severity;
mce->sync_error = mce_err->sync_error;
mce->error_class = mce_err->error_class;
/*
* Populate the mce error_type and type-specific error_type.
*/
mce_set_error_info(mce, mce_err);
if (mce->error_type == MCE_ERROR_TYPE_UE)
mce->u.ue_error.ignore_event = mce_err->ignore_event;
/*
* Raise irq work so that we don't miss logging the error for
* unrecoverable errors.
*/
if (mce->disposition == MCE_DISPOSITION_NOT_RECOVERED)
mce_irq_work_queue();
if (!addr)
return;
if (mce->error_type == MCE_ERROR_TYPE_TLB) {
mce->u.tlb_error.effective_address_provided = true;
mce->u.tlb_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
mce->u.slb_error.effective_address_provided = true;
mce->u.slb_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
mce->u.erat_error.effective_address_provided = true;
mce->u.erat_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
mce->u.user_error.effective_address_provided = true;
mce->u.user_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
mce->u.ra_error.effective_address_provided = true;
mce->u.ra_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
mce->u.link_error.effective_address_provided = true;
mce->u.link_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
mce->u.ue_error.effective_address_provided = true;
mce->u.ue_error.effective_address = addr;
if (phys_addr != ULONG_MAX) {
mce->u.ue_error.physical_address_provided = true;
mce->u.ue_error.physical_address = phys_addr;
machine_check_ue_event(mce);
}
}
return;
}
/*
* get_mce_event:
* mce Pointer to machine_check_event structure to be filled.
* release Flag to indicate whether to free the event slot or not.
* 0 <= do not release the mce event. Caller will invoke
* release_mce_event() once event has been consumed.
* 1 <= release the slot.
*
* return 1 = success
* 0 = failure
*
* get_mce_event() will be called by platform specific machine check
* handle routine and in KVM.
* When we call get_mce_event(), we are still in interrupt context and
* preemption will not be scheduled until the ret_from_except() routine
* is called.
*/
int get_mce_event(struct machine_check_event *mce, bool release)
{
int index = local_paca->mce_info->mce_nest_count - 1;
struct machine_check_event *mc_evt;
int ret = 0;
/* Sanity check */
if (index < 0)
return ret;
/* Check if we have MCE info to process. */
if (index < MAX_MC_EVT) {
mc_evt = &local_paca->mce_info->mce_event[index];
/* Copy the event structure and release the original */
if (mce)
*mce = *mc_evt;
if (release)
mc_evt->in_use = 0;
ret = 1;
}
/* Decrement the count to free the slot. */
if (release)
local_paca->mce_info->mce_nest_count--;
return ret;
}
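/*
* Usage sketch (illustrative only, not taken from the original file):
* a consumer that wants to inspect the most recent event without
* freeing the slot could do:
*
*	struct machine_check_event evt;
*
*	if (get_mce_event(&evt, false)) {
*		... examine evt ...
*		release_mce_event();
*	}
*
* Passing true (MCE_EVENT_RELEASE, as machine_check_queue_event() does)
* releases the slot immediately instead.
*/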
void release_mce_event(void)
{
get_mce_event(NULL, true);
}
static void machine_check_ue_work(void)
{
schedule_work(&mce_ue_event_work);
}
/*
* Queue up the MCE event which then can be handled later.
*/
static void machine_check_ue_event(struct machine_check_event *evt)
{
int index;
index = local_paca->mce_info->mce_ue_count++;
/* If queue is full, just return for now. */
if (index >= MAX_MC_EVT) {
local_paca->mce_info->mce_ue_count--;
return;
}
memcpy(&local_paca->mce_info->mce_ue_event_queue[index],
evt, sizeof(*evt));
}
/*
* Queue up the MCE event which then can be handled later.
*/
void machine_check_queue_event(void)
{
int index;
struct machine_check_event evt;
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return;
index = local_paca->mce_info->mce_queue_count++;
/* If queue is full, just return for now. */
if (index >= MAX_MC_EVT) {
local_paca->mce_info->mce_queue_count--;
return;
}
memcpy(&local_paca->mce_info->mce_event_queue[index],
&evt, sizeof(evt));
mce_irq_work_queue();
}
void mce_common_process_ue(struct pt_regs *regs,
struct mce_error_info *mce_err)
{
const struct exception_table_entry *entry;
entry = search_kernel_exception_table(regs->nip);
if (entry) {
mce_err->ignore_event = true;
regs_set_return_ip(regs, extable_fixup(entry));
}
}
/*
* Process pending UE events from the MCE UE event queue. This is the work
* function for mce_ue_event_work, scheduled from machine_check_ue_work().
*/
static void machine_process_ue_event(struct work_struct *work)
{
int index;
struct machine_check_event *evt;
while (local_paca->mce_info->mce_ue_count > 0) {
index = local_paca->mce_info->mce_ue_count - 1;
evt = &local_paca->mce_info->mce_ue_event_queue[index];
blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
/*
* This should probably be queued elsewhere, but
* oh well.
*
* Don't report this machine check because the caller has
* asked us to ignore the event; it has a fixup handler which
* will do the appropriate error handling and reporting.
*/
if (evt->error_type == MCE_ERROR_TYPE_UE) {
if (evt->u.ue_error.ignore_event) {
local_paca->mce_info->mce_ue_count--;
continue;
}
if (evt->u.ue_error.physical_address_provided) {
unsigned long pfn;
pfn = evt->u.ue_error.physical_address >>
PAGE_SHIFT;
memory_failure(pfn, 0);
} else
pr_warn("Failed to identify bad address from "
"where the uncorrectable error (UE) "
"was generated\n");
}
#endif
local_paca->mce_info->mce_ue_count--;
}
}
/*
* Process pending MCE events from the MCE event queue. This is called from
* mce_run_irq_context_handlers() when MCE irq work is pending.
*/
static void machine_check_process_queued_event(void)
{
int index;
struct machine_check_event *evt;
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
/*
* For now just print it to console.
* TODO: log this error event to FSP or nvram.
*/
while (local_paca->mce_info->mce_queue_count > 0) {
index = local_paca->mce_info->mce_queue_count - 1;
evt = &local_paca->mce_info->mce_event_queue[index];
if (evt->error_type == MCE_ERROR_TYPE_UE &&
evt->u.ue_error.ignore_event) {
local_paca->mce_info->mce_queue_count--;
continue;
}
machine_check_print_event_info(evt, false, false);
local_paca->mce_info->mce_queue_count--;
}
}
void set_mce_pending_irq_work(void)
{
local_paca->mce_pending_irq_work = 1;
}
void clear_mce_pending_irq_work(void)
{
local_paca->mce_pending_irq_work = 0;
}
void mce_run_irq_context_handlers(void)
{
if (unlikely(local_paca->mce_pending_irq_work)) {
if (ppc_md.machine_check_log_err)
ppc_md.machine_check_log_err();
machine_check_process_queued_event();
machine_check_ue_work();
clear_mce_pending_irq_work();
}
}
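/*
* Flow note (illustrative, derived from the code above): save_mce_event()
* (for unrecovered events) and machine_check_queue_event() call
* mce_irq_work_queue(), which raises irq work and sets the
* mce_pending_irq_work flag. When that work is processed,
* mce_run_irq_context_handlers() logs the queued events and, via
* machine_check_ue_work(), schedules mce_ue_event_work to handle the UE
* queue in process context.
*/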
void machine_check_print_event_info(struct machine_check_event *evt,
bool user_mode, bool in_guest)
{
const char *level, *sevstr, *subtype, *err_type, *initiator;
uint64_t ea = 0, pa = 0;
int n = 0;
char dar_str[50];
char pa_str[50];
static const char *mc_ue_types[] = {
"Indeterminate",
"Instruction fetch",
"Page table walk ifetch",
"Load/Store",
"Page table walk Load/Store",
};
static const char *mc_slb_types[] = {
"Indeterminate",
"Parity",
"Multihit",
};
static const char *mc_erat_types[] = {
"Indeterminate",
"Parity",
"Multihit",
};
static const char *mc_tlb_types[] = {
"Indeterminate",
"Parity",
"Multihit",
};
static const char *mc_user_types[] = {
"Indeterminate",
"tlbie(l) invalid",
"scv invalid",
};
static const char *mc_ra_types[] = {
"Indeterminate",
"Instruction fetch (bad)",
"Instruction fetch (foreign/control memory)",
"Page table walk ifetch (bad)",
"Page table walk ifetch (foreign/control memory)",
"Load (bad)",
"Store (bad)",
"Page table walk Load/Store (bad)",
"Page table walk Load/Store (foreign/control memory)",
"Load/Store (foreign/control memory)",
};
static const char *mc_link_types[] = {
"Indeterminate",
"Instruction fetch (timeout)",
"Page table walk ifetch (timeout)",
"Load (timeout)",
"Store (timeout)",
"Page table walk Load/Store (timeout)",
};
static const char *mc_error_class[] = {
"Unknown",
"Hardware error",
"Probable Hardware error (some chance of software cause)",
"Software error",
"Probable Software error (some chance of hardware cause)",
};
/* Print things out */
if (evt->version != MCE_V1) {
pr_err("Machine Check Exception, Unknown event version %d !\n",
evt->version);
return;
}
switch (evt->severity) {
case MCE_SEV_NO_ERROR:
level = KERN_INFO;
sevstr = "Harmless";
break;
case MCE_SEV_WARNING:
level = KERN_WARNING;
sevstr = "Warning";
break;
case MCE_SEV_SEVERE:
level = KERN_ERR;
sevstr = "Severe";
break;
case MCE_SEV_FATAL:
default:
level = KERN_ERR;
sevstr = "Fatal";
break;
}
switch(evt->initiator) {
case MCE_INITIATOR_CPU:
initiator = "CPU";
break;
case MCE_INITIATOR_PCI:
initiator = "PCI";
break;
case MCE_INITIATOR_ISA:
initiator = "ISA";
break;
case MCE_INITIATOR_MEMORY:
initiator = "Memory";
break;
case MCE_INITIATOR_POWERMGM:
initiator = "Power Management";
break;
case MCE_INITIATOR_UNKNOWN:
default:
initiator = "Unknown";
break;
}
switch (evt->error_type) {
case MCE_ERROR_TYPE_UE:
err_type = "UE";
subtype = evt->u.ue_error.ue_error_type <
ARRAY_SIZE(mc_ue_types) ?
mc_ue_types[evt->u.ue_error.ue_error_type]
: "Unknown";
if (evt->u.ue_error.effective_address_provided)
ea = evt->u.ue_error.effective_address;
if (evt->u.ue_error.physical_address_provided)
pa = evt->u.ue_error.physical_address;
break;
case MCE_ERROR_TYPE_SLB:
err_type = "SLB";
subtype = evt->u.slb_error.slb_error_type <
ARRAY_SIZE(mc_slb_types) ?
mc_slb_types[evt->u.slb_error.slb_error_type]
: "Unknown";
if (evt->u.slb_error.effective_address_provided)
ea = evt->u.slb_error.effective_address;
break;
case MCE_ERROR_TYPE_ERAT:
err_type = "ERAT";
subtype = evt->u.erat_error.erat_error_type <
ARRAY_SIZE(mc_erat_types) ?
mc_erat_types[evt->u.erat_error.erat_error_type]
: "Unknown";
if (evt->u.erat_error.effective_address_provided)
ea = evt->u.erat_error.effective_address;
break;
case MCE_ERROR_TYPE_TLB:
err_type = "TLB";
subtype = evt->u.tlb_error.tlb_error_type <
ARRAY_SIZE(mc_tlb_types) ?
mc_tlb_types[evt->u.tlb_error.tlb_error_type]
: "Unknown";
if (evt->u.tlb_error.effective_address_provided)
ea = evt->u.tlb_error.effective_address;
break;
case MCE_ERROR_TYPE_USER:
err_type = "User";
subtype = evt->u.user_error.user_error_type <
ARRAY_SIZE(mc_user_types) ?
mc_user_types[evt->u.user_error.user_error_type]
: "Unknown";
if (evt->u.user_error.effective_address_provided)
ea = evt->u.user_error.effective_address;
break;
case MCE_ERROR_TYPE_RA:
err_type = "Real address";
subtype = evt->u.ra_error.ra_error_type <
ARRAY_SIZE(mc_ra_types) ?
mc_ra_types[evt->u.ra_error.ra_error_type]
: "Unknown";
if (evt->u.ra_error.effective_address_provided)
ea = evt->u.ra_error.effective_address;
break;
case MCE_ERROR_TYPE_LINK:
err_type = "Link";
subtype = evt->u.link_error.link_error_type <
ARRAY_SIZE(mc_link_types) ?
mc_link_types[evt->u.link_error.link_error_type]
: "Unknown";
if (evt->u.link_error.effective_address_provided)
ea = evt->u.link_error.effective_address;
break;
case MCE_ERROR_TYPE_DCACHE:
err_type = "D-Cache";
subtype = "Unknown";
break;
case MCE_ERROR_TYPE_ICACHE:
err_type = "I-Cache";
subtype = "Unknown";
break;
default:
case MCE_ERROR_TYPE_UNKNOWN:
err_type = "Unknown";
subtype = "";
break;
}
dar_str[0] = pa_str[0] = '\0';
if (ea && evt->srr0 != ea) {
/* Load/Store address */
n = sprintf(dar_str, "DAR: %016llx ", ea);
if (pa)
sprintf(dar_str + n, "paddr: %016llx ", pa);
} else if (pa) {
sprintf(pa_str, " paddr: %016llx", pa);
}
printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
level, evt->cpu, sevstr, in_guest ? "Guest" : "",
err_type, subtype, dar_str,
evt->disposition == MCE_DISPOSITION_RECOVERED ?
"Recovered" : "Not recovered");
if (in_guest || user_mode) {
printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
level, evt->cpu, current->pid, current->comm,
in_guest ? "Guest " : "", evt->srr0, pa_str);
} else {
printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
}
printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);
subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
mc_error_class[evt->error_class] : "Unknown";
printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);
#ifdef CONFIG_PPC_64S_HASH_MMU
/* Display faulty slb contents for SLB errors. */
if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);
/*
* This function is called in real mode. Strictly no printk's please.
*
* regs->nip and regs->msr contain SRR0 and SRR1.
*/
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
{
long handled = 0;
hv_nmi_check_nonrecoverable(regs);
/*
* See if platform is capable of handling machine check.
*/
if (ppc_md.machine_check_early)
handled = ppc_md.machine_check_early(regs);
return handled;
}
/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
DTRIG_UNKNOWN,
DTRIG_VECTOR_CI, /* need to emulate vector CI load instr */
DTRIG_SUSPEND_ESCAPE, /* need to escape from TM suspend mode */
} hmer_debug_trig_function;
static int init_debug_trig_function(void)
{
int pvr;
struct device_node *cpun;
struct property *prop = NULL;
const char *str;
/* First look in the device tree */
preempt_disable();
cpun = of_get_cpu_node(smp_processor_id(), NULL);
if (cpun) {
of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
prop, str) {
if (strcmp(str, "bit17-vector-ci-load") == 0)
hmer_debug_trig_function = DTRIG_VECTOR_CI;
else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
}
of_node_put(cpun);
}
preempt_enable();
/* If we found the property, don't look at PVR */
if (prop)
goto out;
pvr = mfspr(SPRN_PVR);
/* Check for POWER9 Nimbus (scale-out) */
if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
/* DD2.2 and later */
if ((pvr & 0xfff) >= 0x202)
hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
/* DD2.0 and DD2.1 - used for vector CI load emulation */
else if ((pvr & 0xfff) >= 0x200)
hmer_debug_trig_function = DTRIG_VECTOR_CI;
}
out:
switch (hmer_debug_trig_function) {
case DTRIG_VECTOR_CI:
pr_debug("HMI debug trigger used for vector CI load\n");
break;
case DTRIG_SUSPEND_ESCAPE:
pr_debug("HMI debug trigger used for TM suspend escape\n");
break;
default:
break;
}
return 0;
}
__initcall(init_debug_trig_function);
/*
* Handle HMIs that occur as a result of a debug trigger.
* Return values:
* -1 means this is not a HMI cause that we know about
* 0 means no further handling is required
* 1 means further handling is required
*/
long hmi_handle_debugtrig(struct pt_regs *regs)
{
unsigned long hmer = mfspr(SPRN_HMER);
long ret = 0;
/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
if (!((hmer & HMER_DEBUG_TRIG)
&& hmer_debug_trig_function != DTRIG_UNKNOWN))
return -1;
hmer &= ~HMER_DEBUG_TRIG;
/* HMER is a write-AND register */
mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);
switch (hmer_debug_trig_function) {
case DTRIG_VECTOR_CI:
/*
* Now to avoid problems with soft-disable we
* only do the emulation if we are coming from
* host user space
*/
if (regs && user_mode(regs))
ret = local_paca->hmi_p9_special_emu = 1;
break;
default:
break;
}
/*
* See if any other HMI causes remain to be handled
*/
if (hmer & mfspr(SPRN_HMEER))
return -1;
return ret;
}
/*
* Return values:
* 0 means no further handling is required
* 1 means further handling is required
*/
DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode)
{
int ret;
local_paca->hmi_irqs++;
ret = hmi_handle_debugtrig(regs);
if (ret >= 0)
return ret;
wait_for_subcore_guest_exit();
if (ppc_md.hmi_exception_early)
ppc_md.hmi_exception_early(regs);
wait_for_tb_resync();
return 1;
}
void __init mce_init(void)
{
struct mce_info *mce_info;
u64 limit;
int i;
limit = min(ppc64_bolted_size(), ppc64_rma_size);
for_each_possible_cpu(i) {
mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
__alignof__(*mce_info),
MEMBLOCK_LOW_LIMIT,
limit, early_cpu_to_node(i));
if (!mce_info)
goto err;
paca_ptrs[i]->mce_info = mce_info;
}
return;
err:
panic("Failed to allocate memory for MCE event data\n");
}
| linux-master | arch/powerpc/kernel/mce.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Kernel module help for PPC64.
Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/bug.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/module.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/inst.h>
/* FIXME: We don't do .init separately. To do this, we'd need to have
a separate r2 value in the init and core section, and stub between
them, too.
Using a magic allocator which places modules within 32MB solves
this, and makes other things simpler. Anton?
--RR. */
bool module_elf_check_arch(Elf_Ehdr *hdr)
{
unsigned long abi_level = hdr->e_flags & 0x3;
if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
return abi_level == 2;
else
return abi_level < 2;
}
#ifdef CONFIG_PPC64_ELF_ABI_V2
static func_desc_t func_desc(unsigned long addr)
{
func_desc_t desc = {
.addr = addr,
};
return desc;
}
/* PowerPC64 specific values for the Elf64_Sym st_other field. */
#define STO_PPC64_LOCAL_BIT 5
#define STO_PPC64_LOCAL_MASK (7 << STO_PPC64_LOCAL_BIT)
#define PPC64_LOCAL_ENTRY_OFFSET(other) \
(((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2)
static unsigned int local_entry_offset(const Elf64_Sym *sym)
{
/* sym->st_other indicates offset to local entry point
* (otherwise it will assume r12 is the address of the start
* of function and try to derive r2 from it). */
return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
}
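/*
* Worked example (illustrative): the ELFv2 st_other field encodes the
* local entry offset in bits 5-7. A value of 3 in that field gives
* PPC64_LOCAL_ENTRY_OFFSET = (((1 << 3) >> 2) << 2) = 8 bytes, i.e. the
* usual two-instruction (addis/addi) gap between the global and local
* entry points; a value of 0 means the two entry points coincide.
*/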
#else
static func_desc_t func_desc(unsigned long addr)
{
return *(struct func_desc *)addr;
}
static unsigned int local_entry_offset(const Elf64_Sym *sym)
{
return 0;
}
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
if (ptr < (void *)mod->arch.start_opd ||
ptr >= (void *)mod->arch.end_opd)
return ptr;
return dereference_function_descriptor(ptr);
}
#endif
static unsigned long func_addr(unsigned long addr)
{
return func_desc(addr).addr;
}
static unsigned long stub_func_addr(func_desc_t func)
{
return func.addr;
}
#define STUB_MAGIC 0x73747562 /* stub */
/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
the kernel itself). But on PPC64, these need to be used for every
jump, actually, to reset r2 (TOC+0x8000). */
struct ppc64_stub_entry {
/*
* 28-byte jump instruction sequence (7 instructions) that can
* hold ppc64_stub_insns or stub_insns. Must be 8-byte aligned
* for PCREL kernels, which use prefixed instructions in the stub.
*/
u32 jump[7];
/* Used by ftrace to identify stubs */
u32 magic;
/* Data for the above code */
func_desc_t funcdata;
} __aligned(8);
struct ppc64_got_entry {
u64 addr;
};
/*
* PPC64 uses 24 bit jumps, but we need to jump into other modules or
* the kernel which may be further. So we jump to a stub.
*
* Target address and TOC are loaded from function descriptor in the
* ppc64_stub_entry.
*
* r12 is used to generate the target address, which is required for the
* ELFv2 global entry point calling convention.
*
* TOC handling:
* - PCREL does not have a TOC.
* - ELFv2 non-PCREL just has to save r2, the callee is responsible for
* setting its own TOC pointer at the global entry address.
* - ELFv1 must load the new TOC pointer from the function descriptor.
*/
static u32 ppc64_stub_insns[] = {
#ifdef CONFIG_PPC_KERNEL_PCREL
/* pld r12,addr */
PPC_PREFIX_8LS | __PPC_PRFX_R(1),
PPC_INST_PLD | ___PPC_RT(_R12),
#else
PPC_RAW_ADDIS(_R11, _R2, 0),
PPC_RAW_ADDI(_R11, _R11, 0),
/* Save current r2 value in magic place on the stack. */
PPC_RAW_STD(_R2, _R1, R2_STACK_OFFSET),
PPC_RAW_LD(_R12, _R11, 32),
#ifdef CONFIG_PPC64_ELF_ABI_V1
/* Set up new r2 from function descriptor */
PPC_RAW_LD(_R2, _R11, 40),
#endif
#endif
PPC_RAW_MTCTR(_R12),
PPC_RAW_BCTR(),
};
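/*
* Offset note (illustrative, derived from struct ppc64_stub_entry above):
* funcdata starts at byte 32 of the stub (28 bytes of jump[] plus the
* 4-byte magic). That is why the non-PCREL sequence uses
* PPC_RAW_LD(_R12, _R11, 32) to fetch the target address and, for ELFv1,
* PPC_RAW_LD(_R2, _R11, 40) to fetch the TOC pointer from the function
* descriptor stored there.
*/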
/*
* Count how many different r_type relocations (different symbol,
* different addend).
*/
static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num,
unsigned long r_type)
{
unsigned int i, r_info, r_addend, _count_relocs;
/* FIXME: Only count external ones --RR */
_count_relocs = 0;
r_info = 0;
r_addend = 0;
for (i = 0; i < num; i++)
/* Only count r_type relocs, others don't need stubs */
if (ELF64_R_TYPE(rela[i].r_info) == r_type &&
(r_info != ELF64_R_SYM(rela[i].r_info) ||
r_addend != rela[i].r_addend)) {
_count_relocs++;
r_info = ELF64_R_SYM(rela[i].r_info);
r_addend = rela[i].r_addend;
}
return _count_relocs;
}
static int relacmp(const void *_x, const void *_y)
{
const Elf64_Rela *x, *y;
y = (Elf64_Rela *)_x;
x = (Elf64_Rela *)_y;
/* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
* make the comparison cheaper/faster. It won't affect the sorting or
* the counting algorithms' performance
*/
if (x->r_info < y->r_info)
return -1;
else if (x->r_info > y->r_info)
return 1;
else if (x->r_addend < y->r_addend)
return -1;
else if (x->r_addend > y->r_addend)
return 1;
else
return 0;
}
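/*
* Note (illustrative): get_stubs_size() and get_got_size() sort each RELA
* section with relacmp() before counting, so count_relocs() can detect a
* new (symbol, addend) pair by comparing against the previously counted
* one - a single linear pass over the sorted array, as the comment in
* get_stubs_size() notes.
*/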
/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
const Elf64_Shdr *sechdrs)
{
/* One extra reloc so it's always 0-addr terminated */
unsigned long relocs = 1;
unsigned i;
/* Every relocated section... */
for (i = 1; i < hdr->e_shnum; i++) {
if (sechdrs[i].sh_type == SHT_RELA) {
pr_debug("Found relocations in section %u\n", i);
pr_debug("Ptr: %p. Number: %Lu\n",
(void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size / sizeof(Elf64_Rela));
/* Sort the relocation information based on a symbol and
* addend key. This is a stable O(n*log n) complexity
* algorithm but it will reduce the complexity of
* count_relocs() to linear complexity O(n)
*/
sort((void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size / sizeof(Elf64_Rela),
sizeof(Elf64_Rela), relacmp, NULL);
relocs += count_relocs((void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size
/ sizeof(Elf64_Rela),
R_PPC_REL24);
#ifdef CONFIG_PPC_KERNEL_PCREL
relocs += count_relocs((void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size
/ sizeof(Elf64_Rela),
R_PPC64_REL24_NOTOC);
#endif
}
}
#ifdef CONFIG_DYNAMIC_FTRACE
/* make the trampoline to the ftrace_caller */
relocs++;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/* an additional one for ftrace_regs_caller */
relocs++;
#endif
#endif
pr_debug("Looks like a total of %lu stubs, max\n", relocs);
return relocs * sizeof(struct ppc64_stub_entry);
}
#ifdef CONFIG_PPC_KERNEL_PCREL
static int count_pcpu_relocs(const Elf64_Shdr *sechdrs,
const Elf64_Rela *rela, unsigned int num,
unsigned int symindex, unsigned int pcpu)
{
unsigned int i, r_info, r_addend, _count_relocs;
_count_relocs = 0;
r_info = 0;
r_addend = 0;
for (i = 0; i < num; i++) {
Elf64_Sym *sym;
/* This is the symbol it is referring to */
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rela[i].r_info);
if (sym->st_shndx == pcpu &&
(r_info != ELF64_R_SYM(rela[i].r_info) ||
r_addend != rela[i].r_addend)) {
_count_relocs++;
r_info = ELF64_R_SYM(rela[i].r_info);
r_addend = rela[i].r_addend;
}
}
return _count_relocs;
}
/* Get size of potential GOT required. */
static unsigned long get_got_size(const Elf64_Ehdr *hdr,
const Elf64_Shdr *sechdrs,
struct module *me)
{
/* One extra reloc so it's always 0-addr terminated */
unsigned long relocs = 1;
unsigned int i, symindex = 0;
for (i = 1; i < hdr->e_shnum; i++) {
if (sechdrs[i].sh_type == SHT_SYMTAB) {
symindex = i;
break;
}
}
WARN_ON_ONCE(!symindex);
/* Every relocated section... */
for (i = 1; i < hdr->e_shnum; i++) {
if (sechdrs[i].sh_type == SHT_RELA) {
pr_debug("Found relocations in section %u\n", i);
pr_debug("Ptr: %p. Number: %llu\n", (void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size / sizeof(Elf64_Rela));
/*
* Sort the relocation information based on a symbol and
* addend key. This is a stable O(n*log n) complexity
* algorithm but it will reduce the complexity of
* count_relocs() to linear complexity O(n)
*/
sort((void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size / sizeof(Elf64_Rela),
sizeof(Elf64_Rela), relacmp, NULL);
relocs += count_relocs((void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size
/ sizeof(Elf64_Rela),
R_PPC64_GOT_PCREL34);
/*
* Percpu data access typically gets linked with
* REL34 relocations, but the percpu section gets
* moved at load time and requires that to be
* converted to GOT linkage.
*/
if (IS_ENABLED(CONFIG_SMP) && symindex)
relocs += count_pcpu_relocs(sechdrs,
(void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size
/ sizeof(Elf64_Rela),
symindex, me->arch.pcpu_section);
}
}
pr_debug("Looks like a total of %lu GOT entries, max\n", relocs);
return relocs * sizeof(struct ppc64_got_entry);
}
#else /* CONFIG_PPC_KERNEL_PCREL */
/* Still needed for ELFv2, for .TOC. */
static void dedotify_versions(struct modversion_info *vers,
unsigned long size)
{
struct modversion_info *end;
for (end = (void *)vers + size; vers < end; vers++)
if (vers->name[0] == '.') {
memmove(vers->name, vers->name+1, strlen(vers->name));
}
}
/*
* Undefined symbols which refer to .funcname are hacked to refer to funcname.
* Make .TOC. seem to be defined (its value is set later).
*/
static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
{
unsigned int i;
for (i = 1; i < numsyms; i++) {
if (syms[i].st_shndx == SHN_UNDEF) {
char *name = strtab + syms[i].st_name;
if (name[0] == '.') {
if (strcmp(name+1, "TOC.") == 0)
syms[i].st_shndx = SHN_ABS;
syms[i].st_name++;
}
}
}
}
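/*
* Example (illustrative): an undefined symbol named ".memcpy" becomes
* "memcpy" simply by bumping st_name past the leading dot, while an
* undefined ".TOC." additionally gets st_shndx forced to SHN_ABS so that
* it appears defined; its value is filled in later.
*/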
static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
const char *strtab,
unsigned int symindex)
{
unsigned int i, numsyms;
Elf64_Sym *syms;
syms = (Elf64_Sym *)sechdrs[symindex].sh_addr;
numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);
for (i = 1; i < numsyms; i++) {
if (syms[i].st_shndx == SHN_ABS
&& strcmp(strtab + syms[i].st_name, "TOC.") == 0)
return &syms[i];
}
return NULL;
}
#endif /* CONFIG_PPC_KERNEL_PCREL */
bool module_init_section(const char *name)
{
/* We don't handle .init for the moment: always return false. */
return false;
}
int module_frob_arch_sections(Elf64_Ehdr *hdr,
Elf64_Shdr *sechdrs,
char *secstrings,
struct module *me)
{
unsigned int i;
/* Find .toc and .stubs sections, symtab and strtab */
for (i = 1; i < hdr->e_shnum; i++) {
if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
me->arch.stubs_section = i;
#ifdef CONFIG_PPC_KERNEL_PCREL
else if (strcmp(secstrings + sechdrs[i].sh_name, ".data..percpu") == 0)
me->arch.pcpu_section = i;
else if (strcmp(secstrings + sechdrs[i].sh_name, ".mygot") == 0) {
me->arch.got_section = i;
if (sechdrs[i].sh_addralign < 8)
sechdrs[i].sh_addralign = 8;
}
#else
else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0) {
me->arch.toc_section = i;
if (sechdrs[i].sh_addralign < 8)
sechdrs[i].sh_addralign = 8;
}
else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size);
if (sechdrs[i].sh_type == SHT_SYMTAB)
dedotify((void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size / sizeof(Elf64_Sym),
(void *)hdr
+ sechdrs[sechdrs[i].sh_link].sh_offset);
#endif
}
if (!me->arch.stubs_section) {
pr_err("%s: doesn't contain .stubs.\n", me->name);
return -ENOEXEC;
}
#ifdef CONFIG_PPC_KERNEL_PCREL
if (!me->arch.got_section) {
pr_err("%s: doesn't contain .mygot.\n", me->name);
return -ENOEXEC;
}
/* Override the got size */
sechdrs[me->arch.got_section].sh_size = get_got_size(hdr, sechdrs, me);
#else
/* If we don't have a .toc, just use .stubs. We need to set r2
to some reasonable value in case the module calls out to
other functions via a stub, or if a function pointer escapes
the module by some means. */
if (!me->arch.toc_section)
me->arch.toc_section = me->arch.stubs_section;
#endif
/* Override the stubs size */
sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
return 0;
}
#if defined(CONFIG_MPROFILE_KERNEL) || defined(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)
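/*
* Instruction template for the ftrace stub: load a base address from the
* paca (kernel base with pcrel, kernel TOC otherwise), add the target
* offset, then mtctr/bctr to it. The offset fields are patched in by
* create_ftrace_stub().
*/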
static u32 stub_insns[] = {
#ifdef CONFIG_PPC_KERNEL_PCREL
PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)),
PPC_RAW_NOP(), /* align the prefix insn */
/* paddi r12,r12,addr */
PPC_PREFIX_MLS | __PPC_PRFX_R(0),
PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12),
PPC_RAW_MTCTR(_R12),
PPC_RAW_BCTR(),
#else
PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
PPC_RAW_ADDIS(_R12, _R12, 0),
PPC_RAW_ADDI(_R12, _R12, 0),
PPC_RAW_MTCTR(_R12),
PPC_RAW_BCTR(),
#endif
};
/*
* For mprofile-kernel we use a special stub for ftrace_caller() because we
* can't rely on r2 containing this module's TOC when we enter the stub.
*
* That can happen if the function calling us didn't need to use the toc. In
* that case it won't have set up r2, and the r2 value will be either the
* kernel's toc, or possibly another module's toc.
*
* To deal with that this stub uses the kernel toc, which is always accessible
* via the paca (in r13). The target (ftrace_caller()) is responsible for
* saving and restoring the toc before returning.
*/
static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
unsigned long addr,
struct module *me)
{
long reladdr;
if ((unsigned long)entry->jump % 8 != 0) {
pr_err("%s: Address of stub entry is not 8-byte aligned\n", me->name);
return 0;
}
BUILD_BUG_ON(sizeof(stub_insns) > sizeof(entry->jump));
memcpy(entry->jump, stub_insns, sizeof(stub_insns));
if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
/* Stub uses address relative to kernel base (from the paca) */
reladdr = addr - local_paca->kernelbase;
if (reladdr > 0x1FFFFFFFFL || reladdr < -0x200000000L) {
pr_err("%s: Address of %ps out of range of 34-bit relative address.\n",
me->name, (void *)addr);
return 0;
}
entry->jump[2] |= IMM_H18(reladdr);
entry->jump[3] |= IMM_L(reladdr);
} else {
/* Stub uses address relative to kernel toc (from the paca) */
reladdr = addr - kernel_toc_addr();
if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
pr_err("%s: Address of %ps out of range of kernel_toc.\n",
me->name, (void *)addr);
return 0;
}
entry->jump[1] |= PPC_HA(reladdr);
entry->jump[2] |= PPC_LO(reladdr);
}
/* Even though we don't use funcdata in the stub, it's needed elsewhere. */
entry->funcdata = func_desc(addr);
entry->magic = STUB_MAGIC;
return 1;
}
static bool is_mprofile_ftrace_call(const char *name)
{
if (!strcmp("_mcount", name))
return true;
#ifdef CONFIG_DYNAMIC_FTRACE
if (!strcmp("ftrace_caller", name))
return true;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
if (!strcmp("ftrace_regs_caller", name))
return true;
#endif
#endif
return false;
}
#else
static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
unsigned long addr,
struct module *me)
{
return 0;
}
static bool is_mprofile_ftrace_call(const char *name)
{
return false;
}
#endif
/*
* r2 is the TOC pointer: it actually points 0x8000 into the TOC, which gives
* the value the maximum span reachable by an instruction that uses a signed
* offset. Round down to a 256 byte boundary for the odd case where we are
* setting up r2 without a .toc section.
*/
static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me)
{
#ifndef CONFIG_PPC_KERNEL_PCREL
return (sechdrs[me->arch.toc_section].sh_addr & ~0xfful) + 0x8000;
#else
return -1;
#endif
}
/* Patch stub to reference function and correct r2 value. */
static inline int create_stub(const Elf64_Shdr *sechdrs,
struct ppc64_stub_entry *entry,
unsigned long addr,
struct module *me,
const char *name)
{
long reladdr;
func_desc_t desc;
int i;
if (is_mprofile_ftrace_call(name))
return create_ftrace_stub(entry, addr, me);
if ((unsigned long)entry->jump % 8 != 0) {
pr_err("%s: Address of stub entry is not 8-byte aligned\n", me->name);
return 0;
}
BUILD_BUG_ON(sizeof(ppc64_stub_insns) > sizeof(entry->jump));
for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) {
if (patch_instruction(&entry->jump[i],
ppc_inst(ppc64_stub_insns[i])))
return 0;
}
if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
/* Stub uses address relative to itself! */
reladdr = 0 + offsetof(struct ppc64_stub_entry, funcdata);
BUILD_BUG_ON(reladdr != 32);
if (reladdr > 0x1FFFFFFFFL || reladdr < -0x200000000L) {
pr_err("%s: Address of %p out of range of 34-bit relative address.\n",
me->name, (void *)reladdr);
return 0;
}
pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
/* May not even need this if we're relative to 0 */
if (patch_instruction(&entry->jump[0],
ppc_inst_prefix(entry->jump[0] | IMM_H18(reladdr),
entry->jump[1] | IMM_L(reladdr))))
return 0;
} else {
/* Stub uses address relative to r2. */
reladdr = (unsigned long)entry - my_r2(sechdrs, me);
if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
pr_err("%s: Address %p of stub out of range of %p.\n",
me->name, (void *)reladdr, (void *)my_r2);
return 0;
}
pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
if (patch_instruction(&entry->jump[0],
ppc_inst(entry->jump[0] | PPC_HA(reladdr))))
return 0;
if (patch_instruction(&entry->jump[1],
ppc_inst(entry->jump[1] | PPC_LO(reladdr))))
return 0;
}
// func_desc_t is 8 bytes if ABIv2, else 16 bytes
desc = func_desc(addr);
for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) {
if (patch_instruction(((u32 *)&entry->funcdata) + i,
ppc_inst(((u32 *)(&desc))[i])))
return 0;
}
if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC)))
return 0;
return 1;
}
/* Create stub to jump to function described in this OPD/ptr: we need the
stub to set up the TOC ptr (r2) for the function. */
static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
unsigned long addr,
struct module *me,
const char *name)
{
struct ppc64_stub_entry *stubs;
unsigned int i, num_stubs;
num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);
/* Find this stub, or if that fails, the next avail. entry */
stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
for (i = 0; stub_func_addr(stubs[i].funcdata); i++) {
if (WARN_ON(i >= num_stubs))
return 0;
if (stub_func_addr(stubs[i].funcdata) == func_addr(addr))
return (unsigned long)&stubs[i];
}
if (!create_stub(sechdrs, &stubs[i], addr, me, name))
return 0;
return (unsigned long)&stubs[i];
}
#ifdef CONFIG_PPC_KERNEL_PCREL
/* Create GOT to load the location described in this ptr */
static unsigned long got_for_addr(const Elf64_Shdr *sechdrs,
unsigned long addr,
struct module *me,
const char *name)
{
struct ppc64_got_entry *got;
unsigned int i, num_got;
if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
return addr;
num_got = sechdrs[me->arch.got_section].sh_size / sizeof(*got);
/* Find this GOT entry, or if that fails, the next available slot */
got = (void *)sechdrs[me->arch.got_section].sh_addr;
for (i = 0; got[i].addr; i++) {
if (WARN_ON(i >= num_got))
return 0;
if (got[i].addr == addr)
return (unsigned long)&got[i];
}
got[i].addr = addr;
return (unsigned long)&got[i];
}
#endif
/* We expect a noop next: if it is, replace it with instruction to
restore r2. */
static int restore_r2(const char *name, u32 *instruction, struct module *me)
{
u32 *prev_insn = instruction - 1;
u32 insn_val = *instruction;
if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
return 0;
if (is_mprofile_ftrace_call(name))
return 0;
/*
* Make sure the branch isn't a sibling call. Sibling calls aren't
* "link" branches and they don't return, so they don't need the r2
* restore afterwards.
*/
if (!instr_is_relative_link_branch(ppc_inst(*prev_insn)))
return 0;
/*
* For livepatch, the restore r2 instruction might have already been
* written previously, if the referenced symbol is in a previously
* unloaded module which is now being loaded again. In that case, skip
* the warning and the instruction write.
*/
if (insn_val == PPC_INST_LD_TOC)
return 0;
if (insn_val != PPC_RAW_NOP()) {
pr_err("%s: Expected nop after call, got %08x at %pS\n",
me->name, insn_val, instruction);
return -ENOEXEC;
}
/* ld r2,R2_STACK_OFFSET(r1) */
return patch_instruction(instruction, ppc_inst(PPC_INST_LD_TOC));
}
int apply_relocate_add(Elf64_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf64_Sym *sym;
unsigned long *location;
unsigned long value;
pr_debug("Applying ADD relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
#ifndef CONFIG_PPC_KERNEL_PCREL
/* First time we're called, we can fix up .TOC. */
if (!me->arch.toc_fixed) {
sym = find_dot_toc(sechdrs, strtab, symindex);
/* It's theoretically possible that a module doesn't want a
* .TOC. so don't fail it just for that. */
if (sym)
sym->st_value = my_r2(sechdrs, me);
me->arch.toc_fixed = true;
}
#endif
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rela[i].r_offset;
/* This is the symbol it is referring to */
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rela[i].r_info);
pr_debug("RELOC at %p: %li-type as %s (0x%lx) + %li\n",
location, (long)ELF64_R_TYPE(rela[i].r_info),
strtab + sym->st_name, (unsigned long)sym->st_value,
(long)rela[i].r_addend);
/* `Everything is relative'. */
value = sym->st_value + rela[i].r_addend;
switch (ELF64_R_TYPE(rela[i].r_info)) {
case R_PPC64_ADDR32:
/* Simply set it */
*(u32 *)location = value;
break;
case R_PPC64_ADDR64:
/* Simply set it */
*(unsigned long *)location = value;
break;
#ifndef CONFIG_PPC_KERNEL_PCREL
case R_PPC64_TOC:
*(unsigned long *)location = my_r2(sechdrs, me);
break;
case R_PPC64_TOC16:
/* Subtract TOC pointer */
value -= my_r2(sechdrs, me);
if (value + 0x8000 > 0xffff) {
pr_err("%s: bad TOC16 relocation (0x%lx)\n",
me->name, value);
return -ENOEXEC;
}
*((uint16_t *) location)
= (*((uint16_t *) location) & ~0xffff)
| (value & 0xffff);
break;
case R_PPC64_TOC16_LO:
/* Subtract TOC pointer */
value -= my_r2(sechdrs, me);
*((uint16_t *) location)
= (*((uint16_t *) location) & ~0xffff)
| (value & 0xffff);
break;
case R_PPC64_TOC16_DS:
/* Subtract TOC pointer */
value -= my_r2(sechdrs, me);
if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
pr_err("%s: bad TOC16_DS relocation (0x%lx)\n",
me->name, value);
return -ENOEXEC;
}
*((uint16_t *) location)
= (*((uint16_t *) location) & ~0xfffc)
| (value & 0xfffc);
break;
case R_PPC64_TOC16_LO_DS:
/* Subtract TOC pointer */
value -= my_r2(sechdrs, me);
if ((value & 3) != 0) {
pr_err("%s: bad TOC16_LO_DS relocation (0x%lx)\n",
me->name, value);
return -ENOEXEC;
}
*((uint16_t *) location)
= (*((uint16_t *) location) & ~0xfffc)
| (value & 0xfffc);
break;
case R_PPC64_TOC16_HA:
/* Subtract TOC pointer */
value -= my_r2(sechdrs, me);
value = ((value + 0x8000) >> 16);
*((uint16_t *) location)
= (*((uint16_t *) location) & ~0xffff)
| (value & 0xffff);
break;
#endif
case R_PPC_REL24:
#ifdef CONFIG_PPC_KERNEL_PCREL
/* PCREL still generates REL24 for mcount */
case R_PPC64_REL24_NOTOC:
#endif
/* FIXME: Handle weak symbols here --RR */
if (sym->st_shndx == SHN_UNDEF ||
sym->st_shndx == SHN_LIVEPATCH) {
/* External: go via stub */
value = stub_for_addr(sechdrs, value, me,
strtab + sym->st_name);
if (!value)
return -ENOENT;
if (restore_r2(strtab + sym->st_name,
(u32 *)location + 1, me))
return -ENOEXEC;
} else
value += local_entry_offset(sym);
/* Convert value to relative */
value -= (unsigned long)location;
if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0){
pr_err("%s: REL24 %li out of range!\n",
me->name, (long int)value);
return -ENOEXEC;
}
/* Only replace bits 2 through 26 */
value = (*(uint32_t *)location & ~PPC_LI_MASK) | PPC_LI(value);
if (patch_instruction((u32 *)location, ppc_inst(value)))
return -EFAULT;
break;
case R_PPC64_REL64:
/* 64 bits relative (used by features fixups) */
*location = value - (unsigned long)location;
break;
case R_PPC64_REL32:
/* 32 bits relative (used by relative exception tables) */
/* Convert value to relative */
value -= (unsigned long)location;
if (value + 0x80000000 > 0xffffffff) {
pr_err("%s: REL32 %li out of range!\n",
me->name, (long int)value);
return -ENOEXEC;
}
*(u32 *)location = value;
break;
#ifdef CONFIG_PPC_KERNEL_PCREL
case R_PPC64_PCREL34: {
unsigned long absvalue = value;
/* Convert value to relative */
value -= (unsigned long)location;
if (value + 0x200000000 > 0x3ffffffff) {
if (sym->st_shndx != me->arch.pcpu_section) {
pr_err("%s: REL34 %li out of range!\n",
me->name, (long)value);
return -ENOEXEC;
}
/*
* per-cpu section is special cased because
* it is moved during loading, so has to be
* converted to use GOT.
*/
value = got_for_addr(sechdrs, absvalue, me,
strtab + sym->st_name);
if (!value)
return -ENOENT;
value -= (unsigned long)location;
/* Turn pla into pld */
if (patch_instruction((u32 *)location,
ppc_inst_prefix((*(u32 *)location & ~0x02000000),
(*((u32 *)location + 1) & ~0xf8000000) | 0xe4000000)))
return -EFAULT;
}
if (patch_instruction((u32 *)location,
ppc_inst_prefix((*(u32 *)location & ~0x3ffff) | IMM_H18(value),
(*((u32 *)location + 1) & ~0xffff) | IMM_L(value))))
return -EFAULT;
break;
}
#else
case R_PPC64_TOCSAVE:
/*
* Marker reloc indicates we don't have to save r2.
* That would only save us one instruction, so ignore
* it.
*/
break;
#endif
case R_PPC64_ENTRY:
if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
break;
/*
* Optimize ELFv2 large code model entry point if
* the TOC is within 2GB range of current location.
*/
value = my_r2(sechdrs, me) - (unsigned long)location;
if (value + 0x80008000 > 0xffffffff)
break;
/*
* Check for the large code model prolog sequence:
* ld r2, ...(r12)
* add r2, r2, r12
*/
if ((((uint32_t *)location)[0] & ~0xfffc) != PPC_RAW_LD(_R2, _R12, 0))
break;
if (((uint32_t *)location)[1] != PPC_RAW_ADD(_R2, _R2, _R12))
break;
/*
* If found, replace it with:
* addis r2, r12, (.TOC.-func)@ha
* addi r2, r2, (.TOC.-func)@l
*/
((uint32_t *)location)[0] = PPC_RAW_ADDIS(_R2, _R12, PPC_HA(value));
((uint32_t *)location)[1] = PPC_RAW_ADDI(_R2, _R2, PPC_LO(value));
break;
case R_PPC64_REL16_HA:
/* Subtract location pointer */
value -= (unsigned long)location;
value = ((value + 0x8000) >> 16);
*((uint16_t *) location)
= (*((uint16_t *) location) & ~0xffff)
| (value & 0xffff);
break;
case R_PPC64_REL16_LO:
/* Subtract location pointer */
value -= (unsigned long)location;
*((uint16_t *) location)
= (*((uint16_t *) location) & ~0xffff)
| (value & 0xffff);
break;
#ifdef CONFIG_PPC_KERNEL_PCREL
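/*
* GOT-indirect access: point the prefixed load at a GOT entry for the
* symbol and patch in the 34-bit pc-relative displacement (high 18 bits
* in the prefix word, low 16 bits in the suffix word).
*/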
case R_PPC64_GOT_PCREL34:
value = got_for_addr(sechdrs, value, me,
strtab + sym->st_name);
if (!value)
return -ENOENT;
value -= (unsigned long)location;
((uint32_t *)location)[0] = (((uint32_t *)location)[0] & ~0x3ffff) |
((value >> 16) & 0x3ffff);
((uint32_t *)location)[1] = (((uint32_t *)location)[1] & ~0xffff) |
(value & 0xffff);
break;
#endif
default:
pr_err("%s: Unknown ADD relocation: %lu\n",
me->name,
(unsigned long)ELF64_R_TYPE(rela[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
#ifdef CONFIG_DYNAMIC_FTRACE
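/*
* Recover the branch target of a module stub: validate the stub magic,
* then read the function address out of its funcdata. Used by ftrace
* when inspecting module trampolines.
*/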
int module_trampoline_target(struct module *mod, unsigned long addr,
unsigned long *target)
{
struct ppc64_stub_entry *stub;
func_desc_t funcdata;
u32 magic;
if (!within_module_core(addr, mod)) {
pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
return -EFAULT;
}
stub = (struct ppc64_stub_entry *)addr;
if (copy_from_kernel_nofault(&magic, &stub->magic,
sizeof(magic))) {
pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
return -EFAULT;
}
if (magic != STUB_MAGIC) {
pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
return -EFAULT;
}
if (copy_from_kernel_nofault(&funcdata, &stub->funcdata,
sizeof(funcdata))) {
pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
return -EFAULT;
}
*target = stub_func_addr(funcdata);
return 0;
}
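/* Pre-create the ftrace_caller (and ftrace_regs_caller) stubs for this module. */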
int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
{
mod->arch.tramp = stub_for_addr(sechdrs,
(unsigned long)ftrace_caller,
mod,
"ftrace_caller");
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
mod->arch.tramp_regs = stub_for_addr(sechdrs,
(unsigned long)ftrace_regs_caller,
mod,
"ftrace_regs_caller");
if (!mod->arch.tramp_regs)
return -ENOENT;
#endif
if (!mod->arch.tramp)
return -ENOENT;
return 0;
}
#endif
| linux-master | arch/powerpc/kernel/module_64.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
* Copyright (C) 2001 IBM
* Copyright (C) 1997,1998 Jakub Jelinek ([email protected])
* Copyright (C) 1997 David S. Miller ([email protected])
*
* Derived from "arch/i386/kernel/signal.c"
* Copyright (C) 1991, 1992 Linus Torvalds
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#ifdef CONFIG_PPC64
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include <asm/syscalls_32.h>
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#endif
#include "signal.h"
#ifdef CONFIG_PPC64
#define old_sigaction old_sigaction32
#define sigcontext sigcontext32
#define mcontext mcontext32
#define ucontext ucontext32
/*
* Userspace code may pass a ucontext which doesn't include VSX added
* at the end. We need to check for this case.
*/
#define UCONTEXTSIZEWITHOUTVSX \
(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
/*
* Returning 0 means we return to userspace via
* ret_from_except and thus restore all user
* registers from *regs. This is what we need
* to do when a signal has been delivered.
*/
#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG ELF_NVRREG32
/*
* Functions for flipping sigsets (thanks to brain dead generic
* implementation that makes things simple for little endian only)
*/
#define unsafe_put_sigset_t unsafe_put_compat_sigset
#define unsafe_get_sigset_t unsafe_get_compat_sigset
#define to_user_ptr(p) ptr_to_compat(p)
#define from_user_ptr(p) compat_ptr(p)
static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
int val, i;
for (i = 0; i <= PT_RESULT; i ++) {
/* Force user to always see softe as 1 (interrupts enabled) */
if (i == PT_SOFTE)
val = 1;
else
val = gregs[i];
unsafe_put_user(val, &frame->mc_gregs[i], failed);
}
return 0;
failed:
return 1;
}
static __always_inline int
__unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
int i;
for (i = 0; i <= PT_RESULT; i++) {
if ((i == PT_MSR) || (i == PT_SOFTE))
continue;
unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
}
return 0;
failed:
return 1;
}
#else /* CONFIG_PPC64 */
#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define unsafe_put_sigset_t(uset, set, label) do { \
sigset_t __user *__us = uset ; \
const sigset_t *__s = set; \
\
unsafe_copy_to_user(__us, __s, sizeof(*__us), label); \
} while (0)
#define unsafe_get_sigset_t unsafe_get_user_sigset
#define to_user_ptr(p) ((unsigned long)(p))
#define from_user_ptr(p) ((void __user *)(p))
static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
return 0;
failed:
return 1;
}
static __always_inline
int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
/* copy up to but not including MSR */
unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);
/* copy from orig_r3 (the word after the MSR) up to the end */
unsafe_copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);
return 0;
failed:
return 1;
}
#endif
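/*
* Goto-on-fault wrappers around the helpers above, for use inside
* user_access_begin()/user_access_end() sections.
*/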
#define unsafe_save_general_regs(regs, frame, label) do { \
if (__unsafe_save_general_regs(regs, frame)) \
goto label; \
} while (0)
#define unsafe_restore_general_regs(regs, frame, label) do { \
if (__unsafe_restore_general_regs(regs, frame)) \
goto label; \
} while (0)
/*
* When we have signals to deliver, we set up on the
* user stack, going down from the original stack pointer:
* an ABI gap of 56 words
* an mcontext struct
* a sigcontext struct
* a gap of __SIGNAL_FRAMESIZE bytes
*
* Each of these things must be a multiple of 16 bytes in size. The following
* structure represents all of this except the __SIGNAL_FRAMESIZE gap
*
*/
struct sigframe {
struct sigcontext sctx; /* the sigcontext */
struct mcontext mctx; /* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
struct sigcontext sctx_transact;
struct mcontext mctx_transact;
#endif
/*
* Programs using the rs6000/xcoff abi can save up to 19 gp
* regs and 18 fp regs below sp before decrementing it.
*/
int abigap[56];
};
/*
* When we have rt signals to deliver, we set up on the
* user stack, going down from the original stack pointer:
* one rt_sigframe struct (siginfo + ucontext + ABI gap)
* a gap of __SIGNAL_FRAMESIZE+16 bytes
* (the +16 is to get the siginfo and ucontext in the same
* positions as in older kernels).
*
* Each of these things must be a multiple of 16 bytes in size.
*
*/
struct rt_sigframe {
#ifdef CONFIG_PPC64
compat_siginfo_t info;
#else
struct siginfo info;
#endif
struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
struct ucontext uc_transact;
#endif
/*
* Programs using the rs6000/xcoff abi can save up to 19 gp
* regs and 18 fp regs below sp before decrementing it.
*/
int abigap[56];
};
unsigned long get_min_sigframe_size_32(void)
{
return max(sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE + 16,
sizeof(struct sigframe) + __SIGNAL_FRAMESIZE);
}
/*
* Save the current user registers on the user stack.
* We only save the altivec/spe registers if the process has used
* altivec/spe instructions at some point.
*/
static void prepare_save_user_regs(int ctx_has_vsx_region)
{
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
if (current->thread.used_vr)
flush_altivec_to_thread(current);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
current->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_VSX
if (current->thread.used_vsr && ctx_has_vsx_region)
flush_vsx_to_thread(current);
#endif
#ifdef CONFIG_SPE
if (current->thread.used_spe)
flush_spe_to_thread(current);
#endif
}
static __always_inline int
__unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
struct mcontext __user *tm_frame, int ctx_has_vsx_region)
{
unsigned long msr = regs->msr;
/* save general registers */
unsafe_save_general_regs(regs, frame, failed);
#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (current->thread.used_vr) {
unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
ELF_NVRREG * sizeof(vector128), failed);
/* set MSR_VEC in the saved MSR value to indicate that
frame->mc_vregs contains valid data */
msr |= MSR_VEC;
}
/* else assert((regs->msr & MSR_VEC) == 0) */
/* We always copy to/from vrsave, it's 0 if we don't have or don't
* use altivec. Since VSCR only contains 32 bits saved in the least
* significant bits of a vector, we "cheat" and stuff VRSAVE in the
* most significant bits of that same vector. --BenH
* Note that the current VRSAVE value is in the SPR at this point.
*/
unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
failed);
#endif /* CONFIG_ALTIVEC */
unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);
/*
* Clear the MSR VSX bit to indicate there is no valid state attached
* to this context, except in the specific case below where we set it.
*/
msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
/*
* Copy VSR 0-31 upper half from thread_struct to local
* buffer, then write that to userspace. Also set MSR_VSX in
* the saved MSR value to indicate that frame->mc_vregs
* contains valid data
*/
if (current->thread.used_vsr && ctx_has_vsx_region) {
unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
msr |= MSR_VSX;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/* save spe registers */
if (current->thread.used_spe) {
unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
ELF_NEVRREG * sizeof(u32), failed);
/* set MSR_SPE in the saved MSR value to indicate that
frame->mc_vregs contains valid data */
msr |= MSR_SPE;
}
/* else assert((regs->msr & MSR_SPE) == 0) */
/* We always copy to/from spefscr */
unsafe_put_user(current->thread.spefscr,
(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */
unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
/* We need to write 0 to the MSR top 32 bits in the tm frame so that we
* can check it on the restore to see if TM is active
*/
if (tm_frame)
unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);
return 0;
failed:
return 1;
}
#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx)) \
goto label; \
} while (0)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Save the current user registers on the user stack.
* We only save the altivec/spe registers if the process has used
* altivec/spe instructions at some point.
* We also save the transactional registers to a second ucontext in the
* frame.
*
* See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts().
*/
static void prepare_save_tm_user_regs(void)
{
WARN_ON(tm_suspend_disabled);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
}
static __always_inline int
save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
struct mcontext __user *tm_frame, unsigned long msr)
{
/* Save both sets of general registers */
unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
unsafe_save_general_regs(regs, tm_frame, failed);
/* Stash the top half of the 64bit MSR into the 32bit MSR word
* of the transactional mcontext. This way we have a backward-compatible
* MSR in the 'normal' (checkpointed) mcontext and additionally one can
* also look at what type of transaction (T or S) was active at the
* time of the signal.
*/
unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);
/* save altivec registers */
if (current->thread.used_vr) {
unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
ELF_NVRREG * sizeof(vector128), failed);
if (msr & MSR_VEC)
unsafe_copy_to_user(&tm_frame->mc_vregs,
&current->thread.vr_state,
ELF_NVRREG * sizeof(vector128), failed);
else
unsafe_copy_to_user(&tm_frame->mc_vregs,
&current->thread.ckvr_state,
ELF_NVRREG * sizeof(vector128), failed);
/* set MSR_VEC in the saved MSR value to indicate that
* frame->mc_vregs contains valid data
*/
msr |= MSR_VEC;
}
/* We always copy to/from vrsave, it's 0 if we don't have or don't
* use altivec. Since VSCR only contains 32 bits saved in the least
* significant bits of a vector, we "cheat" and stuff VRSAVE in the
* most significant bits of that same vector. --BenH
*/
unsafe_put_user(current->thread.ckvrsave,
(u32 __user *)&frame->mc_vregs[32], failed);
if (msr & MSR_VEC)
unsafe_put_user(current->thread.vrsave,
(u32 __user *)&tm_frame->mc_vregs[32], failed);
else
unsafe_put_user(current->thread.ckvrsave,
(u32 __user *)&tm_frame->mc_vregs[32], failed);
unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
if (msr & MSR_FP)
unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
else
unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);
/*
* Copy VSR 0-31 upper half from thread_struct to local
* buffer, then write that to userspace. Also set MSR_VSX in
* the saved MSR value to indicate that frame->mc_vregs
* contains valid data
*/
if (current->thread.used_vsr) {
unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
if (msr & MSR_VSX)
unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
else
unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);
msr |= MSR_VSX;
}
unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
return 0;
failed:
return 1;
}
#else
static void prepare_save_tm_user_regs(void) { }
static __always_inline int
save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
struct mcontext __user *tm_frame, unsigned long msr)
{
return 0;
}
#endif
#define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr)) \
goto label; \
} while (0)
/*
* Restore the current user register values from the user stack,
* (except for MSR).
*/
static long restore_user_regs(struct pt_regs *regs,
struct mcontext __user *sr, int sig)
{
unsigned int save_r2 = 0;
unsigned long msr;
#ifdef CONFIG_VSX
int i;
#endif
if (!user_read_access_begin(sr, sizeof(*sr)))
return 1;
/*
* restore general registers but not including MSR or SOFTE. Also
* take care of keeping r2 (TLS) intact if not a signal
*/
if (!sig)
save_r2 = (unsigned int)regs->gpr[2];
unsafe_restore_general_regs(regs, sr, failed);
set_trap_norestart(regs);
unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
if (!sig)
regs->gpr[2] = (unsigned long) save_r2;
/* if doing signal return, restore the previous little-endian mode */
if (sig)
regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
#ifdef CONFIG_ALTIVEC
/*
* Force the process to reload the altivec registers from
* current->thread when it next does altivec instructions
*/
regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
sizeof(sr->mc_vregs), failed);
current->thread.used_vr = true;
} else if (current->thread.used_vr)
memset(&current->thread.vr_state, 0,
ELF_NVRREG * sizeof(vector128));
/* Always get VRSAVE back */
unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
#ifdef CONFIG_VSX
/*
* Force the process to reload the VSX registers from
* current->thread when it next does VSX instruction.
*/
regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
if (msr & MSR_VSX) {
/*
* Restore VSX registers from the stack to a local
* buffer, then write this out to the thread_struct
*/
unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
current->thread.used_vsr = true;
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++)
current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
/*
* force the process to reload the FP registers from
* current->thread when it next does FP instructions
*/
regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
#ifdef CONFIG_SPE
/*
* Force the process to reload the spe registers from
* current->thread when it next does spe instructions.
* Since this is user ABI, we must enforce the sizing.
*/
BUILD_BUG_ON(sizeof(current->thread.spe) != ELF_NEVRREG * sizeof(u32));
regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
if (msr & MSR_SPE) {
/* restore spe registers from the stack */
unsafe_copy_from_user(&current->thread.spe, &sr->mc_vregs,
sizeof(current->thread.spe), failed);
current->thread.used_spe = true;
} else if (current->thread.used_spe)
memset(&current->thread.spe, 0, sizeof(current->thread.spe));
/* Always get SPEFSCR back */
unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */
user_read_access_end();
return 0;
failed:
user_read_access_end();
return 1;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Restore the current user register values from the user stack, except for
* MSR, and recheckpoint the original checkpointed register state for processes
* in transactions.
*/
static long restore_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *sr,
struct mcontext __user *tm_sr)
{
unsigned long msr, msr_hi;
int i;
if (tm_suspend_disabled)
return 1;
/*
* restore general registers but not including MSR or SOFTE. Also
* take care of keeping r2 (TLS) intact if not a signal.
* See comment in signal_64.c:restore_tm_sigcontexts();
* TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
* were set by the signal delivery.
*/
if (!user_read_access_begin(sr, sizeof(*sr)))
return 1;
unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
/* Restore the previous little-endian mode */
regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
sizeof(sr->mc_vregs), failed);
current->thread.used_vr = true;
} else if (current->thread.used_vr) {
memset(&current->thread.vr_state, 0,
ELF_NVRREG * sizeof(vector128));
memset(&current->thread.ckvr_state, 0,
ELF_NVRREG * sizeof(vector128));
}
/* Always get VRSAVE back */
unsafe_get_user(current->thread.ckvrsave,
(u32 __user *)&sr->mc_vregs[32], failed);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
if (msr & MSR_VSX) {
/*
* Restore VSX registers from the stack to a local
* buffer, then write this out to the thread_struct
*/
unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
current->thread.used_vsr = true;
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++) {
current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
}
user_read_access_end();
if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
return 1;
unsafe_restore_general_regs(regs, tm_sr, failed);
/* restore altivec registers from the stack */
if (msr & MSR_VEC)
unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
sizeof(sr->mc_vregs), failed);
/* Always get VRSAVE back */
unsafe_get_user(current->thread.vrsave,
(u32 __user *)&tm_sr->mc_vregs[32], failed);
unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);
if (msr & MSR_VSX) {
/*
* Restore VSX registers from the stack to a local
* buffer, then write this out to the thread_struct
*/
unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
current->thread.used_vsr = true;
}
/* Get the top half of the MSR from the user context */
unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
msr_hi <<= 32;
user_read_access_end();
/* If TM bits are set to the reserved value, it's an invalid context */
if (MSR_TM_RESV(msr_hi))
return 1;
/*
* Disabling preemption, since it is unsafe to be preempted
* with MSR[TS] set without recheckpointing.
*/
preempt_disable();
/*
* CAUTION:
* After regs->MSR[TS] being updated, make sure that get_user(),
* put_user() or similar functions are *not* called. These
* functions can generate page faults which will cause the process
* to be de-scheduled with MSR[TS] set but without calling
* tm_recheckpoint(). This can cause a bug.
*
* Pull in the MSR TM bits from the user context
*/
regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
/* Now, recheckpoint. This loads up all of the checkpointed (older)
* registers, including FP and V[S]Rs. After recheckpointing, the
* transactional versions should be loaded.
*/
tm_enable();
/* Make sure the transaction is marked as failed */
current->thread.tm_texasr |= TEXASR_FS;
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&current->thread);
/* This loads the speculative FP/VEC state, if used */
msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_FP) {
load_fp_state(&current->thread.fp_state);
regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
}
if (msr & MSR_VEC) {
load_vr_state(&current->thread.vr_state);
regs_set_return_msr(regs, regs->msr | MSR_VEC);
}
preempt_enable();
return 0;
failed:
user_read_access_end();
return 1;
}
#else
static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
struct mcontext __user *tm_sr)
{
return 0;
}
#endif
#ifdef CONFIG_PPC64
#define copy_siginfo_to_user copy_siginfo_to_user32
#endif /* CONFIG_PPC64 */
/*
* Set up a signal frame for a "real-time" signal handler
* (one which gets siginfo).
*/
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
struct task_struct *tsk)
{
struct rt_sigframe __user *frame;
struct mcontext __user *mctx;
struct mcontext __user *tm_mctx = NULL;
unsigned long newsp = 0;
unsigned long tramp;
struct pt_regs *regs = tsk->thread.regs;
/* Save the thread's msr before get_tm_stackpointer() changes it */
unsigned long msr = regs->msr;
/* Set up Signal Frame */
frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
mctx = &frame->uc.uc_mcontext;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_mctx = &frame->uc_transact.uc_mcontext;
#endif
if (MSR_TM_ACTIVE(msr))
prepare_save_tm_user_regs();
else
prepare_save_user_regs(1);
if (!user_access_begin(frame, sizeof(*frame)))
goto badframe;
/* Put the siginfo & fill in most of the ucontext */
unsafe_put_user(0, &frame->uc.uc_flags, failed);
#ifdef CONFIG_PPC64
unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#else
unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#endif
unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);
if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsafe_put_user((unsigned long)&frame->uc_transact,
&frame->uc.uc_link, failed);
unsafe_put_user((unsigned long)tm_mctx,
&frame->uc_transact.uc_regs, failed);
#endif
unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
} else {
unsafe_put_user(0, &frame->uc.uc_link, failed);
unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
}
/* Set up the return trampoline: in the vDSO if mapped, else on the stack */
if (tsk->mm->context.vdso) {
tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
} else {
tramp = (unsigned long)mctx->mc_pad;
unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
}
unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
user_access_end();
if (copy_siginfo_to_user(&frame->info, &ksig->info))
goto badframe;
regs->link = tramp;
#ifdef CONFIG_PPC_FPU_REGS
tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
#endif
/* create a stack frame for the caller of the handler */
newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
if (put_user(regs->gpr[1], (u32 __user *)newsp))
goto badframe;
/* Fill registers for signal handler */
regs->gpr[1] = newsp;
regs->gpr[3] = ksig->sig;
regs->gpr[4] = (unsigned long)&frame->info;
regs->gpr[5] = (unsigned long)&frame->uc;
regs->gpr[6] = (unsigned long)frame;
regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
/* enter the signal handler in native-endian mode */
regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
return 0;
failed:
user_access_end();
badframe:
signal_fault(tsk, regs, "handle_rt_signal32", frame);
return 1;
}
/*
* OK, we're invoking a handler
*/
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
struct task_struct *tsk)
{
struct sigcontext __user *sc;
struct sigframe __user *frame;
struct mcontext __user *mctx;
struct mcontext __user *tm_mctx = NULL;
unsigned long newsp = 0;
unsigned long tramp;
struct pt_regs *regs = tsk->thread.regs;
/* Save the thread's msr before get_tm_stackpointer() changes it */
unsigned long msr = regs->msr;
/* Set up Signal Frame */
frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
mctx = &frame->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_mctx = &frame->mctx_transact;
#endif
if (MSR_TM_ACTIVE(msr))
prepare_save_tm_user_regs();
else
prepare_save_user_regs(1);
if (!user_access_begin(frame, sizeof(*frame)))
goto badframe;
sc = (struct sigcontext __user *) &frame->sctx;
#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
#ifdef CONFIG_PPC64
unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
#else
unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
#endif
unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
unsafe_put_user(ksig->sig, &sc->signal, failed);
if (MSR_TM_ACTIVE(msr))
unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
else
unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
if (tsk->mm->context.vdso) {
tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
} else {
tramp = (unsigned long)mctx->mc_pad;
unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
}
user_access_end();
regs->link = tramp;
#ifdef CONFIG_PPC_FPU_REGS
tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
#endif
/* create a stack frame for the caller of the handler */
newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
if (put_user(regs->gpr[1], (u32 __user *)newsp))
goto badframe;
regs->gpr[1] = newsp;
regs->gpr[3] = ksig->sig;
regs->gpr[4] = (unsigned long) sc;
regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
/* enter the signal handler in native-endian mode */
regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
return 0;
failed:
user_access_end();
badframe:
signal_fault(tsk, regs, "handle_signal32", frame);
return 1;
}
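/*
* Restore the blocked signal mask and the user registers from a user
* ucontext; shared by rt_sigreturn, swapcontext and debug_setcontext.
*/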
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
sigset_t set;
struct mcontext __user *mcp;
if (!user_read_access_begin(ucp, sizeof(*ucp)))
return -EFAULT;
unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
#ifdef CONFIG_PPC64
{
u32 cmcp;
unsafe_get_user(cmcp, &ucp->uc_regs, failed);
mcp = (struct mcontext __user *)(u64)cmcp;
}
#else
unsafe_get_user(mcp, &ucp->uc_regs, failed);
#endif
user_read_access_end();
set_current_blocked(&set);
if (restore_user_regs(regs, mcp, sig))
return -EFAULT;
return 0;
failed:
user_read_access_end();
return -EFAULT;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int do_setcontext_tm(struct ucontext __user *ucp,
struct ucontext __user *tm_ucp,
struct pt_regs *regs)
{
sigset_t set;
struct mcontext __user *mcp;
struct mcontext __user *tm_mcp;
u32 cmcp;
u32 tm_cmcp;
if (!user_read_access_begin(ucp, sizeof(*ucp)))
return -EFAULT;
unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
unsafe_get_user(cmcp, &ucp->uc_regs, failed);
user_read_access_end();
if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
return -EFAULT;
mcp = (struct mcontext __user *)(u64)cmcp;
tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
/* no need to check access_ok(mcp), since mcp < 4GB */
set_current_blocked(&set);
if (restore_tm_user_regs(regs, mcp, tm_mcp))
return -EFAULT;
return 0;
failed:
user_read_access_end();
return -EFAULT;
}
#endif
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
struct pt_regs *regs = current_pt_regs();
int ctx_has_vsx_region = 0;
#ifdef CONFIG_PPC64
unsigned long new_msr = 0;
if (new_ctx) {
struct mcontext __user *mcp;
u32 cmcp;
/*
* Get pointer to the real mcontext. No need for
* access_ok since we are dealing with compat
* pointers.
*/
if (__get_user(cmcp, &new_ctx->uc_regs))
return -EFAULT;
mcp = (struct mcontext __user *)(u64)cmcp;
if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
return -EFAULT;
}
/*
* Check that the context is not smaller than the original
* size (with VMX but without VSX)
*/
if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
return -EINVAL;
/*
* If the new context state sets the MSR VSX bits but
* it doesn't provide VSX state.
*/
if ((ctx_size < sizeof(struct ucontext)) &&
(new_msr & MSR_VSX))
return -EINVAL;
/* Does the context have enough room to store VSX data? */
if (ctx_size >= sizeof(struct ucontext))
ctx_has_vsx_region = 1;
#else
/* Context size is for future use. Right now, we only make sure
* we are passed something we understand
*/
if (ctx_size < sizeof(struct ucontext))
return -EINVAL;
#endif
if (old_ctx != NULL) {
struct mcontext __user *mctx;
/*
* old_ctx might not be 16-byte aligned, in which
* case old_ctx->uc_mcontext won't be either.
* Because we have the old_ctx->uc_pad2 field
* before old_ctx->uc_mcontext, we need to round down
* from &old_ctx->uc_mcontext to a 16-byte boundary.
*/
mctx = (struct mcontext __user *)
((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
prepare_save_user_regs(ctx_has_vsx_region);
if (!user_write_access_begin(old_ctx, ctx_size))
return -EFAULT;
unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
unsafe_put_sigset_t(&old_ctx->uc_sigmask, &current->blocked, failed);
unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
user_write_access_end();
}
if (new_ctx == NULL)
return 0;
if (!access_ok(new_ctx, ctx_size) ||
fault_in_readable((char __user *)new_ctx, ctx_size))
return -EFAULT;
/*
* If we get a fault copying the context into the kernel's
* image of the user's registers, we can't just return -EFAULT
* because the user's registers will be corrupted. For instance
* the NIP value may have been updated but not some of the
* other registers. Given that we have done the access_ok
* and successfully read the first and last bytes of the region
* above, this should only happen in an out-of-memory situation
* or if another thread unmaps the region containing the context.
* We kill the task with a SIGSEGV in this situation.
*/
if (do_setcontext(new_ctx, regs, 0)) {
force_exit_sig(SIGSEGV);
return -EFAULT;
}
set_thread_flag(TIF_RESTOREALL);
return 0;
failed:
user_write_access_end();
return -EFAULT;
}
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
struct rt_sigframe __user *rt_sf;
struct pt_regs *regs = current_pt_regs();
int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
struct ucontext __user *uc_transact;
unsigned long msr_hi;
unsigned long tmp;
#endif
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
rt_sf = (struct rt_sigframe __user *)
(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
if (!access_ok(rt_sf, sizeof(*rt_sf)))
goto bad;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* If there is a transactional state then throw it away.
* The purpose of a sigreturn is to destroy all traces of the
* signal frame, this includes any transactional state created
* within it. We only check for suspended as we can never be
* active in the kernel; if we are active, there is nothing better to
* do than go ahead and Bad Thing later.
* The cause is not important as there will never be a
* recheckpoint so it's not user visible.
*/
if (MSR_TM_SUSPENDED(mfmsr()))
tm_reclaim_current(0);
if (__get_user(tmp, &rt_sf->uc.uc_link))
goto bad;
uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
if (uc_transact) {
u32 cmcp;
struct mcontext __user *mcp;
if (__get_user(cmcp, &uc_transact->uc_regs))
return -EFAULT;
mcp = (struct mcontext __user *)(u64)cmcp;
/* The top 32 bits of the MSR are stashed in the transactional
* ucontext. */
if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
goto bad;
if (MSR_TM_ACTIVE(msr_hi<<32)) {
/* Trying to start TM on non TM system */
if (!cpu_has_feature(CPU_FTR_TM))
goto bad;
/* We only recheckpoint on return if we're in a
* transaction.
*/
tm_restore = 1;
if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
goto bad;
}
}
if (!tm_restore) {
/*
* Unset regs->msr because ucontext MSR TS is not
* set, and recheckpoint was not called. This avoids
* hitting a TM Bad Thing at RFID.
*/
regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
}
/* Fall through, for non-TM restore */
#endif
if (!tm_restore)
if (do_setcontext(&rt_sf->uc, regs, 1))
goto bad;
/*
* It's not clear whether or why it is desirable to save the
* sigaltstack setting on signal delivery and restore it on
* signal return. But other architectures do this and we have
* always done it up until now so it is probably better not to
* change it. -- paulus
*/
#ifdef CONFIG_PPC64
if (compat_restore_altstack(&rt_sf->uc.uc_stack))
goto bad;
#else
if (restore_altstack(&rt_sf->uc.uc_stack))
goto bad;
#endif
set_thread_flag(TIF_RESTOREALL);
return 0;
bad:
signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);
force_sig(SIGSEGV);
return 0;
}
#ifdef CONFIG_PPC32
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
int, ndbg, struct sig_dbg_op __user *, dbg)
{
struct pt_regs *regs = current_pt_regs();
struct sig_dbg_op op;
int i;
unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif
for (i=0; i<ndbg; i++) {
if (copy_from_user(&op, dbg + i, sizeof(op)))
return -EFAULT;
switch (op.dbg_type) {
case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
if (op.dbg_value) {
new_msr |= MSR_DE;
new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
} else {
new_dbcr0 &= ~DBCR0_IC;
if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
current->thread.debug.dbcr1)) {
new_msr &= ~MSR_DE;
new_dbcr0 &= ~DBCR0_IDM;
}
}
#else
if (op.dbg_value)
new_msr |= MSR_SE;
else
new_msr &= ~MSR_SE;
#endif
break;
case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
return -EINVAL;
#else
if (op.dbg_value)
new_msr |= MSR_BE;
else
new_msr &= ~MSR_BE;
#endif
break;
default:
return -EINVAL;
}
}
/* We wait until here to actually install the values in the
registers so if we fail in the above loop, it will not
affect the contents of these registers. After this point,
failure is a problem, anyway, and it's very unlikely unless
the user is really doing something wrong. */
regs_set_return_msr(regs, new_msr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
current->thread.debug.dbcr0 = new_dbcr0;
#endif
if (!access_ok(ctx, sizeof(*ctx)) ||
fault_in_readable((char __user *)ctx, sizeof(*ctx)))
return -EFAULT;
/*
* If we get a fault copying the context into the kernel's
* image of the user's registers, we can't just return -EFAULT
* because the user's registers will be corrupted. For instance
* the NIP value may have been updated but not some of the
* other registers. Given that we have done the access_ok
* and successfully read the first and last bytes of the region
* above, this should only happen in an out-of-memory situation
* or if another thread unmaps the region containing the context.
* We kill the task with a SIGSEGV in this situation.
*/
if (do_setcontext(ctx, regs, 1)) {
signal_fault(current, regs, "sys_debug_setcontext", ctx);
force_sig(SIGSEGV);
goto out;
}
/*
* It's not clear whether or why it is desirable to save the
* sigaltstack setting on signal delivery and restore it on
* signal return. But other architectures do this and we have
* always done it up until now so it is probably better not to
* change it. -- paulus
*/
restore_altstack(&ctx->uc_stack);
set_thread_flag(TIF_RESTOREALL);
out:
return 0;
}
#endif
/*
* Do a signal return; undo the signal stack.
*/
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
struct pt_regs *regs = current_pt_regs();
struct sigframe __user *sf;
struct sigcontext __user *sc;
struct sigcontext sigctx;
struct mcontext __user *sr;
sigset_t set;
struct mcontext __user *mcp;
struct mcontext __user *tm_mcp = NULL;
unsigned long long msr_hi = 0;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
sc = &sf->sctx;
if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
goto badframe;
#ifdef CONFIG_PPC64
/*
* Note that PPC32 puts the upper 32 bits of the sigmask in the
* unused part of the signal stackframe
*/
set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
set.sig[0] = sigctx.oldmask;
set.sig[1] = sigctx._unused[3];
#endif
set_current_blocked(&set);
mcp = (struct mcontext __user *)&sf->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
goto badframe;
#endif
if (MSR_TM_ACTIVE(msr_hi<<32)) {
if (!cpu_has_feature(CPU_FTR_TM))
goto badframe;
if (restore_tm_user_regs(regs, mcp, tm_mcp))
goto badframe;
} else {
sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
if (restore_user_regs(regs, sr, 1)) {
signal_fault(current, regs, "sys_sigreturn", sr);
force_sig(SIGSEGV);
return 0;
}
}
set_thread_flag(TIF_RESTOREALL);
return 0;
badframe:
signal_fault(current, regs, "sys_sigreturn", sc);
force_sig(SIGSEGV);
return 0;
}
| linux-master | arch/powerpc/kernel/signal_32.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support PCI IO workaround
*
* Copyright (C) 2006 Benjamin Herrenschmidt <[email protected]>
* IBM, Corp.
* (C) Copyright 2007-2008 TOSHIBA CORPORATION
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/sched/mm.h> /* for init_mm */
#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/io-workarounds.h>
#include <asm/pte-walk.h>
#define IOWA_MAX_BUS 8
static struct iowa_bus iowa_busses[IOWA_MAX_BUS];
static unsigned int iowa_bus_count;
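/*
* Find the workaround bus covering a virtual PIO address and/or a
* physical MMIO address; a zero argument skips that check.
*/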
static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
{
int i, j;
struct resource *res;
unsigned long vstart, vend;
for (i = 0; i < iowa_bus_count; i++) {
struct iowa_bus *bus = &iowa_busses[i];
struct pci_controller *phb = bus->phb;
if (vaddr) {
vstart = (unsigned long)phb->io_base_virt;
vend = vstart + phb->pci_io_size - 1;
if ((vaddr >= vstart) && (vaddr <= vend))
return bus;
}
if (paddr)
for (j = 0; j < 3; j++) {
res = &phb->mem_resources[j];
if (paddr >= res->start && paddr <= res->end)
return bus;
}
}
return NULL;
}
#ifdef CONFIG_PPC_INDIRECT_MMIO
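/*
* Resolve the bus for an MMIO address: use the token encoded into the
* cookie by iowa_ioremap() when present, otherwise translate the vmalloc
* address back to a physical one and search the registered busses.
*/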
struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
{
struct iowa_bus *bus;
int token;
token = PCI_GET_ADDR_TOKEN(addr);
if (token && token <= iowa_bus_count)
bus = &iowa_busses[token - 1];
else {
unsigned long vaddr, paddr;
vaddr = (unsigned long)PCI_FIX_ADDR(addr);
if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
return NULL;
paddr = ppc_find_vmap_phys(vaddr);
bus = iowa_pci_find(vaddr, paddr);
if (bus == NULL)
return NULL;
}
return bus;
}
#else /* CONFIG_PPC_INDIRECT_MMIO */
struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
{
return NULL;
}
#endif /* !CONFIG_PPC_INDIRECT_MMIO */
#ifdef CONFIG_PPC_INDIRECT_PIO
struct iowa_bus *iowa_pio_find_bus(unsigned long port)
{
unsigned long vaddr = (unsigned long)pci_io_base + port;
return iowa_pci_find(vaddr, 0);
}
#else
struct iowa_bus *iowa_pio_find_bus(unsigned long port)
{
return NULL;
}
#endif
#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
static ret iowa_##name at \
{ \
struct iowa_bus *bus; \
bus = iowa_##space##_find_bus(aa); \
if (bus && bus->ops && bus->ops->name) \
return bus->ops->name al; \
return __do_##name al; \
}
#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
static void iowa_##name at \
{ \
struct iowa_bus *bus; \
bus = iowa_##space##_find_bus(aa); \
if (bus && bus->ops && bus->ops->name) { \
bus->ops->name al; \
return; \
} \
__do_##name al; \
}
#include <asm/io-defs.h>
#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET
static const struct ppc_pci_io iowa_pci_io = {
#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name,
#define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name,
#include <asm/io-defs.h>
#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET
};
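/*
 * Illustrative note (added, not part of the original file): the two
 * inclusions of <asm/io-defs.h> above act as an X-macro. Assuming that
 * header carries entries such as
 *	DEF_PCI_AC_RET(readb, u8, (const PCI_IO_ADDR addr), (addr), mem, addr)
 * the first inclusion expands each entry into an iowa_<name>() wrapper
 * that dispatches to bus->ops-><name> when a workaround bus matches and
 * falls back to __do_<name>() otherwise, while the second inclusion fills
 * the corresponding .<name> slot of the iowa_pci_io table above.
 */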
#ifdef CONFIG_PPC_INDIRECT_MMIO
void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
pgprot_t prot, void *caller)
{
struct iowa_bus *bus;
void __iomem *res = __ioremap_caller(addr, size, prot, caller);
int busno;
bus = iowa_pci_find(0, (unsigned long)addr);
if (bus != NULL) {
busno = bus - iowa_busses;
PCI_SET_ADDR_TOKEN(res, busno + 1);
}
return res;
}
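/*
 * Note (added for clarity, not in the original file): the token stored
 * above lets iowa_mem_find_bus() translate an ioremap cookie straight
 * back to its iowa_busses[] entry (token - 1) without searching; the
 * address-based lookup is only needed for untagged addresses.
 */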
#endif /* CONFIG_PPC_INDIRECT_MMIO */
bool io_workaround_inited;
/* Enable IO workaround */
static void io_workaround_init(void)
{
if (io_workaround_inited)
return;
ppc_pci_io = iowa_pci_io;
io_workaround_inited = true;
}
/* Register new bus to support workaround */
void iowa_register_bus(struct pci_controller *phb, struct ppc_pci_io *ops,
int (*initfunc)(struct iowa_bus *, void *), void *data)
{
struct iowa_bus *bus;
struct device_node *np = phb->dn;
io_workaround_init();
if (iowa_bus_count >= IOWA_MAX_BUS) {
pr_err("IOWA:Too many pci bridges, "
"workarounds disabled for %pOF\n", np);
return;
}
bus = &iowa_busses[iowa_bus_count];
bus->phb = phb;
bus->ops = ops;
bus->private = data;
if (initfunc)
if ((*initfunc)(bus, data))
return;
iowa_bus_count++;
pr_debug("IOWA:[%d]Add bus, %pOF.\n", iowa_bus_count-1, np);
}
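/*
 * Usage sketch (illustrative, not part of the original file): platform
 * code wanting MMIO/PIO workarounds would call, at PHB setup time,
 * something like
 *	iowa_register_bus(phb, &my_bus_ops, my_init, my_data);
 * where my_bus_ops (a hypothetical ppc_pci_io instance) overrides only
 * the accessors that need fixing; everything else falls through to the
 * regular __do_* implementations via the wrappers generated above.
 */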
| linux-master | arch/powerpc/kernel/io-workarounds.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2006-2007, Michael Ellerman, IBM Corporation.
*/
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <asm/machdep.h>
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
if (!phb->controller_ops.setup_msi_irqs ||
!phb->controller_ops.teardown_msi_irqs) {
pr_debug("msi: Platform doesn't provide MSI callbacks.\n");
return -ENOSYS;
}
/* PowerPC doesn't support multiple MSI yet */
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
return phb->controller_ops.setup_msi_irqs(dev, nvec, type);
}
void arch_teardown_msi_irqs(struct pci_dev *dev)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
/*
* We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
* so check the pointer again.
*/
if (phb->controller_ops.teardown_msi_irqs)
phb->controller_ops.teardown_msi_irqs(dev);
}
| linux-master | arch/powerpc/kernel/msi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
* Copyright 2010-2011 Freescale Semiconductor, Inc.
*
* Authors:
* Alexander Graf <[email protected]>
*/
#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pagemap.h>
#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>
#define KVM_MAGIC_PAGE (-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
#define KVM_INST_LWZ 0x80000000
#define KVM_INST_STW 0x90000000
#define KVM_INST_LD 0xe8000000
#define KVM_INST_STD 0xf8000000
#define KVM_INST_NOP 0x60000000
#define KVM_INST_B 0x48000000
#define KVM_INST_B_MASK 0x03ffffff
#define KVM_INST_B_MAX 0x01ffffff
#define KVM_INST_LI 0x38000000
#define KVM_MASK_RT 0x03e00000
#define KVM_RT_30 0x03c00000
#define KVM_MASK_RB 0x0000f800
#define KVM_INST_MFMSR 0x7c0000a6
#define SPR_FROM 0
#define SPR_TO 0x100
#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
(((sprn) & 0x1f) << 16) | \
(((sprn) & 0x3e0) << 6) | \
(moveto))
#define KVM_INST_MFSPR(sprn) KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn) KVM_INST_SPR(sprn, SPR_TO)
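/*
 * Illustrative example (added, not in the original file): mfspr/mtspr
 * encode the SPR number with its two 5-bit halves swapped, which is
 * exactly what KVM_INST_SPR() reproduces. Assuming SPRN_SRR0 == 26,
 * KVM_INST_MFSPR(SPRN_SRR0) evaluates to 0x7c0002a6 | (26 << 16), i.e.
 * an mfspr-from-SRR0 opcode with the RT field left clear so that the
 * register number can be OR-ed in when matching instructions later.
 */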
#define KVM_INST_TLBSYNC 0x7c00046c
#define KVM_INST_MTMSRD_L0 0x7c000164
#define KVM_INST_MTMSRD_L1 0x7c010164
#define KVM_INST_MTMSR 0x7c000124
#define KVM_INST_WRTEE 0x7c000106
#define KVM_INST_WRTEEI_0 0x7c000146
#define KVM_INST_WRTEEI_1 0x7c008146
#define KVM_INST_MTSRIN 0x7c0001e4
static bool kvm_patching_worked = true;
extern char kvm_tmp[];
extern char kvm_tmp_end[];
static int kvm_tmp_index;
static void __init kvm_patch_ins(u32 *inst, u32 new_inst)
{
*inst = new_inst;
flush_icache_range((ulong)inst, (ulong)inst + 4);
}
static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}
static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}
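/*
 * Note (added for clarity, not in the original file): the 32-bit variant
 * above loads from addr + 4 because the magic-page fields are 64-bit and
 * laid out big-endian, so the low word that a 32-bit guest cares about
 * lives at offset 4.
 */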
static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}
static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}
static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}
static void __init kvm_patch_ins_nop(u32 *inst)
{
kvm_patch_ins(inst, KVM_INST_NOP);
}
static void __init kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
/* On relocatable kernels interrupt handlers and our code
can be in different regions, so we don't patch them */
if ((ulong)inst < (ulong)&__end_interrupts)
return;
#endif
kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}
static u32 * __init kvm_alloc(int len)
{
u32 *p;
if ((kvm_tmp_index + len) > (kvm_tmp_end - kvm_tmp)) {
printk(KERN_ERR "KVM: No more space (%d + %d)\n",
kvm_tmp_index, len);
kvm_patching_worked = false;
return NULL;
}
p = (void*)&kvm_tmp[kvm_tmp_index];
kvm_tmp_index += len;
return p;
}
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];
static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
u32 *p;
int distance_start;
int distance_end;
ulong next_inst;
p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
if (!p)
return;
/* Find out where we are and put everything there */
distance_start = (ulong)p - (ulong)inst;
next_inst = ((ulong)inst + 4);
distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];
/* Make sure we only write valid b instructions */
if (distance_start > KVM_INST_B_MAX) {
kvm_patching_worked = false;
return;
}
/* Modify the chunk to fit the invocation */
memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
switch (get_rt(rt)) {
case 30:
kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
magic_var(scratch2), KVM_RT_30);
break;
case 31:
kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
magic_var(scratch1), KVM_RT_30);
break;
default:
p[kvm_emulate_mtmsrd_reg_offs] |= rt;
break;
}
p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);
/* Patch the invocation */
kvm_patch_ins_b(inst, distance_start);
}
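/*
 * Summary (added for clarity, not in the original file): the net effect
 * of kvm_patch_ins_mtmsrd() is that the guest's mtmsrd at @inst becomes
 * a branch into a freshly allocated copy of the kvm_emulate_mtmsrd
 * template in kvm_tmp[]. That copy has the target register, a copy of
 * the original instruction and a branch back to the instruction
 * following @inst patched in, so execution continues transparently
 * after the emulation stub runs.
 */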
extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];
static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
u32 *p;
int distance_start;
int distance_end;
ulong next_inst;
p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
if (!p)
return;
/* Find out where we are and put everything there */
distance_start = (ulong)p - (ulong)inst;
next_inst = ((ulong)inst + 4);
distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];
/* Make sure we only write valid b instructions */
if (distance_start > KVM_INST_B_MAX) {
kvm_patching_worked = false;
return;
}
/* Modify the chunk to fit the invocation */
memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;
/* Make clobbered registers work too */
switch (get_rt(rt)) {
case 30:
kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
magic_var(scratch2), KVM_RT_30);
kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
magic_var(scratch2), KVM_RT_30);
break;
case 31:
kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
magic_var(scratch1), KVM_RT_30);
kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
magic_var(scratch1), KVM_RT_30);
break;
default:
p[kvm_emulate_mtmsr_reg1_offs] |= rt;
p[kvm_emulate_mtmsr_reg2_offs] |= rt;
break;
}
p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);
/* Patch the invocation */
kvm_patch_ins_b(inst, distance_start);
}
#ifdef CONFIG_BOOKE
extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];
static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
u32 *p;
int distance_start;
int distance_end;
ulong next_inst;
p = kvm_alloc(kvm_emulate_wrtee_len * 4);
if (!p)
return;
/* Find out where we are and put everything there */
distance_start = (ulong)p - (ulong)inst;
next_inst = ((ulong)inst + 4);
distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];
/* Make sure we only write valid b instructions */
if (distance_start > KVM_INST_B_MAX) {
kvm_patching_worked = false;
return;
}
/* Modify the chunk to fit the invocation */
memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;
if (imm_one) {
p[kvm_emulate_wrtee_reg_offs] =
KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
} else {
/* Make clobbered registers work too */
switch (get_rt(rt)) {
case 30:
kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
magic_var(scratch2), KVM_RT_30);
break;
case 31:
kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
magic_var(scratch1), KVM_RT_30);
break;
default:
p[kvm_emulate_wrtee_reg_offs] |= rt;
break;
}
}
p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);
/* Patch the invocation */
kvm_patch_ins_b(inst, distance_start);
}
extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];
static void __init kvm_patch_ins_wrteei_0(u32 *inst)
{
u32 *p;
int distance_start;
int distance_end;
ulong next_inst;
p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
if (!p)
return;
/* Find out where we are and put everything there */
distance_start = (ulong)p - (ulong)inst;
next_inst = ((ulong)inst + 4);
distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];
/* Make sure we only write valid b instructions */
if (distance_start > KVM_INST_B_MAX) {
kvm_patching_worked = false;
return;
}
memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);
/* Patch the invocation */
kvm_patch_ins_b(inst, distance_start);
}
#endif
#ifdef CONFIG_PPC_BOOK3S_32
extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];
static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
u32 *p;
int distance_start;
int distance_end;
ulong next_inst;
p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
if (!p)
return;
/* Find out where we are and put everything there */
distance_start = (ulong)p - (ulong)inst;
next_inst = ((ulong)inst + 4);
distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];
/* Make sure we only write valid b instructions */
if (distance_start > KVM_INST_B_MAX) {
kvm_patching_worked = false;
return;
}
/* Modify the chunk to fit the invocation */
memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
p[kvm_emulate_mtsrin_reg2_offs] |= rt;
p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);
/* Patch the invocation */
kvm_patch_ins_b(inst, distance_start);
}
#endif
static void __init kvm_map_magic_page(void *data)
{
u32 *features = data;
ulong in[8] = {0};
ulong out[8];
in[0] = KVM_MAGIC_PAGE;
in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;
epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
*features = out[0];
}
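/*
 * Overview (added for clarity, not in the original file): kvm_check_ins()
 * rewrites individual privileged instructions in place. Register accesses
 * that the magic page mirrors (MSR, SPRGs, SRR0/1, DAR/DSISR, ...) become
 * plain loads/stores from/to the magic page, instructions with side
 * effects (mtmsr[d], wrtee[i], mtsrin) are redirected to small emulation
 * stubs, and tlbsync is simply turned into a nop.
 */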
static void __init kvm_check_ins(u32 *inst, u32 features)
{
u32 _inst = *inst;
u32 inst_no_rt = _inst & ~KVM_MASK_RT;
u32 inst_rt = _inst & KVM_MASK_RT;
switch (inst_no_rt) {
/* Loads */
case KVM_INST_MFMSR:
kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_SPRG0):
kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_SPRG1):
kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_SPRG2):
kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_SPRG3):
kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_SRR0):
kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_SRR1):
kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
break;
#ifdef CONFIG_BOOKE
case KVM_INST_MFSPR(SPRN_DEAR):
#else
case KVM_INST_MFSPR(SPRN_DAR):
#endif
kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_DSISR):
kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
break;
#ifdef CONFIG_PPC_E500
case KVM_INST_MFSPR(SPRN_MAS0):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_MAS1):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_MAS2):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_MAS3):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
break;
case KVM_INST_MFSPR(SPRN_MAS4):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_MAS6):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_MAS7):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
break;
#endif /* CONFIG_PPC_E500 */
case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
break;
case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
break;
#ifdef CONFIG_BOOKE
case KVM_INST_MFSPR(SPRN_ESR):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
break;
#endif
case KVM_INST_MFSPR(SPRN_PIR):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
break;
/* Stores */
case KVM_INST_MTSPR(SPRN_SPRG0):
kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_SPRG1):
kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_SPRG2):
kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_SPRG3):
kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_SRR0):
kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_SRR1):
kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
break;
#ifdef CONFIG_BOOKE
case KVM_INST_MTSPR(SPRN_DEAR):
#else
case KVM_INST_MTSPR(SPRN_DAR):
#endif
kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_DSISR):
kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
break;
#ifdef CONFIG_PPC_E500
case KVM_INST_MTSPR(SPRN_MAS0):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_MAS1):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_MAS2):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_MAS3):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
break;
case KVM_INST_MTSPR(SPRN_MAS4):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_MAS6):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_MAS7):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
break;
#endif /* CONFIG_PPC_E500 */
case KVM_INST_MTSPR(SPRN_SPRG4):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_SPRG5):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_SPRG6):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
break;
case KVM_INST_MTSPR(SPRN_SPRG7):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
break;
#ifdef CONFIG_BOOKE
case KVM_INST_MTSPR(SPRN_ESR):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
break;
#endif
/* Nops */
case KVM_INST_TLBSYNC:
kvm_patch_ins_nop(inst);
break;
/* Rewrites */
case KVM_INST_MTMSRD_L1:
kvm_patch_ins_mtmsrd(inst, inst_rt);
break;
case KVM_INST_MTMSR:
case KVM_INST_MTMSRD_L0:
kvm_patch_ins_mtmsr(inst, inst_rt);
break;
#ifdef CONFIG_BOOKE
case KVM_INST_WRTEE:
kvm_patch_ins_wrtee(inst, inst_rt, 0);
break;
#endif
}
switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
case KVM_INST_MTSRIN:
if (features & KVM_MAGIC_FEAT_SR) {
u32 inst_rb = _inst & KVM_MASK_RB;
kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
}
break;
#endif
}
switch (_inst) {
#ifdef CONFIG_BOOKE
case KVM_INST_WRTEEI_0:
kvm_patch_ins_wrteei_0(inst);
break;
case KVM_INST_WRTEEI_1:
kvm_patch_ins_wrtee(inst, 0, 1);
break;
#endif
}
}
extern u32 kvm_template_start[];
extern u32 kvm_template_end[];
static void __init kvm_use_magic_page(void)
{
u32 *p;
u32 *start, *end;
u32 features;
/* Tell the host to map the magic page to -4096 on all CPUs */
on_each_cpu(kvm_map_magic_page, &features, 1);
/* Quick self-test to see if the mapping works */
if (fault_in_readable((const char __user *)KVM_MAGIC_PAGE,
sizeof(u32))) {
kvm_patching_worked = false;
return;
}
/* Now loop through all code and find instructions */
start = (void*)_stext;
end = (void*)_etext;
/*
* Being interrupted in the middle of patching would
* be bad for SPRG4-7, which KVM can't keep in sync
* with emulated accesses because reads don't trap.
*/
local_irq_disable();
for (p = start; p < end; p++) {
/* Avoid patching the template code */
if (p >= kvm_template_start && p < kvm_template_end) {
p = kvm_template_end - 1;
continue;
}
kvm_check_ins(p, features);
}
local_irq_enable();
printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
kvm_patching_worked ? "worked" : "failed");
}
static int __init kvm_guest_init(void)
{
if (!kvm_para_available())
return 0;
if (!epapr_paravirt_enabled)
return 0;
if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
kvm_use_magic_page();
#ifdef CONFIG_PPC_BOOK3S_64
/* Enable napping */
powersave_nap = 1;
#endif
return 0;
}
postcore_initcall(kvm_guest_init);
| linux-master | arch/powerpc/kernel/kvm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Common powerpc suspend code for 32 and 64 bits
*
* Copyright 2007 Johannes Berg <[email protected]>
*/
#include <linux/sched.h>
#include <linux/suspend.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
void save_processor_state(void)
{
/*
* flush out all the special registers so we don't need
* to save them in the snapshot
*/
flush_all_to_thread(current);
#ifdef CONFIG_PPC64
hard_irq_disable();
#endif
}
void restore_processor_state(void)
{
#ifdef CONFIG_PPC32
switch_mmu_context(current->active_mm, current->active_mm, NULL);
#endif
}
| linux-master | arch/powerpc/kernel/swsusp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PCI Error Recovery Driver for RPA-compliant PPC64 platform.
* Copyright IBM Corp. 2004 2005
* Copyright Linas Vepstas <[email protected]> 2004, 2005
*
* Send comments and feedback to Linas Vepstas <[email protected]>
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/rtas.h>
struct eeh_rmv_data {
struct list_head removed_vf_list;
int removed_dev_count;
};
static int eeh_result_priority(enum pci_ers_result result)
{
switch (result) {
case PCI_ERS_RESULT_NONE:
return 1;
case PCI_ERS_RESULT_NO_AER_DRIVER:
return 2;
case PCI_ERS_RESULT_RECOVERED:
return 3;
case PCI_ERS_RESULT_CAN_RECOVER:
return 4;
case PCI_ERS_RESULT_DISCONNECT:
return 5;
case PCI_ERS_RESULT_NEED_RESET:
return 6;
default:
WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", (int)result);
return 0;
}
};
static const char *pci_ers_result_name(enum pci_ers_result result)
{
switch (result) {
case PCI_ERS_RESULT_NONE:
return "none";
case PCI_ERS_RESULT_CAN_RECOVER:
return "can recover";
case PCI_ERS_RESULT_NEED_RESET:
return "need reset";
case PCI_ERS_RESULT_DISCONNECT:
return "disconnect";
case PCI_ERS_RESULT_RECOVERED:
return "recovered";
case PCI_ERS_RESULT_NO_AER_DRIVER:
return "no AER driver";
default:
WARN_ONCE(1, "Unknown result type: %d\n", (int)result);
return "unknown";
}
};
static enum pci_ers_result pci_ers_merge_result(enum pci_ers_result old,
enum pci_ers_result new)
{
if (eeh_result_priority(new) > eeh_result_priority(old))
return new;
return old;
}
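/*
 * Example (added for clarity, not in the original file): merging
 * PCI_ERS_RESULT_CAN_RECOVER (priority 4) with PCI_ERS_RESULT_NEED_RESET
 * (priority 6) yields NEED_RESET, so the higher-priority answer wins when
 * driver responses are aggregated over a PE. Note that in this table
 * NEED_RESET even outranks DISCONNECT.
 */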
static bool eeh_dev_removed(struct eeh_dev *edev)
{
return !edev || (edev->mode & EEH_DEV_REMOVED);
}
static bool eeh_edev_actionable(struct eeh_dev *edev)
{
if (!edev->pdev)
return false;
if (edev->pdev->error_state == pci_channel_io_perm_failure)
return false;
if (eeh_dev_removed(edev))
return false;
if (eeh_pe_passed(edev->pe))
return false;
return true;
}
/**
* eeh_pcid_get - Get the PCI device driver
* @pdev: PCI device
*
* The function is used to retrieve the PCI device driver for
* the indicated PCI device. It also takes a reference on the
* driver's module so that it cannot be unloaded while the
* device is in use; otherwise a kernel crash could occur.
*/
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
if (!pdev || !pdev->dev.driver)
return NULL;
if (!try_module_get(pdev->dev.driver->owner))
return NULL;
return to_pci_driver(pdev->dev.driver);
}
/**
* eeh_pcid_put - Dereference on the PCI device driver
* @pdev: PCI device
*
* The function drops the module reference taken by eeh_pcid_get()
* on the indicated PCI device's driver.
*/
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
if (!pdev || !pdev->dev.driver)
return;
module_put(pdev->dev.driver->owner);
}
/**
* eeh_disable_irq - Disable interrupt for the recovering device
* @edev: EEH device
*
* This routine must be called when reporting a temporary or permanent
* error to a particular PCI device, in order to disable that device's
* interrupt. If the device uses MSI or MSI-X, nothing needs to be done
* because EEH freezes DMA for PCI devices hitting EEH errors, and that
* also stops MSI and MSI-X messages.
*/
static void eeh_disable_irq(struct eeh_dev *edev)
{
/* Don't disable MSI and MSI-X interrupts. They are
* effectively disabled by the DMA Stopped state
* when an EEH error occurs.
*/
if (edev->pdev->msi_enabled || edev->pdev->msix_enabled)
return;
if (!irq_has_action(edev->pdev->irq))
return;
edev->mode |= EEH_DEV_IRQ_DISABLED;
disable_irq_nosync(edev->pdev->irq);
}
/**
* eeh_enable_irq - Enable interrupt for the recovering device
* @edev: EEH device
*
* This routine must be called to re-enable the interrupt once the
* failed device has been resumed.
*/
static void eeh_enable_irq(struct eeh_dev *edev)
{
if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
edev->mode &= ~EEH_DEV_IRQ_DISABLED;
/*
* FIXME !!!!!
*
* This is just ass backwards. This maze has
* unbalanced irq_enable/disable calls. So instead of
* finding the root cause it works around the warning
* in the irq_enable code by conditionally calling
* into it.
*
* That's just wrong. The warning in the core code is
* there to tell people to fix their asymmetries in
* their own code, not by abusing the core information
* to avoid it.
*
* I so wish that the asymmetry would be the other way
* round and a few more irq_disable calls render that
* shit unusable forever.
*
* tglx
*/
if (irqd_irq_disabled(irq_get_irq_data(edev->pdev->irq)))
enable_irq(edev->pdev->irq);
}
}
static void eeh_dev_save_state(struct eeh_dev *edev, void *userdata)
{
struct pci_dev *pdev;
if (!edev)
return;
/*
* The config space of some adapters cannot be accessed;
* doing so would fence the PHB. For those devices we skip
* saving the config space here and instead restore the
* initial config space that was saved when the EEH device
* was created.
*/
if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
return;
pdev = eeh_dev_to_pci_dev(edev);
if (!pdev)
return;
pci_save_state(pdev);
}
static void eeh_set_channel_state(struct eeh_pe *root, pci_channel_state_t s)
{
struct eeh_pe *pe;
struct eeh_dev *edev, *tmp;
eeh_for_each_pe(root, pe)
eeh_pe_for_each_dev(pe, edev, tmp)
if (eeh_edev_actionable(edev))
edev->pdev->error_state = s;
}
static void eeh_set_irq_state(struct eeh_pe *root, bool enable)
{
struct eeh_pe *pe;
struct eeh_dev *edev, *tmp;
eeh_for_each_pe(root, pe) {
eeh_pe_for_each_dev(pe, edev, tmp) {
if (!eeh_edev_actionable(edev))
continue;
if (!eeh_pcid_get(edev->pdev))
continue;
if (enable)
eeh_enable_irq(edev);
else
eeh_disable_irq(edev);
eeh_pcid_put(edev->pdev);
}
}
}
typedef enum pci_ers_result (*eeh_report_fn)(struct eeh_dev *,
struct pci_dev *,
struct pci_driver *);
static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
enum pci_ers_result *result)
{
struct pci_dev *pdev;
struct pci_driver *driver;
enum pci_ers_result new_result;
pci_lock_rescan_remove();
pdev = edev->pdev;
if (pdev)
get_device(&pdev->dev);
pci_unlock_rescan_remove();
if (!pdev) {
eeh_edev_info(edev, "no device");
return;
}
device_lock(&pdev->dev);
if (eeh_edev_actionable(edev)) {
driver = eeh_pcid_get(pdev);
if (!driver)
eeh_edev_info(edev, "no driver");
else if (!driver->err_handler)
eeh_edev_info(edev, "driver not EEH aware");
else if (edev->mode & EEH_DEV_NO_HANDLER)
eeh_edev_info(edev, "driver bound too late");
else {
new_result = fn(edev, pdev, driver);
eeh_edev_info(edev, "%s driver reports: '%s'",
driver->name,
pci_ers_result_name(new_result));
if (result)
*result = pci_ers_merge_result(*result,
new_result);
}
if (driver)
eeh_pcid_put(pdev);
} else {
eeh_edev_info(edev, "not actionable (%d,%d,%d)", !!pdev,
!eeh_dev_removed(edev), !eeh_pe_passed(edev->pe));
}
device_unlock(&pdev->dev);
if (edev->pdev != pdev)
eeh_edev_warn(edev, "Device changed during processing!\n");
put_device(&pdev->dev);
}
static void eeh_pe_report(const char *name, struct eeh_pe *root,
eeh_report_fn fn, enum pci_ers_result *result)
{
struct eeh_pe *pe;
struct eeh_dev *edev, *tmp;
pr_info("EEH: Beginning: '%s'\n", name);
eeh_for_each_pe(root, pe) eeh_pe_for_each_dev(pe, edev, tmp)
eeh_pe_report_edev(edev, fn, result);
if (result)
pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n",
name, pci_ers_result_name(*result));
else
pr_info("EEH: Finished:'%s'", name);
}
/**
* eeh_report_error - Report PCI error to each device driver
* @edev: eeh device
* @pdev: PCI device
* @driver: device's PCI driver
*
* Report an EEH error to each device driver.
*/
static enum pci_ers_result eeh_report_error(struct eeh_dev *edev,
struct pci_dev *pdev,
struct pci_driver *driver)
{
enum pci_ers_result rc;
if (!driver->err_handler->error_detected)
return PCI_ERS_RESULT_NONE;
eeh_edev_info(edev, "Invoking %s->error_detected(IO frozen)",
driver->name);
rc = driver->err_handler->error_detected(pdev, pci_channel_io_frozen);
edev->in_error = true;
pci_uevent_ers(pdev, PCI_ERS_RESULT_NONE);
return rc;
}
/**
* eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
* @edev: eeh device
* @pdev: PCI device
* @driver: device's PCI driver
*
* Tells each device driver that IO ports, MMIO and config space I/O
* are now enabled.
*/
static enum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev,
struct pci_dev *pdev,
struct pci_driver *driver)
{
if (!driver->err_handler->mmio_enabled)
return PCI_ERS_RESULT_NONE;
eeh_edev_info(edev, "Invoking %s->mmio_enabled()", driver->name);
return driver->err_handler->mmio_enabled(pdev);
}
/**
* eeh_report_reset - Tell device that slot has been reset
* @edev: eeh device
* @pdev: PCI device
* @driver: device's PCI driver
*
* This routine is called while EEH resets the particular PCI device,
* so that the associated device driver can take whatever action it
* needs, usually saving the data it requires so that it can work
* again once the device has been recovered.
*/
static enum pci_ers_result eeh_report_reset(struct eeh_dev *edev,
struct pci_dev *pdev,
struct pci_driver *driver)
{
if (!driver->err_handler->slot_reset || !edev->in_error)
return PCI_ERS_RESULT_NONE;
eeh_edev_info(edev, "Invoking %s->slot_reset()", driver->name);
return driver->err_handler->slot_reset(pdev);
}
static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
{
struct pci_dev *pdev;
if (!edev)
return;
/*
* The config space contents weren't saved for adapters whose
* config space is blocked. For those we instead restore the
* initial config space that was saved when the EEH device
* was created.
*/
if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
if (list_is_last(&edev->entry, &edev->pe->edevs))
eeh_pe_restore_bars(edev->pe);
return;
}
pdev = eeh_dev_to_pci_dev(edev);
if (!pdev)
return;
pci_restore_state(pdev);
}
/**
* eeh_report_resume - Tell device to resume normal operations
* @edev: eeh device
* @pdev: PCI device
* @driver: device's PCI driver
*
* This routine must be called to notify the device driver that it
* may resume operations, so that the driver can do whatever
* initialization is needed to make the recovered device work again.
*/
static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev,
struct pci_dev *pdev,
struct pci_driver *driver)
{
if (!driver->err_handler->resume || !edev->in_error)
return PCI_ERS_RESULT_NONE;
eeh_edev_info(edev, "Invoking %s->resume()", driver->name);
driver->err_handler->resume(pdev);
pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED);
#ifdef CONFIG_PCI_IOV
if (eeh_ops->notify_resume)
eeh_ops->notify_resume(edev);
#endif
return PCI_ERS_RESULT_NONE;
}
/**
* eeh_report_failure - Tell device driver that device is dead.
* @edev: eeh device
* @pdev: PCI device
* @driver: device's PCI driver
*
* This informs the device driver that the device is permanently
* dead, and that no further recovery attempts will be made on it.
*/
static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev,
struct pci_dev *pdev,
struct pci_driver *driver)
{
enum pci_ers_result rc;
if (!driver->err_handler->error_detected)
return PCI_ERS_RESULT_NONE;
eeh_edev_info(edev, "Invoking %s->error_detected(permanent failure)",
driver->name);
rc = driver->err_handler->error_detected(pdev,
pci_channel_io_perm_failure);
pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT);
return rc;
}
static void *eeh_add_virt_device(struct eeh_dev *edev)
{
struct pci_driver *driver;
struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
if (!(edev->physfn)) {
eeh_edev_warn(edev, "Not for VF\n");
return NULL;
}
driver = eeh_pcid_get(dev);
if (driver) {
if (driver->err_handler) {
eeh_pcid_put(dev);
return NULL;
}
eeh_pcid_put(dev);
}
#ifdef CONFIG_PCI_IOV
pci_iov_add_virtfn(edev->physfn, edev->vf_index);
#endif
return NULL;
}
static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
{
struct pci_driver *driver;
struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;
/*
* Strictly speaking we should remove the PCI bridges as well.
* However, that adds a lot of complexity, particularly since
* some of the devices under a bridge might support EEH. So for
* simplicity we only deal with PCI devices here.
*/
if (!eeh_edev_actionable(edev) ||
(dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
return;
if (rmv_data) {
driver = eeh_pcid_get(dev);
if (driver) {
if (driver->err_handler &&
driver->err_handler->error_detected &&
driver->err_handler->slot_reset) {
eeh_pcid_put(dev);
return;
}
eeh_pcid_put(dev);
}
}
/* Remove it from PCI subsystem */
pr_info("EEH: Removing %s without EEH sensitive driver\n",
pci_name(dev));
edev->mode |= EEH_DEV_DISCONNECTED;
if (rmv_data)
rmv_data->removed_dev_count++;
if (edev->physfn) {
#ifdef CONFIG_PCI_IOV
pci_iov_remove_virtfn(edev->physfn, edev->vf_index);
edev->pdev = NULL;
#endif
if (rmv_data)
list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
} else {
pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(dev);
pci_unlock_rescan_remove();
}
}
static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata)
{
struct eeh_dev *edev, *tmp;
eeh_pe_for_each_dev(pe, edev, tmp) {
if (!(edev->mode & EEH_DEV_DISCONNECTED))
continue;
edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
eeh_pe_tree_remove(edev);
}
return NULL;
}
/*
* Explicitly clear the PE's frozen state on PowerNV, where the
* PE stays frozen until the BAR restore has completed. Doing
* this on pSeries is harmless. To be consistent with PE reset
* (attempted up to 3 times), we try to clear the frozen state
* up to 3 times as well.
*/
static int eeh_clear_pe_frozen_state(struct eeh_pe *root, bool include_passed)
{
struct eeh_pe *pe;
int i;
eeh_for_each_pe(root, pe) {
if (include_passed || !eeh_pe_passed(pe)) {
for (i = 0; i < 3; i++)
if (!eeh_unfreeze_pe(pe))
break;
if (i >= 3)
return -EIO;
}
}
eeh_pe_state_clear(root, EEH_PE_ISOLATED, include_passed);
return 0;
}
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
int ret;
/* Bail if the PE is being recovered */
if (pe->state & EEH_PE_RECOVERING)
return 0;
/* Put the PE into recovery mode */
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
/* Save states */
eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
/* Issue reset */
ret = eeh_pe_reset_full(pe, true);
if (ret) {
eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
return ret;
}
/* Unfreeze the PE */
ret = eeh_clear_pe_frozen_state(pe, true);
if (ret) {
eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
return ret;
}
/* Restore device state */
eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);
/* Clear recovery mode */
eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
return 0;
}
/**
* eeh_reset_device - Perform actual reset of a PCI slot
* @pe: EEH PE
* @bus: PCI bus corresponding to the isolated slot
* @rmv_data: Optional, list to record removed devices
* @driver_eeh_aware: Does the device's driver provide EEH support?
*
* This routine must be called to reset the indicated PE. During the
* reset, udev might be invoked because the affected PCI devices will
* be removed and then re-added.
*/
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
struct eeh_rmv_data *rmv_data,
bool driver_eeh_aware)
{
time64_t tstamp;
int cnt, rc;
struct eeh_dev *edev;
struct eeh_pe *tmp_pe;
bool any_passed = false;
eeh_for_each_pe(pe, tmp_pe)
any_passed |= eeh_pe_passed(tmp_pe);
/* pcibios will clear the counter; save the value */
cnt = pe->freeze_count;
tstamp = pe->tstamp;
/*
* We don't remove the corresponding PE instances because we
* need the information afterwards. The attached EEH devices
* are expected to be re-attached shortly, when we call into
* pci_hp_add_devices().
*/
eeh_pe_state_mark(pe, EEH_PE_KEEP);
if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
} else {
pci_lock_rescan_remove();
pci_hp_remove_devices(bus);
pci_unlock_rescan_remove();
}
/*
* Reset the pci controller. (Asserts RST#; resets config space).
* Reconfigure bridges and devices. Don't try to bring the system
* up if the reset failed for some reason.
*
* During the reset, it's very dangerous to have uncontrolled PCI
* config accesses. So we prefer to block them. However, controlled
* PCI config accesses initiated from EEH itself are allowed.
*/
rc = eeh_pe_reset_full(pe, false);
if (rc)
return rc;
pci_lock_rescan_remove();
/* Restore PE */
eeh_ops->configure_bridge(pe);
eeh_pe_restore_bars(pe);
/* Clear frozen state */
rc = eeh_clear_pe_frozen_state(pe, false);
if (rc) {
pci_unlock_rescan_remove();
return rc;
}
/* Give the system 5 seconds to finish running the user-space
* hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
* this is a hack, but if we don't do this, and try to bring
* the device up before the scripts have taken it down,
* potentially weird things happen.
*/
if (!driver_eeh_aware || rmv_data->removed_dev_count) {
pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
(driver_eeh_aware ? "partial" : "complete"));
ssleep(5);
/*
* The EEH device is still connected with its parent
* PE. We should disconnect it so the binding can be
* rebuilt when adding PCI devices.
*/
edev = list_first_entry(&pe->edevs, struct eeh_dev, entry);
eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
if (pe->type & EEH_PE_VF) {
eeh_add_virt_device(edev);
} else {
if (!driver_eeh_aware)
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
pci_hp_add_devices(bus);
}
}
eeh_pe_state_clear(pe, EEH_PE_KEEP, true);
pe->tstamp = tstamp;
pe->freeze_count = cnt;
pci_unlock_rescan_remove();
return 0;
}
/* The longest amount of time to wait for a PCI device
* to come back online, in seconds.
*/
#define MAX_WAIT_FOR_RECOVERY 300
/* Walks the PE tree after processing an event to remove any stale PEs.
*
* NB: This needs to be recursive to ensure the leaf PEs get removed
* before their parents do. Although this could be done iteratively,
* we don't, since the recursive form is easier to read and we need to
* guarantee the leaf nodes are handled first.
*/
static void eeh_pe_cleanup(struct eeh_pe *pe)
{
struct eeh_pe *child_pe, *tmp;
list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child)
eeh_pe_cleanup(child_pe);
if (pe->state & EEH_PE_KEEP)
return;
if (!(pe->state & EEH_PE_INVALID))
return;
if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
list_del(&pe->child);
kfree(pe);
}
}
/**
* eeh_slot_presence_check - Check if a device is still present in a slot
* @pdev: pci_dev to check
*
* This function may return a false positive if we can't determine the slot's
* presence state. This might happen for PCIe slots if the PE containing
* the upstream bridge is also frozen, or the bridge is part of the same PE
* as the device.
*
* This shouldn't happen often, but you might see it if you hotplug a PCIe
* switch.
*/
static bool eeh_slot_presence_check(struct pci_dev *pdev)
{
const struct hotplug_slot_ops *ops;
struct pci_slot *slot;
u8 state;
int rc;
if (!pdev)
return false;
if (pdev->error_state == pci_channel_io_perm_failure)
return false;
slot = pdev->slot;
if (!slot || !slot->hotplug)
return true;
ops = slot->hotplug->ops;
if (!ops || !ops->get_adapter_status)
return true;
/* set the attention indicator while we've got the slot ops */
if (ops->set_attention_status)
ops->set_attention_status(slot->hotplug, 1);
rc = ops->get_adapter_status(slot->hotplug, &state);
if (rc)
return true;
return !!state;
}
static void eeh_clear_slot_attention(struct pci_dev *pdev)
{
const struct hotplug_slot_ops *ops;
struct pci_slot *slot;
if (!pdev)
return;
if (pdev->error_state == pci_channel_io_perm_failure)
return;
slot = pdev->slot;
if (!slot || !slot->hotplug)
return;
ops = slot->hotplug->ops;
if (!ops || !ops->set_attention_status)
return;
ops->set_attention_status(slot->hotplug, 0);
}
/**
* eeh_handle_normal_event - Handle EEH events on a specific PE
* @pe: EEH PE - which should not be used after we return, as it may
* have been invalidated.
*
* Attempts to recover the given PE. If recovery fails or the PE has failed
* too many times, remove the PE.
*
* When the PHB detects address or data parity errors on a particular
* PCI slot, the associated PE will be frozen. In addition, DMAs to
* wild addresses (which usually happen due to bugs in device drivers
* or in PCI adapter firmware) can cause EEH errors, as can #SERR,
* #PERR and other miscellaneous PCI-related errors.
*
* Recovery process consists of unplugging the device driver (which
* generated hotplug events to userspace), then issuing a PCI #RST to
* the device, then reconfiguring the PCI config space for all bridges
* & devices under this slot, and then finally restarting the device
* drivers (which cause a second set of hotplug events to go out to
* userspace).
*/
void eeh_handle_normal_event(struct eeh_pe *pe)
{
struct pci_bus *bus;
struct eeh_dev *edev, *tmp;
struct eeh_pe *tmp_pe;
int rc = 0;
enum pci_ers_result result = PCI_ERS_RESULT_NONE;
struct eeh_rmv_data rmv_data =
{LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
int devices = 0;
bus = eeh_pe_bus_get(pe);
if (!bus) {
pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
__func__, pe->phb->global_number, pe->addr);
return;
}
/*
* When devices are hot-removed we might get an EEH due to
* a driver attempting to touch the MMIO space of a removed
* device. In this case we don't have a device to recover
* so suppress the event if we can't find any present devices.
*
* The hotplug driver should take care of tearing down the
* device itself.
*/
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp)
if (eeh_slot_presence_check(edev->pdev))
devices++;
if (!devices) {
pr_debug("EEH: Frozen PHB#%x-PE#%x is empty!\n",
pe->phb->global_number, pe->addr);
goto out; /* nothing to recover */
}
/* Log the event */
if (pe->type & EEH_PE_PHB) {
pr_err("EEH: Recovering PHB#%x, location: %s\n",
pe->phb->global_number, eeh_pe_loc_get(pe));
} else {
struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb);
pr_err("EEH: Recovering PHB#%x-PE#%x\n",
pe->phb->global_number, pe->addr);
pr_err("EEH: PE location: %s, PHB location: %s\n",
eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
}
#ifdef CONFIG_STACKTRACE
/*
* Print the saved stack trace now that we've verified there's
* something to recover.
*/
if (pe->trace_entries) {
void **ptrs = (void **) pe->stack_trace;
int i;
pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
pe->phb->global_number, pe->addr);
/* FIXME: Use the same format as dump_stack() */
pr_err("EEH: Call Trace:\n");
for (i = 0; i < pe->trace_entries; i++)
pr_err("EEH: [%pK] %pS\n", ptrs[i], ptrs[i]);
pe->trace_entries = 0;
}
#endif /* CONFIG_STACKTRACE */
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp)
edev->mode &= ~EEH_DEV_NO_HANDLER;
eeh_pe_update_time_stamp(pe);
pe->freeze_count++;
if (pe->freeze_count > eeh_max_freezes) {
pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
pe->phb->global_number, pe->addr,
pe->freeze_count);
goto recover_failed;
}
/* Walk the various device drivers attached to this slot through
* a reset sequence, giving each an opportunity to do what it needs
* to accomplish the reset. Each child gets a report of the
* status ... if any child can't handle the reset, then the entire
* slot is dlpar removed and added.
*
* When the PHB is fenced, we have to issue a reset to recover from
* the error. Override the result if necessary so that only a partial
* hotplug is performed in this case.
*/
pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
pe->freeze_count, eeh_max_freezes);
pr_info("EEH: Notify device drivers to shutdown\n");
eeh_set_channel_state(pe, pci_channel_io_frozen);
eeh_set_irq_state(pe, false);
eeh_pe_report("error_detected(IO frozen)", pe,
eeh_report_error, &result);
if (result == PCI_ERS_RESULT_DISCONNECT)
goto recover_failed;
/*
* Error logged on a PHB are always fences which need a full
* PHB reset to clear so force that to happen.
*/
if ((pe->type & EEH_PE_PHB) && result != PCI_ERS_RESULT_NONE)
result = PCI_ERS_RESULT_NEED_RESET;
/* Get the current PCI slot state. This can take a long time,
* sometimes over 300 seconds for certain systems.
*/
rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY * 1000);
if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
pr_warn("EEH: Permanent failure\n");
goto recover_failed;
}
/* Since rtas may enable MMIO when posting the error log,
* don't post the error log until after all dev drivers
* have been informed.
*/
pr_info("EEH: Collect temporary log\n");
eeh_slot_error_detail(pe, EEH_LOG_TEMP);
/* If all device drivers were EEH-unaware, then shut
* down all of the device drivers, and hope they
* go down willingly, without panicking the system.
*/
if (result == PCI_ERS_RESULT_NONE) {
pr_info("EEH: Reset with hotplug activity\n");
rc = eeh_reset_device(pe, bus, NULL, false);
if (rc) {
pr_warn("%s: Unable to reset, err=%d\n", __func__, rc);
goto recover_failed;
}
}
/* If all devices reported they can proceed, then re-enable MMIO */
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
pr_info("EEH: Enable I/O for affected devices\n");
rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
if (rc < 0)
goto recover_failed;
if (rc) {
result = PCI_ERS_RESULT_NEED_RESET;
} else {
pr_info("EEH: Notify device drivers to resume I/O\n");
eeh_pe_report("mmio_enabled", pe,
eeh_report_mmio_enabled, &result);
}
}
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
pr_info("EEH: Enabled DMA for affected devices\n");
rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
if (rc < 0)
goto recover_failed;
if (rc) {
result = PCI_ERS_RESULT_NEED_RESET;
} else {
/*
* We didn't reset the PE in this case, so it is
* still in the frozen state. Clear that before
* resuming the PE.
*/
eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
result = PCI_ERS_RESULT_RECOVERED;
}
}
/* If any device called out for a reset, then reset the slot */
if (result == PCI_ERS_RESULT_NEED_RESET) {
pr_info("EEH: Reset without hotplug activity\n");
rc = eeh_reset_device(pe, bus, &rmv_data, true);
if (rc) {
pr_warn("%s: Cannot reset, err=%d\n", __func__, rc);
goto recover_failed;
}
result = PCI_ERS_RESULT_NONE;
eeh_set_channel_state(pe, pci_channel_io_normal);
eeh_set_irq_state(pe, true);
eeh_pe_report("slot_reset", pe, eeh_report_reset,
&result);
}
if ((result == PCI_ERS_RESULT_RECOVERED) ||
(result == PCI_ERS_RESULT_NONE)) {
/*
* For those hot removed VFs, we should add back them after PF
* get recovered properly.
*/
list_for_each_entry_safe(edev, tmp, &rmv_data.removed_vf_list,
rmv_entry) {
eeh_add_virt_device(edev);
list_del(&edev->rmv_entry);
}
/* Tell all device drivers that they can resume operations */
pr_info("EEH: Notify device driver to resume\n");
eeh_set_channel_state(pe, pci_channel_io_normal);
eeh_set_irq_state(pe, true);
eeh_pe_report("resume", pe, eeh_report_resume, NULL);
eeh_for_each_pe(pe, tmp_pe) {
eeh_pe_for_each_dev(tmp_pe, edev, tmp) {
edev->mode &= ~EEH_DEV_NO_HANDLER;
edev->in_error = false;
}
}
pr_info("EEH: Recovery successful.\n");
goto out;
}
recover_failed:
/*
* About 90% of all real-life EEH failures in the field
* are due to poorly seated PCI cards. Only 10% or so are
* due to actual, failed cards.
*/
pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
"Please try reseating or replacing it\n",
pe->phb->global_number, pe->addr);
eeh_slot_error_detail(pe, EEH_LOG_PERM);
/* Notify all devices that they're about to go down. */
eeh_set_irq_state(pe, false);
eeh_pe_report("error_detected(permanent failure)", pe,
eeh_report_failure, NULL);
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
/* Mark the PE to be removed permanently */
eeh_pe_state_mark(pe, EEH_PE_REMOVED);
/*
* Shut down the device drivers for good. Mark all removed
* devices appropriately so that their PCI config space is
* no longer accessed.
*/
if (pe->type & EEH_PE_VF) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
} else {
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
pci_lock_rescan_remove();
pci_hp_remove_devices(bus);
pci_unlock_rescan_remove();
/* The passed PE should no longer be used */
return;
}
out:
/*
* Clean up any PEs without devices. While marked as EEH_PE_RECOVERING
* we don't want to modify the PE tree structure, so we do it here.
*/
eeh_pe_cleanup(pe);
/* clear the slot attention LED for all recovered devices */
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp)
eeh_clear_slot_attention(edev->pdev);
eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
}
/**
* eeh_handle_special_event - Handle EEH events without a specific failing PE
*
* Called when an EEH event is detected but can't be narrowed down to a
* specific PE. Iterates through possible failures and handles them as
* necessary.
*/
void eeh_handle_special_event(void)
{
struct eeh_pe *pe, *phb_pe, *tmp_pe;
struct eeh_dev *edev, *tmp_edev;
struct pci_bus *bus;
struct pci_controller *hose;
unsigned long flags;
int rc;
do {
rc = eeh_ops->next_error(&pe);
switch (rc) {
case EEH_NEXT_ERR_DEAD_IOC:
/* Mark all PHBs in dead state */
eeh_serialize_lock(&flags);
/* Purge all events */
eeh_remove_event(NULL, true);
list_for_each_entry(hose, &hose_list, list_node) {
phb_pe = eeh_phb_pe_get(hose);
if (!phb_pe)
continue;
eeh_pe_mark_isolated(phb_pe);
}
eeh_serialize_unlock(flags);
break;
case EEH_NEXT_ERR_FROZEN_PE:
case EEH_NEXT_ERR_FENCED_PHB:
case EEH_NEXT_ERR_DEAD_PHB:
/* Mark the PE in fenced state */
eeh_serialize_lock(&flags);
/* Purge all events of the PHB */
eeh_remove_event(pe, true);
if (rc != EEH_NEXT_ERR_DEAD_PHB)
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
eeh_pe_mark_isolated(pe);
eeh_serialize_unlock(flags);
break;
case EEH_NEXT_ERR_NONE:
return;
default:
pr_warn("%s: Invalid value %d from next_error()\n",
__func__, rc);
return;
}
/*
* A fenced PHB or a frozen PE is handled as a normal event.
* For a dead PHB or a dead IOC we have to remove the
* affected PHBs instead.
*/
if (rc == EEH_NEXT_ERR_FROZEN_PE ||
rc == EEH_NEXT_ERR_FENCED_PHB) {
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
eeh_handle_normal_event(pe);
} else {
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
edev->mode &= ~EEH_DEV_NO_HANDLER;
/* Notify all devices to be down */
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
eeh_pe_report(
"error_detected(permanent failure)", pe,
eeh_report_failure, NULL);
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
pci_lock_rescan_remove();
list_for_each_entry(hose, &hose_list, list_node) {
phb_pe = eeh_phb_pe_get(hose);
if (!phb_pe ||
!(phb_pe->state & EEH_PE_ISOLATED) ||
(phb_pe->state & EEH_PE_RECOVERING))
continue;
bus = eeh_pe_bus_get(phb_pe);
if (!bus) {
pr_err("%s: Cannot find PCI bus for "
"PHB#%x-PE#%x\n",
__func__,
pe->phb->global_number,
pe->addr);
break;
}
pci_hp_remove_devices(bus);
}
pci_unlock_rescan_remove();
}
/*
* If we have detected a dead IOC, there is no need to proceed
* any further since all PHBs will have been removed.
*/
if (rc == EEH_NEXT_ERR_DEAD_IOC)
break;
} while (rc != EEH_NEXT_ERR_NONE);
}
| linux-master | arch/powerpc/kernel/eeh_driver.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/powerpc/kernel/pmc.c
*
* Copyright (C) 2004 David Gibson, IBM Corporation.
* Includes code formerly from arch/ppc/kernel/perfmon.c:
* Author: Andy Fleming
* Copyright (c) 2004 Freescale Semiconductor, Inc
*/
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/pmc.h>
#ifndef MMCR0_PMAO
#define MMCR0_PMAO 0
#endif
static void dummy_perf(struct pt_regs *regs)
{
#if defined(CONFIG_FSL_EMB_PERFMON)
mtpmr(PMRN_PMGC0, mfpmr(PMRN_PMGC0) & ~PMGC0_PMIE);
#elif defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32)
if (cur_cpu_spec->pmc_type == PPC_PMC_IBM)
mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~(MMCR0_PMXE|MMCR0_PMAO));
#else
mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_PMXE);
#endif
}
static DEFINE_RAW_SPINLOCK(pmc_owner_lock);
static void *pmc_owner_caller; /* mostly for debugging */
perf_irq_t perf_irq = dummy_perf;
int reserve_pmc_hardware(perf_irq_t new_perf_irq)
{
int err = 0;
raw_spin_lock(&pmc_owner_lock);
if (pmc_owner_caller) {
printk(KERN_WARNING "reserve_pmc_hardware: "
"PMC hardware busy (reserved by caller %p)\n",
pmc_owner_caller);
err = -EBUSY;
goto out;
}
pmc_owner_caller = __builtin_return_address(0);
perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;
out:
raw_spin_unlock(&pmc_owner_lock);
return err;
}
EXPORT_SYMBOL_GPL(reserve_pmc_hardware);
void release_pmc_hardware(void)
{
raw_spin_lock(&pmc_owner_lock);
WARN_ON(!pmc_owner_caller);
pmc_owner_caller = NULL;
perf_irq = dummy_perf;
raw_spin_unlock(&pmc_owner_lock);
}
EXPORT_SYMBOL_GPL(release_pmc_hardware);
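/*
 * Usage sketch (illustrative, not part of the original file): a perf
 * backend would typically do something like
 *
 *	err = reserve_pmc_hardware(my_pmi_handler);
 *	if (err)
 *		return err;
 *	... program and use the PMCs ...
 *	release_pmc_hardware();
 *
 * where my_pmi_handler is that backend's hypothetical PMI callback.
 * Only one owner may hold the PMC hardware at a time; a second
 * reserve_pmc_hardware() call fails with -EBUSY until release.
 */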
#ifdef CONFIG_PPC_BOOK3S_64
void power4_enable_pmcs(void)
{
unsigned long hid0;
hid0 = mfspr(SPRN_HID0);
hid0 |= 1UL << (63 - 20);
/* POWER4 requires the following sequence */
asm volatile(
"sync\n"
"mtspr %1, %0\n"
"mfspr %0, %1\n"
"mfspr %0, %1\n"
"mfspr %0, %1\n"
"mfspr %0, %1\n"
"mfspr %0, %1\n"
"mfspr %0, %1\n"
"isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
"memory");
}
#endif /* CONFIG_PPC_BOOK3S_64 */
| linux-master | arch/powerpc/kernel/pmc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Routines for tracking a legacy ISA bridge
*
* Copyright 2007 Benjamin Herrenschmidt <[email protected]>, IBM Corp.
*
* Some bits and pieces moved over from pci_64.c
*
* Copyright 2003 Anton Blanchard <[email protected]>, IBM Corp.
*/
#define DEBUG
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/of_address.h>
#include <linux/vmalloc.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/isa-bridge.h>
unsigned long isa_io_base; /* NULL if no ISA bus */
EXPORT_SYMBOL(isa_io_base);
/* Cached ISA bridge dev. */
static struct device_node *isa_bridge_devnode;
struct pci_dev *isa_bridge_pcidev;
EXPORT_SYMBOL_GPL(isa_bridge_pcidev);
#define ISA_SPACE_MASK 0x1
#define ISA_SPACE_IO 0x1
static void remap_isa_base(phys_addr_t pa, unsigned long size)
{
WARN_ON_ONCE(ISA_IO_BASE & ~PAGE_MASK);
WARN_ON_ONCE(pa & ~PAGE_MASK);
WARN_ON_ONCE(size & ~PAGE_MASK);
if (slab_is_available()) {
if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
pgprot_noncached(PAGE_KERNEL)))
vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size);
} else {
early_ioremap_range(ISA_IO_BASE, pa, size,
pgprot_noncached(PAGE_KERNEL));
}
}
static int process_ISA_OF_ranges(struct device_node *isa_node,
unsigned long phb_io_base_phys)
{
unsigned int size;
struct of_range_parser parser;
struct of_range range;
if (of_range_parser_init(&parser, isa_node))
goto inval_range;
for_each_of_range(&parser, &range) {
if ((range.flags & ISA_SPACE_MASK) != ISA_SPACE_IO)
continue;
if (range.cpu_addr == OF_BAD_ADDR) {
pr_err("ISA: Bad CPU mapping: %s\n", __func__);
return -EINVAL;
}
/* We need page alignment */
if ((range.bus_addr & ~PAGE_MASK) || (range.cpu_addr & ~PAGE_MASK)) {
pr_warn("ISA: bridge %pOF has non aligned IO range\n", isa_node);
return -EINVAL;
}
/* Align size and make sure it's cropped to 64K */
size = PAGE_ALIGN(range.size);
if (size > 0x10000)
size = 0x10000;
if (!phb_io_base_phys)
phb_io_base_phys = range.cpu_addr;
remap_isa_base(phb_io_base_phys, size);
return 0;
}
inval_range:
if (phb_io_base_phys) {
pr_err("no ISA IO ranges or unexpected isa range, mapping 64k\n");
remap_isa_base(phb_io_base_phys, 0x10000);
return 0;
}
return -EINVAL;
}
/**
* isa_bridge_find_early - Find and map the ISA IO space early before
* main PCI discovery. This is optionally called by
* the arch code when adding PCI PHBs to get early
* access to ISA IO ports
*/
void __init isa_bridge_find_early(struct pci_controller *hose)
{
struct device_node *np, *parent = NULL, *tmp;
/* If we already have an ISA bridge, bail off */
if (isa_bridge_devnode != NULL)
return;
/* For each "isa" node in the system. Note : we do a search by
* type and not by name. It might be better to do by name but that's
* what the code used to do and I don't want to break too much at
* once. We can look into changing that separately
*/
for_each_node_by_type(np, "isa") {
/* Look for our hose being a parent */
for (parent = of_get_parent(np); parent;) {
if (parent == hose->dn) {
of_node_put(parent);
break;
}
tmp = parent;
parent = of_get_parent(parent);
of_node_put(tmp);
}
if (parent != NULL)
break;
}
if (np == NULL)
return;
isa_bridge_devnode = np;
/* Now parse the "ranges" property and setup the ISA mapping */
process_ISA_OF_ranges(np, hose->io_base_phys);
/* Set the global ISA io base to indicate we have an ISA bridge */
isa_io_base = ISA_IO_BASE;
pr_debug("ISA bridge (early) is %pOF\n", np);
}
/**
 * isa_bridge_init_non_pci - Find and map the ISA IO space of a bridge
 *                           that is not a PCI device, using the "ranges"
 *                           property of the given device-tree node
 */
void __init isa_bridge_init_non_pci(struct device_node *np)
{
int ret;
/* If we already have an ISA bridge, bail off */
if (isa_bridge_devnode != NULL)
return;
ret = process_ISA_OF_ranges(np, 0);
if (ret)
return;
/* Got it */
isa_bridge_devnode = np;
/* Set the global ISA io base to indicate we have an ISA bridge
* and map it
*/
isa_io_base = ISA_IO_BASE;
pr_debug("ISA: Non-PCI bridge is %pOF\n", np);
}
/**
* isa_bridge_find_late - Find and map the ISA IO space upon discovery of
* a new ISA bridge
*/
static void isa_bridge_find_late(struct pci_dev *pdev,
struct device_node *devnode)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
/* Store ISA device node and PCI device */
isa_bridge_devnode = of_node_get(devnode);
isa_bridge_pcidev = pdev;
/* Now parse the "ranges" property and setup the ISA mapping */
process_ISA_OF_ranges(devnode, hose->io_base_phys);
/* Set the global ISA io base to indicate we have an ISA bridge */
isa_io_base = ISA_IO_BASE;
pr_debug("ISA bridge (late) is %pOF on %s\n",
devnode, pci_name(pdev));
}
/**
* isa_bridge_remove - Remove/unmap an ISA bridge
*/
static void isa_bridge_remove(void)
{
pr_debug("ISA bridge removed !\n");
/* Clear the global ISA io base to indicate that we have no more
* ISA bridge. Note that drivers don't quite handle that, though
* we should probably do something about it. But do we ever really
* have ISA bridges being removed on machines using legacy devices ?
*/
isa_io_base = ISA_IO_BASE;
/* Clear references to the bridge */
of_node_put(isa_bridge_devnode);
isa_bridge_devnode = NULL;
isa_bridge_pcidev = NULL;
/* Unmap the ISA area */
vunmap_range(ISA_IO_BASE, ISA_IO_BASE + 0x10000);
}
/**
* isa_bridge_notify - Get notified of PCI devices addition/removal
*/
static int isa_bridge_notify(struct notifier_block *nb, unsigned long action,
void *data)
{
struct device *dev = data;
struct pci_dev *pdev = to_pci_dev(dev);
struct device_node *devnode = pci_device_to_OF_node(pdev);
switch(action) {
case BUS_NOTIFY_ADD_DEVICE:
/* Check if we have an early ISA device, without PCI dev */
if (isa_bridge_devnode && isa_bridge_devnode == devnode &&
!isa_bridge_pcidev) {
pr_debug("ISA bridge PCI attached: %s\n",
pci_name(pdev));
isa_bridge_pcidev = pdev;
}
/* Check if we have no ISA device, and this happens to be one,
* register it as such if it has an OF device
*/
if (!isa_bridge_devnode && of_node_is_type(devnode, "isa"))
isa_bridge_find_late(pdev, devnode);
return 0;
case BUS_NOTIFY_DEL_DEVICE:
		/* Check if this is our existing ISA device */
if (pdev == isa_bridge_pcidev ||
(devnode && devnode == isa_bridge_devnode))
isa_bridge_remove();
return 0;
}
return 0;
}
static struct notifier_block isa_bridge_notifier = {
.notifier_call = isa_bridge_notify
};
/**
* isa_bridge_init - register to be notified of ISA bridge addition/removal
*
*/
static int __init isa_bridge_init(void)
{
bus_register_notifier(&pci_bus_type, &isa_bridge_notifier);
return 0;
}
arch_initcall(isa_bridge_init);
| linux-master | arch/powerpc/kernel/isa-bridge.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Common pmac/prep/chrp pci routines. -- Cort
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
#undef DEBUG
unsigned long isa_io_base = 0;
unsigned long pci_dram_offset = 0;
int pcibios_assign_bus_offset = 1;
EXPORT_SYMBOL(isa_io_base);
EXPORT_SYMBOL(pci_dram_offset);
static void fixup_cpc710_pci64(struct pci_dev* dev);
/* By default, we don't re-assign bus numbers. We do this only on
* some pmacs
*/
static int pci_assign_all_buses;
/* This will remain NULL for now, until isa-bridge.c is made common
* to both 32-bit and 64-bit.
*/
struct pci_dev *isa_bridge_pcidev;
EXPORT_SYMBOL_GPL(isa_bridge_pcidev);
static void
fixup_cpc710_pci64(struct pci_dev* dev)
{
/* Hide the PCI64 BARs from the kernel as their content doesn't
* fit well in the resource management
*/
dev->resource[0].start = dev->resource[0].end = 0;
dev->resource[0].flags = 0;
dev->resource[1].start = dev->resource[1].end = 0;
dev->resource[1].flags = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);
#ifdef CONFIG_PPC_PCI_OF_BUS_MAP
static u8* pci_to_OF_bus_map;
static int pci_bus_count;
/*
* Functions below are used on OpenFirmware machines.
*/
static void
make_one_node_map(struct device_node* node, u8 pci_bus)
{
const int *bus_range;
int len;
if (pci_bus >= pci_bus_count)
return;
bus_range = of_get_property(node, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %pOF, "
"assuming it starts at 0\n", node);
pci_to_OF_bus_map[pci_bus] = 0;
} else
pci_to_OF_bus_map[pci_bus] = bus_range[0];
for_each_child_of_node(node, node) {
struct pci_dev* dev;
const unsigned int *class_code, *reg;
class_code = of_get_property(node, "class-code", NULL);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
reg = of_get_property(node, "reg", NULL);
if (!reg)
continue;
dev = pci_get_domain_bus_and_slot(0, pci_bus,
((reg[0] >> 8) & 0xff));
if (!dev || !dev->subordinate) {
pci_dev_put(dev);
continue;
}
make_one_node_map(node, dev->subordinate->number);
pci_dev_put(dev);
}
}
static void __init
pcibios_make_OF_bus_map(void)
{
int i;
struct pci_controller *hose, *tmp;
struct property *map_prop;
struct device_node *dn;
pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
if (!pci_to_OF_bus_map) {
printk(KERN_ERR "Can't allocate OF bus map !\n");
return;
}
/* We fill the bus map with invalid values, that helps
* debugging.
*/
for (i=0; i<pci_bus_count; i++)
pci_to_OF_bus_map[i] = 0xff;
/* For each hose, we begin searching bridges */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
struct device_node* node = hose->dn;
if (!node)
continue;
make_one_node_map(node, hose->first_busno);
}
dn = of_find_node_by_path("/");
map_prop = of_find_property(dn, "pci-OF-bus-map", NULL);
if (map_prop) {
BUG_ON(pci_bus_count > map_prop->length);
memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
}
of_node_put(dn);
#ifdef DEBUG
printk("PCI->OF bus map:\n");
for (i=0; i<pci_bus_count; i++) {
if (pci_to_OF_bus_map[i] == 0xff)
continue;
printk("%d -> %d\n", i, pci_to_OF_bus_map[i]);
}
#endif
}
#endif // CONFIG_PPC_PCI_OF_BUS_MAP
#ifdef CONFIG_PPC_PMAC
/*
* Returns the PCI device matching a given OF node
*/
int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
{
#ifdef CONFIG_PPC_PCI_OF_BUS_MAP
struct pci_dev *dev = NULL;
#endif
const __be32 *reg;
int size;
/* Check if it might have a chance to be a PCI device */
if (!pci_find_hose_for_OF_device(node))
return -ENODEV;
reg = of_get_property(node, "reg", &size);
if (!reg || size < 5 * sizeof(u32))
return -ENODEV;
	*bus = (be32_to_cpup(&reg[0]) >> 16) & 0xff;
	*devfn = (be32_to_cpup(&reg[0]) >> 8) & 0xff;
#ifndef CONFIG_PPC_PCI_OF_BUS_MAP
return 0;
#else
/* Ok, here we need some tweak. If we have already renumbered
* all busses, we can't rely on the OF bus number any more.
* the pci_to_OF_bus_map is not enough as several PCI busses
* may match the same OF bus number.
*/
if (!pci_to_OF_bus_map)
return 0;
for_each_pci_dev(dev)
if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
dev->devfn == *devfn) {
*bus = dev->bus->number;
pci_dev_put(dev);
return 0;
}
return -ENODEV;
#endif // CONFIG_PPC_PCI_OF_BUS_MAP
}
EXPORT_SYMBOL(pci_device_from_OF_node);
#endif
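/*
 * Illustrative sketch (hypothetical caller; "np" is assumed to be a valid
 * struct device_node pointer): once bus/devfn are resolved, the matching
 * pci_dev can be looked up the usual way.
 *
 *	u8 bus, devfn;
 *	struct pci_dev *pdev = NULL;
 *
 *	if (pci_device_from_OF_node(np, &bus, &devfn) == 0)
 *		pdev = pci_get_domain_bus_and_slot(0, bus, devfn);
 *	// ... use pdev, then pci_dev_put(pdev) when done ...
 */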
#ifdef CONFIG_PPC_PCI_OF_BUS_MAP
/* We create the "pci-OF-bus-map" property now so it appears in the
* /proc device tree
*/
void __init
pci_create_OF_bus_map(void)
{
struct property* of_prop;
struct device_node *dn;
of_prop = memblock_alloc(sizeof(struct property) + 256,
SMP_CACHE_BYTES);
if (!of_prop)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(struct property) + 256);
dn = of_find_node_by_path("/");
if (dn) {
memset(of_prop, -1, sizeof(struct property) + 256);
of_prop->name = "pci-OF-bus-map";
of_prop->length = 256;
of_prop->value = &of_prop[1];
of_add_property(dn, of_prop);
of_node_put(dn);
}
}
#endif // CONFIG_PPC_PCI_OF_BUS_MAP
void pcibios_setup_phb_io_space(struct pci_controller *hose)
{
unsigned long io_offset;
struct resource *res = &hose->io_resource;
/* Fixup IO space offset */
io_offset = pcibios_io_space_offset(hose);
res->start += io_offset;
res->end += io_offset;
}
static int __init pcibios_init(void)
{
struct pci_controller *hose, *tmp;
#ifndef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
int next_busno = 0;
#endif
printk(KERN_INFO "PCI: Probing PCI hardware\n");
#ifdef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
/*
* Enable PCI domains in /proc when PCI bus numbers are not unique
* across all PCI domains to prevent conflicts. And keep PCI domain 0
* backward compatible in /proc for video cards.
*/
pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
#endif
if (pci_has_flag(PCI_REASSIGN_ALL_BUS))
pci_assign_all_buses = 1;
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
#ifndef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
if (pci_assign_all_buses)
hose->first_busno = next_busno;
#endif
hose->last_busno = 0xff;
pcibios_scan_phb(hose);
pci_bus_add_devices(hose->bus);
#ifndef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
if (pci_assign_all_buses || next_busno <= hose->last_busno)
next_busno = hose->last_busno + pcibios_assign_bus_offset;
#endif
}
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_CHRP)
#ifdef CONFIG_PPC_PCI_OF_BUS_MAP
pci_bus_count = next_busno;
/* OpenFirmware based machines need a map of OF bus
* numbers vs. kernel bus numbers since we may have to
* remap them.
*/
if (pci_assign_all_buses)
pcibios_make_OF_bus_map();
#endif
#endif
/* Call common code to handle resource allocation */
pcibios_resource_survey();
/* Call machine dependent fixup */
if (ppc_md.pcibios_fixup)
ppc_md.pcibios_fixup();
/* Call machine dependent post-init code */
if (ppc_md.pcibios_after_init)
ppc_md.pcibios_after_init();
return 0;
}
subsys_initcall(pcibios_init);
static struct pci_controller*
pci_bus_to_hose(int bus)
{
struct pci_controller *hose, *tmp;
list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
if (bus >= hose->first_busno && bus <= hose->last_busno)
return hose;
return NULL;
}
/* Provide information on locations of various I/O regions in physical
* memory. Do this on a per-card basis so that we choose the right
* root bridge.
* Note that the returned IO or memory base is a physical address
*/
SYSCALL_DEFINE3(pciconfig_iobase, long, which,
unsigned long, bus, unsigned long, devfn)
{
struct pci_controller* hose;
long result = -EOPNOTSUPP;
hose = pci_bus_to_hose(bus);
if (!hose)
return -ENODEV;
switch (which) {
case IOBASE_BRIDGE_NUMBER:
return (long)hose->first_busno;
case IOBASE_MEMORY:
return (long)hose->mem_offset[0];
case IOBASE_IO:
return (long)hose->io_base_phys;
case IOBASE_ISA_IO:
return (long)isa_io_base;
case IOBASE_ISA_MEM:
return (long)isa_mem_base;
}
return result;
}
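/*
 * Illustrative userspace sketch (hypothetical program; the IOBASE_*
 * constants are mirrored from the kernel's <asm/pci.h> and may need to be
 * defined locally): querying the physical ISA IO base for a given bus.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	long io_phys = syscall(__NR_pciconfig_iobase, IOBASE_ISA_IO,
 *			       bus, devfn);
 *	if (io_phys < 0)
 *		;	// no hose found for "bus", or request unsupported
 */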
| linux-master | arch/powerpc/kernel/pci_32.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
#include <linux/sched/debug.h> /* for show_regs */
#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/interrupt.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/tm.h>
#include <asm/unistd.h>
#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
unsigned long global_dbcr0[NR_CPUS];
#endif
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool exit_must_hard_disable(void)
{
return static_branch_unlikely(&interrupt_exit_not_reentrant);
}
#else
static inline bool exit_must_hard_disable(void)
{
return true;
}
#endif
/*
* local irqs must be disabled. Returns false if the caller must re-enable
* them, check for new work, and try again.
*
* This should be called with local irqs disabled, but if they were previously
* enabled when the interrupt handler returns (indicating a process-context /
* synchronous interrupt) then irqs_enabled should be true.
*
 * If restartable is true then EE/RI can be left on because interrupts are handled
* with a restart sequence.
*/
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
bool must_hard_disable = (exit_must_hard_disable() || !restartable);
/* This must be done with RI=1 because tracing may touch vmaps */
trace_hardirqs_on();
if (must_hard_disable)
__hard_EE_RI_disable();
#ifdef CONFIG_PPC64
/* This pattern matches prep_irq_for_idle */
if (unlikely(lazy_irq_pending_nocheck())) {
if (must_hard_disable) {
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
__hard_RI_enable();
}
trace_hardirqs_off();
return false;
}
#endif
return true;
}
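/*
 * Caller contract sketch (a simplified view of the exit paths below, not
 * additional kernel code): when this returns false the caller re-enables
 * and re-disables local irqs so pending work is noticed, then retries.
 *
 *	again:
 *		// ... handle TIF work, etc ...
 *		if (!prep_irq_for_enabled_exit(true)) {
 *			local_irq_enable();
 *			local_irq_disable();
 *			goto again;
 *		}
 */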
static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
unsigned long dbcr0 = current->thread.debug.dbcr0;
if (likely(!(dbcr0 & DBCR0_IDM)))
return;
/*
* Check to see if the dbcr0 register is set up to debug.
* Use the internal debug mode bit to do this.
*/
mtmsr(mfmsr() & ~MSR_DE);
if (IS_ENABLED(CONFIG_PPC32)) {
isync();
global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
}
mtspr(SPRN_DBCR0, dbcr0);
mtspr(SPRN_DBSR, -1);
#endif
}
static notrace void check_return_regs_valid(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_BOOK3S_64
unsigned long trap, srr0, srr1;
static bool warned;
u8 *validp;
char *h;
if (trap_is_scv(regs))
return;
trap = TRAP(regs);
// EE in HV mode sets HSRRs like 0xea0
if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
trap = 0xea0;
switch (trap) {
case 0x980:
case INTERRUPT_H_DATA_STORAGE:
case 0xe20:
case 0xe40:
case INTERRUPT_HMI:
case 0xe80:
case 0xea0:
case INTERRUPT_H_FAC_UNAVAIL:
case 0x1200:
case 0x1500:
case 0x1600:
case 0x1800:
validp = &local_paca->hsrr_valid;
if (!READ_ONCE(*validp))
return;
srr0 = mfspr(SPRN_HSRR0);
srr1 = mfspr(SPRN_HSRR1);
h = "H";
break;
default:
validp = &local_paca->srr_valid;
if (!READ_ONCE(*validp))
return;
srr0 = mfspr(SPRN_SRR0);
srr1 = mfspr(SPRN_SRR1);
h = "";
break;
}
if (srr0 == regs->nip && srr1 == regs->msr)
return;
/*
* A NMI / soft-NMI interrupt may have come in after we found
* srr_valid and before the SRRs are loaded. The interrupt then
* comes in and clobbers SRRs and clears srr_valid. Then we load
* the SRRs here and test them above and find they don't match.
*
* Test validity again after that, to catch such false positives.
*
* This test in general will have some window for false negatives
* and may not catch and fix all such cases if an NMI comes in
* later and clobbers SRRs without clearing srr_valid, but hopefully
* such things will get caught most of the time, statistically
* enough to be able to get a warning out.
*/
if (!READ_ONCE(*validp))
return;
if (!data_race(warned)) {
data_race(warned = true);
printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
show_regs(regs);
}
WRITE_ONCE(*validp, 0); /* fixup */
#endif
}
static notrace unsigned long
interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
{
unsigned long ti_flags;
again:
ti_flags = read_thread_flags();
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
local_irq_enable();
if (ti_flags & _TIF_NEED_RESCHED) {
schedule();
} else {
/*
* SIGPENDING must restore signal handler function
* argument GPRs, and some non-volatiles (e.g., r1).
* Restore all for now. This could be made lighter.
*/
if (ti_flags & _TIF_SIGPENDING)
ret |= _TIF_RESTOREALL;
do_notify_resume(regs, ti_flags);
}
local_irq_disable();
ti_flags = read_thread_flags();
}
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
unlikely((ti_flags & _TIF_RESTORE_TM))) {
restore_tm_state(regs);
} else {
unsigned long mathflags = MSR_FP;
if (cpu_has_feature(CPU_FTR_VSX))
mathflags |= MSR_VEC | MSR_VSX;
else if (cpu_has_feature(CPU_FTR_ALTIVEC))
mathflags |= MSR_VEC;
/*
* If userspace MSR has all available FP bits set,
* then they are live and no need to restore. If not,
* it means the regs were given up and restore_math
* may decide to restore them (to avoid taking an FP
* fault).
*/
if ((regs->msr & mathflags) != mathflags)
restore_math(regs);
}
}
check_return_regs_valid(regs);
user_enter_irqoff();
if (!prep_irq_for_enabled_exit(true)) {
user_exit_irqoff();
local_irq_enable();
local_irq_disable();
goto again;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
local_paca->tm_scratch = regs->msr;
#endif
booke_load_dbcr0();
account_cpu_user_exit();
/* Restore user access locks last */
kuap_user_restore(regs);
return ret;
}
/*
* This should be called after a syscall returns, with r3 the return value
* from the syscall. If this function returns non-zero, the system call
* exit assembly should additionally load all GPR registers and CTR and XER
* from the interrupt frame.
*
* The function graph tracer can not trace the return side of this function,
* because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
*/
notrace unsigned long syscall_exit_prepare(unsigned long r3,
struct pt_regs *regs,
long scv)
{
unsigned long ti_flags;
unsigned long ret = 0;
bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
CT_WARN_ON(ct_state() == CONTEXT_USER);
kuap_assert_locked();
regs->result = r3;
/* Check whether the syscall is issued inside a restartable sequence */
rseq_syscall(regs);
ti_flags = read_thread_flags();
if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
r3 = -r3;
regs->ccr |= 0x10000000; /* Set SO bit in CR */
}
}
if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
if (ti_flags & _TIF_RESTOREALL)
ret = _TIF_RESTOREALL;
else
regs->gpr[3] = r3;
		clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
} else {
regs->gpr[3] = r3;
}
if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
do_syscall_trace_leave(regs);
ret |= _TIF_RESTOREALL;
}
local_irq_disable();
ret = interrupt_exit_user_prepare_main(ret, regs);
#ifdef CONFIG_PPC64
regs->exit_result = ret;
#endif
return ret;
}
#ifdef CONFIG_PPC64
notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
{
/*
* This is called when detecting a soft-pending interrupt as well as
* an alternate-return interrupt. So we can't just have the alternate
* return path clear SRR1[MSR] and set PACA_IRQ_HARD_DIS (unless
* the soft-pending case were to fix things up as well). RI might be
* disabled, in which case it gets re-enabled by __hard_irq_disable().
*/
__hard_irq_disable();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#ifdef CONFIG_PPC_BOOK3S_64
set_kuap(AMR_KUAP_BLOCKED);
#endif
trace_hardirqs_off();
user_exit_irqoff();
account_cpu_user_entry();
BUG_ON(!user_mode(regs));
regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);
return regs->exit_result;
}
#endif
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
{
unsigned long ret;
BUG_ON(regs_is_unrecoverable(regs));
BUG_ON(arch_irq_disabled_regs(regs));
CT_WARN_ON(ct_state() == CONTEXT_USER);
/*
* We don't need to restore AMR on the way back to userspace for KUAP.
* AMR can only have been unlocked if we interrupted the kernel.
*/
kuap_assert_locked();
local_irq_disable();
ret = interrupt_exit_user_prepare_main(0, regs);
#ifdef CONFIG_PPC64
regs->exit_result = ret;
#endif
return ret;
}
void preempt_schedule_irq(void);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
{
unsigned long ret = 0;
unsigned long kuap;
bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;
if (regs_is_unrecoverable(regs))
unrecoverable_exception(regs);
/*
* CT_WARN_ON comes here via program_check_exception, so avoid
* recursion.
*
* Skip the assertion on PMIs on 64e to work around a problem caused
* by NMI PMIs incorrectly taking this interrupt return path, it's
* possible for this to hit after interrupt exit to user switches
* context to user. See also the comment in the performance monitor
* handler in exceptions-64e.S
*/
if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64) &&
TRAP(regs) != INTERRUPT_PROGRAM &&
TRAP(regs) != INTERRUPT_PERFMON)
CT_WARN_ON(ct_state() == CONTEXT_USER);
kuap = kuap_get_and_assert_locked();
local_irq_disable();
if (!arch_irq_disabled_regs(regs)) {
/* Returning to a kernel context with local irqs enabled. */
WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
if (IS_ENABLED(CONFIG_PREEMPT)) {
/* Return to preemptible kernel context */
if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
if (preempt_count() == 0)
preempt_schedule_irq();
}
}
check_return_regs_valid(regs);
/*
* Stack store exit can't be restarted because the interrupt
* stack frame might have been clobbered.
*/
if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
/*
* Replay pending soft-masked interrupts now. Don't
			 * just local_irq_enable(); local_irq_disable(); because
* if we are returning from an asynchronous interrupt
* here, another one might hit after irqs are enabled,
* and it would exit via this same path allowing
* another to fire, and so on unbounded.
*/
hard_irq_disable();
replay_soft_interrupts();
/* Took an interrupt, may have more exit work to do. */
goto again;
}
#ifdef CONFIG_PPC64
/*
* An interrupt may clear MSR[EE] and set this concurrently,
* but it will be marked pending and the exit will be retried.
* This leaves a racy window where MSR[EE]=0 and HARD_DIS is
* clear, until interrupt_exit_kernel_restart() calls
* hard_irq_disable(), which will set HARD_DIS again.
*/
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
} else {
check_return_regs_valid(regs);
if (unlikely(stack_store))
__hard_EE_RI_disable();
#endif /* CONFIG_PPC64 */
}
if (unlikely(stack_store)) {
		clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
ret = 1;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
local_paca->tm_scratch = regs->msr;
#endif
/*
* 64s does not want to mfspr(SPRN_AMR) here, because this comes after
* mtmsr, which would cause Read-After-Write stalls. Hence, take the
* AMR value from the check above.
*/
kuap_kernel_restore(regs, kuap);
return ret;
}
#ifdef CONFIG_PPC64
notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
{
__hard_irq_disable();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#ifdef CONFIG_PPC_BOOK3S_64
set_kuap(AMR_KUAP_BLOCKED);
#endif
trace_hardirqs_off();
user_exit_irqoff();
account_cpu_user_entry();
BUG_ON(!user_mode(regs));
regs->exit_result |= interrupt_exit_user_prepare(regs);
return regs->exit_result;
}
/*
* No real need to return a value here because the stack store case does not
* get restarted.
*/
notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
{
__hard_irq_disable();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#ifdef CONFIG_PPC_BOOK3S_64
set_kuap(AMR_KUAP_BLOCKED);
#endif
if (regs->softe == IRQS_ENABLED)
trace_hardirqs_off();
BUG_ON(user_mode(regs));
return interrupt_exit_kernel_prepare(regs);
}
#endif
| linux-master | arch/powerpc/kernel/interrupt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Derived from "arch/i386/kernel/process.c"
* Copyright (C) 1995 Linus Torvalds
*
* Updated and modified by Cort Dougan ([email protected]) and
* Paul Mackerras ([email protected])
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <linux/pkeys.h>
#include <linux/seq_buf.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/hw_irq.h>
#endif
#include <asm/code-patching.h>
#include <asm/exec.h>
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
#include <asm/stacktrace.h>
#include <asm/hw_breakpoint.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
extern unsigned long _get_SP(void);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Are we running in "Suspend disabled" mode? If so we have to block any
* sigreturn that would get us into suspended state, and we also warn in some
* other paths that we should never reach with suspend disabled.
*/
bool tm_suspend_disabled __ro_after_init = false;
static void check_if_tm_restore_required(struct task_struct *tsk)
{
/*
* If we are saving the current thread's registers, and the
* thread is in a transactional state, set the TIF_RESTORE_TM
* bit so that we know to restore the registers before
* returning to userspace.
*/
if (tsk == current && tsk->thread.regs &&
MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
!test_thread_flag(TIF_RESTORE_TM)) {
regs_set_return_msr(&tsk->thread.ckpt_regs,
tsk->thread.regs->msr);
set_thread_flag(TIF_RESTORE_TM);
}
}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);
static int __init enable_strict_msr_control(char *str)
{
strict_msr_control = true;
pr_info("Enabling strict facility control\n");
return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);
/* notrace because it's called by restore_math */
unsigned long notrace msr_check_and_set(unsigned long bits)
{
unsigned long oldmsr = mfmsr();
unsigned long newmsr;
newmsr = oldmsr | bits;
if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
newmsr |= MSR_VSX;
if (oldmsr != newmsr)
newmsr = mtmsr_isync_irqsafe(newmsr);
return newmsr;
}
EXPORT_SYMBOL_GPL(msr_check_and_set);
/* notrace because it's called by restore_math */
void notrace __msr_check_and_clear(unsigned long bits)
{
unsigned long oldmsr = mfmsr();
unsigned long newmsr;
newmsr = oldmsr & ~bits;
if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
newmsr &= ~MSR_VSX;
if (oldmsr != newmsr)
mtmsr_isync_irqsafe(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);
#ifdef CONFIG_PPC_FPU
static void __giveup_fpu(struct task_struct *tsk)
{
unsigned long msr;
save_fpu(tsk);
msr = tsk->thread.regs->msr;
msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;
regs_set_return_msr(tsk->thread.regs, msr);
}
void giveup_fpu(struct task_struct *tsk)
{
check_if_tm_restore_required(tsk);
msr_check_and_set(MSR_FP);
__giveup_fpu(tsk);
msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
*/
void flush_fp_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
/*
* We need to disable preemption here because if we didn't,
* another process could get scheduled after the regs->msr
* test but before we have finished saving the FP registers
* to the thread_struct. That process could take over the
* FPU, and then when we get scheduled again we would store
* bogus values for the remaining FP registers.
*/
preempt_disable();
if (tsk->thread.regs->msr & MSR_FP) {
/*
* This should only ever be called for current or
* for a stopped child process. Since we save away
* the FP register state on context switch,
* there is something wrong if a stopped child appears
* to still have its FP state in the CPU registers.
*/
BUG_ON(tsk != current);
giveup_fpu(tsk);
}
preempt_enable();
}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
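/*
 * Illustrative sketch (hypothetical caller, e.g. a ptrace register-read
 * path; "child" and "buf" are assumed to exist): flush first so the saved
 * copy is current before it is read.
 *
 *	flush_fp_to_thread(child);
 *	memcpy(buf, &child->thread.fp_state, sizeof(child->thread.fp_state));
 */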
void enable_kernel_fp(void)
{
unsigned long cpumsr;
WARN_ON(preemptible());
cpumsr = msr_check_and_set(MSR_FP);
if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
check_if_tm_restore_required(current);
/*
* If a thread has already been reclaimed then the
* checkpointed registers are on the CPU but have definitely
* been saved by the reclaim code. Don't need to and *cannot*
* giveup as this would save to the 'live' structure not the
* checkpointed structure.
*/
if (!MSR_TM_ACTIVE(cpumsr) &&
MSR_TM_ACTIVE(current->thread.regs->msr))
return;
__giveup_fpu(current);
}
}
EXPORT_SYMBOL(enable_kernel_fp);
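/*
 * Illustrative sketch (hypothetical in-kernel FP user): the enable/disable
 * pair must run with preemption disabled; disable_kernel_fp() comes from
 * <asm/switch_to.h>.
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	// ... use floating point / FPRs from kernel code ...
 *	disable_kernel_fp();
 *	preempt_enable();
 */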
#else
static inline void __giveup_fpu(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
static void __giveup_altivec(struct task_struct *tsk)
{
unsigned long msr;
save_altivec(tsk);
msr = tsk->thread.regs->msr;
msr &= ~MSR_VEC;
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;
regs_set_return_msr(tsk->thread.regs, msr);
}
void giveup_altivec(struct task_struct *tsk)
{
check_if_tm_restore_required(tsk);
msr_check_and_set(MSR_VEC);
__giveup_altivec(tsk);
msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);
void enable_kernel_altivec(void)
{
unsigned long cpumsr;
WARN_ON(preemptible());
cpumsr = msr_check_and_set(MSR_VEC);
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
check_if_tm_restore_required(current);
/*
* If a thread has already been reclaimed then the
* checkpointed registers are on the CPU but have definitely
* been saved by the reclaim code. Don't need to and *cannot*
* giveup as this would save to the 'live' structure not the
* checkpointed structure.
*/
if (!MSR_TM_ACTIVE(cpumsr) &&
MSR_TM_ACTIVE(current->thread.regs->msr))
return;
__giveup_altivec(current);
}
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
*/
void flush_altivec_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
if (tsk->thread.regs->msr & MSR_VEC) {
BUG_ON(tsk != current);
giveup_altivec(tsk);
}
preempt_enable();
}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
unsigned long msr = tsk->thread.regs->msr;
/*
* We should never be setting MSR_VSX without also setting
* MSR_FP and MSR_VEC
*/
WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
/* __giveup_fpu will clear MSR_VSX */
if (msr & MSR_FP)
__giveup_fpu(tsk);
if (msr & MSR_VEC)
__giveup_altivec(tsk);
}
static void giveup_vsx(struct task_struct *tsk)
{
check_if_tm_restore_required(tsk);
msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
__giveup_vsx(tsk);
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
void enable_kernel_vsx(void)
{
unsigned long cpumsr;
WARN_ON(preemptible());
cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
if (current->thread.regs &&
(current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
check_if_tm_restore_required(current);
/*
* If a thread has already been reclaimed then the
* checkpointed registers are on the CPU but have definitely
* been saved by the reclaim code. Don't need to and *cannot*
* giveup as this would save to the 'live' structure not the
* checkpointed structure.
*/
if (!MSR_TM_ACTIVE(cpumsr) &&
MSR_TM_ACTIVE(current->thread.regs->msr))
return;
__giveup_vsx(current);
}
}
EXPORT_SYMBOL(enable_kernel_vsx);
void flush_vsx_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
BUG_ON(tsk != current);
giveup_vsx(tsk);
}
preempt_enable();
}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
check_if_tm_restore_required(tsk);
msr_check_and_set(MSR_SPE);
__giveup_spe(tsk);
msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);
void enable_kernel_spe(void)
{
WARN_ON(preemptible());
msr_check_and_set(MSR_SPE);
if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
check_if_tm_restore_required(current);
__giveup_spe(current);
}
}
EXPORT_SYMBOL(enable_kernel_spe);
void flush_spe_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
if (tsk->thread.regs->msr & MSR_SPE) {
BUG_ON(tsk != current);
tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
giveup_spe(tsk);
}
preempt_enable();
}
}
#endif /* CONFIG_SPE */
static unsigned long msr_all_available;
static int __init init_msr_all_available(void)
{
if (IS_ENABLED(CONFIG_PPC_FPU))
msr_all_available |= MSR_FP;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
msr_all_available |= MSR_VEC;
if (cpu_has_feature(CPU_FTR_VSX))
msr_all_available |= MSR_VSX;
if (cpu_has_feature(CPU_FTR_SPE))
msr_all_available |= MSR_SPE;
return 0;
}
early_initcall(init_msr_all_available);
void giveup_all(struct task_struct *tsk)
{
unsigned long usermsr;
if (!tsk->thread.regs)
return;
check_if_tm_restore_required(tsk);
usermsr = tsk->thread.regs->msr;
if ((usermsr & msr_all_available) == 0)
return;
msr_check_and_set(msr_all_available);
WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
if (usermsr & MSR_FP)
__giveup_fpu(tsk);
if (usermsr & MSR_VEC)
__giveup_altivec(tsk);
if (usermsr & MSR_SPE)
__giveup_spe(tsk);
msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_FPU
static bool should_restore_fp(void)
{
if (current->thread.load_fp) {
current->thread.load_fp++;
return true;
}
return false;
}
static void do_restore_fp(void)
{
	load_fp_state(&current->thread.fp_state);
}
#else
static bool should_restore_fp(void) { return false; }
static void do_restore_fp(void) { }
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
static bool should_restore_altivec(void)
{
if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
current->thread.load_vec++;
return true;
}
return false;
}
static void do_restore_altivec(void)
{
	load_vr_state(&current->thread.vr_state);
current->thread.used_vr = 1;
}
#else
static bool should_restore_altivec(void) { return false; }
static void do_restore_altivec(void) { }
#endif /* CONFIG_ALTIVEC */
static bool should_restore_vsx(void)
{
if (cpu_has_feature(CPU_FTR_VSX))
return true;
return false;
}
#ifdef CONFIG_VSX
static void do_restore_vsx(void)
{
current->thread.used_vsr = 1;
}
#else
static void do_restore_vsx(void) { }
#endif /* CONFIG_VSX */
/*
* The exception exit path calls restore_math() with interrupts hard disabled
* but the soft irq state not "reconciled". ftrace code that calls
* local_irq_save/restore causes warnings.
*
* Rather than complicate the exit path, just don't trace restore_math. This
* could be done by having ftrace entry code check for this un-reconciled
* condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
* temporarily fix it up for the duration of the ftrace call.
*/
void notrace restore_math(struct pt_regs *regs)
{
unsigned long msr;
unsigned long new_msr = 0;
msr = regs->msr;
/*
* new_msr tracks the facilities that are to be restored. Only reload
* if the bit is not set in the user MSR (if it is set, the registers
* are live for the user thread).
*/
if ((!(msr & MSR_FP)) && should_restore_fp())
new_msr |= MSR_FP;
if ((!(msr & MSR_VEC)) && should_restore_altivec())
new_msr |= MSR_VEC;
if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
new_msr |= MSR_VSX;
}
if (new_msr) {
unsigned long fpexc_mode = 0;
msr_check_and_set(new_msr);
if (new_msr & MSR_FP) {
do_restore_fp();
// This also covers VSX, because VSX implies FP
fpexc_mode = current->thread.fpexc_mode;
}
if (new_msr & MSR_VEC)
do_restore_altivec();
if (new_msr & MSR_VSX)
do_restore_vsx();
msr_check_and_clear(new_msr);
regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
}
}
#endif /* CONFIG_PPC_BOOK3S_64 */
static void save_all(struct task_struct *tsk)
{
unsigned long usermsr;
if (!tsk->thread.regs)
return;
usermsr = tsk->thread.regs->msr;
if ((usermsr & msr_all_available) == 0)
return;
msr_check_and_set(msr_all_available);
WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
if (usermsr & MSR_FP)
save_fpu(tsk);
if (usermsr & MSR_VEC)
save_altivec(tsk);
if (usermsr & MSR_SPE)
__giveup_spe(tsk);
msr_check_and_clear(msr_all_available);
}
void flush_all_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
BUG_ON(tsk != current);
#ifdef CONFIG_SPE
if (tsk->thread.regs->msr & MSR_SPE)
tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif
save_all(tsk);
preempt_enable();
}
}
EXPORT_SYMBOL(flush_all_to_thread);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
unsigned long error_code, int breakpt)
{
current->thread.trap_nr = TRAP_HWBKPT;
if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
return;
/* Deliver the signal to userspace */
force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
(void __user *)address);
}
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
static void do_break_handler(struct pt_regs *regs)
{
struct arch_hw_breakpoint null_brk = {0};
struct arch_hw_breakpoint *info;
ppc_inst_t instr = ppc_inst(0);
int type = 0;
int size = 0;
unsigned long ea;
int i;
/*
	 * If the underlying hw supports only one watchpoint, we know it
	 * caused the exception. 8xx also falls into this category.
*/
if (nr_wp_slots() == 1) {
__set_breakpoint(0, &null_brk);
current->thread.hw_brk[0] = null_brk;
current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED;
return;
}
/* Otherwise find out which DAWR caused exception and disable it. */
wp_get_instr_detail(regs, &instr, &type, &size, &ea);
for (i = 0; i < nr_wp_slots(); i++) {
		info = &current->thread.hw_brk[i];
if (!info->address)
continue;
if (wp_check_constraints(regs, instr, ea, type, size, info)) {
__set_breakpoint(i, &null_brk);
current->thread.hw_brk[i] = null_brk;
current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED;
}
}
}
DEFINE_INTERRUPT_HANDLER(do_break)
{
current->thread.trap_nr = TRAP_HWBKPT;
if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr,
11, SIGSEGV) == NOTIFY_STOP)
return;
if (debugger_break_match(regs))
return;
/*
* We reach here only when watchpoint exception is generated by ptrace
* event (or hw is buggy!). Now if CONFIG_HAVE_HW_BREAKPOINT is set,
* watchpoint is already handled by hw_breakpoint_handler() so we don't
* have to do anything. But when CONFIG_HAVE_HW_BREAKPOINT is not set,
* we need to manually handle the watchpoint here.
*/
if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT))
do_break_handler(regs);
/* Deliver the signal to userspace */
force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)regs->dar);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* Set the debug registers back to their default "safe" values.
*/
static void set_debug_reg_defaults(struct thread_struct *thread)
{
thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
/*
* Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
*/
thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
DBCR1_IAC3US | DBCR1_IAC4US;
/*
* Force Data Address Compare User/Supervisor bits to be User-only
* (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
*/
thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
thread->debug.dbcr1 = 0;
#endif
}
static void prime_debug_regs(struct debug_reg *debug)
{
/*
* We could have inherited MSR_DE from userspace, since
* it doesn't get cleared on exception entry. Make sure
* MSR_DE is clear before we enable any debug events.
*/
mtmsr(mfmsr() & ~MSR_DE);
mtspr(SPRN_IAC1, debug->iac1);
mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
mtspr(SPRN_IAC3, debug->iac3);
mtspr(SPRN_IAC4, debug->iac4);
#endif
mtspr(SPRN_DAC1, debug->dac1);
mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
mtspr(SPRN_DVC1, debug->dvc1);
mtspr(SPRN_DVC2, debug->dvc2);
#endif
mtspr(SPRN_DBCR0, debug->dbcr0);
mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * If either the old or the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
*/
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
if ((current->thread.debug.dbcr0 & DBCR0_IDM)
|| (new_debug->dbcr0 & DBCR0_IDM))
prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
{
preempt_disable();
__set_breakpoint(i, brk);
preempt_enable();
}
static void set_debug_reg_defaults(struct thread_struct *thread)
{
int i;
struct arch_hw_breakpoint null_brk = {0};
for (i = 0; i < nr_wp_slots(); i++) {
thread->hw_brk[i] = null_brk;
if (ppc_breakpoint_available())
set_breakpoint(i, &thread->hw_brk[i]);
}
}
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
struct arch_hw_breakpoint *b)
{
if (a->address != b->address)
return false;
if (a->type != b->type)
return false;
if (a->len != b->len)
return false;
/* no need to check hw_len. it's calculated from address and len */
return true;
}
static void switch_hw_breakpoint(struct task_struct *new)
{
int i;
for (i = 0; i < nr_wp_slots(); i++) {
		if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
&new->thread.hw_brk[i])))
continue;
__set_breakpoint(i, &new->thread.hw_brk[i]);
}
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
unsigned long dabr, dabrx;
dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
dabrx = ((brk->type >> 3) & 0x7);
if (ppc_md.set_dabr)
return ppc_md.set_dabr(dabr, dabrx);
if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
mtspr(SPRN_DAC1, dabr);
if (IS_ENABLED(CONFIG_PPC_47x))
isync();
return 0;
} else if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
mtspr(SPRN_DABR, dabr);
if (cpu_has_feature(CPU_FTR_DABRX))
mtspr(SPRN_DABRX, dabrx);
return 0;
} else {
return -EINVAL;
}
}
static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
{
unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
LCTRL1_CRWF_RW;
unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);
if (start_addr == 0)
lctrl2 |= LCTRL2_LW0LA_F;
else if (end_addr == 0)
lctrl2 |= LCTRL2_LW0LA_E;
else
lctrl2 |= LCTRL2_LW0LA_EandF;
mtspr(SPRN_LCTRL2, 0);
if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
return 0;
if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;
mtspr(SPRN_CMPE, start_addr - 1);
mtspr(SPRN_CMPF, end_addr);
mtspr(SPRN_LCTRL1, lctrl1);
mtspr(SPRN_LCTRL2, lctrl2);
return 0;
}
static void set_hw_breakpoint(int nr, struct arch_hw_breakpoint *brk)
{
if (dawr_enabled())
// Power8 or later
set_dawr(nr, brk);
else if (IS_ENABLED(CONFIG_PPC_8xx))
set_breakpoint_8xx(brk);
else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
// Power7 or earlier
set_dabr(brk);
else
// Shouldn't happen due to higher level checks
WARN_ON_ONCE(1);
}
void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
set_hw_breakpoint(nr, brk);
}
/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
if (dawr_enabled())
return true; /* POWER8 DAWR or POWER9 forced DAWR */
if (cpu_has_feature(CPU_FTR_ARCH_207S))
return false; /* POWER9 with DAWR disabled */
/* DABR: Everything but POWER8 and POWER9 */
return true;
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
/* Disable the breakpoint in hardware without touching current_brk[] */
void suspend_breakpoints(void)
{
struct arch_hw_breakpoint brk = {0};
int i;
if (!ppc_breakpoint_available())
return;
for (i = 0; i < nr_wp_slots(); i++)
set_hw_breakpoint(i, &brk);
}
/*
* Re-enable breakpoints suspended by suspend_breakpoints() in hardware
* from current_brk[]
*/
void restore_breakpoints(void)
{
int i;
if (!ppc_breakpoint_available())
return;
for (i = 0; i < nr_wp_slots(); i++)
		set_hw_breakpoint(i, this_cpu_ptr(&current_brk[i]));
}
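/*
 * Illustrative sketch (hypothetical caller): quiesce the watchpoints
 * around code that must not trigger them, then re-arm from current_brk[].
 *
 *	suspend_breakpoints();
 *	// ... single-step or emulate the offending access ...
 *	restore_breakpoints();
 */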
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_enabled(struct task_struct *tsk)
{
return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}
static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
{
/*
* Use the current MSR TM suspended bit to track if we have
* checkpointed state outstanding.
* On signal delivery, we'd normally reclaim the checkpointed
* state to obtain stack pointer (see:get_tm_stackpointer()).
* This will then directly return to userspace without going
* through __switch_to(). However, if the stack frame is bad,
* we need to exit this thread which calls __switch_to() which
* will again attempt to reclaim the already saved tm state.
* Hence we need to check that we've not already reclaimed
* this state.
* We do this using the current MSR, rather tracking it in
* some specific thread_struct bit, as it has the additional
* benefit of checking for a potential TM bad thing exception.
*/
if (!MSR_TM_SUSPENDED(mfmsr()))
return;
giveup_all(container_of(thr, struct task_struct, thread));
tm_reclaim(thr, cause);
/*
* If we are in a transaction and FP is off then we can't have
* used FP inside that transaction. Hence the checkpointed
* state is the same as the live state. We need to copy the
* live state to the checkpointed state so that when the
* transaction is restored, the checkpointed state is correct
* and the aborted transaction sees the correct state. We use
* ckpt_regs.msr here as that's what tm_reclaim will use to
* determine if it's going to write the checkpointed state or
* not. So either this will write the checkpointed registers,
* or reclaim will. Similarly for VMX.
*/
if ((thr->ckpt_regs.msr & MSR_FP) == 0)
memcpy(&thr->ckfp_state, &thr->fp_state,
sizeof(struct thread_fp_state));
if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
memcpy(&thr->ckvr_state, &thr->vr_state,
sizeof(struct thread_vr_state));
}
void tm_reclaim_current(uint8_t cause)
{
tm_enable();
	tm_reclaim_thread(&current->thread, cause);
}
static inline void tm_reclaim_task(struct task_struct *tsk)
{
/* We have to work out if we're switching from/to a task that's in the
* middle of a transaction.
*
* In switching we need to maintain a 2nd register state as
* oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
* checkpointed (tbegin) state in ckpt_regs, ckfp_state and
* ckvr_state
*
* We also context switch (save) TFHAR/TEXASR/TFIAR in here.
*/
struct thread_struct *thr = &tsk->thread;
if (!thr->regs)
return;
if (!MSR_TM_ACTIVE(thr->regs->msr))
goto out_and_saveregs;
WARN_ON(tm_suspend_disabled);
TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
"ccr=%lx, msr=%lx, trap=%lx)\n",
tsk->pid, thr->regs->nip,
thr->regs->ccr, thr->regs->msr,
thr->regs->trap);
tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
TM_DEBUG("--- tm_reclaim on pid %d complete\n",
tsk->pid);
out_and_saveregs:
/* Always save the regs here, even if a transaction's not active.
* This context-switches a thread's TM info SPRs. We do it here to
* be consistent with the restore path (in recheckpoint) which
* cannot happen later in _switch().
*/
tm_save_sprs(thr);
}
extern void __tm_recheckpoint(struct thread_struct *thread);
void tm_recheckpoint(struct thread_struct *thread)
{
unsigned long flags;
if (!(thread->regs->msr & MSR_TM))
return;
/* We really can't be interrupted here as the TEXASR registers can't
* change and later in the trecheckpoint code, we have a userspace R1.
* So let's hard disable over this region.
*/
local_irq_save(flags);
hard_irq_disable();
/* The TM SPRs are restored here, so that TEXASR.FS can be set
* before the trecheckpoint and no explosion occurs.
*/
tm_restore_sprs(thread);
__tm_recheckpoint(thread);
local_irq_restore(flags);
}
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
if (!cpu_has_feature(CPU_FTR_TM))
return;
/* Recheckpoint the registers of the thread we're about to switch to.
*
* If the task was using FP, we non-lazily reload both the original and
* the speculative FP register states. This is because the kernel
* doesn't see if/when a TM rollback occurs, so if we take an FP
* unavailable later, we are unable to determine which set of FP regs
* need to be restored.
*/
if (!tm_enabled(new))
return;
	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
tm_restore_sprs(&new->thread);
return;
}
/* Recheckpoint to restore original checkpointed register state. */
TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
new->pid, new->thread.regs->msr);
tm_recheckpoint(&new->thread);
/*
* The checkpointed state has been restored but the live state has
* not, ensure all the math functionality is turned off to trigger
* restore_math() to reload.
*/
new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
TM_DEBUG("*** tm_recheckpoint of pid %d complete "
"(kernel msr 0x%lx)\n",
new->pid, mfmsr());
}
static inline void __switch_to_tm(struct task_struct *prev,
struct task_struct *new)
{
if (cpu_has_feature(CPU_FTR_TM)) {
if (tm_enabled(prev) || tm_enabled(new))
tm_enable();
if (tm_enabled(prev)) {
prev->thread.load_tm++;
tm_reclaim_task(prev);
if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
prev->thread.regs->msr &= ~MSR_TM;
}
tm_recheckpoint_new_task(new);
}
}
/*
* This is called if we are on the way out to userspace and the
* TIF_RESTORE_TM flag is set. It checks if we need to reload
* FP and/or vector state and does so if necessary.
* If userspace is inside a transaction (whether active or
* suspended) and FP/VMX/VSX instructions have ever been enabled
* inside that transaction, then we have to keep them enabled
* and keep the FP/VMX/VSX state loaded while ever the transaction
* continues. The reason is that if we didn't, and subsequently
* got a FP/VMX/VSX unavailable interrupt inside a transaction,
* we don't know whether it's the same transaction, and thus we
* don't know which of the checkpointed state and the transactional
* state to use.
*/
void restore_tm_state(struct pt_regs *regs)
{
unsigned long msr_diff;
/*
* This is the only moment we should clear TIF_RESTORE_TM as
* it is here that ckpt_regs.msr and pt_regs.msr become the same
* again, anything else could lead to an incorrect ckpt_msr being
* saved and therefore incorrect signal contexts.
*/
clear_thread_flag(TIF_RESTORE_TM);
if (!MSR_TM_ACTIVE(regs->msr))
return;
msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
/* Ensure that restore_math() will restore */
if (msr_diff & MSR_FP)
current->thread.load_fp = 1;
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
current->thread.load_vec = 1;
#endif
restore_math(regs);
regs_set_return_msr(regs, regs->msr | msr_diff);
}
#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
void tm_reclaim_current(uint8_t cause) {}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC))
t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_SPE
if (cpu_has_feature(CPU_FTR_SPE))
t->spefscr = mfspr(SPRN_SPEFSCR);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR))
t->dscr = mfspr(SPRN_DSCR);
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
t->bescr = mfspr(SPRN_BESCR);
t->ebbhr = mfspr(SPRN_EBBHR);
t->ebbrr = mfspr(SPRN_EBBRR);
t->fscr = mfspr(SPRN_FSCR);
/*
* Note that the TAR is not available for use in the kernel.
* (To provide this, the TAR should be backed up/restored on
* exception entry/exit instead, and be in pt_regs. FIXME,
* this should be in pt_regs anyway (for debug).)
*/
t->tar = mfspr(SPRN_TAR);
}
if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE))
t->hashkeyr = mfspr(SPRN_HASHKEYR);
#endif
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void kvmppc_save_user_regs(void)
{
unsigned long usermsr;
if (!current->thread.regs)
return;
usermsr = current->thread.regs->msr;
if (usermsr & MSR_FP)
save_fpu(current);
if (usermsr & MSR_VEC)
save_altivec(current);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (usermsr & MSR_TM) {
current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
current->thread.tm_texasr = mfspr(SPRN_TEXASR);
current->thread.regs->msr &= ~MSR_TM;
}
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);
void kvmppc_save_current_sprs(void)
{
	save_sprs(&current->thread);
}
EXPORT_SYMBOL_GPL(kvmppc_save_current_sprs);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
static inline void restore_sprs(struct thread_struct *old_thread,
struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
old_thread->vrsave != new_thread->vrsave)
mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_SPE
if (cpu_has_feature(CPU_FTR_SPE) &&
old_thread->spefscr != new_thread->spefscr)
mtspr(SPRN_SPEFSCR, new_thread->spefscr);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR)) {
u64 dscr = get_paca()->dscr_default;
if (new_thread->dscr_inherit)
dscr = new_thread->dscr;
if (old_thread->dscr != dscr)
mtspr(SPRN_DSCR, dscr);
}
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
if (old_thread->bescr != new_thread->bescr)
mtspr(SPRN_BESCR, new_thread->bescr);
if (old_thread->ebbhr != new_thread->ebbhr)
mtspr(SPRN_EBBHR, new_thread->ebbhr);
if (old_thread->ebbrr != new_thread->ebbrr)
mtspr(SPRN_EBBRR, new_thread->ebbrr);
if (old_thread->fscr != new_thread->fscr)
mtspr(SPRN_FSCR, new_thread->fscr);
if (old_thread->tar != new_thread->tar)
mtspr(SPRN_TAR, new_thread->tar);
}
if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
old_thread->tidr != new_thread->tidr)
mtspr(SPRN_TIDR, new_thread->tidr);
if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE) &&
old_thread->hashkeyr != new_thread->hashkeyr)
mtspr(SPRN_HASHKEYR, new_thread->hashkeyr);
#endif
}
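/*
 * The main context-switch routine: flush the outgoing task's FP/VMX/SPE and
 * TM state, save and restore the per-thread SPRs, then call the low-level
 * _switch() to swap stacks and resume the incoming task.
 */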
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
{
struct thread_struct *new_thread, *old_thread;
struct task_struct *last;
#ifdef CONFIG_PPC_64S_HASH_MMU
struct ppc64_tlb_batch *batch;
#endif
new_thread = &new->thread;
	old_thread = &current->thread;
WARN_ON(!irqs_disabled());
#ifdef CONFIG_PPC_64S_HASH_MMU
batch = this_cpu_ptr(&ppc64_tlb_batch);
if (batch->active) {
current_thread_info()->local_flags |= _TLF_LAZY_MMU;
if (batch->index)
__flush_tlb_pending(batch);
batch->active = 0;
}
/*
* On POWER9 the copy-paste buffer can only paste into
* foreign real addresses, so unprivileged processes can not
* see the data or use it in any way unless they have
* foreign real mappings. If the new process has the foreign
* real address mappings, we must issue a cp_abort to clear
* any state and prevent snooping, corruption or a covert
* channel. ISA v3.1 supports paste into local memory.
*/
if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
atomic_read(&new->mm->context.vas_windows)))
asm volatile(PPC_CP_ABORT);
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
switch_booke_debug_regs(&new->thread.debug);
#else
/*
* For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
* schedule DABR
*/
#ifndef CONFIG_HAVE_HW_BREAKPOINT
switch_hw_breakpoint(new);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
/*
* We need to save SPRs before treclaim/trecheckpoint as these will
* change a number of them.
*/
save_sprs(&prev->thread);
/* Save FPU, Altivec, VSX and SPE state */
giveup_all(prev);
__switch_to_tm(prev, new);
if (!radix_enabled()) {
/*
* We can't take a PMU exception inside _switch() since there
* is a window where the kernel stack SLB and the kernel stack
* are out of sync. Hard disable here.
*/
hard_irq_disable();
}
/*
* Call restore_sprs() and set_return_regs_changed() before calling
* _switch(). If we move it after _switch() then we miss out on calling
* it for new tasks. The reason for this is we manually create a stack
* frame for new tasks that directly returns through ret_from_fork() or
* ret_from_kernel_thread(). See copy_thread() for details.
*/
restore_sprs(old_thread, new_thread);
set_return_regs_changed(); /* _switch changes stack (and regs) */
if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
kuap_assert_locked();
last = _switch(old_thread, new_thread);
/*
* Nothing after _switch will be run for newly created tasks,
* because they switch directly to ret_from_fork/ret_from_kernel_thread
* etc. Code added here should have a comment explaining why that is
* okay.
*/
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* This applies to a process that was context switched while inside
* arch_enter_lazy_mmu_mode(), to re-activate the batch that was
* deactivated above, before _switch(). This will never be the case
* for new tasks.
*/
if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
batch = this_cpu_ptr(&ppc64_tlb_batch);
batch->active = 1;
}
#endif
/*
* Math facilities are masked out of the child MSR in copy_thread.
* A new task does not need to restore_math because it will
* demand fault them.
*/
if (current->thread.regs)
restore_math(current->thread.regs);
#endif /* CONFIG_PPC_BOOK3S_64 */
return last;
}
#define NR_INSN_TO_PRINT 16
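/*
 * Dump NR_INSN_TO_PRINT instructions of kernel code around regs->nip, marking
 * the instruction at NIP with angle brackets and printing XXXXXXXX for words
 * that cannot be read.
 */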
static void show_instructions(struct pt_regs *regs)
{
int i;
unsigned long nip = regs->nip;
unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
printk("Code: ");
/*
* If we were executing with the MMU off for instructions, adjust pc
* rather than printing XXXXXXXX.
*/
if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
pc = (unsigned long)phys_to_virt(pc);
nip = (unsigned long)phys_to_virt(regs->nip);
}
for (i = 0; i < NR_INSN_TO_PRINT; i++) {
int instr;
if (get_kernel_nofault(instr, (const void *)pc)) {
pr_cont("XXXXXXXX ");
} else {
if (nip == pc)
pr_cont("<%08x> ", instr);
else
pr_cont("%08x ", instr);
}
pc += sizeof(int);
}
pr_cont("\n");
}
void show_user_instructions(struct pt_regs *regs)
{
unsigned long pc;
int n = NR_INSN_TO_PRINT;
struct seq_buf s;
char buf[96]; /* enough for 8 times 9 + 2 chars */
pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
seq_buf_init(&s, buf, sizeof(buf));
while (n) {
int i;
seq_buf_clear(&s);
for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
int instr;
if (copy_from_user_nofault(&instr, (void __user *)pc,
sizeof(instr))) {
seq_buf_printf(&s, "XXXXXXXX ");
continue;
}
seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
}
if (!seq_buf_has_overflowed(&s))
pr_info("%s[%d]: code: %s\n", current->comm,
current->pid, s.buffer);
}
}
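/* Map individual MSR bits to short mnemonic names for register dumps. */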
struct regbit {
unsigned long bit;
const char *name;
};
static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
{MSR_SF, "SF"},
{MSR_HV, "HV"},
#endif
{MSR_VEC, "VEC"},
{MSR_VSX, "VSX"},
#ifdef CONFIG_BOOKE
{MSR_CE, "CE"},
#endif
{MSR_EE, "EE"},
{MSR_PR, "PR"},
{MSR_FP, "FP"},
{MSR_ME, "ME"},
#ifdef CONFIG_BOOKE
{MSR_DE, "DE"},
#else
{MSR_SE, "SE"},
{MSR_BE, "BE"},
#endif
{MSR_IR, "IR"},
{MSR_DR, "DR"},
{MSR_PMM, "PMM"},
#ifndef CONFIG_BOOKE
{MSR_RI, "RI"},
{MSR_LE, "LE"},
#endif
{0, NULL}
};
static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
const char *s = "";
for (; bits->bit; ++bits)
if (val & bits->bit) {
pr_cont("%s%s", s, bits->name);
s = sep;
}
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static struct regbit msr_tm_bits[] = {
{MSR_TS_T, "T"},
{MSR_TS_S, "S"},
{MSR_TM, "E"},
{0, NULL}
};
static void print_tm_bits(unsigned long val)
{
/*
	 * This only prints something if at least one of the TM bits is set.
* Inside the TM[], the output means:
* E: Enabled (bit 32)
* S: Suspended (bit 33)
* T: Transactional (bit 34)
*/
if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
pr_cont(",TM[");
print_bits(val, msr_tm_bits, "");
pr_cont("]");
}
}
#else
static void print_tm_bits(unsigned long val) {}
#endif
static void print_msr_bits(unsigned long val)
{
pr_cont("<");
print_bits(val, msr_bits, ",");
print_tm_bits(val);
pr_cont(">");
}
#ifdef CONFIG_PPC64
#define REG "%016lx"
#define REGS_PER_LINE 4
#else
#define REG "%08lx"
#define REGS_PER_LINE 8
#endif
static void __show_regs(struct pt_regs *regs)
{
int i, trap;
printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
regs->nip, regs->link, regs->ctr);
printk("REGS: %px TRAP: %04lx %s (%s)\n",
regs, regs->trap, print_tainted(), init_utsname()->release);
printk("MSR: "REG" ", regs->msr);
print_msr_bits(regs->msr);
pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
trap = TRAP(regs);
if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
pr_cont("CFAR: "REG" ", regs->orig_gpr3);
if (trap == INTERRUPT_MACHINE_CHECK ||
trap == INTERRUPT_DATA_STORAGE ||
trap == INTERRUPT_ALIGNMENT) {
if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
pr_cont("DEAR: "REG" ESR: "REG" ", regs->dear, regs->esr);
else
pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
}
#ifdef CONFIG_PPC64
pr_cont("IRQMASK: %lx ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(regs->msr))
pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif
for (i = 0; i < 32; i++) {
if ((i % REGS_PER_LINE) == 0)
pr_cont("\nGPR%02d: ", i);
pr_cont(REG " ", regs->gpr[i]);
}
pr_cont("\n");
/*
	 * Look up NIP late so we have the best chance of getting the
* above info out without failing
*/
if (IS_ENABLED(CONFIG_KALLSYMS)) {
printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
}
}
void show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
__show_regs(regs);
show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
if (!user_mode(regs))
show_instructions(regs);
}
void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
void arch_setup_new_exec(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
if (!radix_enabled())
hash__setup_new_exec();
#endif
/*
* If we exec out of a kernel thread then thread.regs will not be
* set. Do it now.
*/
if (!current->thread.regs) {
struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
current->thread.regs = regs - 1;
}
#ifdef CONFIG_PPC_MEM_KEYS
current->thread.regs->amr = default_amr;
current->thread.regs->iamr = default_iamr;
#endif
}
#ifdef CONFIG_PPC64
/*
* Assign a TIDR (thread ID) for task @t and set it in the thread
* structure. For now, we only support setting TIDR for 'current' task.
*
 * Since the TID value is a truncated form of its PID, it is possible
* (but unlikely) for 2 threads to have the same TID. In the unlikely event
* that 2 threads share the same TID and are waiting, one of the following
* cases will happen:
*
* 1. The correct thread is running, the wrong thread is not
 * In this situation, the correct thread is woken and proceeds to pass its
* condition check.
*
 * 2. Neither thread is running
* In this situation, neither thread will be woken. When scheduled, the waiting
* threads will execute either a wait, which will return immediately, followed
* by a condition check, which will pass for the correct thread and fail
* for the wrong thread, or they will execute the condition check immediately.
*
* 3. The wrong thread is running, the correct thread is not
 * The wrong thread will be woken, but will fail its condition check and
* re-execute wait. The correct thread, when scheduled, will execute either
 * its condition check (which will pass), or wait, which returns immediately
 * when called the first time after the thread is scheduled, followed by its
* condition check (which will pass).
*
* 4. Both threads are running
 * Both threads will be woken. The wrong thread will fail its condition check
 * and execute another wait, while the correct thread will pass its condition
* check.
*
* @t: the task to set the thread ID for
*/
int set_thread_tidr(struct task_struct *t)
{
if (!cpu_has_feature(CPU_FTR_P9_TIDR))
return -EINVAL;
if (t != current)
return -EINVAL;
if (t->thread.tidr)
return 0;
t->thread.tidr = (u16)task_pid_nr(t);
mtspr(SPRN_TIDR, t->thread.tidr);
return 0;
}
EXPORT_SYMBOL_GPL(set_thread_tidr);
#endif /* CONFIG_PPC64 */
/*
* this gets called so that we can store coprocessor state into memory and
* copy the current task into the new thread.
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
flush_all_to_thread(src);
/*
* Flush TM state out so we can copy it. __switch_to_tm() does this
* flush but it removes the checkpointed state from the current CPU and
* transitions the CPU out of TM mode. Hence we need to call
* tm_recheckpoint_new_task() (on the same task) to restore the
* checkpointed state back and the TM mode.
*
* Can't pass dst because it isn't ready. Doesn't matter, passing
* dst is only important for __switch_to()
*/
__switch_to_tm(src, src);
*dst = *src;
clear_task_ebb(dst);
return 0;
}
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_64S_HASH_MMU
unsigned long sp_vsid;
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
if (radix_enabled())
return;
if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
<< SLB_VSID_SHIFT_1T;
else
sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
<< SLB_VSID_SHIFT;
sp_vsid |= SLB_VSID_KERNEL | llp;
p->thread.ksp_vsid = sp_vsid;
#endif
}
/*
* Copy a thread..
*/
/*
* Copy architecture-specific thread state
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
struct pt_regs *kregs; /* Switch frame regs */
extern void ret_from_fork(void);
extern void ret_from_fork_scv(void);
extern void ret_from_kernel_user_thread(void);
extern void start_kernel_thread(void);
void (*f)(void);
unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int i;
#endif
klp_init_thread_info(p);
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
/* Create initial minimum stack frame. */
sp -= STACK_FRAME_MIN_SIZE;
((unsigned long *)sp)[0] = 0;
f = start_kernel_thread;
p->thread.regs = NULL; /* no user register state */
clear_tsk_compat_task(p);
} else {
/* user thread */
struct pt_regs *childregs;
/* Create initial user return stack frame. */
sp -= STACK_USER_INT_FRAME_SIZE;
*(unsigned long *)(sp + STACK_INT_FRAME_MARKER) = STACK_FRAME_REGS_MARKER;
childregs = (struct pt_regs *)(sp + STACK_INT_FRAME_REGS);
if (unlikely(args->fn)) {
/*
* A user space thread, but it first runs a kernel
* thread, and then returns as though it had called
* execve rather than fork, so user regs will be
* filled in (e.g., by kernel_execve()).
*/
((unsigned long *)sp)[0] = 0;
memset(childregs, 0, sizeof(struct pt_regs));
#ifdef CONFIG_PPC64
childregs->softe = IRQS_ENABLED;
#endif
f = ret_from_kernel_user_thread;
} else {
struct pt_regs *regs = current_pt_regs();
unsigned long clone_flags = args->flags;
unsigned long usp = args->stack;
/* Copy registers */
*childregs = *regs;
if (usp)
childregs->gpr[1] = usp;
((unsigned long *)sp)[0] = childregs->gpr[1];
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
WARN_ON_ONCE(childregs->softe != IRQS_ENABLED);
#endif
if (clone_flags & CLONE_SETTLS) {
unsigned long tls = args->tls;
if (!is_32bit_task())
childregs->gpr[13] = tls;
else
childregs->gpr[2] = tls;
}
if (trap_is_scv(regs))
f = ret_from_fork_scv;
else
f = ret_from_fork;
}
childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
p->thread.regs = childregs;
}
/*
* The way this works is that at some point in the future
* some task will call _switch to switch to the new task.
* That will pop off the stack frame created below and start
* the new task running at ret_from_fork. The new task will
	 * do some housekeeping and then return from the fork or clone
* system call, using the stack frame created above.
*/
((unsigned long *)sp)[STACK_FRAME_LR_SAVE] = (unsigned long)f;
sp -= STACK_SWITCH_FRAME_SIZE;
((unsigned long *)sp)[0] = sp + STACK_SWITCH_FRAME_SIZE;
kregs = (struct pt_regs *)(sp + STACK_SWITCH_FRAME_REGS);
kregs->nip = ppc_function_entry(f);
if (unlikely(args->fn)) {
/*
* Put kthread fn, arg parameters in non-volatile GPRs in the
* switch frame so they are loaded by _switch before it returns
* to ret_from_kernel_thread.
*/
kregs->gpr[14] = ppc_function_entry((void *)args->fn);
kregs->gpr[15] = (unsigned long)args->fn_arg;
}
p->thread.ksp = sp;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
for (i = 0; i < nr_wp_slots(); i++)
p->thread.ptrace_bps[i] = NULL;
#endif
#ifdef CONFIG_PPC_FPU_REGS
p->thread.fp_save_area = NULL;
#endif
#ifdef CONFIG_ALTIVEC
p->thread.vr_save_area = NULL;
#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
p->thread.kuap = KUAP_NONE;
#endif
#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
p->thread.pid = MMU_NO_CONTEXT;
#endif
setup_ksp_vsid(p, sp);
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_DSCR)) {
p->thread.dscr_inherit = current->thread.dscr_inherit;
p->thread.dscr = mfspr(SPRN_DSCR);
}
p->thread.tidr = 0;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE))
p->thread.hashkeyr = current->thread.hashkeyr;
#endif
return 0;
}
void preload_new_slb_context(unsigned long start, unsigned long sp);
/*
* Set up a thread for executing a new program
*/
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
preload_new_slb_context(start, sp);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Clear any transactional state, we're exec()ing. The cause is
* not important as there will never be a recheckpoint so it's not
* user visible.
*/
if (MSR_TM_SUSPENDED(mfmsr()))
tm_reclaim_current(0);
#endif
	memset(&regs->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0]));
regs->ctr = 0;
regs->link = 0;
regs->xer = 0;
regs->ccr = 0;
regs->gpr[1] = sp;
#ifdef CONFIG_PPC32
regs->mq = 0;
regs->nip = start;
regs->msr = MSR_USER;
#else
if (!is_32bit_task()) {
unsigned long entry;
if (is_elf2_task()) {
/* Look ma, no function descriptors! */
entry = start;
/*
* Ulrich says:
* The latest iteration of the ABI requires that when
* calling a function (at its global entry point),
* the caller must ensure r12 holds the entry point
* address (so that the function can quickly
* establish addressability).
*/
regs->gpr[12] = start;
/* Make sure that's restored on entry to userspace. */
set_thread_flag(TIF_RESTOREALL);
} else {
unsigned long toc;
/* start is a relocated pointer to the function
* descriptor for the elf _start routine. The first
* entry in the function descriptor is the entry
* address of _start and the second entry is the TOC
* value we need to use.
*/
__get_user(entry, (unsigned long __user *)start);
__get_user(toc, (unsigned long __user *)start+1);
/* Check whether the e_entry function descriptor entries
* need to be relocated before we can use them.
*/
if (load_addr != 0) {
entry += load_addr;
toc += load_addr;
}
regs->gpr[2] = toc;
}
regs_set_return_ip(regs, entry);
regs_set_return_msr(regs, MSR_USER64);
} else {
regs->gpr[2] = 0;
regs_set_return_ip(regs, start);
regs_set_return_msr(regs, MSR_USER32);
}
#endif
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
#endif
current->thread.load_slb = 0;
current->thread.load_fp = 0;
#ifdef CONFIG_PPC_FPU_REGS
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
current->thread.fp_save_area = NULL;
#endif
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
current->thread.vr_save_area = NULL;
current->thread.vrsave = 0;
current->thread.used_vr = 0;
current->thread.load_vec = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
memset(current->thread.evr, 0, sizeof(current->thread.evr));
current->thread.acc = 0;
current->thread.spefscr = 0;
current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
current->thread.tm_tfhar = 0;
current->thread.tm_texasr = 0;
current->thread.tm_tfiar = 0;
current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) {
current->thread.hashkeyr = get_random_long();
mtspr(SPRN_HASHKEYR, current->thread.hashkeyr);
}
#endif /* CONFIG_PPC_BOOK3S_64 */
}
EXPORT_SYMBOL(start_thread);
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
| PR_FP_EXC_RES | PR_FP_EXC_INV)
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
struct pt_regs *regs = tsk->thread.regs;
/* This is a bit hairy. If we are an SPE enabled processor
* (have embedded fp) we store the IEEE exception enable flags in
* fpexc_mode. fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
if (val & PR_FP_EXC_SW_ENABLE) {
if (cpu_has_feature(CPU_FTR_SPE)) {
/*
* When the sticky exception bits are set
* directly by userspace, it must call prctl
* with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
* in the existing prctl settings) or
* PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
* the bits being set). <fenv.h> functions
* saving and restoring the whole
* floating-point environment need to do so
* anyway to restore the prctl settings from
* the saved environment.
*/
#ifdef CONFIG_SPE
tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
tsk->thread.fpexc_mode = val &
(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
#endif
return 0;
} else {
return -EINVAL;
}
}
/* on a CONFIG_SPE this does not hurt us. The bits that
* __pack_fe01 use do not overlap with bits used for
* PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
* on CONFIG_SPE implementations are reserved so writing to
* them does not change anything */
if (val > PR_FP_EXC_PRECISE)
return -EINVAL;
tsk->thread.fpexc_mode = __pack_fe01(val);
if (regs != NULL && (regs->msr & MSR_FP) != 0) {
regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
| tsk->thread.fpexc_mode);
}
return 0;
}
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
unsigned int val = 0;
if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
if (cpu_has_feature(CPU_FTR_SPE)) {
/*
* When the sticky exception bits are set
* directly by userspace, it must call prctl
* with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
* in the existing prctl settings) or
* PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
* the bits being set). <fenv.h> functions
* saving and restoring the whole
* floating-point environment need to do so
* anyway to restore the prctl settings from
* the saved environment.
*/
#ifdef CONFIG_SPE
tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
val = tsk->thread.fpexc_mode;
#endif
} else
return -EINVAL;
} else {
val = __unpack_fe01(tsk->thread.fpexc_mode);
}
return put_user(val, (unsigned int __user *) adr);
}
int set_endian(struct task_struct *tsk, unsigned int val)
{
struct pt_regs *regs = tsk->thread.regs;
if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
(val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
return -EINVAL;
if (regs == NULL)
return -EINVAL;
if (val == PR_ENDIAN_BIG)
regs_set_return_msr(regs, regs->msr & ~MSR_LE);
else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
regs_set_return_msr(regs, regs->msr | MSR_LE);
else
return -EINVAL;
return 0;
}
int get_endian(struct task_struct *tsk, unsigned long adr)
{
struct pt_regs *regs = tsk->thread.regs;
unsigned int val;
if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
!cpu_has_feature(CPU_FTR_REAL_LE))
return -EINVAL;
if (regs == NULL)
return -EINVAL;
if (regs->msr & MSR_LE) {
if (cpu_has_feature(CPU_FTR_REAL_LE))
val = PR_ENDIAN_LITTLE;
else
val = PR_ENDIAN_PPC_LITTLE;
} else
val = PR_ENDIAN_BIG;
return put_user(val, (unsigned int __user *)adr);
}
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
tsk->thread.align_ctl = val;
return 0;
}
int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
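/*
 * Stack validation helpers: a stack pointer is accepted if it lies within the
 * relevant stack area with at least nbytes of room above it.
 */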
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
unsigned long stack_page;
unsigned long cpu = task_cpu(p);
if (!hardirq_ctx[cpu] || !softirq_ctx[cpu])
return 0;
stack_page = (unsigned long)hardirq_ctx[cpu];
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
stack_page = (unsigned long)softirq_ctx[cpu];
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
return 0;
}
static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
#ifdef CONFIG_PPC64
unsigned long stack_page;
unsigned long cpu = task_cpu(p);
if (!paca_ptrs)
return 0;
if (!paca_ptrs[cpu]->emergency_sp)
return 0;
# ifdef CONFIG_PPC_BOOK3S_64
if (!paca_ptrs[cpu]->nmi_emergency_sp || !paca_ptrs[cpu]->mc_emergency_sp)
return 0;
#endif
stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
# ifdef CONFIG_PPC_BOOK3S_64
stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
# endif
#endif
return 0;
}
/*
 * Validate a stack frame of at least the given minimum size; used when we are
 * looking at an object on the stack beyond the minimum frame.
*/
int validate_sp_size(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
unsigned long stack_page = (unsigned long)task_stack_page(p);
if (sp < THREAD_SIZE)
return 0;
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
if (valid_irq_stack(sp, p, nbytes))
return 1;
return valid_emergency_stack(sp, p, nbytes);
}
int validate_sp(unsigned long sp, struct task_struct *p)
{
return validate_sp_size(sp, p, STACK_FRAME_MIN_SIZE);
}
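/*
 * Walk the task's stack frames and return the first saved LR that is outside
 * the scheduler, i.e. the address the task is waiting at (its "wchan").
 */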
static unsigned long ___get_wchan(struct task_struct *p)
{
unsigned long ip, sp;
int count = 0;
sp = p->thread.ksp;
if (!validate_sp(sp, p))
return 0;
do {
sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
if (!validate_sp(sp, p) || task_is_running(p))
return 0;
if (count > 0) {
ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
if (!in_sched_functions(ip))
return ip;
}
} while (count++ < 16);
return 0;
}
unsigned long __get_wchan(struct task_struct *p)
{
unsigned long ret;
if (!try_get_task_stack(p))
return 0;
ret = ___get_wchan(p);
put_task_stack(p);
return ret;
}
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
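/*
 * Print a backtrace for the given task, following saved link registers frame
 * by frame and dumping the full register state whenever an exception frame
 * marker is found on the stack.
 */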
void __no_sanitize_address show_stack(struct task_struct *tsk,
unsigned long *stack,
const char *loglvl)
{
unsigned long sp, ip, lr, newsp;
int count = 0;
int firstframe = 1;
unsigned long ret_addr;
int ftrace_idx = 0;
if (tsk == NULL)
tsk = current;
if (!try_get_task_stack(tsk))
return;
sp = (unsigned long) stack;
if (sp == 0) {
if (tsk == current)
sp = current_stack_frame();
else
sp = tsk->thread.ksp;
}
lr = 0;
printk("%sCall Trace:\n", loglvl);
do {
if (!validate_sp(sp, tsk))
break;
stack = (unsigned long *) sp;
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
if (!firstframe || ip != lr) {
printk("%s["REG"] ["REG"] %pS",
loglvl, sp, ip, (void *)ip);
ret_addr = ftrace_graph_ret_addr(current,
&ftrace_idx, ip, stack);
if (ret_addr != ip)
pr_cont(" (%pS)", (void *)ret_addr);
if (firstframe)
pr_cont(" (unreliable)");
pr_cont("\n");
}
firstframe = 0;
/*
* See if this is an exception frame.
* We look for the "regs" marker in the current frame.
*
		 * STACK_SWITCH_FRAME_SIZE is the smallest frame that could
		 * hold a pt_regs; if that does not fit then the frame can't
		 * have regs.
*/
if (validate_sp_size(sp, tsk, STACK_SWITCH_FRAME_SIZE)
&& stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {
struct pt_regs *regs = (struct pt_regs *)
(sp + STACK_INT_FRAME_REGS);
lr = regs->link;
printk("%s--- interrupt: %lx at %pS\n",
loglvl, regs->trap, (void *)regs->nip);
__show_regs(regs);
printk("%s--- interrupt: %lx\n",
loglvl, regs->trap);
firstframe = 1;
}
sp = newsp;
} while (count++ < kstack_depth_to_print);
put_task_stack(tsk);
}
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
struct thread_info *ti = current_thread_info();
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
/*
* Least significant bit (RUN) is the only writable bit of
* the CTRL register, so we can avoid mfspr. 2.06 is not the
* earliest ISA where this is the case, but it's convenient.
*/
mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
} else {
unsigned long ctrl;
/*
* Some architectures (e.g., Cell) have writable fields other
* than RUN, so do the read-modify-write.
*/
ctrl = mfspr(SPRN_CTRLF);
ctrl |= CTRL_RUNLATCH;
mtspr(SPRN_CTRLT, ctrl);
}
ti->local_flags |= _TLF_RUNLATCH;
}
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
struct thread_info *ti = current_thread_info();
ti->local_flags &= ~_TLF_RUNLATCH;
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
mtspr(SPRN_CTRLT, 0);
} else {
unsigned long ctrl;
ctrl = mfspr(SPRN_CTRLF);
ctrl &= ~CTRL_RUNLATCH;
mtspr(SPRN_CTRLT, ctrl);
}
}
#endif /* CONFIG_PPC64 */
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_u32_below(PAGE_SIZE);
return sp & ~0xf;
}
| linux-master | arch/powerpc/kernel/process.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Firmware Assisted dump: A robust mechanism to get reliable kernel crash
* dump with assistance from firmware. This approach does not use kexec,
* instead firmware assists in booting the kdump kernel while preserving
 * memory contents. Most of the code has been adapted from the phyp-assisted
 * dump implementation written by Linas Vepstas and Manish Ahuja.
*
* Copyright 2011 IBM Corporation
* Author: Mahesh Salgaonkar <[email protected]>
*/
#undef DEBUG
#define pr_fmt(fmt) "fadump: " fmt
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/cma.h>
#include <linux/hugetlb.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/page.h>
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
#include <asm/setup.h>
#include <asm/interrupt.h>
/*
* The CPU who acquired the lock to trigger the fadump crash should
* wait for other CPUs to enter.
*
* The timeout is in milliseconds.
*/
#define CRASH_TIMEOUT 500
static struct fw_dump fw_dump;
static void __init fadump_reserve_crash_area(u64 base);
#ifndef CONFIG_PRESERVE_FA_DUMP
static struct kobject *fadump_kobj;
static atomic_t cpus_in_fadump;
static DEFINE_MUTEX(fadump_mutex);
static struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
#define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */
#define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \
sizeof(struct fadump_memory_range))
static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
static struct fadump_mrange_info
reserved_mrange_info = { "reserved", rngs, RESERVED_RNGS_SZ, 0, RESERVED_RNGS_CNT, true };
static void __init early_init_dt_scan_reserved_ranges(unsigned long node);
#ifdef CONFIG_CMA
static struct cma *fadump_cma;
/*
 * fadump_cma_init() - Initialize a CMA area from fadump reserved memory
 *
 * This function initializes a CMA area from fadump reserved memory.
 * The total fadump reserved memory covers the boot memory size plus the
 * CPU state data size, the HPTE region size and metadata.
 * Only the area equivalent to the boot memory size is given to CMA;
 * the remaining portion of fadump reserved memory is not handed to CMA
 * and those pages stay reserved. The boot memory size is aligned per CMA
 * requirements to satisfy the cma_init_reserved_mem() call.
 * Even if that call fails, we still hold the memory reservation and can
 * continue doing fadump.
*/
static int __init fadump_cma_init(void)
{
unsigned long long base, size;
int rc;
if (!fw_dump.fadump_enabled)
return 0;
/*
* Do not use CMA if user has provided fadump=nocma kernel parameter.
* Return 1 to continue with fadump old behaviour.
*/
if (fw_dump.nocma)
return 1;
base = fw_dump.reserve_dump_area_start;
size = fw_dump.boot_memory_size;
if (!size)
return 0;
rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma);
if (rc) {
pr_err("Failed to init cma area for firmware-assisted dump,%d\n", rc);
/*
* Though the CMA init has failed we still have memory
* reservation with us. The reserved memory will be
* blocked from production system usage. Hence return 1,
* so that we can continue with fadump.
*/
return 1;
}
/*
* If CMA activation fails, keep the pages reserved, instead of
* exposing them to buddy allocator. Same as 'fadump=nocma' case.
*/
cma_reserve_pages_on_error(fadump_cma);
/*
* So we now have successfully initialized cma area for fadump.
*/
pr_info("Initialized 0x%lx bytes cma area at %ldMB from 0x%lx "
"bytes of memory reserved for firmware-assisted dump\n",
cma_get_size(fadump_cma),
(unsigned long)cma_get_base(fadump_cma) >> 20,
fw_dump.reserve_dump_area_size);
return 1;
}
#else
static int __init fadump_cma_init(void) { return 1; }
#endif /* CONFIG_CMA */
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
int depth, void *data)
{
if (depth == 0) {
early_init_dt_scan_reserved_ranges(node);
return 0;
}
if (depth != 1)
return 0;
if (strcmp(uname, "rtas") == 0) {
rtas_fadump_dt_scan(&fw_dump, node);
return 1;
}
if (strcmp(uname, "ibm,opal") == 0) {
opal_fadump_dt_scan(&fw_dump, node);
return 1;
}
return 0;
}
/*
* If fadump is registered, check if the memory provided
* falls within boot memory area and reserved memory area.
*/
int is_fadump_memory_area(u64 addr, unsigned long size)
{
u64 d_start, d_end;
if (!fw_dump.dump_registered)
return 0;
if (!size)
return 0;
d_start = fw_dump.reserve_dump_area_start;
d_end = d_start + fw_dump.reserve_dump_area_size;
if (((addr + size) > d_start) && (addr <= d_end))
return 1;
return (addr <= fw_dump.boot_mem_top);
}
int should_fadump_crash(void)
{
if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
return 0;
return 1;
}
int is_fadump_active(void)
{
return fw_dump.dump_active;
}
/*
 * Returns true if there are no holes in the memory area from d_start to d_end,
 * false otherwise.
*/
static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end)
{
phys_addr_t reg_start, reg_end;
bool ret = false;
u64 i, start, end;
	for_each_mem_range(i, &reg_start, &reg_end) {
start = max_t(u64, d_start, reg_start);
end = min_t(u64, d_end, reg_end);
if (d_start < end) {
/* Memory hole from d_start to start */
if (start > d_start)
break;
if (end == d_end) {
ret = true;
break;
}
d_start = end + 1;
}
}
return ret;
}
/*
 * Returns true if there are no holes in the boot memory area,
 * false otherwise.
*/
bool is_fadump_boot_mem_contiguous(void)
{
unsigned long d_start, d_end;
bool ret = false;
int i;
for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
d_start = fw_dump.boot_mem_addr[i];
d_end = d_start + fw_dump.boot_mem_sz[i];
ret = is_fadump_mem_area_contiguous(d_start, d_end);
if (!ret)
break;
}
return ret;
}
/*
 * Returns true if there are no holes in the reserved memory area,
 * false otherwise.
*/
bool is_fadump_reserved_mem_contiguous(void)
{
u64 d_start, d_end;
d_start = fw_dump.reserve_dump_area_start;
d_end = d_start + fw_dump.reserve_dump_area_size;
return is_fadump_mem_area_contiguous(d_start, d_end);
}
/* Print firmware assisted dump configurations for debugging purpose. */
static void __init fadump_show_config(void)
{
int i;
pr_debug("Support for firmware-assisted dump (fadump): %s\n",
(fw_dump.fadump_supported ? "present" : "no support"));
if (!fw_dump.fadump_supported)
return;
pr_debug("Fadump enabled : %s\n",
(fw_dump.fadump_enabled ? "yes" : "no"));
pr_debug("Dump Active : %s\n",
(fw_dump.dump_active ? "yes" : "no"));
pr_debug("Dump section sizes:\n");
pr_debug(" CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
pr_debug(" HPTE region size : %lx\n", fw_dump.hpte_region_size);
pr_debug(" Boot memory size : %lx\n", fw_dump.boot_memory_size);
pr_debug(" Boot memory top : %llx\n", fw_dump.boot_mem_top);
pr_debug("Boot memory regions cnt: %llx\n", fw_dump.boot_mem_regs_cnt);
for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
pr_debug("[%03d] base = %llx, size = %llx\n", i,
fw_dump.boot_mem_addr[i], fw_dump.boot_mem_sz[i]);
}
}
/**
 * fadump_calculate_reserve_size(): reserve a variable boot area of 5% of system RAM
 *
 * Function to find the largest memory size we need to reserve during early
 * boot process. This will be the size of the memory that is required for a
 * kernel to boot successfully.
 *
 * This function has been taken from the phyp-assisted dump feature
 * implementation.
 *
 * Returns the larger of 256MB or 5% of RAM, rounded down to multiples of 256MB.
 *
 * TODO: Come up with a better approach to find out a more accurate memory
 * size that is required for a kernel to boot successfully.
*
*/
static __init u64 fadump_calculate_reserve_size(void)
{
u64 base, size, bootmem_min;
int ret;
if (fw_dump.reserve_bootvar)
pr_warn("'fadump_reserve_mem=' parameter is deprecated in favor of 'crashkernel=' parameter.\n");
/*
* Check if the size is specified through crashkernel= cmdline
* option. If yes, then use that but ignore base as fadump reserves
* memory at a predefined offset.
*/
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&size, &base);
if (ret == 0 && size > 0) {
unsigned long max_size;
if (fw_dump.reserve_bootvar)
pr_info("Using 'crashkernel=' parameter for memory reservation.\n");
fw_dump.reserve_bootvar = (unsigned long)size;
/*
* Adjust if the boot memory size specified is above
* the upper limit.
*/
max_size = memblock_phys_mem_size() / MAX_BOOT_MEM_RATIO;
if (fw_dump.reserve_bootvar > max_size) {
fw_dump.reserve_bootvar = max_size;
pr_info("Adjusted boot memory size to %luMB\n",
(fw_dump.reserve_bootvar >> 20));
}
return fw_dump.reserve_bootvar;
} else if (fw_dump.reserve_bootvar) {
/*
* 'fadump_reserve_mem=' is being used to reserve memory
* for firmware-assisted dump.
*/
return fw_dump.reserve_bootvar;
}
/* divide by 20 to get 5% of value */
size = memblock_phys_mem_size() / 20;
	/* round it down to a multiple of 256MB */
size = size & ~0x0FFFFFFFUL;
/* Truncate to memory_limit. We don't want to over reserve the memory.*/
if (memory_limit && size > memory_limit)
size = memory_limit;
bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
return (size > bootmem_min ? size : bootmem_min);
}
/*
* Calculate the total memory size required to be reserved for
* firmware-assisted dump registration.
*/
static unsigned long __init get_fadump_area_size(void)
{
unsigned long size = 0;
size += fw_dump.cpu_state_data_size;
size += fw_dump.hpte_region_size;
/*
* Account for pagesize alignment of boot memory area destination address.
	 * This facilitates mmap reading of the first kernel's memory.
*/
size = PAGE_ALIGN(size);
size += fw_dump.boot_memory_size;
size += sizeof(struct fadump_crash_info_header);
size += sizeof(struct elfhdr); /* ELF core header.*/
size += sizeof(struct elf_phdr); /* place holder for cpu notes */
/* Program headers for crash memory regions. */
size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2);
size = PAGE_ALIGN(size);
/* This is to hold kernel metadata on platforms that support it */
size += (fw_dump.ops->fadump_get_metadata_size ?
fw_dump.ops->fadump_get_metadata_size() : 0);
return size;
}
static int __init add_boot_mem_region(unsigned long rstart,
unsigned long rsize)
{
int i = fw_dump.boot_mem_regs_cnt++;
if (fw_dump.boot_mem_regs_cnt > FADUMP_MAX_MEM_REGS) {
fw_dump.boot_mem_regs_cnt = FADUMP_MAX_MEM_REGS;
return 0;
}
pr_debug("Added boot memory range[%d] [%#016lx-%#016lx)\n",
i, rstart, (rstart + rsize));
fw_dump.boot_mem_addr[i] = rstart;
fw_dump.boot_mem_sz[i] = rsize;
return 1;
}
/*
* Firmware usually has a hard limit on the data it can copy per region.
* Honour that by splitting a memory range into multiple regions.
*/
static int __init add_boot_mem_regions(unsigned long mstart,
unsigned long msize)
{
unsigned long rstart, rsize, max_size;
int ret = 1;
rstart = mstart;
max_size = fw_dump.max_copy_size ? fw_dump.max_copy_size : msize;
while (msize) {
if (msize > max_size)
rsize = max_size;
else
rsize = msize;
ret = add_boot_mem_region(rstart, rsize);
if (!ret)
break;
msize -= rsize;
rstart += rsize;
}
return ret;
}
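/*
 * Walk the memblock ranges and record boot memory regions until
 * boot_memory_size worth of memory is covered, accounting for holes so that
 * boot_mem_top reflects the covered physical span.
 */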
static int __init fadump_get_boot_mem_regions(void)
{
unsigned long size, cur_size, hole_size, last_end;
unsigned long mem_size = fw_dump.boot_memory_size;
phys_addr_t reg_start, reg_end;
int ret = 1;
u64 i;
fw_dump.boot_mem_regs_cnt = 0;
last_end = 0;
hole_size = 0;
cur_size = 0;
	for_each_mem_range(i, &reg_start, &reg_end) {
size = reg_end - reg_start;
hole_size += (reg_start - last_end);
if ((cur_size + size) >= mem_size) {
size = (mem_size - cur_size);
ret = add_boot_mem_regions(reg_start, size);
break;
}
mem_size -= size;
cur_size += size;
ret = add_boot_mem_regions(reg_start, size);
if (!ret)
break;
last_end = reg_end;
}
fw_dump.boot_mem_top = PAGE_ALIGN(fw_dump.boot_memory_size + hole_size);
return ret;
}
/*
 * Returns true if the given range overlaps with the reserved memory ranges
 * starting at idx, and updates idx to the index of the overlapping memory
 * range. Returns false otherwise.
*/
static bool __init overlaps_reserved_ranges(u64 base, u64 end, int *idx)
{
bool ret = false;
int i;
for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
u64 rbase = reserved_mrange_info.mem_ranges[i].base;
u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;
if (end <= rbase)
break;
if ((end > rbase) && (base < rend)) {
*idx = i;
ret = true;
break;
}
}
return ret;
}
/*
* Locate a suitable memory area to reserve memory for FADump. While at it,
* lookup reserved-ranges & avoid overlap with them, as they are used by F/W.
*/
static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
{
struct fadump_memory_range *mrngs;
phys_addr_t mstart, mend;
int idx = 0;
u64 i, ret = 0;
mrngs = reserved_mrange_info.mem_ranges;
for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
&mstart, &mend, NULL) {
pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
i, mstart, mend, base);
if (mstart > base)
base = PAGE_ALIGN(mstart);
while ((mend > base) && ((mend - base) >= size)) {
if (!overlaps_reserved_ranges(base, base+size, &idx)) {
ret = base;
goto out;
}
base = mrngs[idx].base + mrngs[idx].size;
base = PAGE_ALIGN(base);
}
}
out:
return ret;
}
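/*
 * Reserve memory for firmware-assisted dump. If a dump is already active,
 * reserve the area holding the previous kernel's memory; otherwise size the
 * boot memory area, locate a suitable chunk low in RAM that avoids firmware
 * reserved ranges, and reserve it (optionally backing it with CMA).
 */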
int __init fadump_reserve_mem(void)
{
u64 base, size, mem_boundary, bootmem_min;
int ret = 1;
if (!fw_dump.fadump_enabled)
return 0;
if (!fw_dump.fadump_supported) {
pr_info("Firmware-Assisted Dump is not supported on this hardware\n");
goto error_out;
}
/*
* Initialize boot memory size
* If dump is active then we have already calculated the size during
* first kernel.
*/
if (!fw_dump.dump_active) {
fw_dump.boot_memory_size =
PAGE_ALIGN(fadump_calculate_reserve_size());
#ifdef CONFIG_CMA
if (!fw_dump.nocma) {
fw_dump.boot_memory_size =
ALIGN(fw_dump.boot_memory_size,
CMA_MIN_ALIGNMENT_BYTES);
}
#endif
bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
if (fw_dump.boot_memory_size < bootmem_min) {
pr_err("Can't enable fadump with boot memory size (0x%lx) less than 0x%llx\n",
fw_dump.boot_memory_size, bootmem_min);
goto error_out;
}
if (!fadump_get_boot_mem_regions()) {
pr_err("Too many holes in boot memory area to enable fadump\n");
goto error_out;
}
}
/*
* Calculate the memory boundary.
* If memory_limit is less than actual memory boundary then reserve
* the memory for fadump beyond the memory_limit and adjust the
* memory_limit accordingly, so that the running kernel can run with
* specified memory_limit.
*/
if (memory_limit && memory_limit < memblock_end_of_DRAM()) {
size = get_fadump_area_size();
if ((memory_limit + size) < memblock_end_of_DRAM())
memory_limit += size;
else
memory_limit = memblock_end_of_DRAM();
printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
" dump, now %#016llx\n", memory_limit);
}
if (memory_limit)
mem_boundary = memory_limit;
else
mem_boundary = memblock_end_of_DRAM();
base = fw_dump.boot_mem_top;
size = get_fadump_area_size();
fw_dump.reserve_dump_area_size = size;
if (fw_dump.dump_active) {
pr_info("Firmware-assisted dump is active.\n");
#ifdef CONFIG_HUGETLB_PAGE
/*
* FADump capture kernel doesn't care much about hugepages.
* In fact, handling hugepages in capture kernel is asking for
* trouble. So, disable HugeTLB support when fadump is active.
*/
hugetlb_disabled = true;
#endif
/*
* If last boot has crashed then reserve all the memory
* above boot memory size so that we don't touch it until
* dump is written to disk by userspace tool. This memory
* can be released for general use by invalidating fadump.
*/
fadump_reserve_crash_area(base);
pr_debug("fadumphdr_addr = %#016lx\n", fw_dump.fadumphdr_addr);
pr_debug("Reserve dump area start address: 0x%lx\n",
fw_dump.reserve_dump_area_start);
} else {
/*
* Reserve memory at an offset closer to bottom of the RAM to
* minimize the impact of memory hot-remove operation.
*/
base = fadump_locate_reserve_mem(base, size);
if (!base || (base + size > mem_boundary)) {
pr_err("Failed to find memory chunk for reservation!\n");
goto error_out;
}
fw_dump.reserve_dump_area_start = base;
/*
* Calculate the kernel metadata address and register it with
* f/w if the platform supports.
*/
if (fw_dump.ops->fadump_setup_metadata &&
(fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0))
goto error_out;
if (memblock_reserve(base, size)) {
pr_err("Failed to reserve memory!\n");
goto error_out;
}
pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n",
(size >> 20), base, (memblock_phys_mem_size() >> 20));
ret = fadump_cma_init();
}
return ret;
error_out:
fw_dump.fadump_enabled = 0;
fw_dump.reserve_dump_area_size = 0;
return 0;
}
/* Look for fadump= cmdline option. */
static int __init early_fadump_param(char *p)
{
if (!p)
return 1;
if (strncmp(p, "on", 2) == 0)
fw_dump.fadump_enabled = 1;
else if (strncmp(p, "off", 3) == 0)
fw_dump.fadump_enabled = 0;
else if (strncmp(p, "nocma", 5) == 0) {
fw_dump.fadump_enabled = 1;
fw_dump.nocma = 1;
}
return 0;
}
early_param("fadump", early_fadump_param);
/*
* Look for fadump_reserve_mem= cmdline option
 * TODO: Remove references to the 'fadump_reserve_mem=' parameter once the
 * 'crashkernel=' parameter is fully adopted.
*/
static int __init early_fadump_reserve_mem(char *p)
{
if (p)
fw_dump.reserve_bootvar = memparse(p, &p);
return 0;
}
early_param("fadump_reserve_mem", early_fadump_reserve_mem);
void crash_fadump(struct pt_regs *regs, const char *str)
{
unsigned int msecs;
struct fadump_crash_info_header *fdh = NULL;
int old_cpu, this_cpu;
/* Do not include first CPU */
unsigned int ncpus = num_online_cpus() - 1;
if (!should_fadump_crash())
return;
/*
* old_cpu == -1 means this is the first CPU which has come here,
* go ahead and trigger fadump.
*
	 * old_cpu != -1 means some other CPU is already on its way
* to trigger fadump, just keep looping here.
*/
this_cpu = smp_processor_id();
old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu);
if (old_cpu != -1) {
atomic_inc(&cpus_in_fadump);
/*
* We can't loop here indefinitely. Wait as long as fadump
* is in force. If we race with fadump un-registration this
* loop will break and then we go down to normal panic path
* and reboot. If fadump is in force the first crashing
* cpu will definitely trigger fadump.
*/
while (fw_dump.dump_registered)
cpu_relax();
return;
}
fdh = __va(fw_dump.fadumphdr_addr);
fdh->crashing_cpu = crashing_cpu;
crash_save_vmcoreinfo();
if (regs)
fdh->regs = *regs;
else
ppc_save_regs(&fdh->regs);
fdh->cpu_mask = *cpu_online_mask;
/*
* If we came in via system reset, wait a while for the secondary
* CPUs to enter.
*/
if (TRAP(&(fdh->regs)) == INTERRUPT_SYSTEM_RESET) {
msecs = CRASH_TIMEOUT;
while ((atomic_read(&cpus_in_fadump) < ncpus) && (--msecs > 0))
mdelay(1);
}
fw_dump.ops->fadump_trigger(fdh, str);
}
u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
{
struct elf_prstatus prstatus;
memset(&prstatus, 0, sizeof(prstatus));
/*
* FIXME: How do i get PID? Do I really need it?
* prstatus.pr_pid = ????
*/
elf_core_copy_regs(&prstatus.pr_reg, regs);
buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
&prstatus, sizeof(prstatus));
return buf;
}
void __init fadump_update_elfcore_header(char *bufp)
{
struct elf_phdr *phdr;
bufp += sizeof(struct elfhdr);
/* First note is a place holder for cpu notes info. */
phdr = (struct elf_phdr *)bufp;
if (phdr->p_type == PT_NOTE) {
phdr->p_paddr = __pa(fw_dump.cpu_notes_buf_vaddr);
phdr->p_offset = phdr->p_paddr;
phdr->p_filesz = fw_dump.cpu_notes_buf_size;
phdr->p_memsz = fw_dump.cpu_notes_buf_size;
}
return;
}
static void *__init fadump_alloc_buffer(unsigned long size)
{
unsigned long count, i;
struct page *page;
void *vaddr;
vaddr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
if (!vaddr)
return NULL;
count = PAGE_ALIGN(size) / PAGE_SIZE;
page = virt_to_page(vaddr);
for (i = 0; i < count; i++)
mark_page_reserved(page + i);
return vaddr;
}
static void fadump_free_buffer(unsigned long vaddr, unsigned long size)
{
free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL);
}
s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus)
{
/* Allocate buffer to hold cpu crash notes. */
fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t);
fw_dump.cpu_notes_buf_size = PAGE_ALIGN(fw_dump.cpu_notes_buf_size);
fw_dump.cpu_notes_buf_vaddr =
(unsigned long)fadump_alloc_buffer(fw_dump.cpu_notes_buf_size);
if (!fw_dump.cpu_notes_buf_vaddr) {
pr_err("Failed to allocate %ld bytes for CPU notes buffer\n",
fw_dump.cpu_notes_buf_size);
return -ENOMEM;
}
pr_debug("Allocated buffer for cpu notes of size %ld at 0x%lx\n",
fw_dump.cpu_notes_buf_size,
fw_dump.cpu_notes_buf_vaddr);
return 0;
}
void fadump_free_cpu_notes_buf(void)
{
if (!fw_dump.cpu_notes_buf_vaddr)
return;
fadump_free_buffer(fw_dump.cpu_notes_buf_vaddr,
fw_dump.cpu_notes_buf_size);
fw_dump.cpu_notes_buf_vaddr = 0;
fw_dump.cpu_notes_buf_size = 0;
}
static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info)
{
if (mrange_info->is_static) {
mrange_info->mem_range_cnt = 0;
return;
}
kfree(mrange_info->mem_ranges);
memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0,
(sizeof(struct fadump_mrange_info) - RNG_NAME_SZ));
}
/*
* Allocate or reallocate mem_ranges array in incremental units
* of PAGE_SIZE.
*/
static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info)
{
struct fadump_memory_range *new_array;
u64 new_size;
new_size = mrange_info->mem_ranges_sz + PAGE_SIZE;
pr_debug("Allocating %llu bytes of memory for %s memory ranges\n",
new_size, mrange_info->name);
new_array = krealloc(mrange_info->mem_ranges, new_size, GFP_KERNEL);
if (new_array == NULL) {
pr_err("Insufficient memory for setting up %s memory ranges\n",
mrange_info->name);
fadump_free_mem_ranges(mrange_info);
return -ENOMEM;
}
mrange_info->mem_ranges = new_array;
mrange_info->mem_ranges_sz = new_size;
mrange_info->max_mem_ranges = (new_size /
sizeof(struct fadump_memory_range));
return 0;
}
static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
u64 base, u64 end)
{
struct fadump_memory_range *mem_ranges = mrange_info->mem_ranges;
bool is_adjacent = false;
u64 start, size;
if (base == end)
return 0;
/*
* Fold adjacent memory ranges to bring down the memory ranges/
* PT_LOAD segments count.
*/
if (mrange_info->mem_range_cnt) {
start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
size = mem_ranges[mrange_info->mem_range_cnt - 1].size;
/*
* Boot memory area needs separate PT_LOAD segment(s) as it
* is moved to a different location at the time of crash.
* So, fold only if the region is not boot memory area.
*/
if ((start + size) == base && start >= fw_dump.boot_mem_top)
is_adjacent = true;
}
if (!is_adjacent) {
/* resize the array on reaching the limit */
if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) {
int ret;
if (mrange_info->is_static) {
pr_err("Reached array size limit for %s memory ranges\n",
mrange_info->name);
return -ENOSPC;
}
ret = fadump_alloc_mem_ranges(mrange_info);
if (ret)
return ret;
/* Update to the new resized array */
mem_ranges = mrange_info->mem_ranges;
}
start = base;
mem_ranges[mrange_info->mem_range_cnt].base = start;
mrange_info->mem_range_cnt++;
}
mem_ranges[mrange_info->mem_range_cnt - 1].size = (end - start);
pr_debug("%s_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
mrange_info->name, (mrange_info->mem_range_cnt - 1),
start, end - 1, (end - start));
return 0;
}
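/*
 * Add the [start, end) range to the crash memory ranges, carving out any
 * overlap with the reserved dump area so it is not captured in the dump.
 */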
static int fadump_exclude_reserved_area(u64 start, u64 end)
{
u64 ra_start, ra_end;
int ret = 0;
ra_start = fw_dump.reserve_dump_area_start;
ra_end = ra_start + fw_dump.reserve_dump_area_size;
if ((ra_start < end) && (ra_end > start)) {
if ((start < ra_start) && (end > ra_end)) {
ret = fadump_add_mem_range(&crash_mrange_info,
start, ra_start);
if (ret)
return ret;
ret = fadump_add_mem_range(&crash_mrange_info,
ra_end, end);
} else if (start < ra_start) {
ret = fadump_add_mem_range(&crash_mrange_info,
start, ra_start);
} else if (ra_end < end) {
ret = fadump_add_mem_range(&crash_mrange_info,
ra_end, end);
}
} else
ret = fadump_add_mem_range(&crash_mrange_info, start, end);
return ret;
}
static int fadump_init_elfcore_header(char *bufp)
{
struct elfhdr *elf;
elf = (struct elfhdr *) bufp;
bufp += sizeof(struct elfhdr);
memcpy(elf->e_ident, ELFMAG, SELFMAG);
elf->e_ident[EI_CLASS] = ELF_CLASS;
elf->e_ident[EI_DATA] = ELF_DATA;
elf->e_ident[EI_VERSION] = EV_CURRENT;
elf->e_ident[EI_OSABI] = ELF_OSABI;
memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
elf->e_type = ET_CORE;
elf->e_machine = ELF_ARCH;
elf->e_version = EV_CURRENT;
elf->e_entry = 0;
elf->e_phoff = sizeof(struct elfhdr);
elf->e_shoff = 0;
if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
elf->e_flags = 2;
else if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
elf->e_flags = 1;
else
elf->e_flags = 0;
elf->e_ehsize = sizeof(struct elfhdr);
elf->e_phentsize = sizeof(struct elf_phdr);
elf->e_phnum = 0;
elf->e_shentsize = 0;
elf->e_shnum = 0;
elf->e_shstrndx = 0;
return 0;
}
/*
 * Traverse the memblock structure and set up crash memory ranges. These
 * ranges will be used to create PT_LOAD program headers in the elfcore header.
*/
static int fadump_setup_crash_memory_ranges(void)
{
u64 i, start, end;
int ret;
pr_debug("Setup crash memory ranges.\n");
crash_mrange_info.mem_range_cnt = 0;
/*
	 * Boot memory region(s) registered with firmware are moved to a
	 * different location at the time of crash. Create separate program
	 * header(s) for these memory chunks with the correct offset.
*/
for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
start = fw_dump.boot_mem_addr[i];
end = start + fw_dump.boot_mem_sz[i];
ret = fadump_add_mem_range(&crash_mrange_info, start, end);
if (ret)
return ret;
}
for_each_mem_range(i, &start, &end) {
/*
* skip the memory chunk that is already added
* (0 through boot_memory_top).
*/
if (start < fw_dump.boot_mem_top) {
if (end > fw_dump.boot_mem_top)
start = fw_dump.boot_mem_top;
else
continue;
}
/* add this range excluding the reserved dump area. */
ret = fadump_exclude_reserved_area(start, end);
if (ret)
return ret;
}
return 0;
}
/*
* If the given physical address falls within the boot memory region then
* return the relocated address that points to the dump region reserved
* for saving initial boot memory contents.
*/
static inline unsigned long fadump_relocate(unsigned long paddr)
{
unsigned long raddr, rstart, rend, rlast, hole_size;
int i;
hole_size = 0;
rlast = 0;
raddr = paddr;
for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
rstart = fw_dump.boot_mem_addr[i];
rend = rstart + fw_dump.boot_mem_sz[i];
hole_size += (rstart - rlast);
if (paddr >= rstart && paddr < rend) {
raddr += fw_dump.boot_mem_dest_addr - hole_size;
break;
}
rlast = rend;
}
pr_debug("vmcoreinfo: paddr = 0x%lx, raddr = 0x%lx\n", paddr, raddr);
return raddr;
}
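/*
 * Build the ELF core headers: a PT_NOTE placeholder for CPU notes, a PT_NOTE
 * for vmcoreinfo, and one PT_LOAD header per crash memory range, with boot
 * memory ranges pointing at their relocated destination address.
 */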
static int fadump_create_elfcore_headers(char *bufp)
{
unsigned long long raddr, offset;
struct elf_phdr *phdr;
struct elfhdr *elf;
int i, j;
fadump_init_elfcore_header(bufp);
elf = (struct elfhdr *)bufp;
bufp += sizeof(struct elfhdr);
/*
* setup ELF PT_NOTE, place holder for cpu notes info. The notes info
* will be populated during second kernel boot after crash. Hence
* this PT_NOTE will always be the first elf note.
*
* NOTE: Any new ELF note addition should be placed after this note.
*/
phdr = (struct elf_phdr *)bufp;
bufp += sizeof(struct elf_phdr);
phdr->p_type = PT_NOTE;
phdr->p_flags = 0;
phdr->p_vaddr = 0;
phdr->p_align = 0;
phdr->p_offset = 0;
phdr->p_paddr = 0;
phdr->p_filesz = 0;
phdr->p_memsz = 0;
(elf->e_phnum)++;
/* setup ELF PT_NOTE for vmcoreinfo */
phdr = (struct elf_phdr *)bufp;
bufp += sizeof(struct elf_phdr);
phdr->p_type = PT_NOTE;
phdr->p_flags = 0;
phdr->p_vaddr = 0;
phdr->p_align = 0;
phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
phdr->p_offset = phdr->p_paddr;
phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE;
/* Increment number of program headers. */
(elf->e_phnum)++;
/* setup PT_LOAD sections. */
j = 0;
offset = 0;
raddr = fw_dump.boot_mem_addr[0];
for (i = 0; i < crash_mrange_info.mem_range_cnt; i++) {
u64 mbase, msize;
mbase = crash_mrange_info.mem_ranges[i].base;
msize = crash_mrange_info.mem_ranges[i].size;
if (!msize)
continue;
phdr = (struct elf_phdr *)bufp;
bufp += sizeof(struct elf_phdr);
phdr->p_type = PT_LOAD;
phdr->p_flags = PF_R|PF_W|PF_X;
phdr->p_offset = mbase;
if (mbase == raddr) {
/*
* The entire real memory region will be moved by
* firmware to the specified destination_address.
* Hence set the correct offset.
*/
phdr->p_offset = fw_dump.boot_mem_dest_addr + offset;
if (j < (fw_dump.boot_mem_regs_cnt - 1)) {
offset += fw_dump.boot_mem_sz[j];
raddr = fw_dump.boot_mem_addr[++j];
}
}
phdr->p_paddr = mbase;
phdr->p_vaddr = (unsigned long)__va(mbase);
phdr->p_filesz = msize;
phdr->p_memsz = msize;
phdr->p_align = 0;
/* Increment number of program headers. */
(elf->e_phnum)++;
}
return 0;
}
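/*
* Sketch of the PT_LOAD offset handling above, with hypothetical values:
* if the first boot memory region starts at mbase == raddr == 0x0 with
* size 0x10000000 and boot_mem_dest_addr == 0x40000000, its program
* header gets p_paddr = 0x0 but p_offset = 0x40000000 (the start of the
* relocated dump), and the next boot memory region is read from
* boot_mem_dest_addr + 0x10000000 since offset has advanced by the size
* of the first region. Ranges above boot_mem_top keep p_offset == mbase
* because firmware does not move them.
*/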
static unsigned long init_fadump_header(unsigned long addr)
{
struct fadump_crash_info_header *fdh;
if (!addr)
return 0;
fdh = __va(addr);
addr += sizeof(struct fadump_crash_info_header);
memset(fdh, 0, sizeof(struct fadump_crash_info_header));
fdh->magic_number = FADUMP_CRASH_INFO_MAGIC;
fdh->elfcorehdr_addr = addr;
/* We will set the crashing cpu id in crash_fadump() during crash. */
fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;
/*
* When LPAR is terminated by PHYP, ensure all possible CPUs'
* register data is processed while exporting the vmcore.
*/
fdh->cpu_mask = *cpu_possible_mask;
return addr;
}
static int register_fadump(void)
{
unsigned long addr;
void *vaddr;
int ret;
/*
* If no memory is reserved then we cannot register for firmware-
* assisted dump.
*/
if (!fw_dump.reserve_dump_area_size)
return -ENODEV;
ret = fadump_setup_crash_memory_ranges();
if (ret)
return ret;
addr = fw_dump.fadumphdr_addr;
/* Initialize fadump crash info header. */
addr = init_fadump_header(addr);
vaddr = __va(addr);
pr_debug("Creating ELF core headers at %#016lx\n", addr);
fadump_create_elfcore_headers(vaddr);
/* register the future kernel dump with firmware. */
pr_debug("Registering for firmware-assisted kernel dump...\n");
return fw_dump.ops->fadump_register(&fw_dump);
}
void fadump_cleanup(void)
{
if (!fw_dump.fadump_supported)
return;
/* Invalidate the registration only if dump is active. */
if (fw_dump.dump_active) {
pr_debug("Invalidating firmware-assisted dump registration\n");
fw_dump.ops->fadump_invalidate(&fw_dump);
} else if (fw_dump.dump_registered) {
/* Un-register Firmware-assisted dump if it was registered. */
fw_dump.ops->fadump_unregister(&fw_dump);
fadump_free_mem_ranges(&crash_mrange_info);
}
if (fw_dump.ops->fadump_cleanup)
fw_dump.ops->fadump_cleanup(&fw_dump);
}
static void fadump_free_reserved_memory(unsigned long start_pfn,
unsigned long end_pfn)
{
unsigned long pfn;
unsigned long time_limit = jiffies + HZ;
pr_info("freeing reserved memory (0x%llx - 0x%llx)\n",
PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
free_reserved_page(pfn_to_page(pfn));
if (time_after(jiffies, time_limit)) {
cond_resched();
time_limit = jiffies + HZ;
}
}
}
/*
* Skip memory holes and free memory that was actually reserved.
*/
static void fadump_release_reserved_area(u64 start, u64 end)
{
unsigned long reg_spfn, reg_epfn;
u64 tstart, tend, spfn, epfn;
int i;
spfn = PHYS_PFN(start);
epfn = PHYS_PFN(end);
for_each_mem_pfn_range(i, MAX_NUMNODES, &reg_spfn, &reg_epfn, NULL) {
tstart = max_t(u64, spfn, reg_spfn);
tend = min_t(u64, epfn, reg_epfn);
if (tstart < tend) {
fadump_free_reserved_memory(tstart, tend);
if (tend == epfn)
break;
spfn = tend;
}
}
}
/*
* Sort the mem ranges in-place and merge adjacent ranges
* to minimize the memory ranges count.
*/
static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
{
struct fadump_memory_range *mem_ranges;
u64 base, size;
int i, j, idx;
if (!mrange_info->mem_range_cnt)
return;
/* Sort the memory ranges */
mem_ranges = mrange_info->mem_ranges;
for (i = 0; i < mrange_info->mem_range_cnt; i++) {
idx = i;
for (j = (i + 1); j < mrange_info->mem_range_cnt; j++) {
if (mem_ranges[idx].base > mem_ranges[j].base)
idx = j;
}
if (idx != i)
swap(mem_ranges[idx], mem_ranges[i]);
}
/* Merge adjacent reserved ranges */
idx = 0;
for (i = 1; i < mrange_info->mem_range_cnt; i++) {
base = mem_ranges[i-1].base;
size = mem_ranges[i-1].size;
if (mem_ranges[i].base == (base + size))
mem_ranges[idx].size += mem_ranges[i].size;
else {
idx++;
if (i == idx)
continue;
mem_ranges[idx] = mem_ranges[i];
}
}
mrange_info->mem_range_cnt = idx + 1;
}
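/*
* Worked example with hypothetical ranges: given
*   { base = 0x3000, size = 0x1000 }, { base = 0x1000, size = 0x1000 },
*   { base = 0x2000, size = 0x1000 }
* the selection sort above orders the entries by base and the merge pass
* collapses the now-adjacent ranges into a single entry
*   { base = 0x1000, size = 0x3000 }
* leaving mem_range_cnt == 1.
*/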
/*
* Scan reserved-ranges to consider them while reserving/releasing
* memory for FADump.
*/
static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
{
const __be32 *prop;
int len, ret = -1;
unsigned long i;
/* reserved-ranges already scanned */
if (reserved_mrange_info.mem_range_cnt != 0)
return;
prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
if (!prop)
return;
/*
* Each reserved range is an (address,size) pair, 2 cells each,
* totalling 4 cells per range.
*/
for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
u64 base, size;
base = of_read_number(prop + (i * 4) + 0, 2);
size = of_read_number(prop + (i * 4) + 2, 2);
if (size) {
ret = fadump_add_mem_range(&reserved_mrange_info,
base, base + size);
if (ret < 0) {
pr_warn("some reserved ranges are ignored!\n");
break;
}
}
}
/* Compact reserved ranges */
sort_and_merge_mem_ranges(&reserved_mrange_info);
}
/*
* Release the memory that was reserved during early boot to preserve the
* crashed kernel's memory contents except reserved dump area (permanent
* reservation) and reserved ranges used by F/W. The released memory will
* be available for general use.
*/
static void fadump_release_memory(u64 begin, u64 end)
{
u64 ra_start, ra_end, tstart;
int i, ret;
ra_start = fw_dump.reserve_dump_area_start;
ra_end = ra_start + fw_dump.reserve_dump_area_size;
/*
* If reserved ranges array limit is hit, overwrite the last reserved
* memory range with reserved dump area to ensure it is excluded from
* the memory being released (reused for next FADump registration).
*/
if (reserved_mrange_info.mem_range_cnt ==
reserved_mrange_info.max_mem_ranges)
reserved_mrange_info.mem_range_cnt--;
ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
if (ret != 0)
return;
/* Get the reserved ranges list in order first. */
sort_and_merge_mem_ranges(&reserved_mrange_info);
/* Exclude reserved ranges and release remaining memory */
tstart = begin;
for (i = 0; i < reserved_mrange_info.mem_range_cnt; i++) {
ra_start = reserved_mrange_info.mem_ranges[i].base;
ra_end = ra_start + reserved_mrange_info.mem_ranges[i].size;
if (tstart >= ra_end)
continue;
if (tstart < ra_start)
fadump_release_reserved_area(tstart, ra_start);
tstart = ra_end;
}
if (tstart < end)
fadump_release_reserved_area(tstart, end);
}
static void fadump_invalidate_release_mem(void)
{
mutex_lock(&fadump_mutex);
if (!fw_dump.dump_active) {
mutex_unlock(&fadump_mutex);
return;
}
fadump_cleanup();
mutex_unlock(&fadump_mutex);
fadump_release_memory(fw_dump.boot_mem_top, memblock_end_of_DRAM());
fadump_free_cpu_notes_buf();
/*
* Setup kernel metadata and initialize the kernel dump
* memory structure for FADump re-registration.
*/
if (fw_dump.ops->fadump_setup_metadata &&
(fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0))
pr_warn("Failed to setup kernel metadata!\n");
fw_dump.ops->fadump_init_mem_struct(&fw_dump);
}
static ssize_t release_mem_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int input = -1;
if (!fw_dump.dump_active)
return -EPERM;
if (kstrtoint(buf, 0, &input))
return -EINVAL;
if (input == 1) {
/*
* Take away the '/proc/vmcore'. We are releasing the dump
* memory, hence it will not be valid anymore.
*/
#ifdef CONFIG_PROC_VMCORE
vmcore_cleanup();
#endif
fadump_invalidate_release_mem();
} else
return -EINVAL;
return count;
}
/* Release the reserved memory and disable the FADump */
static void __init unregister_fadump(void)
{
fadump_cleanup();
fadump_release_memory(fw_dump.reserve_dump_area_start,
fw_dump.reserve_dump_area_size);
fw_dump.fadump_enabled = 0;
kobject_put(fadump_kobj);
}
static ssize_t enabled_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
}
static ssize_t mem_reserved_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%ld\n", fw_dump.reserve_dump_area_size);
}
static ssize_t registered_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", fw_dump.dump_registered);
}
static ssize_t registered_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret = 0;
int input = -1;
if (!fw_dump.fadump_enabled || fw_dump.dump_active)
return -EPERM;
if (kstrtoint(buf, 0, &input))
return -EINVAL;
mutex_lock(&fadump_mutex);
switch (input) {
case 0:
if (fw_dump.dump_registered == 0) {
goto unlock_out;
}
/* Un-register Firmware-assisted dump */
pr_debug("Un-register firmware-assisted dump\n");
fw_dump.ops->fadump_unregister(&fw_dump);
break;
case 1:
if (fw_dump.dump_registered == 1) {
/* Un-register Firmware-assisted dump */
fw_dump.ops->fadump_unregister(&fw_dump);
}
/* Register Firmware-assisted dump */
ret = register_fadump();
break;
default:
ret = -EINVAL;
break;
}
unlock_out:
mutex_unlock(&fadump_mutex);
return ret < 0 ? ret : count;
}
static int fadump_region_show(struct seq_file *m, void *private)
{
if (!fw_dump.fadump_enabled)
return 0;
mutex_lock(&fadump_mutex);
fw_dump.ops->fadump_region_show(&fw_dump, m);
mutex_unlock(&fadump_mutex);
return 0;
}
static struct kobj_attribute release_attr = __ATTR_WO(release_mem);
static struct kobj_attribute enable_attr = __ATTR_RO(enabled);
static struct kobj_attribute register_attr = __ATTR_RW(registered);
static struct kobj_attribute mem_reserved_attr = __ATTR_RO(mem_reserved);
static struct attribute *fadump_attrs[] = {
&enable_attr.attr,
&register_attr.attr,
&mem_reserved_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(fadump);
DEFINE_SHOW_ATTRIBUTE(fadump_region);
static void __init fadump_init_files(void)
{
int rc = 0;
fadump_kobj = kobject_create_and_add("fadump", kernel_kobj);
if (!fadump_kobj) {
pr_err("failed to create fadump kobject\n");
return;
}
debugfs_create_file("fadump_region", 0444, arch_debugfs_dir, NULL,
&fadump_region_fops);
if (fw_dump.dump_active) {
rc = sysfs_create_file(fadump_kobj, &release_attr.attr);
if (rc)
pr_err("unable to create release_mem sysfs file (%d)\n",
rc);
}
rc = sysfs_create_groups(fadump_kobj, fadump_groups);
if (rc) {
pr_err("sysfs group creation failed (%d), unregistering FADump",
rc);
unregister_fadump();
return;
}
/*
* The FADump sysfs files were moved from kernel_kobj to fadump_kobj, so
* create symlinks at the old location to maintain backward compatibility.
*
* - fadump_enabled -> fadump/enabled
* - fadump_registered -> fadump/registered
* - fadump_release_mem -> fadump/release_mem
*/
rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
"enabled", "fadump_enabled");
if (rc) {
pr_err("unable to create fadump_enabled symlink (%d)", rc);
return;
}
rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
"registered",
"fadump_registered");
if (rc) {
pr_err("unable to create fadump_registered symlink (%d)", rc);
sysfs_remove_link(kernel_kobj, "fadump_enabled");
return;
}
if (fw_dump.dump_active) {
rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj,
fadump_kobj,
"release_mem",
"fadump_release_mem");
if (rc)
pr_err("unable to create fadump_release_mem symlink (%d)",
rc);
}
return;
}
/*
* Prepare for firmware-assisted dump.
*/
int __init setup_fadump(void)
{
if (!fw_dump.fadump_supported)
return 0;
fadump_init_files();
fadump_show_config();
if (!fw_dump.fadump_enabled)
return 1;
/*
* If dump data is available then see if it is valid and prepare for
* saving it to the disk.
*/
if (fw_dump.dump_active) {
/*
* if dump process fails then invalidate the registration
* and release memory before proceeding for re-registration.
*/
if (fw_dump.ops->fadump_process(&fw_dump) < 0)
fadump_invalidate_release_mem();
}
/* Initialize the kernel dump memory structure and register with f/w */
else if (fw_dump.reserve_dump_area_size) {
fw_dump.ops->fadump_init_mem_struct(&fw_dump);
register_fadump();
}
/*
* In case of panic, fadump is triggered via ppc_panic_event()
* panic notifier. Setting crash_kexec_post_notifiers to 'true'
* lets panic() function take crash friendly path before panic
* notifiers are invoked.
*/
crash_kexec_post_notifiers = true;
return 1;
}
/*
* Use subsys_initcall_sync() here because of the dependency on
* crash_save_vmcoreinfo_init(), which must run first so that vmcoreinfo
* is initialized before registering with f/w.
*/
subsys_initcall_sync(setup_fadump);
#else /* !CONFIG_PRESERVE_FA_DUMP */
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
int depth, void *data)
{
if ((depth != 1) || (strcmp(uname, "ibm,opal") != 0))
return 0;
opal_fadump_dt_scan(&fw_dump, node);
return 1;
}
/*
* When dump is active but PRESERVE_FA_DUMP is enabled on the kernel,
* preserve crash data. The subsequent memory preserving kernel boot
* is likely to process this crash data.
*/
int __init fadump_reserve_mem(void)
{
if (fw_dump.dump_active) {
/*
* If the last boot crashed, reserve all the memory
* above the boot memory region to preserve the crash data.
*/
pr_info("Preserving crash data for processing in next boot.\n");
fadump_reserve_crash_area(fw_dump.boot_mem_top);
} else
pr_debug("FADump-aware kernel..\n");
return 1;
}
#endif /* CONFIG_PRESERVE_FA_DUMP */
/* Preserve everything above the base address */
static void __init fadump_reserve_crash_area(u64 base)
{
u64 i, mstart, mend, msize;
for_each_mem_range(i, &mstart, &mend) {
msize = mend - mstart;
if ((mstart + msize) < base)
continue;
if (mstart < base) {
msize -= (base - mstart);
mstart = base;
}
pr_info("Reserving %lluMB of memory at %#016llx for preserving crash data",
(msize >> 20), mstart);
memblock_reserve(mstart, msize);
}
}
unsigned long __init arch_reserved_kernel_pages(void)
{
return memblock_reserved_size() / PAGE_SIZE;
}
| linux-master | arch/powerpc/kernel/fadump.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001 Anton Blanchard <[email protected]>, IBM
*
* Communication to userspace based on kernel/printk.c
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/rtas.h>
#include <asm/nvram.h>
#include <linux/atomic.h>
#include <asm/machdep.h>
#include <asm/topology.h>
static DEFINE_SPINLOCK(rtasd_log_lock);
static DECLARE_WAIT_QUEUE_HEAD(rtas_log_wait);
static char *rtas_log_buf;
static unsigned long rtas_log_start;
static unsigned long rtas_log_size;
static int surveillance_timeout = -1;
static unsigned int rtas_error_log_max;
static unsigned int rtas_error_log_buffer_max;
/* RTAS service tokens */
static unsigned int event_scan;
static unsigned int rtas_event_scan_rate;
static bool full_rtas_msgs;
/* Stop logging to nvram after first fatal error */
static int logging_enabled; /* Until we initialize everything,
* make sure we don't try logging
* anything */
static int error_log_cnt;
/*
* Since we use 32 bit RTAS, the physical address of this must be below
* 4G or else bad things happen. Allocate this in the kernel data and
* make it big enough.
*/
static unsigned char logdata[RTAS_ERROR_LOG_MAX];
static char *rtas_type[] = {
"Unknown", "Retry", "TCE Error", "Internal Device Failure",
"Timeout", "Data Parity", "Address Parity", "Cache Parity",
"Address Invalid", "ECC Uncorrected", "ECC Corrupted",
};
static char *rtas_event_type(int type)
{
if ((type > 0) && (type < 11))
return rtas_type[type];
switch (type) {
case RTAS_TYPE_EPOW:
return "EPOW";
case RTAS_TYPE_PLATFORM:
return "Platform Error";
case RTAS_TYPE_IO:
return "I/O Event";
case RTAS_TYPE_INFO:
return "Platform Information Event";
case RTAS_TYPE_DEALLOC:
return "Resource Deallocation Event";
case RTAS_TYPE_DUMP:
return "Dump Notification Event";
case RTAS_TYPE_PRRN:
return "Platform Resource Reassignment Event";
case RTAS_TYPE_HOTPLUG:
return "Hotplug Event";
}
return rtas_type[0];
}
/* To see this info, grep for RTAS in /var/log/messages; each entry
* will be collected together with an obvious begin/end.
* There will be a unique identifier on the begin and end lines.
* This will persist across reboots.
*
* format of error logs returned from RTAS:
* bytes (size) : contents
* --------------------------------------------------------
* 0-7 (8) : rtas_error_log
* 8-47 (40) : extended info
* 48-51 (4) : vendor id
* 52-1023 (vendor specific) : location code and debug data
*/
static void printk_log_rtas(char *buf, int len)
{
int i, j, n = 0;
int perline = 16;
char buffer[64];
char *str = "RTAS event";
if (full_rtas_msgs) {
printk(RTAS_DEBUG "%d -------- %s begin --------\n",
error_log_cnt, str);
/*
* Print perline bytes on each line, each line will start
* with RTAS and a changing number, so syslogd will
* print lines that are otherwise the same. Separate every
* 4 bytes with a space.
*/
for (i = 0; i < len; i++) {
j = i % perline;
if (j == 0) {
memset(buffer, 0, sizeof(buffer));
n = sprintf(buffer, "RTAS %d:", i/perline);
}
if ((i % 4) == 0)
n += sprintf(buffer+n, " ");
n += sprintf(buffer+n, "%02x", (unsigned char)buf[i]);
if (j == (perline-1))
printk(KERN_DEBUG "%s\n", buffer);
}
if ((i % perline) != 0)
printk(KERN_DEBUG "%s\n", buffer);
printk(RTAS_DEBUG "%d -------- %s end ----------\n",
error_log_cnt, str);
} else {
struct rtas_error_log *errlog = (struct rtas_error_log *)buf;
printk(RTAS_DEBUG "event: %d, Type: %s (%d), Severity: %d\n",
error_log_cnt,
rtas_event_type(rtas_error_type(errlog)),
rtas_error_type(errlog),
rtas_error_severity(errlog));
}
}
static int log_rtas_len(char *buf)
{
int len;
struct rtas_error_log *err;
uint32_t extended_log_length;
/* rtas fixed header */
len = 8;
err = (struct rtas_error_log *)buf;
extended_log_length = rtas_error_extended_log_length(err);
if (rtas_error_extended(err) && extended_log_length) {
/* extended header */
len += extended_log_length;
}
if (rtas_error_log_max == 0)
rtas_error_log_max = rtas_get_error_log_max();
if (len > rtas_error_log_max)
len = rtas_error_log_max;
return len;
}
/*
* First write to nvram; if it is a fatal error, that is the only
* place we log the info. The error will be picked up
* on the next reboot by rtasd. If not fatal, run the
* method for the type of error. Currently, only RTAS
* errors have methods implemented, but in the future
* there might be a need to store data in nvram before a
* call to panic().
*
* XXX We write to nvram periodically, to indicate error has
* been written and sync'd, but there is a possibility
* that if we don't shutdown correctly, a duplicate error
* record will be created on next reboot.
*/
void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
{
unsigned long offset;
unsigned long s;
int len = 0;
pr_debug("rtasd: logging event\n");
if (buf == NULL)
return;
spin_lock_irqsave(&rtasd_log_lock, s);
/* get length and increase count */
switch (err_type & ERR_TYPE_MASK) {
case ERR_TYPE_RTAS_LOG:
len = log_rtas_len(buf);
if (!(err_type & ERR_FLAG_BOOT))
error_log_cnt++;
break;
case ERR_TYPE_KERNEL_PANIC:
default:
WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
#ifdef CONFIG_PPC64
/* Write error to NVRAM */
if (logging_enabled && !(err_type & ERR_FLAG_BOOT))
nvram_write_error_log(buf, len, err_type, error_log_cnt);
#endif /* CONFIG_PPC64 */
/*
* rtas errors can occur during boot, and we do want to capture
* those somewhere, even if nvram isn't ready (why not?), and even
* if rtasd isn't ready. Put them into the boot log, at least.
*/
if ((err_type & ERR_TYPE_MASK) == ERR_TYPE_RTAS_LOG)
printk_log_rtas(buf, len);
/* Check to see if we need to or have stopped logging */
if (fatal || !logging_enabled) {
logging_enabled = 0;
WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
/* call type specific method for error */
switch (err_type & ERR_TYPE_MASK) {
case ERR_TYPE_RTAS_LOG:
offset = rtas_error_log_buffer_max *
((rtas_log_start+rtas_log_size) & LOG_NUMBER_MASK);
/* First copy over sequence number */
memcpy(&rtas_log_buf[offset], (void *) &error_log_cnt, sizeof(int));
/* Second copy over error log data */
offset += sizeof(int);
memcpy(&rtas_log_buf[offset], buf, len);
if (rtas_log_size < LOG_NUMBER)
rtas_log_size += 1;
else
rtas_log_start += 1;
WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s);
wake_up_interruptible(&rtas_log_wait);
break;
case ERR_TYPE_KERNEL_PANIC:
default:
WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
}
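/*
* Note on the circular buffer indexing above (illustrative numbers,
* assuming LOG_NUMBER is a power of two, say 64): a new event is written
* to slot (rtas_log_start + rtas_log_size) & LOG_NUMBER_MASK while
* rtas_log_read() consumes slot rtas_log_start & LOG_NUMBER_MASK. For
* example, with rtas_log_start = 62 and rtas_log_size = 3, the next
* event lands in slot (62 + 3) & 63 == 1 and the oldest unread event is
* still read from slot 62. Each slot is rtas_error_log_buffer_max bytes:
* a sequence number followed by the error log data.
*/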
static void handle_rtas_event(const struct rtas_error_log *log)
{
if (!machine_is(pseries))
return;
if (rtas_error_type(log) == RTAS_TYPE_PRRN)
pr_info_ratelimited("Platform resource reassignment ignored.\n");
}
static int rtas_log_open(struct inode *inode, struct file *file)
{
return 0;
}
static int rtas_log_release(struct inode *inode, struct file *file)
{
return 0;
}
/* This checks whether all events have been logged; if they have, we
* know that we can safely clear the events in NVRAM.
* Next we'll sit and wait for something else to be logged.
*/
static ssize_t rtas_log_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
int error;
char *tmp;
unsigned long s;
unsigned long offset;
if (!buf || count < rtas_error_log_buffer_max)
return -EINVAL;
count = rtas_error_log_buffer_max;
if (!access_ok(buf, count))
return -EFAULT;
tmp = kmalloc(count, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
spin_lock_irqsave(&rtasd_log_lock, s);
/* if it's 0, then we know we got the last one (the one in NVRAM) */
while (rtas_log_size == 0) {
if (file->f_flags & O_NONBLOCK) {
spin_unlock_irqrestore(&rtasd_log_lock, s);
error = -EAGAIN;
goto out;
}
if (!logging_enabled) {
spin_unlock_irqrestore(&rtasd_log_lock, s);
error = -ENODATA;
goto out;
}
#ifdef CONFIG_PPC64
nvram_clear_error_log();
#endif /* CONFIG_PPC64 */
spin_unlock_irqrestore(&rtasd_log_lock, s);
error = wait_event_interruptible(rtas_log_wait, rtas_log_size);
if (error)
goto out;
spin_lock_irqsave(&rtasd_log_lock, s);
}
offset = rtas_error_log_buffer_max * (rtas_log_start & LOG_NUMBER_MASK);
memcpy(tmp, &rtas_log_buf[offset], count);
rtas_log_start += 1;
rtas_log_size -= 1;
spin_unlock_irqrestore(&rtasd_log_lock, s);
error = copy_to_user(buf, tmp, count) ? -EFAULT : count;
out:
kfree(tmp);
return error;
}
static __poll_t rtas_log_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &rtas_log_wait, wait);
if (rtas_log_size)
return EPOLLIN | EPOLLRDNORM;
return 0;
}
static const struct proc_ops rtas_log_proc_ops = {
.proc_read = rtas_log_read,
.proc_poll = rtas_log_poll,
.proc_open = rtas_log_open,
.proc_release = rtas_log_release,
.proc_lseek = noop_llseek,
};
static int enable_surveillance(int timeout)
{
int error;
error = rtas_set_indicator(SURVEILLANCE_TOKEN, 0, timeout);
if (error == 0)
return 0;
if (error == -EINVAL) {
printk(KERN_DEBUG "rtasd: surveillance not supported\n");
return 0;
}
printk(KERN_ERR "rtasd: could not update surveillance\n");
return -1;
}
static void do_event_scan(void)
{
int error;
do {
memset(logdata, 0, rtas_error_log_max);
error = rtas_call(event_scan, 4, 1, NULL,
RTAS_EVENT_SCAN_ALL_EVENTS, 0,
__pa(logdata), rtas_error_log_max);
if (error == -1) {
printk(KERN_ERR "event-scan failed\n");
break;
}
if (error == 0) {
if (rtas_error_type((struct rtas_error_log *)logdata) !=
RTAS_TYPE_PRRN)
pSeries_log_error(logdata, ERR_TYPE_RTAS_LOG,
0);
handle_rtas_event((struct rtas_error_log *)logdata);
}
} while (error == 0);
}
static void rtas_event_scan(struct work_struct *w);
static DECLARE_DELAYED_WORK(event_scan_work, rtas_event_scan);
/*
* Delay should be at least one second since some machines have problems if
* we call event-scan too quickly.
*/
static unsigned long event_scan_delay = 1*HZ;
static int first_pass = 1;
static void rtas_event_scan(struct work_struct *w)
{
unsigned int cpu;
do_event_scan();
cpus_read_lock();
/* raw_ OK because just using CPU as starting point. */
cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
if (cpu >= nr_cpu_ids) {
cpu = cpumask_first(cpu_online_mask);
if (first_pass) {
first_pass = 0;
event_scan_delay = 30*HZ/rtas_event_scan_rate;
if (surveillance_timeout != -1) {
pr_debug("rtasd: enabling surveillance\n");
enable_surveillance(surveillance_timeout);
pr_debug("rtasd: surveillance enabled\n");
}
}
}
schedule_delayed_work_on(cpu, &event_scan_work,
__round_jiffies_relative(event_scan_delay, cpu));
cpus_read_unlock();
}
#ifdef CONFIG_PPC64
static void __init retrieve_nvram_error_log(void)
{
unsigned int err_type;
int rc;
/* See if we have any error stored in NVRAM */
memset(logdata, 0, rtas_error_log_max);
rc = nvram_read_error_log(logdata, rtas_error_log_max,
&err_type, &error_log_cnt);
/* We can use rtas_log_buf now */
logging_enabled = 1;
if (!rc) {
if (err_type != ERR_FLAG_ALREADY_LOGGED) {
pSeries_log_error(logdata, err_type | ERR_FLAG_BOOT, 0);
}
}
}
#else /* CONFIG_PPC64 */
static void __init retrieve_nvram_error_log(void)
{
}
#endif /* CONFIG_PPC64 */
static void __init start_event_scan(void)
{
printk(KERN_DEBUG "RTAS daemon started\n");
pr_debug("rtasd: will sleep for %d milliseconds\n",
(30000 / rtas_event_scan_rate));
/* Retrieve errors from nvram if any */
retrieve_nvram_error_log();
schedule_delayed_work_on(cpumask_first(cpu_online_mask),
&event_scan_work, event_scan_delay);
}
/* Cancel the rtas event scan work */
void rtas_cancel_event_scan(void)
{
cancel_delayed_work_sync(&event_scan_work);
}
EXPORT_SYMBOL_GPL(rtas_cancel_event_scan);
static int __init rtas_event_scan_init(void)
{
int err;
if (!machine_is(pseries) && !machine_is(chrp))
return 0;
/* No RTAS */
event_scan = rtas_function_token(RTAS_FN_EVENT_SCAN);
if (event_scan == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "rtasd: No event-scan on system\n");
return -ENODEV;
}
err = of_property_read_u32(rtas.dev, "rtas-event-scan-rate", &rtas_event_scan_rate);
if (err) {
printk(KERN_ERR "rtasd: no rtas-event-scan-rate on system\n");
return -ENODEV;
}
if (!rtas_event_scan_rate) {
/* Broken firmware: take a rate of zero to mean don't scan */
printk(KERN_DEBUG "rtasd: scan rate is 0, not scanning\n");
return 0;
}
/* Make room for the sequence number */
rtas_error_log_max = rtas_get_error_log_max();
rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int);
rtas_log_buf = vmalloc(array_size(LOG_NUMBER,
rtas_error_log_buffer_max));
if (!rtas_log_buf) {
printk(KERN_ERR "rtasd: no memory\n");
return -ENOMEM;
}
start_event_scan();
return 0;
}
arch_initcall(rtas_event_scan_init);
static int __init rtas_init(void)
{
struct proc_dir_entry *entry;
if (!machine_is(pseries) && !machine_is(chrp))
return 0;
if (!rtas_log_buf)
return -ENODEV;
entry = proc_create("powerpc/rtas/error_log", 0400, NULL,
&rtas_log_proc_ops);
if (!entry)
printk(KERN_ERR "Failed to create error_log proc entry\n");
return 0;
}
__initcall(rtas_init);
static int __init surveillance_setup(char *str)
{
int i;
/* We only do surveillance on pseries */
if (!machine_is(pseries))
return 0;
if (get_option(&str,&i)) {
if (i >= 0 && i <= 255)
surveillance_timeout = i;
}
return 1;
}
__setup("surveillance=", surveillance_setup);
static int __init rtasmsgs_setup(char *str)
{
return (kstrtobool(str, &full_rtas_msgs) == 0);
}
__setup("rtasmsgs=", rtasmsgs_setup);
| linux-master | arch/powerpc/kernel/rtasd.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
#include <asm/firmware.h>
#include <asm/idle.h>
#include <asm/svm.h>
#include "cacheinfo.h"
#include "setup.h"
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#endif
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_PPC64
/*
* Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle:
* smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in
* 2014:
*
* "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean
* up the kernel code."
*
* powerpc-utils stopped using it as of 1.3.8. At some point in the future this
* code should be removed.
*/
static ssize_t store_smt_snooze_delay(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n",
current->comm, current->pid);
return count;
}
static ssize_t show_smt_snooze_delay(struct device *dev,
struct device_attribute *attr,
char *buf)
{
pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n",
current->comm, current->pid);
return sprintf(buf, "100\n");
}
static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
store_smt_snooze_delay);
static int __init setup_smt_snooze_delay(char *str)
{
if (!cpu_has_feature(CPU_FTR_SMT))
return 1;
pr_warn("smt-snooze-delay command line option has no effect\n");
return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);
#endif /* CONFIG_PPC64 */
#define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
static void read_##NAME(void *val) \
{ \
*(unsigned long *)val = mfspr(ADDRESS); \
} \
static void write_##NAME(void *val) \
{ \
EXTRA; \
mtspr(ADDRESS, *(unsigned long *)val); \
}
#define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
static ssize_t show_##NAME(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct cpu *cpu = container_of(dev, struct cpu, dev); \
unsigned long val; \
smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __used \
store_##NAME(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct cpu *cpu = container_of(dev, struct cpu, dev); \
unsigned long val; \
int ret = sscanf(buf, "%lx", &val); \
if (ret != 1) \
return -EINVAL; \
smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
return count; \
}
#define SYSFS_PMCSETUP(NAME, ADDRESS) \
__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
__SYSFS_SPRSETUP_SHOW_STORE(NAME)
#define SYSFS_SPRSETUP(NAME, ADDRESS) \
__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
__SYSFS_SPRSETUP_SHOW_STORE(NAME)
#define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
__SYSFS_SPRSETUP_SHOW_STORE(NAME)
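/*
* Rough sketch (simplified, for illustration only) of what the macros
* above generate. SYSFS_SPRSETUP(purr, SPRN_PURR) expands to roughly:
*
*	static void read_purr(void *val)
*	{
*		*(unsigned long *)val = mfspr(SPRN_PURR);
*	}
*	static void write_purr(void *val)
*	{
*		mtspr(SPRN_PURR, *(unsigned long *)val);
*	}
*
* plus show_purr()/store_purr(), which run read_purr()/write_purr() on
* the target CPU via smp_call_function_single() and print/parse the
* value. SYSFS_PMCSETUP() is identical except that the generated
* write_*() helper calls ppc_enable_pmcs() before writing the SPR.
*/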
#ifdef CONFIG_PPC64
/*
* This is the system wide DSCR register default value. Any
* change to this default value through the sysfs interface
* will update all per cpu DSCR default values across the
* system stored in their respective PACA structures.
*/
static unsigned long dscr_default;
/**
* read_dscr() - Fetch the cpu specific DSCR default
* @val: Returned cpu specific DSCR default value
*
* This function returns the per-CPU DSCR default value
* for any CPU, as stored in its PACA structure.
*/
static void read_dscr(void *val)
{
*(unsigned long *)val = get_paca()->dscr_default;
}
/**
* write_dscr() - Update the cpu specific DSCR default
* @val: New cpu specific DSCR default value to update
*
* This function updates the per-CPU DSCR default value
* for any CPU, as stored in its PACA structure.
*/
static void write_dscr(void *val)
{
get_paca()->dscr_default = *(unsigned long *)val;
if (!current->thread.dscr_inherit) {
current->thread.dscr = *(unsigned long *)val;
mtspr(SPRN_DSCR, *(unsigned long *)val);
}
}
SYSFS_SPRSETUP_SHOW_STORE(dscr);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
static void add_write_permission_dev_attr(struct device_attribute *attr)
{
attr->attr.mode |= 0200;
}
/**
* show_dscr_default() - Fetch the system wide DSCR default
* @dev: Device structure
* @attr: Device attribute structure
* @buf: Interface buffer
*
* This function returns the system wide DSCR default value.
*/
static ssize_t show_dscr_default(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%lx\n", dscr_default);
}
/**
* store_dscr_default() - Update the system wide DSCR default
* @dev: Device structure
* @attr: Device attribute structure
* @buf: Interface buffer
* @count: Size of the update
*
* This function updates the system wide DSCR default value.
*/
static ssize_t __used store_dscr_default(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long val;
int ret = 0;
ret = sscanf(buf, "%lx", &val);
if (ret != 1)
return -EINVAL;
dscr_default = val;
on_each_cpu(write_dscr, &val, 1);
return count;
}
static DEVICE_ATTR(dscr_default, 0600,
show_dscr_default, store_dscr_default);
static void __init sysfs_create_dscr_default(void)
{
if (cpu_has_feature(CPU_FTR_DSCR)) {
struct device *dev_root;
int cpu;
dscr_default = spr_default_dscr;
for_each_possible_cpu(cpu)
paca_ptrs[cpu]->dscr_default = dscr_default;
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
device_create_file(dev_root, &dev_attr_dscr_default);
put_device(dev_root);
}
}
}
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_E500
#define MAX_BIT 63
static u64 pw20_wt;
static u64 altivec_idle_wt;
static unsigned int get_idle_ticks_bit(u64 ns)
{
u64 cycle;
if (ns >= 10000)
cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec;
else
cycle = div_u64(ns * tb_ticks_per_usec, 1000);
if (!cycle)
return 0;
return ilog2(cycle);
}
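/*
* Worked example, assuming a hypothetical 41MHz timebase
* (tb_ticks_per_usec == 41): for ns = 1000, the ns < 10000 branch gives
* cycle = (1000 * 41) / 1000 = 41 timebase ticks and ilog2(41) == 5, so
* the callers program a wait threshold of MAX_BIT - 5 == 58, i.e. TB[58],
* which matches the 781~1560ns row of the table further below.
*/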
static void do_show_pwrmgtcr0(void *val)
{
u32 *value = val;
*value = mfspr(SPRN_PWRMGTCR0);
}
static ssize_t show_pw20_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 value;
unsigned int cpu = dev->id;
smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
value &= PWRMGTCR0_PW20_WAIT;
return sprintf(buf, "%u\n", value ? 1 : 0);
}
static void do_store_pw20_state(void *val)
{
u32 *value = val;
u32 pw20_state;
pw20_state = mfspr(SPRN_PWRMGTCR0);
if (*value)
pw20_state |= PWRMGTCR0_PW20_WAIT;
else
pw20_state &= ~PWRMGTCR0_PW20_WAIT;
mtspr(SPRN_PWRMGTCR0, pw20_state);
}
static ssize_t store_pw20_state(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u32 value;
unsigned int cpu = dev->id;
if (kstrtou32(buf, 0, &value))
return -EINVAL;
if (value > 1)
return -EINVAL;
smp_call_function_single(cpu, do_store_pw20_state, &value, 1);
return count;
}
static ssize_t show_pw20_wait_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 value;
u64 tb_cycle = 1;
u64 time;
unsigned int cpu = dev->id;
if (!pw20_wt) {
smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
value = (value & PWRMGTCR0_PW20_ENT) >>
PWRMGTCR0_PW20_ENT_SHIFT;
tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
/* convert timebase cycles to ns */
if (tb_ticks_per_usec > 1000) {
time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
} else {
u32 rem_us;
time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
&rem_us);
time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
}
} else {
time = pw20_wt;
}
return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}
static void set_pw20_wait_entry_bit(void *val)
{
u32 *value = val;
u32 pw20_idle;
pw20_idle = mfspr(SPRN_PWRMGTCR0);
/* Set Automatic PW20 Core Idle Count */
/* clear count */
pw20_idle &= ~PWRMGTCR0_PW20_ENT;
/* set count */
pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT);
mtspr(SPRN_PWRMGTCR0, pw20_idle);
}
static ssize_t store_pw20_wait_time(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u32 entry_bit;
u64 value;
unsigned int cpu = dev->id;
if (kstrtou64(buf, 0, &value))
return -EINVAL;
if (!value)
return -EINVAL;
entry_bit = get_idle_ticks_bit(value);
if (entry_bit > MAX_BIT)
return -EINVAL;
pw20_wt = value;
smp_call_function_single(cpu, set_pw20_wait_entry_bit,
&entry_bit, 1);
return count;
}
static ssize_t show_altivec_idle(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 value;
unsigned int cpu = dev->id;
smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
value &= PWRMGTCR0_AV_IDLE_PD_EN;
return sprintf(buf, "%u\n", value ? 1 : 0);
}
static void do_store_altivec_idle(void *val)
{
u32 *value = val;
u32 altivec_idle;
altivec_idle = mfspr(SPRN_PWRMGTCR0);
if (*value)
altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN;
else
altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN;
mtspr(SPRN_PWRMGTCR0, altivec_idle);
}
static ssize_t store_altivec_idle(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u32 value;
unsigned int cpu = dev->id;
if (kstrtou32(buf, 0, &value))
return -EINVAL;
if (value > 1)
return -EINVAL;
smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);
return count;
}
static ssize_t show_altivec_idle_wait_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 value;
u64 tb_cycle = 1;
u64 time;
unsigned int cpu = dev->id;
if (!altivec_idle_wt) {
smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
value = (value & PWRMGTCR0_AV_IDLE_CNT) >>
PWRMGTCR0_AV_IDLE_CNT_SHIFT;
tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
/* convert timebase cycles to ns */
if (tb_ticks_per_usec > 1000) {
time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
} else {
u32 rem_us;
time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
&rem_us);
time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
}
} else {
time = altivec_idle_wt;
}
return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}
static void set_altivec_idle_wait_entry_bit(void *val)
{
u32 *value = val;
u32 altivec_idle;
altivec_idle = mfspr(SPRN_PWRMGTCR0);
/* Set Automatic AltiVec Idle Count */
/* clear count */
altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT;
/* set count */
altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT);
mtspr(SPRN_PWRMGTCR0, altivec_idle);
}
static ssize_t store_altivec_idle_wait_time(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u32 entry_bit;
u64 value;
unsigned int cpu = dev->id;
if (kstrtou64(buf, 0, &value))
return -EINVAL;
if (!value)
return -EINVAL;
entry_bit = get_idle_ticks_bit(value);
if (entry_bit > MAX_BIT)
return -EINVAL;
altivec_idle_wt = value;
smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
&entry_bit, 1);
return count;
}
/*
* Enable/Disable interface:
* 0, disable. 1, enable.
*/
static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state);
static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle);
/*
* Set wait time interface (nanoseconds).
* Example, based on a TB frequency of 41MHz:
* 1~48(ns): TB[63]
* 49~97(ns): TB[62]
* 98~195(ns): TB[61]
* 196~390(ns): TB[60]
* 391~780(ns): TB[59]
* 781~1560(ns): TB[58]
* ...
*/
static DEVICE_ATTR(pw20_wait_time, 0600,
show_pw20_wait_time,
store_pw20_wait_time);
static DEVICE_ATTR(altivec_idle_wait_time, 0600,
show_altivec_idle_wait_time,
store_altivec_idle_wait_time);
#endif
/*
* Enabling PMCs will slow partition context switch times so we only do
* it the first time we write to the PMCs.
*/
static DEFINE_PER_CPU(char, pmcs_enabled);
void ppc_enable_pmcs(void)
{
ppc_set_pmu_inuse(1);
/* Only need to enable them once */
if (__this_cpu_read(pmcs_enabled))
return;
__this_cpu_write(pmcs_enabled, 1);
if (ppc_md.enable_pmcs)
ppc_md.enable_pmcs();
}
EXPORT_SYMBOL(ppc_enable_pmcs);
/* Let's define all possible registers; we'll only hook up the ones
* that are implemented on the current processor
*/
#ifdef CONFIG_PMU_SYSFS
#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32)
#define HAS_PPC_PMC_CLASSIC 1
#define HAS_PPC_PMC_IBM 1
#endif
#ifdef CONFIG_PPC64
#define HAS_PPC_PMC_PA6T 1
#define HAS_PPC_PMC56 1
#endif
#ifdef CONFIG_PPC_BOOK3S_32
#define HAS_PPC_PMC_G4 1
#endif
#endif /* CONFIG_PMU_SYSFS */
#if defined(CONFIG_PPC64) && defined(CONFIG_DEBUG_MISC)
#define HAS_PPC_PA6T
#endif
/*
* SPRs which are not related to PMU.
*/
#ifdef CONFIG_PPC64
SYSFS_SPRSETUP(purr, SPRN_PURR);
SYSFS_SPRSETUP(spurr, SPRN_SPURR);
SYSFS_SPRSETUP(pir, SPRN_PIR);
SYSFS_SPRSETUP(tscr, SPRN_TSCR);
/*
* Let's only enable read for phyp resources and
* enable write when needed with a separate function.
* Let's be conservative and default to pseries.
*/
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
static DEVICE_ATTR(tscr, 0600, show_tscr, store_tscr);
#endif /* CONFIG_PPC64 */
#ifdef HAS_PPC_PMC_CLASSIC
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
#endif
#ifdef HAS_PPC_PMC_G4
SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
#endif
#ifdef HAS_PPC_PMC56
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_PMCSETUP(mmcr3, SPRN_MMCR3);
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(mmcr3, 0600, show_mmcr3, store_mmcr3);
#endif /* HAS_PPC_PMC56 */
#ifdef HAS_PPC_PMC_PA6T
SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
#endif
#ifdef HAS_PPC_PA6T
SYSFS_SPRSETUP(hid0, SPRN_HID0);
SYSFS_SPRSETUP(hid1, SPRN_HID1);
SYSFS_SPRSETUP(hid4, SPRN_HID4);
SYSFS_SPRSETUP(hid5, SPRN_HID5);
SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0);
SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1);
SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2);
SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3);
SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4);
SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5);
SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6);
SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7);
SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8);
SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9);
SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT);
SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR);
SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR);
SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR);
SYSFS_SPRSETUP(der, SPRN_PA6T_DER);
SYSFS_SPRSETUP(mer, SPRN_PA6T_MER);
SYSFS_SPRSETUP(ber, SPRN_PA6T_BER);
SYSFS_SPRSETUP(ier, SPRN_PA6T_IER);
SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER);
SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR);
SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* HAS_PPC_PA6T */
#ifdef HAS_PPC_PMC_IBM
static struct device_attribute ibm_common_attrs[] = {
__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
};
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
static struct device_attribute g4_common_attrs[] = {
__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
__ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
};
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_CLASSIC
static struct device_attribute classic_pmc_attrs[] = {
__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
__ATTR(pmc3, 0600, show_pmc3, store_pmc3),
__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
#ifdef HAS_PPC_PMC56
__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
#endif
};
#endif
#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
static struct device_attribute pa6t_attrs[] = {
#ifdef HAS_PPC_PMC_PA6T
__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
__ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
__ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
#endif
#ifdef HAS_PPC_PA6T
__ATTR(hid0, 0600, show_hid0, store_hid0),
__ATTR(hid1, 0600, show_hid1, store_hid1),
__ATTR(hid4, 0600, show_hid4, store_hid4),
__ATTR(hid5, 0600, show_hid5, store_hid5),
__ATTR(ima0, 0600, show_ima0, store_ima0),
__ATTR(ima1, 0600, show_ima1, store_ima1),
__ATTR(ima2, 0600, show_ima2, store_ima2),
__ATTR(ima3, 0600, show_ima3, store_ima3),
__ATTR(ima4, 0600, show_ima4, store_ima4),
__ATTR(ima5, 0600, show_ima5, store_ima5),
__ATTR(ima6, 0600, show_ima6, store_ima6),
__ATTR(ima7, 0600, show_ima7, store_ima7),
__ATTR(ima8, 0600, show_ima8, store_ima8),
__ATTR(ima9, 0600, show_ima9, store_ima9),
__ATTR(imaat, 0600, show_imaat, store_imaat),
__ATTR(btcr, 0600, show_btcr, store_btcr),
__ATTR(pccr, 0600, show_pccr, store_pccr),
__ATTR(rpccr, 0600, show_rpccr, store_rpccr),
__ATTR(der, 0600, show_der, store_der),
__ATTR(mer, 0600, show_mer, store_mer),
__ATTR(ber, 0600, show_ber, store_ber),
__ATTR(ier, 0600, show_ier, store_ier),
__ATTR(sier, 0600, show_sier, store_sier),
__ATTR(siar, 0600, show_siar, store_siar),
__ATTR(tsr0, 0600, show_tsr0, store_tsr0),
__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
#endif /* HAS_PPC_PA6T */
};
#endif
#ifdef CONFIG_PPC_SVM
static ssize_t show_svm(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", is_secure_guest());
}
static DEVICE_ATTR(svm, 0444, show_svm, NULL);
static void __init create_svm_file(void)
{
struct device *dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
device_create_file(dev_root, &dev_attr_svm);
put_device(dev_root);
}
}
#else
static void __init create_svm_file(void)
{
}
#endif /* CONFIG_PPC_SVM */
#ifdef CONFIG_PPC_PSERIES
static void read_idle_purr(void *val)
{
u64 *ret = val;
*ret = read_this_idle_purr();
}
static ssize_t idle_purr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
u64 val;
smp_call_function_single(cpu->dev.id, read_idle_purr, &val, 1);
return sprintf(buf, "%llx\n", val);
}
static DEVICE_ATTR(idle_purr, 0400, idle_purr_show, NULL);
static void create_idle_purr_file(struct device *s)
{
if (firmware_has_feature(FW_FEATURE_LPAR))
device_create_file(s, &dev_attr_idle_purr);
}
static void remove_idle_purr_file(struct device *s)
{
if (firmware_has_feature(FW_FEATURE_LPAR))
device_remove_file(s, &dev_attr_idle_purr);
}
static void read_idle_spurr(void *val)
{
u64 *ret = val;
*ret = read_this_idle_spurr();
}
static ssize_t idle_spurr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
u64 val;
smp_call_function_single(cpu->dev.id, read_idle_spurr, &val, 1);
return sprintf(buf, "%llx\n", val);
}
static DEVICE_ATTR(idle_spurr, 0400, idle_spurr_show, NULL);
static void create_idle_spurr_file(struct device *s)
{
if (firmware_has_feature(FW_FEATURE_LPAR))
device_create_file(s, &dev_attr_idle_spurr);
}
static void remove_idle_spurr_file(struct device *s)
{
if (firmware_has_feature(FW_FEATURE_LPAR))
device_remove_file(s, &dev_attr_idle_spurr);
}
#else /* CONFIG_PPC_PSERIES */
#define create_idle_purr_file(s)
#define remove_idle_purr_file(s)
#define create_idle_spurr_file(s)
#define remove_idle_spurr_file(s)
#endif /* CONFIG_PPC_PSERIES */
static int register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
struct device_attribute *attrs, *pmc_attrs;
int i, nattrs;
/* For cpus present at boot a reference was already grabbed in register_cpu() */
if (!s->of_node)
s->of_node = of_get_cpu_node(cpu, NULL);
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_SMT))
device_create_file(s, &dev_attr_smt_snooze_delay);
#endif
/* PMC stuff */
switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
nattrs = ARRAY_SIZE(ibm_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
case PPC_PMC_G4:
attrs = g4_common_attrs;
nattrs = ARRAY_SIZE(g4_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
nattrs = ARRAY_SIZE(pa6t_attrs);
pmc_attrs = NULL;
break;
#endif
default:
attrs = NULL;
nattrs = 0;
pmc_attrs = NULL;
}
for (i = 0; i < nattrs; i++)
device_create_file(s, &attrs[i]);
if (pmc_attrs)
for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
device_create_file(s, &pmc_attrs[i]);
#ifdef CONFIG_PPC64
#ifdef CONFIG_PMU_SYSFS
if (cpu_has_feature(CPU_FTR_MMCRA))
device_create_file(s, &dev_attr_mmcra);
if (cpu_has_feature(CPU_FTR_ARCH_31))
device_create_file(s, &dev_attr_mmcr3);
#endif /* CONFIG_PMU_SYSFS */
if (cpu_has_feature(CPU_FTR_PURR)) {
if (!firmware_has_feature(FW_FEATURE_LPAR))
add_write_permission_dev_attr(&dev_attr_purr);
device_create_file(s, &dev_attr_purr);
create_idle_purr_file(s);
}
if (cpu_has_feature(CPU_FTR_SPURR)) {
device_create_file(s, &dev_attr_spurr);
create_idle_spurr_file(s);
}
if (cpu_has_feature(CPU_FTR_DSCR))
device_create_file(s, &dev_attr_dscr);
if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
device_create_file(s, &dev_attr_pir);
if (cpu_has_feature(CPU_FTR_ARCH_206) &&
!firmware_has_feature(FW_FEATURE_LPAR))
device_create_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_E500
if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
device_create_file(s, &dev_attr_pw20_state);
device_create_file(s, &dev_attr_pw20_wait_time);
device_create_file(s, &dev_attr_altivec_idle);
device_create_file(s, &dev_attr_altivec_idle_wait_time);
}
#endif
cacheinfo_cpu_online(cpu);
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static int unregister_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
struct device_attribute *attrs, *pmc_attrs;
int i, nattrs;
if (WARN_RATELIMIT(!c->hotpluggable, "cpu %d can't be offlined\n", cpu))
return -EBUSY;
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_SMT))
device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif
/* PMC stuff */
switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
nattrs = ARRAY_SIZE(ibm_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
case PPC_PMC_G4:
attrs = g4_common_attrs;
nattrs = ARRAY_SIZE(g4_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
nattrs = ARRAY_SIZE(pa6t_attrs);
pmc_attrs = NULL;
break;
#endif
default:
attrs = NULL;
nattrs = 0;
pmc_attrs = NULL;
}
for (i = 0; i < nattrs; i++)
device_remove_file(s, &attrs[i]);
if (pmc_attrs)
for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
device_remove_file(s, &pmc_attrs[i]);
#ifdef CONFIG_PPC64
#ifdef CONFIG_PMU_SYSFS
if (cpu_has_feature(CPU_FTR_MMCRA))
device_remove_file(s, &dev_attr_mmcra);
if (cpu_has_feature(CPU_FTR_ARCH_31))
device_remove_file(s, &dev_attr_mmcr3);
#endif /* CONFIG_PMU_SYSFS */
if (cpu_has_feature(CPU_FTR_PURR)) {
device_remove_file(s, &dev_attr_purr);
remove_idle_purr_file(s);
}
if (cpu_has_feature(CPU_FTR_SPURR)) {
device_remove_file(s, &dev_attr_spurr);
remove_idle_spurr_file(s);
}
if (cpu_has_feature(CPU_FTR_DSCR))
device_remove_file(s, &dev_attr_dscr);
if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
device_remove_file(s, &dev_attr_pir);
if (cpu_has_feature(CPU_FTR_ARCH_206) &&
!firmware_has_feature(FW_FEATURE_LPAR))
device_remove_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_E500
if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
device_remove_file(s, &dev_attr_pw20_state);
device_remove_file(s, &dev_attr_pw20_wait_time);
device_remove_file(s, &dev_attr_altivec_idle);
device_remove_file(s, &dev_attr_altivec_idle_wait_time);
}
#endif
cacheinfo_cpu_offline(cpu);
of_node_put(s->of_node);
s->of_node = NULL;
return 0;
}
#else /* !CONFIG_HOTPLUG_CPU */
#define unregister_cpu_online NULL
#endif
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ssize_t arch_cpu_probe(const char *buf, size_t count)
{
if (ppc_md.cpu_probe)
return ppc_md.cpu_probe(buf, count);
return -EINVAL;
}
ssize_t arch_cpu_release(const char *buf, size_t count)
{
if (ppc_md.cpu_release)
return ppc_md.cpu_release(buf, count);
return -EINVAL;
}
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
static DEFINE_MUTEX(cpu_mutex);
int cpu_add_dev_attr(struct device_attribute *attr)
{
int cpu;
mutex_lock(&cpu_mutex);
for_each_possible_cpu(cpu) {
device_create_file(get_cpu_device(cpu), attr);
}
mutex_unlock(&cpu_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr);
int cpu_add_dev_attr_group(struct attribute_group *attrs)
{
int cpu;
struct device *dev;
int ret;
mutex_lock(&cpu_mutex);
for_each_possible_cpu(cpu) {
dev = get_cpu_device(cpu);
ret = sysfs_create_group(&dev->kobj, attrs);
WARN_ON(ret != 0);
}
mutex_unlock(&cpu_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);
void cpu_remove_dev_attr(struct device_attribute *attr)
{
int cpu;
mutex_lock(&cpu_mutex);
for_each_possible_cpu(cpu) {
device_remove_file(get_cpu_device(cpu), attr);
}
mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);
void cpu_remove_dev_attr_group(struct attribute_group *attrs)
{
int cpu;
struct device *dev;
mutex_lock(&cpu_mutex);
for_each_possible_cpu(cpu) {
dev = get_cpu_device(cpu);
sysfs_remove_group(&dev->kobj, attrs);
}
mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);
/* NUMA stuff */
#ifdef CONFIG_NUMA
int sysfs_add_device_to_node(struct device *dev, int nid)
{
struct node *node = node_devices[nid];
return sysfs_create_link(&node->dev.kobj, &dev->kobj,
kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
void sysfs_remove_device_from_node(struct device *dev, int nid)
{
struct node *node = node_devices[nid];
sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
#endif
/* Only valid if CPU is present. */
static ssize_t show_physical_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
}
static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
static int __init topology_init(void)
{
int cpu, r;
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
#ifdef CONFIG_HOTPLUG_CPU
/*
* For now, we just see if the system supports making
* the RTAS calls for CPU hotplug. But, there may be a
* more comprehensive way to do this for an individual
* CPU. For instance, the boot cpu might never be valid
* for hotplugging.
*/
if (smp_ops && smp_ops->cpu_offline_self)
c->hotpluggable = 1;
#endif
if (cpu_online(cpu) || c->hotpluggable) {
register_cpu(c, cpu);
device_create_file(&c->dev, &dev_attr_physical_id);
}
}
r = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/topology:online",
register_cpu_online, unregister_cpu_online);
WARN_ON(r < 0);
#ifdef CONFIG_PPC64
sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */
create_svm_file();
return 0;
}
subsys_initcall(topology_init);
| linux-master | arch/powerpc/kernel/sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 IBM Corporation
* Author: Nayna Jain
*/
#include <linux/types.h>
#include <linux/of.h>
#include <asm/secure_boot.h>
static struct device_node *get_ppc_fw_sb_node(void)
{
static const struct of_device_id ids[] = {
{ .compatible = "ibm,secureboot", },
{ .compatible = "ibm,secureboot-v1", },
{ .compatible = "ibm,secureboot-v2", },
{},
};
return of_find_matching_node(NULL, ids);
}
bool is_ppc_secureboot_enabled(void)
{
struct device_node *node;
bool enabled = false;
u32 secureboot;
node = get_ppc_fw_sb_node();
enabled = of_property_read_bool(node, "os-secureboot-enforcing");
of_node_put(node);
if (enabled)
goto out;
if (!of_property_read_u32(of_root, "ibm,secure-boot", &secureboot))
enabled = (secureboot > 1);
out:
pr_info("Secure boot mode %s\n", enabled ? "enabled" : "disabled");
return enabled;
}
bool is_ppc_trustedboot_enabled(void)
{
struct device_node *node;
bool enabled = false;
u32 trustedboot;
node = get_ppc_fw_sb_node();
enabled = of_property_read_bool(node, "trusted-enabled");
of_node_put(node);
if (enabled)
goto out;
if (!of_property_read_u32(of_root, "ibm,trusted-boot", &trustedboot))
enabled = (trustedboot > 0);
out:
pr_info("Trusted boot mode %s\n", enabled ? "enabled" : "disabled");
return enabled;
}
| linux-master | arch/powerpc/kernel/secure_boot.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Common prep/pmac/chrp boot and setup code.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/tty.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/nvram.h>
#include <linux/pgtable.h>
#include <linux/of_fdt.h>
#include <linux/irq.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/cputable.h>
#include <asm/bootx.h>
#include <asm/btext.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/pmac_feature.h>
#include <asm/sections.h>
#include <asm/nvram.h>
#include <asm/xmon.h>
#include <asm/time.h>
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
#include <asm/kdump.h>
#include <asm/feature-fixups.h>
#include <asm/early_ioremap.h>
#include "setup.h"
#define DBG(fmt...)
extern void bootx_init(unsigned long r4, unsigned long phys);
int boot_cpuid_phys;
EXPORT_SYMBOL_GPL(boot_cpuid_phys);
int smp_hw_index[NR_CPUS];
EXPORT_SYMBOL(smp_hw_index);
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
/*
 * This is run before start_kernel(); the kernel has been relocated
 * and we are running with enough of the MMU enabled to have our
 * proper kernel virtual addresses.
 *
 * We do the initial parsing of the flat device-tree and prepare
* for the MMU to be fully initialized.
*/
notrace void __init machine_init(u64 dt_ptr)
{
u32 *addr = (u32 *)patch_site_addr(&patch__memset_nocache);
ppc_inst_t insn;
/* Configure static keys first, now that we're relocated. */
setup_feature_keys();
early_ioremap_init();
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_RAW_NOP()));
create_cond_branch(&insn, addr, branch_target(addr), 0x820000);
patch_instruction(addr, insn); /* replace b by bne cr0 */
/* Do some early initialization based on the flat device tree */
early_init_devtree(__va(dt_ptr));
early_init_mmu();
setup_kdump_trampoline();
}
/* Checks "l2cr=xxxx" command-line option */
static int __init ppc_setup_l2cr(char *str)
{
if (cpu_has_feature(CPU_FTR_L2CR)) {
unsigned long val = simple_strtoul(str, NULL, 0);
printk(KERN_INFO "l2cr set to %lx\n", val);
_set_L2CR(0); /* force invalidate by disable cache */
_set_L2CR(val); /* and enable it */
}
return 1;
}
__setup("l2cr=", ppc_setup_l2cr);
/* Checks "l3cr=xxxx" command-line option */
static int __init ppc_setup_l3cr(char *str)
{
if (cpu_has_feature(CPU_FTR_L3CR)) {
unsigned long val = simple_strtoul(str, NULL, 0);
printk(KERN_INFO "l3cr set to %lx\n", val);
_set_L3CR(val); /* and enable it */
}
return 1;
}
__setup("l3cr=", ppc_setup_l3cr);
static int __init ppc_init(void)
{
/* clear the progress line */
if (ppc_md.progress)
ppc_md.progress(" ", 0xffff);
/* call platform init */
if (ppc_md.init != NULL) {
ppc_md.init();
}
return 0;
}
arch_initcall(ppc_init);
static void *__init alloc_stack(void)
{
void *ptr = memblock_alloc(THREAD_SIZE, THREAD_ALIGN);
if (!ptr)
panic("cannot allocate %d bytes for stack at %pS\n",
THREAD_SIZE, (void *)_RET_IP_);
return ptr;
}
void __init irqstack_early_init(void)
{
unsigned int i;
if (IS_ENABLED(CONFIG_VMAP_STACK))
return;
/* interrupt stacks must be in lowmem; we get that for free on ppc32
 * as the memblock is limited to lowmem by default */
for_each_possible_cpu(i) {
softirq_ctx[i] = alloc_stack();
hardirq_ctx[i] = alloc_stack();
}
}
#ifdef CONFIG_VMAP_STACK
void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};
void __init emergency_stack_init(void)
{
unsigned int i;
for_each_possible_cpu(i)
emergency_ctx[i] = alloc_stack();
}
#endif
#ifdef CONFIG_BOOKE_OR_40x
void __init exc_lvl_early_init(void)
{
unsigned int i, hw_cpu;
/* interrupt stacks must be in lowmem; we get that for free on ppc32
 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
for_each_possible_cpu(i) {
#ifdef CONFIG_SMP
hw_cpu = get_hard_smp_processor_id(i);
#else
hw_cpu = 0;
#endif
critirq_ctx[hw_cpu] = alloc_stack();
#ifdef CONFIG_BOOKE
dbgirq_ctx[hw_cpu] = alloc_stack();
mcheckirq_ctx[hw_cpu] = alloc_stack();
#endif
}
}
#endif
void __init setup_power_save(void)
{
#ifdef CONFIG_PPC_BOOK3S_32
if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
cpu_has_feature(CPU_FTR_CAN_NAP))
ppc_md.power_save = ppc6xx_idle;
#endif
#ifdef CONFIG_PPC_E500
if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
cpu_has_feature(CPU_FTR_CAN_NAP))
ppc_md.power_save = e500_idle;
#endif
}
__init void initialize_cache_info(void)
{
/*
* Set cache line size based on type of cpu as a default.
* Systems with OF can look in the properties on the cpu node(s)
* for a possibly more accurate value.
*/
dcache_bsize = cur_cpu_spec->dcache_bsize;
icache_bsize = cur_cpu_spec->icache_bsize;
}
| linux-master | arch/powerpc/kernel/setup_32.c |
// SPDX-License-Identifier: GPL-2.0
#undef __powerpc64__
#include <linux/audit_arch.h>
#include <asm/unistd.h>
#include "audit_32.h"
unsigned ppc32_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
unsigned ppc32_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
unsigned ppc32_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
unsigned ppc32_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
unsigned ppc32_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
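/*
 * Map compat (32-bit) syscall numbers onto the generic audit classes;
 * anything not listed below is reported as AUDITSC_COMPAT.
 */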
int ppc32_classify_syscall(unsigned syscall)
{
switch (syscall) {
case __NR_open:
return AUDITSC_OPEN;
case __NR_openat:
return AUDITSC_OPENAT;
case __NR_socketcall:
return AUDITSC_SOCKETCALL;
case __NR_execve:
return AUDITSC_EXECVE;
case __NR_openat2:
return AUDITSC_OPENAT2;
default:
return AUDITSC_COMPAT;
}
}
| linux-master | arch/powerpc/kernel/compat_audit.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Derived from arch/i386/kernel/irq.c
* Copyright (C) 1992 Linus Torvalds
* Adapted from arch/i386 by Gary Thomas
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
* Updated and modified by Cort Dougan <[email protected]>
* Copyright (C) 1996-2001 Cort Dougan
* Adapted for Power Macintosh by Paul Mackerras
* Copyright (C) 1996 Paul Mackerras ([email protected])
*
* This file contains the code used by various IRQ handling routines:
* asking for different IRQ's should be done through these routines
* instead of just grabbing them. Thus setups with different IRQ numbers
* shouldn't result in any weird surprises, and installing new handlers
* should be easier.
*
* The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
* interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
* mask register (of which only 16 are defined), hence the weird shifting
* and complement of the cached_irq_mask. I want to be able to stuff
* this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_PPC_8xx
* to reduce code space and undefined function references.
*/
#undef DEBUG
#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>
#include <asm/ppc_asm.h>
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#ifdef CONFIG_PPC32
atomic_t ppc_n_lost_interrupts;
#ifdef CONFIG_TAU_INT
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */
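/* Print architecture-specific per-CPU interrupt counters in /proc/interrupts. */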
int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
if (tau_initialized) {
seq_printf(p, "%*s: ", prec, "TAU");
for_each_online_cpu(j)
seq_printf(p, "%10u ", tau_interrupts(j));
seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
seq_printf(p, "%*s: ", prec, "LOC");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
seq_printf(p, " Local timer interrupts for timer event device\n");
seq_printf(p, "%*s: ", prec, "BCT");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
seq_printf(p, " Broadcast timer interrupts for timer event device\n");
seq_printf(p, "%*s: ", prec, "LOC");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
seq_printf(p, " Local timer interrupts for others\n");
seq_printf(p, "%*s: ", prec, "SPU");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
seq_printf(p, " Spurious interrupts\n");
seq_printf(p, "%*s: ", prec, "PMI");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
seq_printf(p, " Performance monitoring interrupts\n");
seq_printf(p, "%*s: ", prec, "MCE");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
seq_printf(p, " Machine check exceptions\n");
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_HVMODE)) {
seq_printf(p, "%*s: ", prec, "HMI");
for_each_online_cpu(j)
seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
seq_printf(p, " Hypervisor Maintenance Interrupts\n");
}
#endif
seq_printf(p, "%*s: ", prec, "NMI");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
seq_printf(p, " System Reset interrupts\n");
#ifdef CONFIG_PPC_WATCHDOG
seq_printf(p, "%*s: ", prec, "WDG");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
seq_printf(p, " Watchdog soft-NMI interrupts\n");
#endif
#ifdef CONFIG_PPC_DOORBELL
if (cpu_has_feature(CPU_FTR_DBELL)) {
seq_printf(p, "%*s: ", prec, "DBL");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
seq_printf(p, " Doorbell interrupts\n");
}
#endif
return 0;
}
/*
* /proc/stat helpers
*/
u64 arch_irq_stat_cpu(unsigned int cpu)
{
u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
sum += per_cpu(irq_stat, cpu).pmu_irqs;
sum += per_cpu(irq_stat, cpu).mce_exceptions;
sum += per_cpu(irq_stat, cpu).spurious_irqs;
sum += per_cpu(irq_stat, cpu).timer_irqs_others;
#ifdef CONFIG_PPC_BOOK3S_64
sum += paca_ptrs[cpu]->hmi_irqs;
#endif
sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif
return sum;
}
static inline void check_stack_overflow(unsigned long sp)
{
if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
return;
sp &= THREAD_SIZE - 1;
/* check for stack overflow: is there less than 1/4th free? */
if (unlikely(sp < THREAD_SIZE / 4)) {
pr_err("do_IRQ: stack overflow: %ld\n", sp);
dump_stack();
}
}
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
static __always_inline void call_do_softirq(const void *sp)
{
/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
asm volatile (
PPC_STLU " %%r1, %[offset](%[sp]) ;"
"mr %%r1, %[sp] ;"
#ifdef CONFIG_PPC_KERNEL_PCREL
"bl %[callee]@notoc ;"
#else
"bl %[callee] ;"
#endif
PPC_LL " %%r1, 0(%%r1) ;"
: // Outputs
: // Inputs
[sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_MIN_SIZE),
[callee] "i" (__do_softirq)
: // Clobbers
"lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
"cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
"r11", "r12"
);
}
#endif
DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);
static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
{
unsigned int irq;
trace_irq_entry(regs);
check_stack_overflow(oldsp);
/*
* Query the platform PIC for the interrupt & ack it.
*
* This will typically lower the interrupt line to the CPU
*/
irq = static_call(ppc_get_irq)();
/* We can hard enable interrupts now to allow perf interrupts */
if (should_hard_irq_enable(regs))
do_hard_irq_enable();
/* And finally process it */
if (unlikely(!irq))
__this_cpu_inc(irq_stat.spurious_irqs);
else
generic_handle_irq(irq);
trace_irq_exit(regs);
}
static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
{
register unsigned long r3 asm("r3") = (unsigned long)regs;
/* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
asm volatile (
PPC_STLU " %%r1, %[offset](%[sp]) ;"
"mr %%r4, %%r1 ;"
"mr %%r1, %[sp] ;"
#ifdef CONFIG_PPC_KERNEL_PCREL
"bl %[callee]@notoc ;"
#else
"bl %[callee] ;"
#endif
PPC_LL " %%r1, 0(%%r1) ;"
: // Outputs
"+r" (r3)
: // Inputs
[sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_MIN_SIZE),
[callee] "i" (__do_irq)
: // Clobbers
"lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
"cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
"r11", "r12"
);
}
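/*
 * Entry point for external interrupts: switch to this CPU's hardirq stack
 * unless we are already running on an IRQ stack, then dispatch via
 * __do_irq().
 */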
void __do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
void *cursp, *irqsp, *sirqsp;
/* Switch to the irq stack to handle this */
cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
irqsp = hardirq_ctx[raw_smp_processor_id()];
sirqsp = softirq_ctx[raw_smp_processor_id()];
/* Already there ? If not switch stack and call */
if (unlikely(cursp == irqsp || cursp == sirqsp))
__do_irq(regs, current_stack_pointer);
else
call_do_irq(regs, irqsp);
set_irq_regs(old_regs);
}
DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
{
__do_IRQ(regs);
}
static void *__init alloc_vm_stack(void)
{
return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
NUMA_NO_NODE, (void *)_RET_IP_);
}
static void __init vmap_irqstack_init(void)
{
int i;
for_each_possible_cpu(i) {
softirq_ctx[i] = alloc_vm_stack();
hardirq_ctx[i] = alloc_vm_stack();
}
}
void __init init_IRQ(void)
{
if (IS_ENABLED(CONFIG_VMAP_STACK))
vmap_irqstack_init();
if (ppc_md.init_IRQ)
ppc_md.init_IRQ();
if (!WARN_ON(!ppc_md.get_irq))
static_call_update(ppc_get_irq, ppc_md.get_irq);
}
#ifdef CONFIG_BOOKE_OR_40x
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif
void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
call_do_softirq(softirq_ctx[smp_processor_id()]);
}
#endif
irq_hw_number_t virq_to_hw(unsigned int virq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
#ifdef CONFIG_SMP
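/*
 * Pick a target CPU for an interrupt: round-robin across all online CPUs
 * when the affinity mask covers them all, otherwise the first online CPU
 * in the mask. Returns a hardware CPU id.
 */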
int irq_choose_cpu(const struct cpumask *mask)
{
int cpuid;
if (cpumask_equal(mask, cpu_online_mask)) {
static int irq_rover;
static DEFINE_RAW_SPINLOCK(irq_rover_lock);
unsigned long flags;
/* Round-robin distribution... */
do_round_robin:
raw_spin_lock_irqsave(&irq_rover_lock, flags);
irq_rover = cpumask_next(irq_rover, cpu_online_mask);
if (irq_rover >= nr_cpu_ids)
irq_rover = cpumask_first(cpu_online_mask);
cpuid = irq_rover;
raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
} else {
cpuid = cpumask_first_and(mask, cpu_online_mask);
if (cpuid >= nr_cpu_ids)
goto do_round_robin;
}
return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
return hard_smp_processor_id();
}
#endif
| linux-master | arch/powerpc/kernel/irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
*
* Provide default implementations of the DMA mapping callbacks for
* busses using the iommu infrastructure
*/
#include <linux/dma-direct.h>
#include <linux/pci.h>
#include <asm/iommu.h>
#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
#define can_map_direct(dev, addr) \
((dev)->bus_dma_limit >= phys_to_dma((dev), (addr)))
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr)
{
if (likely(!dev->bus_dma_limit))
return false;
return can_map_direct(dev, addr);
}
#define is_direct_handle(dev, h) ((h) >= (dev)->archdata.dma_offset)
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle)
{
if (likely(!dev->bus_dma_limit))
return false;
return is_direct_handle(dev, dma_handle);
}
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
int nents)
{
struct scatterlist *s;
int i;
if (likely(!dev->bus_dma_limit))
return false;
for_each_sg(sg, s, nents, i) {
if (!can_map_direct(dev, sg_phys(s) + s->offset + s->length))
return false;
}
return true;
}
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
int nents)
{
struct scatterlist *s;
int i;
if (likely(!dev->bus_dma_limit))
return false;
for_each_sg(sg, s, nents, i) {
if (!is_direct_handle(dev, s->dma_address + s->length))
return false;
}
return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_MAP_DIRECT */
/*
* Generic iommu implementation
*/
/* Allocates a contiguous real buffer and creates mappings over it.
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
*/
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
dma_handle, dev->coherent_dma_mask, flag,
dev_to_node(dev));
}
static void dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}
/* Creates TCEs for a user provided buffer. The user buffer must be
* contiguous real kernel storage (not vmalloc). The address passed here
* comprises a page address and offset into that page. The dma_addr_t
* returned will point to the same byte within the page as was passed in.
*/
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
unsigned long attrs)
{
return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
size, dma_get_mask(dev), direction, attrs);
}
static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
attrs);
}
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
dma_get_mask(dev), direction, attrs);
}
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
direction, attrs);
}
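/*
 * A PCI device may bypass the IOMMU and use direct DMA only when the host
 * bridge advertises bypass support and the "weak" fixed mapping mode is
 * not in use.
 */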
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_controller *phb = pci_bus_to_host(pdev->bus);
if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
return false;
return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}
/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
struct iommu_table *tbl;
if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
/*
* dma_iommu_bypass_supported() sets dma_max when there is
* 1:1 mapping but it is somehow limited.
* ibm,pmemory is one example.
*/
dev->dma_ops_bypass = dev->bus_dma_limit == 0;
if (!dev->dma_ops_bypass)
dev_warn(dev,
"iommu: 64-bit OK but direct DMA is limited by %llx\n",
dev->bus_dma_limit);
else
dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
return 1;
}
tbl = get_iommu_table_base(dev);
if (!tbl) {
dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
return 0;
}
if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
mask, tbl->it_offset << tbl->it_page_shift);
return 0;
}
dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
dev->dma_ops_bypass = false;
return 1;
}
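/*
 * Report the DMA mask a device would need: the direct-mapping mask if
 * bypass is possible, otherwise a mask wide enough to cover the whole
 * IOMMU table window (offset + size, scaled by the table page size).
 */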
u64 dma_iommu_get_required_mask(struct device *dev)
{
struct iommu_table *tbl = get_iommu_table_base(dev);
u64 mask;
if (dev_is_pci(dev)) {
u64 bypass_mask = dma_direct_get_required_mask(dev);
if (dma_iommu_dma_supported(dev, bypass_mask)) {
dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
return bypass_mask;
}
}
if (!tbl)
return 0;
mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
tbl->it_page_shift - 1);
mask += mask - 1;
return mask;
}
const struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
.map_sg = dma_iommu_map_sg,
.unmap_sg = dma_iommu_unmap_sg,
.dma_supported = dma_iommu_dma_supported,
.map_page = dma_iommu_map_page,
.unmap_page = dma_iommu_unmap_page,
.get_required_mask = dma_iommu_get_required_mask,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
| linux-master | arch/powerpc/kernel/dma-iommu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2017, Nicholas Piggin, IBM Corporation
*/
#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt
#include <linux/export.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/mce.h>
#include <asm/mmu.h>
#include <asm/setup.h>
/* Device-tree visible constants follow */
#define ISA_V3_0B 3000
#define ISA_V3_1 3100
#define USABLE_PR (1U << 0)
#define USABLE_OS (1U << 1)
#define USABLE_HV (1U << 2)
#define HV_SUPPORT_HFSCR (1U << 0)
#define OS_SUPPORT_FSCR (1U << 0)
/* For parsing, we define all bits set as "NONE" case */
#define HV_SUPPORT_NONE 0xffffffffU
#define OS_SUPPORT_NONE 0xffffffffU
struct dt_cpu_feature {
const char *name;
uint32_t isa;
uint32_t usable_privilege;
uint32_t hv_support;
uint32_t os_support;
uint32_t hfscr_bit_nr;
uint32_t fscr_bit_nr;
uint32_t hwcap_bit_nr;
/* fdt parsing */
unsigned long node;
int enabled;
int disabled;
};
#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)
#define COMMON_USER_BASE (PPC_FEATURE_32 | PPC_FEATURE_64 | \
PPC_FEATURE_ARCH_2_06 |\
PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER2_BASE (PPC_FEATURE2_ARCH_2_07 | \
PPC_FEATURE2_ISEL)
/*
* Set up the base CPU
*/
static int hv_mode;
static struct {
u64 lpcr;
u64 hfscr;
u64 fscr;
u64 pcr;
} system_registers;
static void (*init_pmu_registers)(void);
static void __restore_cpu_cpufeatures(void)
{
mtspr(SPRN_LPCR, system_registers.lpcr);
if (hv_mode) {
mtspr(SPRN_LPID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_HFSCR, system_registers.hfscr);
mtspr(SPRN_PCR, system_registers.pcr);
}
mtspr(SPRN_FSCR, system_registers.fscr);
if (init_pmu_registers)
init_pmu_registers();
}
static char dt_cpu_name[64];
static struct cpu_spec __initdata base_cpu_spec = {
.cpu_name = NULL,
.cpu_features = CPU_FTRS_DT_CPU_BASE,
.cpu_user_features = COMMON_USER_BASE,
.cpu_user_features2 = COMMON_USER2_BASE,
.mmu_features = 0,
.icache_bsize = 32, /* minimum block size, fixed by */
.dcache_bsize = 32, /* cache info init. */
.num_pmcs = 0,
.pmc_type = PPC_PMC_DEFAULT,
.cpu_setup = NULL,
.cpu_restore = __restore_cpu_cpufeatures,
.machine_check_early = NULL,
.platform = NULL,
};
static void __init cpufeatures_setup_cpu(void)
{
set_cur_cpu_spec(&base_cpu_spec);
cur_cpu_spec->pvr_mask = -1;
cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);
/* Initialize the base environment -- clear FSCR/HFSCR. */
hv_mode = !!(mfmsr() & MSR_HV);
if (hv_mode) {
cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
mtspr(SPRN_HFSCR, 0);
}
mtspr(SPRN_FSCR, 0);
mtspr(SPRN_PCR, PCR_MASK);
/*
* LPCR does not get cleared, to match behaviour with secondaries
* in __restore_cpu_cpufeatures. Once the idle code is fixed, this
* could clear LPCR too.
*/
}
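/*
 * Generic recipe for a feature node the kernel does not know by name:
 * set the advertised HFSCR/FSCR bits when the node says that is the
 * enablement method, and expose any hwcap bit to userspace. Features
 * without a known recipe are left disabled.
 */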
static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
{
if (f->hv_support == HV_SUPPORT_NONE) {
} else if (f->hv_support & HV_SUPPORT_HFSCR) {
u64 hfscr = mfspr(SPRN_HFSCR);
hfscr |= 1UL << f->hfscr_bit_nr;
mtspr(SPRN_HFSCR, hfscr);
} else {
/* Does not have a known recipe */
return 0;
}
if (f->os_support == OS_SUPPORT_NONE) {
} else if (f->os_support & OS_SUPPORT_FSCR) {
u64 fscr = mfspr(SPRN_FSCR);
fscr |= 1UL << f->fscr_bit_nr;
mtspr(SPRN_FSCR, fscr);
} else {
/* Does not have a known recipe */
return 0;
}
if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
uint32_t word = f->hwcap_bit_nr / 32;
uint32_t bit = f->hwcap_bit_nr % 32;
if (word == 0)
cur_cpu_spec->cpu_user_features |= 1U << bit;
else if (word == 1)
cur_cpu_spec->cpu_user_features2 |= 1U << bit;
else
pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
}
return 1;
}
static int __init feat_enable(struct dt_cpu_feature *f)
{
if (f->hv_support != HV_SUPPORT_NONE) {
if (f->hfscr_bit_nr != -1) {
u64 hfscr = mfspr(SPRN_HFSCR);
hfscr |= 1UL << f->hfscr_bit_nr;
mtspr(SPRN_HFSCR, hfscr);
}
}
if (f->os_support != OS_SUPPORT_NONE) {
if (f->fscr_bit_nr != -1) {
u64 fscr = mfspr(SPRN_FSCR);
fscr |= 1UL << f->fscr_bit_nr;
mtspr(SPRN_FSCR, fscr);
}
}
if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
uint32_t word = f->hwcap_bit_nr / 32;
uint32_t bit = f->hwcap_bit_nr % 32;
if (word == 0)
cur_cpu_spec->cpu_user_features |= 1U << bit;
else if (word == 1)
cur_cpu_spec->cpu_user_features2 |= 1U << bit;
else
pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
}
return 1;
}
static int __init feat_disable(struct dt_cpu_feature *f)
{
return 0;
}
static int __init feat_enable_hv(struct dt_cpu_feature *f)
{
u64 lpcr;
if (!hv_mode) {
pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
return 0;
}
mtspr(SPRN_LPID, 0);
mtspr(SPRN_AMOR, ~0);
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~LPCR_LPES0; /* HV external interrupts */
mtspr(SPRN_LPCR, lpcr);
cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
return 1;
}
static int __init feat_enable_le(struct dt_cpu_feature *f)
{
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
return 1;
}
static int __init feat_enable_smt(struct dt_cpu_feature *f)
{
cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
return 1;
}
static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
{
u64 lpcr;
/* Set PECE wakeup modes for ISA 207 */
lpcr = mfspr(SPRN_LPCR);
lpcr |= LPCR_PECE0;
lpcr |= LPCR_PECE1;
lpcr |= LPCR_PECE2;
mtspr(SPRN_LPCR, lpcr);
return 1;
}
static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
{
u64 lpcr;
/* Set PECE wakeup modes for ISAv3.0B */
lpcr = mfspr(SPRN_LPCR);
lpcr |= LPCR_PECE0;
lpcr |= LPCR_PECE1;
lpcr |= LPCR_PECE2;
mtspr(SPRN_LPCR, lpcr);
return 1;
}
static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
u64 lpcr;
if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
return 0;
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~LPCR_ISL;
/* VRMASD */
lpcr |= LPCR_VPM0;
lpcr &= ~LPCR_VPM1;
lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
mtspr(SPRN_LPCR, lpcr);
cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
return 1;
}
static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
u64 lpcr;
if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
return 0;
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
mtspr(SPRN_LPCR, lpcr);
cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
return 1;
}
static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
if (!IS_ENABLED(CONFIG_PPC_RADIX_MMU))
return 0;
cur_cpu_spec->mmu_features |= MMU_FTR_KERNEL_RO;
cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
return 1;
}
static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
u64 lpcr;
/*
* Linux relies on FSCR[DSCR] being clear, so that we can take the
* facility unavailable interrupt and track the task's usage of DSCR.
* See facility_unavailable_exception().
* Clear the bit here so that feat_enable() doesn't set it.
*/
f->fscr_bit_nr = -1;
feat_enable(f);
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~LPCR_DPFD;
lpcr |= (4UL << LPCR_DPFD_SH);
mtspr(SPRN_LPCR, lpcr);
return 1;
}
static void __init hfscr_pmu_enable(void)
{
u64 hfscr = mfspr(SPRN_HFSCR);
hfscr |= PPC_BIT(60);
mtspr(SPRN_HFSCR, hfscr);
}
static void init_pmu_power8(void)
{
if (hv_mode) {
mtspr(SPRN_MMCRC, 0);
mtspr(SPRN_MMCRH, 0);
}
mtspr(SPRN_MMCRA, 0);
mtspr(SPRN_MMCR0, MMCR0_FC);
mtspr(SPRN_MMCR1, 0);
mtspr(SPRN_MMCR2, 0);
mtspr(SPRN_MMCRS, 0);
}
static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
{
cur_cpu_spec->platform = "power8";
cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;
return 1;
}
static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
{
hfscr_pmu_enable();
init_pmu_power8();
init_pmu_registers = init_pmu_power8;
cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
if (pvr_version_is(PVR_POWER8E))
cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;
cur_cpu_spec->num_pmcs = 6;
cur_cpu_spec->pmc_type = PPC_PMC_IBM;
return 1;
}
static void init_pmu_power9(void)
{
if (hv_mode)
mtspr(SPRN_MMCRC, 0);
mtspr(SPRN_MMCRA, 0);
mtspr(SPRN_MMCR0, MMCR0_FC);
mtspr(SPRN_MMCR1, 0);
mtspr(SPRN_MMCR2, 0);
}
static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
{
cur_cpu_spec->platform = "power9";
cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;
return 1;
}
static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
{
hfscr_pmu_enable();
init_pmu_power9();
init_pmu_registers = init_pmu_power9;
cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
cur_cpu_spec->num_pmcs = 6;
cur_cpu_spec->pmc_type = PPC_PMC_IBM;
return 1;
}
static void init_pmu_power10(void)
{
init_pmu_power9();
mtspr(SPRN_MMCR3, 0);
mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
}
static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)
{
hfscr_pmu_enable();
init_pmu_power10();
init_pmu_registers = init_pmu_power10;
cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
cur_cpu_spec->num_pmcs = 6;
cur_cpu_spec->pmc_type = PPC_PMC_IBM;
return 1;
}
static int __init feat_enable_mce_power10(struct dt_cpu_feature *f)
{
cur_cpu_spec->platform = "power10";
cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p10;
return 1;
}
static int __init feat_enable_tm(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
feat_enable(f);
cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
return 1;
#endif
return 0;
}
static int __init feat_enable_fp(struct dt_cpu_feature *f)
{
feat_enable(f);
cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;
return 1;
}
static int __init feat_enable_vector(struct dt_cpu_feature *f)
{
#ifdef CONFIG_ALTIVEC
feat_enable(f);
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
return 1;
#endif
return 0;
}
static int __init feat_enable_vsx(struct dt_cpu_feature *f)
{
#ifdef CONFIG_VSX
feat_enable(f);
cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;
return 1;
#endif
return 0;
}
static int __init feat_enable_purr(struct dt_cpu_feature *f)
{
cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;
return 1;
}
static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
/*
* PPC_FEATURE2_EBB is enabled in PMU init code because it has
* historically been related to the PMU facility. This may have
* to be decoupled if EBB becomes more generic. For now, follow
* existing convention.
*/
f->hwcap_bit_nr = -1;
feat_enable(f);
return 1;
}
static int __init feat_enable_dbell(struct dt_cpu_feature *f)
{
u64 lpcr;
/* P9 has an HFSCR for privileged state */
feat_enable(f);
cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;
lpcr = mfspr(SPRN_LPCR);
lpcr |= LPCR_PECEDH; /* hyp doorbell wakeup */
mtspr(SPRN_LPCR, lpcr);
return 1;
}
static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
u64 lpcr;
/*
 * POWER9 XIVE interrupts, including those in OPAL XICS compatibility
 * mode, are always delivered as hypervisor virtualization interrupts
 * (HVI) rather than EE.
 *
 * However LPES0 is not set here, on the chance that an EE does get
* delivered to the host somehow, the EE handler would not expect it
* to be delivered in LPES0 mode (e.g., using SRR[01]). This could
* happen if there is a bug in interrupt controller code, or IC is
* misconfigured in systemsim.
*/
lpcr = mfspr(SPRN_LPCR);
lpcr |= LPCR_HVICE; /* enable hvi interrupts */
lpcr |= LPCR_HEIC; /* disable ee interrupts when MSR_HV */
lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
mtspr(SPRN_LPCR, lpcr);
return 1;
}
static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
{
cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;
return 1;
}
static int __init feat_enable_mma(struct dt_cpu_feature *f)
{
u64 pcr;
feat_enable(f);
pcr = mfspr(SPRN_PCR);
pcr &= ~PCR_MMA_DIS;
mtspr(SPRN_PCR, pcr);
return 1;
}
struct dt_cpu_feature_match {
const char *name;
int (*enable)(struct dt_cpu_feature *f);
u64 cpu_ftr_bit_mask;
};
static struct dt_cpu_feature_match __initdata
dt_cpu_feature_match_table[] = {
{"hypervisor", feat_enable_hv, 0},
{"big-endian", feat_enable, 0},
{"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
{"smt", feat_enable_smt, 0},
{"interrupt-facilities", feat_enable, 0},
{"system-call-vectored", feat_enable, 0},
{"timer-facilities", feat_enable, 0},
{"timer-facilities-v3", feat_enable, 0},
{"debug-facilities", feat_enable, 0},
{"come-from-address-register", feat_enable, CPU_FTR_CFAR},
{"branch-tracing", feat_enable, 0},
{"floating-point", feat_enable_fp, 0},
{"vector", feat_enable_vector, 0},
{"vector-scalar", feat_enable_vsx, 0},
{"vector-scalar-v3", feat_enable, 0},
{"decimal-floating-point", feat_enable, 0},
{"decimal-integer", feat_enable, 0},
{"quadword-load-store", feat_enable, 0},
{"vector-crypto", feat_enable, 0},
{"mmu-hash", feat_enable_mmu_hash, 0},
{"mmu-radix", feat_enable_mmu_radix, 0},
{"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
{"virtual-page-class-key-protection", feat_enable, 0},
{"transactional-memory", feat_enable_tm, CPU_FTR_TM},
{"transactional-memory-v3", feat_enable_tm, 0},
{"tm-suspend-hypervisor-assist", feat_enable, CPU_FTR_P9_TM_HV_ASSIST},
{"tm-suspend-xer-so-bug", feat_enable, CPU_FTR_P9_TM_XER_SO_BUG},
{"idle-nap", feat_enable_idle_nap, 0},
/* alignment-interrupt-dsisr ignored */
{"idle-stop", feat_enable_idle_stop, 0},
{"machine-check-power8", feat_enable_mce_power8, 0},
{"performance-monitor-power8", feat_enable_pmu_power8, 0},
{"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
{"event-based-branch", feat_enable_ebb, 0},
{"target-address-register", feat_enable, 0},
{"branch-history-rolling-buffer", feat_enable, 0},
{"control-register", feat_enable, CPU_FTR_CTRL},
{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
{"processor-utilization-of-resources-register", feat_enable_purr, 0},
{"no-execute", feat_enable, 0},
{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
{"cache-inhibited-large-page", feat_enable_large_ci, 0},
{"coprocessor-icswx", feat_enable, 0},
{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
{"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
{"wait", feat_enable, 0},
{"atomic-memory-operations", feat_enable, 0},
{"branch-v3", feat_enable, 0},
{"copy-paste", feat_enable, 0},
{"decimal-floating-point-v3", feat_enable, 0},
{"decimal-integer-v3", feat_enable, 0},
{"fixed-point-v3", feat_enable, 0},
{"floating-point-v3", feat_enable, 0},
{"group-start-register", feat_enable, 0},
{"pc-relative-addressing", feat_enable, 0},
{"machine-check-power9", feat_enable_mce_power9, 0},
{"machine-check-power10", feat_enable_mce_power10, 0},
{"performance-monitor-power9", feat_enable_pmu_power9, 0},
{"performance-monitor-power10", feat_enable_pmu_power10, 0},
{"event-based-branch-v3", feat_enable, 0},
{"random-number-generator", feat_enable, 0},
{"system-call-vectored", feat_disable, 0},
{"trace-interrupt-v3", feat_enable, 0},
{"vector-v3", feat_enable, 0},
{"vector-binary128", feat_enable, 0},
{"vector-binary16", feat_enable, 0},
{"wait-v3", feat_enable, 0},
{"prefix-instructions", feat_enable, 0},
{"matrix-multiply-assist", feat_enable_mma, 0},
{"debug-facilities-v31", feat_enable, CPU_FTR_DAWR1},
};
static bool __initdata using_dt_cpu_ftrs;
static bool __initdata enable_unknown = true;
static int __init dt_cpu_ftrs_parse(char *str)
{
if (!str)
return 0;
if (!strcmp(str, "off"))
using_dt_cpu_ftrs = false;
else if (!strcmp(str, "known"))
enable_unknown = false;
else
return 1;
return 0;
}
early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
static void __init cpufeatures_setup_start(u32 isa)
{
pr_info("setup for ISA %d\n", isa);
if (isa >= ISA_V3_0B) {
cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
}
if (isa >= ISA_V3_1) {
cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_31;
cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_1;
}
}
static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
{
const struct dt_cpu_feature_match *m;
bool known = false;
int i;
for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
m = &dt_cpu_feature_match_table[i];
if (!strcmp(f->name, m->name)) {
known = true;
if (m->enable(f)) {
cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
break;
}
pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
f->name);
return false;
}
}
if (!known && (!enable_unknown || !feat_try_enable_unknown(f))) {
pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
f->name);
return false;
}
if (known)
pr_debug("enabling: %s\n", f->name);
else
pr_debug("enabling: %s (unknown)\n", f->name);
return true;
}
/*
* Handle POWER9 broadcast tlbie invalidation issue using
* cpu feature flag.
*/
static __init void update_tlbie_feature_flag(unsigned long pvr)
{
if (PVR_VER(pvr) == PVR_POWER9) {
/*
* Set the tlbie feature flag for anything below
* Nimbus DD 2.3 and Cumulus DD 1.3
*/
if ((pvr & 0xe000) == 0) {
/* Nimbus */
if ((pvr & 0xfff) < 0x203)
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
} else if ((pvr & 0xc000) == 0) {
/* Cumulus */
if ((pvr & 0xfff) < 0x103)
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
} else {
WARN_ONCE(1, "Unknown PVR");
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
}
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_ERAT_BUG;
}
}
static __init void cpufeatures_cpu_quirks(void)
{
unsigned long version = mfspr(SPRN_PVR);
/*
* Not all quirks can be derived from the cpufeatures device tree.
*/
if ((version & 0xffffefff) == 0x004e0200) {
/* DD2.0 has no feature flag */
cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
} else if ((version & 0xffffefff) == 0x004e0201) {
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
} else if ((version & 0xffffefff) == 0x004e0202) {
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
} else if ((version & 0xffffefff) == 0x004e0203) {
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
} else if ((version & 0xffff0000) == 0x004e0000) {
/* DD2.1 and up have DD2_1 */
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
}
if ((version & 0xffff0000) == 0x004e0000) {
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
}
update_tlbie_feature_flag(version);
}
static void __init cpufeatures_setup_finished(void)
{
cpufeatures_cpu_quirks();
if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
}
/* Make sure powerpc_base_platform is non-NULL */
powerpc_base_platform = cur_cpu_spec->platform;
system_registers.lpcr = mfspr(SPRN_LPCR);
system_registers.hfscr = mfspr(SPRN_HFSCR);
system_registers.fscr = mfspr(SPRN_FSCR);
system_registers.pcr = mfspr(SPRN_PCR);
pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}
static int __init disabled_on_cmdline(void)
{
unsigned long root, chosen;
const char *p;
root = of_get_flat_dt_root();
chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
if (chosen == -FDT_ERR_NOTFOUND)
return false;
p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
if (!p)
return false;
if (strstr(p, "dt_cpu_ftrs=off"))
return true;
return false;
}
static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
int depth, void *data)
{
if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
&& of_get_flat_dt_prop(node, "isa", NULL))
return 1;
return 0;
}
bool __init dt_cpu_ftrs_in_use(void)
{
return using_dt_cpu_ftrs;
}
bool __init dt_cpu_ftrs_init(void *fdt)
{
using_dt_cpu_ftrs = false;
/* Set up and verify the FDT; if it fails we just bail */
if (!early_init_dt_verify(fdt))
return false;
if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
return false;
if (disabled_on_cmdline())
return false;
cpufeatures_setup_cpu();
using_dt_cpu_ftrs = true;
return true;
}
static int nr_dt_cpu_features;
static struct dt_cpu_feature *dt_cpu_features;
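/*
 * Parse one "ibm,powerpc-cpu-features" subnode into dt_cpu_features[i],
 * sanity-check the privilege/HFSCR/FSCR/hwcap combinations, and enable
 * the feature immediately if it has no dependencies.
 */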
static int __init process_cpufeatures_node(unsigned long node,
const char *uname, int i)
{
const __be32 *prop;
struct dt_cpu_feature *f;
int len;
f = &dt_cpu_features[i];
f->node = node;
f->name = uname;
prop = of_get_flat_dt_prop(node, "isa", &len);
if (!prop) {
pr_warn("%s: missing isa property\n", uname);
return 0;
}
f->isa = be32_to_cpup(prop);
prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
if (!prop) {
pr_warn("%s: missing usable-privilege property", uname);
return 0;
}
f->usable_privilege = be32_to_cpup(prop);
prop = of_get_flat_dt_prop(node, "hv-support", &len);
if (prop)
f->hv_support = be32_to_cpup(prop);
else
f->hv_support = HV_SUPPORT_NONE;
prop = of_get_flat_dt_prop(node, "os-support", &len);
if (prop)
f->os_support = be32_to_cpup(prop);
else
f->os_support = OS_SUPPORT_NONE;
prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
if (prop)
f->hfscr_bit_nr = be32_to_cpup(prop);
else
f->hfscr_bit_nr = -1;
prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
if (prop)
f->fscr_bit_nr = be32_to_cpup(prop);
else
f->fscr_bit_nr = -1;
prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
if (prop)
f->hwcap_bit_nr = be32_to_cpup(prop);
else
f->hwcap_bit_nr = -1;
if (f->usable_privilege & USABLE_HV) {
if (!(mfmsr() & MSR_HV)) {
pr_warn("%s: HV feature passed to guest\n", uname);
return 0;
}
if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
return 0;
}
if (f->hv_support == HV_SUPPORT_HFSCR) {
if (f->hfscr_bit_nr == -1) {
pr_warn("%s: missing hfscr_bit_nr\n", uname);
return 0;
}
}
} else {
if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
return 0;
}
}
if (f->usable_privilege & USABLE_OS) {
if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
pr_warn("%s: unwanted fscr_bit_nr\n", uname);
return 0;
}
if (f->os_support == OS_SUPPORT_FSCR) {
if (f->fscr_bit_nr == -1) {
pr_warn("%s: missing fscr_bit_nr\n", uname);
return 0;
}
}
} else {
if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
return 0;
}
}
if (!(f->usable_privilege & USABLE_PR)) {
if (f->hwcap_bit_nr != -1) {
pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
return 0;
}
}
/* Do all the independent features in the first pass */
if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
if (cpufeatures_process_feature(f))
f->enabled = 1;
else
f->disabled = 1;
}
return 0;
}
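/*
 * Recursively enable a feature once every feature named in its
 * "dependencies" phandle list has been processed; if any dependency ends
 * up disabled, this feature is disabled too.
 */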
static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
{
const __be32 *prop;
int len;
int nr_deps;
int i;
if (f->enabled || f->disabled)
return;
prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
if (!prop) {
pr_warn("%s: missing dependencies property", f->name);
return;
}
nr_deps = len / sizeof(int);
for (i = 0; i < nr_deps; i++) {
unsigned long phandle = be32_to_cpu(prop[i]);
int j;
for (j = 0; j < nr_dt_cpu_features; j++) {
struct dt_cpu_feature *d = &dt_cpu_features[j];
if (of_get_flat_dt_phandle(d->node) == phandle) {
cpufeatures_deps_enable(d);
if (d->disabled) {
f->disabled = 1;
return;
}
}
}
}
if (cpufeatures_process_feature(f))
f->enabled = 1;
else
f->disabled = 1;
}
static int __init scan_cpufeatures_subnodes(unsigned long node,
const char *uname,
void *data)
{
int *count = data;
process_cpufeatures_node(node, uname, *count);
(*count)++;
return 0;
}
static int __init count_cpufeatures_subnodes(unsigned long node,
const char *uname,
void *data)
{
int *count = data;
(*count)++;
return 0;
}
static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
*uname, int depth, void *data)
{
const __be32 *prop;
int count, i;
u32 isa;
/* We are scanning "ibm,powerpc-cpu-features" nodes only */
if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
return 0;
prop = of_get_flat_dt_prop(node, "isa", NULL);
if (!prop)
/* We checked before, "can't happen" */
return 0;
isa = be32_to_cpup(prop);
/* Count and allocate space for cpu features */
of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
&nr_dt_cpu_features);
dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE);
if (!dt_cpu_features)
panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
__func__,
sizeof(struct dt_cpu_feature) * nr_dt_cpu_features,
PAGE_SIZE);
cpufeatures_setup_start(isa);
/* Scan nodes into dt_cpu_features and enable those without deps */
count = 0;
of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);
/* Recursively enable remaining features with dependencies */
for (i = 0; i < nr_dt_cpu_features; i++) {
struct dt_cpu_feature *f = &dt_cpu_features[i];
cpufeatures_deps_enable(f);
}
prop = of_get_flat_dt_prop(node, "display-name", NULL);
if (prop && strlen((char *)prop) != 0) {
strscpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
cur_cpu_spec->cpu_name = dt_cpu_name;
}
cpufeatures_setup_finished();
memblock_free(dt_cpu_features,
sizeof(struct dt_cpu_feature) * nr_dt_cpu_features);
return 0;
}
void __init dt_cpu_ftrs_scan(void)
{
if (!using_dt_cpu_ftrs)
return;
of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}
| linux-master | arch/powerpc/kernel/dt_cpu_ftrs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Sysfs entries for PCI Error Recovery for PAPR-compliant platform.
* Copyright IBM Corporation 2007
* Copyright Linas Vepstas <[email protected]> 2007
*
* Send comments and feedback to Linas Vepstas <[email protected]>
*/
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
/**
* EEH_SHOW_ATTR -- Create sysfs entry for eeh statistic
* @_name: name of file in sysfs directory
* @_memb: name of member in struct eeh_dev to access
* @_format: printf format for display
*
* All of the attributes look very similar, so just
* auto-gen a cut-n-paste routine to display them.
*/
#define EEH_SHOW_ATTR(_name,_memb,_format) \
static ssize_t eeh_show_##_name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct pci_dev *pdev = to_pci_dev(dev); \
struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); \
\
if (!edev) \
return 0; \
\
return sprintf(buf, _format "\n", edev->_memb); \
} \
static DEVICE_ATTR(_name, 0444, eeh_show_##_name, NULL);
EEH_SHOW_ATTR(eeh_mode, mode, "0x%x");
EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
static ssize_t eeh_pe_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
int state;
if (!edev || !edev->pe)
return -ENODEV;
state = eeh_ops->get_state(edev->pe, NULL);
return sprintf(buf, "0x%08x 0x%08x\n",
state, edev->pe->state);
}
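/* Writing to eeh_pe_state thaws a frozen (isolated) PE. */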
static ssize_t eeh_pe_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
if (!edev || !edev->pe)
return -ENODEV;
/* Nothing to do if it's not frozen */
if (!(edev->pe->state & EEH_PE_ISOLATED))
return count;
if (eeh_unfreeze_pe(edev->pe))
return -EIO;
eeh_pe_state_clear(edev->pe, EEH_PE_ISOLATED, true);
return count;
}
static DEVICE_ATTR_RW(eeh_pe_state);
#if defined(CONFIG_PCI_IOV) && defined(CONFIG_PPC_PSERIES)
static ssize_t eeh_notify_resume_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
struct pci_dn *pdn = pci_get_pdn(pdev);
if (!edev || !edev->pe)
return -ENODEV;
return sprintf(buf, "%d\n", pdn->last_allow_rc);
}
static ssize_t eeh_notify_resume_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
if (!edev || !edev->pe || !eeh_ops->notify_resume)
return -ENODEV;
if (eeh_ops->notify_resume(edev))
return -EIO;
return count;
}
static DEVICE_ATTR_RW(eeh_notify_resume);
static int eeh_notify_resume_add(struct pci_dev *pdev)
{
struct device_node *np;
int rc = 0;
np = pci_device_to_OF_node(pdev->is_physfn ? pdev : pdev->physfn);
if (of_property_read_bool(np, "ibm,is-open-sriov-pf"))
rc = device_create_file(&pdev->dev, &dev_attr_eeh_notify_resume);
return rc;
}
static void eeh_notify_resume_remove(struct pci_dev *pdev)
{
struct device_node *np;
np = pci_device_to_OF_node(pdev->is_physfn ? pdev : pdev->physfn);
if (of_property_read_bool(np, "ibm,is-open-sriov-pf"))
device_remove_file(&pdev->dev, &dev_attr_eeh_notify_resume);
}
#else
static inline int eeh_notify_resume_add(struct pci_dev *pdev) { return 0; }
static inline void eeh_notify_resume_remove(struct pci_dev *pdev) { }
#endif /* CONFIG_PCI_IOV && CONFIG_PPC_PSERIES */
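/* Create the EEH sysfs attributes for a PCI device (at most once per device). */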
void eeh_sysfs_add_device(struct pci_dev *pdev)
{
struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
int rc = 0;
if (!eeh_enabled())
return;
if (edev && (edev->mode & EEH_DEV_SYSFS))
return;
rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_state);
rc += eeh_notify_resume_add(pdev);
if (rc)
pr_warn("EEH: Unable to create sysfs entries\n");
else if (edev)
edev->mode |= EEH_DEV_SYSFS;
}
void eeh_sysfs_remove_device(struct pci_dev *pdev)
{
struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
if (!edev) {
WARN_ON(eeh_enabled());
return;
}
edev->mode &= ~EEH_DEV_SYSFS;
/*
* The parent directory might have been removed. We needn't
* continue for that case.
*/
if (!pdev->dev.kobj.sd)
return;
device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
device_remove_file(&pdev->dev, &dev_attr_eeh_pe_state);
eeh_notify_resume_remove(pdev);
}
| linux-master | arch/powerpc/kernel/eeh_sysfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Extracted from cputable.c
*
* Copyright (C) 2001 Ben. Herrenschmidt ([email protected])
*
* Modifications for ppc64:
* Copyright (C) 2003 Dave Engebretsen <[email protected]>
* Copyright (C) 2005 Stephen Rothwell, IBM Corporation
*/
#include <linux/export.h>
#include <linux/cache.h>
#include <linux/of.h>
#include <asm/firmware.h>
#include <asm/kvm_guest.h>
#ifdef CONFIG_PPC64
unsigned long powerpc_firmware_features __read_mostly;
EXPORT_SYMBOL_GPL(powerpc_firmware_features);
#endif
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
DEFINE_STATIC_KEY_FALSE(kvm_guest);
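/*
 * Flip the kvm_guest static key when the device tree exposes a
 * /hypervisor node compatible with "linux,kvm".
 */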
int __init check_kvm_guest(void)
{
struct device_node *hyper_node;
hyper_node = of_find_node_by_path("/hypervisor");
if (!hyper_node)
return 0;
if (of_device_is_compatible(hyper_node, "linux,kvm"))
static_branch_enable(&kvm_guest);
of_node_put(hyper_node);
return 0;
}
core_initcall(check_kvm_guest); // before kvm_guest_init()
#endif
| linux-master | arch/powerpc/kernel/firmware.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/memory.h>
#include <linux/static_call.h>
#include <asm/code-patching.h>
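/*
 * Retarget a static call trampoline: a NULL function patches in a blr, a
 * target within branch range gets a direct branch, and a far target is
 * reached indirectly via the trampoline's data slot (the return-0 case
 * branches to a stub embedded in the trampoline itself).
 */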
void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
{
int err;
bool is_ret0 = (func == __static_call_return0);
unsigned long target = (unsigned long)(is_ret0 ? tramp + PPC_SCT_RET0 : func);
bool is_short = is_offset_in_branch_range((long)target - (long)tramp);
if (!tramp)
return;
mutex_lock(&text_mutex);
if (func && !is_short) {
err = patch_instruction(tramp + PPC_SCT_DATA, ppc_inst(target));
if (err)
goto out;
}
if (!func)
err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR()));
else if (is_short)
err = patch_branch(tramp, target, 0);
else
err = patch_instruction(tramp, ppc_inst(PPC_RAW_NOP()));
out:
mutex_unlock(&text_mutex);
if (err)
panic("%s: patching failed %pS at %pS\n", __func__, func, tramp);
}
EXPORT_SYMBOL_GPL(arch_static_call_transform);
| linux-master | arch/powerpc/kernel/static_call.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Author: Kumar Gala <[email protected]>
*
* Copyright 2009 Freescale Semiconductor Inc.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/hardirq.h>
#include <asm/dbell.h>
#include <asm/interrupt.h>
#include <asm/irq_regs.h>
#include <asm/kvm_ppc.h>
#include <asm/trace.h>
#ifdef CONFIG_SMP
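/*
 * Doorbell interrupts are used as IPIs between threads/cores: synchronise
 * with the sender, clear the host IPI flag used by KVM, and demultiplex
 * the pending IPI actions.
 */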
DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
{
struct pt_regs *old_regs = set_irq_regs(regs);
trace_doorbell_entry(regs);
ppc_msgsync();
if (should_hard_irq_enable(regs))
do_hard_irq_enable();
kvmppc_clear_host_ipi(smp_processor_id());
__this_cpu_inc(irq_stat.doorbell_irqs);
smp_ipi_demux_relaxed(); /* already performed the barrier */
trace_doorbell_exit(regs);
set_irq_regs(old_regs);
}
#else /* CONFIG_SMP */
DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
{
printk(KERN_WARNING "Received doorbell on non-smp system\n");
}
#endif /* CONFIG_SMP */
| linux-master | arch/powerpc/kernel/dbell.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* I/O string operations
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
* Copyright (C) 2006 IBM Corporation
*
* Largely rewritten by Cort Dougan ([email protected])
* and Paul Mackerras.
*
* Adapted for iSeries by Mike Corrigan ([email protected])
* PPC64 updates by Dave Engebretsen ([email protected])
*
* Rewritten in C by Stephen Rothwell.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/io.h>
#include <asm/firmware.h>
#include <asm/bug.h>
/* See definition in io.h */
bool isa_io_special;
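/*
 * The string I/O helpers below bracket the device accesses with sync,
 * order the individual accesses with eieio, and (for reads) finish with
 * the twi/isync sequence to ensure the last load has completed before
 * returning.
 */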
void _insb(const volatile u8 __iomem *port, void *buf, long count)
{
u8 *tbuf = buf;
u8 tmp;
if (unlikely(count <= 0))
return;
asm volatile("sync");
do {
tmp = *port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insb);
void _outsb(volatile u8 __iomem *port, const void *buf, long count)
{
const u8 *tbuf = buf;
if (unlikely(count <= 0))
return;
asm volatile("sync");
do {
*port = *tbuf++;
} while (--count != 0);
asm volatile("sync");
}
EXPORT_SYMBOL(_outsb);
void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
{
u16 *tbuf = buf;
u16 tmp;
if (unlikely(count <= 0))
return;
asm volatile("sync");
do {
tmp = *port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insw_ns);
void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
{
const u16 *tbuf = buf;
if (unlikely(count <= 0))
return;
asm volatile("sync");
do {
*port = *tbuf++;
} while (--count != 0);
asm volatile("sync");
}
EXPORT_SYMBOL(_outsw_ns);
void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
{
u32 *tbuf = buf;
u32 tmp;
if (unlikely(count <= 0))
return;
asm volatile("sync");
do {
tmp = *port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insl_ns);
void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
{
const u32 *tbuf = buf;
if (unlikely(count <= 0))
return;
asm volatile("sync");
do {
*port = *tbuf++;
} while (--count != 0);
asm volatile("sync");
}
EXPORT_SYMBOL(_outsl_ns);
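/*
 * Usage sketch (hypothetical driver code, for illustration only): the
 * string accessors above sit behind the generic insb()/outsb() family,
 * which drain or fill a fixed-width device FIFO register one element at
 * a time while preserving ordering with the sync/eieio barriers above.
 */
static void __maybe_unused example_drain_byte_fifo(const volatile u8 __iomem *fifo,
						   u8 *buf, long n)
{
	/* read n bytes from the same byte-wide register into buf */
	_insb(fifo, buf, n);
}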
#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
notrace void
_memset_io(volatile void __iomem *addr, int c, unsigned long n)
{
void *p = (void __force *)addr;
u32 lc = c;
lc |= lc << 8;
lc |= lc << 16;
__asm__ __volatile__ ("sync" : : : "memory");
while(n && !IO_CHECK_ALIGN(p, 4)) {
*((volatile u8 *)p) = c;
p++;
n--;
}
while(n >= 4) {
*((volatile u32 *)p) = lc;
p += 4;
n -= 4;
}
while(n) {
*((volatile u8 *)p) = c;
p++;
n--;
}
__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memset_io);
void _memcpy_fromio(void *dest, const volatile void __iomem *src,
unsigned long n)
{
void *vsrc = (void __force *) src;
__asm__ __volatile__ ("sync" : : : "memory");
while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
*((u8 *)dest) = *((volatile u8 *)vsrc);
eieio();
vsrc++;
dest++;
n--;
}
while(n >= 4) {
*((u32 *)dest) = *((volatile u32 *)vsrc);
eieio();
vsrc += 4;
dest += 4;
n -= 4;
}
while(n) {
*((u8 *)dest) = *((volatile u8 *)vsrc);
eieio();
vsrc++;
dest++;
n--;
}
__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memcpy_fromio);
void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
{
void *vdest = (void __force *) dest;
__asm__ __volatile__ ("sync" : : : "memory");
while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
*((volatile u8 *)vdest) = *((u8 *)src);
src++;
vdest++;
n--;
}
while(n >= 4) {
*((volatile u32 *)vdest) = *((volatile u32 *)src);
src += 4;
vdest += 4;
n-=4;
}
while(n) {
*((volatile u8 *)vdest) = *((u8 *)src);
src++;
vdest++;
n--;
}
__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memcpy_toio);
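/*
 * Usage sketch (hypothetical, for illustration only): drivers normally
 * reach these helpers through the memset_io()/memcpy_toio() wrappers to
 * clear or program a block of device memory obtained from ioremap().
 */
static void __maybe_unused example_clear_device_window(void __iomem *win,
						       unsigned long len)
{
	/* zero the whole mapped window; the sync barriers are handled above */
	_memset_io(win, 0, len);
}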
| linux-master | arch/powerpc/kernel/io.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PowerPC 64-bit swsusp implementation
*
* Copyright 2006 Johannes Berg <[email protected]>
*/
#include <asm/iommu.h>
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/nmi.h>
void do_after_copyback(void)
{
iommu_restore();
touch_softlockup_watchdog();
mb();
}
| linux-master | arch/powerpc/kernel/swsusp_64.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
* Copyright 2007-2010 Freescale Semiconductor, Inc.
*
* Modified by Cort Dougan ([email protected])
* and Paul Mackerras ([email protected])
*/
/*
* This file handles the architecture-dependent parts of hardware exceptions
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h> /* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/smp.h>
#include <linux/console.h>
#include <linux/kmsg_dump.h>
#include <linux/debugfs.h>
#include <asm/emulated_ops.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/disassemble.h>
#include <asm/udbg.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif
/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
static const char *signame(int signr)
{
switch (signr) {
case SIGBUS: return "bus error";
case SIGFPE: return "floating point exception";
case SIGILL: return "illegal instruction";
case SIGSEGV: return "segfault";
case SIGTRAP: return "unhandled trap";
}
return "unknown signal";
}
/*
* Trap & Exception support
*/
#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
mutex_lock(&pmac_backlight_mutex);
if (pmac_backlight) {
struct backlight_properties *props;
props = &pmac_backlight->props;
props->brightness = props->max_brightness;
props->power = FB_BLANK_UNBLANK;
backlight_update_status(pmac_backlight);
}
mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif
/*
* If oops/die is expected to crash the machine, return true here.
*
* This should not be expected to be 100% accurate, there may be
* notifiers registered or other unexpected conditions that may bring
* down the kernel. Or if the current process in the kernel is holding
* locks or has other critical state, the kernel may become effectively
* unusable anyway.
*/
bool die_will_crash(void)
{
if (should_fadump_crash())
return true;
if (kexec_should_crash(current))
return true;
if (in_interrupt() || panic_on_oops ||
!current->pid || is_global_init(current))
return true;
return false;
}
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;
extern void panic_flush_kmsg_start(void)
{
/*
* These are mostly taken from kernel/panic.c, but try to do
* relatively minimal work. Don't use delay functions (TB may
* be broken), don't crash dump (need to set a firmware log),
* don't run notifiers. We do want to get some information to
* Linux console.
*/
console_verbose();
bust_spinlocks(1);
}
extern void panic_flush_kmsg_end(void)
{
kmsg_dump(KMSG_DUMP_PANIC);
bust_spinlocks(0);
debug_locks_off();
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
}
static unsigned long oops_begin(struct pt_regs *regs)
{
int cpu;
unsigned long flags;
oops_enter();
/* racy, but better than risking deadlock. */
raw_local_irq_save(flags);
cpu = smp_processor_id();
if (!arch_spin_trylock(&die_lock)) {
if (cpu == die_owner)
/* nested oops. should stop eventually */;
else
arch_spin_lock(&die_lock);
}
die_nest_count++;
die_owner = cpu;
console_verbose();
bust_spinlocks(1);
if (machine_is(powermac))
pmac_backlight_unblank();
return flags;
}
NOKPROBE_SYMBOL(oops_begin);
static void oops_end(unsigned long flags, struct pt_regs *regs,
int signr)
{
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
die_nest_count--;
oops_exit();
printk("\n");
if (!die_nest_count) {
/* Nest count reaches zero, release the lock. */
die_owner = -1;
arch_spin_unlock(&die_lock);
}
raw_local_irq_restore(flags);
/*
* system_reset_exception handles debugger, crash dump, panic, for 0x100
*/
if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
return;
crash_fadump(regs, "die oops");
if (kexec_should_crash(current))
crash_kexec(regs);
if (!signr)
return;
/*
* While our oops output is serialised by a spinlock, output
* from panic() called below can race and corrupt it. If we
* know we are going to panic, delay for 1 second so we have a
* chance to get clean backtraces from all CPUs that are oopsing.
*/
if (in_interrupt() || panic_on_oops || !current->pid ||
is_global_init(current)) {
mdelay(MSEC_PER_SEC);
}
if (panic_on_oops)
panic("Fatal exception");
make_task_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);
static char *get_mmu_str(void)
{
if (early_radix_enabled())
return " MMU=Radix";
if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
return " MMU=Hash";
return "";
}
static int __die(const char *str, struct pt_regs *regs, long err)
{
printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
PAGE_SIZE / 1024, get_mmu_str(),
IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "",
ppc_md.name ? ppc_md.name : "");
if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
return 1;
print_modules();
show_regs(regs);
return 0;
}
NOKPROBE_SYMBOL(__die);
void die(const char *str, struct pt_regs *regs, long err)
{
unsigned long flags;
/*
* system_reset_exception handles debugger, crash dump, panic, for 0x100
*/
if (TRAP(regs) != INTERRUPT_SYSTEM_RESET) {
if (debugger(regs))
return;
}
flags = oops_begin(regs);
if (__die(str, regs, err))
err = 0;
oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);
void user_single_step_report(struct pt_regs *regs)
{
force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip);
}
static void show_signal_msg(int signr, struct pt_regs *regs, int code,
unsigned long addr)
{
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
if (!show_unhandled_signals)
return;
if (!unhandled_signal(current, signr))
return;
if (!__ratelimit(&rs))
return;
pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
current->comm, current->pid, signame(signr), signr,
addr, regs->nip, regs->link, code);
print_vma_addr(KERN_CONT " in ", regs->nip);
pr_cont("\n");
show_user_instructions(regs);
}
static bool exception_common(int signr, struct pt_regs *regs, int code,
unsigned long addr)
{
if (!user_mode(regs)) {
die("Exception in kernel mode", regs, signr);
return false;
}
/*
* Must not enable interrupts even for user-mode exception, because
* this can be called from machine check, which may be a NMI or IRQ
* which don't like interrupts being enabled. Could check for
* in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
* reason why _exception() should enable irqs for an exception handler,
* the handlers themselves do that directly.
*/
show_signal_msg(signr, regs, code, addr);
current->thread.trap_nr = code;
return true;
}
void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key)
{
if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr))
return;
force_sig_pkuerr((void __user *) addr, key);
}
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
if (!exception_common(signr, regs, code, addr))
return;
force_sig_fault(signr, code, (void __user *)addr);
}
/*
* The interrupt architecture has a quirk in that the HV interrupts excluding
* the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing
* that an interrupt handler must do is save off a GPR into a scratch register,
* and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch.
* Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing
* that it is non-reentrant, which leads to random data corruption.
*
* The solution is for NMI interrupts in HV mode to check if they originated
* from these critical HV interrupt regions. If so, then mark them not
* recoverable.
*
* An alternative would be for HV NMIs to use SPRG for scratch to avoid the
* HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux
* guests should always have MSR[RI]=0 when its scratch SPRG is in use, so
* that would work. However any other guest OS that may have the SPRG live
* and MSR[RI]=1 could encounter silent corruption.
*
* Builds that do not support KVM could take this second option to increase
* the recoverability of NMIs.
*/
noinstr void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_POWERNV
unsigned long kbase = (unsigned long)_stext;
unsigned long nip = regs->nip;
if (!(regs->msr & MSR_RI))
return;
if (!(regs->msr & MSR_HV))
return;
if (regs->msr & MSR_PR)
return;
/*
* Now test if the interrupt has hit a range that may be using
* HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
* problem ranges all run un-relocated. Test real and virt modes
* at the same time by dropping the high bit of the nip (virt mode
* entry points still have the +0x4000 offset).
*/
nip &= ~0xc000000000000000ULL;
if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600))
goto nonrecoverable;
if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00))
goto nonrecoverable;
if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0))
goto nonrecoverable;
if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0))
goto nonrecoverable;
/* Trampoline code runs un-relocated so subtract kbase. */
if (nip >= (unsigned long)(start_real_trampolines - kbase) &&
nip < (unsigned long)(end_real_trampolines - kbase))
goto nonrecoverable;
if (nip >= (unsigned long)(start_virt_trampolines - kbase) &&
nip < (unsigned long)(end_virt_trampolines - kbase))
goto nonrecoverable;
return;
nonrecoverable:
regs->msr &= ~MSR_RI;
local_paca->hsrr_valid = 0;
local_paca->srr_valid = 0;
#endif
}
DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
{
unsigned long hsrr0, hsrr1;
bool saved_hsrrs = false;
/*
* System reset can interrupt code where HSRRs are live and MSR[RI]=1.
* The system reset interrupt itself may clobber HSRRs (e.g., to call
* OPAL), so save them here and restore them before returning.
*
* Machine checks don't need to save HSRRs, as the real mode handler
* is careful to avoid them, and the regular handler is not delivered
* as an NMI.
*/
if (cpu_has_feature(CPU_FTR_HVMODE)) {
hsrr0 = mfspr(SPRN_HSRR0);
hsrr1 = mfspr(SPRN_HSRR1);
saved_hsrrs = true;
}
hv_nmi_check_nonrecoverable(regs);
__this_cpu_inc(irq_stat.sreset_irqs);
/* See if any machine dependent calls */
if (ppc_md.system_reset_exception) {
if (ppc_md.system_reset_exception(regs))
goto out;
}
if (debugger(regs))
goto out;
kmsg_dump(KMSG_DUMP_OOPS);
/*
* A system reset is a request to dump, so we always send
* it through the crashdump code (if fadump or kdump are
* registered).
*/
crash_fadump(regs, "System Reset");
crash_kexec(regs);
/*
* We aren't the primary crash CPU. We need to send it
* to a holding pattern to avoid it ending up in the panic
* code.
*/
crash_kexec_secondary(regs);
/*
* No debugger or crash dump registered, print logs then
* panic.
*/
die("System Reset", regs, SIGABRT);
mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
nmi_panic(regs, "System Reset");
out:
#ifdef CONFIG_PPC_BOOK3S_64
BUG_ON(get_paca()->in_nmi == 0);
if (get_paca()->in_nmi > 1)
die("Unrecoverable nested System Reset", regs, SIGABRT);
#endif
/* Must die if the interrupt is not recoverable */
if (regs_is_unrecoverable(regs)) {
/* For the reason explained in die_mce, nmi_exit before die */
nmi_exit();
die("Unrecoverable System Reset", regs, SIGABRT);
}
if (saved_hsrrs) {
mtspr(SPRN_HSRR0, hsrr0);
mtspr(SPRN_HSRR1, hsrr1);
}
/* What should we do here? We could issue a shutdown or hard reset. */
return 0;
}
/*
* I/O accesses can cause machine checks on powermacs.
* Check if the NIP corresponds to the address of a sync
* instruction for which there is an entry in the exception
* table.
* -- paulus.
*/
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
unsigned long msr = regs->msr;
const struct exception_table_entry *entry;
unsigned int *nip = (unsigned int *)regs->nip;
if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
&& (entry = search_exception_tables(regs->nip)) != NULL) {
/*
* Check that it's a sync instruction, or somewhere
* in the twi; isync; nop sequence that inb/inw/inl uses.
* As the address is in the exception table
* we should be able to read the instr there.
* For the debug message, we look at the preceding
* load or store.
*/
if (*nip == PPC_RAW_NOP())
nip -= 2;
else if (*nip == PPC_RAW_ISYNC())
--nip;
if (*nip == PPC_RAW_SYNC() || get_op(*nip) == OP_TRAP) {
unsigned int rb;
--nip;
rb = (*nip >> 11) & 0x1f;
printk(KERN_DEBUG "%s bad port %lx at %p\n",
(*nip & 0x100)? "OUT to": "IN from",
regs->gpr[rb] - _IO_BASE, nip);
regs_set_recoverable(regs);
regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
}
#endif /* CONFIG_PPC32 */
return 0;
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
is in the ESR. */
#define get_reason(regs) ((regs)->esr)
#define REASON_FP ESR_FP
#define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED ESR_PPR
#define REASON_TRAP ESR_PTR
#define REASON_PREFIXED 0
#define REASON_BOUNDARY 0
/* single-step stuff */
#define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC)
#define clear_br_trace(regs) do {} while(0)
#else
/* On non-4xx, the reason for the machine check or program
exception is in the MSR. */
#define get_reason(regs) ((regs)->msr)
#define REASON_TM SRR1_PROGTM
#define REASON_FP SRR1_PROGFPE
#define REASON_ILLEGAL SRR1_PROGILL
#define REASON_PRIVILEGED SRR1_PROGPRIV
#define REASON_TRAP SRR1_PROGTRAP
#define REASON_PREFIXED SRR1_PREFIXED
#define REASON_BOUNDARY SRR1_BOUNDARY
#define single_stepping(regs) ((regs)->msr & MSR_SE)
#define clear_single_step(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_SE))
#define clear_br_trace(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_BE))
#endif
#define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4)
#if defined(CONFIG_PPC_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
unsigned long mcsr = mfspr(SPRN_MCSR);
unsigned long pvr = mfspr(SPRN_PVR);
unsigned long reason = mcsr;
int recoverable = 1;
if (reason & MCSR_LD) {
recoverable = fsl_rio_mcheck_exception(regs);
if (recoverable == 1)
goto silent_out;
}
printk("Machine check in kernel mode.\n");
printk("Caused by (from MCSR=%lx): ", reason);
if (reason & MCSR_MCP)
pr_cont("Machine Check Signal\n");
if (reason & MCSR_ICPERR) {
pr_cont("Instruction Cache Parity Error\n");
/*
* This is recoverable by invalidating the i-cache.
*/
mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
;
/*
* This will generally be accompanied by an instruction
* fetch error report -- only treat MCSR_IF as fatal
* if it wasn't due to an L1 parity error.
*/
reason &= ~MCSR_IF;
}
if (reason & MCSR_DCPERR_MC) {
pr_cont("Data Cache Parity Error\n");
/*
* In write shadow mode we auto-recover from the error, but it
* may still get logged and cause a machine check. We should
* only treat the non-write shadow case as non-recoverable.
*/
/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
* is not implemented but L1 data cache always runs in write
* shadow mode. Hence on data cache parity errors HW will
* automatically invalidate the L1 Data Cache.
*/
if (PVR_VER(pvr) != PVR_VER_E6500) {
if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
recoverable = 0;
}
}
if (reason & MCSR_L2MMU_MHIT) {
pr_cont("Hit on multiple TLB entries\n");
recoverable = 0;
}
if (reason & MCSR_NMI)
pr_cont("Non-maskable interrupt\n");
if (reason & MCSR_IF) {
pr_cont("Instruction Fetch Error Report\n");
recoverable = 0;
}
if (reason & MCSR_LD) {
pr_cont("Load Error Report\n");
recoverable = 0;
}
if (reason & MCSR_ST) {
pr_cont("Store Error Report\n");
recoverable = 0;
}
if (reason & MCSR_LDG) {
pr_cont("Guarded Load Error Report\n");
recoverable = 0;
}
if (reason & MCSR_TLBSYNC)
pr_cont("Simultaneous tlbsync operations\n");
if (reason & MCSR_BSL2_ERR) {
pr_cont("Level 2 Cache Error\n");
recoverable = 0;
}
if (reason & MCSR_MAV) {
u64 addr;
addr = mfspr(SPRN_MCAR);
addr |= (u64)mfspr(SPRN_MCARU) << 32;
pr_cont("Machine Check %s Address: %#llx\n",
reason & MCSR_MEA ? "Effective" : "Physical", addr);
}
silent_out:
mtspr(SPRN_MCSR, mcsr);
return mfspr(SPRN_MCSR) == 0 && recoverable;
}
int machine_check_e500(struct pt_regs *regs)
{
unsigned long reason = mfspr(SPRN_MCSR);
if (reason & MCSR_BUS_RBERR) {
if (fsl_rio_mcheck_exception(regs))
return 1;
if (fsl_pci_mcheck_exception(regs))
return 1;
}
printk("Machine check in kernel mode.\n");
printk("Caused by (from MCSR=%lx): ", reason);
if (reason & MCSR_MCP)
pr_cont("Machine Check Signal\n");
if (reason & MCSR_ICPERR)
pr_cont("Instruction Cache Parity Error\n");
if (reason & MCSR_DCP_PERR)
pr_cont("Data Cache Push Parity Error\n");
if (reason & MCSR_DCPERR)
pr_cont("Data Cache Parity Error\n");
if (reason & MCSR_BUS_IAERR)
pr_cont("Bus - Instruction Address Error\n");
if (reason & MCSR_BUS_RAERR)
pr_cont("Bus - Read Address Error\n");
if (reason & MCSR_BUS_WAERR)
pr_cont("Bus - Write Address Error\n");
if (reason & MCSR_BUS_IBERR)
pr_cont("Bus - Instruction Data Error\n");
if (reason & MCSR_BUS_RBERR)
pr_cont("Bus - Read Data Bus Error\n");
if (reason & MCSR_BUS_WBERR)
pr_cont("Bus - Write Data Bus Error\n");
if (reason & MCSR_BUS_IPERR)
pr_cont("Bus - Instruction Parity Error\n");
if (reason & MCSR_BUS_RPERR)
pr_cont("Bus - Read Parity Error\n");
return 0;
}
int machine_check_generic(struct pt_regs *regs)
{
return 0;
}
#elif defined(CONFIG_PPC32)
int machine_check_generic(struct pt_regs *regs)
{
unsigned long reason = regs->msr;
printk("Machine check in kernel mode.\n");
printk("Caused by (from SRR1=%lx): ", reason);
switch (reason & 0x601F0000) {
case 0x80000:
pr_cont("Machine check signal\n");
break;
case 0x40000:
case 0x140000: /* 7450 MSS error and TEA */
pr_cont("Transfer error ack signal\n");
break;
case 0x20000:
pr_cont("Data parity error signal\n");
break;
case 0x10000:
pr_cont("Address parity error signal\n");
break;
case 0x20000000:
pr_cont("L1 Data Cache error\n");
break;
case 0x40000000:
pr_cont("L1 Instruction Cache error\n");
break;
case 0x00100000:
pr_cont("L2 data cache parity error\n");
break;
default:
pr_cont("Unknown values in msr\n");
}
return 0;
}
#endif /* everything else */
void die_mce(const char *str, struct pt_regs *regs, long err)
{
/*
* The machine check wants to kill the interrupted context,
* but make_task_dead() checks for in_interrupt() and panics
* in that case, so exit the irq/nmi before calling die.
*/
if (in_nmi())
nmi_exit();
else
irq_exit();
die(str, regs, err);
}
/*
* BOOK3S_64 does not usually call this handler as a non-maskable interrupt
* (it uses its own early real-mode handler to handle the MCE proper
* and then raises irq_work to call this handler when interrupts are
* enabled). The only time when this is not true is if the early handler
* is unrecoverable, then it does call this directly to try to get a
* message out.
*/
static void __machine_check_exception(struct pt_regs *regs)
{
int recover = 0;
__this_cpu_inc(irq_stat.mce_exceptions);
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
/* See if any machine dependent calls. In theory, we would want
* to call the CPU first, and call the ppc_md. one if the CPU
* one returns a positive number. However there is existing code
* that assumes the board gets a first chance, so let's keep it
* that way for now and fix things later. --BenH.
*/
if (ppc_md.machine_check_exception)
recover = ppc_md.machine_check_exception(regs);
else if (cur_cpu_spec->machine_check)
recover = cur_cpu_spec->machine_check(regs);
if (recover > 0)
goto bail;
if (debugger_fault_handler(regs))
goto bail;
if (check_io_access(regs))
goto bail;
die_mce("Machine check", regs, SIGBUS);
bail:
/* Must die if the interrupt is not recoverable */
if (regs_is_unrecoverable(regs))
die_mce("Unrecoverable Machine check", regs, SIGBUS);
}
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_INTERRUPT_HANDLER_RAW(machine_check_early_boot)
{
udbg_printf("Machine check (early boot)\n");
udbg_printf("SRR0=0x%016lx SRR1=0x%016lx\n", regs->nip, regs->msr);
udbg_printf(" DAR=0x%016lx DSISR=0x%08lx\n", regs->dar, regs->dsisr);
udbg_printf(" LR=0x%016lx R1=0x%08lx\n", regs->link, regs->gpr[1]);
udbg_printf("------\n");
die("Machine check (early boot)", regs, SIGBUS);
for (;;)
;
return 0;
}
DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
{
__machine_check_exception(regs);
}
#endif
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
{
__machine_check_exception(regs);
return 0;
}
DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
{
die("System Management Interrupt", regs, SIGABRT);
}
#ifdef CONFIG_VSX
static void p9_hmi_special_emu(struct pt_regs *regs)
{
unsigned int ra, rb, t, i, sel, instr, rc;
const void __user *addr;
u8 vbuf[16] __aligned(16), *vdst;
unsigned long ea, msr, msr_mask;
bool swap;
if (__get_user(instr, (unsigned int __user *)regs->nip))
return;
/*
* lxvb16x opcode: 0x7c0006d8
* lxvd2x opcode: 0x7c000698
* lxvh8x opcode: 0x7c000658
* lxvw4x opcode: 0x7c000618
*/
if ((instr & 0xfc00073e) != 0x7c000618) {
pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
" instr=%08x\n",
smp_processor_id(), current->comm, current->pid,
regs->nip, instr);
return;
}
/* Grab vector registers into the task struct */
msr = regs->msr; /* Grab msr before we flush the bits */
flush_vsx_to_thread(current);
enable_kernel_altivec();
/*
* Is userspace running with a different endian (this is rare but
* not impossible)
*/
swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
/* Decode the instruction */
ra = (instr >> 16) & 0x1f;
rb = (instr >> 11) & 0x1f;
t = (instr >> 21) & 0x1f;
if (instr & 1)
vdst = (u8 *)&current->thread.vr_state.vr[t];
else
vdst = (u8 *)&current->thread.fp_state.fpr[t][0];
/* Grab the vector address */
ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
if (is_32bit_task())
ea &= 0xfffffffful;
addr = (__force const void __user *)ea;
/* Check it */
if (!access_ok(addr, 16)) {
pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
" instr=%08x addr=%016lx\n",
smp_processor_id(), current->comm, current->pid,
regs->nip, instr, (unsigned long)addr);
return;
}
/* Read the vector */
rc = 0;
if ((unsigned long)addr & 0xfUL)
/* unaligned case */
rc = __copy_from_user_inatomic(vbuf, addr, 16);
else
__get_user_atomic_128_aligned(vbuf, addr, rc);
if (rc) {
pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
" instr=%08x addr=%016lx\n",
smp_processor_id(), current->comm, current->pid,
regs->nip, instr, (unsigned long)addr);
return;
}
pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
" instr=%08x addr=%016lx\n",
smp_processor_id(), current->comm, current->pid, regs->nip,
instr, (unsigned long) addr);
/* Grab instruction "selector" */
sel = (instr >> 6) & 3;
/*
* Check to make sure the facility is actually enabled. This
* could happen if we get a false positive hit.
*
* lxvd2x/lxvw4x always check MSR VSX sel = 0,2
* lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
*/
msr_mask = MSR_VSX;
if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
msr_mask = MSR_VEC;
if (!(msr & msr_mask)) {
pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
" instr=%08x msr:%016lx\n",
smp_processor_id(), current->comm, current->pid,
regs->nip, instr, msr);
return;
}
/* Do logging here before we modify sel based on endian */
switch (sel) {
case 0: /* lxvw4x */
PPC_WARN_EMULATED(lxvw4x, regs);
break;
case 1: /* lxvh8x */
PPC_WARN_EMULATED(lxvh8x, regs);
break;
case 2: /* lxvd2x */
PPC_WARN_EMULATED(lxvd2x, regs);
break;
case 3: /* lxvb16x */
PPC_WARN_EMULATED(lxvb16x, regs);
break;
}
#ifdef __LITTLE_ENDIAN__
/*
* An LE kernel stores the vector in the task struct as an LE
* byte array (effectively swapping both the components and
* the content of the components). Those instructions expect
* the components to remain in ascending address order, so we
* swap them back.
*
* If we are running a BE user space, the expectation is that
* of a simple memcpy, so forcing the emulation to look like
* a lxvb16x should do the trick.
*/
if (swap)
sel = 3;
switch (sel) {
case 0: /* lxvw4x */
for (i = 0; i < 4; i++)
((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
break;
case 1: /* lxvh8x */
for (i = 0; i < 8; i++)
((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
break;
case 2: /* lxvd2x */
for (i = 0; i < 2; i++)
((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
break;
case 3: /* lxvb16x */
for (i = 0; i < 16; i++)
vdst[i] = vbuf[15-i];
break;
}
#else /* __LITTLE_ENDIAN__ */
/* On a big endian kernel, a BE userspace only needs a memcpy */
if (!swap)
sel = 3;
/* Otherwise, we need to swap the content of the components */
switch (sel) {
case 0: /* lxvw4x */
for (i = 0; i < 4; i++)
((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
break;
case 1: /* lxvh8x */
for (i = 0; i < 8; i++)
((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
break;
case 2: /* lxvd2x */
for (i = 0; i < 2; i++)
((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
break;
case 3: /* lxvb16x */
memcpy(vdst, vbuf, 16);
break;
}
#endif /* !__LITTLE_ENDIAN__ */
/* Go to next instruction */
regs_add_return_ip(regs, 4);
}
#endif /* CONFIG_VSX */
DEFINE_INTERRUPT_HANDLER_ASYNC(handle_hmi_exception)
{
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
#ifdef CONFIG_VSX
/* Real mode flagged P9 special emu is needed */
if (local_paca->hmi_p9_special_emu) {
local_paca->hmi_p9_special_emu = 0;
/*
* We don't want to take page faults while doing the
* emulation, we just replay the instruction if necessary.
*/
pagefault_disable();
p9_hmi_special_emu(regs);
pagefault_enable();
}
#endif /* CONFIG_VSX */
if (ppc_md.handle_hmi_exception)
ppc_md.handle_hmi_exception(regs);
set_irq_regs(old_regs);
}
DEFINE_INTERRUPT_HANDLER(unknown_exception)
{
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
_exception(SIGTRAP, regs, TRAP_UNK, 0);
}
DEFINE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception)
{
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
_exception(SIGTRAP, regs, TRAP_UNK, 0);
}
DEFINE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception)
{
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
_exception(SIGTRAP, regs, TRAP_UNK, 0);
return 0;
}
DEFINE_INTERRUPT_HANDLER(instruction_breakpoint_exception)
{
if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
return;
if (debugger_iabr_match(regs))
return;
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}
DEFINE_INTERRUPT_HANDLER(RunModeException)
{
_exception(SIGTRAP, regs, TRAP_UNK, 0);
}
static void __single_step_exception(struct pt_regs *regs)
{
clear_single_step(regs);
clear_br_trace(regs);
if (kprobe_post_handler(regs))
return;
if (notify_die(DIE_SSTEP, "single_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
return;
if (debugger_sstep(regs))
return;
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}
DEFINE_INTERRUPT_HANDLER(single_step_exception)
{
__single_step_exception(regs);
}
/*
* After we have successfully emulated an instruction, we have to
* check if the instruction was being single-stepped, and if so,
* pretend we got a single-step exception. This was pointed out
* by Kumar Gala. -- paulus
*/
void emulate_single_step(struct pt_regs *regs)
{
if (single_stepping(regs))
__single_step_exception(regs);
}
static inline int __parse_fpscr(unsigned long fpscr)
{
int ret = FPE_FLTUNK;
/* Invalid operation */
if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
ret = FPE_FLTINV;
/* Overflow */
else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
ret = FPE_FLTOVF;
/* Underflow */
else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
ret = FPE_FLTUND;
/* Divide by zero */
else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
ret = FPE_FLTDIV;
/* Inexact result */
else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
ret = FPE_FLTRES;
return ret;
}
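/*
 * Worked example for the mapping above: a divide-by-zero that is both
 * enabled (FPSCR_ZE) and flagged (FPSCR_ZX) is reported as FPE_FLTDIV,
 * while an exception with no matching enable/status pair falls back to
 * FPE_FLTUNK.
 */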
static void parse_fpe(struct pt_regs *regs)
{
int code = 0;
flush_fp_to_thread(current);
#ifdef CONFIG_PPC_FPU_REGS
code = __parse_fpscr(current->thread.fp_state.fpscr);
#endif
_exception(SIGFPE, regs, code, regs->nip);
}
/*
* Illegal instruction emulation support. Originally written to
* provide the PVR to user applications using the mfspr rd, PVR.
* Return non-zero if we can't emulate, or -EFAULT if the associated
* memory access caused an access fault. Return zero on success.
*
* There are a couple of ways to do this, either "decode" the instruction
* or directly match lots of bits. In this case, matching lots of
* bits is faster and easier.
*
*/
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
u8 rT = (instword >> 21) & 0x1f;
u8 rA = (instword >> 16) & 0x1f;
u8 NB_RB = (instword >> 11) & 0x1f;
u32 num_bytes;
unsigned long EA;
int pos = 0;
/* Early out if we are an invalid form of lswx */
if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
if ((rT == rA) || (rT == NB_RB))
return -EINVAL;
EA = (rA == 0) ? 0 : regs->gpr[rA];
switch (instword & PPC_INST_STRING_MASK) {
case PPC_INST_LSWX:
case PPC_INST_STSWX:
EA += NB_RB;
num_bytes = regs->xer & 0x7f;
break;
case PPC_INST_LSWI:
case PPC_INST_STSWI:
num_bytes = (NB_RB == 0) ? 32 : NB_RB;
break;
default:
return -EINVAL;
}
while (num_bytes != 0)
{
u8 val;
u32 shift = 8 * (3 - (pos & 0x3));
/* if process is 32-bit, clear upper 32 bits of EA */
if ((regs->msr & MSR_64BIT) == 0)
EA &= 0xFFFFFFFF;
switch ((instword & PPC_INST_STRING_MASK)) {
case PPC_INST_LSWX:
case PPC_INST_LSWI:
if (get_user(val, (u8 __user *)EA))
return -EFAULT;
/* first time updating this reg,
* zero it out */
if (pos == 0)
regs->gpr[rT] = 0;
regs->gpr[rT] |= val << shift;
break;
case PPC_INST_STSWI:
case PPC_INST_STSWX:
val = regs->gpr[rT] >> shift;
if (put_user(val, (u8 __user *)EA))
return -EFAULT;
break;
}
/* move EA to next address */
EA += 1;
num_bytes--;
/* manage our position within the register */
if (++pos == 4) {
pos = 0;
if (++rT == 32)
rT = 0;
}
}
return 0;
}
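/*
 * Worked example for the emulation above (following the rT/rA/NB_RB
 * decode): "lswi r5, r4, 6" reads six bytes starting at the address in
 * r4; the first four are packed left-justified into r5 and the remaining
 * two into the top half of r6, whose low bytes stay clear because each
 * destination register is zeroed the first time it is touched.
 */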
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
u32 ra,rs;
unsigned long tmp;
ra = (instword >> 16) & 0x1f;
rs = (instword >> 21) & 0x1f;
tmp = regs->gpr[rs];
tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
regs->gpr[ra] = tmp;
return 0;
}
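/*
 * Worked example for the popcntb emulation above: the three mask-and-add
 * steps compute a per-byte population count in parallel, so for
 * rs = 0x0300000000000000 the value written to ra is 0x0200000000000000
 * (the byte 0x03 has two bits set, every other byte has none).
 */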
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
u8 rT = (instword >> 21) & 0x1f;
u8 rA = (instword >> 16) & 0x1f;
u8 rB = (instword >> 11) & 0x1f;
u8 BC = (instword >> 6) & 0x1f;
u8 bit;
unsigned long tmp;
tmp = (rA == 0) ? 0 : regs->gpr[rA];
bit = (regs->ccr >> (31 - BC)) & 0x1;
regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
return 0;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
/* If we're emulating a load/store in an active transaction, we cannot
* emulate it as the kernel operates in transaction suspended context.
* We need to abort the transaction. This creates a persistent TM
* abort so tell the user what caused it with a new code.
*/
if (MSR_TM_TRANSACTIONAL(regs->msr)) {
tm_enable();
tm_abort(cause);
return true;
}
return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
return false;
}
#endif
static int emulate_instruction(struct pt_regs *regs)
{
u32 instword;
u32 rd;
if (!user_mode(regs))
return -EINVAL;
if (get_user(instword, (u32 __user *)(regs->nip)))
return -EFAULT;
/* Emulate the mfspr rD, PVR. */
if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
PPC_WARN_EMULATED(mfpvr, regs);
rd = (instword >> 21) & 0x1f;
regs->gpr[rd] = mfspr(SPRN_PVR);
return 0;
}
/* Emulating the dcba insn is just a no-op. */
if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
PPC_WARN_EMULATED(dcba, regs);
return 0;
}
/* Emulate the mcrxr insn. */
if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
int shift = (instword >> 21) & 0x1c;
unsigned long msk = 0xf0000000UL >> shift;
PPC_WARN_EMULATED(mcrxr, regs);
regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
regs->xer &= ~0xf0000000UL;
return 0;
}
/* Emulate load/store string insn. */
if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
if (tm_abort_check(regs,
TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
return -EINVAL;
PPC_WARN_EMULATED(string, regs);
return emulate_string_inst(regs, instword);
}
/* Emulate the popcntb (Population Count Bytes) instruction. */
if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
PPC_WARN_EMULATED(popcntb, regs);
return emulate_popcntb_inst(regs, instword);
}
/* Emulate isel (Integer Select) instruction */
if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
PPC_WARN_EMULATED(isel, regs);
return emulate_isel(regs, instword);
}
/* Emulate sync instruction variants */
if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
PPC_WARN_EMULATED(sync, regs);
asm volatile("sync");
return 0;
}
#ifdef CONFIG_PPC64
/* Emulate the mfspr rD, DSCR. */
if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
PPC_INST_MFSPR_DSCR_USER) ||
((instword & PPC_INST_MFSPR_DSCR_MASK) ==
PPC_INST_MFSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mfdscr, regs);
rd = (instword >> 21) & 0x1f;
regs->gpr[rd] = mfspr(SPRN_DSCR);
return 0;
}
/* Emulate the mtspr DSCR, rD. */
if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
PPC_INST_MTSPR_DSCR_USER) ||
((instword & PPC_INST_MTSPR_DSCR_MASK) ==
PPC_INST_MTSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
mtspr(SPRN_DSCR, current->thread.dscr);
return 0;
}
#endif
return -EINVAL;
}
int is_valid_bugaddr(unsigned long addr)
{
return is_kernel_addr(addr);
}
#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
int ret;
ret = do_mathemu(regs);
if (ret >= 0)
PPC_WARN_EMULATED(math, regs);
switch (ret) {
case 0:
emulate_single_step(regs);
return 0;
case 1: {
int code = 0;
code = __parse_fpscr(current->thread.fp_state.fpscr);
_exception(SIGFPE, regs, code, regs->nip);
return 0;
}
case -EFAULT:
_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
return 0;
}
return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif
static void do_program_check(struct pt_regs *regs)
{
unsigned int reason = get_reason(regs);
/* We can now get here via a FP Unavailable exception if the core
* has no FPU, in that case the reason flags will be 0 */
if (reason & REASON_FP) {
/* IEEE FP exception */
parse_fpe(regs);
return;
}
if (reason & REASON_TRAP) {
unsigned long bugaddr;
/* Debugger is first in line to stop recursive faults in
* rcu_lock, notify_die, or atomic_notifier_call_chain */
if (debugger_bpt(regs))
return;
if (kprobe_handler(regs))
return;
/* trap exception */
if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
== NOTIFY_STOP)
return;
bugaddr = regs->nip;
/*
* Fixup bugaddr for BUG_ON() in real mode
*/
if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
bugaddr += PAGE_OFFSET;
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
regs_add_return_ip(regs, 4);
return;
}
/* User mode considers other cases after enabling IRQs */
if (!user_mode(regs)) {
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
return;
}
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (reason & REASON_TM) {
/* This is a TM "Bad Thing Exception" program check.
* This occurs when:
* - An rfid/hrfid/mtmsrd attempts to cause an illegal
* transition in TM states.
* - A trechkpt is attempted when transactional.
* - A treclaim is attempted when non transactional.
* - A tend is illegally attempted.
* - writing a TM SPR when transactional.
*
* If usermode caused this, it's done something illegal and
* gets a SIGILL slap on the wrist. We call it an illegal
* operand to distinguish from the instruction just being bad
* (e.g. executing a 'tend' on a CPU without TM!); it's an
* illegal /placement/ of a valid instruction.
*/
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
return;
} else {
printk(KERN_EMERG "Unexpected TM Bad Thing exception "
"at %lx (msr 0x%lx) tm_scratch=%llx\n",
regs->nip, regs->msr, get_paca()->tm_scratch);
die("Unrecoverable exception", regs, SIGABRT);
}
}
#endif
/*
* If we took the program check in the kernel skip down to sending a
* SIGILL. The subsequent cases all relate to user space, such as
* emulating instructions which we should only do for user space. We
* also do not want to enable interrupts for kernel faults because that
* might lead to further faults, and lose the context of the original
* exception.
*/
if (!user_mode(regs))
goto sigill;
interrupt_cond_local_irq_enable(regs);
/*
* (reason & REASON_TRAP) is mostly handled before enabling IRQs,
* except get_user_instr() can sleep so we cannot reliably inspect the
* current instruction in that context. Now that we know we are
* handling a user space trap and can sleep, we can check if the trap
* was a hashchk failure.
*/
if (reason & REASON_TRAP) {
if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) {
ppc_inst_t insn;
if (get_user_instr(insn, (void __user *)regs->nip)) {
_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
return;
}
if (ppc_inst_primary_opcode(insn) == 31 &&
get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
return;
}
}
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
return;
}
/* (reason & REASON_ILLEGAL) would be the obvious thing here,
* but there seems to be a hardware bug on the 405GP (RevD)
* that means ESR is sometimes set incorrectly - either to
* ESR_DST (!?) or 0. In the process of chasing this with the
* hardware people - not sure if it can happen on any illegal
* instruction or only on FP instructions, whether there is a
* pattern to occurrences etc. -dgibson 31/Mar/2003
*/
if (!emulate_math(regs))
return;
/* Try to emulate it if we should. */
if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
switch (emulate_instruction(regs)) {
case 0:
regs_add_return_ip(regs, 4);
emulate_single_step(regs);
return;
case -EFAULT:
_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
return;
}
}
sigill:
if (reason & REASON_PRIVILEGED)
_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
else
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
DEFINE_INTERRUPT_HANDLER(program_check_exception)
{
do_program_check(regs);
}
/*
* This occurs when running in hypervisor mode on POWER6 or later
* and an illegal instruction is encountered.
*/
DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt)
{
regs_set_return_msr(regs, regs->msr | REASON_ILLEGAL);
do_program_check(regs);
}
DEFINE_INTERRUPT_HANDLER(alignment_exception)
{
int sig, code, fixed = 0;
unsigned long reason;
interrupt_cond_local_irq_enable(regs);
reason = get_reason(regs);
if (reason & REASON_BOUNDARY) {
sig = SIGBUS;
code = BUS_ADRALN;
goto bad;
}
if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
return;
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
if (fixed == 1) {
/* skip over emulated instruction */
regs_add_return_ip(regs, inst_length(reason));
emulate_single_step(regs);
return;
}
/* Operand address was bad */
if (fixed == -EFAULT) {
sig = SIGSEGV;
code = SEGV_ACCERR;
} else {
sig = SIGBUS;
code = BUS_ADRALN;
}
bad:
if (user_mode(regs))
_exception(sig, regs, code, regs->dar);
else
bad_page_fault(regs, sig);
}
DEFINE_INTERRUPT_HANDLER(stack_overflow_exception)
{
die("Kernel stack overflow", regs, SIGSEGV);
}
DEFINE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception)
{
printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
}
DEFINE_INTERRUPT_HANDLER(altivec_unavailable_exception)
{
if (user_mode(regs)) {
/* A user program has executed an altivec instruction,
but this kernel doesn't support altivec. */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
return;
}
printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
}
DEFINE_INTERRUPT_HANDLER(vsx_unavailable_exception)
{
if (user_mode(regs)) {
/* A user program has executed a vsx instruction,
but this kernel doesn't support vsx. */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
return;
}
printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
#ifdef CONFIG_PPC_BOOK3S_64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (user_mode(regs)) {
current->thread.load_tm++;
regs_set_return_msr(regs, regs->msr | MSR_TM);
tm_enable();
tm_restore_sprs(&current->thread);
return;
}
#endif
pr_emerg("Unrecoverable TM Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}
DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception)
{
static char *facility_strings[] = {
[FSCR_FP_LG] = "FPU",
[FSCR_VECVSX_LG] = "VMX/VSX",
[FSCR_DSCR_LG] = "DSCR",
[FSCR_PM_LG] = "PMU SPRs",
[FSCR_BHRB_LG] = "BHRB",
[FSCR_TM_LG] = "TM",
[FSCR_EBB_LG] = "EBB",
[FSCR_TAR_LG] = "TAR",
[FSCR_MSGP_LG] = "MSGP",
[FSCR_SCV_LG] = "SCV",
[FSCR_PREFIX_LG] = "PREFIX",
};
char *facility = "unknown";
u64 value;
u32 instword, rd;
u8 status;
bool hv;
hv = (TRAP(regs) == INTERRUPT_H_FAC_UNAVAIL);
if (hv)
value = mfspr(SPRN_HFSCR);
else
value = mfspr(SPRN_FSCR);
status = value >> 56;
if ((hv || status >= 2) &&
(status < ARRAY_SIZE(facility_strings)) &&
facility_strings[status])
facility = facility_strings[status];
/* We should not have taken this interrupt in kernel */
if (!user_mode(regs)) {
pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
facility, status, regs->nip);
die("Unexpected facility unavailable exception", regs, SIGABRT);
}
interrupt_cond_local_irq_enable(regs);
if (status == FSCR_DSCR_LG) {
/*
* User is accessing the DSCR register using the problem
* state only SPR number (0x03) either through a mfspr or
* a mtspr instruction. If it is a write attempt through
* a mtspr, then we set the inherit bit. This also allows
* the user to write or read the register directly in the
* future by setting via the FSCR DSCR bit. But in case it
* is a read DSCR attempt through a mfspr instruction, we
* just emulate the instruction instead. This code path will
* always emulate all the mfspr instructions till the user
* has attempted at least one mtspr instruction. This way it
* preserves the same behaviour when the user is accessing
* the DSCR through privilege level only SPR number (0x11)
* which is emulated through illegal instruction exception.
* We always leave HFSCR DSCR set.
*/
if (get_user(instword, (u32 __user *)(regs->nip))) {
pr_err("Failed to fetch the user instruction\n");
return;
}
/* Write into DSCR (mtspr 0x03, RS) */
if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
== PPC_INST_MTSPR_DSCR_USER) {
rd = (instword >> 21) & 0x1f;
current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
current->thread.fscr |= FSCR_DSCR;
mtspr(SPRN_FSCR, current->thread.fscr);
}
/* Read from DSCR (mfspr RT, 0x03) */
if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
== PPC_INST_MFSPR_DSCR_USER) {
if (emulate_instruction(regs)) {
pr_err("DSCR based mfspr emulation failed\n");
return;
}
regs_add_return_ip(regs, 4);
emulate_single_step(regs);
}
return;
}
if (status == FSCR_TM_LG) {
/*
* If we're here then the hardware is TM aware because it
* generated an exception with FSCR_TM set.
*
* If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
* told us not to do TM, or the kernel is not built with TM
* support.
*
* If both of those things are true, then userspace can spam the
* console by triggering the printk() below just by continually
* doing tbegin (or any TM instruction). So in that case just
* send the process a SIGILL immediately.
*/
if (!cpu_has_feature(CPU_FTR_TM))
goto out;
tm_unavailable(regs);
return;
}
pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
out:
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
DEFINE_INTERRUPT_HANDLER(fp_unavailable_tm)
{
/* Note: This does not handle any kind of FP laziness. */
TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
regs->nip, regs->msr);
/* We can only have got here if the task started using FP after
* beginning the transaction. So, the transactional regs are just a
* copy of the checkpointed ones. But, we still need to recheckpoint
* as we're enabling FP for the process; it will return, abort the
* transaction, and probably retry but now with FP enabled. So the
* checkpointed FP registers need to be loaded.
*/
tm_reclaim_current(TM_CAUSE_FAC_UNAV);
/*
* Reclaim initially saved out bogus (lazy) FPRs to ckfp_state, and
* then it was overwritten by thr->fp_state in tm_reclaim_thread().
*
* At this point, ck{fp,vr}_state contains the exact values we want to
* recheckpoint.
*/
/* Enable FP for the task: */
current->thread.load_fp = 1;
/*
* Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers.
*/
tm_recheckpoint(&current->thread);
}
DEFINE_INTERRUPT_HANDLER(altivec_unavailable_tm)
{
/* See the comments in fp_unavailable_tm(). This function operates
* the same way.
*/
TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
"MSR=%lx\n",
regs->nip, regs->msr);
tm_reclaim_current(TM_CAUSE_FAC_UNAV);
current->thread.load_vec = 1;
tm_recheckpoint(&current->thread);
current->thread.used_vr = 1;
}
DEFINE_INTERRUPT_HANDLER(vsx_unavailable_tm)
{
/* See the comments in fp_unavailable_tm(). This works similarly,
* though we're loading both FP and VEC registers in here.
*
* If FP isn't in use, load FP regs. If VEC isn't in use, load VEC
* regs. Either way, set MSR_VSX.
*/
TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
"MSR=%lx\n",
regs->nip, regs->msr);
current->thread.used_vsr = 1;
/* This reclaims FP and/or VR regs if they're already enabled */
tm_reclaim_current(TM_CAUSE_FAC_UNAV);
current->thread.load_vec = 1;
current->thread.load_fp = 1;
tm_recheckpoint(&current->thread);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC64
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DEFINE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi)
{
__this_cpu_inc(irq_stat.pmu_irqs);
perf_irq(regs);
return 0;
}
#endif
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DEFINE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async)
{
__this_cpu_inc(irq_stat.pmu_irqs);
perf_irq(regs);
}
DEFINE_INTERRUPT_HANDLER_RAW(performance_monitor_exception)
{
/*
* On 64-bit, if perf interrupts hit in a local_irq_disable
* (soft-masked) region, we consider them as NMIs. This is required to
* prevent hash faults on user addresses when reading callchains (and
* looks better from an irq tracing perspective).
*/
if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
performance_monitor_exception_nmi(regs);
else
performance_monitor_exception_async(regs);
return 0;
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
int changed = 0;
/*
* Determine the cause of the debug event, clear the
* event flags and send a trap to the handler. Torez
*/
if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
do_send_trap(regs, mfspr(SPRN_DAC1), debug_status,
5);
changed |= 0x01;
} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
do_send_trap(regs, mfspr(SPRN_DAC2), debug_status,
6);
changed |= 0x01;
} else if (debug_status & DBSR_IAC1) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
do_send_trap(regs, mfspr(SPRN_IAC1), debug_status,
1);
changed |= 0x01;
} else if (debug_status & DBSR_IAC2) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
do_send_trap(regs, mfspr(SPRN_IAC2), debug_status,
2);
changed |= 0x01;
} else if (debug_status & DBSR_IAC3) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
do_send_trap(regs, mfspr(SPRN_IAC3), debug_status,
3);
changed |= 0x01;
} else if (debug_status & DBSR_IAC4) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
do_send_trap(regs, mfspr(SPRN_IAC4), debug_status,
4);
changed |= 0x01;
}
/*
* At the point this routine was called, the MSR(DE) was turned off.
* Check all other debug flags and see if that bit needs to be turned
* back on or not.
*/
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
current->thread.debug.dbcr1))
regs_set_return_msr(regs, regs->msr | MSR_DE);
else
/* Make sure the IDM flag is off */
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
if (changed & 0x01)
mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}
DEFINE_INTERRUPT_HANDLER(DebugException)
{
unsigned long debug_status = regs->dsisr;
current->thread.debug.dbsr = debug_status;
/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
* on server, it stops on the target of the branch. In order to simulate
* the server behaviour, we thus restart right away with a single step
* instead of stopping here when hitting a BT
*/
if (debug_status & DBSR_BT) {
regs_set_return_msr(regs, regs->msr & ~MSR_DE);
/* Disable BT */
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
/* Clear the BT event */
mtspr(SPRN_DBSR, DBSR_BT);
/* Do the single step trick only when coming from userspace */
if (user_mode(regs)) {
current->thread.debug.dbcr0 &= ~DBCR0_BT;
current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs_set_return_msr(regs, regs->msr | MSR_DE);
return;
}
if (kprobe_post_handler(regs))
return;
if (notify_die(DIE_SSTEP, "block_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP) {
return;
}
if (debugger_sstep(regs))
return;
} else if (debug_status & DBSR_IC) { /* Instruction complete */
regs_set_return_msr(regs, regs->msr & ~MSR_DE);
/* Disable instruction completion */
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
/* Clear the instruction completion event */
mtspr(SPRN_DBSR, DBSR_IC);
if (kprobe_post_handler(regs))
return;
if (notify_die(DIE_SSTEP, "single_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP) {
return;
}
if (debugger_sstep(regs))
return;
if (user_mode(regs)) {
current->thread.debug.dbcr0 &= ~DBCR0_IC;
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
current->thread.debug.dbcr1))
regs_set_return_msr(regs, regs->msr | MSR_DE);
else
/* Make sure the IDM bit is off */
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
}
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
} else
handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_ALTIVEC
DEFINE_INTERRUPT_HANDLER(altivec_assist_exception)
{
int err;
if (!user_mode(regs)) {
printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
" at %lx\n", regs->nip);
die("Kernel VMX/Altivec assist exception", regs, SIGILL);
}
flush_altivec_to_thread(current);
PPC_WARN_EMULATED(altivec, regs);
err = emulate_altivec(regs);
if (err == 0) {
regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
if (err == -EFAULT) {
/* got an error reading the instruction */
_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
} else {
/* didn't recognize the instruction */
/* XXX quick hack for now: set the non-Java bit in the VSCR */
printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
"in %s at %lx\n", current->comm, regs->nip);
current->thread.vr_state.vscr.u[3] |= 0x10000;
}
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC_85xx
DEFINE_INTERRUPT_HANDLER(CacheLockingException)
{
unsigned long error_code = regs->dsisr;
/* We treat cache locking instructions from the user
* as priv ops, in the future we could try to do
* something smarter
*/
if (error_code & (ESR_DLK|ESR_ILK))
_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
return;
}
#endif /* CONFIG_PPC_85xx */
#ifdef CONFIG_SPE
DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException)
{
unsigned long spefscr;
int fpexc_mode;
int code = FPE_FLTUNK;
int err;
interrupt_cond_local_irq_enable(regs);
flush_spe_to_thread(current);
spefscr = current->thread.spefscr;
fpexc_mode = current->thread.fpexc_mode;
if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
code = FPE_FLTOVF;
}
else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
code = FPE_FLTUND;
}
else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
code = FPE_FLTDIV;
else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
code = FPE_FLTINV;
}
else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
code = FPE_FLTRES;
err = do_spe_mathemu(regs);
if (err == 0) {
regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
if (err == -EFAULT) {
/* got an error reading the instruction */
_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
} else if (err == -EINVAL) {
/* didn't recognize the instruction */
printk(KERN_ERR "unrecognized spe instruction "
"in %s at %lx\n", current->comm, regs->nip);
} else {
_exception(SIGFPE, regs, code, regs->nip);
}
return;
}
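/*
 * Editor's note (a sketch of the surrounding flow, partly an assumption):
 * the SPEFSCR/fpexc_mode matching above only chooses the siginfo code that
 * is delivered if emulation fails with an unexpected error; when
 * do_spe_mathemu() (the SPE floating-point emulator, presumably living
 * under arch/powerpc/math-emu) handles the instruction, the instruction is
 * simply skipped and no signal is raised at all.
 */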
DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
{
int err;
interrupt_cond_local_irq_enable(regs);
preempt_disable();
if (regs->msr & MSR_SPE)
giveup_spe(current);
preempt_enable();
regs_add_return_ip(regs, -4);
err = speround_handler(regs);
if (err == 0) {
regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
if (err == -EFAULT) {
/* got an error reading the instruction */
_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
} else if (err == -EINVAL) {
/* didn't recognize the instruction */
printk(KERN_ERR "unrecognized spe instruction "
"in %s at %lx\n", current->comm, regs->nip);
} else {
_exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
return;
}
}
#endif
/*
* We enter here if we get an unrecoverable exception, that is, one
* that happened at a point where the RI (recoverable interrupt) bit
* in the MSR is 0. This indicates that SRR0/1 are live, and that
* we therefore lost state by taking this exception.
*/
void __noreturn unrecoverable_exception(struct pt_regs *regs)
{
pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
regs->trap, regs->nip, regs->msr);
die("Unrecoverable exception", regs, SIGABRT);
/* die() should not return */
for (;;)
;
}
#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
DEFINE_INTERRUPT_HANDLER_NMI(WatchdogException)
{
printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
return 0;
}
#endif
/*
* We enter here if we discover during exception entry that we are
* running in supervisor mode with a userspace value in the stack pointer.
*/
DEFINE_INTERRUPT_HANDLER(kernel_bad_stack)
{
printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
regs->gpr[1], regs->nip);
die("Bad kernel stack pointer", regs, SIGABRT);
}
#ifdef CONFIG_PPC_EMULATED_STATS
#define WARN_EMULATED_SETUP(type) .type = { .name = #type }
struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
WARN_EMULATED_SETUP(altivec),
#endif
WARN_EMULATED_SETUP(dcba),
WARN_EMULATED_SETUP(dcbz),
WARN_EMULATED_SETUP(fp_pair),
WARN_EMULATED_SETUP(isel),
WARN_EMULATED_SETUP(mcrxr),
WARN_EMULATED_SETUP(mfpvr),
WARN_EMULATED_SETUP(multiple),
WARN_EMULATED_SETUP(popcntb),
WARN_EMULATED_SETUP(spe),
WARN_EMULATED_SETUP(string),
WARN_EMULATED_SETUP(sync),
WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
WARN_EMULATED_SETUP(mfdscr),
WARN_EMULATED_SETUP(mtdscr),
WARN_EMULATED_SETUP(lq_stq),
WARN_EMULATED_SETUP(lxvw4x),
WARN_EMULATED_SETUP(lxvh8x),
WARN_EMULATED_SETUP(lxvd2x),
WARN_EMULATED_SETUP(lxvb16x),
#endif
};
u32 ppc_warn_emulated;
void ppc_warn_emulated_print(const char *type)
{
pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
type);
}
static int __init ppc_warn_emulated_init(void)
{
struct dentry *dir;
unsigned int i;
struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
dir = debugfs_create_dir("emulated_instructions",
arch_debugfs_dir);
debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated);
for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++)
debugfs_create_u32(entries[i].name, 0644, dir,
(u32 *)&entries[i].val.counter);
return 0;
}
device_initcall(ppc_warn_emulated_init);
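/*
 * Editor's note (an assumption about the wider tree, not stated in this
 * file): arch_debugfs_dir is the arch-level "powerpc" directory under
 * debugfs, so the counters registered above show up as
 * <debugfs>/powerpc/emulated_instructions/<type>, with the companion
 * <debugfs>/powerpc/emulated_instructions/do_warn switch gating the
 * ratelimited warning printed by ppc_warn_emulated_print().
 */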
#endif /* CONFIG_PPC_EMULATED_STATS */
| linux-master | arch/powerpc/kernel/traps.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Procedures for drawing on the screen early on in the boot process.
*
* Benjamin Herrenschmidt <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <linux/of.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/udbg.h>
#define NO_SCROLL
#ifndef NO_SCROLL
static void scrollscreen(void);
#endif
#define __force_data __section(".data")
static int g_loc_X __force_data;
static int g_loc_Y __force_data;
static int g_max_loc_X __force_data;
static int g_max_loc_Y __force_data;
static int dispDeviceRowBytes __force_data;
static int dispDeviceDepth __force_data;
static int dispDeviceRect[4] __force_data;
static unsigned char *dispDeviceBase __force_data;
static unsigned char *logicalDisplayBase __force_data;
unsigned long disp_BAT[2] __initdata = {0, 0};
#define cmapsz (16*256)
static unsigned char vga_font[cmapsz];
static int boot_text_mapped __force_data;
extern void rmci_on(void);
extern void rmci_off(void);
static inline void rmci_maybe_on(void)
{
#if defined(CONFIG_PPC_EARLY_DEBUG_BOOTX) && defined(CONFIG_PPC64)
if (!(mfmsr() & MSR_DR))
rmci_on();
#endif
}
static inline void rmci_maybe_off(void)
{
#if defined(CONFIG_PPC_EARLY_DEBUG_BOOTX) && defined(CONFIG_PPC64)
if (!(mfmsr() & MSR_DR))
rmci_off();
#endif
}
#ifdef CONFIG_PPC32
/* Calc BAT values for mapping the display and store them
* in disp_BAT. Those values are then used from head.S to map
* the display during identify_machine() and MMU_Init()
*
* The display is mapped to virtual address 0xD0000000, rather
* than 1:1, because some CHRP machines put the frame buffer
* in the region starting at 0xC0000000 (PAGE_OFFSET).
* This mapping is temporary and will disappear as soon as the
* setup done by MMU_Init() is applied.
*
* For now, we align the BAT and then map 8MB on 601 and 16MB
* on other PPCs. This may cause trouble if the framebuffer
* is really badly aligned, but I didn't encounter this case
* yet.
*/
void __init btext_prepare_BAT(void)
{
unsigned long vaddr = PAGE_OFFSET + 0x10000000;
unsigned long addr;
unsigned long lowbits;
addr = (unsigned long)dispDeviceBase;
if (!addr) {
boot_text_mapped = 0;
return;
}
lowbits = addr & ~0xFF000000UL;
addr &= 0xFF000000UL;
disp_BAT[0] = vaddr | (BL_16M<<2) | 2;
disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW);
logicalDisplayBase = (void *) (vaddr + lowbits);
}
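/*
 * Editor's worked example (hypothetical numbers, derived only from the
 * arithmetic above): with dispDeviceBase at physical 0x9c008000, addr is
 * rounded down to 0x9c000000 and lowbits is 0x8000.  The upper BAT word
 * becomes 0xd0000000 | (BL_16M << 2) | 2 (a 16MB block, valid in
 * supervisor mode) and the lower word is 0x9c000000 with the
 * no-cache/guarded/read-write flags, so btext draws through virtual
 * 0xd0008000 until the setup done by MMU_Init() replaces this temporary
 * mapping.
 */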
#endif
/* This function can be used to enable the early boot text when doing
* OF booting or within bootx init. It must be followed by a btext_unmap()
* call before the logical address becomes unusable
*/
void __init btext_setup_display(int width, int height, int depth, int pitch,
unsigned long address)
{
g_loc_X = 0;
g_loc_Y = 0;
g_max_loc_X = width / 8;
g_max_loc_Y = height / 16;
logicalDisplayBase = (unsigned char *)address;
dispDeviceBase = (unsigned char *)address;
dispDeviceRowBytes = pitch;
dispDeviceDepth = depth == 15 ? 16 : depth;
dispDeviceRect[0] = dispDeviceRect[1] = 0;
dispDeviceRect[2] = width;
dispDeviceRect[3] = height;
boot_text_mapped = 1;
}
void __init btext_unmap(void)
{
boot_text_mapped = 0;
}
/* Here's a small text engine to use during early boot
* or for debugging purposes
*
* todo:
*
* - build some kind of vgacon with it to enable early printk
* - move to a separate file
* - add a few video driver hooks to keep in sync with display
* changes.
*/
void btext_map(void)
{
unsigned long base, offset, size;
unsigned char *vbase;
/* By default, we are no longer mapped */
boot_text_mapped = 0;
if (!dispDeviceBase)
return;
base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
offset = ((unsigned long) dispDeviceBase) - base;
size = dispDeviceRowBytes * dispDeviceRect[3] + offset
+ dispDeviceRect[0];
vbase = ioremap_wc(base, size);
if (!vbase)
return;
logicalDisplayBase = vbase + offset;
boot_text_mapped = 1;
}
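/*
 * Editor's worked example (hypothetical numbers): for a 1024x768 display
 * at 8bpp with a 1024-byte pitch and a frame buffer at 0x9c000800, base is
 * the page-aligned 0x9c000000 and offset is 0x800, so the ioremap_wc()
 * length is 1024 * 768 + 0x800 bytes (dispDeviceRect[0] being 0 here):
 * the whole visible area plus the slack between the page boundary and the
 * first pixel.
 */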
static int __init btext_initialize(struct device_node *np)
{
unsigned int width, height, depth, pitch;
unsigned long address = 0;
const u32 *prop;
prop = of_get_property(np, "linux,bootx-width", NULL);
if (prop == NULL)
prop = of_get_property(np, "width", NULL);
if (prop == NULL)
return -EINVAL;
width = *prop;
prop = of_get_property(np, "linux,bootx-height", NULL);
if (prop == NULL)
prop = of_get_property(np, "height", NULL);
if (prop == NULL)
return -EINVAL;
height = *prop;
prop = of_get_property(np, "linux,bootx-depth", NULL);
if (prop == NULL)
prop = of_get_property(np, "depth", NULL);
if (prop == NULL)
return -EINVAL;
depth = *prop;
pitch = width * ((depth + 7) / 8);
prop = of_get_property(np, "linux,bootx-linebytes", NULL);
if (prop == NULL)
prop = of_get_property(np, "linebytes", NULL);
if (prop && *prop != 0xffffffffu)
pitch = *prop;
if (pitch == 1)
pitch = 0x1000;
prop = of_get_property(np, "linux,bootx-addr", NULL);
if (prop == NULL)
prop = of_get_property(np, "address", NULL);
if (prop)
address = *prop;
/* FIXME: Add support for PCI reg properties. Right now, only
* reliable on macs
*/
if (address == 0)
return -EINVAL;
g_loc_X = 0;
g_loc_Y = 0;
g_max_loc_X = width / 8;
g_max_loc_Y = height / 16;
dispDeviceBase = (unsigned char *)address;
dispDeviceRowBytes = pitch;
dispDeviceDepth = depth == 15 ? 16 : depth;
dispDeviceRect[0] = dispDeviceRect[1] = 0;
dispDeviceRect[2] = width;
dispDeviceRect[3] = height;
btext_map();
return 0;
}
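/*
 * Editor's note (inferred from the property names, not stated here): the
 * "linux,bootx-*" properties tried first are the ones the BootX boot path
 * injects into the device tree, while "width"/"height"/"depth"/
 * "linebytes"/"address" are the standard Open Firmware display-node
 * properties used as a fallback; the 0xffffffff linebytes test and the
 * pitch == 1 fixup guard against firmware that reports bogus values.
 */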
int __init btext_find_display(int allow_nonstdout)
{
struct device_node *np = of_stdout;
int rc = -ENODEV;
if (!of_node_is_type(np, "display")) {
printk("boot stdout isn't a display !\n");
np = NULL;
}
if (np)
rc = btext_initialize(np);
if (rc == 0 || !allow_nonstdout)
return rc;
for_each_node_by_type(np, "display") {
if (of_property_read_bool(np, "linux,opened")) {
printk("trying %pOF ...\n", np);
rc = btext_initialize(np);
printk("result: %d\n", rc);
}
if (rc == 0) {
of_node_put(np);
break;
}
}
return rc;
}
/* Calc the base address of a given point (x,y) */
static unsigned char * calc_base(int x, int y)
{
unsigned char *base;
base = logicalDisplayBase;
if (!base)
base = dispDeviceBase;
base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
return base;
}
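/*
 * Editor's sketch (values are hypothetical): calc_base() is plain 2-D
 * address arithmetic.  At 32bpp with a 4096-byte pitch and a zero origin
 * in dispDeviceRect, the pixel at column 10, row 3 lives at
 *
 *	base + 10 * 4		// 4 bytes per pixel across the row
 *	     + 3 * 4096		// one full pitch per line down
 */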
/* Adjust the display to a new resolution */
void btext_update_display(unsigned long phys, int width, int height,
int depth, int pitch)
{
if (!dispDeviceBase)
return;
/* check it's the same frame buffer (within 256MB) */
if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
return;
dispDeviceBase = (__u8 *) phys;
dispDeviceRect[0] = 0;
dispDeviceRect[1] = 0;
dispDeviceRect[2] = width;
dispDeviceRect[3] = height;
dispDeviceDepth = depth;
dispDeviceRowBytes = pitch;
if (boot_text_mapped) {
iounmap(logicalDisplayBase);
boot_text_mapped = 0;
}
btext_map();
g_loc_X = 0;
g_loc_Y = 0;
g_max_loc_X = width / 8;
g_max_loc_Y = height / 16;
}
EXPORT_SYMBOL(btext_update_display);
void __init btext_clearscreen(void)
{
unsigned int *base = (unsigned int *)calc_base(0, 0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
(dispDeviceDepth >> 3)) >> 2;
int i,j;
rmci_maybe_on();
for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
{
unsigned int *ptr = base;
for(j=width; j; --j)
*(ptr++) = 0;
base += (dispDeviceRowBytes >> 2);
}
rmci_maybe_off();
}
void __init btext_flushscreen(void)
{
unsigned int *base = (unsigned int *)calc_base(0, 0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
(dispDeviceDepth >> 3)) >> 2;
int i,j;
for (i=0; i < (dispDeviceRect[3] - dispDeviceRect[1]); i++)
{
unsigned int *ptr = base;
for(j = width; j > 0; j -= 8) {
__asm__ __volatile__ ("dcbst 0,%0" :: "r" (ptr));
ptr += 8;
}
base += (dispDeviceRowBytes >> 2);
}
__asm__ __volatile__ ("sync" ::: "memory");
}
void __init btext_flushline(void)
{
unsigned int *base = (unsigned int *)calc_base(0, g_loc_Y << 4);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
(dispDeviceDepth >> 3)) >> 2;
int i,j;
for (i=0; i < 16; i++)
{
unsigned int *ptr = base;
for(j = width; j > 0; j -= 8) {
__asm__ __volatile__ ("dcbst 0,%0" :: "r" (ptr));
ptr += 8;
}
base += (dispDeviceRowBytes >> 2);
}
__asm__ __volatile__ ("sync" ::: "memory");
}
#ifndef NO_SCROLL
static void scrollscreen(void)
{
unsigned int *src = (unsigned int *)calc_base(0,16);
unsigned int *dst = (unsigned int *)calc_base(0,0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
(dispDeviceDepth >> 3)) >> 2;
int i,j;
rmci_maybe_on();
for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
{
unsigned int *src_ptr = src;
unsigned int *dst_ptr = dst;
for(j=width; j; --j)
*(dst_ptr++) = *(src_ptr++);
src += (dispDeviceRowBytes >> 2);
dst += (dispDeviceRowBytes >> 2);
}
for (i=0; i<16; i++)
{
unsigned int *dst_ptr = dst;
for(j=width; j; --j)
*(dst_ptr++) = 0;
dst += (dispDeviceRowBytes >> 2);
}
rmci_maybe_off();
}
#endif /* ndef NO_SCROLL */
static unsigned int expand_bits_8[16] = {
0x00000000,
0x000000ff,
0x0000ff00,
0x0000ffff,
0x00ff0000,
0x00ff00ff,
0x00ffff00,
0x00ffffff,
0xff000000,
0xff0000ff,
0xff00ff00,
0xff00ffff,
0xffff0000,
0xffff00ff,
0xffffff00,
0xffffffff
};
static unsigned int expand_bits_16[4] = {
0x00000000,
0x0000ffff,
0xffff0000,
0xffffffff
};
static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
int bg = 0x00000000UL;
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (-(bits >> 7) & fg) ^ bg;
base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
base[7] = (-(bits & 1) & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
static inline void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
int bg = 0x00000000UL;
unsigned int *eb = (int *)expand_bits_16;
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (eb[bits >> 6] & fg) ^ bg;
base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
base[3] = (eb[bits & 3] & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
static inline void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0x0F0F0F0FUL;
int bg = 0x00000000UL;
unsigned int *eb = (int *)expand_bits_8;
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (eb[bits >> 4] & fg) ^ bg;
base[1] = (eb[bits & 0xf] & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
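/*
 * Editor's note (not from the original source): expand_bits_8[] turns a
 * 4-bit slice of a font row into four packed 8bpp pixels, e.g. the nibble
 * 0b1010 indexes entry 0xa = 0xff00ff00; draw_byte_8() then masks with fg
 * (0x0f0f0f0f) so lit pixels become colour 0x0f and the rest stay 0.
 * Placing the leftmost pixel in the most significant byte puts it at the
 * lowest address only because this code runs big-endian, which is
 * presumably why no byte-swapped variant is needed here.  expand_bits_16[]
 * does the same two pixels at a time for the 15/16bpp path.
 */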
static noinline void draw_byte(unsigned char c, long locX, long locY)
{
unsigned char *base = calc_base(locX << 3, locY << 4);
unsigned char *font = &vga_font[((unsigned int)c) * 16];
int rb = dispDeviceRowBytes;
rmci_maybe_on();
switch(dispDeviceDepth) {
case 24:
case 32:
draw_byte_32(font, (unsigned int *)base, rb);
break;
case 15:
case 16:
draw_byte_16(font, (unsigned int *)base, rb);
break;
case 8:
draw_byte_8(font, (unsigned int *)base, rb);
break;
}
rmci_maybe_off();
}
void btext_drawchar(char c)
{
int cline = 0;
#ifdef NO_SCROLL
int x;
#endif
if (!boot_text_mapped)
return;
switch (c) {
case '\b':
if (g_loc_X > 0)
--g_loc_X;
break;
case '\t':
g_loc_X = (g_loc_X & -8) + 8;
break;
case '\r':
g_loc_X = 0;
break;
case '\n':
g_loc_X = 0;
g_loc_Y++;
cline = 1;
break;
default:
draw_byte(c, g_loc_X++, g_loc_Y);
}
if (g_loc_X >= g_max_loc_X) {
g_loc_X = 0;
g_loc_Y++;
cline = 1;
}
#ifndef NO_SCROLL
while (g_loc_Y >= g_max_loc_Y) {
scrollscreen();
g_loc_Y--;
}
#else
/* wrap around from bottom to top of screen so we don't
waste time scrolling each line. -- paulus. */
if (g_loc_Y >= g_max_loc_Y)
g_loc_Y = 0;
if (cline) {
for (x = 0; x < g_max_loc_X; ++x)
draw_byte(' ', x, g_loc_Y);
}
#endif
}
void btext_drawstring(const char *c)
{
if (!boot_text_mapped)
return;
while (*c)
btext_drawchar(*c++);
}
void __init btext_drawtext(const char *c, unsigned int len)
{
if (!boot_text_mapped)
return;
while (len--)
btext_drawchar(*c++);
}
void __init btext_drawhex(unsigned long v)
{
if (!boot_text_mapped)
return;
#ifdef CONFIG_PPC64
btext_drawchar(hex_asc_hi(v >> 56));
btext_drawchar(hex_asc_lo(v >> 56));
btext_drawchar(hex_asc_hi(v >> 48));
btext_drawchar(hex_asc_lo(v >> 48));
btext_drawchar(hex_asc_hi(v >> 40));
btext_drawchar(hex_asc_lo(v >> 40));
btext_drawchar(hex_asc_hi(v >> 32));
btext_drawchar(hex_asc_lo(v >> 32));
#endif
btext_drawchar(hex_asc_hi(v >> 24));
btext_drawchar(hex_asc_lo(v >> 24));
btext_drawchar(hex_asc_hi(v >> 16));
btext_drawchar(hex_asc_lo(v >> 16));
btext_drawchar(hex_asc_hi(v >> 8));
btext_drawchar(hex_asc_lo(v >> 8));
btext_drawchar(hex_asc_hi(v));
btext_drawchar(hex_asc_lo(v));
btext_drawchar(' ');
}
void __init udbg_init_btext(void)
{
/* If btext is enabled, we might have a BAT setup for early display,
* so we enable some very basic udbg output
*/
udbg_putc = btext_drawchar;
}
static unsigned char vga_font[cmapsz] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
};
| linux-master | arch/powerpc/kernel/btext.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 IBM Corporation
* Author: Nayna Jain
*/
#include <linux/ima.h>
#include <asm/secure_boot.h>
bool arch_ima_get_secureboot(void)
{
return is_ppc_secureboot_enabled();
}
/*
* The "secure_rules" are enabled only on "secureboot" enabled systems.
* These rules verify the file signatures against known good values.
* The "appraise_type=imasig|modsig" option allows the known good signature
* to be stored as an xattr or as an appended signature.
*
* To avoid duplicate signature verification as much as possible, the IMA
* policy rule for module appraisal is added only if CONFIG_MODULE_SIG
* is not enabled.
*/
static const char *const secure_rules[] = {
"appraise func=KEXEC_KERNEL_CHECK appraise_type=imasig|modsig",
#ifndef CONFIG_MODULE_SIG
"appraise func=MODULE_CHECK appraise_type=imasig|modsig",
#endif
NULL
};
/*
* The "trusted_rules" are enabled only on "trustedboot" enabled systems.
* These rules add the kexec kernel image and kernel modules file hashes to
* the IMA measurement list.
*/
static const char *const trusted_rules[] = {
"measure func=KEXEC_KERNEL_CHECK",
"measure func=MODULE_CHECK",
NULL
};
/*
* The "secure_and_trusted_rules" contains rules for both the secure boot and
* trusted boot. The "template=ima-modsig" option includes the appended
* signature, when available, in the IMA measurement list.
*/
static const char *const secure_and_trusted_rules[] = {
"measure func=KEXEC_KERNEL_CHECK template=ima-modsig",
"measure func=MODULE_CHECK template=ima-modsig",
"appraise func=KEXEC_KERNEL_CHECK appraise_type=imasig|modsig",
#ifndef CONFIG_MODULE_SIG
"appraise func=MODULE_CHECK appraise_type=imasig|modsig",
#endif
NULL
};
/*
* Returns the relevant IMA arch-specific policies based on the system secure
* boot state.
*/
const char *const *arch_get_ima_policy(void)
{
if (is_ppc_secureboot_enabled()) {
if (IS_ENABLED(CONFIG_MODULE_SIG))
set_module_sig_enforced();
if (is_ppc_trustedboot_enabled())
return secure_and_trusted_rules;
else
return secure_rules;
} else if (is_ppc_trustedboot_enabled()) {
return trusted_rules;
}
return NULL;
}
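/*
 * Editor's note (assumption about the IMA core, not stated in this file):
 * the returned NULL-terminated array is walked by IMA during early boot to
 * build its architecture-specific policy, so every entry must be a
 * complete, parseable IMA policy rule string.
 */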
| linux-master | arch/powerpc/kernel/ima_arch.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
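/*
 * Editor's note (macro bodies quoted from memory of <linux/kbuild.h>, so
 * treat them as an approximation): the DEFINE() and OFFSET() helpers used
 * throughout main() expand to roughly
 *
 *	#define DEFINE(sym, val) \
 *		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
 *	#define OFFSET(sym, str, mem) \
 *		DEFINE(sym, offsetof(struct str, mem))
 *
 * so compiling this file with -S leaves "->SYM <value> <expr>" markers in
 * the assembly output, which the Kbuild sed script turns into
 * "#define SYM <value>" lines in include/generated/asm-offsets.h.
 */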
#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/hrtimer.h>
#ifdef CONFIG_PPC64
#include <linux/time.h>
#include <linux/hardirq.h>
#endif
#include <linux/kbuild.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
#include <asm/dbell.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#endif
#ifdef CONFIG_PPC_POWERNV
#include <asm/opal.h>
#endif
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST)
#include <linux/kvm_host.h>
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S)
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#endif
#ifdef CONFIG_PPC32
#ifdef CONFIG_BOOKE_OR_40x
#include "head_booke.h"
#endif
#endif
#if defined(CONFIG_PPC_E500)
#include "../mm/mmu_decl.h"
#endif
#ifdef CONFIG_PPC_8xx
#include <asm/fixmap.h>
#endif
#ifdef CONFIG_XMON
#include "../xmon/xmon_bpts.h"
#endif
#define STACK_PT_REGS_OFFSET(sym, val) \
DEFINE(sym, STACK_INT_FRAME_REGS + offsetof(struct pt_regs, val))
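/*
 * Editor's sketch: with the helper above, STACK_PT_REGS_OFFSET(GPR1, gpr[1])
 * emits GPR1 = STACK_INT_FRAME_REGS + offsetof(struct pt_regs, gpr[1]),
 * i.e. the offset of r1's save slot from the base of an interrupt stack
 * frame, ready for "ld rX, GPR1(r1)"-style operands in assembly.
 */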
int main(void)
{
OFFSET(THREAD, task_struct, thread);
OFFSET(MM, task_struct, mm);
#ifdef CONFIG_STACKPROTECTOR
OFFSET(TASK_CANARY, task_struct, stack_canary);
#ifdef CONFIG_PPC64
OFFSET(PACA_CANARY, paca_struct, canary);
#endif
#endif
#ifdef CONFIG_PPC32
#ifdef CONFIG_PPC_RTAS
OFFSET(RTAS_SP, thread_struct, rtas_sp);
#endif
#endif /* CONFIG_PPC32 */
OFFSET(TASK_STACK, task_struct, stack);
#ifdef CONFIG_SMP
OFFSET(TASK_CPU, task_struct, thread_info.cpu);
#endif
#ifdef CONFIG_LIVEPATCH_64
OFFSET(TI_livepatch_sp, thread_info, livepatch_sp);
#endif
OFFSET(KSP, thread_struct, ksp);
OFFSET(PT_REGS, thread_struct, regs);
#ifdef CONFIG_BOOKE
OFFSET(THREAD_NORMSAVES, thread_struct, normsave[0]);
#endif
#ifdef CONFIG_PPC_FPU
OFFSET(THREAD_FPEXC_MODE, thread_struct, fpexc_mode);
OFFSET(THREAD_FPSTATE, thread_struct, fp_state.fpr);
OFFSET(THREAD_FPSAVEAREA, thread_struct, fp_save_area);
#endif
OFFSET(FPSTATE_FPSCR, thread_fp_state, fpscr);
OFFSET(THREAD_LOAD_FP, thread_struct, load_fp);
#ifdef CONFIG_ALTIVEC
OFFSET(THREAD_VRSTATE, thread_struct, vr_state.vr);
OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area);
OFFSET(THREAD_USED_VR, thread_struct, used_vr);
OFFSET(VRSTATE_VSCR, thread_vr_state, vscr);
OFFSET(THREAD_LOAD_VEC, thread_struct, load_vec);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
OFFSET(THREAD_USED_VSR, thread_struct, used_vsr);
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
OFFSET(KSP_VSID, thread_struct, ksp_vsid);
#else /* CONFIG_PPC64 */
OFFSET(PGDIR, thread_struct, pgdir);
OFFSET(SRR0, thread_struct, srr0);
OFFSET(SRR1, thread_struct, srr1);
OFFSET(DAR, thread_struct, dar);
OFFSET(DSISR, thread_struct, dsisr);
#ifdef CONFIG_PPC_BOOK3S_32
OFFSET(THR0, thread_struct, r0);
OFFSET(THR3, thread_struct, r3);
OFFSET(THR4, thread_struct, r4);
OFFSET(THR5, thread_struct, r5);
OFFSET(THR6, thread_struct, r6);
OFFSET(THR8, thread_struct, r8);
OFFSET(THR9, thread_struct, r9);
OFFSET(THR11, thread_struct, r11);
OFFSET(THLR, thread_struct, lr);
OFFSET(THCTR, thread_struct, ctr);
OFFSET(THSR0, thread_struct, sr0);
#endif
#ifdef CONFIG_SPE
OFFSET(THREAD_EVR0, thread_struct, evr[0]);
OFFSET(THREAD_ACC, thread_struct, acc);
OFFSET(THREAD_USED_SPE, thread_struct, used_spe);
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
OFFSET(THREAD_KVM_SVCPU, thread_struct, kvm_shadow_vcpu);
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
OFFSET(PACATMSCRATCH, paca_struct, tm_scratch);
OFFSET(THREAD_TM_TFHAR, thread_struct, tm_tfhar);
OFFSET(THREAD_TM_TEXASR, thread_struct, tm_texasr);
OFFSET(THREAD_TM_TFIAR, thread_struct, tm_tfiar);
OFFSET(THREAD_TM_TAR, thread_struct, tm_tar);
OFFSET(THREAD_TM_PPR, thread_struct, tm_ppr);
OFFSET(THREAD_TM_DSCR, thread_struct, tm_dscr);
OFFSET(THREAD_TM_AMR, thread_struct, tm_amr);
OFFSET(PT_CKPT_REGS, thread_struct, ckpt_regs);
OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state.vr);
OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave);
OFFSET(THREAD_CKFPSTATE, thread_struct, ckfp_state.fpr);
/* Local pt_regs on stack in int frame form, plus 16 bytes for TM */
DEFINE(TM_FRAME_SIZE, STACK_INT_FRAME_SIZE + 16);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
#ifdef CONFIG_PPC64
OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
OFFSET(DCACHEL1LOGBLOCKSIZE, ppc64_caches, l1d.log_block_size);
/* paca */
OFFSET(PACAPACAINDEX, paca_struct, paca_index);
OFFSET(PACAPROCSTART, paca_struct, cpu_start);
OFFSET(PACAKSAVE, paca_struct, kstack);
OFFSET(PACACURRENT, paca_struct, __current);
DEFINE(PACA_THREAD_INFO, offsetof(struct paca_struct, __current) +
offsetof(struct task_struct, thread_info));
OFFSET(PACASAVEDMSR, paca_struct, saved_msr);
OFFSET(PACAR1, paca_struct, saved_r1);
#ifndef CONFIG_PPC_KERNEL_PCREL
OFFSET(PACATOC, paca_struct, kernel_toc);
#endif
OFFSET(PACAKBASE, paca_struct, kernelbase);
OFFSET(PACAKMSR, paca_struct, kernel_msr);
#ifdef CONFIG_PPC_BOOK3S_64
OFFSET(PACAHSRR_VALID, paca_struct, hsrr_valid);
OFFSET(PACASRR_VALID, paca_struct, srr_valid);
#endif
OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
#ifdef CONFIG_PPC_BOOK3E_64
OFFSET(PACAPGD, paca_struct, pgd);
OFFSET(PACA_KERNELPGD, paca_struct, kernel_pgd);
OFFSET(PACA_EXGEN, paca_struct, exgen);
OFFSET(PACA_EXTLB, paca_struct, extlb);
OFFSET(PACA_EXMC, paca_struct, exmc);
OFFSET(PACA_EXCRIT, paca_struct, excrit);
OFFSET(PACA_EXDBG, paca_struct, exdbg);
OFFSET(PACA_MC_STACK, paca_struct, mc_kstack);
OFFSET(PACA_CRIT_STACK, paca_struct, crit_kstack);
OFFSET(PACA_DBG_STACK, paca_struct, dbg_kstack);
OFFSET(PACA_TCD_PTR, paca_struct, tcd_ptr);
OFFSET(TCD_ESEL_NEXT, tlb_core_data, esel_next);
OFFSET(TCD_ESEL_MAX, tlb_core_data, esel_max);
OFFSET(TCD_ESEL_FIRST, tlb_core_data, esel_first);
#endif /* CONFIG_PPC_BOOK3E_64 */
#ifdef CONFIG_PPC_BOOK3S_64
OFFSET(PACA_EXGEN, paca_struct, exgen);
OFFSET(PACA_EXMC, paca_struct, exmc);
OFFSET(PACA_EXNMI, paca_struct, exnmi);
#ifdef CONFIG_PPC_64S_HASH_MMU
OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area);
#endif
OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
#endif
OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count);
#endif /* CONFIG_PPC_BOOK3S_64 */
OFFSET(PACAEMERGSP, paca_struct, emergency_sp);
#ifdef CONFIG_PPC_BOOK3S_64
OFFSET(PACAMCEMERGSP, paca_struct, mc_emergency_sp);
OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
OFFSET(PACA_IN_MCE, paca_struct, in_mce);
OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
OFFSET(PACA_EXRFI, paca_struct, exrfi);
OFFSET(PACA_L1D_FLUSH_SIZE, paca_struct, l1d_flush_size);
#endif
OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default);
#ifdef CONFIG_PPC64
OFFSET(PACA_EXIT_SAVE_R1, paca_struct, exit_save_r1);
#endif
#ifdef CONFIG_PPC_BOOK3E_64
OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
#endif
OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
#else /* CONFIG_PPC64 */
#endif /* CONFIG_PPC64 */
/* RTAS */
OFFSET(RTASBASE, rtas_t, base);
OFFSET(RTASENTRY, rtas_t, entry);
/* Interrupt register frame */
DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
DEFINE(SWITCH_FRAME_SIZE, STACK_SWITCH_FRAME_SIZE);
STACK_PT_REGS_OFFSET(GPR0, gpr[0]);
STACK_PT_REGS_OFFSET(GPR1, gpr[1]);
STACK_PT_REGS_OFFSET(GPR2, gpr[2]);
STACK_PT_REGS_OFFSET(GPR3, gpr[3]);
STACK_PT_REGS_OFFSET(GPR4, gpr[4]);
STACK_PT_REGS_OFFSET(GPR5, gpr[5]);
STACK_PT_REGS_OFFSET(GPR6, gpr[6]);
STACK_PT_REGS_OFFSET(GPR7, gpr[7]);
STACK_PT_REGS_OFFSET(GPR8, gpr[8]);
STACK_PT_REGS_OFFSET(GPR9, gpr[9]);
STACK_PT_REGS_OFFSET(GPR10, gpr[10]);
STACK_PT_REGS_OFFSET(GPR11, gpr[11]);
STACK_PT_REGS_OFFSET(GPR12, gpr[12]);
STACK_PT_REGS_OFFSET(GPR13, gpr[13]);
/*
* Note: these symbols include _ because they overlap with special
* register names
*/
STACK_PT_REGS_OFFSET(_NIP, nip);
STACK_PT_REGS_OFFSET(_MSR, msr);
STACK_PT_REGS_OFFSET(_CTR, ctr);
STACK_PT_REGS_OFFSET(_LINK, link);
STACK_PT_REGS_OFFSET(_CCR, ccr);
STACK_PT_REGS_OFFSET(_XER, xer);
STACK_PT_REGS_OFFSET(_DAR, dar);
STACK_PT_REGS_OFFSET(_DEAR, dear);
STACK_PT_REGS_OFFSET(_DSISR, dsisr);
STACK_PT_REGS_OFFSET(_ESR, esr);
STACK_PT_REGS_OFFSET(ORIG_GPR3, orig_gpr3);
STACK_PT_REGS_OFFSET(RESULT, result);
STACK_PT_REGS_OFFSET(_TRAP, trap);
#ifdef CONFIG_PPC64
STACK_PT_REGS_OFFSET(SOFTE, softe);
STACK_PT_REGS_OFFSET(_PPR, ppr);
#endif
#ifdef CONFIG_PPC_PKEY
STACK_PT_REGS_OFFSET(STACK_REGS_AMR, amr);
STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
#endif
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
STACK_PT_REGS_OFFSET(MAS0, mas0);
/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
STACK_PT_REGS_OFFSET(MMUCR, mas0);
STACK_PT_REGS_OFFSET(MAS1, mas1);
STACK_PT_REGS_OFFSET(MAS2, mas2);
STACK_PT_REGS_OFFSET(MAS3, mas3);
STACK_PT_REGS_OFFSET(MAS6, mas6);
STACK_PT_REGS_OFFSET(MAS7, mas7);
STACK_PT_REGS_OFFSET(_SRR0, srr0);
STACK_PT_REGS_OFFSET(_SRR1, srr1);
STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
#endif
/* About the CPU features table */
OFFSET(CPU_SPEC_FEATURES, cpu_spec, cpu_features);
OFFSET(CPU_SPEC_SETUP, cpu_spec, cpu_setup);
OFFSET(CPU_SPEC_RESTORE, cpu_spec, cpu_restore);
OFFSET(pbe_address, pbe, address);
OFFSET(pbe_orig_address, pbe, orig_address);
OFFSET(pbe_next, pbe, next);
#ifndef CONFIG_PPC64
DEFINE(TASK_SIZE, TASK_SIZE);
DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
#endif /* ! CONFIG_PPC64 */
/* datapage offsets for use by vdso */
OFFSET(VDSO_DATA_OFFSET, vdso_arch_data, data);
OFFSET(CFG_TB_TICKS_PER_SEC, vdso_arch_data, tb_ticks_per_sec);
#ifdef CONFIG_PPC64
OFFSET(CFG_ICACHE_BLOCKSZ, vdso_arch_data, icache_block_size);
OFFSET(CFG_DCACHE_BLOCKSZ, vdso_arch_data, dcache_block_size);
OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_arch_data, icache_log_block_size);
OFFSET(CFG_DCACHE_LOGBLOCKSZ, vdso_arch_data, dcache_log_block_size);
OFFSET(CFG_SYSCALL_MAP64, vdso_arch_data, syscall_map);
OFFSET(CFG_SYSCALL_MAP32, vdso_arch_data, compat_syscall_map);
#else
OFFSET(CFG_SYSCALL_MAP32, vdso_arch_data, syscall_map);
#endif
#ifdef CONFIG_BUG
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif
#ifdef CONFIG_KVM
OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid);
OFFSET(VCPU_GPRS, kvm_vcpu, arch.regs.gpr);
OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave);
OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr);
#ifdef CONFIG_ALTIVEC
OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr);
#endif
OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
#ifdef CONFIG_PPC_BOOK3S
OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
#endif
OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0);
OFFSET(VCPU_SRR1, kvm_vcpu, arch.shregs.srr1);
OFFSET(VCPU_SPRG0, kvm_vcpu, arch.shregs.sprg0);
OFFSET(VCPU_SPRG1, kvm_vcpu, arch.shregs.sprg1);
OFFSET(VCPU_SPRG2, kvm_vcpu, arch.shregs.sprg2);
OFFSET(VCPU_SPRG3, kvm_vcpu, arch.shregs.sprg3);
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
OFFSET(VCPU_TB_RMENTRY, kvm_vcpu, arch.rm_entry);
OFFSET(VCPU_TB_RMINTR, kvm_vcpu, arch.rm_intr);
OFFSET(VCPU_TB_RMEXIT, kvm_vcpu, arch.rm_exit);
OFFSET(VCPU_TB_GUEST, kvm_vcpu, arch.guest_time);
OFFSET(VCPU_TB_CEDE, kvm_vcpu, arch.cede_time);
OFFSET(VCPU_CUR_ACTIVITY, kvm_vcpu, arch.cur_activity);
OFFSET(VCPU_ACTIVITY_START, kvm_vcpu, arch.cur_tb_start);
OFFSET(TAS_SEQCOUNT, kvmhv_tb_accumulator, seqcount);
OFFSET(TAS_TOTAL, kvmhv_tb_accumulator, tb_total);
OFFSET(TAS_MIN, kvmhv_tb_accumulator, tb_min);
OFFSET(TAS_MAX, kvmhv_tb_accumulator, tb_max);
#endif
OFFSET(VCPU_SHARED_SPRG3, kvm_vcpu_arch_shared, sprg3);
OFFSET(VCPU_SHARED_SPRG4, kvm_vcpu_arch_shared, sprg4);
OFFSET(VCPU_SHARED_SPRG5, kvm_vcpu_arch_shared, sprg5);
OFFSET(VCPU_SHARED_SPRG6, kvm_vcpu_arch_shared, sprg6);
OFFSET(VCPU_SHARED_SPRG7, kvm_vcpu_arch_shared, sprg7);
OFFSET(VCPU_SHADOW_PID, kvm_vcpu, arch.shadow_pid);
OFFSET(VCPU_SHADOW_PID1, kvm_vcpu, arch.shadow_pid1);
OFFSET(VCPU_SHARED, kvm_vcpu, arch.shared);
OFFSET(VCPU_SHARED_MSR, kvm_vcpu_arch_shared, msr);
OFFSET(VCPU_SHADOW_MSR, kvm_vcpu, arch.shadow_msr);
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
OFFSET(VCPU_SHAREDBE, kvm_vcpu, arch.shared_big_endian);
#endif
OFFSET(VCPU_SHARED_MAS0, kvm_vcpu_arch_shared, mas0);
OFFSET(VCPU_SHARED_MAS1, kvm_vcpu_arch_shared, mas1);
OFFSET(VCPU_SHARED_MAS2, kvm_vcpu_arch_shared, mas2);
OFFSET(VCPU_SHARED_MAS7_3, kvm_vcpu_arch_shared, mas7_3);
OFFSET(VCPU_SHARED_MAS4, kvm_vcpu_arch_shared, mas4);
OFFSET(VCPU_SHARED_MAS6, kvm_vcpu_arch_shared, mas6);
OFFSET(VCPU_KVM, kvm_vcpu, kvm);
OFFSET(KVM_LPID, kvm, arch.lpid);
/* book3s */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(KVM_SDR1, kvm, arch.sdr1);
OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid);
OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr);
OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1);
OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls);
OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty);
OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst);
OFFSET(VCPU_CPU, kvm_vcpu, cpu);
OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu);
#endif
#ifdef CONFIG_PPC_BOOK3S
OFFSET(VCPU_PURR, kvm_vcpu, arch.purr);
OFFSET(VCPU_SPURR, kvm_vcpu, arch.spurr);
OFFSET(VCPU_IC, kvm_vcpu, arch.ic);
OFFSET(VCPU_DSCR, kvm_vcpu, arch.dscr);
OFFSET(VCPU_AMR, kvm_vcpu, arch.amr);
OFFSET(VCPU_UAMOR, kvm_vcpu, arch.uamor);
OFFSET(VCPU_IAMR, kvm_vcpu, arch.iamr);
OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl);
OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr);
OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx);
OFFSET(VCPU_DAWR0, kvm_vcpu, arch.dawr0);
OFFSET(VCPU_DAWRX0, kvm_vcpu, arch.dawrx0);
OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires);
OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded);
OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr);
OFFSET(VCPU_MMCRA, kvm_vcpu, arch.mmcra);
OFFSET(VCPU_MMCRS, kvm_vcpu, arch.mmcrs);
OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar);
OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar);
OFFSET(VCPU_SIER, kvm_vcpu, arch.sier);
OFFSET(VCPU_SLB, kvm_vcpu, arch.slb);
OFFSET(VCPU_SLB_MAX, kvm_vcpu, arch.slb_max);
OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr);
OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr);
OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar);
OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr);
OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap);
OFFSET(VCPU_CFAR, kvm_vcpu, arch.cfar);
OFFSET(VCPU_PPR, kvm_vcpu, arch.ppr);
OFFSET(VCPU_FSCR, kvm_vcpu, arch.fscr);
OFFSET(VCPU_PSPB, kvm_vcpu, arch.pspb);
OFFSET(VCPU_EBBHR, kvm_vcpu, arch.ebbhr);
OFFSET(VCPU_EBBRR, kvm_vcpu, arch.ebbrr);
OFFSET(VCPU_BESCR, kvm_vcpu, arch.bescr);
OFFSET(VCPU_CSIGR, kvm_vcpu, arch.csigr);
OFFSET(VCPU_TACR, kvm_vcpu, arch.tacr);
OFFSET(VCPU_TCSCR, kvm_vcpu, arch.tcscr);
OFFSET(VCPU_ACOP, kvm_vcpu, arch.acop);
OFFSET(VCPU_WORT, kvm_vcpu, arch.wort);
OFFSET(VCPU_HFSCR, kvm_vcpu, arch.hfscr);
OFFSET(VCORE_ENTRY_EXIT, kvmppc_vcore, entry_exit_map);
OFFSET(VCORE_IN_GUEST, kvmppc_vcore, in_guest);
OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads);
OFFSET(VCORE_KVM, kvmppc_vcore, kvm);
OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset);
OFFSET(VCORE_TB_OFFSET_APPL, kvmppc_vcore, tb_offset_applied);
OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr);
OFFSET(VCORE_PCR, kvmppc_vcore, pcr);
OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes);
OFFSET(VCORE_VTB, kvmppc_vcore, vtb);
OFFSET(VCPU_SLB_E, kvmppc_slb, orige);
OFFSET(VCPU_SLB_V, kvmppc_slb, origv);
DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
OFFSET(VCPU_TFHAR, kvm_vcpu, arch.tfhar);
OFFSET(VCPU_TFIAR, kvm_vcpu, arch.tfiar);
OFFSET(VCPU_TEXASR, kvm_vcpu, arch.texasr);
OFFSET(VCPU_ORIG_TEXASR, kvm_vcpu, arch.orig_texasr);
OFFSET(VCPU_GPR_TM, kvm_vcpu, arch.gpr_tm);
OFFSET(VCPU_FPRS_TM, kvm_vcpu, arch.fp_tm.fpr);
OFFSET(VCPU_VRS_TM, kvm_vcpu, arch.vr_tm.vr);
OFFSET(VCPU_VRSAVE_TM, kvm_vcpu, arch.vrsave_tm);
OFFSET(VCPU_CR_TM, kvm_vcpu, arch.cr_tm);
OFFSET(VCPU_XER_TM, kvm_vcpu, arch.xer_tm);
OFFSET(VCPU_LR_TM, kvm_vcpu, arch.lr_tm);
OFFSET(VCPU_CTR_TM, kvm_vcpu, arch.ctr_tm);
OFFSET(VCPU_AMR_TM, kvm_vcpu, arch.amr_tm);
OFFSET(VCPU_PPR_TM, kvm_vcpu, arch.ppr_tm);
OFFSET(VCPU_DSCR_TM, kvm_vcpu, arch.dscr_tm);
OFFSET(VCPU_TAR_TM, kvm_vcpu, arch.tar_tm);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
OFFSET(PACA_SVCPU, paca_struct, shadow_vcpu);
# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
#else
# define SVCPU_FIELD(x, f)
#endif
# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
#else /* 32-bit */
# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
#endif
SVCPU_FIELD(SVCPU_CR, cr);
SVCPU_FIELD(SVCPU_XER, xer);
SVCPU_FIELD(SVCPU_CTR, ctr);
SVCPU_FIELD(SVCPU_LR, lr);
SVCPU_FIELD(SVCPU_PC, pc);
SVCPU_FIELD(SVCPU_R0, gpr[0]);
SVCPU_FIELD(SVCPU_R1, gpr[1]);
SVCPU_FIELD(SVCPU_R2, gpr[2]);
SVCPU_FIELD(SVCPU_R3, gpr[3]);
SVCPU_FIELD(SVCPU_R4, gpr[4]);
SVCPU_FIELD(SVCPU_R5, gpr[5]);
SVCPU_FIELD(SVCPU_R6, gpr[6]);
SVCPU_FIELD(SVCPU_R7, gpr[7]);
SVCPU_FIELD(SVCPU_R8, gpr[8]);
SVCPU_FIELD(SVCPU_R9, gpr[9]);
SVCPU_FIELD(SVCPU_R10, gpr[10]);
SVCPU_FIELD(SVCPU_R11, gpr[11]);
SVCPU_FIELD(SVCPU_R12, gpr[12]);
SVCPU_FIELD(SVCPU_R13, gpr[13]);
SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
#ifdef CONFIG_PPC_BOOK3S_32
SVCPU_FIELD(SVCPU_SR, sr);
#endif
#ifdef CONFIG_PPC64
SVCPU_FIELD(SVCPU_SLB, slb);
SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
SVCPU_FIELD(SVCPU_SHADOW_FSCR, shadow_fscr);
#endif
HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
HSTATE_FIELD(HSTATE_NAPPING, napping);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_PTID, ptid);
HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend);
HSTATE_FIELD(HSTATE_MMCR0, host_mmcr[0]);
HSTATE_FIELD(HSTATE_MMCR1, host_mmcr[1]);
HSTATE_FIELD(HSTATE_MMCRA, host_mmcr[2]);
HSTATE_FIELD(HSTATE_SIAR, host_mmcr[3]);
HSTATE_FIELD(HSTATE_SDAR, host_mmcr[4]);
HSTATE_FIELD(HSTATE_MMCR2, host_mmcr[5]);
HSTATE_FIELD(HSTATE_SIER, host_mmcr[6]);
HSTATE_FIELD(HSTATE_PMC1, host_pmc[0]);
HSTATE_FIELD(HSTATE_PMC2, host_pmc[1]);
HSTATE_FIELD(HSTATE_PMC3, host_pmc[2]);
HSTATE_FIELD(HSTATE_PMC4, host_pmc[3]);
HSTATE_FIELD(HSTATE_PMC5, host_pmc[4]);
HSTATE_FIELD(HSTATE_PMC6, host_pmc[5]);
HSTATE_FIELD(HSTATE_PURR, host_purr);
HSTATE_FIELD(HSTATE_SPURR, host_spurr);
HSTATE_FIELD(HSTATE_DSCR, host_dscr);
HSTATE_FIELD(HSTATE_DABR, dabr);
HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
HSTATE_FIELD(HSTATE_SPLIT_MODE, kvm_split_mode);
DEFINE(IPI_PRIORITY, IPI_PRIORITY);
OFFSET(KVM_SPLIT_RPR, kvm_split_mode, rpr);
OFFSET(KVM_SPLIT_PMMAR, kvm_split_mode, pmmar);
OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar);
OFFSET(KVM_SPLIT_DO_NAP, kvm_split_mode, do_nap);
OFFSET(KVM_SPLIT_NAPPED, kvm_split_mode, napped);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_PPC_BOOK3S_64
HSTATE_FIELD(HSTATE_CFAR, cfar);
HSTATE_FIELD(HSTATE_PPR, ppr);
HSTATE_FIELD(HSTATE_HOST_FSCR, host_fscr);
#endif /* CONFIG_PPC_BOOK3S_64 */
#else /* CONFIG_PPC_BOOK3S */
OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9);
OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear);
OFFSET(VCPU_FAULT_ESR, kvm_vcpu, arch.fault_esr);
OFFSET(VCPU_CRIT_SAVE, kvm_vcpu, arch.crit_save);
#endif /* CONFIG_PPC_BOOK3S */
#endif /* CONFIG_KVM */
#ifdef CONFIG_KVM_GUEST
OFFSET(KVM_MAGIC_SCRATCH1, kvm_vcpu_arch_shared, scratch1);
OFFSET(KVM_MAGIC_SCRATCH2, kvm_vcpu_arch_shared, scratch2);
OFFSET(KVM_MAGIC_SCRATCH3, kvm_vcpu_arch_shared, scratch3);
OFFSET(KVM_MAGIC_INT, kvm_vcpu_arch_shared, int_pending);
OFFSET(KVM_MAGIC_MSR, kvm_vcpu_arch_shared, msr);
OFFSET(KVM_MAGIC_CRITICAL, kvm_vcpu_arch_shared, critical);
OFFSET(KVM_MAGIC_SR, kvm_vcpu_arch_shared, sr);
#endif
#ifdef CONFIG_44x
DEFINE(PGD_T_LOG2, PGD_T_LOG2);
DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
#ifdef CONFIG_PPC_E500
DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
OFFSET(TLBCAM_MAS0, tlbcam, MAS0);
OFFSET(TLBCAM_MAS1, tlbcam, MAS1);
OFFSET(TLBCAM_MAS2, tlbcam, MAS2);
OFFSET(TLBCAM_MAS3, tlbcam, MAS3);
OFFSET(TLBCAM_MAS7, tlbcam, MAS7);
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
OFFSET(VCPU_EVR, kvm_vcpu, arch.evr[0]);
OFFSET(VCPU_ACC, kvm_vcpu, arch.acc);
OFFSET(VCPU_SPEFSCR, kvm_vcpu, arch.spefscr);
OFFSET(VCPU_HOST_SPEFSCR, kvm_vcpu, arch.host_spefscr);
#endif
#ifdef CONFIG_KVM_BOOKE_HV
OFFSET(VCPU_HOST_MAS4, kvm_vcpu, arch.host_mas4);
OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6);
#endif
#ifdef CONFIG_KVM_EXIT_TIMING
OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu);
OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl);
OFFSET(VCPU_TIMING_LAST_ENTER_TBU, kvm_vcpu, arch.timing_last_enter.tv32.tbu);
OFFSET(VCPU_TIMING_LAST_ENTER_TBL, kvm_vcpu, arch.timing_last_enter.tv32.tbl);
#endif
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
#ifdef CONFIG_PPC_8xx
DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
#endif
#ifdef CONFIG_XMON
DEFINE(BPT_SIZE, BPT_SIZE);
#endif
return 0;
}
| linux-master | arch/powerpc/kernel/asm-offsets.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Suspend support specific for power.
*
* Copyright (c) 2002 Pavel Machek <[email protected]>
* Copyright (c) 2001 Patrick Mochel <[email protected]>
*/
#include <linux/mm.h>
#include <linux/suspend.h>
#include <asm/page.h>
#include <asm/sections.h>
/*
* pfn_is_nosave - check if given pfn is in the 'nosave' section
*/
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
| linux-master | arch/powerpc/kernel/suspend.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Kernel module help for powerpc.
Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.
Copyright (C) 2008 Freescale Semiconductor, Inc.
*/
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/bug.h>
#include <asm/module.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/sections.h>
static LIST_HEAD(module_bug_list);
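/* Look up a named ELF section header in the module image; returns NULL if not found */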
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *name)
{
char *secstrings;
unsigned int i;
secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (i = 1; i < hdr->e_shnum; i++)
if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
return &sechdrs[i];
return NULL;
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *me)
{
const Elf_Shdr *sect;
int rc;
rc = module_finalize_ftrace(me, sechdrs);
if (rc)
return rc;
/* Apply feature fixups */
sect = find_section(hdr, sechdrs, "__ftr_fixup");
if (sect != NULL)
do_feature_fixups(cur_cpu_spec->cpu_features,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
sect = find_section(hdr, sechdrs, "__mmu_ftr_fixup");
if (sect != NULL)
do_feature_fixups(cur_cpu_spec->mmu_features,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
#ifdef CONFIG_PPC64
sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
if (sect != NULL)
do_feature_fixups(powerpc_firmware_features,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC64_ELF_ABI_V1
sect = find_section(hdr, sechdrs, ".opd");
if (sect != NULL) {
me->arch.start_opd = sect->sh_addr;
me->arch.end_opd = sect->sh_addr + sect->sh_size;
}
#endif /* CONFIG_PPC64_ELF_ABI_V1 */
#ifdef CONFIG_PPC_BARRIER_NOSPEC
sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
if (sect != NULL)
do_barrier_nospec_fixups_range(barrier_nospec_enabled,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
#endif /* CONFIG_PPC_BARRIER_NOSPEC */
sect = find_section(hdr, sechdrs, "__lwsync_fixup");
if (sect != NULL)
do_lwsync_fixups(cur_cpu_spec->cpu_features,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
return 0;
}
static __always_inline void *
__module_alloc(unsigned long size, unsigned long start, unsigned long end, bool nowarn)
{
pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
gfp_t gfp = GFP_KERNEL | (nowarn ? __GFP_NOWARN : 0);
/*
	 * Don't do huge page allocations for modules until more testing
* is done. STRICT_MODULE_RWX may require extra work to support this
* too.
*/
return __vmalloc_node_range(size, 1, start, end, gfp, prot,
VM_FLUSH_RESET_PERMS,
NUMA_NO_NODE, __builtin_return_address(0));
}
void *module_alloc(unsigned long size)
{
#ifdef MODULES_VADDR
unsigned long limit = (unsigned long)_etext - SZ_32M;
void *ptr = NULL;
BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
/* First try within 32M limit from _etext to avoid branch trampolines */
if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit)
ptr = __module_alloc(size, limit, MODULES_END, true);
if (!ptr)
ptr = __module_alloc(size, MODULES_VADDR, MODULES_END, false);
return ptr;
#else
return __module_alloc(size, VMALLOC_START, VMALLOC_END, false);
#endif
}
| linux-master | arch/powerpc/kernel/module.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* udbg for NS16550 compatible serial ports
*
* Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
*/
#include <linux/types.h>
#include <asm/udbg.h>
#include <asm/io.h>
#include <asm/reg_a2.h>
#include <asm/early_ioremap.h>
extern u8 real_readb(volatile u8 __iomem *addr);
extern void real_writeb(u8 data, volatile u8 __iomem *addr);
extern u8 real_205_readb(volatile u8 __iomem *addr);
extern void real_205_writeb(u8 data, volatile u8 __iomem *addr);
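/* 16550-compatible UART register offsets, in units of the configured register stride */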
#define UART_RBR 0
#define UART_IER 1
#define UART_FCR 2
#define UART_LCR 3
#define UART_MCR 4
#define UART_LSR 5
#define UART_MSR 6
#define UART_SCR 7
#define UART_THR UART_RBR
#define UART_IIR UART_FCR
#define UART_DLL UART_RBR
#define UART_DLM UART_IER
#define UART_DLAB UART_LCR
#define LSR_DR 0x01 /* Data ready */
#define LSR_OE 0x02 /* Overrun */
#define LSR_PE 0x04 /* Parity error */
#define LSR_FE 0x08 /* Framing error */
#define LSR_BI 0x10 /* Break */
#define LSR_THRE 0x20 /* Xmit holding register empty */
#define LSR_TEMT 0x40 /* Xmitter empty */
#define LSR_ERR 0x80 /* Error */
#define LCR_DLAB 0x80
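/*
 * Low-level register accessors; installed by the udbg_uart_init_*()
 * helpers or by one of the platform-specific udbg_init_*() functions
 * below.
 */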
static u8 (*udbg_uart_in)(unsigned int reg);
static void (*udbg_uart_out)(unsigned int reg, u8 data);
static void udbg_uart_flush(void)
{
if (!udbg_uart_in)
return;
/* wait for idle */
while ((udbg_uart_in(UART_LSR) & LSR_THRE) == 0)
cpu_relax();
}
static void udbg_uart_putc(char c)
{
if (!udbg_uart_out)
return;
if (c == '\n')
udbg_uart_putc('\r');
udbg_uart_flush();
udbg_uart_out(UART_THR, c);
}
static int udbg_uart_getc_poll(void)
{
if (!udbg_uart_in)
return -1;
	/* Return a character only when the Data Ready bit (LSR_DR) is set */
	if (udbg_uart_in(UART_LSR) & LSR_DR)
		return udbg_uart_in(UART_RBR);
return -1;
}
static int udbg_uart_getc(void)
{
if (!udbg_uart_in)
return -1;
/* wait for char */
while (!(udbg_uart_in(UART_LSR) & LSR_DR))
cpu_relax();
return udbg_uart_in(UART_RBR);
}
static void __init udbg_use_uart(void)
{
udbg_putc = udbg_uart_putc;
udbg_flush = udbg_uart_flush;
udbg_getc = udbg_uart_getc;
udbg_getc_poll = udbg_uart_getc_poll;
}
void __init udbg_uart_setup(unsigned int speed, unsigned int clock)
{
unsigned int dll, base_bauds;
if (!udbg_uart_out)
return;
if (clock == 0)
clock = 1843200;
if (speed == 0)
speed = 9600;
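	/* Divisor latch value: clock / (16 * speed), e.g. 1843200 / (16 * 9600) = 12 */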
base_bauds = clock / 16;
dll = base_bauds / speed;
udbg_uart_out(UART_LCR, 0x00);
udbg_uart_out(UART_IER, 0xff);
udbg_uart_out(UART_IER, 0x00);
udbg_uart_out(UART_LCR, LCR_DLAB);
udbg_uart_out(UART_DLL, dll & 0xff);
udbg_uart_out(UART_DLM, dll >> 8);
/* 8 data, 1 stop, no parity */
udbg_uart_out(UART_LCR, 0x3);
/* RTS/DTR */
udbg_uart_out(UART_MCR, 0x3);
/* Clear & enable FIFOs */
udbg_uart_out(UART_FCR, 0x7);
}
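/*
 * Infer the baud rate already programmed into the UART (e.g. by
 * firmware) by reading back the divisor latch and any prescaler.
 */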
unsigned int __init udbg_probe_uart_speed(unsigned int clock)
{
unsigned int dll, dlm, divisor, prescaler, speed;
u8 old_lcr;
old_lcr = udbg_uart_in(UART_LCR);
/* select divisor latch registers. */
udbg_uart_out(UART_LCR, old_lcr | LCR_DLAB);
/* now, read the divisor */
dll = udbg_uart_in(UART_DLL);
dlm = udbg_uart_in(UART_DLM);
divisor = dlm << 8 | dll;
/* check prescaling */
if (udbg_uart_in(UART_MCR) & 0x80)
prescaler = 4;
else
prescaler = 1;
/* restore the LCR */
udbg_uart_out(UART_LCR, old_lcr);
/* calculate speed */
speed = (clock / prescaler) / (divisor * 16);
/* sanity check */
if (speed > (clock / 16))
speed = 9600;
return speed;
}
static union {
unsigned char __iomem *mmio_base;
unsigned long pio_base;
} udbg_uart;
static unsigned int udbg_uart_stride = 1;
static u8 udbg_uart_in_pio(unsigned int reg)
{
return inb(udbg_uart.pio_base + (reg * udbg_uart_stride));
}
static void udbg_uart_out_pio(unsigned int reg, u8 data)
{
outb(data, udbg_uart.pio_base + (reg * udbg_uart_stride));
}
void __init udbg_uart_init_pio(unsigned long port, unsigned int stride)
{
if (!port)
return;
udbg_uart.pio_base = port;
udbg_uart_stride = stride;
udbg_uart_in = udbg_uart_in_pio;
udbg_uart_out = udbg_uart_out_pio;
udbg_use_uart();
}
static u8 udbg_uart_in_mmio(unsigned int reg)
{
return in_8(udbg_uart.mmio_base + (reg * udbg_uart_stride));
}
static void udbg_uart_out_mmio(unsigned int reg, u8 data)
{
out_8(udbg_uart.mmio_base + (reg * udbg_uart_stride), data);
}
void __init udbg_uart_init_mmio(void __iomem *addr, unsigned int stride)
{
if (!addr)
return;
udbg_uart.mmio_base = addr;
udbg_uart_stride = stride;
udbg_uart_in = udbg_uart_in_mmio;
udbg_uart_out = udbg_uart_out_mmio;
udbg_use_uart();
}
#ifdef CONFIG_PPC_MAPLE
#define UDBG_UART_MAPLE_ADDR ((void __iomem *)0xf40003f8)
static u8 udbg_uart_in_maple(unsigned int reg)
{
return real_readb(UDBG_UART_MAPLE_ADDR + reg);
}
static void udbg_uart_out_maple(unsigned int reg, u8 val)
{
real_writeb(val, UDBG_UART_MAPLE_ADDR + reg);
}
void __init udbg_init_maple_realmode(void)
{
udbg_uart_in = udbg_uart_in_maple;
udbg_uart_out = udbg_uart_out_maple;
udbg_use_uart();
}
#endif /* CONFIG_PPC_MAPLE */
#ifdef CONFIG_PPC_PASEMI
#define UDBG_UART_PAS_ADDR ((void __iomem *)0xfcff03f8UL)
static u8 udbg_uart_in_pas(unsigned int reg)
{
return real_205_readb(UDBG_UART_PAS_ADDR + reg);
}
static void udbg_uart_out_pas(unsigned int reg, u8 val)
{
real_205_writeb(val, UDBG_UART_PAS_ADDR + reg);
}
void __init udbg_init_pas_realmode(void)
{
udbg_uart_in = udbg_uart_in_pas;
udbg_uart_out = udbg_uart_out_pas;
udbg_use_uart();
}
#endif /* CONFIG_PPC_PASEMI */
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
#include <platforms/44x/44x.h>
static u8 udbg_uart_in_44x_as1(unsigned int reg)
{
return as1_readb((void __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR + reg);
}
static void udbg_uart_out_44x_as1(unsigned int reg, u8 val)
{
as1_writeb(val, (void __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR + reg);
}
void __init udbg_init_44x_as1(void)
{
udbg_uart_in = udbg_uart_in_44x_as1;
udbg_uart_out = udbg_uart_out_44x_as1;
udbg_use_uart();
}
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
#ifdef CONFIG_PPC_EARLY_DEBUG_40x
static u8 udbg_uart_in_40x(unsigned int reg)
{
return real_readb((void __iomem *)CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR
+ reg);
}
static void udbg_uart_out_40x(unsigned int reg, u8 val)
{
real_writeb(val, (void __iomem *)CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR
+ reg);
}
void __init udbg_init_40x_realmode(void)
{
udbg_uart_in = udbg_uart_in_40x;
udbg_uart_out = udbg_uart_out_40x;
udbg_use_uart();
}
#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
#ifdef CONFIG_PPC_EARLY_DEBUG_16550
static void __iomem *udbg_uart_early_addr;
void __init udbg_init_debug_16550(void)
{
udbg_uart_early_addr = early_ioremap(CONFIG_PPC_EARLY_DEBUG_16550_PHYSADDR, 0x1000);
udbg_uart_init_mmio(udbg_uart_early_addr, CONFIG_PPC_EARLY_DEBUG_16550_STRIDE);
}
static int __init udbg_init_debug_16550_ioremap(void)
{
void __iomem *addr;
if (!udbg_uart_early_addr)
return 0;
addr = ioremap(CONFIG_PPC_EARLY_DEBUG_16550_PHYSADDR, 0x1000);
if (WARN_ON(!addr))
return -ENOMEM;
udbg_uart_init_mmio(addr, CONFIG_PPC_EARLY_DEBUG_16550_STRIDE);
early_iounmap(udbg_uart_early_addr, 0x1000);
udbg_uart_early_addr = NULL;
return 0;
}
early_initcall(udbg_init_debug_16550_ioremap);
#endif /* CONFIG_PPC_EARLY_DEBUG_16550 */
| linux-master | arch/powerpc/kernel/udbg_16550.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <asm/hw_breakpoint.h>
#include <asm/sstep.h>
#include <asm/cache.h>
static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
return ((info->address <= dar) && (dar - info->address < info->len));
}
static bool ea_user_range_overlaps(unsigned long ea, int size,
struct arch_hw_breakpoint *info)
{
return ((ea < info->address + info->len) &&
(ea + size > info->address));
}
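/*
 * Check whether the DAR falls within the breakpoint range after the
 * range has been rounded out to HW_BREAKPOINT_SIZE alignment, i.e.
 * the granularity the hardware actually matches on.
 */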
static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
unsigned long hw_start_addr, hw_end_addr;
hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
return ((hw_start_addr <= dar) && (hw_end_addr > dar));
}
static bool ea_hw_range_overlaps(unsigned long ea, int size,
struct arch_hw_breakpoint *info)
{
unsigned long hw_start_addr, hw_end_addr;
unsigned long align_size = HW_BREAKPOINT_SIZE;
/*
	 * On P10 predecessors, quadword accesses are handled differently
	 * than other instructions.
*/
if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16)
align_size = HW_BREAKPOINT_SIZE_QUADWORD;
hw_start_addr = ALIGN_DOWN(info->address, align_size);
hw_end_addr = ALIGN(info->address + info->len, align_size);
return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
}
/*
* If hw has multiple DAWR registers, we also need to check all
* dawrx constraint bits to confirm this is _really_ a valid event.
* If type is UNKNOWN, but privilege level matches, consider it as
* a positive match.
*/
static bool check_dawrx_constraints(struct pt_regs *regs, int type,
struct arch_hw_breakpoint *info)
{
if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
return false;
/*
* The Cache Management instructions other than dcbz never
* cause a match. i.e. if type is CACHEOP, the instruction
* is dcbz, and dcbz is treated as Store.
*/
if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE))
return false;
if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
return false;
if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER))
return false;
return true;
}
/*
* Return true if the event is valid wrt dawr configuration,
* including extraneous exception. Otherwise return false.
*/
bool wp_check_constraints(struct pt_regs *regs, ppc_inst_t instr,
unsigned long ea, int type, int size,
struct arch_hw_breakpoint *info)
{
bool in_user_range = dar_in_user_range(regs->dar, info);
bool dawrx_constraints;
/*
* 8xx supports only one breakpoint and thus we can
* unconditionally return true.
*/
if (IS_ENABLED(CONFIG_PPC_8xx)) {
if (!in_user_range)
info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
return true;
}
if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
if (cpu_has_feature(CPU_FTR_ARCH_31) &&
!dar_in_hw_range(regs->dar, info))
return false;
return true;
}
dawrx_constraints = check_dawrx_constraints(regs, type, info);
if (type == UNKNOWN) {
if (cpu_has_feature(CPU_FTR_ARCH_31) &&
!dar_in_hw_range(regs->dar, info))
return false;
return dawrx_constraints;
}
if (ea_user_range_overlaps(ea, size, info))
return dawrx_constraints;
if (ea_hw_range_overlaps(ea, size, info)) {
if (dawrx_constraints) {
info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
return true;
}
}
return false;
}
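/*
 * Decode the instruction at regs->nip to determine the access type,
 * size and effective address used by the constraint checks above.
 */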
void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr,
int *type, int *size, unsigned long *ea)
{
struct instruction_op op;
int err;
pagefault_disable();
err = __get_user_instr(*instr, (void __user *)regs->nip);
pagefault_enable();
if (err)
return;
analyse_instr(&op, regs, *instr);
*type = GETTYPE(op.type);
*ea = op.ea;
if (!(regs->msr & MSR_64BIT))
*ea &= 0xffffffffUL;
*size = GETSIZE(op.type);
if (*type == CACHEOP) {
*size = l1_dcache_bytes();
*ea &= ~(*size - 1);
} else if (*type == LOAD_VMX || *type == STORE_VMX) {
*ea &= ~(*size - 1);
}
}
| linux-master | arch/powerpc/kernel/hw_breakpoint_constraints.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright IBM Corporation 2001, 2005, 2006
* Copyright Dave Engebretsen & Todd Inglett 2001
* Copyright Linas Vepstas 2005, 2006
* Copyright 2001-2012 IBM Corporation.
*
* Please address comments and feedback to Linas Vepstas <[email protected]>
*/
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/atomic.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>
#include <asm/pte-walk.h>
/** Overview:
* EEH, or "Enhanced Error Handling" is a PCI bridge technology for
* dealing with PCI bus errors that can't be dealt with within the
* usual PCI framework, except by check-stopping the CPU. Systems
* that are designed for high-availability/reliability cannot afford
* to crash due to a "mere" PCI error, thus the need for EEH.
* An EEH-capable bridge operates by converting a detected error
* into a "slot freeze", taking the PCI adapter off-line, making
 * the slot behave, from the OS's point of view, as if the slot
* were "empty": all reads return 0xff's and all writes are silently
* ignored. EEH slot isolation events can be triggered by parity
* errors on the address or data busses (e.g. during posted writes),
* which in turn might be caused by low voltage on the bus, dust,
* vibration, humidity, radioactivity or plain-old failed hardware.
*
* Note, however, that one of the leading causes of EEH slot
* freeze events are buggy device drivers, buggy device microcode,
* or buggy device hardware. This is because any attempt by the
* device to bus-master data to a memory address that is not
* assigned to the device will trigger a slot freeze. (The idea
* is to prevent devices-gone-wild from corrupting system memory).
* Buggy hardware/drivers will have a miserable time co-existing
* with EEH.
*
* Ideally, a PCI device driver, when suspecting that an isolation
* event has occurred (e.g. by reading 0xff's), will then ask EEH
* whether this is the case, and then take appropriate steps to
* reset the PCI slot, the PCI device, and then resume operations.
* However, until that day, the checking is done here, with the
* eeh_check_failure() routine embedded in the MMIO macros. If
* the slot is found to be isolated, an "EEH Event" is synthesized
* and sent out for processing.
*/
/* If a device driver keeps reading an MMIO register in an interrupt
* handler after a slot isolation event, it might be broken.
* This sets the threshold for how many read attempts we allow
* before printing an error message.
*/
#define EEH_MAX_FAILS 2100000
/* Time to wait for a PCI slot to report status, in milliseconds */
#define PCI_BUS_RESET_WAIT_MSEC (5*60*1000)
/*
 * EEH probe mode, recorded in these flags, exists to support multiple
 * platforms. Some platforms, such as pSeries, enumerate PCI devices
 * based on the device tree, while others, such as powernv, probe PCI
 * devices directly from hardware. The flag is used to distinguish
 * between them. In addition, struct eeh_ops::probe is invoked for a
 * particular OF node or PCI device so that the corresponding PE is
 * created there.
*/
int eeh_subsystem_flags;
EXPORT_SYMBOL(eeh_subsystem_flags);
/*
* EEH allowed maximal frozen times. If one particular PE's
* frozen count in last hour exceeds this limit, the PE will
* be forced to be offline permanently.
*/
u32 eeh_max_freezes = 5;
/*
* Controls whether a recovery event should be scheduled when an
* isolated device is discovered. This is only really useful for
* debugging problems with the EEH core.
*/
bool eeh_debugfs_no_recover;
/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;
/* Lock to avoid races due to multiple reports of an error */
DEFINE_RAW_SPINLOCK(confirm_error_lock);
EXPORT_SYMBOL_GPL(confirm_error_lock);
/* Lock to protect passed flags */
static DEFINE_MUTEX(eeh_dev_mutex);
/* Buffer for reporting pci register dumps. It's here in BSS, and
 * not dynamically allocated, so that it ends up in RMO where RTAS
* can access it.
*/
#define EEH_PCI_REGS_LOG_LEN 8192
static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];
/*
* The struct is used to maintain the EEH global statistic
* information. Besides, the EEH global statistics will be
* exported to user space through procfs
*/
struct eeh_stats {
u64 no_device; /* PCI device not found */
u64 no_dn; /* OF node not found */
u64 no_cfg_addr; /* Config address not found */
u64 ignored_check; /* EEH check skipped */
u64 total_mmio_ffs; /* Total EEH checks */
u64 false_positives; /* Unnecessary EEH checks */
u64 slot_resets; /* PE reset */
};
static struct eeh_stats eeh_stats;
static int __init eeh_setup(char *str)
{
if (!strcmp(str, "off"))
eeh_add_flag(EEH_FORCE_DISABLED);
else if (!strcmp(str, "early_log"))
eeh_add_flag(EEH_EARLY_DUMP_LOG);
return 1;
}
__setup("eeh=", eeh_setup);
void eeh_show_enabled(void)
{
if (eeh_has_flag(EEH_FORCE_DISABLED))
pr_info("EEH: Recovery disabled by kernel parameter.\n");
else if (eeh_has_flag(EEH_ENABLED))
pr_info("EEH: Capable adapter found: recovery enabled.\n");
else
pr_info("EEH: No capable adapters found: recovery disabled.\n");
}
/*
* This routine captures assorted PCI configuration space data
* for the indicated PCI device, and puts them into a buffer
* for RTAS error logging.
*/
static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
{
u32 cfg;
int cap, i;
int n = 0, l = 0;
char buffer[128];
n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
edev->pe->phb->global_number, edev->bdfn >> 8,
PCI_SLOT(edev->bdfn), PCI_FUNC(edev->bdfn));
pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
edev->pe->phb->global_number, edev->bdfn >> 8,
PCI_SLOT(edev->bdfn), PCI_FUNC(edev->bdfn));
eeh_ops->read_config(edev, PCI_VENDOR_ID, 4, &cfg);
n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
pr_warn("EEH: PCI device/vendor: %08x\n", cfg);
eeh_ops->read_config(edev, PCI_COMMAND, 4, &cfg);
n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
pr_warn("EEH: PCI cmd/status register: %08x\n", cfg);
/* Gather bridge-specific registers */
if (edev->mode & EEH_DEV_BRIDGE) {
eeh_ops->read_config(edev, PCI_SEC_STATUS, 2, &cfg);
n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
pr_warn("EEH: Bridge secondary status: %04x\n", cfg);
eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &cfg);
n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
pr_warn("EEH: Bridge control: %04x\n", cfg);
}
/* Dump out the PCI-X command and status regs */
cap = edev->pcix_cap;
if (cap) {
eeh_ops->read_config(edev, cap, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
pr_warn("EEH: PCI-X cmd: %08x\n", cfg);
eeh_ops->read_config(edev, cap+4, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
pr_warn("EEH: PCI-X status: %08x\n", cfg);
}
/* If PCI-E capable, dump PCI-E cap 10 */
cap = edev->pcie_cap;
if (cap) {
n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
pr_warn("EEH: PCI-E capabilities and status follow:\n");
for (i=0; i<=8; i++) {
eeh_ops->read_config(edev, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
if ((i % 4) == 0) {
if (i != 0)
pr_warn("%s\n", buffer);
l = scnprintf(buffer, sizeof(buffer),
"EEH: PCI-E %02x: %08x ",
4*i, cfg);
} else {
l += scnprintf(buffer+l, sizeof(buffer)-l,
"%08x ", cfg);
}
}
pr_warn("%s\n", buffer);
}
/* If AER capable, dump it */
cap = edev->aer_cap;
if (cap) {
n += scnprintf(buf+n, len-n, "pci-e AER:\n");
pr_warn("EEH: PCI-E AER capability register set follows:\n");
for (i=0; i<=13; i++) {
eeh_ops->read_config(edev, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
if ((i % 4) == 0) {
if (i != 0)
pr_warn("%s\n", buffer);
l = scnprintf(buffer, sizeof(buffer),
"EEH: PCI-E AER %02x: %08x ",
4*i, cfg);
} else {
l += scnprintf(buffer+l, sizeof(buffer)-l,
"%08x ", cfg);
}
}
pr_warn("%s\n", buffer);
}
return n;
}
static void *eeh_dump_pe_log(struct eeh_pe *pe, void *flag)
{
struct eeh_dev *edev, *tmp;
size_t *plen = flag;
eeh_pe_for_each_dev(pe, edev, tmp)
*plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen,
EEH_PCI_REGS_LOG_LEN - *plen);
return NULL;
}
/**
* eeh_slot_error_detail - Generate combined log including driver log and error log
* @pe: EEH PE
* @severity: temporary or permanent error log
*
* This routine should be called to generate the combined log, which
* is comprised of driver log and error log. The driver log is figured
* out from the config space of the corresponding PCI device, while
* the error log is fetched through platform dependent function call.
*/
void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
{
size_t loglen = 0;
/*
* When the PHB is fenced or dead, it's pointless to collect
* the data from PCI config space because it should return
* 0xFF's. For ER, we still retrieve the data from the PCI
* config space.
*
* For pHyp, we have to enable IO for log retrieval. Otherwise,
	 * 0xFF's are always returned from PCI config space.
*
* When the @severity is EEH_LOG_PERM, the PE is going to be
* removed. Prior to that, the drivers for devices included in
	 * the PE will be closed. The drivers rely on a working IO path
	 * to bring the devices to a quiet state. Otherwise, PCI traffic
	 * from those devices after they are removed is likely to cause
* another unexpected EEH error.
*/
if (!(pe->type & EEH_PE_PHB)) {
if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
severity == EEH_LOG_PERM)
eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
/*
* The config space of some PCI devices can't be accessed
* when their PEs are in frozen state. Otherwise, fenced
* PHB might be seen. Those PEs are identified with flag
* EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED
* is set automatically when the PE is put to EEH_PE_ISOLATED.
*
* Restoring BARs possibly triggers PCI config access in
* (OPAL) firmware and then causes fenced PHB. If the
* PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's
* pointless to restore BARs and dump config space.
*/
eeh_ops->configure_bridge(pe);
if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
eeh_pe_restore_bars(pe);
pci_regs_buf[0] = 0;
eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
}
}
eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
}
/**
* eeh_token_to_phys - Convert EEH address token to phys address
* @token: I/O token, should be address in the form 0xA....
*
* This routine should be called to convert virtual I/O address
* to physical one.
*/
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
return ppc_find_vmap_phys(token);
}
/*
 * On the PowerNV platform, we might already have a fenced PHB.
 * In that case, it's meaningless to recover a frozen PE. Instead,
 * we have to handle the fenced PHB first.
*/
static int eeh_phb_check_failure(struct eeh_pe *pe)
{
struct eeh_pe *phb_pe;
unsigned long flags;
int ret;
if (!eeh_has_flag(EEH_PROBE_MODE_DEV))
return -EPERM;
/* Find the PHB PE */
phb_pe = eeh_phb_pe_get(pe->phb);
if (!phb_pe) {
pr_warn("%s Can't find PE for PHB#%x\n",
__func__, pe->phb->global_number);
return -EEXIST;
}
/* If the PHB has been in problematic state */
eeh_serialize_lock(&flags);
if (phb_pe->state & EEH_PE_ISOLATED) {
ret = 0;
goto out;
}
/* Check PHB state */
ret = eeh_ops->get_state(phb_pe, NULL);
if ((ret < 0) ||
(ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) {
ret = 0;
goto out;
}
/* Isolate the PHB and send event */
eeh_pe_mark_isolated(phb_pe);
eeh_serialize_unlock(flags);
pr_debug("EEH: PHB#%x failure detected, location: %s\n",
phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe));
eeh_send_failure_event(phb_pe);
return 1;
out:
eeh_serialize_unlock(flags);
return ret;
}
static inline const char *eeh_driver_name(struct pci_dev *pdev)
{
if (pdev)
return dev_driver_string(&pdev->dev);
return "<null>";
}
/**
* eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
* @edev: eeh device
*
* Check for an EEH failure for the given device node. Call this
* routine if the result of a read was all 0xff's and you want to
* find out if this is due to an EEH slot freeze. This routine
* will query firmware for the EEH status.
*
* Returns 0 if there has not been an EEH error; otherwise returns
* a non-zero value and queues up a slot isolation event notification.
*
* It is safe to call this routine in an interrupt context.
*/
int eeh_dev_check_failure(struct eeh_dev *edev)
{
int ret;
unsigned long flags;
struct device_node *dn;
struct pci_dev *dev;
struct eeh_pe *pe, *parent_pe;
int rc = 0;
const char *location = NULL;
eeh_stats.total_mmio_ffs++;
if (!eeh_enabled())
return 0;
if (!edev) {
eeh_stats.no_dn++;
return 0;
}
dev = eeh_dev_to_pci_dev(edev);
pe = eeh_dev_to_pe(edev);
/* Access to IO BARs might get this far and still not want checking. */
if (!pe) {
eeh_stats.ignored_check++;
eeh_edev_dbg(edev, "Ignored check\n");
return 0;
}
/*
	 * On the PowerNV platform, we might already have a fenced PHB
	 * there, and we need to take care of that first.
*/
ret = eeh_phb_check_failure(pe);
if (ret > 0)
return ret;
/*
* If the PE isn't owned by us, we shouldn't check the
* state. Instead, let the owner handle it if the PE has
* been frozen.
*/
if (eeh_pe_passed(pe))
return 0;
/* If we already have a pending isolation event for this
* slot, we know it's bad already, we don't need to check.
* Do this checking under a lock; as multiple PCI devices
* in one slot might report errors simultaneously, and we
* only want one error recovery routine running.
*/
eeh_serialize_lock(&flags);
rc = 1;
if (pe->state & EEH_PE_ISOLATED) {
pe->check_count++;
if (pe->check_count == EEH_MAX_FAILS) {
dn = pci_device_to_OF_node(dev);
if (dn)
location = of_get_property(dn, "ibm,loc-code",
NULL);
eeh_edev_err(edev, "%d reads ignored for recovering device at location=%s driver=%s\n",
pe->check_count,
location ? location : "unknown",
eeh_driver_name(dev));
eeh_edev_err(edev, "Might be infinite loop in %s driver\n",
eeh_driver_name(dev));
dump_stack();
}
goto dn_unlock;
}
/*
* Now test for an EEH failure. This is VERY expensive.
* Note that the eeh_config_addr may be a parent device
* in the case of a device behind a bridge, or it may be
* function zero of a multi-function device.
* In any case they must share a common PHB.
*/
ret = eeh_ops->get_state(pe, NULL);
/* Note that config-io to empty slots may fail;
* they are empty when they don't have children.
	 * We will punt under the following conditions: failure to get
	 * the PE's state, EEH not supported, permanently unavailable
	 * state, or the PE is in a good state.
*/
if ((ret < 0) ||
(ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) {
eeh_stats.false_positives++;
pe->false_positives++;
rc = 0;
goto dn_unlock;
}
/*
	 * It is a corner case that the parent PE has been put into
	 * the frozen state as well. We should take care of that
	 * first.
*/
parent_pe = pe->parent;
while (parent_pe) {
/* Hit the ceiling ? */
if (parent_pe->type & EEH_PE_PHB)
break;
/* Frozen parent PE ? */
ret = eeh_ops->get_state(parent_pe, NULL);
if (ret > 0 && !eeh_state_active(ret)) {
pe = parent_pe;
pr_err("EEH: Failure of PHB#%x-PE#%x will be handled at parent PHB#%x-PE#%x.\n",
pe->phb->global_number, pe->addr,
pe->phb->global_number, parent_pe->addr);
}
/* Next parent level */
parent_pe = parent_pe->parent;
}
eeh_stats.slot_resets++;
/* Avoid repeated reports of this failure, including problems
* with other functions on this device, and functions under
* bridges.
*/
eeh_pe_mark_isolated(pe);
eeh_serialize_unlock(flags);
/* Most EEH events are due to device driver bugs. Having
* a stack trace will help the device-driver authors figure
* out what happened. So print that out.
*/
pr_debug("EEH: %s: Frozen PHB#%x-PE#%x detected\n",
__func__, pe->phb->global_number, pe->addr);
eeh_send_failure_event(pe);
return 1;
dn_unlock:
eeh_serialize_unlock(flags);
return rc;
}
EXPORT_SYMBOL_GPL(eeh_dev_check_failure);
/**
* eeh_check_failure - Check if all 1's data is due to EEH slot freeze
* @token: I/O address
*
* Check for an EEH failure at the given I/O address. Call this
* routine if the result of a read was all 0xff's and you want to
* find out if this is due to an EEH slot freeze event. This routine
* will query firmware for the EEH status.
*
* Note this routine is safe to call in an interrupt context.
*/
int eeh_check_failure(const volatile void __iomem *token)
{
unsigned long addr;
struct eeh_dev *edev;
/* Finding the phys addr + pci device; this is pretty quick. */
addr = eeh_token_to_phys((unsigned long __force) token);
edev = eeh_addr_cache_get_dev(addr);
if (!edev) {
eeh_stats.no_device++;
return 0;
}
return eeh_dev_check_failure(edev);
}
EXPORT_SYMBOL(eeh_check_failure);
/**
* eeh_pci_enable - Enable MMIO or DMA transfers for this slot
* @pe: EEH PE
* @function: EEH option
*
* This routine should be called to reenable frozen MMIO or DMA
* so that it would work correctly again. It's useful while doing
* recovery or log collection on the indicated device.
*/
int eeh_pci_enable(struct eeh_pe *pe, int function)
{
int active_flag, rc;
/*
	 * pHyp doesn't allow enabling IO or DMA on an unfrozen PE.
	 * Also, it's pointless to enable them on an unfrozen PE. So
* we have to check before enabling IO or DMA.
*/
switch (function) {
case EEH_OPT_THAW_MMIO:
active_flag = EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED;
break;
case EEH_OPT_THAW_DMA:
active_flag = EEH_STATE_DMA_ACTIVE;
break;
case EEH_OPT_DISABLE:
case EEH_OPT_ENABLE:
case EEH_OPT_FREEZE_PE:
active_flag = 0;
break;
default:
pr_warn("%s: Invalid function %d\n",
__func__, function);
return -EINVAL;
}
/*
* Check if IO or DMA has been enabled before
* enabling them.
*/
if (active_flag) {
rc = eeh_ops->get_state(pe, NULL);
if (rc < 0)
return rc;
/* Needn't enable it at all */
if (rc == EEH_STATE_NOT_SUPPORT)
return 0;
/* It's already enabled */
if (rc & active_flag)
return 0;
}
/* Issue the request */
rc = eeh_ops->set_option(pe, function);
if (rc)
pr_warn("%s: Unexpected state change %d on "
"PHB#%x-PE#%x, err=%d\n",
__func__, function, pe->phb->global_number,
pe->addr, rc);
/* Check if the request is finished successfully */
if (active_flag) {
rc = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
if (rc < 0)
return rc;
if (rc & active_flag)
return 0;
return -EIO;
}
return rc;
}
static void eeh_disable_and_save_dev_state(struct eeh_dev *edev,
void *userdata)
{
struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
struct pci_dev *dev = userdata;
/*
* The caller should have disabled and saved the
* state for the specified device
*/
if (!pdev || pdev == dev)
return;
/* Ensure we have D0 power state */
pci_set_power_state(pdev, PCI_D0);
/* Save device state */
pci_save_state(pdev);
/*
* Disable device to avoid any DMA traffic and
* interrupt from the device
*/
pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
}
static void eeh_restore_dev_state(struct eeh_dev *edev, void *userdata)
{
struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
struct pci_dev *dev = userdata;
if (!pdev)
return;
/* Apply customization from firmware */
if (eeh_ops->restore_config)
eeh_ops->restore_config(edev);
/* The caller should restore state for the specified device */
if (pdev != dev)
pci_restore_state(pdev);
}
/**
* pcibios_set_pcie_reset_state - Set PCI-E reset state
* @dev: pci device struct
* @state: reset state to enter
*
* Return value:
* 0 if success
*/
int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
struct eeh_pe *pe = eeh_dev_to_pe(edev);
if (!pe) {
pr_err("%s: No PE found on PCI device %s\n",
__func__, pci_name(dev));
return -EINVAL;
}
switch (state) {
case pcie_deassert_reset:
eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
eeh_unfreeze_pe(pe);
if (!(pe->type & EEH_PE_VF))
eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
break;
case pcie_hot_reset:
eeh_pe_mark_isolated(pe);
eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
if (!(pe->type & EEH_PE_VF))
eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
eeh_ops->reset(pe, EEH_RESET_HOT);
break;
case pcie_warm_reset:
eeh_pe_mark_isolated(pe);
eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
if (!(pe->type & EEH_PE_VF))
eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
break;
default:
eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED, true);
return -EINVAL;
}
return 0;
}
/**
* eeh_set_dev_freset - Check the required reset for the indicated device
* @edev: EEH device
* @flag: return value
*
* Each device might have its preferred reset type: fundamental or
 * hot reset. The routine is used to collect the information for
 * the indicated device and its children so that the whole group of
 * devices can be reset properly.
*/
static void eeh_set_dev_freset(struct eeh_dev *edev, void *flag)
{
struct pci_dev *dev;
unsigned int *freset = (unsigned int *)flag;
dev = eeh_dev_to_pci_dev(edev);
if (dev)
*freset |= dev->needs_freset;
}
static void eeh_pe_refreeze_passed(struct eeh_pe *root)
{
struct eeh_pe *pe;
int state;
eeh_for_each_pe(root, pe) {
if (eeh_pe_passed(pe)) {
state = eeh_ops->get_state(pe, NULL);
if (state &
(EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED)) {
pr_info("EEH: Passed-through PE PHB#%x-PE#%x was thawed by reset, re-freezing for safety.\n",
pe->phb->global_number, pe->addr);
eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE);
}
}
}
}
/**
* eeh_pe_reset_full - Complete a full reset process on the indicated PE
* @pe: EEH PE
* @include_passed: include passed-through devices?
*
* This function executes a full reset procedure on a PE, including setting
* the appropriate flags, performing a fundamental or hot reset, and then
* deactivating the reset status. It is designed to be used within the EEH
* subsystem, as opposed to eeh_pe_reset which is exported to drivers and
* only performs a single operation at a time.
*
* This function will attempt to reset a PE three times before failing.
*/
int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed)
{
int reset_state = (EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
int type = EEH_RESET_HOT;
unsigned int freset = 0;
int i, state = 0, ret;
/*
* Determine the type of reset to perform - hot or fundamental.
* Hot reset is the default operation, unless any device under the
* PE requires a fundamental reset.
*/
eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);
if (freset)
type = EEH_RESET_FUNDAMENTAL;
/* Mark the PE as in reset state and block config space accesses */
eeh_pe_state_mark(pe, reset_state);
/* Make three attempts at resetting the bus */
for (i = 0; i < 3; i++) {
ret = eeh_pe_reset(pe, type, include_passed);
if (!ret)
ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE,
include_passed);
if (ret) {
ret = -EIO;
pr_warn("EEH: Failure %d resetting PHB#%x-PE#%x (attempt %d)\n\n",
state, pe->phb->global_number, pe->addr, i + 1);
continue;
}
if (i)
pr_warn("EEH: PHB#%x-PE#%x: Successful reset (attempt %d)\n",
pe->phb->global_number, pe->addr, i + 1);
/* Wait until the PE is in a functioning state */
state = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
if (state < 0) {
pr_warn("EEH: Unrecoverable slot failure on PHB#%x-PE#%x",
pe->phb->global_number, pe->addr);
ret = -ENOTRECOVERABLE;
break;
}
if (eeh_state_active(state))
break;
else
pr_warn("EEH: PHB#%x-PE#%x: Slot inactive after reset: 0x%x (attempt %d)\n",
pe->phb->global_number, pe->addr, state, i + 1);
}
/* Resetting the PE may have unfrozen child PEs. If those PEs have been
* (potentially) passed through to a guest, re-freeze them:
*/
if (!include_passed)
eeh_pe_refreeze_passed(pe);
eeh_pe_state_clear(pe, reset_state, true);
return ret;
}
/**
* eeh_save_bars - Save device bars
* @edev: PCI device associated EEH device
*
* Save the values of the device bars. Unlike the restore
* routine, this routine is *not* recursive. This is because
* PCI devices are added individually; but, for the restore,
* an entire slot is reset at a time.
*/
void eeh_save_bars(struct eeh_dev *edev)
{
int i;
if (!edev)
return;
for (i = 0; i < 16; i++)
eeh_ops->read_config(edev, i * 4, 4, &edev->config_space[i]);
/*
	 * For PCI bridges, including the root port, we need to enable
	 * bus mastering explicitly. Otherwise, the bridge can't fetch
	 * IODA table entries correctly. So we cache the bit in advance
	 * so that we can restore it after reset, for either the PHB
	 * range or the PE range.
*/
if (edev->mode & EEH_DEV_BRIDGE)
edev->config_space[1] |= PCI_COMMAND_MASTER;
}
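/* Reboot notifier: disable EEH so that no new error recovery is attempted while rebooting */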
static int eeh_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
eeh_clear_flag(EEH_ENABLED);
return NOTIFY_DONE;
}
static struct notifier_block eeh_reboot_nb = {
.notifier_call = eeh_reboot_notifier,
};
static int eeh_device_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
switch (action) {
/*
* Note: It's not possible to perform EEH device addition (i.e.
* {pseries,pnv}_pcibios_bus_add_device()) here because it depends on
* the device's resources, which have not yet been set up.
*/
case BUS_NOTIFY_DEL_DEVICE:
eeh_remove_device(to_pci_dev(dev));
break;
default:
break;
}
return NOTIFY_DONE;
}
static struct notifier_block eeh_device_nb = {
.notifier_call = eeh_device_notifier,
};
/**
* eeh_init - System wide EEH initialization
* @ops: struct to trace EEH operation callback functions
*
* It's the platform's job to call this from an arch_initcall().
*/
int eeh_init(struct eeh_ops *ops)
{
struct pci_controller *hose, *tmp;
int ret = 0;
/* the platform should only initialise EEH once */
if (WARN_ON(eeh_ops))
return -EEXIST;
if (WARN_ON(!ops))
return -ENOENT;
eeh_ops = ops;
/* Register reboot notifier */
ret = register_reboot_notifier(&eeh_reboot_nb);
if (ret) {
pr_warn("%s: Failed to register reboot notifier (%d)\n",
__func__, ret);
return ret;
}
ret = bus_register_notifier(&pci_bus_type, &eeh_device_nb);
if (ret) {
pr_warn("%s: Failed to register bus notifier (%d)\n",
__func__, ret);
return ret;
}
/* Initialize PHB PEs */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
eeh_phb_pe_create(hose);
eeh_addr_cache_init();
/* Initialize EEH event */
return eeh_event_init();
}
/**
* eeh_probe_device() - Perform EEH initialization for the indicated pci device
* @dev: pci device for which to set up EEH
*
* This routine must be used to complete EEH initialization for PCI
* devices that were added after system boot (e.g. hotplug, dlpar).
*/
void eeh_probe_device(struct pci_dev *dev)
{
struct eeh_dev *edev;
pr_debug("EEH: Adding device %s\n", pci_name(dev));
/*
* pci_dev_to_eeh_dev() can only work if eeh_probe_dev() was
* already called for this device.
*/
if (WARN_ON_ONCE(pci_dev_to_eeh_dev(dev))) {
pci_dbg(dev, "Already bound to an eeh_dev!\n");
return;
}
edev = eeh_ops->probe(dev);
if (!edev) {
pr_debug("EEH: Adding device failed\n");
return;
}
/*
* FIXME: We rely on pcibios_release_device() to remove the
* existing EEH state. The release function is only called if
* the pci_dev's refcount drops to zero so if something is
* keeping a ref to a device (e.g. a filesystem) we need to
* remove the old EEH state.
*
* FIXME: HEY MA, LOOK AT ME, NO LOCKING!
*/
if (edev->pdev && edev->pdev != dev) {
eeh_pe_tree_remove(edev);
eeh_addr_cache_rmv_dev(edev->pdev);
eeh_sysfs_remove_device(edev->pdev);
/*
		 * The PCI device should already have been removed, but
		 * it wasn't done correctly. So we needn't call into the
		 * error handler afterwards.
*/
edev->mode |= EEH_DEV_NO_HANDLER;
}
/* bind the pdev and the edev together */
edev->pdev = dev;
dev->dev.archdata.edev = edev;
eeh_addr_cache_insert_dev(dev);
eeh_sysfs_add_device(dev);
}
/**
* eeh_remove_device - Undo EEH setup for the indicated pci device
* @dev: pci device to be removed
*
* This routine should be called when a device is removed from
* a running system (e.g. by hotplug or dlpar). It unregisters
* the PCI device from the EEH subsystem. I/O errors affecting
* this device will no longer be detected after this call; thus,
* i/o errors affecting this slot may leave this device unusable.
*/
void eeh_remove_device(struct pci_dev *dev)
{
struct eeh_dev *edev;
if (!dev || !eeh_enabled())
return;
edev = pci_dev_to_eeh_dev(dev);
/* Unregister the device with the EEH/PCI address search system */
dev_dbg(&dev->dev, "EEH: Removing device\n");
if (!edev || !edev->pdev || !edev->pe) {
dev_dbg(&dev->dev, "EEH: Device not referenced!\n");
return;
}
/*
	 * During hotplug for EEH error recovery, we need the EEH
	 * device to stay attached to the parent PE so that BARs can
	 * be restored a bit later. So we keep it for the BAR restore
	 * and remove it from the parent PE during the BAR restore.
*/
edev->pdev = NULL;
/*
* eeh_sysfs_remove_device() uses pci_dev_to_eeh_dev() so we need to
* remove the sysfs files before clearing dev.archdata.edev
*/
if (edev->mode & EEH_DEV_SYSFS)
eeh_sysfs_remove_device(dev);
/*
	 * We're removing the device from the PCI subsystem, which means
	 * the PCI device driver doesn't support EEH, or doesn't support
	 * it well. So we rely entirely on hotplug to do recovery for
	 * the specific PCI device.
*/
edev->mode |= EEH_DEV_NO_HANDLER;
eeh_addr_cache_rmv_dev(dev);
/*
* The flag "in_error" is used to trace EEH devices for VFs
* in error state or not. It's set in eeh_report_error(). If
* it's not set, eeh_report_{reset,resume}() won't be called
* for the VF EEH device.
*/
edev->in_error = false;
dev->dev.archdata.edev = NULL;
if (!(edev->pe->state & EEH_PE_KEEP))
eeh_pe_tree_remove(edev);
else
edev->mode |= EEH_DEV_DISCONNECTED;
}
int eeh_unfreeze_pe(struct eeh_pe *pe)
{
int ret;
ret = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
if (ret) {
pr_warn("%s: Failure %d enabling IO on PHB#%x-PE#%x\n",
__func__, ret, pe->phb->global_number, pe->addr);
return ret;
}
ret = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
if (ret) {
pr_warn("%s: Failure %d enabling DMA on PHB#%x-PE#%x\n",
__func__, ret, pe->phb->global_number, pe->addr);
return ret;
}
return ret;
}
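/*
 * Devices that need a full PE reset, rather than a simple unfreeze,
 * when PE ownership changes (see eeh_pe_change_owner() below).
 */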
static struct pci_device_id eeh_reset_ids[] = {
{ PCI_DEVICE(0x19a2, 0x0710) }, /* Emulex, BE */
{ PCI_DEVICE(0x10df, 0xe220) }, /* Emulex, Lancer */
{ PCI_DEVICE(0x14e4, 0x1657) }, /* Broadcom BCM5719 */
{ 0 }
};
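/*
 * Called when a PE changes owner (pass-through open/release). If the
 * PE is frozen, either reset and recover it for devices on the quirk
 * list above, or simply unfreeze it and clear its isolated state.
 */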
static int eeh_pe_change_owner(struct eeh_pe *pe)
{
struct eeh_dev *edev, *tmp;
struct pci_dev *pdev;
struct pci_device_id *id;
int ret;
/* Check PE state */
ret = eeh_ops->get_state(pe, NULL);
if (ret < 0 || ret == EEH_STATE_NOT_SUPPORT)
return 0;
/* Unfrozen PE, nothing to do */
if (eeh_state_active(ret))
return 0;
/* Frozen PE, check if it needs PE level reset */
eeh_pe_for_each_dev(pe, edev, tmp) {
pdev = eeh_dev_to_pci_dev(edev);
if (!pdev)
continue;
for (id = &eeh_reset_ids[0]; id->vendor != 0; id++) {
if (id->vendor != PCI_ANY_ID &&
id->vendor != pdev->vendor)
continue;
if (id->device != PCI_ANY_ID &&
id->device != pdev->device)
continue;
if (id->subvendor != PCI_ANY_ID &&
id->subvendor != pdev->subsystem_vendor)
continue;
if (id->subdevice != PCI_ANY_ID &&
id->subdevice != pdev->subsystem_device)
continue;
return eeh_pe_reset_and_recover(pe);
}
}
ret = eeh_unfreeze_pe(pe);
if (!ret)
eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
return ret;
}
/**
* eeh_dev_open - Increase count of pass through devices for PE
* @pdev: PCI device
*
* Increase count of passed through devices for the indicated
 * PE. As a result, the EEH errors detected on the PE won't be
* reported. The PE owner will be responsible for detection
* and recovery.
*/
int eeh_dev_open(struct pci_dev *pdev)
{
struct eeh_dev *edev;
int ret = -ENODEV;
mutex_lock(&eeh_dev_mutex);
/* No PCI device ? */
if (!pdev)
goto out;
/* No EEH device or PE ? */
edev = pci_dev_to_eeh_dev(pdev);
if (!edev || !edev->pe)
goto out;
/*
* The PE might have been put into frozen state, but we
* didn't detect that yet. The passed through PCI devices
* in frozen PE won't work properly. Clear the frozen state
* in advance.
*/
ret = eeh_pe_change_owner(edev->pe);
if (ret)
goto out;
/* Increase PE's pass through count */
atomic_inc(&edev->pe->pass_dev_cnt);
mutex_unlock(&eeh_dev_mutex);
return 0;
out:
mutex_unlock(&eeh_dev_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(eeh_dev_open);
/**
* eeh_dev_release - Decrease count of pass through devices for PE
* @pdev: PCI device
*
* Decrease count of pass through devices for the indicated PE. If
* there is no passed through device in PE, the EEH errors detected
* on the PE will be reported and handled as usual.
*/
void eeh_dev_release(struct pci_dev *pdev)
{
struct eeh_dev *edev;
mutex_lock(&eeh_dev_mutex);
/* No PCI device ? */
if (!pdev)
goto out;
/* No EEH device ? */
edev = pci_dev_to_eeh_dev(pdev);
if (!edev || !edev->pe || !eeh_pe_passed(edev->pe))
goto out;
/* Decrease PE's pass through count */
WARN_ON(atomic_dec_if_positive(&edev->pe->pass_dev_cnt) < 0);
eeh_pe_change_owner(edev->pe);
out:
mutex_unlock(&eeh_dev_mutex);
}
EXPORT_SYMBOL(eeh_dev_release);
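/*
 * Illustrative only: a pass-through driver (e.g. VFIO) would presumably
 * pair these calls roughly as sketched below, so that EEH errors on the
 * PE are left to the new owner while the device is passed through. This
 * is a sketch, not a copy of any in-tree caller.
 *
 *	if (eeh_dev_open(pdev))
 *		return -EIO;
 *	... hand the device to user space ...
 *	eeh_dev_release(pdev);
 */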
#ifdef CONFIG_IOMMU_API
static int dev_has_iommu_table(struct device *dev, void *data)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_dev **ppdev = data;
if (!dev)
return 0;
if (device_iommu_mapped(dev)) {
*ppdev = pdev;
return 1;
}
return 0;
}
/**
* eeh_iommu_group_to_pe - Convert IOMMU group to EEH PE
* @group: IOMMU group
*
* The routine is called to convert IOMMU group to EEH PE.
*/
struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group)
{
struct pci_dev *pdev = NULL;
struct eeh_dev *edev;
int ret;
/* No IOMMU group ? */
if (!group)
return NULL;
ret = iommu_group_for_each_dev(group, &pdev, dev_has_iommu_table);
if (!ret || !pdev)
return NULL;
/* No EEH device or PE ? */
edev = pci_dev_to_eeh_dev(pdev);
if (!edev || !edev->pe)
return NULL;
return edev->pe;
}
EXPORT_SYMBOL_GPL(eeh_iommu_group_to_pe);
#endif /* CONFIG_IOMMU_API */
/**
* eeh_pe_set_option - Set options for the indicated PE
* @pe: EEH PE
* @option: requested option
*
* The routine is called to enable or disable EEH functionality
* on the indicated PE, to enable IO or DMA for the frozen PE.
*/
int eeh_pe_set_option(struct eeh_pe *pe, int option)
{
int ret = 0;
/* Invalid PE ? */
if (!pe)
return -ENODEV;
/*
	 * EEH functionality may be disabled globally; return an error
	 * in that case. Disabling EEH on one specific PE isn't
	 * expected to be supported.
*/
switch (option) {
case EEH_OPT_ENABLE:
if (eeh_enabled()) {
ret = eeh_pe_change_owner(pe);
break;
}
ret = -EIO;
break;
case EEH_OPT_DISABLE:
break;
case EEH_OPT_THAW_MMIO:
case EEH_OPT_THAW_DMA:
case EEH_OPT_FREEZE_PE:
if (!eeh_ops || !eeh_ops->set_option) {
ret = -ENOENT;
break;
}
ret = eeh_pci_enable(pe, option);
break;
default:
pr_debug("%s: Option %d out of range (%d, %d)\n",
__func__, option, EEH_OPT_DISABLE, EEH_OPT_THAW_DMA);
ret = -EINVAL;
}
return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_set_option);
/**
* eeh_pe_get_state - Retrieve PE's state
* @pe: EEH PE
*
* Retrieve the PE's state, which includes 3 aspects: enabled
* DMA, enabled IO and asserted reset.
*/
int eeh_pe_get_state(struct eeh_pe *pe)
{
int result, ret = 0;
bool rst_active, dma_en, mmio_en;
/* Existing PE ? */
if (!pe)
return -ENODEV;
if (!eeh_ops || !eeh_ops->get_state)
return -ENOENT;
/*
* If the parent PE is owned by the host kernel and is undergoing
* error recovery, we should return the PE state as temporarily
* unavailable so that the error recovery on the guest is suspended
* until the recovery completes on the host.
*/
if (pe->parent &&
!(pe->state & EEH_PE_REMOVED) &&
(pe->parent->state & (EEH_PE_ISOLATED | EEH_PE_RECOVERING)))
return EEH_PE_STATE_UNAVAIL;
result = eeh_ops->get_state(pe, NULL);
rst_active = !!(result & EEH_STATE_RESET_ACTIVE);
dma_en = !!(result & EEH_STATE_DMA_ENABLED);
mmio_en = !!(result & EEH_STATE_MMIO_ENABLED);
if (rst_active)
ret = EEH_PE_STATE_RESET;
else if (dma_en && mmio_en)
ret = EEH_PE_STATE_NORMAL;
else if (!dma_en && !mmio_en)
ret = EEH_PE_STATE_STOPPED_IO_DMA;
else if (!dma_en && mmio_en)
ret = EEH_PE_STATE_STOPPED_DMA;
else
ret = EEH_PE_STATE_UNAVAIL;
return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_get_state);
static int eeh_pe_reenable_devices(struct eeh_pe *pe, bool include_passed)
{
struct eeh_dev *edev, *tmp;
struct pci_dev *pdev;
int ret = 0;
eeh_pe_restore_bars(pe);
/*
* Reenable PCI devices as the devices passed
* through are always enabled before the reset.
*/
eeh_pe_for_each_dev(pe, edev, tmp) {
pdev = eeh_dev_to_pci_dev(edev);
if (!pdev)
continue;
ret = pci_reenable_device(pdev);
if (ret) {
pr_warn("%s: Failure %d reenabling %s\n",
__func__, ret, pci_name(pdev));
return ret;
}
}
/* The PE is still in frozen state */
if (include_passed || !eeh_pe_passed(pe)) {
ret = eeh_unfreeze_pe(pe);
} else
pr_info("EEH: Note: Leaving passthrough PHB#%x-PE#%x frozen.\n",
pe->phb->global_number, pe->addr);
if (!ret)
eeh_pe_state_clear(pe, EEH_PE_ISOLATED, include_passed);
return ret;
}
/**
* eeh_pe_reset - Issue PE reset according to specified type
* @pe: EEH PE
* @option: reset type
* @include_passed: include passed-through devices?
*
* The routine is called to reset the specified PE with the
* indicated type, either fundamental reset or hot reset.
* PE reset is the most important part for error recovery.
*/
int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed)
{
int ret = 0;
/* Invalid PE ? */
if (!pe)
return -ENODEV;
if (!eeh_ops || !eeh_ops->set_option || !eeh_ops->reset)
return -ENOENT;
switch (option) {
case EEH_RESET_DEACTIVATE:
ret = eeh_ops->reset(pe, option);
eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, include_passed);
if (ret)
break;
ret = eeh_pe_reenable_devices(pe, include_passed);
break;
case EEH_RESET_HOT:
case EEH_RESET_FUNDAMENTAL:
/*
* Proactively freeze the PE to drop all MMIO access
		 * during reset, which must be blocked because it always
		 * causes a recursive EEH error.
*/
eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
ret = eeh_ops->reset(pe, option);
break;
default:
pr_debug("%s: Unsupported option %d\n",
__func__, option);
ret = -EINVAL;
}
return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_reset);
/**
* eeh_pe_configure - Configure PCI bridges after PE reset
* @pe: EEH PE
*
* The routine is called to restore the PCI config space for
* those PCI devices, especially PCI bridges affected by PE
* reset issued previously.
*/
int eeh_pe_configure(struct eeh_pe *pe)
{
int ret = 0;
/* Invalid PE ? */
if (!pe)
return -ENODEV;
return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_configure);
/**
 * eeh_pe_inject_err - Inject the specified PCI error into the indicated PE
* @pe: the indicated PE
* @type: error type
* @func: error function
* @addr: address
* @mask: address mask
*
* The routine is called to inject the specified PCI error, which
 * is determined by @type and @func, into the indicated PE for
 * testing purposes.
*/
int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
unsigned long addr, unsigned long mask)
{
/* Invalid PE ? */
if (!pe)
return -ENODEV;
/* Unsupported operation ? */
if (!eeh_ops || !eeh_ops->err_inject)
return -ENOENT;
/* Check on PCI error type */
if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64)
return -EINVAL;
/* Check on PCI error function */
if (func < EEH_ERR_FUNC_MIN || func > EEH_ERR_FUNC_MAX)
return -EINVAL;
return eeh_ops->err_inject(pe, type, func, addr, mask);
}
EXPORT_SYMBOL_GPL(eeh_pe_inject_err);
#ifdef CONFIG_PROC_FS
static int proc_eeh_show(struct seq_file *m, void *v)
{
if (!eeh_enabled()) {
seq_printf(m, "EEH Subsystem is globally disabled\n");
seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
} else {
seq_printf(m, "EEH Subsystem is enabled\n");
seq_printf(m,
"no device=%llu\n"
"no device node=%llu\n"
"no config address=%llu\n"
"check not wanted=%llu\n"
"eeh_total_mmio_ffs=%llu\n"
"eeh_false_positives=%llu\n"
"eeh_slot_resets=%llu\n",
eeh_stats.no_device,
eeh_stats.no_dn,
eeh_stats.no_cfg_addr,
eeh_stats.ignored_check,
eeh_stats.total_mmio_ffs,
eeh_stats.false_positives,
eeh_stats.slot_resets);
}
return 0;
}
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_DEBUG_FS
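/*
 * Parse a "<domain>:<bus>:<dev>.<fn>" string written by user space and
 * return the matching pci_dev (with a reference held, which the caller
 * must drop with pci_dev_put()), or an ERR_PTR() on failure.
 */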
static struct pci_dev *eeh_debug_lookup_pdev(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
uint32_t domain, bus, dev, fn;
struct pci_dev *pdev;
char buf[20];
int ret;
memset(buf, 0, sizeof(buf));
ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
if (!ret)
return ERR_PTR(-EFAULT);
ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
if (ret != 4) {
pr_err("%s: expected 4 args, got %d\n", __func__, ret);
return ERR_PTR(-EINVAL);
}
pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
if (!pdev)
return ERR_PTR(-ENODEV);
return pdev;
}
static int eeh_enable_dbgfs_set(void *data, u64 val)
{
if (val)
eeh_clear_flag(EEH_FORCE_DISABLED);
else
eeh_add_flag(EEH_FORCE_DISABLED);
return 0;
}
static int eeh_enable_dbgfs_get(void *data, u64 *val)
{
if (eeh_enabled())
*val = 0x1ul;
else
*val = 0x0ul;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get,
eeh_enable_dbgfs_set, "0x%llx\n");
static ssize_t eeh_force_recover_write(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct pci_controller *hose;
uint32_t phbid, pe_no;
struct eeh_pe *pe;
char buf[20];
int ret;
ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
if (!ret)
return -EFAULT;
/*
* When PE is NULL the event is a "special" event. Rather than
* recovering a specific PE it forces the EEH core to scan for failed
* PHBs and recovers each. This needs to be done before any device
* recoveries can occur.
*/
if (!strncmp(buf, "hwcheck", 7)) {
__eeh_send_failure_event(NULL);
return count;
}
ret = sscanf(buf, "%x:%x", &phbid, &pe_no);
if (ret != 2)
return -EINVAL;
hose = pci_find_controller_for_domain(phbid);
if (!hose)
return -ENODEV;
/* Retrieve PE */
pe = eeh_pe_get(hose, pe_no);
if (!pe)
return -ENODEV;
/*
* We don't do any state checking here since the detection
* process is async to the recovery process. The recovery
* thread *should* not break even if we schedule a recovery
* from an odd state (e.g. PE removed, or recovery of a
* non-isolated PE)
*/
__eeh_send_failure_event(pe);
return ret < 0 ? ret : count;
}
static const struct file_operations eeh_force_recover_fops = {
.open = simple_open,
.llseek = no_llseek,
.write = eeh_force_recover_write,
};
static ssize_t eeh_debugfs_dev_usage(struct file *filp,
char __user *user_buf,
size_t count, loff_t *ppos)
{
static const char usage[] = "input format: <domain>:<bus>:<dev>.<fn>\n";
return simple_read_from_buffer(user_buf, count, ppos,
usage, sizeof(usage) - 1);
}
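/*
 * For example (assuming debugfs is mounted at /sys/kernel/debug and that
 * arch_debugfs_dir is the usual "powerpc" directory):
 *
 *	echo 0000:01:00.0 > /sys/kernel/debug/powerpc/eeh_dev_check
 *	echo 0000:01:00.0 > /sys/kernel/debug/powerpc/eeh_dev_break
 */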
static ssize_t eeh_dev_check_write(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct pci_dev *pdev;
struct eeh_dev *edev;
int ret;
pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
edev = pci_dev_to_eeh_dev(pdev);
if (!edev) {
pci_err(pdev, "No eeh_dev for this device!\n");
pci_dev_put(pdev);
return -ENODEV;
}
ret = eeh_dev_check_failure(edev);
pci_info(pdev, "eeh_dev_check_failure(%s) = %d\n",
pci_name(pdev), ret);
pci_dev_put(pdev);
return count;
}
static const struct file_operations eeh_dev_check_fops = {
.open = simple_open,
.llseek = no_llseek,
.write = eeh_dev_check_write,
.read = eeh_debugfs_dev_usage,
};
static int eeh_debugfs_break_device(struct pci_dev *pdev)
{
struct resource *bar = NULL;
void __iomem *mapped;
u16 old, bit;
int i, pos;
/* Do we have an MMIO BAR to disable? */
for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
struct resource *r = &pdev->resource[i];
if (!r->flags || !r->start)
continue;
if (r->flags & IORESOURCE_IO)
continue;
if (r->flags & IORESOURCE_UNSET)
continue;
bar = r;
break;
}
if (!bar) {
pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n");
return -ENXIO;
}
pci_err(pdev, "Going to break: %pR\n", bar);
if (pdev->is_virtfn) {
#ifndef CONFIG_PCI_IOV
return -ENXIO;
#else
/*
* VFs don't have a per-function COMMAND register, so the best
* we can do is clear the Memory Space Enable bit in the PF's
* SRIOV control reg.
*
		 * Unfortunately, this requires that we have a PF (i.e. it doesn't
* work for a passed-through VF) and it has the potential side
* effect of also causing an EEH on every other VF under the
* PF. Oh well.
*/
pdev = pdev->physfn;
if (!pdev)
return -ENXIO; /* passed through VFs have no PF */
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
pos += PCI_SRIOV_CTRL;
bit = PCI_SRIOV_CTRL_MSE;
#endif /* !CONFIG_PCI_IOV */
} else {
bit = PCI_COMMAND_MEMORY;
pos = PCI_COMMAND;
}
/*
* Process here is:
*
* 1. Disable Memory space.
*
* 2. Perform an MMIO to the device. This should result in an error
* (CA / UR) being raised by the device which results in an EEH
	 *    PE freeze. Using the in_8() accessor skips the EEH detection
	 *    hook, so the EEH detection machinery won't be triggered here.
	 *    This is to match the usual behaviour of EEH, where the HW will
	 *    asynchronously freeze a PE and it's up to the kernel to notice
	 *    and deal with it.
	 *
	 * 3. Turn Memory space back on. This is more important for VFs
	 *    since recovery will probably fail if we don't. For normal
	 *    devices the COMMAND register is reset as a part of
	 *    re-initialising the device.
*
* Breaking stuff is the point so who cares if it's racy ;)
*/
pci_read_config_word(pdev, pos, &old);
mapped = ioremap(bar->start, PAGE_SIZE);
if (!mapped) {
pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar);
return -ENXIO;
}
pci_write_config_word(pdev, pos, old & ~bit);
in_8(mapped);
pci_write_config_word(pdev, pos, old);
iounmap(mapped);
return 0;
}
static ssize_t eeh_dev_break_write(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct pci_dev *pdev;
int ret;
pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
ret = eeh_debugfs_break_device(pdev);
pci_dev_put(pdev);
if (ret < 0)
return ret;
return count;
}
static const struct file_operations eeh_dev_break_fops = {
.open = simple_open,
.llseek = no_llseek,
.write = eeh_dev_break_write,
.read = eeh_debugfs_dev_usage,
};
static ssize_t eeh_dev_can_recover(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct pci_driver *drv;
struct pci_dev *pdev;
size_t ret;
pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
/*
* In order for error recovery to work the driver needs to implement
* .error_detected(), so it can quiesce IO to the device, and
* .slot_reset() so it can re-initialise the device after a reset.
*
* Ideally they'd implement .resume() too, but some drivers which
* we need to support (notably IPR) don't so I guess we can tolerate
* that.
*
* .mmio_enabled() is mostly there as a work-around for devices which
* take forever to re-init after a hot reset. Implementing that is
* strictly optional.
*/
drv = pci_dev_driver(pdev);
if (drv &&
drv->err_handler &&
drv->err_handler->error_detected &&
drv->err_handler->slot_reset) {
ret = count;
} else {
ret = -EOPNOTSUPP;
}
pci_dev_put(pdev);
return ret;
}
static const struct file_operations eeh_dev_can_recover_fops = {
.open = simple_open,
.llseek = no_llseek,
.write = eeh_dev_can_recover,
.read = eeh_debugfs_dev_usage,
};
#endif
static int __init eeh_init_proc(void)
{
if (machine_is(pseries) || machine_is(powernv)) {
proc_create_single("powerpc/eeh", 0, NULL, proc_eeh_show);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file_unsafe("eeh_enable", 0600,
arch_debugfs_dir, NULL,
&eeh_enable_dbgfs_ops);
debugfs_create_u32("eeh_max_freezes", 0600,
arch_debugfs_dir, &eeh_max_freezes);
debugfs_create_bool("eeh_disable_recovery", 0600,
arch_debugfs_dir,
&eeh_debugfs_no_recover);
debugfs_create_file_unsafe("eeh_dev_check", 0600,
arch_debugfs_dir, NULL,
&eeh_dev_check_fops);
debugfs_create_file_unsafe("eeh_dev_break", 0600,
arch_debugfs_dir, NULL,
&eeh_dev_break_fops);
debugfs_create_file_unsafe("eeh_force_recover", 0600,
arch_debugfs_dir, NULL,
&eeh_force_recover_fops);
debugfs_create_file_unsafe("eeh_dev_can_recover", 0600,
arch_debugfs_dir, NULL,
&eeh_dev_can_recover_fops);
eeh_cache_debugfs_init();
#endif
}
return 0;
}
__initcall(eeh_init_proc);
| linux-master | arch/powerpc/kernel/eeh.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
* <[email protected]>
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>
#include <asm/syscall.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <asm/setup.h>
/* The alignment of the vDSO */
#define VDSO_ALIGNMENT (1 << 16)
extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;
long sys_ni_syscall(void);
/*
* The vdso data page (aka. systemcfg for old ppc64 fans) is here.
* Once the early boot kernel code no longer needs to muck around
* with it, it will become dynamically allocated
*/
static union {
struct vdso_arch_data data;
u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_arch_data *vdso_data = &vdso_data_store.data;
enum vvar_pages {
VVAR_DATA_PAGE_OFFSET,
VVAR_TIMENS_PAGE_OFFSET,
VVAR_NR_PAGES,
};
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
unsigned long text_size)
{
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
if (new_size != text_size)
return -EINVAL;
current->mm->context.vdso = (void __user *)new_vma->vm_start;
return 0;
}
static int vdso32_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
return vdso_mremap(sm, new_vma, &vdso32_end - &vdso32_start);
}
static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
}
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf);
static struct vm_special_mapping vvar_spec __ro_after_init = {
.name = "[vvar]",
.fault = vvar_fault,
};
static struct vm_special_mapping vdso32_spec __ro_after_init = {
.name = "[vdso]",
.mremap = vdso32_mremap,
};
static struct vm_special_mapping vdso64_spec __ro_after_init = {
.name = "[vdso]",
.mremap = vdso64_mremap,
};
#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
return ((struct vdso_arch_data *)vvar_page)->data;
}
/*
* The vvar mapping contains data for a specific time namespace, so when a task
* changes namespace we must unmap its vvar data for the old namespace.
* Subsequent faults will map in data for the new namespace.
*
* For more details see timens_setup_vdso_data().
*/
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
struct mm_struct *mm = task->mm;
VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
mmap_read_lock(mm);
for_each_vma(vmi, vma) {
if (vma_is_special_mapping(vma, &vvar_spec))
zap_vma_pages(vma);
}
mmap_read_unlock(mm);
return 0;
}
#endif
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *timens_page = find_timens_vvar_page(vma);
unsigned long pfn;
switch (vmf->pgoff) {
case VVAR_DATA_PAGE_OFFSET:
if (timens_page)
pfn = page_to_pfn(timens_page);
else
pfn = virt_to_pfn(vdso_data);
break;
#ifdef CONFIG_TIME_NS
case VVAR_TIMENS_PAGE_OFFSET:
/*
* If a task belongs to a time namespace then a namespace
* specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
* the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
* offset.
* See also the comment near timens_setup_vdso_data().
*/
if (!timens_page)
return VM_FAULT_SIGBUS;
pfn = virt_to_pfn(vdso_data);
break;
#endif /* CONFIG_TIME_NS */
default:
return VM_FAULT_SIGBUS;
}
return vmf_insert_pfn(vma, vmf->address, pfn);
}
/*
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
*/
static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
unsigned long vdso_size, vdso_base, mappings_size;
struct vm_special_mapping *vdso_spec;
unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
if (is_32bit_task()) {
vdso_spec = &vdso32_spec;
vdso_size = &vdso32_end - &vdso32_start;
} else {
vdso_spec = &vdso64_spec;
vdso_size = &vdso64_end - &vdso64_start;
}
mappings_size = vdso_size + vvar_size;
mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
/*
* Pick a base address for the vDSO in process space.
* Add enough to the size so that the result can be aligned.
*/
vdso_base = get_unmapped_area(NULL, 0, mappings_size, 0, 0);
if (IS_ERR_VALUE(vdso_base))
return vdso_base;
/* Add required alignment. */
vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
/*
* Put vDSO base into mm struct. We need to do this before calling
* install_special_mapping or the perf counter mmap tracking code
* will fail to recognise it as a vDSO.
*/
mm->context.vdso = (void __user *)vdso_base + vvar_size;
vma = _install_special_mapping(mm, vdso_base, vvar_size,
VM_READ | VM_MAYREAD | VM_IO |
VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
if (IS_ERR(vma))
return PTR_ERR(vma);
/*
* our vma flags don't have VM_WRITE so by default, the process isn't
* allowed to write those pages.
* gdb can break that with ptrace interface, and thus trigger COW on
* those pages but it's then your responsibility to never do that on
* the "data" page of the vDSO or you'll stop getting kernel updates
* and your nice userland gettimeofday will be totally dead.
* It's fine to use that for setting breakpoints in the vDSO code
* pages though.
*/
vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
VM_READ | VM_EXEC | VM_MAYREAD |
VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
if (IS_ERR(vma))
do_munmap(mm, vdso_base, vvar_size, NULL);
return PTR_ERR_OR_ZERO(vma);
}
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
int rc;
mm->context.vdso = NULL;
if (mmap_write_lock_killable(mm))
return -EINTR;
rc = __arch_setup_additional_pages(bprm, uses_interp);
if (rc)
mm->context.vdso = NULL;
mmap_write_unlock(mm);
return rc;
}
#define VDSO_DO_FIXUPS(type, value, bits, sec) do { \
void *__start = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_start); \
void *__end = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_end); \
\
do_##type##_fixups((value), __start, __end); \
} while (0)
static void __init vdso_fixup_features(void)
{
#ifdef CONFIG_PPC64
VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup);
VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 64, mmu_ftr_fixup);
VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 64, fw_ftr_fixup);
VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 64, lwsync_fixup);
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_VDSO32
VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 32, ftr_fixup);
VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 32, mmu_ftr_fixup);
#ifdef CONFIG_PPC64
VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 32, fw_ftr_fixup);
#endif /* CONFIG_PPC64 */
VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 32, lwsync_fixup);
#endif
}
/*
* Called from setup_arch to initialize the bitmap of available
* syscalls in the systemcfg page
*/
static void __init vdso_setup_syscall_map(void)
{
unsigned int i;
for (i = 0; i < NR_syscalls; i++) {
if (sys_call_table[i] != (void *)&sys_ni_syscall)
vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
if (IS_ENABLED(CONFIG_COMPAT) &&
compat_sys_call_table[i] != (void *)&sys_ni_syscall)
vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
}
}
#ifdef CONFIG_PPC64
int vdso_getcpu_init(void)
{
unsigned long cpu, node, val;
/*
* SPRG_VDSO contains the CPU in the bottom 16 bits and the NUMA node
* in the next 16 bits. The VDSO uses this to implement getcpu().
*/
cpu = get_cpu();
WARN_ON_ONCE(cpu > 0xffff);
node = cpu_to_node(cpu);
WARN_ON_ONCE(node > 0xffff);
val = (cpu & 0xffff) | ((node & 0xffff) << 16);
mtspr(SPRN_SPRG_VDSO_WRITE, val);
get_paca()->sprg_vdso = val;
put_cpu();
return 0;
}
/* We need to call this before SMP init */
early_initcall(vdso_getcpu_init);
#endif
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
int i;
struct page **pagelist;
int pages = (end - start) >> PAGE_SHIFT;
pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
if (!pagelist)
panic("%s: Cannot allocate page list for VDSO", __func__);
for (i = 0; i < pages; i++)
pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
return pagelist;
}
static int __init vdso_init(void)
{
#ifdef CONFIG_PPC64
/*
* Fill up the "systemcfg" stuff for backward compatibility
*/
strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
vdso_data->version.major = SYSTEMCFG_MAJOR;
vdso_data->version.minor = SYSTEMCFG_MINOR;
vdso_data->processor = mfspr(SPRN_PVR);
/*
* Fake the old platform number for pSeries and add
* in LPAR bit if necessary
*/
vdso_data->platform = 0x100;
if (firmware_has_feature(FW_FEATURE_LPAR))
vdso_data->platform |= 1;
vdso_data->physicalMemorySize = memblock_phys_mem_size();
vdso_data->dcache_size = ppc64_caches.l1d.size;
vdso_data->dcache_line_size = ppc64_caches.l1d.line_size;
vdso_data->icache_size = ppc64_caches.l1i.size;
vdso_data->icache_line_size = ppc64_caches.l1i.line_size;
vdso_data->dcache_block_size = ppc64_caches.l1d.block_size;
vdso_data->icache_block_size = ppc64_caches.l1i.block_size;
vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
#endif /* CONFIG_PPC64 */
vdso_setup_syscall_map();
vdso_fixup_features();
if (IS_ENABLED(CONFIG_VDSO32))
vdso32_spec.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);
if (IS_ENABLED(CONFIG_PPC64))
vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
smp_wmb();
return 0;
}
arch_initcall(vdso_init);
| linux-master | arch/powerpc/kernel/vdso.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 IBM Corporation <[email protected]>
*
* This code exposes secure variables to user via sysfs
*/
#define pr_fmt(fmt) "secvar-sysfs: "fmt
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/string.h>
#include <linux/of.h>
#include <asm/secvar.h>
#define NAME_MAX_SIZE 1024
static struct kobject *secvar_kobj;
static struct kset *secvar_kset;
static ssize_t format_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char tmp[32];
ssize_t len = secvar_ops->format(tmp, sizeof(tmp));
if (len > 0)
return sysfs_emit(buf, "%s\n", tmp);
else if (len < 0)
pr_err("Error %zd reading format string\n", len);
else
pr_err("Got empty format string from backend\n");
return -EIO;
}
static ssize_t size_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
u64 dsize;
int rc;
rc = secvar_ops->get(kobj->name, strlen(kobj->name) + 1, NULL, &dsize);
if (rc) {
if (rc != -ENOENT)
pr_err("Error retrieving %s variable size %d\n", kobj->name, rc);
return rc;
}
return sysfs_emit(buf, "%llu\n", dsize);
}
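/*
 * Reading a variable is a two-step operation: call ->get() with a NULL
 * buffer to learn the data size, then allocate a buffer of that size and
 * call ->get() again to fetch the contents.
 */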
static ssize_t data_read(struct file *filep, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
char *data;
u64 dsize;
int rc;
rc = secvar_ops->get(kobj->name, strlen(kobj->name) + 1, NULL, &dsize);
if (rc) {
if (rc != -ENOENT)
pr_err("Error getting %s variable size %d\n", kobj->name, rc);
return rc;
}
pr_debug("dsize is %llu\n", dsize);
data = kzalloc(dsize, GFP_KERNEL);
if (!data)
return -ENOMEM;
rc = secvar_ops->get(kobj->name, strlen(kobj->name) + 1, data, &dsize);
if (rc) {
pr_err("Error getting %s variable %d\n", kobj->name, rc);
goto data_fail;
}
rc = memory_read_from_buffer(buf, count, &off, data, dsize);
data_fail:
kfree(data);
return rc;
}
static ssize_t update_write(struct file *filep, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
int rc;
pr_debug("count is %ld\n", count);
rc = secvar_ops->set(kobj->name, strlen(kobj->name) + 1, buf, count);
if (rc) {
pr_err("Error setting the %s variable %d\n", kobj->name, rc);
return rc;
}
return count;
}
static struct kobj_attribute format_attr = __ATTR_RO(format);
static struct kobj_attribute size_attr = __ATTR_RO(size);
static struct bin_attribute data_attr = __BIN_ATTR_RO(data, 0);
static struct bin_attribute update_attr = __BIN_ATTR_WO(update, 0);
static struct bin_attribute *secvar_bin_attrs[] = {
&data_attr,
&update_attr,
NULL,
};
static struct attribute *secvar_attrs[] = {
&size_attr.attr,
NULL,
};
static const struct attribute_group secvar_attr_group = {
.attrs = secvar_attrs,
.bin_attrs = secvar_bin_attrs,
};
__ATTRIBUTE_GROUPS(secvar_attr);
static struct kobj_type secvar_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = secvar_attr_groups,
};
static int update_kobj_size(void)
{
u64 varsize;
int rc = secvar_ops->max_size(&varsize);
if (rc)
return rc;
data_attr.size = varsize;
update_attr.size = varsize;
return 0;
}
static int secvar_sysfs_config(struct kobject *kobj)
{
struct attribute_group config_group = {
.name = "config",
.attrs = (struct attribute **)secvar_ops->config_attrs,
};
if (secvar_ops->config_attrs)
return sysfs_create_group(kobj, &config_group);
return 0;
}
static int add_var(const char *name)
{
struct kobject *kobj;
int rc;
kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
if (!kobj)
return -ENOMEM;
kobject_init(kobj, &secvar_ktype);
rc = kobject_add(kobj, &secvar_kset->kobj, "%s", name);
if (rc) {
pr_warn("kobject_add error %d for attribute: %s\n", rc,
name);
kobject_put(kobj);
return rc;
}
kobject_uevent(kobj, KOBJ_ADD);
return 0;
}
static int secvar_sysfs_load(void)
{
u64 namesize = 0;
char *name;
int rc;
name = kzalloc(NAME_MAX_SIZE, GFP_KERNEL);
if (!name)
return -ENOMEM;
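	/*
	 * Iterate over the firmware's variables: each get_next() call is
	 * presumably keyed off the previously returned name (starting from
	 * the zeroed buffer), and -ENOENT marks the end of the list.
	 */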
do {
rc = secvar_ops->get_next(name, &namesize, NAME_MAX_SIZE);
if (rc) {
if (rc != -ENOENT)
pr_err("error getting secvar from firmware %d\n", rc);
else
rc = 0;
break;
}
rc = add_var(name);
} while (!rc);
kfree(name);
return rc;
}
static int secvar_sysfs_load_static(void)
{
const char * const *name_ptr = secvar_ops->var_names;
int rc;
while (*name_ptr) {
rc = add_var(*name_ptr);
if (rc)
return rc;
name_ptr++;
}
return 0;
}
static int secvar_sysfs_init(void)
{
u64 max_size;
int rc;
if (!secvar_ops) {
pr_warn("Failed to retrieve secvar operations\n");
return -ENODEV;
}
secvar_kobj = kobject_create_and_add("secvar", firmware_kobj);
if (!secvar_kobj) {
pr_err("Failed to create firmware kobj\n");
return -ENOMEM;
}
rc = sysfs_create_file(secvar_kobj, &format_attr.attr);
if (rc) {
pr_err("Failed to create format object\n");
rc = -ENOMEM;
goto err;
}
secvar_kset = kset_create_and_add("vars", NULL, secvar_kobj);
if (!secvar_kset) {
pr_err("sysfs kobject registration failed\n");
rc = -ENOMEM;
goto err;
}
rc = update_kobj_size();
if (rc) {
pr_err("Cannot read the size of the attribute\n");
goto err;
}
rc = secvar_sysfs_config(secvar_kobj);
if (rc) {
pr_err("Failed to create config directory\n");
goto err;
}
if (secvar_ops->get_next)
rc = secvar_sysfs_load();
else
rc = secvar_sysfs_load_static();
if (rc) {
pr_err("Failed to create variable attributes\n");
goto err;
}
// Due to sysfs limitations, we will only ever get a write buffer of
// up to 1 page in size. Print a warning if this is potentially going
// to cause problems, so that the user is aware.
secvar_ops->max_size(&max_size);
if (max_size > PAGE_SIZE)
pr_warn_ratelimited("PAGE_SIZE (%lu) is smaller than maximum object size (%llu), writes are limited to PAGE_SIZE\n",
PAGE_SIZE, max_size);
return 0;
err:
kobject_put(secvar_kobj);
return rc;
}
late_initcall(secvar_sysfs_init);
| linux-master | arch/powerpc/kernel/secvar-sysfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Implementation of various system calls for Linux/PowerPC
*
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
*
* Derived from "arch/i386/kernel/sys_i386.c"
* Adapted from the i386 version by Gary Thomas
* Modified by Cort Dougan ([email protected])
* and Paul Mackerras ([email protected]).
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/PPC
* platform.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/sys.h>
#include <linux/ipc.h>
#include <linux/utsname.h>
#include <linux/file.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <asm/syscalls.h>
#include <asm/time.h>
#include <asm/unistd.h>
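/*
 * Common helper for the mmap flavours below: @off >> @shift must yield a
 * page offset. sys_mmap passes a byte offset with shift = PAGE_SHIFT,
 * while mmap2 passes an offset in 4K units with shift = PAGE_SHIFT - 12.
 * The alignment check rejects offsets that aren't a whole number of pages.
 */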
static long do_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long off, int shift)
{
if (!arch_validate_prot(prot, addr))
return -EINVAL;
if (!IS_ALIGNED(off, 1 << shift))
return -EINVAL;
return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> shift);
}
SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, unsigned long, pgoff)
{
return do_mmap2(addr, len, prot, flags, fd, pgoff, PAGE_SHIFT-12);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(mmap2,
unsigned long, addr, size_t, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, unsigned long, off_4k)
{
return do_mmap2(addr, len, prot, flags, fd, off_4k, PAGE_SHIFT-12);
}
#endif
SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, off_t, offset)
{
return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
}
#ifdef CONFIG_PPC64
static long do_ppc64_personality(unsigned long personality)
{
long ret;
if (personality(current->personality) == PER_LINUX32
&& personality(personality) == PER_LINUX)
personality = (personality & ~PER_MASK) | PER_LINUX32;
ret = ksys_personality(personality);
if (personality(ret) == PER_LINUX32)
ret = (ret & ~PER_MASK) | PER_LINUX;
return ret;
}
SYSCALL_DEFINE1(ppc64_personality, unsigned long, personality)
{
return do_ppc64_personality(personality);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(ppc64_personality, unsigned long, personality)
{
return do_ppc64_personality(personality);
}
#endif /* CONFIG_COMPAT */
#endif /* CONFIG_PPC64 */
SYSCALL_DEFINE6(ppc_fadvise64_64,
int, fd, int, advice, u32, offset_high, u32, offset_low,
u32, len_high, u32, len_low)
{
return ksys_fadvise64_64(fd, merge_64(offset_high, offset_low),
merge_64(len_high, len_low), advice);
}
SYSCALL_DEFINE0(switch_endian)
{
struct thread_info *ti;
regs_set_return_msr(current->thread.regs,
current->thread.regs->msr ^ MSR_LE);
/*
* Set TIF_RESTOREALL so that r3 isn't clobbered on return to
* userspace. That also has the effect of restoring the non-volatile
* GPRs, so we saved them on the way in here.
*/
ti = current_thread_info();
ti->flags |= _TIF_RESTOREALL;
return 0;
}
| linux-master | arch/powerpc/kernel/syscalls.c |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* This file contains the table of syscall-handling functions.
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
*
* Largely rewritten by Cort Dougan ([email protected])
* and Paul Mackerras.
*
* Adapted for iSeries by Mike Corrigan ([email protected])
* PPC64 updates by Dave Engebretsen ([email protected])
*/
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <asm/unistd.h>
#include <asm/syscalls.h>
#undef __SYSCALL_WITH_COMPAT
#define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry)
#undef __SYSCALL
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
#define __SYSCALL(nr, entry) [nr] = entry,
#else
/*
 * Coercing syscall handlers with arbitrary parameters to a common type
 * requires a cast to void * to avoid -Wcast-function-type.
*/
#define __SYSCALL(nr, entry) [nr] = (void *) entry,
#endif
const syscall_fn sys_call_table[] = {
#ifdef CONFIG_PPC64
#include <asm/syscall_table_64.h>
#else
#include <asm/syscall_table_32.h>
#endif
};
#ifdef CONFIG_COMPAT
#undef __SYSCALL_WITH_COMPAT
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
const syscall_fn compat_sys_call_table[] = {
#include <asm/syscall_table_32.h>
};
#endif /* CONFIG_COMPAT */
| linux-master | arch/powerpc/kernel/systbl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Routines for doing kexec-based kdump.
*
* Copyright (C) 2005, IBM Corp.
*
* Created by: Michael Ellerman
*/
#undef DEBUG
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/firmware.h>
#include <linux/uio.h>
#include <asm/rtas.h>
#include <asm/inst.h>
#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
#ifndef CONFIG_NONSTATIC_KERNEL
void __init reserve_kdump_trampoline(void)
{
memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}
static void __init create_trampoline(unsigned long addr)
{
u32 *p = (u32 *)addr;
	/* The maximum range of a single instruction branch is the current
* instruction's address + (32 MB - 4) bytes. For the trampoline we
* need to branch to current address + 32 MB. So we insert a nop at
* the trampoline address, then the next instruction (+ 4 bytes)
* does a branch to (32 MB - 4). The net effect is that when we
* branch to "addr" we jump to ("addr" + 32 MB). Although it requires
* two instructions it doesn't require any registers.
*/
patch_instruction(p, ppc_inst(PPC_RAW_NOP()));
patch_branch(p + 1, addr + PHYSICAL_START, 0);
}
void __init setup_kdump_trampoline(void)
{
unsigned long i;
DBG(" -> setup_kdump_trampoline()\n");
for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
create_trampoline(i);
}
#ifdef CONFIG_PPC_PSERIES
create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */
DBG(" <- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_NONSTATIC_KERNEL */
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
size_t csize, unsigned long offset)
{
void *vaddr;
phys_addr_t paddr;
if (!csize)
return 0;
csize = min_t(size_t, csize, PAGE_SIZE);
paddr = pfn << PAGE_SHIFT;
if (memblock_is_region_memory(paddr, csize)) {
vaddr = __va(paddr);
csize = copy_to_iter(vaddr + offset, csize, iter);
} else {
vaddr = ioremap_cache(paddr, PAGE_SIZE);
csize = copy_to_iter(vaddr + offset, csize, iter);
iounmap(vaddr);
}
return csize;
}
#ifdef CONFIG_PPC_RTAS
/*
* The crashkernel region will almost always overlap the RTAS region, so
* we have to be careful when shrinking the crashkernel region.
*/
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
unsigned long addr;
const __be32 *basep, *sizep;
unsigned int rtas_start = 0, rtas_end = 0;
basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
sizep = of_get_property(rtas.dev, "rtas-size", NULL);
if (basep && sizep) {
rtas_start = be32_to_cpup(basep);
rtas_end = rtas_start + be32_to_cpup(sizep);
}
for (addr = begin; addr < end; addr += PAGE_SIZE) {
/* Does this page overlap with the RTAS region? */
if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
continue;
free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}
}
#endif
| linux-master | arch/powerpc/kernel/crash_dump.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/compat.h>
#include <linux/context_tracking.h>
#include <linux/randomize_kstack.h>
#include <asm/interrupt.h>
#include <asm/kup.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/tm.h>
#include <asm/unistd.h>
/* Has to run notrace because it is entered not completely "reconciled" */
notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
{
long ret;
syscall_fn f;
kuap_lock();
add_random_kstack_offset();
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
trace_hardirqs_off(); /* finish reconciling */
CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
user_exit_irqoff();
BUG_ON(regs_is_unrecoverable(regs));
BUG_ON(!(regs->msr & MSR_PR));
BUG_ON(arch_irq_disabled_regs(regs));
#ifdef CONFIG_PPC_PKEY
if (mmu_has_feature(MMU_FTR_PKEY)) {
unsigned long amr, iamr;
bool flush_needed = false;
/*
* When entering from userspace we mostly have the AMR/IAMR
* different from kernel default values. Hence don't compare.
*/
amr = mfspr(SPRN_AMR);
iamr = mfspr(SPRN_IAMR);
regs->amr = amr;
regs->iamr = iamr;
if (mmu_has_feature(MMU_FTR_KUAP)) {
mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
flush_needed = true;
}
if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
flush_needed = true;
}
if (flush_needed)
isync();
} else
#endif
kuap_assert_locked();
booke_restore_dbcr0();
account_cpu_user_entry();
account_stolen_time();
/*
* This is not required for the syscall exit path, but makes the
* stack frame look nicer. If this was initialised in the first stack
* frame, or if the unwinder was taught the first stack frame always
* returns to user with IRQS_ENABLED, this store could be avoided!
*/
irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
/*
* If system call is called with TM active, set _TIF_RESTOREALL to
* prevent RFSCV being used to return to userspace, because POWER9
* TM implementation has problems with this instruction returning to
* transactional state. Final register values are not relevant because
* the transaction will be aborted upon return anyway. Or in the case
* of unsupported_scv SIGILL fault, the return state does not much
* matter because it's an edge case.
*/
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
		set_bits(_TIF_RESTOREALL, &current_thread_info()->flags);
/*
* If the system call was made with a transaction active, doom it and
* return without performing the system call. Unless it was an
* unsupported scv vector, in which case it's treated like an illegal
* instruction.
*/
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
!trap_is_unsupported_scv(regs)) {
/* Enable TM in the kernel, and disable EE (for scv) */
hard_irq_disable();
mtmsr(mfmsr() | MSR_TM);
/* tabort, this dooms the transaction, nothing else */
asm volatile(".long 0x7c00071d | ((%0) << 16)"
:: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
/*
* Userspace will never see the return value. Execution will
* resume after the tbegin. of the aborted transaction with the
* checkpointed register state. A context switch could occur
* or signal delivered to the process before resuming the
* doomed transaction context, but that should all be handled
* as expected.
*/
return -ENOSYS;
}
#endif // CONFIG_PPC_TRANSACTIONAL_MEM
local_irq_enable();
if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
if (unlikely(trap_is_unsupported_scv(regs))) {
/* Unsupported scv vector */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
return regs->gpr[3];
}
/*
* We use the return value of do_syscall_trace_enter() as the
* syscall number. If the syscall was rejected for any reason
* do_syscall_trace_enter() returns an invalid syscall number
* and the test against NR_syscalls will fail and the return
* value to be used is in regs->gpr[3].
*/
r0 = do_syscall_trace_enter(regs);
if (unlikely(r0 >= NR_syscalls))
return regs->gpr[3];
} else if (unlikely(r0 >= NR_syscalls)) {
if (unlikely(trap_is_unsupported_scv(regs))) {
/* Unsupported scv vector */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
return regs->gpr[3];
}
return -ENOSYS;
}
/* May be faster to do array_index_nospec? */
barrier_nospec();
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
// No COMPAT if we have SYSCALL_WRAPPER, see Kconfig
f = (void *)sys_call_table[r0];
ret = f(regs);
#else
if (unlikely(is_compat_task())) {
unsigned long r3, r4, r5, r6, r7, r8;
f = (void *)compat_sys_call_table[r0];
r3 = regs->gpr[3] & 0x00000000ffffffffULL;
r4 = regs->gpr[4] & 0x00000000ffffffffULL;
r5 = regs->gpr[5] & 0x00000000ffffffffULL;
r6 = regs->gpr[6] & 0x00000000ffffffffULL;
r7 = regs->gpr[7] & 0x00000000ffffffffULL;
r8 = regs->gpr[8] & 0x00000000ffffffffULL;
ret = f(r3, r4, r5, r6, r7, r8);
} else {
f = (void *)sys_call_table[r0];
ret = f(regs->gpr[3], regs->gpr[4], regs->gpr[5],
regs->gpr[6], regs->gpr[7], regs->gpr[8]);
}
#endif
/*
* Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
* so the maximum stack offset is 1k bytes (10 bits).
*
* The actual entropy will be further reduced by the compiler when
* applying stack alignment constraints: the powerpc architecture
* may have two kinds of stack alignment (16-bytes and 8-bytes).
*
* So the resulting 6 or 7 bits of entropy is seen in SP[9:4] or SP[9:3].
*/
choose_random_kstack_offset(mftb());
return ret;
}
| linux-master | arch/powerpc/kernel/syscall.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <asm/machdep.h>
void arch_dma_set_mask(struct device *dev, u64 dma_mask)
{
if (ppc_md.dma_set_mask)
ppc_md.dma_set_mask(dev, dma_mask);
}
EXPORT_SYMBOL(arch_dma_set_mask);
| linux-master | arch/powerpc/kernel/dma-mask.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The file intends to implement PE based on the information from
 * platforms. Basically, there are 3 types of PEs: PHB/Bus/Device.
 * All the PEs are organized as a hierarchy tree. The first level
* of the tree will be associated to existing PHBs since the particular
* PE is only meaningful in one PHB domain.
*
* Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
*/
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
static int eeh_pe_aux_size = 0;
static LIST_HEAD(eeh_phb_pe);
/**
 * eeh_set_pe_aux_size - Set PE auxiliary data size
 * @size: PE auxiliary data size
 *
 * Set the PE auxiliary data size.
*/
void eeh_set_pe_aux_size(int size)
{
if (size < 0)
return;
eeh_pe_aux_size = size;
}
/**
* eeh_pe_alloc - Allocate PE
* @phb: PCI controller
* @type: PE type
*
* Allocate PE instance dynamically.
*/
static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
{
struct eeh_pe *pe;
size_t alloc_size;
alloc_size = sizeof(struct eeh_pe);
if (eeh_pe_aux_size) {
alloc_size = ALIGN(alloc_size, cache_line_size());
alloc_size += eeh_pe_aux_size;
}
/* Allocate PHB PE */
pe = kzalloc(alloc_size, GFP_KERNEL);
if (!pe) return NULL;
/* Initialize PHB PE */
pe->type = type;
pe->phb = phb;
INIT_LIST_HEAD(&pe->child_list);
INIT_LIST_HEAD(&pe->edevs);
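	/*
	 * The auxiliary data (if any) lives immediately after the eeh_pe
	 * structure, at the next cache-line-aligned offset within the same
	 * allocation.
	 */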
pe->data = (void *)pe + ALIGN(sizeof(struct eeh_pe),
cache_line_size());
return pe;
}
/**
* eeh_phb_pe_create - Create PHB PE
* @phb: PCI controller
*
* The function should be called while the PHB is detected during
* system boot or PCI hotplug in order to create PHB PE.
*/
int eeh_phb_pe_create(struct pci_controller *phb)
{
struct eeh_pe *pe;
/* Allocate PHB PE */
pe = eeh_pe_alloc(phb, EEH_PE_PHB);
if (!pe) {
pr_err("%s: out of memory!\n", __func__);
return -ENOMEM;
}
/* Put it into the list */
list_add_tail(&pe->child, &eeh_phb_pe);
pr_debug("EEH: Add PE for PHB#%x\n", phb->global_number);
return 0;
}
/**
* eeh_wait_state - Wait for PE state
* @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
*/
int eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
int ret;
int mwait;
/*
* According to PAPR, the state of PE might be temporarily
* unavailable. Under the circumstance, we have to wait
* for indicated time determined by firmware. The maximal
	 * wait time is 5 minutes, which is taken from the original
	 * EEH implementation; that implementation also defined the
	 * minimal wait time as 1 second.
*/
#define EEH_STATE_MIN_WAIT_TIME (1000)
#define EEH_STATE_MAX_WAIT_TIME (300 * 1000)
while (1) {
ret = eeh_ops->get_state(pe, &mwait);
if (ret != EEH_STATE_UNAVAILABLE)
return ret;
if (max_wait <= 0) {
pr_warn("%s: Timeout when getting PE's state (%d)\n",
__func__, max_wait);
return EEH_STATE_NOT_SUPPORT;
}
if (mwait < EEH_STATE_MIN_WAIT_TIME) {
pr_warn("%s: Firmware returned bad wait value %d\n",
__func__, mwait);
mwait = EEH_STATE_MIN_WAIT_TIME;
} else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
pr_warn("%s: Firmware returned too long wait value %d\n",
__func__, mwait);
mwait = EEH_STATE_MAX_WAIT_TIME;
}
msleep(min(mwait, max_wait));
max_wait -= mwait;
}
}
/**
* eeh_phb_pe_get - Retrieve PHB PE based on the given PHB
* @phb: PCI controller
*
* The overall PEs form hierarchy tree. The first layer of the
* hierarchy tree is composed of PHB PEs. The function is used
* to retrieve the corresponding PHB PE according to the given PHB.
*/
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
{
struct eeh_pe *pe;
list_for_each_entry(pe, &eeh_phb_pe, child) {
/*
* Actually, we needn't check the type since
* the PE for PHB has been determined when that
* was created.
*/
if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
return pe;
}
return NULL;
}
/**
* eeh_pe_next - Retrieve the next PE in the tree
* @pe: current PE
* @root: root PE
*
* The function is used to retrieve the next PE in the
* hierarchy PE tree.
*/
struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root)
{
struct list_head *next = pe->child_list.next;
if (next == &pe->child_list) {
while (1) {
if (pe == root)
return NULL;
next = pe->child.next;
if (next != &pe->parent->child_list)
break;
pe = pe->parent;
}
}
return list_entry(next, struct eeh_pe, child);
}
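/*
 * A pre-order walk of the tree therefore looks roughly like the following
 * (this is, in essence, what the eeh_for_each_pe() helper used elsewhere
 * in this file is assumed to expand to):
 *
 *	for (pe = root; pe; pe = eeh_pe_next(pe, root))
 *		visit(pe);
 */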
/**
* eeh_pe_traverse - Traverse PEs in the specified PHB
* @root: root PE
* @fn: callback
* @flag: extra parameter to callback
*
* The function is used to traverse the specified PE and its
* child PEs. The traversing is to be terminated once the
* callback returns something other than NULL, or no more PEs
* to be traversed.
*/
void *eeh_pe_traverse(struct eeh_pe *root,
eeh_pe_traverse_func fn, void *flag)
{
struct eeh_pe *pe;
void *ret;
eeh_for_each_pe(root, pe) {
ret = fn(pe, flag);
if (ret) return ret;
}
return NULL;
}
/**
* eeh_pe_dev_traverse - Traverse the devices from the PE
* @root: EEH PE
* @fn: function callback
* @flag: extra parameter to callback
*
* The function is used to traverse the devices of the specified
* PE and its child PEs.
*/
void eeh_pe_dev_traverse(struct eeh_pe *root,
eeh_edev_traverse_func fn, void *flag)
{
struct eeh_pe *pe;
struct eeh_dev *edev, *tmp;
if (!root) {
pr_warn("%s: Invalid PE %p\n",
__func__, root);
return;
}
/* Traverse root PE */
eeh_for_each_pe(root, pe)
eeh_pe_for_each_dev(pe, edev, tmp)
fn(edev, flag);
}
/**
* __eeh_pe_get - Check the PE address
*
 * For one particular PE, it can be identified by its PE address
 * or by the traditional BDF address, which is composed of the
 * Bus/Device/Function number. The extra data referred to by @flag
 * carries the address to be matched.
*/
static void *__eeh_pe_get(struct eeh_pe *pe, void *flag)
{
int *target_pe = flag;
/* PHB PEs are special and should be ignored */
if (pe->type & EEH_PE_PHB)
return NULL;
if (*target_pe == pe->addr)
return pe;
return NULL;
}
/**
* eeh_pe_get - Search PE based on the given address
* @phb: PCI controller
* @pe_no: PE number
*
* Search the corresponding PE based on the specified address which
* is included in the eeh device. The function is used to check if
* the associated PE has been created against the PE address. It's
 * notable that the PE address has 2 formats: the traditional PE
 * address, which is composed of the PCI bus/device/function number,
 * or the unified PE address.
*/
struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no)
{
struct eeh_pe *root = eeh_phb_pe_get(phb);
return eeh_pe_traverse(root, __eeh_pe_get, &pe_no);
}
/**
* eeh_pe_tree_insert - Add EEH device to parent PE
* @edev: EEH device
* @new_pe_parent: PE to create additional PEs under
*
* Add EEH device to the PE in edev->pe_config_addr. If a PE already
* exists with that address then @edev is added to that PE. Otherwise
* a new PE is created and inserted into the PE tree as a child of
* @new_pe_parent.
*
 * If @new_pe_parent is NULL then the new PE will be inserted
 * directly under the PHB.
*/
int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent)
{
struct pci_controller *hose = edev->controller;
struct eeh_pe *pe, *parent;
/*
	 * Check whether a PE already exists for this PE address. If
	 * it does, that PE should be composed of a PCI bus and its
	 * subordinate components.
*/
pe = eeh_pe_get(hose, edev->pe_config_addr);
if (pe) {
if (pe->type & EEH_PE_INVALID) {
list_add_tail(&edev->entry, &pe->edevs);
edev->pe = pe;
/*
			 * We get here because of a PCI hotplug caused by
			 * EEH recovery. We need to clear EEH_PE_INVALID all
			 * the way up to the top of the tree.
*/
parent = pe;
while (parent) {
if (!(parent->type & EEH_PE_INVALID))
break;
parent->type &= ~EEH_PE_INVALID;
parent = parent->parent;
}
eeh_edev_dbg(edev, "Added to existing PE (parent: PE#%x)\n",
pe->parent->addr);
} else {
/* Mark the PE as type of PCI bus */
pe->type = EEH_PE_BUS;
edev->pe = pe;
/* Put the edev to PE */
list_add_tail(&edev->entry, &pe->edevs);
eeh_edev_dbg(edev, "Added to bus PE\n");
}
return 0;
}
/* Create a new EEH PE */
if (edev->physfn)
pe = eeh_pe_alloc(hose, EEH_PE_VF);
else
pe = eeh_pe_alloc(hose, EEH_PE_DEVICE);
if (!pe) {
pr_err("%s: out of memory!\n", __func__);
return -ENOMEM;
}
pe->addr = edev->pe_config_addr;
/*
* Put the new EEH PE into hierarchy tree. If the parent
* can't be found, the newly created PE will be attached
* to PHB directly. Otherwise, we have to associate the
* PE with its parent.
*/
if (!new_pe_parent) {
new_pe_parent = eeh_phb_pe_get(hose);
if (!new_pe_parent) {
pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
__func__, hose->global_number);
edev->pe = NULL;
kfree(pe);
return -EEXIST;
}
}
/* link new PE into the tree */
pe->parent = new_pe_parent;
list_add_tail(&pe->child, &new_pe_parent->child_list);
/*
* Put the newly created PE into the child list and
* link the EEH device accordingly.
*/
list_add_tail(&edev->entry, &pe->edevs);
edev->pe = pe;
eeh_edev_dbg(edev, "Added to new (parent: PE#%x)\n",
new_pe_parent->addr);
return 0;
}
/**
* eeh_pe_tree_remove - Remove one EEH device from the associated PE
* @edev: EEH device
*
* The PE hierarchy tree might be changed when doing PCI hotplug.
* Also, the PCI devices or buses could be removed from the system
* during EEH recovery. So we have to call the function remove the
* corresponding PE accordingly if necessary.
*/
int eeh_pe_tree_remove(struct eeh_dev *edev)
{
struct eeh_pe *pe, *parent, *child;
bool keep, recover;
int cnt;
pe = eeh_dev_to_pe(edev);
if (!pe) {
eeh_edev_dbg(edev, "No PE found for device.\n");
return -EEXIST;
}
/* Remove the EEH device */
edev->pe = NULL;
list_del(&edev->entry);
/*
	 * Check whether the PE still has any EEH devices. If not,
	 * delete it, and keep walking up the tree deleting parent
	 * PEs that have no associated child PEs or EEH devices left.
*/
while (1) {
parent = pe->parent;
/* PHB PEs should never be removed */
if (pe->type & EEH_PE_PHB)
break;
/*
* XXX: KEEP is set while resetting a PE. I don't think it's
* ever set without RECOVERING also being set. I could
* be wrong though so catch that with a WARN.
*/
keep = !!(pe->state & EEH_PE_KEEP);
recover = !!(pe->state & EEH_PE_RECOVERING);
WARN_ON(keep && !recover);
if (!keep && !recover) {
if (list_empty(&pe->edevs) &&
list_empty(&pe->child_list)) {
list_del(&pe->child);
kfree(pe);
} else {
break;
}
} else {
/*
* Mark the PE as invalid. At the end of the recovery
* process any invalid PEs will be garbage collected.
*
* We need to delay the free()ing of them since we can
* remove edev's while traversing the PE tree which
* might trigger the removal of a PE and we can't
* deal with that (yet).
*/
if (list_empty(&pe->edevs)) {
cnt = 0;
list_for_each_entry(child, &pe->child_list, child) {
if (!(child->type & EEH_PE_INVALID)) {
cnt++;
break;
}
}
if (!cnt)
pe->type |= EEH_PE_INVALID;
else
break;
}
}
pe = parent;
}
return 0;
}
/**
* eeh_pe_update_time_stamp - Update PE's frozen time stamp
* @pe: EEH PE
*
 * Each PE carries a time stamp recording when it started accumulating
 * freezes. This function should be called on the first error of a PE:
 * it sets the stamp when the PE has no outstanding freezes, and once
 * more than an hour has passed since the recorded stamp it resets both
 * the stamp and the freeze count, so only errors from the last hour
 * are accounted for.
*/
void eeh_pe_update_time_stamp(struct eeh_pe *pe)
{
time64_t tstamp;
	if (!pe)
		return;
if (pe->freeze_count <= 0) {
pe->freeze_count = 0;
pe->tstamp = ktime_get_seconds();
} else {
tstamp = ktime_get_seconds();
if (tstamp - pe->tstamp > 3600) {
pe->tstamp = tstamp;
pe->freeze_count = 0;
}
}
}
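/*
 * Illustrative walk-through of the one-hour window above (hypothetical
 * times): a PE that first freezes at time T has its stamp set to T, and
 * callers are expected to bump freeze_count for each error. If this
 * function runs again at T + 1000s the stamp and count are left alone;
 * if it runs at T + 4000s (more than 3600s later) both are reset, so
 * only errors from the last hour remain accounted for.
 */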
/**
 * eeh_pe_state_mark - Mark specified state for PE and its associated devices
 * @root: EEH PE
 * @state: state bits to set
 *
 * An EEH error affects the current PE and its child PEs. This function
 * marks the given state on the affected PEs and their associated
 * devices, skipping PEs that have been permanently removed.
*/
void eeh_pe_state_mark(struct eeh_pe *root, int state)
{
struct eeh_pe *pe;
eeh_for_each_pe(root, pe)
if (!(pe->state & EEH_PE_REMOVED))
pe->state |= state;
}
EXPORT_SYMBOL_GPL(eeh_pe_state_mark);
/**
 * eeh_pe_mark_isolated - Mark a PE and its children as isolated
 * @root: EEH PE
 *
 * Record that a PE has been isolated by marking the PE and its children as
* EEH_PE_ISOLATED (and EEH_PE_CFG_BLOCKED, if required) and their PCI devices
* as pci_channel_io_frozen.
*/
void eeh_pe_mark_isolated(struct eeh_pe *root)
{
struct eeh_pe *pe;
struct eeh_dev *edev;
struct pci_dev *pdev;
eeh_pe_state_mark(root, EEH_PE_ISOLATED);
eeh_for_each_pe(root, pe) {
list_for_each_entry(edev, &pe->edevs, entry) {
pdev = eeh_dev_to_pci_dev(edev);
if (pdev)
pdev->error_state = pci_channel_io_frozen;
}
/* Block PCI config access if required */
if (pe->state & EEH_PE_CFG_RESTRICTED)
pe->state |= EEH_PE_CFG_BLOCKED;
}
}
EXPORT_SYMBOL_GPL(eeh_pe_mark_isolated);
static void __eeh_pe_dev_mode_mark(struct eeh_dev *edev, void *flag)
{
int mode = *((int *)flag);
edev->mode |= mode;
}
/**
 * eeh_pe_dev_mode_mark - Mark mode for all devices under the PE
 * @pe: EEH PE
 * @mode: mode bits to set
 *
 * Mark the specified mode bits on all EEH devices under the PE.
*/
void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode)
{
eeh_pe_dev_traverse(pe, __eeh_pe_dev_mode_mark, &mode);
}
/**
 * eeh_pe_state_clear - Clear state for the PE and its children
 * @root: EEH PE
 * @state: state bits to clear
 * @include_passed: also clear state on passed-through PEs?
 *
 * Clear the indicated state bits from the given PE and its children.
 * When the isolated state is cleared, the check count is also reset
 * and the affected PCI devices are returned to normal state.
*/
void eeh_pe_state_clear(struct eeh_pe *root, int state, bool include_passed)
{
struct eeh_pe *pe;
struct eeh_dev *edev, *tmp;
struct pci_dev *pdev;
eeh_for_each_pe(root, pe) {
/* Keep the state of permanently removed PE intact */
if (pe->state & EEH_PE_REMOVED)
continue;
if (!include_passed && eeh_pe_passed(pe))
continue;
pe->state &= ~state;
/*
* Special treatment on clearing isolated state. Clear
* check count since last isolation and put all affected
* devices to normal state.
*/
if (!(state & EEH_PE_ISOLATED))
continue;
pe->check_count = 0;
eeh_pe_for_each_dev(pe, edev, tmp) {
pdev = eeh_dev_to_pci_dev(edev);
if (!pdev)
continue;
pdev->error_state = pci_channel_io_normal;
}
/* Unblock PCI config access if required */
if (pe->state & EEH_PE_CFG_RESTRICTED)
pe->state &= ~EEH_PE_CFG_BLOCKED;
}
}
/*
* Some PCI bridges (e.g. PLX bridges) have primary/secondary
* buses assigned explicitly by firmware, and we probably have
* lost that after reset. So we have to delay the check until
* the PCI-CFG registers have been restored for the parent
* bridge.
*
 * Don't use the normal PCI-CFG accessors, which have probably been
 * blocked on the normal path at this stage. Use the EEH operations
 * instead, which are always permitted.
*/
static void eeh_bridge_check_link(struct eeh_dev *edev)
{
int cap;
uint32_t val;
int timeout = 0;
/*
	 * We only check root ports and the downstream ports of
	 * PCIe switches.
*/
if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
return;
eeh_edev_dbg(edev, "Checking PCIe link...\n");
/* Check slot status */
cap = edev->pcie_cap;
eeh_ops->read_config(edev, cap + PCI_EXP_SLTSTA, 2, &val);
if (!(val & PCI_EXP_SLTSTA_PDS)) {
eeh_edev_dbg(edev, "No card in the slot (0x%04x) !\n", val);
return;
}
/* Check power status if we have the capability */
eeh_ops->read_config(edev, cap + PCI_EXP_SLTCAP, 2, &val);
if (val & PCI_EXP_SLTCAP_PCP) {
eeh_ops->read_config(edev, cap + PCI_EXP_SLTCTL, 2, &val);
if (val & PCI_EXP_SLTCTL_PCC) {
eeh_edev_dbg(edev, "In power-off state, power it on ...\n");
val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
eeh_ops->write_config(edev, cap + PCI_EXP_SLTCTL, 2, val);
msleep(2 * 1000);
}
}
/* Enable link */
eeh_ops->read_config(edev, cap + PCI_EXP_LNKCTL, 2, &val);
val &= ~PCI_EXP_LNKCTL_LD;
eeh_ops->write_config(edev, cap + PCI_EXP_LNKCTL, 2, val);
/* Check link */
if (!edev->pdev->link_active_reporting) {
eeh_edev_dbg(edev, "No link reporting capability\n");
msleep(1000);
return;
}
	/* Wait for the link to come up, with a 5 second timeout */
timeout = 0;
while (timeout < 5000) {
msleep(20);
timeout += 20;
eeh_ops->read_config(edev, cap + PCI_EXP_LNKSTA, 2, &val);
if (val & PCI_EXP_LNKSTA_DLLLA)
break;
}
if (val & PCI_EXP_LNKSTA_DLLLA)
eeh_edev_dbg(edev, "Link up (%s)\n",
(val & PCI_EXP_LNKSTA_CLS_2_5GB) ? "2.5GB" : "5GB");
else
eeh_edev_dbg(edev, "Link not ready (0x%04x)\n", val);
}
#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
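/*
 * Worked example for the macros above: BYTE_SWAP() maps config-space byte
 * offset OFF to index 4*(OFF/4) + (3 - OFF%4) in the saved words viewed as
 * bytes, i.e. it reverses the byte order within each 32-bit word. For
 * instance, BYTE_SWAP(PCI_CACHE_LINE_SIZE) = BYTE_SWAP(0x0c) = 15 and
 * BYTE_SWAP(PCI_LATENCY_TIMER) = BYTE_SWAP(0x0d) = 14, which is where
 * SAVED_BYTE() then reads from edev->config_space[].
 */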
static void eeh_restore_bridge_bars(struct eeh_dev *edev)
{
int i;
/*
* Device BARs: 0x10 - 0x18
* Bus numbers and windows: 0x18 - 0x30
*/
for (i = 4; i < 13; i++)
eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]);
/* Rom: 0x38 */
eeh_ops->write_config(edev, 14*4, 4, edev->config_space[14]);
/* Cache line & Latency timer: 0xC 0xD */
eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1,
SAVED_BYTE(PCI_CACHE_LINE_SIZE));
eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1,
SAVED_BYTE(PCI_LATENCY_TIMER));
	/* Max latency, min grant, interrupt pin and line: 0x3C */
eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]);
/* PCI Command: 0x4 */
eeh_ops->write_config(edev, PCI_COMMAND, 4, edev->config_space[1] |
PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
/* Check the PCIe link is ready */
eeh_bridge_check_link(edev);
}
static void eeh_restore_device_bars(struct eeh_dev *edev)
{
int i;
u32 cmd;
for (i = 4; i < 10; i++)
eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]);
/* 12 == Expansion ROM Address */
eeh_ops->write_config(edev, 12*4, 4, edev->config_space[12]);
eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1,
SAVED_BYTE(PCI_CACHE_LINE_SIZE));
eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1,
SAVED_BYTE(PCI_LATENCY_TIMER));
/* max latency, min grant, interrupt pin and line */
eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]);
/*
* Restore PERR & SERR bits, some devices require it,
* don't touch the other command bits
*/
eeh_ops->read_config(edev, PCI_COMMAND, 4, &cmd);
if (edev->config_space[1] & PCI_COMMAND_PARITY)
cmd |= PCI_COMMAND_PARITY;
else
cmd &= ~PCI_COMMAND_PARITY;
if (edev->config_space[1] & PCI_COMMAND_SERR)
cmd |= PCI_COMMAND_SERR;
else
cmd &= ~PCI_COMMAND_SERR;
eeh_ops->write_config(edev, PCI_COMMAND, 4, cmd);
}
/**
* eeh_restore_one_device_bars - Restore the Base Address Registers for one device
 * @edev: EEH device
 * @flag: Unused
 *
 * Loads the PCI configuration space base address registers,
 * the expansion ROM base address, the latency timer, etc.
* from the saved values in the device node.
*/
static void eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag)
{
/* Do special restore for bridges */
if (edev->mode & EEH_DEV_BRIDGE)
eeh_restore_bridge_bars(edev);
else
eeh_restore_device_bars(edev);
if (eeh_ops->restore_config)
eeh_ops->restore_config(edev);
}
/**
* eeh_pe_restore_bars - Restore the PCI config space info
* @pe: EEH PE
*
 * Restore the saved PCI config space for every device in the PE,
 * walking the PE's children recursively as well.
*/
void eeh_pe_restore_bars(struct eeh_pe *pe)
{
/*
* We needn't take the EEH lock since eeh_pe_dev_traverse()
* will take that.
*/
eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
}
/**
* eeh_pe_loc_get - Retrieve location code binding to the given PE
* @pe: EEH PE
*
 * Retrieve the location code of the given PE. If the primary PE bus
 * is the root bus, the location code is taken from the PHB device
 * tree node or the root port. Otherwise, the device tree node of the
 * upstream bridge of the primary PE bus is checked for the location code.
*/
const char *eeh_pe_loc_get(struct eeh_pe *pe)
{
struct pci_bus *bus = eeh_pe_bus_get(pe);
struct device_node *dn;
const char *loc = NULL;
while (bus) {
dn = pci_bus_to_OF_node(bus);
if (!dn) {
bus = bus->parent;
continue;
}
if (pci_is_root_bus(bus))
loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
else
loc = of_get_property(dn, "ibm,slot-location-code",
NULL);
if (loc)
return loc;
bus = bus->parent;
}
return "N/A";
}
/**
* eeh_pe_bus_get - Retrieve PCI bus according to the given PE
* @pe: EEH PE
*
 * Retrieve the PCI bus according to the given PE. Basically, there
 * are three types of PEs: PHB/Bus/Device. For a PHB PE, the primary
 * PCI bus is retrieved, while the parent bus is returned for a BUS
 * PE. However, there is no PCI bus associated with a DEVICE PE.
*/
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
{
struct eeh_dev *edev;
struct pci_dev *pdev;
if (pe->type & EEH_PE_PHB)
return pe->phb->bus;
/* The primary bus might be cached during probe time */
if (pe->state & EEH_PE_PRI_BUS)
return pe->bus;
/* Retrieve the parent PCI bus of first (top) PCI device */
edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
pdev = eeh_dev_to_pci_dev(edev);
if (pdev)
return pdev->bus;
return NULL;
}
| linux-master | arch/powerpc/kernel/eeh_pe.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* align.c - handle alignment exceptions for the Power PC.
*
* Copyright (c) 1996 Paul Mackerras <[email protected]>
* Copyright (c) 1998-1999 TiVo, Inc.
* PowerPC 403GCX modifications.
* Copyright (c) 1999 Grant Erickson <[email protected]>
* PowerPC 403GCX/405GP modifications.
* Copyright (c) 2001-2002 PPC64 team, IBM Corp
* 64-bit and Power4 support
* Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp
* <[email protected]>
* Merge ppc32 and ppc64 implementations
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>
#include <asm/disassemble.h>
#include <asm/cpu_has_feature.h>
#include <asm/sstep.h>
#include <asm/inst.h>
struct aligninfo {
unsigned char len;
unsigned char flags;
};
#define INVALID { 0, 0 }
/* Bits in the flags field */
#define LD 0 /* load */
#define ST 1 /* store */
#define SE 2 /* sign-extend value, or FP ld/st as word */
#define SW 0x20 /* byte swap */
#define E4 0x40 /* SPE endianness is word */
#define E8 0x80 /* SPE endianness is double word */
#ifdef CONFIG_SPE
static struct aligninfo spe_aligninfo[32] = {
{ 8, LD+E8 }, /* 0 00 00: evldd[x] */
{ 8, LD+E4 }, /* 0 00 01: evldw[x] */
{ 8, LD }, /* 0 00 10: evldh[x] */
INVALID, /* 0 00 11 */
{ 2, LD }, /* 0 01 00: evlhhesplat[x] */
INVALID, /* 0 01 01 */
{ 2, LD }, /* 0 01 10: evlhhousplat[x] */
{ 2, LD+SE }, /* 0 01 11: evlhhossplat[x] */
{ 4, LD }, /* 0 10 00: evlwhe[x] */
INVALID, /* 0 10 01 */
{ 4, LD }, /* 0 10 10: evlwhou[x] */
{ 4, LD+SE }, /* 0 10 11: evlwhos[x] */
{ 4, LD+E4 }, /* 0 11 00: evlwwsplat[x] */
INVALID, /* 0 11 01 */
{ 4, LD }, /* 0 11 10: evlwhsplat[x] */
INVALID, /* 0 11 11 */
{ 8, ST+E8 }, /* 1 00 00: evstdd[x] */
{ 8, ST+E4 }, /* 1 00 01: evstdw[x] */
{ 8, ST }, /* 1 00 10: evstdh[x] */
INVALID, /* 1 00 11 */
INVALID, /* 1 01 00 */
INVALID, /* 1 01 01 */
INVALID, /* 1 01 10 */
INVALID, /* 1 01 11 */
{ 4, ST }, /* 1 10 00: evstwhe[x] */
INVALID, /* 1 10 01 */
{ 4, ST }, /* 1 10 10: evstwho[x] */
INVALID, /* 1 10 11 */
{ 4, ST+E4 }, /* 1 11 00: evstwwe[x] */
INVALID, /* 1 11 01 */
{ 4, ST+E4 }, /* 1 11 10: evstwwo[x] */
INVALID, /* 1 11 11 */
};
#define EVLDD 0x00
#define EVLDW 0x01
#define EVLDH 0x02
#define EVLHHESPLAT 0x04
#define EVLHHOUSPLAT 0x06
#define EVLHHOSSPLAT 0x07
#define EVLWHE 0x08
#define EVLWHOU 0x0A
#define EVLWHOS 0x0B
#define EVLWWSPLAT 0x0C
#define EVLWHSPLAT 0x0E
#define EVSTDD 0x10
#define EVSTDW 0x11
#define EVSTDH 0x12
#define EVSTWHE 0x18
#define EVSTWHO 0x1A
#define EVSTWWE 0x1C
#define EVSTWWO 0x1E
/*
* Emulate SPE loads and stores.
* Only Book-E has these instructions, and it does true little-endian,
* so we don't need the address swizzling.
*/
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
ppc_inst_t ppc_instr)
{
union {
u64 ll;
u32 w[2];
u16 h[4];
u8 v[8];
} data, temp;
unsigned char __user *p, *addr;
unsigned long *evr = ¤t->thread.evr[reg];
unsigned int nb, flags, instr;
instr = ppc_inst_val(ppc_instr);
instr = (instr >> 1) & 0x1f;
/* DAR has the operand effective address */
addr = (unsigned char __user *)regs->dar;
nb = spe_aligninfo[instr].len;
flags = spe_aligninfo[instr].flags;
/* userland only */
if (unlikely(!user_mode(regs)))
return 0;
flush_spe_to_thread(current);
/* If we are loading, get the data from user space, else
* get it from register values
*/
if (flags & ST) {
data.ll = 0;
switch (instr) {
case EVSTDD:
case EVSTDW:
case EVSTDH:
data.w[0] = *evr;
data.w[1] = regs->gpr[reg];
break;
case EVSTWHE:
data.h[2] = *evr >> 16;
data.h[3] = regs->gpr[reg] >> 16;
break;
case EVSTWHO:
data.h[2] = *evr & 0xffff;
data.h[3] = regs->gpr[reg] & 0xffff;
break;
case EVSTWWE:
data.w[1] = *evr;
break;
case EVSTWWO:
data.w[1] = regs->gpr[reg];
break;
default:
return -EINVAL;
}
} else {
temp.ll = data.ll = 0;
p = addr;
if (!user_read_access_begin(addr, nb))
return -EFAULT;
switch (nb) {
case 8:
unsafe_get_user(temp.v[0], p++, Efault_read);
unsafe_get_user(temp.v[1], p++, Efault_read);
unsafe_get_user(temp.v[2], p++, Efault_read);
unsafe_get_user(temp.v[3], p++, Efault_read);
fallthrough;
case 4:
unsafe_get_user(temp.v[4], p++, Efault_read);
unsafe_get_user(temp.v[5], p++, Efault_read);
fallthrough;
case 2:
unsafe_get_user(temp.v[6], p++, Efault_read);
unsafe_get_user(temp.v[7], p++, Efault_read);
}
user_read_access_end();
switch (instr) {
case EVLDD:
case EVLDW:
case EVLDH:
data.ll = temp.ll;
break;
case EVLHHESPLAT:
data.h[0] = temp.h[3];
data.h[2] = temp.h[3];
break;
case EVLHHOUSPLAT:
case EVLHHOSSPLAT:
data.h[1] = temp.h[3];
data.h[3] = temp.h[3];
break;
case EVLWHE:
data.h[0] = temp.h[2];
data.h[2] = temp.h[3];
break;
case EVLWHOU:
case EVLWHOS:
data.h[1] = temp.h[2];
data.h[3] = temp.h[3];
break;
case EVLWWSPLAT:
data.w[0] = temp.w[1];
data.w[1] = temp.w[1];
break;
case EVLWHSPLAT:
data.h[0] = temp.h[2];
data.h[1] = temp.h[2];
data.h[2] = temp.h[3];
data.h[3] = temp.h[3];
break;
default:
return -EINVAL;
}
}
if (flags & SW) {
switch (flags & 0xf0) {
case E8:
data.ll = swab64(data.ll);
break;
case E4:
data.w[0] = swab32(data.w[0]);
data.w[1] = swab32(data.w[1]);
break;
		/* It's half-word endian */
default:
data.h[0] = swab16(data.h[0]);
data.h[1] = swab16(data.h[1]);
data.h[2] = swab16(data.h[2]);
data.h[3] = swab16(data.h[3]);
break;
}
}
if (flags & SE) {
data.w[0] = (s16)data.h[1];
data.w[1] = (s16)data.h[3];
}
/* Store result to memory or update registers */
if (flags & ST) {
p = addr;
if (!user_write_access_begin(addr, nb))
return -EFAULT;
switch (nb) {
case 8:
unsafe_put_user(data.v[0], p++, Efault_write);
unsafe_put_user(data.v[1], p++, Efault_write);
unsafe_put_user(data.v[2], p++, Efault_write);
unsafe_put_user(data.v[3], p++, Efault_write);
fallthrough;
case 4:
unsafe_put_user(data.v[4], p++, Efault_write);
unsafe_put_user(data.v[5], p++, Efault_write);
fallthrough;
case 2:
unsafe_put_user(data.v[6], p++, Efault_write);
unsafe_put_user(data.v[7], p++, Efault_write);
}
user_write_access_end();
} else {
*evr = data.w[0];
regs->gpr[reg] = data.w[1];
}
return 1;
Efault_read:
user_read_access_end();
return -EFAULT;
Efault_write:
user_write_access_end();
return -EFAULT;
}
#endif /* CONFIG_SPE */
/*
 * Called on alignment exception. Attempts to fix it up.
*
* Return 1 on success
* Return 0 if unable to handle the interrupt
* Return -EFAULT if data address is bad
* Other negative return values indicate that the instruction can't
* be emulated, and the process should be given a SIGBUS.
*/
int fix_alignment(struct pt_regs *regs)
{
ppc_inst_t instr;
struct instruction_op op;
int r, type;
if (is_kernel_addr(regs->nip))
r = copy_inst_from_kernel_nofault(&instr, (void *)regs->nip);
else
r = __get_user_instr(instr, (void __user *)regs->nip);
if (unlikely(r))
return -EFAULT;
if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
/* We don't handle PPC little-endian any more... */
if (cpu_has_feature(CPU_FTR_PPC_LE))
return -EIO;
instr = ppc_inst_swab(instr);
}
#ifdef CONFIG_SPE
if (ppc_inst_primary_opcode(instr) == 0x4) {
int reg = (ppc_inst_val(instr) >> 21) & 0x1f;
PPC_WARN_ALIGNMENT(spe, regs);
return emulate_spe(regs, reg, instr);
}
#endif
/*
* ISA 3.0 (such as P9) copy, copy_first, paste and paste_last alignment
* check.
*
* Send a SIGBUS to the process that caused the fault.
*
* We do not emulate these because paste may contain additional metadata
* when pasting to a co-processor. Furthermore, paste_last is the
* synchronisation point for preceding copy/paste sequences.
*/
if ((ppc_inst_val(instr) & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
return -EIO;
r = analyse_instr(&op, regs, instr);
if (r < 0)
return -EINVAL;
type = GETTYPE(op.type);
if (!OP_IS_LOAD_STORE(type)) {
if (op.type != CACHEOP + DCBZ)
return -EINVAL;
PPC_WARN_ALIGNMENT(dcbz, regs);
WARN_ON_ONCE(!user_mode(regs));
r = emulate_dcbz(op.ea, regs);
} else {
if (type == LARX || type == STCX)
return -EIO;
PPC_WARN_ALIGNMENT(unaligned, regs);
r = emulate_loadstore(regs, &op);
}
if (!r)
return 1;
return r;
}
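/*
 * Hypothetical sketch of how the return value is meant to be consumed by
 * the alignment exception handler (a sketch, not the actual handler):
 *
 *	int fixed = fix_alignment(regs);
 *
 *	if (fixed == 1)
 *		advance regs->nip past the emulated instruction
 *	else if (fixed == -EFAULT)
 *		treat it as a bad data address
 *	else
 *		send SIGBUS, the instruction was not emulated
 */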
| linux-master | arch/powerpc/kernel/align.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* c 2001 PPC 64 Team, IBM Corp
*/
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/numa.h>
#include <linux/pgtable.h>
#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
#include <asm/kexec.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
#include "setup.h"
#ifndef CONFIG_SMP
#define boot_cpuid 0
#endif
static void *__init alloc_paca_data(unsigned long size, unsigned long align,
unsigned long limit, int cpu)
{
void *ptr;
int nid;
/*
* boot_cpuid paca is allocated very early before cpu_to_node is up.
* Set bottom-up mode, because the boot CPU should be on node-0,
* which will put its paca in the right place.
*/
if (cpu == boot_cpuid) {
nid = NUMA_NO_NODE;
memblock_set_bottom_up(true);
} else {
nid = early_cpu_to_node(cpu);
}
ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
limit, nid);
if (!ptr)
panic("cannot allocate paca data");
if (cpu == boot_cpuid)
memblock_set_bottom_up(false);
return ptr;
}
#ifdef CONFIG_PPC_PSERIES
#define LPPACA_SIZE 0x400
static void *__init alloc_shared_lppaca(unsigned long size, unsigned long limit,
int cpu)
{
size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);
static unsigned long shared_lppaca_size;
static void *shared_lppaca;
void *ptr;
if (!shared_lppaca) {
memblock_set_bottom_up(true);
/*
* See Documentation/powerpc/ultravisor.rst for more details.
*
* UV/HV data sharing is in PAGE_SIZE granularity. In order to
* minimize the number of pages shared, align the allocation to
* PAGE_SIZE.
*/
shared_lppaca =
memblock_alloc_try_nid(shared_lppaca_total_size,
PAGE_SIZE, MEMBLOCK_LOW_LIMIT,
limit, NUMA_NO_NODE);
if (!shared_lppaca)
panic("cannot allocate shared data");
memblock_set_bottom_up(false);
uv_share_page(PHYS_PFN(__pa(shared_lppaca)),
shared_lppaca_total_size >> PAGE_SHIFT);
}
ptr = shared_lppaca + shared_lppaca_size;
shared_lppaca_size += size;
/*
* This is very early in boot, so no harm done if the kernel crashes at
* this point.
*/
BUG_ON(shared_lppaca_size > shared_lppaca_total_size);
return ptr;
}
/*
* See asm/lppaca.h for more detail.
*
 * lppaca structures must be 1kB in size, L1 cache line aligned, and
 * must not cross a 4kB boundary. A 1kB size and 1kB alignment will satisfy
* these requirements.
*/
static inline void init_lppaca(struct lppaca *lppaca)
{
BUILD_BUG_ON(sizeof(struct lppaca) != 640);
*lppaca = (struct lppaca) {
.desc = cpu_to_be32(0xd397d781), /* "LpPa" */
.size = cpu_to_be16(LPPACA_SIZE),
.fpregs_in_use = 1,
.slb_count = cpu_to_be16(64),
.vmxregs_in_use = 0,
.page_ins = 0, };
}
static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
{
struct lppaca *lp;
BUILD_BUG_ON(sizeof(struct lppaca) > LPPACA_SIZE);
if (early_cpu_has_feature(CPU_FTR_HVMODE))
return NULL;
if (is_secure_guest())
lp = alloc_shared_lppaca(LPPACA_SIZE, limit, cpu);
else
lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);
init_lppaca(lp);
return lp;
}
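/*
 * Why the 0x400 alignment above is sufficient: LPPACA_SIZE is 0x400, so a
 * 1kB-aligned lppaca occupies [A, A + 0x3ff] with A a multiple of 0x400.
 * Every 4kB boundary is itself a multiple of 0x400, so it can only land on
 * A and never strictly inside the block, which keeps the "must not cross a
 * 4kB boundary" requirement intact.
 */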
#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_PPC_64S_HASH_MMU
/*
 * 3 persistent SLBs are allocated here. The buffer will be zero
 * initially, hence the entries will all be invalid until we actually write them.
*
* If you make the number of persistent SLB entries dynamic, please also
* update PR KVM to flush and restore them accordingly.
*/
static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
{
struct slb_shadow *s;
if (cpu != boot_cpuid) {
/*
		 * The boot CPU gets here before early_radix_enabled()
		 * reflects the command line (e.g. disable_radix), so we
		 * always allocate for it and fix that up later in
		 * free_unused_pacas(). Other CPUs skip the allocation
		 * when radix is enabled.
*/
if (early_radix_enabled())
return NULL;
}
s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);
s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
s->buffer_length = cpu_to_be32(sizeof(*s));
return s;
}
#endif /* CONFIG_PPC_64S_HASH_MMU */
/* The Paca is an array with one entry per processor. Each contains an
* lppaca, which contains the information shared between the
* hypervisor and Linux.
* On systems with hardware multi-threading, there are two threads
* per processor. The Paca array must contain an entry for each thread.
 * The VPD areas give a maximum number of logical processors equal to
 * twice the maximum number of physical processors. The processor VPD
 * array needs one entry per physical processor (not thread).
*/
struct paca_struct **paca_ptrs __read_mostly;
EXPORT_SYMBOL(paca_ptrs);
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_PSERIES
new_paca->lppaca_ptr = NULL;
#endif
#ifdef CONFIG_PPC_BOOK3E_64
new_paca->kernel_pgd = swapper_pg_dir;
#endif
new_paca->lock_token = 0x8000;
new_paca->paca_index = cpu;
#ifndef CONFIG_PPC_KERNEL_PCREL
new_paca->kernel_toc = kernel_toc_addr();
#endif
new_paca->kernelbase = (unsigned long) _stext;
/* Only set MSR:IR/DR when MMU is initialized */
new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
new_paca->hw_cpu_id = 0xffff;
new_paca->kexec_state = KEXEC_STATE_NONE;
new_paca->__current = &init_task;
new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_64S_HASH_MMU
new_paca->slb_shadow_ptr = NULL;
#endif
#ifdef CONFIG_PPC_BOOK3E_64
/* For now -- if we have threads this will be adjusted later */
new_paca->tcd_ptr = &new_paca->tcd;
#endif
}
/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
/* Setup r13 */
local_paca = new_paca;
#ifdef CONFIG_PPC_BOOK3E_64
/* On Book3E, initialize the TLB miss exception frames */
mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
/*
* In HV mode, we setup both HPACA and PACA to avoid problems
* if we do a GET_PACA() before the feature fixups have been
* applied.
*
* Normally you should test against CPU_FTR_HVMODE, but CPU features
* are not yet set up when we first reach here.
*/
if (mfmsr() & MSR_HV)
mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
mtspr(SPRN_SPRG_PACA, local_paca);
}
static int __initdata paca_nr_cpu_ids;
static int __initdata paca_ptrs_size;
static int __initdata paca_struct_size;
void __init allocate_paca_ptrs(void)
{
paca_nr_cpu_ids = nr_cpu_ids;
paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
paca_ptrs = memblock_alloc_raw(paca_ptrs_size, SMP_CACHE_BYTES);
if (!paca_ptrs)
panic("Failed to allocate %d bytes for paca pointers\n",
paca_ptrs_size);
memset(paca_ptrs, 0x88, paca_ptrs_size);
}
void __init allocate_paca(int cpu)
{
u64 limit;
struct paca_struct *paca;
BUG_ON(cpu >= paca_nr_cpu_ids);
#ifdef CONFIG_PPC_BOOK3S_64
/*
* We access pacas in real mode, and cannot take SLB faults
* on them when in virtual mode, so allocate them accordingly.
*/
limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
limit = ppc64_rma_size;
#endif
paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
limit, cpu);
paca_ptrs[cpu] = paca;
initialise_paca(paca, cpu);
#ifdef CONFIG_PPC_PSERIES
paca->lppaca_ptr = new_lppaca(cpu, limit);
#endif
#ifdef CONFIG_PPC_64S_HASH_MMU
paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif
paca_struct_size += sizeof(struct paca_struct);
}
void __init free_unused_pacas(void)
{
int new_ptrs_size;
new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
if (new_ptrs_size < paca_ptrs_size)
memblock_phys_free(__pa(paca_ptrs) + new_ptrs_size,
paca_ptrs_size - new_ptrs_size);
paca_nr_cpu_ids = nr_cpu_ids;
paca_ptrs_size = new_ptrs_size;
#ifdef CONFIG_PPC_64S_HASH_MMU
if (early_radix_enabled()) {
/* Ugly fixup, see new_slb_shadow() */
memblock_phys_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
sizeof(struct slb_shadow));
paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
}
#endif
printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}
#ifdef CONFIG_PPC_64S_HASH_MMU
void copy_mm_to_paca(struct mm_struct *mm)
{
mm_context_t *context = &mm->context;
VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
LOW_SLICE_ARRAY_SZ);
memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
TASK_SLICE_ARRAY_SZ(context));
}
#endif /* CONFIG_PPC_64S_HASH_MMU */
| linux-master | arch/powerpc/kernel/paca.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Contains routines needed to support swiotlb for ppc.
*
* Copyright (C) 2009-2010 Freescale Semiconductor, Inc.
* Author: Becky Bruce
*/
#include <linux/memblock.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>
unsigned int ppc_swiotlb_enable;
unsigned int ppc_swiotlb_flags;
void __init swiotlb_detect_4g(void)
{
if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
ppc_swiotlb_enable = 1;
}
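/*
 * Example (hypothetical memory map): a machine whose last byte of RAM is
 * at 0x27fffffff (10GB) has memblock_end_of_DRAM() - 1 > 0xffffffff, so
 * ppc_swiotlb_enable is set and devices limited to 32-bit DMA can still be
 * bounced through swiotlb.
 */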
static int __init check_swiotlb_enabled(void)
{
if (ppc_swiotlb_enable)
swiotlb_print_info();
else
swiotlb_exit();
return 0;
}
subsys_initcall(check_swiotlb_enabled);
| linux-master | arch/powerpc/kernel/dma-swiotlb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Processor cache information made available to userspace via sysfs;
* intended to be compatible with x86 intel_cacheinfo implementation.
*
* Copyright 2008 IBM Corporation
* Author: Nathan Lynch
*/
#define pr_fmt(fmt) "cacheinfo: " fmt
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>
#include "cacheinfo.h"
/* per-cpu object for tracking:
* - a "cache" kobject for the top-level directory
* - a list of "index" objects representing the cpu's local cache hierarchy
*/
struct cache_dir {
struct kobject *kobj; /* bare (not embedded) kobject for cache
* directory */
struct cache_index_dir *index; /* list of index objects */
};
/* "index" object: each cpu's cache directory has an index
* subdirectory corresponding to a cache object associated with the
* cpu. This object's lifetime is managed via the embedded kobject.
*/
struct cache_index_dir {
struct kobject kobj;
struct cache_index_dir *next; /* next index in parent directory */
struct cache *cache;
};
/* Template for determining which OF properties to query for a given
* cache type */
struct cache_type_info {
const char *name;
const char *size_prop;
/* Allow for both [di]-cache-line-size and
* [di]-cache-block-size properties. According to the PowerPC
* Processor binding, -line-size should be provided if it
* differs from the cache block size (that which is operated
* on by cache instructions), so we look for -line-size first.
* See cache_get_line_size(). */
const char *line_size_props[2];
const char *nr_sets_prop;
};
/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA 3
static const struct cache_type_info cache_type_info[] = {
{
/* Embedded systems that use cache-size, cache-block-size,
* etc. for the Unified (typically L2) cache. */
.name = "Unified",
.size_prop = "cache-size",
.line_size_props = { "cache-line-size",
"cache-block-size", },
.nr_sets_prop = "cache-sets",
},
{
/* PowerPC Processor binding says the [di]-cache-*
* must be equal on unified caches, so just use
* d-cache properties. */
.name = "Unified",
.size_prop = "d-cache-size",
.line_size_props = { "d-cache-line-size",
"d-cache-block-size", },
.nr_sets_prop = "d-cache-sets",
},
{
.name = "Instruction",
.size_prop = "i-cache-size",
.line_size_props = { "i-cache-line-size",
"i-cache-block-size", },
.nr_sets_prop = "i-cache-sets",
},
{
.name = "Data",
.size_prop = "d-cache-size",
.line_size_props = { "d-cache-line-size",
"d-cache-block-size", },
.nr_sets_prop = "d-cache-sets",
},
};
/* Cache object: each instance of this corresponds to a distinct cache
* in the system. There are separate objects for Harvard caches: one
* each for instruction and data, and each refers to the same OF node.
* The refcount of the OF node is elevated for the lifetime of the
* cache object. A cache object is released when its shared_cpu_map
* is cleared (see cache_cpu_clear).
*
* A cache object is on two lists: an unsorted global list
* (cache_list) of cache objects; and a singly-linked list
* representing the local cache hierarchy, which is ordered by level
* (e.g. L1d -> L1i -> L2 -> L3).
*/
struct cache {
struct device_node *ofnode; /* OF node for this cache, may be cpu */
struct cpumask shared_cpu_map; /* online CPUs using this cache */
int type; /* split cache disambiguation */
int level; /* level not explicit in device tree */
int group_id; /* id of the group of threads that share this cache */
struct list_head list; /* global list of cache objects */
struct cache *next_local; /* next cache of >= level */
};
static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);
/* traversal/modification of this list occurs only at cpu hotplug time;
* access is serialized by cpu hotplug locking
*/
static LIST_HEAD(cache_list);
static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
return container_of(k, struct cache_index_dir, kobj);
}
static const char *cache_type_string(const struct cache *cache)
{
return cache_type_info[cache->type].name;
}
static void cache_init(struct cache *cache, int type, int level,
struct device_node *ofnode, int group_id)
{
cache->type = type;
cache->level = level;
cache->ofnode = of_node_get(ofnode);
cache->group_id = group_id;
INIT_LIST_HEAD(&cache->list);
list_add(&cache->list, &cache_list);
}
static struct cache *new_cache(int type, int level,
struct device_node *ofnode, int group_id)
{
struct cache *cache;
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (cache)
cache_init(cache, type, level, ofnode, group_id);
return cache;
}
static void release_cache_debugcheck(struct cache *cache)
{
struct cache *iter;
list_for_each_entry(iter, &cache_list, list)
WARN_ONCE(iter->next_local == cache,
"cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
iter->ofnode,
cache_type_string(iter),
cache->ofnode,
cache_type_string(cache));
}
static void release_cache(struct cache *cache)
{
if (!cache)
return;
pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
cache_type_string(cache), cache->ofnode);
release_cache_debugcheck(cache);
list_del(&cache->list);
of_node_put(cache->ofnode);
kfree(cache);
}
static void cache_cpu_set(struct cache *cache, int cpu)
{
struct cache *next = cache;
while (next) {
WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
"CPU %i already accounted in %pOFP(%s)\n",
cpu, next->ofnode,
cache_type_string(next));
cpumask_set_cpu(cpu, &next->shared_cpu_map);
next = next->next_local;
}
}
static int cache_size(const struct cache *cache, unsigned int *ret)
{
const char *propname;
const __be32 *cache_size;
propname = cache_type_info[cache->type].size_prop;
cache_size = of_get_property(cache->ofnode, propname, NULL);
if (!cache_size)
return -ENODEV;
*ret = of_read_number(cache_size, 1);
return 0;
}
static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
unsigned int size;
if (cache_size(cache, &size))
return -ENODEV;
*ret = size / 1024;
return 0;
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
const __be32 *line_size;
int i, lim;
lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);
for (i = 0; i < lim; i++) {
const char *propname;
propname = cache_type_info[cache->type].line_size_props[i];
line_size = of_get_property(cache->ofnode, propname, NULL);
if (line_size)
break;
}
if (!line_size)
return -ENODEV;
*ret = of_read_number(line_size, 1);
return 0;
}
static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
const char *propname;
const __be32 *nr_sets;
propname = cache_type_info[cache->type].nr_sets_prop;
nr_sets = of_get_property(cache->ofnode, propname, NULL);
if (!nr_sets)
return -ENODEV;
*ret = of_read_number(nr_sets, 1);
return 0;
}
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
unsigned int line_size;
unsigned int nr_sets;
unsigned int size;
if (cache_nr_sets(cache, &nr_sets))
goto err;
/* If the cache is fully associative, there is no need to
* check the other properties.
*/
if (nr_sets == 1) {
*ret = 0;
return 0;
}
if (cache_get_line_size(cache, &line_size))
goto err;
if (cache_size(cache, &size))
goto err;
if (!(nr_sets > 0 && size > 0 && line_size > 0))
goto err;
*ret = (size / nr_sets) / line_size;
return 0;
err:
return -ENODEV;
}
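/*
 * Worked example with hypothetical device-tree values: a cache with
 * d-cache-size = 32768, d-cache-sets = 128 and d-cache-block-size = 128
 * reports (32768 / 128) / 128 = 2, i.e. 2-way set associative. A fully
 * associative cache is expected to report nr_sets == 1, for which the
 * special value 0 is returned.
 */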
/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
struct cache *iter;
if (cache->type == CACHE_TYPE_UNIFIED ||
cache->type == CACHE_TYPE_UNIFIED_D)
return cache;
list_for_each_entry(iter, &cache_list, list)
if (iter->ofnode == cache->ofnode &&
iter->group_id == cache->group_id &&
iter->next_local == cache)
return iter;
return cache;
}
/* return the first cache on a local list matching node and thread-group id */
static struct cache *cache_lookup_by_node_group(const struct device_node *node,
int group_id)
{
struct cache *cache = NULL;
struct cache *iter;
list_for_each_entry(iter, &cache_list, list) {
if (iter->ofnode != node ||
iter->group_id != group_id)
continue;
cache = cache_find_first_sibling(iter);
break;
}
return cache;
}
static bool cache_node_is_unified(const struct device_node *np)
{
return of_get_property(np, "cache-unified", NULL);
}
/*
 * Unified caches can have two different sets of tags. Most embedded
 * systems use cache-size, etc. for the unified cache size, but Open Firmware
 * systems use d-cache-size, etc. Check on initialization for which type we
 * have, and return the appropriate structure type. Assume it's embedded if it
 * isn't Open Firmware. If there is yet a third type, then there will be missing entries
* in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
* to be extended further.
*/
static int cache_is_unified_d(const struct device_node *np)
{
return of_get_property(np,
cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}
static struct cache *cache_do_one_devnode_unified(struct device_node *node, int group_id,
int level)
{
pr_debug("creating L%d ucache for %pOFP\n", level, node);
return new_cache(cache_is_unified_d(node), level, node, group_id);
}
static struct cache *cache_do_one_devnode_split(struct device_node *node, int group_id,
int level)
{
struct cache *dcache, *icache;
pr_debug("creating L%d dcache and icache for %pOFP\n", level,
node);
dcache = new_cache(CACHE_TYPE_DATA, level, node, group_id);
icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node, group_id);
if (!dcache || !icache)
goto err;
dcache->next_local = icache;
return dcache;
err:
release_cache(dcache);
release_cache(icache);
return NULL;
}
static struct cache *cache_do_one_devnode(struct device_node *node, int group_id, int level)
{
struct cache *cache;
if (cache_node_is_unified(node))
cache = cache_do_one_devnode_unified(node, group_id, level);
else
cache = cache_do_one_devnode_split(node, group_id, level);
return cache;
}
static struct cache *cache_lookup_or_instantiate(struct device_node *node,
int group_id,
int level)
{
struct cache *cache;
cache = cache_lookup_by_node_group(node, group_id);
WARN_ONCE(cache && cache->level != level,
"cache level mismatch on lookup (got %d, expected %d)\n",
cache->level, level);
if (!cache)
cache = cache_do_one_devnode(node, group_id, level);
return cache;
}
static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
while (smaller->next_local) {
if (smaller->next_local == bigger)
return; /* already linked */
smaller = smaller->next_local;
}
smaller->next_local = bigger;
/*
* The cache->next_local list sorts by level ascending:
* L1d -> L1i -> L2 -> L3 ...
*/
WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
(smaller->level > 1 && bigger->level != smaller->level + 1),
"linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
}
static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
WARN_ONCE(cache->level != 1,
"instantiating cache chain from L%d %s cache for "
"%pOFP instead of an L1\n", cache->level,
cache_type_string(cache), cache->ofnode);
WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
"instantiating cache chain from node %pOFP of type '%s' "
"instead of a cpu node\n", cache->ofnode,
of_node_get_device_type(cache->ofnode));
}
/*
* If sub-groups of threads in a core containing @cpu_id share the
* L@level-cache (information obtained via "ibm,thread-groups"
* device-tree property), then we identify the group by the first
* thread-sibling in the group. We define this to be the group-id.
*
* In the absence of any thread-group information for L@level-cache,
* this function returns -1.
*/
static int get_group_id(unsigned int cpu_id, int level)
{
if (has_big_cores && level == 1)
return cpumask_first(per_cpu(thread_group_l1_cache_map,
cpu_id));
else if (thread_group_shares_l2 && level == 2)
return cpumask_first(per_cpu(thread_group_l2_cache_map,
cpu_id));
else if (thread_group_shares_l3 && level == 3)
return cpumask_first(per_cpu(thread_group_l3_cache_map,
cpu_id));
return -1;
}
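/*
 * Hypothetical example: if threads {0, 1, 2, 3} of a core are split by
 * "ibm,thread-groups" into two L2-sharing groups {0, 2} and {1, 3}, then
 * get_group_id(0, 2) and get_group_id(2, 2) both return 0 while
 * get_group_id(1, 2) and get_group_id(3, 2) return 1, so each group gets
 * its own cache object even though both share one device-tree node.
 */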
static void do_subsidiary_caches(struct cache *cache, unsigned int cpu_id)
{
struct device_node *subcache_node;
int level = cache->level;
do_subsidiary_caches_debugcheck(cache);
while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
struct cache *subcache;
int group_id;
level++;
group_id = get_group_id(cpu_id, level);
subcache = cache_lookup_or_instantiate(subcache_node, group_id, level);
of_node_put(subcache_node);
if (!subcache)
break;
link_cache_lists(cache, subcache);
cache = subcache;
}
}
static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
struct device_node *cpu_node;
struct cache *cpu_cache = NULL;
int group_id;
pr_debug("creating cache object(s) for CPU %i\n", cpu_id);
cpu_node = of_get_cpu_node(cpu_id, NULL);
WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
if (!cpu_node)
goto out;
group_id = get_group_id(cpu_id, 1);
cpu_cache = cache_lookup_or_instantiate(cpu_node, group_id, 1);
if (!cpu_cache)
goto out;
do_subsidiary_caches(cpu_cache, cpu_id);
cache_cpu_set(cpu_cache, cpu_id);
out:
of_node_put(cpu_node);
return cpu_cache;
}
static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
struct cache_dir *cache_dir;
struct device *dev;
struct kobject *kobj = NULL;
dev = get_cpu_device(cpu_id);
WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
if (!dev)
goto err;
kobj = kobject_create_and_add("cache", &dev->kobj);
if (!kobj)
goto err;
cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
if (!cache_dir)
goto err;
cache_dir->kobj = kobj;
WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);
per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;
return cache_dir;
err:
kobject_put(kobj);
return NULL;
}
static void cache_index_release(struct kobject *kobj)
{
struct cache_index_dir *index;
index = kobj_to_cache_index_dir(kobj);
pr_debug("freeing index directory for L%d %s cache\n",
index->cache->level, cache_type_string(index->cache));
kfree(index);
}
static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
struct kobj_attribute *kobj_attr;
kobj_attr = container_of(attr, struct kobj_attribute, attr);
return kobj_attr->show(k, kobj_attr, buf);
}
static struct cache *index_kobj_to_cache(struct kobject *k)
{
struct cache_index_dir *index;
index = kobj_to_cache_index_dir(k);
return index->cache;
}
static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
unsigned int size_kb;
struct cache *cache;
cache = index_kobj_to_cache(k);
if (cache_size_kb(cache, &size_kb))
return -ENODEV;
return sprintf(buf, "%uK\n", size_kb);
}
static struct kobj_attribute cache_size_attr =
__ATTR(size, 0444, size_show, NULL);
static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
unsigned int line_size;
struct cache *cache;
cache = index_kobj_to_cache(k);
if (cache_get_line_size(cache, &line_size))
return -ENODEV;
return sprintf(buf, "%u\n", line_size);
}
static struct kobj_attribute cache_line_size_attr =
__ATTR(coherency_line_size, 0444, line_size_show, NULL);
static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
unsigned int nr_sets;
struct cache *cache;
cache = index_kobj_to_cache(k);
if (cache_nr_sets(cache, &nr_sets))
return -ENODEV;
return sprintf(buf, "%u\n", nr_sets);
}
static struct kobj_attribute cache_nr_sets_attr =
__ATTR(number_of_sets, 0444, nr_sets_show, NULL);
static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
unsigned int associativity;
struct cache *cache;
cache = index_kobj_to_cache(k);
if (cache_associativity(cache, &associativity))
return -ENODEV;
return sprintf(buf, "%u\n", associativity);
}
static struct kobj_attribute cache_assoc_attr =
__ATTR(ways_of_associativity, 0444, associativity_show, NULL);
static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache *cache;
cache = index_kobj_to_cache(k);
return sprintf(buf, "%s\n", cache_type_string(cache));
}
static struct kobj_attribute cache_type_attr =
__ATTR(type, 0444, type_show, NULL);
static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_index_dir *index;
struct cache *cache;
index = kobj_to_cache_index_dir(k);
cache = index->cache;
return sprintf(buf, "%d\n", cache->level);
}
static struct kobj_attribute cache_level_attr =
__ATTR(level, 0444, level_show, NULL);
static ssize_t
show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
{
struct cache_index_dir *index;
struct cache *cache;
const struct cpumask *mask;
index = kobj_to_cache_index_dir(k);
cache = index->cache;
mask = &cache->shared_cpu_map;
return cpumap_print_to_pagebuf(list, buf, mask);
}
static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
return show_shared_cpumap(k, attr, buf, false);
}
static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
return show_shared_cpumap(k, attr, buf, true);
}
static struct kobj_attribute cache_shared_cpu_map_attr =
__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
static struct kobj_attribute cache_shared_cpu_list_attr =
__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
/* Attributes which should always be created -- the kobject/sysfs core
* does this automatically via kobj_type->default_groups. This is the
* minimum data required to uniquely identify a cache.
*/
static struct attribute *cache_index_default_attrs[] = {
&cache_type_attr.attr,
&cache_level_attr.attr,
&cache_shared_cpu_map_attr.attr,
&cache_shared_cpu_list_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(cache_index_default);
/* Attributes which should be created if the cache device node has the
* right properties -- see cacheinfo_create_index_opt_attrs
*/
static struct kobj_attribute *cache_index_opt_attrs[] = {
&cache_size_attr,
&cache_line_size_attr,
&cache_nr_sets_attr,
&cache_assoc_attr,
};
static const struct sysfs_ops cache_index_ops = {
.show = cache_index_show,
};
static struct kobj_type cache_index_type = {
.release = cache_index_release,
.sysfs_ops = &cache_index_ops,
.default_groups = cache_index_default_groups,
};
static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
const char *cache_type;
struct cache *cache;
char *buf;
int i;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return;
cache = dir->cache;
cache_type = cache_type_string(cache);
/* We don't want to create an attribute that can't provide a
* meaningful value. Check the return value of each optional
* attribute's ->show method before registering the
* attribute.
*/
for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
struct kobj_attribute *attr;
ssize_t rc;
attr = cache_index_opt_attrs[i];
rc = attr->show(&dir->kobj, attr, buf);
if (rc <= 0) {
pr_debug("not creating %s attribute for "
"%pOFP(%s) (rc = %zd)\n",
attr->attr.name, cache->ofnode,
cache_type, rc);
continue;
}
if (sysfs_create_file(&dir->kobj, &attr->attr))
pr_debug("could not create %s attribute for %pOFP(%s)\n",
attr->attr.name, cache->ofnode, cache_type);
}
kfree(buf);
}
static void cacheinfo_create_index_dir(struct cache *cache, int index,
struct cache_dir *cache_dir)
{
struct cache_index_dir *index_dir;
int rc;
index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
if (!index_dir)
return;
index_dir->cache = cache;
rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
cache_dir->kobj, "index%d", index);
if (rc) {
kobject_put(&index_dir->kobj);
return;
}
index_dir->next = cache_dir->index;
cache_dir->index = index_dir;
cacheinfo_create_index_opt_attrs(index_dir);
}
static void cacheinfo_sysfs_populate(unsigned int cpu_id,
struct cache *cache_list)
{
struct cache_dir *cache_dir;
struct cache *cache;
int index = 0;
cache_dir = cacheinfo_create_cache_dir(cpu_id);
if (!cache_dir)
return;
cache = cache_list;
while (cache) {
cacheinfo_create_index_dir(cache, index, cache_dir);
index++;
cache = cache->next_local;
}
}
void cacheinfo_cpu_online(unsigned int cpu_id)
{
struct cache *cache;
cache = cache_chain_instantiate(cpu_id);
if (!cache)
return;
cacheinfo_sysfs_populate(cpu_id, cache);
}
/* functions needed to remove cache entry for cpu offline or suspend/resume */
#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
defined(CONFIG_HOTPLUG_CPU)
static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
struct device_node *cpu_node;
struct cache *cache;
int group_id;
cpu_node = of_get_cpu_node(cpu_id, NULL);
WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
if (!cpu_node)
return NULL;
group_id = get_group_id(cpu_id, 1);
cache = cache_lookup_by_node_group(cpu_node, group_id);
of_node_put(cpu_node);
return cache;
}
static void remove_index_dirs(struct cache_dir *cache_dir)
{
struct cache_index_dir *index;
index = cache_dir->index;
while (index) {
struct cache_index_dir *next;
next = index->next;
kobject_put(&index->kobj);
index = next;
}
}
static void remove_cache_dir(struct cache_dir *cache_dir)
{
remove_index_dirs(cache_dir);
/* Remove cache dir from sysfs */
kobject_del(cache_dir->kobj);
kobject_put(cache_dir->kobj);
kfree(cache_dir);
}
static void cache_cpu_clear(struct cache *cache, int cpu)
{
while (cache) {
struct cache *next = cache->next_local;
WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
"CPU %i not accounted in %pOFP(%s)\n",
cpu, cache->ofnode,
cache_type_string(cache));
cpumask_clear_cpu(cpu, &cache->shared_cpu_map);
/* Release the cache object if all the cpus using it
* are offline */
if (cpumask_empty(&cache->shared_cpu_map))
release_cache(cache);
cache = next;
}
}
void cacheinfo_cpu_offline(unsigned int cpu_id)
{
struct cache_dir *cache_dir;
struct cache *cache;
/* Prevent userspace from seeing inconsistent state - remove
* the sysfs hierarchy first */
cache_dir = per_cpu(cache_dir_pcpu, cpu_id);
/* careful, sysfs population may have failed */
if (cache_dir)
remove_cache_dir(cache_dir);
per_cpu(cache_dir_pcpu, cpu_id) = NULL;
/* clear the CPU's bit in its cache chain, possibly freeing
* cache objects */
cache = cache_lookup_by_cpu(cpu_id);
if (cache)
cache_cpu_clear(cache, cpu_id);
}
void cacheinfo_teardown(void)
{
unsigned int cpu;
lockdep_assert_cpus_held();
for_each_online_cpu(cpu)
cacheinfo_cpu_offline(cpu);
}
void cacheinfo_rebuild(void)
{
unsigned int cpu;
lockdep_assert_cpus_held();
for_each_online_cpu(cpu)
cacheinfo_cpu_online(cpu);
}
#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */
| linux-master | arch/powerpc/kernel/cacheinfo.c |
// SPDX-License-Identifier: GPL-2.0
#undef DEBUG
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/etherdevice.h>
#include <linux/of_address.h>
#include <asm/prom.h>
void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
unsigned long *busno, unsigned long *phys,
unsigned long *size)
{
u32 cells;
const __be32 *prop;
/* busno is always one cell */
*busno = of_read_number(dma_window, 1);
dma_window++;
prop = of_get_property(dn, "ibm,#dma-address-cells", NULL);
if (!prop)
prop = of_get_property(dn, "#address-cells", NULL);
cells = prop ? of_read_number(prop, 1) : of_n_addr_cells(dn);
*phys = of_read_number(dma_window, cells);
dma_window += cells;
prop = of_get_property(dn, "ibm,#dma-size-cells", NULL);
cells = prop ? of_read_number(prop, 1) : of_n_size_cells(dn);
*size = of_read_number(dma_window, cells);
}
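/*
 * Hypothetical example of the property layout this parser expects: with
 * ibm,#dma-address-cells = <2> and ibm,#dma-size-cells = <2>, a window of
 * <0x80000003 0x0 0x0 0x0 0x80000000> decodes as busno = 0x80000003,
 * phys = 0x0 and size = 0x80000000 (a 2GB DMA window).
 */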
| linux-master | arch/powerpc/kernel/prom_parse.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Watchdog support on powerpc systems.
*
* Copyright 2017, IBM Corporation.
*
* This uses code from arch/sparc/kernel/nmi.c and kernel/watchdog.c
*/
#define pr_fmt(fmt) "watchdog: " fmt
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/hardirq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
#include <linux/processor.h>
#include <linux/smp.h>
#include <asm/interrupt.h>
#include <asm/paca.h>
#include <asm/nmi.h>
/*
* The powerpc watchdog ensures that each CPU is able to service timers.
* The watchdog sets up a simple timer on each CPU to run once per timer
* period, and updates a per-cpu timestamp and a "pending" cpumask. This is
* the heartbeat.
*
* Then there are two systems to check that the heartbeat is still running.
* The local soft-NMI, and the SMP checker.
*
* The soft-NMI checker can detect lockups on the local CPU. When interrupts
* are disabled with local_irq_disable(), platforms that use soft-masking
* can leave hardware interrupts enabled and handle them with a masked
* interrupt handler. The masked handler can send the timer interrupt to the
* watchdog's soft_nmi_interrupt(), which appears to Linux as an NMI
* interrupt, and can be used to detect CPUs stuck with IRQs disabled.
*
* The soft-NMI checker will compare the heartbeat timestamp for this CPU
* with the current time, and take action if the difference exceeds the
* watchdog threshold.
*
* The limitation of the soft-NMI watchdog is that it does not work when
* interrupts are hard disabled or otherwise not being serviced. This is
* solved by also having a SMP watchdog where all CPUs check all other
* CPUs heartbeat.
*
* The SMP checker can detect lockups on other CPUs. A global "pending"
* cpumask is kept, containing all CPUs which enable the watchdog. Each
* CPU clears their pending bit in their heartbeat timer. When the bitmask
* becomes empty, the last CPU to clear its pending bit updates a global
* timestamp and refills the pending bitmask.
*
* In the heartbeat timer, if any CPU notices that the global timestamp has
* not been updated for a period exceeding the watchdog threshold, then it
* means the CPU(s) with their bit still set in the pending mask have had
* their heartbeat stop, and action is taken.
*
* Some platforms implement true NMI IPIs, which can be used by the SMP
* watchdog to detect an unresponsive CPU and pull it out of its stuck
* state with the NMI IPI, to get crash/debug data from it. This way the
* SMP watchdog can detect hardware interrupts off lockups.
*/
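/*
 * Illustrative sequence for the SMP checker described above: if CPU 2
 * hard-locks with interrupts off, its heartbeat timer stops firing, its
 * bit is never cleared from wd_smp_cpus_pending, and the mask never
 * empties, so wd_smp_last_reset_tb stops advancing. Once another CPU's
 * heartbeat notices that timestamp is older than wd_smp_panic_timeout_tb,
 * it marks CPU 2 stuck, reports the hard lockup, and (where NMI IPIs are
 * available) pulls a backtrace out of the stuck CPU.
 */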
static cpumask_t wd_cpus_enabled __read_mostly;
static u64 wd_panic_timeout_tb __read_mostly; /* timebase ticks until panic */
static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */
static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */
static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer);
static DEFINE_PER_CPU(u64, wd_timer_tb);
/* SMP checker bits */
static unsigned long __wd_smp_lock;
static unsigned long __wd_reporting;
static unsigned long __wd_nmi_output;
static cpumask_t wd_smp_cpus_pending;
static cpumask_t wd_smp_cpus_stuck;
static u64 wd_smp_last_reset_tb;
#ifdef CONFIG_PPC_PSERIES
static u64 wd_timeout_pct;
#endif
/*
* Try to take the exclusive watchdog action / NMI IPI / printing lock.
* wd_smp_lock must be held. If this fails, we should return and wait
* for the watchdog to kick in again (or another CPU to trigger it).
*
* Importantly, if hardlockup_panic is set, wd_try_report failure should
* not delay the panic, because whichever other CPU is reporting will
* call panic.
*/
static bool wd_try_report(void)
{
if (__wd_reporting)
return false;
__wd_reporting = 1;
return true;
}
/* End printing after successful wd_try_report. wd_smp_lock not required. */
static void wd_end_reporting(void)
{
smp_mb(); /* End printing "critical section" */
WARN_ON_ONCE(__wd_reporting == 0);
WRITE_ONCE(__wd_reporting, 0);
}
static inline void wd_smp_lock(unsigned long *flags)
{
/*
* Avoid locking layers if possible.
* This may be called from low level interrupt handlers at some
* point in future.
*/
raw_local_irq_save(*flags);
hard_irq_disable(); /* Make it soft-NMI safe */
while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
raw_local_irq_restore(*flags);
spin_until_cond(!test_bit(0, &__wd_smp_lock));
raw_local_irq_save(*flags);
hard_irq_disable();
}
}
static inline void wd_smp_unlock(unsigned long *flags)
{
clear_bit_unlock(0, &__wd_smp_lock);
raw_local_irq_restore(*flags);
}
static void wd_lockup_ipi(struct pt_regs *regs)
{
int cpu = raw_smp_processor_id();
u64 tb = get_tb();
pr_emerg("CPU %d Hard LOCKUP\n", cpu);
pr_emerg("CPU %d TB:%lld, last heartbeat TB:%lld (%lldms ago)\n",
cpu, tb, per_cpu(wd_timer_tb, cpu),
tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
print_modules();
print_irqtrace_events(current);
if (regs)
show_regs(regs);
else
dump_stack();
/*
* __wd_nmi_output must be set after we printk from NMI context.
*
* printk from NMI context defers printing to the console to irq_work.
* If that NMI was taken in some code that is hard-locked, then irqs
* are disabled so irq_work will never fire. That can result in the
* hard lockup messages being delayed (indefinitely, until something
* else kicks the console drivers).
*
* Setting __wd_nmi_output will cause another CPU to notice and kick
* the console drivers for us.
*
* xchg is not needed here (it could be a smp_mb and store), but xchg
* gives the memory ordering and atomicity required.
*/
xchg(&__wd_nmi_output, 1);
/* Do not panic from here because that can recurse into NMI IPI layer */
}
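/*
 * Added cross-reference (illustrative, not part of the original file):
 * the consumer side of __wd_nmi_output lives in watchdog_timer_interrupt()
 * below, roughly
 *
 *	if (__wd_nmi_output && xchg(&__wd_nmi_output, 0))
 *		printk_trigger_flush();
 *
 * so any CPU whose heartbeat timer is still running flushes the printk
 * output that this NMI-context path could not push to the console itself.
 */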
static bool set_cpu_stuck(int cpu)
{
cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
/*
* See wd_smp_clear_cpu_pending()
*/
smp_mb();
if (cpumask_empty(&wd_smp_cpus_pending)) {
wd_smp_last_reset_tb = get_tb();
cpumask_andnot(&wd_smp_cpus_pending,
&wd_cpus_enabled,
&wd_smp_cpus_stuck);
return true;
}
return false;
}
static void watchdog_smp_panic(int cpu)
{
static cpumask_t wd_smp_cpus_ipi; // protected by reporting
unsigned long flags;
u64 tb, last_reset;
int c;
wd_smp_lock(&flags);
/* Double check some things under lock */
tb = get_tb();
last_reset = wd_smp_last_reset_tb;
if ((s64)(tb - last_reset) < (s64)wd_smp_panic_timeout_tb)
goto out;
if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
goto out;
if (!wd_try_report())
goto out;
for_each_online_cpu(c) {
if (!cpumask_test_cpu(c, &wd_smp_cpus_pending))
continue;
if (c == cpu)
continue; // should not happen
__cpumask_set_cpu(c, &wd_smp_cpus_ipi);
if (set_cpu_stuck(c))
break;
}
if (cpumask_empty(&wd_smp_cpus_ipi)) {
wd_end_reporting();
goto out;
}
wd_smp_unlock(&flags);
pr_emerg("CPU %d detected hard LOCKUP on other CPUs %*pbl\n",
cpu, cpumask_pr_args(&wd_smp_cpus_ipi));
pr_emerg("CPU %d TB:%lld, last SMP heartbeat TB:%lld (%lldms ago)\n",
cpu, tb, last_reset, tb_to_ns(tb - last_reset) / 1000000);
if (!sysctl_hardlockup_all_cpu_backtrace) {
/*
* Try to trigger the stuck CPUs, unless we are going to
* get a backtrace on all of them anyway.
*/
for_each_cpu(c, &wd_smp_cpus_ipi) {
smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
__cpumask_clear_cpu(c, &wd_smp_cpus_ipi);
}
} else {
trigger_allbutcpu_cpu_backtrace(cpu);
cpumask_clear(&wd_smp_cpus_ipi);
}
if (hardlockup_panic)
nmi_panic(NULL, "Hard LOCKUP");
wd_end_reporting();
return;
out:
wd_smp_unlock(&flags);
}
static void wd_smp_clear_cpu_pending(int cpu)
{
if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
struct pt_regs *regs = get_irq_regs();
unsigned long flags;
pr_emerg("CPU %d became unstuck TB:%lld\n",
cpu, get_tb());
print_irqtrace_events(current);
if (regs)
show_regs(regs);
else
dump_stack();
wd_smp_lock(&flags);
cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
wd_smp_unlock(&flags);
} else {
/*
* The last CPU to clear pending should have reset the
* watchdog so we generally should not find it empty
* here if our CPU was clear. However it could happen
* due to a rare race with another CPU taking the
* last CPU out of the mask concurrently.
*
* We can't add a warning for it. But just in case
* there is a problem with the watchdog that is causing
* the mask to not be reset, try to kick it along here.
*/
if (unlikely(cpumask_empty(&wd_smp_cpus_pending)))
goto none_pending;
}
return;
}
/*
* All other updates to wd_smp_cpus_pending are performed under
* wd_smp_lock. All of them are atomic except the case where the
* mask becomes empty and is reset. This will not happen here because
* cpu was tested to be in the bitmap (above), and a CPU only clears
* its own bit. _Except_ in the case where another CPU has detected a
* hard lockup on our CPU and takes us out of the pending mask. So in
* normal operation there will be no race here, no problem.
*
* In the lockup case, this atomic clear-bit vs a store that refills
* other bits in the accessed word will not be a problem. The bit clear
* is atomic so it will not cause the store to get lost, and the store
* will never set this bit so it will not overwrite the bit clear. The
* only way for a stuck CPU to return to the pending bitmap is to
* become unstuck itself.
*/
cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
/*
* Order the store to clear pending with the load(s) to check all
* words in the pending mask to check they are all empty. This orders
* with the same barrier on another CPU. This prevents two CPUs
* clearing the last 2 pending bits, but neither seeing the other's
* store when checking if the mask is empty, and missing an empty
* mask, which ends with a false positive.
*/
smp_mb();
if (cpumask_empty(&wd_smp_cpus_pending)) {
unsigned long flags;
none_pending:
/*
* Double check under lock because more than one CPU could see
* a clear mask with the lockless check after clearing their
* pending bits.
*/
wd_smp_lock(&flags);
if (cpumask_empty(&wd_smp_cpus_pending)) {
wd_smp_last_reset_tb = get_tb();
cpumask_andnot(&wd_smp_cpus_pending,
&wd_cpus_enabled,
&wd_smp_cpus_stuck);
}
wd_smp_unlock(&flags);
}
}
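/*
 * Illustrative interleaving for the smp_mb() pairing above (an added
 * sketch, not part of the original file). Suppose CPUs A and B hold the
 * last two pending bits:
 *
 *	CPU A				CPU B
 *	clear bit A			clear bit B
 *	smp_mb()			smp_mb()
 *	re-check mask			re-check mask
 *
 * Without the barriers, each CPU could check the mask before the other's
 * clear became visible, so neither would see it empty, the mask would not
 * be refilled and wd_smp_last_reset_tb would not advance, eventually
 * producing a false positive in watchdog_smp_panic().
 */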
static void watchdog_timer_interrupt(int cpu)
{
u64 tb = get_tb();
per_cpu(wd_timer_tb, cpu) = tb;
wd_smp_clear_cpu_pending(cpu);
if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
watchdog_smp_panic(cpu);
if (__wd_nmi_output && xchg(&__wd_nmi_output, 0)) {
/*
* Something has called printk from NMI context. It might be
* stuck, so this triggers a flush that will get that
* printk output to the console.
*
* See wd_lockup_ipi.
*/
printk_trigger_flush();
}
}
DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
{
unsigned long flags;
int cpu = raw_smp_processor_id();
u64 tb;
/* should only arrive from kernel, with irqs disabled */
WARN_ON_ONCE(!arch_irq_disabled_regs(regs));
if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
return 0;
__this_cpu_inc(irq_stat.soft_nmi_irqs);
tb = get_tb();
if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
/*
* Taking wd_smp_lock here means it is a soft-NMI lock, which
* means we can't take any regular or irqsafe spin locks while
* holding this lock. This is why timers can't printk while
* holding the lock.
*/
wd_smp_lock(&flags);
if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
wd_smp_unlock(&flags);
return 0;
}
if (!wd_try_report()) {
wd_smp_unlock(&flags);
/* Couldn't report, try again in 100ms */
mtspr(SPRN_DEC, 100 * tb_ticks_per_usec * 1000);
return 0;
}
set_cpu_stuck(cpu);
wd_smp_unlock(&flags);
pr_emerg("CPU %d self-detected hard LOCKUP @ %pS\n",
cpu, (void *)regs->nip);
pr_emerg("CPU %d TB:%lld, last heartbeat TB:%lld (%lldms ago)\n",
cpu, tb, per_cpu(wd_timer_tb, cpu),
tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
print_modules();
print_irqtrace_events(current);
show_regs(regs);
xchg(&__wd_nmi_output, 1); // see wd_lockup_ipi
if (sysctl_hardlockup_all_cpu_backtrace)
trigger_allbutcpu_cpu_backtrace(cpu);
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
wd_end_reporting();
}
/*
* We are okay to change DEC in soft_nmi_interrupt because the masked
* handler has marked a DEC as pending, so the timer interrupt will be
* replayed as soon as local irqs are enabled again.
*/
if (wd_panic_timeout_tb < 0x7fffffff)
mtspr(SPRN_DEC, wd_panic_timeout_tb);
return 0;
}
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
int cpu = smp_processor_id();
if (!(watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED))
return HRTIMER_NORESTART;
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
return HRTIMER_NORESTART;
watchdog_timer_interrupt(cpu);
hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms));
return HRTIMER_RESTART;
}
void arch_touch_nmi_watchdog(void)
{
unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
int cpu = smp_processor_id();
u64 tb;
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
return;
tb = get_tb();
if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
per_cpu(wd_timer_tb, cpu) = tb;
wd_smp_clear_cpu_pending(cpu);
}
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
static void start_watchdog(void *arg)
{
struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
int cpu = smp_processor_id();
unsigned long flags;
if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
WARN_ON(1);
return;
}
if (!(watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED))
return;
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
return;
wd_smp_lock(&flags);
cpumask_set_cpu(cpu, &wd_cpus_enabled);
if (cpumask_weight(&wd_cpus_enabled) == 1) {
cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
wd_smp_last_reset_tb = get_tb();
}
wd_smp_unlock(&flags);
*this_cpu_ptr(&wd_timer_tb) = get_tb();
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
HRTIMER_MODE_REL_PINNED);
}
static int start_watchdog_on_cpu(unsigned int cpu)
{
return smp_call_function_single(cpu, start_watchdog, NULL, true);
}
static void stop_watchdog(void *arg)
{
struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
int cpu = smp_processor_id();
unsigned long flags;
if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
return; /* Can happen in CPU unplug case */
hrtimer_cancel(hrtimer);
wd_smp_lock(&flags);
cpumask_clear_cpu(cpu, &wd_cpus_enabled);
wd_smp_unlock(&flags);
wd_smp_clear_cpu_pending(cpu);
}
static int stop_watchdog_on_cpu(unsigned int cpu)
{
return smp_call_function_single(cpu, stop_watchdog, NULL, true);
}
static void watchdog_calc_timeouts(void)
{
u64 threshold = watchdog_thresh;
#ifdef CONFIG_PPC_PSERIES
threshold += (READ_ONCE(wd_timeout_pct) * threshold) / 100;
#endif
wd_panic_timeout_tb = threshold * ppc_tb_freq;
/* Have the SMP detector trigger a bit later */
wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2;
/* 2/5 is the factor that the perf based detector uses */
wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
}
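/*
 * Worked example (added for illustration, not part of the original file):
 * with watchdog_thresh set to 10 seconds (the usual default) and no
 * pseries timeout factor, wd_panic_timeout_tb covers 10 s of timebase
 * ticks, wd_smp_panic_timeout_tb covers 15 s (3/2 of that), and the
 * per-CPU heartbeat hrtimer fires every 10 * 1000 * 2 / 5 = 4000 ms.
 */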
void watchdog_hardlockup_stop(void)
{
int cpu;
for_each_cpu(cpu, &wd_cpus_enabled)
stop_watchdog_on_cpu(cpu);
}
void watchdog_hardlockup_start(void)
{
int cpu;
watchdog_calc_timeouts();
for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
start_watchdog_on_cpu(cpu);
}
/*
* Invoked from core watchdog init.
*/
int __init watchdog_hardlockup_probe(void)
{
int err;
err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"powerpc/watchdog:online",
start_watchdog_on_cpu,
stop_watchdog_on_cpu);
if (err < 0) {
pr_warn("could not be initialized");
return err;
}
return 0;
}
#ifdef CONFIG_PPC_PSERIES
void watchdog_hardlockup_set_timeout_pct(u64 pct)
{
pr_info("Set the NMI watchdog timeout factor to %llu%%\n", pct);
WRITE_ONCE(wd_timeout_pct, pct);
lockup_detector_reconfigure();
}
#endif
| linux-master | arch/powerpc/kernel/watchdog.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Early init before relocation
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/setup.h>
#include <asm/sections.h>
/*
* We're called here very early in the boot.
*
* Note that the kernel may be running at an address which is different
* from the address that it was linked at, so we must use RELOC/PTRRELOC
* to access static data (including strings). -- paulus
*/
notrace unsigned long __init early_init(unsigned long dt_ptr)
{
unsigned long kva, offset = reloc_offset();
kva = *PTRRELOC(&kernstart_virt_addr);
/* First zero the BSS */
if (kva == KERNELBASE)
memset(PTRRELOC(&__bss_start), 0, __bss_stop - __bss_start);
/*
* Identify the CPU type and fix up code sections
* that depend on which cpu we have.
*/
identify_cpu(offset, mfspr(SPRN_PVR));
apply_feature_fixups();
return kva + offset;
}
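/*
 * Added note (illustrative, not part of the original file): reloc_offset()
 * is the difference between the address the kernel is executing at and the
 * address it was linked at, and PTRRELOC() applies that offset to a pointer,
 * conceptually something like
 *
 *	#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) + offset))
 *
 * so that static data such as kernstart_virt_addr and the BSS bounds can be
 * reached before the kernel runs at its linked address. The macro body shown
 * here is only a sketch of the idea, not the exact definition.
 */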
| linux-master | arch/powerpc/kernel/early_32.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Dynamic Ftrace based Kprobes Optimization
*
* Copyright (C) Hitachi Ltd., 2012
* Copyright 2016 Naveen N. Rao <[email protected]>
* IBM Corporation
*/
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/ftrace.h>
/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
struct pt_regs *regs;
int bit;
bit = ftrace_test_recursion_trylock(nip, parent_nip);
if (bit < 0)
return;
regs = ftrace_get_regs(fregs);
p = get_kprobe((kprobe_opcode_t *)nip);
if (unlikely(!p) || kprobe_disabled(p))
goto out;
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
/*
* On powerpc, NIP is *before* this instruction for the
* pre handler
*/
regs_add_return_ip(regs, -MCOUNT_INSN_SIZE);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (!p->pre_handler || !p->pre_handler(p, regs)) {
/*
* Emulate singlestep (and also recover regs->nip)
* as if there is a nop
*/
regs_add_return_ip(regs, MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
}
/*
* If pre_handler returns !0, it changes regs->nip. We have to
* skip emulating post_handler.
*/
__this_cpu_write(current_kprobe, NULL);
}
out:
ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
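/*
 * Illustrative usage (an added example, not part of the original file):
 * a kprobe registered on a traced function's entry, e.g.
 *
 *	static struct kprobe kp = { .symbol_name = "kernel_clone" };
 *	register_kprobe(&kp);
 *
 * is dispatched through kprobe_ftrace_handler() above rather than through
 * the trap-based kprobe path, because the probed address coincides with
 * the ftrace call site. The symbol name is only an example.
 */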
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
p->ainsn.insn = NULL;
p->ainsn.boostable = -1;
return 0;
}
| linux-master | arch/powerpc/kernel/kprobes-ftrace.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Contains common pci routines for ALL ppc platform
* (based on pci_32.c and pci_64.c)
*
* Port for PPC64 David Engebretsen, IBM Corp.
* Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
*
* Copyright (C) 2003 Anton Blanchard <[email protected]>, IBM
* Rework, based on alpha PCI code.
*
* Common pmac/prep/chrp pci routines. -- Cort
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/numa.h>
#include <linux/msi.h>
#include <linux/irqdomain.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
#include <asm/setup.h>
#include "../../../drivers/pci/pci.h"
/* hose_spinlock protects accesses to the phb_bitmap. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);
/* For dynamic PHB numbering on get_phb_number(): max number of PHBs. */
#define MAX_PHBS 0x10000
/*
* For dynamic PHB numbering: used/free PHBs tracking bitmap.
* Accesses to this bitmap should be protected by hose_spinlock.
*/
static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);
/* ISA Memory physical address */
resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);
static const struct dma_map_ops *pci_dma_ops;
void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
pci_dma_ops = dma_ops;
}
static int get_phb_number(struct device_node *dn)
{
int ret, phb_id = -1;
u64 prop;
/*
* Try fixed PHB numbering first, by checking archs and reading
* the respective device-tree properties. Firstly, try reading
* standard "linux,pci-domain", then try reading "ibm,opal-phbid"
* (only present in powernv OPAL environment), then try device-tree
* alias and, as a last resort, use the lower bits of the "reg" property.
*/
ret = of_get_pci_domain_nr(dn);
if (ret >= 0) {
prop = ret;
ret = 0;
}
if (ret)
ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
if (ret) {
ret = of_alias_get_id(dn, "pci");
if (ret >= 0) {
prop = ret;
ret = 0;
}
}
if (ret) {
u32 prop_32;
ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
prop = prop_32;
}
if (!ret)
phb_id = (int)(prop & (MAX_PHBS - 1));
spin_lock(&hose_spinlock);
/* We need to be sure to not use the same PHB number twice. */
if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
goto out_unlock;
/* If everything fails then fallback to dynamic PHB numbering. */
phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
BUG_ON(phb_id >= MAX_PHBS);
set_bit(phb_id, phb_bitmap);
out_unlock:
spin_unlock(&hose_spinlock);
return phb_id;
}
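/*
 * Illustrative example (added, not part of the original file): a host
 * bridge node carrying
 *
 *	linux,pci-domain = <0x4>;
 *
 * gets domain 4 from of_get_pci_domain_nr() and the later fallbacks
 * (ibm,opal-phbid, the "pci" alias, the low bits of "reg") are not
 * consulted. Only when all of them fail, or the chosen number is already
 * set in phb_bitmap, does the allocation fall back to the first free bit.
 */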
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
struct pci_controller *phb;
phb = kzalloc(sizeof(struct pci_controller), GFP_KERNEL);
if (phb == NULL)
return NULL;
phb->global_number = get_phb_number(dev);
spin_lock(&hose_spinlock);
list_add_tail(&phb->list_node, &hose_list);
spin_unlock(&hose_spinlock);
phb->dn = of_node_get(dev);
phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
if (dev) {
int nid = of_node_to_nid(dev);
if (nid < 0 || !node_online(nid))
nid = NUMA_NO_NODE;
PHB_SET_NODE(phb, nid);
}
#endif
return phb;
}
EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
void pcibios_free_controller(struct pci_controller *phb)
{
spin_lock(&hose_spinlock);
/* Clear bit of phb_bitmap to allow reuse of this PHB number. */
if (phb->global_number < MAX_PHBS)
clear_bit(phb->global_number, phb_bitmap);
of_node_put(phb->dn);
list_del(&phb->list_node);
spin_unlock(&hose_spinlock);
if (phb->is_dynamic)
kfree(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller);
/*
* This function is used to call pcibios_free_controller()
* in a deferred manner: a callback from the PCI subsystem.
*
* _*DO NOT*_ call pcibios_free_controller() explicitly if
* this is used (or it may access an invalid *phb pointer).
*
* The callback occurs when all references to the root bus
* are dropped (e.g., child buses/devices and their users).
*
* It's called as .release_fn() of 'struct pci_host_bridge'
* which is associated with the 'struct pci_controller.bus'
* (root bus) - it expects .release_data to hold a pointer
* to 'struct pci_controller'.
*
* In order to use it, register .release_fn()/release_data
* like this:
*
* pci_set_host_bridge_release(bridge,
* pcibios_free_controller_deferred
* (void *) phb);
*
* e.g. in the pcibios_root_bridge_prepare() callback from
* pci_create_root_bus().
*/
void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
{
struct pci_controller *phb = (struct pci_controller *)
bridge->release_data;
pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);
pcibios_free_controller(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
/*
* The function is used to return the minimal alignment
* for memory or I/O windows of the associated P2P bridge.
* By default, 4KiB alignment for I/O windows and 1MiB for
* memory windows.
*/
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
unsigned long type)
{
struct pci_controller *phb = pci_bus_to_host(bus);
if (phb->controller_ops.window_alignment)
return phb->controller_ops.window_alignment(bus, type);
/*
* PCI core will figure out the default
* alignment: 4KiB for I/O and 1MiB for
* memory window.
*/
return 1;
}
void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
struct pci_controller *hose = pci_bus_to_host(bus);
if (hose->controller_ops.setup_bridge)
hose->controller_ops.setup_bridge(bus, type);
}
void pcibios_reset_secondary_bus(struct pci_dev *dev)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
if (phb->controller_ops.reset_secondary_bus) {
phb->controller_ops.reset_secondary_bus(dev);
return;
}
pci_reset_secondary_bus(dev);
}
resource_size_t pcibios_default_alignment(void)
{
if (ppc_md.pcibios_default_alignment)
return ppc_md.pcibios_default_alignment();
return 0;
}
#ifdef CONFIG_PCI_IOV
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
if (ppc_md.pcibios_iov_resource_alignment)
return ppc_md.pcibios_iov_resource_alignment(pdev, resno);
return pci_iov_resource_size(pdev, resno);
}
int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
if (ppc_md.pcibios_sriov_enable)
return ppc_md.pcibios_sriov_enable(pdev, num_vfs);
return 0;
}
int pcibios_sriov_disable(struct pci_dev *pdev)
{
if (ppc_md.pcibios_sriov_disable)
return ppc_md.pcibios_sriov_disable(pdev);
return 0;
}
#endif /* CONFIG_PCI_IOV */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
return hose->pci_io_size;
#else
return resource_size(&hose->io_resource);
#endif
}
int pcibios_vaddr_is_ioport(void __iomem *address)
{
int ret = 0;
struct pci_controller *hose;
resource_size_t size;
spin_lock(&hose_spinlock);
list_for_each_entry(hose, &hose_list, list_node) {
size = pcibios_io_size(hose);
if (address >= hose->io_base_virt &&
address < (hose->io_base_virt + size)) {
ret = 1;
break;
}
}
spin_unlock(&hose_spinlock);
return ret;
}
unsigned long pci_address_to_pio(phys_addr_t address)
{
struct pci_controller *hose;
resource_size_t size;
unsigned long ret = ~0;
spin_lock(&hose_spinlock);
list_for_each_entry(hose, &hose_list, list_node) {
size = pcibios_io_size(hose);
if (address >= hose->io_base_phys &&
address < (hose->io_base_phys + size)) {
unsigned long base =
(unsigned long)hose->io_base_virt - _IO_BASE;
ret = base + (address - hose->io_base_phys);
break;
}
}
spin_unlock(&hose_spinlock);
return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);
/*
* Return the domain number for this bus.
*/
int pci_domain_nr(struct pci_bus *bus)
{
struct pci_controller *hose = pci_bus_to_host(bus);
return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);
/* This routine is meant to be used early during boot, when the
* PCI bus numbers have not yet been assigned, and you need to
* issue PCI config cycles to an OF device.
* It could also be used to "fix" RTAS config cycles if you want
* to set pci_assign_all_buses to 1 and still use RTAS for PCI
* config cycles.
*/
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
while(node) {
struct pci_controller *hose, *tmp;
list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
if (hose->dn == node)
return hose;
node = node->parent;
}
return NULL;
}
struct pci_controller *pci_find_controller_for_domain(int domain_nr)
{
struct pci_controller *hose;
list_for_each_entry(hose, &hose_list, list_node)
if (hose->global_number == domain_nr)
return hose;
return NULL;
}
struct pci_intx_virq {
int virq;
struct kref kref;
struct list_head list_node;
};
static LIST_HEAD(intx_list);
static DEFINE_MUTEX(intx_mutex);
static void ppc_pci_intx_release(struct kref *kref)
{
struct pci_intx_virq *vi = container_of(kref, struct pci_intx_virq, kref);
list_del(&vi->list_node);
irq_dispose_mapping(vi->virq);
kfree(vi);
}
static int ppc_pci_unmap_irq_line(struct notifier_block *nb,
unsigned long action, void *data)
{
struct pci_dev *pdev = to_pci_dev(data);
if (action == BUS_NOTIFY_DEL_DEVICE) {
struct pci_intx_virq *vi;
mutex_lock(&intx_mutex);
list_for_each_entry(vi, &intx_list, list_node) {
if (vi->virq == pdev->irq) {
kref_put(&vi->kref, ppc_pci_intx_release);
break;
}
}
mutex_unlock(&intx_mutex);
}
return NOTIFY_DONE;
}
static struct notifier_block ppc_pci_unmap_irq_notifier = {
.notifier_call = ppc_pci_unmap_irq_line,
};
static int ppc_pci_register_irq_notifier(void)
{
return bus_register_notifier(&pci_bus_type, &ppc_pci_unmap_irq_notifier);
}
arch_initcall(ppc_pci_register_irq_notifier);
/*
* Reads the interrupt pin to determine if the interrupt is used by the card.
* If the interrupt is used, then gets the interrupt line from the
* openfirmware and sets it in the pci_dev and pci_config line.
*/
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
int virq;
struct pci_intx_virq *vi, *vitmp;
/* Preallocate vi as rewind is complex if this fails after mapping */
vi = kzalloc(sizeof(struct pci_intx_virq), GFP_KERNEL);
if (!vi)
return -1;
pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
/* Try to get a mapping from the device-tree */
virq = of_irq_parse_and_map_pci(pci_dev, 0, 0);
if (virq <= 0) {
u8 line, pin;
/* If that fails, lets fallback to what is in the config
* space and map that through the default controller. We
* also set the type to level low since that's what PCI
* interrupts are. If your platform does differently, then
* either provide a proper interrupt tree or don't use this
* function.
*/
if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
goto error_exit;
if (pin == 0)
goto error_exit;
if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
line == 0xff || line == 0) {
goto error_exit;
}
pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
line, pin);
virq = irq_create_mapping(NULL, line);
if (virq)
irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
}
if (!virq) {
pr_debug(" Failed to map !\n");
goto error_exit;
}
pr_debug(" Mapped to linux irq %d\n", virq);
pci_dev->irq = virq;
mutex_lock(&intx_mutex);
list_for_each_entry(vitmp, &intx_list, list_node) {
if (vitmp->virq == virq) {
kref_get(&vitmp->kref);
kfree(vi);
vi = NULL;
break;
}
}
if (vi) {
vi->virq = virq;
kref_init(&vi->kref);
list_add_tail(&vi->list_node, &intx_list);
}
mutex_unlock(&intx_mutex);
return 0;
error_exit:
kfree(vi);
return -1;
}
/*
* Platform support for /proc/bus/pci/X/Y mmap()s.
* -- paulus.
*/
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
resource_size_t ioaddr = pci_resource_start(pdev, bar);
if (!hose)
return -EINVAL;
/* Convert to an offset within this PCI controller */
ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;
vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
return 0;
}
/*
* This one is used by /dev/mem and fbdev who have no clue about the
* PCI device, it tries to find the PCI device first and calls the
* above routine
*/
pgprot_t pci_phys_mem_access_prot(struct file *file,
unsigned long pfn,
unsigned long size,
pgprot_t prot)
{
struct pci_dev *pdev = NULL;
struct resource *found = NULL;
resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
int i;
if (page_is_ram(pfn))
return prot;
prot = pgprot_noncached(prot);
for_each_pci_dev(pdev) {
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
struct resource *rp = &pdev->resource[i];
int flags = rp->flags;
/* Active and same type? */
if ((flags & IORESOURCE_MEM) == 0)
continue;
/* In the range of this resource? */
if (offset < (rp->start & PAGE_MASK) ||
offset > rp->end)
continue;
found = rp;
break;
}
if (found)
break;
}
if (found) {
if (found->flags & IORESOURCE_PREFETCH)
prot = pgprot_noncached_wc(prot);
pci_dev_put(pdev);
}
pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
(unsigned long long)offset, pgprot_val(prot));
return prot;
}
/* This provides legacy IO read access on a bus */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
unsigned long offset;
struct pci_controller *hose = pci_bus_to_host(bus);
struct resource *rp = &hose->io_resource;
void __iomem *addr;
/* Check if port can be supported by that bus. We only check
* the ranges of the PHB though, not the bus itself as the rules
* for forwarding legacy cycles down bridges are not our problem
* here. So if the host bridge supports it, we do it.
*/
offset = (unsigned long)hose->io_base_virt - _IO_BASE;
offset += port;
if (!(rp->flags & IORESOURCE_IO))
return -ENXIO;
if (offset < rp->start || (offset + size) > rp->end)
return -ENXIO;
addr = hose->io_base_virt + port;
switch(size) {
case 1:
*((u8 *)val) = in_8(addr);
return 1;
case 2:
if (port & 1)
return -EINVAL;
*((u16 *)val) = in_le16(addr);
return 2;
case 4:
if (port & 3)
return -EINVAL;
*((u32 *)val) = in_le32(addr);
return 4;
}
return -EINVAL;
}
/* This provides legacy IO write access on a bus */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
unsigned long offset;
struct pci_controller *hose = pci_bus_to_host(bus);
struct resource *rp = &hose->io_resource;
void __iomem *addr;
/* Check if port can be supported by that bus. We only check
* the ranges of the PHB though, not the bus itself as the rules
* for forwarding legacy cycles down bridges are not our problem
* here. So if the host bridge supports it, we do it.
*/
offset = (unsigned long)hose->io_base_virt - _IO_BASE;
offset += port;
if (!(rp->flags & IORESOURCE_IO))
return -ENXIO;
if (offset < rp->start || (offset + size) > rp->end)
return -ENXIO;
addr = hose->io_base_virt + port;
/* WARNING: The generic code is idiotic. It gets passed a pointer
* to what can be a 1, 2 or 4 byte quantity and always reads that
* as a u32, which means that we have to correct the location of
* the data read within those 32 bits for size 1 and 2
*/
switch(size) {
case 1:
out_8(addr, val >> 24);
return 1;
case 2:
if (port & 1)
return -EINVAL;
out_le16(addr, val >> 16);
return 2;
case 4:
if (port & 3)
return -EINVAL;
out_le32(addr, val);
return 4;
}
return -EINVAL;
}
/* This provides legacy IO or memory mmap access on a bus */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state)
{
struct pci_controller *hose = pci_bus_to_host(bus);
resource_size_t offset =
((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
resource_size_t size = vma->vm_end - vma->vm_start;
struct resource *rp;
pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
pci_domain_nr(bus), bus->number,
mmap_state == pci_mmap_mem ? "MEM" : "IO",
(unsigned long long)offset,
(unsigned long long)(offset + size - 1));
if (mmap_state == pci_mmap_mem) {
/* Hack alert !
*
* Because X is lame and can fail starting if it gets an error trying
* to mmap legacy_mem (instead of just moving on without legacy memory
* access) we fake it here by giving it anonymous memory, effectively
* behaving just like /dev/zero
*/
if ((offset + size) > hose->isa_mem_size) {
printk(KERN_DEBUG
"Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
current->comm, current->pid, pci_domain_nr(bus), bus->number);
if (vma->vm_flags & VM_SHARED)
return shmem_zero_setup(vma);
return 0;
}
offset += hose->isa_mem_phys;
} else {
unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
unsigned long roffset = offset + io_offset;
rp = &hose->io_resource;
if (!(rp->flags & IORESOURCE_IO))
return -ENXIO;
if (roffset < rp->start || (roffset + size) > rp->end)
return -ENXIO;
offset += hose->io_base_phys;
}
pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
vma->vm_pgoff = offset >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
resource_size_t *start, resource_size_t *end)
{
struct pci_bus_region region;
if (rsrc->flags & IORESOURCE_IO) {
pcibios_resource_to_bus(dev->bus, &region,
(struct resource *) rsrc);
*start = region.start;
*end = region.end;
return;
}
/* We pass a CPU physical address to userland for MMIO instead of a
* BAR value because X is lame and expects to be able to use that
* to pass to /dev/mem!
*
* That means we may have 64-bit values where some apps only expect
* 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
*/
*start = rsrc->start;
*end = rsrc->end;
}
/**
* pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
* @hose: newly allocated pci_controller to be setup
* @dev: device node of the host bridge
* @primary: set if primary bus (32 bits only, soon to be deprecated)
*
* This function will parse the "ranges" property of a PCI host bridge device
* node and setup the resource mapping of a pci controller based on its
* content.
*
* Life would be boring if it wasn't for a few issues that we have to deal
* with here:
*
* - We can only cope with one IO space range and up to 3 Memory space
* ranges. However, some machines (thanks Apple !) tend to split their
* space into lots of small contiguous ranges. So we have to coalesce.
*
* - Some busses have IO space not starting at 0, which causes trouble with
* the way we do our IO resource renumbering. The code somewhat deals with
* it for 64 bits but I would expect problems on 32 bits.
*
* - Some 32 bits platforms such as 4xx can have physical space larger than
* 32 bits so we need to use 64 bits values for the parsing
*/
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
struct device_node *dev, int primary)
{
int memno = 0;
struct resource *res;
struct of_pci_range range;
struct of_pci_range_parser parser;
printk(KERN_INFO "PCI host bridge %pOF %s ranges:\n",
dev, primary ? "(primary)" : "");
/* Check for ranges property */
if (of_pci_range_parser_init(&parser, dev))
return;
/* Parse it */
for_each_of_pci_range(&parser, &range) {
/* If we failed translation or got a zero-sized region
* (some firmware tries to feed us nonsensical zero-sized regions,
* such as power3, which look like some kind of attempt at exposing
* the VGA memory hole), skip it.
*/
if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
continue;
/* Act based on address space type */
res = NULL;
switch (range.flags & IORESOURCE_TYPE_BITS) {
case IORESOURCE_IO:
printk(KERN_INFO
" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
range.cpu_addr, range.cpu_addr + range.size - 1,
range.pci_addr);
/* We support only one IO range */
if (hose->pci_io_size) {
printk(KERN_INFO
" \\--> Skipped (too many) !\n");
continue;
}
#ifdef CONFIG_PPC32
/* On 32 bits, limit I/O space to 16MB */
if (range.size > 0x01000000)
range.size = 0x01000000;
/* 32 bits needs to map IOs here */
hose->io_base_virt = ioremap(range.cpu_addr,
range.size);
/* Expect trouble if pci_addr is not 0 */
if (primary)
isa_io_base =
(unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
/* pci_io_size and io_base_phys always represent IO
* space starting at 0 so we factor in pci_addr
*/
hose->pci_io_size = range.pci_addr + range.size;
hose->io_base_phys = range.cpu_addr - range.pci_addr;
/* Build resource */
res = &hose->io_resource;
range.cpu_addr = range.pci_addr;
break;
case IORESOURCE_MEM:
printk(KERN_INFO
" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
range.cpu_addr, range.cpu_addr + range.size - 1,
range.pci_addr,
(range.flags & IORESOURCE_PREFETCH) ?
"Prefetch" : "");
/* We support only 3 memory ranges */
if (memno >= 3) {
printk(KERN_INFO
" \\--> Skipped (too many) !\n");
continue;
}
/* Handles ISA memory hole space here */
if (range.pci_addr == 0) {
if (primary || isa_mem_base == 0)
isa_mem_base = range.cpu_addr;
hose->isa_mem_phys = range.cpu_addr;
hose->isa_mem_size = range.size;
}
/* Build resource */
hose->mem_offset[memno] = range.cpu_addr -
range.pci_addr;
res = &hose->mem_resources[memno++];
break;
}
if (res != NULL) {
res->name = dev->full_name;
res->flags = range.flags;
res->start = range.cpu_addr;
res->end = range.cpu_addr + range.size - 1;
res->parent = res->child = res->sibling = NULL;
}
}
}
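/*
 * Worked example (added for illustration, not taken from any real device
 * tree): a ranges entry translating PCI I/O address 0x0 to CPU address
 * 0xd0000000 for 64KB, plus PCI memory 0x80000000 to CPU 0x80000000 for
 * 256MB, leaves the loop above with io_base_phys = 0xd0000000,
 * pci_io_size = 0x10000, and mem_resources[0] spanning
 * 0x80000000..0x8fffffff with mem_offset[0] = 0. The exact cell layout of
 * "ranges" depends on the #address-cells/#size-cells of the bridge and
 * its parent.
 */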
/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
struct pci_controller *hose = pci_bus_to_host(bus);
if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
return 0;
if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
return hose->global_number != 0;
return 1;
}
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
if (ppc_md.pcibios_root_bridge_prepare)
return ppc_md.pcibios_root_bridge_prepare(bridge);
return 0;
}
/* This header fixup will do the resource fixup for all devices as they are
* probed, but not for bridge ranges
*/
static void pcibios_fixup_resources(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct resource *res;
int i;
if (!hose) {
printk(KERN_ERR "No host bridge for PCI dev %s !\n",
pci_name(dev));
return;
}
if (dev->is_virtfn)
return;
pci_dev_for_each_resource(dev, res, i) {
struct pci_bus_region reg;
if (!res->flags)
continue;
/* If we're going to re-assign everything, we mark all resources
* as unset (and 0-base them). In addition, we mark BARs starting
* at 0 as unset as well, except if PCI_PROBE_ONLY is also set
* since in that case, we don't want to re-assign anything
*/
pcibios_resource_to_bus(dev->bus, &reg, res);
if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
(reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
/* Only print message if not re-assigning */
if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
pr_debug("PCI:%s Resource %d %pR is unassigned\n",
pci_name(dev), i, res);
res->end -= res->start;
res->start = 0;
res->flags |= IORESOURCE_UNSET;
continue;
}
pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
}
/* Call machine specific resource fixup */
if (ppc_md.pcibios_fixup_resources)
ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
/* This function tries to figure out if a bridge resource has been initialized
* by the firmware or not. It doesn't have to be absolutely bullet proof, but
* things go more smoothly when it gets it right. It should cover cases such
* as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
*/
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
struct resource *res)
{
struct pci_controller *hose = pci_bus_to_host(bus);
struct pci_dev *dev = bus->self;
resource_size_t offset;
struct pci_bus_region region;
u16 command;
int i;
/* We don't do anything if PCI_PROBE_ONLY is set */
if (pci_has_flag(PCI_PROBE_ONLY))
return 0;
/* Job is a bit different between memory and IO */
if (res->flags & IORESOURCE_MEM) {
pcibios_resource_to_bus(dev->bus, &region, res);
/* If the BAR is non-0 then it's probably been initialized */
if (region.start != 0)
return 0;
/* The BAR is 0, let's check if memory decoding is enabled on
* the bridge. If not, we consider it unassigned
*/
pci_read_config_word(dev, PCI_COMMAND, &command);
if ((command & PCI_COMMAND_MEMORY) == 0)
return 1;
/* Memory decoding is enabled and the BAR is 0. If any of the bridge
* resources covers that starting address (0), then it's good enough
* for us for memory space.
*/
for (i = 0; i < 3; i++) {
if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
hose->mem_resources[i].start == hose->mem_offset[i])
return 0;
}
/* Well, it starts at 0 and we know it will collide so we may as
* well consider it as unassigned. That covers the Apple case.
*/
return 1;
} else {
/* If the BAR is non-0, then we consider it assigned */
offset = (unsigned long)hose->io_base_virt - _IO_BASE;
if (((res->start - offset) & 0xfffffffful) != 0)
return 0;
/* Here, we are a bit different than memory as typically IO space
* starting at low addresses -is- valid. What we do instead is that
* we consider as unassigned anything that doesn't have IO enabled
* in the PCI command register, and that's it.
*/
pci_read_config_word(dev, PCI_COMMAND, &command);
if (command & PCI_COMMAND_IO)
return 0;
/* It's starting at 0 and IO is disabled in the bridge, consider
* it unassigned
*/
return 1;
}
}
/* Fixup resources of a PCI<->PCI bridge */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
struct resource *res;
int i;
struct pci_dev *dev = bus->self;
pci_bus_for_each_resource(bus, res, i) {
if (!res || !res->flags)
continue;
if (i >= 3 && bus->self->transparent)
continue;
/* If we're going to reassign everything, we can
* shrink the P2P resource to have size as being
* of 0 in order to save space.
*/
if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
res->flags |= IORESOURCE_UNSET;
res->start = 0;
res->end = -1;
continue;
}
pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);
/* Try to detect uninitialized P2P bridge resources,
* and clear them out so they get re-assigned later
*/
if (pcibios_uninitialized_bridge_resource(bus, res)) {
res->flags = 0;
pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
}
}
}
void pcibios_setup_bus_self(struct pci_bus *bus)
{
struct pci_controller *phb;
/* Fix up the bus resources for P2P bridges */
if (bus->self != NULL)
pcibios_fixup_bridge(bus);
/* Platform specific bus fixups. This is currently only used
* by fsl_pci and I'm hoping to get rid of it at some point
*/
if (ppc_md.pcibios_fixup_bus)
ppc_md.pcibios_fixup_bus(bus);
/* Setup bus DMA mappings */
phb = pci_bus_to_host(bus);
if (phb->controller_ops.dma_bus_setup)
phb->controller_ops.dma_bus_setup(bus);
}
void pcibios_bus_add_device(struct pci_dev *dev)
{
struct pci_controller *phb;
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
*/
set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
/* Hook up default DMA ops */
set_dma_ops(&dev->dev, pci_dma_ops);
dev->dev.archdata.dma_offset = PCI_DRAM_OFFSET;
/* Additional platform DMA/iommu setup */
phb = pci_bus_to_host(dev->bus);
if (phb->controller_ops.dma_dev_setup)
phb->controller_ops.dma_dev_setup(dev);
/* Read default IRQs and fixup if necessary */
pci_read_irq_line(dev);
if (ppc_md.pci_irq_fixup)
ppc_md.pci_irq_fixup(dev);
if (ppc_md.pcibios_bus_add_device)
ppc_md.pcibios_bus_add_device(dev);
}
int pcibios_device_add(struct pci_dev *dev)
{
struct irq_domain *d;
#ifdef CONFIG_PCI_IOV
if (ppc_md.pcibios_fixup_sriov)
ppc_md.pcibios_fixup_sriov(dev);
#endif /* CONFIG_PCI_IOV */
d = dev_get_msi_domain(&dev->bus->dev);
if (d)
dev_set_msi_domain(&dev->dev, d);
return 0;
}
void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
void pcibios_fixup_bus(struct pci_bus *bus)
{
/* When called from the generic PCI probe, read PCI<->PCI bridge
* bases. This is -not- called when generating the PCI tree from
* the OF device-tree.
*/
pci_read_bridge_bases(bus);
/* Now fixup the bus */
pcibios_setup_bus_self(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
static int skip_isa_ioresource_align(struct pci_dev *dev)
{
if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
!(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
return 1;
return 0;
}
/*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*
* Why? Because some silly external IO cards only decode
* the low 10 bits of the IO address. The 0x00-0xff region
* is reserved for motherboard devices that decode all 16
* bits, so it's ok to allocate at, say, 0x2800-0x28ff,
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might be mirrored at 0x0100-0x03ff.
*/
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
if (skip_isa_ioresource_align(dev))
return start;
if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
}
return start;
}
EXPORT_SYMBOL(pcibios_align_resource);
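/*
 * Worked example (added for illustration, not part of the original file):
 * an I/O request at 0x2900 has bits in 0x300 set, so it is rounded up to
 * (0x2900 + 0x3ff) & ~0x3ff = 0x2c00, avoiding the 0x2900-0x2bff window
 * that a 10-bit-decoding ISA card would alias onto 0x0100-0x03ff.
 */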
/*
* Reparent resource children of pr that conflict with res
* under res, and make res replace those children.
*/
static int reparent_resources(struct resource *parent,
struct resource *res)
{
struct resource *p, **pp;
struct resource **firstpp = NULL;
for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
if (p->end < res->start)
continue;
if (res->end < p->start)
break;
if (p->start < res->start || p->end > res->end)
return -1; /* not completely contained */
if (firstpp == NULL)
firstpp = pp;
}
if (firstpp == NULL)
return -1; /* didn't find any conflicting entries? */
res->parent = parent;
res->child = *firstpp;
res->sibling = *pp;
*firstpp = res;
*pp = NULL;
for (p = res->child; p != NULL; p = p->sibling) {
p->parent = res;
pr_debug("PCI: Reparented %s %pR under %s\n",
p->name, p, res->name);
}
return 0;
}
/*
* Handle resources of PCI devices. If the world were perfect, we could
* just allocate all the resource regions and do nothing more. It isn't.
* On the other hand, we cannot just re-allocate all devices, as it would
* require us to know lots of host bridge internals. So we attempt to
* keep as much of the original configuration as possible, but tweak it
* when it's found to be wrong.
*
* Known BIOS problems we have to work around:
* - I/O or memory regions not configured
* - regions configured, but not enabled in the command register
* - bogus I/O addresses above 64K used
* - expansion ROMs left enabled (this may sound harmless, but given
* the fact the PCI specs explicitly allow address decoders to be
* shared between expansion ROMs and other resource regions, it's
* at least dangerous)
*
* Our solution:
* (1) Allocate resources for all buses behind PCI-to-PCI bridges.
* This gives us fixed barriers on where we can allocate.
* (2) Allocate resources for all enabled devices. If there is
* a collision, just mark the resource as unallocated. Also
* disable expansion ROMs during this step.
* (3) Try to allocate resources for disabled devices. If the
* resources were assigned correctly, everything goes well,
* if they weren't, they won't disturb allocation of other
* resources.
* (4) Assign new addresses to resources which were either
* not configured at all or misconfigured. If explicitly
* requested by the user, configure expansion ROM address
* as well.
*/
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
struct pci_bus *b;
int i;
struct resource *res, *pr;
pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
pci_domain_nr(bus), bus->number);
pci_bus_for_each_resource(bus, res, i) {
if (!res || !res->flags || res->start > res->end || res->parent)
continue;
/* If the resource was left unset at this point, we clear it */
if (res->flags & IORESOURCE_UNSET)
goto clear_resource;
if (bus->parent == NULL)
pr = (res->flags & IORESOURCE_IO) ?
&ioport_resource : &iomem_resource;
else {
pr = pci_find_parent_resource(bus->self, res);
if (pr == res) {
/* this happens when the generic PCI
* code (wrongly) decides that this
* bridge is transparent -- paulus
*/
continue;
}
}
pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
bus->self ? pci_name(bus->self) : "PHB", bus->number,
i, res, pr, (pr && pr->name) ? pr->name : "nil");
if (pr && !(pr->flags & IORESOURCE_UNSET)) {
struct pci_dev *dev = bus->self;
if (request_resource(pr, res) == 0)
continue;
/*
* Must be a conflict with an existing entry.
* Move that entry (or entries) under the
* bridge resource and try again.
*/
if (reparent_resources(pr, res) == 0)
continue;
if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
pci_claim_bridge_resource(dev,
i + PCI_BRIDGE_RESOURCES) == 0)
continue;
}
pr_warn("PCI: Cannot allocate resource region %d of PCI bridge %d, will remap\n",
i, bus->number);
clear_resource:
/* The resource might be figured out when doing
* reassignment based on the resources required
* by the downstream PCI devices. Here we set
* the size of the resource to be 0 in order to
* save more space.
*/
res->start = 0;
res->end = -1;
res->flags = 0;
}
list_for_each_entry(b, &bus->children, node)
pcibios_allocate_bus_resources(b);
}
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
struct resource *pr, *r = &dev->resource[idx];
pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
pci_name(dev), idx, r);
pr = pci_find_parent_resource(dev, r);
if (!pr || (pr->flags & IORESOURCE_UNSET) ||
request_resource(pr, r) < 0) {
printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
" of device %s, will remap\n", idx, pci_name(dev));
if (pr)
pr_debug("PCI: parent is %p: %pR\n", pr, pr);
/* We'll assign a new address later */
r->flags |= IORESOURCE_UNSET;
r->end -= r->start;
r->start = 0;
}
}
static void __init pcibios_allocate_resources(int pass)
{
struct pci_dev *dev = NULL;
int idx, disabled;
u16 command;
struct resource *r;
for_each_pci_dev(dev) {
pci_read_config_word(dev, PCI_COMMAND, &command);
for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
r = &dev->resource[idx];
if (r->parent) /* Already allocated */
continue;
if (!r->flags || (r->flags & IORESOURCE_UNSET))
continue; /* Not assigned at all */
/* We only allocate ROMs on pass 1 just in case they
* have been screwed up by firmware
*/
if (idx == PCI_ROM_RESOURCE)
disabled = 1;
if (r->flags & IORESOURCE_IO)
disabled = !(command & PCI_COMMAND_IO);
else
disabled = !(command & PCI_COMMAND_MEMORY);
if (pass == disabled)
alloc_resource(dev, idx);
}
if (pass)
continue;
r = &dev->resource[PCI_ROM_RESOURCE];
if (r->flags) {
/* Turn the ROM off, leave the resource region,
* but keep it unregistered.
*/
u32 reg;
pci_read_config_dword(dev, dev->rom_base_reg, &reg);
if (reg & PCI_ROM_ADDRESS_ENABLE) {
pr_debug("PCI: Switching off ROM of %s\n",
pci_name(dev));
r->flags &= ~IORESOURCE_ROM_ENABLE;
pci_write_config_dword(dev, dev->rom_base_reg,
reg & ~PCI_ROM_ADDRESS_ENABLE);
}
}
}
}
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
struct pci_controller *hose = pci_bus_to_host(bus);
resource_size_t offset;
struct resource *res, *pres;
int i;
pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));
/* Check for IO */
if (!(hose->io_resource.flags & IORESOURCE_IO))
goto no_io;
offset = (unsigned long)hose->io_base_virt - _IO_BASE;
res = kzalloc(sizeof(struct resource), GFP_KERNEL);
BUG_ON(res == NULL);
res->name = "Legacy IO";
res->flags = IORESOURCE_IO;
res->start = offset;
res->end = (offset + 0xfff) & 0xfffffffful;
pr_debug("Candidate legacy IO: %pR\n", res);
if (request_resource(&hose->io_resource, res)) {
printk(KERN_DEBUG
"PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
pci_domain_nr(bus), bus->number, res);
kfree(res);
}
no_io:
/* Check for memory */
for (i = 0; i < 3; i++) {
pres = &hose->mem_resources[i];
offset = hose->mem_offset[i];
if (!(pres->flags & IORESOURCE_MEM))
continue;
pr_debug("hose mem res: %pR\n", pres);
if ((pres->start - offset) <= 0xa0000 &&
(pres->end - offset) >= 0xbffff)
break;
}
if (i >= 3)
return;
res = kzalloc(sizeof(struct resource), GFP_KERNEL);
BUG_ON(res == NULL);
res->name = "Legacy VGA memory";
res->flags = IORESOURCE_MEM;
res->start = 0xa0000 + offset;
res->end = 0xbffff + offset;
pr_debug("Candidate VGA memory: %pR\n", res);
if (request_resource(pres, res)) {
printk(KERN_DEBUG
"PCI %04x:%02x Cannot reserve VGA memory %pR\n",
pci_domain_nr(bus), bus->number, res);
kfree(res);
}
}
void __init pcibios_resource_survey(void)
{
struct pci_bus *b;
/* Allocate and assign resources */
list_for_each_entry(b, &pci_root_buses, node)
pcibios_allocate_bus_resources(b);
if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
pcibios_allocate_resources(0);
pcibios_allocate_resources(1);
}
/* Before we start assigning unassigned resource, we try to reserve
* the low IO area and the VGA memory area if they intersect the
* bus available resources to avoid allocating things on top of them
*/
if (!pci_has_flag(PCI_PROBE_ONLY)) {
list_for_each_entry(b, &pci_root_buses, node)
pcibios_reserve_legacy_regions(b);
}
/* Now, if the platform didn't decide to blindly trust the firmware,
* we proceed to assigning things that were left unassigned
*/
if (!pci_has_flag(PCI_PROBE_ONLY)) {
pr_debug("PCI: Assigning unassigned resources...\n");
pci_assign_unassigned_resources();
}
}
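/*
 * Added cross-reference (illustrative, not part of the original file):
 * relative to the four-step strategy described above
 * pcibios_allocate_bus_resources(), step (1) is
 * pcibios_allocate_bus_resources(), steps (2) and (3) are the two
 * pcibios_allocate_resources() passes (enabled devices on pass 0,
 * disabled ones on pass 1), and step (4) is
 * pci_assign_unassigned_resources() above.
 */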
/* This is used by the PCI hotplug driver to allocate resource
* of newly plugged busses. We can try to consolidate with the
* rest of the code later, for now, keep it as-is as our main
* resource allocation function doesn't deal with sub-trees yet.
*/
void pcibios_claim_one_bus(struct pci_bus *bus)
{
struct pci_dev *dev;
struct pci_bus *child_bus;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct resource *r;
int i;
pci_dev_for_each_resource(dev, r, i) {
if (r->parent || !r->start || !r->flags)
continue;
pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
pci_name(dev), i, r);
if (pci_claim_resource(dev, i) == 0)
continue;
pci_claim_bridge_resource(dev, i);
}
}
list_for_each_entry(child_bus, &bus->children, node)
pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
/* pcibios_finish_adding_to_bus
*
* This is to be called by the hotplug code after devices have been
* added to a bus; this includes calling it for a PHB that is just
* being added
*/
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
pci_domain_nr(bus), bus->number);
/* Allocate bus and devices resources */
pcibios_allocate_bus_resources(bus);
pcibios_claim_one_bus(bus);
if (!pci_has_flag(PCI_PROBE_ONLY)) {
if (bus->self)
pci_assign_unassigned_bridge_resources(bus->self);
else
pci_assign_unassigned_bus_resources(bus);
}
/* Add new devices to global lists. Register in proc, sysfs. */
pci_bus_add_devices(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
if (phb->controller_ops.enable_device_hook)
if (!phb->controller_ops.enable_device_hook(dev))
return -EINVAL;
return pci_enable_resources(dev, mask);
}
void pcibios_disable_device(struct pci_dev *dev)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
if (phb->controller_ops.disable_device)
phb->controller_ops.disable_device(dev);
}
resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
{
return (unsigned long) hose->io_base_virt - _IO_BASE;
}
static void pcibios_setup_phb_resources(struct pci_controller *hose,
struct list_head *resources)
{
struct resource *res;
resource_size_t offset;
int i;
/* Hookup PHB IO resource */
res = &hose->io_resource;
if (!res->flags) {
pr_debug("PCI: I/O resource not set for host"
" bridge %pOF (domain %d)\n",
hose->dn, hose->global_number);
} else {
offset = pcibios_io_space_offset(hose);
pr_debug("PCI: PHB IO resource = %pR off 0x%08llx\n",
res, (unsigned long long)offset);
pci_add_resource_offset(resources, res, offset);
}
/* Hookup PHB Memory resources */
for (i = 0; i < 3; ++i) {
res = &hose->mem_resources[i];
if (!res->flags)
continue;
offset = hose->mem_offset[i];
pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
res, (unsigned long long)offset);
pci_add_resource_offset(resources, res, offset);
}
}
/*
* Null PCI config access functions, for the case when we can't
* find a hose.
*/
#define NULL_PCI_OP(rw, size, type) \
static int \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
{ \
return PCIBIOS_DEVICE_NOT_FOUND; \
}
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 *val)
{
return PCIBIOS_DEVICE_NOT_FOUND;
}
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 val)
{
return PCIBIOS_DEVICE_NOT_FOUND;
}
static struct pci_ops null_pci_ops =
{
.read = null_read_config,
.write = null_write_config,
};
/*
* These functions are used early on before PCI scanning is done
* and all of the pci_dev and pci_bus structures have been created.
*/
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
static struct pci_bus bus;
if (hose == NULL) {
printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
}
bus.number = busnr;
bus.sysdata = hose;
bus.ops = hose? hose->ops: &null_pci_ops;
return &bus;
}
#define EARLY_PCI_OP(rw, size, type) \
int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
int devfn, int offset, type value) \
{ \
return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
devfn, offset, value); \
}
EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
int cap)
{
return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
struct pci_controller *hose = bus->sysdata;
return of_node_get(hose->dn);
}
/**
* pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
* @hose: Pointer to the PCI host controller instance structure
*/
void pcibios_scan_phb(struct pci_controller *hose)
{
LIST_HEAD(resources);
struct pci_bus *bus;
struct device_node *node = hose->dn;
int mode;
pr_debug("PCI: Scanning PHB %pOF\n", node);
/* Get some IO space for the new PHB */
pcibios_setup_phb_io_space(hose);
/* Wire up PHB bus resources */
pcibios_setup_phb_resources(hose, &resources);
hose->busn.start = hose->first_busno;
hose->busn.end = hose->last_busno;
hose->busn.flags = IORESOURCE_BUS;
pci_add_resource(&resources, &hose->busn);
/* Create an empty bus for the toplevel */
bus = pci_create_root_bus(hose->parent, hose->first_busno,
hose->ops, hose, &resources);
if (bus == NULL) {
pr_err("Failed to create bus for PCI domain %04x\n",
hose->global_number);
pci_free_resource_list(&resources);
return;
}
hose->bus = bus;
/* Get probe mode and perform scan */
mode = PCI_PROBE_NORMAL;
if (node && hose->controller_ops.probe_mode)
mode = hose->controller_ops.probe_mode(bus);
pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE)
of_scan_bus(node, bus);
if (mode == PCI_PROBE_NORMAL) {
pci_bus_update_busn_res_end(bus, 255);
hose->last_busno = pci_scan_child_bus(bus);
pci_bus_update_busn_res_end(bus, hose->last_busno);
}
/* Platform gets a chance to do some global fixups before
* we proceed to resource allocation
*/
if (ppc_md.pcibios_fixup_phb)
ppc_md.pcibios_fixup_phb(hose);
/* Configure PCI Express settings */
if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
struct pci_bus *child;
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
}
}
EXPORT_SYMBOL_GPL(pcibios_scan_phb);
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
int class = dev->class >> 8;
/* When configured as agent, programming interface = 1 */
int prog_if = dev->class & 0xf;
struct resource *r;
if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
class == PCI_CLASS_BRIDGE_OTHER) &&
(dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
(prog_if == 0) &&
(dev->bus->parent == NULL)) {
pci_dev_for_each_resource(dev, r) {
r->start = 0;
r->end = 0;
r->flags = 0;
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
static int __init discover_phbs(void)
{
if (ppc_md.discover_phbs)
ppc_md.discover_phbs();
return 0;
}
core_initcall(discover_phbs);
| linux-master | arch/powerpc/kernel/pci-common.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
#include "audit_32.h"
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
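/*
* audit_classify_arch() appears to be the audit core's "is this the 32-bit
* ABI?" hook: on ppc64 it returns 1 for AUDIT_ARCH_PPC (the 32-bit ABI) and
* 0 for the native ABI. audit_classify_syscall() then routes 32-bit syscalls
* through ppc32_classify_syscall() and classifies native ones inline.
*/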
int audit_classify_arch(int arch)
{
#ifdef CONFIG_PPC64
if (arch == AUDIT_ARCH_PPC)
return 1;
#endif
return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
#ifdef CONFIG_PPC64
if (abi == AUDIT_ARCH_PPC)
return ppc32_classify_syscall(syscall);
#endif
switch(syscall) {
case __NR_open:
return AUDITSC_OPEN;
case __NR_openat:
return AUDITSC_OPENAT;
case __NR_socketcall:
return AUDITSC_SOCKETCALL;
case __NR_execve:
return AUDITSC_EXECVE;
case __NR_openat2:
return AUDITSC_OPENAT2;
default:
return AUDITSC_NATIVE;
}
}
static int __init audit_classes_init(void)
{
#ifdef CONFIG_PPC64
extern __u32 ppc32_dir_class[];
extern __u32 ppc32_write_class[];
extern __u32 ppc32_read_class[];
extern __u32 ppc32_chattr_class[];
extern __u32 ppc32_signal_class[];
audit_register_class(AUDIT_CLASS_WRITE_32, ppc32_write_class);
audit_register_class(AUDIT_CLASS_READ_32, ppc32_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ppc32_dir_class);
audit_register_class(AUDIT_CLASS_CHATTR_32, ppc32_chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL_32, ppc32_signal_class);
#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
return 0;
}
__initcall(audit_classes_init);
| linux-master | arch/powerpc/kernel/audit.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ePAPR para-virtualization support.
*
* Copyright (C) 2012 Freescale Semiconductor, Inc.
*/
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/epapr_hcalls.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/machdep.h>
#include <asm/inst.h>
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
extern void epapr_ev_idle(void);
extern u32 epapr_ev_idle_start[];
#endif
bool epapr_paravirt_enabled;
static bool __maybe_unused epapr_has_idle;
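/*
* Flat device tree scan callback: look for the ePAPR "hcall-instructions"
* property and patch its (at most four) instructions into the
* epapr_hypercall_start trampoline, and into the ev_idle stub on 32-bit and
* Book3E-64 builds. Also note whether the hypervisor advertises an idle
* hcall via the "has-idle" property.
*/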
static int __init early_init_dt_scan_epapr(unsigned long node,
const char *uname,
int depth, void *data)
{
const u32 *insts;
int len;
int i;
insts = of_get_flat_dt_prop(node, "hcall-instructions", &len);
if (!insts)
return 0;
if (len % 4 || len > (4 * 4))
return -1;
for (i = 0; i < (len / 4); i++) {
ppc_inst_t inst = ppc_inst(be32_to_cpu(insts[i]));
patch_instruction(epapr_hypercall_start + i, inst);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction(epapr_ev_idle_start + i, inst);
#endif
}
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
if (of_get_flat_dt_prop(node, "has-idle", NULL))
epapr_has_idle = true;
#endif
epapr_paravirt_enabled = true;
return 1;
}
int __init epapr_paravirt_early_init(void)
{
of_scan_flat_dt(early_init_dt_scan_epapr, NULL);
return 0;
}
static int __init epapr_idle_init(void)
{
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
if (epapr_has_idle)
ppc_md.power_save = epapr_ev_idle;
#endif
return 0;
}
postcore_initcall(epapr_idle_init);
| linux-master | arch/powerpc/kernel/epapr_paravirt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Common time routines among all ppc machines.
*
* Written by Cort Dougan ([email protected]) to merge
* Paul Mackerras' version and mine for PReP and Pmac.
* MPC8xx/MBX changes by Dan Malek ([email protected]).
* Converted for 64-bit by Mike Corrigan ([email protected])
*
* First round of bugfixes by Gabriel Paubert ([email protected])
* to make clock more stable (2.4.0-test5). The only thing
* that this code assumes is that the timebases have been synchronized
* by firmware on SMP and are never stopped (never do sleep
* on SMP then, nap and doze are OK).
*
* Speeded up do_gettimeofday by getting rid of references to
* xtime (which required locks for consistency). ([email protected])
*
* TODO (not necessarily in this file):
* - improve precision and reproducibility of timebase frequency
* measurement at boot time.
* - for astronomical applications: add a new function to get
* non ambiguous timestamps even around leap seconds. This needs
* a new timestamp format and a good name.
*
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/cputime.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/of_clk.h>
#include <linux/suspend.h>
#include <linux/processor.h>
#include <linux/mc146818rtc.h>
#include <linux/platform_device.h>
#include <asm/trace.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/mce.h>
/* powerpc clocksource/clockevent code */
#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>
static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
.name = "timebase",
.rating = 400,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.mask = CLOCKSOURCE_MASK(64),
.read = timebase_read,
.vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
};
#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
EXPORT_SYMBOL_GPL(decrementer_max); /* for KVM HDEC */
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);
struct clock_event_device decrementer_clockevent = {
.name = "decrementer",
.rating = 200,
.irq = 0,
.set_next_event = decrementer_set_next_event,
.set_state_oneshot_stopped = decrementer_shutdown,
.set_state_shutdown = decrementer_shutdown,
.tick_resume = decrementer_shutdown,
.features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);
/*
* This always puts next_tb beyond now, so the clock event will never fire
* with the usual comparison, no need for a separate test for stopped.
*/
#define DEC_CLOCKEVENT_STOPPED ~0ULL
DEFINE_PER_CPU(u64, decrementers_next_tb) = DEC_CLOCKEVENT_STOPPED;
EXPORT_SYMBOL_GPL(decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);
#define XSEC_PER_SEC (1024*1024)
#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max)
#endif
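/*
* An "xsec" is apparently 1/2^20 of a second (XSEC_PER_SEC == 1024*1024),
* so SCALE_XSEC(xsec, max) computes xsec * max / 2^20; the 32-bit variant
* avoids a 64-bit divide by using mulhwu() on a pre-shifted value, since
* ((xsec << 12) * max) >> 32 == xsec * max / 2^20.
*/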
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime conversions */
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);
static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;
extern struct timezone sys_tz;
static long timezone_offset;
unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);
bool tb_invalid;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
* Read the SPURR on systems that have it, otherwise the PURR,
* or if that doesn't exist return the timebase value passed in.
*/
static inline unsigned long read_spurr(unsigned long tb)
{
if (cpu_has_feature(CPU_FTR_SPURR))
return mfspr(SPRN_SPURR);
if (cpu_has_feature(CPU_FTR_PURR))
return mfspr(SPRN_PURR);
return tb;
}
/*
* Account time for a transition between system, hard irq
* or soft irq state.
*/
static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
unsigned long now, unsigned long stime)
{
unsigned long stime_scaled = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
unsigned long nowscaled, deltascaled;
unsigned long utime, utime_scaled;
nowscaled = read_spurr(now);
deltascaled = nowscaled - acct->startspurr;
acct->startspurr = nowscaled;
utime = acct->utime - acct->utime_sspurr;
acct->utime_sspurr = acct->utime;
/*
* Because we don't read the SPURR on every kernel entry/exit,
* deltascaled includes both user and system SPURR ticks.
* Apportion these ticks to system SPURR ticks and user
* SPURR ticks in the same ratio as the system time (delta)
* and user time (udelta) values obtained from the timebase
* over the same interval. The system ticks get accounted here;
* the user ticks get saved up in paca->user_time_scaled to be
* used by account_process_tick.
*/
stime_scaled = stime;
utime_scaled = utime;
if (deltascaled != stime + utime) {
if (utime) {
stime_scaled = deltascaled * stime / (stime + utime);
utime_scaled = deltascaled - stime_scaled;
} else {
stime_scaled = deltascaled;
}
}
acct->utime_scaled += utime_scaled;
#endif
return stime_scaled;
}
static unsigned long vtime_delta(struct cpu_accounting_data *acct,
unsigned long *stime_scaled,
unsigned long *steal_time)
{
unsigned long now, stime;
WARN_ON_ONCE(!irqs_disabled());
now = mftb();
stime = now - acct->starttime;
acct->starttime = now;
*stime_scaled = vtime_delta_scaled(acct, now, stime);
if (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
firmware_has_feature(FW_FEATURE_SPLPAR))
*steal_time = pseries_calculate_stolen_time(now);
else
*steal_time = 0;
return stime;
}
static void vtime_delta_kernel(struct cpu_accounting_data *acct,
unsigned long *stime, unsigned long *stime_scaled)
{
unsigned long steal_time;
*stime = vtime_delta(acct, stime_scaled, &steal_time);
*stime -= min(*stime, steal_time);
acct->steal_time += steal_time;
}
void vtime_account_kernel(struct task_struct *tsk)
{
struct cpu_accounting_data *acct = get_accounting(tsk);
unsigned long stime, stime_scaled;
vtime_delta_kernel(acct, &stime, &stime_scaled);
if (tsk->flags & PF_VCPU) {
acct->gtime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
acct->utime_scaled += stime_scaled;
#endif
} else {
acct->stime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
acct->stime_scaled += stime_scaled;
#endif
}
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);
void vtime_account_idle(struct task_struct *tsk)
{
unsigned long stime, stime_scaled, steal_time;
struct cpu_accounting_data *acct = get_accounting(tsk);
stime = vtime_delta(acct, &stime_scaled, &steal_time);
acct->idle_time += stime + steal_time;
}
static void vtime_account_irq_field(struct cpu_accounting_data *acct,
unsigned long *field)
{
unsigned long stime, stime_scaled;
vtime_delta_kernel(acct, &stime, &stime_scaled);
*field += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
acct->stime_scaled += stime_scaled;
#endif
}
void vtime_account_softirq(struct task_struct *tsk)
{
struct cpu_accounting_data *acct = get_accounting(tsk);
vtime_account_irq_field(acct, &acct->softirq_time);
}
void vtime_account_hardirq(struct task_struct *tsk)
{
struct cpu_accounting_data *acct = get_accounting(tsk);
vtime_account_irq_field(acct, &acct->hardirq_time);
}
static void vtime_flush_scaled(struct task_struct *tsk,
struct cpu_accounting_data *acct)
{
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
if (acct->utime_scaled)
tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
if (acct->stime_scaled)
tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
acct->utime_scaled = 0;
acct->utime_sspurr = 0;
acct->stime_scaled = 0;
#endif
}
/*
* Account the whole cputime accumulated in the paca
* Must be called with interrupts disabled.
* Assumes that vtime_account_kernel/idle() has been called
* recently (i.e. since the last entry from usermode) so that
* get_paca()->user_time_scaled is up to date.
*/
void vtime_flush(struct task_struct *tsk)
{
struct cpu_accounting_data *acct = get_accounting(tsk);
if (acct->utime)
account_user_time(tsk, cputime_to_nsecs(acct->utime));
if (acct->gtime)
account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
account_steal_time(cputime_to_nsecs(acct->steal_time));
acct->steal_time = 0;
}
if (acct->idle_time)
account_idle_time(cputime_to_nsecs(acct->idle_time));
if (acct->stime)
account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
CPUTIME_SYSTEM);
if (acct->hardirq_time)
account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
CPUTIME_IRQ);
if (acct->softirq_time)
account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
CPUTIME_SOFTIRQ);
vtime_flush_scaled(tsk, acct);
acct->utime = 0;
acct->gtime = 0;
acct->idle_time = 0;
acct->stime = 0;
acct->hardirq_time = 0;
acct->softirq_time = 0;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
void __no_kcsan __delay(unsigned long loops)
{
unsigned long start;
spin_begin();
if (tb_invalid) {
/*
* TB is in error state and isn't ticking anymore.
* HMI handler was unable to recover from TB error.
* Return immediately, so that kernel won't get stuck here.
*/
spin_cpu_relax();
} else {
start = mftb();
while (mftb() - start < loops)
spin_cpu_relax();
}
spin_end();
}
EXPORT_SYMBOL(__delay);
void __no_kcsan udelay(unsigned long usecs)
{
__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
if (in_lock_functions(pc))
return regs->link;
return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
#ifdef CONFIG_IRQ_WORK
/*
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
*/
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
unsigned long x;
asm volatile("lbz %0,%1(13)"
: "=r" (x)
: "i" (offsetof(struct paca_struct, irq_work_pending)));
return x;
}
static inline void set_irq_work_pending_flag(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (1),
"i" (offsetof(struct paca_struct, irq_work_pending)));
}
static inline void clear_irq_work_pending(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (0),
"i" (offsetof(struct paca_struct, irq_work_pending)));
}
#else /* 32-bit */
DEFINE_PER_CPU(u8, irq_work_pending);
#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)
#endif /* 32 vs 64 bit */
void arch_irq_work_raise(void)
{
/*
* 64-bit code that uses irq soft-mask can just cause an immediate
* interrupt here that gets soft masked, if this is called under
* local_irq_disable(). It might be possible to prevent that happening
* by noticing interrupts are disabled and setting decrementer pending
* to be replayed when irqs are enabled. The problem there is that
* tracing can call irq_work_raise, including in code that does low
* level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
* which could get tangled up if we're messing with the same state
* here.
*/
preempt_disable();
set_irq_work_pending_flag();
set_dec(1);
preempt_enable();
}
static void set_dec_or_work(u64 val)
{
set_dec(val);
/* We may have raced with new irq work */
if (unlikely(test_irq_work_pending()))
set_dec(1);
}
#else /* CONFIG_IRQ_WORK */
#define test_irq_work_pending() 0
#define clear_irq_work_pending()
static void set_dec_or_work(u64 val)
{
set_dec(val);
}
#endif /* CONFIG_IRQ_WORK */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
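/*
* Re-arm the host decrementer for the next host event recorded in
* decrementers_next_tb (presumably called by the KVM HV code when the CPU
* is handed back to the host). If the next event is already due, mark a
* decrementer interrupt as pending in the PACA so it gets replayed;
* otherwise program the remaining time, clamped to decrementer_max.
*/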
void timer_rearm_host_dec(u64 now)
{
u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
WARN_ON_ONCE(!arch_irqs_disabled());
WARN_ON_ONCE(mfmsr() & MSR_EE);
if (now >= *next_tb) {
local_paca->irq_happened |= PACA_IRQ_DEC;
} else {
now = *next_tb - now;
if (now > decrementer_max)
now = decrementer_max;
set_dec_or_work(now);
}
}
EXPORT_SYMBOL_GPL(timer_rearm_host_dec);
#endif
/*
* timer_interrupt - gets called when the decrementer overflows,
* with interrupts disabled.
*/
DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
{
struct clock_event_device *evt = this_cpu_ptr(&decrementers);
u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
struct pt_regs *old_regs;
u64 now;
/*
* Some implementations of hotplug will get timer interrupts while
* offline, just ignore these.
*/
if (unlikely(!cpu_online(smp_processor_id()))) {
set_dec(decrementer_max);
return;
}
/* Conditionally hard-enable interrupts. */
if (should_hard_irq_enable(regs)) {
/*
* Ensure a positive value is written to the decrementer, or
* else some CPUs will continue to take decrementer exceptions.
* When the PPC_WATCHDOG (decrementer based) is configured,
* keep this at most 31 bits, which is about 4 seconds on most
* systems, which gives the watchdog a chance of catching timer
* interrupt hard lockups.
*/
if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
set_dec(0x7fffffff);
else
set_dec(decrementer_max);
do_hard_irq_enable();
}
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
__do_IRQ(regs);
#endif
old_regs = set_irq_regs(regs);
trace_timer_interrupt_entry(regs);
if (test_irq_work_pending()) {
clear_irq_work_pending();
mce_run_irq_context_handlers();
irq_work_run();
}
now = get_tb();
if (now >= *next_tb) {
evt->event_handler(evt);
__this_cpu_inc(irq_stat.timer_irqs_event);
} else {
now = *next_tb - now;
if (now > decrementer_max)
now = decrementer_max;
set_dec_or_work(now);
__this_cpu_inc(irq_stat.timer_irqs_others);
}
trace_timer_interrupt_exit(regs);
set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void timer_broadcast_interrupt(void)
{
tick_receive_broadcast();
__this_cpu_inc(irq_stat.broadcast_irqs_event);
}
#endif
#ifdef CONFIG_SUSPEND
/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
if (ppc_md.suspend_disable_irqs)
ppc_md.suspend_disable_irqs();
/* Disable the decrementer, so that it doesn't interfere
* with suspending.
*/
set_dec(decrementer_max);
local_irq_disable();
set_dec(decrementer_max);
}
/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
local_irq_enable();
if (ppc_md.suspend_enable_irqs)
ppc_md.suspend_enable_irqs();
}
#endif
unsigned long long tb_to_ns(unsigned long long ticks)
{
return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);
/*
* Scheduler clock - returns current time in nanosec units.
*
* Note: mulhdu(a, b) (multiply high double unsigned) returns
* the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
* are 64-bit unsigned numbers.
*/
notrace unsigned long long sched_clock(void)
{
return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
#ifdef CONFIG_PPC_PSERIES
/*
* Running clock - attempts to give a view of time passing for a virtualised
* kernel.
* Uses the VTB register if available, otherwise the next best guess.
*/
unsigned long long running_clock(void)
{
/*
* Don't read the VTB as a host since KVM does not switch in host
* timebase into the VTB when it takes a guest off the CPU; reading the
* VTB would result in reading 'last switched out' guest VTB.
*
* Host kernels are often compiled with CONFIG_PPC_PSERIES checked, so it
* would be unsafe to rely only on the #ifdef above.
*/
if (firmware_has_feature(FW_FEATURE_LPAR) &&
cpu_has_feature(CPU_FTR_ARCH_207S))
return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
/*
* This is the next best approximation without a VTB.
* On a host which is running bare metal there should never be any stolen
* time and on a host which doesn't do any virtualisation TB *should* equal
* VTB so it makes no difference anyway.
*/
return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
#endif
static int __init get_freq(char *name, int cells, unsigned long *val)
{
struct device_node *cpu;
const __be32 *fp;
int found = 0;
/* The cpu node should have timebase and clock frequency properties */
cpu = of_find_node_by_type(NULL, "cpu");
if (cpu) {
fp = of_get_property(cpu, name, NULL);
if (fp) {
found = 1;
*val = of_read_ulong(fp, cells);
}
of_node_put(cpu);
}
return found;
}
static void start_cpu_decrementer(void)
{
#ifdef CONFIG_BOOKE_OR_40x
unsigned int tcr;
/* Clear any pending timer interrupts */
mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
tcr = mfspr(SPRN_TCR);
/*
* The watchdog may have already been enabled by u-boot. So leave
* TCR[WP] (Watchdog Period) alone.
*/
tcr &= TCR_WP_MASK; /* Clear all bits except for TCR[WP] */
tcr |= TCR_DIE; /* Enable decrementer */
mtspr(SPRN_TCR, tcr);
#endif
}
void __init generic_calibrate_decr(void)
{
ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
!get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
printk(KERN_ERR "WARNING: Estimating decrementer frequency "
"(not found)\n");
}
ppc_proc_freq = DEFAULT_PROC_FREQ; /* hardcoded default */
if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
!get_freq("clock-frequency", 1, &ppc_proc_freq)) {
printk(KERN_ERR "WARNING: Estimating processor frequency "
"(not found)\n");
}
}
int update_persistent_clock64(struct timespec64 now)
{
struct rtc_time tm;
if (!ppc_md.set_rtc_time)
return -ENODEV;
rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);
return ppc_md.set_rtc_time(&tm);
}
static void __read_persistent_clock(struct timespec64 *ts)
{
struct rtc_time tm;
static int first = 1;
ts->tv_nsec = 0;
/* XXX this is a little fragile but will work okay in the short term */
if (first) {
first = 0;
if (ppc_md.time_init)
timezone_offset = ppc_md.time_init();
/* get_boot_time() isn't guaranteed to be safe to call late */
if (ppc_md.get_boot_time) {
ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
return;
}
}
if (!ppc_md.get_rtc_time) {
ts->tv_sec = 0;
return;
}
ppc_md.get_rtc_time(&tm);
ts->tv_sec = rtc_tm_to_time64(&tm);
}
void read_persistent_clock64(struct timespec64 *ts)
{
__read_persistent_clock(ts);
/* Sanitize it in case real time clock is set below EPOCH */
if (ts->tv_sec < 0) {
ts->tv_sec = 0;
ts->tv_nsec = 0;
}
}
/* clocksource code */
static notrace u64 timebase_read(struct clocksource *cs)
{
return (u64)get_tb();
}
static void __init clocksource_init(void)
{
struct clocksource *clock = &clocksource_timebase;
if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
printk(KERN_ERR "clocksource: %s is already registered\n",
clock->name);
return;
}
printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
clock->name, clock->mult, clock->shift);
}
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
__this_cpu_write(decrementers_next_tb, get_tb() + evt);
set_dec_or_work(evt);
return 0;
}
static int decrementer_shutdown(struct clock_event_device *dev)
{
__this_cpu_write(decrementers_next_tb, DEC_CLOCKEVENT_STOPPED);
set_dec_or_work(decrementer_max);
return 0;
}
static void register_decrementer_clockevent(int cpu)
{
struct clock_event_device *dec = &per_cpu(decrementers, cpu);
*dec = decrementer_clockevent;
dec->cpumask = cpumask_of(cpu);
clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);
printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
dec->name, dec->mult, dec->shift, cpu);
/* Set values for KVM, see kvm_emulate_dec() */
decrementer_clockevent.mult = dec->mult;
decrementer_clockevent.shift = dec->shift;
}
static void enable_large_decrementer(void)
{
if (!cpu_has_feature(CPU_FTR_ARCH_300))
return;
if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
return;
/*
* If we're running as the hypervisor we need to enable the LD manually
* otherwise firmware should have done it for us.
*/
if (cpu_has_feature(CPU_FTR_HVMODE))
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
}
static void __init set_decrementer_max(void)
{
struct device_node *cpu;
u32 bits = 32;
/* Prior to ISAv3 the decrementer is always 32 bit */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
return;
cpu = of_find_node_by_type(NULL, "cpu");
if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
if (bits > 64 || bits < 32) {
pr_warn("time_init: firmware supplied invalid ibm,dec-bits");
bits = 32;
}
/* calculate the signed maximum given this many bits */
decrementer_max = (1ul << (bits - 1)) - 1;
}
of_node_put(cpu);
pr_info("time_init: %u bit decrementer (max: %llx)\n",
bits, decrementer_max);
}
static void __init init_decrementer_clockevent(void)
{
register_decrementer_clockevent(smp_processor_id());
}
void secondary_cpu_time_init(void)
{
/* Enable and test the large decrementer for this cpu */
enable_large_decrementer();
/* Start the decrementer on CPUs that have manual control
* such as BookE
*/
start_cpu_decrementer();
/* FIXME: Should make an unrelated change to move the snapshot_timebase
* call here! */
register_decrementer_clockevent(smp_processor_id());
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
struct div_result res;
u64 scale;
unsigned shift;
/* Normal PowerPC with timebase register */
if (ppc_md.calibrate_decr)
ppc_md.calibrate_decr();
else
generic_calibrate_decr();
printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
tb_ticks_per_jiffy = ppc_tb_freq / HZ;
tb_ticks_per_sec = ppc_tb_freq;
tb_ticks_per_usec = ppc_tb_freq / 1000000;
/*
* Compute scale factor for sched_clock.
* The calibrate_decr() function has set tb_ticks_per_sec,
* which is the timebase frequency.
* We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
* the 128-bit result as a 64.64 fixed-point number.
* We then shift that number right until it is less than 1.0,
* giving us the scale factor and shift count to use in
* sched_clock().
*/
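/*
* Worked example (assuming a 512 MHz timebase, a common value): 1e9 / 512e6
* is 1.953125, so div128_by_32() returns result_high = 1 and the loop below
* shifts once, leaving tb_to_ns_scale = 0xfa00000000000000 (0.9765625 * 2^64)
* and tb_to_ns_shift = 1. sched_clock() then computes
* mulhdu(ticks, scale) << 1 == ticks * 1.953125, i.e. roughly 1.95 ns per
* timebase tick, as expected for 512 MHz.
*/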
div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
scale = res.result_low;
for (shift = 0; res.result_high != 0; ++shift) {
scale = (scale >> 1) | (res.result_high << 63);
res.result_high >>= 1;
}
tb_to_ns_scale = scale;
tb_to_ns_shift = shift;
/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
boot_tb = get_tb();
/* If platform provided a timezone (pmac), we correct the time */
if (timezone_offset) {
sys_tz.tz_minuteswest = -timezone_offset / 60;
sys_tz.tz_dsttime = 0;
}
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
/* initialise and enable the large decrementer (if we have one) */
set_decrementer_max();
enable_large_decrementer();
/* Start the decrementer on CPUs that have manual control
* such as BookE
*/
start_cpu_decrementer();
/* Register the clocksource */
clocksource_init();
init_decrementer_clockevent();
tick_setup_hrtimer_broadcast();
of_clk_init(NULL);
enable_sched_clock_irqtime();
}
/*
* Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
* result.
*/
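/*
* This is schoolbook long division in base 2^32: w, x, y and z become the
* four 32-bit digits of the quotient, while ra, rb and rc carry the running
* remainder combined with the next dividend digit (b, c, d) at each step.
*/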
void div128_by_32(u64 dividend_high, u64 dividend_low,
unsigned divisor, struct div_result *dr)
{
unsigned long a, b, c, d;
unsigned long w, x, y, z;
u64 ra, rb, rc;
a = dividend_high >> 32;
b = dividend_high & 0xffffffff;
c = dividend_low >> 32;
d = dividend_low & 0xffffffff;
w = a / divisor;
ra = ((u64)(a - (w * divisor)) << 32) + b;
rb = ((u64) do_div(ra, divisor) << 32) + c;
x = ra;
rc = ((u64) do_div(rb, divisor) << 32) + d;
y = rb;
do_div(rc, divisor);
z = rc;
dr->result_high = ((u64)w << 32) + x;
dr->result_low = ((u64)y << 32) + z;
}
/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
/* Some generic code (such as spinlock debug) use loops_per_jiffy
* as the number of __delay(1) in a jiffy, so make it so
*/
loops_per_jiffy = tb_ticks_per_jiffy;
}
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
ppc_md.get_rtc_time(tm);
return 0;
}
static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
if (!ppc_md.set_rtc_time)
return -EOPNOTSUPP;
if (ppc_md.set_rtc_time(tm) < 0)
return -EOPNOTSUPP;
return 0;
}
static const struct rtc_class_ops rtc_generic_ops = {
.read_time = rtc_generic_get_time,
.set_time = rtc_generic_set_time,
};
static int __init rtc_init(void)
{
struct platform_device *pdev;
if (!ppc_md.get_rtc_time)
return -ENODEV;
pdev = platform_device_register_data(NULL, "rtc-generic", -1,
&rtc_generic_ops,
sizeof(rtc_generic_ops));
return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(rtc_init);
#endif
| linux-master | arch/powerpc/kernel/time.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Common boot and setup code.
*
* Copyright (C) 2001 PPC64 Team, IBM Corp
*/
#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>
#include <linux/pgtable.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/asm-prototypes.h>
#include <asm/kvm_guest.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>
#include <asm/early_ioremap.h>
#include <asm/pgalloc.h>
#include "setup.h"
int spinning_secondaries;
u64 ppc64_pft_size;
struct ppc64_caches ppc64_caches = {
.l1d = {
.block_size = 0x40,
.log_block_size = 6,
},
.l1i = {
.block_size = 0x40,
.log_block_size = 6
},
};
EXPORT_SYMBOL_GPL(ppc64_caches);
#if defined(CONFIG_PPC_BOOK3E_64) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
int cpu;
BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);
for_each_possible_cpu(cpu) {
int first = cpu_first_thread_sibling(cpu);
/*
* If we boot via kdump on a non-primary thread,
* make sure we point at the thread that actually
* set up this TLB.
*/
if (cpu_first_thread_sibling(boot_cpuid) == first)
first = boot_cpuid;
paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;
/*
* If we have threads, we need either tlbsrx.
* or e6500 tablewalk mode, or else TLB handlers
* will be racy and could produce duplicate entries.
* Should we panic instead?
*/
WARN_ONCE(smt_enabled_at_boot >= 2 &&
book3e_htw_mode != PPC_HTW_E6500,
"%s: unsupported MMU configuration\n", __func__);
}
}
#endif
#ifdef CONFIG_SMP
static char *smt_enabled_cmdline;
/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
struct device_node *dn;
const char *smt_option;
/* Default to enabling all threads */
smt_enabled_at_boot = threads_per_core;
/* Allow the command line to overrule the OF option */
if (smt_enabled_cmdline) {
if (!strcmp(smt_enabled_cmdline, "on"))
smt_enabled_at_boot = threads_per_core;
else if (!strcmp(smt_enabled_cmdline, "off"))
smt_enabled_at_boot = 0;
else {
int smt;
int rc;
rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
if (!rc)
smt_enabled_at_boot =
min(threads_per_core, smt);
}
} else {
dn = of_find_node_by_path("/options");
if (dn) {
smt_option = of_get_property(dn, "ibm,smt-enabled",
NULL);
if (smt_option) {
if (!strcmp(smt_option, "on"))
smt_enabled_at_boot = threads_per_core;
else if (!strcmp(smt_option, "off"))
smt_enabled_at_boot = 0;
}
of_node_put(dn);
}
}
}
/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
smt_enabled_cmdline = p;
return 0;
}
early_param("smt-enabled", early_smt_enabled);
#endif /* CONFIG_SMP */
/** Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(struct paca_struct *boot_paca)
{
/* The boot cpu is started */
boot_paca->cpu_start = 1;
#ifdef CONFIG_PPC_BOOK3S_64
/*
* Give the early boot machine check stack somewhere to use, use
* half of the init stack. This is a bit hacky but there should not be
* deep stack usage in early init so we shouldn't overflow it or overwrite
* things.
*/
boot_paca->mc_emergency_sp = (void *)&init_thread_union +
(THREAD_SIZE/2);
#endif
/* Allow percpu accesses to work until we setup percpu data */
boot_paca->data_offset = 0;
/* Mark interrupts soft and hard disabled in PACA */
boot_paca->irq_soft_mask = IRQS_DISABLED;
boot_paca->irq_happened = PACA_IRQ_HARD_DIS;
WARN_ON(mfmsr() & MSR_EE);
}
static void __init configure_exceptions(void)
{
/*
* Setup the trampolines from the lowmem exception vectors
* to the kdump kernel when not using a relocatable kernel.
*/
setup_kdump_trampoline();
/* Under a PAPR hypervisor, we need hypercalls */
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
/*
* - PR KVM does not support AIL mode interrupts in the host
* while a PR guest is running.
*
* - SCV system call interrupt vectors are only implemented for
* AIL mode interrupts.
*
* - On pseries, AIL mode can only be enabled and disabled
* system-wide so when a PR VM is created on a pseries host,
* all CPUs of the host are set to AIL=0 mode.
*
* - Therefore host CPUs must not execute scv while a PR VM
* exists.
*
* - SCV support can not be disabled dynamically because the
* feature is advertised to host userspace. Disabling the
* facility and emulating it would be possible but is not
* implemented.
*
* - So SCV support is blanket disabled if PR KVM could possibly
* run. That is, PR support compiled in, booting on pseries
* with hash MMU.
*/
if (IS_ENABLED(CONFIG_KVM_BOOK3S_PR_POSSIBLE) && !radix_enabled()) {
init_task.thread.fscr &= ~FSCR_SCV;
cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
}
/* Enable AIL if possible */
if (!pseries_enable_reloc_on_exc()) {
init_task.thread.fscr &= ~FSCR_SCV;
cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
}
/*
* Tell the hypervisor that we want our exceptions to
* be taken in little endian mode.
*
* We don't call this for big endian as our calling convention
* makes us always enter in BE, and the call may fail under
* some circumstances with kdump.
*/
#ifdef __LITTLE_ENDIAN__
pseries_little_endian_exceptions();
#endif
} else {
/* Set endian mode using OPAL */
if (firmware_has_feature(FW_FEATURE_OPAL))
opal_configure_cores();
/* AIL on native is done in cpu_ready_for_interrupts() */
}
}
static void cpu_ready_for_interrupts(void)
{
/*
* Enable AIL if supported, and we are in hypervisor mode. This
* is called once for every processor.
*
* If we are not in hypervisor mode the job is done once for
* the whole partition in configure_exceptions().
*/
if (cpu_has_feature(CPU_FTR_HVMODE)) {
unsigned long lpcr = mfspr(SPRN_LPCR);
unsigned long new_lpcr = lpcr;
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
/* P10 DD1 does not have HAIL */
if (pvr_version_is(PVR_POWER10) &&
(mfspr(SPRN_PVR) & 0xf00) == 0x100)
new_lpcr |= LPCR_AIL_3;
else
new_lpcr |= LPCR_HAIL;
} else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
new_lpcr |= LPCR_AIL_3;
}
if (new_lpcr != lpcr)
mtspr(SPRN_LPCR, new_lpcr);
}
/*
* Set HFSCR:TM based on CPU features:
* In the special case of TM no suspend (P9N DD2.1), Linux is
* told TM is off via the dt-ftrs but told to (partially) use
* it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
* will be off from dt-ftrs but we need to turn it on for the
* no suspend case.
*/
if (cpu_has_feature(CPU_FTR_HVMODE)) {
if (cpu_has_feature(CPU_FTR_TM_COMP))
mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
else
mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
}
/* Set IR and DR in PACA MSR */
get_paca()->kernel_msr = MSR_KERNEL;
}
unsigned long spr_default_dscr = 0;
static void __init record_spr_defaults(void)
{
if (early_cpu_has_feature(CPU_FTR_DSCR))
spr_default_dscr = mfspr(SPRN_DSCR);
}
/*
* Early initialization entry point. This is called by head.S
* with MMU translation disabled. We rely on the "feature" of
* the CPU that ignores the top 2 bits of the address in real
* mode so we can access kernel globals normally provided we
* only toy with things in the RMO region. From here, we do
* some early parsing of the device-tree to set up our MEMBLOCK
* data structures, and allocate & initialize the hash table
* and segment tables so we can start running with translation
* enabled.
*
* It is this function which will call the probe() callback of
* the various platform types and copy the matching one to the
* global ppc_md structure. Your platform can eventually do
* some very early initializations from the probe() routine, but
* this is not recommended, be very careful as, for example, the
* device-tree is not accessible via normal means at this point.
*/
void __init early_setup(unsigned long dt_ptr)
{
static __initdata struct paca_struct boot_paca;
/* -------- printk is _NOT_ safe to use here ! ------- */
/*
* Assume we're on cpu 0 for now.
*
* We need to load a PACA very early for a few reasons.
*
* The stack protector canary is stored in the paca, so as soon as we
* call any stack protected code we need r13 pointing somewhere valid.
*
* If we are using kcov it will call in_task() in its instrumentation,
* which relies on the current task from the PACA.
*
* dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
* printk(), which can trigger both stack protector and kcov.
*
* percpu variables and spin locks also use the paca.
*
* So set up a temporary paca. It will be replaced below once we know
* what CPU we are on.
*/
initialise_paca(&boot_paca, 0);
fixup_boot_paca(&boot_paca);
WARN_ON(local_paca != 0);
setup_paca(&boot_paca); /* install the paca into registers */
/* -------- printk is now safe to use ------- */
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && (mfmsr() & MSR_HV))
enable_machine_check();
/* Try new device tree based feature discovery ... */
if (!dt_cpu_ftrs_init(__va(dt_ptr)))
/* Otherwise use the old style CPU table */
identify_cpu(0, mfspr(SPRN_PVR));
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
udbg_printf(" -> %s(), dt_ptr: 0x%lx\n", __func__, dt_ptr);
/*
* Do early initialization using the flattened device
* tree, such as retrieving the physical memory map or
* calculating/retrieving the hash table size, discover
* boot_cpuid and boot_cpu_hwid.
*/
early_init_devtree(__va(dt_ptr));
allocate_paca_ptrs();
allocate_paca(boot_cpuid);
set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);
fixup_boot_paca(paca_ptrs[boot_cpuid]);
setup_paca(paca_ptrs[boot_cpuid]); /* install the paca into registers */
// smp_processor_id() now reports boot_cpuid
#ifdef CONFIG_SMP
task_thread_info(current)->cpu = boot_cpuid; // fix task_cpu(current)
#endif
/*
* Configure exception handlers. This include setting up trampolines
* if needed, setting exception endian mode, etc...
*/
configure_exceptions();
/*
* Configure Kernel Userspace Protection. This needs to happen before
* feature fixups for platforms that implement this using features.
*/
setup_kup();
/* Apply all the dynamic patching */
apply_feature_fixups();
setup_feature_keys();
/* Initialize the hash table or TLB handling */
early_init_mmu();
early_ioremap_setup();
/*
* After firmware and early platform setup code has set things up,
* we note the SPR values for configurable control/performance
* registers, and use those as initial defaults.
*/
record_spr_defaults();
/*
* At this point, we can let interrupts switch to virtual mode
* (the MMU has been setup), so adjust the MSR in the PACA to
* have IR and DR set and enable AIL if it exists
*/
cpu_ready_for_interrupts();
/*
* We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
* will only actually get enabled on the boot cpu much later once
* ftrace itself has been initialized.
*/
this_cpu_enable_ftrace();
udbg_printf(" <- %s()\n", __func__);
#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
/*
* This needs to be done *last* (after the above udbg_printf() even)
*
* Right after we return from this function, we turn on the MMU
* which means the real-mode access trick that btext does will
* no longer work, it needs to switch to using a real MMU
* mapping. This call will ensure that it does
*/
btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
/* Mark interrupts disabled in PACA */
irq_soft_mask_set(IRQS_DISABLED);
/* Initialize the hash table or TLB handling */
early_init_mmu_secondary();
/* Perform any KUP setup that is per-cpu */
setup_kup();
/*
* At this point, we can let interrupts switch to virtual mode
* (the MMU has been setup), so adjust the MSR in the PACA to
* have IR and DR set.
*/
cpu_ready_for_interrupts();
}
#endif /* CONFIG_SMP */
void __noreturn panic_smp_self_stop(void)
{
hard_irq_disable();
spin_begin();
while (1)
spin_cpu_relax();
}
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
/*
* See comments in head_64.S -- not all platforms insert
* secondaries at __secondary_hold and wait at the spin
* loop.
*/
if (firmware_has_feature(FW_FEATURE_OPAL))
return false;
return true;
}
/*
* When book3e boots from kexec, the ePAPR spin table does
* not get used.
*/
return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}
void smp_release_cpus(void)
{
unsigned long *ptr;
int i;
if (!use_spinloop())
return;
/* All secondary cpus are spinning on a common spinloop, release them
* all now so they can start to spin on their individual paca
* spinloops. For non SMP kernels, the secondary cpus never get out
* of the common spinloop.
*/
ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
- PHYSICAL_START);
*ptr = ppc_function_entry(generic_secondary_smp_init);
/* And wait a bit for them to catch up */
for (i = 0; i < 100000; i++) {
mb();
HMT_low();
if (spinning_secondaries == 0)
break;
udelay(1);
}
pr_debug("spinning_secondaries = %d\n", spinning_secondaries);
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
/*
* Initialize some remaining members of the ppc64_caches and systemcfg
* structures (at least until we get rid of them completely). This is
* mostly cache information about the CPU that will be used by cache
* flush routines and/or provided to userland.
*/
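/*
* Fill in one ppc_cache_info from raw size/line/block/sets values. A sets
* value of 0 is taken to mean fully associative (assoc is then set to
* 0xffff), matching the convention established by parse_cache_info() below.
*/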
static void __init init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
u32 bsize, u32 sets)
{
info->size = size;
info->sets = sets;
info->line_size = lsize;
info->block_size = bsize;
info->log_block_size = __ilog2(bsize);
if (bsize)
info->blocks_per_page = PAGE_SIZE / bsize;
else
info->blocks_per_page = 0;
if (sets == 0)
info->assoc = 0xffff;
else
info->assoc = size / (sets * lsize);
}
static bool __init parse_cache_info(struct device_node *np,
bool icache,
struct ppc_cache_info *info)
{
static const char *ipropnames[] __initdata = {
"i-cache-size",
"i-cache-sets",
"i-cache-block-size",
"i-cache-line-size",
};
static const char *dpropnames[] __initdata = {
"d-cache-size",
"d-cache-sets",
"d-cache-block-size",
"d-cache-line-size",
};
const char **propnames = icache ? ipropnames : dpropnames;
const __be32 *sizep, *lsizep, *bsizep, *setsp;
u32 size, lsize, bsize, sets;
bool success = true;
size = 0;
sets = -1u;
lsize = bsize = cur_cpu_spec->dcache_bsize;
sizep = of_get_property(np, propnames[0], NULL);
if (sizep != NULL)
size = be32_to_cpu(*sizep);
setsp = of_get_property(np, propnames[1], NULL);
if (setsp != NULL)
sets = be32_to_cpu(*setsp);
bsizep = of_get_property(np, propnames[2], NULL);
lsizep = of_get_property(np, propnames[3], NULL);
if (bsizep == NULL)
bsizep = lsizep;
if (lsizep == NULL)
lsizep = bsizep;
if (lsizep != NULL)
lsize = be32_to_cpu(*lsizep);
if (bsizep != NULL)
bsize = be32_to_cpu(*bsizep);
if (sizep == NULL || bsizep == NULL || lsizep == NULL)
success = false;
/*
* OF is weird .. it represents fully associative caches
* as "1 way" which doesn't make much sense and doesn't
* leave room for direct mapped. We'll assume that 0
* in OF means direct mapped for that reason.
*/
if (sets == 1)
sets = 0;
else if (sets == 0)
sets = 1;
init_cache_info(info, size, lsize, bsize, sets);
return success;
}
void __init initialize_cache_info(void)
{
struct device_node *cpu = NULL, *l2, *l3 = NULL;
u32 pvr;
/*
* All shipping POWER8 machines have a firmware bug that
* puts incorrect information in the device-tree. This will
* be (hopefully) fixed for future chips but for now hard-code
* the values if we are running on one of these.
*/
pvr = PVR_VER(mfspr(SPRN_PVR));
if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
pvr == PVR_POWER8NVL) {
/* size lsize blk sets */
init_cache_info(&ppc64_caches.l1i, 0x8000, 128, 128, 32);
init_cache_info(&ppc64_caches.l1d, 0x10000, 128, 128, 64);
init_cache_info(&ppc64_caches.l2, 0x80000, 128, 0, 512);
init_cache_info(&ppc64_caches.l3, 0x800000, 128, 0, 8192);
} else
cpu = of_find_node_by_type(NULL, "cpu");
/*
* We're assuming *all* of the CPUs have the same
* d-cache and i-cache sizes... -Peter
*/
if (cpu) {
if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
pr_warn("Argh, can't find dcache properties !\n");
if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
pr_warn("Argh, can't find icache properties !\n");
/*
* Try to find the L2 and L3 if any. Assume they are
* unified and use the D-side properties.
*/
l2 = of_find_next_cache_node(cpu);
of_node_put(cpu);
if (l2) {
parse_cache_info(l2, false, &ppc64_caches.l2);
l3 = of_find_next_cache_node(l2);
of_node_put(l2);
}
if (l3) {
parse_cache_info(l3, false, &ppc64_caches.l3);
of_node_put(l3);
}
}
/* For use by binfmt_elf */
dcache_bsize = ppc64_caches.l1d.block_size;
icache_bsize = ppc64_caches.l1i.block_size;
cur_cpu_spec->dcache_bsize = dcache_bsize;
cur_cpu_spec->icache_bsize = icache_bsize;
}
/*
* This returns the limit below which memory accesses to the linear
* mapping are guaranteed not to cause an architectural exception (e.g.,
* TLB or SLB miss fault).
*
* This is used to allocate PACAs and various interrupt stacks that
* are accessed early in interrupt handlers that must not cause
* re-entrant interrupts.
*/
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E_64
/* Freescale BookE bolts the entire linear mapping */
/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
return linear_map_top;
/* Other BookE, we assume the first GB is bolted */
return 1ul << 30;
#else
/* BookS radix, does not take faults on linear mapping */
if (early_radix_enabled())
return ULONG_MAX;
/* BookS hash, the first segment is bolted */
if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
return 1UL << SID_SHIFT_1T;
return 1UL << SID_SHIFT;
#endif
}
static void *__init alloc_stack(unsigned long limit, int cpu)
{
void *ptr;
BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_ALIGN,
MEMBLOCK_LOW_LIMIT, limit,
early_cpu_to_node(cpu));
if (!ptr)
panic("cannot allocate stacks");
return ptr;
}
void __init irqstack_early_init(void)
{
u64 limit = ppc64_bolted_size();
unsigned int i;
/*
* Interrupt stacks must be in the first segment since we
* cannot afford to take SLB misses on them. They are not
* accessed in realmode.
*/
for_each_possible_cpu(i) {
softirq_ctx[i] = alloc_stack(limit, i);
hardirq_ctx[i] = alloc_stack(limit, i);
}
}
#ifdef CONFIG_PPC_BOOK3E_64
void __init exc_lvl_early_init(void)
{
unsigned int i;
for_each_possible_cpu(i) {
void *sp;
sp = alloc_stack(ULONG_MAX, i);
critirq_ctx[i] = sp;
paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;
sp = alloc_stack(ULONG_MAX, i);
dbgirq_ctx[i] = sp;
paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;
sp = alloc_stack(ULONG_MAX, i);
mcheckirq_ctx[i] = sp;
paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
}
if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
patch_exception(0x040, exc_debug_debug_book3e);
}
#endif
/*
* Stack space used when we detect a bad kernel stack pointer, and
* early in SMP boots before relocation is enabled. Exclusive emergency
* stack for machine checks.
*/
void __init emergency_stack_init(void)
{
u64 limit, mce_limit;
unsigned int i;
/*
* Emergency stacks must be under 256MB, we cannot afford to take
* SLB misses on them. The ABI also requires them to be 128-byte
* aligned.
*
* Since we use these as temporary stacks during secondary CPU
* bringup, machine check, system reset, and HMI, we need to get
* at them in real mode. This means they must also be within the RMO
* region.
*
* The IRQ stacks allocated elsewhere in this file are zeroed and
* initialized in kernel/irq.c. These are initialized here in order
* to have emergency stacks available as early as possible.
*/
limit = mce_limit = min(ppc64_bolted_size(), ppc64_rma_size);
/*
* Machine check on pseries calls rtas, but can't use the static
* rtas_args due to a machine check hitting while the lock is held.
* rtas args have to be under 4GB, so the machine check stack is
* limited to 4GB so args can be put on stack.
*/
if (firmware_has_feature(FW_FEATURE_LPAR) && mce_limit > SZ_4G)
mce_limit = SZ_4G;
for_each_possible_cpu(i) {
paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
#ifdef CONFIG_PPC_BOOK3S_64
/* emergency stack for NMI exception handling. */
paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
/* emergency stack for machine check exception handling. */
paca_ptrs[i]->mc_emergency_sp = alloc_stack(mce_limit, i) + THREAD_SIZE;
#endif
}
}
#ifdef CONFIG_SMP
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
if (early_cpu_to_node(from) == early_cpu_to_node(to))
return LOCAL_DISTANCE;
else
return REMOTE_DISTANCE;
}
static __init int pcpu_cpu_to_node(int cpu)
{
return early_cpu_to_node(cpu);
}
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
void __init setup_per_cpu_areas(void)
{
const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
size_t atom_size;
unsigned long delta;
unsigned int cpu;
int rc = -EINVAL;
/*
* BookE and BookS radix are historical values and should be revisited.
*/
if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
atom_size = SZ_1M;
} else if (radix_enabled()) {
atom_size = PAGE_SIZE;
} else if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU)) {
/*
* Linear mapping is one of 4K, 1M and 16M. For 4K, no need
* to group units. For larger mappings, use 1M atom which
* should be large enough to contain a number of units.
*/
if (mmu_linear_psize == MMU_PAGE_4K)
atom_size = PAGE_SIZE;
else
atom_size = SZ_1M;
}
if (pcpu_chosen_fc != PCPU_FC_PAGE) {
rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
pcpu_cpu_to_node);
if (rc)
pr_warn("PERCPU: %s allocator failed (%d), "
"falling back to page size\n",
pcpu_fc_names[pcpu_chosen_fc], rc);
}
if (rc < 0)
rc = pcpu_page_first_chunk(0, pcpu_cpu_to_node);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu) {
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
}
}
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
unsigned long memory_block_size_bytes(void)
{
if (ppc_md.memory_block_size)
return ppc_md.memory_block_size();
return MIN_MEMORY_BLOCK_SIZE;
}
#endif
#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
return ppc_proc_freq * watchdog_thresh;
}
#endif
/*
* The perf based hardlockup detector breaks PMU event based branches, so
* disable it by default. Book3S has a soft-nmi hardlockup detector based
* on the decrementer interrupt, so it does not suffer from this problem.
*
* It is likely to get false positives in KVM guests, so disable it there
* by default too. PowerVM will not stop or arbitrarily oversubscribe
* CPUs, but will give a minimum regular allotment even with SPLPAR, so the
* detector is kept enabled for non-KVM guests, which are assumed to be PowerVM.
*/
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
hardlockup_detector_disable();
#else
if (firmware_has_feature(FW_FEATURE_LPAR)) {
if (is_kvm_guest())
hardlockup_detector_disable();
}
#endif
return 0;
}
early_initcall(disable_hardlockup_detector);
| linux-master | arch/powerpc/kernel/setup_64.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2010 Michael Ellerman, IBM Corp.
*/
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <asm/code-patching.h>
#include <asm/inst.h>
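/*
* Patch a jump label site: install a relative branch to the target when the
* label is enabled (JUMP_LABEL_JMP), or a NOP when it is disabled.
*/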
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
u32 *addr = (u32 *)jump_entry_code(entry);
if (type == JUMP_LABEL_JMP)
patch_branch(addr, jump_entry_target(entry), 0);
else
patch_instruction(addr, ppc_inst(PPC_RAW_NOP()));
}
| linux-master | arch/powerpc/kernel/jump_label.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/serial_reg.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/early_ioremap.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(fmt); } while(0)
#else
#define DBG(fmt...) do { } while(0)
#endif
#define MAX_LEGACY_SERIAL_PORTS 8
static struct plat_serial8250_port
legacy_serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
static struct legacy_serial_info {
struct device_node *np;
unsigned int speed;
unsigned int clock;
int irq_check_parent;
phys_addr_t taddr;
void __iomem *early_addr;
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
static const struct of_device_id legacy_serial_parents[] __initconst = {
{.type = "soc",},
{.type = "tsi-bridge",},
{.type = "opb", },
{.compatible = "ibm,opb",},
{.compatible = "simple-bus",},
{.compatible = "wrs,epld-localbus",},
{},
};
static unsigned int legacy_serial_count;
static int legacy_serial_console = -1;
static const upf_t legacy_port_flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_SHARE_IRQ | UPF_FIXED_PORT;
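/*
* Register accessors for UARTs behind a Tsi-style bridge which, judging
* from the code below, needs IIR to be read as part of an aligned 32-bit
* access and must not have the UUE bit written to IER.
*/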
static unsigned int tsi_serial_in(struct uart_port *p, int offset)
{
unsigned int tmp;
offset = offset << p->regshift;
if (offset == UART_IIR) {
tmp = readl(p->membase + (UART_IIR & ~3));
return (tmp >> 16) & 0xff; /* UART_IIR % 4 == 2 */
} else
return readb(p->membase + offset);
}
static void tsi_serial_out(struct uart_port *p, int offset, int value)
{
offset = offset << p->regshift;
if (!((offset == UART_IER) && (value & UART_IER_UUE)))
writeb(value, p->membase + offset);
}
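/*
* Record one legacy (8250-compatible) UART in legacy_serial_ports[] and
* legacy_serial_infos[]. want_index requests a fixed slot if it is in
* range; -1 (or any out-of-range value) means append. Clock, current-speed
* and reg-shift are taken from the device tree node, and TSI ports get the
* special register accessors defined above.
*/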
static int __init add_legacy_port(struct device_node *np, int want_index,
int iotype, phys_addr_t base,
phys_addr_t taddr, unsigned long irq,
upf_t flags, int irq_check_parent)
{
const __be32 *clk, *spd, *rs;
u32 clock = BASE_BAUD * 16;
u32 shift = 0;
int index;
/* get clock freq. if present */
clk = of_get_property(np, "clock-frequency", NULL);
if (clk && *clk)
clock = be32_to_cpup(clk);
/* get default speed if present */
spd = of_get_property(np, "current-speed", NULL);
/* get register shift if present */
rs = of_get_property(np, "reg-shift", NULL);
if (rs && *rs)
shift = be32_to_cpup(rs);
/* If we have a location index, then try to use it */
if (want_index >= 0 && want_index < MAX_LEGACY_SERIAL_PORTS)
index = want_index;
else
index = legacy_serial_count;
/* if our index is still out of range, that means the
* array is full; we could scan for a free slot, but it
* makes little sense to bother, so just skip the port
*/
if (index >= MAX_LEGACY_SERIAL_PORTS)
return -1;
if (index >= legacy_serial_count)
legacy_serial_count = index + 1;
/* Check if there is a port that already claimed our slot */
if (legacy_serial_infos[index].np != NULL) {
/* if we still have some room, move it, else override */
if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) {
printk(KERN_DEBUG "Moved legacy port %d -> %d\n",
index, legacy_serial_count);
legacy_serial_ports[legacy_serial_count] =
legacy_serial_ports[index];
legacy_serial_infos[legacy_serial_count] =
legacy_serial_infos[index];
legacy_serial_count++;
} else {
printk(KERN_DEBUG "Replacing legacy port %d\n", index);
}
}
/* Now fill the entry */
memset(&legacy_serial_ports[index], 0,
sizeof(struct plat_serial8250_port));
if (iotype == UPIO_PORT)
legacy_serial_ports[index].iobase = base;
else
legacy_serial_ports[index].mapbase = base;
legacy_serial_ports[index].iotype = iotype;
legacy_serial_ports[index].uartclk = clock;
legacy_serial_ports[index].irq = irq;
legacy_serial_ports[index].flags = flags;
legacy_serial_ports[index].regshift = shift;
legacy_serial_infos[index].taddr = taddr;
legacy_serial_infos[index].np = of_node_get(np);
legacy_serial_infos[index].clock = clock;
legacy_serial_infos[index].speed = spd ? be32_to_cpup(spd) : 0;
legacy_serial_infos[index].irq_check_parent = irq_check_parent;
if (iotype == UPIO_TSI) {
legacy_serial_ports[index].serial_in = tsi_serial_in;
legacy_serial_ports[index].serial_out = tsi_serial_out;
}
printk(KERN_DEBUG "Found legacy serial port %d for %pOF\n",
index, np);
printk(KERN_DEBUG " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n",
(iotype == UPIO_PORT) ? "port" : "mem",
(unsigned long long)base, (unsigned long long)taddr, irq,
legacy_serial_ports[index].uartclk,
legacy_serial_infos[index].speed);
return index;
}
static int __init add_legacy_soc_port(struct device_node *np,
struct device_node *soc_dev)
{
u64 addr;
const __be32 *addrp;
struct device_node *tsi = of_get_parent(np);
/* We only support ports that have a clock frequency properly
* encoded in the device-tree.
*/
if (!of_property_present(np, "clock-frequency"))
return -1;
/* if a reg-offset property is present, don't try to use it */
if (of_property_present(np, "reg-offset"))
return -1;
/* if rtas uses this device, don't try to use it as well */
if (of_property_read_bool(np, "used-by-rtas"))
return -1;
/* Get the address */
addrp = of_get_address(soc_dev, 0, NULL, NULL);
if (addrp == NULL)
return -1;
addr = of_translate_address(soc_dev, addrp);
if (addr == OF_BAD_ADDR)
return -1;
/* Add port, irq will be dealt with later. We passed a translated
* IO port value. It will be fixed up later along with the irq
*/
if (of_node_is_type(tsi, "tsi-bridge"))
return add_legacy_port(np, -1, UPIO_TSI, addr, addr,
0, legacy_port_flags, 0);
else
return add_legacy_port(np, -1, UPIO_MEM, addr, addr,
0, legacy_port_flags, 0);
}
static int __init add_legacy_isa_port(struct device_node *np,
struct device_node *isa_brg)
{
const __be32 *reg;
const char *typep;
int index = -1;
u64 taddr;
DBG(" -> add_legacy_isa_port(%pOF)\n", np);
/* Get the ISA port number */
reg = of_get_property(np, "reg", NULL);
if (reg == NULL)
return -1;
/* Verify it's an IO port, we don't support anything else */
if (!(be32_to_cpu(reg[0]) & 0x00000001))
return -1;
/* Now look for an "ibm,aix-loc" property that gives us ordering
* if any...
*/
typep = of_get_property(np, "ibm,aix-loc", NULL);
/* If we have a location index, then use it */
if (typep && *typep == 'S')
index = simple_strtol(typep+1, NULL, 0) - 1;
/* Translate ISA address. If it fails, we still register the port
* with no translated address so that it can be picked up as an IO
* port later by the serial driver
*
* Note: Don't even try on P8 lpc, we know it's not directly mapped
*/
if (!of_device_is_compatible(isa_brg, "ibm,power8-lpc") ||
of_property_present(isa_brg, "ranges")) {
taddr = of_translate_address(np, reg);
if (taddr == OF_BAD_ADDR)
taddr = 0;
} else
taddr = 0;
/* Add port, irq will be dealt with later */
return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]),
taddr, 0, legacy_port_flags, 0);
}
#ifdef CONFIG_PCI
static int __init add_legacy_pci_port(struct device_node *np,
struct device_node *pci_dev)
{
u64 addr, base;
const __be32 *addrp;
unsigned int flags;
int iotype, index = -1, lindex = 0;
DBG(" -> add_legacy_pci_port(%pOF)\n", np);
/* We only support ports that have a clock frequency properly
* encoded in the device-tree (that is, have an fcode). Anything
* else can't be used that early and will be normally probed by
* the generic 8250_pci driver later on. The reason is that 8250
* compatible UARTs on PCI need all sort of quirks (port offsets
* etc...) that this code doesn't know about
*/
if (!of_property_present(np, "clock-frequency"))
return -1;
/* Get the PCI address. Assume BAR 0 */
addrp = of_get_pci_address(pci_dev, 0, NULL, &flags);
if (addrp == NULL)
return -1;
/* We only support BAR 0 for now */
iotype = (flags & IORESOURCE_MEM) ? UPIO_MEM : UPIO_PORT;
addr = of_translate_address(pci_dev, addrp);
if (addr == OF_BAD_ADDR)
return -1;
/* Set the IO base to the same as the translated address for MMIO,
* or to the domain local IO base for PIO (it will be fixed up later)
*/
if (iotype == UPIO_MEM)
base = addr;
else
base = of_read_number(&addrp[2], 1);
/* Try to guess an index... If we have subdevices of the pci dev,
* we look at their "reg" property
*/
if (np != pci_dev) {
const __be32 *reg = of_get_property(np, "reg", NULL);
if (reg && (be32_to_cpup(reg) < 4))
index = lindex = be32_to_cpup(reg);
}
/* Local index means it's the Nth port in the PCI chip. Unfortunately
* the offset to add here is device specific. We know about those
* EXAR ports and we default to the most common case. If your UART
* doesn't work for these settings, you'll have to add your own special
* cases here
*/
if (of_device_is_compatible(pci_dev, "pci13a8,152") ||
of_device_is_compatible(pci_dev, "pci13a8,154") ||
of_device_is_compatible(pci_dev, "pci13a8,158")) {
addr += 0x200 * lindex;
base += 0x200 * lindex;
} else {
addr += 8 * lindex;
base += 8 * lindex;
}
/* Add port, irq will be dealt with later. We passed a translated
* IO port value. It will be fixed up later along with the irq
*/
return add_legacy_port(np, index, iotype, base, addr, 0,
legacy_port_flags, np != pci_dev);
}
#endif
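/*
* Hook up the early udbg console to the selected port: map its registers
* with early_ioremap() for MMIO (or use the untranslated PIO path when the
* platform supports it), probe the baud rate if the device-tree didn't
* provide one, and program udbg accordingly.
*/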
static void __init setup_legacy_serial_console(int console)
{
struct legacy_serial_info *info = &legacy_serial_infos[console];
struct plat_serial8250_port *port = &legacy_serial_ports[console];
unsigned int stride;
stride = 1 << port->regshift;
/* Check if a translated MMIO address has been found */
if (info->taddr) {
info->early_addr = early_ioremap(info->taddr, 0x1000);
if (info->early_addr == NULL)
return;
udbg_uart_init_mmio(info->early_addr, stride);
} else {
/* Check if it's PIO and we support untranslated PIO */
if (port->iotype == UPIO_PORT && isa_io_special)
udbg_uart_init_pio(port->iobase, stride);
else
return;
}
/* Try to query the current speed */
if (info->speed == 0)
info->speed = udbg_probe_uart_speed(info->clock);
/* Set it up */
DBG("default console speed = %d\n", info->speed);
udbg_uart_setup(info->speed, info->clock);
}
static int __init ioremap_legacy_serial_console(void)
{
struct plat_serial8250_port *port;
struct legacy_serial_info *info;
void __iomem *vaddr;
if (legacy_serial_console < 0)
return 0;
info = &legacy_serial_infos[legacy_serial_console];
port = &legacy_serial_ports[legacy_serial_console];
if (!info->early_addr)
return 0;
vaddr = ioremap(info->taddr, 0x1000);
if (WARN_ON(!vaddr))
return -ENOMEM;
udbg_uart_init_mmio(vaddr, 1 << port->regshift);
early_iounmap(info->early_addr, 0x1000);
info->early_addr = NULL;
return 0;
}
early_initcall(ioremap_legacy_serial_console);
/*
* This is called very early, as part of setup_system() or eventually
* setup_arch(), basically before anything else in this file. This function
* will try to build a list of all the available 8250-compatible serial ports
* in the machine using the Open Firmware device-tree. It currently only deals
* with ISA and PCI busses but could be extended. It allows a very early boot
* console to be initialized, that list is also used later to provide 8250 with
* the machine non-PCI ports and to properly pick the default console port
*/
void __init find_legacy_serial_ports(void)
{
struct device_node *np, *stdout = NULL;
const char *path;
int index;
DBG(" -> find_legacy_serial_port()\n");
/* Now find out if one of these is our firmware console */
path = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (path == NULL)
path = of_get_property(of_chosen, "stdout-path", NULL);
if (path != NULL) {
stdout = of_find_node_by_path(path);
if (stdout)
DBG("stdout is %pOF\n", stdout);
} else {
DBG(" no linux,stdout-path !\n");
}
/* Iterate over all the 16550 ports, looking for known parents */
for_each_compatible_node(np, "serial", "ns16550") {
struct device_node *parent = of_get_parent(np);
if (!parent)
continue;
if (of_match_node(legacy_serial_parents, parent) != NULL) {
if (of_device_is_available(np)) {
index = add_legacy_soc_port(np, np);
if (index >= 0 && np == stdout)
legacy_serial_console = index;
}
}
of_node_put(parent);
}
/* Next, fill our array with ISA ports */
for_each_node_by_type(np, "serial") {
struct device_node *isa = of_get_parent(np);
if (of_node_name_eq(isa, "isa") || of_node_name_eq(isa, "lpc")) {
if (of_device_is_available(np)) {
index = add_legacy_isa_port(np, isa);
if (index >= 0 && np == stdout)
legacy_serial_console = index;
}
}
of_node_put(isa);
}
#ifdef CONFIG_PCI
/* Next, try to locate PCI ports */
for (np = NULL; (np = of_find_all_nodes(np));) {
struct device_node *pci, *parent = of_get_parent(np);
if (of_node_name_eq(parent, "isa")) {
of_node_put(parent);
continue;
}
if (!of_node_name_eq(np, "serial") &&
!of_node_is_type(np, "serial")) {
of_node_put(parent);
continue;
}
/* Check for known pciclass, and also check whether we have
* a device with child nodes for ports or not
*/
if (of_device_is_compatible(np, "pciclass,0700") ||
of_device_is_compatible(np, "pciclass,070002"))
pci = np;
else if (of_device_is_compatible(parent, "pciclass,0700") ||
of_device_is_compatible(parent, "pciclass,070002"))
pci = parent;
else {
of_node_put(parent);
continue;
}
index = add_legacy_pci_port(np, pci);
if (index >= 0 && np == stdout)
legacy_serial_console = index;
of_node_put(parent);
}
#endif
of_node_put(stdout);
DBG("legacy_serial_console = %d\n", legacy_serial_console);
if (legacy_serial_console >= 0)
setup_legacy_serial_console(legacy_serial_console);
DBG(" <- find_legacy_serial_port()\n");
}
static struct platform_device serial_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = legacy_serial_ports,
},
};
static void __init fixup_port_irq(int index,
struct device_node *np,
struct plat_serial8250_port *port)
{
unsigned int virq;
DBG("fixup_port_irq(%d)\n", index);
virq = irq_of_parse_and_map(np, 0);
if (!virq && legacy_serial_infos[index].irq_check_parent) {
np = of_get_parent(np);
if (np == NULL)
return;
virq = irq_of_parse_and_map(np, 0);
of_node_put(np);
}
if (!virq)
return;
port->irq = virq;
if (IS_ENABLED(CONFIG_SERIAL_8250) &&
of_device_is_compatible(np, "fsl,ns16550")) {
if (IS_REACHABLE(CONFIG_SERIAL_8250_FSL)) {
port->handle_irq = fsl8250_handle_irq;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
} else {
pr_warn_once("Not activating Freescale specific workaround for device %pOFP\n",
np);
}
}
}
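/*
* PIO ports were registered with a bus-local IO port number; now that the
* host bridge's IO space has been mapped, add the per-PHB offset so the
* 8250 driver sees the port in the kernel's global IO space.
*/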
static void __init fixup_port_pio(int index,
struct device_node *np,
struct plat_serial8250_port *port)
{
#ifdef CONFIG_PCI
struct pci_controller *hose;
DBG("fixup_port_pio(%d)\n", index);
hose = pci_find_hose_for_OF_device(np);
if (hose) {
unsigned long offset = (unsigned long)hose->io_base_virt -
#ifdef CONFIG_PPC64
pci_io_base;
#else
isa_io_base;
#endif
DBG("port %d, IO %lx -> %lx\n",
index, port->iobase, port->iobase + offset);
port->iobase += offset;
}
#endif
}
static void __init fixup_port_mmio(int index,
struct device_node *np,
struct plat_serial8250_port *port)
{
DBG("fixup_port_mmio(%d)\n", index);
port->membase = ioremap(port->mapbase, 0x100);
}
/*
* This is called as an arch initcall, hopefully before the PCI bus is
* probed and/or the 8250 driver loaded since we need to register our
* platform devices before 8250 PCI ones are detected as some of them
* must properly "override" the platform ones.
*
* This function fixes up the interrupt value for platform ports as it
* couldn't be done earlier before interrupt maps have been parsed. It
* also "corrects" the IO address for PIO ports for the same reason,
* since earlier, the PHBs virtual IO space wasn't assigned yet. It then
* registers all those platform ports for use by the 8250 driver when it
* finally loads.
*/
static int __init serial_dev_init(void)
{
int i;
if (legacy_serial_count == 0)
return -ENODEV;
/*
* Before we register the platform serial devices, we need
* to fixup their interrupts and their IO ports.
*/
DBG("Fixing serial ports interrupts and IO ports ...\n");
for (i = 0; i < legacy_serial_count; i++) {
struct plat_serial8250_port *port = &legacy_serial_ports[i];
struct device_node *np = legacy_serial_infos[i].np;
if (!port->irq)
fixup_port_irq(i, np, port);
if (port->iotype == UPIO_PORT)
fixup_port_pio(i, np, port);
if ((port->iotype == UPIO_MEM) || (port->iotype == UPIO_TSI))
fixup_port_mmio(i, np, port);
}
DBG("Registering platform serial ports\n");
return platform_device_register(&serial_device);
}
device_initcall(serial_dev_init);
#ifdef CONFIG_SERIAL_8250_CONSOLE
/*
* This is called very early, as part of console_init() (typically just after
* time_init()). This function is responsible for trying to find a good
* default console on serial ports. It tries to match the open firmware
* default output with one of the available serial console drivers that have
* been probed earlier by find_legacy_serial_ports()
*/
static int __init check_legacy_serial_console(void)
{
struct device_node *prom_stdout = NULL;
int i, speed = 0, offset = 0;
const char *name;
const __be32 *spd;
DBG(" -> check_legacy_serial_console()\n");
/* The user has requested a console so this is already set up. */
if (strstr(boot_command_line, "console=")) {
DBG(" console was specified !\n");
return -EBUSY;
}
if (!of_chosen) {
DBG(" of_chosen is NULL !\n");
return -ENODEV;
}
if (legacy_serial_console < 0) {
DBG(" legacy_serial_console not found !\n");
return -ENODEV;
}
/* We are getting a weird phandle from OF ... */
/* ... So use the full path instead */
name = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (name == NULL)
name = of_get_property(of_chosen, "stdout-path", NULL);
if (name == NULL) {
DBG(" no stdout-path !\n");
return -ENODEV;
}
prom_stdout = of_find_node_by_path(name);
if (!prom_stdout) {
DBG(" can't find stdout package %s !\n", name);
return -ENODEV;
}
DBG("stdout is %pOF\n", prom_stdout);
name = of_get_property(prom_stdout, "name", NULL);
if (!name) {
DBG(" stdout package has no name !\n");
goto not_found;
}
spd = of_get_property(prom_stdout, "current-speed", NULL);
if (spd)
speed = be32_to_cpup(spd);
if (strcmp(name, "serial") != 0)
goto not_found;
/* Look for it in probed array */
for (i = 0; i < legacy_serial_count; i++) {
if (prom_stdout != legacy_serial_infos[i].np)
continue;
offset = i;
speed = legacy_serial_infos[i].speed;
break;
}
if (i >= legacy_serial_count)
goto not_found;
of_node_put(prom_stdout);
DBG("Found serial console at ttyS%d\n", offset);
if (speed) {
static char __initdata opt[16];
sprintf(opt, "%d", speed);
return add_preferred_console("ttyS", offset, opt);
} else
return add_preferred_console("ttyS", offset, NULL);
not_found:
DBG("No preferred console found !\n");
of_node_put(prom_stdout);
return -ENODEV;
}
console_initcall(check_legacy_serial_console);
#endif /* CONFIG_SERIAL_8250_CONSOLE */
| linux-master | arch/powerpc/kernel/legacy_serial.c |
// SPDX-License-Identifier: GPL-2.0
/*
* temp.c Thermal management for cpu's with Thermal Assist Units
*
* Written by Troy Benjegerdes <[email protected]>
*
* TODO:
* dynamic power management to limit peak CPU temp (using ICTC)
* calibration???
*
* Silly, crazy ideas: use cpu load (from scheduler) and ICTC to extend battery
* life in portables, and add a 'performance/watt' metric somewhere in /proc
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/reg.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/8xx_immap.h>
#include <asm/machdep.h>
#include "setup.h"
static struct tau_temp
{
int interrupts;
unsigned char low;
unsigned char high;
unsigned char grew;
} tau[NR_CPUS];
static bool tau_int_enable;
/* TODO: put these in a /proc interface, with some sanity checks, and maybe
* dynamic adjustment to minimize # of interrupts */
/* configurable values for step size and how much to expand the window when
* we get an interrupt. These are based on the limit that was out of range */
#define step_size 2 /* step size when temp goes out of range */
#define window_expand 1 /* expand the window by this much */
/* configurable values for shrinking the window */
#define shrink_timer 2000 /* period between shrinking the window */
#define min_window 2 /* minimum window size, degrees C */
static void set_thresholds(unsigned long cpu)
{
u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0;
/* setup THRM1, threshold, valid bit, interrupt when below threshold */
mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
/* setup THRM2, threshold, valid bit, interrupt when above threshold */
mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
}
static void TAUupdate(int cpu)
{
u32 thrm;
u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V;
/* if both thresholds are crossed, the step_sizes cancel out
* and the window winds up getting expanded twice. */
thrm = mfspr(SPRN_THRM1);
if ((thrm & bits) == bits) {
mtspr(SPRN_THRM1, 0);
if (tau[cpu].low >= step_size) {
tau[cpu].low -= step_size;
tau[cpu].high -= (step_size - window_expand);
}
tau[cpu].grew = 1;
pr_debug("%s: low threshold crossed\n", __func__);
}
thrm = mfspr(SPRN_THRM2);
if ((thrm & bits) == bits) {
mtspr(SPRN_THRM2, 0);
if (tau[cpu].high <= 127 - step_size) {
tau[cpu].low += (step_size - window_expand);
tau[cpu].high += step_size;
}
tau[cpu].grew = 1;
pr_debug("%s: high threshold crossed\n", __func__);
}
}
#ifdef CONFIG_TAU_INT
/*
* TAU interrupts - called when we have a thermal assist unit interrupt
* with interrupts disabled
*/
DEFINE_INTERRUPT_HANDLER_ASYNC(TAUException)
{
int cpu = smp_processor_id();
tau[cpu].interrupts++;
TAUupdate(cpu);
}
#endif /* CONFIG_TAU_INT */
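/*
* Periodic window maintenance, run on every CPU from the workqueue below:
* stop THRM comparisons, shrink the threshold window exponentially unless
* an interrupt recently grew it, reprogram the thresholds, and restart the
* sensor with the maximum sample interval as the 750 datasheet quoted
* further down recommends.
*/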
static void tau_timeout(void * info)
{
int cpu;
int size;
int shrink;
cpu = smp_processor_id();
if (!tau_int_enable)
TAUupdate(cpu);
/* Stop thermal sensor comparisons and interrupts */
mtspr(SPRN_THRM3, 0);
size = tau[cpu].high - tau[cpu].low;
if (size > min_window && ! tau[cpu].grew) {
/* do an exponential shrink of half the amount currently over size */
shrink = (2 + size - min_window) / 4;
if (shrink) {
tau[cpu].low += shrink;
tau[cpu].high -= shrink;
} else { /* size must have been min_window + 1 */
tau[cpu].low += 1;
#if 1 /* debug */
if ((tau[cpu].high - tau[cpu].low) != min_window){
printk(KERN_ERR "temp.c: line %d, logic error\n", __LINE__);
}
#endif
}
}
tau[cpu].grew = 0;
set_thresholds(cpu);
/* Restart thermal sensor comparisons and interrupts.
* The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet"
* recommends that "the maximum value be set in THRM3 under all
* conditions."
*/
mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
}
static struct workqueue_struct *tau_workq;
static void tau_work_func(struct work_struct *work)
{
msleep(shrink_timer);
on_each_cpu(tau_timeout, NULL, 0);
/* schedule ourselves to be run again */
queue_work(tau_workq, work);
}
static DECLARE_WORK(tau_work, tau_work_func);
/*
* setup the TAU
*
* Set things up to use THRM1 as a temperature lower bound, and THRM2 as an upper bound.
* Start off at zero
*/
int tau_initialized = 0;
static void __init TAU_init_smp(void *info)
{
unsigned long cpu = smp_processor_id();
/* set these to a reasonable value and let the timer shrink the
* window */
tau[cpu].low = 5;
tau[cpu].high = 120;
set_thresholds(cpu);
}
static int __init TAU_init(void)
{
/* We assume in SMP that if one CPU has TAU support, they
* all have it --BenH
*/
if (!cpu_has_feature(CPU_FTR_TAU)) {
printk("Thermal assist unit not available\n");
tau_initialized = 0;
return 1;
}
tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
!strcmp(cur_cpu_spec->platform, "ppc750");
tau_workq = alloc_ordered_workqueue("tau", 0);
if (!tau_workq)
return -ENOMEM;
on_each_cpu(TAU_init_smp, NULL, 0);
queue_work(tau_workq, &tau_work);
pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n",
tau_int_enable ? "interrupts" : "workqueue", shrink_timer);
tau_initialized = 1;
return 0;
}
__initcall(TAU_init);
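/*
* Accessors used by the rest of the kernel: cpu_temp_both() packs the high
* threshold in bits 31..16 and the low threshold in bits 15..0, cpu_temp()
* reports the midpoint of the current window as a rough temperature
* estimate, and tau_interrupts() returns the per-CPU interrupt count.
*/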
/*
* return current temp
*/
u32 cpu_temp_both(unsigned long cpu)
{
return ((tau[cpu].high << 16) | tau[cpu].low);
}
u32 cpu_temp(unsigned long cpu)
{
return ((tau[cpu].high + tau[cpu].low) / 2);
}
u32 tau_interrupts(unsigned long cpu)
{
return (tau[cpu].interrupts);
}
| linux-master | arch/powerpc/kernel/tau_6xx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sys_ppc32.c: 32-bit system calls with complex calling conventions.
*
* Copyright (C) 2001 IBM
* Copyright (C) 1997,1998 Jakub Jelinek ([email protected])
* Copyright (C) 1997 David S. Miller ([email protected])
*
* 32-bit system calls with 64-bit arguments pass those in register pairs.
* This must be specially dealt with on 64-bit kernels. The compat_arg_u64_dual
* in generic compat syscalls is not always usable because the register
* pairing is constrained depending on preceding arguments.
*
* An analogous problem exists on 32-bit kernels with ARCH_HAS_SYSCALL_WRAPPER,
* the defined system call functions take the pt_regs as an argument, and there
* is a mapping macro which maps registers to arguments
* (SC_POWERPC_REGS_TO_ARGS) which also does not deal with these 64-bit
* arguments.
*
* This file contains these system calls.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/in.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/ipc.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/time.h>
#include <asm/mmu_context.h>
#include <asm/ppc-pci.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#ifdef CONFIG_PPC32
#define PPC32_SYSCALL_DEFINE4 SYSCALL_DEFINE4
#define PPC32_SYSCALL_DEFINE5 SYSCALL_DEFINE5
#define PPC32_SYSCALL_DEFINE6 SYSCALL_DEFINE6
#else
#define PPC32_SYSCALL_DEFINE4 COMPAT_SYSCALL_DEFINE4
#define PPC32_SYSCALL_DEFINE5 COMPAT_SYSCALL_DEFINE5
#define PPC32_SYSCALL_DEFINE6 COMPAT_SYSCALL_DEFINE6
#endif
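/*
* Each wrapper below rebuilds a 64-bit argument from the 32-bit register
* pair the compat ABI hands us; merge_64() is expected to combine them as
* roughly ((u64)high << 32) | low, with any unused alignment register
* (reg4/reg6/unused) simply ignored.
*/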
PPC32_SYSCALL_DEFINE6(ppc_pread64,
unsigned int, fd,
char __user *, ubuf, compat_size_t, count,
u32, reg6, u32, pos1, u32, pos2)
{
return ksys_pread64(fd, ubuf, count, merge_64(pos1, pos2));
}
PPC32_SYSCALL_DEFINE6(ppc_pwrite64,
unsigned int, fd,
const char __user *, ubuf, compat_size_t, count,
u32, reg6, u32, pos1, u32, pos2)
{
return ksys_pwrite64(fd, ubuf, count, merge_64(pos1, pos2));
}
PPC32_SYSCALL_DEFINE5(ppc_readahead,
int, fd, u32, r4,
u32, offset1, u32, offset2, u32, count)
{
return ksys_readahead(fd, merge_64(offset1, offset2), count);
}
PPC32_SYSCALL_DEFINE4(ppc_truncate64,
const char __user *, path, u32, reg4,
unsigned long, len1, unsigned long, len2)
{
return ksys_truncate(path, merge_64(len1, len2));
}
PPC32_SYSCALL_DEFINE4(ppc_ftruncate64,
unsigned int, fd, u32, reg4,
unsigned long, len1, unsigned long, len2)
{
return ksys_ftruncate(fd, merge_64(len1, len2));
}
PPC32_SYSCALL_DEFINE6(ppc32_fadvise64,
int, fd, u32, unused, u32, offset1, u32, offset2,
size_t, len, int, advice)
{
return ksys_fadvise64_64(fd, merge_64(offset1, offset2), len,
advice);
}
PPC32_SYSCALL_DEFINE6(ppc_sync_file_range2,
int, fd, unsigned int, flags,
unsigned int, offset1, unsigned int, offset2,
unsigned int, nbytes1, unsigned int, nbytes2)
{
loff_t offset = merge_64(offset1, offset2);
loff_t nbytes = merge_64(nbytes1, nbytes2);
return ksys_sync_file_range(fd, offset, nbytes, flags);
}
#ifdef CONFIG_PPC32
SYSCALL_DEFINE6(ppc_fallocate,
int, fd, int, mode,
u32, offset1, u32, offset2, u32, len1, u32, len2)
{
return ksys_fallocate(fd, mode,
merge_64(offset1, offset2),
merge_64(len1, len2));
}
#endif
| linux-master | arch/powerpc/kernel/sys_ppc32.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/rtas.h>
#include <linux/uaccess.h>
#ifdef CONFIG_PPC64
static loff_t page_map_seek(struct file *file, loff_t off, int whence)
{
return fixed_size_llseek(file, off, whence, PAGE_SIZE);
}
static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
{
return simple_read_from_buffer(buf, nbytes, ppos,
pde_data(file_inode(file)), PAGE_SIZE);
}
static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
{
if ((vma->vm_end - vma->vm_start) > PAGE_SIZE)
return -EINVAL;
remap_pfn_range(vma, vma->vm_start,
__pa(pde_data(file_inode(file))) >> PAGE_SHIFT,
PAGE_SIZE, vma->vm_page_prot);
return 0;
}
static const struct proc_ops page_map_proc_ops = {
.proc_lseek = page_map_seek,
.proc_read = page_map_read,
.proc_mmap = page_map_mmap,
};
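/*
* Expose the vdso_data page as /proc/powerpc/systemcfg: the file is created
* read-only (0444), and userspace can either read() the page contents or
* mmap() the single backing page via the proc_ops above.
*/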
static int __init proc_ppc64_init(void)
{
struct proc_dir_entry *pde;
pde = proc_create_data("powerpc/systemcfg", S_IFREG | 0444, NULL,
&page_map_proc_ops, vdso_data);
if (!pde)
return 1;
proc_set_size(pde, PAGE_SIZE);
return 0;
}
__initcall(proc_ppc64_init);
#endif /* CONFIG_PPC64 */
/*
* Create the ppc64 and ppc64/rtas directories early. This allows drivers
* to assume that they have already been created.
*/
static int __init proc_ppc64_create(void)
{
struct proc_dir_entry *root;
root = proc_mkdir("powerpc", NULL);
if (!root)
return 1;
#ifdef CONFIG_PPC64
if (!proc_symlink("ppc64", NULL, "powerpc"))
pr_err("Failed to create link /proc/ppc64 -> /proc/powerpc\n");
#endif
if (!of_find_node_by_path("/rtas"))
return 0;
if (!proc_mkdir("rtas", root))
return 1;
if (!proc_symlink("rtas", NULL, "powerpc/rtas"))
return 1;
return 0;
}
core_initcall(proc_ppc64_create);
| linux-master | arch/powerpc/kernel/proc_powerpc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
*
* Rewrite, cleanup, new allocation schemes, virtual merging:
* Copyright (C) 2004 Olof Johansson, IBM Corporation
* and Ben. Herrenschmidt, IBM Corporation
*
* Dynamic DMA mapping support, bus-independent parts.
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>
#include <asm/ppc-pci.h>
#define DBG(...)
#ifdef CONFIG_IOMMU_DEBUGFS
static int iommu_debugfs_weight_get(void *data, u64 *val)
{
struct iommu_table *tbl = data;
*val = bitmap_weight(tbl->it_map, tbl->it_size);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");
static void iommu_debugfs_add(struct iommu_table *tbl)
{
char name[10];
struct dentry *liobn_entry;
sprintf(name, "%08lx", tbl->it_index);
liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);
debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
}
static void iommu_debugfs_del(struct iommu_table *tbl)
{
char name[10];
sprintf(name, "%08lx", tbl->it_index);
debugfs_lookup_and_remove(name, iommu_debugfs_dir);
}
#else
static void iommu_debugfs_add(struct iommu_table *tbl){}
static void iommu_debugfs_del(struct iommu_table *tbl){}
#endif
static int novmerge;
static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
static int __init setup_iommu(char *str)
{
if (!strcmp(str, "novmerge"))
novmerge = 1;
else if (!strcmp(str, "vmerge"))
novmerge = 0;
return 1;
}
__setup("iommu=", setup_iommu);
static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
/*
* We precalculate the hash to avoid doing it on every allocation.
*
* The hash is important to spread CPUs across all the pools. For example,
* on a POWER7 with 4 way SMT we want interrupts on the primary threads and
* with 4 pools all primary threads would map to the same pool.
*/
static int __init setup_iommu_pool_hash(void)
{
unsigned int i;
for_each_possible_cpu(i)
per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
return 0;
}
subsys_initcall(setup_iommu_pool_hash);
#ifdef CONFIG_FAIL_IOMMU
static DECLARE_FAULT_ATTR(fail_iommu);
static int __init setup_fail_iommu(char *str)
{
return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);
static bool should_fail_iommu(struct device *dev)
{
return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}
static int __init fail_iommu_debugfs(void)
{
struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
NULL, &fail_iommu);
return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);
static ssize_t fail_iommu_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}
static ssize_t fail_iommu_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
int i;
if (count > 0 && sscanf(buf, "%d", &i) > 0)
dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
return count;
}
static DEVICE_ATTR_RW(fail_iommu);
static int fail_iommu_bus_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
if (action == BUS_NOTIFY_ADD_DEVICE) {
if (device_create_file(dev, &dev_attr_fail_iommu))
pr_warn("Unable to create IOMMU fault injection sysfs "
"entries\n");
} else if (action == BUS_NOTIFY_DEL_DEVICE) {
device_remove_file(dev, &dev_attr_fail_iommu);
}
return 0;
}
/*
* PCI and VIO buses need separate notifier_block structs, since they're linked
* list nodes. Sharing a notifier_block would mean that any notifiers later
* registered for PCI buses would also get called by VIO buses and vice versa.
*/
static struct notifier_block fail_iommu_pci_bus_notifier = {
.notifier_call = fail_iommu_bus_notify
};
#ifdef CONFIG_IBMVIO
static struct notifier_block fail_iommu_vio_bus_notifier = {
.notifier_call = fail_iommu_bus_notify
};
#endif
static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
bus_register_notifier(&pci_bus_type, &fail_iommu_pci_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
bus_register_notifier(&vio_bus_type, &fail_iommu_vio_bus_notifier);
#endif
return 0;
}
/*
* Must execute after PCI and VIO subsystem have initialised but before
* devices are probed.
*/
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
return false;
}
#endif
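/*
* Allocate a run of 'npages' TCE entries from the table's bitmap. Large
* requests (more than 15 pages) go straight to the dedicated large pool;
* small ones pick a pool from the per-CPU hash. The search starts at the
* caller's handle or the pool hint, then falls back to the pool start, the
* other pools and finally the large pool before giving up.
*/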
static unsigned long iommu_range_alloc(struct device *dev,
struct iommu_table *tbl,
unsigned long npages,
unsigned long *handle,
unsigned long mask,
unsigned int align_order)
{
unsigned long n, end, start;
unsigned long limit;
int largealloc = npages > 15;
int pass = 0;
unsigned long align_mask;
unsigned long flags;
unsigned int pool_nr;
struct iommu_pool *pool;
align_mask = (1ull << align_order) - 1;
/* This allocator was derived from x86_64's bit string search */
/* Sanity check */
if (unlikely(npages == 0)) {
if (printk_ratelimit())
WARN_ON(1);
return DMA_MAPPING_ERROR;
}
if (should_fail_iommu(dev))
return DMA_MAPPING_ERROR;
/*
* We don't need to disable preemption here because any CPU can
* safely use any IOMMU pool.
*/
pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
if (largealloc)
pool = &(tbl->large_pool);
else
pool = &(tbl->pools[pool_nr]);
spin_lock_irqsave(&(pool->lock), flags);
again:
if ((pass == 0) && handle && *handle &&
(*handle >= pool->start) && (*handle < pool->end))
start = *handle;
else
start = pool->hint;
limit = pool->end;
/* The case below can happen if we have a small segment appended
* to a large one, or when the previous alloc was at the very end of
* the available space. If so, go back to the initial start.
*/
if (start >= limit)
start = pool->start;
if (limit + tbl->it_offset > mask) {
limit = mask - tbl->it_offset + 1;
/* If we're constrained on address range, first try
* at the masked hint to avoid O(n) search complexity,
* but on second pass, start at 0 in pool 0.
*/
if ((start & mask) >= limit || pass > 0) {
spin_unlock(&(pool->lock));
pool = &(tbl->pools[0]);
spin_lock(&(pool->lock));
start = pool->start;
} else {
start &= mask;
}
}
n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
align_mask);
if (n == -1) {
if (likely(pass == 0)) {
/* First try the pool from the start */
pool->hint = pool->start;
pass++;
goto again;
} else if (pass <= tbl->nr_pools) {
/* Now try scanning all the other pools */
spin_unlock(&(pool->lock));
pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
pool = &tbl->pools[pool_nr];
spin_lock(&(pool->lock));
pool->hint = pool->start;
pass++;
goto again;
} else if (pass == tbl->nr_pools + 1) {
/* Last resort: try largepool */
spin_unlock(&pool->lock);
pool = &tbl->large_pool;
spin_lock(&pool->lock);
pool->hint = pool->start;
pass++;
goto again;
} else {
/* Give up */
spin_unlock_irqrestore(&(pool->lock), flags);
return DMA_MAPPING_ERROR;
}
}
end = n + npages;
/* Bump the hint to a new block for small allocs. */
if (largealloc) {
/* Don't bump to new block to avoid fragmentation */
pool->hint = end;
} else {
/* Overflow will be taken care of at the next allocation */
pool->hint = (end + tbl->it_blocksize - 1) &
~(tbl->it_blocksize - 1);
}
/* Update handle for SG allocations */
if (handle)
*handle = end;
spin_unlock_irqrestore(&(pool->lock), flags);
return n;
}
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
void *page, unsigned int npages,
enum dma_data_direction direction,
unsigned long mask, unsigned int align_order,
unsigned long attrs)
{
unsigned long entry;
dma_addr_t ret = DMA_MAPPING_ERROR;
int build_fail;
entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
if (unlikely(entry == DMA_MAPPING_ERROR))
return DMA_MAPPING_ERROR;
entry += tbl->it_offset; /* Offset into real TCE table */
ret = entry << tbl->it_page_shift; /* Set the return dma address */
/* Put the TCEs in the HW table */
build_fail = tbl->it_ops->set(tbl, entry, npages,
(unsigned long)page &
IOMMU_PAGE_MASK(tbl), direction, attrs);
/* tbl->it_ops->set() only returns non-zero for transient errors.
* Clean up the table bitmap in this case and return
* DMA_MAPPING_ERROR. For all other errors the functionality is
* not altered.
*/
if (unlikely(build_fail)) {
__iommu_free(tbl, ret, npages);
return DMA_MAPPING_ERROR;
}
/* Flush/invalidate TLB caches if necessary */
if (tbl->it_ops->flush)
tbl->it_ops->flush(tbl);
/* Make sure updates are seen by hardware */
mb();
return ret;
}
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned int npages)
{
unsigned long entry, free_entry;
entry = dma_addr >> tbl->it_page_shift;
free_entry = entry - tbl->it_offset;
if (((free_entry + npages) > tbl->it_size) ||
(entry < tbl->it_offset)) {
if (printk_ratelimit()) {
printk(KERN_INFO "iommu_free: invalid entry\n");
printk(KERN_INFO "\tentry = 0x%lx\n", entry);
printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
WARN_ON(1);
}
return false;
}
return true;
}
static struct iommu_pool *get_pool(struct iommu_table *tbl,
unsigned long entry)
{
struct iommu_pool *p;
unsigned long largepool_start = tbl->large_pool.start;
/* The large pool is the last pool at the top of the table */
if (entry >= largepool_start) {
p = &tbl->large_pool;
} else {
unsigned int pool_nr = entry / tbl->poolsize;
BUG_ON(pool_nr > tbl->nr_pools);
p = &tbl->pools[pool_nr];
}
return p;
}
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned int npages)
{
unsigned long entry, free_entry;
unsigned long flags;
struct iommu_pool *pool;
entry = dma_addr >> tbl->it_page_shift;
free_entry = entry - tbl->it_offset;
pool = get_pool(tbl, free_entry);
if (!iommu_free_check(tbl, dma_addr, npages))
return;
tbl->it_ops->clear(tbl, entry, npages);
spin_lock_irqsave(&(pool->lock), flags);
bitmap_clear(tbl->it_map, free_entry, npages);
spin_unlock_irqrestore(&(pool->lock), flags);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned int npages)
{
__iommu_free(tbl, dma_addr, npages);
/* Make sure TLB cache is flushed if the HW needs it. We do
* not do an mb() here on purpose, it is not needed on any of
* the current platforms.
*/
if (tbl->it_ops->flush)
tbl->it_ops->flush(tbl);
}
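/*
* Map a scatterlist: allocate IOMMU pages for each segment, program the
* TCEs, and merge entries whose DMA addresses turn out to be contiguous
* (unless iommu=novmerge is set or the merged length would exceed the
* device's max segment size). On any failure every mapping created so far
* is torn down and -EIO is returned.
*/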
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
unsigned long mask, enum dma_data_direction direction,
unsigned long attrs)
{
dma_addr_t dma_next = 0, dma_addr;
struct scatterlist *s, *outs, *segstart;
int outcount, incount, i, build_fail = 0;
unsigned int align;
unsigned long handle;
unsigned int max_seg_size;
BUG_ON(direction == DMA_NONE);
if ((nelems == 0) || !tbl)
return -EINVAL;
outs = s = segstart = &sglist[0];
outcount = 1;
incount = nelems;
handle = 0;
/* Init first segment length for backout at failure */
outs->dma_length = 0;
DBG("sg mapping %d elements:\n", nelems);
max_seg_size = dma_get_max_seg_size(dev);
for_each_sg(sglist, s, nelems, i) {
unsigned long vaddr, npages, entry, slen;
slen = s->length;
/* Sanity check */
if (slen == 0) {
dma_next = 0;
continue;
}
/* Allocate iommu entries for that segment */
vaddr = (unsigned long) sg_virt(s);
npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
align = 0;
if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
(vaddr & ~PAGE_MASK) == 0)
align = PAGE_SHIFT - tbl->it_page_shift;
entry = iommu_range_alloc(dev, tbl, npages, &handle,
mask >> tbl->it_page_shift, align);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
/* Handle failure */
if (unlikely(entry == DMA_MAPPING_ERROR)) {
if (!(attrs & DMA_ATTR_NO_WARN) &&
printk_ratelimit())
dev_info(dev, "iommu_alloc failed, tbl %p "
"vaddr %lx npages %lu\n", tbl, vaddr,
npages);
goto failure;
}
/* Convert entry to a dma_addr_t */
entry += tbl->it_offset;
dma_addr = entry << tbl->it_page_shift;
dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));
DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
npages, entry, dma_addr);
/* Insert into HW table */
build_fail = tbl->it_ops->set(tbl, entry, npages,
vaddr & IOMMU_PAGE_MASK(tbl),
direction, attrs);
if(unlikely(build_fail))
goto failure;
/* If we are in an open segment, try merging */
if (segstart != s) {
DBG(" - trying merge...\n");
/* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation
*/
if (novmerge || (dma_addr != dma_next) ||
(outs->dma_length + s->length > max_seg_size)) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
outs = sg_next(outs);
DBG(" can't merge, new segment.\n");
} else {
outs->dma_length += s->length;
DBG(" merged, new len: %ux\n", outs->dma_length);
}
}
if (segstart == s) {
/* This is a new segment, fill entries */
DBG(" - filling new segment.\n");
outs->dma_address = dma_addr;
outs->dma_length = slen;
}
/* Calculate next page pointer for contiguous check */
dma_next = dma_addr + slen;
DBG(" - dma next is: %lx\n", dma_next);
}
/* Flush/invalidate TLB caches if necessary */
if (tbl->it_ops->flush)
tbl->it_ops->flush(tbl);
DBG("mapped %d elements:\n", outcount);
/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
* next entry of the sglist if we didn't fill the list completely
*/
if (outcount < incount) {
outs = sg_next(outs);
outs->dma_length = 0;
}
/* Make sure updates are seen by hardware */
mb();
return outcount;
failure:
for_each_sg(sglist, s, nelems, i) {
if (s->dma_length != 0) {
unsigned long vaddr, npages;
vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
npages = iommu_num_pages(s->dma_address, s->dma_length,
IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, vaddr, npages);
s->dma_length = 0;
}
if (s == outs)
break;
}
return -EIO;
}
void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
struct scatterlist *sg;
BUG_ON(direction == DMA_NONE);
if (!tbl)
return;
sg = sglist;
while (nelems--) {
unsigned int npages;
dma_addr_t dma_handle = sg->dma_address;
if (sg->dma_length == 0)
break;
npages = iommu_num_pages(dma_handle, sg->dma_length,
IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, dma_handle, npages);
sg = sg_next(sg);
}
/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
* do not do an mb() here, the affected platforms do not need it
* when freeing.
*/
if (tbl->it_ops->flush)
tbl->it_ops->flush(tbl);
}
static void iommu_table_clear(struct iommu_table *tbl)
{
/*
* In case of firmware-assisted dump, the system goes through a clean
* reboot process at the time of a system crash. Hence it's safe to
* clear the TCE entries if firmware-assisted dump is active.
*/
if (!is_kdump_kernel() || is_fadump_active()) {
/* Clear the table in case firmware left allocations in it */
tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
return;
}
#ifdef CONFIG_CRASH_DUMP
if (tbl->it_ops->get) {
unsigned long index, tceval, tcecount = 0;
/* Reserve the existing mappings left by the first kernel. */
for (index = 0; index < tbl->it_size; index++) {
tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
/*
* Freed TCE entry contains 0x7fffffffffffffff on JS20
*/
if (tceval && (tceval != 0x7fffffffffffffffUL)) {
__set_bit(index, tbl->it_map);
tcecount++;
}
}
if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
printk(KERN_WARNING "TCE table is full; freeing ");
printk(KERN_WARNING "%d entries for the kdump boot\n",
KDUMP_MIN_TCE_ENTRIES);
for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
index < tbl->it_size; index++)
__clear_bit(index, tbl->it_map);
}
}
#endif
}
static void iommu_table_reserve_pages(struct iommu_table *tbl,
unsigned long res_start, unsigned long res_end)
{
int i;
WARN_ON_ONCE(res_end < res_start);
/*
* Reserve page 0 so it will not be used for any mappings.
* This prevents buggy drivers that consider page 0 to be invalid
* from crashing the machine or even losing data.
*/
if (tbl->it_offset == 0)
set_bit(0, tbl->it_map);
if (res_start < tbl->it_offset)
res_start = tbl->it_offset;
if (res_end > (tbl->it_offset + tbl->it_size))
res_end = tbl->it_offset + tbl->it_size;
/* Check if res_start..res_end is a valid range in the table */
if (res_start >= res_end) {
tbl->it_reserved_start = tbl->it_offset;
tbl->it_reserved_end = tbl->it_offset;
return;
}
tbl->it_reserved_start = res_start;
tbl->it_reserved_end = res_end;
for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
set_bit(i - tbl->it_offset, tbl->it_map);
}
/*
* Build an iommu_table structure. This contains a bitmap which
* is used to manage allocation of the tce space.
*/
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
unsigned long res_start, unsigned long res_end)
{
unsigned long sz;
static int welcomed = 0;
unsigned int i;
struct iommu_pool *p;
BUG_ON(!tbl->it_ops);
/* number of bytes needed for the bitmap */
sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
tbl->it_map = vzalloc_node(sz, nid);
if (!tbl->it_map) {
pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
return NULL;
}
iommu_table_reserve_pages(tbl, res_start, res_end);
/* We only split the IOMMU table if we have 1GB or more of space */
if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
tbl->nr_pools = IOMMU_NR_POOLS;
else
tbl->nr_pools = 1;
/* We reserve the top 1/4 of the table for large allocations */
tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
for (i = 0; i < tbl->nr_pools; i++) {
p = &tbl->pools[i];
spin_lock_init(&(p->lock));
p->start = tbl->poolsize * i;
p->hint = p->start;
p->end = p->start + tbl->poolsize;
}
p = &tbl->large_pool;
spin_lock_init(&(p->lock));
p->start = tbl->poolsize * i;
p->hint = p->start;
p->end = tbl->it_size;
iommu_table_clear(tbl);
if (!welcomed) {
printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
novmerge ? "disabled" : "enabled");
welcomed = 1;
}
iommu_debugfs_add(tbl);
return tbl;
}
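/*
* Report whether any TCE entry is currently allocated, ignoring the
* reserved page 0 and the reserved MMIO32 window so that a table which
* only contains those reservations is treated as unused.
*/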
bool iommu_table_in_use(struct iommu_table *tbl)
{
unsigned long start = 0, end;
/* ignore reserved bit0 */
if (tbl->it_offset == 0)
start = 1;
/* Simple case with no reserved MMIO32 region */
if (!tbl->it_reserved_start && !tbl->it_reserved_end)
return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;
end = tbl->it_reserved_start - tbl->it_offset;
if (find_next_bit(tbl->it_map, end, start) != end)
return true;
start = tbl->it_reserved_end - tbl->it_offset;
end = tbl->it_size;
return find_next_bit(tbl->it_map, end, start) != end;
}
static void iommu_table_free(struct kref *kref)
{
struct iommu_table *tbl;
tbl = container_of(kref, struct iommu_table, it_kref);
if (tbl->it_ops->free)
tbl->it_ops->free(tbl);
if (!tbl->it_map) {
kfree(tbl);
return;
}
iommu_debugfs_del(tbl);
/* verify that table contains no entries */
if (iommu_table_in_use(tbl))
pr_warn("%s: Unexpected TCEs\n", __func__);
/* free bitmap */
vfree(tbl->it_map);
/* free table */
kfree(tbl);
}
struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
if (kref_get_unless_zero(&tbl->it_kref))
return tbl;
return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);
int iommu_tce_table_put(struct iommu_table *tbl)
{
if (WARN_ON(!tbl))
return 0;
return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);
/* Creates TCEs for a user provided buffer. The user buffer must be
* contiguous real kernel storage (not vmalloc). The address passed here
* comprises a page address and offset into that page. The dma_addr_t
* returned will point to the same byte within the page as was passed in.
*/
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
struct page *page, unsigned long offset, size_t size,
unsigned long mask, enum dma_data_direction direction,
unsigned long attrs)
{
dma_addr_t dma_handle = DMA_MAPPING_ERROR;
void *vaddr;
unsigned long uaddr;
unsigned int npages, align;
BUG_ON(direction == DMA_NONE);
vaddr = page_address(page) + offset;
uaddr = (unsigned long)vaddr;
if (tbl) {
npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
align = 0;
if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
((unsigned long)vaddr & ~PAGE_MASK) == 0)
align = PAGE_SHIFT - tbl->it_page_shift;
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
mask >> tbl->it_page_shift, align,
attrs);
if (dma_handle == DMA_MAPPING_ERROR) {
if (!(attrs & DMA_ATTR_NO_WARN) &&
printk_ratelimit()) {
dev_info(dev, "iommu_alloc failed, tbl %p "
"vaddr %p npages %d\n", tbl, vaddr,
npages);
}
} else
dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
}
return dma_handle;
}
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
unsigned int npages;
BUG_ON(direction == DMA_NONE);
if (tbl) {
npages = iommu_num_pages(dma_handle, size,
IOMMU_PAGE_SIZE(tbl));
iommu_free(tbl, dma_handle, npages);
}
}
/* Allocates a contiguous real buffer and creates mappings over it.
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
*/
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
size_t size, dma_addr_t *dma_handle,
unsigned long mask, gfp_t flag, int node)
{
void *ret = NULL;
dma_addr_t mapping;
unsigned int order;
unsigned int nio_pages, io_order;
struct page *page;
int tcesize = (1 << tbl->it_page_shift);
size = PAGE_ALIGN(size);
order = get_order(size);
/*
* Client asked for way too much space. This is checked later
* anyway. It is easier to debug here for the drivers than in
* the tce tables.
*/
if (order >= IOMAP_MAX_ORDER) {
dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
size);
return NULL;
}
if (!tbl)
return NULL;
/* Alloc enough pages (and possibly more) */
page = alloc_pages_node(node, flag, order);
if (!page)
return NULL;
ret = page_address(page);
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> tbl->it_page_shift, io_order, 0);
if (mapping == DMA_MAPPING_ERROR) {
free_pages((unsigned long)ret, order);
return NULL;
}
*dma_handle = mapping | ((u64)ret & (tcesize - 1));
return ret;
}
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
if (tbl) {
unsigned int nio_pages;
size = PAGE_ALIGN(size);
nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
iommu_free(tbl, dma_handle, nio_pages);
size = PAGE_ALIGN(size);
free_pages((unsigned long)vaddr, get_order(size));
}
}
unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
return TCE_PCI_READ | TCE_PCI_WRITE;
case DMA_FROM_DEVICE:
return TCE_PCI_WRITE;
case DMA_TO_DEVICE:
return TCE_PCI_READ;
default:
return 0;
}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
#ifdef CONFIG_IOMMU_API
/*
* SPAPR TCE API
*/
static void group_release(void *iommu_data)
{
struct iommu_table_group *table_group = iommu_data;
table_group->group = NULL;
}
void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number, unsigned long pe_num)
{
struct iommu_group *grp;
char *name;
grp = iommu_group_alloc();
if (IS_ERR(grp)) {
pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
PTR_ERR(grp));
return;
}
table_group->group = grp;
iommu_group_set_iommudata(grp, table_group, group_release);
name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
pci_domain_number, pe_num);
if (!name)
return;
iommu_group_set_name(grp, name);
kfree(name);
}
enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
return DMA_BIDIRECTIONAL;
else if (tce & TCE_PCI_READ)
return DMA_TO_DEVICE;
else if (tce & TCE_PCI_WRITE)
return DMA_FROM_DEVICE;
else
return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);
void iommu_flush_tce(struct iommu_table *tbl)
{
/* Flush/invalidate TLB caches if necessary */
if (tbl->it_ops->flush)
tbl->it_ops->flush(tbl);
/* Make sure updates are seen by hardware */
mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);
int iommu_tce_check_ioba(unsigned long page_shift,
unsigned long offset, unsigned long size,
unsigned long ioba, unsigned long npages)
{
unsigned long mask = (1UL << page_shift) - 1;
if (ioba & mask)
return -EINVAL;
ioba >>= page_shift;
if (ioba < offset)
return -EINVAL;
if ((ioba + 1) > (offset + size))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
unsigned long mask = (1UL << page_shift) - 1;
if (gpa & mask)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
struct iommu_table *tbl,
unsigned long entry, unsigned long *hpa,
enum dma_data_direction *direction)
{
long ret;
unsigned long size = 0;
ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
if (!ret && ((*direction == DMA_FROM_DEVICE) ||
(*direction == DMA_BIDIRECTIONAL)) &&
!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
&size))
SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
void iommu_tce_kill(struct iommu_table *tbl,
unsigned long entry, unsigned long pages)
{
if (tbl->it_ops->tce_kill)
tbl->it_ops->tce_kill(tbl, entry, pages);
}
EXPORT_SYMBOL_GPL(iommu_tce_kill);
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
static int iommu_take_ownership(struct iommu_table *tbl)
{
unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
int ret = 0;
/*
* VFIO does not control TCE entry allocation, and the guest
* can write new TCEs on top of existing ones, so iommu_tce_build()
* must be able to release old pages. This functionality
* requires the exchange() callback to be defined, so if it is not
* implemented, we disallow taking ownership of the table.
*/
if (!tbl->it_ops->xchg_no_kill)
return -EINVAL;
spin_lock_irqsave(&tbl->large_pool.lock, flags);
for (i = 0; i < tbl->nr_pools; i++)
spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
if (iommu_table_in_use(tbl)) {
pr_err("iommu_tce: it_map is not empty");
ret = -EBUSY;
} else {
memset(tbl->it_map, 0xff, sz);
}
for (i = 0; i < tbl->nr_pools; i++)
spin_unlock(&tbl->pools[i].lock);
spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
return ret;
}
static void iommu_release_ownership(struct iommu_table *tbl)
{
unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
spin_lock_irqsave(&tbl->large_pool.lock, flags);
for (i = 0; i < tbl->nr_pools; i++)
spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
memset(tbl->it_map, 0, sz);
iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
tbl->it_reserved_end);
for (i = 0; i < tbl->nr_pools; i++)
spin_unlock(&tbl->pools[i].lock);
spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
#endif
int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
{
/*
* The sysfs entries should be populated before
* binding the IOMMU group. If the sysfs entries aren't
* ready, we simply bail.
*/
if (!device_is_registered(dev))
return -ENOENT;
if (device_iommu_mapped(dev)) {
pr_debug("%s: Skipping device %s with iommu group %d\n",
__func__, dev_name(dev),
iommu_group_id(dev->iommu_group));
return -EBUSY;
}
pr_debug("%s: Adding %s to iommu group %d\n",
__func__, dev_name(dev), iommu_group_id(table_group->group));
/*
* This is still not adding devices via the IOMMU bus notifier because
* of pcibios_init() from arch/powerpc/kernel/pci_64.c which calls
* pcibios_scan_phb() first (and this guy adds devices and triggers
* the notifier) and only then it calls pci_bus_add_devices() which
* configures DMA for buses which also creates PEs and IOMMU groups.
*/
return iommu_probe_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
* A simple iommu_table_group_ops which only allows reusing the existing
* iommu_table. This handles VFIO for POWER7 or the nested KVM.
* These ops do not allow creating windows and only allow reusing the existing
* one if it matches table_group->tce32_start/tce32_size/page_shift.
*/
static unsigned long spapr_tce_get_table_size(__u32 page_shift,
__u64 window_size, __u32 levels)
{
unsigned long size;
if (levels > 1)
return ~0U;
size = window_size >> (page_shift - 3);
return size;
}
static long spapr_tce_create_table(struct iommu_table_group *table_group, int num,
__u32 page_shift, __u64 window_size, __u32 levels,
struct iommu_table **ptbl)
{
struct iommu_table *tbl = table_group->tables[0];
if (num > 0)
return -EPERM;
if (tbl->it_page_shift != page_shift ||
tbl->it_size != (window_size >> page_shift) ||
tbl->it_indirect_levels != levels - 1)
return -EINVAL;
*ptbl = iommu_tce_table_get(tbl);
return 0;
}
static long spapr_tce_set_window(struct iommu_table_group *table_group,
int num, struct iommu_table *tbl)
{
return tbl == table_group->tables[num] ? 0 : -EPERM;
}
static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num)
{
return 0;
}
static long spapr_tce_take_ownership(struct iommu_table_group *table_group)
{
int i, j, rc = 0;
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
struct iommu_table *tbl = table_group->tables[i];
if (!tbl || !tbl->it_map)
continue;
rc = iommu_take_ownership(tbl);
if (!rc)
continue;
for (j = 0; j < i; ++j)
iommu_release_ownership(table_group->tables[j]);
return rc;
}
return 0;
}
static void spapr_tce_release_ownership(struct iommu_table_group *table_group)
{
int i;
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
struct iommu_table *tbl = table_group->tables[i];
if (!tbl)
continue;
iommu_table_clear(tbl);
if (tbl->it_map)
iommu_release_ownership(tbl);
}
}
struct iommu_table_group_ops spapr_tce_table_group_ops = {
.get_table_size = spapr_tce_get_table_size,
.create_table = spapr_tce_create_table,
.set_window = spapr_tce_set_window,
.unset_window = spapr_tce_unset_window,
.take_ownership = spapr_tce_take_ownership,
.release_ownership = spapr_tce_release_ownership,
};
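/*
* Usage sketch (illustrative only): a consumer such as the VFIO SPAPR
* container can only "create" a window whose parameters match the
* existing default one, which these ops then simply hand back:
*
*	struct iommu_table *deftbl = table_group->tables[0];
*	struct iommu_table *tbl = NULL;
*	long rc;
*
*	rc = table_group->ops->create_table(table_group, 0,
*			deftbl->it_page_shift, table_group->tce32_size,
*			1, &tbl);
*
* Any other num/page_shift/window_size/levels combination is rejected
* by spapr_tce_create_table() with -EPERM or -EINVAL.
*/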
/*
* A simple iommu_ops to allow less cruft in generic VFIO code.
*/
static int spapr_tce_blocking_iommu_attach_dev(struct iommu_domain *dom,
struct device *dev)
{
struct iommu_group *grp = iommu_group_get(dev);
struct iommu_table_group *table_group;
int ret = -EINVAL;
if (!grp)
return -ENODEV;
table_group = iommu_group_get_iommudata(grp);
ret = table_group->ops->take_ownership(table_group);
iommu_group_put(grp);
return ret;
}
static void spapr_tce_blocking_iommu_set_platform_dma(struct device *dev)
{
struct iommu_group *grp = iommu_group_get(dev);
struct iommu_table_group *table_group;
table_group = iommu_group_get_iommudata(grp);
table_group->ops->release_ownership(table_group);
}
static const struct iommu_domain_ops spapr_tce_blocking_domain_ops = {
.attach_dev = spapr_tce_blocking_iommu_attach_dev,
};
static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
default:
break;
}
return false;
}
static struct iommu_domain *spapr_tce_iommu_domain_alloc(unsigned int type)
{
struct iommu_domain *dom;
if (type != IOMMU_DOMAIN_BLOCKED)
return NULL;
dom = kzalloc(sizeof(*dom), GFP_KERNEL);
if (!dom)
return NULL;
dom->ops = &spapr_tce_blocking_domain_ops;
return dom;
}
static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
{
struct pci_dev *pdev;
struct pci_controller *hose;
if (!dev_is_pci(dev))
return ERR_PTR(-EPERM);
pdev = to_pci_dev(dev);
hose = pdev->bus->sysdata;
return &hose->iommu;
}
static void spapr_tce_iommu_release_device(struct device *dev)
{
}
static struct iommu_group *spapr_tce_iommu_device_group(struct device *dev)
{
struct pci_controller *hose;
struct pci_dev *pdev;
pdev = to_pci_dev(dev);
hose = pdev->bus->sysdata;
if (!hose->controller_ops.device_group)
return ERR_PTR(-ENOENT);
return hose->controller_ops.device_group(hose, pdev);
}
static const struct iommu_ops spapr_tce_iommu_ops = {
.capable = spapr_tce_iommu_capable,
.domain_alloc = spapr_tce_iommu_domain_alloc,
.probe_device = spapr_tce_iommu_probe_device,
.release_device = spapr_tce_iommu_release_device,
.device_group = spapr_tce_iommu_device_group,
.set_platform_dma_ops = spapr_tce_blocking_iommu_set_platform_dma,
};
static struct attribute *spapr_tce_iommu_attrs[] = {
NULL,
};
static struct attribute_group spapr_tce_iommu_group = {
.name = "spapr-tce-iommu",
.attrs = spapr_tce_iommu_attrs,
};
static const struct attribute_group *spapr_tce_iommu_groups[] = {
&spapr_tce_iommu_group,
NULL,
};
/*
* This registers IOMMU devices of PHBs. This needs to happen
* after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
* before subsys_initcall(iommu_subsys_init).
*/
static int __init spapr_tce_setup_phb_iommus_initcall(void)
{
struct pci_controller *hose;
list_for_each_entry(hose, &hose_list, list_node) {
iommu_device_sysfs_add(&hose->iommu, hose->parent,
spapr_tce_iommu_groups, "iommu-phb%04x",
hose->global_number);
iommu_device_register(&hose->iommu, &spapr_tce_iommu_ops,
hose->parent);
}
return 0;
}
postcore_initcall_sync(spapr_tce_setup_phb_iommus_initcall);
#endif
#endif /* CONFIG_IOMMU_API */
| linux-master | arch/powerpc/kernel/iommu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Procedures for interfacing to the RTAS on CHRP machines.
*
* Peter Bergner, IBM March 2001.
* Copyright (C) 2001 IBM.
*/
#define pr_fmt(fmt) "rtas: " fmt
#include <linux/bsearch.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stdarg.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/xarray.h>
#include <asm/delay.h>
#include <asm/firmware.h>
#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/rtas-work-area.h>
#include <asm/rtas.h>
#include <asm/time.h>
#include <asm/trace.h>
#include <asm/udbg.h>
struct rtas_filter {
/* Indexes into the args buffer, -1 if not used */
const int buf_idx1;
const int size_idx1;
const int buf_idx2;
const int size_idx2;
/*
* Assumed buffer size per the spec if the function does not
* have a size parameter, e.g. ibm,errinjct. 0 if unused.
*/
const int fixed_size;
};
/**
* struct rtas_function - Descriptor for RTAS functions.
*
* @token: Value of @name if it exists under the /rtas node.
* @name: Function name.
* @filter: If non-NULL, invoking this function via the rtas syscall is
* generally allowed, and @filter describes constraints on the
* arguments. See also @banned_for_syscall_on_le.
* @banned_for_syscall_on_le: Set when call via sys_rtas is generally allowed
* but specifically restricted on ppc64le. Such
* functions are believed to have no users on
* ppc64le, and we want to keep it that way. It does
* not make sense for this to be set when @filter
* is NULL.
*/
struct rtas_function {
s32 token;
const bool banned_for_syscall_on_le:1;
const char * const name;
const struct rtas_filter *filter;
};
static struct rtas_function rtas_function_table[] __ro_after_init = {
[RTAS_FNIDX__CHECK_EXCEPTION] = {
.name = "check-exception",
},
[RTAS_FNIDX__DISPLAY_CHARACTER] = {
.name = "display-character",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__EVENT_SCAN] = {
.name = "event-scan",
},
[RTAS_FNIDX__FREEZE_TIME_BASE] = {
.name = "freeze-time-base",
},
[RTAS_FNIDX__GET_POWER_LEVEL] = {
.name = "get-power-level",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__GET_SENSOR_STATE] = {
.name = "get-sensor-state",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__GET_TERM_CHAR] = {
.name = "get-term-char",
},
[RTAS_FNIDX__GET_TIME_OF_DAY] = {
.name = "get-time-of-day",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_ACTIVATE_FIRMWARE] = {
.name = "ibm,activate-firmware",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_CBE_START_PTCAL] = {
.name = "ibm,cbe-start-ptcal",
},
[RTAS_FNIDX__IBM_CBE_STOP_PTCAL] = {
.name = "ibm,cbe-stop-ptcal",
},
[RTAS_FNIDX__IBM_CHANGE_MSI] = {
.name = "ibm,change-msi",
},
[RTAS_FNIDX__IBM_CLOSE_ERRINJCT] = {
.name = "ibm,close-errinjct",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_CONFIGURE_BRIDGE] = {
.name = "ibm,configure-bridge",
},
[RTAS_FNIDX__IBM_CONFIGURE_CONNECTOR] = {
.name = "ibm,configure-connector",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 0, .size_idx1 = -1,
.buf_idx2 = 1, .size_idx2 = -1,
.fixed_size = 4096,
},
},
[RTAS_FNIDX__IBM_CONFIGURE_KERNEL_DUMP] = {
.name = "ibm,configure-kernel-dump",
},
[RTAS_FNIDX__IBM_CONFIGURE_PE] = {
.name = "ibm,configure-pe",
},
[RTAS_FNIDX__IBM_CREATE_PE_DMA_WINDOW] = {
.name = "ibm,create-pe-dma-window",
},
[RTAS_FNIDX__IBM_DISPLAY_MESSAGE] = {
.name = "ibm,display-message",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 0, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_ERRINJCT] = {
.name = "ibm,errinjct",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 2, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
.fixed_size = 1024,
},
},
[RTAS_FNIDX__IBM_EXTI2C] = {
.name = "ibm,exti2c",
},
[RTAS_FNIDX__IBM_GET_CONFIG_ADDR_INFO] = {
.name = "ibm,get-config-addr-info",
},
[RTAS_FNIDX__IBM_GET_CONFIG_ADDR_INFO2] = {
.name = "ibm,get-config-addr-info2",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_GET_DYNAMIC_SENSOR_STATE] = {
.name = "ibm,get-dynamic-sensor-state",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_GET_INDICES] = {
.name = "ibm,get-indices",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 2, .size_idx1 = 3,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_GET_RIO_TOPOLOGY] = {
.name = "ibm,get-rio-topology",
},
[RTAS_FNIDX__IBM_GET_SYSTEM_PARAMETER] = {
.name = "ibm,get-system-parameter",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 1, .size_idx1 = 2,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_GET_VPD] = {
.name = "ibm,get-vpd",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 0, .size_idx1 = -1,
.buf_idx2 = 1, .size_idx2 = 2,
},
},
[RTAS_FNIDX__IBM_GET_XIVE] = {
.name = "ibm,get-xive",
},
[RTAS_FNIDX__IBM_INT_OFF] = {
.name = "ibm,int-off",
},
[RTAS_FNIDX__IBM_INT_ON] = {
.name = "ibm,int-on",
},
[RTAS_FNIDX__IBM_IO_QUIESCE_ACK] = {
.name = "ibm,io-quiesce-ack",
},
[RTAS_FNIDX__IBM_LPAR_PERFTOOLS] = {
.name = "ibm,lpar-perftools",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 2, .size_idx1 = 3,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_MANAGE_FLASH_IMAGE] = {
.name = "ibm,manage-flash-image",
},
[RTAS_FNIDX__IBM_MANAGE_STORAGE_PRESERVATION] = {
.name = "ibm,manage-storage-preservation",
},
[RTAS_FNIDX__IBM_NMI_INTERLOCK] = {
.name = "ibm,nmi-interlock",
},
[RTAS_FNIDX__IBM_NMI_REGISTER] = {
.name = "ibm,nmi-register",
},
[RTAS_FNIDX__IBM_OPEN_ERRINJCT] = {
.name = "ibm,open-errinjct",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_OPEN_SRIOV_ALLOW_UNFREEZE] = {
.name = "ibm,open-sriov-allow-unfreeze",
},
[RTAS_FNIDX__IBM_OPEN_SRIOV_MAP_PE_NUMBER] = {
.name = "ibm,open-sriov-map-pe-number",
},
[RTAS_FNIDX__IBM_OS_TERM] = {
.name = "ibm,os-term",
},
[RTAS_FNIDX__IBM_PARTNER_CONTROL] = {
.name = "ibm,partner-control",
},
[RTAS_FNIDX__IBM_PHYSICAL_ATTESTATION] = {
.name = "ibm,physical-attestation",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 0, .size_idx1 = 1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_PLATFORM_DUMP] = {
.name = "ibm,platform-dump",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 4, .size_idx1 = 5,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_POWER_OFF_UPS] = {
.name = "ibm,power-off-ups",
},
[RTAS_FNIDX__IBM_QUERY_INTERRUPT_SOURCE_NUMBER] = {
.name = "ibm,query-interrupt-source-number",
},
[RTAS_FNIDX__IBM_QUERY_PE_DMA_WINDOW] = {
.name = "ibm,query-pe-dma-window",
},
[RTAS_FNIDX__IBM_READ_PCI_CONFIG] = {
.name = "ibm,read-pci-config",
},
[RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE] = {
.name = "ibm,read-slot-reset-state",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2] = {
.name = "ibm,read-slot-reset-state2",
},
[RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW] = {
.name = "ibm,remove-pe-dma-window",
},
[RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS] = {
.name = "ibm,reset-pe-dma-windows",
},
[RTAS_FNIDX__IBM_SCAN_LOG_DUMP] = {
.name = "ibm,scan-log-dump",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 0, .size_idx1 = 1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR] = {
.name = "ibm,set-dynamic-indicator",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 2, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_SET_EEH_OPTION] = {
.name = "ibm,set-eeh-option",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_SET_SLOT_RESET] = {
.name = "ibm,set-slot-reset",
},
[RTAS_FNIDX__IBM_SET_SYSTEM_PARAMETER] = {
.name = "ibm,set-system-parameter",
.filter = &(const struct rtas_filter) {
.buf_idx1 = 1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_SET_XIVE] = {
.name = "ibm,set-xive",
},
[RTAS_FNIDX__IBM_SLOT_ERROR_DETAIL] = {
.name = "ibm,slot-error-detail",
},
[RTAS_FNIDX__IBM_SUSPEND_ME] = {
.name = "ibm,suspend-me",
.banned_for_syscall_on_le = true,
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__IBM_TUNE_DMA_PARMS] = {
.name = "ibm,tune-dma-parms",
},
[RTAS_FNIDX__IBM_UPDATE_FLASH_64_AND_REBOOT] = {
.name = "ibm,update-flash-64-and-reboot",
},
[RTAS_FNIDX__IBM_UPDATE_NODES] = {
.name = "ibm,update-nodes",
.banned_for_syscall_on_le = true,
.filter = &(const struct rtas_filter) {
.buf_idx1 = 0, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
.fixed_size = 4096,
},
},
[RTAS_FNIDX__IBM_UPDATE_PROPERTIES] = {
.name = "ibm,update-properties",
.banned_for_syscall_on_le = true,
.filter = &(const struct rtas_filter) {
.buf_idx1 = 0, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
.fixed_size = 4096,
},
},
[RTAS_FNIDX__IBM_VALIDATE_FLASH_IMAGE] = {
.name = "ibm,validate-flash-image",
},
[RTAS_FNIDX__IBM_WRITE_PCI_CONFIG] = {
.name = "ibm,write-pci-config",
},
[RTAS_FNIDX__NVRAM_FETCH] = {
.name = "nvram-fetch",
},
[RTAS_FNIDX__NVRAM_STORE] = {
.name = "nvram-store",
},
[RTAS_FNIDX__POWER_OFF] = {
.name = "power-off",
},
[RTAS_FNIDX__PUT_TERM_CHAR] = {
.name = "put-term-char",
},
[RTAS_FNIDX__QUERY_CPU_STOPPED_STATE] = {
.name = "query-cpu-stopped-state",
},
[RTAS_FNIDX__READ_PCI_CONFIG] = {
.name = "read-pci-config",
},
[RTAS_FNIDX__RTAS_LAST_ERROR] = {
.name = "rtas-last-error",
},
[RTAS_FNIDX__SET_INDICATOR] = {
.name = "set-indicator",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__SET_POWER_LEVEL] = {
.name = "set-power-level",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__SET_TIME_FOR_POWER_ON] = {
.name = "set-time-for-power-on",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__SET_TIME_OF_DAY] = {
.name = "set-time-of-day",
.filter = &(const struct rtas_filter) {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
},
[RTAS_FNIDX__START_CPU] = {
.name = "start-cpu",
},
[RTAS_FNIDX__STOP_SELF] = {
.name = "stop-self",
},
[RTAS_FNIDX__SYSTEM_REBOOT] = {
.name = "system-reboot",
},
[RTAS_FNIDX__THAW_TIME_BASE] = {
.name = "thaw-time-base",
},
[RTAS_FNIDX__WRITE_PCI_CONFIG] = {
.name = "write-pci-config",
},
};
/*
* Nearly all RTAS calls need to be serialized. All uses of the
* default rtas_args block must hold rtas_lock.
*
* Exceptions to the RTAS serialization requirement (e.g. stop-self)
* must use a separate rtas_args structure.
*/
static DEFINE_RAW_SPINLOCK(rtas_lock);
static struct rtas_args rtas_args;
/**
* rtas_function_token() - RTAS function token lookup.
* @handle: Function handle, e.g. RTAS_FN_EVENT_SCAN.
*
* Context: Any context.
* Return: the token value for the function if implemented by this platform,
* otherwise RTAS_UNKNOWN_SERVICE.
*/
s32 rtas_function_token(const rtas_fn_handle_t handle)
{
const size_t index = handle.index;
const bool out_of_bounds = index >= ARRAY_SIZE(rtas_function_table);
if (WARN_ONCE(out_of_bounds, "invalid function index %zu", index))
return RTAS_UNKNOWN_SERVICE;
/*
* Various drivers attempt token lookups on non-RTAS
* platforms.
*/
if (!rtas.dev)
return RTAS_UNKNOWN_SERVICE;
return rtas_function_table[index].token;
}
EXPORT_SYMBOL_GPL(rtas_function_token);
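/*
* Usage sketch (illustrative only): the usual pattern is to look the
* token up once and bail out gracefully on platforms where the
* function is absent, e.g.
*
*	int token = rtas_function_token(RTAS_FN_IBM_GET_SYSTEM_PARAMETER);
*
*	if (token == RTAS_UNKNOWN_SERVICE)
*		return -ENODEV;
*	rc = rtas_call(token, 3, 1, NULL, param, __pa(buf), buflen);
*
* param, buf and buflen are placeholders for the caller's own data.
*/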
static int rtas_function_cmp(const void *a, const void *b)
{
const struct rtas_function *f1 = a;
const struct rtas_function *f2 = b;
return strcmp(f1->name, f2->name);
}
/*
* Boot-time initialization of the function table needs the lookup to
* return a non-const-qualified object. Use rtas_name_to_function()
* in all other contexts.
*/
static struct rtas_function *__rtas_name_to_function(const char *name)
{
const struct rtas_function key = {
.name = name,
};
struct rtas_function *found;
found = bsearch(&key, rtas_function_table, ARRAY_SIZE(rtas_function_table),
sizeof(rtas_function_table[0]), rtas_function_cmp);
return found;
}
static const struct rtas_function *rtas_name_to_function(const char *name)
{
return __rtas_name_to_function(name);
}
static DEFINE_XARRAY(rtas_token_to_function_xarray);
static int __init rtas_token_to_function_xarray_init(void)
{
int err = 0;
for (size_t i = 0; i < ARRAY_SIZE(rtas_function_table); ++i) {
const struct rtas_function *func = &rtas_function_table[i];
const s32 token = func->token;
if (token == RTAS_UNKNOWN_SERVICE)
continue;
err = xa_err(xa_store(&rtas_token_to_function_xarray,
token, (void *)func, GFP_KERNEL));
if (err)
break;
}
return err;
}
arch_initcall(rtas_token_to_function_xarray_init);
static const struct rtas_function *rtas_token_to_function(s32 token)
{
const struct rtas_function *func;
if (WARN_ONCE(token < 0, "invalid token %d", token))
return NULL;
func = xa_load(&rtas_token_to_function_xarray, token);
if (WARN_ONCE(!func, "unexpected failed lookup for token %d", token))
return NULL;
return func;
}
/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);
static void __do_enter_rtas(struct rtas_args *args)
{
enter_rtas(__pa(args));
srr_regs_clobbered(); /* rtas uses SRRs, invalidate */
}
static void __do_enter_rtas_trace(struct rtas_args *args)
{
const char *name = NULL;
if (args == &rtas_args)
lockdep_assert_held(&rtas_lock);
/*
* If the tracepoints that consume the function name aren't
* active, avoid the lookup.
*/
if ((trace_rtas_input_enabled() || trace_rtas_output_enabled())) {
const s32 token = be32_to_cpu(args->token);
const struct rtas_function *func = rtas_token_to_function(token);
name = func->name;
}
trace_rtas_input(args, name);
trace_rtas_ll_entry(args);
__do_enter_rtas(args);
trace_rtas_ll_exit(args);
trace_rtas_output(args, name);
}
static void do_enter_rtas(struct rtas_args *args)
{
const unsigned long msr = mfmsr();
/*
* Situations where we want to skip any active tracepoints for
* safety reasons:
*
* 1. The last code executed on an offline CPU as it stops,
* i.e. we're about to call stop-self. The tracepoints'
* function name lookup uses xarray, which uses RCU, which
* isn't valid to call on an offline CPU. Any events
* emitted on an offline CPU will be discarded anyway.
*
* 2. In real mode, as when invoking ibm,nmi-interlock from
* the pseries MCE handler. We cannot count on trace
* buffers or the entries in rtas_token_to_function_xarray
* to be contained in the RMO.
*/
const unsigned long mask = MSR_IR | MSR_DR;
const bool can_trace = likely(cpu_online(raw_smp_processor_id()) &&
(msr & mask) == mask);
/*
* Make sure MSR[RI] is currently enabled as it will be forced later
* in enter_rtas.
*/
BUG_ON(!(msr & MSR_RI));
BUG_ON(!irqs_disabled());
hard_irq_disable(); /* Ensure MSR[EE] is disabled on PPC64 */
if (can_trace)
__do_enter_rtas_trace(args);
else
__do_enter_rtas(args);
}
struct rtas_t rtas;
DEFINE_SPINLOCK(rtas_data_buf_lock);
EXPORT_SYMBOL_GPL(rtas_data_buf_lock);
char rtas_data_buf[RTAS_DATA_BUF_SIZE] __aligned(SZ_4K);
EXPORT_SYMBOL_GPL(rtas_data_buf);
unsigned long rtas_rmo_buf;
/*
* If non-NULL, this gets called when the kernel terminates.
* This is done like this so rtas_flash can be a module.
*/
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL_GPL(rtas_flash_term_hook);
/*
* call_rtas_display_status and call_rtas_display_status_delay
* are designed only for very early low-level debugging, which
* is why the token is hard-coded to 10.
*/
static void call_rtas_display_status(unsigned char c)
{
unsigned long flags;
if (!rtas.base)
return;
raw_spin_lock_irqsave(&rtas_lock, flags);
rtas_call_unlocked(&rtas_args, 10, 1, 1, NULL, c);
raw_spin_unlock_irqrestore(&rtas_lock, flags);
}
static void call_rtas_display_status_delay(char c)
{
static int pending_newline = 0; /* did last write end with unprinted newline? */
static int width = 16;
if (c == '\n') {
while (width-- > 0)
call_rtas_display_status(' ');
width = 16;
mdelay(500);
pending_newline = 1;
} else {
if (pending_newline) {
call_rtas_display_status('\r');
call_rtas_display_status('\n');
}
pending_newline = 0;
if (width--) {
call_rtas_display_status(c);
udelay(10000);
}
}
}
void __init udbg_init_rtas_panel(void)
{
udbg_putc = call_rtas_display_status_delay;
}
#ifdef CONFIG_UDBG_RTAS_CONSOLE
/* If you think you're dying before early_init_dt_scan_rtas() does its
* work, you can hard-code the token values for your firmware here and
* hard-code rtas.base/entry etc.
*/
static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;
static void udbg_rtascon_putc(char c)
{
int tries;
if (!rtas.base)
return;
/* Add CRs before LFs */
if (c == '\n')
udbg_rtascon_putc('\r');
/* if there is more than one character to be displayed, wait a bit */
for (tries = 0; tries < 16; tries++) {
if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
break;
udelay(1000);
}
}
static int udbg_rtascon_getc_poll(void)
{
int c;
if (!rtas.base)
return -1;
if (rtas_call(rtas_getchar_token, 0, 2, &c))
return -1;
return c;
}
static int udbg_rtascon_getc(void)
{
int c;
while ((c = udbg_rtascon_getc_poll()) == -1)
;
return c;
}
void __init udbg_init_rtas_console(void)
{
udbg_putc = udbg_rtascon_putc;
udbg_getc = udbg_rtascon_getc;
udbg_getc_poll = udbg_rtascon_getc_poll;
}
#endif /* CONFIG_UDBG_RTAS_CONSOLE */
void rtas_progress(char *s, unsigned short hex)
{
struct device_node *root;
int width;
const __be32 *p;
char *os;
static int display_character, set_indicator;
static int display_width, display_lines, form_feed;
static const int *row_width;
static DEFINE_SPINLOCK(progress_lock);
static int current_line;
static int pending_newline = 0; /* did last write end with unprinted newline? */
if (!rtas.base)
return;
if (display_width == 0) {
display_width = 0x10;
if ((root = of_find_node_by_path("/rtas"))) {
if ((p = of_get_property(root,
"ibm,display-line-length", NULL)))
display_width = be32_to_cpu(*p);
if ((p = of_get_property(root,
"ibm,form-feed", NULL)))
form_feed = be32_to_cpu(*p);
if ((p = of_get_property(root,
"ibm,display-number-of-lines", NULL)))
display_lines = be32_to_cpu(*p);
row_width = of_get_property(root,
"ibm,display-truncation-length", NULL);
of_node_put(root);
}
display_character = rtas_function_token(RTAS_FN_DISPLAY_CHARACTER);
set_indicator = rtas_function_token(RTAS_FN_SET_INDICATOR);
}
if (display_character == RTAS_UNKNOWN_SERVICE) {
/* use hex display if available */
if (set_indicator != RTAS_UNKNOWN_SERVICE)
rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
return;
}
spin_lock(&progress_lock);
/*
* Last write ended with newline, but we didn't print it since
* it would just clear the bottom line of output. Print it now
* instead.
*
* If no newline is pending and form feed is supported, clear the
* display with a form feed; otherwise, print a CR to start output
* at the beginning of the line.
*/
if (pending_newline) {
rtas_call(display_character, 1, 1, NULL, '\r');
rtas_call(display_character, 1, 1, NULL, '\n');
pending_newline = 0;
} else {
current_line = 0;
if (form_feed)
rtas_call(display_character, 1, 1, NULL,
(char)form_feed);
else
rtas_call(display_character, 1, 1, NULL, '\r');
}
if (row_width)
width = row_width[current_line];
else
width = display_width;
os = s;
while (*os) {
if (*os == '\n' || *os == '\r') {
/* If newline is the last character, save it
* until next call to avoid bumping up the
* display output.
*/
if (*os == '\n' && !os[1]) {
pending_newline = 1;
current_line++;
if (current_line > display_lines-1)
current_line = display_lines-1;
spin_unlock(&progress_lock);
return;
}
/* RTAS wants CR-LF, not just LF */
if (*os == '\n') {
rtas_call(display_character, 1, 1, NULL, '\r');
rtas_call(display_character, 1, 1, NULL, '\n');
} else {
/* CR might be used to re-draw a line, so we'll
* leave it alone and not add LF.
*/
rtas_call(display_character, 1, 1, NULL, *os);
}
if (row_width)
width = row_width[current_line];
else
width = display_width;
} else {
width--;
rtas_call(display_character, 1, 1, NULL, *os);
}
os++;
/* if we would overrun the row width, skip the rest of the line */
if (width <= 0)
while ((*os != 0) && (*os != '\n') && (*os != '\r'))
os++;
}
spin_unlock(&progress_lock);
}
EXPORT_SYMBOL_GPL(rtas_progress); /* needed by rtas_flash module */
int rtas_token(const char *service)
{
const struct rtas_function *func;
const __be32 *tokp;
if (rtas.dev == NULL)
return RTAS_UNKNOWN_SERVICE;
func = rtas_name_to_function(service);
if (func)
return func->token;
/*
* The caller is looking up a name that is not known to be an
* RTAS function. Either it's a function that needs to be
* added to the table, or they're misusing rtas_token() to
* access non-function properties of the /rtas node. Warn and
* fall back to the legacy behavior.
*/
WARN_ONCE(1, "unknown function `%s`, should it be added to rtas_function_table?\n",
service);
tokp = of_get_property(rtas.dev, service, NULL);
return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL_GPL(rtas_token);
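/*
* Note (illustrative only): new code should prefer the typed handle
* lookup over the string lookup, e.g.
*
*	s32 tok = rtas_function_token(RTAS_FN_SYSTEM_REBOOT);
*
* rather than
*
*	int tok = rtas_token("system-reboot");
*
* which goes through the same table but has to warn and fall back to a
* device tree property lookup for names it does not recognise.
*/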
int rtas_service_present(const char *service)
{
return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
}
#ifdef CONFIG_RTAS_ERROR_LOGGING
static u32 rtas_error_log_max __ro_after_init = RTAS_ERROR_LOG_MAX;
/*
* Return the firmware-specified size of the error log buffer
* for all rtas calls that require an error buffer argument.
* This includes 'check-exception' and 'rtas-last-error'.
*/
int rtas_get_error_log_max(void)
{
return rtas_error_log_max;
}
static void __init init_error_log_max(void)
{
static const char propname[] __initconst = "rtas-error-log-max";
u32 max;
if (of_property_read_u32(rtas.dev, propname, &max)) {
pr_warn("%s not found, using default of %u\n",
propname, RTAS_ERROR_LOG_MAX);
max = RTAS_ERROR_LOG_MAX;
}
if (max > RTAS_ERROR_LOG_MAX) {
pr_warn("%s = %u, clamping max error log size to %u\n",
propname, max, RTAS_ERROR_LOG_MAX);
max = RTAS_ERROR_LOG_MAX;
}
rtas_error_log_max = max;
}
static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
/*
* Return a copy of the detailed error text associated with the
* most recent failed call to rtas. Because the error text
* might go stale if there are any other intervening rtas calls,
* this routine must be called atomically with whatever produced
* the error (i.e. with rtas_lock still held from the previous call).
*/
static char *__fetch_rtas_last_error(char *altbuf)
{
const s32 token = rtas_function_token(RTAS_FN_RTAS_LAST_ERROR);
struct rtas_args err_args, save_args;
u32 bufsz;
char *buf = NULL;
lockdep_assert_held(&rtas_lock);
if (token == -1)
return NULL;
bufsz = rtas_get_error_log_max();
err_args.token = cpu_to_be32(token);
err_args.nargs = cpu_to_be32(2);
err_args.nret = cpu_to_be32(1);
err_args.args[0] = cpu_to_be32(__pa(rtas_err_buf));
err_args.args[1] = cpu_to_be32(bufsz);
err_args.args[2] = 0;
save_args = rtas_args;
rtas_args = err_args;
do_enter_rtas(&rtas_args);
err_args = rtas_args;
rtas_args = save_args;
/* Log the error in the unlikely case that there was one. */
if (unlikely(err_args.args[2] == 0)) {
if (altbuf) {
buf = altbuf;
} else {
buf = rtas_err_buf;
if (slab_is_available())
buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
}
if (buf)
memmove(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
}
return buf;
}
#define get_errorlog_buffer() kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)
#else /* CONFIG_RTAS_ERROR_LOGGING */
#define __fetch_rtas_last_error(x) NULL
#define get_errorlog_buffer() NULL
static void __init init_error_log_max(void) {}
#endif
static void
va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
va_list list)
{
int i;
args->token = cpu_to_be32(token);
args->nargs = cpu_to_be32(nargs);
args->nret = cpu_to_be32(nret);
args->rets = &(args->args[nargs]);
for (i = 0; i < nargs; ++i)
args->args[i] = cpu_to_be32(va_arg(list, __u32));
for (i = 0; i < nret; ++i)
args->rets[i] = 0;
do_enter_rtas(args);
}
/**
* rtas_call_unlocked() - Invoke an RTAS firmware function without synchronization.
* @args: RTAS parameter block to be used for the call, must obey RTAS addressing
* constraints.
* @token: Identifies the function being invoked.
* @nargs: Number of input parameters. Does not include token.
* @nret: Number of output parameters, including the call status.
* @....: List of @nargs input parameters.
*
* Invokes the RTAS function indicated by @token, which the caller
* should obtain via rtas_function_token().
*
* This function is similar to rtas_call(), but must be used with a
* limited set of RTAS calls specifically exempted from the general
* requirement that only one RTAS call may be in progress at any
* time. Examples include stop-self and ibm,nmi-interlock.
*/
void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
{
va_list list;
va_start(list, nret);
va_rtas_call_unlocked(args, token, nargs, nret, list);
va_end(list);
}
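/*
* Usage sketch (illustrative only): callers exempt from the
* serialization requirement supply their own parameter block. The CPU
* offline path, for example, conceptually does something like
*
*	static struct rtas_args args;
*
*	rtas_call_unlocked(&args, rtas_function_token(RTAS_FN_STOP_SELF),
*			   0, 1, NULL);
*
* stop-self takes no inputs and returns only a status word.
*/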
static bool token_is_restricted_errinjct(s32 token)
{
return token == rtas_function_token(RTAS_FN_IBM_OPEN_ERRINJCT) ||
token == rtas_function_token(RTAS_FN_IBM_ERRINJCT);
}
/**
* rtas_call() - Invoke an RTAS firmware function.
* @token: Identifies the function being invoked.
* @nargs: Number of input parameters. Does not include token.
* @nret: Number of output parameters, including the call status.
* @outputs: Array of @nret output words.
* @....: List of @nargs input parameters.
*
* Invokes the RTAS function indicated by @token, which the caller
* should obtain via rtas_function_token().
*
* The @nargs and @nret arguments must match the number of input and
* output parameters specified for the RTAS function.
*
* rtas_call() returns RTAS status codes, not conventional Linux errno
* values. Callers must translate any failure to an appropriate errno
* in syscall context. Most callers of RTAS functions that can return
* -2 or 990x should use rtas_busy_delay() to correctly handle those
* statuses before calling again.
*
* The return value descriptions are adapted from 7.2.8 [RTAS] Return
* Codes of the PAPR and CHRP specifications.
*
* Context: Process context preferably, interrupt context if
* necessary. Acquires an internal spinlock and may perform
* GFP_ATOMIC slab allocation in error path. Unsafe for NMI
* context.
* Return:
* * 0 - RTAS function call succeeded.
* * -1 - RTAS function encountered a hardware or
* platform error, or the token is invalid,
* or the function is restricted by kernel policy.
* * -2 - Specs say "A necessary hardware device was busy,
* and the requested function could not be
* performed. The operation should be retried at
* a later time." This is misleading, at least with
* respect to current RTAS implementations. What it
* usually means in practice is that the function
* could not be completed while meeting RTAS's
* deadline for returning control to the OS (250us
* for PAPR/PowerVM, typically), but the call may be
* immediately reattempted to resume work on it.
* * -3 - Parameter error.
* * -7 - Unexpected state change.
* * 9000...9899 - Vendor-specific success codes.
* * 9900...9905 - Advisory extended delay. Caller should try
* again after ~10^x ms has elapsed, where x is
* the last digit of the status [0-5]. Again going
* beyond the PAPR text, 990x on PowerVM indicates
* contention for RTAS-internal resources. Other
* RTAS call sequences in progress should be
* allowed to complete before reattempting the
* call.
* * -9000 - Multi-level isolation error.
* * -9999...-9004 - Vendor-specific error codes.
* * Additional negative values - Function-specific error.
* * Additional positive values - Function-specific success.
*/
int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{
struct pin_cookie cookie;
va_list list;
int i;
unsigned long flags;
struct rtas_args *args;
char *buff_copy = NULL;
int ret;
if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
return -1;
if (token_is_restricted_errinjct(token)) {
/*
* It would be nicer to not discard the error value
* from security_locked_down(), but callers expect an
* RTAS status, not an errno.
*/
if (security_locked_down(LOCKDOWN_RTAS_ERROR_INJECTION))
return -1;
}
if ((mfmsr() & (MSR_IR|MSR_DR)) != (MSR_IR|MSR_DR)) {
WARN_ON_ONCE(1);
return -1;
}
raw_spin_lock_irqsave(&rtas_lock, flags);
cookie = lockdep_pin_lock(&rtas_lock);
/* We use the global rtas args buffer */
args = &rtas_args;
va_start(list, outputs);
va_rtas_call_unlocked(args, token, nargs, nret, list);
va_end(list);
/*
* A -1 return code indicates that the last command couldn't
* be completed due to a hardware error.
*/
if (be32_to_cpu(args->rets[0]) == -1)
buff_copy = __fetch_rtas_last_error(NULL);
if (nret > 1 && outputs != NULL)
for (i = 0; i < nret-1; ++i)
outputs[i] = be32_to_cpu(args->rets[i + 1]);
ret = (nret > 0) ? be32_to_cpu(args->rets[0]) : 0;
lockdep_unpin_lock(&rtas_lock, cookie);
raw_spin_unlock_irqrestore(&rtas_lock, flags);
if (buff_copy) {
log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
if (slab_is_available())
kfree(buff_copy);
}
return ret;
}
EXPORT_SYMBOL_GPL(rtas_call);
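/*
* Usage sketch (illustrative only): a typical caller handles -2/990x
* with rtas_busy_delay() and converts the final RTAS status to an
* errno with rtas_error_rc():
*
*	int token = rtas_function_token(RTAS_FN_SET_INDICATOR);
*	int rc;
*
*	if (token == RTAS_UNKNOWN_SERVICE)
*		return -ENOENT;
*	do {
*		rc = rtas_call(token, 3, 1, NULL, indicator, index, value);
*	} while (rtas_busy_delay(rc));
*	if (rc < 0)
*		return rtas_error_rc(rc);
*
* This is exactly the shape of the rtas_set_indicator() helper further
* down in this file.
*/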
/**
* rtas_busy_delay_time() - From an RTAS status value, calculate the
* suggested delay time in milliseconds.
*
* @status: a value returned from rtas_call() or similar APIs which return
* the status of a RTAS function call.
*
* Context: Any context.
*
* Return:
* * 100000 - If @status is 9905.
* * 10000 - If @status is 9904.
* * 1000 - If @status is 9903.
* * 100 - If @status is 9902.
* * 10 - If @status is 9901.
* * 1 - If @status is either 9900 or -2. This is "wrong" for -2, but
* some callers depend on this behavior, and the worst outcome
* is that they will delay for longer than necessary.
* * 0 - If @status is not a busy or extended delay value.
*/
unsigned int rtas_busy_delay_time(int status)
{
int order;
unsigned int ms = 0;
if (status == RTAS_BUSY) {
ms = 1;
} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
status <= RTAS_EXTENDED_DELAY_MAX) {
order = status - RTAS_EXTENDED_DELAY_MIN;
for (ms = 1; order > 0; order--)
ms *= 10;
}
return ms;
}
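/*
* Worked example (illustrative only): for an extended delay status of
* 9903, order = 9903 - RTAS_EXTENDED_DELAY_MIN = 3, so the loop yields
* ms = 10^3, i.e.
*
*	rtas_busy_delay_time(9903) == 1000;
*	rtas_busy_delay_time(RTAS_BUSY) == 1;	/- status -2 -/
*	rtas_busy_delay_time(0) == 0;
*/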
/*
* Early boot fallback for rtas_busy_delay().
*/
static bool __init rtas_busy_delay_early(int status)
{
static size_t successive_ext_delays __initdata;
bool retry;
switch (status) {
case RTAS_EXTENDED_DELAY_MIN...RTAS_EXTENDED_DELAY_MAX:
/*
* In the unlikely case that we receive an extended
* delay status in early boot, the OS is probably not
* the cause, and there's nothing we can do to clear
* the condition. Best we can do is delay for a bit
* and hope it's transient. Lie to the caller if it
* seems like we're stuck in a retry loop.
*/
mdelay(1);
retry = true;
successive_ext_delays += 1;
if (successive_ext_delays > 1000) {
pr_err("too many extended delays, giving up\n");
dump_stack();
retry = false;
successive_ext_delays = 0;
}
break;
case RTAS_BUSY:
retry = true;
successive_ext_delays = 0;
break;
default:
retry = false;
successive_ext_delays = 0;
break;
}
return retry;
}
/**
* rtas_busy_delay() - helper for RTAS busy and extended delay statuses
*
* @status: a value returned from rtas_call() or similar APIs which return
* the status of a RTAS function call.
*
* Context: Process context. May sleep or schedule.
*
* Return:
* * true - @status is RTAS_BUSY or an extended delay hint. The
* caller may assume that the CPU has been yielded if necessary,
* and that an appropriate delay for @status has elapsed.
* Generally the caller should reattempt the RTAS call which
* yielded @status.
*
* * false - @status is not @RTAS_BUSY nor an extended delay hint. The
* caller is responsible for handling @status.
*/
bool __ref rtas_busy_delay(int status)
{
unsigned int ms;
bool ret;
/*
* Can't do timed sleeps before timekeeping is up.
*/
if (system_state < SYSTEM_SCHEDULING)
return rtas_busy_delay_early(status);
switch (status) {
case RTAS_EXTENDED_DELAY_MIN...RTAS_EXTENDED_DELAY_MAX:
ret = true;
ms = rtas_busy_delay_time(status);
/*
* The extended delay hint can be as high as 100 seconds.
* Surely any function returning such a status is either
* buggy or isn't going to be significantly slowed by us
* polling at 1HZ. Clamp the sleep time to one second.
*/
ms = clamp(ms, 1U, 1000U);
/*
* The delay hint is an order-of-magnitude suggestion, not
* a minimum. It is fine, possibly even advantageous, for
* us to pause for less time than hinted. For small values,
* use usleep_range() to ensure we don't sleep much longer
* than actually needed.
*
* See Documentation/timers/timers-howto.rst for
* explanation of the threshold used here. In effect we use
* usleep_range() for 9900 and 9901, msleep() for
* 9902-9905.
*/
if (ms <= 20)
usleep_range(ms * 100, ms * 1000);
else
msleep(ms);
break;
case RTAS_BUSY:
ret = true;
/*
* We should call again immediately if there's no other
* work to do.
*/
cond_resched();
break;
default:
ret = false;
/*
* Not a busy or extended delay status; the caller should
* handle @status itself. Ensure we warn on misuses in
* atomic context regardless.
*/
might_sleep();
break;
}
return ret;
}
EXPORT_SYMBOL_GPL(rtas_busy_delay);
int rtas_error_rc(int rtas_rc)
{
int rc;
switch (rtas_rc) {
case RTAS_HARDWARE_ERROR: /* Hardware Error */
rc = -EIO;
break;
case RTAS_INVALID_PARAMETER: /* Bad indicator/domain/etc */
rc = -EINVAL;
break;
case -9000: /* Isolation error */
rc = -EFAULT;
break;
case -9001: /* Outstanding TCE/PTE */
rc = -EEXIST;
break;
case -9002: /* No usable slot */
rc = -ENODEV;
break;
default:
pr_err("%s: unexpected error %d\n", __func__, rtas_rc);
rc = -ERANGE;
break;
}
return rc;
}
EXPORT_SYMBOL_GPL(rtas_error_rc);
int rtas_get_power_level(int powerdomain, int *level)
{
int token = rtas_function_token(RTAS_FN_GET_POWER_LEVEL);
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
udelay(1);
if (rc < 0)
return rtas_error_rc(rc);
return rc;
}
EXPORT_SYMBOL_GPL(rtas_get_power_level);
int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
int token = rtas_function_token(RTAS_FN_SET_POWER_LEVEL);
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
do {
rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
} while (rtas_busy_delay(rc));
if (rc < 0)
return rtas_error_rc(rc);
return rc;
}
EXPORT_SYMBOL_GPL(rtas_set_power_level);
int rtas_get_sensor(int sensor, int index, int *state)
{
int token = rtas_function_token(RTAS_FN_GET_SENSOR_STATE);
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
do {
rc = rtas_call(token, 2, 2, state, sensor, index);
} while (rtas_busy_delay(rc));
if (rc < 0)
return rtas_error_rc(rc);
return rc;
}
EXPORT_SYMBOL_GPL(rtas_get_sensor);
int rtas_get_sensor_fast(int sensor, int index, int *state)
{
int token = rtas_function_token(RTAS_FN_GET_SENSOR_STATE);
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
rc = rtas_call(token, 2, 2, state, sensor, index);
WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
rc <= RTAS_EXTENDED_DELAY_MAX));
if (rc < 0)
return rtas_error_rc(rc);
return rc;
}
bool rtas_indicator_present(int token, int *maxindex)
{
int proplen, count, i;
const struct indicator_elem {
__be32 token;
__be32 maxindex;
} *indicators;
indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
if (!indicators)
return false;
count = proplen / sizeof(struct indicator_elem);
for (i = 0; i < count; i++) {
if (__be32_to_cpu(indicators[i].token) != token)
continue;
if (maxindex)
*maxindex = __be32_to_cpu(indicators[i].maxindex);
return true;
}
return false;
}
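/*
* Usage sketch (illustrative only): a driver can use
* rtas_indicator_present() to validate an indicator token and its index
* range before driving it with rtas_set_indicator(), e.g.
*
*	int max_index;
*
*	if (!rtas_indicator_present(9700, &max_index))
*		return -ENODEV;
*	if (index > max_index)
*		return -EINVAL;
*	rc = rtas_set_indicator(9700, index, new_value);
*
* 9700 is only a placeholder indicator token number here.
*/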
int rtas_set_indicator(int indicator, int index, int new_value)
{
int token = rtas_function_token(RTAS_FN_SET_INDICATOR);
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
do {
rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
} while (rtas_busy_delay(rc));
if (rc < 0)
return rtas_error_rc(rc);
return rc;
}
EXPORT_SYMBOL_GPL(rtas_set_indicator);
/*
* Same as rtas_set_indicator(), but ignores (and warns on) RTAS busy
* and extended delay statuses instead of retrying.
*/
int rtas_set_indicator_fast(int indicator, int index, int new_value)
{
int token = rtas_function_token(RTAS_FN_SET_INDICATOR);
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
rc <= RTAS_EXTENDED_DELAY_MAX));
if (rc < 0)
return rtas_error_rc(rc);
return rc;
}
/**
* rtas_ibm_suspend_me() - Call ibm,suspend-me to suspend the LPAR.
*
* @fw_status: RTAS call status will be placed here if not NULL.
*
* rtas_ibm_suspend_me() should be called only on a CPU which has
* received H_CONTINUE from the H_JOIN hcall. All other active CPUs
* should be waiting to return from H_JOIN.
*
* rtas_ibm_suspend_me() may suspend execution of the OS
* indefinitely. Callers should take appropriate measures upon return, such as
* resetting watchdog facilities.
*
* Callers may choose to retry this call if @fw_status is
* %RTAS_THREADS_ACTIVE.
*
* Return:
* 0 - The partition has resumed from suspend, possibly after
* migration to a different host.
* -ECANCELED - The operation was aborted.
* -EAGAIN - There were other CPUs not in H_JOIN at the time of the call.
* -EBUSY - Some other condition prevented the suspend from succeeding.
* -EIO - Hardware/platform error.
*/
int rtas_ibm_suspend_me(int *fw_status)
{
int token = rtas_function_token(RTAS_FN_IBM_SUSPEND_ME);
int fwrc;
int ret;
fwrc = rtas_call(token, 0, 1, NULL);
switch (fwrc) {
case 0:
ret = 0;
break;
case RTAS_SUSPEND_ABORTED:
ret = -ECANCELED;
break;
case RTAS_THREADS_ACTIVE:
ret = -EAGAIN;
break;
case RTAS_NOT_SUSPENDABLE:
case RTAS_OUTSTANDING_COPROC:
ret = -EBUSY;
break;
case -1:
default:
ret = -EIO;
break;
}
if (fw_status)
*fw_status = fwrc;
return ret;
}
void __noreturn rtas_restart(char *cmd)
{
if (rtas_flash_term_hook)
rtas_flash_term_hook(SYS_RESTART);
pr_emerg("system-reboot returned %d\n",
rtas_call(rtas_function_token(RTAS_FN_SYSTEM_REBOOT), 0, 1, NULL));
for (;;);
}
void rtas_power_off(void)
{
if (rtas_flash_term_hook)
rtas_flash_term_hook(SYS_POWER_OFF);
/* allow power on only with power button press */
pr_emerg("power-off returned %d\n",
rtas_call(rtas_function_token(RTAS_FN_POWER_OFF), 2, 1, NULL, -1, -1));
for (;;);
}
void __noreturn rtas_halt(void)
{
if (rtas_flash_term_hook)
rtas_flash_term_hook(SYS_HALT);
/* allow power on only with power button press */
pr_emerg("power-off returned %d\n",
rtas_call(rtas_function_token(RTAS_FN_POWER_OFF), 2, 1, NULL, -1, -1));
for (;;);
}
/* Must be in the RMO region, so we place it here */
static char rtas_os_term_buf[2048];
static bool ibm_extended_os_term;
void rtas_os_term(char *str)
{
s32 token = rtas_function_token(RTAS_FN_IBM_OS_TERM);
static struct rtas_args args;
int status;
/*
* Firmware with the ibm,extended-os-term property is guaranteed
* to always return from an ibm,os-term call. Earlier versions without
* this property may terminate the partition which we want to avoid
* since it interferes with panic_timeout.
*/
if (token == RTAS_UNKNOWN_SERVICE || !ibm_extended_os_term)
return;
snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
/*
* Keep calling as long as RTAS returns a "try again" status,
* but don't use rtas_busy_delay(), which potentially
* schedules.
*/
do {
rtas_call_unlocked(&args, token, 1, 1, NULL, __pa(rtas_os_term_buf));
status = be32_to_cpu(args.rets[0]);
} while (rtas_busy_delay_time(status));
if (status != 0)
pr_emerg("ibm,os-term call failed %d\n", status);
}
/**
* rtas_activate_firmware() - Activate a new version of firmware.
*
* Context: This function may sleep.
*
* Activate a new version of partition firmware. The OS must call this
* after resuming from a partition hibernation or migration in order
* to maintain the ability to perform live firmware updates. It's not
* catastrophic for this method to be absent or to fail; just log the
* condition in that case.
*/
void rtas_activate_firmware(void)
{
int token = rtas_function_token(RTAS_FN_IBM_ACTIVATE_FIRMWARE);
int fwrc;
if (token == RTAS_UNKNOWN_SERVICE) {
pr_notice("ibm,activate-firmware method unavailable\n");
return;
}
do {
fwrc = rtas_call(token, 0, 1, NULL);
} while (rtas_busy_delay(fwrc));
if (fwrc)
pr_err("ibm,activate-firmware failed (%i)\n", fwrc);
}
/**
* get_pseries_errorlog() - Find a specific pseries error log in an RTAS
* extended event log.
* @log: RTAS error/event log
* @section_id: two character section identifier
*
* Return: A pointer to the specified errorlog or NULL if not found.
*/
noinstr struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
uint16_t section_id)
{
struct rtas_ext_event_log_v6 *ext_log =
(struct rtas_ext_event_log_v6 *)log->buffer;
struct pseries_errorlog *sect;
unsigned char *p, *log_end;
uint32_t ext_log_length = rtas_error_extended_log_length(log);
uint8_t log_format = rtas_ext_event_log_format(ext_log);
uint32_t company_id = rtas_ext_event_company_id(ext_log);
/* Check that we understand the format */
if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
company_id != RTAS_V6EXT_COMPANY_ID_IBM)
return NULL;
log_end = log->buffer + ext_log_length;
p = ext_log->vendor_log;
while (p < log_end) {
sect = (struct pseries_errorlog *)p;
if (pseries_errorlog_id(sect) == section_id)
return sect;
p += pseries_errorlog_length(sect);
}
return NULL;
}
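/*
* Usage sketch (illustrative only): RAS code that has received an RTAS
* error log (e.g. from check-exception) can pull out a specific v6
* section by its two-character ID, roughly:
*
*	struct pseries_errorlog *sect;
*
*	sect = get_pseries_errorlog(elog, PSERIES_ELOG_SECT_ID_MCE);
*	if (sect)
*		handle_mce_section(sect);
*
* PSERIES_ELOG_SECT_ID_MCE and handle_mce_section() are stand-ins for
* whatever section ID and handler the caller actually cares about;
* elog is the caller's struct rtas_error_log pointer.
*/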
/*
* The sys_rtas syscall, as originally designed, allows root to pass
* arbitrary physical addresses to RTAS calls. A number of RTAS calls
* can be abused to write to arbitrary memory and do other things that
* are potentially harmful to system integrity, and thus should only
* be used inside the kernel and not exposed to userspace.
*
* All known legitimate users of the sys_rtas syscall will only ever
* pass addresses that fall within the RMO buffer, and use a known
* subset of RTAS calls.
*
* Accordingly, we filter RTAS requests to check that the call is
* permitted, and that provided pointers fall within the RMO buffer.
* If a function is allowed to be invoked via the syscall, then its
* entry in rtas_function_table points to an rtas_filter that
* describes its constraints: the indexes of the parameters that are
* expected to contain the addresses and sizes of buffers allocated
* inside the RMO buffer.
*/
static bool in_rmo_buf(u32 base, u32 end)
{
return base >= rtas_rmo_buf &&
base < (rtas_rmo_buf + RTAS_USER_REGION_SIZE) &&
base <= end &&
end >= rtas_rmo_buf &&
end < (rtas_rmo_buf + RTAS_USER_REGION_SIZE);
}
static bool block_rtas_call(int token, int nargs,
struct rtas_args *args)
{
const struct rtas_function *func;
const struct rtas_filter *f;
const bool is_platform_dump = token == rtas_function_token(RTAS_FN_IBM_PLATFORM_DUMP);
const bool is_config_conn = token == rtas_function_token(RTAS_FN_IBM_CONFIGURE_CONNECTOR);
u32 base, size, end;
/*
* If this token doesn't correspond to a function the kernel
* understands, you're not allowed to call it.
*/
func = rtas_token_to_function(token);
if (!func)
goto err;
/*
* And only functions with filters attached are allowed.
*/
f = func->filter;
if (!f)
goto err;
/*
* And some functions aren't allowed on LE.
*/
if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) && func->banned_for_syscall_on_le)
goto err;
if (f->buf_idx1 != -1) {
base = be32_to_cpu(args->args[f->buf_idx1]);
if (f->size_idx1 != -1)
size = be32_to_cpu(args->args[f->size_idx1]);
else if (f->fixed_size)
size = f->fixed_size;
else
size = 1;
end = base + size - 1;
/*
* Special case for ibm,platform-dump - NULL buffer
* address is used to indicate end of dump processing
*/
if (is_platform_dump && base == 0)
return false;
if (!in_rmo_buf(base, end))
goto err;
}
if (f->buf_idx2 != -1) {
base = be32_to_cpu(args->args[f->buf_idx2]);
if (f->size_idx2 != -1)
size = be32_to_cpu(args->args[f->size_idx2]);
else if (f->fixed_size)
size = f->fixed_size;
else
size = 1;
end = base + size - 1;
/*
* Special case for ibm,configure-connector where the
* address can be 0
*/
if (is_config_conn && base == 0)
return false;
if (!in_rmo_buf(base, end))
goto err;
}
return false;
err:
pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n");
pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n",
token, nargs, current->comm);
return true;
}
/* We assume we are passed big-endian arguments */
SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
{
struct pin_cookie cookie;
struct rtas_args args;
unsigned long flags;
char *buff_copy, *errbuf = NULL;
int nargs, nret, token;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!rtas.entry)
return -EINVAL;
if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
return -EFAULT;
nargs = be32_to_cpu(args.nargs);
nret = be32_to_cpu(args.nret);
token = be32_to_cpu(args.token);
if (nargs >= ARRAY_SIZE(args.args)
|| nret > ARRAY_SIZE(args.args)
|| nargs + nret > ARRAY_SIZE(args.args))
return -EINVAL;
/* Copy in args. */
if (copy_from_user(args.args, uargs->args,
nargs * sizeof(rtas_arg_t)) != 0)
return -EFAULT;
if (token == RTAS_UNKNOWN_SERVICE)
return -EINVAL;
args.rets = &args.args[nargs];
memset(args.rets, 0, nret * sizeof(rtas_arg_t));
if (block_rtas_call(token, nargs, &args))
return -EINVAL;
if (token_is_restricted_errinjct(token)) {
int err;
err = security_locked_down(LOCKDOWN_RTAS_ERROR_INJECTION);
if (err)
return err;
}
/* The ibm,suspend-me call needs to be handled specially */
if (token == rtas_function_token(RTAS_FN_IBM_SUSPEND_ME)) {
/*
* rtas_ibm_suspend_me assumes the streamid handle is in cpu
* endian, or at least the hcall within it requires it.
*/
int rc = 0;
u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
| be32_to_cpu(args.args[1]);
rc = rtas_syscall_dispatch_ibm_suspend_me(handle);
if (rc == -EAGAIN)
args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
else if (rc == -EIO)
args.rets[0] = cpu_to_be32(-1);
else if (rc)
return rc;
goto copy_return;
}
buff_copy = get_errorlog_buffer();
raw_spin_lock_irqsave(&rtas_lock, flags);
cookie = lockdep_pin_lock(&rtas_lock);
rtas_args = args;
do_enter_rtas(&rtas_args);
args = rtas_args;
/*
* A -1 return code indicates that the last command couldn't
* be completed due to a hardware error.
*/
if (be32_to_cpu(args.rets[0]) == -1)
errbuf = __fetch_rtas_last_error(buff_copy);
lockdep_unpin_lock(&rtas_lock, cookie);
raw_spin_unlock_irqrestore(&rtas_lock, flags);
if (buff_copy) {
if (errbuf)
log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
kfree(buff_copy);
}
copy_return:
/* Copy out args. */
if (copy_to_user(uargs->args + nargs,
args.args + nargs,
nret * sizeof(rtas_arg_t)) != 0)
return -EFAULT;
return 0;
}
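/*
* Usage sketch (illustrative only): from userspace, librtas-style code
* fills a big-endian parameter block and invokes the syscall directly,
* roughly:
*
*	struct rtas_args a = {
*		.token = htobe32(token),
*		.nargs = htobe32(1),
*		.nret  = htobe32(1),
*		.args  = { htobe32(some_arg) },
*	};
*	rc = syscall(__NR_rtas, &a);
*
* token and some_arg are placeholders. Only tokens whose functions
* carry a filter above are allowed, and any buffer addresses must point
* into the RMO user region.
*/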
static void __init rtas_function_table_init(void)
{
struct property *prop;
for (size_t i = 0; i < ARRAY_SIZE(rtas_function_table); ++i) {
struct rtas_function *curr = &rtas_function_table[i];
struct rtas_function *prior;
int cmp;
curr->token = RTAS_UNKNOWN_SERVICE;
if (i == 0)
continue;
/*
* Ensure table is sorted correctly for binary search
* on function names.
*/
prior = &rtas_function_table[i - 1];
cmp = strcmp(prior->name, curr->name);
if (cmp < 0)
continue;
if (cmp == 0) {
pr_err("'%s' has duplicate function table entries\n",
curr->name);
} else {
pr_err("function table unsorted: '%s' wrongly precedes '%s'\n",
prior->name, curr->name);
}
}
for_each_property_of_node(rtas.dev, prop) {
struct rtas_function *func;
if (prop->length != sizeof(u32))
continue;
func = __rtas_name_to_function(prop->name);
if (!func)
continue;
func->token = be32_to_cpup((__be32 *)prop->value);
pr_debug("function %s has token %u\n", func->name, func->token);
}
}
/*
* Call early during boot, before mem init, to retrieve the RTAS
* information from the device-tree and allocate the RMO buffer for userland
* accesses.
*/
void __init rtas_initialize(void)
{
unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
u32 base, size, entry;
int no_base, no_size, no_entry;
/* Get RTAS dev node and fill up our "rtas" structure with infos
* about it.
*/
rtas.dev = of_find_node_by_name(NULL, "rtas");
if (!rtas.dev)
return;
no_base = of_property_read_u32(rtas.dev, "linux,rtas-base", &base);
no_size = of_property_read_u32(rtas.dev, "rtas-size", &size);
if (no_base || no_size) {
of_node_put(rtas.dev);
rtas.dev = NULL;
return;
}
rtas.base = base;
rtas.size = size;
no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
rtas.entry = no_entry ? rtas.base : entry;
init_error_log_max();
/* Must be called before any function token lookups */
rtas_function_table_init();
/*
* Discover this now to avoid a device tree lookup in the
* panic path.
*/
ibm_extended_os_term = of_property_read_bool(rtas.dev, "ibm,extended-os-term");
/* If RTAS was found, allocate the RMO buffer for it and reserve the
* work area arena.
*/
#ifdef CONFIG_PPC64
if (firmware_has_feature(FW_FEATURE_LPAR))
rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
#endif
rtas_rmo_buf = memblock_phys_alloc_range(RTAS_USER_REGION_SIZE, PAGE_SIZE,
0, rtas_region);
if (!rtas_rmo_buf)
panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n",
PAGE_SIZE, &rtas_region);
rtas_work_area_reserve_arena(rtas_region);
}
int __init early_init_dt_scan_rtas(unsigned long node,
const char *uname, int depth, void *data)
{
const u32 *basep, *entryp, *sizep;
if (depth != 1 || strcmp(uname, "rtas") != 0)
return 0;
basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
sizep = of_get_flat_dt_prop(node, "rtas-size", NULL);
#ifdef CONFIG_PPC64
/* need this feature to decide the crashkernel offset */
if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL))
powerpc_firmware_features |= FW_FEATURE_LPAR;
#endif
if (basep && entryp && sizep) {
rtas.base = *basep;
rtas.entry = *entryp;
rtas.size = *sizep;
}
#ifdef CONFIG_UDBG_RTAS_CONSOLE
basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
if (basep)
rtas_putchar_token = *basep;
basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
if (basep)
rtas_getchar_token = *basep;
if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
udbg_init_rtas_console();
#endif
/* break now */
return 1;
}
static DEFINE_RAW_SPINLOCK(timebase_lock);
static u64 timebase = 0;
void rtas_give_timebase(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&timebase_lock, flags);
hard_irq_disable();
rtas_call(rtas_function_token(RTAS_FN_FREEZE_TIME_BASE), 0, 1, NULL);
timebase = get_tb();
raw_spin_unlock(&timebase_lock);
while (timebase)
barrier();
rtas_call(rtas_function_token(RTAS_FN_THAW_TIME_BASE), 0, 1, NULL);
local_irq_restore(flags);
}
void rtas_take_timebase(void)
{
while (!timebase)
barrier();
raw_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
raw_spin_unlock(&timebase_lock);
}
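/*
* Usage sketch (illustrative only): these two helpers are meant to be
* used as a pair during secondary CPU bring-up -- the boot CPU freezes
* the timebase and publishes its value, while the new CPU spins in
* rtas_take_timebase() until it can copy it:
*
*	rtas_give_timebase();	/- boot CPU, after kicking the secondary -/
*
*	rtas_take_timebase();	/- secondary CPU, early in its startup path -/
*
* The handshake is the shared 'timebase' variable above going non-zero
* and then back to zero.
*/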
| linux-master | arch/powerpc/kernel/rtas.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Port for PPC64 David Engebretsen, IBM Corp.
* Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
*
* Copyright (C) 2003 Anton Blanchard <[email protected]>, IBM
* Rework, based on alpha PCI code.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/of.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
/* pci_io_base -- the base address from which io bars are offsets.
* This is the lowest I/O base address (so bar values are always positive),
* and it *must* be the start of ISA space if an ISA bus exists because
* ISA drivers use hard-coded offsets. If no ISA bus exists, nothing
* is mapped in the first 64K of IO space.
*/
unsigned long pci_io_base;
EXPORT_SYMBOL(pci_io_base);
static int __init pcibios_init(void)
{
struct pci_controller *hose, *tmp;
printk(KERN_INFO "PCI: Probing PCI hardware\n");
/* For now, override phys_mem_access_prot. If we need it
* later, we may move that initialization to each ppc_md
*/
ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
/* On ppc64, we always enable PCI domains and we keep domain 0
* backward compatible in /proc for video cards
*/
pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
pcibios_scan_phb(hose);
/* Call common code to handle resource allocation */
pcibios_resource_survey();
/* Add devices. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
pci_bus_add_devices(hose->bus);
/* Call machine dependent fixup */
if (ppc_md.pcibios_fixup)
ppc_md.pcibios_fixup();
printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
return 0;
}
subsys_initcall_sync(pcibios_init);
int pcibios_unmap_io_space(struct pci_bus *bus)
{
struct pci_controller *hose;
WARN_ON(bus == NULL);
/* If this is not a PHB, we only flush the hash table over
* the area mapped by this bridge. We don't play with the PTE
* mappings since we might have to deal with sub-page alignments
* so flushing the hash table is the only sane way to make sure
* that no hash entries are covering that removed bridge area
* while still allowing other buses to overlap those pages
*
* Note: If we ever support P2P hotplug on Book3E, we'll have
* to do an appropriate TLB flush here too
*/
if (bus->self) {
#ifdef CONFIG_PPC_BOOK3S_64
struct resource *res = bus->resource[0];
#endif
pr_debug("IO unmapping for PCI-PCI bridge %s\n",
pci_name(bus->self));
#ifdef CONFIG_PPC_BOOK3S_64
__flush_hash_table_range(res->start + _IO_BASE,
res->end + _IO_BASE + 1);
#endif
return 0;
}
/* Get the host bridge */
hose = pci_bus_to_host(bus);
pr_debug("IO unmapping for PHB %pOF\n", hose->dn);
pr_debug(" alloc=0x%p\n", hose->io_base_alloc);
iounmap(hose->io_base_alloc);
return 0;
}
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);
void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size)
{
struct vm_struct *area;
unsigned long addr;
WARN_ON_ONCE(paddr & ~PAGE_MASK);
WARN_ON_ONCE(size & ~PAGE_MASK);
/*
* Let's allocate some IO space for that guy. We don't rely on the
* alignment tricks that the core applies to VM_IOREMAP areas. Maybe
* we should, to cope with cards that have incomplete address decoding,
* but I'd rather not deal with those outside of the reserved 64K
* legacy region.
*/
area = __get_vm_area_caller(size, VM_IOREMAP, PHB_IO_BASE, PHB_IO_END,
__builtin_return_address(0));
if (!area)
return NULL;
addr = (unsigned long)area->addr;
if (ioremap_page_range(addr, addr + size, paddr,
pgprot_noncached(PAGE_KERNEL))) {
vunmap_range(addr, addr + size);
return NULL;
}
return (void __iomem *)addr;
}
EXPORT_SYMBOL_GPL(ioremap_phb);
static int pcibios_map_phb_io_space(struct pci_controller *hose)
{
unsigned long phys_page;
unsigned long size_page;
unsigned long io_virt_offset;
phys_page = ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
size_page = ALIGN(hose->pci_io_size, PAGE_SIZE);
/* Make sure IO area address is clear */
hose->io_base_alloc = NULL;
/* If there's no IO to map on that bus, bail out as well */
if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
return 0;
/* Let's allocate some IO space for that guy via ioremap_phb(). We
 * don't rely on the alignment tricks that the core applies to
 * VM_IOREMAP areas. Maybe we should, to cope with cards that have
 * incomplete address decoding, but I'd rather not deal with those
 * outside of the reserved 64K legacy region.
*/
hose->io_base_alloc = ioremap_phb(phys_page, size_page);
if (!hose->io_base_alloc)
return -ENOMEM;
hose->io_base_virt = hose->io_base_alloc +
hose->io_base_phys - phys_page;
pr_debug("IO mapping for PHB %pOF\n", hose->dn);
pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
pr_debug(" size=0x%016llx (alloc=0x%016lx)\n",
hose->pci_io_size, size_page);
/* Fixup hose IO resource */
io_virt_offset = pcibios_io_space_offset(hose);
hose->io_resource.start += io_virt_offset;
hose->io_resource.end += io_virt_offset;
pr_debug(" hose->io_resource=%pR\n", &hose->io_resource);
return 0;
}
int pcibios_map_io_space(struct pci_bus *bus)
{
WARN_ON(bus == NULL);
/* If this is not a PHB, nothing to do; page tables still exist and
* thus HPTEs will be faulted in when needed
*/
if (bus->self) {
pr_debug("IO mapping for PCI-PCI bridge %s\n",
pci_name(bus->self));
pr_debug(" virt=0x%016llx...0x%016llx\n",
bus->resource[0]->start + _IO_BASE,
bus->resource[0]->end + _IO_BASE);
return 0;
}
return pcibios_map_phb_io_space(pci_bus_to_host(bus));
}
EXPORT_SYMBOL_GPL(pcibios_map_io_space);
void pcibios_setup_phb_io_space(struct pci_controller *hose)
{
pcibios_map_phb_io_space(hose);
}
#define IOBASE_BRIDGE_NUMBER 0
#define IOBASE_MEMORY 1
#define IOBASE_IO 2
#define IOBASE_ISA_IO 3
#define IOBASE_ISA_MEM 4
SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus,
unsigned long, in_devfn)
{
struct pci_controller* hose;
struct pci_bus *tmp_bus, *bus = NULL;
struct device_node *hose_node;
/* Argh ! Please forgive me for that hack, but that's the
* simplest way to get existing XFree to not lock up on some
* G5 machines... So when something asks for bus 0 io base
* (bus 0 is HT root), we return the AGP one instead.
*/
if (in_bus == 0 && of_machine_is_compatible("MacRISC4")) {
struct device_node *agp;
agp = of_find_compatible_node(NULL, NULL, "u3-agp");
if (agp)
in_bus = 0xf0;
of_node_put(agp);
}
/* That syscall isn't quite compatible with PCI domains, but it's
* used on pre-domain setups. We return the first match
*/
list_for_each_entry(tmp_bus, &pci_root_buses, node) {
if (in_bus >= tmp_bus->number &&
in_bus <= tmp_bus->busn_res.end) {
bus = tmp_bus;
break;
}
}
if (bus == NULL || bus->dev.of_node == NULL)
return -ENODEV;
hose_node = bus->dev.of_node;
hose = PCI_DN(hose_node)->phb;
switch (which) {
case IOBASE_BRIDGE_NUMBER:
return (long)hose->first_busno;
case IOBASE_MEMORY:
return (long)hose->mem_offset[0];
case IOBASE_IO:
return (long)hose->io_base_phys;
case IOBASE_ISA_IO:
return (long)isa_io_base;
case IOBASE_ISA_MEM:
return -EINVAL;
}
return -EOPNOTSUPP;
}
#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
struct pci_controller *phb = pci_bus_to_host(bus);
return phb->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
#ifdef CONFIG_PPC_PMAC
int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn)
{
if (!PCI_DN(np))
return -ENODEV;
*bus = PCI_DN(np)->busno;
*devfn = PCI_DN(np)->devfn;
return 0;
}
#endif
| linux-master | arch/powerpc/kernel/pci_64.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* User-space Probes (UProbes) for powerpc
*
* Copyright IBM Corporation, 2007-2012
*
* Adapted from the x86 port by Ananth N Mavinakayanahalli <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <asm/sstep.h>
#include <asm/inst.h>
#define UPROBE_TRAP_NR UINT_MAX
/**
* is_trap_insn - check if the instruction is a trap variant
* @insn: instruction to be checked.
* Returns true if @insn is a trap variant.
*/
bool is_trap_insn(uprobe_opcode_t *insn)
{
return (is_trap(*insn));
}
/**
* arch_uprobe_analyze_insn
* @auprobe: the probepoint information.
* @mm: the probed address space.
* @addr: vaddr to probe.
* Return 0 on success or a -ve number on error.
*/
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
struct mm_struct *mm, unsigned long addr)
{
if (addr & 0x03)
return -EINVAL;
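/*
 * ISA v3.1 prefixed instructions are 8 bytes wide and must not cross a
 * 64-byte boundary, so an instruction starting at offset 60 of a
 * 64-byte block cannot be probed.
 */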
if (cpu_has_feature(CPU_FTR_ARCH_31) &&
ppc_inst_prefixed(ppc_inst_read(auprobe->insn)) &&
(addr & 0x3f) == 60) {
pr_info_ratelimited("Cannot register a uprobe on 64 byte unaligned prefixed instruction\n");
return -EINVAL;
}
if (!can_single_step(ppc_inst_val(ppc_inst_read(auprobe->insn)))) {
pr_info_ratelimited("Cannot register a uprobe on instructions that can't be single stepped\n");
return -ENOTSUPP;
}
return 0;
}
/*
* arch_uprobe_pre_xol - prepare to execute out of line.
* @auprobe: the probepoint information.
* @regs: reflects the saved user state of current task.
*/
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct arch_uprobe_task *autask = ¤t->utask->autask;
autask->saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
regs_set_return_ip(regs, current->utask->xol_vaddr);
user_enable_single_step(current);
return 0;
}
/**
* uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
* @regs: Reflects the saved state of the task after it has hit a breakpoint
* instruction.
* Return the address of the breakpoint instruction.
*/
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
return instruction_pointer(regs);
}
/*
* If xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc),
* then detect the case where a singlestepped instruction jumps back to its
* own address. It is assumed that anything like do_page_fault/do_trap/etc
* sets thread.trap_nr != UINT_MAX.
*
* arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
* arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
* UPROBE_TRAP_NR == UINT_MAX set by arch_uprobe_pre_xol().
*/
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
if (t->thread.trap_nr != UPROBE_TRAP_NR)
return true;
return false;
}
/*
* Called after single-stepping. To avoid the SMP problems that can
* occur when we temporarily put back the original opcode to
* single-step, we single-stepped a copy of the instruction.
*
* This function prepares to resume execution after the single-step.
*/
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
current->thread.trap_nr = utask->autask.saved_trap_nr;
/*
* On powerpc, except for loads and stores, most instructions
* including ones that alter code flow (branches, calls, returns)
* are emulated in the kernel. We get here only if the emulation
* support doesn't exist, and we have to fix up the next instruction
* to be executed.
*/
regs_set_return_ip(regs, (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn));
user_disable_single_step(current);
return 0;
}
/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = data;
struct pt_regs *regs = args->regs;
/* regs == NULL is a kernel bug */
if (WARN_ON(!regs))
return NOTIFY_DONE;
/* We are only interested in userspace traps */
if (!user_mode(regs))
return NOTIFY_DONE;
switch (val) {
case DIE_BPT:
if (uprobe_pre_sstep_notifier(regs))
return NOTIFY_STOP;
break;
case DIE_SSTEP:
if (uprobe_post_sstep_notifier(regs))
return NOTIFY_STOP;
break;
default:
break;
}
return NOTIFY_DONE;
}
/*
* This function gets called when XOL instruction either gets trapped or
* the thread has a fatal signal, so reset the instruction pointer to its
* probed address.
*/
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr);
user_disable_single_step(current);
}
/*
* See if the instruction can be emulated.
* Returns true if instruction was emulated, false otherwise.
*/
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
int ret;
/*
* emulate_step() returns 1 if the insn was successfully emulated.
* For all other cases, we need to single-step in hardware.
*/
ret = emulate_step(regs, ppc_inst_read(auprobe->insn));
if (ret > 0)
return true;
return false;
}
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
unsigned long orig_ret_vaddr;
orig_ret_vaddr = regs->link;
/* Replace the return addr with trampoline addr */
regs->link = trampoline_vaddr;
return orig_ret_vaddr;
}
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
struct pt_regs *regs)
{
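/*
 * GPR1 is the stack pointer on powerpc; compare it with the stack value
 * recorded when the return probe was hit.
 */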
if (ctx == RP_CHECK_CHAIN_CALL)
return regs->gpr[1] <= ret->stack;
else
return regs->gpr[1] < ret->stack;
}
| linux-master | arch/powerpc/kernel/uprobes.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Procedures for creating, accessing and interpreting the device tree.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/kexec.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>
#include <linux/seq_buf.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/paca.h>
#include <asm/powernv.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>
#include <asm/opal.h>
#include <asm/fadump.h>
#include <asm/epapr_hcalls.h>
#include <asm/firmware.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/drmem.h>
#include <asm/ultravisor.h>
#include <asm/prom.h>
#include <asm/plpks.h>
#include <mm/mmu_decl.h>
#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif
int *chip_id_lookup_table;
#ifdef CONFIG_PPC64
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
unsigned int boot_cpu_node_count __ro_after_init;
#endif
static phys_addr_t first_memblock_size;
static int __initdata boot_cpu_count;
static int __init early_parse_mem(char *p)
{
if (!p)
return 1;
memory_limit = PAGE_ALIGN(memparse(p, &p));
DBG("memory limit = 0x%llx\n", memory_limit);
return 0;
}
early_param("mem", early_parse_mem);
/*
* overlaps_initrd - check for overlap with page aligned extension of
* initrd.
*/
static inline int overlaps_initrd(unsigned long start, unsigned long size)
{
#ifdef CONFIG_BLK_DEV_INITRD
if (!initrd_start)
return 0;
return (start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
start <= ALIGN(initrd_end, PAGE_SIZE);
#else
return 0;
#endif
}
/**
* move_device_tree - move tree to an unused area, if needed.
*
* The device tree may be allocated beyond our memory limit, or inside the
* crash kernel region for kdump, or within the page aligned range of initrd.
* If so, move it out of the way.
*/
static void __init move_device_tree(void)
{
unsigned long start, size;
void *p;
DBG("-> move_device_tree\n");
start = __pa(initial_boot_params);
size = fdt_totalsize(initial_boot_params);
if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
!memblock_is_memory(start + size - 1) ||
overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) {
p = memblock_alloc_raw(size, PAGE_SIZE);
if (!p)
panic("Failed to allocate %lu bytes to move device tree\n",
size);
memcpy(p, initial_boot_params, size);
initial_boot_params = p;
DBG("Moved device tree to 0x%px\n", p);
}
DBG("<- move_device_tree\n");
}
/*
* ibm,pa/pi-features is a per-cpu property that contains a string of
* attribute descriptors, each of which has a 2 byte header plus up
* to 254 bytes worth of processor attribute bits. First header
* byte specifies the number of bytes following the header.
* Second header byte is an "attribute-specifier" type, of which
* zero is the only currently-defined value.
* Implementation: Pass in the byte and bit offset for the feature
* that we are interested in. The function will return -1 if the
* pa-features property is missing, or a 1/0 to indicate if the feature
* is supported/not supported. Note that the bit numbers are
* big-endian to match the definition in PAPR.
*/
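/*
 * Purely illustrative example (not from real firmware): a descriptor of
 * { 0x02, 0x00, 0xc0, 0x00 } carries two attribute bytes after the
 * 2-byte header; byte 0 is 0xc0, so bits 0 and 1 (the two most
 * significant bits, big-endian numbering) are set, matching the
 * HAS_MMU and HAS_FPU entries below, while bit 3 (CTRL) is clear.
 */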
struct ibm_feature {
unsigned long cpu_features; /* CPU_FTR_xxx bit */
unsigned long mmu_features; /* MMU_FTR_xxx bit */
unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
unsigned char pabyte; /* byte number in ibm,pa/pi-features */
unsigned char pabit; /* bit number (big-endian) */
unsigned char invert; /* if 1, pa bit set => clear feature */
};
static struct ibm_feature ibm_pa_features[] __initdata = {
{ .pabyte = 0, .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU },
{ .pabyte = 0, .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU },
{ .pabyte = 0, .pabit = 3, .cpu_features = CPU_FTR_CTRL },
{ .pabyte = 0, .pabit = 6, .cpu_features = CPU_FTR_NOEXECUTE },
{ .pabyte = 1, .pabit = 2, .mmu_features = MMU_FTR_CI_LARGE_PAGE },
#ifdef CONFIG_PPC_RADIX_MMU
{ .pabyte = 40, .pabit = 0, .mmu_features = MMU_FTR_TYPE_RADIX | MMU_FTR_GTSE },
#endif
{ .pabyte = 5, .pabit = 0, .cpu_features = CPU_FTR_REAL_LE,
.cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
/*
* If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
* we don't want to turn on TM here, so we use the *_COMP versions
* which are 0 if the kernel doesn't support TM.
*/
{ .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP,
.cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP },
{ .pabyte = 64, .pabit = 0, .cpu_features = CPU_FTR_DAWR1 },
{ .pabyte = 68, .pabit = 5, .cpu_features = CPU_FTR_DEXCR_NPHIE },
};
/*
* The ibm,pi-features property describes processor-specific options not
* covered by ibm,pa-features. Right now we use byte 0, bit 3, which
* indicates that a DSI interrupt occurs when a paste operation targets
* a suspended NX window.
*/
static struct ibm_feature ibm_pi_features[] __initdata = {
{ .pabyte = 0, .pabit = 3, .mmu_features = MMU_FTR_NX_DSI },
};
static void __init scan_features(unsigned long node, const unsigned char *ftrs,
unsigned long tablelen,
struct ibm_feature *fp,
unsigned long ft_size)
{
unsigned long i, len, bit;
/* find descriptor with type == 0 */
for (;;) {
if (tablelen < 3)
return;
len = 2 + ftrs[0];
if (tablelen < len)
return; /* descriptor 0 not found */
if (ftrs[1] == 0)
break;
tablelen -= len;
ftrs += len;
}
/* loop over bits we know about */
for (i = 0; i < ft_size; ++i, ++fp) {
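/* skip features whose attribute byte isn't supplied by this descriptor */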
if (fp->pabyte >= ftrs[0])
continue;
bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
if (bit ^ fp->invert) {
cur_cpu_spec->cpu_features |= fp->cpu_features;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features |= fp->mmu_features;
} else {
cur_cpu_spec->cpu_features &= ~fp->cpu_features;
cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features &= ~fp->mmu_features;
}
}
}
static void __init check_cpu_features(unsigned long node, char *name,
struct ibm_feature *fp,
unsigned long size)
{
const unsigned char *pa_ftrs;
int tablelen;
pa_ftrs = of_get_flat_dt_prop(node, name, &tablelen);
if (pa_ftrs == NULL)
return;
scan_features(node, pa_ftrs, tablelen, fp, size);
}
#ifdef CONFIG_PPC_64S_HASH_MMU
static void __init init_mmu_slb_size(unsigned long node)
{
const __be32 *slb_size_ptr;
slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
if (slb_size_ptr)
mmu_slb_size = be32_to_cpup(slb_size_ptr);
}
#else
#define init_mmu_slb_size(node) do { } while(0)
#endif
static struct feature_property {
const char *name;
u32 min_value;
unsigned long cpu_feature;
unsigned long cpu_user_ftr;
} feature_properties[] __initdata = {
#ifdef CONFIG_ALTIVEC
{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
/* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
{"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
{"ibm,purr", 1, CPU_FTR_PURR, 0},
{"ibm,spurr", 1, CPU_FTR_SPURR, 0},
#endif /* CONFIG_PPC64 */
};
#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
static __init void identical_pvr_fixup(unsigned long node)
{
unsigned int pvr;
const char *model = of_get_flat_dt_prop(node, "model", NULL);
/*
* Since 440GR(x)/440EP(x) processors have the same pvr,
* we check the node path and set bit 28 in the cur_cpu_spec
* pvr for EP(x) processor version. This bit is always 0 in
* the "real" pvr. Then we call identify_cpu again with
* the new logical pvr to enable FPU support.
*/
if (model && strstr(model, "440EP")) {
pvr = cur_cpu_spec->pvr_value | 0x8;
identify_cpu(0, pvr);
DBG("Using logical pvr %x for %s\n", pvr, model);
}
}
#else
#define identical_pvr_fixup(node) do { } while(0)
#endif
static void __init check_cpu_feature_properties(unsigned long node)
{
int i;
struct feature_property *fp = feature_properties;
const __be32 *prop;
for (i = 0; i < (int)ARRAY_SIZE(feature_properties); ++i, ++fp) {
prop = of_get_flat_dt_prop(node, fp->name, NULL);
if (prop && be32_to_cpup(prop) >= fp->min_value) {
cur_cpu_spec->cpu_features |= fp->cpu_feature;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
}
}
}
static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth,
void *data)
{
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
const __be32 *prop;
const __be32 *intserv;
int i, nthreads;
int len;
int found = -1;
int found_thread = 0;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
if (IS_ENABLED(CONFIG_PPC64))
boot_cpu_node_count++;
/* Get physical cpuid */
intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
intserv = of_get_flat_dt_prop(node, "reg", &len);
nthreads = len / sizeof(int);
/*
* Now see if any of these threads match our boot cpu.
* NOTE: This must match the parsing done in smp_setup_cpu_maps.
*/
for (i = 0; i < nthreads; i++) {
if (be32_to_cpu(intserv[i]) ==
fdt_boot_cpuid_phys(initial_boot_params)) {
found = boot_cpu_count;
found_thread = i;
}
#ifdef CONFIG_SMP
/* logical cpu id is always 0 on UP kernels */
boot_cpu_count++;
#endif
}
/* Not the boot CPU */
if (found < 0)
return 0;
DBG("boot cpu: logical %d physical %d\n", found,
be32_to_cpu(intserv[found_thread]));
boot_cpuid = found;
if (IS_ENABLED(CONFIG_PPC64))
boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);
/*
* PAPR defines "logical" PVR values for cpus that
* meet various levels of the architecture:
* 0x0f000001 Architecture version 2.04
* 0x0f000002 Architecture version 2.05
* If the cpu-version property in the cpu node contains
* such a value, we call identify_cpu again with the
* logical PVR value in order to use the cpu feature
* bits appropriate for the architecture level.
*
* A POWER6 partition in "POWER6 architected" mode
* uses the 0x0f000002 PVR value; in POWER5+ mode
* it uses 0x0f000001.
*
* If we're using device tree CPU feature discovery then we don't
* support the cpu-version property, and it's the responsibility of the
* firmware/hypervisor to provide the correct feature set for the
* architecture level via the ibm,powerpc-cpu-features binding.
*/
if (!dt_cpu_ftrs_in_use()) {
prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) {
identify_cpu(0, be32_to_cpup(prop));
seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(prop));
}
check_cpu_feature_properties(node);
check_cpu_features(node, "ibm,pa-features", ibm_pa_features,
ARRAY_SIZE(ibm_pa_features));
check_cpu_features(node, "ibm,pi-features", ibm_pi_features,
ARRAY_SIZE(ibm_pi_features));
}
identical_pvr_fixup(node);
init_mmu_slb_size(node);
#ifdef CONFIG_PPC64
if (nthreads == 1)
cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
else if (!dt_cpu_ftrs_in_use())
cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif
return 0;
}
static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
const char *uname,
int depth, void *data)
{
const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
/* Use common scan routine to determine if this is the chosen node */
if (early_init_dt_scan_chosen(data) < 0)
return 0;
#ifdef CONFIG_PPC64
/* check if iommu is forced on or off */
if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
iommu_is_off = 1;
if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
iommu_force_on = 1;
#endif
/* mem=x on the command line is the preferred mechanism */
lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
if (lprop)
memory_limit = *lprop;
#ifdef CONFIG_PPC64
lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
if (lprop)
tce_alloc_start = *lprop;
lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
if (lprop)
tce_alloc_end = *lprop;
#endif
#ifdef CONFIG_KEXEC_CORE
lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
if (lprop)
crashk_res.start = *lprop;
lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
if (lprop)
crashk_res.end = crashk_res.start + *lprop - 1;
#endif
/* break now */
return 1;
}
/*
* Compare the range against max mem limit and update
* size if it cross the limit.
*/
#ifdef CONFIG_SPARSEMEM
static bool __init validate_mem_limit(u64 base, u64 *size)
{
u64 max_mem = 1UL << (MAX_PHYSMEM_BITS);
if (base >= max_mem)
return false;
if ((base + *size) > max_mem)
*size = max_mem - base;
return true;
}
#else
static bool __init validate_mem_limit(u64 base, u64 *size)
{
return true;
}
#endif
#ifdef CONFIG_PPC_PSERIES
/*
* Interpret the ibm dynamic reconfiguration memory LMBs.
* This contains a list of memory blocks along with NUMA affinity
* information.
*/
static int __init early_init_drmem_lmb(struct drmem_lmb *lmb,
const __be32 **usm,
void *data)
{
u64 base, size;
int is_kexec_kdump = 0, rngs;
base = lmb->base_addr;
size = drmem_lmb_size();
rngs = 1;
/*
* Skip this block if the reserved bit is set in flags
* or if the block is not assigned to this partition.
*/
if ((lmb->flags & DRCONF_MEM_RESERVED) ||
!(lmb->flags & DRCONF_MEM_ASSIGNED))
return 0;
if (*usm)
is_kexec_kdump = 1;
if (is_kexec_kdump) {
/*
* For each memblock in ibm,dynamic-memory, a
* corresponding entry in linux,drconf-usable-memory
* property contains a counter 'p' followed by 'p'
* (base, size) pairs. Now read the counter from
* linux,drconf-usable-memory property
*/
rngs = dt_mem_next_cell(dt_root_size_cells, usm);
if (!rngs) /* there are no (base, size) pairs */
return 0;
}
do {
if (is_kexec_kdump) {
base = dt_mem_next_cell(dt_root_addr_cells, usm);
size = dt_mem_next_cell(dt_root_size_cells, usm);
}
if (iommu_is_off) {
if (base >= 0x80000000ul)
continue;
if ((base + size) > 0x80000000ul)
size = 0x80000000ul - base;
}
if (!validate_mem_limit(base, &size))
continue;
DBG("Adding: %llx -> %llx\n", base, size);
memblock_add(base, size);
if (lmb->flags & DRCONF_MEM_HOTREMOVABLE)
memblock_mark_hotplug(base, size);
} while (--rngs);
return 0;
}
#endif /* CONFIG_PPC_PSERIES */
static int __init early_init_dt_scan_memory_ppc(void)
{
#ifdef CONFIG_PPC_PSERIES
const void *fdt = initial_boot_params;
int node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
if (node > 0)
walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb);
#endif
return early_init_dt_scan_memory();
}
/*
* For a relocatable kernel, we need to get the memstart_addr first,
* then use it to calculate the virtual kernel start address. This has
* to happen at a very early stage (before machine_init). In this case,
* we just want to get memstart_addr and would rather not touch the
* memblock at this stage, so introduce a variable that lets us skip
* the memblock_add() calls.
*/
#ifdef CONFIG_RELOCATABLE
static int add_mem_to_memblock = 1;
#else
#define add_mem_to_memblock 1
#endif
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
#ifdef CONFIG_PPC64
if (iommu_is_off) {
if (base >= 0x80000000ul)
return;
if ((base + size) > 0x80000000ul)
size = 0x80000000ul - base;
}
#endif
/* Keep track of the beginning of memory -and- the size of
* the very first block in the device-tree as it represents
* the RMA on ppc64 server
*/
if (base < memstart_addr) {
memstart_addr = base;
first_memblock_size = size;
}
/* Add the chunk to the MEMBLOCK list */
if (add_mem_to_memblock) {
if (validate_mem_limit(base, &size))
memblock_add(base, size);
}
}
static void __init early_reserve_mem_dt(void)
{
unsigned long i, dt_root;
int len;
const __be32 *prop;
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
dt_root = of_get_flat_dt_root();
prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);
if (!prop)
return;
DBG("Found new-style reserved-ranges\n");
/* Each reserved range is an (address,size) pair, 2 cells each,
* totalling 4 cells per range. */
for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
u64 base, size;
base = of_read_number(prop + (i * 4) + 0, 2);
size = of_read_number(prop + (i * 4) + 2, 2);
if (size) {
DBG("reserving: %llx -> %llx\n", base, size);
memblock_reserve(base, size);
}
}
}
static void __init early_reserve_mem(void)
{
__be64 *reserve_map;
reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
fdt_off_mem_rsvmap(initial_boot_params));
/* Look for the new "reserved-regions" property in the DT */
early_reserve_mem_dt();
#ifdef CONFIG_BLK_DEV_INITRD
/* Then reserve the initrd, if any */
if (initrd_start && (initrd_end > initrd_start)) {
memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
ALIGN(initrd_end, PAGE_SIZE) -
ALIGN_DOWN(initrd_start, PAGE_SIZE));
}
#endif /* CONFIG_BLK_DEV_INITRD */
if (!IS_ENABLED(CONFIG_PPC32))
return;
/*
* Handle the case where we might be booting from an old kexec
* image that setup the mem_rsvmap as pairs of 32-bit values
*/
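/*
 * On 32-bit, a base cell above 4GB cannot be a valid reservation, so it
 * must really be the first of a pair of packed 32-bit values.
 */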
if (be64_to_cpup(reserve_map) > 0xffffffffull) {
u32 base_32, size_32;
__be32 *reserve_map_32 = (__be32 *)reserve_map;
DBG("Found old 32-bit reserve map\n");
while (1) {
base_32 = be32_to_cpup(reserve_map_32++);
size_32 = be32_to_cpup(reserve_map_32++);
if (size_32 == 0)
break;
DBG("reserving: %x -> %x\n", base_32, size_32);
memblock_reserve(base_32, size_32);
}
return;
}
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static bool tm_disabled __initdata;
static int __init parse_ppc_tm(char *str)
{
bool res;
if (kstrtobool(str, &res))
return -EINVAL;
tm_disabled = !res;
return 0;
}
early_param("ppc_tm", parse_ppc_tm);
static void __init tm_init(void)
{
if (tm_disabled) {
pr_info("Disabling hardware transactional memory (HTM)\n");
cur_cpu_spec->cpu_user_features2 &=
~(PPC_FEATURE2_HTM_NOSC | PPC_FEATURE2_HTM);
cur_cpu_spec->cpu_features &= ~CPU_FTR_TM;
return;
}
pnv_tm_init();
}
#else
static void tm_init(void) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static int __init
early_init_dt_scan_model(unsigned long node, const char *uname,
int depth, void *data)
{
const char *prop;
if (depth != 0)
return 0;
prop = of_get_flat_dt_prop(node, "model", NULL);
if (prop)
seq_buf_printf(&ppc_hw_desc, "%s ", prop);
/* break now */
return 1;
}
#ifdef CONFIG_PPC64
static void __init save_fscr_to_task(void)
{
/*
* Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
* have configured via the device tree features or via __init_FSCR().
* That value will then be propagated to pid 1 (init) and all future
* processes.
*/
if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
init_task.thread.fscr = mfspr(SPRN_FSCR);
}
#else
static inline void save_fscr_to_task(void) {}
#endif
void __init early_init_devtree(void *params)
{
phys_addr_t limit;
DBG(" -> early_init_devtree(%px)\n", params);
/* Too early to BUG_ON(), do it by hand */
if (!early_init_dt_verify(params))
panic("BUG: Failed verifying flat device tree, bad version?");
of_scan_flat_dt(early_init_dt_scan_model, NULL);
#ifdef CONFIG_PPC_RTAS
/* Some machines might need RTAS info for debugging, grab it now. */
of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif
#ifdef CONFIG_PPC_POWERNV
/* Some machines might need OPAL info for debugging, grab it now. */
of_scan_flat_dt(early_init_dt_scan_opal, NULL);
/* Scan tree for ultravisor feature */
of_scan_flat_dt(early_init_dt_scan_ultravisor, NULL);
#endif
#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
/* scan tree to see if dump is active during last boot */
of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
#endif
/* Retrieve various information from the /chosen node of the
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
*/
of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
early_init_dt_scan_root();
early_init_dt_scan_memory_ppc();
/*
* As generic code authors expect to be able to use static keys
* in early_param() handlers, we initialize the static keys just
* before parsing early params (it's fine to call jump_label_init()
* more than once).
*/
jump_label_init();
parse_early_param();
/* make sure we've parsed cmdline for mem= before this */
if (memory_limit)
first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
setup_initial_memory_limit(memstart_addr, first_memblock_size);
/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
/* If relocatable, reserve first 32k for interrupt vectors etc. */
if (PHYSICAL_START > MEMORY_START)
memblock_reserve(MEMORY_START, 0x8000);
reserve_kdump_trampoline();
#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
/*
* If we fail to reserve memory for firmware-assisted dump then
* fallback to kexec based kdump.
*/
if (fadump_reserve_mem() == 0)
#endif
reserve_crashkernel();
early_reserve_mem();
/* Ensure that total memory size is page-aligned. */
limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
memblock_enforce_memory_limit(limit);
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES)
if (!early_radix_enabled())
memblock_cap_memory_range(0, 1UL << (H_MAX_PHYSMEM_BITS));
#endif
memblock_allow_resize();
memblock_dump_all();
DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size());
/* We may need to relocate the flat tree, do it now.
* FIXME .. and the initrd too? */
move_device_tree();
DBG("Scanning CPUs ...\n");
dt_cpu_ftrs_scan();
// We can now add the CPU name & PVR to the hardware description
seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR));
/* Retrieve CPU-related information from the flat tree
* (altivec support, boot CPU ID, ...)
*/
of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
if (boot_cpuid < 0) {
printk("Failed to identify boot CPU !\n");
BUG();
}
save_fscr_to_task();
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
/* We'll later wait for secondaries to check in; there are
* NCPUS-1 non-boot CPUs :-)
*/
spinning_secondaries = boot_cpu_count - 1;
#endif
mmu_early_init_devtree();
#ifdef CONFIG_PPC_POWERNV
/* Scan and build the list of machine check recoverable ranges */
of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
#endif
epapr_paravirt_early_init();
/* Now try to figure out if we are running on LPAR and so on */
pseries_probe_fw_features();
/*
* Initialize pkey features and default AMR/IAMR values
*/
pkey_early_init_devtree();
#ifdef CONFIG_PPC_PS3
/* Identify PS3 firmware */
if (of_flat_dt_is_compatible(of_get_flat_dt_root(), "sony,ps3"))
powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
#endif
/* If kexec left a PLPKS password in the DT, get it and clear it */
plpks_early_init_devtree();
tm_init();
DBG(" <- early_init_devtree()\n");
}
#ifdef CONFIG_RELOCATABLE
/*
* This function runs before early_init_devtree, so we have to init
* initial_boot_params.
*/
void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
{
/* Setup flat device-tree pointer */
initial_boot_params = params;
/*
* Scan the memory nodes and set add_mem_to_memblock to 0 to avoid
* messing with the memblock.
*/
add_mem_to_memblock = 0;
early_init_dt_scan_root();
early_init_dt_scan_memory_ppc();
add_mem_to_memblock = 1;
if (size)
*size = first_memblock_size;
}
#endif
/*******
*
* New implementation of the OF "find" APIs, return a refcounted
* object, call of_node_put() when done. The device tree and list
* are protected by a rw_lock.
*
* Note that property management will need some locking as well,
* this isn't dealt with yet.
*
*******/
/**
* of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
* @np: device node of the device
*
* This looks for a property "ibm,chip-id" in the node or any
* of its parents and returns its content, or -1 if it cannot
* be found.
*/
int of_get_ibm_chip_id(struct device_node *np)
{
of_node_get(np);
while (np) {
u32 chip_id;
/*
* Skiboot may produce memory nodes that contain more than one
* cell in chip-id, we only read the first one here.
*/
if (!of_property_read_u32(np, "ibm,chip-id", &chip_id)) {
of_node_put(np);
return chip_id;
}
np = of_get_next_parent(np);
}
return -1;
}
EXPORT_SYMBOL(of_get_ibm_chip_id);
/**
* cpu_to_chip_id - Return the cpus chip-id
* @cpu: The logical cpu number.
*
* Return the value of the ibm,chip-id property corresponding to the given
* logical cpu number. If the chip-id can not be found, returns -1.
*/
int cpu_to_chip_id(int cpu)
{
struct device_node *np;
int ret = -1, idx;
idx = cpu / threads_per_core;
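/* The lookup table is indexed per core: all threads of a core share a chip */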
if (chip_id_lookup_table && chip_id_lookup_table[idx] != -1)
return chip_id_lookup_table[idx];
np = of_get_cpu_node(cpu, NULL);
if (np) {
ret = of_get_ibm_chip_id(np);
of_node_put(np);
if (chip_id_lookup_table)
chip_id_lookup_table[idx] = ret;
}
return ret;
}
EXPORT_SYMBOL(cpu_to_chip_id);
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
#ifdef CONFIG_SMP
/*
* Early firmware scanning must use this rather than
* get_hard_smp_processor_id because we don't have pacas allocated
* until memory topology is discovered.
*/
if (cpu_to_phys_id != NULL)
return (int)phys_id == cpu_to_phys_id[cpu];
#endif
return (int)phys_id == get_hard_smp_processor_id(cpu);
}
| linux-master | arch/powerpc/kernel/prom.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* c 2001 PPC 64 Team, IBM Corp
*
* /proc/powerpc/rtas/firmware_flash interface
*
* This file implements a firmware_flash interface to pump a firmware
* image into the kernel. At reboot time rtas_restart() will see the
* firmware image and flash it as it reboots (see rtas.c).
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/reboot.h>
#include <asm/delay.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
#define MODULE_VERS "1.0"
#define MODULE_NAME "rtas_flash"
#define FIRMWARE_FLASH_NAME "firmware_flash"
#define FIRMWARE_UPDATE_NAME "firmware_update"
#define MANAGE_FLASH_NAME "manage_flash"
#define VALIDATE_FLASH_NAME "validate_flash"
/* General RTAS Status Codes */
#define RTAS_RC_SUCCESS 0
#define RTAS_RC_HW_ERR -1
#define RTAS_RC_BUSY -2
/* Flash image status values */
#define FLASH_AUTH -9002 /* RTAS Not Service Authority Partition */
#define FLASH_NO_OP -1099 /* No operation initiated by user */
#define FLASH_IMG_SHORT -1005 /* Flash image shorter than expected */
#define FLASH_IMG_BAD_LEN -1004 /* Bad length value in flash list block */
#define FLASH_IMG_NULL_DATA -1003 /* Bad data value in flash list block */
#define FLASH_IMG_READY 0 /* Firmware img ready for flash on reboot */
/* Manage image status values */
#define MANAGE_AUTH -9002 /* RTAS Not Service Authority Partition */
#define MANAGE_ACTIVE_ERR -9001 /* RTAS Cannot Overwrite Active Img */
#define MANAGE_NO_OP -1099 /* No operation initiated by user */
#define MANAGE_PARAM_ERR -3 /* RTAS Parameter Error */
#define MANAGE_HW_ERR -1 /* RTAS Hardware Error */
/* Validate image status values */
#define VALIDATE_AUTH -9002 /* RTAS Not Service Authority Partition */
#define VALIDATE_NO_OP -1099 /* No operation initiated by the user */
#define VALIDATE_INCOMPLETE -1002 /* User copied < VALIDATE_BUF_SIZE */
#define VALIDATE_READY -1001 /* Firmware image ready for validation */
#define VALIDATE_PARAM_ERR -3 /* RTAS Parameter Error */
#define VALIDATE_HW_ERR -1 /* RTAS Hardware Error */
/* ibm,validate-flash-image update result tokens */
#define VALIDATE_TMP_UPDATE 0 /* T side will be updated */
#define VALIDATE_FLASH_AUTH 1 /* Partition does not have authority */
#define VALIDATE_INVALID_IMG 2 /* Candidate image is not valid */
#define VALIDATE_CUR_UNKNOWN 3 /* Current fixpack level is unknown */
/*
* Current T side will be committed to P side before being replaced with the
* new image, and the new image is downlevel from the current image
*/
#define VALIDATE_TMP_COMMIT_DL 4
/*
* Current T side will be committed to P side before being replaced with the new
* image
*/
#define VALIDATE_TMP_COMMIT 5
/*
* T side will be updated with a downlevel image
*/
#define VALIDATE_TMP_UPDATE_DL 6
/*
* The candidate image's release date is later than the system's firmware
* service entitlement date - service warranty period has expired
*/
#define VALIDATE_OUT_OF_WRNTY 7
/* ibm,manage-flash-image operation tokens */
#define RTAS_REJECT_TMP_IMG 0
#define RTAS_COMMIT_TMP_IMG 1
/* Array sizes */
#define VALIDATE_BUF_SIZE 4096
#define VALIDATE_MSG_LEN 256
#define RTAS_MSG_MAXLEN 64
/* Quirk - RTAS requires 4k list length and block size */
#define RTAS_BLKLIST_LENGTH 4096
#define RTAS_BLK_SIZE 4096
struct flash_block {
char *data;
unsigned long length;
};
/* This struct is very similar but not identical to
* that needed by the rtas flash update.
* All we need to do for rtas is rewrite num_blocks
* into a version/length and translate the pointers
* to absolute.
*/
#define FLASH_BLOCKS_PER_NODE ((RTAS_BLKLIST_LENGTH - 16) / sizeof(struct flash_block))
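/* With 4k list nodes and 16-byte entries this works out to 255 blocks per node on 64-bit */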
struct flash_block_list {
unsigned long num_blocks;
struct flash_block_list *next;
struct flash_block blocks[FLASH_BLOCKS_PER_NODE];
};
static struct flash_block_list *rtas_firmware_flash_list;
/* Use slab cache to guarantee 4k alignment */
static struct kmem_cache *flash_block_cache = NULL;
#define FLASH_BLOCK_LIST_VERSION (1UL)
/*
* Local copy of the flash block list.
*
* The rtas_firmware_flash_list variable will be
* set once the data is fully read.
*
* For convenience as we build the list we use virtual addrs,
* we do not fill in the version number, and the length field
* is treated as the number of entries currently in the block
* (i.e. not a byte count). This is all fixed when calling
* the flash routine.
*/
/* Status int must be first member of struct */
struct rtas_update_flash_t
{
int status; /* Flash update status */
struct flash_block_list *flist; /* Local copy of flash block list */
};
/* Status int must be first member of struct */
struct rtas_manage_flash_t
{
int status; /* Returned status */
};
/* Status int must be first member of struct */
struct rtas_validate_flash_t
{
int status; /* Returned status */
char *buf; /* Candidate image buffer */
unsigned int buf_size; /* Size of image buf */
unsigned int update_results; /* Update results token */
};
static struct rtas_update_flash_t rtas_update_flash_data;
static struct rtas_manage_flash_t rtas_manage_flash_data;
static struct rtas_validate_flash_t rtas_validate_flash_data;
static DEFINE_MUTEX(rtas_update_flash_mutex);
static DEFINE_MUTEX(rtas_manage_flash_mutex);
static DEFINE_MUTEX(rtas_validate_flash_mutex);
/* Do simple sanity checks on the flash image. */
static int flash_list_valid(struct flash_block_list *flist)
{
struct flash_block_list *f;
int i;
unsigned long block_size, image_size;
/* Paranoid self test here. We also collect the image size. */
image_size = 0;
for (f = flist; f; f = f->next) {
for (i = 0; i < f->num_blocks; i++) {
if (f->blocks[i].data == NULL) {
return FLASH_IMG_NULL_DATA;
}
block_size = f->blocks[i].length;
if (block_size <= 0 || block_size > RTAS_BLK_SIZE) {
return FLASH_IMG_BAD_LEN;
}
image_size += block_size;
}
}
if (image_size < (256 << 10)) {
if (image_size < 2)
return FLASH_NO_OP;
}
printk(KERN_INFO "FLASH: flash image with %ld bytes stored for hardware flash on reboot\n", image_size);
return FLASH_IMG_READY;
}
static void free_flash_list(struct flash_block_list *f)
{
struct flash_block_list *next;
int i;
while (f) {
for (i = 0; i < f->num_blocks; i++)
kmem_cache_free(flash_block_cache, f->blocks[i].data);
next = f->next;
kmem_cache_free(flash_block_cache, f);
f = next;
}
}
static int rtas_flash_release(struct inode *inode, struct file *file)
{
struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
mutex_lock(&rtas_update_flash_mutex);
if (uf->flist) {
/* File was opened in write mode for a new flash attempt */
/* Clear saved list */
if (rtas_firmware_flash_list) {
free_flash_list(rtas_firmware_flash_list);
rtas_firmware_flash_list = NULL;
}
if (uf->status != FLASH_AUTH)
uf->status = flash_list_valid(uf->flist);
if (uf->status == FLASH_IMG_READY)
rtas_firmware_flash_list = uf->flist;
else
free_flash_list(uf->flist);
uf->flist = NULL;
}
mutex_unlock(&rtas_update_flash_mutex);
return 0;
}
static size_t get_flash_status_msg(int status, char *buf)
{
const char *msg;
size_t len;
switch (status) {
case FLASH_AUTH:
msg = "error: this partition does not have service authority\n";
break;
case FLASH_NO_OP:
msg = "info: no firmware image for flash\n";
break;
case FLASH_IMG_SHORT:
msg = "error: flash image short\n";
break;
case FLASH_IMG_BAD_LEN:
msg = "error: internal error bad length\n";
break;
case FLASH_IMG_NULL_DATA:
msg = "error: internal error null data\n";
break;
case FLASH_IMG_READY:
msg = "ready: firmware image ready for flash on reboot\n";
break;
default:
return sprintf(buf, "error: unexpected status value %d\n",
status);
}
len = strlen(msg);
memcpy(buf, msg, len + 1);
return len;
}
/* Reading the proc file will show status (not the firmware contents) */
static ssize_t rtas_flash_read_msg(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
char msg[RTAS_MSG_MAXLEN];
size_t len;
int status;
mutex_lock(&rtas_update_flash_mutex);
status = uf->status;
mutex_unlock(&rtas_update_flash_mutex);
/* Read as text message */
len = get_flash_status_msg(status, msg);
return simple_read_from_buffer(buf, count, ppos, msg, len);
}
static ssize_t rtas_flash_read_num(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
char msg[RTAS_MSG_MAXLEN];
int status;
mutex_lock(&rtas_update_flash_mutex);
status = uf->status;
mutex_unlock(&rtas_update_flash_mutex);
/* Read as number */
sprintf(msg, "%d\n", status);
return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg));
}
/* We could be much more efficient here. But to keep this function
* simple we allocate a page to the block list no matter how small the
* count is. If the system is low on memory it will be just as well
* that we fail....
*/
static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
size_t count, loff_t *off)
{
struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
char *p;
int next_free, rc;
struct flash_block_list *fl;
mutex_lock(&rtas_update_flash_mutex);
if (uf->status == FLASH_AUTH || count == 0)
goto out; /* discard data */
/* In the case that the image is not ready for flashing, the memory
* allocated for the block list will be freed upon the release of the
* proc file
*/
if (uf->flist == NULL) {
uf->flist = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
if (!uf->flist)
goto nomem;
}
fl = uf->flist;
while (fl->next)
fl = fl->next; /* seek to last block_list for append */
next_free = fl->num_blocks;
if (next_free == FLASH_BLOCKS_PER_NODE) {
/* Need to allocate another block_list */
fl->next = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
if (!fl->next)
goto nomem;
fl = fl->next;
next_free = 0;
}
if (count > RTAS_BLK_SIZE)
count = RTAS_BLK_SIZE;
p = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
if (!p)
goto nomem;
if(copy_from_user(p, buffer, count)) {
kmem_cache_free(flash_block_cache, p);
rc = -EFAULT;
goto error;
}
fl->blocks[next_free].data = p;
fl->blocks[next_free].length = count;
fl->num_blocks++;
out:
mutex_unlock(&rtas_update_flash_mutex);
return count;
nomem:
rc = -ENOMEM;
error:
mutex_unlock(&rtas_update_flash_mutex);
return rc;
}
/*
* Flash management routines.
*/
static void manage_flash(struct rtas_manage_flash_t *args_buf, unsigned int op)
{
s32 rc;
do {
rc = rtas_call(rtas_function_token(RTAS_FN_IBM_MANAGE_FLASH_IMAGE), 1, 1,
NULL, op);
} while (rtas_busy_delay(rc));
args_buf->status = rc;
}
static ssize_t manage_flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct rtas_manage_flash_t *const args_buf = &rtas_manage_flash_data;
char msg[RTAS_MSG_MAXLEN];
int msglen, status;
mutex_lock(&rtas_manage_flash_mutex);
status = args_buf->status;
mutex_unlock(&rtas_manage_flash_mutex);
msglen = sprintf(msg, "%d\n", status);
return simple_read_from_buffer(buf, count, ppos, msg, msglen);
}
static ssize_t manage_flash_write(struct file *file, const char __user *buf,
size_t count, loff_t *off)
{
struct rtas_manage_flash_t *const args_buf = &rtas_manage_flash_data;
static const char reject_str[] = "0";
static const char commit_str[] = "1";
char stkbuf[10];
int op, rc;
mutex_lock(&rtas_manage_flash_mutex);
if ((args_buf->status == MANAGE_AUTH) || (count == 0))
goto out;
op = -1;
if (buf) {
if (count > 9) count = 9;
rc = -EFAULT;
if (copy_from_user (stkbuf, buf, count))
goto error;
if (strncmp(stkbuf, reject_str, strlen(reject_str)) == 0)
op = RTAS_REJECT_TMP_IMG;
else if (strncmp(stkbuf, commit_str, strlen(commit_str)) == 0)
op = RTAS_COMMIT_TMP_IMG;
}
if (op == -1) { /* buf is empty, or contains invalid string */
rc = -EINVAL;
goto error;
}
manage_flash(args_buf, op);
out:
mutex_unlock(&rtas_manage_flash_mutex);
return count;
error:
mutex_unlock(&rtas_manage_flash_mutex);
return rc;
}
/*
* Validation routines.
*/
static void validate_flash(struct rtas_validate_flash_t *args_buf)
{
int token = rtas_function_token(RTAS_FN_IBM_VALIDATE_FLASH_IMAGE);
int update_results;
s32 rc;
rc = 0;
do {
spin_lock(&rtas_data_buf_lock);
memcpy(rtas_data_buf, args_buf->buf, VALIDATE_BUF_SIZE);
rc = rtas_call(token, 2, 2, &update_results,
(u32) __pa(rtas_data_buf), args_buf->buf_size);
memcpy(args_buf->buf, rtas_data_buf, VALIDATE_BUF_SIZE);
spin_unlock(&rtas_data_buf_lock);
} while (rtas_busy_delay(rc));
args_buf->status = rc;
args_buf->update_results = update_results;
}
static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
char *msg, int msglen)
{
int n;
if (args_buf->status >= VALIDATE_TMP_UPDATE) {
n = sprintf(msg, "%d\n", args_buf->update_results);
if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) ||
(args_buf->update_results == VALIDATE_TMP_UPDATE))
n += snprintf(msg + n, msglen - n, "%s\n",
args_buf->buf);
} else {
n = sprintf(msg, "%d\n", args_buf->status);
}
return n;
}
static ssize_t validate_flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct rtas_validate_flash_t *const args_buf =
&rtas_validate_flash_data;
char msg[VALIDATE_MSG_LEN];
int msglen;
mutex_lock(&rtas_validate_flash_mutex);
msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN);
mutex_unlock(&rtas_validate_flash_mutex);
return simple_read_from_buffer(buf, count, ppos, msg, msglen);
}
static ssize_t validate_flash_write(struct file *file, const char __user *buf,
size_t count, loff_t *off)
{
struct rtas_validate_flash_t *const args_buf =
&rtas_validate_flash_data;
int rc;
mutex_lock(&rtas_validate_flash_mutex);
/* We are only interested in the first 4K of the
* candidate image */
if ((*off >= VALIDATE_BUF_SIZE) ||
(args_buf->status == VALIDATE_AUTH)) {
*off += count;
mutex_unlock(&rtas_validate_flash_mutex);
return count;
}
if (*off + count >= VALIDATE_BUF_SIZE) {
count = VALIDATE_BUF_SIZE - *off;
args_buf->status = VALIDATE_READY;
} else {
args_buf->status = VALIDATE_INCOMPLETE;
}
if (!access_ok(buf, count)) {
rc = -EFAULT;
goto done;
}
if (copy_from_user(args_buf->buf + *off, buf, count)) {
rc = -EFAULT;
goto done;
}
*off += count;
rc = count;
done:
mutex_unlock(&rtas_validate_flash_mutex);
return rc;
}
static int validate_flash_release(struct inode *inode, struct file *file)
{
struct rtas_validate_flash_t *const args_buf =
&rtas_validate_flash_data;
mutex_lock(&rtas_validate_flash_mutex);
if (args_buf->status == VALIDATE_READY) {
args_buf->buf_size = VALIDATE_BUF_SIZE;
validate_flash(args_buf);
}
mutex_unlock(&rtas_validate_flash_mutex);
return 0;
}
/*
* On-reboot flash update applicator.
*/
static void rtas_flash_firmware(int reboot_type)
{
unsigned long image_size;
struct flash_block_list *f, *next, *flist;
unsigned long rtas_block_list;
int i, status, update_token;
if (rtas_firmware_flash_list == NULL)
return; /* nothing to do */
if (reboot_type != SYS_RESTART) {
printk(KERN_ALERT "FLASH: firmware flash requires a reboot\n");
printk(KERN_ALERT "FLASH: the firmware image will NOT be flashed\n");
return;
}
update_token = rtas_function_token(RTAS_FN_IBM_UPDATE_FLASH_64_AND_REBOOT);
if (update_token == RTAS_UNKNOWN_SERVICE) {
printk(KERN_ALERT "FLASH: ibm,update-flash-64-and-reboot "
"is not available -- not a service partition?\n");
printk(KERN_ALERT "FLASH: firmware will not be flashed\n");
return;
}
/*
* Just before starting the firmware flash, cancel the event scan work
* to avoid any soft lockup issues.
*/
rtas_cancel_event_scan();
/*
* NOTE: the "first" block must be under 4GB, so we create
* an entry with no data blocks in the reserved buffer in
* the kernel data segment.
*/
spin_lock(&rtas_data_buf_lock);
flist = (struct flash_block_list *)&rtas_data_buf[0];
flist->num_blocks = 0;
flist->next = rtas_firmware_flash_list;
rtas_block_list = __pa(flist);
if (rtas_block_list >= 4UL*1024*1024*1024) {
printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
spin_unlock(&rtas_data_buf_lock);
return;
}
printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
/* Update the block_list in place. */
rtas_firmware_flash_list = NULL; /* too hard to back out on error */
image_size = 0;
for (f = flist; f; f = next) {
/* Translate data addrs to absolute */
for (i = 0; i < f->num_blocks; i++) {
f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
image_size += f->blocks[i].length;
f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
}
next = f->next;
/* Don't translate NULL pointer for last entry */
if (f->next)
f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
else
f->next = NULL;
/* make num_blocks into the version/length field */
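/*
 * The list format version goes in the top byte; the rest is the node
 * length in bytes: the 16-byte header plus 16 bytes per block entry,
 * hence (num_blocks + 1) * 16.
 */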
f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
f->num_blocks = cpu_to_be64(f->num_blocks);
}
printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
printk(KERN_ALERT "FLASH: performing flash and reboot\n");
rtas_progress("Flashing \n", 0x0);
rtas_progress("Please Wait... ", 0x0);
printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n");
status = rtas_call(update_token, 1, 1, NULL, rtas_block_list);
switch (status) { /* should only get "bad" status */
case 0:
printk(KERN_ALERT "FLASH: success\n");
break;
case -1:
printk(KERN_ALERT "FLASH: hardware error. Firmware may not be not flashed\n");
break;
case -3:
printk(KERN_ALERT "FLASH: image is corrupt or not correct for this platform. Firmware not flashed\n");
break;
case -4:
printk(KERN_ALERT "FLASH: flash failed when partially complete. System may not reboot\n");
break;
default:
printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
break;
}
spin_unlock(&rtas_data_buf_lock);
}
/*
* Manifest of proc files to create
*/
struct rtas_flash_file {
const char *filename;
const rtas_fn_handle_t handle;
int *status;
const struct proc_ops ops;
};
static const struct rtas_flash_file rtas_flash_files[] = {
{
.filename = "powerpc/rtas/" FIRMWARE_FLASH_NAME,
.handle = RTAS_FN_IBM_UPDATE_FLASH_64_AND_REBOOT,
.status = &rtas_update_flash_data.status,
.ops.proc_read = rtas_flash_read_msg,
.ops.proc_write = rtas_flash_write,
.ops.proc_release = rtas_flash_release,
.ops.proc_lseek = default_llseek,
},
{
.filename = "powerpc/rtas/" FIRMWARE_UPDATE_NAME,
.handle = RTAS_FN_IBM_UPDATE_FLASH_64_AND_REBOOT,
.status = &rtas_update_flash_data.status,
.ops.proc_read = rtas_flash_read_num,
.ops.proc_write = rtas_flash_write,
.ops.proc_release = rtas_flash_release,
.ops.proc_lseek = default_llseek,
},
{
.filename = "powerpc/rtas/" VALIDATE_FLASH_NAME,
.handle = RTAS_FN_IBM_VALIDATE_FLASH_IMAGE,
.status = &rtas_validate_flash_data.status,
.ops.proc_read = validate_flash_read,
.ops.proc_write = validate_flash_write,
.ops.proc_release = validate_flash_release,
.ops.proc_lseek = default_llseek,
},
{
.filename = "powerpc/rtas/" MANAGE_FLASH_NAME,
.handle = RTAS_FN_IBM_MANAGE_FLASH_IMAGE,
.status = &rtas_manage_flash_data.status,
.ops.proc_read = manage_flash_read,
.ops.proc_write = manage_flash_write,
.ops.proc_lseek = default_llseek,
}
};
static int __init rtas_flash_init(void)
{
int i;
if (rtas_function_token(RTAS_FN_IBM_UPDATE_FLASH_64_AND_REBOOT) == RTAS_UNKNOWN_SERVICE) {
pr_info("rtas_flash: no firmware flash support\n");
return -EINVAL;
}
rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
if (!rtas_validate_flash_data.buf)
return -ENOMEM;
flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache",
RTAS_BLK_SIZE, RTAS_BLK_SIZE,
0, 0, RTAS_BLK_SIZE, NULL);
if (!flash_block_cache) {
printk(KERN_ERR "%s: failed to create block cache\n",
__func__);
goto enomem_buf;
}
for (i = 0; i < ARRAY_SIZE(rtas_flash_files); i++) {
const struct rtas_flash_file *f = &rtas_flash_files[i];
int token;
if (!proc_create(f->filename, 0600, NULL, &f->ops))
goto enomem;
/*
* This code assumes that the status int is the first member of the
* struct
*/
token = rtas_function_token(f->handle);
if (token == RTAS_UNKNOWN_SERVICE)
*f->status = FLASH_AUTH;
else
*f->status = FLASH_NO_OP;
}
rtas_flash_term_hook = rtas_flash_firmware;
return 0;
enomem:
while (--i >= 0) {
const struct rtas_flash_file *f = &rtas_flash_files[i];
remove_proc_entry(f->filename, NULL);
}
kmem_cache_destroy(flash_block_cache);
enomem_buf:
kfree(rtas_validate_flash_data.buf);
return -ENOMEM;
}
static void __exit rtas_flash_cleanup(void)
{
int i;
rtas_flash_term_hook = NULL;
if (rtas_firmware_flash_list) {
free_flash_list(rtas_firmware_flash_list);
rtas_firmware_flash_list = NULL;
}
for (i = 0; i < ARRAY_SIZE(rtas_flash_files); i++) {
const struct rtas_flash_file *f = &rtas_flash_files[i];
remove_proc_entry(f->filename, NULL);
}
kmem_cache_destroy(flash_block_cache);
kfree(rtas_validate_flash_data.buf);
}
module_init(rtas_flash_init);
module_exit(rtas_flash_cleanup);
MODULE_LICENSE("GPL");
| linux-master | arch/powerpc/kernel/rtas_flash.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* polling mode stateless debugging stuff, originally for NS16550 Serial Ports
*
* c 2001 PPC 64 Team, IBM Corp
*/
#include <linux/stdarg.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/udbg.h>
void (*udbg_putc)(char c);
void (*udbg_flush)(void);
int (*udbg_getc)(void);
int (*udbg_getc_poll)(void);
/*
* Early debugging facilities. You can enable _one_ of these via .config,
* if you do so your kernel _will not boot_ on anything else. Be careful.
*/
void __init udbg_early_init(void)
{
#if defined(CONFIG_PPC_EARLY_DEBUG_LPAR)
/* For LPAR machines that have an HVC console on vterm 0 */
udbg_init_debug_lpar();
#elif defined(CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI)
/* For LPAR machines that have an HVSI console on vterm 0 */
udbg_init_debug_lpar_hvsi();
#elif defined(CONFIG_PPC_EARLY_DEBUG_G5)
/* For use on Apple G5 machines */
udbg_init_pmac_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL)
/* RTAS panel debug */
udbg_init_rtas_panel();
#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE)
/* RTAS console debug */
udbg_init_rtas_console();
#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE)
/* Maple real mode debug */
udbg_init_maple_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE)
udbg_init_pas_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX)
udbg_init_btext();
#elif defined(CONFIG_PPC_EARLY_DEBUG_44x)
/* PPC44x debug */
udbg_init_44x_as1();
#elif defined(CONFIG_PPC_EARLY_DEBUG_40x)
/* PPC40x debug */
udbg_init_40x_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_CPM)
udbg_init_cpm();
#elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO)
udbg_init_usbgecko();
#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
/* In memory console */
udbg_init_memcons();
#elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC)
udbg_init_ehv_bc();
#elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC)
udbg_init_ps3gelic();
#elif defined(CONFIG_PPC_EARLY_DEBUG_OPAL_RAW)
udbg_init_debug_opal_raw();
#elif defined(CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI)
udbg_init_debug_opal_hvsi();
#elif defined(CONFIG_PPC_EARLY_DEBUG_16550)
udbg_init_debug_16550();
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG
console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
register_early_udbg_console();
#endif
}
/* udbg library, used by xmon et al */
void udbg_puts(const char *s)
{
if (udbg_putc) {
char c;
if (s && *s != '\0') {
while ((c = *s++) != '\0')
udbg_putc(c);
}
if (udbg_flush)
udbg_flush();
}
#if 0
else {
printk("%s", s);
}
#endif
}
int udbg_write(const char *s, int n)
{
int remain = n;
char c;
if (!udbg_putc)
return 0;
if (s && *s != '\0') {
while (((c = *s++) != '\0') && (remain-- > 0)) {
udbg_putc(c);
}
}
if (udbg_flush)
udbg_flush();
return n - remain;
}
#define UDBG_BUFSIZE 256
void udbg_printf(const char *fmt, ...)
{
if (udbg_putc) {
char buf[UDBG_BUFSIZE];
va_list args;
va_start(args, fmt);
vsnprintf(buf, UDBG_BUFSIZE, fmt, args);
udbg_puts(buf);
va_end(args);
}
}
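/*
 * Illustrative sketch: udbg_example_banner() below is a hypothetical
 * helper, shown only to demonstrate how the primitives above are meant
 * to be called; it assumes a platform backend has already installed
 * udbg_putc.
 */
static inline void udbg_example_banner(void)
{
	udbg_puts("udbg: early console up\n");
	udbg_printf("udbg: scratch buffer is %d bytes\n", UDBG_BUFSIZE);
}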
void __init udbg_progress(char *s, unsigned short hex)
{
udbg_puts(s);
udbg_puts("\n");
}
/*
* Early boot console based on udbg
*/
static void udbg_console_write(struct console *con, const char *s,
unsigned int n)
{
udbg_write(s, n);
}
static struct console udbg_console = {
.name = "udbg",
.write = udbg_console_write,
.flags = CON_PRINTBUFFER | CON_ENABLED | CON_BOOT | CON_ANYTIME,
.index = 0,
};
/*
* Called by setup_system after ppc_md->probe and ppc_md->early_init.
* Call it again after setting udbg_putc in ppc_md->setup_arch.
*/
void __init register_early_udbg_console(void)
{
if (early_console)
return;
if (!udbg_putc)
return;
if (strstr(boot_command_line, "udbg-immortal")) {
printk(KERN_INFO "early console immortal !\n");
udbg_console.flags &= ~CON_BOOT;
}
early_console = &udbg_console;
register_console(&udbg_console);
}
#if 0 /* if you want to use this as a regular output console */
console_initcall(register_udbg_console);
#endif
| linux-master | arch/powerpc/kernel/udbg.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (c) 2005 Linas Vepstas <[email protected]>
*/
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
/** Overview:
* EEH error states may be detected within exception handlers;
* however, the recovery processing needs to occur asynchronously
* in a normal kernel context and not an interrupt context.
* This pair of routines creates an event and queues it onto a
* work-queue, where a worker thread can drive recovery.
*/
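/*
 * Sketch of the flow implemented below (illustrative summary, assuming
 * the usual EEH configuration):
 *
 *   detector (possibly in interrupt context)
 *     -> eeh_send_failure_event() / __eeh_send_failure_event()
 *     -> event queued on eeh_eventlist under eeh_eventlist_lock
 *     -> complete(&eeh_eventlist_event) wakes the "eehd" kthread
 *     -> eeh_event_handler() dequeues it and calls
 *        eeh_handle_normal_event() or eeh_handle_special_event()
 */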
static DEFINE_SPINLOCK(eeh_eventlist_lock);
static DECLARE_COMPLETION(eeh_eventlist_event);
static LIST_HEAD(eeh_eventlist);
/**
* eeh_event_handler - Dispatch EEH events.
 * @dummy: unused
*
* The detection of a frozen slot can occur inside an interrupt,
* where it can be hard to do anything about it. The goal of this
* routine is to pull these detection events out of the context
* of the interrupt handler, and re-dispatch them for processing
* at a later time in a normal context.
*/
static int eeh_event_handler(void * dummy)
{
unsigned long flags;
struct eeh_event *event;
while (!kthread_should_stop()) {
if (wait_for_completion_interruptible(&eeh_eventlist_event))
break;
/* Fetch EEH event from the queue */
spin_lock_irqsave(&eeh_eventlist_lock, flags);
event = NULL;
if (!list_empty(&eeh_eventlist)) {
event = list_entry(eeh_eventlist.next,
struct eeh_event, list);
list_del(&event->list);
}
spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
if (!event)
continue;
		/* We might have an event without a binding PE */
if (event->pe)
eeh_handle_normal_event(event->pe);
else
eeh_handle_special_event();
kfree(event);
}
return 0;
}
/**
* eeh_event_init - Start kernel thread to handle EEH events
*
* This routine is called to start the kernel thread for processing
* EEH event.
*/
int eeh_event_init(void)
{
struct task_struct *t;
int ret = 0;
t = kthread_run(eeh_event_handler, NULL, "eehd");
if (IS_ERR(t)) {
ret = PTR_ERR(t);
pr_err("%s: Failed to start EEH daemon (%d)\n",
__func__, ret);
return ret;
}
return 0;
}
/**
* eeh_send_failure_event - Generate a PCI error event
* @pe: EEH PE
*
* This routine can be called within an interrupt context;
* the actual event will be delivered in a normal context
* (from a workqueue).
*/
int __eeh_send_failure_event(struct eeh_pe *pe)
{
unsigned long flags;
struct eeh_event *event;
event = kzalloc(sizeof(*event), GFP_ATOMIC);
if (!event) {
pr_err("EEH: out of memory, event not handled\n");
return -ENOMEM;
}
event->pe = pe;
/*
* Mark the PE as recovering before inserting it in the queue.
* This prevents the PE from being free()ed by a hotplug driver
* while the PE is sitting in the event queue.
*/
if (pe) {
#ifdef CONFIG_STACKTRACE
/*
* Save the current stack trace so we can dump it from the
* event handler thread.
*/
pe->trace_entries = stack_trace_save(pe->stack_trace,
ARRAY_SIZE(pe->stack_trace), 0);
#endif /* CONFIG_STACKTRACE */
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
}
/* We may or may not be called in an interrupt context */
spin_lock_irqsave(&eeh_eventlist_lock, flags);
list_add(&event->list, &eeh_eventlist);
spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
	/* For the EEH daemon to kick in */
complete(&eeh_eventlist_event);
return 0;
}
int eeh_send_failure_event(struct eeh_pe *pe)
{
/*
* If we've manually suppressed recovery events via debugfs
* then just drop it on the floor.
*/
if (eeh_debugfs_no_recover) {
pr_err("EEH: Event dropped due to no_recover setting\n");
return 0;
}
return __eeh_send_failure_event(pe);
}
/**
* eeh_remove_event - Remove EEH event from the queue
* @pe: Event binding to the PE
* @force: Event will be removed unconditionally
*
 * On the PowerNV platform, subsequent events may turn out to be part
 * of an earlier one. Such follow-up events are pure duplicates and
 * unnecessary, so they should be removed.
*/
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
unsigned long flags;
struct eeh_event *event, *tmp;
	/*
	 * If a NULL PE is passed in, either the IOC is dead or the
	 * caller is able to report all existing errors itself.
	 *
	 * Unless "force" is set, events whose associated PE has been
	 * isolated are left on the queue so that they are not lost.
	 */
spin_lock_irqsave(&eeh_eventlist_lock, flags);
list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
if (!force && event->pe &&
(event->pe->state & EEH_PE_ISOLATED))
continue;
if (!pe) {
list_del(&event->list);
kfree(event);
} else if (pe->type & EEH_PE_PHB) {
if (event->pe && event->pe->phb == pe->phb) {
list_del(&event->list);
kfree(event);
}
} else if (event->pe == pe) {
list_del(&event->list);
kfree(event);
}
}
spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}
| linux-master | arch/powerpc/kernel/eeh_event.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Stack trace utility functions etc.
*
* Copyright 2008 Christoph Hellwig, IBM Corp.
* Copyright 2018 SUSE Linux GmbH
* Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
*/
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
unsigned long sp;
if (regs && !consume_entry(cookie, regs->nip))
return;
if (regs)
sp = regs->gpr[1];
else if (task == current)
sp = current_stack_frame();
else
sp = task->thread.ksp;
for (;;) {
unsigned long *stack = (unsigned long *) sp;
unsigned long newsp, ip;
if (!validate_sp(sp, task))
return;
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
if (!consume_entry(cookie, ip))
return;
sp = newsp;
}
}
/*
* This function returns an error if it detects any unreliable features of the
* stack. Otherwise it guarantees that the stack trace is reliable.
*
* If the task is not 'current', the caller *must* ensure the task is inactive.
*/
int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task)
{
unsigned long sp;
unsigned long newsp;
unsigned long stack_page = (unsigned long)task_stack_page(task);
unsigned long stack_end;
int graph_idx = 0;
bool firstframe;
stack_end = stack_page + THREAD_SIZE;
if (!is_idle_task(task)) {
/*
* For user tasks, this is the SP value loaded on
* kernel entry, see "PACAKSAVE(r13)" in _switch() and
* system_call_common().
*
* Likewise for non-swapper kernel threads,
* this also happens to be the top of the stack
		 * as set up by copy_thread().
*
		 * Note that stack backlinks are not properly set up by
		 * copy_thread() and thus, a forked task will have
* an unreliable stack trace until it's been
* _switch()'ed to for the first time.
*/
stack_end -= STACK_USER_INT_FRAME_SIZE;
} else {
/*
* idle tasks have a custom stack layout,
* c.f. cpu_idle_thread_init().
*/
stack_end -= STACK_FRAME_MIN_SIZE;
}
if (task == current)
sp = current_stack_frame();
else
sp = task->thread.ksp;
if (sp < stack_page + sizeof(struct thread_struct) ||
sp > stack_end - STACK_FRAME_MIN_SIZE) {
return -EINVAL;
}
for (firstframe = true; sp != stack_end;
firstframe = false, sp = newsp) {
unsigned long *stack = (unsigned long *) sp;
unsigned long ip;
		/* sanity check: the ABI requires SP to be aligned to 16 bytes. */
if (sp & 0xF)
return -EINVAL;
newsp = stack[0];
/* Stack grows downwards; unwinder may only go up. */
if (newsp <= sp)
return -EINVAL;
if (newsp != stack_end &&
newsp > stack_end - STACK_FRAME_MIN_SIZE) {
return -EINVAL; /* invalid backlink, too far up. */
}
/*
* We can only trust the bottom frame's backlink, the
* rest of the frame may be uninitialized, continue to
* the next.
*/
if (firstframe)
continue;
/* Mark stacktraces with exception frames as unreliable. */
if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {
return -EINVAL;
}
/* Examine the saved LR: it must point into kernel code. */
ip = stack[STACK_FRAME_LR_SAVE];
if (!__kernel_text_address(ip))
return -EINVAL;
/*
* FIXME: IMHO these tests do not belong in
* arch-dependent code, they are generic.
*/
ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
#ifdef CONFIG_KPROBES
/*
* Mark stacktraces with kretprobed functions on them
* as unreliable.
*/
if (ip == (unsigned long)__kretprobe_trampoline)
return -EINVAL;
#endif
if (!consume_entry(cookie, ip))
return -EINVAL;
}
return 0;
}
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
nmi_cpu_backtrace(regs);
}
static void raise_backtrace_ipi(cpumask_t *mask)
{
struct paca_struct *p;
unsigned int cpu;
u64 delay_us;
for_each_cpu(cpu, mask) {
if (cpu == smp_processor_id()) {
handle_backtrace_ipi(NULL);
continue;
}
delay_us = 5 * USEC_PER_SEC;
if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
// Now wait up to 5s for the other CPU to do its backtrace
while (cpumask_test_cpu(cpu, mask) && delay_us) {
udelay(1);
delay_us--;
}
// Other CPU cleared itself from the mask
if (delay_us)
continue;
}
p = paca_ptrs[cpu];
cpumask_clear_cpu(cpu, mask);
pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
if (!virt_addr_valid(p)) {
pr_warn("paca pointer appears corrupt? (%px)\n", p);
continue;
}
pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
p->irq_soft_mask, p->in_mce, p->in_nmi);
if (virt_addr_valid(p->__current))
pr_cont(" current: %d (%s)\n", p->__current->pid,
p->__current->comm);
else
pr_cont(" current pointer corrupt? (%px)\n", p->__current);
pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
}
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
| linux-master | arch/powerpc/kernel/stacktrace.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Kernel module help for PPC.
Copyright (C) 2001 Rusty Russell.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/code-patching.h>
/* Count how many different relocations (different symbol, different
addend) */
static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
{
unsigned int i, r_info, r_addend, _count_relocs;
_count_relocs = 0;
r_info = 0;
r_addend = 0;
for (i = 0; i < num; i++)
/* Only count 24-bit relocs, others don't need stubs */
if (ELF32_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
(r_info != ELF32_R_SYM(rela[i].r_info) ||
r_addend != rela[i].r_addend)) {
_count_relocs++;
r_info = ELF32_R_SYM(rela[i].r_info);
r_addend = rela[i].r_addend;
}
#ifdef CONFIG_DYNAMIC_FTRACE
_count_relocs++; /* add one for ftrace_caller */
#endif
return _count_relocs;
}
static int relacmp(const void *_x, const void *_y)
{
const Elf32_Rela *x, *y;
y = (Elf32_Rela *)_x;
x = (Elf32_Rela *)_y;
/* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
* make the comparison cheaper/faster. It won't affect the sorting or
* the counting algorithms' performance
*/
if (x->r_info < y->r_info)
return -1;
else if (x->r_info > y->r_info)
return 1;
else if (x->r_addend < y->r_addend)
return -1;
else if (x->r_addend > y->r_addend)
return 1;
else
return 0;
}
/* Get the potential trampoline size required for the init and
   non-init sections */
static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
const Elf32_Shdr *sechdrs,
const char *secstrings,
int is_init)
{
unsigned long ret = 0;
unsigned i;
/* Everything marked ALLOC (this includes the exported
symbols) */
for (i = 1; i < hdr->e_shnum; i++) {
/* If it's called *.init*, and we're not init, we're
not interested */
if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != NULL)
!= is_init)
continue;
/* We don't want to look at debug sections. */
if (strstr(secstrings + sechdrs[i].sh_name, ".debug"))
continue;
if (sechdrs[i].sh_type == SHT_RELA) {
pr_debug("Found relocations in section %u\n", i);
pr_debug("Ptr: %p. Number: %u\n",
(void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size / sizeof(Elf32_Rela));
/* Sort the relocation information based on a symbol and
* addend key. This is a stable O(n*log n) complexity
* algorithm but it will reduce the complexity of
* count_relocs() to linear complexity O(n)
*/
sort((void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size / sizeof(Elf32_Rela),
sizeof(Elf32_Rela), relacmp, NULL);
ret += count_relocs((void *)hdr
+ sechdrs[i].sh_offset,
sechdrs[i].sh_size
/ sizeof(Elf32_Rela))
* sizeof(struct ppc_plt_entry);
}
}
return ret;
}
int module_frob_arch_sections(Elf32_Ehdr *hdr,
Elf32_Shdr *sechdrs,
char *secstrings,
struct module *me)
{
unsigned int i;
/* Find .plt and .init.plt sections */
for (i = 0; i < hdr->e_shnum; i++) {
if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0)
me->arch.init_plt_section = i;
else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
me->arch.core_plt_section = i;
}
if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
pr_err("Module doesn't contain .plt or .init.plt sections.\n");
return -ENOEXEC;
}
/* Override their sizes */
sechdrs[me->arch.core_plt_section].sh_size
= get_plt_size(hdr, sechdrs, secstrings, 0);
sechdrs[me->arch.init_plt_section].sh_size
= get_plt_size(hdr, sechdrs, secstrings, 1);
return 0;
}
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
if (entry->jump[0] != PPC_RAW_LIS(_R12, PPC_HA(val)))
return 0;
if (entry->jump[1] != PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))
return 0;
return 1;
}
/* Set up a trampoline in the PLT to bounce us to the distant function */
static uint32_t do_plt_call(void *location,
Elf32_Addr val,
const Elf32_Shdr *sechdrs,
struct module *mod)
{
struct ppc_plt_entry *entry;
pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
/* Init, or core PLT? */
if (within_module_core((unsigned long)location, mod))
entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
else
entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
/* Find this entry, or if that fails, the next avail. entry */
while (entry->jump[0]) {
		if (entry_matches(entry, val))
			return (uint32_t)entry;
entry++;
}
if (patch_instruction(&entry->jump[0], ppc_inst(PPC_RAW_LIS(_R12, PPC_HA(val)))))
return 0;
if (patch_instruction(&entry->jump[1], ppc_inst(PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))))
return 0;
if (patch_instruction(&entry->jump[2], ppc_inst(PPC_RAW_MTCTR(_R12))))
return 0;
if (patch_instruction(&entry->jump[3], ppc_inst(PPC_RAW_BCTR())))
return 0;
pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
return (uint32_t)entry;
}
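/*
 * Worked example (illustrative sketch): for a target address
 * val == 0xc0a09876 the trampoline written above is
 *
 *   lis   r12, 0xc0a1        # PPC_HA(val): high half rounded up because
 *                            # the low half is negative as a signed 16-bit
 *   addi  r12, r12, -0x678a  # PPC_LO(val) == 0x9876, sign-extended
 *   mtctr r12
 *   bctr
 *
 * so 0xc0a10000 + (-0x678a) == 0xc0a09876. module_trampoline_target()
 * below reverses this by combining the two halves and subtracting
 * 0x10000 when the low half has its sign bit set.
 */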
static int patch_location_16(uint32_t *loc, u16 value)
{
loc = PTR_ALIGN_DOWN(loc, sizeof(u32));
return patch_instruction(loc, ppc_inst((*loc & 0xffff0000) | value));
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *module)
{
unsigned int i;
Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
uint32_t value;
pr_debug("Applying ADD relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rela[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rela[i].r_info);
/* `Everything is relative'. */
value = sym->st_value + rela[i].r_addend;
switch (ELF32_R_TYPE(rela[i].r_info)) {
case R_PPC_ADDR32:
/* Simply set it */
*(uint32_t *)location = value;
break;
case R_PPC_ADDR16_LO:
/* Low half of the symbol */
if (patch_location_16(location, PPC_LO(value)))
return -EFAULT;
break;
case R_PPC_ADDR16_HI:
/* Higher half of the symbol */
if (patch_location_16(location, PPC_HI(value)))
return -EFAULT;
break;
case R_PPC_ADDR16_HA:
if (patch_location_16(location, PPC_HA(value)))
return -EFAULT;
break;
case R_PPC_REL24:
if ((int)(value - (uint32_t)location) < -0x02000000
|| (int)(value - (uint32_t)location) >= 0x02000000) {
value = do_plt_call(location, value,
sechdrs, module);
if (!value)
return -EFAULT;
}
/* Only replace bits 2 through 26 */
pr_debug("REL24 value = %08X. location = %08X\n",
value, (uint32_t)location);
pr_debug("Location before: %08X.\n",
*(uint32_t *)location);
value = (*(uint32_t *)location & ~PPC_LI_MASK) |
PPC_LI(value - (uint32_t)location);
if (patch_instruction(location, ppc_inst(value)))
return -EFAULT;
pr_debug("Location after: %08X.\n",
*(uint32_t *)location);
pr_debug("ie. jump to %08X+%08X = %08X\n",
*(uint32_t *)PPC_LI((uint32_t)location), (uint32_t)location,
(*(uint32_t *)PPC_LI((uint32_t)location)) + (uint32_t)location);
break;
case R_PPC_REL32:
/* 32-bit relative jump. */
*(uint32_t *)location = value - (uint32_t)location;
break;
default:
pr_err("%s: unknown ADD relocation: %u\n",
module->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
#ifdef CONFIG_DYNAMIC_FTRACE
notrace int module_trampoline_target(struct module *mod, unsigned long addr,
unsigned long *target)
{
ppc_inst_t jmp[4];
/* Find where the trampoline jumps to */
if (copy_inst_from_kernel_nofault(jmp, (void *)addr))
return -EFAULT;
if (__copy_inst_from_kernel_nofault(jmp + 1, (void *)addr + 4))
return -EFAULT;
if (__copy_inst_from_kernel_nofault(jmp + 2, (void *)addr + 8))
return -EFAULT;
if (__copy_inst_from_kernel_nofault(jmp + 3, (void *)addr + 12))
return -EFAULT;
/* verify that this is what we expect it to be */
if ((ppc_inst_val(jmp[0]) & 0xffff0000) != PPC_RAW_LIS(_R12, 0))
return -EINVAL;
if ((ppc_inst_val(jmp[1]) & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0))
return -EINVAL;
if (ppc_inst_val(jmp[2]) != PPC_RAW_MTCTR(_R12))
return -EINVAL;
if (ppc_inst_val(jmp[3]) != PPC_RAW_BCTR())
return -EINVAL;
addr = (ppc_inst_val(jmp[1]) & 0xffff) | ((ppc_inst_val(jmp[0]) & 0xffff) << 16);
if (addr & 0x8000)
addr -= 0x10000;
*target = addr;
return 0;
}
int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
{
module->arch.tramp = do_plt_call(module->mem[MOD_TEXT].base,
(unsigned long)ftrace_caller,
sechdrs, module);
if (!module->arch.tramp)
return -ENOENT;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
module->arch.tramp_regs = do_plt_call(module->mem[MOD_TEXT].base,
(unsigned long)ftrace_regs_caller,
sechdrs, module);
if (!module->arch.tramp_regs)
return -ENOENT;
#endif
return 0;
}
#endif
| linux-master | arch/powerpc/kernel/module_32.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Helper routines to scan the device tree for PCI devices and busses
*
* Migrated out of PowerPC architecture pci_64.c file by Grant Likely
* <[email protected]> so that these routines are available for
* 32 bit also.
*
* Copyright (C) 2003 Anton Blanchard <[email protected]>, IBM
* Rework, based on alpha PCI code.
* Copyright (c) 2009 Secret Lab Technologies Ltd.
*/
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/of.h>
#include <asm/pci-bridge.h>
/**
* get_int_prop - Decode a u32 from a device tree property
*/
static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
const __be32 *prop;
int len;
prop = of_get_property(np, name, &len);
if (prop && len >= 4)
return of_read_number(prop, 1);
return def;
}
/**
* pci_parse_of_flags - Parse the flags cell of a device tree PCI address
* @addr0: value of 1st cell of a device tree PCI address.
* @bridge: Set this flag if the address is from a bridge 'ranges' property
*
* PCI Bus Binding to IEEE Std 1275-1994
*
* Bit# 33222222 22221111 11111100 00000000
* 10987654 32109876 54321098 76543210
* phys.hi cell: npt000ss bbbbbbbb dddddfff rrrrrrrr
* phys.mid cell: hhhhhhhh hhhhhhhh hhhhhhhh hhhhhhhh
* phys.lo cell: llllllll llllllll llllllll llllllll
*
* where:
* n is 0 if the address is relocatable, 1 otherwise
* p is 1 if the addressable region is "prefetchable", 0 otherwise
* t is 1 if the address is aliased (for non-relocatable I/O),
 * below 1 MB (for Memory), or below 64 KB (for relocatable I/O).
* ss is the space code, denoting the address space:
* 00 denotes Configuration Space
* 01 denotes I/O Space
* 10 denotes 32-bit-address Memory Space
* 11 denotes 64-bit-address Memory Space
* bbbbbbbb is the 8-bit Bus Number
* ddddd is the 5-bit Device Number
* fff is the 3-bit Function Number
* rrrrrrrr is the 8-bit Register Number
*/
#define OF_PCI_ADDR0_SPACE(ss) (((ss)&3)<<24)
#define OF_PCI_ADDR0_SPACE_CFG OF_PCI_ADDR0_SPACE(0)
#define OF_PCI_ADDR0_SPACE_IO OF_PCI_ADDR0_SPACE(1)
#define OF_PCI_ADDR0_SPACE_MMIO32 OF_PCI_ADDR0_SPACE(2)
#define OF_PCI_ADDR0_SPACE_MMIO64 OF_PCI_ADDR0_SPACE(3)
#define OF_PCI_ADDR0_SPACE_MASK OF_PCI_ADDR0_SPACE(3)
#define OF_PCI_ADDR0_RELOC (1UL<<31)
#define OF_PCI_ADDR0_PREFETCH (1UL<<30)
#define OF_PCI_ADDR0_ALIAS (1UL<<29)
#define OF_PCI_ADDR0_BUS 0x00FF0000UL
#define OF_PCI_ADDR0_DEV 0x0000F800UL
#define OF_PCI_ADDR0_FN 0x00000700UL
#define OF_PCI_ADDR0_BARREG 0x000000FFUL
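/*
 * Worked example (illustrative sketch): a phys.hi cell of 0x82000810
 * decodes with the masks above as n = 1 (non-relocatable), ss = 10
 * (32-bit memory space), bus 0x00, device 0x01, function 0x0,
 * register 0x10 (BAR0), so pci_parse_of_flags(0x82000810, 0) below
 * returns IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY |
 * IORESOURCE_SIZEALIGN.
 */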
unsigned int pci_parse_of_flags(u32 addr0, int bridge)
{
unsigned int flags = 0, as = addr0 & OF_PCI_ADDR0_SPACE_MASK;
if (as == OF_PCI_ADDR0_SPACE_MMIO32 || as == OF_PCI_ADDR0_SPACE_MMIO64) {
flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
if (as == OF_PCI_ADDR0_SPACE_MMIO64)
flags |= PCI_BASE_ADDRESS_MEM_TYPE_64 | IORESOURCE_MEM_64;
if (addr0 & OF_PCI_ADDR0_ALIAS)
flags |= PCI_BASE_ADDRESS_MEM_TYPE_1M;
if (addr0 & OF_PCI_ADDR0_PREFETCH)
flags |= IORESOURCE_PREFETCH |
PCI_BASE_ADDRESS_MEM_PREFETCH;
/* Note: We don't know whether the ROM has been left enabled
* by the firmware or not. We mark it as disabled (ie, we do
* not set the IORESOURCE_ROM_ENABLE flag) for now rather than
		 * do a config space read; it will be force-enabled if needed
*/
if (!bridge && (addr0 & OF_PCI_ADDR0_BARREG) == PCI_ROM_ADDRESS)
flags |= IORESOURCE_READONLY;
} else if (as == OF_PCI_ADDR0_SPACE_IO)
flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
if (flags)
flags |= IORESOURCE_SIZEALIGN;
return flags;
}
/**
* of_pci_parse_addrs - Parse PCI addresses assigned in the device tree node
* @node: device tree node for the PCI device
* @dev: pci_dev structure for the device
*
* This function parses the 'assigned-addresses' property of a PCI devices'
* device tree node and writes them into the associated pci_dev structure.
*/
static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
{
u64 base, size;
unsigned int flags;
struct pci_bus_region region;
struct resource *res;
const __be32 *addrs;
u32 i;
int proplen;
bool mark_unset = false;
addrs = of_get_property(node, "assigned-addresses", &proplen);
if (!addrs || !proplen) {
addrs = of_get_property(node, "reg", &proplen);
if (!addrs || !proplen)
return;
mark_unset = true;
}
pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
for (; proplen >= 20; proplen -= 20, addrs += 5) {
flags = pci_parse_of_flags(of_read_number(addrs, 1), 0);
if (!flags)
continue;
base = of_read_number(&addrs[1], 2);
size = of_read_number(&addrs[3], 2);
if (!size)
continue;
i = of_read_number(addrs, 1) & 0xff;
pr_debug(" base: %llx, size: %llx, i: %x\n",
(unsigned long long)base,
(unsigned long long)size, i);
if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
} else if (i == dev->rom_base_reg) {
res = &dev->resource[PCI_ROM_RESOURCE];
flags |= IORESOURCE_READONLY;
} else {
printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
continue;
}
res->flags = flags;
if (mark_unset)
res->flags |= IORESOURCE_UNSET;
res->name = pci_name(dev);
region.start = base;
region.end = base + size - 1;
pcibios_bus_to_resource(dev->bus, res, ®ion);
}
}
/**
* of_create_pci_dev - Given a device tree node on a pci bus, create a pci_dev
* @node: device tree node pointer
* @bus: bus the device is sitting on
* @devfn: PCI function number, extracted from device tree by caller.
*/
struct pci_dev *of_create_pci_dev(struct device_node *node,
struct pci_bus *bus, int devfn)
{
struct pci_dev *dev;
dev = pci_alloc_dev(bus);
if (!dev)
return NULL;
pr_debug(" create device, devfn: %x, type: %s\n", devfn,
of_node_get_device_type(node));
dev->dev.of_node = of_node_get(node);
dev->dev.parent = bus->bridge;
dev->dev.bus = &pci_bus_type;
dev->devfn = devfn;
dev->multifunction = 0; /* maybe a lie? */
dev->needs_freset = 0; /* pcie fundamental reset required */
set_pcie_port_type(dev);
pci_dev_assign_slot(dev);
dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
dev->device = get_int_prop(node, "device-id", 0xffff);
dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);
dev->cfg_size = pci_cfg_space_size(dev);
dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
dev->class = get_int_prop(node, "class-code", 0);
dev->revision = get_int_prop(node, "revision-id", 0);
pr_debug(" class: 0x%x\n", dev->class);
pr_debug(" revision: 0x%x\n", dev->revision);
dev->current_state = PCI_UNKNOWN; /* unknown power state */
dev->error_state = pci_channel_io_normal;
dev->dma_mask = 0xffffffff;
/* Early fixups, before probing the BARs */
pci_fixup_device(pci_fixup_early, dev);
if (of_node_is_type(node, "pci") || of_node_is_type(node, "pciex")) {
/* a PCI-PCI bridge */
dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
dev->rom_base_reg = PCI_ROM_ADDRESS1;
set_pcie_hotplug_bridge(dev);
} else if (of_node_is_type(node, "cardbus")) {
dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
} else {
dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
dev->rom_base_reg = PCI_ROM_ADDRESS;
/* Maybe do a default OF mapping here */
dev->irq = 0;
}
of_pci_parse_addrs(node, dev);
pr_debug(" adding to system ...\n");
pci_device_add(dev, bus);
return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);
/**
* of_scan_pci_bridge - Set up a PCI bridge and scan for child nodes
* @dev: pci_dev structure for the bridge
*
* of_scan_bus() calls this routine for each PCI bridge that it finds, and
* this routine in turn call of_scan_bus() recursively to scan for more child
* devices.
*/
void of_scan_pci_bridge(struct pci_dev *dev)
{
struct device_node *node = dev->dev.of_node;
struct pci_bus *bus;
struct pci_controller *phb;
const __be32 *busrange, *ranges;
int len, i, mode;
struct pci_bus_region region;
struct resource *res;
unsigned int flags;
u64 size;
pr_debug("of_scan_pci_bridge(%pOF)\n", node);
/* parse bus-range property */
busrange = of_get_property(node, "bus-range", &len);
if (busrange == NULL || len != 8) {
printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %pOF\n",
node);
return;
}
ranges = of_get_property(node, "ranges", &len);
if (ranges == NULL) {
printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %pOF\n",
node);
return;
}
bus = pci_find_bus(pci_domain_nr(dev->bus),
of_read_number(busrange, 1));
if (!bus) {
bus = pci_add_new_bus(dev->bus, dev,
of_read_number(busrange, 1));
if (!bus) {
printk(KERN_ERR "Failed to create pci bus for %pOF\n",
node);
return;
}
}
bus->primary = dev->bus->number;
pci_bus_insert_busn_res(bus, of_read_number(busrange, 1),
of_read_number(busrange+1, 1));
bus->bridge_ctl = 0;
/* parse ranges property */
/* PCI #address-cells == 3 and #size-cells == 2 always */
res = &dev->resource[PCI_BRIDGE_RESOURCES];
for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
res->flags = 0;
bus->resource[i] = res;
++res;
}
i = 1;
for (; len >= 32; len -= 32, ranges += 8) {
flags = pci_parse_of_flags(of_read_number(ranges, 1), 1);
size = of_read_number(&ranges[6], 2);
if (flags == 0 || size == 0)
continue;
if (flags & IORESOURCE_IO) {
res = bus->resource[0];
if (res->flags) {
printk(KERN_ERR "PCI: ignoring extra I/O range"
" for bridge %pOF\n", node);
continue;
}
} else {
if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
printk(KERN_ERR "PCI: too many memory ranges"
" for bridge %pOF\n", node);
continue;
}
res = bus->resource[i];
++i;
}
res->flags = flags;
region.start = of_read_number(&ranges[1], 2);
region.end = region.start + size - 1;
pcibios_bus_to_resource(dev->bus, res, ®ion);
}
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
pr_debug(" bus name: %s\n", bus->name);
phb = pci_bus_to_host(bus);
mode = PCI_PROBE_NORMAL;
if (phb->controller_ops.probe_mode)
mode = phb->controller_ops.probe_mode(bus);
pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE)
of_scan_bus(node, bus);
else if (mode == PCI_PROBE_NORMAL)
pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);
static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus,
struct device_node *dn)
{
struct pci_dev *dev = NULL;
const __be32 *reg;
int reglen, devfn;
#ifdef CONFIG_EEH
struct eeh_dev *edev = pdn_to_eeh_dev(PCI_DN(dn));
#endif
pr_debug(" * %pOF\n", dn);
if (!of_device_is_available(dn))
return NULL;
reg = of_get_property(dn, "reg", ®len);
if (reg == NULL || reglen < 20)
return NULL;
devfn = (of_read_number(reg, 1) >> 8) & 0xff;
/* Check if the PCI device is already there */
dev = pci_get_slot(bus, devfn);
if (dev) {
pci_dev_put(dev);
return dev;
}
/* Device removed permanently ? */
#ifdef CONFIG_EEH
if (edev && (edev->mode & EEH_DEV_REMOVED))
return NULL;
#endif
/* create a new pci_dev for this device */
dev = of_create_pci_dev(dn, bus, devfn);
if (!dev)
return NULL;
pr_debug(" dev header type: %x\n", dev->hdr_type);
return dev;
}
/**
* __of_scan_bus - given a PCI bus node, setup bus and scan for child devices
* @node: device tree node for the PCI bus
* @bus: pci_bus structure for the PCI bus
* @rescan_existing: Flag indicating bus has already been set up
*/
static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
int rescan_existing)
{
struct device_node *child;
struct pci_dev *dev;
pr_debug("of_scan_bus(%pOF) bus no %d...\n",
node, bus->number);
/* Scan direct children */
for_each_child_of_node(node, child) {
dev = of_scan_pci_dev(bus, child);
if (!dev)
continue;
pr_debug(" dev header type: %x\n", dev->hdr_type);
}
/* Apply all fixups necessary. We don't fixup the bus "self"
* for an existing bridge that is being rescanned
*/
if (!rescan_existing)
pcibios_setup_bus_self(bus);
/* Now scan child busses */
for_each_pci_bridge(dev, bus)
of_scan_pci_bridge(dev);
}
/**
* of_scan_bus - given a PCI bus node, setup bus and scan for child devices
* @node: device tree node for the PCI bus
* @bus: pci_bus structure for the PCI bus
*/
void of_scan_bus(struct device_node *node, struct pci_bus *bus)
{
__of_scan_bus(node, bus, 0);
}
EXPORT_SYMBOL_GPL(of_scan_bus);
/**
* of_rescan_bus - given a PCI bus node, scan for child devices
* @node: device tree node for the PCI bus
* @bus: pci_bus structure for the PCI bus
*
* Same as of_scan_bus, but for a pci_bus structure that has already been
 * set up.
*/
void of_rescan_bus(struct device_node *node, struct pci_bus *bus)
{
__of_scan_bus(node, bus, 1);
}
EXPORT_SYMBOL_GPL(of_rescan_bus);
| linux-master | arch/powerpc/kernel/pci_of_scan.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
*
* Derived from "arch/i386/kernel/signal.c"
* Copyright (C) 1991, 1992 Linus Torvalds
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#include "signal.h"
#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define FP_REGS_SIZE sizeof(elf_fpregset_t)
#define TRAMP_TRACEBACK 4
#define TRAMP_SIZE 7
/*
* When we have signals to deliver, we set up on the user stack,
* going down from the original stack pointer:
* 1) a rt_sigframe struct which contains the ucontext
* 2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller
* frame for the signal handler.
*/
struct rt_sigframe {
/* sys_rt_sigreturn requires the ucontext be the first field */
struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
struct ucontext uc_transact;
#endif
unsigned long _unused[2];
unsigned int tramp[TRAMP_SIZE];
struct siginfo __user *pinfo;
void __user *puc;
struct siginfo info;
/* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));
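/*
 * Sketch of the resulting user stack (illustrative summary of the
 * comment above, addresses decreasing downwards):
 *
 *   original user r1
 *   [ struct rt_sigframe: ucontext(s), trampoline, siginfo, redzone ]
 *   [ __SIGNAL_FRAMESIZE dummy caller frame                         ]
 *   new user r1 passed to the signal handler
 */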
unsigned long get_min_sigframe_size_64(void)
{
return sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE;
}
/*
* This computes a quad word aligned pointer inside the vmx_reserve array
* element. For historical reasons sigcontext might not be quad word aligned,
* but the location we write the VMX regs to must be. See the comment in
* sigcontext for more detail.
*/
#ifdef CONFIG_ALTIVEC
static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
{
return (elf_vrreg_t __user *) (((unsigned long)sc->vmx_reserve + 15) & ~0xful);
}
#endif
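/*
 * Worked example (illustrative sketch): the quad-word rounding above
 * maps a vmx_reserve address ending in ...0x08 to ...0x10, since
 * (0x08 + 15) & ~0xf == 0x10, so the VMX registers are always stored
 * 16-byte aligned even when the sigcontext itself is only 8-byte
 * aligned.
 */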
static void prepare_setup_sigcontext(struct task_struct *tsk)
{
#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (tsk->thread.used_vr)
flush_altivec_to_thread(tsk);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
tsk->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif /* CONFIG_ALTIVEC */
flush_fp_to_thread(tsk);
#ifdef CONFIG_VSX
if (tsk->thread.used_vsr)
flush_vsx_to_thread(tsk);
#endif /* CONFIG_VSX */
}
/*
* Set up the sigcontext for the signal frame.
*/
#define unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region, label)\
do { \
if (__unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region))\
goto label; \
} while (0)
static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc,
struct task_struct *tsk, int signr, sigset_t *set,
unsigned long handler, int ctx_has_vsx_region)
{
/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
* process never used altivec yet (MSR_VEC is zero in pt_regs of
* the context). This is very important because we must ensure we
* don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
*/
#ifdef CONFIG_ALTIVEC
elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
#endif
struct pt_regs *regs = tsk->thread.regs;
unsigned long msr = regs->msr;
	/* Force userspace to always see softe as 1 (interrupts enabled) */
unsigned long softe = 0x1;
BUG_ON(tsk != current);
#ifdef CONFIG_ALTIVEC
unsafe_put_user(v_regs, &sc->v_regs, efault_out);
/* save altivec registers */
if (tsk->thread.used_vr) {
/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
unsafe_copy_to_user(v_regs, &tsk->thread.vr_state,
33 * sizeof(vector128), efault_out);
		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
*/
msr |= MSR_VEC;
}
/* We always copy to/from vrsave, it's 0 if we don't have or don't
* use altivec.
*/
unsafe_put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
#else /* CONFIG_ALTIVEC */
unsafe_put_user(0, &sc->v_regs, efault_out);
#endif /* CONFIG_ALTIVEC */
/* copy fpr regs and fpscr */
unsafe_copy_fpr_to_user(&sc->fp_regs, tsk, efault_out);
/*
* Clear the MSR VSX bit to indicate there is no valid state attached
* to this context, except in the specific case below where we set it.
*/
msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
/*
* Copy VSX low doubleword to local buffer for formatting,
* then out to userspace. Update v_regs to point after the
* VMX data.
*/
if (tsk->thread.used_vsr && ctx_has_vsx_region) {
v_regs += ELF_NVRREG;
unsafe_copy_vsx_to_user(v_regs, tsk, efault_out);
/* set MSR_VSX in the MSR value in the frame to
		 * indicate that the frame's VSX region contains valid data.
*/
msr |= MSR_VSX;
}
#endif /* CONFIG_VSX */
unsafe_put_user(&sc->gp_regs, &sc->regs, efault_out);
unsafe_copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE, efault_out);
unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out);
unsafe_put_user(softe, &sc->gp_regs[PT_SOFTE], efault_out);
unsafe_put_user(signr, &sc->signal, efault_out);
unsafe_put_user(handler, &sc->handler, efault_out);
if (set != NULL)
unsafe_put_user(set->sig[0], &sc->oldmask, efault_out);
return 0;
efault_out:
return -EFAULT;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* As above, but Transactional Memory is in use, so deliver sigcontexts
* containing checkpointed and transactional register states.
*
* To do this, we treclaim (done before entering here) to gather both sets of
* registers and set up the 'normal' sigcontext registers with rolled-back
* register values such that a simple signal handler sees a correct
* checkpointed register state. If interested, a TM-aware sighandler can
* examine the transactional registers in the 2nd sigcontext to determine the
* real origin of the signal.
*/
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
struct sigcontext __user *tm_sc,
struct task_struct *tsk,
int signr, sigset_t *set, unsigned long handler,
unsigned long msr)
{
/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
* process never used altivec yet (MSR_VEC is zero in pt_regs of
* the context). This is very important because we must ensure we
* don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
* v_regs pointer or not.
*/
#ifdef CONFIG_ALTIVEC
elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
#endif
struct pt_regs *regs = tsk->thread.regs;
long err = 0;
BUG_ON(tsk != current);
BUG_ON(!MSR_TM_ACTIVE(msr));
WARN_ON(tm_suspend_disabled);
/* Restore checkpointed FP, VEC, and VSX bits from ckpt_regs as
* it contains the correct FP, VEC, VSX state after we treclaimed
* the transaction and giveup_all() was called on reclaiming.
*/
msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
#ifdef CONFIG_ALTIVEC
err |= __put_user(v_regs, &sc->v_regs);
err |= __put_user(tm_v_regs, &tm_sc->v_regs);
/* save altivec registers */
if (tsk->thread.used_vr) {
/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
err |= __copy_to_user(v_regs, &tsk->thread.ckvr_state,
33 * sizeof(vector128));
/* If VEC was enabled there are transactional VRs valid too,
* else they're a copy of the checkpointed VRs.
*/
if (msr & MSR_VEC)
err |= __copy_to_user(tm_v_regs,
&tsk->thread.vr_state,
33 * sizeof(vector128));
else
err |= __copy_to_user(tm_v_regs,
&tsk->thread.ckvr_state,
33 * sizeof(vector128));
/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
*/
msr |= MSR_VEC;
}
/* We always copy to/from vrsave, it's 0 if we don't have or don't
* use altivec.
*/
if (cpu_has_feature(CPU_FTR_ALTIVEC))
tsk->thread.ckvrsave = mfspr(SPRN_VRSAVE);
err |= __put_user(tsk->thread.ckvrsave, (u32 __user *)&v_regs[33]);
if (msr & MSR_VEC)
err |= __put_user(tsk->thread.vrsave,
(u32 __user *)&tm_v_regs[33]);
else
err |= __put_user(tsk->thread.ckvrsave,
(u32 __user *)&tm_v_regs[33]);
#else /* CONFIG_ALTIVEC */
err |= __put_user(0, &sc->v_regs);
err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */
/* copy fpr regs and fpscr */
err |= copy_ckfpr_to_user(&sc->fp_regs, tsk);
if (msr & MSR_FP)
err |= copy_fpr_to_user(&tm_sc->fp_regs, tsk);
else
err |= copy_ckfpr_to_user(&tm_sc->fp_regs, tsk);
#ifdef CONFIG_VSX
/*
* Copy VSX low doubleword to local buffer for formatting,
* then out to userspace. Update v_regs to point after the
* VMX data.
*/
if (tsk->thread.used_vsr) {
v_regs += ELF_NVRREG;
tm_v_regs += ELF_NVRREG;
err |= copy_ckvsx_to_user(v_regs, tsk);
if (msr & MSR_VSX)
err |= copy_vsx_to_user(tm_v_regs, tsk);
else
err |= copy_ckvsx_to_user(tm_v_regs, tsk);
/* set MSR_VSX in the MSR value in the frame to
		 * indicate that the frame's VSX region contains valid data.
*/
msr |= MSR_VSX;
}
#endif /* CONFIG_VSX */
err |= __put_user(&sc->gp_regs, &sc->regs);
err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
err |= __copy_to_user(&sc->gp_regs,
&tsk->thread.ckpt_regs, GP_REGS_SIZE);
err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
err |= __put_user(signr, &sc->signal);
err |= __put_user(handler, &sc->handler);
if (set != NULL)
err |= __put_user(set->sig[0], &sc->oldmask);
return err;
}
#endif
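/*
 * Summary sketch (illustrative): in setup_tm_sigcontexts() above, sc
 * receives the checkpointed, rolled-back register state and tm_sc
 * receives the live transactional state, matching the frame's uc and
 * uc_transact contexts, so a handler that is not TM-aware only sees a
 * consistent pre-transaction context in the first sigcontext.
 */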
/*
* Restore the sigcontext from the signal frame.
*/
#define unsafe_restore_sigcontext(tsk, set, sig, sc, label) do { \
if (__unsafe_restore_sigcontext(tsk, set, sig, sc)) \
goto label; \
} while (0)
static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_t *set,
int sig, struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
elf_vrreg_t __user *v_regs;
#endif
unsigned long save_r13 = 0;
unsigned long msr;
struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
int i;
#endif
BUG_ON(tsk != current);
/* If this is not a signal return, we preserve the TLS in r13 */
if (!sig)
save_r13 = regs->gpr[13];
/* copy the GPRs */
unsafe_copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr), efault_out);
unsafe_get_user(regs->nip, &sc->gp_regs[PT_NIP], efault_out);
/* get MSR separately, transfer the LE bit if doing signal return */
unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out);
if (sig)
regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out);
unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out);
unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out);
unsafe_get_user(regs->xer, &sc->gp_regs[PT_XER], efault_out);
unsafe_get_user(regs->ccr, &sc->gp_regs[PT_CCR], efault_out);
/* Don't allow userspace to set SOFTE */
set_trap_norestart(regs);
unsafe_get_user(regs->dar, &sc->gp_regs[PT_DAR], efault_out);
unsafe_get_user(regs->dsisr, &sc->gp_regs[PT_DSISR], efault_out);
unsafe_get_user(regs->result, &sc->gp_regs[PT_RESULT], efault_out);
if (!sig)
regs->gpr[13] = save_r13;
if (set != NULL)
unsafe_get_user(set->sig[0], &sc->oldmask, efault_out);
/*
* Force reload of FP/VEC/VSX so userspace sees any changes.
* Clear these bits from the user process' MSR before copying into the
* thread struct. If we are rescheduled or preempted and another task
* uses FP/VEC/VSX, and this process has the MSR bits set, then the
* context switch code will save the current CPU state into the
* thread_struct - possibly overwriting the data we are updating here.
*/
regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
#ifdef CONFIG_ALTIVEC
unsafe_get_user(v_regs, &sc->v_regs, efault_out);
if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != NULL && (msr & MSR_VEC) != 0) {
unsafe_copy_from_user(&tsk->thread.vr_state, v_regs,
33 * sizeof(vector128), efault_out);
tsk->thread.used_vr = true;
} else if (tsk->thread.used_vr) {
memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
}
/* Always get VRSAVE back */
if (v_regs != NULL)
unsafe_get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
else
tsk->thread.vrsave = 0;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
/* restore floating point */
unsafe_copy_fpr_from_user(tsk, &sc->fp_regs, efault_out);
#ifdef CONFIG_VSX
/*
* Get additional VSX data. Update v_regs to point after the
* VMX data. Copy VSX low doubleword from userspace to local
* buffer for formatting, then into the taskstruct.
*/
v_regs += ELF_NVRREG;
if ((msr & MSR_VSX) != 0) {
unsafe_copy_vsx_from_user(tsk, v_regs, efault_out);
tsk->thread.used_vsr = true;
} else {
for (i = 0; i < 32 ; i++)
tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
}
#endif
return 0;
efault_out:
return -EFAULT;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the two sigcontexts from the frame of a transactional process.
*/
static long restore_tm_sigcontexts(struct task_struct *tsk,
struct sigcontext __user *sc,
struct sigcontext __user *tm_sc)
{
#ifdef CONFIG_ALTIVEC
elf_vrreg_t __user *v_regs, *tm_v_regs;
#endif
unsigned long err = 0;
unsigned long msr;
struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
int i;
#endif
BUG_ON(tsk != current);
if (tm_suspend_disabled)
return -EINVAL;
/* copy the GPRs */
err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
err |= __copy_from_user(&tsk->thread.ckpt_regs, sc->gp_regs,
sizeof(regs->gpr));
/*
* TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
* TEXASR was set by the signal delivery reclaim, as was TFIAR.
* Users doing anything abhorrent like thread-switching w/ signals for
* TM-Suspended code will have to back TEXASR/TFIAR up themselves.
* For the case of getting a signal and simply returning from it,
* we don't need to re-copy them here.
*/
err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
err |= __get_user(tsk->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);
/* get MSR separately, transfer the LE bit if doing signal return */
err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
/* Don't allow reserved mode. */
if (MSR_TM_RESV(msr))
return -EINVAL;
/* pull in MSR LE from user context */
regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
/* The following non-GPR non-FPR non-VR state is also checkpointed: */
err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
err |= __get_user(tsk->thread.ckpt_regs.ctr,
&sc->gp_regs[PT_CTR]);
err |= __get_user(tsk->thread.ckpt_regs.link,
&sc->gp_regs[PT_LNK]);
err |= __get_user(tsk->thread.ckpt_regs.xer,
&sc->gp_regs[PT_XER]);
err |= __get_user(tsk->thread.ckpt_regs.ccr,
&sc->gp_regs[PT_CCR]);
/* Don't allow userspace to set SOFTE */
set_trap_norestart(regs);
/* These regs are not checkpointed; they can go in 'regs'. */
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
/*
* Force reload of FP/VEC.
* This has to be done before copying stuff into tsk->thread.fpr/vr
* for the reasons explained in the previous comment.
*/
regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
#ifdef CONFIG_ALTIVEC
err |= __get_user(v_regs, &sc->v_regs);
err |= __get_user(tm_v_regs, &tm_sc->v_regs);
if (err)
return err;
if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
return -EFAULT;
if (tm_v_regs && !access_ok(tm_v_regs, 34 * sizeof(vector128)))
return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
err |= __copy_from_user(&tsk->thread.ckvr_state, v_regs,
33 * sizeof(vector128));
err |= __copy_from_user(&tsk->thread.vr_state, tm_v_regs,
33 * sizeof(vector128));
current->thread.used_vr = true;
}
else if (tsk->thread.used_vr) {
memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
memset(&tsk->thread.ckvr_state, 0, 33 * sizeof(vector128));
}
/* Always get VRSAVE back */
if (v_regs != NULL && tm_v_regs != NULL) {
err |= __get_user(tsk->thread.ckvrsave,
(u32 __user *)&v_regs[33]);
err |= __get_user(tsk->thread.vrsave,
(u32 __user *)&tm_v_regs[33]);
}
else {
tsk->thread.vrsave = 0;
tsk->thread.ckvrsave = 0;
}
if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
/* restore floating point */
err |= copy_fpr_from_user(tsk, &tm_sc->fp_regs);
err |= copy_ckfpr_from_user(tsk, &sc->fp_regs);
#ifdef CONFIG_VSX
/*
* Get additional VSX data. Update v_regs to point after the
* VMX data. Copy VSX low doubleword from userspace to local
* buffer for formatting, then into the taskstruct.
*/
if (v_regs && ((msr & MSR_VSX) != 0)) {
v_regs += ELF_NVRREG;
tm_v_regs += ELF_NVRREG;
err |= copy_vsx_from_user(tsk, tm_v_regs);
err |= copy_ckvsx_from_user(tsk, v_regs);
tsk->thread.used_vsr = true;
} else {
for (i = 0; i < 32 ; i++) {
tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
tsk->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
}
}
#endif
tm_enable();
/* Make sure the transaction is marked as failed */
tsk->thread.tm_texasr |= TEXASR_FS;
/*
* Disabling preemption, since it is unsafe to be preempted
* with MSR[TS] set without recheckpointing.
*/
preempt_disable();
/* pull in MSR TS bits from user context */
regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));
/*
* Ensure that TM is enabled in regs->msr before we leave the signal
* handler. It could be the case that (a) user disabled the TM bit
* through the manipulation of the MSR bits in uc_mcontext or (b) the
* TM bit was disabled because a sufficient number of context switches
* happened whilst in the signal handler and load_tm overflowed,
* disabling the TM bit. In either case we can end up with an illegal
* TM state leading to a TM Bad Thing when we return to userspace.
*
* CAUTION:
* After regs->MSR[TS] being updated, make sure that get_user(),
* put_user() or similar functions are *not* called. These
* functions can generate page faults which will cause the process
* to be de-scheduled with MSR[TS] set but without calling
* tm_recheckpoint(). This can cause a bug.
*/
regs_set_return_msr(regs, regs->msr | MSR_TM);
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&tsk->thread);
msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_FP) {
load_fp_state(&tsk->thread.fp_state);
regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));
}
if (msr & MSR_VEC) {
load_vr_state(&tsk->thread.vr_state);
regs_set_return_msr(regs, regs->msr | MSR_VEC);
}
preempt_enable();
return err;
}
#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
static long restore_tm_sigcontexts(struct task_struct *tsk, struct sigcontext __user *sc,
struct sigcontext __user *tm_sc)
{
return -EINVAL;
}
#endif
/*
* Setup the trampoline code on the stack
*/
static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
{
int i;
long err = 0;
/* Call the handler, pop the dummy stack frame, then issue the sigreturn syscall */
err |= __put_user(PPC_RAW_BCTRL(), &tramp[0]);
err |= __put_user(PPC_RAW_ADDI(_R1, _R1, __SIGNAL_FRAMESIZE), &tramp[1]);
err |= __put_user(PPC_RAW_LI(_R0, syscall), &tramp[2]);
err |= __put_user(PPC_RAW_SC(), &tramp[3]);
/* Minimal traceback info */
for (i = TRAMP_TRACEBACK; i < TRAMP_SIZE; i++)
err |= __put_user(0, &tramp[i]);
if (!err)
flush_icache_range((unsigned long) &tramp[0],
(unsigned long) &tramp[TRAMP_SIZE]);
return err;
}
/*
* Userspace code may pass a ucontext which doesn't include VSX added
* at the end. We need to check for this case.
*/
#define UCONTEXTSIZEWITHOUTVSX \
(sizeof(struct ucontext) - 32*sizeof(long))
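/*
 * The trailing 32 longs are the per-VSR low doublewords stored at the end
 * of the sigcontext's vmx_reserve area, so a context without them is still
 * acceptable as long as MSR_VSX is clear (checked below).
 */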
/*
* Handle {get,set,swap}_context operations
*/
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
struct ucontext __user *, new_ctx, long, ctx_size)
{
sigset_t set;
unsigned long new_msr = 0;
int ctx_has_vsx_region = 0;
if (new_ctx &&
get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
return -EFAULT;
/*
* Check that the context is not smaller than the original
* size (with VMX but without VSX)
*/
if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
return -EINVAL;
/*
 * Reject a new context that sets the MSR VSX bits but is too small
 * to actually carry the VSX state.
 */
if ((ctx_size < sizeof(struct ucontext)) &&
(new_msr & MSR_VSX))
return -EINVAL;
/* Does the context have enough room to store VSX data? */
if (ctx_size >= sizeof(struct ucontext))
ctx_has_vsx_region = 1;
if (old_ctx != NULL) {
prepare_setup_sigcontext(current);
if (!user_write_access_begin(old_ctx, ctx_size))
return -EFAULT;
unsafe_setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL,
0, ctx_has_vsx_region, efault_out);
unsafe_copy_to_user(&old_ctx->uc_sigmask, &current->blocked,
sizeof(sigset_t), efault_out);
user_write_access_end();
}
if (new_ctx == NULL)
return 0;
if (!access_ok(new_ctx, ctx_size) ||
fault_in_readable((char __user *)new_ctx, ctx_size))
return -EFAULT;
/*
* If we get a fault copying the context into the kernel's
* image of the user's registers, we can't just return -EFAULT
* because the user's registers will be corrupted. For instance
* the NIP value may have been updated but not some of the
* other registers. Given that we have done the access_ok
* and successfully read the first and last bytes of the region
* above, this should only happen in an out-of-memory situation
* or if another thread unmaps the region containing the context.
* We kill the task with a SIGSEGV in this situation.
*/
if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) {
force_exit_sig(SIGSEGV);
return -EFAULT;
}
set_current_blocked(&set);
if (!user_read_access_begin(new_ctx, ctx_size))
return -EFAULT;
if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) {
user_read_access_end();
force_exit_sig(SIGSEGV);
return -EFAULT;
}
user_read_access_end();
/* This returns like rt_sigreturn */
set_thread_flag(TIF_RESTOREALL);
return 0;
efault_out:
user_write_access_end();
return -EFAULT;
}
/*
* Do a signal return; undo the signal stack.
*/
SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = current_pt_regs();
struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
sigset_t set;
unsigned long msr;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(uc, sizeof(*uc)))
goto badframe;
if (__get_user_sigset(&set, &uc->uc_sigmask))
goto badframe;
set_current_blocked(&set);
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM)) {
/*
* If there is a transactional state then throw it away.
* The purpose of a sigreturn is to destroy all traces of the
* signal frame; this includes any transactional state created
* within it. We only check for suspended as we can never be
* active in the kernel; if we somehow are active, there is nothing
* better to do than go ahead and take the TM Bad Thing later.
* The cause is not important as there will never be a
* recheckpoint, so it's not user visible.
*/
if (MSR_TM_SUSPENDED(mfmsr()))
tm_reclaim_current(0);
/*
* Also clear the MSR[TS] bits, so that if there is an exception in the
* code below (such as a page fault in copy_ckvsx_to_user()), the task
* is not recheckpointed if a context switch happens inside the
* exception.
*
* A major page fault can indirectly call schedule(). A reschedule in
* the middle of an exception can have a side effect (changing the CPU
* MSR[TS] state), since schedule() is called with the CPU MSR[TS]
* disabled and returns with MSR[TS]=Suspended (switch_to() calls
* tm_recheckpoint() for the 'new' process). In this case the same
* process continues on the CPU, but the CPU state has just changed.
*
* This can cause a TM Bad Thing, since the MSR on the stack will have
* MSR[TS]=0, and that is what will be used for the RFID.
*
* Clearing the MSR[TS] state here avoids a recheckpoint if there is
* any process reschedule in kernel space. The MSR[TS] state does not
* need to be saved either, since it will be replaced later with the
* MSR[TS] that came from the user context, in restore_tm_sigcontexts().
*/
regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
goto badframe;
}
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && MSR_TM_ACTIVE(msr)) {
/* We recheckpoint on return. */
struct ucontext __user *uc_transact;
/* Trying to start TM on non TM system */
if (!cpu_has_feature(CPU_FTR_TM))
goto badframe;
if (__get_user(uc_transact, &uc->uc_link))
goto badframe;
if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
&uc_transact->uc_mcontext))
goto badframe;
} else {
/*
* Fall through, for non-TM restore
*
* Unset MSR[TS] on the thread regs, since the MSR from the user
* context does not have MSR[TS] active and no recheckpoint was
* done (restore_tm_sigcontexts() was not called either).
*
* If it is not unset, the code can RFID to userspace with
* MSR[TS] set but without the CPU in the proper state,
* causing a TM Bad Thing.
*/
regs_set_return_msr(current->thread.regs,
current->thread.regs->msr & ~MSR_TS_MASK);
if (!user_read_access_begin(&uc->uc_mcontext, sizeof(uc->uc_mcontext)))
goto badframe;
unsafe_restore_sigcontext(current, NULL, 1, &uc->uc_mcontext,
badframe_block);
user_read_access_end();
}
if (restore_altstack(&uc->uc_stack))
goto badframe;
set_thread_flag(TIF_RESTOREALL);
return 0;
badframe_block:
user_read_access_end();
badframe:
signal_fault(current, regs, "rt_sigreturn", uc);
force_sig(SIGSEGV);
return 0;
}
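/*
 * Set up a signal frame on the user stack and arrange for the task to
 * enter the handler. Returns 0 on success, non-zero on failure, in which
 * case the caller forces a SIGSEGV.
 */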
int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
struct task_struct *tsk)
{
struct rt_sigframe __user *frame;
unsigned long newsp = 0;
long err = 0;
struct pt_regs *regs = tsk->thread.regs;
/* Save the thread's msr before get_tm_stackpointer() changes it */
unsigned long msr = regs->msr;
frame = get_sigframe(ksig, tsk, sizeof(*frame), 0);
/*
* This only applies when calling unsafe_setup_sigcontext() and must be
* called before opening the uaccess window.
*/
if (!MSR_TM_ACTIVE(msr))
prepare_setup_sigcontext(tsk);
if (!user_write_access_begin(frame, sizeof(*frame)))
goto badframe;
unsafe_put_user(&frame->info, &frame->pinfo, badframe_block);
unsafe_put_user(&frame->uc, &frame->puc, badframe_block);
/* Create the ucontext. */
unsafe_put_user(0, &frame->uc.uc_flags, badframe_block);
unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], badframe_block);
if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* The ucontext_t passed to userland points to the second
* ucontext_t (for transactional state) with its uc_link ptr.
*/
unsafe_put_user(&frame->uc_transact, &frame->uc.uc_link, badframe_block);
user_write_access_end();
err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
&frame->uc_transact.uc_mcontext,
tsk, ksig->sig, NULL,
(unsigned long)ksig->ka.sa.sa_handler,
msr);
if (!user_write_access_begin(&frame->uc.uc_sigmask,
sizeof(frame->uc.uc_sigmask)))
goto badframe;
#endif
} else {
unsafe_put_user(0, &frame->uc.uc_link, badframe_block);
unsafe_setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
NULL, (unsigned long)ksig->ka.sa.sa_handler,
1, badframe_block);
}
unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
user_write_access_end();
/* Save the siginfo outside of the unsafe block. */
if (copy_siginfo_to_user(&frame->info, &ksig->info))
goto badframe;
/* Make sure signal handler doesn't get spurious FP exceptions */
tsk->thread.fp_state.fpscr = 0;
/* Set up to return from userspace. */
if (tsk->mm->context.vdso) {
regs_set_return_ip(regs, VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64));
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
if (err)
goto badframe;
regs_set_return_ip(regs, (unsigned long) &frame->tramp[0]);
}
/* Allocate a dummy caller frame for the signal handler. */
newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);
/* Set up "regs" so we "return" to the signal handler. */
if (is_elf2_task()) {
regs->ctr = (unsigned long) ksig->ka.sa.sa_handler;
regs->gpr[12] = regs->ctr;
} else {
/* Handler is *really* a pointer to the function descriptor for
* the signal routine. The first entry in the function
* descriptor is the entry address of the signal routine and the
* second entry is the TOC value we need to use.
*/
struct func_desc __user *ptr =
(struct func_desc __user *)ksig->ka.sa.sa_handler;
err |= get_user(regs->ctr, &ptr->addr);
err |= get_user(regs->gpr[2], &ptr->toc);
}
/* enter the signal handler in native-endian mode */
regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
regs->gpr[1] = newsp;
regs->gpr[3] = ksig->sig;
regs->result = 0;
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
regs->gpr[4] = (unsigned long)&frame->info;
regs->gpr[5] = (unsigned long)&frame->uc;
regs->gpr[6] = (unsigned long) frame;
} else {
regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
}
if (err)
goto badframe;
return 0;
badframe_block:
user_write_access_end();
badframe:
signal_fault(current, regs, "handle_rt_signal64", frame);
return 1;
}
| linux-master | arch/powerpc/kernel/signal_64.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Procedures for interfacing to Open Firmware.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*/
#undef DEBUG_PROM
/* we cannot use FORTIFY as it brings in new symbols */
#define __NO_FORTIFY
#include <linux/stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/pgtable.h>
#include <linux/printk.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/interrupt.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/asm-prototypes.h>
#include <asm/ultravisor-api.h>
#include <linux/linux_logo.h>
/* All of prom_init bss lives here */
#define __prombss __section(".bss.prominit")
/*
* Eventually bump that one up
*/
#define DEVTREE_CHUNK_SIZE 0x100000
/*
* This is the size of the local memory reserve map that gets copied
* into the boot params passed to the kernel. That size is totally
* flexible as the kernel just reads the list until it encounters an
* entry with size 0, so it can be changed without breaking binary
* compatibility
*/
#define MEM_RESERVE_MAP_SIZE 8
/*
* prom_init() is called very early on, before the kernel text
* and data have been mapped to KERNELBASE. At this point the code
* is running at whatever address it has been loaded at.
* On ppc32 we compile with -mrelocatable, which means that references
* to extern and static variables get relocated automatically.
* ppc64 objects are always relocatable, we just need to relocate the
* TOC.
*
* Because OF may have mapped I/O devices into the area starting at
* KERNELBASE, particularly on CHRP machines, we can't safely call
* OF once the kernel has been mapped to KERNELBASE. Therefore all
* OF calls must be done within prom_init().
*
* ADDR is used in calls to call_prom. The 4th and following
* arguments to call_prom should be 32-bit values.
* On ppc64, 64 bit values are truncated to 32 bits (and
* fortunately don't get interpreted as two arguments).
*/
#define ADDR(x) (u32)(unsigned long)(x)
#ifdef CONFIG_PPC64
#define OF_WORKAROUNDS 0
#else
#define OF_WORKAROUNDS of_workarounds
static int of_workarounds __prombss;
#endif
#define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
#define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
#ifdef DEBUG_PROM
#define prom_debug(x...) prom_printf(x)
#else
#define prom_debug(x...) do { } while (0)
#endif
typedef u32 prom_arg_t;
struct prom_args {
__be32 service;
__be32 nargs;
__be32 nret;
__be32 args[10];
};
struct prom_t {
ihandle root;
phandle chosen;
int cpu;
ihandle stdout;
ihandle mmumap;
ihandle memory;
};
struct mem_map_entry {
__be64 base;
__be64 size;
};
typedef __be32 cell_t;
extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7, unsigned long r8,
unsigned long r9);
#ifdef CONFIG_PPC64
extern int enter_prom(struct prom_args *args, unsigned long entry);
#else
static inline int enter_prom(struct prom_args *args, unsigned long entry)
{
return ((int (*)(struct prom_args *))entry)(args);
}
#endif
extern void copy_and_flush(unsigned long dest, unsigned long src,
unsigned long size, unsigned long offset);
/* prom structure */
static struct prom_t __prombss prom;
static unsigned long __prombss prom_entry;
static char __prombss of_stdout_device[256];
static char __prombss prom_scratch[256];
static unsigned long __prombss dt_header_start;
static unsigned long __prombss dt_struct_start, dt_struct_end;
static unsigned long __prombss dt_string_start, dt_string_end;
static unsigned long __prombss prom_initrd_start, prom_initrd_end;
#ifdef CONFIG_PPC64
static int __prombss prom_iommu_force_on;
static int __prombss prom_iommu_off;
static unsigned long __prombss prom_tce_alloc_start;
static unsigned long __prombss prom_tce_alloc_end;
#endif
#ifdef CONFIG_PPC_PSERIES
static bool __prombss prom_radix_disable;
static bool __prombss prom_radix_gtse_disable;
static bool __prombss prom_xive_disable;
#endif
#ifdef CONFIG_PPC_SVM
static bool __prombss prom_svm_enable;
#endif
struct platform_support {
bool hash_mmu;
bool radix_mmu;
bool radix_gtse;
bool xive;
};
/* Platform codes are now obsolete in the kernel. They are now only used
* within this file and will ultimately go away too. Feel free to change them
* if you need to; they are not shared with anything outside of this file anymore.
*/
#define PLATFORM_PSERIES 0x0100
#define PLATFORM_PSERIES_LPAR 0x0101
#define PLATFORM_LPAR 0x0001
#define PLATFORM_POWERMAC 0x0400
#define PLATFORM_GENERIC 0x0500
static int __prombss of_platform;
static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];
static unsigned long __prombss prom_memory_limit;
static unsigned long __prombss alloc_top;
static unsigned long __prombss alloc_top_high;
static unsigned long __prombss alloc_bottom;
static unsigned long __prombss rmo_top;
static unsigned long __prombss ram_top;
static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
static int __prombss mem_reserve_cnt;
static cell_t __prombss regbuf[1024];
static bool __prombss rtas_has_query_cpu_stopped;
/*
* Error results ... some OF calls will return "-1" on error, some
* will return 0, some will return either. To simplify, here are
* macros to use with any ihandle or phandle return value to check if
* it is valid
*/
#define PROM_ERROR (-1u)
#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
/* Copied from lib/string.c and lib/kstrtox.c */
static int __init prom_strcmp(const char *cs, const char *ct)
{
unsigned char c1, c2;
while (1) {
c1 = *cs++;
c2 = *ct++;
if (c1 != c2)
return c1 < c2 ? -1 : 1;
if (!c1)
break;
}
return 0;
}
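/*
 * Copy up to n bytes of src into dest and NUL-pad the remainder.
 * Returns the number of bytes copied, or -E2BIG if src did not fit
 * (dest is still NUL-terminated in that case).
 */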
static ssize_t __init prom_strscpy_pad(char *dest, const char *src, size_t n)
{
ssize_t rc;
size_t i;
if (n == 0 || n > INT_MAX)
return -E2BIG;
// Copy up to n bytes
for (i = 0; i < n && src[i] != '\0'; i++)
dest[i] = src[i];
rc = i;
// If we copied all n then we have run out of space for the nul
if (rc == n) {
// Rewind by one character to ensure nul termination
i--;
rc = -E2BIG;
}
for (; i < n; i++)
dest[i] = '\0';
return rc;
}
static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
{
unsigned char c1, c2;
while (count) {
c1 = *cs++;
c2 = *ct++;
if (c1 != c2)
return c1 < c2 ? -1 : 1;
if (!c1)
break;
count--;
}
return 0;
}
static size_t __init prom_strlen(const char *s)
{
const char *sc;
for (sc = s; *sc != '\0'; ++sc)
/* nothing */;
return sc - s;
}
static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res = 0;
for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
if ((res = *su1 - *su2) != 0)
break;
return res;
}
static char __init *prom_strstr(const char *s1, const char *s2)
{
size_t l1, l2;
l2 = prom_strlen(s2);
if (!l2)
return (char *)s1;
l1 = prom_strlen(s1);
while (l1 >= l2) {
l1--;
if (!prom_memcmp(s1, s2, l2))
return (char *)s1;
s1++;
}
return NULL;
}
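/* Append src to dest within a buffer of count bytes, like strlcat(). */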
static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
{
size_t dsize = prom_strlen(dest);
size_t len = prom_strlen(src);
size_t res = dsize + len;
/* This would be a bug */
if (dsize >= count)
return count;
dest += dsize;
count -= dsize;
if (len >= count)
len = count-1;
memcpy(dest, src, len);
dest[len] = 0;
return res;
}
#ifdef CONFIG_PPC_PSERIES
static int __init prom_strtobool(const char *s, bool *res)
{
if (!s)
return -EINVAL;
switch (s[0]) {
case 'y':
case 'Y':
case '1':
*res = true;
return 0;
case 'n':
case 'N':
case '0':
*res = false;
return 0;
case 'o':
case 'O':
switch (s[1]) {
case 'n':
case 'N':
*res = true;
return 0;
case 'f':
case 'F':
*res = false;
return 0;
default:
break;
}
break;
default:
break;
}
return -EINVAL;
}
#endif
/* This is the one and *ONLY* place where we actually call open
* firmware.
*/
static int __init call_prom(const char *service, int nargs, int nret, ...)
{
int i;
struct prom_args args;
va_list list;
args.service = cpu_to_be32(ADDR(service));
args.nargs = cpu_to_be32(nargs);
args.nret = cpu_to_be32(nret);
va_start(list, nret);
for (i = 0; i < nargs; i++)
args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
va_end(list);
for (i = 0; i < nret; i++)
args.args[nargs+i] = 0;
if (enter_prom(&args, prom_entry) < 0)
return PROM_ERROR;
return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
}
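/*
 * Same as call_prom(), but also copies up to nret-1 additional return
 * values into @rets (the first return value is still the return code).
 */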
static int __init call_prom_ret(const char *service, int nargs, int nret,
prom_arg_t *rets, ...)
{
int i;
struct prom_args args;
va_list list;
args.service = cpu_to_be32(ADDR(service));
args.nargs = cpu_to_be32(nargs);
args.nret = cpu_to_be32(nret);
va_start(list, rets);
for (i = 0; i < nargs; i++)
args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
va_end(list);
for (i = 0; i < nret; i++)
args.args[nargs+i] = 0;
if (enter_prom(&args, prom_entry) < 0)
return PROM_ERROR;
if (rets != NULL)
for (i = 1; i < nret; ++i)
rets[i-1] = be32_to_cpu(args.args[nargs+i]);
return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
}
static void __init prom_print(const char *msg)
{
const char *p, *q;
if (prom.stdout == 0)
return;
for (p = msg; *p != 0; p = q) {
for (q = p; *q != 0 && *q != '\n'; ++q)
;
if (q > p)
call_prom("write", 3, 1, prom.stdout, p, q - p);
if (*q == 0)
break;
++q;
call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
}
}
/*
* Both prom_print_hex & prom_print_dec take an unsigned long as input so that
* we do not need __udivdi3 or __umoddi3 on 32bits.
*/
static void __init prom_print_hex(unsigned long val)
{
int i, nibbles = sizeof(val)*2;
char buf[sizeof(val)*2+1];
for (i = nibbles-1; i >= 0; i--) {
buf[i] = (val & 0xf) + '0';
if (buf[i] > '9')
buf[i] += ('a'-'0'-10);
val >>= 4;
}
buf[nibbles] = '\0';
call_prom("write", 3, 1, prom.stdout, buf, nibbles);
}
/* max number of decimal digits in an unsigned long */
#define UL_DIGITS 21
static void __init prom_print_dec(unsigned long val)
{
int i, size;
char buf[UL_DIGITS+1];
for (i = UL_DIGITS-1; i >= 0; i--) {
buf[i] = (val % 10) + '0';
val = val/10;
if (val == 0)
break;
}
/* shift stuff down */
size = UL_DIGITS - i;
call_prom("write", 3, 1, prom.stdout, buf+i, size);
}
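/*
 * Minimal printf for the OF console: supports %s, %x, %u and %d with
 * optional 'l'/'ll' modifiers, and expands '\n' to "\r\n".
 */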
__printf(1, 2)
static void __init prom_printf(const char *format, ...)
{
const char *p, *q, *s;
va_list args;
unsigned long v;
long vs;
int n = 0;
va_start(args, format);
for (p = format; *p != 0; p = q) {
for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
;
if (q > p)
call_prom("write", 3, 1, prom.stdout, p, q - p);
if (*q == 0)
break;
if (*q == '\n') {
++q;
call_prom("write", 3, 1, prom.stdout,
ADDR("\r\n"), 2);
continue;
}
++q;
if (*q == 0)
break;
while (*q == 'l') {
++q;
++n;
}
switch (*q) {
case 's':
++q;
s = va_arg(args, const char *);
prom_print(s);
break;
case 'x':
++q;
switch (n) {
case 0:
v = va_arg(args, unsigned int);
break;
case 1:
v = va_arg(args, unsigned long);
break;
case 2:
default:
v = va_arg(args, unsigned long long);
break;
}
prom_print_hex(v);
break;
case 'u':
++q;
switch (n) {
case 0:
v = va_arg(args, unsigned int);
break;
case 1:
v = va_arg(args, unsigned long);
break;
case 2:
default:
v = va_arg(args, unsigned long long);
break;
}
prom_print_dec(v);
break;
case 'd':
++q;
switch (n) {
case 0:
vs = va_arg(args, int);
break;
case 1:
vs = va_arg(args, long);
break;
case 2:
default:
vs = va_arg(args, long long);
break;
}
if (vs < 0) {
prom_print("-");
vs = -vs;
}
prom_print_dec(vs);
break;
}
}
va_end(args);
}
static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
unsigned long align)
{
if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
/*
* Old OF requires we claim physical and virtual separately
* and then map explicitly (assuming virtual mode)
*/
int ret;
prom_arg_t result;
ret = call_prom_ret("call-method", 5, 2, &result,
ADDR("claim"), prom.memory,
align, size, virt);
if (ret != 0 || result == -1)
return -1;
ret = call_prom_ret("call-method", 5, 2, &result,
ADDR("claim"), prom.mmumap,
align, size, virt);
if (ret != 0) {
call_prom("call-method", 4, 1, ADDR("release"),
prom.memory, size, virt);
return -1;
}
/* the 0x12 is M (coherence) + PP == read/write */
call_prom("call-method", 6, 1,
ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
return virt;
}
return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
(prom_arg_t)align);
}
static void __init __attribute__((noreturn)) prom_panic(const char *reason)
{
prom_print(reason);
/* Do not call exit because it clears the screen on pmac;
* it also causes some sort of double-fault on early pmacs */
if (of_platform == PLATFORM_POWERMAC)
asm("trap\n");
/* ToDo: should put up an SRC here on pSeries */
call_prom("exit", 0, 0);
for (;;) /* should never get here */
;
}
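/*
 * Depth-first walk of the device tree: descend to a child if there is
 * one, otherwise move to a peer, climbing back up through parents as
 * needed. Returns 0 once the whole tree has been visited.
 */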
static int __init prom_next_node(phandle *nodep)
{
phandle node;
if ((node = *nodep) != 0
&& (*nodep = call_prom("child", 1, 1, node)) != 0)
return 1;
if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
return 1;
for (;;) {
if ((node = call_prom("parent", 1, 1, node)) == 0)
return 0;
if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
return 1;
}
}
static inline int __init prom_getprop(phandle node, const char *pname,
void *value, size_t valuelen)
{
return call_prom("getprop", 4, 1, node, ADDR(pname),
(u32)(unsigned long) value, (u32) valuelen);
}
static inline int __init prom_getproplen(phandle node, const char *pname)
{
return call_prom("getproplen", 2, 1, node, ADDR(pname));
}
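/*
 * Helpers for the Longtrail setprop workaround below: build a
 * space-separated Forth command in a scratch buffer and hand it to
 * the OF "interpret" service.
 */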
static void __init add_string(char **str, const char *q)
{
char *p = *str;
while (*q)
*p++ = *q++;
*p++ = ' ';
*str = p;
}
static char *__init tohex(unsigned int x)
{
static const char digits[] __initconst = "0123456789abcdef";
static char result[9] __prombss;
int i;
result[8] = 0;
i = 8;
do {
--i;
result[i] = digits[x & 0xf];
x >>= 4;
} while (x != 0 && i > 0);
return &result[i];
}
static int __init prom_setprop(phandle node, const char *nodename,
const char *pname, void *value, size_t valuelen)
{
char cmd[256], *p;
if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
return call_prom("setprop", 4, 1, node, ADDR(pname),
(u32)(unsigned long) value, (u32) valuelen);
/* gah... setprop doesn't work on longtrail, have to use interpret */
p = cmd;
add_string(&p, "dev");
add_string(&p, nodename);
add_string(&p, tohex((u32)(unsigned long) value));
add_string(&p, tohex(valuelen));
add_string(&p, tohex(ADDR(pname)));
add_string(&p, tohex(prom_strlen(pname)));
add_string(&p, "property");
*p = 0;
return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
}
/* We can't use the standard versions because of relocation headaches. */
#define prom_isxdigit(c) \
(('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))
#define prom_isdigit(c) ('0' <= (c) && (c) <= '9')
#define prom_islower(c) ('a' <= (c) && (c) <= 'z')
#define prom_toupper(c) (prom_islower(c) ? ((c) - 'a' + 'A') : (c))
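/*
 * Minimal strtoul: accepts decimal, octal (leading '0') and hex
 * (leading '0x'/'0X'), e.g. "0x200" parses to 512.
 */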
static unsigned long __init prom_strtoul(const char *cp, const char **endp)
{
unsigned long result = 0, base = 10, value;
if (*cp == '0') {
base = 8;
cp++;
if (prom_toupper(*cp) == 'X') {
cp++;
base = 16;
}
}
while (prom_isxdigit(*cp) &&
(value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
result = result * base + value;
cp++;
}
if (endp)
*endp = cp;
return result;
}
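/*
 * Parse a size with an optional K/M/G suffix, so e.g. "mem=512M"
 * yields 512 << 20 bytes.
 */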
static unsigned long __init prom_memparse(const char *ptr, const char **retptr)
{
unsigned long ret = prom_strtoul(ptr, retptr);
int shift = 0;
/*
* We can't use a switch here because GCC *may* generate a
* jump table which won't work, because we're not running at
* the address we're linked at.
*/
if ('G' == **retptr || 'g' == **retptr)
shift = 30;
if ('M' == **retptr || 'm' == **retptr)
shift = 20;
if ('K' == **retptr || 'k' == **retptr)
shift = 10;
if (shift) {
ret <<= shift;
(*retptr)++;
}
return ret;
}
/*
* Early parsing of the command line passed to the kernel, used for
* "mem=x" and the options that affect the iommu
*/
static void __init early_cmdline_parse(void)
{
const char *opt;
char *p;
int l = 0;
prom_cmd_line[0] = 0;
p = prom_cmd_line;
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
sizeof(prom_cmd_line));
prom_printf("command line: %s\n", prom_cmd_line);
#ifdef CONFIG_PPC64
opt = prom_strstr(prom_cmd_line, "iommu=");
if (opt) {
prom_printf("iommu opt is: %s\n", opt);
opt += 6;
while (*opt && *opt == ' ')
opt++;
if (!prom_strncmp(opt, "off", 3))
prom_iommu_off = 1;
else if (!prom_strncmp(opt, "force", 5))
prom_iommu_force_on = 1;
}
#endif
opt = prom_strstr(prom_cmd_line, "mem=");
if (opt) {
opt += 4;
prom_memory_limit = prom_memparse(opt, (const char **)&opt);
#ifdef CONFIG_PPC64
/* Align to 16 MB == size of ppc64 large page */
prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
#endif
}
#ifdef CONFIG_PPC_PSERIES
prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
opt = prom_strstr(prom_cmd_line, "disable_radix");
if (opt) {
opt += 13;
if (*opt && *opt == '=') {
bool val;
if (prom_strtobool(++opt, &val))
prom_radix_disable = false;
else
prom_radix_disable = val;
} else
prom_radix_disable = true;
}
if (prom_radix_disable)
prom_debug("Radix disabled from cmdline\n");
opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
if (opt) {
prom_radix_gtse_disable = true;
prom_debug("Radix GTSE disabled from cmdline\n");
}
opt = prom_strstr(prom_cmd_line, "xive=off");
if (opt) {
prom_xive_disable = true;
prom_debug("XIVE disabled from cmdline\n");
}
#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_PPC_SVM
opt = prom_strstr(prom_cmd_line, "svm=");
if (opt) {
bool val;
opt += sizeof("svm=") - 1;
if (!prom_strtobool(opt, &val))
prom_svm_enable = val;
}
#endif /* CONFIG_PPC_SVM */
}
#ifdef CONFIG_PPC_PSERIES
/*
* The architecture vector has an array of PVR mask/value pairs,
* followed by # option vectors - 1, followed by the option vectors.
*
* See prom.h for the definition of the bits specified in the
* architecture vector.
*/
/* Firmware expects the value to be n - 1, where n is the # of vectors */
#define NUM_VECTORS(n) ((n) - 1)
/*
* Firmware expects 1 + n - 2, where n is the length of the option vector in
* bytes. The 1 accounts for the length byte itself, the - 2 .. ?
*/
#define VECTOR_LENGTH(n) (1 + (n) - 2)
struct option_vector1 {
u8 byte1;
u8 arch_versions;
u8 arch_versions3;
} __packed;
struct option_vector2 {
u8 byte1;
__be16 reserved;
__be32 real_base;
__be32 real_size;
__be32 virt_base;
__be32 virt_size;
__be32 load_base;
__be32 min_rma;
__be32 min_load;
u8 min_rma_percent;
u8 max_pft_size;
} __packed;
struct option_vector3 {
u8 byte1;
u8 byte2;
} __packed;
struct option_vector4 {
u8 byte1;
u8 min_vp_cap;
} __packed;
struct option_vector5 {
u8 byte1;
u8 byte2;
u8 byte3;
u8 cmo;
u8 associativity;
u8 bin_opts;
u8 micro_checkpoint;
u8 reserved0;
__be32 max_cpus;
__be16 papr_level;
__be16 reserved1;
u8 platform_facilities;
u8 reserved2;
__be16 reserved3;
u8 subprocessors;
u8 byte22;
u8 intarch;
u8 mmu;
u8 hash_ext;
u8 radix_ext;
} __packed;
struct option_vector6 {
u8 reserved;
u8 secondary_pteg;
u8 os_name;
} __packed;
struct option_vector7 {
u8 os_id[256];
} __packed;
struct ibm_arch_vec {
struct { u32 mask, val; } pvrs[14];
u8 num_vectors;
u8 vec1_len;
struct option_vector1 vec1;
u8 vec2_len;
struct option_vector2 vec2;
u8 vec3_len;
struct option_vector3 vec3;
u8 vec4_len;
struct option_vector4 vec4;
u8 vec5_len;
struct option_vector5 vec5;
u8 vec6_len;
struct option_vector6 vec6;
u8 vec7_len;
struct option_vector7 vec7;
} __packed;
static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.pvrs = {
{
.mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
.val = cpu_to_be32(0x003a0000),
},
{
.mask = cpu_to_be32(0xffff0000), /* POWER6 */
.val = cpu_to_be32(0x003e0000),
},
{
.mask = cpu_to_be32(0xffff0000), /* POWER7 */
.val = cpu_to_be32(0x003f0000),
},
{
.mask = cpu_to_be32(0xffff0000), /* POWER8E */
.val = cpu_to_be32(0x004b0000),
},
{
.mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
.val = cpu_to_be32(0x004c0000),
},
{
.mask = cpu_to_be32(0xffff0000), /* POWER8 */
.val = cpu_to_be32(0x004d0000),
},
{
.mask = cpu_to_be32(0xffff0000), /* POWER9 */
.val = cpu_to_be32(0x004e0000),
},
{
.mask = cpu_to_be32(0xffff0000), /* POWER10 */
.val = cpu_to_be32(0x00800000),
},
{
.mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
.val = cpu_to_be32(0x0f000006),
},
{
.mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
.val = cpu_to_be32(0x0f000005),
},
{
.mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
.val = cpu_to_be32(0x0f000004),
},
{
.mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
.val = cpu_to_be32(0x0f000003),
},
{
.mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
.val = cpu_to_be32(0x0f000002),
},
{
.mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
.val = cpu_to_be32(0x0f000001),
},
},
.num_vectors = NUM_VECTORS(6),
.vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
.vec1 = {
.byte1 = 0,
.arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
.arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
},
.vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
/* option vector 2: Open Firmware options supported */
.vec2 = {
.byte1 = OV2_REAL_MODE,
.reserved = 0,
.real_base = cpu_to_be32(0xffffffff),
.real_size = cpu_to_be32(0xffffffff),
.virt_base = cpu_to_be32(0xffffffff),
.virt_size = cpu_to_be32(0xffffffff),
.load_base = cpu_to_be32(0xffffffff),
.min_rma = cpu_to_be32(512), /* 512MB min RMA */
.min_load = cpu_to_be32(0xffffffff), /* full client load */
.min_rma_percent = 0, /* min RMA percentage of total RAM */
.max_pft_size = 48, /* max log_2(hash table size) */
},
.vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
/* option vector 3: processor options supported */
.vec3 = {
.byte1 = 0, /* don't ignore, don't halt */
.byte2 = OV3_FP | OV3_VMX | OV3_DFP,
},
.vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
/* option vector 4: IBM PAPR implementation */
.vec4 = {
.byte1 = 0, /* don't halt */
.min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
},
.vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
/* option vector 5: PAPR/OF options */
.vec5 = {
.byte1 = 0, /* don't ignore, don't halt */
.byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
#ifdef CONFIG_PCI_MSI
/* PCIe/MSI support. Without MSI full PCIe is not supported */
OV5_FEAT(OV5_MSI),
#else
0,
#endif
.byte3 = 0,
.cmo =
#ifdef CONFIG_PPC_SMLPAR
OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
#else
0,
#endif
.associativity = OV5_FEAT(OV5_FORM1_AFFINITY) | OV5_FEAT(OV5_PRRN) |
OV5_FEAT(OV5_FORM2_AFFINITY),
.bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
.micro_checkpoint = 0,
.reserved0 = 0,
.max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
.papr_level = 0,
.reserved1 = 0,
.platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
.reserved2 = 0,
.reserved3 = 0,
.subprocessors = 1,
.byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
.intarch = 0,
.mmu = 0,
.hash_ext = 0,
.radix_ext = 0,
},
/* option vector 6: IBM PAPR hints */
.vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
.vec6 = {
.reserved = 0,
.secondary_pteg = 0,
.os_name = OV6_LINUX,
},
/* option vector 7: OS Identification */
.vec7_len = VECTOR_LENGTH(sizeof(struct option_vector7)),
};
static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
/* Old method - ELF header with PT_NOTE sections only works on BE */
#ifdef __BIG_ENDIAN__
static const struct fake_elf {
Elf32_Ehdr elfhdr;
Elf32_Phdr phdr[2];
struct chrpnote {
u32 namesz;
u32 descsz;
u32 type;
char name[8]; /* "PowerPC" */
struct chrpdesc {
u32 real_mode;
u32 real_base;
u32 real_size;
u32 virt_base;
u32 virt_size;
u32 load_base;
} chrpdesc;
} chrpnote;
struct rpanote {
u32 namesz;
u32 descsz;
u32 type;
char name[24]; /* "IBM,RPA-Client-Config" */
struct rpadesc {
u32 lpar_affinity;
u32 min_rmo_size;
u32 min_rmo_percent;
u32 max_pft_size;
u32 splpar;
u32 min_load;
u32 new_mem_def;
u32 ignore_me;
} rpadesc;
} rpanote;
} fake_elf __initconst = {
.elfhdr = {
.e_ident = { 0x7f, 'E', 'L', 'F',
ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
.e_type = ET_EXEC, /* yeah right */
.e_machine = EM_PPC,
.e_version = EV_CURRENT,
.e_phoff = offsetof(struct fake_elf, phdr),
.e_phentsize = sizeof(Elf32_Phdr),
.e_phnum = 2
},
.phdr = {
[0] = {
.p_type = PT_NOTE,
.p_offset = offsetof(struct fake_elf, chrpnote),
.p_filesz = sizeof(struct chrpnote)
}, [1] = {
.p_type = PT_NOTE,
.p_offset = offsetof(struct fake_elf, rpanote),
.p_filesz = sizeof(struct rpanote)
}
},
.chrpnote = {
.namesz = sizeof("PowerPC"),
.descsz = sizeof(struct chrpdesc),
.type = 0x1275,
.name = "PowerPC",
.chrpdesc = {
.real_mode = ~0U, /* ~0 means "don't care" */
.real_base = ~0U,
.real_size = ~0U,
.virt_base = ~0U,
.virt_size = ~0U,
.load_base = ~0U
},
},
.rpanote = {
.namesz = sizeof("IBM,RPA-Client-Config"),
.descsz = sizeof(struct rpadesc),
.type = 0x12759999,
.name = "IBM,RPA-Client-Config",
.rpadesc = {
.lpar_affinity = 0,
.min_rmo_size = 64, /* in megabytes */
.min_rmo_percent = 0,
.max_pft_size = 48, /* 2^48 bytes max PFT size */
.splpar = 1,
.min_load = ~0U,
.new_mem_def = 0
}
}
};
#endif /* __BIG_ENDIAN__ */
static int __init prom_count_smt_threads(void)
{
phandle node;
char type[64];
unsigned int plen;
/* Pick up the first CPU node we can find */
for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
prom_getprop(node, "device_type", type, sizeof(type));
if (prom_strcmp(type, "cpu"))
continue;
/*
* There is an entry for each smt thread, each entry being
* 4 bytes long. All cpus should have the same number of
* smt threads, so return after finding the first.
*/
plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
if (plen == PROM_ERROR)
break;
plen >>= 2;
prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
/* Sanity check */
if (plen < 1 || plen > 64) {
prom_printf("Threads per core %lu out of bounds, assuming 1\n",
(unsigned long)plen);
return 1;
}
return plen;
}
prom_debug("No threads found, assuming 1 per core\n");
return 1;
}
static void __init prom_parse_mmu_model(u8 val,
struct platform_support *support)
{
switch (val) {
case OV5_FEAT(OV5_MMU_DYNAMIC):
case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
prom_debug("MMU - either supported\n");
support->radix_mmu = !prom_radix_disable;
support->hash_mmu = true;
break;
case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
prom_debug("MMU - radix only\n");
if (prom_radix_disable) {
/*
* If we __have__ to do radix, we're better off ignoring
* the command line rather than not booting.
*/
prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
}
support->radix_mmu = true;
break;
case OV5_FEAT(OV5_MMU_HASH):
prom_debug("MMU - hash only\n");
support->hash_mmu = true;
break;
default:
prom_debug("Unknown mmu support option: 0x%x\n", val);
break;
}
}
static void __init prom_parse_xive_model(u8 val,
struct platform_support *support)
{
switch (val) {
case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
prom_debug("XIVE - either mode supported\n");
support->xive = !prom_xive_disable;
break;
case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
prom_debug("XIVE - exploitation mode supported\n");
if (prom_xive_disable) {
/*
* If we __have__ to do XIVE, we're better off ignoring
* the command line rather than not booting.
*/
prom_printf("WARNING: Ignoring cmdline option xive=off\n");
}
support->xive = true;
break;
case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
prom_debug("XIVE - legacy mode supported\n");
break;
default:
prom_debug("Unknown xive support option: 0x%x\n", val);
break;
}
}
static void __init prom_parse_platform_support(u8 index, u8 val,
struct platform_support *support)
{
switch (index) {
case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
break;
case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
if (val & OV5_FEAT(OV5_RADIX_GTSE))
support->radix_gtse = !prom_radix_gtse_disable;
break;
case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
support);
break;
}
}
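/*
 * Read the firmware's ibm,arch-vec-5-platform-support property (a list
 * of index/value byte pairs) and adjust our option vector 5 MMU, GTSE
 * and XIVE requests accordingly before calling
 * ibm,client-architecture-support.
 */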
static void __init prom_check_platform_support(void)
{
struct platform_support supported = {
.hash_mmu = false,
.radix_mmu = false,
.radix_gtse = false,
.xive = false
};
int prop_len = prom_getproplen(prom.chosen,
"ibm,arch-vec-5-platform-support");
/*
* First copy the architecture vec template
*
* use memcpy() instead of *vec = *vec_template so that GCC replaces it
* by __memcpy() when KASAN is active
*/
memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
sizeof(ibm_architecture_vec));
prom_strscpy_pad(ibm_architecture_vec.vec7.os_id, linux_banner, 256);
if (prop_len > 1) {
int i;
u8 vec[8];
prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
prop_len);
if (prop_len > sizeof(vec))
prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
prop_len);
prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
for (i = 0; i < prop_len; i += 2) {
prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
prom_parse_platform_support(vec[i], vec[i + 1], &supported);
}
}
if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
/* Radix preferred - Check if GTSE is also supported */
prom_debug("Asking for radix\n");
ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
if (supported.radix_gtse)
ibm_architecture_vec.vec5.radix_ext =
OV5_FEAT(OV5_RADIX_GTSE);
else
prom_debug("Radix GTSE isn't supported\n");
} else if (supported.hash_mmu) {
/* Default to hash mmu (if we can) */
prom_debug("Asking for hash\n");
ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
} else {
/* We're probably on a legacy hypervisor */
prom_debug("Assuming legacy hash support\n");
}
if (supported.xive) {
prom_debug("Asking for XIVE\n");
ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
}
}
static void __init prom_send_capabilities(void)
{
ihandle root;
prom_arg_t ret;
u32 cores;
/* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
prom_check_platform_support();
root = call_prom("open", 1, 1, ADDR("/"));
if (root != 0) {
/* We need to tell the FW about the number of cores we support.
*
* To do that, we count the number of threads on the first core
* (we assume this is the same for all cores) and use it to
* divide NR_CPUS.
*/
cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
cores, NR_CPUS);
ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
/* try calling the ibm,client-architecture-support method */
prom_printf("Calling ibm,client-architecture-support...");
if (call_prom_ret("call-method", 3, 2, &ret,
ADDR("ibm,client-architecture-support"),
root,
ADDR(&ibm_architecture_vec)) == 0) {
/* the call exists... */
if (ret)
prom_printf("\nWARNING: ibm,client-architecture"
"-support call FAILED!\n");
call_prom("close", 1, 0, root);
prom_printf(" done\n");
return;
}
call_prom("close", 1, 0, root);
prom_printf(" not implemented\n");
}
#ifdef __BIG_ENDIAN__
{
ihandle elfloader;
/* no ibm,client-architecture-support call, try the old way */
elfloader = call_prom("open", 1, 1,
ADDR("/packages/elf-loader"));
if (elfloader == 0) {
prom_printf("couldn't open /packages/elf-loader\n");
return;
}
call_prom("call-method", 3, 1, ADDR("process-elf-header"),
elfloader, ADDR(&fake_elf));
call_prom("close", 1, 0, elfloader);
}
#endif /* __BIG_ENDIAN__ */
}
#endif /* CONFIG_PPC_PSERIES */
/*
* Memory allocation strategy... our layout is normally:
*
* at 14Mb or more we have vmlinux, then a gap and initrd. In some
* rare cases, initrd might end up being before the kernel though.
* We assume this won't override the final kernel at 0, we have no
* provision to handle that in this version, but it should hopefully
* never happen.
*
* alloc_top is set to the top of RMO, eventually shrink down if the
* TCEs overlap
*
* alloc_bottom is set to the top of kernel/initrd
*
* from there, allocations are done this way: rtas is allocated
* topmost, and the device-tree is allocated from the bottom. We try
* to grow the device-tree allocation as we progress. If we can't,
* then we fail, we don't currently have a facility to restart
* elsewhere, but that shouldn't be necessary.
*
* Note that calls to reserve_mem have to be done explicitly, memory
* allocated with either alloc_up or alloc_down isn't automatically
* reserved.
*/
/*
* Allocates memory in the RMO upward from the kernel/initrd
*
* When align is 0, this is a special case, it means to allocate in place
* at the current location of alloc_bottom or fail (that is basically
* extending the previous allocation). Used for the device-tree flattening
*/
static unsigned long __init alloc_up(unsigned long size, unsigned long align)
{
unsigned long base = alloc_bottom;
unsigned long addr = 0;
if (align)
base = ALIGN(base, align);
prom_debug("%s(%lx, %lx)\n", __func__, size, align);
if (ram_top == 0)
prom_panic("alloc_up() called with mem not initialized\n");
if (align)
base = ALIGN(alloc_bottom, align);
else
base = alloc_bottom;
for(; (base + size) <= alloc_top;
base = ALIGN(base + 0x100000, align)) {
prom_debug(" trying: 0x%lx\n\r", base);
addr = (unsigned long)prom_claim(base, size, 0);
if (addr != PROM_ERROR && addr != 0)
break;
addr = 0;
if (align == 0)
break;
}
if (addr == 0)
return 0;
alloc_bottom = addr + size;
prom_debug(" -> %lx\n", addr);
prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
prom_debug(" alloc_top : %lx\n", alloc_top);
prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
prom_debug(" rmo_top : %lx\n", rmo_top);
prom_debug(" ram_top : %lx\n", ram_top);
return addr;
}
/*
* Allocates memory downward, either from top of RMO, or if highmem
* is set, from the top of RAM. Note that this one doesn't handle
* failures. It does claim memory if highmem is not set.
*/
static unsigned long __init alloc_down(unsigned long size, unsigned long align,
int highmem)
{
unsigned long base, addr = 0;
prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
highmem ? "(high)" : "(low)");
if (ram_top == 0)
prom_panic("alloc_down() called with mem not initialized\n");
if (highmem) {
/* Carve out storage for the TCE table. */
addr = ALIGN_DOWN(alloc_top_high - size, align);
if (addr <= alloc_bottom)
return 0;
/* Will we bump into the RMO? If yes, check that we didn't
* overlap any existing allocations there; if we did, we are
* dead, as we must be the first in town!
*/
if (addr < rmo_top) {
/* Good, we are first */
if (alloc_top == rmo_top)
alloc_top = rmo_top = addr;
else
return 0;
}
alloc_top_high = addr;
goto bail;
}
base = ALIGN_DOWN(alloc_top - size, align);
for (; base > alloc_bottom;
base = ALIGN_DOWN(base - 0x100000, align)) {
prom_debug(" trying: 0x%lx\n\r", base);
addr = (unsigned long)prom_claim(base, size, 0);
if (addr != PROM_ERROR && addr != 0)
break;
addr = 0;
}
if (addr == 0)
return 0;
alloc_top = addr;
bail:
prom_debug(" -> %lx\n", addr);
prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
prom_debug(" alloc_top : %lx\n", alloc_top);
prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
prom_debug(" rmo_top : %lx\n", rmo_top);
prom_debug(" ram_top : %lx\n", ram_top);
return addr;
}
/*
* Parse a "reg" cell
*/
static unsigned long __init prom_next_cell(int s, cell_t **cellp)
{
cell_t *p = *cellp;
unsigned long r = 0;
/* Ignore more than 2 cells */
while (s > sizeof(unsigned long) / 4) {
p++;
s--;
}
r = be32_to_cpu(*p++);
#ifdef CONFIG_PPC64
if (s > 1) {
r <<= 32;
r |= be32_to_cpu(*(p++));
}
#endif
*cellp = p;
return r;
}
/*
* Very dumb function for adding to the memory reserve list, but
* we don't need anything smarter at this point
*
* XXX Eventually check for collisions. They should NEVER happen.
* If problems seem to show up, it would be a good start to track
* them down.
*/
static void __init reserve_mem(u64 base, u64 size)
{
u64 top = base + size;
unsigned long cnt = mem_reserve_cnt;
if (size == 0)
return;
/* We need to always keep one empty entry so that we
* have our terminator with "size" set to 0 since we are
* dumb and just copy this entire array to the boot params
*/
base = ALIGN_DOWN(base, PAGE_SIZE);
top = ALIGN(top, PAGE_SIZE);
size = top - base;
if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
prom_panic("Memory reserve map exhausted !\n");
mem_reserve_map[cnt].base = cpu_to_be64(base);
mem_reserve_map[cnt].size = cpu_to_be64(size);
mem_reserve_cnt = cnt + 1;
}
/*
* Initialize the memory allocation mechanism: parse the "memory" nodes and
* from them obtain the top of memory and of the RMO to set up our local allocator
*/
static void __init prom_init_mem(void)
{
phandle node;
char type[64];
unsigned int plen;
cell_t *p, *endp;
__be32 val;
u32 rac, rsc;
/*
* We iterate the memory nodes to find
* 1) top of RMO (first node)
* 2) top of memory
*/
val = cpu_to_be32(2);
prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
rac = be32_to_cpu(val);
val = cpu_to_be32(1);
prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
rsc = be32_to_cpu(val);
prom_debug("root_addr_cells: %x\n", rac);
prom_debug("root_size_cells: %x\n", rsc);
prom_debug("scanning memory:\n");
for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
prom_getprop(node, "device_type", type, sizeof(type));
if (type[0] == 0) {
/*
* CHRP Longtrail machines have no device_type
* on the memory node, so check the name instead...
*/
prom_getprop(node, "name", type, sizeof(type));
}
if (prom_strcmp(type, "memory"))
continue;
plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
if (plen > sizeof(regbuf)) {
prom_printf("memory node too large for buffer !\n");
plen = sizeof(regbuf);
}
p = regbuf;
endp = p + (plen / sizeof(cell_t));
#ifdef DEBUG_PROM
memset(prom_scratch, 0, sizeof(prom_scratch));
call_prom("package-to-path", 3, 1, node, prom_scratch,
sizeof(prom_scratch) - 1);
prom_debug(" node %s :\n", prom_scratch);
#endif /* DEBUG_PROM */
while ((endp - p) >= (rac + rsc)) {
unsigned long base, size;
base = prom_next_cell(rac, &p);
size = prom_next_cell(rsc, &p);
if (size == 0)
continue;
prom_debug(" %lx %lx\n", base, size);
if (base == 0 && (of_platform & PLATFORM_LPAR))
rmo_top = size;
if ((base + size) > ram_top)
ram_top = base + size;
}
}
alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
/*
* If prom_memory_limit is set we reduce the upper limits *except* for
* alloc_top_high. This must be the real top of RAM so we can put
* TCE's up there.
*/
alloc_top_high = ram_top;
if (prom_memory_limit) {
if (prom_memory_limit <= alloc_bottom) {
prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
prom_memory_limit);
prom_memory_limit = 0;
} else if (prom_memory_limit >= ram_top) {
prom_printf("Ignoring mem=%lx >= ram_top.\n",
prom_memory_limit);
prom_memory_limit = 0;
} else {
ram_top = prom_memory_limit;
rmo_top = min(rmo_top, prom_memory_limit);
}
}
/*
* Setup our top alloc point, that is top of RMO or top of
* segment 0 when running non-LPAR.
* Some RS64 machines have buggy firmware where claims up at
* 1GB fail. Cap at 768MB as a workaround.
* Since 768MB is plenty of room, and we need to cap to something
* reasonable on 32-bit, cap at 768MB on all machines.
*/
if (!rmo_top)
rmo_top = ram_top;
rmo_top = min(0x30000000ul, rmo_top);
alloc_top = rmo_top;
alloc_top_high = ram_top;
/*
* Check if we have an initrd after the kernel but still inside
* the RMO. If we do move our bottom point to after it.
*/
if (prom_initrd_start &&
prom_initrd_start < rmo_top &&
prom_initrd_end > alloc_bottom)
alloc_bottom = PAGE_ALIGN(prom_initrd_end);
prom_printf("memory layout at init:\n");
prom_printf(" memory_limit : %lx (16 MB aligned)\n",
prom_memory_limit);
prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
prom_printf(" alloc_top : %lx\n", alloc_top);
prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
prom_printf(" rmo_top : %lx\n", rmo_top);
prom_printf(" ram_top : %lx\n", ram_top);
}
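/* Close the firmware's stdin instance named in /chosen, if any. */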
static void __init prom_close_stdin(void)
{
__be32 val;
ihandle stdin;
if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
stdin = be32_to_cpu(val);
call_prom("close", 1, 0, stdin);
}
}
#ifdef CONFIG_PPC_SVM
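/*
 * Issue an H_RTAS hypercall ("sc 1") with the RTAS argument buffer at
 * @args; used by prom_rtas_os_term() below to report a fatal error.
 */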
static int __init prom_rtas_hcall(uint64_t args)
{
register uint64_t arg1 asm("r3") = H_RTAS;
register uint64_t arg2 asm("r4") = args;
asm volatile("sc 1\n" : "=r" (arg1) :
"r" (arg1),
"r" (arg2) :);
srr_regs_clobbered();
return arg1;
}
static struct rtas_args __prombss os_term_args;
static void __init prom_rtas_os_term(char *str)
{
phandle rtas_node;
__be32 val;
u32 token;
prom_debug("%s: start...\n", __func__);
rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
prom_debug("rtas_node: %x\n", rtas_node);
if (!PHANDLE_VALID(rtas_node))
return;
val = 0;
prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
token = be32_to_cpu(val);
prom_debug("ibm,os-term: %x\n", token);
if (token == 0)
prom_panic("Could not get token for ibm,os-term\n");
os_term_args.token = cpu_to_be32(token);
os_term_args.nargs = cpu_to_be32(1);
os_term_args.nret = cpu_to_be32(1);
os_term_args.args[0] = cpu_to_be32(__pa(str));
prom_rtas_hcall((uint64_t)&os_term_args);
}
#endif /* CONFIG_PPC_SVM */
/*
* Allocate room for and instantiate RTAS
*/
static void __init prom_instantiate_rtas(void)
{
phandle rtas_node;
ihandle rtas_inst;
u32 base, entry = 0;
__be32 val;
u32 size = 0;
prom_debug("prom_instantiate_rtas: start...\n");
rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
prom_debug("rtas_node: %x\n", rtas_node);
if (!PHANDLE_VALID(rtas_node))
return;
val = 0;
prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
size = be32_to_cpu(val);
if (size == 0)
return;
base = alloc_down(size, PAGE_SIZE, 0);
if (base == 0)
prom_panic("Could not allocate memory for RTAS\n");
rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
if (!IHANDLE_VALID(rtas_inst)) {
prom_printf("opening rtas package failed (%x)\n", rtas_inst);
return;
}
prom_printf("instantiating rtas at 0x%x...", base);
if (call_prom_ret("call-method", 3, 2, &entry,
ADDR("instantiate-rtas"),
rtas_inst, base) != 0
|| entry == 0) {
prom_printf(" failed\n");
return;
}
prom_printf(" done\n");
reserve_mem(base, size);
val = cpu_to_be32(base);
prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
&val, sizeof(val));
val = cpu_to_be32(entry);
prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
&val, sizeof(val));
/* Check if it supports "query-cpu-stopped-state" */
if (prom_getprop(rtas_node, "query-cpu-stopped-state",
&val, sizeof(val)) != PROM_ERROR)
rtas_has_query_cpu_stopped = true;
prom_debug("rtas base = 0x%x\n", base);
prom_debug("rtas entry = 0x%x\n", entry);
prom_debug("rtas size = 0x%x\n", size);
prom_debug("prom_instantiate_rtas: end...\n");
}
#ifdef CONFIG_PPC64
/*
* Allocate room for and instantiate Stored Measurement Log (SML)
*/
static void __init prom_instantiate_sml(void)
{
phandle ibmvtpm_node;
ihandle ibmvtpm_inst;
u32 entry = 0, size = 0, succ = 0;
u64 base;
__be32 val;
prom_debug("prom_instantiate_sml: start...\n");
ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
if (!PHANDLE_VALID(ibmvtpm_node))
return;
ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
if (!IHANDLE_VALID(ibmvtpm_inst)) {
prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
return;
}
if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
&val, sizeof(val)) != PROM_ERROR) {
if (call_prom_ret("call-method", 2, 2, &succ,
ADDR("reformat-sml-to-efi-alignment"),
ibmvtpm_inst) != 0 || succ == 0) {
prom_printf("Reformat SML to EFI alignment failed\n");
return;
}
if (call_prom_ret("call-method", 2, 2, &size,
ADDR("sml-get-allocated-size"),
ibmvtpm_inst) != 0 || size == 0) {
prom_printf("SML get allocated size failed\n");
return;
}
} else {
if (call_prom_ret("call-method", 2, 2, &size,
ADDR("sml-get-handover-size"),
ibmvtpm_inst) != 0 || size == 0) {
prom_printf("SML get handover size failed\n");
return;
}
}
base = alloc_down(size, PAGE_SIZE, 0);
if (base == 0)
prom_panic("Could not allocate memory for sml\n");
prom_printf("instantiating sml at 0x%llx...", base);
memset((void *)base, 0, size);
if (call_prom_ret("call-method", 4, 2, &entry,
ADDR("sml-handover"),
ibmvtpm_inst, size, base) != 0 || entry == 0) {
prom_printf("SML handover failed\n");
return;
}
prom_printf(" done\n");
reserve_mem(base, size);
prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
&base, sizeof(base));
prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
&size, sizeof(size));
prom_debug("sml base = 0x%llx\n", base);
prom_debug("sml size = 0x%x\n", size);
prom_debug("prom_instantiate_sml: end...\n");
}
/*
* Allocate room for and initialize TCE tables
*/
#ifdef __BIG_ENDIAN__
static void __init prom_initialize_tce_table(void)
{
phandle node;
ihandle phb_node;
char compatible[64], type[64], model[64];
char *path = prom_scratch;
u64 base, align;
u32 minalign, minsize;
u64 tce_entry, *tce_entryp;
u64 local_alloc_top, local_alloc_bottom;
u64 i;
if (prom_iommu_off)
return;
prom_debug("starting prom_initialize_tce_table\n");
/* Cache current top of allocs so we reserve a single block */
local_alloc_top = alloc_top_high;
local_alloc_bottom = local_alloc_top;
/* Search all nodes looking for PHBs. */
for (node = 0; prom_next_node(&node); ) {
compatible[0] = 0;
type[0] = 0;
model[0] = 0;
prom_getprop(node, "compatible",
compatible, sizeof(compatible));
prom_getprop(node, "device_type", type, sizeof(type));
prom_getprop(node, "model", model, sizeof(model));
if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
continue;
/* Keep the old logic intact to avoid regression. */
if (compatible[0] != 0) {
if ((prom_strstr(compatible, "python") == NULL) &&
(prom_strstr(compatible, "Speedwagon") == NULL) &&
(prom_strstr(compatible, "Winnipeg") == NULL))
continue;
} else if (model[0] != 0) {
if ((prom_strstr(model, "ython") == NULL) &&
(prom_strstr(model, "peedwagon") == NULL) &&
(prom_strstr(model, "innipeg") == NULL))
continue;
}
if (prom_getprop(node, "tce-table-minalign", &minalign,
sizeof(minalign)) == PROM_ERROR)
minalign = 0;
if (prom_getprop(node, "tce-table-minsize", &minsize,
sizeof(minsize)) == PROM_ERROR)
minsize = 4UL << 20;
/*
* Even though we read what OF wants, we just set the table
* size to 4 MB. This is enough to map 2GB of PCI DMA space.
* By doing this, we avoid the pitfalls of trying to DMA to
* MMIO space and the DMA alias hole.
*/
minsize = 4UL << 20;
/* Align to the greater of the align or size */
align = max(minalign, minsize);
base = alloc_down(minsize, align, 1);
if (base == 0)
prom_panic("ERROR, cannot find space for TCE table.\n");
if (base < local_alloc_bottom)
local_alloc_bottom = base;
/* It seems OF doesn't null-terminate the path :-( */
memset(path, 0, sizeof(prom_scratch));
/* Call OF to setup the TCE hardware */
if (call_prom("package-to-path", 3, 1, node,
path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
prom_printf("package-to-path failed\n");
}
/* Save away the TCE table attributes for later use. */
prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
prom_debug("TCE table: %s\n", path);
prom_debug("\tnode = 0x%x\n", node);
prom_debug("\tbase = 0x%llx\n", base);
prom_debug("\tsize = 0x%x\n", minsize);
/* Initialize the table to have a one-to-one mapping
* over the allocated size.
*/
tce_entryp = (u64 *)base;
for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
tce_entry = (i << PAGE_SHIFT);
tce_entry |= 0x3;
*tce_entryp = tce_entry;
}
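		/*
		 * Worked example (assuming 4K pages, an assumption since the
		 * actual PAGE_SHIFT depends on the kernel configuration):
		 * entry 0 becomes 0x3, entry 1 becomes 0x1003, entry 2 becomes
		 * 0x2003, i.e. a one-to-one mapping with the read/write bits
		 * (0x3) set in each TCE.
		 */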
prom_printf("opening PHB %s", path);
phb_node = call_prom("open", 1, 1, path);
if (phb_node == 0)
prom_printf("... failed\n");
else
prom_printf("... done\n");
call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
phb_node, -1, minsize,
(u32) base, (u32) (base >> 32));
call_prom("close", 1, 0, phb_node);
}
reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
/* These are only really needed if there is a memory limit in
* effect, but we don't know so export them always. */
prom_tce_alloc_start = local_alloc_bottom;
prom_tce_alloc_end = local_alloc_top;
/* Flag the first invalid entry */
prom_debug("ending prom_initialize_tce_table\n");
}
#endif /* __BIG_ENDIAN__ */
#endif /* CONFIG_PPC64 */
/*
* With CHRP SMP we need to use the OF to start the other processors.
* We can't wait until smp_boot_cpus (the OF is trashed by then)
* so we have to put the processors into a holding pattern controlled
* by the kernel (not OF) before we destroy the OF.
*
* This uses a chunk of low memory, puts some holding pattern
* code there and sends the other processors off to there until
* smp_boot_cpus tells them to do something. The holding pattern
 * checks that address until its cpu # is there; when it is, that
* cpu jumps to __secondary_start(). smp_boot_cpus() takes care
* of setting those values.
*
* We also use physical address 0x4 here to tell when a cpu
* is in its holding pattern code.
*
* -- Cort
*/
/*
* We want to reference the copy of __secondary_hold_* in the
* 0 - 0x100 address range
*/
#define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
static void __init prom_hold_cpus(void)
{
unsigned long i;
phandle node;
char type[64];
unsigned long *spinloop
= (void *) LOW_ADDR(__secondary_hold_spinloop);
unsigned long *acknowledge
= (void *) LOW_ADDR(__secondary_hold_acknowledge);
unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
/*
* On pseries, if RTAS supports "query-cpu-stopped-state",
 * we skip this stage; the CPUs will be started by the
* kernel using RTAS.
*/
if ((of_platform == PLATFORM_PSERIES ||
of_platform == PLATFORM_PSERIES_LPAR) &&
rtas_has_query_cpu_stopped) {
prom_printf("prom_hold_cpus: skipped\n");
return;
}
prom_debug("prom_hold_cpus: start...\n");
prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
prom_debug(" 1) acknowledge = 0x%lx\n",
(unsigned long)acknowledge);
prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
/* Set the common spinloop variable, so all of the secondary cpus
* will block when they are awakened from their OF spinloop.
* This must occur for both SMP and non SMP kernels, since OF will
* be trashed when we move the kernel.
*/
*spinloop = 0;
/* look for cpus */
for (node = 0; prom_next_node(&node); ) {
unsigned int cpu_no;
__be32 reg;
type[0] = 0;
prom_getprop(node, "device_type", type, sizeof(type));
if (prom_strcmp(type, "cpu") != 0)
continue;
/* Skip non-configured cpus. */
if (prom_getprop(node, "status", type, sizeof(type)) > 0)
if (prom_strcmp(type, "okay") != 0)
continue;
reg = cpu_to_be32(-1); /* make sparse happy */
prom_getprop(node, "reg", ®, sizeof(reg));
cpu_no = be32_to_cpu(reg);
prom_debug("cpu hw idx = %u\n", cpu_no);
/* Init the acknowledge var which will be reset by
* the secondary cpu when it awakens from its OF
* spinloop.
*/
*acknowledge = (unsigned long)-1;
if (cpu_no != prom.cpu) {
/* Primary Thread of non-boot cpu or any thread */
prom_printf("starting cpu hw idx %u... ", cpu_no);
call_prom("start-cpu", 3, 0, node,
secondary_hold, cpu_no);
for (i = 0; (i < 100000000) &&
(*acknowledge == ((unsigned long)-1)); i++ )
mb();
if (*acknowledge == cpu_no)
prom_printf("done\n");
else
prom_printf("failed: %lx\n", *acknowledge);
}
#ifdef CONFIG_SMP
else
prom_printf("boot cpu hw idx %u\n", cpu_no);
#endif /* CONFIG_SMP */
}
prom_debug("prom_hold_cpus: end...\n");
}
static void __init prom_init_client_services(unsigned long pp)
{
/* Get a handle to the prom entry point before anything else */
prom_entry = pp;
/* get a handle for the stdout device */
prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
if (!PHANDLE_VALID(prom.chosen))
prom_panic("cannot find chosen"); /* msg won't be printed :( */
/* get device tree root */
prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
if (!PHANDLE_VALID(prom.root))
prom_panic("cannot find device tree root"); /* msg won't be printed :( */
prom.mmumap = 0;
}
#ifdef CONFIG_PPC32
/*
* For really old powermacs, we need to map things we claim.
* For that, we need the ihandle of the mmu.
* Also, on the longtrail, we need to work around other bugs.
*/
static void __init prom_find_mmu(void)
{
phandle oprom;
char version[64];
oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
if (!PHANDLE_VALID(oprom))
return;
if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
return;
version[sizeof(version) - 1] = 0;
/* XXX might need to add other versions here */
if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
of_workarounds = OF_WA_CLAIM;
else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
} else
return;
prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
prom_getprop(prom.chosen, "mmu", &prom.mmumap,
sizeof(prom.mmumap));
prom.mmumap = be32_to_cpu(prom.mmumap);
if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
}
#else
#define prom_find_mmu()
#endif
static void __init prom_init_stdout(void)
{
char *path = of_stdout_device;
char type[16];
phandle stdout_node;
__be32 val;
if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
prom_panic("cannot find stdout");
prom.stdout = be32_to_cpu(val);
/* Get the full OF pathname of the stdout device */
memset(path, 0, 256);
call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
prom_printf("OF stdout device is: %s\n", of_stdout_device);
prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
path, prom_strlen(path) + 1);
/* instance-to-package fails on PA-Semi */
stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
if (stdout_node != PROM_ERROR) {
val = cpu_to_be32(stdout_node);
/* If it's a display, note it */
memset(type, 0, sizeof(type));
prom_getprop(stdout_node, "device_type", type, sizeof(type));
if (prom_strcmp(type, "display") == 0)
prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
}
}
static int __init prom_find_machine_type(void)
{
static char compat[256] __prombss;
int len, i = 0;
#ifdef CONFIG_PPC64
phandle rtas;
int x;
#endif
/* Look for a PowerMac or a Cell */
len = prom_getprop(prom.root, "compatible",
compat, sizeof(compat)-1);
if (len > 0) {
compat[len] = 0;
while (i < len) {
char *p = &compat[i];
int sl = prom_strlen(p);
if (sl == 0)
break;
if (prom_strstr(p, "Power Macintosh") ||
prom_strstr(p, "MacRISC"))
return PLATFORM_POWERMAC;
#ifdef CONFIG_PPC64
/* We must make sure we don't detect the IBM Cell
* blades as pSeries due to some firmware issues,
* so we do it here.
*/
if (prom_strstr(p, "IBM,CBEA") ||
prom_strstr(p, "IBM,CPBW-1.0"))
return PLATFORM_GENERIC;
#endif /* CONFIG_PPC64 */
i += sl + 1;
}
}
#ifdef CONFIG_PPC64
/* Try to figure out if it's an IBM pSeries or any other
 * PAPR compliant platform. We assume it is if:
 *  - /device_type is "chrp" (please, do NOT use that for future
 *    non-IBM designs!)
* - it has /rtas
*/
len = prom_getprop(prom.root, "device_type",
compat, sizeof(compat)-1);
if (len <= 0)
return PLATFORM_GENERIC;
if (prom_strcmp(compat, "chrp"))
return PLATFORM_GENERIC;
/* Default to pSeries. We need to know if we are running LPAR */
rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
if (!PHANDLE_VALID(rtas))
return PLATFORM_GENERIC;
x = prom_getproplen(rtas, "ibm,hypertas-functions");
if (x != PROM_ERROR) {
prom_debug("Hypertas detected, assuming LPAR !\n");
return PLATFORM_PSERIES_LPAR;
}
return PLATFORM_PSERIES;
#else
return PLATFORM_GENERIC;
#endif
}
static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
{
return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
}
/*
* If we have a display that we don't know how to drive,
* we will want to try to execute OF's open method for it
 * later. However, OF will probably fall over if we do that after
 * we've taken over the MMU.
* So we check whether we will need to open the display,
* and if so, open it now.
*/
static void __init prom_check_displays(void)
{
char type[16], *path;
phandle node;
ihandle ih;
int i;
static const unsigned char default_colors[] __initconst = {
0x00, 0x00, 0x00,
0x00, 0x00, 0xaa,
0x00, 0xaa, 0x00,
0x00, 0xaa, 0xaa,
0xaa, 0x00, 0x00,
0xaa, 0x00, 0xaa,
0xaa, 0xaa, 0x00,
0xaa, 0xaa, 0xaa,
0x55, 0x55, 0x55,
0x55, 0x55, 0xff,
0x55, 0xff, 0x55,
0x55, 0xff, 0xff,
0xff, 0x55, 0x55,
0xff, 0x55, 0xff,
0xff, 0xff, 0x55,
0xff, 0xff, 0xff
};
const unsigned char *clut;
prom_debug("Looking for displays\n");
for (node = 0; prom_next_node(&node); ) {
memset(type, 0, sizeof(type));
prom_getprop(node, "device_type", type, sizeof(type));
if (prom_strcmp(type, "display") != 0)
continue;
/* It seems OF doesn't null-terminate the path :-( */
path = prom_scratch;
memset(path, 0, sizeof(prom_scratch));
/*
* leave some room at the end of the path for appending extra
* arguments
*/
if (call_prom("package-to-path", 3, 1, node, path,
sizeof(prom_scratch) - 10) == PROM_ERROR)
continue;
prom_printf("found display : %s, opening... ", path);
ih = call_prom("open", 1, 1, path);
if (ih == 0) {
prom_printf("failed\n");
continue;
}
/* Success */
prom_printf("done\n");
prom_setprop(node, path, "linux,opened", NULL, 0);
/* Setup a usable color table when the appropriate
* method is available. Should update this to set-colors */
clut = default_colors;
for (i = 0; i < 16; i++, clut += 3)
if (prom_set_color(ih, i, clut[0], clut[1],
clut[2]) != 0)
break;
#ifdef CONFIG_LOGO_LINUX_CLUT224
clut = PTRRELOC(logo_linux_clut224.clut);
for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
if (prom_set_color(ih, i + 32, clut[0], clut[1],
clut[2]) != 0)
break;
#endif /* CONFIG_LOGO_LINUX_CLUT224 */
#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
PROM_ERROR) {
u32 width, height, pitch, addr;
prom_printf("Setting btext !\n");
if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)
return;
if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)
return;
if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)
return;
if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)
return;
prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
width, height, pitch, addr);
btext_setup_display(width, height, 8, pitch, addr);
btext_prepare_BAT();
}
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
}
/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
unsigned long needed, unsigned long align)
{
void *ret;
*mem_start = ALIGN(*mem_start, align);
while ((*mem_start + needed) > *mem_end) {
unsigned long room, chunk;
prom_debug("Chunk exhausted, claiming more at %lx...\n",
alloc_bottom);
room = alloc_top - alloc_bottom;
if (room > DEVTREE_CHUNK_SIZE)
room = DEVTREE_CHUNK_SIZE;
if (room < PAGE_SIZE)
prom_panic("No memory for flatten_device_tree "
"(no room)\n");
chunk = alloc_up(room, 0);
if (chunk == 0)
prom_panic("No memory for flatten_device_tree "
"(claim failed)\n");
*mem_end = chunk + room;
}
ret = (void *)*mem_start;
*mem_start += needed;
return ret;
}
#define dt_push_token(token, mem_start, mem_end) do { \
void *room = make_room(mem_start, mem_end, 4, 4); \
*(__be32 *)room = cpu_to_be32(token); \
} while(0)
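/*
 * To illustrate the helpers above (an illustrative sketch; the offsets and
 * lengths below are hypothetical): a single 8-byte property whose name sits
 * at string-table offset 0x1c is emitted by scan_dt_build_struct() roughly as
 *
 *	dt_push_token(OF_DT_PROP, mem_start, mem_end);	// property marker
 *	dt_push_token(8, mem_start, mem_end);		// value length
 *	dt_push_token(0x1c, mem_start, mem_end);	// name offset
 *	valp = make_room(mem_start, mem_end, 8, 4);	// then the raw value,
 *							// padded to 4 bytes
 */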
static unsigned long __init dt_find_string(char *str)
{
char *s, *os;
s = os = (char *)dt_string_start;
s += 4;
while (s < (char *)dt_string_end) {
if (prom_strcmp(s, str) == 0)
return s - os;
s += prom_strlen(s) + 1;
}
return 0;
}
/*
* The Open Firmware 1275 specification states properties must be 31 bytes or
 * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
*/
#define MAX_PROPERTY_NAME 64
static void __init scan_dt_build_strings(phandle node,
unsigned long *mem_start,
unsigned long *mem_end)
{
char *prev_name, *namep, *sstart;
unsigned long soff;
phandle child;
sstart = (char *)dt_string_start;
/* get and store all property names */
prev_name = "";
for (;;) {
/* 64 is max len of name including nul. */
namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
			/* No more properties: unwind alloc */
*mem_start = (unsigned long)namep;
break;
}
/* skip "name" */
if (prom_strcmp(namep, "name") == 0) {
*mem_start = (unsigned long)namep;
prev_name = "name";
continue;
}
/* get/create string entry */
soff = dt_find_string(namep);
if (soff != 0) {
*mem_start = (unsigned long)namep;
namep = sstart + soff;
} else {
/* Trim off some if we can */
*mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
dt_string_end = *mem_start;
}
prev_name = namep;
}
/* do all our children */
child = call_prom("child", 1, 1, node);
while (child != 0) {
scan_dt_build_strings(child, mem_start, mem_end);
child = call_prom("peer", 1, 1, child);
}
}
static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
unsigned long *mem_end)
{
phandle child;
char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
unsigned long soff;
unsigned char *valp;
static char pname[MAX_PROPERTY_NAME] __prombss;
int l, room, has_phandle = 0;
dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
/* get the node's full name */
namep = (char *)*mem_start;
room = *mem_end - *mem_start;
if (room > 255)
room = 255;
l = call_prom("package-to-path", 3, 1, node, namep, room);
if (l >= 0) {
/* Didn't fit? Get more room. */
if (l >= room) {
if (l >= *mem_end - *mem_start)
namep = make_room(mem_start, mem_end, l+1, 1);
call_prom("package-to-path", 3, 1, node, namep, l);
}
namep[l] = '\0';
/* Fixup an Apple bug where they have bogus \0 chars in the
* middle of the path in some properties, and extract
* the unit name (everything after the last '/').
*/
for (lp = p = namep, ep = namep + l; p < ep; p++) {
if (*p == '/')
lp = namep;
else if (*p != 0)
*lp++ = *p;
}
*lp = 0;
*mem_start = ALIGN((unsigned long)lp + 1, 4);
}
/* get it again for debugging */
path = prom_scratch;
memset(path, 0, sizeof(prom_scratch));
call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
/* get and store all properties */
prev_name = "";
sstart = (char *)dt_string_start;
for (;;) {
if (call_prom("nextprop", 3, 1, node, prev_name,
pname) != 1)
break;
/* skip "name" */
if (prom_strcmp(pname, "name") == 0) {
prev_name = "name";
continue;
}
/* find string offset */
soff = dt_find_string(pname);
if (soff == 0) {
prom_printf("WARNING: Can't find string index for"
" <%s>, node %s\n", pname, path);
break;
}
prev_name = sstart + soff;
/* get length */
l = call_prom("getproplen", 2, 1, node, pname);
/* sanity checks */
if (l == PROM_ERROR)
continue;
/* push property head */
dt_push_token(OF_DT_PROP, mem_start, mem_end);
dt_push_token(l, mem_start, mem_end);
dt_push_token(soff, mem_start, mem_end);
/* push property content */
valp = make_room(mem_start, mem_end, l, 4);
call_prom("getprop", 4, 1, node, pname, valp, l);
*mem_start = ALIGN(*mem_start, 4);
if (!prom_strcmp(pname, "phandle"))
has_phandle = 1;
}
/* Add a "phandle" property if none already exist */
if (!has_phandle) {
soff = dt_find_string("phandle");
if (soff == 0)
prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
else {
dt_push_token(OF_DT_PROP, mem_start, mem_end);
dt_push_token(4, mem_start, mem_end);
dt_push_token(soff, mem_start, mem_end);
valp = make_room(mem_start, mem_end, 4, 4);
*(__be32 *)valp = cpu_to_be32(node);
}
}
/* do all our children */
child = call_prom("child", 1, 1, node);
while (child != 0) {
scan_dt_build_struct(child, mem_start, mem_end);
child = call_prom("peer", 1, 1, child);
}
dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
}
static void __init flatten_device_tree(void)
{
phandle root;
unsigned long mem_start, mem_end, room;
struct boot_param_header *hdr;
char *namep;
u64 *rsvmap;
/*
* Check how much room we have between alloc top & bottom (+/- a
* few pages), crop to 1MB, as this is our "chunk" size
*/
room = alloc_top - alloc_bottom - 0x4000;
if (room > DEVTREE_CHUNK_SIZE)
room = DEVTREE_CHUNK_SIZE;
prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
/* Now try to claim that */
mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
if (mem_start == 0)
prom_panic("Can't allocate initial device-tree chunk\n");
mem_end = mem_start + room;
/* Get root of tree */
root = call_prom("peer", 1, 1, (phandle)0);
if (root == (phandle)0)
prom_panic ("couldn't get device tree root\n");
/* Build header and make room for mem rsv map */
mem_start = ALIGN(mem_start, 4);
hdr = make_room(&mem_start, &mem_end,
sizeof(struct boot_param_header), 4);
dt_header_start = (unsigned long)hdr;
rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
/* Start of strings */
mem_start = PAGE_ALIGN(mem_start);
dt_string_start = mem_start;
mem_start += 4; /* hole */
/* Add "phandle" in there, we'll need it */
namep = make_room(&mem_start, &mem_end, 16, 1);
prom_strscpy_pad(namep, "phandle", sizeof("phandle"));
mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
/* Build string array */
prom_printf("Building dt strings...\n");
scan_dt_build_strings(root, &mem_start, &mem_end);
dt_string_end = mem_start;
/* Build structure */
mem_start = PAGE_ALIGN(mem_start);
dt_struct_start = mem_start;
prom_printf("Building dt structure...\n");
scan_dt_build_struct(root, &mem_start, &mem_end);
dt_push_token(OF_DT_END, &mem_start, &mem_end);
dt_struct_end = PAGE_ALIGN(mem_start);
/* Finish header */
hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
hdr->magic = cpu_to_be32(OF_DT_HEADER);
hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
hdr->version = cpu_to_be32(OF_DT_VERSION);
/* Version 16 is not backward compatible */
hdr->last_comp_version = cpu_to_be32(0x10);
/* Copy the reserve map in */
memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
#ifdef DEBUG_PROM
{
int i;
prom_printf("reserved memory map:\n");
for (i = 0; i < mem_reserve_cnt; i++)
prom_printf(" %llx - %llx\n",
be64_to_cpu(mem_reserve_map[i].base),
be64_to_cpu(mem_reserve_map[i].size));
}
#endif
/* Bump mem_reserve_cnt to cause further reservations to fail
* since it's too late.
*/
mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
dt_string_start, dt_string_end);
prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
dt_struct_start, dt_struct_end);
}
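/*
 * A rough sketch of the blob produced above (offsets are relative to
 * dt_header_start; exact values depend on the allocations made at boot):
 *
 *	+--------------------------+  offset 0
 *	| struct boot_param_header |
 *	+--------------------------+  off_mem_rsvmap
 *	| memory reserve map       |
 *	+--------------------------+  off_dt_strings (page aligned)
 *	| property name strings    |
 *	+--------------------------+  off_dt_struct (page aligned)
 *	| structure tokens         |  ...ending with OF_DT_END
 *	+--------------------------+  totalsize
 */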
#ifdef CONFIG_PPC_MAPLE
/* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
* The values are bad, and it doesn't even have the right number of cells. */
static void __init fixup_device_tree_maple(void)
{
phandle isa;
u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
u32 isa_ranges[6];
char *name;
name = "/ht@0/isa@4";
isa = call_prom("finddevice", 1, 1, ADDR(name));
if (!PHANDLE_VALID(isa)) {
name = "/ht@0/isa@6";
isa = call_prom("finddevice", 1, 1, ADDR(name));
rloc = 0x01003000; /* IO space; PCI device = 6 */
}
if (!PHANDLE_VALID(isa))
return;
if (prom_getproplen(isa, "ranges") != 12)
return;
if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
== PROM_ERROR)
return;
if (isa_ranges[0] != 0x1 ||
isa_ranges[1] != 0xf4000000 ||
isa_ranges[2] != 0x00010000)
return;
prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
isa_ranges[0] = 0x1;
isa_ranges[1] = 0x0;
isa_ranges[2] = rloc;
isa_ranges[3] = 0x0;
isa_ranges[4] = 0x0;
isa_ranges[5] = 0x00010000;
prom_setprop(isa, name, "ranges",
isa_ranges, sizeof(isa_ranges));
}
#define CPC925_MC_START 0xf8000000
#define CPC925_MC_LENGTH 0x1000000
/* The values for the memory-controller don't have the right number of cells */
static void __init fixup_device_tree_maple_memory_controller(void)
{
phandle mc;
u32 mc_reg[4];
char *name = "/hostbridge@f8000000";
u32 ac, sc;
mc = call_prom("finddevice", 1, 1, ADDR(name));
if (!PHANDLE_VALID(mc))
return;
if (prom_getproplen(mc, "reg") != 8)
return;
prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
if ((ac != 2) || (sc != 2))
return;
if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
return;
if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
return;
prom_printf("Fixing up bogus hostbridge on Maple...\n");
mc_reg[0] = 0x0;
mc_reg[1] = CPC925_MC_START;
mc_reg[2] = 0x0;
mc_reg[3] = CPC925_MC_LENGTH;
prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
}
#else
#define fixup_device_tree_maple()
#define fixup_device_tree_maple_memory_controller()
#endif
#ifdef CONFIG_PPC_CHRP
/*
 * Pegasos and BriQ lack the "ranges" property in the isa node
* Pegasos needs decimal IRQ 14/15, not hexadecimal
* Pegasos has the IDE configured in legacy mode, but advertised as native
*/
static void __init fixup_device_tree_chrp(void)
{
phandle ph;
u32 prop[6];
u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
char *name;
int rc;
name = "/pci@80000000/isa@c";
ph = call_prom("finddevice", 1, 1, ADDR(name));
if (!PHANDLE_VALID(ph)) {
name = "/pci@ff500000/isa@6";
ph = call_prom("finddevice", 1, 1, ADDR(name));
rloc = 0x01003000; /* IO space; PCI device = 6 */
}
if (PHANDLE_VALID(ph)) {
rc = prom_getproplen(ph, "ranges");
if (rc == 0 || rc == PROM_ERROR) {
prom_printf("Fixing up missing ISA range on Pegasos...\n");
prop[0] = 0x1;
prop[1] = 0x0;
prop[2] = rloc;
prop[3] = 0x0;
prop[4] = 0x0;
prop[5] = 0x00010000;
prom_setprop(ph, name, "ranges", prop, sizeof(prop));
}
}
name = "/pci@80000000/ide@C,1";
ph = call_prom("finddevice", 1, 1, ADDR(name));
if (PHANDLE_VALID(ph)) {
prom_printf("Fixing up IDE interrupt on Pegasos...\n");
prop[0] = 14;
prop[1] = 0x0;
prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
prom_printf("Fixing up IDE class-code on Pegasos...\n");
rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
if (rc == sizeof(u32)) {
prop[0] &= ~0x5;
prom_setprop(ph, name, "class-code", prop, sizeof(u32));
}
}
}
#else
#define fixup_device_tree_chrp()
#endif
#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
static void __init fixup_device_tree_pmac(void)
{
phandle u3, i2c, mpic;
u32 u3_rev;
u32 interrupts[2];
u32 parent;
/* Some G5s have a missing interrupt definition, fix it up here */
u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
if (!PHANDLE_VALID(u3))
return;
i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
if (!PHANDLE_VALID(i2c))
return;
mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
if (!PHANDLE_VALID(mpic))
return;
/* check if proper rev of u3 */
if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
== PROM_ERROR)
return;
if (u3_rev < 0x35 || u3_rev > 0x39)
return;
/* does it need fixup ? */
if (prom_getproplen(i2c, "interrupts") > 0)
return;
prom_printf("fixing up bogus interrupts for u3 i2c...\n");
/* interrupt on this revision of u3 is number 0 and level */
interrupts[0] = 0;
interrupts[1] = 1;
prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
&interrupts, sizeof(interrupts));
parent = (u32)mpic;
prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
&parent, sizeof(parent));
}
#else
#define fixup_device_tree_pmac()
#endif
#ifdef CONFIG_PPC_EFIKA
/*
 * The MPC5200 FEC driver requires a phy-handle property to tell it how
* to talk to the phy. If the phy-handle property is missing, then this
* function is called to add the appropriate nodes and link it to the
* ethernet node.
*/
static void __init fixup_device_tree_efika_add_phy(void)
{
u32 node;
char prop[64];
int rv;
/* Check if /builtin/ethernet exists - bail if it doesn't */
node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
if (!PHANDLE_VALID(node))
return;
/* Check if the phy-handle property exists - bail if it does */
rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
if (rv <= 0)
return;
/*
* At this point the ethernet device doesn't have a phy described.
* Now we need to add the missing phy node and linkage
*/
/* Check for an MDIO bus node - if missing then create one */
node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
if (!PHANDLE_VALID(node)) {
prom_printf("Adding Ethernet MDIO node\n");
call_prom("interpret", 1, 1,
" s\" /builtin\" find-device"
" new-device"
" 1 encode-int s\" #address-cells\" property"
" 0 encode-int s\" #size-cells\" property"
" s\" mdio\" device-name"
" s\" fsl,mpc5200b-mdio\" encode-string"
" s\" compatible\" property"
" 0xf0003000 0x400 reg"
" 0x2 encode-int"
" 0x5 encode-int encode+"
" 0x3 encode-int encode+"
" s\" interrupts\" property"
" finish-device");
}
/* Check for a PHY device node - if missing then create one and
	 * give its phandle to the ethernet node */
node = call_prom("finddevice", 1, 1,
ADDR("/builtin/mdio/ethernet-phy"));
if (!PHANDLE_VALID(node)) {
prom_printf("Adding Ethernet PHY node\n");
call_prom("interpret", 1, 1,
" s\" /builtin/mdio\" find-device"
" new-device"
" s\" ethernet-phy\" device-name"
" 0x10 encode-int s\" reg\" property"
" my-self"
" ihandle>phandle"
" finish-device"
" s\" /builtin/ethernet\" find-device"
" encode-int"
" s\" phy-handle\" property"
" device-end");
}
}
static void __init fixup_device_tree_efika(void)
{
int sound_irq[3] = { 2, 2, 0 };
int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
3,4,0, 3,5,0, 3,6,0, 3,7,0,
3,8,0, 3,9,0, 3,10,0, 3,11,0,
3,12,0, 3,13,0, 3,14,0, 3,15,0 };
u32 node;
char prop[64];
int rv, len;
/* Check if we're really running on a EFIKA */
node = call_prom("finddevice", 1, 1, ADDR("/"));
if (!PHANDLE_VALID(node))
return;
rv = prom_getprop(node, "model", prop, sizeof(prop));
if (rv == PROM_ERROR)
return;
if (prom_strcmp(prop, "EFIKA5K2"))
return;
prom_printf("Applying EFIKA device tree fixups\n");
/* Claiming to be 'chrp' is death */
node = call_prom("finddevice", 1, 1, ADDR("/"));
rv = prom_getprop(node, "device_type", prop, sizeof(prop));
if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
/* CODEGEN,description is exposed in /proc/cpuinfo so
fix that too */
rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
prom_setprop(node, "/", "CODEGEN,description",
"Efika 5200B PowerPC System",
sizeof("Efika 5200B PowerPC System"));
/* Fixup bestcomm interrupts property */
node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
if (PHANDLE_VALID(node)) {
len = prom_getproplen(node, "interrupts");
if (len == 12) {
prom_printf("Fixing bestcomm interrupts property\n");
prom_setprop(node, "/builtin/bestcom", "interrupts",
bcomm_irq, sizeof(bcomm_irq));
}
}
/* Fixup sound interrupts property */
node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
if (PHANDLE_VALID(node)) {
rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
if (rv == PROM_ERROR) {
prom_printf("Adding sound interrupts property\n");
prom_setprop(node, "/builtin/sound", "interrupts",
sound_irq, sizeof(sound_irq));
}
}
/* Make sure ethernet phy-handle property exists */
fixup_device_tree_efika_add_phy();
}
#else
#define fixup_device_tree_efika()
#endif
#ifdef CONFIG_PPC_PASEMI_NEMO
/*
 * CFE supplied on Nemo is broken in several ways; the biggest
 * problem is that it reassigns ISA interrupts to unused mpic ints.
* Add an interrupt-controller property for the io-bridge to use
* and correct the ints so we can attach them to an irq_domain
*/
static void __init fixup_device_tree_pasemi(void)
{
u32 interrupts[2], parent, rval, val = 0;
char *name, *pci_name;
phandle iob, node;
/* Find the root pci node */
name = "/pxp@0,e0000000";
iob = call_prom("finddevice", 1, 1, ADDR(name));
if (!PHANDLE_VALID(iob))
return;
/* check if interrupt-controller node set yet */
	if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
return;
prom_printf("adding interrupt-controller property for SB600...\n");
prom_setprop(iob, name, "interrupt-controller", &val, 0);
pci_name = "/pxp@0,e0000000/pci@11";
node = call_prom("finddevice", 1, 1, ADDR(pci_name));
parent = ADDR(iob);
for( ; prom_next_node(&node); ) {
/* scan each node for one with an interrupt */
if (!PHANDLE_VALID(node))
continue;
rval = prom_getproplen(node, "interrupts");
if (rval == 0 || rval == PROM_ERROR)
continue;
prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
if ((interrupts[0] < 212) || (interrupts[0] > 222))
continue;
/* found a node, update both interrupts and interrupt-parent */
if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
interrupts[0] -= 203;
if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
interrupts[0] -= 213;
if (interrupts[0] == 221)
interrupts[0] = 14;
if (interrupts[0] == 222)
interrupts[0] = 8;
prom_setprop(node, pci_name, "interrupts", interrupts,
sizeof(interrupts));
prom_setprop(node, pci_name, "interrupt-parent", &parent,
sizeof(parent));
}
/*
	 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
* so that generic isa-bridge code can add the SB600 and its on-board
* peripherals.
*/
name = "/pxp@0,e0000000/io-bridge@0";
iob = call_prom("finddevice", 1, 1, ADDR(name));
if (!PHANDLE_VALID(iob))
return;
/* device_type is already set, just change it. */
prom_printf("Changing device_type of SB600 node...\n");
prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
}
#else /* !CONFIG_PPC_PASEMI_NEMO */
static inline void fixup_device_tree_pasemi(void) { }
#endif
static void __init fixup_device_tree(void)
{
fixup_device_tree_maple();
fixup_device_tree_maple_memory_controller();
fixup_device_tree_chrp();
fixup_device_tree_pmac();
fixup_device_tree_efika();
fixup_device_tree_pasemi();
}
static void __init prom_find_boot_cpu(void)
{
__be32 rval;
ihandle prom_cpu;
phandle cpu_pkg;
rval = 0;
if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
return;
prom_cpu = be32_to_cpu(rval);
cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
if (!PHANDLE_VALID(cpu_pkg))
return;
prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
prom.cpu = be32_to_cpu(rval);
prom_debug("Booting CPU hw index = %d\n", prom.cpu);
}
static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
{
#ifdef CONFIG_BLK_DEV_INITRD
if (r3 && r4 && r4 != 0xdeadbeef) {
__be64 val;
prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
prom_initrd_end = prom_initrd_start + r4;
val = cpu_to_be64(prom_initrd_start);
prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
&val, sizeof(val));
val = cpu_to_be64(prom_initrd_end);
prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
&val, sizeof(val));
reserve_mem(prom_initrd_start,
prom_initrd_end - prom_initrd_start);
prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
}
#endif /* CONFIG_BLK_DEV_INITRD */
}
#ifdef CONFIG_PPC_SVM
/*
* Perform the Enter Secure Mode ultracall.
*/
static int __init enter_secure_mode(unsigned long kbase, unsigned long fdt)
{
register unsigned long r3 asm("r3") = UV_ESM;
register unsigned long r4 asm("r4") = kbase;
register unsigned long r5 asm("r5") = fdt;
asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5));
return r3;
}
/*
* Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
*/
static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
{
int ret;
if (!prom_svm_enable)
return;
/* Switch to secure mode. */
prom_printf("Switching to secure mode.\n");
/*
* The ultravisor will do an integrity check of the kernel image but we
* relocated it so the check will fail. Restore the original image by
* relocating it back to the kernel virtual base address.
*/
relocate(KERNELBASE);
ret = enter_secure_mode(kbase, fdt);
/* Relocate the kernel again. */
relocate(kbase);
if (ret != U_SUCCESS) {
prom_printf("Returned %d from switching to secure mode.\n", ret);
prom_rtas_os_term("Switch to secure mode failed.\n");
}
}
#else
static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
{
}
#endif /* CONFIG_PPC_SVM */
/*
* We enter here early on, when the Open Firmware prom is still
 * handling exceptions and managing the MMU hash table for us.
*/
unsigned long __init prom_init(unsigned long r3, unsigned long r4,
unsigned long pp,
unsigned long r6, unsigned long r7,
unsigned long kbase)
{
unsigned long hdr;
#ifdef CONFIG_PPC32
unsigned long offset = reloc_offset();
reloc_got2(offset);
#endif
/*
* First zero the BSS
*/
memset(&__bss_start, 0, __bss_stop - __bss_start);
/*
* Init interface to Open Firmware, get some node references,
* like /chosen
*/
prom_init_client_services(pp);
/*
* See if this OF is old enough that we need to do explicit maps
* and other workarounds
*/
prom_find_mmu();
/*
* Init prom stdout device
*/
prom_init_stdout();
prom_printf("Preparing to boot %s", linux_banner);
/*
* Get default machine type. At this point, we do not differentiate
* between pSeries SMP and pSeries LPAR
*/
of_platform = prom_find_machine_type();
prom_printf("Detected machine type: %x\n", of_platform);
#ifndef CONFIG_NONSTATIC_KERNEL
/* Bail if this is a kdump kernel. */
if (PHYSICAL_START > 0)
prom_panic("Error: You can't boot a kdump kernel from OF!\n");
#endif
/*
* Check for an initrd
*/
prom_check_initrd(r3, r4);
/*
* Do early parsing of command line
*/
early_cmdline_parse();
#ifdef CONFIG_PPC_PSERIES
/*
* On pSeries, inform the firmware about our capabilities
*/
if (of_platform == PLATFORM_PSERIES ||
of_platform == PLATFORM_PSERIES_LPAR)
prom_send_capabilities();
#endif
/*
* Copy the CPU hold code
*/
if (of_platform != PLATFORM_POWERMAC)
copy_and_flush(0, kbase, 0x100, 0);
/*
* Initialize memory management within prom_init
*/
prom_init_mem();
/*
* Determine which cpu is actually running right _now_
*/
prom_find_boot_cpu();
/*
* Initialize display devices
*/
prom_check_displays();
#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
/*
* Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
* that uses the allocator, we need to make sure we get the top of memory
* available for us here...
*/
if (of_platform == PLATFORM_PSERIES)
prom_initialize_tce_table();
#endif
/*
* On non-powermacs, try to instantiate RTAS. PowerMacs don't
* have a usable RTAS implementation.
*/
if (of_platform != PLATFORM_POWERMAC)
prom_instantiate_rtas();
#ifdef CONFIG_PPC64
/* instantiate sml */
prom_instantiate_sml();
#endif
/*
* On non-powermacs, put all CPUs in spin-loops.
*
* PowerMacs use a different mechanism to spin CPUs
*
* (This must be done after instantiating RTAS)
*/
if (of_platform != PLATFORM_POWERMAC)
prom_hold_cpus();
/*
* Fill in some infos for use by the kernel later on
*/
if (prom_memory_limit) {
__be64 val = cpu_to_be64(prom_memory_limit);
prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
&val, sizeof(val));
}
#ifdef CONFIG_PPC64
if (prom_iommu_off)
prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
NULL, 0);
if (prom_iommu_force_on)
prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
NULL, 0);
if (prom_tce_alloc_start) {
prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
&prom_tce_alloc_start,
sizeof(prom_tce_alloc_start));
prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
&prom_tce_alloc_end,
sizeof(prom_tce_alloc_end));
}
#endif
/*
* Fixup any known bugs in the device-tree
*/
fixup_device_tree();
/*
* Now finally create the flattened device-tree
*/
prom_printf("copying OF device tree...\n");
flatten_device_tree();
/*
* in case stdin is USB and still active on IBM machines...
* Unfortunately quiesce crashes on some powermacs if we have
* closed stdin already (in particular the powerbook 101).
*/
if (of_platform != PLATFORM_POWERMAC)
prom_close_stdin();
/*
* Call OF "quiesce" method to shut down pending DMA's from
* devices etc...
*/
prom_printf("Quiescing Open Firmware ...\n");
call_prom("quiesce", 0, 0);
/*
* And finally, call the kernel passing it the flattened device
* tree and NULL as r5, thus triggering the new entry point which
* is common to us and kexec
*/
hdr = dt_header_start;
prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
prom_debug("->dt_header_start=0x%lx\n", hdr);
#ifdef CONFIG_PPC32
reloc_got2(-offset);
#endif
/* Move to secure memory if we're supposed to be secure guests. */
setup_secure_guest(kbase, hdr);
__start(hdr, kbase, 0, 0, 0, 0, 0);
return 0;
}
| linux-master | arch/powerpc/kernel/prom_init.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pci_dn.c
*
* Copyright (C) 2001 Todd Inglett, IBM Corporation
*
* PCI manipulation via device_nodes.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
/*
* The function is used to find the firmware data of one
* specific PCI device, which is attached to the indicated
* PCI bus. For VFs, their firmware data is linked to that
 * of the PF's bridge. For other devices, their firmware
 * data is linked to that of their own bridge.
*/
static struct pci_dn *pci_bus_to_pdn(struct pci_bus *bus)
{
struct pci_bus *pbus;
struct device_node *dn;
struct pci_dn *pdn;
/*
	 * We probably have a virtual bus which doesn't
	 * have an associated bridge.
*/
pbus = bus;
while (pbus) {
if (pci_is_root_bus(pbus) || pbus->self)
break;
pbus = pbus->parent;
}
/*
	 * Except for virtual buses, all PCI buses should
	 * have device nodes.
*/
dn = pci_bus_to_OF_node(pbus);
pdn = dn ? PCI_DN(dn) : NULL;
return pdn;
}
struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
int devfn)
{
struct device_node *dn = NULL;
struct pci_dn *parent, *pdn;
struct pci_dev *pdev = NULL;
/* Fast path: fetch from PCI device */
list_for_each_entry(pdev, &bus->devices, bus_list) {
if (pdev->devfn == devfn) {
if (pdev->dev.archdata.pci_data)
return pdev->dev.archdata.pci_data;
dn = pci_device_to_OF_node(pdev);
break;
}
}
/* Fast path: fetch from device node */
pdn = dn ? PCI_DN(dn) : NULL;
if (pdn)
return pdn;
/* Slow path: fetch from firmware data hierarchy */
parent = pci_bus_to_pdn(bus);
if (!parent)
return NULL;
list_for_each_entry(pdn, &parent->child_list, list) {
if (pdn->busno == bus->number &&
pdn->devfn == devfn)
return pdn;
}
return NULL;
}
struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
{
struct device_node *dn;
struct pci_dn *parent, *pdn;
/* Search device directly */
if (pdev->dev.archdata.pci_data)
return pdev->dev.archdata.pci_data;
/* Check device node */
dn = pci_device_to_OF_node(pdev);
pdn = dn ? PCI_DN(dn) : NULL;
if (pdn)
return pdn;
/*
* VFs don't have device nodes. We hook their
* firmware data to PF's bridge.
*/
parent = pci_bus_to_pdn(pdev->bus);
if (!parent)
return NULL;
list_for_each_entry(pdn, &parent->child_list, list) {
if (pdn->busno == pdev->bus->number &&
pdn->devfn == pdev->devfn)
return pdn;
}
return NULL;
}
#ifdef CONFIG_EEH
static struct eeh_dev *eeh_dev_init(struct pci_dn *pdn)
{
struct eeh_dev *edev;
/* Allocate EEH device */
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev)
return NULL;
/* Associate EEH device with OF node */
pdn->edev = edev;
edev->pdn = pdn;
edev->bdfn = (pdn->busno << 8) | pdn->devfn;
edev->controller = pdn->phb;
return edev;
}
#endif /* CONFIG_EEH */
#ifdef CONFIG_PCI_IOV
static struct pci_dn *add_one_sriov_vf_pdn(struct pci_dn *parent,
int busno, int devfn)
{
struct pci_dn *pdn;
/* Except PHB, we always have the parent */
if (!parent)
return NULL;
pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
if (!pdn)
return NULL;
pdn->phb = parent->phb;
pdn->parent = parent;
pdn->busno = busno;
pdn->devfn = devfn;
pdn->pe_number = IODA_INVALID_PE;
INIT_LIST_HEAD(&pdn->child_list);
INIT_LIST_HEAD(&pdn->list);
list_add_tail(&pdn->list, &parent->child_list);
return pdn;
}
struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev)
{
struct pci_dn *parent, *pdn;
int i;
/* Only support IOV for now */
if (WARN_ON(!pdev->is_physfn))
return NULL;
/* Check if VFs have been populated */
pdn = pci_get_pdn(pdev);
if (!pdn || (pdn->flags & PCI_DN_FLAG_IOV_VF))
return NULL;
pdn->flags |= PCI_DN_FLAG_IOV_VF;
parent = pci_bus_to_pdn(pdev->bus);
if (!parent)
return NULL;
for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
struct eeh_dev *edev __maybe_unused;
pdn = add_one_sriov_vf_pdn(parent,
pci_iov_virtfn_bus(pdev, i),
pci_iov_virtfn_devfn(pdev, i));
if (!pdn) {
dev_warn(&pdev->dev, "%s: Cannot create firmware data for VF#%d\n",
__func__, i);
return NULL;
}
#ifdef CONFIG_EEH
/* Create the EEH device for the VF */
edev = eeh_dev_init(pdn);
BUG_ON(!edev);
/* FIXME: these should probably be populated by the EEH probe */
edev->physfn = pdev;
edev->vf_index = i;
#endif /* CONFIG_EEH */
}
return pci_get_pdn(pdev);
}
void remove_sriov_vf_pdns(struct pci_dev *pdev)
{
struct pci_dn *parent;
struct pci_dn *pdn, *tmp;
int i;
/* Only support IOV PF for now */
if (WARN_ON(!pdev->is_physfn))
return;
/* Check if VFs have been populated */
pdn = pci_get_pdn(pdev);
if (!pdn || !(pdn->flags & PCI_DN_FLAG_IOV_VF))
return;
pdn->flags &= ~PCI_DN_FLAG_IOV_VF;
parent = pci_bus_to_pdn(pdev->bus);
if (!parent)
return;
/*
	 * We might introduce a flag to pci_dn in the future
	 * so that we can release the VFs' firmware data in
	 * batch mode.
*/
for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
struct eeh_dev *edev __maybe_unused;
list_for_each_entry_safe(pdn, tmp,
&parent->child_list, list) {
if (pdn->busno != pci_iov_virtfn_bus(pdev, i) ||
pdn->devfn != pci_iov_virtfn_devfn(pdev, i))
continue;
#ifdef CONFIG_EEH
/*
* Release EEH state for this VF. The PCI core
* has already torn down the pci_dev for this VF, but
			 * we're responsible for removing the eeh_dev since it
* has the same lifetime as the pci_dn that spawned it.
*/
edev = pdn_to_eeh_dev(pdn);
if (edev) {
/*
* We allocate pci_dn's for the totalvfs count,
* but only the vfs that were activated
* have a configured PE.
*/
if (edev->pe)
eeh_pe_tree_remove(edev);
pdn->edev = NULL;
kfree(edev);
}
#endif /* CONFIG_EEH */
if (!list_empty(&pdn->list))
list_del(&pdn->list);
kfree(pdn);
}
}
}
#endif /* CONFIG_PCI_IOV */
struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
struct device_node *dn)
{
const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL);
const __be32 *regs;
struct device_node *parent;
struct pci_dn *pdn;
#ifdef CONFIG_EEH
struct eeh_dev *edev;
#endif
pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
if (pdn == NULL)
return NULL;
dn->data = pdn;
pdn->phb = hose;
pdn->pe_number = IODA_INVALID_PE;
regs = of_get_property(dn, "reg", NULL);
if (regs) {
u32 addr = of_read_number(regs, 1);
/* First register entry is addr (00BBSS00) */
pdn->busno = (addr >> 16) & 0xff;
pdn->devfn = (addr >> 8) & 0xff;
}
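	/*
	 * A worked example with a hypothetical value: a first "reg" cell of
	 * 0x00012800 decodes as busno = 0x01 and devfn = 0x28, i.e. PCI
	 * device 5, function 0 (devfn = dev << 3 | fn).
	 */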
/* vendor/device IDs and class code */
regs = of_get_property(dn, "vendor-id", NULL);
pdn->vendor_id = regs ? of_read_number(regs, 1) : 0;
regs = of_get_property(dn, "device-id", NULL);
pdn->device_id = regs ? of_read_number(regs, 1) : 0;
regs = of_get_property(dn, "class-code", NULL);
pdn->class_code = regs ? of_read_number(regs, 1) : 0;
/* Extended config space */
pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1);
/* Create EEH device */
#ifdef CONFIG_EEH
edev = eeh_dev_init(pdn);
if (!edev) {
kfree(pdn);
return NULL;
}
#endif
/* Attach to parent node */
INIT_LIST_HEAD(&pdn->child_list);
INIT_LIST_HEAD(&pdn->list);
parent = of_get_parent(dn);
pdn->parent = parent ? PCI_DN(parent) : NULL;
of_node_put(parent);
if (pdn->parent)
list_add_tail(&pdn->list, &pdn->parent->child_list);
return pdn;
}
EXPORT_SYMBOL_GPL(pci_add_device_node_info);
void pci_remove_device_node_info(struct device_node *dn)
{
struct pci_dn *pdn = dn ? PCI_DN(dn) : NULL;
struct device_node *parent;
struct pci_dev *pdev;
#ifdef CONFIG_EEH
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
if (edev)
edev->pdn = NULL;
#endif
if (!pdn)
return;
WARN_ON(!list_empty(&pdn->child_list));
list_del(&pdn->list);
/* Drop the parent pci_dn's ref to our backing dt node */
parent = of_get_parent(dn);
if (parent)
of_node_put(parent);
/*
* At this point we *might* still have a pci_dev that was
* instantiated from this pci_dn. So defer free()ing it until
* the pci_dev's release function is called.
*/
pdev = pci_get_domain_bus_and_slot(pdn->phb->global_number,
pdn->busno, pdn->devfn);
if (pdev) {
/* NB: pdev has a ref to dn */
pci_dbg(pdev, "marked pdn (from %pOF) as dead\n", dn);
pdn->flags |= PCI_DN_FLAG_DEAD;
} else {
dn->data = NULL;
kfree(pdn);
}
pci_dev_put(pdev);
}
EXPORT_SYMBOL_GPL(pci_remove_device_node_info);
/*
 * Traverse a device tree, stopping at each PCI device in the tree.
* This is done depth first. As each node is processed, a "pre"
* function is called and the children are processed recursively.
*
* The "pre" func returns a value. If non-zero is returned from
* the "pre" func, the traversal stops and this value is returned.
* This return value is useful when using traverse as a method of
* finding a device.
*
* NOTE: we do not run the func for devices that do not appear to
 * be PCI, except for the start node, which we assume is PCI (this is good
* because the start node is often a phb which may be missing PCI
* properties).
* We use the class-code as an indicator. If we run into
* one of these nodes we also assume its siblings are non-pci for
* performance.
*/
void *pci_traverse_device_nodes(struct device_node *start,
void *(*fn)(struct device_node *, void *),
void *data)
{
struct device_node *dn, *nextdn;
void *ret;
	/* We started with a phb, iterate all children */
for (dn = start->child; dn; dn = nextdn) {
const __be32 *classp;
u32 class = 0;
nextdn = NULL;
classp = of_get_property(dn, "class-code", NULL);
if (classp)
class = of_read_number(classp, 1);
if (fn) {
ret = fn(dn, data);
if (ret)
return ret;
}
/* If we are a PCI bridge, go down */
if (dn->child && ((class >> 8) == PCI_CLASS_BRIDGE_PCI ||
(class >> 8) == PCI_CLASS_BRIDGE_CARDBUS))
/* Depth first...do children */
nextdn = dn->child;
else if (dn->sibling)
/* ok, try next sibling instead. */
nextdn = dn->sibling;
if (!nextdn) {
/* Walk up to next valid sibling. */
do {
dn = dn->parent;
if (dn == start)
return NULL;
} while (dn->sibling == NULL);
nextdn = dn->sibling;
}
}
return NULL;
}
EXPORT_SYMBOL_GPL(pci_traverse_device_nodes);
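/*
 * A minimal usage sketch (hypothetical, not taken from this file): the "pre"
 * callback can stop the walk early by returning non-NULL, for example to find
 * the first node with a given class code.
 *
 *	static void *match_class(struct device_node *dn, void *data)
 *	{
 *		const __be32 *classp = of_get_property(dn, "class-code", NULL);
 *
 *		if (classp && of_read_number(classp, 1) == *(u32 *)data)
 *			return dn;
 *		return NULL;
 *	}
 *
 * and then: pci_traverse_device_nodes(phb->dn, match_class, &wanted_class);
 */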
static void *add_pdn(struct device_node *dn, void *data)
{
struct pci_controller *hose = data;
struct pci_dn *pdn;
pdn = pci_add_device_node_info(hose, dn);
if (!pdn)
return ERR_PTR(-ENOMEM);
return NULL;
}
/**
* pci_devs_phb_init_dynamic - setup pci devices under this PHB
* phb: pci-to-host bridge (top-level bridge connecting to cpu)
*
* This routine is called both during boot, (before the memory
* subsystem is set up, before kmalloc is valid) and during the
* dynamic lpar operation of adding a PHB to a running system.
*/
void pci_devs_phb_init_dynamic(struct pci_controller *phb)
{
struct device_node *dn = phb->dn;
struct pci_dn *pdn;
/* PHB nodes themselves must not match */
pdn = pci_add_device_node_info(phb, dn);
if (pdn) {
pdn->devfn = pdn->busno = -1;
pdn->vendor_id = pdn->device_id = pdn->class_code = 0;
pdn->phb = phb;
phb->pci_data = pdn;
}
/* Update dn->phb ptrs for new phb and children devices */
pci_traverse_device_nodes(dn, add_pdn, phb);
}
static void pci_dev_pdn_setup(struct pci_dev *pdev)
{
struct pci_dn *pdn;
if (pdev->dev.archdata.pci_data)
return;
/* Setup the fast path */
pdn = pci_get_pdn(pdev);
pdev->dev.archdata.pci_data = pdn;
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pci_dev_pdn_setup);
| linux-master | arch/powerpc/kernel/pci_dn.c |