python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
#define MALLOC_SIZE (0x10000 * 10) /* Ought to be enough .. */
/* The data cache was reloaded from local core's L3 due to a demand load */
#define EventCode 0x21c040
/*
* A perf sampling test for mmcr1
* fields : pmcxsel, unit, cache.
*/
static int mmcr1_sel_unit_cache(void)
{
	struct event event;
	u64 *intr_regs;
	char *p;
	int i;

	/* Check for platform support for the test */
	SKIP_IF(check_pvr_for_sampling_tests());

	p = malloc(MALLOC_SIZE);
	FAIL_IF(!p);

	/* Init the event for the sampling test */
	event_init_sampling(&event, EventCode);
	event.attr.sample_regs_intr = platform_extended_mask;
	event.attr.sample_period = 1;
	FAIL_IF(event_open(&event));
	event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);

	/*
	 * Check the enable/disable return codes like the other
	 * sampling tests do, so a failing ioctl is not silently
	 * ignored.
	 */
	FAIL_IF(event_enable(&event));

	/* workload to make the event overflow */
	for (i = 0; i < MALLOC_SIZE; i += 0x10000)
		p[i] = i;

	FAIL_IF(event_disable(&event));

	/* Check for sample count */
	FAIL_IF(!collect_samples(event.mmap_buffer));

	intr_regs = get_intr_regs(&event, event.mmap_buffer);

	/* Check for intr_regs */
	FAIL_IF(!intr_regs);

	/*
	 * Verify that pmcxsel, unit and cache field of MMCR1
	 * match with corresponding event code fields
	 */
	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, pmcxsel) !=
		get_mmcr1_pmcxsel(get_reg_value(intr_regs, "MMCR1"), 1));
	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, unit) !=
		get_mmcr1_unit(get_reg_value(intr_regs, "MMCR1"), 1));
	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, cache) !=
		get_mmcr1_cache(get_reg_value(intr_regs, "MMCR1"), 1));

	free(p);
	event_close(&event);
	return 0;
}
int main(void)
{
	/*
	 * Return the harness result directly, as the sibling sampling
	 * tests do; FAIL_IF(test_harness(...)) would fall off the end
	 * of main() on success and rely on the implicit return 0.
	 */
	return test_harness(mmcr1_sel_unit_cache, "mmcr1_sel_unit_cache");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_sel_unit_cache_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void thirty_two_instruction_loop(int loops);
/*
* A perf sampling test for mmcr0
* fields: fc56, pmc1ce.
*/
static int mmcr0_fc56_pmc1ce(void)
{
struct event event;
u64 *intr_regs;
/* Check for platform support for the test */
SKIP_IF(check_pvr_for_sampling_tests());
/* Init the event for the sampling test */
event_init_sampling(&event, 0x1001e);
event.attr.sample_regs_intr = platform_extended_mask;
FAIL_IF(event_open(&event));
event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
FAIL_IF(event_enable(&event));
/* workload to make the event overflow */
thirty_two_instruction_loop(10000);
FAIL_IF(event_disable(&event));
/* Check for sample count */
FAIL_IF(!collect_samples(event.mmap_buffer));
intr_regs = get_intr_regs(&event, event.mmap_buffer);
/* Check for intr_regs */
FAIL_IF(!intr_regs);
/* Verify that fc56, pmc1ce fields are set in MMCR0 */
FAIL_IF(!get_mmcr0_fc56(get_reg_value(intr_regs, "MMCR0"), 1));
FAIL_IF(!get_mmcr0_pmc1ce(get_reg_value(intr_regs, "MMCR0"), 1));
event_close(&event);
return 0;
}
int main(void)
{
	/* Run the testcase under the powerpc selftest harness */
	return test_harness(mmcr0_fc56_pmc1ce, "mmcr0_fc56_pmc1ce");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc1ce_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
/*
* A perf sampling test for making sure
* enabling branch stack doesn't crash in any
* environment, say:
* - With generic compat PMU
* - without any PMU registered
* - With platform specific PMU
* A fix for bhrb sampling crash was added in kernel
* via commit: b460b512417a ("powerpc/perf: Fix crashes
* with generic_compat_pmu & BHRB")
*
* This testcase exercises this code by doing branch
* stack enable for software event. s/w event is used
* since software event will work even in platform
* without PMU.
*/
static int bhrb_no_crash_wo_pmu_test(void)
{
struct event event;
/*
* Init the event for the sampling test.
* This uses software event which works on
* any platform.
*/
event_init_opts(&event, 0, PERF_TYPE_SOFTWARE, "cycles");
event.attr.sample_period = 1000;
event.attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
event.attr.disabled = 1;
/*
* Return code of event_open is not
* considered since test just expects no crash from
* using PERF_SAMPLE_BRANCH_STACK. Also for environment
* like generic compat PMU, branch stack is unsupported.
*/
event_open(&event);
event_close(&event);
return 0;
}
int main(void)
{
	/* Run the testcase under the powerpc selftest harness */
	return test_harness(bhrb_no_crash_wo_pmu_test, "bhrb_no_crash_wo_pmu_test");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_no_crash_wo_pmu_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
/*
* A perf sampling test for making sure
* sampling with -intr-regs doesn't crash
* in any environment, say:
* - With generic compat PMU
* - without any PMU registered
* - With platform specific PMU.
* A fix for crash with intr_regs was
* addressed in commit: f75e7d73bdf7 in kernel.
*
* This testcase exercises this code path by doing
* intr_regs using software event. Software event is
* used since s/w event will work even in platform
* without PMU.
*/
static int intr_regs_no_crash_wo_pmu_test(void)
{
struct event event;
/*
* Init the event for the sampling test.
* This uses software event which works on
* any platform.
*/
event_init_opts(&event, 0, PERF_TYPE_SOFTWARE, "cycles");
event.attr.sample_period = 1000;
event.attr.sample_type = PERF_SAMPLE_REGS_INTR;
event.attr.disabled = 1;
/*
* Return code of event_open is not considered
* since test just expects no crash from using
* PERF_SAMPLE_REGS_INTR.
*/
event_open(&event);
event_close(&event);
return 0;
}
int main(void)
{
	/* Run the testcase under the powerpc selftest harness */
	return test_harness(intr_regs_no_crash_wo_pmu_test, "intr_regs_no_crash_wo_pmu_test");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/intr_regs_no_crash_wo_pmu_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Madhavan Srinivasan, IBM Corp.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void thirty_two_instruction_loop(int loops);
static bool is_hv;
static void sig_usr2_handler(int signum, siginfo_t *info, void *data)
{
	ucontext_t *uctx = data;

	/* Record whether MSR in the signal context has the HV bit set */
	is_hv = (uctx->uc_mcontext.gp_regs[PT_MSR] & MSR_HV) != 0;
}
/*
* A perf sampling test for mmcr2
* fields : fcs, fch.
*/
/*
 * Raise SIGUSR2 at ourselves first so sig_usr2_handler records in
 * the global is_hv whether we run with MSR_HV set; that decides
 * below whether exclude_kernel shows up in the fch (hypervisor) or
 * fcs (supervisor) freeze field of MMCR2.
 */
static int mmcr2_fcs_fch(void)
{
	struct sigaction sigact = {
		.sa_sigaction = sig_usr2_handler,
		.sa_flags = SA_SIGINFO
	};
	struct event event;
	u64 *intr_regs;

	FAIL_IF(sigaction(SIGUSR2, &sigact, NULL));
	FAIL_IF(kill(getpid(), SIGUSR2));

	/* Check for platform support for the test */
	SKIP_IF(check_pvr_for_sampling_tests());

	/* Init the event for the sampling test */
	event_init_sampling(&event, 0x1001e);
	event.attr.sample_regs_intr = platform_extended_mask;
	/* Request user-only counting so a freeze bit must be programmed */
	event.attr.exclude_kernel = 1;
	FAIL_IF(event_open(&event));
	event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);

	FAIL_IF(event_enable(&event));

	/* workload to make the event overflow */
	thirty_two_instruction_loop(10000);

	FAIL_IF(event_disable(&event));

	/* Check for sample count */
	FAIL_IF(!collect_samples(event.mmap_buffer));

	intr_regs = get_intr_regs(&event, event.mmap_buffer);

	/* Check for intr_regs */
	FAIL_IF(!intr_regs);

	/*
	 * Verify that fcs and fch field of MMCR2 match
	 * with corresponding modifier fields.
	 */
	if (is_hv)
		FAIL_IF(event.attr.exclude_kernel !=
			get_mmcr2_fch(get_reg_value(intr_regs, "MMCR2"), 1));
	else
		FAIL_IF(event.attr.exclude_kernel !=
			get_mmcr2_fcs(get_reg_value(intr_regs, "MMCR2"), 1));

	event_close(&event);
	return 0;
}
int main(void)
{
	/* Run the testcase under the powerpc selftest harness */
	return test_harness(mmcr2_fcs_fch, "mmcr2_fcs_fch");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_fcs_fch_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void indirect_branch_loop(void);
/* Instructions */
#define EventCode 0x500fa
/* ifm field for indirect branch mode */
#define IFM_IND_BRANCH 0x2
/*
* A perf sampling test for mmcra
* field: ifm for bhrb ind_call.
*/
static int mmcra_bhrb_ind_call_test(void)
{
struct event event;
u64 *intr_regs;
/*
* Check for platform support for the test.
* This test is only aplicable on power10
*/
SKIP_IF(check_pvr_for_sampling_tests());
SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
/* Init the event for the sampling test */
event_init_sampling(&event, EventCode);
event.attr.sample_regs_intr = platform_extended_mask;
event.attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_IND_CALL;
event.attr.exclude_kernel = 1;
FAIL_IF(event_open(&event));
event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
FAIL_IF(event_enable(&event));
/* workload to make the event overflow */
indirect_branch_loop();
FAIL_IF(event_disable(&event));
intr_regs = get_intr_regs(&event, event.mmap_buffer);
/* Check for intr_regs */
FAIL_IF(!intr_regs);
/* Verify that ifm bit is set properly in MMCRA */
FAIL_IF(get_mmcra_ifm(get_reg_value(intr_regs, "MMCRA"), 5) != IFM_IND_BRANCH);
event_close(&event);
return 0;
}
int main(void)
{
	/* Run the testcase under the powerpc selftest harness */
	return test_harness(mmcra_bhrb_ind_call_test, "mmcra_bhrb_ind_call_test");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_ind_call_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void thirty_two_instruction_loop(int loops);
/* Instructions */
#define EventCode 0x500fa
/* ifm field for any branch mode */
#define IFM_ANY_BRANCH 0x0
/*
* A perf sampling test for mmcra
* field: ifm for bhrb any call.
*/
static int mmcra_bhrb_any_test(void)
{
struct event event;
u64 *intr_regs;
/* Check for platform support for the test */
SKIP_IF(check_pvr_for_sampling_tests());
/* Init the event for the sampling test */
event_init_sampling(&event, EventCode);
event.attr.sample_regs_intr = platform_extended_mask;
event.attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
event.attr.exclude_kernel = 1;
FAIL_IF(event_open(&event));
event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
FAIL_IF(event_enable(&event));
/* workload to make the event overflow */
thirty_two_instruction_loop(10000);
FAIL_IF(event_disable(&event));
intr_regs = get_intr_regs(&event, event.mmap_buffer);
/* Check for intr_regs */
FAIL_IF(!intr_regs);
/* Verify that ifm bit is set properly in MMCRA */
FAIL_IF(get_mmcra_ifm(get_reg_value(intr_regs, "MMCRA"), 5) != IFM_ANY_BRANCH);
event_close(&event);
return 0;
}
int main(void)
{
	/* Run the testcase under the powerpc selftest harness */
	return test_harness(mmcra_bhrb_any_test, "mmcra_bhrb_any_test");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_any_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void thirty_two_instruction_loop(int loops);
/* Instructions */
#define EventCode 0x500fa
/*
* A perf sampling test for mmcra
* field: bhrb_disable.
*/
static int mmcra_bhrb_disable_test(void)
{
struct event event;
u64 *intr_regs;
/*
* Check for platform support for the test.
* This test is only aplicable on power10
*/
SKIP_IF(check_pvr_for_sampling_tests());
SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
/* Init the event for the sampling test */
event_init_sampling(&event, EventCode);
event.attr.sample_regs_intr = platform_extended_mask;
event.attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
event.attr.exclude_kernel = 1;
FAIL_IF(event_open(&event));
event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
FAIL_IF(event_enable(&event));
/* workload to make the event overflow */
thirty_two_instruction_loop(10000);
FAIL_IF(event_disable(&event));
intr_regs = get_intr_regs(&event, event.mmap_buffer);
/* Check for intr_regs */
FAIL_IF(!intr_regs);
/* Verify that bhrb_disable bit is set in MMCRA */
FAIL_IF(get_mmcra_bhrb_disable(get_reg_value(intr_regs, "MMCRA"), 5));
event_close(&event);
return 0;
}
int main(void)
{
	/* Run the testcase under the powerpc selftest harness */
	return test_harness(mmcra_bhrb_disable_test, "mmcra_bhrb_disable_test");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Madhavan Srinivasan, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
/* All successful D-side store dispatches for this thread */
#define EventCode 0x010000046080
#define MALLOC_SIZE (0x10000 * 10) /* Ought to be enough .. */
/*
* A perf sampling test for mmcr2
* fields : l2l3
*/
static int mmcr2_l2l3(void)
{
struct event event;
u64 *intr_regs;
char *p;
int i;
/* Check for platform support for the test */
SKIP_IF(check_pvr_for_sampling_tests());
SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
/* Init the event for the sampling test */
event_init_sampling(&event, EventCode);
event.attr.sample_regs_intr = platform_extended_mask;
FAIL_IF(event_open(&event));
event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
FAIL_IF(event_enable(&event));
/* workload to make the event overflow */
p = malloc(MALLOC_SIZE);
FAIL_IF(!p);
for (i = 0; i < MALLOC_SIZE; i += 0x10000)
p[i] = i;
FAIL_IF(event_disable(&event));
/* Check for sample count */
FAIL_IF(!collect_samples(event.mmap_buffer));
intr_regs = get_intr_regs(&event, event.mmap_buffer);
/* Check for intr_regs */
FAIL_IF(!intr_regs);
/*
* Verify that l2l3 field of MMCR2 match with
* corresponding event code field
*/
FAIL_IF(EV_CODE_EXTRACT(event.attr.config, l2l3) !=
get_mmcr2_l2l3(get_reg_value(intr_regs, "MMCR2"), 4));
event_close(&event);
free(p);
return 0;
}
int main(void)
{
	/* Run the testcase under the powerpc selftest harness */
	return test_harness(mmcr2_l2l3, "mmcr2_l2l3");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_l2l3_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void thirty_two_instruction_loop(int loops);
/*
* A perf sampling test for mmcr0
* fields: fc56_pmc56
*/
static int mmcr0_fc56_pmc56(void)
{
struct event event;
u64 *intr_regs;
/* Check for platform support for the test */
SKIP_IF(check_pvr_for_sampling_tests());
/* Init the event for the sampling test */
event_init_sampling(&event, 0x500fa);
event.attr.sample_regs_intr = platform_extended_mask;
FAIL_IF(event_open(&event));
event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
FAIL_IF(event_enable(&event));
/* workload to make the event overflow */
thirty_two_instruction_loop(10000);
FAIL_IF(event_disable(&event));
/* Check for sample count */
FAIL_IF(!collect_samples(event.mmap_buffer));
intr_regs = get_intr_regs(&event, event.mmap_buffer);
/* Check for intr_regs */
FAIL_IF(!intr_regs);
/* Verify that fc56 is not set in MMCR0 when using PMC5 */
FAIL_IF(get_mmcr0_fc56(get_reg_value(intr_regs, "MMCR0"), 5));
event_close(&event);
return 0;
}
int main(void)
{
	/* Run the testcase under the powerpc selftest harness */
	return test_harness(mmcr0_fc56_pmc56, "mmcr0_fc56_pmc56");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc56_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
* Copyright 2022, Madhavan Srinivasan, IBM Corp.
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <unistd.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <ctype.h>
#include "misc.h"
#define PAGE_SIZE sysconf(_SC_PAGESIZE)
/* Storage for platform version */
int pvr;
u64 platform_extended_mask;
/* Mask and Shift for Event code fields */
int ev_mask_pmcxsel, ev_shift_pmcxsel; //pmcxsel field
int ev_mask_marked, ev_shift_marked; //marked filed
int ev_mask_comb, ev_shift_comb; //combine field
int ev_mask_unit, ev_shift_unit; //unit field
int ev_mask_pmc, ev_shift_pmc; //pmc field
int ev_mask_cache, ev_shift_cache; //Cache sel field
int ev_mask_sample, ev_shift_sample; //Random sampling field
int ev_mask_thd_sel, ev_shift_thd_sel; //thresh_sel field
int ev_mask_thd_start, ev_shift_thd_start; //thresh_start field
int ev_mask_thd_stop, ev_shift_thd_stop; //thresh_stop field
int ev_mask_thd_cmp, ev_shift_thd_cmp; //thresh cmp field
int ev_mask_sm, ev_shift_sm; //SDAR mode field
int ev_mask_rsq, ev_shift_rsq; //radix scope qual field
int ev_mask_l2l3, ev_shift_l2l3; //l2l3 sel field
int ev_mask_mmcr3_src, ev_shift_mmcr3_src; //mmcr3 field
/*
 * Initialise the global mask/shift pairs used to extract individual
 * fields (pmcxsel, unit, pmc, thresholds, ...) from a raw event
 * code. Field placement differs between POWER9 and POWER10, hence
 * the switch on the global pvr (set by platform_check_for_tests()).
 */
static void init_ev_encodes(void)
{
	/* Fields laid out identically on POWER9 and POWER10 */
	ev_mask_pmcxsel = 0xff;
	ev_shift_pmcxsel = 0;
	ev_mask_marked = 1;
	ev_shift_marked = 8;
	ev_mask_unit = 0xf;
	ev_shift_unit = 12;
	ev_mask_pmc = 0xf;
	ev_shift_pmc = 16;
	ev_mask_sample = 0x1f;
	ev_shift_sample = 24;
	ev_mask_thd_sel = 0x7;
	ev_shift_thd_sel = 29;
	ev_mask_thd_start = 0xf;
	ev_shift_thd_start = 36;
	ev_mask_thd_stop = 0xf;
	ev_shift_thd_stop = 32;

	switch (pvr) {
	case POWER10:
		/* P10 adds rsq, l2l3 and mmcr3_src; thd_cmp moves to config1 bits 0-17 */
		ev_mask_thd_cmp = 0x3ffff;
		ev_shift_thd_cmp = 0;
		ev_mask_rsq = 1;
		ev_shift_rsq = 9;
		ev_mask_comb = 3;
		ev_shift_comb = 10;
		ev_mask_cache = 3;
		ev_shift_cache = 20;
		ev_mask_sm = 0x3;
		ev_shift_sm = 22;
		ev_mask_l2l3 = 0x1f;
		ev_shift_l2l3 = 40;
		ev_mask_mmcr3_src = 0x7fff;
		ev_shift_mmcr3_src = 45;
		break;
	case POWER9:
		ev_mask_comb = 3;
		ev_shift_comb = 10;
		ev_mask_cache = 0xf;
		ev_shift_cache = 20;
		ev_mask_thd_cmp = 0x3ff;
		ev_shift_thd_cmp = 40;
		ev_mask_sm = 0x3;
		ev_shift_sm = 50;
		break;
	default:
		/* Unknown PVR: abort, the encodings would be garbage */
		FAIL_IF_EXIT(1);
	}
}
/* Return the extended regs mask value */
static u64 perf_get_platform_reg_mask(void)
{
	/* Pick the extended regs mask matching the supported ISA level */
	if (have_hwcap2(PPC_FEATURE2_ARCH_3_1))
		return PERF_POWER10_MASK;
	else if (have_hwcap2(PPC_FEATURE2_ARCH_3_00))
		return PERF_POWER9_MASK;
	else
		return -1;
}
/*
 * Probe whether the kernel accepts sample_regs_intr with the
 * platform's extended regs mask. Returns 0 when supported, -1
 * otherwise. The probe event is closed again so the fd is not
 * leaked (the original left the opened fd dangling).
 */
int check_extended_regs_support(void)
{
	int fd;
	struct event event;

	event_init(&event, 0x1001e);

	event.attr.type = 4;
	event.attr.sample_period = 1;
	event.attr.disabled = 1;
	event.attr.sample_type = PERF_SAMPLE_REGS_INTR;
	event.attr.sample_regs_intr = platform_extended_mask;

	fd = event_open(&event);
	if (fd != -1) {
		/* Probe only: release the event before reporting success */
		event_close(&event);
		return 0;
	}

	return -1;
}
/*
 * Sampling tests are supported only on POWER9/POWER10, and only
 * when a PMU driver is registered (PPC_FEATURE2_EBB in AT_HWCAP2).
 * Returns 0 when supported, -1 otherwise.
 */
int platform_check_for_tests(void)
{
	pvr = PVR_VER(mfspr(SPRN_PVR));

	if ((pvr == POWER10 || pvr == POWER9) &&
	    have_hwcap2(PPC_FEATURE2_EBB) &&
	    have_hwcap2(PPC_FEATURE2_ARCH_3_00))
		return 0;

	printf("%s: Tests unsupported for this platform\n", __func__);
	return -1;
}
/*
 * Full precondition check for the sampling tests: supported
 * platform, extended regs support, and initialised event-code
 * field encodings. Returns 0 on success, -1 otherwise.
 */
int check_pvr_for_sampling_tests(void)
{
	SKIP_IF(platform_check_for_tests());

	platform_extended_mask = perf_get_platform_reg_mask();

	if (!check_extended_regs_support()) {
		init_ev_encodes();
		return 0;
	}

	printf("%s: Sampling tests un-supported\n", __func__);
	return -1;
}
/*
* Allocate mmap buffer of "mmap_pages" number of
* pages.
*/
void *event_sample_buf_mmap(int fd, int mmap_pages)
{
	size_t page_size = sysconf(_SC_PAGESIZE);
	size_t len;
	void *ring;

	/* Reject nonsensical arguments up front */
	if (mmap_pages <= 0 || fd <= 0)
		return NULL;

	/* One extra page holds the perf_event_mmap_page metadata header */
	len = page_size * (1 + mmap_pages);
	ring = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED) {
		perror("mmap() failed.");
		return NULL;
	}

	return ring;
}
/*
* Post process the mmap buffer.
* - If sample_count != NULL then return count of total
* number of samples present in the mmap buffer.
* - If sample_count == NULL then return the address
* of first sample from the mmap buffer
*/
/*
 * Walk the perf mmap ring buffer, starting one page past the
 * metadata page.
 * - If sample_count != NULL: count every PERF_RECORD_SAMPLE record,
 *   store the total in *sample_count and return NULL when the
 *   buffer is exhausted.
 * - If sample_count == NULL: return a pointer to the payload of the
 *   first PERF_RECORD_SAMPLE, with its payload size in *size.
 */
void *__event_read_samples(void *sample_buff, size_t *size, u64 *sample_count)
{
	size_t page_size = sysconf(_SC_PAGESIZE);
	/* Records begin on the first data page, after the metadata page */
	struct perf_event_header *header = sample_buff + page_size;
	struct perf_event_mmap_page *metadata_page = sample_buff;
	unsigned long data_head, data_tail;

	/*
	 * PERF_RECORD_SAMPLE:
	 * struct {
	 *     struct perf_event_header hdr;
	 *     u64 data[];
	 * };
	 */

	data_head = metadata_page->data_head;
	/* sync memory before reading sample */
	mb();
	data_tail = metadata_page->data_tail;

	/* Check for sample_count */
	if (sample_count)
		*sample_count = 0;

	while (1) {
		/*
		 * Reads the mmap data buffer by moving
		 * the data_tail to know the last read data.
		 * data_head points to head in data buffer.
		 * refer "struct perf_event_mmap_page" in
		 * "include/uapi/linux/perf_event.h".
		 *
		 * NOTE(review): sizeof(header) is the size of the
		 * POINTER, which on 64-bit happens to equal
		 * sizeof(struct perf_event_header) (8 bytes);
		 * sizeof(*header) would state the intent — confirm.
		 */
		if (data_head - data_tail < sizeof(header))
			return NULL;

		data_tail += sizeof(header);
		if (header->type == PERF_RECORD_SAMPLE) {
			/* Payload size excludes the record header */
			*size = (header->size - sizeof(header));
			if (!sample_count)
				return sample_buff + page_size + data_tail;
			data_tail += *size;
			*sample_count += 1;
		} else {
			/* Skip non-sample records, clamping at data_head */
			*size = (header->size - sizeof(header));
			if ((metadata_page->data_tail + *size) > metadata_page->data_head)
				data_tail = metadata_page->data_head;
			else
				data_tail += *size;
		}
		/* Advance to the next record header */
		header = (struct perf_event_header *)((void *)header + header->size);
	}

	/* Unreachable: the loop above only exits via return */
	return NULL;
}
/* Return the number of PERF_RECORD_SAMPLE records in the buffer */
int collect_samples(void *sample_buff)
{
	size_t size = 0;
	u64 count;

	/* __event_read_samples fills in the total sample count */
	__event_read_samples(sample_buff, &size, &count);
	return count;
}
/* Return a pointer to the first sample's payload in the mmap buffer */
static void *perf_read_first_sample(void *sample_buff, size_t *size)
{
	return __event_read_samples(sample_buff, size, NULL);
}
/*
 * Return a pointer to the interrupt-register dump inside the first
 * sample of the ring buffer, or NULL when the event's sample_type
 * is not one this parser understands or no sample is present.
 */
u64 *get_intr_regs(struct event *event, void *sample_buff)
{
	u64 type = event->attr.sample_type;
	u64 *intr_regs;
	size_t size = 0;

	/*
	 * XOR is zero only on exact equality: accept only
	 * REGS_INTR alone or REGS_INTR | BRANCH_STACK.
	 */
	if ((type ^ (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_BRANCH_STACK)) &&
	(type ^ PERF_SAMPLE_REGS_INTR))
		return NULL;

	intr_regs = (u64 *)perf_read_first_sample(sample_buff, &size);
	if (!intr_regs)
		return NULL;

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		/*
		 * PERF_RECORD_SAMPLE and PERF_SAMPLE_BRANCH_STACK:
		 * struct {
		 *	struct perf_event_header hdr;
		 *	u64 number_of_branches;
		 *	struct perf_branch_entry[number_of_branches];
		 *	u64 data[];
		 * };
		 * struct perf_branch_entry {
		 *	u64 from;
		 *	u64 to;
		 *	u64 misc;
		 * };
		 */
		/* Skip the count word plus 3 u64s per branch entry */
		intr_regs += ((*intr_regs) * 3) + 1;
	}

	/*
	 * First entry in the sample buffer used to specify
	 * PERF_SAMPLE_REGS_ABI_64, skip perf regs abi to access
	 * interrupt registers.
	 */
	++intr_regs;

	return intr_regs;
}
/*
 * Map a register name ("R0".."R31", "NIP", ..., "SIAR") to its
 * index in the PERF_SAMPLE_REGS_INTR dump. The table is ordered so
 * that the array index of each name is exactly the value the old
 * 59-branch if/else strcmp chain returned. Returns -1 for an
 * unknown register name.
 */
static const int __perf_reg_mask(const char *register_name)
{
	static const char *const reg_names[] = {
		"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
		"R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
		"R16", "R17", "R18", "R19", "R20", "R21", "R22", "R23",
		"R24", "R25", "R26", "R27", "R28", "R29", "R30", "R31",
		"NIP", "MSR", "ORIG_R3", "CTR", "LINK", "XER", "CCR",
		"SOFTE", "TRAP", "DAR", "DSISR", "SIER", "MMCRA",
		"MMCR0", "MMCR1", "MMCR2", "MMCR3", "SIER2", "SIER3",
		"PMC1", "PMC2", "PMC3", "PMC4", "PMC5", "PMC6",
		"SDAR", "SIAR"
	};
	size_t i;

	for (i = 0; i < sizeof(reg_names) / sizeof(reg_names[0]); i++) {
		if (!strcmp(register_name, reg_names[i]))
			return (int)i;
	}

	return -1;
}
/*
 * Return the value of the named register from the sampled
 * intr_regs array, or -1 (all-ones) if the name is unknown or the
 * register is not enabled in platform_extended_mask.
 */
u64 get_reg_value(u64 *intr_regs, char *register_name)
{
	int register_bit_position;

	register_bit_position = __perf_reg_mask(register_name);

	/*
	 * NOTE(review): the mask is tested at (position - 1); for
	 * position 0 ("R0") this shifts by a negative amount, which
	 * is undefined behaviour — confirm whether the off-by-one
	 * offset against platform_extended_mask is intentional.
	 */
	if (register_bit_position < 0 || (!((platform_extended_mask >>
			(register_bit_position - 1)) & 1)))
		return -1;

	return *(intr_regs + register_bit_position);
}
/*
 * Compute the thresh_cmp value as it would be programmed into
 * MMCRA. Pre-P10 (no ARCH_3_1) the value comes straight from the
 * raw event code; on P10 the 18-bit value from attr.config1 is
 * converted to the exponent/mantissa encoding described below.
 */
int get_thresh_cmp_val(struct event event)
{
	int exp = 0;
	u64 result = 0;
	u64 value;

	if (!have_hwcap2(PPC_FEATURE2_ARCH_3_1))
		return EV_CODE_EXTRACT(event.attr.config, thd_cmp);

	value = EV_CODE_EXTRACT(event.attr.config1, thd_cmp);

	/* A zero threshold needs no encoding */
	if (!value)
		return value;

	/*
	 * Incase of P10, thresh_cmp value is not part of raw event code
	 * and provided via attr.config1 parameter. To program threshold in MMCRA,
	 * take a 18 bit number N and shift right 2 places and increment
	 * the exponent E by 1 until the upper 10 bits of N are zero.
	 * Write E to the threshold exponent and write the lower 8 bits of N
	 * to the threshold mantissa.
	 * The max threshold that can be written is 261120.
	 */
	if (value > 261120)
		value = 261120;
	while ((64 - __builtin_clzl(value)) > 8) {
		exp++;
		value >>= 2;
	}

	/*
	 * Note that it is invalid to write a mantissa with the
	 * upper 2 bits of mantissa being zero, unless the
	 * exponent is also zero.
	 */
	if (!(value & 0xC0) && exp)
		result = -1;
	else
		result = (exp << 8) | value;

	return result;
}
/*
* Utility function to check for generic compat PMU
* by comparing base_platform value from auxv and real
* PVR value.
*/
/*
 * True when the auxv base platform is neither power9 nor power10,
 * i.e. we are running on the generic compat PMU.
 */
static bool auxv_generic_compat_pmu(void)
{
	int base_pvr = 0;
	/* Fetch the auxv base-platform string once instead of per compare */
	const char *base = auxv_base_platform();

	if (!strcmp(base, "power9"))
		base_pvr = POWER9;
	else if (!strcmp(base, "power10"))
		base_pvr = POWER10;

	return (!base_pvr);
}
/*
* Check for generic compat PMU.
* First check for presence of pmu_name from
* "/sys/bus/event_source/devices/cpu/caps".
* If doesn't exist, fallback to using value
* auxv.
*/
bool check_for_generic_compat_pmu(void)
{
	char pmu_name[256] = {0};

	/*
	 * Prefer the sysfs pmu_name; if the caps file cannot be read,
	 * fall back to auxv-based detection.
	 */
	if (read_sysfs_file("bus/event_source/devices/cpu/caps/pmu_name",
			    pmu_name, sizeof(pmu_name)) < 0)
		return auxv_generic_compat_pmu();

	/* The generic compat PMU registers itself as "ISAv3" */
	return !strcmp(pmu_name, "ISAv3");
}
/*
* Check if system is booted in compat mode.
*/
bool check_for_compat_mode(void)
{
	/* Compat mode iff the platform differs from the base platform */
	return strcmp(auxv_platform(), auxv_base_platform()) != 0;
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void thirty_two_instruction_loop(int loops);
/* Instructions */
#define EventCode 0x500fa
/*
* A perf sampling test for mmcra
* field: bhrb_disable.
*/
static int mmcra_bhrb_disable_no_branch_test(void)
{
struct event event;
u64 *intr_regs;
/*
* Check for platform support for the test.
* This test is only aplicable on power10
*/
SKIP_IF(check_pvr_for_sampling_tests());
SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
/* Init the event for the sampling test */
event_init_sampling(&event, EventCode);
event.attr.sample_regs_intr = platform_extended_mask;
event.attr.exclude_kernel = 1;
FAIL_IF(event_open(&event));
event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
FAIL_IF(event_enable(&event));
/* workload to make the event overflow */
thirty_two_instruction_loop(10000);
FAIL_IF(event_disable(&event));
intr_regs = get_intr_regs(&event, event.mmap_buffer);
/* Check for intr_regs */
FAIL_IF(!intr_regs);
/* Verify that bhrb_disable bit is set in MMCRA for non-branch samples */
FAIL_IF(!get_mmcra_bhrb_disable(get_reg_value(intr_regs, "MMCRA"), 5));
event_close(&event);
return 0;
}
int main(void)
{
	/* Run the testcase under the powerpc selftest harness */
	return test_harness(mmcra_bhrb_disable_no_branch_test, "mmcra_bhrb_disable_no_branch_test");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_no_branch_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void thirty_two_instruction_loop(int loops);
/*
* A perf sampling test for mmcr0
* field: pmcjce
*/
static int mmcr0_pmcjce(void)
{
struct event event;
u64 *intr_regs;
/* Check for platform support for the test */
SKIP_IF(check_pvr_for_sampling_tests());
/* Init the event for the sampling test */
event_init_sampling(&event, 0x500fa);
event.attr.sample_regs_intr = platform_extended_mask;
FAIL_IF(event_open(&event));
event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
FAIL_IF(event_enable(&event));
/* workload to make the event overflow */
thirty_two_instruction_loop(10000);
FAIL_IF(event_disable(&event));
/* Check for sample count */
FAIL_IF(!collect_samples(event.mmap_buffer));
intr_regs = get_intr_regs(&event, event.mmap_buffer);
/* Check for intr_regs */
FAIL_IF(!intr_regs);
/* Verify that pmcjce field is set in MMCR0 */
FAIL_IF(!get_mmcr0_pmcjce(get_reg_value(intr_regs, "MMCR0"), 5));
event_close(&event);
return 0;
}
int main(void)
{
	/* Run the testcase under the powerpc selftest harness */
	return test_harness(mmcr0_pmcjce, "mmcr0_pmcjce");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmcjce_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include "../event.h"
#include "../sampling_tests/misc.h"
/* PM_DATA_RADIX_PROCESS_L2_PTE_FROM_L2 */
#define EventCode_1 0x14242
/* PM_DATA_RADIX_PROCESS_L2_PTE_FROM_L3 */
#define EventCode_2 0x24242
/*
* Testcase for group constraint check for radix_scope_qual
* field which is used to program Monitor Mode Control
Register (MMCR1) bit 18.
* All events in the group should match radix_scope_qual,
* bits otherwise event_open for the group should fail.
*/
static int group_constraint_radix_scope_qual(void)
{
struct event event, leader;
/*
* Check for platform support for the test.
* This test is aplicable on power10 only.
*/
SKIP_IF(platform_check_for_tests());
SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
/* Init the events for the group contraint check for radix_scope_qual bits */
event_init(&leader, EventCode_1);
FAIL_IF(event_open(&leader));
event_init(&event, 0x200fc);
/* Expected to fail as sibling event doesn't request same radix_scope_qual bits as leader */
FAIL_IF(!event_open_with_group(&event, leader.fd));
event_init(&event, EventCode_2);
/* Expected to pass as sibling event request same radix_scope_qual bits as leader */
FAIL_IF(event_open_with_group(&event, leader.fd));
event_close(&leader);
event_close(&event);
return 0;
}
int main(void)
{
	/* Run the group-constraint test under the common selftest harness. */
	int rc = test_harness(group_constraint_radix_scope_qual,
			      "group_constraint_radix_scope_qual");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_radix_scope_qual_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include "../event.h"
#include "../sampling_tests/misc.h"
#define PM_RUN_CYC_ALT 0x200f4
#define PM_INST_DISP 0x200f2
#define PM_BR_2PATH 0x20036
#define PM_LD_MISS_L1 0x3e054
#define PM_RUN_INST_CMPL_ALT 0x400fa
#define EventCode_1 0x200fa
#define EventCode_2 0x200fc
#define EventCode_3 0x300fc
#define EventCode_4 0x400fc
/*
* Check for event alternatives.
*/
/*
 * Open @leader_code as a group leader, then add @sibling_code — an
 * event that needs the same PMC the leader is nominally assigned to.
 * The group can only be scheduled when the kernel moves the leader to
 * its alternative event on another PMC, so a successful open here
 * proves the alternative exists. Returns 0 on success; FAIL_IF
 * returns non-zero from this function on any failure.
 */
static int check_alternative_pair_p9(unsigned long long leader_code,
				     unsigned long long sibling_code)
{
	struct event event, leader;

	event_init(&leader, leader_code);
	FAIL_IF(event_open(&leader));

	event_init(&event, sibling_code);
	FAIL_IF(event_open_with_group(&event, leader.fd));

	event_close(&leader);
	event_close(&event);

	return 0;
}

/*
 * Check for event alternatives.
 */
static int event_alternatives_tests_p9(void)
{
	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/*
	 * PVR check is used here since PMU specific data like
	 * alternative events is handled by respective PMU driver
	 * code and using PVR will work correctly for all cases
	 * including generic compat mode.
	 */
	SKIP_IF(PVR_VER(mfspr(SPRN_PVR)) != POWER9);

	/* Skip for generic compat PMU */
	SKIP_IF(check_for_generic_compat_pmu());

	/*
	 * Expected to pass since PM_RUN_CYC_ALT in PMC2 has alternative event
	 * 0x600f4. So it can go in with EventCode_1 which is using PMC2.
	 */
	FAIL_IF(check_alternative_pair_p9(PM_RUN_CYC_ALT, EventCode_1));

	/*
	 * Expected to pass since PM_INST_DISP in PMC2 has alternative event
	 * 0x300f2 in PMC3. So it can go in with EventCode_2 which is using PMC2.
	 */
	FAIL_IF(check_alternative_pair_p9(PM_INST_DISP, EventCode_2));

	/*
	 * Expected to pass since PM_BR_2PATH in PMC2 has alternative event
	 * 0x40036 in PMC4. So it can go in with EventCode_2 which is using PMC2.
	 */
	FAIL_IF(check_alternative_pair_p9(PM_BR_2PATH, EventCode_2));

	/*
	 * Expected to pass since PM_LD_MISS_L1 in PMC3 has alternative event
	 * 0x400f0 in PMC4. So it can go in with EventCode_3 which is using PMC3.
	 */
	FAIL_IF(check_alternative_pair_p9(PM_LD_MISS_L1, EventCode_3));

	/*
	 * Expected to pass since PM_RUN_INST_CMPL_ALT in PMC4 has alternative event
	 * 0x500fa in PMC5. So it can go in with EventCode_4 which is using PMC4.
	 */
	FAIL_IF(check_alternative_pair_p9(PM_RUN_INST_CMPL_ALT, EventCode_4));

	return 0;
}
int main(void)
{
	/* Run the alternatives test under the common selftest harness. */
	int rc = test_harness(event_alternatives_tests_p9, "event_alternatives_tests_p9");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p9.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include "../event.h"
#include "../sampling_tests/misc.h"
/*
* Testcase for reserved bits in Monitor Mode Control
* Register A (MMCRA) Random Sampling Mode (SM) value.
* As per Instruction Set Architecture (ISA), the values
* 0x5, 0x9, 0xD, 0x19, 0x1D, 0x1A, 0x1E are reserved
* for sampling mode field. Test that having these reserved
* bit values should cause event_open to fail.
* Input event code uses these sampling bits along with
* 401e0 (PM_MRK_INST_CMPL).
*/
static int reserved_bits_mmcra_sample_elig_mode(void)
{
	struct event event;
	/*
	 * MMCRA Random Sampling Mode (SM) values: 0x5
	 * 0x9, 0xD, 0x19, 0x1D, 0x1A, 0x1E is reserved.
	 * Expected to fail when using these reserved values.
	 */
	const unsigned long long reserved_codes[] = {
		0x50401e0, 0x90401e0, 0xD0401e0, 0x190401e0,
		0x1D0401e0, 0x1A0401e0, 0x1E0401e0,
	};
	int i;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/* Skip for Generic compat PMU */
	SKIP_IF(check_for_generic_compat_pmu());

	/* Each reserved sampling-mode value must be rejected by event_open() */
	for (i = 0; i < 7; i++) {
		event_init(&event, reserved_codes[i]);
		FAIL_IF(!event_open(&event));
	}

	/*
	 * MMCRA Random Sampling Mode (SM) value 0x10
	 * is reserved in power10 and 0xC is reserved in
	 * power9.
	 */
	if (PVR_VER(mfspr(SPRN_PVR)) == POWER10) {
		event_init(&event, 0x100401e0);
		FAIL_IF(!event_open(&event));
	} else if (PVR_VER(mfspr(SPRN_PVR)) == POWER9) {
		event_init(&event, 0xC0401e0);
		FAIL_IF(!event_open(&event));
	}
	return 0;
}
int main(void)
{
	/* Run the reserved-bits test under the common selftest harness. */
	int rc = test_harness(reserved_bits_mmcra_sample_elig_mode,
			      "reserved_bits_mmcra_sample_elig_mode");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_sample_elig_mode_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "utils.h"
#include "../sampling_tests/misc.h"
/*
* Primary PMU events used here is PM_MRK_INST_CMPL (0x401e0) and
* PM_THRESH_MET (0x101ec)
* Threshold event selection used is issue to complete for cycles
* Sampling criteria is Load or Store only sampling
*/
#define p9_EventCode_1 0x13e35340401e0
#define p9_EventCode_2 0x17d34340101ec
#define p9_EventCode_3 0x13e35340101ec
#define p10_EventCode_1 0x35340401e0
#define p10_EventCode_2 0x35340101ec
/*
* Testcase for group constraint check of thresh_cmp bits which is
* used to program thresh compare field in Monitor Mode Control Register A
* (MMCRA: 9-18 bits for power9 and MMCRA: 8-18 bits for power10).
* All events in the group should match thresh compare bits otherwise
* event_open for the group will fail.
*/
static int group_constraint_thresh_cmp(void)
{
	struct event event, leader;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	if (have_hwcap2(PPC_FEATURE2_ARCH_3_1)) {
		/* Init the events for the group constraint check for thresh_cmp bits */
		event_init(&leader, p10_EventCode_1);

		/* Add the thresh_cmp value for leader in config1 */
		leader.attr.config1 = 1000;
		FAIL_IF(event_open(&leader));

		event_init(&event, p10_EventCode_2);

		/* Add the different thresh_cmp value from the leader event in config1 */
		event.attr.config1 = 2000;

		/* Expected to fail as sibling and leader event request different thresh_cmp bits */
		FAIL_IF(!event_open_with_group(&event, leader.fd));

		event_close(&event);

		/* Init the event for the group constraint thresh compare test */
		event_init(&event, p10_EventCode_2);

		/* Add the same thresh_cmp value for leader and sibling event in config1 */
		event.attr.config1 = 1000;

		/* Expected to succeed as sibling and leader event request same thresh_cmp bits */
		FAIL_IF(event_open_with_group(&event, leader.fd));

		event_close(&leader);
		event_close(&event);
	} else {
		/* power9: thresh_cmp is encoded in the event code, not config1 */
		/* Init the events for the group constraint check for thresh_cmp bits */
		event_init(&leader, p9_EventCode_1);
		FAIL_IF(event_open(&leader));

		event_init(&event, p9_EventCode_2);

		/* Expected to fail as sibling and leader event request different thresh_cmp bits */
		FAIL_IF(!event_open_with_group(&event, leader.fd));

		event_close(&event);

		/* Init the event for the group constraint thresh compare test */
		event_init(&event, p9_EventCode_3);

		/* Expected to succeed as sibling and leader event request same thresh_cmp bits */
		FAIL_IF(event_open_with_group(&event, leader.fd));

		event_close(&leader);
		event_close(&event);
	}
	return 0;
}
int main(void)
{
	/* Run the group-constraint test under the common selftest harness. */
	int rc = test_harness(group_constraint_thresh_cmp, "group_constraint_thresh_cmp");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_cmp_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include "../event.h"
#include "../sampling_tests/misc.h"
/*
* Testcase for reserved bits in Monitor Mode
* Control Register A (MMCRA) thresh_ctl bits.
* For MMCRA[48:51]/[52:55]) Threshold Start/Stop,
* 0b11110000/0b00001111 is reserved.
*/
static int reserved_bits_mmcra_thresh_ctl(void)
{
	struct event event;
	/*
	 * MMCRA[48:51]/[52:55]) Threshold Start/Stop
	 * events Selection. 0b11110000/0b00001111 is reserved.
	 * Expected to fail when using these reserved values.
	 */
	const unsigned long long reserved_ctl[] = { 0xf0340401e0, 0x0f340401e0 };
	int i;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/* Skip for Generic compat PMU */
	SKIP_IF(check_for_generic_compat_pmu());

	/* Both reserved thresh_ctl encodings must be rejected by event_open() */
	for (i = 0; i < 2; i++) {
		event_init(&event, reserved_ctl[i]);
		FAIL_IF(!event_open(&event));
	}

	return 0;
}
int main(void)
{
	/* Run the reserved-bits test under the common selftest harness. */
	int rc = test_harness(reserved_bits_mmcra_thresh_ctl, "reserved_bits_mmcra_thresh_ctl");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_thresh_ctl_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "utils.h"
#include "../sampling_tests/misc.h"
/*
* Load Missed L1, for power9 its pointing to PM_LD_MISS_L1_FIN (0x2c04e) and
* for power10 its pointing to PM_LD_MISS_L1 (0x3e054)
*
* Hardware cache level : PERF_COUNT_HW_CACHE_L1D
* Hardware cache event operation type : PERF_COUNT_HW_CACHE_OP_READ
* Hardware cache event result type : PERF_COUNT_HW_CACHE_RESULT_MISS
*/
#define EventCode_1 0x10000
/*
* Hardware cache level : PERF_COUNT_HW_CACHE_L1D
* Hardware cache event operation type : PERF_COUNT_HW_CACHE_OP_WRITE
* Hardware cache event result type : PERF_COUNT_HW_CACHE_RESULT_ACCESS
*/
#define EventCode_2 0x0100
/*
* Hardware cache level : PERF_COUNT_HW_CACHE_DTLB
* Hardware cache event operation type : PERF_COUNT_HW_CACHE_OP_WRITE
* Hardware cache event result type : PERF_COUNT_HW_CACHE_RESULT_ACCESS
*/
#define EventCode_3 0x0103
/*
* Hardware cache level : PERF_COUNT_HW_CACHE_L1D
* Hardware cache event operation type : PERF_COUNT_HW_CACHE_OP_READ
* Hardware cache event result type : Invalid ( > PERF_COUNT_HW_CACHE_RESULT_MAX)
*/
#define EventCode_4 0x030000
/*
* A perf test to check valid hardware cache events.
*/
static int hw_cache_event_type_test(void)
{
	struct event event;
	/* Generic cache event codes paired with the expected event_open() outcome */
	const struct {
		unsigned long long code;
		int expect_success;
	} cases[] = {
		/* L1D read miss: valid mapping, open must succeed */
		{ EventCode_1, 1 },
		/* L1D write access: cache map entry is 0, open must fail */
		{ EventCode_2, 0 },
		/* DTLB write access: cache map entry is -1, open must fail */
		{ EventCode_3, 0 },
		/* Result type > PERF_COUNT_HW_CACHE_RESULT_MAX: invalid, open must fail */
		{ EventCode_4, 0 },
	};
	int i;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/* Skip for Generic compat PMU */
	SKIP_IF(check_for_generic_compat_pmu());

	for (i = 0; i < 4; i++) {
		event_init_opts(&event, cases[i].code, PERF_TYPE_HW_CACHE, "event");
		if (cases[i].expect_success)
			FAIL_IF(event_open(&event));
		else
			FAIL_IF(!event_open(&event));
		event_close(&event);
	}

	return 0;
}
int main(void)
{
	/* Run the cache-event test under the common selftest harness. */
	int rc = test_harness(hw_cache_event_type_test, "hw_cache_event_type_test");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/hw_cache_event_type_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include "../event.h"
#include "../sampling_tests/misc.h"
/* The processor's L1 data cache was reloaded */
#define EventCode1 0x21C040
#define EventCode2 0x22C040
/*
* Testcase for group constraint check
* when using events with same PMC.
* Multiple events in a group shouldn't
* ask for same PMC. If so it should fail.
*/
static int group_constraint_repeat(void)
{
	struct event event, leader;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/*
	 * Two events in a group using same PMC
	 * should fail to get scheduled. Use same PMC2
	 * for leader and sibling event which is expected
	 * to fail.
	 */
	event_init(&leader, EventCode1);
	FAIL_IF(event_open(&leader));

	event_init(&event, EventCode1);

	/* Expected to fail since sibling event is requesting same PMC as leader */
	FAIL_IF(!event_open_with_group(&event, leader.fd));

	event_init(&event, EventCode2);

	/* Expected to pass since sibling event is requesting different PMC */
	FAIL_IF(event_open_with_group(&event, leader.fd));

	event_close(&leader);
	event_close(&event);
	return 0;
}
int main(void)
{
	/* Run the group-constraint test under the common selftest harness. */
	int rc = test_harness(group_constraint_repeat, "group_constraint_repeat");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_repeat_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "utils.h"
#include "../sampling_tests/misc.h"
/* All L1 D cache load references counted at finish, gated by reject */
#define EventCode_1 0x1100fc
/* Load Missed L1 */
#define EventCode_2 0x23e054
/* Load Missed L1 */
#define EventCode_3 0x13e054
/*
* Testcase for group constraint check of data and instructions
* cache qualifier bits which is used to program cache select field in
* Monitor Mode Control Register 1 (MMCR1: 16-17) for l1 cache.
* All events in the group should match cache select bits otherwise
* event_open for the group will fail.
*/
static int group_constraint_cache(void)
{
	struct event event, leader;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/* Init the events for the group constraint check for l1 cache select bits */
	event_init(&leader, EventCode_1);
	FAIL_IF(event_open(&leader));

	event_init(&event, EventCode_2);

	/* Expected to fail as sibling event doesn't request same l1 cache select bits as leader */
	FAIL_IF(!event_open_with_group(&event, leader.fd));

	event_close(&event);

	/* Init the event for the group constraint l1 cache select test */
	event_init(&event, EventCode_3);

	/* Expected to succeed as sibling event request same l1 cache select bits as leader */
	FAIL_IF(event_open_with_group(&event, leader.fd));

	event_close(&leader);
	event_close(&event);
	return 0;
}
int main(void)
{
	/* Run the group-constraint test under the common selftest harness. */
	int rc = test_harness(group_constraint_cache, "group_constraint_cache");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_cache_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "utils.h"
#include "../sampling_tests/misc.h"
/* All successful D-side store dispatches for this thread */
#define EventCode_1 0x010000046080
/* All successful D-side store dispatches for this thread that were L2 Miss */
#define EventCode_2 0x26880
/* All successful D-side store dispatches for this thread that were L2 Miss */
#define EventCode_3 0x010000026880
/*
* Testcase for group constraint check of l2l3_sel bits which is
* used to program l2l3 select field in Monitor Mode Control Register 0
* (MMCR0: 56-60).
* All events in the group should match l2l3_sel bits otherwise
* event_open for the group should fail.
*/
static int group_constraint_l2l3_sel(void)
{
	struct event event, leader;

	/*
	 * Check for platform support for the test.
	 * This test is only applicable on power10
	 */
	SKIP_IF(platform_check_for_tests());
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));

	/* Init the events for the group constraint check for l2l3_sel bits */
	event_init(&leader, EventCode_1);
	FAIL_IF(event_open(&leader));

	event_init(&event, EventCode_2);

	/* Expected to fail as sibling event doesn't request same l2l3_sel bits as leader */
	FAIL_IF(!event_open_with_group(&event, leader.fd));

	event_close(&event);

	/* Init the event for the group constraint l2l3_sel test */
	event_init(&event, EventCode_3);

	/* Expected to succeed as sibling event request same l2l3_sel bits as leader */
	FAIL_IF(event_open_with_group(&event, leader.fd));

	event_close(&leader);
	event_close(&event);
	return 0;
}
int main(void)
{
	/* Run the group-constraint test under the common selftest harness. */
	int rc = test_harness(group_constraint_l2l3_sel, "group_constraint_l2l3_sel");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_l2l3_sel_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include "../event.h"
#include "../sampling_tests/misc.h"
#define PM_RUN_CYC_ALT 0x200f4
#define PM_INST_DISP 0x200f2
#define PM_BR_2PATH 0x20036
#define PM_LD_MISS_L1 0x3e054
#define PM_RUN_INST_CMPL_ALT 0x400fa
#define EventCode_1 0x100fc
#define EventCode_2 0x200fa
#define EventCode_3 0x300fc
#define EventCode_4 0x400fc
/*
* Check for event alternatives.
*/
/*
 * Open @leader_code as group leader and verify that sibling events
 * using PMC1 to PMC4 (EventCode_1 to EventCode_4) can all be added
 * to the group. This only succeeds when the kernel can move the
 * leader to its alternative event on PMC5/PMC6, freeing PMC1-PMC4
 * for the siblings. Returns 0 on success; FAIL_IF returns non-zero
 * from this function on any failure.
 */
static int check_alternatives_group_p10(unsigned long long leader_code)
{
	struct event events[5];
	const unsigned long long codes[5] = {
		leader_code, EventCode_1, EventCode_2, EventCode_3, EventCode_4,
	};
	int i;

	for (i = 0; i < 5; i++)
		event_init(&events[i], codes[i]);

	FAIL_IF(event_open(&events[0]));

	for (i = 1; i < 5; i++)
		FAIL_IF(event_open_with_group(&events[i], events[0].fd));

	for (i = 0; i < 5; i++)
		event_close(&events[i]);

	return 0;
}

/*
 * Check for event alternatives.
 */
static int event_alternatives_tests_p10(void)
{
	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/*
	 * PVR check is used here since PMU specific data like
	 * alternative events is handled by respective PMU driver
	 * code and using PVR will work correctly for all cases
	 * including generic compat mode.
	 */
	SKIP_IF(PVR_VER(mfspr(SPRN_PVR)) != POWER10);
	SKIP_IF(check_for_generic_compat_pmu());

	/*
	 * Expected to pass since 0x0001e has alternative event
	 * 0x600f4 in PMC6. So it can go in with other events
	 * in PMC1 to PMC4.
	 */
	FAIL_IF(check_alternatives_group_p10(0x0001e));

	/*
	 * Expected to pass since 0x00002 has alternative event
	 * 0x500fa in PMC5. So it can go in with other events
	 * in PMC1 to PMC4.
	 */
	FAIL_IF(check_alternatives_group_p10(0x00002));

	return 0;
}
int main(void)
{
	/* Run the alternatives test under the common selftest harness. */
	int rc = test_harness(event_alternatives_tests_p10, "event_alternatives_tests_p10");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p10.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "utils.h"
#include "../sampling_tests/misc.h"
/*
* Primary PMU events used here are PM_MRK_INST_CMPL (0x401e0) and
* PM_THRESH_MET (0x101ec).
* Threshold event selection used is issue to complete
* Sampling criteria is Load or Store only sampling
*/
#define EventCode_1 0x35340401e0
#define EventCode_2 0x35540101ec
#define EventCode_3 0x35340101ec
/*
* Testcase for group constraint check of thresh_sel bits which is
* used to program thresh select field in Monitor Mode Control Register A
* (MMCRA: 45-57).
* All events in the group should match thresh sel bits otherwise
* event_open for the group will fail.
*/
static int group_constraint_thresh_sel(void)
{
	struct event event, leader;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/* Init the events for the group constraint thresh select test */
	event_init(&leader, EventCode_1);
	FAIL_IF(event_open(&leader));

	event_init(&event, EventCode_2);

	/* Expected to fail as sibling and leader event request different thresh_sel bits */
	FAIL_IF(!event_open_with_group(&event, leader.fd));

	event_close(&event);

	/* Init the event for the group constraint thresh select test */
	event_init(&event, EventCode_3);

	/* Expected to succeed as sibling and leader event request same thresh_sel bits */
	FAIL_IF(event_open_with_group(&event, leader.fd));

	event_close(&leader);
	event_close(&event);
	return 0;
}
int main(void)
{
	/* Run the group-constraint test under the common selftest harness. */
	int rc = test_harness(group_constraint_thresh_sel, "group_constraint_thresh_sel");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_sel_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include "../event.h"
#include "../sampling_tests/misc.h"
/*
* Testcase for checking constraint checks for
* Performance Monitor Counter 5 (PMC5) and also
* Performance Monitor Counter 6 (PMC6). Events using
* PMC5/PMC6 shouldn't have other fields in event
* code like cache bits, thresholding or marked bit.
*/
static int group_constraint_pmc56(void)
{
	struct event event;
	/*
	 * Every code below must be rejected by event_open():
	 * - PMC5/PMC6 with cache bits set in the event code,
	 * - PMC5/PMC6 combined with anything other than the base
	 *   events 500fa and 600f4,
	 * - PMC5 with random sampling bits set in the event code.
	 */
	const unsigned long long invalid_codes[] = {
		0x2500fa,	/* PMC5 with cache bit set */
		0x2600f4,	/* PMC6 with cache bit set */
		0x501e0,	/* PMC5, not the base event 500fa */
		0x6001e,	/* PMC6, not the base event 600f4 */
		0x501fa,	/* PMC5, not the base event 500fa */
		0x35340500fa,	/* PMC5 with random sampling bits set */
	};
	int i;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	for (i = 0; i < 6; i++) {
		event_init(&event, invalid_codes[i]);
		FAIL_IF(!event_open(&event));
	}

	return 0;
}
int main(void)
{
	/* Run the PMC5/PMC6 constraint test under the common selftest harness. */
	int rc = test_harness(group_constraint_pmc56, "group_constraint_pmc56");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc56_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "utils.h"
#include "../sampling_tests/misc.h"
/*
* Primary PMU events used here are PM_MRK_INST_CMPL (0x401e0) and
* PM_THRESH_MET (0x101ec).
* Threshold event selection used is issue to complete and issue to
* finished for cycles
* Sampling criteria is Load or Store only sampling
*/
#define EventCode_1 0x35340401e0
#define EventCode_2 0x34340101ec
#define EventCode_3 0x35340101ec
/*
* Testcase for group constraint check of thresh_ctl bits which is
* used to program thresh compare field in Monitor Mode Control Register A
 * (MMCRA: 48-55).
* All events in the group should match thresh ctl bits otherwise
* event_open for the group will fail.
*/
static int group_constraint_thresh_ctl(void)
{
	struct event event, leader;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/* Init the events for the group constraint thresh control test */
	event_init(&leader, EventCode_1);
	FAIL_IF(event_open(&leader));

	event_init(&event, EventCode_2);

	/* Expected to fail as sibling and leader event request different thresh_ctl bits */
	FAIL_IF(!event_open_with_group(&event, leader.fd));

	event_close(&event);

	/* Init the event for the group constraint thresh control test */
	event_init(&event, EventCode_3);

	/* Expected to succeed as sibling and leader event request same thresh_ctl bits */
	FAIL_IF(event_open_with_group(&event, leader.fd));

	event_close(&leader);
	event_close(&event);
	return 0;
}
int main(void)
{
	/* Run the group-constraint test under the common selftest harness. */
	int rc = test_harness(group_constraint_thresh_ctl, "group_constraint_thresh_ctl");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_ctl_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <sys/prctl.h>
#include <limits.h>
#include "../event.h"
#include "../sampling_tests/misc.h"
/* The data cache was reloaded from local core's L3 due to a demand load */
#define EventCode_1 0x1340000001c040
/* PM_DATA_RADIX_PROCESS_L2_PTE_FROM_L2 */
#define EventCode_2 0x14242
/* Event code with IFM, EBB, BHRB bits set in event code */
#define EventCode_3 0xf00000000000001e
/*
* Some of the bits in the event code is
* reserved for specific platforms.
* Event code bits 52-59 are reserved in power9,
* whereas in power10, these are used for programming
* Monitor Mode Control Register 3 (MMCR3).
* Bit 9 in event code is reserved in power9,
* whereas it is used for programming "radix_scope_qual"
* bit 18 in Monitor Mode Control Register 1 (MMCR1).
*
* Testcase to ensure that using reserved bits in
* event code should cause event_open to fail.
*/
static int invalid_event_code(void)
{
	struct event event;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/*
	 * Events using MMCR3 bits and radix scope qual bits
	 * should fail in power9 and should succeed in power10.
	 * Init the events and check for pass/fail in event open.
	 */
	if (have_hwcap2(PPC_FEATURE2_ARCH_3_1)) {
		/* power10: these bit fields are architected, open must succeed */
		event_init(&event, EventCode_1);
		FAIL_IF(event_open(&event));
		event_close(&event);

		event_init(&event, EventCode_2);
		FAIL_IF(event_open(&event));
		event_close(&event);
	} else {
		/* power9: the same bits are reserved, open must fail */
		event_init(&event, EventCode_1);
		FAIL_IF(!event_open(&event));

		event_init(&event, EventCode_2);
		FAIL_IF(!event_open(&event));
	}
	return 0;
}
int main(void)
{
	/* Run the invalid-event-code test under the common selftest harness. */
	int rc = test_harness(invalid_event_code, "invalid_event_code");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/invalid_event_code_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include "../event.h"
#include "../sampling_tests/misc.h"
#define EventCode_1 0x35340401e0
#define EventCode_2 0x353c0101ec
#define EventCode_3 0x35340101ec
/*
* Test that using different sample bits in
* event code cause failure in schedule for
* group of events.
*/
static int group_constraint_mmcra_sample(void)
{
	struct event event, leader;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/*
	 * Events with different "sample" field values
	 * in a group will fail to schedule.
	 * Use event with load only sampling mode as
	 * group leader. Use event with store only sampling
	 * as sibling event.
	 */
	event_init(&leader, EventCode_1);
	FAIL_IF(event_open(&leader));

	event_init(&event, EventCode_2);

	/* Expected to fail as sibling event doesn't use same sampling bits as leader */
	FAIL_IF(!event_open_with_group(&event, leader.fd));

	event_init(&event, EventCode_3);

	/* Expected to pass as sibling event use same sampling bits as leader */
	FAIL_IF(event_open_with_group(&event, leader.fd));

	event_close(&leader);
	event_close(&event);
	return 0;
}
int main(void)
{
	/* Run the group-constraint test under the common selftest harness. */
	int rc = test_harness(group_constraint_mmcra_sample, "group_constraint_mmcra_sample");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_mmcra_sample_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <sys/prctl.h>
#include <limits.h>
#include "../event.h"
#include "../sampling_tests/misc.h"
#define PM_DTLB_MISS_16G 0x1c058
#define PM_DERAT_MISS_2M 0x1c05a
#define PM_DTLB_MISS_2M 0x1c05c
#define PM_MRK_DTLB_MISS_1G 0x1d15c
#define PM_DTLB_MISS_4K 0x2c056
#define PM_DERAT_MISS_1G 0x2c05a
#define PM_MRK_DERAT_MISS_2M 0x2d152
#define PM_MRK_DTLB_MISS_4K 0x2d156
#define PM_MRK_DTLB_MISS_16G 0x2d15e
#define PM_DTLB_MISS_64K 0x3c056
#define PM_MRK_DERAT_MISS_1G 0x3d152
#define PM_MRK_DTLB_MISS_64K 0x3d156
#define PM_DISP_HELD_SYNC_HOLD 0x4003c
#define PM_DTLB_MISS_16M 0x4c056
#define PM_DTLB_MISS_1G 0x4c05a
#define PM_MRK_DTLB_MISS_16M 0x4c15e
#define PM_MRK_ST_DONE_L2 0x10134
#define PM_RADIX_PWC_L1_HIT 0x1f056
#define PM_FLOP_CMPL 0x100f4
#define PM_MRK_NTF_FIN 0x20112
#define PM_RADIX_PWC_L2_HIT 0x2d024
#define PM_IFETCH_THROTTLE 0x3405e
#define PM_MRK_L2_TM_ST_ABORT_SISTER 0x3e15c
#define PM_RADIX_PWC_L3_HIT 0x3f056
#define PM_RUN_CYC_SMT2_MODE 0x3006c
#define PM_TM_TX_PASS_RUN_INST 0x4e014
#define PVR_POWER9_CUMULUS 0x00002000
/* Event codes that cannot be counted reliably on Power9 DD2.1 */
int blacklist_events_dd21[] = {
	PM_MRK_ST_DONE_L2,
	PM_RADIX_PWC_L1_HIT,
	PM_FLOP_CMPL,
	PM_MRK_NTF_FIN,
	PM_RADIX_PWC_L2_HIT,
	PM_IFETCH_THROTTLE,
	PM_MRK_L2_TM_ST_ABORT_SISTER,
	PM_RADIX_PWC_L3_HIT,
	PM_RUN_CYC_SMT2_MODE,
	PM_TM_TX_PASS_RUN_INST,
	PM_DISP_HELD_SYNC_HOLD,
};
/* Event codes that cannot be counted reliably on Power9 DD2.2 */
int blacklist_events_dd22[] = {
	PM_DTLB_MISS_16G,
	PM_DERAT_MISS_2M,
	PM_DTLB_MISS_2M,
	PM_MRK_DTLB_MISS_1G,
	PM_DTLB_MISS_4K,
	PM_DERAT_MISS_1G,
	PM_MRK_DERAT_MISS_2M,
	PM_MRK_DTLB_MISS_4K,
	PM_MRK_DTLB_MISS_16G,
	PM_DTLB_MISS_64K,
	PM_MRK_DERAT_MISS_1G,
	PM_MRK_DTLB_MISS_64K,
	PM_DISP_HELD_SYNC_HOLD,
	PM_DTLB_MISS_16M,
	PM_DTLB_MISS_1G,
	PM_MRK_DTLB_MISS_16M,
};
int pvr_min;
/*
* check for power9 support for 2.1 and
* 2.2 model where blacklist is applicable.
*/
int check_for_power9_version(void)
{
	/*
	 * Read the PVR once into a local; the original referenced an
	 * undeclared identifier `pvr`, which does not compile.
	 */
	unsigned long pvr = mfspr(SPRN_PVR);

	pvr_min = PVR_MIN(pvr);

	SKIP_IF(PVR_VER(pvr) != POWER9);
	SKIP_IF(!(pvr & PVR_POWER9_CUMULUS));

	/*
	 * Skips only when pvr_min == 3; blacklisted_events() itself
	 * acts only for minor revisions 1 (DD2.1) and 2 (DD2.2).
	 * NOTE(review): confirm the intended range check here.
	 */
	SKIP_IF(!(3 - pvr_min));

	return 0;
}
/*
* Testcase to ensure that using blacklisted bits in
* event code should cause event_open to fail in power9
*/
static int blacklisted_events(void)
{
	struct event event;
	int i = 0;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/*
	 * check for power9 support for 2.1 and
	 * 2.2 model where blacklist is applicable.
	 */
	SKIP_IF(check_for_power9_version());

	/* Skip for Generic compat mode */
	SKIP_IF(check_for_generic_compat_pmu());

	/* Every blacklisted event code must be rejected by event_open() */
	if (pvr_min == 1) {
		for (i = 0; i < ARRAY_SIZE(blacklist_events_dd21); i++) {
			event_init(&event, blacklist_events_dd21[i]);
			FAIL_IF(!event_open(&event));
		}
	} else if (pvr_min == 2) {
		for (i = 0; i < ARRAY_SIZE(blacklist_events_dd22); i++) {
			event_init(&event, blacklist_events_dd22[i]);
			FAIL_IF(!event_open(&event));
		}
	}
	return 0;
}
int main(void)
{
	/* Run the blacklist test under the common selftest harness. */
	int rc = test_harness(blacklisted_events, "blacklisted_events");

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/blacklisted_events_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "utils.h"
#include "../sampling_tests/misc.h"
/* All successful D-side store dispatches for this thread with PMC 2 */
#define EventCode_1 0x26080
/* All successful D-side store dispatches for this thread with PMC 4 */
#define EventCode_2 0x46080
/* All successful D-side store dispatches for this thread that were L2 Miss with PMC 3 */
#define EventCode_3 0x36880
/*
* Testcase for group constraint check of unit and pmc bits which is
* used to program corresponding unit and pmc field in Monitor Mode
* Control Register 1 (MMCR1)
* One of the event in the group should use PMC 4 incase units field
* value is within 6 to 9 otherwise event_open for the group will fail.
*/
static int group_constraint_unit(void)
{
	struct event *e, events[3];

	/*
	 * Check for platform support for the test.
	 * Constraint to use PMC4 with one of the event in group,
	 * when the unit is within 6 to 9 is only applicable on
	 * power9.
	 */
	SKIP_IF(platform_check_for_tests());
	SKIP_IF(have_hwcap2(PPC_FEATURE2_ARCH_3_1));

	/* Init the events for the group constraint check for unit bits */
	e = &events[0];
	event_init(e, EventCode_1);

	/* Expected to fail as PMC 4 is not used with unit field value 6 to 9 */
	FAIL_IF(!event_open(&events[0]));

	/* Init the events for the group constraint check for unit bits */
	e = &events[1];
	event_init(e, EventCode_2);

	/* Expected to pass as PMC 4 is used with unit field value 6 to 9 */
	FAIL_IF(event_open(&events[1]));

	/* Init the event for the group constraint unit test */
	e = &events[2];
	event_init(e, EventCode_3);

	/*
	 * Expected to fail as PMC4 is not being used.
	 * NOTE(review): events[0] failed to open above, so events[0].fd
	 * does not refer to an open group leader here — confirm intended.
	 */
	FAIL_IF(!event_open_with_group(&events[2], events[0].fd));

	/* Expected to succeed as event using PMC4 */
	FAIL_IF(event_open_with_group(&events[2], events[1].fd));

	event_close(&events[0]);
	event_close(&events[1]);
	event_close(&events[2]);

	return 0;
}
/* Harness entry point for the unit/PMC group constraint check. */
int main(void)
{
	return test_harness(group_constraint_unit, "group_constraint_unit");
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_unit_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <sys/prctl.h>
#include <limits.h>
#include "../event.h"
#include "../sampling_tests/misc.h"
/*
* Testcase to ensure that using invalid event in generic
* event for PERF_TYPE_HARDWARE should fail
*/
/*
 * Open and close a generic PERF_TYPE_HARDWARE event that is expected
 * to be valid on this platform. Returns non-zero (via FAIL_IF) if the
 * event cannot be opened.
 */
static int expect_valid_event(int event_code)
{
	struct event event;

	event_init_opts(&event, event_code, PERF_TYPE_HARDWARE, "event");
	FAIL_IF(event_open(&event));
	event_close(&event);
	return 0;
}

/*
 * Try to open a generic PERF_TYPE_HARDWARE event that is expected to
 * be invalid on this platform. Returns non-zero (via FAIL_IF) if the
 * open unexpectedly succeeds. Nothing to close on the expected path
 * since the open fails.
 */
static int expect_invalid_event(int event_code)
{
	struct event event;

	event_init_opts(&event, event_code, PERF_TYPE_HARDWARE, "event");
	FAIL_IF(!event_open(&event));
	return 0;
}

/*
 * Testcase to ensure that using an invalid event in a generic
 * event for PERF_TYPE_HARDWARE fails, and that the valid generic
 * events still open. The original body repeated the same
 * init/open/close sequence ~20 times; it is factored into the two
 * helpers above.
 */
static int generic_events_valid_test(void)
{
	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());
	/* generic events is different in compat_mode */
	SKIP_IF(check_for_generic_compat_pmu());

	if (PVR_VER(mfspr(SPRN_PVR)) == POWER10) {
		/*
		 * Invalid generic events in power10:
		 * - PERF_COUNT_HW_BUS_CYCLES
		 * - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
		 * - PERF_COUNT_HW_STALLED_CYCLES_BACKEND
		 * - PERF_COUNT_HW_REF_CPU_CYCLES
		 */
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_CPU_CYCLES));
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_INSTRUCTIONS));
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_CACHE_REFERENCES));
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_CACHE_MISSES));
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_BRANCH_INSTRUCTIONS));
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_BRANCH_MISSES));

		FAIL_IF(expect_invalid_event(PERF_COUNT_HW_BUS_CYCLES));
		FAIL_IF(expect_invalid_event(PERF_COUNT_HW_STALLED_CYCLES_FRONTEND));
		FAIL_IF(expect_invalid_event(PERF_COUNT_HW_STALLED_CYCLES_BACKEND));
		FAIL_IF(expect_invalid_event(PERF_COUNT_HW_REF_CPU_CYCLES));
	} else if (PVR_VER(mfspr(SPRN_PVR)) == POWER9) {
		/*
		 * Invalid generic events in power9:
		 * - PERF_COUNT_HW_BUS_CYCLES
		 * - PERF_COUNT_HW_REF_CPU_CYCLES
		 */
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_CPU_CYCLES));
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_INSTRUCTIONS));
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_CACHE_REFERENCES));
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_CACHE_MISSES));
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_BRANCH_INSTRUCTIONS));
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_BRANCH_MISSES));
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_STALLED_CYCLES_FRONTEND));
		FAIL_IF(expect_valid_event(PERF_COUNT_HW_STALLED_CYCLES_BACKEND));

		FAIL_IF(expect_invalid_event(PERF_COUNT_HW_BUS_CYCLES));
		FAIL_IF(expect_invalid_event(PERF_COUNT_HW_REF_CPU_CYCLES));
	}

	return 0;
}
/* Harness entry point for the generic hardware event validity test. */
int main(void)
{
	return test_harness(generic_events_valid_test, "generic_events_valid_test");
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/generic_events_valid_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include "../event.h"
#include <sys/prctl.h>
#include <limits.h>
#include "../sampling_tests/misc.h"
/*
* Testcase for group constraint check for
* Performance Monitor Counter 5 (PMC5) and also
* Performance Monitor Counter 6 (PMC6).
* Test that pmc5/6 is excluded from constraint
* check when scheduled along with group of events.
*/
static int group_pmc56_exclude_constraints(void)
{
	/* PMC5 event, PMC6 event, and an event with the cache (dc_ic) bits set */
	static const unsigned int event_codes[] = { 0x500fa, 0x600f4, 0x22C040 };
	struct event events[3];
	int i;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/*
	 * PMC5/6 are excluded from the constraint bit check when
	 * scheduled along with a group of events. The whole set
	 * below is therefore expected to go in as one group.
	 */
	for (i = 0; i < 3; i++)
		event_init(&events[i], event_codes[i]);

	FAIL_IF(event_open(&events[0]));

	/*
	 * event_open would fail if the constraint check failed; since
	 * PMC5/PMC6 are excluded from group constraints, opening the
	 * siblings should succeed.
	 */
	for (i = 1; i < 3; i++)
		FAIL_IF(event_open_with_group(&events[i], events[0].fd));

	for (i = 0; i < 3; i++)
		event_close(&events[i]);

	return 0;
}
/* Harness entry point for the PMC5/PMC6 constraint-exclusion test. */
int main(void)
{
	return test_harness(group_pmc56_exclude_constraints, "group_pmc56_exclude_constraints");
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/group_pmc56_exclude_constraints_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include "../event.h"
#include "../sampling_tests/misc.h"
/*
* Testcase for number of counters in use.
* The number of programmable counters is from
* performance monitor counter 1 to performance
* monitor counter 4 (PMC1-PMC4). If number of
* counters in use exceeds the limit, next event
* should fail to schedule.
*/
/*
 * Open a group using PMC1-PMC4; the fifth event must fail to schedule
 * because all programmable counters are already in use.
 * Returns 0 on success, non-zero (via FAIL_IF) on failure.
 */
static int group_constraint_pmc_count(void)
{
	struct event *e, events[5];
	int i;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/*
	 * Test for number of counters in use.
	 * Use PMC1 to PMC4 for leader and 3 sibling
	 * events. Trying to open the fourth sibling should
	 * fail here.
	 */
	e = &events[0];
	event_init(e, 0x1001a);

	e = &events[1];
	event_init(e, 0x200fc);

	e = &events[2];
	event_init(e, 0x30080);

	e = &events[3];
	event_init(e, 0x40054);

	e = &events[4];
	event_init(e, 0x0002c);

	FAIL_IF(event_open(&events[0]));

	/*
	 * The event_open will fail on event 4 if constraint
	 * check fails
	 */
	for (i = 1; i < 5; i++) {
		if (i == 4)
			FAIL_IF(!event_open_with_group(&events[i], events[0].fd));
		else
			FAIL_IF(event_open_with_group(&events[i], events[0].fd));
	}

	/*
	 * Close the group leader as well: the original close loop
	 * started at index 1 and leaked the leader's fd. events[4]
	 * never opened, so there is nothing to close for it.
	 */
	for (i = 0; i < 4; i++)
		event_close(&events[i]);

	return 0;
}
/* Harness entry point for the counter-count group constraint test. */
int main(void)
{
	return test_harness(group_constraint_pmc_count, "group_constraint_pmc_count");
}
| linux-master | tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc_count_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Michael Neuling, IBM Corp.
*
 * Test the kernel's signal delivery code to ensure that we don't
 * treclaim twice in the kernel signal delivery code. This can happen
 * if we trigger a signal when in a transaction and the stack pointer
 * is bogus.
 *
 * This test case registers a SEGV handler, sets the stack pointer
 * (r1) to NULL, starts a transaction and then generates a SEGV. The
 * SEGV should be handled but we exit here as the stack pointer is
 * invalid and hence we can't sigreturn. We only need to check that
 * this flow doesn't crash the kernel.
*/
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include "utils.h"
#include "tm.h"
void signal_segv(int signum)
{
	/*
	 * Should be unreachable: the stack pointer is bogus when the
	 * SEGV is raised, so this handler cannot actually run. Exit
	 * with failure if it somehow does.
	 */
	(void)signum;
	exit(1);
}
/*
 * Fork a child which nulls out its stack pointer, starts a transaction
 * and triggers a SEGV. The signal cannot be delivered on the invalid
 * stack, so the child dies; the parent only has to observe that the
 * machine survived and the child was reaped. Returns 0 on success.
 */
int tm_signal_stack()
{
	int pid;

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	pid = fork();
	if (pid < 0)
		exit(1);

	if (pid) { /* Parent */
		/*
		 * It's likely the whole machine will crash here so if
		 * the child ever exits, we are good.
		 */
		wait(NULL);
		return 0;
	}

	/*
	 * The flow here is:
	 * 1) register a signal handler (so signal delivery occurs)
	 * 2) make stack pointer (r1) = NULL
	 * 3) start transaction
	 * 4) cause segv
	 */
	if (signal(SIGSEGV, signal_segv) == SIG_ERR)
		exit(1);
	asm volatile("li 1, 0 ;"	/* stack ptr == NULL */
		     "1:"
		     "tbegin.;"
		     "beq 1b ;"	/* retry forever */
		     "tsuspend.;"
		     "ld 2, 0(1) ;" /* trigger segv" */
		     : : : "memory");

	/* This should never get here due to above segv */
	return 1;
}
/* Harness entry point for the bogus-stack signal delivery test. */
int main(void)
{
	return test_harness(tm_signal_stack, "tm_signal_stack");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-signal-stack.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2015, Laurent Dufour, IBM Corp.
*
 * Test the kernel's signal returning code to check reclaim is done if the
 * sigreturn() is called while in a transaction (suspended since active is
 * already dropped through the system call path).
*
* The kernel must discard the transaction when entering sigreturn, since
* restoring the potential TM SPRS from the signal frame is requiring to not be
* in a transaction.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "tm.h"
#include "utils.h"
/*
 * SIGSEGV handler: start a transaction and suspend it, then return
 * from the handler while still in suspended transactional state, so
 * the implicit sigreturn runs inside a suspended transaction.
 * r3 is preset to 1 and only cleared to 0 once tbegin. succeeds, so
 * ret != 0 means we never entered transactional state and the test
 * cannot proceed — exit with failure in that case.
 */
void handler(int sig)
{
	uint64_t ret;

	asm __volatile__(
		"li 3,1 ;"
		"tbegin. ;"
		"beq 1f ;"
		"li 3,0 ;"
		"tsuspend. ;"
		"1: ;"
		"std%X[ret] 3, %[ret] ;"
		: [ret] "=m"(ret)
		:
		: "memory", "3", "cr0");

	if (ret)
		exit(1);

	/*
	 * We return from the signal handle while in a suspended transaction
	 */
}
/*
 * Start a transaction and trigger a SEGV inside it. The handler
 * suspends a transaction and returns, so sigreturn executes while
 * suspended; the kernel must discard that transaction. The test then
 * expects to resume at the transaction failure path (label 1:), which
 * sets ret = 2. Any other outcome exits non-zero.
 */
int tm_sigreturn(void)
{
	struct sigaction sa;
	uint64_t ret = 0;

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());
	SKIP_IF(!is_ppc64le());

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);

	if (sigaction(SIGSEGV, &sa, NULL))
		exit(1);

	asm __volatile__(
		"tbegin. ;"
		"beq 1f ;"
		"li 3,0 ;"
		"std 3,0(3) ;" /* trigger SEGV */
		"li 3,1 ;"
		"std%X[ret] 3,%[ret] ;"
		"tend. ;"
		"b 2f ;"
		"1: ;"
		"li 3,2 ;"
		"std%X[ret] 3,%[ret] ;"
		"2: ;"
		: [ret] "=m"(ret)
		:
		: "memory", "3", "cr0");

	if (ret != 2)
		exit(1);

	exit(0);
}
/* Harness entry point for the sigreturn-in-transaction test. */
int main(void)
{
	return test_harness(tm_sigreturn, "tm_sigreturn");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-sigreturn.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
* Test the kernel's signal frame code.
*
 * The kernel sets up two sets of ucontexts if the signal was to be
 * delivered while the thread was in a transaction (referred to as
 * first and second contexts).
* Expected behaviour is that the checkpointed state is in the user
* context passed to the signal handler (first context). The speculated
* state can be accessed with the uc_link pointer (second context).
*
* The rationale for this is that if TM unaware code (which linked
* against TM libs) installs a signal handler it will not know of the
* speculative nature of the 'live' registers and may infer the wrong
* thing.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <altivec.h>
#include "utils.h"
#include "tm.h"
#define MAX_ATTEMPT 500000
#define NV_VMX_REGS 12 /* Number of non-volatile VMX registers */
#define VMX20 20 /* First non-volatile register to check in vr20-31 subset */
/*
 * Sets both the first (checkpointed) and second (speculative) signal
 * contexts according to the values passed through any non-NULL array
 * pointers, and invokes the handler installed for SIGUSR1 (see the
 * comment in tm_signal_context_chk() below). Returns 'pid' on success.
 */
long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss);

/* fail: per-register mismatch flag; broken: latched overall test failure */
static sig_atomic_t fail, broken;

/* Test only non-volatile registers, i.e. 12 vmx registers from vr20 to vr31 */
vector int vms[] = {
	/* First context will be set with these values, i.e. non-speculative */
	/* VMX20 , VMX21 , ... */
	{ 1, 2, 3, 4},{ 5, 6, 7, 8},{ 9,10,11,12},
	{13,14,15,16},{17,18,19,20},{21,22,23,24},
	{25,26,27,28},{29,30,31,32},{33,34,35,36},
	{37,38,39,40},{41,42,43,44},{45,46,47,48},
	/* Second context will be set with these values, i.e. speculative */
	/* VMX20 , VMX21 , ... */
	{ -1, -2, -3, -4},{ -5, -6, -7, -8},{ -9,-10,-11,-12},
	{-13,-14,-15,-16},{-17,-18,-19,-20},{-21,-22,-23,-24},
	{-25,-26,-27,-28},{-29,-30,-31,-32},{-33,-34,-35,-36},
	{-37,-38,-39,-40},{-41,-42,-43,-44},{-45,-46,-47,-48}
};
/*
 * SIGUSR1 handler: compare vr20-vr31 in the checkpointed (first)
 * context and the speculative (second, via uc_link) context against
 * the expected halves of vms[], printing every mismatch and latching
 * 'broken' on any failure.
 */
static void signal_usr1(int signum, siginfo_t *info, void *uc)
{
	int i, j;
	ucontext_t *ucp = uc;
	/* Speculative (transactional) state is linked via uc_link. */
	ucontext_t *tm_ucp = ucp->uc_link;

	for (i = 0; i < NV_VMX_REGS; i++) {
		/* Check first context. Print all mismatches. */
		fail = memcmp(ucp->uc_mcontext.v_regs->vrregs[VMX20 + i],
			      &vms[i], sizeof(vector int));
		if (fail) {
			broken = 1;
			printf("VMX%d (1st context) == 0x", VMX20 + i);
			/* Print actual value in first context. */
			for (j = 0; j < 4; j++)
				printf("%08x", ucp->uc_mcontext.v_regs->vrregs[VMX20 + i][j]);
			printf(" instead of 0x");
			/* Print expected value. */
			for (j = 0; j < 4; j++)
				printf("%08x", vms[i][j]);
			printf(" (expected)\n");
		}
	}

	for (i = 0; i < NV_VMX_REGS; i++) {
		/* Check second context. Print all mismatches. */
		fail = memcmp(tm_ucp->uc_mcontext.v_regs->vrregs[VMX20 + i],
			      &vms[NV_VMX_REGS + i], sizeof(vector int));
		if (fail) {
			broken = 1;
			/*
			 * Bug fix: the register being compared is
			 * vrregs[VMX20 + i], but the label used to be
			 * printed with NV_VMX_REGS + i (i.e. VMX12..23),
			 * mislabelling the mismatching register.
			 */
			printf("VMX%d (2nd context) == 0x", VMX20 + i);
			/* Print actual value in second context. */
			for (j = 0; j < 4; j++)
				printf("%08x", tm_ucp->uc_mcontext.v_regs->vrregs[VMX20 + i][j]);
			printf(" instead of 0x");
			/* Print expected value. */
			for (j = 0; j < 4; j++)
				printf("%08x", vms[NV_VMX_REGS + i][j]);
			printf(" (expected)\n");
		}
	}
}
static int tm_signal_context_chk()
{
	struct sigaction act;
	int attempt;
	long rc;
	pid_t pid = getpid();

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	act.sa_sigaction = signal_usr1;
	sigemptyset(&act.sa_mask);
	act.sa_flags = SA_SIGINFO;
	if (sigaction(SIGUSR1, &act, NULL) < 0) {
		perror("sigaction sigusr1");
		exit(1);
	}

	/*
	 * tm_signal_self_context_load sets both first and second
	 * contexts according to the values passed through the non-NULL
	 * array pointer, here 'vms', and invokes the handler installed
	 * for SIGUSR1. Repeat until a corruption is observed or the
	 * attempt budget is exhausted.
	 */
	for (attempt = 0; attempt < MAX_ATTEMPT && !broken; attempt++) {
		rc = tm_signal_self_context_load(pid, NULL, NULL, vms, NULL);
		FAIL_IF(rc != pid);
	}

	return broken;
}
/* Harness entry point for the VMX signal-context check. */
int main(void)
{
	return test_harness(tm_signal_context_chk, "tm_signal_context_chk_vmx");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Michael Neuling, IBM Corp.
*
* Test the kernel's signal return code to ensure that it doesn't
* crash when both the transactional and suspend MSR bits are set in
* the signal context.
*
* For this test, we send ourselves a SIGUSR1. In the SIGUSR1 handler
* we modify the signal context to set both MSR TM S and T bits (which
* is "reserved" by the PowerISA). When we return from the signal
* handler (implicit sigreturn), the kernel should detect reserved MSR
* value and send us with a SIGSEGV.
*/
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include "utils.h"
#include "tm.h"
/* Set once the invalid context has been armed; a SIGSEGV then means pass. */
int segv_expected = 0;

void signal_segv(int signum)
{
	/* Succeed only for the SEGV we deliberately provoked. */
	_exit(segv_expected && signum == SIGSEGV ? 0 : 1);
}
/*
 * SIGUSR1 handler: corrupt the signal context so the implicit
 * sigreturn presents the kernel with a reserved MSR TM bit
 * combination, which the kernel must reject with a SIGSEGV.
 */
void signal_usr1(int signum, siginfo_t *info, void *uc)
{
	ucontext_t *ucp = uc;

	/* Link tm checkpointed context to normal context */
	ucp->uc_link = ucp;
	/* Set all TM bits so that the context is now invalid */
#ifdef __powerpc64__
	ucp->uc_mcontext.gp_regs[PT_MSR] |= (7ULL << 32);
#else
	ucp->uc_mcontext.uc_regs->gregs[PT_MSR] |= (7ULL);
#endif
	/* Should segv on return because of invalid context */
	segv_expected = 1;
}
int tm_signal_msr_resv()
{
struct sigaction act;
SKIP_IF(!have_htm());
act.sa_sigaction = signal_usr1;
sigemptyset(&act.sa_mask);
act.sa_flags = SA_SIGINFO;
if (sigaction(SIGUSR1, &act, NULL) < 0) {
perror("sigaction sigusr1");
exit(1);
}
if (signal(SIGSEGV, signal_segv) == SIG_ERR)
exit(1);
raise(SIGUSR1);
/* We shouldn't get here as we exit in the segv handler */
return 1;
}
/* Harness entry point for the reserved-MSR signal context test. */
int main(void)
{
	return test_harness(tm_signal_msr_resv, "tm_signal_msr_resv");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
* Test the kernel's signal frame code.
*
 * The kernel sets up two sets of ucontexts if the signal was to be
 * delivered while the thread was in a transaction (referred to as
 * first and second contexts).
* Expected behaviour is that the checkpointed state is in the user
* context passed to the signal handler (first context). The speculated
* state can be accessed with the uc_link pointer (second context).
*
* The rationale for this is that if TM unaware code (which linked
* against TM libs) installs a signal handler it will not know of the
* speculative nature of the 'live' registers and may infer the wrong
* thing.
*/
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <altivec.h>
#include "utils.h"
#include "tm.h"
#define MAX_ATTEMPT 500000
#define NV_GPR_REGS 18 /* Number of non-volatile GPR registers */
#define R14 14 /* First non-volatile register to check in r14-r31 subset */
/*
 * Sets both the first (checkpointed) and second (speculative) signal
 * contexts according to the values passed through any non-NULL array
 * pointers, and invokes the handler installed for SIGUSR1.
 * Returns 'pid' on success.
 */
long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss);

/* fail: per-register mismatch flag; broken: latched overall test failure */
static sig_atomic_t fail, broken;

/* Test only non-volatile general purpose registers, i.e. r14-r31 */
static long gprs[] = {
	/* First context will be set with these values, i.e. non-speculative */
	/* R14, R15, ... */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
	/* Second context will be set with these values, i.e. speculative */
	/* R14, R15, ... */
	-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18
};
/*
 * SIGUSR1 handler: compare r14-r31 in the checkpointed (first) context
 * and the speculative (second, via uc_link) context against the two
 * halves of gprs[], printing every mismatch and latching 'broken'.
 */
static void signal_usr1(int signum, siginfo_t *info, void *uc)
{
	int i;
	ucontext_t *ucp = uc;
	ucontext_t *tm_ucp = ucp->uc_link;

	/* Check first context. Print all mismatches. */
	for (i = 0; i < NV_GPR_REGS; i++) {
		fail = (ucp->uc_mcontext.gp_regs[R14 + i] != gprs[i]);
		if (fail) {
			broken = 1;
			/*
			 * Bug fix: gprs[] holds signed longs (the second
			 * context uses negative values), so print with
			 * %ld rather than %lu, which rendered them as
			 * huge unsigned numbers.
			 */
			printf("GPR%d (1st context) == %ld instead of %ld (expected)\n",
			       R14 + i, (long)ucp->uc_mcontext.gp_regs[R14 + i], gprs[i]);
		}
	}

	/* Check second context. Print all mismatches. */
	for (i = 0; i < NV_GPR_REGS; i++) {
		fail = (tm_ucp->uc_mcontext.gp_regs[R14 + i] != gprs[NV_GPR_REGS + i]);
		if (fail) {
			broken = 1;
			printf("GPR%d (2nd context) == %ld instead of %ld (expected)\n",
			       R14 + i, (long)tm_ucp->uc_mcontext.gp_regs[R14 + i],
			       gprs[NV_GPR_REGS + i]);
		}
	}
}
static int tm_signal_context_chk_gpr()
{
	struct sigaction act;
	int attempt;
	long rc;
	pid_t pid = getpid();

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	act.sa_sigaction = signal_usr1;
	sigemptyset(&act.sa_mask);
	act.sa_flags = SA_SIGINFO;
	if (sigaction(SIGUSR1, &act, NULL) < 0) {
		perror("sigaction sigusr1");
		exit(1);
	}

	/*
	 * tm_signal_self_context_load sets both first and second
	 * contexts according to the values passed through the non-NULL
	 * array pointer, here 'gprs', and invokes the handler installed
	 * for SIGUSR1. Repeat until a corruption is observed or the
	 * attempt budget is exhausted.
	 */
	for (attempt = 0; attempt < MAX_ATTEMPT && !broken; attempt++) {
		rc = tm_signal_self_context_load(pid, gprs, NULL, NULL, NULL);
		FAIL_IF(rc != pid);
	}

	return broken;
}
/* Harness entry point for the GPR signal-context check. */
int main(void)
{
	return test_harness(tm_signal_context_chk_gpr, "tm_signal_context_chk_gpr");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c |
// SPDX-License-Identifier: GPL-2.0
/* Test context switching to see if the DSCR SPR is correctly preserved
* when within a transaction.
*
* Note: We assume that the DSCR has been left at the default value (0)
* for all CPUs.
*
* Method:
*
* Set a value into the DSCR.
*
* Start a transaction, and suspend it (*).
*
* Hard loop checking to see if the transaction has become doomed.
*
* Now that we *may* have been preempted, record the DSCR and TEXASR SPRS.
*
* If the abort was because of a context switch, check the DSCR value.
* Otherwise, try again.
*
* (*) If the transaction is not suspended we can't see the problem because
* the transaction abort handler will restore the DSCR to it's checkpointed
* value before we regain control.
*/
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <asm/tm.h>
#include "utils.h"
#include "tm.h"
#include "../pmu/lib.h"
#define SPRN_DSCR 0x03
/*
 * Set DSCR to a known value, enter and suspend a transaction, then
 * spin until the transaction becomes doomed (i.e. we were most likely
 * preempted). Record DSCR and TEXASR while still suspended; if the
 * abort cause was a reschedule, the recorded DSCR must equal the value
 * set before the transaction. Returns 0 on success, 1 on mismatch.
 */
int test_body(void)
{
	uint64_t rv, dscr1 = 1, dscr2, texasr;

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	printf("Check DSCR TM context switch: ");
	fflush(stdout);
	for (;;) {
		asm __volatile__ (
			/* set a known value into the DSCR */
			"ld 3, %[dscr1];"
			"mtspr %[sprn_dscr], 3;"

			"li %[rv], 1;"
			/* start and suspend a transaction */
			"tbegin.;"
			"beq 1f;"
			"tsuspend.;"

			/* hard loop until the transaction becomes doomed */
			"2: ;"
			"tcheck 0;"
			"bc 4, 0, 2b;"

			/* record DSCR and TEXASR */
			"mfspr 3, %[sprn_dscr];"
			"std 3, %[dscr2];"
			"mfspr 3, %[sprn_texasr];"
			"std 3, %[texasr];"

			"tresume.;"
			"tend.;"
			"li %[rv], 0;"
			"1: ;"
			: [rv]"=r"(rv), [dscr2]"=m"(dscr2), [texasr]"=m"(texasr)
			: [dscr1]"m"(dscr1)
			, [sprn_dscr]"i"(SPRN_DSCR), [sprn_texasr]"i"(SPRN_TEXASR)
			: "memory", "r3"
		);
		assert(rv); /* make sure the transaction aborted */
		/* Only a reschedule-caused abort is interesting; retry otherwise. */
		if ((texasr >> 56) != TM_CAUSE_RESCHED) {
			continue;
		}

		if (dscr2 != dscr1) {
			printf(" FAIL\n");
			return 1;
		} else {
			printf(" OK\n");
			return 0;
		}
	}
}
/* Run test_body() while eat_cpu() keeps the CPU busy to force preemption. */
static int tm_resched_dscr(void)
{
	return eat_cpu(test_body);
}
/* Harness entry point for the DSCR context-switch test. */
int main(int argc, const char *argv[])
{
	return test_harness(tm_resched_dscr, "tm_resched_dscr");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-resched-dscr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2017, Gustavo Romero, Breno Leitao, Cyril Bur, IBM Corp.
*
* Force FP, VEC and VSX unavailable exception during transaction in all
* possible scenarios regarding the MSR.FP and MSR.VEC state, e.g. when FP
* is enable and VEC is disable, when FP is disable and VEC is enable, and
* so on. Then we check if the restored state is correctly set for the
* FP and VEC registers to the previous state we set just before we entered
* in TM, i.e. we check if it corrupts somehow the recheckpointed FP and
* VEC/Altivec registers on abortion due to an unavailable exception in TM.
* N.B. In this test we do not test all the FP/Altivec/VSX registers for
* corruption, but only for registers vs0 and vs32, which are respectively
* representatives of FP and VEC/Altivec reg sets.
*/
#define _GNU_SOURCE
#include <error.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <inttypes.h>
#include <stdbool.h>
#include <pthread.h>
#include <sched.h>
#include "tm.h"
#define DEBUG 0
/* Unavailable exceptions to test in HTM */
#define FP_UNA_EXCEPTION 0
#define VEC_UNA_EXCEPTION 1
#define VSX_UNA_EXCEPTION 2
#define NUM_EXCEPTIONS 3
#define err_at_line(status, errnum, format, ...) \
error_at_line(status, errnum, __FILE__, __LINE__, format ##__VA_ARGS__)
#define pr_warn(code, format, ...) err_at_line(0, code, format, ##__VA_ARGS__)
#define pr_err(code, format, ...) err_at_line(1, code, format, ##__VA_ARGS__)
/*
 * Parameters and result shared between the main thread and the "ping"
 * thread (written before the thread is created, read inside it).
 */
struct Flags {
	int touch_fp;	/* touch FP before tbegin. so MSR.FP is on */
	int touch_vec;	/* touch VEC before tbegin. so MSR.VEC is on */
	int result;	/* incremented for each detected failure */
	int exception;	/* which unavailable exception to provoke (see defines above) */
} flags;
/*
 * Decide whether the transaction is expected to abort for the current
 * flags: pre-loading FP (or VEC) means the matching unavailable
 * exception cannot occur inside the transaction.
 */
bool expecting_failure(void)
{
	switch (flags.exception) {
	case FP_UNA_EXCEPTION:
		return !flags.touch_fp;
	case VEC_UNA_EXCEPTION:
		return !flags.touch_vec;
	case VSX_UNA_EXCEPTION:
		/*
		 * If both FP and VEC are touched it does not mean that
		 * touching VSX won't raise an exception. However since FP
		 * and VEC state are already correctly loaded, the
		 * transaction is not aborted (i.e. treclaimed/
		 * trecheckpointed) and MSR.VSX is just set as 1, so a TM
		 * failure is not expected also in this case.
		 */
		return !(flags.touch_fp && flags.touch_vec);
	default:
		return true;
	}
}
/* Check if failure occurred whilst in transaction. */
bool is_failure(uint64_t condition_reg)
{
	/*
	 * TM failure handling sets CR0 to 0b1010 (0xa); a transaction
	 * that completes reaches 'tend.', which leaves CR0 = 0b0100 (0x4).
	 */
	uint64_t cr0 = (condition_reg >> 28) & 0xf;

	return (cr0 & 0xa) == 0xa;
}
/*
 * "Ping" thread body: set vs0/vs32 to known values, optionally touch
 * FP and/or VEC (so MSR.FP/MSR.VEC are on), then enter a transaction
 * and provoke the configured unavailable exception inside it.
 * Afterwards, check CR0 against the expected abort/no-abort outcome
 * and verify vs0 and vs32 still hold the checkpointed values.
 * Returns NULL on an acceptable run, (void *)-1 when the attempt
 * should be retried (unexpected failure cause, e.g. a reschedule).
 */
void *tm_una_ping(void *input)
{

	/*
	 * Expected values for vs0 and vs32 after a TM failure. They must never
	 * change, otherwise they got corrupted.
	 */
	uint64_t high_vs0 = 0x5555555555555555;
	uint64_t low_vs0 = 0xffffffffffffffff;
	uint64_t high_vs32 = 0x5555555555555555;
	uint64_t low_vs32 = 0xffffffffffffffff;

	/* Counter for busy wait */
	uint64_t counter = 0x1ff000000;

	/*
	 * Variable to keep a copy of CR register content taken just after we
	 * leave the transactional state.
	 */
	uint64_t cr_ = 0;

	/*
	 * Wait a bit so thread can get its name "ping". This is not important
	 * to reproduce the issue but it's nice to have for systemtap debugging.
	 */
	if (DEBUG)
		sleep(1);

	printf("If MSR.FP=%d MSR.VEC=%d: ", flags.touch_fp, flags.touch_vec);

	if (flags.exception != FP_UNA_EXCEPTION &&
	    flags.exception != VEC_UNA_EXCEPTION &&
	    flags.exception != VSX_UNA_EXCEPTION) {
		printf("No valid exception specified to test.\n");
		return NULL;
	}

	asm (
		/* Prepare to merge low and high. */
		" mtvsrd 33, %[high_vs0] ;"
		" mtvsrd 34, %[low_vs0] ;"

		/*
		 * Adjust VS0 expected value after an TM failure,
		 * i.e. vs0 = 0x5555555555555555555FFFFFFFFFFFFFFFF
		 */
		" xxmrghd 0, 33, 34 ;"

		/*
		 * Adjust VS32 expected value after an TM failure,
		 * i.e. vs32 = 0x5555555555555555555FFFFFFFFFFFFFFFF
		 */
		" xxmrghd 32, 33, 34 ;"

		/*
		 * Wait an amount of context switches so load_fp and load_vec
		 * overflow and MSR.FP, MSR.VEC, and MSR.VSX become zero (off).
		 */
		" mtctr %[counter] ;"

		/* Decrement CTR branch if CTR non zero. */
		"1: bdnz 1b ;"

		/*
		 * Check if we want to touch FP prior to the test in order
		 * to set MSR.FP = 1 before provoking an unavailable
		 * exception in TM.
		 */
		" cmpldi %[touch_fp], 0 ;"
		" beq no_fp ;"
		" fadd 10, 10, 10 ;"
		"no_fp: ;"

		/*
		 * Check if we want to touch VEC prior to the test in order
		 * to set MSR.VEC = 1 before provoking an unavailable
		 * exception in TM.
		 */
		" cmpldi %[touch_vec], 0 ;"
		" beq no_vec ;"
		" vaddcuw 10, 10, 10 ;"
		"no_vec: ;"

		/*
		 * Perhaps it would be a better idea to do the
		 * compares outside transactional context and simply
		 * duplicate code.
		 */
		" tbegin. ;"
		" beq trans_fail ;"

		/* Do we do FP Unavailable? */
		" cmpldi %[exception], %[ex_fp] ;"
		" bne 1f ;"
		" fadd 10, 10, 10 ;"
		" b done ;"

		/* Do we do VEC Unavailable? */
		"1: cmpldi %[exception], %[ex_vec] ;"
		" bne 2f ;"
		" vaddcuw 10, 10, 10 ;"
		" b done ;"

		/*
		 * Not FP or VEC, therefore VSX. Ensure this
		 * instruction always generates a VSX Unavailable.
		 * ISA 3.0 is tricky here.
		 * (xxmrghd will on ISA 2.07 and ISA 3.0)
		 */
		"2: xxmrghd 10, 10, 10 ;"

		"done: tend. ;"

		"trans_fail: ;"

		/* Give values back to C. */
		" mfvsrd %[high_vs0], 0 ;"
		" xxsldwi 3, 0, 0, 2 ;"
		" mfvsrd %[low_vs0], 3 ;"
		" mfvsrd %[high_vs32], 32 ;"
		" xxsldwi 3, 32, 32, 2 ;"
		" mfvsrd %[low_vs32], 3 ;"

		/* Give CR back to C so that it can check what happened. */
		" mfcr %[cr_] ;"

		: [high_vs0] "+r" (high_vs0),
		  [low_vs0] "+r" (low_vs0),
		  [high_vs32] "=r" (high_vs32),
		  [low_vs32] "=r" (low_vs32),
		  [cr_] "+r" (cr_)
		: [touch_fp] "r" (flags.touch_fp),
		  [touch_vec] "r" (flags.touch_vec),
		  [exception] "r" (flags.exception),
		  [ex_fp] "i" (FP_UNA_EXCEPTION),
		  [ex_vec] "i" (VEC_UNA_EXCEPTION),
		  [ex_vsx] "i" (VSX_UNA_EXCEPTION),
		  [counter] "r" (counter)

		: "cr0", "ctr", "v10", "vs0", "vs10", "vs3", "vs32", "vs33",
		  "vs34", "fr10"
	);

	/*
	 * Check if we were expecting a failure and it did not occur by checking
	 * CR0 state just after we leave the transaction. Either way we check if
	 * vs0 or vs32 got corrupted.
	 */
	if (expecting_failure() && !is_failure(cr_)) {
		printf("\n\tExpecting the transaction to fail, %s",
		       "but it didn't\n\t");
		flags.result++;
	}

	/* Check if we were not expecting a failure and a it occurred. */
	if (!expecting_failure() && is_failure(cr_) &&
	    !failure_is_reschedule()) {
		printf("\n\tUnexpected transaction failure 0x%02lx\n\t",
		       failure_code());
		return (void *) -1;
	}

	/*
	 * Check if TM failed due to the cause we were expecting. 0xda is a
	 * TM_CAUSE_FAC_UNAV cause, otherwise it's an unexpected cause, unless
	 * it was caused by a reschedule.
	 */
	if (is_failure(cr_) && !failure_is_unavailable() &&
	    !failure_is_reschedule()) {
		printf("\n\tUnexpected failure cause 0x%02lx\n\t",
		       failure_code());
		return (void *) -1;
	}

	/* 0x4 is a success and 0xa is a fail. See comment in is_failure(). */
	if (DEBUG)
		printf("CR0: 0x%1lx ", cr_ >> 28);

	/* Check FP (vs0) for the expected value. */
	if (high_vs0 != 0x5555555555555555 || low_vs0 != 0xFFFFFFFFFFFFFFFF) {
		printf("FP corrupted!");
		printf(" high = %#16" PRIx64 " low = %#16" PRIx64 " ",
		       high_vs0, low_vs0);
		flags.result++;
	} else
		printf("FP ok ");

	/* Check VEC (vs32) for the expected value. */
	if (high_vs32 != 0x5555555555555555 || low_vs32 != 0xFFFFFFFFFFFFFFFF) {
		printf("VEC corrupted!");
		printf(" high = %#16" PRIx64 " low = %#16" PRIx64,
		       high_vs32, low_vs32);
		flags.result++;
	} else
		printf("VEC ok");

	putchar('\n');

	return NULL;
}
/* Thread to force context switch */
void *tm_una_pong(void *not_used)
{
/* Wait thread get its name "pong". */
if (DEBUG)
sleep(1);
/* Classed as an interactive-like thread. */
while (1)
sched_yield();
}
/*
 * Create a thread bound per 'attr' that runs the "ping" task with the
 * given MSR.FP/MSR.VEC pre-load combination, retrying a limited number
 * of times when the transaction aborted for an unrelated reason.
 */
void test_fp_vec(int fp, int vec, pthread_attr_t *attr)
{
	int retries = 2;
	void *ret_value;
	pthread_t t0;

	flags.touch_fp = fp;
	flags.touch_vec = vec;

	/*
	 * Without luck it's possible that the transaction is aborted not due to
	 * the unavailable exception caught in the middle as we expect but also,
	 * for instance, due to a context switch or due to a KVM reschedule (if
	 * it's running on a VM). Thus we try a few times before giving up,
	 * checking if the failure cause is the one we expect.
	 */
	do {
		int rc;

		/* Bind to CPU 0, as specified in 'attr'. */
		rc = pthread_create(&t0, attr, tm_una_ping, (void *) &flags);
		if (rc)
			pr_err(rc, "pthread_create()");
		rc = pthread_setname_np(t0, "tm_una_ping");
		if (rc)
			pr_warn(rc, "pthread_setname_np");
		rc = pthread_join(t0, &ret_value);
		if (rc)
			pr_err(rc, "pthread_join");

		retries--;
	} while (ret_value != NULL && retries);

	/*
	 * Bug fix: flag failure based on the final thread result, not on
	 * the retry counter. The previous check 'if (!retries)' also
	 * fired when the last permitted attempt actually succeeded
	 * (ret_value == NULL with retries == 0), turning a pass into a
	 * failure.
	 */
	if (ret_value != NULL) {
		flags.result = 1;

		if (DEBUG)
			printf("All transactions failed unexpectedly\n");
	}
}
/*
 * Spawn the "pong" thread pinned to one online CPU, then for each of
 * the three unavailable exceptions run the "ping" test in all four
 * combinations of MSR.FP/MSR.VEC being pre-loaded. Exits 0 when no
 * combination corrupted FP/VEC state, 1 otherwise.
 */
int tm_unavailable_test(void)
{
	int cpu, rc, exception; /* FP = 0, VEC = 1, VSX = 2 */
	pthread_t t1;
	pthread_attr_t attr;
	cpu_set_t cpuset;

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	cpu = pick_online_cpu();
	FAIL_IF(cpu < 0);

	// Set only one CPU in the mask. Both threads will be bound to that CPU.
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);

	/* Init pthread attribute. */
	rc = pthread_attr_init(&attr);
	if (rc)
		pr_err(rc, "pthread_attr_init()");

	/* Set CPU 0 mask into the pthread attribute. */
	rc = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
	if (rc)
		pr_err(rc, "pthread_attr_setaffinity_np()");

	rc = pthread_create(&t1, &attr /* Bind to CPU 0 */, tm_una_pong, NULL);
	if (rc)
		pr_err(rc, "pthread_create()");

	/* Name it for systemtap convenience */
	/* NOTE(review): the warning message below says pthread_create()
	 * but this call is pthread_setname_np(). */
	rc = pthread_setname_np(t1, "tm_una_pong");
	if (rc)
		pr_warn(rc, "pthread_create()");

	flags.result = 0;

	for (exception = 0; exception < NUM_EXCEPTIONS; exception++) {
		printf("Checking if FP/VEC registers are sane after");

		if (exception == FP_UNA_EXCEPTION)
			printf(" a FP unavailable exception...\n");

		else if (exception == VEC_UNA_EXCEPTION)
			printf(" a VEC unavailable exception...\n");

		else
			printf(" a VSX unavailable exception...\n");

		flags.exception = exception;

		/* All four MSR.FP x MSR.VEC pre-load combinations. */
		test_fp_vec(0, 0, &attr);
		test_fp_vec(1, 0, &attr);
		test_fp_vec(0, 1, &attr);
		test_fp_vec(1, 1, &attr);

	}

	if (flags.result > 0) {
		printf("result: failed!\n");
		exit(1);
	} else {
		printf("result: success\n");
		exit(0);
	}
}
/* Harness entry point; this test is slow, so bump the harness timeout. */
int main(int argc, char **argv)
{
	test_harness_set_timeout(220);
	return test_harness(tm_unavailable_test, "tm_unavailable_test");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-unavailable.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Michael Neuling, IBM Corp.
*
* Original: Michael Neuling 4/12/2013
* Edited: Rashmica Gupta 4/12/2015
*
* See if the altivec state is leaked out of an aborted transaction due to
* kernel vmx copy loops.
*
* When the transaction aborts, VSR values should rollback to the values
* they held before the transaction commenced. Using VSRs while transaction
* is suspended should not affect the checkpointed values.
*
* (1) write A to a VSR
* (2) start transaction
* (3) suspend transaction
* (4) change the VSR to B
* (5) trigger kernel vmx copy loop
* (6) abort transaction
* (7) check that the VSR value is A
*/
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <string.h>
#include <assert.h>
#include "tm.h"
#include "utils.h"
int test_vmxcopy()
{
	long double vecin = 1.3;
	long double vecout;
	unsigned long pgsize = getpagesize();
	int i;
	int fd;
	int size = pgsize*16;
	char tmpfile[] = "/tmp/page_faultXXXXXX";
	char buf[pgsize];
	char *a;
	uint64_t aborted = 0;

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());
	SKIP_IF(!is_ppc64le());

	/*
	 * Back the mapping with a real (then unlinked) temp file so the
	 * first store into it below faults and is serviced by the kernel's
	 * copy path rather than a simple anonymous zero-fill.
	 */
	fd = mkstemp(tmpfile);
	assert(fd >= 0);

	memset(buf, 0, pgsize);
	for (i = 0; i < size; i += pgsize)
		assert(write(fd, buf, pgsize) == pgsize);

	unlink(tmpfile);

	a = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
	assert(a != MAP_FAILED);

	/*
	 * Steps (1)-(7) from the file header: VSR40 gets a known value, is
	 * zeroed while the transaction is suspended, and a store to the
	 * not-yet-faulted mapping makes the kernel run its vmx copy loop
	 * before the transaction is explicitly aborted.  On abort, VSR40
	 * must roll back to the checkpointed value.
	 */
	asm __volatile__(
		"lxvd2x 40,0,%[vecinptr];" /* set 40 to initial value*/
		"tbegin.;"
		"beq 3f;"
		"tsuspend.;"
		"xxlxor 40,40,40;" /* set 40 to 0 */
		"std 5, 0(%[map]);" /* cause kernel vmx copy page */
		"tabort. 0;"
		"tresume.;"
		"tend.;"
		"li %[res], 0;"
		"b 5f;"

		/* Abort handler */
		"3:;"
		"li %[res], 1;"

		"5:;"
		"stxvd2x 40,0,%[vecoutptr];"
		: [res]"=&r"(aborted)
		: [vecinptr]"r"(&vecin),
		  [vecoutptr]"r"(&vecout),
		  [map]"r"(a)
		: "memory", "r0", "r3", "r4", "r5", "r6", "r7");

	/* Only meaningful if the transaction actually aborted */
	if (aborted && (vecin != vecout)){
		printf("FAILED: vector state leaked on abort %f != %f\n",
		       (double)vecin, (double)vecout);
		return 1;
	}

	munmap(a, size);

	close(fd);

	return 0;
}
int main(void)
{
	/* Run under the selftest harness, which handles timeout and skip */
	return test_harness(test_vmxcopy, "tm_vmxcopy");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-vmxcopy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
* Test the kernel's signal frame code.
*
* The kernel sets up two sets of ucontexts if the signal was to be
* delivered while the thread was in a transaction (referred too as
* first and second contexts).
* Expected behaviour is that the checkpointed state is in the user
* context passed to the signal handler (first context). The speculated
* state can be accessed with the uc_link pointer (second context).
*
* The rationale for this is that if TM unaware code (which linked
* against TM libs) installs a signal handler it will not know of the
* speculative nature of the 'live' registers and may infer the wrong
* thing.
*/
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <altivec.h>
#include "utils.h"
#include "tm.h"
#define MAX_ATTEMPT 500000
#define NV_FPU_REGS 18 /* Number of non-volatile FP registers */
#define FPR14 14 /* First non-volatile FP register to check in f14-31 subset */
long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss);
/* Test only non-volatile registers, i.e. 18 fpr registers from f14 to f31 */
static double fps[] = {
/* First context will be set with these values, i.e. non-speculative */
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
/* Second context will be set with these values, i.e. speculative */
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18
};
static sig_atomic_t fail, broken;
/*
 * SIGUSR1 handler: verify that the checkpointed FP state landed in the
 * first (delivered) ucontext and the speculative FP state in the linked
 * second context (ucp->uc_link).  Sets 'broken' on any mismatch.
 */
static void signal_usr1(int signum, siginfo_t *info, void *uc)
{
	int i;
	ucontext_t *ucp = uc;
	ucontext_t *tm_ucp = ucp->uc_link;	/* speculative (2nd) context */

	for (i = 0; i < NV_FPU_REGS; i++) {
		/* Check first context. Print all mismatches. */
		fail = (ucp->uc_mcontext.fp_regs[FPR14 + i] != fps[i]);
		if (fail) {
			broken = 1;
			printf("FPR%d (1st context) == %g instead of %g (expected)\n",
				FPR14 + i, ucp->uc_mcontext.fp_regs[FPR14 + i], fps[i]);
		}
	}

	for (i = 0; i < NV_FPU_REGS; i++) {
		/* Check second context. Print all mismatches. */
		fail = (tm_ucp->uc_mcontext.fp_regs[FPR14 + i] != fps[NV_FPU_REGS + i]);
		if (fail) {
			broken = 1;
			printf("FPR%d (2nd context) == %g instead of %g (expected)\n",
				FPR14 + i, tm_ucp->uc_mcontext.fp_regs[FPR14 + i], fps[NV_FPU_REGS + i]);
		}
	}
}
/*
 * Repeatedly load known FP values into both signal contexts and deliver
 * SIGUSR1 so signal_usr1() can verify them.  Returns non-zero if any
 * attempt observed a mismatch.
 */
static int tm_signal_context_chk_fpu()
{
	struct sigaction act;
	int attempt;
	long rc;
	pid_t pid = getpid();

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	act.sa_sigaction = signal_usr1;
	sigemptyset(&act.sa_mask);
	act.sa_flags = SA_SIGINFO;
	if (sigaction(SIGUSR1, &act, NULL) < 0) {
		perror("sigaction sigusr1");
		exit(1);
	}

	for (attempt = 0; attempt < MAX_ATTEMPT && !broken; attempt++) {
		/*
		 * tm_signal_self_context_load will set both first and second
		 * contexts accordingly to the values passed through non-NULL
		 * array pointers to it, in that case 'fps', and invoke the
		 * signal handler installed for SIGUSR1.
		 */
		rc = tm_signal_self_context_load(pid, NULL, fps, NULL, NULL);
		FAIL_IF(rc != pid);
	}

	return broken;
}
int main(void)
{
	/* Non-zero exit if any checkpointed/speculative FPR mismatched */
	return test_harness(tm_signal_context_chk_fpu, "tm_signal_context_chk_fpu");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Michael Neuling, IBM Corp.
*
* Edited: Rashmica Gupta, Nov 2015
*
* This test does a fork syscall inside a transaction. Basic sniff test
* to see if we can enter the kernel during a transaction.
*/
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "utils.h"
#include "tm.h"
int test_fork(void)
{
	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	/*
	 * Issue the fork syscall from inside a transaction.  This is a
	 * sniff test for kernel entry during a transaction; we only care
	 * that the kernel survives, not about the transaction outcome.
	 */
	asm __volatile__(
		"tbegin.;"
		"blt 1f; "
		"li 0, 2;" /* fork syscall */
		"sc ;"
		"tend.;"
		"1: ;"
		: : : "memory", "r0");
	/* If we reach here, we've passed. Otherwise we've probably crashed
	 * the kernel */

	return 0;
}
int main(int argc, char *argv[])
{
	/* Passing means the kernel survived a syscall inside a transaction */
	return test_harness(test_fork, "tm_fork");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-fork.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2019, Gustavo Romero, Michael Neuling, IBM Corp.
*
* This test will spawn two processes. Both will be attached to the same
* CPU (CPU 0). The child will be in a loop writing to FP register f31 and
* VMX/VEC/Altivec register vr31 a known value, called poison, calling
* sched_yield syscall after to allow the parent to switch on the CPU.
* Parent will set f31 and vr31 to 1 and in a loop will check if f31 and
* vr31 remain 1 as expected until a given timeout (2m). If the issue is
* present child's poison will leak into parent's f31 or vr31 registers,
* otherwise, poison will never leak into parent's f31 and vr31 registers.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <inttypes.h>
#include <sched.h>
#include <sys/types.h>
#include <signal.h>
#include "tm.h"
/*
 * Pin parent and child to one CPU; the child keeps poisoning f31/vr31 and
 * yielding, while the parent repeatedly aborts transactions and checks the
 * checkpointed f31/vr31 still hold the value 1.  Returns non-zero if the
 * child's poison leaked into the parent's registers.
 */
int tm_poison_test(void)
{
	int cpu, pid;
	cpu_set_t cpuset;
	uint64_t poison = 0xdeadbeefc0dec0fe;
	uint64_t unknown = 0;
	bool fail_fp = false;
	bool fail_vr = false;

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	cpu = pick_online_cpu();
	FAIL_IF(cpu < 0);

	// Attach both Child and Parent to the same CPU
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	FAIL_IF(sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0);

	pid = fork();
	/*
	 * Bug fix: fork() failure was previously unchecked.  With pid == -1
	 * the final kill(pid, SIGKILL) would signal every process the user
	 * is allowed to signal.
	 */
	FAIL_IF(pid == -1);
	if (!pid) {
		/**
		 * child
		 */
		while (1) {
			sched_yield();
			asm (
				"mtvsrd 31, %[poison];" // f31 = poison
				"mtvsrd 63, %[poison];" // vr31 = poison

				: : [poison] "r" (poison) : );
		}
	}

	/**
	 * parent
	 */
	asm (
		/*
		 * Set r3, r4, and f31 to known value 1 before entering
		 * in transaction. They won't be written after that.
		 */
		" li 3, 0x1 ;"
		" li 4, 0x1 ;"
		" mtvsrd 31, 4 ;"

		/*
		 * The Time Base (TB) is a 64-bit counter register that is
		 * independent of the CPU clock and which is incremented
		 * at a frequency of 512000000 Hz, so every 1.953125ns.
		 * So it's necessary 120s/0.000000001953125s = 61440000000
		 * increments to get a 2 minutes timeout. Below we set that
		 * value in r5 and then use r6 to track initial TB value,
		 * updating TB values in r7 at every iteration and comparing it
		 * to r6. When r7 (current) - r6 (initial) > 61440000000 we bail
		 * out since for sure we spent already 2 minutes in the loop.
		 * SPR 268 is the TB register.
		 */
		" lis 5, 14 ;"
		" ori 5, 5, 19996 ;"
		" sldi 5, 5, 16 ;" // r5 = 61440000000

		" mfspr 6, 268 ;" // r6 (TB initial)
		"1: mfspr 7, 268 ;" // r7 (TB current)
		" subf 7, 6, 7 ;" // r7 - r6 > 61440000000 ?
		" cmpd 7, 5 ;"
		" bgt 3f ;" // yes, exit

		/*
		 * Main loop to check f31
		 */
		" tbegin. ;" // no, try again
		" beq 1b ;" // restart if no timeout
		" mfvsrd 3, 31 ;" // read f31
		" cmpd 3, 4 ;" // f31 == 1 ?
		" bne 2f ;" // broken :-(
		" tabort. 3 ;" // try another transaction
		"2: tend. ;" // commit transaction
		"3: mr %[unknown], 3 ;" // record r3
		: [unknown] "=r" (unknown)
		:
		: "cr0", "r3", "r4", "r5", "r6", "r7", "vs31"
		);

	/*
	 * On leak 'unknown' will contain 'poison' value from child,
	 * otherwise (no leak) 'unknown' will contain the same value
	 * as r3 before entering in transactional mode, i.e. 0x1.
	 */
	fail_fp = unknown != 0x1;
	if (fail_fp)
		printf("Unknown value %#"PRIx64" leaked into f31!\n", unknown);
	else
		printf("Good, no poison or leaked value into FP registers\n");

	asm (
		/*
		 * Set r3, r4, and vr31 to known value 1 before entering
		 * in transaction. They won't be written after that.
		 */
		" li 3, 0x1 ;"
		" li 4, 0x1 ;"
		" mtvsrd 63, 4 ;"

		" lis 5, 14 ;"
		" ori 5, 5, 19996 ;"
		" sldi 5, 5, 16 ;" // r5 = 61440000000

		" mfspr 6, 268 ;" // r6 (TB initial)
		"1: mfspr 7, 268 ;" // r7 (TB current)
		" subf 7, 6, 7 ;" // r7 - r6 > 61440000000 ?
		" cmpd 7, 5 ;"
		" bgt 3f ;" // yes, exit

		/*
		 * Main loop to check vr31
		 */
		" tbegin. ;" // no, try again
		" beq 1b ;" // restart if no timeout
		" mfvsrd 3, 63 ;" // read vr31
		" cmpd 3, 4 ;" // vr31 == 1 ?
		" bne 2f ;" // broken :-(
		" tabort. 3 ;" // try another transaction
		"2: tend. ;" // commit transaction
		"3: mr %[unknown], 3 ;" // record r3
		: [unknown] "=r" (unknown)
		:
		: "cr0", "r3", "r4", "r5", "r6", "r7", "vs63"
		);

	/*
	 * On leak 'unknown' will contain 'poison' value from child,
	 * otherwise (no leak) 'unknown' will contain the same value
	 * as r3 before entering in transactional mode, i.e. 0x1.
	 */
	fail_vr = unknown != 0x1;
	if (fail_vr)
		printf("Unknown value %#"PRIx64" leaked into vr31!\n", unknown);
	else
		printf("Good, no poison or leaked value into VEC registers\n");

	kill(pid, SIGKILL);

	return (fail_fp | fail_vr);
}
int main(int argc, char *argv[])
{
	/* Test completes in about 4m (two 2-minute Time Base timeouts) */
	test_harness_set_timeout(250);
	return test_harness(tm_poison_test, "tm_poison_test");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-poison.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
* Syscalls can be performed provided the transactions are suspended.
* The exec() class of syscall is unique as a new process is loaded.
*
* It makes little sense for after an exec() call for the previously
* suspended transaction to still exist.
*/
#define _GNU_SOURCE
#include <errno.h>
#include <inttypes.h>
#include <libgen.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "utils.h"
#include "tm.h"
static char *path;
static int test_exec(void)
{
	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	/* Start a transaction and leave it suspended across the exec() */
	asm __volatile__(
		"tbegin.;"
		"blt 1f; "
		"tsuspend.;"
		"1: ;"
		: : : "memory");

	/* Re-exec this same binary; after_exec() runs in the new image */
	execl(path, "tm-exec", "--child", NULL);

	/* Shouldn't get here */
	perror("execl() failed");
	return 1;
}
static int after_exec(void)
{
	/*
	 * Start a fresh transaction in the new process image.  If the
	 * pre-exec suspended transaction leaked through the exec(), this
	 * one nests inside it and the failure code reports nesting.
	 */
	asm __volatile__(
		"tbegin.;"
		"blt 1f;"
		"tsuspend.;"
		"1: ;"
		: : : "memory");

	FAIL_IF(failure_is_nesting());
	return 0;
}
int main(int argc, char *argv[])
{
	/* Remember our own path so test_exec() can re-exec this binary */
	path = argv[0];

	/* Second invocation (post-exec) runs the verification half */
	if (argc > 1 && strcmp(argv[1], "--child") == 0)
		return after_exec();

	return test_harness(test_exec, "tm_exec");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-exec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018, Breno Leitao, Gustavo Romero, IBM Corp.
*
* A test case that creates a signal and starts a suspended transaction
* inside the signal handler.
*
* It returns from the signal handler with the CPU at suspended state, but
* without setting usercontext MSR Transaction State (TS) fields.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include "utils.h"
#include "tm.h"
/*
 * SIGTRAP handler: begin a transaction and suspend it, so the handler
 * returns to the kernel with the CPU in suspended state while the saved
 * user context's MSR TS bits were never updated.
 */
void trap_signal_handler(int signo, siginfo_t *si, void *uc)
{
	ucontext_t *ucp = (ucontext_t *) uc;

	asm("tbegin.; tsuspend.;");

	/* Skip 'trap' instruction if it succeed */
	ucp->uc_mcontext.regs->nip += 4;
}
/*
 * Install the SIGTRAP handler above and raise SIGTRAP, exercising the
 * sigreturn path with a suspended transaction whose TS bits were never
 * written into the user context.
 */
int tm_signal_sigreturn_nt(void)
{
	struct sigaction trap_sa;

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	trap_sa.sa_flags = SA_SIGINFO;
	trap_sa.sa_sigaction = trap_signal_handler;
	/* Bug fix: sa_mask was previously left uninitialized */
	sigemptyset(&trap_sa.sa_mask);

	/* Bug fix: sigaction() failure was previously ignored */
	if (sigaction(SIGTRAP, &trap_sa, NULL) < 0) {
		perror("sigaction sigtrap");
		return EXIT_FAILURE;
	}

	raise(SIGTRAP);

	return EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
	/*
	 * Bug fix: the harness result was previously discarded, so main
	 * always exited 0 and a failing test looked like a pass to the
	 * caller's exit-status check.
	 */
	return test_harness(tm_signal_sigreturn_nt, "tm_signal_sigreturn_nt");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Sam Bobroff, IBM Corp.
*
* Test the kernel's system call code to ensure that a system call
* made from within an active HTM transaction is aborted with the
* correct failure code.
* Conversely, ensure that a system call made from within a
* suspended transaction can succeed.
*/
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/tm.h>
#include <sys/time.h>
#include <stdlib.h>
#include "utils.h"
#include "tm.h"
#ifndef PPC_FEATURE2_SCV
#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */
#endif
extern int getppid_tm_active(void);
extern int getppid_tm_suspended(void);
extern int getppid_scv_tm_active(void);
extern int getppid_scv_tm_suspended(void);
unsigned retries = 0;
#define TEST_DURATION 10 /* seconds */
/*
 * Run getppid() inside a transaction (active or suspended, via sc or scv)
 * and retry transient failures.  Returns the pid on success, -1 when the
 * transaction failed persistently with the syscall failure code, and
 * exits on any other persistent failure or when retries are exhausted.
 */
pid_t getppid_tm(bool scv, bool suspend)
{
	int attempt;
	pid_t pid;

	for (attempt = 0; attempt < TM_RETRIES; attempt++) {
		if (suspend)
			pid = scv ? getppid_scv_tm_suspended()
				  : getppid_tm_suspended();
		else
			pid = scv ? getppid_scv_tm_active()
				  : getppid_tm_active();

		if (pid >= 0)
			return pid;

		if (failure_is_persistent()) {
			if (failure_is_syscall())
				return -1;

			printf("Unexpected persistent transaction failure.\n");
			printf("TEXASR 0x%016lx, TFIAR 0x%016lx.\n",
			       __builtin_get_texasr(), __builtin_get_tfiar());
			exit(-1);
		}

		retries++;
	}

	printf("Exceeded limit of %d temporary transaction failures.\n", TM_RETRIES);
	printf("TEXASR 0x%016lx, TFIAR 0x%016lx.\n",
	       __builtin_get_texasr(), __builtin_get_tfiar());
	exit(-1);
}
int tm_syscall(void)
{
	unsigned count = 0;
	struct timeval end, now;

	SKIP_IF(!have_htm_nosc());
	SKIP_IF(htm_is_synthetic());

	setbuf(stdout, NULL);

	printf("Testing transactional syscalls for %d seconds...\n", TEST_DURATION);

	/* end = wall clock + TEST_DURATION; 'now' then tracks the clock */
	gettimeofday(&end, NULL);
	now.tv_sec = TEST_DURATION;
	now.tv_usec = 0;
	timeradd(&end, &now, &end);

	for (count = 0; timercmp(&now, &end, <); count++) {
		/*
		 * Test a syscall within a suspended transaction and verify
		 * that it succeeds.
		 */
		FAIL_IF(getppid_tm(false, true) == -1); /* Should succeed. */

		/*
		 * Test a syscall within an active transaction and verify that
		 * it fails with the correct failure code.
		 */
		FAIL_IF(getppid_tm(false, false) != -1); /* Should fail... */
		FAIL_IF(!failure_is_persistent()); /* ...persistently... */
		FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */

		/* Now do it all again with scv if it is available. */
		if (have_hwcap2(PPC_FEATURE2_SCV)) {
			FAIL_IF(getppid_tm(true, true) == -1); /* Should succeed. */
			FAIL_IF(getppid_tm(true, false) != -1); /* Should fail... */
			FAIL_IF(!failure_is_persistent()); /* ...persistently... */
			FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */
		}

		gettimeofday(&now, 0);
	}

	printf("%d active and suspended transactions behaved correctly.\n", count);
	printf("(There were %d transaction retries.)\n", retries);

	return 0;
}
int main(void)
{
	/* Runs TEST_DURATION seconds of transactional syscall checks */
	return test_harness(tm_syscall, "tm_syscall");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-syscall.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020, Gustavo Luiz Duarte, IBM Corp.
*
* This test starts a transaction and triggers a signal, forcing a pagefault to
* happen when the kernel signal handling code touches the user signal stack.
*
* In order to avoid pre-faulting the signal stack memory and to force the
* pagefault to happen precisely in the kernel signal handling code, the
* pagefault handling is done in userspace using the userfaultfd facility.
*
* Further pagefaults are triggered by crafting the signal handler's ucontext
* to point to additional memory regions managed by the userfaultfd, so using
* the same mechanism used to avoid pre-faulting the signal stack memory.
*
* On failure (bug is present) kernel crashes or never returns control back to
* userspace. If bug is not present, tests completes almost immediately.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/userfaultfd.h>
#include <poll.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <pthread.h>
#include <signal.h>
#include <errno.h>
#include "tm.h"
#define UF_MEM_SIZE 655360 /* 10 x 64k pages */
/* Memory handled by userfaultfd */
static char *uf_mem;
static size_t uf_mem_offset = 0;
/*
* Data that will be copied into the faulting pages (instead of zero-filled
* pages). This is used to make the test more reliable and avoid segfaulting
* when we return from the signal handler. Since we are making the signal
* handler's ucontext point to newly allocated memory, when that memory is
* paged-in it will contain the expected content.
*/
static char backing_mem[UF_MEM_SIZE];
static size_t pagesize;
/*
* Return a chunk of at least 'size' bytes of memory that will be handled by
* userfaultfd. If 'backing_data' is not NULL, its content will be save to
* 'backing_mem' and then copied into the faulting pages when the page fault
* is handled.
*/
/*
 * Return a chunk of at least 'size' bytes of memory that will be handled by
 * userfaultfd. If 'backing_data' is not NULL, its content will be saved to
 * 'backing_mem' and then copied into the faulting pages when the page fault
 * is handled.
 */
void *get_uf_mem(size_t size, void *backing_data)
{
	void *chunk;

	if (uf_mem_offset + size > UF_MEM_SIZE) {
		fprintf(stderr, "Requesting more uf_mem than expected!\n");
		exit(EXIT_FAILURE);
	}

	chunk = uf_mem + uf_mem_offset;

	/* Stash the data the fault handler will copy into the page */
	if (backing_data)
		memcpy(backing_mem + uf_mem_offset, backing_data, size);

	/* Reserve the chunk and round the offset up to the next page */
	uf_mem_offset = (uf_mem_offset + size + pagesize - 1) & ~(pagesize - 1);

	return chunk;
}
/*
 * Dedicated thread servicing userfaultfd events: for each missing-page
 * fault on uf_mem, resolve it by copying in the matching slice of
 * backing_mem (instead of a zero page).  Never returns; exits the whole
 * process on any error.
 */
void *fault_handler_thread(void *arg)
{
	struct uffd_msg msg; /* Data read from userfaultfd */
	long uffd; /* userfaultfd file descriptor */
	struct uffdio_copy uffdio_copy;
	struct pollfd pollfd;
	ssize_t nread, offset;

	uffd = (long) arg;

	for (;;) {
		/* Block until a fault event is pending on the uffd */
		pollfd.fd = uffd;
		pollfd.events = POLLIN;
		if (poll(&pollfd, 1, -1) == -1) {
			perror("poll() failed");
			exit(EXIT_FAILURE);
		}

		nread = read(uffd, &msg, sizeof(msg));
		if (nread == 0) {
			fprintf(stderr, "read(): EOF on userfaultfd\n");
			exit(EXIT_FAILURE);
		}

		if (nread == -1) {
			perror("read() failed");
			exit(EXIT_FAILURE);
		}

		/* We expect only one kind of event */
		if (msg.event != UFFD_EVENT_PAGEFAULT) {
			fprintf(stderr, "Unexpected event on userfaultfd\n");
			exit(EXIT_FAILURE);
		}

		/*
		 * We need to handle page faults in units of pages(!).
		 * So, round faulting address down to page boundary.
		 */
		uffdio_copy.dst = msg.arg.pagefault.address & ~(pagesize-1);

		/* Source the page content from the saved backing data */
		offset = (char *) uffdio_copy.dst - uf_mem;
		uffdio_copy.src = (unsigned long) &backing_mem[offset];

		uffdio_copy.len = pagesize;
		uffdio_copy.mode = 0;
		uffdio_copy.copy = 0;
		if (ioctl(uffd, UFFDIO_COPY, &uffdio_copy) == -1) {
			perror("ioctl-UFFDIO_COPY failed");
			exit(EXIT_FAILURE);
		}
	}
}
/*
 * Create the userfaultfd object, map UF_MEM_SIZE of demand-paged anonymous
 * memory (uf_mem), register it for missing-page tracking, and start the
 * fault-handler thread.  Exits the process on any setup error.
 */
void setup_uf_mem(void)
{
	long uffd; /* userfaultfd file descriptor */
	pthread_t thr;
	struct uffdio_api uffdio_api;
	struct uffdio_register uffdio_register;
	int ret;

	pagesize = sysconf(_SC_PAGE_SIZE);

	/* Create and enable userfaultfd object */
	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd == -1) {
		perror("userfaultfd() failed");
		exit(EXIT_FAILURE);
	}

	/* Handshake on the API version before any other uffd ioctl */
	uffdio_api.api = UFFD_API;
	uffdio_api.features = 0;
	if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) {
		perror("ioctl-UFFDIO_API failed");
		exit(EXIT_FAILURE);
	}

	/*
	 * Create a private anonymous mapping. The memory will be demand-zero
	 * paged, that is, not yet allocated. When we actually touch the memory
	 * the related page will be allocated via the userfaultfd mechanism.
	 */
	uf_mem = mmap(NULL, UF_MEM_SIZE, PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (uf_mem == MAP_FAILED) {
		perror("mmap() failed");
		exit(EXIT_FAILURE);
	}

	/*
	 * Register the memory range of the mapping we've just mapped to be
	 * handled by the userfaultfd object. In 'mode' we request to track
	 * missing pages (i.e. pages that have not yet been faulted-in).
	 */
	uffdio_register.range.start = (unsigned long) uf_mem;
	uffdio_register.range.len = UF_MEM_SIZE;
	uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) {
		perror("ioctl-UFFDIO_REGISTER");
		exit(EXIT_FAILURE);
	}

	/* Create a thread that will process the userfaultfd events */
	ret = pthread_create(&thr, NULL, fault_handler_thread, (void *) uffd);
	if (ret != 0) {
		fprintf(stderr, "pthread_create(): Error. Returned %d\n", ret);
		exit(EXIT_FAILURE);
	}
}
/*
* Assumption: the signal was delivered while userspace was in transactional or
* suspended state, i.e. uc->uc_link != NULL.
*/
void signal_handler(int signo, siginfo_t *si, void *uc)
{
	ucontext_t *ucp = uc;

	/* Skip 'trap' after returning, otherwise we get a SIGTRAP again */
	ucp->uc_link->uc_mcontext.regs->nip += 4;

	/*
	 * Redirect pieces of both ucontexts into fresh userfaultfd-managed
	 * pages so the kernel page-faults again when it touches them on
	 * sigreturn.
	 */
	ucp->uc_mcontext.v_regs =
		get_uf_mem(sizeof(elf_vrreg_t), ucp->uc_mcontext.v_regs);

	ucp->uc_link->uc_mcontext.v_regs =
		get_uf_mem(sizeof(elf_vrreg_t), ucp->uc_link->uc_mcontext.v_regs);

	ucp->uc_link = get_uf_mem(sizeof(ucontext_t), ucp->uc_link);
}
/* Probe whether this kernel supports the userfaultfd syscall at all */
bool have_userfaultfd(void)
{
	long ret;

	errno = 0;
	/* flags = -1 is invalid; supported kernels answer EINVAL, not ENOSYS */
	ret = syscall(__NR_userfaultfd, -1);

	if (ret == 0)
		return true;

	return errno != ENOSYS;
}
int tm_signal_pagefault(void)
{
	struct sigaction sa;
	stack_t ss;

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());
	SKIP_IF(!have_userfaultfd());

	setup_uf_mem();

	/*
	 * Set an alternative stack that will generate a page fault when the
	 * signal is raised. The page fault will be treated via userfaultfd,
	 * i.e. via fault_handler_thread.
	 */
	ss.ss_sp = get_uf_mem(SIGSTKSZ, NULL);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) == -1) {
		perror("sigaltstack() failed");
		exit(EXIT_FAILURE);
	}

	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
	sa.sa_sigaction = signal_handler;
	/* NOTE(review): sa.sa_mask is left uninitialized; sigemptyset()
	 * before sigaction() would be the safe idiom here. */
	if (sigaction(SIGTRAP, &sa, NULL) == -1) {
		perror("sigaction() failed");
		exit(EXIT_FAILURE);
	}

	/* Trigger a SIGTRAP in transactional state */
	asm __volatile__(
		"tbegin.;"
		"beq 1f;"
		"trap;"
		"1: ;"
		: : : "memory");

	/* Trigger a SIGTRAP in suspended state */
	asm __volatile__(
		"tbegin.;"
		"beq 1f;"
		"tsuspend.;"
		"trap;"
		"tresume.;"
		"1: ;"
		: : : "memory");

	return EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
	/*
	 * Depending on kernel config, the TM Bad Thing might not result in a
	 * crash, instead the kernel never returns control back to userspace, so
	 * set a tight timeout. If the test passes it completes almost
	 * immediately; hitting the timeout therefore indicates the bug.
	 */
	test_harness_set_timeout(2);
	return test_harness(tm_signal_pagefault, "tm_signal_pagefault");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2017, Michael Neuling, IBM Corp.
* Original: Breno Leitao <[email protected]> &
* Gustavo Bueno Romero <[email protected]>
* Edited: Michael Neuling
*
* Force VMX unavailable during a transaction and see if it corrupts
* the checkpointed VMX register state after the abort.
*/
#include <inttypes.h>
#include <htmintrin.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>
#include "tm.h"
#include "utils.h"
int passed;
/*
 * Per-thread worker: seed VMX0 with a known value, spin so the kernel has
 * a chance to disable VMX for this thread, then touch VMX inside a
 * transaction to force a VMX-unavailable abort and verify the
 * checkpointed VMX0 value survived.  Clears 'passed' on corruption.
 */
void *worker(void *unused)
{
	__int128 vmx0;
	uint64_t texasr;

	asm goto (
		"li 3, 1;" /* Stick non-zero value in VMX0 */
		"std 3, 0(%[vmx0_ptr]);"
		"lvx 0, 0, %[vmx0_ptr];"

		/* Wait here a bit so we get scheduled out 255 times */
		"lis 3, 0x3fff;"
		"1: ;"
		"addi 3, 3, -1;"
		"cmpdi 3, 0;"
		"bne 1b;"

		/* Kernel will hopefully turn VMX off now */

		"tbegin. ;"
		"beq failure;"

		/* Cause VMX unavail. Any VMX instruction */
		"vaddcuw 0,0,0;"

		"tend. ;"
		"b %l[success];"

		/* Check VMX0 sanity after abort */
		"failure: ;"
		"lvx 1, 0, %[vmx0_ptr];"
		"vcmpequb. 2, 0, 1;"
		"bc 4, 24, %l[value_mismatch];"
		"b %l[value_match];"
		:
		: [vmx0_ptr] "r"(&vmx0)
		: "r3"
		: success, value_match, value_mismatch
		);

	/* HTM aborted and VMX0 is corrupted */
value_mismatch:
	texasr = __builtin_get_texasr();

	printf("\n\n==============\n\n");
	printf("Failure with error: %lx\n", _TEXASR_FAILURE_CODE(texasr));
	printf("Summary error : %lx\n", _TEXASR_FAILURE_SUMMARY(texasr));
	printf("TFIAR exact : %lx\n\n", _TEXASR_TFIAR_EXACT(texasr));

	passed = 0;
	return NULL;

	/* HTM aborted but VMX0 is correct */
value_match:
//	printf("!");
	return NULL;

success:
//	printf(".");
	return NULL;
}
/*
 * Oversubscribe the CPUs with worker threads so context switches (and
 * thus VMX-unavailable aborts in transactions) happen frequently.
 * Returns failure if any worker saw a corrupted checkpointed VMX0.
 */
int tm_vmx_unavail_test()
{
	int threads;
	pthread_t *thread;

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	passed = 1;

	threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
	/* Bug fix: sysconf() can fail with -1, which would wrap the
	 * malloc size below to a huge value. */
	FAIL_IF(threads < 1);

	thread = malloc(sizeof(pthread_t) * threads);
	if (!thread)
		return EXIT_FAILURE;

	for (int i = 0; i < threads; i++)
		/* Bug fix: a previously-unchecked failed create would
		 * leave thread[i] uninitialized, making the join below
		 * undefined behaviour. */
		FAIL_IF(pthread_create(&thread[i], NULL, &worker, NULL) != 0);
	for (int i = 0; i < threads; i++)
		FAIL_IF(pthread_join(thread[i], NULL) != 0);

	free(thread);

	return passed ? EXIT_SUCCESS : EXIT_FAILURE;
}
int main(int argc, char **argv)
{
	/* Fails if any worker observed a corrupted checkpointed VMX0 */
	return test_harness(tm_vmx_unavail_test, "tm_vmx_unavail_test");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2017, Gustavo Romero, IBM Corp.
*
* Check if thread endianness is flipped inadvertently to BE on trap
* caught in TM whilst MSR.FP and MSR.VEC are zero (i.e. just after
* load_fp and load_vec overflowed).
*
* The issue can be checked on LE machines simply by zeroing load_fp
* and load_vec and then causing a trap in TM. Since the endianness
* changes to BE on return from the signal handler, 'nop' is
* thread as an illegal instruction in following sequence:
* tbegin.
* beq 1f
* trap
* tend.
* 1: nop
*
* However, although the issue is also present on BE machines, it's a
* bit trickier to check it on BE machines because MSR.LE bit is set
* to zero which determines a BE endianness that is the native
* endianness on BE machines, so nothing notably critical happens,
* i.e. no illegal instruction is observed immediately after returning
* from the signal handler (as it happens on LE machines). Thus to test
* it on BE machines LE endianness is forced after a first trap and then
* the endianness is verified on subsequent traps to determine if the
* endianness "flipped back" to the native endianness (BE).
*/
#define _GNU_SOURCE
#include <error.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <htmintrin.h>
#include <inttypes.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdbool.h>
#include "tm.h"
#include "utils.h"
#define pr_error(error_code, format, ...) \
error_at_line(1, error_code, __FILE__, __LINE__, format, ##__VA_ARGS__)
#define MSR_LE 1UL
#define LE 1UL
pthread_t t0_ping;
pthread_t t1_pong;
int exit_from_pong;
int trap_event;
int le;
bool success;
/*
 * SIGTRAP handler driving the state machine described in the file header:
 * depending on machine endianness and which trap event this is, it either
 * observes the MSR.LE bit or forces it, then steers NIP to the 'success'
 * or 'failure' label in ping()'s asm block.
 */
void trap_signal_handler(int signo, siginfo_t *si, void *uc)
{
	ucontext_t *ucp = uc;
	uint64_t thread_endianness;

	/* Get thread endianness: extract bit LE from MSR */
	thread_endianness = MSR_LE & ucp->uc_mcontext.gp_regs[PT_MSR];

	/*
	 * Little-Endian Machine
	 */

	if (le) {
		/* First trap event */
		if (trap_event == 0) {
			/* Do nothing. Since it is returning from this trap
			 * event that endianness is flipped by the bug, so just
			 * let the process return from the signal handler and
			 * check on the second trap event if endianness is
			 * flipped or not.
			 */
		}
		/* Second trap event */
		else if (trap_event == 1) {
			/*
			 * Since trap was caught in TM on first trap event, if
			 * endianness was still LE (not flipped inadvertently)
			 * after returning from the signal handler instruction
			 * (1) is executed (basically a 'nop'), as it's located
			 * at address of tbegin. +4 (rollback addr). As (1) on
			 * LE endianness does in effect nothing, instruction (2)
			 * is then executed again as 'trap', generating a second
			 * trap event (note that in that case 'trap' is caught
			 * not in transactional mode). On the other hand, if
			 * after the return from the signal handler the
			 * endianness inadvertently flipped, instruction (1) is
			 * treated as a branch instruction, i.e. b .+8, hence
			 * instruction (3) and (4) are executed (tbegin.; trap;)
			 * and we get similarly on the trap signal handler, but
			 * now in TM mode. Either way, it's now possible to
			 * check the MSR LE bit once in the trap handler to
			 * verify if endianness was flipped or not after the
			 * return from the second trap event. If endianness is
			 * flipped, the bug is present. Finally, getting a trap
			 * in TM mode or not is just worth noting because it
			 * affects the math to determine the offset added to
			 * the NIP on return: the NIP for a trap caught in TM
			 * is the rollback address, i.e. the next instruction
			 * after 'tbegin.', whilst the NIP for a trap caught in
			 * non-transactional mode is the very same address of
			 * the 'trap' instruction that generated the trap
			 * event.
			 */

			if (thread_endianness == LE) {
				/* Go to 'success', i.e. instruction (6) */
				ucp->uc_mcontext.gp_regs[PT_NIP] += 16;
			} else {
				/*
				 * Thread endianness is BE, so it flipped
				 * inadvertently. Thus we flip back to LE and
				 * set NIP to go to 'failure', instruction (5).
				 */
				ucp->uc_mcontext.gp_regs[PT_MSR] |= 1UL;
				ucp->uc_mcontext.gp_regs[PT_NIP] += 4;
			}
		}
	}

	/*
	 * Big-Endian Machine
	 */

	else {
		/* First trap event */
		if (trap_event == 0) {
			/*
			 * Force thread endianness to be LE. Instructions (1),
			 * (3), and (4) will be executed, generating a second
			 * trap in TM mode.
			 */
			ucp->uc_mcontext.gp_regs[PT_MSR] |= 1UL;
		}
		/* Second trap event */
		else if (trap_event == 1) {
			/*
			 * Do nothing. If bug is present on return from this
			 * second trap event endianness will flip back "automat-
			 * ically" to BE, otherwise thread endianness will
			 * continue to be LE, just as it was set above.
			 */
		}
		/* A third trap event */
		else {
			/*
			 * Once here it means that after returning from the sec-
			 * ond trap event instruction (4) (trap) was executed
			 * as LE, generating a third trap event. In that case
			 * endianness is still LE as set on return from the
			 * first trap event, hence no bug. Otherwise, bug
			 * flipped back to BE on return from the second trap
			 * event and instruction (4) was executed as 'tdi' (so
			 * basically a 'nop') and branch to 'failure' in
			 * instruction (5) was taken to indicate failure and we
			 * never get here.
			 */

			/*
			 * Flip back to BE and go to instruction (6), i.e. go to
			 * 'success'.
			 */
			ucp->uc_mcontext.gp_regs[PT_MSR] &= ~1UL;
			ucp->uc_mcontext.gp_regs[PT_NIP] += 8;
		}
	}

	trap_event++;
}
void usr1_signal_handler(int signo, siginfo_t *si, void *not_used)
{
	/* Got a USR1 signal from ping(), so just tell pong() to exit */
	/* NOTE(review): exit_from_pong is a plain int; volatile sig_atomic_t
	 * would be the conventional type for a flag set from a handler. */
	exit_from_pong = 1;
}
/*
 * Main test thread: after letting enough context switches accumulate, it
 * executes the carefully crafted instruction sequence whose outcome
 * (steered by trap_signal_handler) lands on 'success' or 'failure'.
 */
void *ping(void *not_used)
{
	uint64_t i;

	trap_event = 0;

	/*
	 * Wait an amount of context switches so load_fp and load_vec overflows
	 * and MSR_[FP|VEC|V] is 0.
	 */
	for (i = 0; i < 1024*1024*512; i++)
		;

	asm goto(
		/*
		 * [NA] means "Native Endianness", i.e. it tells how a
		 * instruction is executed on machine's native endianness (in
		 * other words, native endianness matches kernel endianness).
		 * [OP] means "Opposite Endianness", i.e. on a BE machine, it
		 * tells how a instruction is executed as a LE instruction; con-
		 * versely, on a LE machine, it tells how a instruction is
		 * executed as a BE instruction. When [NA] is omitted, it means
		 * that the native interpretation of a given instruction is not
		 * relevant for the test. Likewise when [OP] is omitted.
		 */

		" tbegin. ;" /* (0) tbegin. [NA] */
		" tdi 0, 0, 0x48;" /* (1) nop [NA]; b (3) [OP] */
		" trap ;" /* (2) trap [NA] */
		".long 0x1D05007C;" /* (3) tbegin. [OP] */
		".long 0x0800E07F;" /* (4) trap [OP]; nop [NA] */
		" b %l[failure] ;" /* (5) b [NA]; MSR.LE flipped (bug) */
		" b %l[success] ;" /* (6) b [NA]; MSR.LE did not flip (ok)*/

		: : : : failure, success);

failure:
	success = false;
	goto exit_from_ping;

success:
	success = true;

exit_from_ping:
	/* Tell pong() to exit before leaving */
	pthread_kill(t1_pong, SIGUSR1);
	return NULL;
}
/* Companion thread: yields in a loop to force context switches on ping() */
void *pong(void *not_used)
{
	while (!exit_from_pong)
		/*
		 * Induce context switches on ping() thread
		 * until ping() finishes its job and signs
		 * to exit from this loop.
		 */
		sched_yield();

	return NULL;
}
/*
 * Test entry point: check whether the endianness (MSR.LE) is flipped
 * inadvertently when a trap is taken in transactional state. ping()
 * and pong() are bound to a single CPU to force frequent context
 * switches on ping(), which runs the actual check and records the
 * result in the global 'success'.
 *
 * Fix: sa_mask of both sigaction structs was previously left
 * uninitialized (stack garbage), which could block an arbitrary set of
 * signals while the handlers run. Initialize it with sigemptyset().
 */
int tm_trap_test(void)
{
	uint16_t k = 1;
	int cpu, rc;

	pthread_attr_t attr;
	cpu_set_t cpuset;

	struct sigaction trap_sa;

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	sigemptyset(&trap_sa.sa_mask);
	trap_sa.sa_flags = SA_SIGINFO;
	trap_sa.sa_sigaction = trap_signal_handler;
	sigaction(SIGTRAP, &trap_sa, NULL);

	struct sigaction usr1_sa;

	sigemptyset(&usr1_sa.sa_mask);
	usr1_sa.sa_flags = SA_SIGINFO;
	usr1_sa.sa_sigaction = usr1_signal_handler;
	sigaction(SIGUSR1, &usr1_sa, NULL);

	cpu = pick_online_cpu();
	FAIL_IF(cpu < 0);

	// Set only one CPU in the mask. Both threads will be bound to that CPU.
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);

	/* Init pthread attribute */
	rc = pthread_attr_init(&attr);
	if (rc)
		pr_error(rc, "pthread_attr_init()");

	/*
	 * Bind thread ping() and pong() both to CPU 0 so they ping-pong and
	 * speed up context switches on ping() thread, speeding up the load_fp
	 * and load_vec overflow.
	 */
	rc = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
	if (rc)
		pr_error(rc, "pthread_attr_setaffinity()");

	/* Figure out the machine endianness */
	le = (int) *(uint8_t *)&k;

	printf("%s machine detected. Checking if endianness flips %s",
		le ? "Little-Endian" : "Big-Endian",
		"inadvertently on trap in TM... ");

	rc = fflush(0);
	if (rc)
		pr_error(rc, "fflush()");

	/* Launch ping() */
	rc = pthread_create(&t0_ping, &attr, ping, NULL);
	if (rc)
		pr_error(rc, "pthread_create()");

	exit_from_pong = 0;

	/* Launch pong() */
	rc = pthread_create(&t1_pong, &attr, pong, NULL);
	if (rc)
		pr_error(rc, "pthread_create()");

	rc = pthread_join(t0_ping, NULL);
	if (rc)
		pr_error(rc, "pthread_join()");

	rc = pthread_join(t1_pong, NULL);
	if (rc)
		pr_error(rc, "pthread_join()");

	if (success) {
		printf("no.\n"); /* no, endianness did not flip inadvertently */
		return EXIT_SUCCESS;
	}

	printf("yes!\n"); /* yes, endianness did flip inadvertently */
	return EXIT_FAILURE;
}
/* Run the TM trap endianness test under the powerpc selftest harness. */
int main(int argc, char **argv)
{
	return test_harness(tm_trap_test, "tm_trap_test");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-trap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Michael Neuling, IBM Corp.
*
* Original: Michael Neuling 3/4/2014
* Modified: Rashmica Gupta 8/12/2015
*
* Check if any of the Transaction Memory SPRs get corrupted.
* - TFIAR - stores address of location of transaction failure
* - TFHAR - stores address of software failure handler (if transaction
* fails)
* - TEXASR - lots of info about the transacion(s)
*
* (1) create more threads than cpus
* (2) in each thread:
* (a) set TFIAR and TFHAR a unique value
* (b) loop for awhile, continually checking to see if
* either register has been corrupted.
*
* (3) Loop:
* (a) begin transaction
* (b) abort transaction
* (c) check TEXASR to see if FS has been corrupted
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
#include <string.h>
#include "utils.h"
#include "tm.h"
int num_loops = 1000000;	/* iterations per worker; overridable via argv[1] */
int passed = 1;			/* cleared by any worker that observes SPR corruption */
/*
 * Worker: derive unique, thread-specific values from the pthread
 * argument 'in', write them to TFIAR and TFHAR, then repeatedly read
 * both registers back, clearing the global 'passed' on the first
 * mismatch (i.e. if either SPR got corrupted across context switches).
 */
void tfiar_tfhar(void *in)
{
	unsigned long tfhar, tfhar_rd, tfiar, tfiar_rd;
	int i;
	/* TFIAR: Last bit has to be high so userspace can read register */
	tfiar = ((unsigned long)in) + 1;
	tfiar += 2;
	mtspr(SPRN_TFIAR, tfiar);
	/* TFHAR: Last two bits are reserved */
	tfhar = ((unsigned long)in);
	tfhar &= ~0x3UL;
	tfhar += 4;
	mtspr(SPRN_TFHAR, tfhar);
	for (i = 0; i < num_loops; i++)	{
		tfhar_rd = mfspr(SPRN_TFHAR);
		tfiar_rd = mfspr(SPRN_TFIAR);
		if ( (tfhar != tfhar_rd) || (tfiar != tfiar_rd) ) {
			/* Register contents changed behind our back */
			passed = 0;
			return;
		}
	}
	return;
}
/*
 * Worker: repeatedly start and abort a transaction, then check that the
 * failure-summary (FS) bit is set in TEXASR as it must be after an
 * abort. Clears the global 'passed' if FS is ever found clear,
 * indicating TEXASR corruption.
 */
void texasr(void *in)
{
	unsigned long i;
	uint64_t result = 0;
	for (i = 0; i < num_loops; i++) {
		asm __volatile__(
			"tbegin.;"
			"beq    3f ;"
			"tabort. 0 ;"
			"tend.;"
			/* Abort handler */
			"3: ;"
			::: "memory");
		/* Check the TEXASR */
		result = mfspr(SPRN_TEXASR);
		if ((result & TEXASR_FS) == 0) {
			passed = 0;
			return;
		}
	}
	return;
}
/*
 * Spawn more threads than online CPUs (10x) to provoke frequent context
 * switching: even-indexed threads hammer and verify TFIAR/TFHAR, odd-
 * indexed threads abort transactions and verify TEXASR's FS bit.
 *
 * Returns 0 if no thread observed SPR corruption, non-zero otherwise.
 *
 * Fix: declare with a proper (void) prototype instead of an old-style
 * unprototyped parameter list.
 */
int test_tmspr(void)
{
	pthread_t	*thread;
	int		thread_num;
	unsigned long	i;

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	/* To cause some context switching */
	thread_num = 10 * sysconf(_SC_NPROCESSORS_ONLN);

	thread = malloc(thread_num * sizeof(pthread_t));
	if (thread == NULL)
		return EXIT_FAILURE;

	/* Test TFIAR and TFHAR */
	for (i = 0; i < thread_num; i += 2) {
		if (pthread_create(&thread[i], NULL, (void *)tfiar_tfhar,
				(void *)i))
			return EXIT_FAILURE;
	}
	/* Test TEXASR */
	for (i = 1; i < thread_num; i += 2) {
		if (pthread_create(&thread[i], NULL, (void *)texasr, (void *)i))
			return EXIT_FAILURE;
	}

	for (i = 0; i < thread_num; i++) {
		if (pthread_join(thread[i], NULL) != 0)
			return EXIT_FAILURE;
	}

	free(thread);

	/* 'passed' is cleared by any worker that saw corruption */
	return passed ? 0 : 1;
}
/*
 * Entry point: optional argv[1] overrides the per-worker iteration
 * count ("-h" prints usage), then the test runs under the harness.
 */
int main(int argc, char *argv[])
{
	if (argc > 1) {
		if (strcmp(argv[1], "-h") == 0) {
			printf("Syntax:\t [<num loops>]\n");
			return 0;
		} else {
			num_loops = atoi(argv[1]);
		}
	}
	return test_harness(test_tmspr, "tm_tmspr");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-tmspr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018, Breno Leitao, Gustavo Romero, IBM Corp.
*
* This test raises a SIGUSR1 signal, and toggle the MSR[TS]
* fields at the signal handler. With MSR[TS] being set, the kernel will
* force a recheckpoint, which may cause a segfault when returning to
* user space. Since the test needs to re-run, the segfault needs to be
* caught and handled.
*
* In order to continue the test even after a segfault, the context is
* saved prior to the signal being raised, and it is restored when there is
* a segmentation fault. This happens for COUNT_MAX times.
*
* This test never fails (as returning EXIT_FAILURE). It either succeeds,
* or crash the kernel (on a buggy kernel).
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>
#include <sys/mman.h>
#include "tm.h"
#include "utils.h"
#include "reg.h"
#define COUNT_MAX 5000 /* Number of interactions */
/*
* This test only runs on 64 bits system. Unsetting MSR_TS_S to avoid
* compilation issue on 32 bits system. There is no side effect, since the
* whole test will be skipped if it is not running on 64 bits system.
*/
#ifndef __powerpc64__
#undef MSR_TS_S
#define MSR_TS_S 0
#endif
/* Setting contexts because the test will crash and we want to recover */
ucontext_t init_context;
/* count is changed in the signal handler, so it must be volatile */
static volatile int count;
/*
 * SIGUSR1 handler: allocate a second ucontext reachable via uc_link,
 * copy the current machine context into it, then force MSR[TS] on in
 * the returned-to context so the kernel performs a recheckpoint on
 * sigreturn. On buggy kernels this can crash the kernel; otherwise the
 * process usually segfaults (handled by seg_signal_handler) and the
 * test retries.
 *
 * Fixes: pass fd = -1 to mmap() as required for MAP_ANONYMOUS mappings
 * (fd is ignored on Linux, but portable code must use -1), and compare
 * against MAP_FAILED instead of a raw (void *)-1.
 */
void usr_signal_handler(int signo, siginfo_t *si, void *uc)
{
	ucontext_t *ucp = uc;
	int ret;

	/*
	 * Allocating memory in a signal handler, and never freeing it on
	 * purpose, forcing the heap increase, so, the memory leak is what
	 * we want here.
	 */
	ucp->uc_link = mmap(NULL, sizeof(ucontext_t),
			    PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ucp->uc_link == MAP_FAILED) {
		perror("Mmap failed");
		exit(-1);
	}

	/* Forcing the page to be allocated in a page fault */
	ret = madvise(ucp->uc_link, sizeof(ucontext_t), MADV_DONTNEED);
	if (ret) {
		perror("madvise failed");
		exit(-1);
	}

	memcpy(&ucp->uc_link->uc_mcontext, &ucp->uc_mcontext,
		sizeof(ucp->uc_mcontext));

	/* Forcing to enable MSR[TM] */
	UCONTEXT_MSR(ucp) |= MSR_TS_S;

	/*
	 * A fork inside a signal handler seems to be more efficient than a
	 * fork() prior to the signal being raised.
	 */
	if (fork() == 0) {
		/*
		 * Both child and parent will return, but, child returns
		 * with count set so it will exit in the next segfault.
		 * Parent will continue to loop.
		 */
		count = COUNT_MAX;
	}

	/*
	 * If the change above does not hit the bug, it will cause a
	 * segmentation fault, since the ck structures are NULL.
	 */
}
/*
 * SIGSEGV handler: count one more iteration and restart the test by
 * jumping back to the context saved in tm_trap_test() via getcontext().
 */
void seg_signal_handler(int signo, siginfo_t *si, void *uc)
{
	count++;
	/* Reexecute the test */
	setcontext(&init_context);
}
/*
 * Core loop: save the initial context (seg_signal_handler() returns
 * here after each segfault), then repeatedly install a freshly mmap'ed
 * alternate signal stack, register the SIGUSR1 and SIGSEGV handlers,
 * and raise SIGUSR1 to trigger the forced-recheckpoint path. Runs for
 * COUNT_MAX iterations.
 *
 * Fixes: pass fd = -1 to mmap() as required for MAP_ANONYMOUS mappings
 * (fd is ignored on Linux, but portable code must use -1), and compare
 * against MAP_FAILED instead of a raw (void *)-1.
 */
void tm_trap_test(void)
{
	struct sigaction usr_sa, seg_sa;
	stack_t ss;

	usr_sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
	usr_sa.sa_sigaction = usr_signal_handler;

	seg_sa.sa_flags = SA_SIGINFO;
	seg_sa.sa_sigaction = seg_signal_handler;

	/*
	 * Set initial context. Will get back here from
	 * seg_signal_handler()
	 */
	getcontext(&init_context);

	while (count < COUNT_MAX) {
		/* Allocated an alternative signal stack area */
		ss.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		ss.ss_size = SIGSTKSZ;
		ss.ss_flags = 0;

		if (ss.ss_sp == MAP_FAILED) {
			perror("mmap error\n");
			exit(-1);
		}

		/* Force the allocation through a page fault */
		if (madvise(ss.ss_sp, SIGSTKSZ, MADV_DONTNEED)) {
			perror("madvise\n");
			exit(-1);
		}

		/*
		 * Setting an alternative stack to generate a page fault when
		 * the signal is raised.
		 */
		if (sigaltstack(&ss, NULL)) {
			perror("sigaltstack\n");
			exit(-1);
		}

		/* The signal handler will enable MSR_TS */
		sigaction(SIGUSR1, &usr_sa, NULL);
		/* If it does not crash, it might segfault, avoid it to retest */
		sigaction(SIGSEGV, &seg_sa, NULL);

		raise(SIGUSR1);
		count++;
	}
}
/*
 * Harness-visible test function: skips unless the CPU supports HTM and
 * the process is 64-bit LE, then runs the forced-recheckpoint loop.
 * Always returns EXIT_SUCCESS (see file header: this test either
 * succeeds or crashes a buggy kernel).
 */
int tm_signal_context_force_tm(void)
{
	SKIP_IF(!have_htm());
	/*
	 * Skipping if not running on 64 bits system, since I think it is
	 * not possible to set mcontext's [MSR] with TS, due to it being 32
	 * bits.
	 */
	SKIP_IF(!is_ppc64le());
	tm_trap_test();
	return EXIT_SUCCESS;
}
/*
 * Entry point. Propagate the harness result as the process exit
 * status: previously the return value of test_harness() was discarded
 * and main() fell off the end, so the binary always exited 0 and a
 * failure/skip could never be reported through the exit code.
 */
int main(int argc, char **argv)
{
	return test_harness(tm_signal_context_force_tm, "tm_signal_context_force_tm");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Michael Neuling, IBM Corp.
* Original: Michael Neuling 19/7/2013
* Edited: Rashmica Gupta 01/12/2015
*
* Do some transactions, see if the tar is corrupted.
* If the transaction is aborted, the TAR should be rolled back to the
* checkpointed value before the transaction began. The value written to
* TAR in suspended mode should only remain in TAR if the transaction
* completes.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include "tm.h"
#include "utils.h"
int num_loops = 10000;	/* default iterations; overridable via argv[1] */
/*
 * Run num_loops transactions that write TAR inside transactional and
 * suspended state. Two outcomes are legal: the transaction completes
 * (TAR == 3, encoded as result 7) or it aborts and TAR is rolled back
 * to the checkpointed value (TAR == 1, encoded as result 9). Any other
 * result means TAR was corrupted and the test fails.
 */
int test_tar(void)
{
	int i;
	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());
	SKIP_IF(!is_ppc64le());
	for (i = 0; i < num_loops; i++)
	{
		uint64_t result = 0;
		asm __volatile__(
			"li	7, 1;"
			"mtspr	%[tar], 7;"	/* tar = 1 */
			"tbegin.;"
			"beq	3f;"
			"li	4, 0x7000;"	/* Loop lots, to use time */
			"2:;"			/* Start loop */
			"li	7, 2;"
			"mtspr	%[tar], 7;"	/* tar = 2 */
			"tsuspend.;"
			"li	7, 3;"
			"mtspr	%[tar], 7;"	/* tar = 3 */
			"tresume.;"
			"subi	4, 4, 1;"
			"cmpdi	4, 0;"
			"bne	2b;"
			"tend.;"

			/* Transaction sucess! TAR should be 3 */
			"mfspr  7, %[tar];"
			"ori	%[res], 7, 4;"  // res = 3|4 = 7

			"b	4f;"

			/* Abort handler. TAR should be rolled back to 1 */
			"3:;"
			"mfspr  7, %[tar];"
			"ori	%[res], 7, 8;"	// res = 1|8 = 9
			"4:;"

			: [res]"=r"(result)
			: [tar]"i"(SPRN_TAR)
			   : "memory", "r0", "r4", "r7");

		/* If result is anything else other than 7 or 9, the tar
		 * value must have been corrupted. */
		if ((result != 7) && (result != 9))
			return 1;
	}
	return 0;
}
/*
 * Entry point: optional argv[1] overrides the iteration count ("-h"
 * prints usage). A high iteration count is required for confidence.
 */
int main(int argc, char *argv[])
{
	/* A low number of iterations (eg 100) can cause a false pass */
	if (argc > 1) {
		if (strcmp(argv[1], "-h") == 0) {
			printf("Syntax:\n\t%s [<num loops>]\n",
			       argv[0]);
			return 1;
		} else {
			num_loops = atoi(argv[1]);
		}
	}
	printf("Starting, %d loops\n", num_loops);
	return test_harness(test_tar, "tm_tar");
}
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-tar.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
* Test the kernel's signal frame code.
*
* The kernel sets up two sets of ucontexts if the signal was to be
* delivered while the thread was in a transaction (referred too as
* first and second contexts).
* Expected behaviour is that the checkpointed state is in the user
* context passed to the signal handler (first context). The speculated
* state can be accessed with the uc_link pointer (second context).
*
* The rationale for this is that if TM unaware code (which linked
* against TM libs) installs a signal handler it will not know of the
* speculative nature of the 'live' registers and may infer the wrong
* thing.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <altivec.h>
#include "utils.h"
#include "tm.h"
#define MAX_ATTEMPT 500000
#define NV_VSX_REGS 12 /* Number of VSX registers to check. */
#define VSX20 20 /* First VSX register to check in vsr20-vsr31 subset */
#define FPR20 20 /* FPR20 overlaps VSX20 most significant doubleword */
long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss);
static sig_atomic_t fail, broken;
/* Test only 12 vsx registers from vsr20 to vsr31 */
vector int vsxs[] = {
/* First context will be set with these values, i.e. non-speculative */
/* VSX20 , VSX21 , ... */
{ 1, 2, 3, 4},{ 5, 6, 7, 8},{ 9,10,11,12},
{13,14,15,16},{17,18,19,20},{21,22,23,24},
{25,26,27,28},{29,30,31,32},{33,34,35,36},
{37,38,39,40},{41,42,43,44},{45,46,47,48},
/* Second context will be set with these values, i.e. speculative */
/* VSX20 , VSX21 , ... */
{-1, -2, -3, -4 },{-5, -6, -7, -8 },{-9, -10,-11,-12},
{-13,-14,-15,-16},{-17,-18,-19,-20},{-21,-22,-23,-24},
{-25,-26,-27,-28},{-29,-30,-31,-32},{-33,-34,-35,-36},
{-37,-38,-39,-40},{-41,-42,-43,-44},{-45,-46,-47,-48}
};
/*
 * SIGUSR1 handler: verify that the checkpointed VSX state (vsr20-31)
 * is found in the first context passed to the handler and the
 * speculative VSX state in the second context reachable via uc_link,
 * matching the first and second halves of the 'vsxs' table. Each VSX
 * register is reassembled from its two halves (fp_regs holds the most
 * significant doubleword, the area below the saved VMX registers holds
 * the least significant one — see the layout comment below). Sets
 * 'broken' and prints details on any mismatch.
 */
static void signal_usr1(int signum, siginfo_t *info, void *uc)
{
	int i, j;
	uint8_t vsx[sizeof(vector int)];
	uint8_t vsx_tm[sizeof(vector int)];
	ucontext_t *ucp = uc;
	ucontext_t *tm_ucp = ucp->uc_link;
	/*
	 * FP registers and VMX registers overlap the VSX registers.
	 *
	 * FP registers (f0-31) overlap the most significant 64 bits of VSX
	 * registers vsr0-31, whilst VMX registers vr0-31, being 128-bit like
	 * the VSX registers, overlap fully the other half of VSX registers,
	 * i.e. vr0-31 overlaps fully vsr32-63.
	 *
	 * Due to compatibility and historical reasons (VMX/Altivec support
	 * appeared first on the architecture), VMX registers vr0-31 (so VSX
	 * half vsr32-63 too) are stored right after the v_regs pointer, in an
	 * area allocated for 'vmx_reverse' array (please see
	 * arch/powerpc/include/uapi/asm/sigcontext.h for details about the
	 * mcontext_t structure on Power).
	 *
	 * The other VSX half (vsr0-31) is hence stored below vr0-31/vsr32-63
	 * registers, but only the least significant 64 bits of vsr0-31. The
	 * most significant 64 bits of vsr0-31 (f0-31), as it overlaps the FP
	 * registers, is kept in fp_regs.
	 *
	 * v_regs is a 16 byte aligned pointer at the start of vmx_reserve
	 * (vmx_reserve may or may not be 16 aligned) where the v_regs structure
	 * exists, so v_regs points to where vr0-31 / vsr32-63 registers are
	 * fully stored. Since v_regs type is elf_vrregset_t, v_regs + 1
	 * skips all the slots used to store vr0-31 / vsr32-64 and points to
	 * part of one VSX half, i.e. v_regs + 1 points to the least significant
	 * 64 bits of vsr0-31. The other part of this half (the most significant
	 * part of vsr0-31) is stored in fp_regs.
	 *
	 */
	/* Get pointer to least significant doubleword of vsr0-31 */
	long *vsx_ptr = (long *)(ucp->uc_mcontext.v_regs + 1);
	long *tm_vsx_ptr = (long *)(tm_ucp->uc_mcontext.v_regs + 1);
	/* Check first context. Print all mismatches. */
	for (i = 0; i < NV_VSX_REGS; i++) {
		/*
		 * Copy VSX most significant doubleword from fp_regs and
		 * copy VSX least significant one from 64-bit slots below
		 * saved VMX registers.
		 */
		memcpy(vsx, &ucp->uc_mcontext.fp_regs[FPR20 + i], 8);
		memcpy(vsx + 8, &vsx_ptr[VSX20 + i], 8);
		fail = memcmp(vsx, &vsxs[i], sizeof(vector int));
		if (fail) {
			broken = 1;
			printf("VSX%d (1st context) == 0x", VSX20 + i);
			for (j = 0; j < 16; j++)
				printf("%02x", vsx[j]);
			printf(" instead of 0x");
			for (j = 0; j < 4; j++)
				printf("%08x", vsxs[i][j]);
			printf(" (expected)\n");
		}
	}
	/* Check second context. Print all mismatches. */
	for (i = 0; i < NV_VSX_REGS; i++) {
		/*
		 * Copy VSX most significant doubleword from fp_regs and
		 * copy VSX least significant one from 64-bit slots below
		 * saved VMX registers.
		 */
		memcpy(vsx_tm, &tm_ucp->uc_mcontext.fp_regs[FPR20 + i], 8);
		memcpy(vsx_tm + 8, &tm_vsx_ptr[VSX20 + i], 8);
		fail = memcmp(vsx_tm, &vsxs[NV_VSX_REGS + i], sizeof(vector int));
		if (fail) {
			broken = 1;
			printf("VSX%d (2nd context) == 0x", VSX20 + i);
			for (j = 0; j < 16; j++)
				printf("%02x", vsx_tm[j]);
			printf(" instead of 0x");
			for (j = 0; j < 4; j++)
				printf("%08x", vsxs[NV_VSX_REGS + i][j]);
			printf("(expected)\n");
		}
	}
}
/*
 * Install the SIGUSR1 handler and repeatedly (up to MAX_ATTEMPT times,
 * or until a mismatch is found) call tm_signal_self_context_load(),
 * which loads both signal contexts with the 'vsxs' values and raises
 * SIGUSR1 so signal_usr1() can verify them. Returns non-zero if any
 * context mismatch was detected.
 *
 * Fix: declare with a proper (void) prototype instead of an old-style
 * unprototyped parameter list.
 */
static int tm_signal_context_chk(void)
{
	struct sigaction act;
	int i;
	long rc;
	pid_t pid = getpid();

	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());

	act.sa_sigaction = signal_usr1;
	sigemptyset(&act.sa_mask);
	act.sa_flags = SA_SIGINFO;
	if (sigaction(SIGUSR1, &act, NULL) < 0) {
		perror("sigaction sigusr1");
		exit(1);
	}

	i = 0;
	while (i < MAX_ATTEMPT && !broken) {
		/*
		 * tm_signal_self_context_load will set both first and second
		 * contexts accordingly to the values passed through non-NULL
		 * array pointers to it, in that case 'vsxs', and invoke the
		 * signal handler installed for SIGUSR1.
		 */
		rc = tm_signal_self_context_load(pid, NULL, NULL, NULL, vsxs);
		FAIL_IF(rc != pid);
		i++;
	}

	return (broken);
}
/* Run the VSX signal-context check under the powerpc selftest harness. */
int main(void)
{
	return test_harness(tm_signal_context_chk, "tm_signal_context_chk_vsx");
}
| linux-master | tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c |
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/mman.h>
#include "utils.h"
extern char __start___ex_table[];
extern char __stop___ex_table[];
#if defined(__powerpc64__)
#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP]
#elif defined(__powerpc__)
#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.uc_regs->gregs[PT_NIP]
#else
#error implement UCONTEXT_NIA
#endif
/*
 * SIGSEGV handler that emulates the kernel's exception-table fixup:
 * walk the (insn, fixup) address pairs between __start___ex_table and
 * __stop___ex_table; if the faulting NIA matches an entry, redirect
 * execution to the fixup address. Otherwise the fault is unexpected,
 * so report and abort.
 */
static void segv_handler(int signr, siginfo_t *info, void *ptr)
{
	ucontext_t *uc = (ucontext_t *)ptr;
	unsigned long addr = (unsigned long)info->si_addr;
	unsigned long *ip = &UCONTEXT_NIA(uc);
	unsigned long *ex_p = (unsigned long *)__start___ex_table;
	while (ex_p < (unsigned long *)__stop___ex_table) {
		unsigned long insn, fixup;
		insn = *ex_p++;
		fixup = *ex_p++;
		/* Fault at a covered instruction: resume at its fixup */
		if (insn == *ip) {
			*ip = fixup;
			return;
		}
	}
	printf("No exception table match for NIA %lx ADDR %lx\n", *ip, addr);
	abort();
}
static void setup_segv_handler(void)
{
struct sigaction action;
memset(&action, 0, sizeof(action));
action.sa_sigaction = segv_handler;
action.sa_flags = SA_SIGINFO;
sigaction(SIGSEGV, &action, NULL);
}
unsigned long COPY_LOOP(void *to, const void *from, unsigned long size);
unsigned long test_copy_tofrom_user_reference(void *to, const void *from, unsigned long size);
static int total_passed;
static int total_failed;
/*
 * Run one copy of 'len' bytes from srcp to dstp through the assembly
 * COPY_LOOP under test and through the reference implementation, and
 * compare their return values (number of bytes not copied when a fault
 * occurs). Tallies pass/fail counters; mismatches are reported but do
 * not abort (see commented-out abort()).
 */
static void do_one_test(char *dstp, char *srcp, unsigned long len)
{
	unsigned long got, expected;
	got = COPY_LOOP(dstp, srcp, len);
	expected = test_copy_tofrom_user_reference(dstp, srcp, len);
	if (got != expected) {
		total_failed++;
		printf("FAIL from=%p to=%p len=%ld returned %ld, expected %ld\n",
		       srcp, dstp, len, got, expected);
		//abort();
	} else
		total_passed++;
}
//#define MAX_LEN 512
#define MAX_LEN 16
/*
 * Map two pages, make the second inaccessible, and exercise COPY_LOOP
 * with src/dst/len combinations that straddle the protected page, so
 * faults are taken and resolved via the exception-table SEGV handler.
 * Each combination is checked against the reference implementation.
 */
int test_copy_exception(void)
{
	int page_size;
	static char *p, *q;
	unsigned long src, dst, len;
	page_size = getpagesize();
	p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
		 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}
	memset(p, 0, page_size);
	setup_segv_handler();
	/* Second page is a fault zone: accesses there must be fixed up */
	if (mprotect(p + page_size, page_size, PROT_NONE)) {
		perror("mprotect");
		exit(1);
	}
	/* q is placed so q + MAX_LEN reaches the protected page */
	q = p + page_size - MAX_LEN;
	for (src = 0; src < MAX_LEN; src++) {
		for (dst = 0; dst < MAX_LEN; dst++) {
			for (len = 0; len < MAX_LEN+1; len++) {
				// printf("from=%p to=%p len=%ld\n", q+dst, q+src, len);
				do_one_test(q+dst, q+src, len);
			}
		}
	}
	printf("Totals:\n");
	printf("  Pass: %d\n", total_passed);
	printf("  Fail: %d\n", total_failed);
	return 0;
}
/* Run the exception-handling copy test; the name comes from str(COPY_LOOP). */
int main(void)
{
	return test_harness(test_copy_exception, str(COPY_LOOP));
}
| linux-master | tools/testing/selftests/powerpc/copyloops/exc_validate.c |
// SPDX-License-Identifier: GPL-2.0
#include <malloc.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include "utils.h"
#define MAX_LEN 8192
#define MAX_OFFSET 16
#define MIN_REDZONE 128
#define BUFLEN (MAX_LEN+MAX_OFFSET+2*MIN_REDZONE)
#define POISON 0xa5
unsigned long COPY_LOOP(void *to, const void *from, unsigned long size);
/*
 * Perform a single COPY_LOOP of 'len' bytes at the given source and
 * destination offsets (both buffers are poisoned first and the source
 * region filled from 'fill'), then verify: the return value, that the
 * copied bytes match, and that the redzones before and after the
 * destination region are untouched. Aborts with a diagnostic on any
 * failure.
 */
static void do_one(char *src, char *dst, unsigned long src_off,
		   unsigned long dst_off, unsigned long len, void *redzone,
		   void *fill)
{
	char *srcp, *dstp;
	unsigned long ret;
	unsigned long i;
	srcp = src + MIN_REDZONE + src_off;
	dstp = dst + MIN_REDZONE + dst_off;
	memset(src, POISON, BUFLEN);
	memset(dst, POISON, BUFLEN);
	memcpy(srcp, fill, len);
	ret = COPY_LOOP(dstp, srcp, len);
	if (ret && ret != (unsigned long)dstp) {
		printf("(%p,%p,%ld) returned %ld\n", dstp, srcp, len, ret);
		abort();
	}
	if (memcmp(dstp, srcp, len)) {
		printf("(%p,%p,%ld) miscompare\n", dstp, srcp, len);
		printf("src: ");
		for (i = 0; i < len; i++)
			printf("%02x ", srcp[i]);
		printf("\ndst: ");
		for (i = 0; i < len; i++)
			printf("%02x ", dstp[i]);
		printf("\n");
		abort();
	}
	if (memcmp(dst, redzone, dstp - dst)) {
		printf("(%p,%p,%ld) redzone before corrupted\n",
		       dstp, srcp, len);
		abort();
	}
	if (memcmp(dstp+len, redzone, dst+BUFLEN-(dstp+len))) {
		printf("(%p,%p,%ld) redzone after corrupted\n",
		       dstp, srcp, len);
		abort();
	}
}
/*
 * Exhaustively exercise COPY_LOOP for every (len, src_off, dst_off)
 * combination up to MAX_LEN/MAX_OFFSET, validating each copy with
 * do_one() (which aborts on failure). Returns 0 on success.
 *
 * Fix: release the four test buffers before returning; they were
 * previously leaked.
 */
int test_copy_loop(void)
{
	char *src, *dst, *redzone, *fill;
	unsigned long len, src_off, dst_off;
	unsigned long i;

	src = memalign(BUFLEN, BUFLEN);
	dst = memalign(BUFLEN, BUFLEN);
	redzone = malloc(BUFLEN);
	fill = malloc(BUFLEN);

	if (!src || !dst || !redzone || !fill) {
		fprintf(stderr, "malloc failed\n");
		exit(1);
	}

	memset(redzone, POISON, BUFLEN);

	/* Fill with sequential bytes */
	for (i = 0; i < BUFLEN; i++)
		fill[i] = i & 0xff;

	for (len = 1; len < MAX_LEN; len++) {
		for (src_off = 0; src_off < MAX_OFFSET; src_off++) {
			for (dst_off = 0; dst_off < MAX_OFFSET; dst_off++) {
				do_one(src, dst, src_off, dst_off, len,
				       redzone, fill);
			}
		}
	}

	free(src);
	free(dst);
	free(redzone);
	free(fill);

	return 0;
}
/* Run the copy-loop validation; the name comes from str(COPY_LOOP). */
int main(void)
{
	return test_harness(test_copy_loop, str(COPY_LOOP));
}
| linux-master | tools/testing/selftests/powerpc/copyloops/validate.c |
// SPDX-License-Identifier: GPL-2.0
#include <malloc.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "utils.h"
void *TEST_MEMMOVE(const void *s1, const void *s2, size_t n);
#define BUF_LEN 65536
#define MAX_OFFSET 512
/* Return the larger of two sizes. */
size_t max(size_t a, size_t b)
{
	return (a >= b) ? a : b;
}
/*
 * Validate TEST_MEMMOVE (the kernel implementation under test) against
 * libc memmove: both buffers start with identical contents, every
 * (src_off, dst_off, len) combination within MAX_OFFSET is applied to
 * both, and the first MAX_OFFSET bytes are compared after each move
 * (all writes land inside that window by construction of 'len').
 * Aborts on the first divergence.
 *
 * Fix: free the two aligned buffers before returning; they were
 * previously leaked.
 */
static int testcase_run(void)
{
	size_t i, src_off, dst_off, len;

	char *usermap = memalign(BUF_LEN, BUF_LEN);
	char *kernelmap = memalign(BUF_LEN, BUF_LEN);

	assert(usermap != NULL);
	assert(kernelmap != NULL);

	memset(usermap, 0, BUF_LEN);
	memset(kernelmap, 0, BUF_LEN);

	for (i = 0; i < BUF_LEN; i++) {
		usermap[i] = i & 0xff;
		kernelmap[i] = i & 0xff;
	}

	for (src_off = 0; src_off < MAX_OFFSET; src_off++) {
		for (dst_off = 0; dst_off < MAX_OFFSET; dst_off++) {
			for (len = 1; len < MAX_OFFSET - max(src_off, dst_off); len++) {
				memmove(usermap + dst_off, usermap + src_off, len);
				TEST_MEMMOVE(kernelmap + dst_off, kernelmap + src_off, len);
				if (memcmp(usermap, kernelmap, MAX_OFFSET) != 0) {
					printf("memmove failed at %ld %ld %ld\n",
					       src_off, dst_off, len);
					abort();
				}
			}
		}
	}

	free(usermap);
	free(kernelmap);

	return 0;
}
/* Run the memmove validation under the powerpc selftest harness. */
int main(void)
{
	return test_harness(testcase_run, "memmove");
}
| linux-master | tools/testing/selftests/powerpc/copyloops/memmove_validate.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
* This test attempts to see if the FPU registers change across preemption.
* Two things should be noted here a) The check_fpu function in asm only checks
* the non volatile registers as it is reused from the syscall test b) There is
* no way to be sure preemption happened so this test just uses many threads
* and a long wait. As such, a successful test doesn't mean much but a failure
* is bad.
*/
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <pthread.h>
#include "utils.h"
/* Time to wait for workers to get preempted (seconds) */
#define PREEMPT_TIME 20
/*
* Factor by which to multiply number of online CPUs for total number of
* worker threads
*/
#define THREAD_FACTOR 8
/* Per-thread values loaded into the FPU by preempt_fpu() and re-checked */
__thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
			    1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0,
			    2.1};
int threads_starting;	/* decremented by each worker as it starts spinning */
int running;		/* cleared by the main thread to stop all workers */
extern void preempt_fpu(double *darray, int *threads_starting, int *running);
/*
 * Worker thread body: seed darray with random values, then enter the
 * assembly routine preempt_fpu(), which spins checking the FPU
 * registers against darray until 'running' is cleared. A return from
 * preempt_fpu() before that means the registers were corrupted.
 */
void *preempt_fpu_c(void *p)
{
	int i;
	srand(pthread_self());
	for (i = 0; i < 21; i++)
		darray[i] = rand();
	/* Test failed if it ever returns */
	preempt_fpu(darray, &threads_starting, &running);
	return p;
}
/*
 * Spawn THREAD_FACTOR workers per online CPU, let them all spin
 * verifying their FPU registers for PREEMPT_TIME seconds (so some get
 * preempted), then stop them and check that none returned early (which
 * would indicate register corruption across preemption).
 */
int test_preempt_fpu(void)
{
	int i, rc, threads;
	pthread_t *tids;
	threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR;
	tids = malloc((threads) * sizeof(pthread_t));
	FAIL_IF(!tids);
	running = true;
	threads_starting = threads;
	for (i = 0; i < threads; i++) {
		rc = pthread_create(&tids[i], NULL, preempt_fpu_c, NULL);
		FAIL_IF(rc);
	}
	setbuf(stdout, NULL);
	/* Not really necessary but nice to wait for every thread to start */
	printf("\tWaiting for all workers to start...");
	while(threads_starting)
		asm volatile("": : :"memory");
	printf("done\n");
	printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);
	sleep(PREEMPT_TIME);
	printf("done\n");
	printf("\tStopping workers...");
	/*
	 * Working are checking this value every loop. In preempt_fpu 'cmpwi r5,0; bne 2b'.
	 * r5 will have loaded the value of running.
	 */
	running = 0;
	for (i = 0; i < threads; i++) {
		void *rc_p;
		pthread_join(tids[i], &rc_p);
		/*
		 * Harness will say the fail was here, look at why preempt_fpu
		 * returned
		 */
		if ((long) rc_p)
			printf("oops\n");
		FAIL_IF((long) rc_p);
	}
	printf("done\n");
	free(tids);
	return 0;
}
/* Run the FPU preemption test under the powerpc selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(test_preempt_fpu, "fpu_preempt");
}
| linux-master | tools/testing/selftests/powerpc/math/fpu_preempt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
* This test attempts to see if the VMX registers change across a syscall (fork).
*/
#include <altivec.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "utils.h"
vector int varray[] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10,11,12},
{13,14,15,16},{17,18,19,20},{21,22,23,24},
{25,26,27,28},{29,30,31,32},{33,34,35,36},
{37,38,39,40},{41,42,43,44},{45,46,47,48}};
extern int test_vmx(vector int *varray, pid_t *pid);
/*
 * Run 1000 iterations of test_vmx(), which loads the VMX registers
 * from varray, fork()s (a syscall), and verifies the registers
 * afterwards in both parent and child. Returns -1 if a fork failed,
 * 1 if any parent or child iteration detected corruption, 0 on
 * success.
 */
int vmx_syscall(void)
{
	pid_t fork_pid;
	int i;
	int ret;
	int child_ret;
	for (i = 0; i < 1000; i++) {
		/* test_vmx will fork() */
		ret = test_vmx(varray, &fork_pid);
		if (fork_pid == -1)
			return -1;
		if (fork_pid == 0)
			exit(ret);
		waitpid(fork_pid, &child_ret, 0);
		if (ret || child_ret)
			return 1;
	}
	return 0;
}
/*
 * Fork twice so four processes run vmx_syscall() concurrently (lots of
 * context switching), then gather the exit statuses of all children
 * and fail if any process reported VMX corruption. Note the careful
 * ordering: after the first fork() has succeeded, failures can no
 * longer be reported with FAIL_IF directly, so results are merged into
 * 'ret' and propagated via exit()/waitpid().
 */
int test_vmx_syscall(void)
{
	/*
	 * Setup an environment with much context switching
	 */
	pid_t pid2;
	pid_t pid;
	int ret;
	int child_ret;
	// vcmpequd used in vmx_asm.S is v2.07
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
	pid = fork();
	FAIL_IF(pid == -1);
	pid2 = fork();
	ret = vmx_syscall();
	/* Can't FAIL_IF(pid2 == -1); because we've already forked */
	if (pid2 == -1) {
		/*
		 * Couldn't fork, ensure child_ret is set and is a fail
		 */
		ret = child_ret = 1;
	} else {
		if (pid2)
			waitpid(pid2, &child_ret, 0);
		else
			exit(ret);
	}
	ret |= child_ret;
	if (pid)
		waitpid(pid, &child_ret, 0);
	else
		exit(ret);
	FAIL_IF(ret || child_ret);
	return 0;
}
/* Run the VMX-across-syscall test under the powerpc selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(test_vmx_syscall, "vmx_syscall");
}
| linux-master | tools/testing/selftests/powerpc/math/vmx_syscall.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
* This test attempts to see if the VMX registers are correctly reported in a
* signal context. Each worker just spins checking its VMX registers, at some
* point a signal will interrupt it and C code will check the signal context
* ensuring it is also the same.
*/
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <altivec.h>
#include "utils.h"
/* Number of times each thread should receive the signal */
#define ITERATIONS 10
/*
* Factor by which to multiply number of online CPUs for total number of
* worker threads
*/
#define THREAD_FACTOR 8
__thread vector int varray[] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10,11,12},
{13,14,15,16},{17,18,19,20},{21,22,23,24},
{25,26,27,28},{29,30,31,32},{33,34,35,36},
{37,38,39,40},{41,42,43,44},{45,46,47,48}};
bool bad_context;
int running;
int threads_starting;
extern int preempt_vmx(vector int *varray, int *threads_starting, int *sentinal);
/*
 * SIGUSR1 handler: compare the non-volatile VMX registers (vr20-31) in
 * the delivered signal context against this thread's varray values.
 * On mismatch, dump all twelve registers and set bad_context.
 */
void signal_vmx_sig(int sig, siginfo_t *info, void *context)
{
	int i;
	ucontext_t *uc = context;
	mcontext_t *mc = &uc->uc_mcontext;
	/* Only the non volatiles were loaded up */
	for (i = 20; i < 32; i++) {
		if (memcmp(mc->v_regs->vrregs[i], &varray[i - 20], 16)) {
			int j;
			/*
			 * Shouldn't printf() in a signal handler, however, this is a
			 * test and we've detected failure. Understanding what failed
			 * is paramount. All that happens after this is tests exit with
			 * failure.
			 */
			printf("VMX mismatch at reg %d!\n", i);
			printf("Reg | Actual                  | Expected\n");
			for (j = 20; j < 32; j++) {
				printf("%d | 0x%04x%04x%04x%04x | 0x%04x%04x%04x%04x\n", j, mc->v_regs->vrregs[j][0],
					   mc->v_regs->vrregs[j][1], mc->v_regs->vrregs[j][2], mc->v_regs->vrregs[j][3],
					   varray[j - 20][0], varray[j - 20][1], varray[j - 20][2], varray[j - 20][3]);
			}
			bad_context = true;
			break;
		}
	}
}
/*
 * Worker thread body: install the SIGUSR1 handler, seed varray with
 * random values, then enter preempt_vmx(), which spins checking the
 * VMX registers until 'running' is cleared.
 *
 * Fix: sa_mask was previously left uninitialized (stack garbage),
 * which could block an arbitrary set of signals while the handler
 * runs; initialize it with sigemptyset().
 */
void *signal_vmx_c(void *p)
{
	int i, j;
	long rc;
	struct sigaction act;

	sigemptyset(&act.sa_mask);
	act.sa_sigaction = signal_vmx_sig;
	act.sa_flags = SA_SIGINFO;
	rc = sigaction(SIGUSR1, &act, NULL);
	if (rc)
		return p;

	srand(pthread_self());
	for (i = 0; i < 12; i++)
		for (j = 0; j < 4; j++)
			varray[i][j] = rand();

	rc = preempt_vmx(varray, &threads_starting, &running);

	return (void *) rc;
}
/*
 * Spawn THREAD_FACTOR workers per online CPU, each spinning in
 * preempt_vmx() verifying its VMX registers; send every worker SIGUSR1
 * ITERATIONS times so signal_vmx_sig() can verify the registers in the
 * signal context as well. Fails if any worker returned early (live
 * register corruption) or any handler set bad_context (signal-frame
 * corruption).
 */
int test_signal_vmx(void)
{
	int i, j, rc, threads;
	void *rc_p;
	pthread_t *tids;
	// vcmpequd used in vmx_asm.S is v2.07
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
	threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR;
	tids = malloc(threads * sizeof(pthread_t));
	FAIL_IF(!tids);
	running = true;
	threads_starting = threads;
	for (i = 0; i < threads; i++) {
		rc = pthread_create(&tids[i], NULL, signal_vmx_c, NULL);
		FAIL_IF(rc);
	}
	setbuf(stdout, NULL);
	printf("\tWaiting for %d workers to start... %d", threads, threads_starting);
	while (threads_starting) {
		asm volatile("": : :"memory");
		usleep(1000);
		printf(", %d", threads_starting);
	}
	printf(" ...done\n");
	printf("\tSending signals to all threads %d times...", ITERATIONS);
	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < threads; j++) {
			pthread_kill(tids[j], SIGUSR1);
		}
		sleep(1);
	}
	printf("done\n");
	printf("\tKilling workers...");
	running = 0;
	for (i = 0; i < threads; i++) {
		pthread_join(tids[i], &rc_p);
		/*
		 * Harness will say the fail was here, look at why signal_vmx
		 * returned
		 */
		if ((long) rc_p || bad_context)
			printf("oops\n");
		if (bad_context)
			fprintf(stderr, "\t!! bad_context is true\n");
		FAIL_IF((long) rc_p || bad_context);
	}
	printf("done\n");
	free(tids);
	return 0;
}
/* Run the VMX signal-context test (long-running: extended timeout). */
int main(int argc, char *argv[])
{
	test_harness_set_timeout(360);
	return test_harness(test_signal_vmx, "vmx_signal");
}
| linux-master | tools/testing/selftests/powerpc/math/vmx_signal.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright IBM Corp. 2020
*
* This test attempts to cause a FP denormal exception on POWER8 CPUs. Unfortunately
* if the denormal handler is not configured or working properly, this can cause a bad
* crash in kernel mode when the kernel tries to save FP registers when the process
* exits.
*/
#include <stdio.h>
#include <string.h>
#include "utils.h"
/*
 * Load a denormal single-precision value into a float, convert it to
 * double (which on POWER8 may take the denormal-assist path), and
 * check the renormalised 64-bit pattern. The volatile qualifiers keep
 * the lfs/stfd sequence from being folded away at compile time.
 */
static int test_denormal_fpu(void)
{
	unsigned int m32;
	unsigned long m64;
	volatile float f;
	volatile double d;
	/* try to induce lfs <denormal> ; stfd */
	m32 = 0x00715fcf; /* random denormal */
	memcpy((float *)&f, &m32, sizeof(f));
	d = f;
	memcpy(&m64, (double *)&d, sizeof(d));
	FAIL_IF((long)(m64 != 0x380c57f3c0000000)); /* renormalised value */
	return 0;
}
}
/* Run the FPU denormal test under the powerpc selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(test_denormal_fpu, "fpu_denormal");
}
| linux-master | tools/testing/selftests/powerpc/math/fpu_denormal.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
* This test attempts to see if the FPU registers are correctly reported in a
* signal context. Each worker just spins checking its FPU registers, at some
* point a signal will interrupt it and C code will check the signal context
* ensuring it is also the same.
*/
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <pthread.h>
#include "utils.h"
/* Number of times each thread should receive the signal */
#define ITERATIONS 10
/*
* Factor by which to multiply number of online CPUs for total number of
* worker threads
*/
#define THREAD_FACTOR 8
__thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0,
2.1};
bool bad_context;
int threads_starting;
int running;
extern long preempt_fpu(double *darray, int *threads_starting, int *running);
/*
 * SIGUSR1 handler: compare the non-volatile FP registers (f14-f31) saved in
 * the signal context against the values the worker loaded from its
 * thread-local darray. A mismatch sets the global bad_context flag.
 */
void signal_fpu_sig(int sig, siginfo_t *info, void *context)
{
	int i;
	ucontext_t *uc = context;
	mcontext_t *mc = &uc->uc_mcontext;
	/* Only the non volatiles were loaded up */
	for (i = 14; i < 32; i++) {
		if (mc->fp_regs[i] != darray[i - 14]) {
			bad_context = true;
			break;
		}
	}
}
void *signal_fpu_c(void *p)
{
int i;
long rc;
struct sigaction act;
act.sa_sigaction = signal_fpu_sig;
act.sa_flags = SA_SIGINFO;
rc = sigaction(SIGUSR1, &act, NULL);
if (rc)
return p;
srand(pthread_self());
for (i = 0; i < 21; i++)
darray[i] = rand();
rc = preempt_fpu(darray, &threads_starting, &running);
return (void *) rc;
}
/*
 * Spawn THREAD_FACTOR workers per online CPU, wait for them all to start,
 * then bombard every worker with SIGUSR1 ITERATIONS times. Each delivery
 * makes signal_fpu_sig verify the FP state in the signal frame. Fails if
 * any worker returned non-zero or any handler saw a bad context.
 */
int test_signal_fpu(void)
{
	int i, j, rc, threads;
	void *rc_p;
	pthread_t *tids;
	threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR;
	tids = malloc(threads * sizeof(pthread_t));
	FAIL_IF(!tids);
	running = true;
	threads_starting = threads;
	for (i = 0; i < threads; i++) {
		rc = pthread_create(&tids[i], NULL, signal_fpu_c, NULL);
		FAIL_IF(rc);
	}
	setbuf(stdout, NULL);
	printf("\tWaiting for all workers to start...");
	/* Each worker decrements threads_starting once it is spinning. */
	while (threads_starting)
		asm volatile("": : :"memory");
	printf("done\n");
	printf("\tSending signals to all threads %d times...", ITERATIONS);
	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < threads; j++) {
			pthread_kill(tids[j], SIGUSR1);
		}
		sleep(1);
	}
	printf("done\n");
	printf("\tStopping workers...");
	/* Workers poll 'running' inside preempt_fpu and exit when cleared. */
	running = 0;
	for (i = 0; i < threads; i++) {
		pthread_join(tids[i], &rc_p);
		/*
		 * Harness will say the fail was here, look at why signal_fpu
		 * returned
		 */
		if ((long) rc_p || bad_context)
			printf("oops\n");
		if (bad_context)
			fprintf(stderr, "\t!! bad_context is true\n");
		FAIL_IF((long) rc_p || bad_context);
	}
	printf("done\n");
	free(tids);
	return 0;
}
/* Entry point: run the FPU signal-context test under the selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(test_signal_fpu, "fpu_signal");
}
| linux-master | tools/testing/selftests/powerpc/math/fpu_signal.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test basic matrix multiply assist (MMA) functionality if available.
*
* Copyright 2020, Alistair Popple, IBM Corp.
*/
#include <stdio.h>
#include <stdint.h>
#include "utils.h"
extern void test_mma(uint16_t (*)[8], uint16_t (*)[8], uint32_t (*)[4*4]);
/*
 * Multiply two 4-element vectors (packed as 8 x u16 with zero padding) via
 * the asm MMA helper and compare the resulting 4x4 outer product against
 * the expected table. Returns 0 when all 16 entries match, 1 otherwise.
 */
static int mma(void)
{
	uint16_t x[] = {1, 0, 2, 0, 3, 0, 4, 0};
	uint16_t y[] = {1, 0, 2, 0, 3, 0, 4, 0};
	uint32_t z[4 * 4];
	uint32_t exp[4 * 4] = {1, 2, 3, 4,
			       2, 4, 6, 8,
			       3, 6, 9, 12,
			       4, 8, 12, 16};
	int failures = 0;
	int idx;

	SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_ARCH_3_1), "Need ISAv3.1");
	SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_MMA), "Need MMA");

	test_mma(&x, &y, &z);

	for (idx = 0; idx < 16; idx++) {
		int ok = (z[idx] == exp[idx]);

		printf("MMA[%d] = %d ", idx, z[idx]);
		fputs(ok ? " (Correct)\n" : " (Incorrect)\n", stdout);
		if (!ok)
			failures = 1;
	}

	return failures;
}
/* Entry point: run the MMA functional test under the selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(mma, "mma");
}
| linux-master | tools/testing/selftests/powerpc/math/mma.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
* This test attempts to see if the VMX registers change across preemption.
* Two things should be noted here a) The check_vmx function in asm only checks
* the non volatile registers as it is reused from the syscall test b) There is
* no way to be sure preemption happened so this test just uses many threads
* and a long wait. As such, a successful test doesn't mean much but a failure
* is bad.
*/
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <pthread.h>
#include "utils.h"
/* Time to wait for workers to get preempted (seconds) */
#define PREEMPT_TIME 20
/*
* Factor by which to multiply number of online CPUs for total number of
* worker threads
*/
#define THREAD_FACTOR 8
__thread vector int varray[] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10,11,12},
{13,14,15,16},{17,18,19,20},{21,22,23,24},
{25,26,27,28},{29,30,31,32},{33,34,35,36},
{37,38,39,40},{41,42,43,44},{45,46,47,48}};
int threads_starting;
int running;
extern void preempt_vmx(vector int *varray, int *threads_starting, int *running);
/*
 * Worker thread body: fill the thread-local varray with random vectors and
 * then spin in the asm routine preempt_vmx(), which repeatedly checks the
 * registers; it only returns if a register mismatch is detected (failure).
 */
void *preempt_vmx_c(void *p)
{
	int i, j;
	srand(pthread_self());
	for (i = 0; i < 12; i++)
		for (j = 0; j < 4; j++)
			varray[i][j] = rand();
	/* Test fails if it ever returns */
	preempt_vmx(varray, &threads_starting, &running);
	return p;
}
/*
 * Spawn THREAD_FACTOR workers per online CPU, each spinning on its VMX
 * registers, then sleep for PREEMPT_TIME seconds so the scheduler preempts
 * them. Fails if any worker observed register corruption (preempt_vmx
 * returned non-zero).
 */
int test_preempt_vmx(void)
{
	int i, rc, threads;
	pthread_t *tids;

	// vcmpequd used in vmx_asm.S is v2.07
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));

	threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR;
	tids = malloc(threads * sizeof(pthread_t));
	FAIL_IF(!tids);

	running = true;
	threads_starting = threads;
	for (i = 0; i < threads; i++) {
		rc = pthread_create(&tids[i], NULL, preempt_vmx_c, NULL);
		FAIL_IF(rc);
	}

	setbuf(stdout, NULL);
	/* Not really necessary but nice to wait for every thread to start */
	printf("\tWaiting for all workers to start...");
	while(threads_starting)
		asm volatile("": : :"memory");
	printf("done\n");

	printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);
	sleep(PREEMPT_TIME);
	printf("done\n");

	printf("\tStopping workers...");
	/*
	 * Workers are checking this value every loop. In preempt_vmx 'cmpwi r5,0; bne 2b'.
	 * r5 will have loaded the value of running.
	 */
	running = 0;
	for (i = 0; i < threads; i++) {
		void *rc_p;
		pthread_join(tids[i], &rc_p);

		/*
		 * Harness will say the fail was here, look at why preempt_vmx
		 * returned
		 */
		if ((long) rc_p)
			printf("oops\n");
		FAIL_IF((long) rc_p);
	}
	printf("done\n");

	/* Bug fix: the original leaked 'tids' on the success path. */
	free(tids);

	return 0;
}
/* Entry point: run the VMX preemption test under the selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(test_preempt_vmx, "vmx_preempt");
}
| linux-master | tools/testing/selftests/powerpc/math/vmx_preempt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
* This test attempts to see if the FPU registers change across a syscall (fork).
*/
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdlib.h>
#include "utils.h"
extern int test_fpu(double *darray, pid_t *pid);
double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0,
2.1};
/*
 * Repeatedly call the asm helper test_fpu(), which loads darray into the FP
 * registers, fork()s, and checks the registers survive the syscall in both
 * parent and child. Returns 0 on success, -1 if fork failed, 1 on mismatch.
 */
int syscall_fpu(void)
{
	pid_t fork_pid;
	int i;
	int ret;
	int child_ret;
	for (i = 0; i < 1000; i++) {
		/* test_fpu will fork() */
		ret = test_fpu(darray, &fork_pid);
		if (fork_pid == -1)
			return -1;
		if (fork_pid == 0)
			exit(ret);
		waitpid(fork_pid, &child_ret, 0);
		if (ret || child_ret)
			return 1;
	}
	return 0;
}
/*
 * Run syscall_fpu() in up to four processes at once (two nested forks) to
 * create plenty of context switching, then aggregate the exit statuses.
 * Children report their result via exit(); the outermost parent FAIL_IFs
 * on any non-zero result.
 */
int test_syscall_fpu(void)
{
	/*
	 * Setup an environment with much context switching
	 */
	pid_t pid2;
	pid_t pid = fork();
	int ret;
	int child_ret;
	FAIL_IF(pid == -1);
	pid2 = fork();
	/* Can't FAIL_IF(pid2 == -1); because already forked once */
	if (pid2 == -1) {
		/*
		 * Couldn't fork, ensure test is a fail
		 */
		child_ret = ret = 1;
	} else {
		ret = syscall_fpu();
		if (pid2)
			waitpid(pid2, &child_ret, 0);
		else
			exit(ret);
	}
	/* Fold the inner child's status into this process's result. */
	ret |= child_ret;
	if (pid)
		waitpid(pid, &child_ret, 0);
	else
		exit(ret);
	FAIL_IF(ret || child_ret);
	return 0;
}
/* Entry point: run the FPU syscall-survival test under the harness. */
int main(int argc, char *argv[])
{
	return test_harness(test_syscall_fpu, "syscall_fpu");
}
| linux-master | tools/testing/selftests/powerpc/math/fpu_syscall.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
* This test attempts to see if the VSX registers change across preemption.
* There is no way to be sure preemption happened so this test just
* uses many threads and a long wait. As such, a successful test
* doesn't mean much but a failure is bad.
*/
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <pthread.h>
#include "utils.h"
/* Time to wait for workers to get preempted (seconds) */
#define PREEMPT_TIME 20
/*
* Factor by which to multiply number of online CPUs for total number of
* worker threads
*/
#define THREAD_FACTOR 8
/*
* Ensure there is twice the number of non-volatile VMX regs!
* check_vmx() is going to use the other half as space to put the live
* registers before calling vsx_memcmp()
*/
__thread vector int varray[24] = {
{1, 2, 3, 4 }, {5, 6, 7, 8 }, {9, 10,11,12},
{13,14,15,16}, {17,18,19,20}, {21,22,23,24},
{25,26,27,28}, {29,30,31,32}, {33,34,35,36},
{37,38,39,40}, {41,42,43,44}, {45,46,47,48}
};
int threads_starting;
int running;
extern long preempt_vsx(vector int *varray, int *threads_starting, int *running);
long vsx_memcmp(vector int *a) {
vector int zero = {0, 0, 0, 0};
int i;
FAIL_IF(a != varray);
for(i = 0; i < 12; i++) {
if (memcmp(&a[i + 12], &zero, sizeof(vector int)) == 0) {
fprintf(stderr, "Detected zero from the VSX reg %d\n", i + 12);
return 2;
}
}
if (memcmp(a, &a[12], 12 * sizeof(vector int))) {
long *p = (long *)a;
fprintf(stderr, "VSX mismatch\n");
for (i = 0; i < 24; i=i+2)
fprintf(stderr, "%d: 0x%08lx%08lx | 0x%08lx%08lx\n",
i/2 + i%2 + 20, p[i], p[i + 1], p[i + 24], p[i + 25]);
return 1;
}
return 0;
}
void *preempt_vsx_c(void *p)
{
int i, j;
long rc;
srand(pthread_self());
for (i = 0; i < 12; i++)
for (j = 0; j < 4; j++) {
varray[i][j] = rand();
/* Don't want zero because it hides kernel problems */
if (varray[i][j] == 0)
j--;
}
rc = preempt_vsx(varray, &threads_starting, &running);
if (rc == 2)
fprintf(stderr, "Caught zeros in VSX compares\n");
return (void *)rc;
}
int test_preempt_vsx(void)
{
int i, rc, threads;
pthread_t *tids;
SKIP_IF(!have_hwcap(PPC_FEATURE_HAS_VSX));
threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR;
tids = malloc(threads * sizeof(pthread_t));
FAIL_IF(!tids);
running = true;
threads_starting = threads;
for (i = 0; i < threads; i++) {
rc = pthread_create(&tids[i], NULL, preempt_vsx_c, NULL);
FAIL_IF(rc);
}
setbuf(stdout, NULL);
/* Not really nessesary but nice to wait for every thread to start */
printf("\tWaiting for %d workers to start...", threads_starting);
while(threads_starting)
asm volatile("": : :"memory");
printf("done\n");
printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);
sleep(PREEMPT_TIME);
printf("done\n");
printf("\tStopping workers...");
/*
* Working are checking this value every loop. In preempt_vsx 'cmpwi r5,0; bne 2b'.
* r5 will have loaded the value of running.
*/
running = 0;
for (i = 0; i < threads; i++) {
void *rc_p;
pthread_join(tids[i], &rc_p);
/*
* Harness will say the fail was here, look at why preempt_vsx
* returned
*/
if ((long) rc_p)
printf("oops\n");
FAIL_IF((long) rc_p);
}
printf("done\n");
return 0;
}
int main(int argc, char *argv[])
{
return test_harness(test_preempt_vsx, "vsx_preempt");
}
| linux-master | tools/testing/selftests/powerpc/math/vsx_preempt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "vas-api.h"
#include "utils.h"
static bool faulted;
/*
 * SIGBUS handler: record that the expected fault occurred, then skip the
 * faulting instruction (all ppc instructions here are 4 bytes) so the test
 * can continue past the paste-address store.
 */
static void sigbus_handler(int n, siginfo_t *info, void *ctxt_v)
{
	ucontext_t *ctxt = (ucontext_t *)ctxt_v;
	struct pt_regs *regs = ctxt->uc_mcontext.regs;
	faulted = true;
	regs->nip += 4;
}
/*
 * Open a VAS tx window on the NX-GZIP device, mmap its paste address and
 * store to it, expecting the access to raise SIGBUS (injected RA error).
 * Passes only if the handler observed the fault.
 */
static int test_ra_error(void)
{
	struct vas_tx_win_open_attr attr;
	int fd, *paste_addr;
	char *devname = "/dev/crypto/nx-gzip";
	struct sigaction act = {
		.sa_sigaction = sigbus_handler,
		.sa_flags = SA_SIGINFO,
	};

	memset(&attr, 0, sizeof(attr));
	attr.version = 1;
	attr.vas_id = 0;

	SKIP_IF(access(devname, F_OK));

	fd = open(devname, O_RDWR);
	FAIL_IF(fd < 0);
	FAIL_IF(ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0);
	FAIL_IF(sigaction(SIGBUS, &act, NULL) != 0);

	paste_addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0ULL);
	/* Bug fix: the original dereferenced paste_addr without checking mmap. */
	FAIL_IF(paste_addr == MAP_FAILED);

	/* The following assignment triggers exception */
	mb();
	*paste_addr = 1;
	mb();

	FAIL_IF(!faulted);

	return 0;
}
/* Entry point: run the RA-error injection test under the harness. */
int main(void)
{
	return test_harness(test_ra_error, "inject-ra-err");
}
| linux-master | tools/testing/selftests/powerpc/mce/inject-ra-err.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Userspace test harness for load_unaligned_zeropad. Creates two
* pages and uses mprotect to prevent access to the second page and
* a SEGV handler that walks the exception tables and runs the fixup
* routine.
*
* The results are compared against a normal load that is that is
* performed while access to the second page is enabled via mprotect.
*
* Copyright (C) 2014 Anton Blanchard <[email protected]>, IBM
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#include <signal.h>
#include <unistd.h>
#include <sys/mman.h>
#define FIXUP_SECTION ".ex_fixup"
static inline unsigned long __fls(unsigned long x);
#include "word-at-a-time.h"
#include "utils.h"
/*
 * Find last set bit: index of the most significant 1 bit in x, computed
 * from the count-leading-zeros instruction. Required by word-at-a-time.h.
 * NOTE(review): result is undefined for x == 0, matching the kernel helper.
 */
static inline unsigned long __fls(unsigned long x)
{
	int lz;
	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
	return sizeof(unsigned long) - 1 - lz;
}
static int page_size;
static char *mem_region;
/* Revoke all access to the second page. Returns 0 on success, 1 on error. */
static int protect_region(void)
{
	int err = mprotect(mem_region + page_size, page_size, PROT_NONE);

	if (err)
		perror("mprotect");

	return err ? 1 : 0;
}
/* Restore read/write access to the second page. Returns 0 on success, 1 on error. */
static int unprotect_region(void)
{
	int err = mprotect(mem_region + page_size, page_size,
			   PROT_READ|PROT_WRITE);

	if (err)
		perror("mprotect");

	return err ? 1 : 0;
}
extern char __start___ex_table[];
extern char __stop___ex_table[];
struct extbl_entry {
int insn;
int fixup;
};
/*
 * SIGSEGV handler emulating the kernel's exception-table fixup: walk the
 * __ex_table section and, when the faulting NIA matches an entry's insn
 * address, redirect execution to that entry's fixup address. Entries store
 * offsets relative to their own location, as in the kernel.
 */
static void segv_handler(int signr, siginfo_t *info, void *ptr)
{
	ucontext_t *uc = (ucontext_t *)ptr;
	unsigned long addr = (unsigned long)info->si_addr;
	unsigned long *ip = &UCONTEXT_NIA(uc);
	struct extbl_entry *entry = (struct extbl_entry *)__start___ex_table;
	while (entry < (struct extbl_entry *)__stop___ex_table) {
		unsigned long insn, fixup;
		insn = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;
		if (insn == *ip) {
			*ip = fixup;
			return;
		}
		/*
		 * Bug fix: the original never advanced 'entry', spinning
		 * forever whenever the first entry did not match instead of
		 * falling through to the abort() below.
		 */
		entry++;
	}
	printf("No exception table match for NIA %lx ADDR %lx\n", *ip, addr);
	abort();
}
static void setup_segv_handler(void)
{
struct sigaction action;
memset(&action, 0, sizeof(action));
action.sa_sigaction = segv_handler;
action.sa_flags = SA_SIGINFO;
sigaction(SIGSEGV, &action, NULL);
}
/*
 * Read the reference value at p with the fault page unprotected, then
 * re-protect it and compare against load_unaligned_zeropad(), which must
 * produce the same word via the SEGV fixup path. Returns 0 on match.
 */
static int do_one_test(char *p, int page_offset)
{
	unsigned long should;
	unsigned long got;
	FAIL_IF(unprotect_region());
	should = *(unsigned long *)p;
	FAIL_IF(protect_region());
	got = load_unaligned_zeropad(p);
	/* Bug fix: page_offset is signed int, so %d not %u (UB per C11 fprintf). */
	if (should != got) {
		printf("offset %d load_unaligned_zeropad returned 0x%lx, should be 0x%lx\n", page_offset, got, should);
		return 1;
	}
	return 0;
}
/*
 * Map two pages, fill the first with a known pattern and zero the second,
 * protect the second, then run load_unaligned_zeropad() at every offset in
 * the first page so loads that straddle into the protected page exercise
 * the SEGV fixup.
 */
static int test_body(void)
{
	unsigned long i;
	page_size = getpagesize();
	mem_region = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
		MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	FAIL_IF(mem_region == MAP_FAILED);
	/* Low byte of each index gives a recognisable per-byte pattern. */
	for (i = 0; i < page_size; i++)
		mem_region[i] = i;
	memset(mem_region+page_size, 0, page_size);
	setup_segv_handler();
	for (i = 0; i < page_size; i++)
		FAIL_IF(do_one_test(mem_region+i, i));
	return 0;
}
/* Entry point: run the load_unaligned_zeropad test under the harness. */
int main(void)
{
	return test_harness(test_body, "load_unaligned_zeropad");
}
| linux-master | tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c |
../../../../../arch/powerpc/platforms/pseries/vphn.c | linux-master | tools/testing/selftests/powerpc/vphn/vphn.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <byteswap.h>
#include "utils.h"
#include "subunit.h"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define cpu_to_be32(x) bswap_32(x)
#define be32_to_cpu(x) bswap_32(x)
#define be16_to_cpup(x) bswap_16(*x)
#define cpu_to_be64(x) bswap_64(x)
#else
#define cpu_to_be32(x) (x)
#define be32_to_cpu(x) (x)
#define be16_to_cpup(x) (*x)
#define cpu_to_be64(x) (x)
#endif
#include "vphn.c"
static struct test {
char *descr;
long input[VPHN_REGISTER_COUNT];
u32 expected[VPHN_ASSOC_BUFSIZE];
} all_tests[] = {
{
"vphn: no data",
{
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
},
{
0x00000000
}
},
{
"vphn: 1 x 16-bit value",
{
0x8001ffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
},
{
0x00000001,
0x00000001
}
},
{
"vphn: 2 x 16-bit values",
{
0x80018002ffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
},
{
0x00000002,
0x00000001,
0x00000002
}
},
{
"vphn: 3 x 16-bit values",
{
0x800180028003ffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
},
{
0x00000003,
0x00000001,
0x00000002,
0x00000003
}
},
{
"vphn: 4 x 16-bit values",
{
0x8001800280038004,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
},
{
0x00000004,
0x00000001,
0x00000002,
0x00000003,
0x00000004
}
},
{
/* Parsing the next 16-bit value out of the next 64-bit input
* value.
*/
"vphn: 5 x 16-bit values",
{
0x8001800280038004,
0x8005ffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
},
{
0x00000005,
0x00000001,
0x00000002,
0x00000003,
0x00000004,
0x00000005
}
},
{
/* Parse at most 6 x 64-bit input values */
"vphn: 24 x 16-bit values",
{
0x8001800280038004,
0x8005800680078008,
0x8009800a800b800c,
0x800d800e800f8010,
0x8011801280138014,
0x8015801680178018
},
{
0x00000018,
0x00000001,
0x00000002,
0x00000003,
0x00000004,
0x00000005,
0x00000006,
0x00000007,
0x00000008,
0x00000009,
0x0000000a,
0x0000000b,
0x0000000c,
0x0000000d,
0x0000000e,
0x0000000f,
0x00000010,
0x00000011,
0x00000012,
0x00000013,
0x00000014,
0x00000015,
0x00000016,
0x00000017,
0x00000018
}
},
{
"vphn: 1 x 32-bit value",
{
0x00000001ffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff
},
{
0x00000001,
0x00000001
}
},
{
"vphn: 2 x 32-bit values",
{
0x0000000100000002,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff
},
{
0x00000002,
0x00000001,
0x00000002
}
},
{
/* Parsing the next 32-bit value out of the next 64-bit input
* value.
*/
"vphn: 3 x 32-bit values",
{
0x0000000100000002,
0x00000003ffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff
},
{
0x00000003,
0x00000001,
0x00000002,
0x00000003
}
},
{
/* Parse at most 6 x 64-bit input values */
"vphn: 12 x 32-bit values",
{
0x0000000100000002,
0x0000000300000004,
0x0000000500000006,
0x0000000700000008,
0x000000090000000a,
0x0000000b0000000c
},
{
0x0000000c,
0x00000001,
0x00000002,
0x00000003,
0x00000004,
0x00000005,
0x00000006,
0x00000007,
0x00000008,
0x00000009,
0x0000000a,
0x0000000b,
0x0000000c
}
},
{
"vphn: 16-bit value followed by 32-bit value",
{
0x800100000002ffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff
},
{
0x00000002,
0x00000001,
0x00000002
}
},
{
"vphn: 32-bit value followed by 16-bit value",
{
0x000000018002ffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff
},
{
0x00000002,
0x00000001,
0x00000002
}
},
{
/* Parse a 32-bit value split accross two consecutives 64-bit
* input values.
*/
"vphn: 16-bit value followed by 2 x 32-bit values",
{
0x8001000000020000,
0x0003ffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff
},
{
0x00000003,
0x00000001,
0x00000002,
0x00000003,
0x00000004,
0x00000005
}
},
{
/* The lower bits in 0x0001ffff don't get mixed up with the
* 0xffff terminator.
*/
"vphn: 32-bit value has all ones in 16 lower bits",
{
0x0001ffff80028003,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff
},
{
0x00000003,
0x0001ffff,
0x00000002,
0x00000003
}
},
{
/* The following input doesn't follow the specification.
*/
"vphn: last 32-bit value is truncated",
{
0x0000000100000002,
0x0000000300000004,
0x0000000500000006,
0x0000000700000008,
0x000000090000000a,
0x0000000b800c2bad
},
{
0x0000000c,
0x00000001,
0x00000002,
0x00000003,
0x00000004,
0x00000005,
0x00000006,
0x00000007,
0x00000008,
0x00000009,
0x0000000a,
0x0000000b,
0x0000000c
}
},
{
"vphn: garbage after terminator",
{
0xffff2bad2bad2bad,
0x2bad2bad2bad2bad,
0x2bad2bad2bad2bad,
0x2bad2bad2bad2bad,
0x2bad2bad2bad2bad,
0x2bad2bad2bad2bad
},
{
0x00000000
}
},
{
NULL
}
};
/*
 * Run one VPHN unpack case: decode test->input and compare the element
 * count and every associativity value against test->expected. Returns 0 on
 * success, 1 on any mismatch (with a diagnostic on stdout).
 */
static int test_one(struct test *test)
{
	__be32 output[VPHN_ASSOC_BUFSIZE] = { 0 };
	int i, len;
	vphn_unpack_associativity(test->input, output);
	len = be32_to_cpu(output[0]);
	if (len != test->expected[0]) {
		printf("expected %d elements, got %d\n", test->expected[0],
		       len);
		return 1;
	}
	/*
	 * Bug fix: values occupy output[1] .. output[len]; the original
	 * loop condition (i < len) never validated the final element.
	 */
	for (i = 1; i <= len; i++) {
		u32 val = be32_to_cpu(output[i]);
		if (val != test->expected[i]) {
			printf("element #%d is 0x%x, should be 0x%x\n", i, val,
			       test->expected[i]);
			return 1;
		}
	}
	return 0;
}
/*
 * Iterate the all_tests table (terminated by a NULL descr), running each
 * case and reporting its result to the subunit harness. Stops at the first
 * failure.
 */
static int test_vphn(void)
{
	/* Fix: the pointer was needlessly declared 'static' — it is fully
	 * initialized on every call, so static storage served no purpose. */
	struct test *test;
	for (test = all_tests; test->descr; test++) {
		int ret;
		ret = test_one(test);
		test_finish(test->descr, ret);
		if (ret)
			return ret;
	}
	return 0;
}
/* Entry point: run the VPHN unpack tests under the selftest harness. */
int main(int argc, char **argv)
{
	return test_harness(test_vphn, "test-vphn");
}
| linux-master | tools/testing/selftests/powerpc/vphn/test-vphn.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Anton Blanchard, IBM Corp.
*/
#include <sys/time.h>
#include <stdio.h>
#include "utils.h"
/*
 * Benchmark gettimeofday(): time 100 million back-to-back calls and print
 * the elapsed wall-clock time in seconds.
 */
static int test_gettimeofday(void)
{
	int i;
	struct timeval tv_start, tv_end, tv_diff;
	gettimeofday(&tv_start, NULL);
	for(i = 0; i < 100000000; i++) {
		gettimeofday(&tv_end, NULL);
	}
	/*
	 * Bug fix: timersub(a, b, res) computes a - b. The original passed
	 * (start, end), yielding a negative duration; elapsed time is
	 * end - start.
	 */
	timersub(&tv_end, &tv_start, &tv_diff);
	printf("time = %.6f\n", tv_diff.tv_sec + (tv_diff.tv_usec) * 1e-6);
	return 0;
}
/* Entry point: run the gettimeofday benchmark under the harness. */
int main(void)
{
	return test_harness(test_gettimeofday, "gettimeofday");
}
| linux-master | tools/testing/selftests/powerpc/benchmarks/gettimeofday.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2016, Anton Blanchard, Michael Ellerman, IBM Corp.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include <linux/futex.h>
#include "utils.h"
#define ITERATIONS 100000000
#define futex(A, B, C, D, E, F) syscall(__NR_futex, A, B, C, D, E, F)
/*
 * Benchmark the futex syscall: time ITERATIONS FUTEX_WAKE calls on a
 * private, uncontended word and print the elapsed seconds.
 */
int test_futex(void)
{
	struct timespec start, end;
	unsigned long iter;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (iter = 0; iter < ITERATIONS; iter++) {
		unsigned int uaddr = 0;

		/* No waiters exist, so this measures pure syscall cost. */
		futex(&uaddr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
	clock_gettime(CLOCK_MONOTONIC, &end);

	printf("time = %.6f\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec) / 1e9);

	return 0;
}
/* Entry point: run the futex benchmark with an extended timeout. */
int main(void)
{
	test_harness_set_timeout(300);
	return test_harness(test_futex, "futex_bench");
}
| linux-master | tools/testing/selftests/powerpc/benchmarks/futex_bench.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Context switch microbenchmark.
*
* Copyright 2018, Anton Blanchard, IBM Corp.
*/
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <linux/futex.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/shm.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
static unsigned int timeout = 30;
/*
 * Pin the calling process to the given CPU; -1 means "any CPU" (no-op).
 * Exits the process on sched_setaffinity failure.
 */
static void set_cpu(int cpu)
{
	cpu_set_t cpuset;
	if (cpu == -1)
		return;
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) {
		perror("sched_setaffinity");
		exit(1);
	}
}
/*
 * Fork a child that pins itself to 'cpu' and runs fn(arg), then exits.
 * The parent returns immediately; the child never returns from here.
 */
static void start_process_on(void *(*fn)(void *), void *arg, int cpu)
{
	int pid;
	pid = fork();
	if (pid == -1) {
		perror("fork");
		exit(1);
	}
	if (pid)
		return;
	set_cpu(cpu);
	fn(arg);
	exit(0);
}
static int cpu;
static int do_fork = 0;
static int do_vfork = 0;
static int do_exec = 0;
static char *exec_file;
static int exec_target = 0;
static unsigned long iterations;
static unsigned long iterations_prev;
/*
 * Replace the current process image with ./exec_target (this same binary
 * run with --exec-target, which exits immediately). Only returns on error.
 */
static void run_exec(void)
{
	char *const argv[] = { "./exec_target", NULL };
	if (execve("./exec_target", argv, NULL) == -1) {
		perror("execve");
		exit(1);
	}
}
/*
 * Benchmark loop: fork a child (optionally exec'ing the trivial target),
 * reap it, and bump the global iteration counter. Runs until the process
 * is killed by the timeout signal.
 */
static void bench_fork(void)
{
	while (1) {
		pid_t pid = fork();
		if (pid == -1) {
			perror("fork");
			exit(1);
		}
		if (pid == 0) {
			if (do_exec)
				run_exec();
			_exit(0);
		}
		pid = waitpid(pid, NULL, 0);
		if (pid == -1) {
			perror("waitpid");
			exit(1);
		}
		iterations++;
	}
}
/*
 * Benchmark loop: vfork a child (optionally exec'ing the trivial target),
 * reap it, and bump the global iteration counter. The child only calls
 * execve() or _exit(), which are the operations permitted after vfork().
 * Runs until the process is killed by the timeout signal.
 */
static void bench_vfork(void)
{
	while (1) {
		pid_t pid = vfork();
		if (pid == -1) {
			/* Bug fix: this failure was mislabelled "fork". */
			perror("vfork");
			exit(1);
		}
		if (pid == 0) {
			if (do_exec)
				run_exec();
			_exit(0);
		}
		pid = waitpid(pid, NULL, 0);
		if (pid == -1) {
			perror("waitpid");
			exit(1);
		}
		iterations++;
	}
}
/* Minimal thread body: terminate immediately (used to time create/join). */
static void *null_fn(void *arg)
{
	pthread_exit(NULL);
}
/*
 * Benchmark loop: create and join a do-nothing thread (optionally pinned
 * to 'cpu') and bump the global iteration counter. Runs until the process
 * is killed by the timeout signal.
 */
static void bench_thread(void)
{
	pthread_t tid;
	cpu_set_t cpuset;
	pthread_attr_t attr;
	int rc;
	rc = pthread_attr_init(&attr);
	if (rc) {
		/* pthreads returns error codes rather than setting errno. */
		errno = rc;
		perror("pthread_attr_init");
		exit(1);
	}
	if (cpu != -1) {
		CPU_ZERO(&cpuset);
		CPU_SET(cpu, &cpuset);
		rc = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
		if (rc) {
			errno = rc;
			perror("pthread_attr_setaffinity_np");
			exit(1);
		}
	}
	while (1) {
		rc = pthread_create(&tid, &attr, null_fn, NULL);
		if (rc) {
			errno = rc;
			perror("pthread_create");
			exit(1);
		}
		rc = pthread_join(tid, NULL);
		if (rc) {
			errno = rc;
			perror("pthread_join");
			exit(1);
		}
		iterations++;
	}
}
/*
 * Once-a-second tick: print iterations completed since the last tick and,
 * when the run time expires, signal the whole process group to exit.
 */
static void sigalrm_handler(int junk)
{
	unsigned long i = iterations;
	printf("%ld\n", i - iterations_prev);
	iterations_prev = i;
	if (--timeout == 0)
		kill(0, SIGUSR1);
	alarm(1);
}
/* SIGUSR1 = benchmark over: exit cleanly. */
static void sigusr1_handler(int junk)
{
	exit(0);
}
/*
 * Benchmark driver: arm the per-second reporting alarm, then run whichever
 * loop was selected on the command line (fork, vfork, or thread create).
 * Never returns normally — the loops run until the timeout signal.
 */
static void *bench_proc(void *arg)
{
	signal(SIGALRM, sigalrm_handler);
	alarm(1);
	if (do_fork)
		bench_fork();
	else if (do_vfork)
		bench_vfork();
	else
		bench_thread();
	return NULL;
}
/* Long options; flag-style entries set their int directly via getopt_long. */
static struct option options[] = {
	{ "fork", no_argument, &do_fork, 1 },
	{ "vfork", no_argument, &do_vfork, 1 },
	{ "exec", no_argument, &do_exec, 1 },
	{ "timeout", required_argument, 0, 's' },
	{ "exec-target", no_argument, &exec_target, 1 },
	{ NULL },
};
/* Print command-line help to stderr. */
static void usage(void)
{
	fprintf(stderr, "Usage: fork <options> CPU\n\n");
	fprintf(stderr, "\t\t--fork\tUse fork() (default threads)\n");
	fprintf(stderr, "\t\t--vfork\tUse vfork() (default threads)\n");
	fprintf(stderr, "\t\t--exec\tAlso exec() (default no exec)\n");
	fprintf(stderr, "\t\t--timeout=X\tDuration in seconds to run (default 30)\n");
	fprintf(stderr, "\t\t--exec-target\tInternal option for exec workload\n");
}
/*
 * Parse options, validate the mode combination, optionally chdir next to
 * the binary so ./exec_target resolves, then launch the benchmark child
 * and sleep until the timeout machinery kills the process group.
 */
int main(int argc, char *argv[])
{
	signed char c;
	while (1) {
		int option_index = 0;
		c = getopt_long(argc, argv, "", options, &option_index);
		if (c == -1)
			break;
		switch (c) {
		case 0:
			/* Flag-style option: getopt_long already set it. */
			if (options[option_index].flag != 0)
				break;
			usage();
			exit(1);
			break;
		case 's':
			timeout = atoi(optarg);
			break;
		default:
			usage();
			exit(1);
		}
	}
	/* fork and vfork modes are mutually exclusive. */
	if (do_fork && do_vfork) {
		usage();
		exit(1);
	}
	/* exec only makes sense on top of fork or vfork. */
	if (do_exec && !do_fork && !do_vfork) {
		usage();
		exit(1);
	}
	if (do_exec) {
		/* chdir to the binary's directory so ./exec_target is found. */
		char *dirname = strdup(argv[0]);
		int i;
		i = strlen(dirname) - 1;
		while (i) {
			if (dirname[i] == '/') {
				dirname[i] = '\0';
				if (chdir(dirname) == -1) {
					perror("chdir");
					exit(1);
				}
				break;
			}
			i--;
		}
	}
	/* Invoked as the exec workload target: do nothing and exit. */
	if (exec_target) {
		exit(0);
	}
	if (((argc - optind) != 1)) {
		cpu = -1;
	} else {
		cpu = atoi(argv[optind++]);
	}
	if (do_exec)
		exec_file = argv[0];
	set_cpu(cpu);
	printf("Using ");
	if (do_fork)
		printf("fork");
	else if (do_vfork)
		printf("vfork");
	else
		printf("clone");
	if (do_exec)
		printf(" + exec");
	printf(" on cpu %d\n", cpu);
	/* Create a new process group so we can signal everyone for exit */
	setpgid(getpid(), getpid());
	signal(SIGUSR1, sigusr1_handler);
	start_process_on(bench_proc, NULL, cpu);
	while (1)
		sleep(3600);
	return 0;
}
| linux-master | tools/testing/selftests/powerpc/benchmarks/fork.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Context switch microbenchmark.
*
* Copyright (C) 2015 Anton Blanchard <[email protected]>, IBM
*/
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <getopt.h>
#include <signal.h>
#include <assert.h>
#include <pthread.h>
#include <limits.h>
#include <sys/time.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <sys/types.h>
#include <sys/shm.h>
#include <linux/futex.h>
#ifdef __powerpc__
#include <altivec.h>
#endif
#include "utils.h"
static unsigned int timeout = 30;
static int touch_vdso;
struct timeval tv;
static int touch_fp = 1;
double fp;
static int touch_vector = 1;
vector int a, b, c;
#ifdef __powerpc__
static int touch_altivec = 1;
/*
* Note: LTO (Link Time Optimisation) doesn't play well with this function
* attribute. Be very careful enabling LTO for this test.
*/
/* Vector add compiled without VSX so it dirties only the Altivec unit. */
static void __attribute__((__target__("no-vsx"))) altivec_touch_fn(void)
{
	c = a + b;
}
#endif
/*
 * Dirty the register state selected on the command line (vdso call, FP,
 * Altivec, vector) so each context switch must save/restore it. The empty
 * asm keeps the compiler from optimising the touched state away.
 */
static void touch(void)
{
	if (touch_vdso)
		gettimeofday(&tv, NULL);
	if (touch_fp)
		fp += 0.1;
#ifdef __powerpc__
	if (touch_altivec)
		altivec_touch_fn();
#endif
	if (touch_vector)
		c = a + b;
	asm volatile("# %0 %1 %2": : "r"(&tv), "r"(&fp), "r"(&c));
}
/*
 * Create a detached-from-caller worker thread running fn(arg), pinned to
 * the given CPU via its pthread attributes. Exits the process on any
 * pthreads error (error codes are copied into errno for perror).
 */
static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu)
{
	int rc;
	pthread_t tid;
	cpu_set_t cpuset;
	pthread_attr_t attr;
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	rc = pthread_attr_init(&attr);
	if (rc) {
		errno = rc;
		perror("pthread_attr_init");
		exit(1);
	}
	rc = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
	if (rc) {
		errno = rc;
		perror("pthread_attr_setaffinity_np");
		exit(1);
	}
	rc = pthread_create(&tid, &attr, fn, arg);
	if (rc) {
		errno = rc;
		perror("pthread_create");
		exit(1);
	}
}
/*
 * Fork a child pinned to 'cpu' (using a dynamically sized cpu_set_t so
 * systems with many CPUs work) that runs fn(arg) and exits. The parent
 * returns immediately.
 */
static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
{
	int pid, ncpus;
	cpu_set_t *cpuset;
	size_t size;
	pid = fork();
	if (pid == -1) {
		perror("fork");
		exit(1);
	}
	if (pid)
		return;
	ncpus = get_nprocs();
	size = CPU_ALLOC_SIZE(ncpus);
	cpuset = CPU_ALLOC(ncpus);
	if (!cpuset) {
		perror("malloc");
		exit(1);
	}
	CPU_ZERO_S(size, cpuset);
	CPU_SET_S(cpu, size, cpuset);
	if (sched_setaffinity(0, size, cpuset)) {
		perror("sched_setaffinity");
		CPU_FREE(cpuset);
		exit(1);
	}
	CPU_FREE(cpuset);
	fn(arg);
	exit(0);
}
static unsigned long iterations;
static unsigned long iterations_prev;
/*
 * Once-a-second tick: print context switches completed since the last
 * tick and, when the run time expires, signal the process group to exit.
 */
static void sigalrm_handler(int junk)
{
	unsigned long i = iterations;
	printf("%ld\n", i - iterations_prev);
	iterations_prev = i;
	if (--timeout == 0)
		kill(0, SIGUSR1);
	alarm(1);
}
/* SIGUSR1 = benchmark over: exit cleanly. */
static void sigusr1_handler(int junk)
{
	exit(0);
}
struct actions {
void (*setup)(int, int);
void *(*thread1)(void *);
void *(*thread2)(void *);
};
#define READ 0
#define WRITE 1
static int pipe_fd1[2];
static int pipe_fd2[2];
/* Create the two pipes used to ping-pong a byte between the two workers. */
static void pipe_setup(int cpu1, int cpu2)
{
	if (pipe(pipe_fd1) != 0)
		exit(1);
	if (pipe(pipe_fd2) != 0)
		exit(1);
}
/*
 * First worker: owns the reporting alarm; ping-pongs a byte with the
 * second worker over the two pipes, touching the selected register state
 * after each transfer. Each round trip counts as two context switches.
 */
static void *pipe_thread1(void *arg)
{
	signal(SIGALRM, sigalrm_handler);
	alarm(1);
	while (1) {
		assert(read(pipe_fd1[READ], &c, 1) == 1);
		touch();
		assert(write(pipe_fd2[WRITE], &c, 1) == 1);
		touch();
		iterations += 2;
	}
	return NULL;
}
/* Second worker: mirror image of pipe_thread1 (writes first, then reads). */
static void *pipe_thread2(void *arg)
{
	while (1) {
		assert(write(pipe_fd1[WRITE], &c, 1) == 1);
		touch();
		assert(read(pipe_fd2[READ], &c, 1) == 1);
		touch();
	}
	return NULL;
}
/* Dispatch entry for the pipe-based context switch benchmark. */
static struct actions pipe_actions = {
	.setup = pipe_setup,
	.thread1 = pipe_thread1,
	.thread2 = pipe_thread2,
};
/* The yield benchmark only measures anything if both workers share a CPU. */
static void yield_setup(int cpu1, int cpu2)
{
	if (cpu1 == cpu2)
		return;

	fprintf(stderr, "Both threads must be on the same CPU for yield test\n");
	exit(1);
}
/*
 * First yield worker: owns the reporting alarm; repeatedly yields the CPU
 * to its partner and touches the selected register state. Each yield pair
 * counts as two context switches.
 */
static void *yield_thread1(void *arg)
{
	signal(SIGALRM, sigalrm_handler);
	alarm(1);
	while (1) {
		sched_yield();
		touch();
		iterations += 2;
	}
	return NULL;
}
/* Second yield worker: yields back to the first, no counting here. */
static void *yield_thread2(void *arg)
{
	while (1) {
		sched_yield();
		touch();
	}
	return NULL;
}
/* Dispatch entry for the sched_yield-based context switch benchmark. */
static struct actions yield_actions = {
	.setup = yield_setup,
	.thread1 = yield_thread1,
	.thread2 = yield_thread2,
};
/* Thin wrapper around the raw futex(2) syscall (glibc has no wrapper). */
static long sys_futex(void *addr1, int op, int val1, struct timespec *timeout,
		      void *addr2, int val3)
{
	return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
}
/*
 * Atomic compare-and-swap: set *p to @desired iff it currently holds
 * @expected. Returns the value *p held before the operation, so the call
 * succeeded exactly when the return value equals @expected.
 */
static unsigned long cmpxchg(unsigned long *p, unsigned long expected,
			     unsigned long desired)
{
	unsigned long observed = expected;

	/* Strong (non-spurious-failure) CAS with full barriers. */
	__atomic_compare_exchange_n(p, &observed, desired, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

	return observed;
}
/* Atomically store val into *p; returns the previous contents of *p. */
static unsigned long xchg(unsigned long *p, unsigned long val)
{
	unsigned long previous;

	previous = __atomic_exchange_n(p, val, __ATOMIC_SEQ_CST);

	return previous;
}
/* Non-zero when --process was given: run the benchmark tasks as
 * separate processes, which also means the futexes must not use
 * FUTEX_PRIVATE_FLAG. */
static int processes;

/*
 * Three-state futex mutex in the style of Drepper's "Futexes Are
 * Tricky": 0 = unlocked, 1 = locked with no waiters, 2 = locked with
 * possible waiters.
 */
static int mutex_lock(unsigned long *m)
{
	int c;
	int flags = FUTEX_WAIT;

	/* Private futexes are cheaper but only valid within one process */
	if (!processes)
		flags |= FUTEX_PRIVATE_FLAG;

	/* Fast path: take the lock uncontended (0 -> 1) */
	c = cmpxchg(m, 0, 1);
	if (!c)
		return 0;

	/* Contended: advertise a waiter (-> 2), then sleep while the word
	 * stays 2, re-acquiring with xchg after every wakeup. */
	if (c == 1)
		c = xchg(m, 2);

	while (c) {
		sys_futex(m, flags, 2, NULL, NULL, 0);
		c = xchg(m, 2);
	}

	return 0;
}

static int mutex_unlock(unsigned long *m)
{
	int flags = FUTEX_WAKE;

	if (!processes)
		flags |= FUTEX_PRIVATE_FLAG;

	/* If waiters are possible (2), clear the lock and wake one below;
	 * if the uncontended xchg saw 1 there is nobody to wake. */
	if (*m == 2)
		*m = 0;
	else if (xchg(m, 0) == 1)
		return 0;

	sys_futex(m, flags, 1, NULL, NULL, 0);

	return 0;
}

/* The two mutexes the futex benchmark hands off between tasks. */
static unsigned long *m1, *m2;

/* Allocate m1/m2 — static storage for the threaded case, a SysV shared
 * memory segment for the process case — and take both locks so the
 * tasks start from a known state. */
static void futex_setup(int cpu1, int cpu2)
{
	if (!processes) {
		static unsigned long _m1, _m2;
		m1 = &_m1;
		m2 = &_m2;
	} else {
		int shmid;
		void *shmaddr;

		shmid = shmget(IPC_PRIVATE, getpagesize(), SHM_R | SHM_W);
		if (shmid < 0) {
			perror("shmget");
			exit(1);
		}

		shmaddr = shmat(shmid, NULL, 0);
		if (shmaddr == (char *)-1) {
			perror("shmat");
			shmctl(shmid, IPC_RMID, NULL);
			exit(1);
		}

		/* Mark for removal now; segment lives until last detach */
		shmctl(shmid, IPC_RMID, NULL);

		m1 = shmaddr;
		m2 = shmaddr + sizeof(*m1);
	}

	*m1 = 0;
	*m2 = 0;

	mutex_lock(m1);
	mutex_lock(m2);
}
/* Futex benchmark, task 1: block on m2, then release m1 so task 2 can
 * run — each handoff costs two context switches. */
static void *futex_thread1(void *arg)
{
	signal(SIGALRM, sigalrm_handler);
	alarm(1);

	while (1) {
		mutex_lock(m2);
		mutex_unlock(m1);

		iterations += 2;
	}

	return NULL;
}

/* Futex benchmark, task 2: the mirror image — release m2, block on m1. */
static void *futex_thread2(void *arg)
{
	while (1) {
		mutex_unlock(m2);
		mutex_lock(m1);
	}

	return NULL;
}

static struct actions futex_actions = {
	.setup = futex_setup,
	.thread1 = futex_thread1,
	.thread2 = futex_thread2,
};
/* getopt_long() table; the flag-style entries store straight into the
 * corresponding file-scope touch_*/processes globals. */
static struct option options[] = {
	{ "test", required_argument, 0, 't' },
	{ "process", no_argument, &processes, 1 },
	{ "timeout", required_argument, 0, 's' },
	{ "vdso", no_argument, &touch_vdso, 1 },
	{ "no-fp", no_argument, &touch_fp, 0 },
#ifdef __powerpc__
	{ "no-altivec", no_argument, &touch_altivec, 0 },
#endif
	{ "no-vector", no_argument, &touch_vector, 0 },
	{ 0, },
};
/* Print command line help to stderr. */
static void usage(void)
{
	fputs("Usage: context_switch2 <options> CPU1 CPU2\n\n", stderr);
	fputs("\t\t--test=X\tpipe, futex or yield (default)\n", stderr);
	fputs("\t\t--process\tUse processes (default threads)\n", stderr);
	fputs("\t\t--timeout=X\tDuration in seconds to run (default 30)\n", stderr);
	fputs("\t\t--vdso\t\ttouch VDSO\n", stderr);
	fputs("\t\t--no-fp\t\tDon't touch FP\n", stderr);
#ifdef __powerpc__
	fputs("\t\t--no-altivec\tDon't touch altivec\n", stderr);
#endif
	fputs("\t\t--no-vector\tDon't touch vector\n", stderr);
}
int main(int argc, char *argv[])
{
	signed char c;
	/* Default benchmark flavour is the sched_yield() ping-pong */
	struct actions *actions = &yield_actions;
	int cpu1;
	int cpu2;
	static void (*start_fn)(void *(*fn)(void *), void *arg, unsigned long cpu);

	/* Parse long options; flag-style entries are stored by getopt
	 * itself and land in "case 0" with a non-NULL flag pointer. */
	while (1) {
		int option_index = 0;

		c = getopt_long(argc, argv, "", options, &option_index);

		if (c == -1)
			break;

		switch (c) {
		case 0:
			if (options[option_index].flag != 0)
				break;

			usage();
			exit(1);
			break;

		case 't':
			if (!strcmp(optarg, "pipe")) {
				actions = &pipe_actions;
			} else if (!strcmp(optarg, "yield")) {
				actions = &yield_actions;
			} else if (!strcmp(optarg, "futex")) {
				actions = &futex_actions;
			} else {
				usage();
				exit(1);
			}
			break;

		case 's':
			timeout = atoi(optarg);
			break;

		default:
			usage();
			exit(1);
		}
	}

	/* --process switches from pthread-based to fork-based tasks */
	if (processes)
		start_fn = start_process_on;
	else
		start_fn = start_thread_on;

	/* Without explicit CPU arguments, run both tasks on one online CPU */
	if (((argc - optind) != 2)) {
		cpu1 = cpu2 = pick_online_cpu();
	} else {
		cpu1 = atoi(argv[optind++]);
		cpu2 = atoi(argv[optind++]);
	}

	printf("Using %s with ", processes ? "processes" : "threads");

	if (actions == &pipe_actions)
		printf("pipe");
	else if (actions == &yield_actions)
		printf("yield");
	else
		printf("futex");

	/* Don't attempt to dirty vector state the CPU doesn't have */
	if (!have_hwcap(PPC_FEATURE_HAS_ALTIVEC))
		touch_altivec = 0;

	if (!have_hwcap(PPC_FEATURE_HAS_VSX))
		touch_vector = 0;

	printf(" on cpus %d/%d touching FP:%s altivec:%s vector:%s vdso:%s\n",
	       cpu1, cpu2, touch_fp ? "yes" : "no", touch_altivec ? "yes" : "no",
	       touch_vector ? "yes" : "no", touch_vdso ? "yes" : "no");

	/* Create a new process group so we can signal everyone for exit */
	setpgid(getpid(), getpid());

	signal(SIGUSR1, sigusr1_handler);

	actions->setup(cpu1, cpu2);

	start_fn(actions->thread1, NULL, cpu1);
	start_fn(actions->thread2, NULL, cpu2);

	/* sigalrm_handler/sigusr1_handler terminate the run for us */
	while (1)
		sleep(3600);

	return 0;
}
| linux-master | tools/testing/selftests/powerpc/benchmarks/context_switch.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2016, Anton Blanchard, Michael Ellerman, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <time.h>
#include <getopt.h>
#include "utils.h"
#define ITERATIONS 5000000
#define MEMSIZE (1UL << 27)
#define PAGE_SIZE (1UL << 16)
#define CHUNK_COUNT (MEMSIZE/PAGE_SIZE)
/* Set to 1 by --pgfault: touch every page of the mapping so that
 * page-fault servicing is included in the measured time. */
static int pg_fault;
/* Number of mmap()/munmap() cycles to run (--iterations). */
static int iterations = ITERATIONS;

static struct option options[] = {
	{ "pgfault", no_argument, &pg_fault, 1 },
	{ "iterations", required_argument, 0, 'i' },
	{ 0, },
};
/* Print command line help. */
static void usage(void)
{
	fputs("mmap_bench <--pgfault> <--iterations count>\n", stdout);
}
/*
 * Benchmark repeated mmap()/munmap() of a MEMSIZE anonymous mapping.
 *
 * With --pgfault, additionally touch one byte per PAGE_SIZE chunk so
 * the cost of servicing those page faults is measured too.
 *
 * Prints the elapsed wall-clock time in seconds; returns 0 on success
 * or fails (via FAIL_IF) if an mmap() call fails.
 */
int test_mmap(void)
{
	struct timespec ts_start, ts_end;
	unsigned long i = iterations;

	clock_gettime(CLOCK_MONOTONIC, &ts_start);

	while (i--) {
		char *c = mmap(NULL, MEMSIZE, PROT_READ|PROT_WRITE,
			       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		FAIL_IF(c == MAP_FAILED);
		if (pg_fault) {
			int count;

			/* Touch one byte per chunk. Derive the stride from
			 * the existing size macros rather than hard-coding
			 * the 64K shift (MEMSIZE / CHUNK_COUNT == PAGE_SIZE),
			 * so changing PAGE_SIZE keeps this loop correct. */
			for (count = 0; count < CHUNK_COUNT; count++)
				c[count * (MEMSIZE / CHUNK_COUNT)] = 'c';
		}
		munmap(c, MEMSIZE);
	}

	clock_gettime(CLOCK_MONOTONIC, &ts_end);
	printf("time = %.6f\n", ts_end.tv_sec - ts_start.tv_sec + (ts_end.tv_nsec - ts_start.tv_nsec) / 1e9);

	return 0;
}
int main(int argc, char *argv[])
{
	signed char c;

	/* Parse --pgfault (flag, stored by getopt) and --iterations */
	while (1) {
		int option_index = 0;

		c = getopt_long(argc, argv, "", options, &option_index);

		if (c == -1)
			break;

		switch (c) {
		case 0:
			if (options[option_index].flag != 0)
				break;

			usage();
			exit(1);
			break;

		case 'i':
			iterations = atoi(optarg);
			break;

		default:
			usage();
			exit(1);
		}
	}

	/* The default iteration count can take minutes on slow machines */
	test_harness_set_timeout(300);
	return test_harness(test_mmap, "mmap_bench");
}
| linux-master | tools/testing/selftests/powerpc/benchmarks/mmap_bench.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Part of fork context switch microbenchmark.
*
* Copyright 2018, Anton Blanchard, IBM Corp.
*/
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
/*
 * Minimal exec() target for the fork/exec benchmark: built without the
 * C runtime, so _start is the raw entry point and must terminate via a
 * direct exit system call.
 */
void _start(void)
{
	syscall(SYS_exit, 0);
}
| linux-master | tools/testing/selftests/powerpc/benchmarks/exec_target.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test null syscall performance
*
* Copyright (C) 2009-2015 Anton Blanchard, IBM
*/
#define NR_LOOPS 10000000
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/syscall.h>
#include <signal.h>
/* Set by the SIGALRM handler to terminate the warm-up busy loop. */
static volatile int soak_done;

/* Frequencies parsed from /proc/cpuinfo; timebase_multiplier converts
 * timebase ticks into CPU clock cycles. */
unsigned long long clock_frequency;
unsigned long long timebase_frequency;
double timebase_multiplier;

/* Read the PowerPC timebase register. */
static inline unsigned long mftb(void)
{
	unsigned long low;

	asm volatile("mftb %0" : "=r" (low));

	return low;
}

static void sigalrm_handler(int unused)
{
	soak_done = 1;
}
/*
 * Use a timer instead of busy looping on clock_gettime() so we don't
 * pollute profiles with glibc and VDSO hits.
 */
static void cpu_soak_usecs(unsigned long usecs)
{
	struct itimerval val;

	memset(&val, 0, sizeof(val));
	val.it_value.tv_usec = usecs;

	signal(SIGALRM, sigalrm_handler);
	setitimer(ITIMER_REAL, &val, NULL);

	/* Burn CPU until the timer fires and the handler sets soak_done */
	while (1) {
		if (soak_done)
			break;
	}

	signal(SIGALRM, SIG_DFL);
}
/*
 * This only works with recent kernels where cpufreq modifies
 * /proc/cpuinfo dynamically.
 */
static void get_proc_frequency(void)
{
	FILE *f;
	char line[128];
	char *p, *end;
	unsigned long v;
	double d;
	char *override;

	/* Try to get out of low power/low frequency mode */
	cpu_soak_usecs(0.25 * 1000000);

	f = fopen("/proc/cpuinfo", "r");
	if (f == NULL)
		return;

	timebase_frequency = 0;

	while (fgets(line, sizeof(line), f) != NULL) {
		/* "timebase : <value in Hz>" */
		if (strncmp(line, "timebase", 8) == 0) {
			p = strchr(line, ':');
			if (p != NULL) {
				v = strtoull(p + 1, &end, 0);
				if (end != p + 1)
					timebase_frequency = v;
			}
		}

		/* "clock"/"cpu MHz" lines carry the clock speed in MHz */
		if (((strncmp(line, "clock", 5) == 0) ||
		     (strncmp(line, "cpu MHz", 7) == 0))) {
			p = strchr(line, ':');
			if (p != NULL) {
				d = strtod(p + 1, &end);
				if (end != p + 1) {
					/* Find fastest clock frequency */
					if ((d * 1000000ULL) > clock_frequency)
						clock_frequency = d * 1000000ULL;
				}
			}
		}
	}

	fclose(f);

	/* FREQUENCY env var (in Hz) overrides the detected clock */
	override = getenv("FREQUENCY");
	if (override)
		clock_frequency = strtoull(override, NULL, 10);

	if (timebase_frequency)
		timebase_multiplier = (double)clock_frequency
					/ timebase_frequency;
	else
		timebase_multiplier = 1;
}
/* Issue nr back-to-back gettid() system calls. */
static void do_null_syscall(unsigned long nr)
{
	unsigned long remaining = nr;

	while (remaining--)
		syscall(__NR_gettid);
}
#define TIME(A, STR) \
int main(void)
{
unsigned long tb_start, tb_now;
struct timespec tv_start, tv_now;
unsigned long long elapsed_ns, elapsed_tb;
get_proc_frequency();
clock_gettime(CLOCK_MONOTONIC, &tv_start);
tb_start = mftb();
do_null_syscall(NR_LOOPS);
clock_gettime(CLOCK_MONOTONIC, &tv_now);
tb_now = mftb();
elapsed_ns = (tv_now.tv_sec - tv_start.tv_sec) * 1000000000ULL +
(tv_now.tv_nsec - tv_start.tv_nsec);
elapsed_tb = tb_now - tb_start;
printf("%10.2f ns %10.2f cycles\n", (float)elapsed_ns / NR_LOOPS,
(float)elapsed_tb * timebase_multiplier / NR_LOOPS);
return 0;
}
| linux-master | tools/testing/selftests/powerpc/benchmarks/null_syscall.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Chris Smart, IBM Corporation.
*
* Calls to copy_first which are not 128-byte aligned should be
* caught and sent a SIGBUS.
*/
#include <signal.h>
#include <string.h>
#include <unistd.h>
#include "utils.h"
#include "instructions.h"
/* Expected opcode of the faulting copy_first instruction, and a mask
 * that ignores the compiler-chosen RB register field. */
unsigned int expected_instruction = PPC_INST_COPY_FIRST;
unsigned int instruction_mask = 0xfc2007fe;

/* SIGBUS handler: exit 0 iff the faulting instruction (at the saved
 * NIP in the signal ucontext) is the copy_first under test; exit 1 for
 * any other faulting instruction. */
void signal_action_handler(int signal_num, siginfo_t *info, void *ptr)
{
	ucontext_t *ctx = ptr;
#ifdef __powerpc64__
	unsigned int *pc = (unsigned int *)ctx->uc_mcontext.gp_regs[PT_NIP];
#else
	unsigned int *pc = (unsigned int *)ctx->uc_mcontext.uc_regs->gregs[PT_NIP];
#endif

	/*
	 * Check that the signal was on the correct instruction, using a
	 * mask because the compiler assigns the register at RB.
	 */
	if ((*pc & instruction_mask) == expected_instruction)
		_exit(0); /* We hit the right instruction */

	_exit(1);
}
/*
 * Install signal_action_handler() for SIGBUS, which the test relies on
 * to detect the alignment fault. Previously a sigaction() failure was
 * silently ignored, which would make the test die with the default
 * SIGBUS action; report the error and bail out instead.
 */
void setup_signal_handler(void)
{
	struct sigaction signal_action;

	memset(&signal_action, 0, sizeof(signal_action));
	signal_action.sa_sigaction = signal_action_handler;
	signal_action.sa_flags = SA_SIGINFO;
	if (sigaction(SIGBUS, &signal_action, NULL) == -1) {
		perror("sigaction");
		_exit(1);
	}
}
/* 128-byte aligned buffer: copy_first must be 128-byte aligned (see
 * file header), so buf+1 below is guaranteed misaligned. */
char cacheline_buf[128] __cacheline_aligned;

/* Run copy_first on a misaligned address; the expected SIGBUS is
 * handled by signal_action_handler(), which exits with the result.
 * Falling through to the return means no fault occurred: fail. */
int test_copy_first_unaligned(void)
{
	/* Only run this test on a P9 or later */
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00));

	/* Register our signal handler with SIGBUS */
	setup_signal_handler();

	/* +1 makes buf unaligned */
	copy_first(cacheline_buf+1);

	/* We should not get here */
	return 1;
}

int main(int argc, char *argv[])
{
	return test_harness(test_copy_first_unaligned, "test_copy_first_unaligned");
}
| linux-master | tools/testing/selftests/powerpc/alignment/copy_first_unaligned.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test the powerpc alignment handler on POWER8/POWER9
*
* Copyright (C) 2017 IBM Corporation (Michael Neuling, Andrew Donnellan)
*/
/*
* This selftest exercises the powerpc alignment fault handler.
*
* We create two sets of source and destination buffers, one in regular memory,
* the other cache-inhibited (by default we use /dev/fb0 for this, but an
* alterative path for cache-inhibited memory may be provided, e.g. memtrace).
*
* We initialise the source buffers, then use whichever set of load/store
* instructions is under test to copy bytes from the source buffers to the
* destination buffers. For the regular buffers, these instructions will
* execute normally. For the cache-inhibited buffers, these instructions
* will trap and cause an alignment fault, and the alignment fault handler
* will emulate the particular instruction under test. We then compare the
* destination buffers to ensure that the native and emulated cases give the
* same result.
*
* TODO:
* - Any FIXMEs below
* - Test VSX regs < 32 and > 32
* - Test all loads and stores
* - Check update forms do update register
* - Test alignment faults over page boundary
*
* Some old binutils may not support all the instructions.
*/
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <getopt.h>
#include <setjmp.h>
#include <signal.h>
#include "utils.h"
#include "instructions.h"
int bufsize;			/* one page (set from getpagesize() in main) */
int debug;			/* -d: verbose failure output */
int testing;			/* non-zero while a test sequence is executing */
volatile int gotsig;		/* signal caught during a test sequence, else 0 */
bool prefixes_enabled;		/* CPU supports ISA 3.1 prefixed instructions */
char *cipath = "/dev/fb0";	/* path providing cache-inhibited memory */
long cioffset;			/* offset into cipath to mmap */

/* Handler for SIGSEGV/SIGBUS/SIGILL (registered in main). During a
 * test: record the signal and advance the NIP past the faulting
 * instruction — 8 bytes if it is prefixed (primary opcode 1), else 4.
 * Outside a test: re-raise with the default disposition. */
void sighandler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *ucp = ctx;

	if (!testing) {
		signal(sig, SIG_DFL);
		kill(0, sig);
	}
	gotsig = sig;

#ifdef __powerpc64__
	if (prefixes_enabled) {
		u32 inst = *(u32 *)ucp->uc_mcontext.gp_regs[PT_NIP];
		ucp->uc_mcontext.gp_regs[PT_NIP] += ((inst >> 26 == 1) ? 8 : 4);
	} else {
		ucp->uc_mcontext.gp_regs[PT_NIP] += 4;
	}
#else
	ucp->uc_mcontext.uc_regs->gregs[PT_NIP] += 4;
#endif
}
/* Operand fragments: X-form (reg,reg indexed) and D-form (0(reg)). */
#define XFORM(reg, n)  " " #reg " ,%"#n",%2 ;"
#define DFORM(reg, n)  " " #reg " ,0(%"#n") ;"

/* Emit test_<name>(): a single load from s into ld_reg followed by a
 * single store from st_reg into d, then invoke do_test() on it. One of
 * the pair is the instruction under test; the other is a known-good
 * partner used to move the data. */
#define TEST(name, ld_op, st_op, form, ld_reg, st_reg)		\
	void test_##name(char *s, char *d)			\
	{							\
		asm volatile(					\
			#ld_op form(ld_reg, 0)			\
			#st_op form(st_reg, 1)			\
			:: "r"(s), "r"(d), "r"(0)		\
			: "memory", "vs0", "vs32", "r31");	\
	}							\
	rc |= do_test(#name, test_##name)

/* As TEST(), but for prefixed (ISA 3.1) instruction macros that take
 * (reg, base, offset, pr) arguments. */
#define TESTP(name, ld_op, st_op, ld_reg, st_reg)		\
	void test_##name(char *s, char *d)			\
	{							\
		asm volatile(					\
			ld_op(ld_reg, %0, 0, 0)			\
			st_op(st_reg, %1, 0, 0)			\
			:: "r"(s), "r"(d), "r"(0)		\
			: "memory", "vs0", "vs32", "r31");	\
	}							\
	rc |= do_test(#name, test_##name)

/* Per-family wrappers: choose the known-good partner instruction and
 * the register numbers appropriate to each instruction class. */
#define LOAD_VSX_XFORM_TEST(op) TEST(op, op, stxvd2x, XFORM, 32, 32)
#define STORE_VSX_XFORM_TEST(op) TEST(op, lxvd2x, op, XFORM, 32, 32)
#define LOAD_VSX_DFORM_TEST(op) TEST(op, op, stxv, DFORM, 32, 32)
#define STORE_VSX_DFORM_TEST(op) TEST(op, lxv, op, DFORM, 32, 32)
#define LOAD_VMX_XFORM_TEST(op) TEST(op, op, stxvd2x, XFORM, 0, 32)
#define STORE_VMX_XFORM_TEST(op) TEST(op, lxvd2x, op, XFORM, 32, 0)
#define LOAD_VMX_DFORM_TEST(op) TEST(op, op, stxv, DFORM, 0, 32)
#define STORE_VMX_DFORM_TEST(op) TEST(op, lxv, op, DFORM, 32, 0)
#define LOAD_XFORM_TEST(op) TEST(op, op, stdx, XFORM, 31, 31)
#define STORE_XFORM_TEST(op) TEST(op, ldx, op, XFORM, 31, 31)
#define LOAD_DFORM_TEST(op) TEST(op, op, std, DFORM, 31, 31)
#define STORE_DFORM_TEST(op) TEST(op, ld, op, DFORM, 31, 31)
#define LOAD_FLOAT_DFORM_TEST(op) TEST(op, op, stfd, DFORM, 0, 0)
#define STORE_FLOAT_DFORM_TEST(op) TEST(op, lfd, op, DFORM, 0, 0)
#define LOAD_FLOAT_XFORM_TEST(op) TEST(op, op, stfdx, XFORM, 0, 0)
#define STORE_FLOAT_XFORM_TEST(op) TEST(op, lfdx, op, XFORM, 0, 0)
#define LOAD_MLS_PREFIX_TEST(op) TESTP(op, op, PSTD, 31, 31)
#define STORE_MLS_PREFIX_TEST(op) TESTP(op, PLD, op, 31, 31)
#define LOAD_8LS_PREFIX_TEST(op) TESTP(op, op, PSTD, 31, 31)
#define STORE_8LS_PREFIX_TEST(op) TESTP(op, PLD, op, 31, 31)
#define LOAD_FLOAT_MLS_PREFIX_TEST(op) TESTP(op, op, PSTFD, 0, 0)
#define STORE_FLOAT_MLS_PREFIX_TEST(op) TESTP(op, PLFD, op, 0, 0)
#define LOAD_VSX_8LS_PREFIX_TEST(op, tail) TESTP(op, op, PSTXV ## tail, 0, 32)
#define STORE_VSX_8LS_PREFIX_TEST(op, tail) TESTP(op, PLXV ## tail, op, 32, 0)

/* FIXME: Unimplemented tests: */
// STORE_DFORM_TEST(stq)   /* FIXME: need two registers for quad */
// STORE_DFORM_TEST(stswi) /* FIXME: string instruction */

// STORE_XFORM_TEST(stwat) /* AMO can't emulate or run on CI */
// STORE_XFORM_TEST(stdat) /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ */
/* Fill width bytes starting at dst+offset with the pattern 0,1,2,... */
void preload_data(void *dst, int offset, int width)
{
	char *p = (char *)dst + offset;
	int n;

	for (n = 0; n < width; n++)
		p[n] = n;
}
/* Run test_func to copy 16 bytes from src+offset to dst+offset.
 * Returns 1 if the sequence raised a signal (recorded in gotsig by
 * sighandler()), 0 if it completed without faulting. */
int test_memcpy(void *dst, void *src, int size, int offset,
		void (*test_func)(char *, char *))
{
	char *s, *d;

	s = src;
	s += offset;
	d = dst;
	d += offset;

	assert(size == 16);
	gotsig = 0;
	/* Tell sighandler() that any fault now belongs to the test */
	testing = 1;

	test_func(s, d); /* run the actual test */

	testing = 0;

	if (gotsig) {
		if (debug)
			printf(" Got signal %i\n", gotsig);
		return 1;
	}

	return 0;
}
/*
 * Dump the two miscompared buffers (cacheable copy vs cache-inhibited
 * copy) as hex bytes for debugging.
 *
 * Each byte is cast to unsigned char before printing: on targets where
 * plain char is signed, values >= 0x80 would otherwise sign-extend
 * through the int promotion and print as e.g. "ffffff9c" rather than
 * "9c".
 */
void dumpdata(char *s1, char *s2, int n, char *test_name)
{
	int i;

	printf(" %s: unexpected result:\n", test_name);
	printf(" mem:");
	for (i = 0; i < n; i++)
		printf(" %02x", (unsigned char)s1[i]);
	printf("\n");
	printf(" ci: ");
	for (i = 0; i < n; i++)
		printf(" %02x", (unsigned char)s2[i]);
	printf("\n");
}
/*
 * Compare n bytes at s1+offset against s2+offset. Returns 0 when they
 * match, 1 on mismatch; in debug mode also dumps both buffers.
 */
int test_memcmp(void *s1, void *s2, int n, int offset, char *test_name)
{
	char *a = (char *)s1 + offset;
	char *b = (char *)s2 + offset;

	if (!memcmp(a, b, n))
		return 0;

	if (debug) {
		printf("\n Compare failed. Offset:%i length:%i\n",
		       offset, n);
		dumpdata(a, b, n, test_name);
	}

	return 1;
}
/*
 * Do two memcpy tests using the same instructions. One cachable
 * memory and the other doesn't.
 */
int do_test(char *test_name, void (*test_func)(char *, char *))
{
	int offset, width, fd, rc, r;
	void *mem0, *mem1, *ci0, *ci1;

	printf("\tDoing %s:\t", test_name);

	fd = open(cipath, O_RDWR);
	if (fd < 0) {
		printf("\n");
		perror("Can't open ci file now?");
		return 1;
	}

	/* Two adjacent cache-inhibited windows: source and destination */
	ci0 = mmap(NULL, bufsize, PROT_WRITE | PROT_READ, MAP_SHARED,
		   fd, cioffset);
	ci1 = mmap(NULL, bufsize, PROT_WRITE | PROT_READ, MAP_SHARED,
		   fd, cioffset + bufsize);

	if ((ci0 == MAP_FAILED) || (ci1 == MAP_FAILED)) {
		printf("\n");
		perror("mmap failed");
		SKIP_IF(1);
	}

	/* Matching pair of normal cacheable buffers */
	rc = posix_memalign(&mem0, bufsize, bufsize);
	if (rc) {
		printf("\n");
		return rc;
	}

	rc = posix_memalign(&mem1, bufsize, bufsize);
	if (rc) {
		printf("\n");
		free(mem0);
		return rc;
	}

	rc = 0;
	/*
	 * offset = 0 is aligned but tests the workaround for the P9N
	 * DD2.1 vector CI load issue (see 5080332c2c89 "powerpc/64s:
	 * Add workaround for P9 vector CI load issue")
	 */
	for (offset = 0; offset < 16; offset++) {
		width = 16; /* vsx == 16 bytes */
		r = 0;

		/* load pattern into memory byte by byte */
		preload_data(ci0, offset, width);
		preload_data(mem0, offset, width); // FIXME: remove??
		memcpy(ci0, mem0, bufsize);
		memcpy(ci1, mem1, bufsize); /* initialise output to the same */

		/* sanity check */
		test_memcmp(mem0, ci0, width, offset, test_name);

		/* CI copy faults and is emulated by the kernel; the plain
		 * copy executes natively */
		r |= test_memcpy(ci1, ci0, width, offset, test_func);
		r |= test_memcpy(mem1, mem0, width, offset, test_func);
		if (r && !debug) {
			printf("FAILED: Got signal");
			rc = 1;
			break;
		}

		/* Emulated and native results must agree */
		r |= test_memcmp(mem1, ci1, width, offset, test_name);
		if (r && !debug) {
			printf("FAILED: Wrong Data");
			rc = 1;
			break;
		}
	}

	if (rc == 0)
		printf("PASSED");

	printf("\n");

	munmap(ci0, bufsize);
	munmap(ci1, bufsize);
	free(mem0);
	free(mem1);
	close(fd);

	return rc;
}
/* Probe whether the cache-inhibited memory file is openable R/W. */
static bool can_open_cifile(void)
{
	bool ok = false;
	int fd;

	fd = open(cipath, O_RDWR);
	if (fd >= 0) {
		close(fd);
		ok = true;
	}

	return ok;
}
/* ISA 2.06B VSX loads and stores. */
int test_alignment_handler_vsx_206(void)
{
	int rc = 0;

	SKIP_IF(!can_open_cifile());
	SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));

	printf("VSX: 2.06B\n");
	LOAD_VSX_XFORM_TEST(lxvd2x);
	LOAD_VSX_XFORM_TEST(lxvw4x);
	LOAD_VSX_XFORM_TEST(lxsdx);
	LOAD_VSX_XFORM_TEST(lxvdsx);
	STORE_VSX_XFORM_TEST(stxvd2x);
	STORE_VSX_XFORM_TEST(stxvw4x);
	STORE_VSX_XFORM_TEST(stxsdx);
	return rc;
}

/* ISA 2.07B VSX loads and stores. */
int test_alignment_handler_vsx_207(void)
{
	int rc = 0;

	SKIP_IF(!can_open_cifile());
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));

	printf("VSX: 2.07B\n");
	LOAD_VSX_XFORM_TEST(lxsspx);
	LOAD_VSX_XFORM_TEST(lxsiwax);
	LOAD_VSX_XFORM_TEST(lxsiwzx);
	STORE_VSX_XFORM_TEST(stxsspx);
	STORE_VSX_XFORM_TEST(stxsiwx);
	return rc;
}

/* ISA 3.00B VSX loads and stores. */
int test_alignment_handler_vsx_300(void)
{
	int rc = 0;

	SKIP_IF(!can_open_cifile());

	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00));
	printf("VSX: 3.00B\n");
	LOAD_VMX_DFORM_TEST(lxsd);
	LOAD_VSX_XFORM_TEST(lxsibzx);
	LOAD_VSX_XFORM_TEST(lxsihzx);
	LOAD_VMX_DFORM_TEST(lxssp);
	LOAD_VSX_DFORM_TEST(lxv);
	LOAD_VSX_XFORM_TEST(lxvb16x);
	LOAD_VSX_XFORM_TEST(lxvh8x);
	LOAD_VSX_XFORM_TEST(lxvx);
	LOAD_VSX_XFORM_TEST(lxvwsx);
	LOAD_VSX_XFORM_TEST(lxvl);
	LOAD_VSX_XFORM_TEST(lxvll);
	STORE_VMX_DFORM_TEST(stxsd);
	STORE_VSX_XFORM_TEST(stxsibx);
	STORE_VSX_XFORM_TEST(stxsihx);
	STORE_VMX_DFORM_TEST(stxssp);
	STORE_VSX_DFORM_TEST(stxv);
	STORE_VSX_XFORM_TEST(stxvb16x);
	STORE_VSX_XFORM_TEST(stxvh8x);
	STORE_VSX_XFORM_TEST(stxvx);
	STORE_VSX_XFORM_TEST(stxvl);
	STORE_VSX_XFORM_TEST(stxvll);
	return rc;
}

/* ISA 3.1 prefixed VSX loads and stores. */
int test_alignment_handler_vsx_prefix(void)
{
	int rc = 0;

	SKIP_IF(!can_open_cifile());
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));

	printf("VSX: PREFIX\n");
	LOAD_VSX_8LS_PREFIX_TEST(PLXSD, 0);
	LOAD_VSX_8LS_PREFIX_TEST(PLXSSP, 0);
	LOAD_VSX_8LS_PREFIX_TEST(PLXV0, 0);
	LOAD_VSX_8LS_PREFIX_TEST(PLXV1, 1);
	STORE_VSX_8LS_PREFIX_TEST(PSTXSD, 0);
	STORE_VSX_8LS_PREFIX_TEST(PSTXSSP, 0);
	STORE_VSX_8LS_PREFIX_TEST(PSTXV0, 0);
	STORE_VSX_8LS_PREFIX_TEST(PSTXV1, 1);
	return rc;
}

/* Base integer loads and stores (all ISA levels). */
int test_alignment_handler_integer(void)
{
	int rc = 0;

	SKIP_IF(!can_open_cifile());

	printf("Integer\n");
	LOAD_DFORM_TEST(lbz);
	LOAD_DFORM_TEST(lbzu);
	LOAD_XFORM_TEST(lbzx);
	LOAD_XFORM_TEST(lbzux);
	LOAD_DFORM_TEST(lhz);
	LOAD_DFORM_TEST(lhzu);
	LOAD_XFORM_TEST(lhzx);
	LOAD_XFORM_TEST(lhzux);
	LOAD_DFORM_TEST(lha);
	LOAD_DFORM_TEST(lhau);
	LOAD_XFORM_TEST(lhax);
	LOAD_XFORM_TEST(lhaux);
	LOAD_XFORM_TEST(lhbrx);
	LOAD_DFORM_TEST(lwz);
	LOAD_DFORM_TEST(lwzu);
	LOAD_XFORM_TEST(lwzx);
	LOAD_XFORM_TEST(lwzux);
	LOAD_DFORM_TEST(lwa);
	LOAD_XFORM_TEST(lwax);
	LOAD_XFORM_TEST(lwaux);
	LOAD_XFORM_TEST(lwbrx);
	LOAD_DFORM_TEST(ld);
	LOAD_DFORM_TEST(ldu);
	LOAD_XFORM_TEST(ldx);
	LOAD_XFORM_TEST(ldux);
	STORE_DFORM_TEST(stb);
	STORE_XFORM_TEST(stbx);
	STORE_DFORM_TEST(stbu);
	STORE_XFORM_TEST(stbux);
	STORE_DFORM_TEST(sth);
	STORE_XFORM_TEST(sthx);
	STORE_DFORM_TEST(sthu);
	STORE_XFORM_TEST(sthux);
	STORE_XFORM_TEST(sthbrx);
	STORE_DFORM_TEST(stw);
	STORE_XFORM_TEST(stwx);
	STORE_DFORM_TEST(stwu);
	STORE_XFORM_TEST(stwux);
	STORE_XFORM_TEST(stwbrx);
	STORE_DFORM_TEST(std);
	STORE_XFORM_TEST(stdx);
	STORE_DFORM_TEST(stdu);
	STORE_XFORM_TEST(stdux);

#ifdef __BIG_ENDIAN__
	LOAD_DFORM_TEST(lmw);
	STORE_DFORM_TEST(stmw);
#endif

	return rc;
}

/* ISA 2.06 byte-reversed doubleword loads/stores. */
int test_alignment_handler_integer_206(void)
{
	int rc = 0;

	SKIP_IF(!can_open_cifile());
	SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));

	printf("Integer: 2.06\n");

	LOAD_XFORM_TEST(ldbrx);
	STORE_XFORM_TEST(stdbrx);

	return rc;
}

/* ISA 3.1 prefixed integer loads and stores. */
int test_alignment_handler_integer_prefix(void)
{
	int rc = 0;

	SKIP_IF(!can_open_cifile());
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));

	printf("Integer: PREFIX\n");
	LOAD_MLS_PREFIX_TEST(PLBZ);
	LOAD_MLS_PREFIX_TEST(PLHZ);
	LOAD_MLS_PREFIX_TEST(PLHA);
	LOAD_MLS_PREFIX_TEST(PLWZ);
	LOAD_8LS_PREFIX_TEST(PLWA);
	LOAD_8LS_PREFIX_TEST(PLD);
	STORE_MLS_PREFIX_TEST(PSTB);
	STORE_MLS_PREFIX_TEST(PSTH);
	STORE_MLS_PREFIX_TEST(PSTW);
	STORE_8LS_PREFIX_TEST(PSTD);
	return rc;
}

/* VMX (altivec) loads and stores. */
int test_alignment_handler_vmx(void)
{
	int rc = 0;

	SKIP_IF(!can_open_cifile());
	SKIP_IF(!have_hwcap(PPC_FEATURE_HAS_ALTIVEC));

	printf("VMX\n");
	LOAD_VMX_XFORM_TEST(lvx);

	/*
	 * FIXME: These loads only load part of the register, so our
	 * testing method doesn't work. Also they don't take alignment
	 * faults, so it's kinda pointless anyway
	 *
	 LOAD_VMX_XFORM_TEST(lvebx)
	 LOAD_VMX_XFORM_TEST(lvehx)
	 LOAD_VMX_XFORM_TEST(lvewx)
	 LOAD_VMX_XFORM_TEST(lvxl)
	 */
	STORE_VMX_XFORM_TEST(stvx);
	STORE_VMX_XFORM_TEST(stvebx);
	STORE_VMX_XFORM_TEST(stvehx);
	STORE_VMX_XFORM_TEST(stvewx);
	STORE_VMX_XFORM_TEST(stvxl);
	return rc;
}

/* Base floating point loads and stores. */
int test_alignment_handler_fp(void)
{
	int rc = 0;

	SKIP_IF(!can_open_cifile());

	printf("Floating point\n");
	LOAD_FLOAT_DFORM_TEST(lfd);
	LOAD_FLOAT_XFORM_TEST(lfdx);
	LOAD_FLOAT_DFORM_TEST(lfdu);
	LOAD_FLOAT_XFORM_TEST(lfdux);
	LOAD_FLOAT_DFORM_TEST(lfs);
	LOAD_FLOAT_XFORM_TEST(lfsx);
	LOAD_FLOAT_DFORM_TEST(lfsu);
	LOAD_FLOAT_XFORM_TEST(lfsux);
	STORE_FLOAT_DFORM_TEST(stfd);
	STORE_FLOAT_XFORM_TEST(stfdx);
	STORE_FLOAT_DFORM_TEST(stfdu);
	STORE_FLOAT_XFORM_TEST(stfdux);
	STORE_FLOAT_DFORM_TEST(stfs);
	STORE_FLOAT_XFORM_TEST(stfsx);
	STORE_FLOAT_DFORM_TEST(stfsu);
	STORE_FLOAT_XFORM_TEST(stfsux);
	STORE_FLOAT_XFORM_TEST(stfiwx);

	return rc;
}

/* ISA 2.05 floating point loads and stores. */
int test_alignment_handler_fp_205(void)
{
	int rc = 0;

	SKIP_IF(!can_open_cifile());
	SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_05));

	printf("Floating point: 2.05\n");

	LOAD_FLOAT_DFORM_TEST(lfdp);
	LOAD_FLOAT_XFORM_TEST(lfdpx);
	LOAD_FLOAT_XFORM_TEST(lfiwax);
	STORE_FLOAT_DFORM_TEST(stfdp);
	STORE_FLOAT_XFORM_TEST(stfdpx);

	return rc;
}

/* ISA 2.06 floating point loads. */
int test_alignment_handler_fp_206(void)
{
	int rc = 0;

	SKIP_IF(!can_open_cifile());
	SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));

	printf("Floating point: 2.06\n");

	LOAD_FLOAT_XFORM_TEST(lfiwzx);

	return rc;
}

/* ISA 3.1 prefixed floating point loads and stores. */
int test_alignment_handler_fp_prefix(void)
{
	int rc = 0;

	SKIP_IF(!can_open_cifile());
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));

	printf("Floating point: PREFIX\n");
	LOAD_FLOAT_DFORM_TEST(lfs);
	LOAD_FLOAT_MLS_PREFIX_TEST(PLFS);
	LOAD_FLOAT_MLS_PREFIX_TEST(PLFD);
	STORE_FLOAT_MLS_PREFIX_TEST(PSTFS);
	STORE_FLOAT_MLS_PREFIX_TEST(PSTFD);
	return rc;
}
/* Print usage information and requirements for this test. */
void usage(char *prog)
{
	printf("Usage: %s [options] [path [offset]]\n", prog);
	fputs(" -d Enable debug error output\n", stdout);
	fputs("\n", stdout);
	fputs("This test requires a POWER8, POWER9 or POWER10 CPU ", stdout);
	fputs("and either a usable framebuffer at /dev/fb0 or ", stdout);
	fputs("the path to usable cache inhibited memory and optional ", stdout);
	fputs("offset to be provided\n", stdout);
}
int main(int argc, char *argv[])
{
	struct sigaction sa;
	int rc = 0;
	int option = 0;

	while ((option = getopt(argc, argv, "d")) != -1) {
		switch (option) {
		case 'd':
			debug++;
			break;
		default:
			usage(argv[0]);
			exit(1);
		}
	}
	argc -= optind;
	argv += optind;

	/* Optional positional args: cache-inhibited memory path and an
	 * offset into it (parsed in hex: base 0x10 == 16) */
	if (argc > 0)
		cipath = argv[0];
	if (argc > 1)
		cioffset = strtol(argv[1], 0, 0x10);

	bufsize = getpagesize();

	/* Catch the faults the test sequences take on CI memory so
	 * sighandler() can record them and skip past the instruction */
	sa.sa_sigaction = sighandler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;
	if (sigaction(SIGSEGV, &sa, NULL) == -1
	    || sigaction(SIGBUS, &sa, NULL) == -1
	    || sigaction(SIGILL, &sa, NULL) == -1) {
		perror("sigaction");
		exit(1);
	}

	prefixes_enabled = have_hwcap2(PPC_FEATURE2_ARCH_3_1);

	rc |= test_harness(test_alignment_handler_vsx_206,
			   "test_alignment_handler_vsx_206");
	rc |= test_harness(test_alignment_handler_vsx_207,
			   "test_alignment_handler_vsx_207");
	rc |= test_harness(test_alignment_handler_vsx_300,
			   "test_alignment_handler_vsx_300");
	rc |= test_harness(test_alignment_handler_vsx_prefix,
			   "test_alignment_handler_vsx_prefix");
	rc |= test_harness(test_alignment_handler_integer,
			   "test_alignment_handler_integer");
	rc |= test_harness(test_alignment_handler_integer_206,
			   "test_alignment_handler_integer_206");
	rc |= test_harness(test_alignment_handler_integer_prefix,
			   "test_alignment_handler_integer_prefix");
	rc |= test_harness(test_alignment_handler_vmx,
			   "test_alignment_handler_vmx");
	rc |= test_harness(test_alignment_handler_fp,
			   "test_alignment_handler_fp");
	rc |= test_harness(test_alignment_handler_fp_205,
			   "test_alignment_handler_fp_205");
	rc |= test_harness(test_alignment_handler_fp_206,
			   "test_alignment_handler_fp_206");
	rc |= test_harness(test_alignment_handler_fp_prefix,
			   "test_alignment_handler_fp_prefix");
	return rc;
}
| linux-master | tools/testing/selftests/powerpc/alignment/alignment_handler.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test that signal delivery is able to expand the stack segment without
* triggering a SEGV.
*
* Based on test code by Tom Lane.
*/
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/types.h>
#include <unistd.h>
#include "../pmu/lib.h"
#include "utils.h"
#define _KB (1024)
#define _MB (1024 * 1024)

/* 64K-aligned starting stack address and the deepest address reached,
 * recorded by the child for reporting. */
static char *stack_base_ptr;
static char *stack_top_ptr;

/* Set by the SIGUSR1 handler to release the child from its spin loop */
static volatile sig_atomic_t sig_occurred = 0;

static void sigusr1_handler(int signal_arg)
{
	sig_occurred = 1;
}
/* Recurse until at least stack_size bytes of stack are in use (each
 * frame's local gives a progressively lower address), then park: notify
 * the parent and spin until SIGUSR1 arrives. Delivering that signal
 * must grow the stack further without faulting — the point of the test. */
static int consume_stack(unsigned int stack_size, union pipe write_pipe)
{
	char stack_cur;

	if ((stack_base_ptr - &stack_cur) < stack_size)
		return consume_stack(stack_size, write_pipe);
	else {
		stack_top_ptr = &stack_cur;

		FAIL_IF(notify_parent(write_pipe));

		while (!sig_occurred)
			barrier();
	}

	return 0;
}

/* Child body: install the SIGUSR1 handler, record a 64K-aligned stack
 * base address, then consume stack_size bytes of stack and wait. */
static int child(unsigned int stack_size, union pipe write_pipe)
{
	struct sigaction act;
	char stack_base;

	act.sa_handler = sigusr1_handler;
	sigemptyset(&act.sa_mask);
	act.sa_flags = 0;
	if (sigaction(SIGUSR1, &act, NULL) < 0)
		err(1, "sigaction");

	stack_base_ptr = (char *) (((size_t) &stack_base + 65535) & ~65535UL);

	FAIL_IF(consume_stack(stack_size, write_pipe));

	printf("size 0x%06x: OK, stack base %p top %p (%zx used)\n",
	       stack_size, stack_base_ptr, stack_top_ptr,
	       stack_base_ptr - stack_top_ptr);

	return 0;
}
/* Fork a child that consumes stack_size bytes of stack, wait for it to
 * signal readiness over the pipe, deliver SIGUSR1, and verify the child
 * exits cleanly (i.e. signal delivery expanded the stack successfully). */
static int test_one_size(unsigned int stack_size)
{
	union pipe read_pipe, write_pipe;
	pid_t pid;

	FAIL_IF(pipe(read_pipe.fds) == -1);
	FAIL_IF(pipe(write_pipe.fds) == -1);

	pid = fork();
	if (pid == 0) {
		close(read_pipe.read_fd);
		close(write_pipe.write_fd);
		exit(child(stack_size, read_pipe));
	}

	close(read_pipe.write_fd);
	close(write_pipe.read_fd);
	FAIL_IF(sync_with_child(read_pipe, write_pipe));

	kill(pid, SIGUSR1);

	FAIL_IF(wait_for_child(pid));

	close(read_pipe.read_fd);
	close(write_pipe.write_fd);

	return 0;
}
/* Sweep stack usage around the 1MB mark to exercise signal-time stack
 * expansion at a range of depths. */
int test(void)
{
	unsigned int i, size;

	// Test with used stack from 1MB - 64K to 1MB + 64K
	// Increment by 64 to get more coverage of odd sizes
	for (i = 0; i < (128 * _KB); i += 64) {
		size = i + (1 * _MB) - (64 * _KB);
		FAIL_IF(test_one_size(size));
	}

	return 0;
}

int main(void)
{
	return test_harness(test, "stack_expansion_signal");
}
| linux-master | tools/testing/selftests/powerpc/mm/stack_expansion_signal.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2022, Michael Ellerman, IBM Corp.
//
// Test that the 4PB address space SLB handling doesn't corrupt userspace registers
// (r9-r13) due to a SLB fault while saving the PPR.
//
// The bug was introduced in f384796c4 ("powerpc/mm: Add support for handling > 512TB
// address in SLB miss") and fixed in 4c2de74cc869 ("powerpc/64: Interrupts save PPR on
// stack rather than thread_struct").
//
// To hit the bug requires the task struct and kernel stack to be in different segments.
// Usually that requires more than 1TB of RAM, or if that's not practical, boot the kernel
// with "disable_1tb_segments".
//
// The test works by creating mappings above 512TB, to trigger the large address space
// support. It creates 64 mappings, double the size of the SLB, to cause SLB faults on
// each access (assuming naive replacement). It then loops over those mappings touching
// each, and checks that r9-r13 aren't corrupted.
//
// It then forks another child and tries again, because a new child process will get a new
// kernel stack and thread struct allocated, which may be more optimally placed to trigger
// the bug. It would probably be better to leave the previous child processes hanging
// around, so that kernel stack & thread struct allocations are not reused, but that would
// amount to a 30 second fork bomb. The current design reliably triggers the bug on
// unpatched kernels.
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "utils.h"
#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE	MAP_FIXED	// "Should be safe" above 512TB
#endif

#define BASE_ADDRESS	(1ul << 50)	// 1PB
#define STRIDE		(2ul << 40)	// 2TB
#define SLB_SIZE	32
#define NR_MAPPINGS	(SLB_SIZE * 2)	// double the SLB, so accesses keep faulting

/* Set by the SIGALRM handler after the 30s run to end the test loop */
static volatile sig_atomic_t signaled;

static void signal_handler(int sig)
{
	signaled = 1;
}

/* Compare a register value captured after the SLB fault against the
 * value saved beforehand; on mismatch report and exit(1).
 * NOTE(review): relies on a str() stringify macro presumably provided
 * by a shared header — confirm it is defined as #define str(s) #s. */
#define CHECK_REG(_reg) \
	if (_reg != _reg##_orig) { \
		printf(str(_reg) " corrupted! Expected 0x%lx != 0x%lx\n", _reg##_orig, \
		       _reg); \
		_exit(1); \
	}
/* Store to each of the NR_MAPPINGS widely-spaced pages. Each store
 * triggers an SLB fault; the asm snapshots r9-r13 immediately before
 * and after the faulting store so CHECK_REG can detect the kernel
 * clobbering them. exit(1)s on corruption, returns 0 otherwise. */
static int touch_mappings(void)
{
	unsigned long r9_orig, r10_orig, r11_orig, r12_orig, r13_orig;
	unsigned long r9, r10, r11, r12, r13;
	unsigned long addr, *p;
	int i;

	for (i = 0; i < NR_MAPPINGS; i++) {
		addr = BASE_ADDRESS + (i * STRIDE);
		p = (unsigned long *)addr;

		asm volatile("mr   %0, %%r9	;" // Read original GPR values
			     "mr   %1, %%r10	;"
			     "mr   %2, %%r11	;"
			     "mr   %3, %%r12	;"
			     "mr   %4, %%r13	;"
			     "std  %10, 0(%11)  ;" // Trigger SLB fault
			     "mr   %5, %%r9	;" // Save possibly corrupted values
			     "mr   %6, %%r10	;"
			     "mr   %7, %%r11	;"
			     "mr   %8, %%r12	;"
			     "mr   %9, %%r13	;"
			     "mr   %%r9,  %0	;" // Restore original values
			     "mr   %%r10, %1	;"
			     "mr   %%r11, %2	;"
			     "mr   %%r12, %3	;"
			     "mr   %%r13, %4	;"
			     : "=&b"(r9_orig), "=&b"(r10_orig), "=&b"(r11_orig),
			       "=&b"(r12_orig), "=&b"(r13_orig), "=&b"(r9), "=&b"(r10),
			       "=&b"(r11), "=&b"(r12), "=&b"(r13)
			     : "b"(i), "b"(p)
			     : "r9", "r10", "r11", "r12", "r13");

		CHECK_REG(r9);
		CHECK_REG(r10);
		CHECK_REG(r11);
		CHECK_REG(r12);
		CHECK_REG(r13);
	}

	return 0;
}
/* Create NR_MAPPINGS pages above 512TB (triggering the large address
 * space SLB handling), then repeatedly fork children that store to each
 * mapping and verify r9-r13 survive, until the 30s alarm fires. */
static int test(void)
{
	unsigned long page_size, addr, *p;
	struct sigaction action;
	bool hash_mmu;
	int i, status;
	pid_t pid;

	// This tests a hash MMU specific bug.
	FAIL_IF(using_hash_mmu(&hash_mmu));
	SKIP_IF(!hash_mmu);
	// 4K kernels don't support 4PB address space
	SKIP_IF(sysconf(_SC_PAGESIZE) < 65536);

	page_size = sysconf(_SC_PAGESIZE);

	for (i = 0; i < NR_MAPPINGS; i++) {
		addr = BASE_ADDRESS + (i * STRIDE);

		p = mmap((void *)addr, page_size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			printf("Error: couldn't mmap(), confirm kernel has 4PB support?\n");
			return 1;
		}
	}

	action.sa_handler = signal_handler;
	action.sa_flags = SA_RESTART;
	FAIL_IF(sigaction(SIGALRM, &action, NULL) < 0);

	// Seen to always crash in under ~10s on affected kernels.
	alarm(30);

	while (!signaled) {
		// Fork new processes, to increase the chance that we hit the case where
		// the kernel stack and task struct are in different segments.
		pid = fork();
		if (pid == 0)
			exit(touch_mappings());

		FAIL_IF(waitpid(-1, &status, 0) == -1);
		FAIL_IF(WIFSIGNALED(status));
		FAIL_IF(!WIFEXITED(status));
		FAIL_IF(WEXITSTATUS(status));
	}

	return 0;
}
/* Entry point: run the test under the selftest harness. */
int main(void)
{
	int rc;

	rc = test_harness(test, "large_vm_gpr_corruption");
	return rc;
}
| linux-master | tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019, Nick Piggin, Gautham R. Shenoy, Aneesh Kumar K.V, IBM Corp.
*/
/*
*
* Test tlbie/mtpidr race. We have 4 threads doing flush/load/compare/store
* sequence in a loop. The same threads also rung a context switch task
* that does sched_yield() in loop.
*
* The snapshot thread mark the mmap area PROT_READ in between, make a copy
* and copy it back to the original area. This helps us to detect if any
* store continued to happen after we marked the memory PROT_READ.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <linux/futex.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sched.h>
#include <time.h>
#include <stdarg.h>
#include <pthread.h>
#include <signal.h>
#include <sys/prctl.h>
/*
 * Flush the cache block containing *addr to memory (dcbf) and order the
 * flush against subsequent accesses (sync). Used so every load in the
 * rim-sequence really goes out to memory rather than hitting the cache.
 */
static inline void dcbf(volatile unsigned int *addr)
{
	__asm__ __volatile__ ("dcbf %y0; sync" : : "Z"(*(unsigned char *)addr) : "memory");
}
/* Print a timestamped error banner for @msg and terminate with failure. */
static void err_msg(char *msg)
{
	time_t t;

	time(&t);
	printf("=================================\n");
	printf(" Error: %s\n", msg);
	printf(" %s", ctime(&t));
	printf("=================================\n");
	exit(1);
}
static char *map1;
static char *map2;
static pid_t rim_process_pid;
/*
* A "rim-sequence" is defined to be the sequence of the following
* operations performed on a memory word:
* 1) FLUSH the contents of that word.
* 2) LOAD the contents of that word.
* 3) COMPARE the contents of that word with the content that was
* previously stored at that word
* 4) STORE new content into that word.
*
* The threads in this test that perform the rim-sequence are termed
* as rim_threads.
*/
/*
* A "corruption" is defined to be the failed COMPARE operation in a
* rim-sequence.
*
* A rim_thread that detects a corruption informs about it to all the
* other rim_threads, and the mem_snapshot thread.
*/
static volatile unsigned int corruption_found;
/*
* This defines the maximum number of rim_threads in this test.
*
* The THREAD_ID_BITS denote the number of bits required
* to represent the thread_ids [0..MAX_THREADS - 1].
* We are being a bit paranoid here and set it to 8 bits,
* though 6 bits suffice.
*
*/
#define MAX_THREADS 64
#define THREAD_ID_BITS 8
#define THREAD_ID_MASK ((1 << THREAD_ID_BITS) - 1)
static unsigned int rim_thread_ids[MAX_THREADS];
static pthread_t rim_threads[MAX_THREADS];
/*
* Each rim_thread works on an exclusive "chunk" of size
* RIM_CHUNK_SIZE.
*
* The ith rim_thread works on the ith chunk.
*
* The ith chunk begins at
* map1 + (i * RIM_CHUNK_SIZE)
*/
#define RIM_CHUNK_SIZE 1024
#define BITS_PER_BYTE 8
#define WORD_SIZE (sizeof(unsigned int))
#define WORD_BITS (WORD_SIZE * BITS_PER_BYTE)
#define WORDS_PER_CHUNK (RIM_CHUNK_SIZE/WORD_SIZE)
/*
 * Return the base address of the chunk owned by @thread_id: chunks are
 * laid out back-to-back in map1, RIM_CHUNK_SIZE bytes apart.
 */
static inline char *compute_chunk_start_addr(unsigned int thread_id)
{
	unsigned long offset = (unsigned long)thread_id * RIM_CHUNK_SIZE;

	return (char *)((unsigned long)map1 + offset);
}
/*
* The "word-offset" of a word-aligned address inside a chunk, is
* defined to be the number of words that precede the address in that
* chunk.
*
* WORD_OFFSET_BITS denote the number of bits required to represent
* the word-offsets of all the word-aligned addresses of a chunk.
*/
#define WORD_OFFSET_BITS (__builtin_ctz(WORDS_PER_CHUNK))
#define WORD_OFFSET_MASK ((1 << WORD_OFFSET_BITS) - 1)
/*
 * Number of whole words between @start and the word-aligned address
 * @addr, i.e. the word-offset of @addr within its chunk.
 */
static inline unsigned int compute_word_offset(char *start, unsigned int *addr)
{
	unsigned int nbytes;

	nbytes = (unsigned long)addr - (unsigned long)start;
	return nbytes / WORD_SIZE;
}
/*
* A "sweep" is defined to be the sequential execution of the
* rim-sequence by a rim_thread on its chunk one word at a time,
* starting from the first word of its chunk and ending with the last
* word of its chunk.
*
* Each sweep of a rim_thread is uniquely identified by a sweep_id.
* SWEEP_ID_BITS denote the number of bits required to represent
* the sweep_ids of rim_threads.
*
* As to why SWEEP_ID_BITS are computed as a function of THREAD_ID_BITS,
* WORD_OFFSET_BITS, and WORD_BITS, see the "store-pattern" below.
*/
#define SWEEP_ID_BITS (WORD_BITS - (THREAD_ID_BITS + WORD_OFFSET_BITS))
#define SWEEP_ID_MASK ((1 << SWEEP_ID_BITS) - 1)
/*
* A "store-pattern" is the word-pattern that is stored into a word
* location in the 4)STORE step of the rim-sequence.
*
* In the store-pattern, we shall encode:
*
* - The thread-id of the rim_thread performing the store
* (The most significant THREAD_ID_BITS)
*
* - The word-offset of the address into which the store is being
* performed (The next WORD_OFFSET_BITS)
*
* - The sweep_id of the current sweep in which the store is
* being performed. (The lower SWEEP_ID_BITS)
*
* Store Pattern: 32 bits
* |------------------|--------------------|---------------------------------|
* | Thread id | Word offset | sweep_id |
* |------------------|--------------------|---------------------------------|
* THREAD_ID_BITS WORD_OFFSET_BITS SWEEP_ID_BITS
*
* In the store pattern, the (Thread-id + Word-offset) uniquely identify the
* address to which the store is being performed i.e,
* address == map1 +
* (Thread-id * RIM_CHUNK_SIZE) + (Word-offset * WORD_SIZE)
*
* And the sweep_id in the store pattern identifies the time when the
* store was performed by the rim_thread.
*
* We shall use this property in the 3)COMPARE step of the
* rim-sequence.
*/
#define SWEEP_ID_SHIFT 0
#define WORD_OFFSET_SHIFT (SWEEP_ID_BITS)
#define THREAD_ID_SHIFT (WORD_OFFSET_BITS + SWEEP_ID_BITS)
/*
* Compute the store pattern for a given thread with id @tid, at
* location @addr in the sweep identified by @sweep_id
*/
/*
 * Pack (tid | word-offset of @addr | @sweep_id) into a single 32-bit
 * store pattern, in the field layout described by the "Store Pattern"
 * comment above. The pattern uniquely identifies both the address the
 * store targets and the sweep in which it was performed.
 */
static inline unsigned int compute_store_pattern(unsigned int tid,
						 unsigned int *addr,
						 unsigned int sweep_id)
{
	unsigned int ret = 0;
	char *start = compute_chunk_start_addr(tid);
	unsigned int word_offset = compute_word_offset(start, addr);
	ret += (tid & THREAD_ID_MASK) << THREAD_ID_SHIFT;
	ret += (word_offset & WORD_OFFSET_MASK) << WORD_OFFSET_SHIFT;
	ret += (sweep_id & SWEEP_ID_MASK) << SWEEP_ID_SHIFT;
	return ret;
}
/* Extract the thread-id from the given store-pattern */
/* Recover the thread-id field from a stored word @pattern. */
static inline unsigned int extract_tid(unsigned int pattern)
{
	return (pattern >> THREAD_ID_SHIFT) & THREAD_ID_MASK;
}
/* Extract the word-offset from the given store-pattern */
/* Recover the word-offset field from a stored word @pattern. */
static inline unsigned int extract_word_offset(unsigned int pattern)
{
	return (pattern >> WORD_OFFSET_SHIFT) & WORD_OFFSET_MASK;
}
/* Extract the sweep-id from the given store-pattern */
/* Recover the sweep-id field from a stored word @pattern. */
static inline unsigned int extract_sweep_id(unsigned int pattern)
{
	return (pattern >> SWEEP_ID_SHIFT) & SWEEP_ID_MASK;
}
/************************************************************
* *
* Logging the output of the verification *
* *
************************************************************/
#define LOGDIR_NAME_SIZE 100
static char logdir[LOGDIR_NAME_SIZE];
static FILE *fp[MAX_THREADS];
static const char logfilename[] ="Thread-%02d-Chunk";
/*
 * Open a per-thread logfile "<logdir>/Thread-<tid>-Chunk" (cached in
 * fp[tid]) and write a header describing the thread's chunk and the sweep
 * state at the moment corruption was detected. Exits the process if the
 * file cannot be created.
 */
static inline void start_verification_log(unsigned int tid,
					  unsigned int *addr,
					  unsigned int cur_sweep_id,
					  unsigned int prev_sweep_id)
{
	FILE *f;
	char logfile[30];
	char path[LOGDIR_NAME_SIZE + 30];
	char separator[2] = "/";
	char *chunk_start = compute_chunk_start_addr(tid);
	unsigned int size = RIM_CHUNK_SIZE;
	sprintf(logfile, logfilename, tid);
	strcpy(path, logdir);
	strcat(path, separator);
	strcat(path, logfile);
	f = fopen(path, "w");
	if (!f) {
		err_msg("Unable to create logfile\n");
	}
	fp[tid] = f;
	fprintf(f, "----------------------------------------------------------\n");
	fprintf(f, "PID = %d\n", rim_process_pid);
	fprintf(f, "Thread id = %02d\n", tid);
	fprintf(f, "Chunk Start Addr = 0x%016lx\n", (unsigned long)chunk_start);
	fprintf(f, "Chunk Size = %d\n", size);
	fprintf(f, "Next Store Addr = 0x%016lx\n", (unsigned long)addr);
	fprintf(f, "Current sweep-id = 0x%08x\n", cur_sweep_id);
	fprintf(f, "Previous sweep-id = 0x%08x\n", prev_sweep_id);
	fprintf(f, "----------------------------------------------------------\n");
}
/*
 * Append one corruption record to thread @tid's logfile: the raw
 * expected/observed words plus their decoded thread-id, word-offset and
 * sweep-id fields, so the origin of the bad data can be identified.
 */
static inline void log_anamoly(unsigned int tid, unsigned int *addr,
			       unsigned int expected, unsigned int observed)
{
	FILE *f = fp[tid];
	fprintf(f, "Thread %02d: Addr 0x%lx: Expected 0x%x, Observed 0x%x\n",
	        tid, (unsigned long)addr, expected, observed);
	fprintf(f, "Thread %02d: Expected Thread id = %02d\n", tid, extract_tid(expected));
	fprintf(f, "Thread %02d: Observed Thread id = %02d\n", tid, extract_tid(observed));
	fprintf(f, "Thread %02d: Expected Word offset = %03d\n", tid, extract_word_offset(expected));
	fprintf(f, "Thread %02d: Observed Word offset = %03d\n", tid, extract_word_offset(observed));
	fprintf(f, "Thread %02d: Expected sweep-id = 0x%x\n", tid, extract_sweep_id(expected));
	fprintf(f, "Thread %02d: Observed sweep-id = 0x%x\n", tid, extract_sweep_id(observed));
	fprintf(f, "----------------------------------------------------------\n");
}
/*
 * Close thread @tid's logfile. If the chunk had no corrupted words the
 * (header-only) logfile is removed; otherwise its location and the
 * anomaly count are reported on stdout.
 *
 * Fix: the path must be built *before* the nr_anamolies == 0 branch —
 * previously remove() was called on an uninitialized 'path' buffer
 * (undefined behaviour, and the empty log was never actually removed).
 */
static inline void end_verification_log(unsigned int tid, unsigned nr_anamolies)
{
	FILE *f = fp[tid];
	char logfile[30];
	char path[LOGDIR_NAME_SIZE + 30];
	char separator[] = "/";
	fclose(f);
	/* Build the logfile path up front: needed on both branches below. */
	sprintf(logfile, logfilename, tid);
	strcpy(path, logdir);
	strcat(path, separator);
	strcat(path, logfile);
	if (nr_anamolies == 0) {
		remove(path);
		return;
	}
	printf("Thread %02d chunk has %d corrupted words. For details check %s\n",
	       tid, nr_anamolies, path);
}
/*
* When a COMPARE step of a rim-sequence fails, the rim_thread informs
* everyone else via the shared_memory pointed to by
* corruption_found variable. On seeing this, every thread verifies the
* content of its chunk as follows.
*
* Suppose a thread identified with @tid was about to store (but not
* yet stored) to @next_store_addr in its current sweep identified
* @cur_sweep_id. Let @prev_sweep_id indicate the previous sweep_id.
*
* This implies that for all the addresses @addr < @next_store_addr,
* Thread @tid has already performed a store as part of its current
* sweep. Hence we expect the content of such @addr to be:
* |-------------------------------------------------|
* | tid | word_offset(addr) | cur_sweep_id |
* |-------------------------------------------------|
*
* Since Thread @tid is yet to perform stores on address
* @next_store_addr and above, we expect the content of such an
* address @addr to be:
* |-------------------------------------------------|
* | tid | word_offset(addr) | prev_sweep_id |
* |-------------------------------------------------|
*
* The verifier function @verify_chunk does this verification and logs
* any anamolies that it finds.
*/
/*
 * Walk thread @tid's whole chunk and log every word whose content does not
 * match the pattern it should hold: words below @next_store_addr were
 * already rewritten in the current sweep (@cur_sweep_id); words at or above
 * it still carry the previous sweep's pattern (@prev_sweep_id). Anomalies
 * are written to the per-thread logfile opened by start_verification_log().
 */
static void verify_chunk(unsigned int tid, unsigned int *next_store_addr,
			 unsigned int cur_sweep_id,
			 unsigned int prev_sweep_id)
{
	unsigned int *iter_ptr;
	unsigned int size = RIM_CHUNK_SIZE;
	unsigned int expected;
	unsigned int observed;
	char *chunk_start = compute_chunk_start_addr(tid);
	int nr_anamolies = 0;
	start_verification_log(tid, next_store_addr,
			       cur_sweep_id, prev_sweep_id);
	for (iter_ptr = (unsigned int *)chunk_start;
	     (unsigned long)iter_ptr < (unsigned long)chunk_start + size;
	     iter_ptr++) {
		unsigned int expected_sweep_id;
		if (iter_ptr < next_store_addr) {
			expected_sweep_id = cur_sweep_id;
		} else {
			expected_sweep_id = prev_sweep_id;
		}
		expected = compute_store_pattern(tid, iter_ptr, expected_sweep_id);
		dcbf((volatile unsigned int*)iter_ptr); //Flush before reading
		observed = *iter_ptr;
		if (observed != expected) {
			nr_anamolies++;
			log_anamoly(tid, iter_ptr, expected, observed);
		}
	}
	end_verification_log(tid, nr_anamolies);
}
/*
 * Pin pthread @th to @cpu. The SCHED_FIFO block is deliberately disabled
 * via "if (0 && ...)" — FIFO scheduling suppresses random preemption,
 * which may be a factor in reproducing the bug (see inline comment).
 * NOTE(review): if ever enabled, sched_setscheduler(0, ...) acts on the
 * calling process, not on @th — confirm the intended target.
 */
static void set_pthread_cpu(pthread_t th, int cpu)
{
	cpu_set_t run_cpu_mask;
	struct sched_param param;
	CPU_ZERO(&run_cpu_mask);
	CPU_SET(cpu, &run_cpu_mask);
	pthread_setaffinity_np(th, sizeof(cpu_set_t), &run_cpu_mask);
	param.sched_priority = 1;
	if (0 && sched_setscheduler(0, SCHED_FIFO, &param) == -1) {
		/* haven't reproduced with this setting, it kills random preemption which may be a factor */
		fprintf(stderr, "could not set SCHED_FIFO, run as root?\n");
	}
}
/*
 * Pin the calling thread/process to @cpu. As in set_pthread_cpu(), the
 * SCHED_FIFO block is intentionally disabled ("if (0 && ...)").
 */
static void set_mycpu(int cpu)
{
	cpu_set_t run_cpu_mask;
	struct sched_param param;
	CPU_ZERO(&run_cpu_mask);
	CPU_SET(cpu, &run_cpu_mask);
	sched_setaffinity(0, sizeof(cpu_set_t), &run_cpu_mask);
	param.sched_priority = 1;
	if (0 && sched_setscheduler(0, SCHED_FIFO, &param) == -1) {
		fprintf(stderr, "could not set SCHED_FIFO, run as root?\n");
	}
}
static volatile int segv_wait;
/*
 * SIGSEGV handler for rim_threads: a store that lands while the snapshot
 * thread holds the mapping PROT_READ parks here, yielding until segv_wait
 * clears; returning from the handler re-executes the faulting access.
 */
static void segv_handler(int signo, siginfo_t *info, void *extra)
{
	while (segv_wait) {
		sched_yield();
	}
}
/*
 * Install segv_handler for SIGSEGV; exits the process if installation
 * fails.
 *
 * Fix: sa_mask was previously left uninitialized — reading indeterminate
 * sigaction fields is undefined behaviour and could block arbitrary
 * signals while the handler runs.
 */
static void set_segv_handler(void)
{
	struct sigaction sa;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = segv_handler;
	if (sigaction(SIGSEGV, &sa, NULL) == -1) {
		perror("sigaction");
		exit(EXIT_FAILURE);
	}
}
int timeout = 0;
/*
* This function is executed by every rim_thread.
*
* This function performs sweeps over the exclusive chunks of the
* rim_threads executing the rim-sequence one word at a time.
*/
/*
 * Per-thread worker: initialize this thread's chunk with sweep-0 patterns,
 * then run the flush/load/compare/store rim-sequence over the chunk in
 * repeated sweeps until a corruption is found (by any thread) or the test
 * times out. On stop, verify and log the state of the whole chunk.
 * @arg points at this thread's id in rim_thread_ids[].
 */
static void *rim_fn(void *arg)
{
	unsigned int tid = *((unsigned int *)arg);
	int size = RIM_CHUNK_SIZE;
	char *chunk_start = compute_chunk_start_addr(tid);
	unsigned int prev_sweep_id;
	unsigned int cur_sweep_id = 0;
	/* word access */
	unsigned int pattern = cur_sweep_id;
	unsigned int *pattern_ptr = &pattern;
	unsigned int *w_ptr, read_data;
	set_segv_handler();
	/*
	 * Let us initialize the chunk:
	 *
	 * Each word-aligned address addr in the chunk,
	 * is initialized to :
	 * |-------------------------------------------------|
	 * | tid   | word_offset(addr)         |    0        |
	 * |-------------------------------------------------|
	 */
	for (w_ptr = (unsigned int *)chunk_start;
	     (unsigned long)w_ptr < (unsigned long)(chunk_start) + size;
	     w_ptr++) {
		*pattern_ptr = compute_store_pattern(tid, w_ptr, cur_sweep_id);
		*w_ptr = *pattern_ptr;
	}
	while (!corruption_found && !timeout) {
		prev_sweep_id = cur_sweep_id;
		cur_sweep_id = cur_sweep_id + 1;
		for (w_ptr = (unsigned int *)chunk_start;
		     (unsigned long)w_ptr < (unsigned long)(chunk_start) + size;
		     w_ptr++) {
			unsigned int old_pattern;
			/*
			 * Compute the pattern that we would have
			 * stored at this location in the previous
			 * sweep.
			 */
			old_pattern = compute_store_pattern(tid, w_ptr, prev_sweep_id);
			/*
			 * FLUSH:Ensure that we flush the contents of
			 * the cache before loading
			 */
			dcbf((volatile unsigned int*)w_ptr); //Flush
			/* LOAD: Read the value */
			read_data = *w_ptr; //Load
			/*
			 * COMPARE: Is it the same as what we had stored
			 * in the previous sweep ? It better be!
			 */
			if (read_data != old_pattern) {
				/* No it isn't! Tell everyone */
				corruption_found = 1;
			}
			/*
			 * Before performing a store, let us check if
			 * any rim_thread has found a corruption.
			 */
			if (corruption_found || timeout) {
				/*
				 * Yes. Someone (including us!) has found
				 * a corruption :(
				 *
				 * Let us verify that our chunk is
				 * correct.
				 */
				/* But first, let us allow the dust to settle down! */
				verify_chunk(tid, w_ptr, cur_sweep_id, prev_sweep_id);
				return 0;
			}
			/*
			 * Compute the new pattern that we are going
			 * to write to this location
			 */
			*pattern_ptr = compute_store_pattern(tid, w_ptr, cur_sweep_id);
			/*
			 * STORE: Now let us write this pattern into
			 * the location
			 */
			*w_ptr = *pattern_ptr;
		}
	}
	return NULL;
}
static unsigned long start_cpu = 0;
static unsigned long nrthreads = 4;
static pthread_t mem_snapshot_thread;
/*
 * Snapshot thread: repeatedly make the first page of the shared mapping
 * read-only via map1, copy it out and back in through the writable alias
 * map2, then restore write permission. Rim-thread stores that land while
 * the page is PROT_READ park in the SEGV handler until segv_wait clears.
 *
 * Fixes: the malloc() result was previously unchecked (NULL would crash
 * in memcpy()), and the snapshot buffer was leaked on exit.
 */
static void *mem_snapshot_fn(void *arg)
{
	int page_size = getpagesize();
	size_t size = page_size;
	void *tmp = malloc(size);
	if (!tmp) {
		perror("malloc");
		exit(1);
	}
	while (!corruption_found && !timeout) {
		/* Stop memory migration once corruption is found */
		segv_wait = 1;
		mprotect(map1, size, PROT_READ);
		/*
		 * Load from the working alias (map1). Loading from map2
		 * also fails.
		 */
		memcpy(tmp, map1, size);
		/*
		 * Stores must go via map2 which has write permissions, but
		 * the corrupted data tends to be seen in the snapshot buffer,
		 * so corruption does not appear to be introduced at the
		 * copy-back via map2 alias here.
		 */
		memcpy(map2, tmp, size);
		/*
		 * Before releasing other threads, must ensure the copy
		 * back to
		 */
		asm volatile("sync" ::: "memory");
		mprotect(map1, size, PROT_READ|PROT_WRITE);
		asm volatile("sync" ::: "memory");
		segv_wait = 0;
		usleep(1); /* This value makes a big difference */
	}
	free(tmp);
	return 0;
}
/* SIGALRM handler: flag all worker loops to wind down after the run time. */
void alrm_sighandler(int sig)
{
	timeout = 1;
}
/*
 * Entry point: parse options (-r start cpu, -n nr threads, -l logdir,
 * -t timeout), set up the doubly-mapped SysV shared memory (map1/map2
 * alias the same segment), fork per-CPU sched_yield() children to force
 * context switches, start the rim threads plus the snapshot thread, and
 * exit 1 if corruption was detected before the timeout, 0 otherwise.
 */
int main(int argc, char *argv[])
{
	int c;
	int page_size = getpagesize();
	time_t now;
	int i, dir_error;
	pthread_attr_t attr;
	key_t shm_key = (key_t) getpid();
	int shmid, run_time = 20 * 60;
	struct sigaction sa_alrm;
	snprintf(logdir, LOGDIR_NAME_SIZE,
		 "/tmp/logdir-%u", (unsigned int)getpid());
	while ((c = getopt(argc, argv, "r:hn:l:t:")) != -1) {
		switch(c) {
		case 'r':
			start_cpu = strtoul(optarg, NULL, 10);
			break;
		case 'h':
			printf("%s [-r <start_cpu>] [-n <nrthreads>] [-l <logdir>] [-t <timeout>]\n", argv[0]);
			exit(0);
			break;
		case 'n':
			nrthreads = strtoul(optarg, NULL, 10);
			break;
		case 'l':
			strncpy(logdir, optarg, LOGDIR_NAME_SIZE - 1);
			break;
		case 't':
			run_time = strtoul(optarg, NULL, 10);
			break;
		default:
			printf("invalid option\n");
			exit(0);
			break;
		}
	}
	if (nrthreads > MAX_THREADS)
		nrthreads = MAX_THREADS;
	/* Shared segment mapped twice: map1 (protection toggled by the
	 * snapshot thread) and map2 (always-writable alias). */
	shmid = shmget(shm_key, page_size, IPC_CREAT|0666);
	if (shmid < 0) {
		err_msg("Failed shmget\n");
	}
	map1 = shmat(shmid, NULL, 0);
	if (map1 == (void *) -1) {
		err_msg("Failed shmat");
	}
	map2 = shmat(shmid, NULL, 0);
	if (map2 == (void *) -1) {
		err_msg("Failed shmat");
	}
	dir_error = mkdir(logdir, 0755);
	if (dir_error) {
		err_msg("Failed mkdir");
	}
	printf("start_cpu list:%lu\n", start_cpu);
	printf("number of worker threads:%lu + 1 snapshot thread\n", nrthreads);
	printf("Allocated address:0x%016lx + secondary map:0x%016lx\n", (unsigned long)map1, (unsigned long)map2);
	printf("logdir at : %s\n", logdir);
	printf("Timeout: %d seconds\n", run_time);
	time(&now);
	printf("=================================\n");
	printf(" Starting Test\n");
	printf(" %s", ctime(&now));
	printf("=================================\n");
	/* One sched_yield() child per worker CPU: keeps the scheduler busy
	 * so rim threads get context-switched (mtpidr) frequently. The
	 * children die with us via PR_SET_PDEATHSIG. */
	for (i = 0; i < nrthreads; i++) {
		if (1 && !fork()) {
			prctl(PR_SET_PDEATHSIG, SIGKILL);
			set_mycpu(start_cpu + i);
			for (;;)
				sched_yield();
			exit(0);
		}
	}
	sa_alrm.sa_handler = &alrm_sighandler;
	sigemptyset(&sa_alrm.sa_mask);
	sa_alrm.sa_flags = 0;
	if (sigaction(SIGALRM, &sa_alrm, 0) == -1) {
		err_msg("Failed signal handler registration\n");
	}
	alarm(run_time);
	pthread_attr_init(&attr);
	for (i = 0; i < nrthreads; i++) {
		rim_thread_ids[i] = i;
		pthread_create(&rim_threads[i], &attr, rim_fn, &rim_thread_ids[i]);
		set_pthread_cpu(rim_threads[i], start_cpu + i);
	}
	/* The snapshot thread runs on the CPU after the last rim thread. */
	pthread_create(&mem_snapshot_thread, &attr, mem_snapshot_fn, map1);
	set_pthread_cpu(mem_snapshot_thread, start_cpu + i);
	pthread_join(mem_snapshot_thread, NULL);
	for (i = 0; i < nrthreads; i++) {
		pthread_join(rim_threads[i], NULL);
	}
	/* The loops only end early on corruption; a clean timeout passes. */
	if (!timeout) {
		time(&now);
		printf("=================================\n");
		printf(" Data Corruption Detected\n");
		printf(" %s", ctime(&now));
		printf(" See logfiles in %s\n", logdir);
		printf("=================================\n");
		return 1;
	}
	return 0;
}
| linux-master | tools/testing/selftests/powerpc/mm/tlbie_test.c |
/*
* Copyright IBM Corp.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
*/
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <ucontext.h>
#include <unistd.h>
#include "utils.h"
char *file_name;
int in_test;
volatile int faulted;
volatile void *dar;
int errors;
/*
 * SIGSEGV handler: record that a fault happened and the faulting data
 * address (DAR), then step NIP past the 4-byte faulting instruction so
 * the test can continue. Faults outside a test run are fatal.
 */
static void segv(int signum, siginfo_t *info, void *ctxt_v)
{
	ucontext_t *ctxt = (ucontext_t *)ctxt_v;
	struct pt_regs *regs = ctxt->uc_mcontext.regs;
	if (!in_test) {
		fprintf(stderr, "Segfault outside of test !\n");
		exit(1);
	}
	faulted = 1;
	dar = (void *)regs->dar;
	regs->nip += 4;
}
/*
 * Load a word from @addr. The twi/isync sequence makes the access
 * synchronous so any SEGV is taken before this returns.
 */
static inline void do_read(const volatile void *addr)
{
	int ret;
	asm volatile("lwz %0,0(%1); twi 0,%0,0; isync;\n"
		     : "=r" (ret) : "r" (addr) : "memory");
}
/* Store a fixed test word to @addr; sync orders the store. */
static inline void do_write(const volatile void *addr)
{
	int val = 0x1234567;
	asm volatile("stw %0,0(%1); sync; \n"
		     : : "r" (val), "r" (addr) : "memory");
}
/*
 * Check whether the preceding access to @addr faulted as the subpage
 * protection map set up in run_test() dictates: subpage (page + 3) % 16
 * is inaccessible (faults on read and write); subpage (page + 1) % 16 is
 * read-only (faults on write only). Mismatches increment 'errors'; the
 * 'faulted' flag is consumed (cleared) here.
 */
static inline void check_faulted(void *addr, long page, long subpage, int write)
{
	int want_fault = (subpage == ((page + 3) % 16));
	if (write)
		want_fault |= (subpage == ((page + 1) % 16));
	if (faulted != want_fault) {
		printf("Failed at %p (p=%ld,sp=%ld,w=%d), want=%s, got=%s !\n",
		       addr, page, subpage, write,
		       want_fault ? "fault" : "pass",
		       faulted ? "fault" : "pass");
		++errors;
	}
	if (faulted) {
		if (dar != addr) {
			printf("Fault expected at %p and happened at %p !\n",
			       addr, dar);
		}
		faulted = 0;
		asm volatile("sync" : : : "memory");
	}
}
static int run_test(void *addr, unsigned long size)
{
unsigned int *map;
long i, j, pages, err;
pages = size / 0x10000;
map = malloc(pages * 4);
assert(map);
/*
* for each page, mark subpage i % 16 read only and subpage
* (i + 3) % 16 inaccessible
*/
for (i = 0; i < pages; i++) {
map[i] = (0x40000000 >> (((i + 1) * 2) % 32)) |
(0xc0000000 >> (((i + 3) * 2) % 32));
}
err = syscall(__NR_subpage_prot, addr, size, map);
if (err) {
perror("subpage_perm");
return 1;
}
free(map);
in_test = 1;
errors = 0;
for (i = 0; i < pages; i++) {
for (j = 0; j < 16; j++, addr += 0x1000) {
do_read(addr);
check_faulted(addr, i, j, 0);
do_write(addr);
check_faulted(addr, i, j, 1);
}
}
in_test = 0;
if (errors) {
printf("%d errors detected\n", errors);
return 1;
}
return 0;
}
/*
 * Probe for the subpage_prot syscall: a zero-length call either succeeds
 * or, when the syscall exists, fails with an errno other than
 * ENOENT/ENOSYS. Returns non-zero when available.
 */
static int syscall_available(void)
{
	int probe;

	errno = 0;
	probe = syscall(__NR_subpage_prot, 0, 0, 0);
	if (probe == 0)
		return 1;
	return errno != ENOENT && errno != ENOSYS;
}
/*
 * Subpage-protection test on anonymous (malloc'd) memory. Requires the
 * subpage_prot syscall and a 64K kernel page size. Allocates a 64MB
 * 64K-aligned block and runs the access checks over it.
 */
int test_anon(void)
{
	unsigned long align;
	struct sigaction act = {
		.sa_sigaction = segv,
		.sa_flags = SA_SIGINFO
	};
	void *mallocblock;
	unsigned long mallocsize;
	SKIP_IF(!syscall_available());
	if (getpagesize() != 0x10000) {
		fprintf(stderr, "Kernel page size must be 64K!\n");
		return 1;
	}
	sigaction(SIGSEGV, &act, NULL);
	mallocsize = 4 * 16 * 1024 * 1024;
	FAIL_IF(posix_memalign(&mallocblock, 64 * 1024, mallocsize));
	/* Defensive round-up to the next 64K boundary (posix_memalign
	 * already guarantees this alignment). */
	align = (unsigned long)mallocblock;
	if (align & 0xffff)
		align = (align | 0xffff) + 1;
	mallocblock = (void *)align;
	printf("allocated malloc block of 0x%lx bytes at %p\n",
	       mallocsize, mallocblock);
	printf("testing malloc block...\n");
	return run_test(mallocblock, mallocsize);
}
/*
 * Subpage-protection test on a file-backed shared mapping of file_name.
 * The file size is rounded down to a whole number of 64K pages before
 * mapping.
 *
 * Fixes: the file descriptor was previously leaked (the mapping keeps
 * the file referenced, so it can be closed right after mmap), and a
 * file smaller than 64K now fails with a clear message instead of an
 * obscure mmap error.
 */
int test_file(void)
{
	struct sigaction act = {
		.sa_sigaction = segv,
		.sa_flags = SA_SIGINFO
	};
	void *fileblock;
	off_t filesize;
	int fd;
	SKIP_IF(!syscall_available());
	fd = open(file_name, O_RDWR);
	if (fd == -1) {
		perror("failed to open file");
		return 1;
	}
	sigaction(SIGSEGV, &act, NULL);
	/* Round down to a whole number of 64K pages. */
	filesize = lseek(fd, 0, SEEK_END);
	if (filesize & 0xffff)
		filesize &= ~0xfffful;
	if (filesize <= 0) {
		fprintf(stderr, "file must be at least 64K\n");
		close(fd);
		return 1;
	}
	fileblock = mmap(NULL, filesize, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (fileblock == MAP_FAILED) {
		perror("failed to map file");
		close(fd);
		return 1;
	}
	close(fd);
	printf("allocated %s for 0x%lx bytes at %p\n",
	       file_name, filesize, fileblock);
	printf("testing file map...\n");
	return run_test(fileblock, filesize);
}
/*
 * Run the anonymous-memory test first, then the file-backed test against
 * argv[1] (default "tempfile").
 */
int main(int argc, char *argv[])
{
	int rc = test_harness(test_anon, "subpage_prot_anon");

	if (rc)
		return rc;

	file_name = (argc > 1) ? argv[1] : "tempfile";
	return test_harness(test_file, "subpage_prot_file");
}
| linux-master | tools/testing/selftests/powerpc/mm/subpage_prot.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2019, Michael Ellerman, IBM Corp.
//
// Test that allocating memory beyond the memory limit and then forking is
// handled correctly, ie. the child is able to access the mappings beyond the
// memory limit and the child's writes are not visible to the parent.
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "utils.h"
#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE MAP_FIXED // "Should be safe" above 512TB
#endif
/*
 * Map a page at 512TB (allocating an extended address-space context),
 * write to it in the parent, then have a forked child write its own pid
 * to the same address. Verifies copy-on-write across the fork: the child
 * can access the high mapping and its write is not visible to the parent.
 * The two pipes (p2c/c2p) sequence parent and child accesses. Requires
 * 64K pages (4K kernels lack the 4PB address space).
 */
static int test(void)
{
	int p2c[2], c2p[2], rc, status, c, *p;
	unsigned long page_size;
	pid_t pid;
	page_size = sysconf(_SC_PAGESIZE);
	SKIP_IF(page_size != 65536);
	// Create a mapping at 512TB to allocate an extended_id
	p = mmap((void *)(512ul << 40), page_size, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		printf("Error: couldn't mmap(), confirm kernel has 4TB support?\n");
		return 1;
	}
	printf("parent writing %p = 1\n", p);
	*p = 1;
	FAIL_IF(pipe(p2c) == -1 || pipe(c2p) == -1);
	pid = fork();
	if (pid == 0) {
		/* Child: wait for go, write own pid, signal parent, wait. */
		FAIL_IF(read(p2c[0], &c, 1) != 1);
		pid = getpid();
		printf("child writing  %p = %d\n", p, pid);
		*p = pid;
		FAIL_IF(write(c2p[1], &c, 1) != 1);
		FAIL_IF(read(p2c[0], &c, 1) != 1);
		exit(0);
	}
	c = 0;
	FAIL_IF(write(p2c[1], &c, 1) != 1);
	FAIL_IF(read(c2p[0], &c, 1) != 1);
	// Prevent compiler optimisation
	barrier();
	rc = 0;
	printf("parent reading %p = %d\n", p, *p);
	if (*p != 1) {
		printf("Error: BUG! parent saw child's write! *p = %d\n", *p);
		rc = 1;
	}
	FAIL_IF(write(p2c[1], &c, 1) != 1);
	FAIL_IF(waitpid(pid, &status, 0) == -1);
	FAIL_IF(!WIFEXITED(status) || WEXITSTATUS(status));
	if (rc == 0)
		printf("success: test completed OK\n");
	return rc;
}
/* Entry point: run the test under the selftest harness. */
int main(void)
{
	int rc;

	rc = test_harness(test, "large_vm_fork_separation");
	return rc;
}
| linux-master | tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2017 John Sperbeck
*
* Test that an access to a mapped but inaccessible area causes a SEGV and
* reports si_code == SEGV_ACCERR.
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <sys/mman.h>
#include <assert.h>
#include <ucontext.h>
#include "utils.h"
static bool faulted;
static int si_code;
/*
 * SIGSEGV handler: record the fault and its si_code for the test body,
 * then step NIP past the 4-byte faulting instruction so execution
 * continues.
 */
static void segv_handler(int n, siginfo_t *info, void *ctxt_v)
{
	ucontext_t *ctxt = (ucontext_t *)ctxt_v;
	struct pt_regs *regs = ctxt->uc_mcontext.regs;
	faulted = true;
	si_code = info->si_code;
	regs->nip += 4;
}
/*
 * Map a page with no permissions (prot = 0), then read and write it.
 * Each access must fault and report si_code == SEGV_ACCERR (the mapping
 * exists but forbids the access), not SEGV_MAPERR.
 */
int test_segv_errors(void)
{
	struct sigaction act = {
		.sa_sigaction = segv_handler,
		.sa_flags = SA_SIGINFO,
	};
	char c, *p = NULL;
	p = mmap(NULL, getpagesize(), 0, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	FAIL_IF(p == MAP_FAILED);
	FAIL_IF(sigaction(SIGSEGV, &act, NULL) != 0);
	faulted = false;
	si_code = 0;
	/*
	 * We just need a compiler barrier, but mb() works and has the nice
	 * property of being easy to spot in the disassembly.
	 */
	mb();
	c = *p;
	mb();
	FAIL_IF(!faulted);
	FAIL_IF(si_code != SEGV_ACCERR);
	faulted = false;
	si_code = 0;
	mb();
	*p = c;
	mb();
	FAIL_IF(!faulted);
	FAIL_IF(si_code != SEGV_ACCERR);
	return 0;
}
/* Entry point: run the test under the selftest harness. */
int main(void)
{
	int rc;

	rc = test_harness(test_segv_errors, "segv_errors");
	return rc;
}
| linux-master | tools/testing/selftests/powerpc/mm/segv_errors.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include "utils.h"
/* This must match the huge page & THP size */
#define SIZE (16 * 1024 * 1024)
/*
 * One iteration of the hugetlb-vs-THP interaction test: try to place a
 * 16MB hugepage mapping at a fixed address, unmap it if it succeeded,
 * then map normal anonymous memory at the same address and touch it.
 * On buggy kernels the normal-page access after the hugepage attempt
 * could hang. Returns 0 on success, 1 on any mmap/munmap failure.
 */
static int test_body(void)
{
	void *addr;
	char *p;
	addr = (void *)0xa0000000;
	p = mmap(addr, SIZE, PROT_READ | PROT_WRITE,
		 MAP_HUGETLB | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (p != MAP_FAILED) {
		/*
		 * Typically the mmap will fail because no huge pages are
		 * allocated on the system. But if there are huge pages
		 * allocated the mmap will succeed. That's fine too, we just
		 * munmap here before continuing.  munmap() length of
		 * MAP_HUGETLB memory must be hugepage aligned.
		 */
		if (munmap(addr, SIZE)) {
			perror("munmap");
			return 1;
		}
	}
	p = mmap(addr, SIZE, PROT_READ | PROT_WRITE,
		 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED) {
		printf("Mapping failed @ %p\n", addr);
		perror("mmap");
		return 1;
	}
	/*
	 * Either a user or kernel access is sufficient to trigger the bug.
	 * A kernel access is easier to spot & debug, as it will trigger the
	 * softlockup or RCU stall detectors, and when the system is kicked
	 * into xmon we get a backtrace in the kernel.
	 *
	 * A good option is:
	 *    getcwd(p, SIZE);
	 *
	 * For the purposes of this testcase it's preferable to spin in
	 * userspace, so the harness can kill us if we get stuck. That way we
	 * see a test failure rather than a dead system.
	 */
	*p = 0xf;
	munmap(addr, SIZE);
	return 0;
}
/* Repeat the map/touch/unmap cycle many times; fail on first error. */
static int test_main(void)
{
	int iter;

	/* 10,000 because it's a "bunch", and completes reasonably quickly */
	for (iter = 0; iter < 10000; iter++) {
		if (test_body())
			return 1;
	}
	return 0;
}
/* Entry point: run the test under the selftest harness. */
int main(void)
{
	int rc;

	rc = test_harness(test_main, "hugetlb_vs_thp");
	return rc;
}
| linux-master | tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2020, Sandipan Das, IBM Corp.
*
* Test if applying execute protection on pages using memory
* protection keys works as expected.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include "pkeys.h"
#define PPC_INST_NOP 0x60000000
#define PPC_INST_TRAP 0x7fe00008
#define PPC_INST_BLR 0x4e800020
static volatile sig_atomic_t fault_pkey, fault_code, fault_type;
static volatile sig_atomic_t remaining_faults;
static volatile unsigned int *fault_addr;
static unsigned long pgsize, numinsns;
static unsigned int *insns;
/*
 * SIGTRAP handler: the trap instruction at the start of the executable
 * region must never actually run. Warn if the fault came from an
 * unexpected address, then exit with failure unconditionally — reaching
 * here at all means execute protection was not enforced as intended.
 */
static void trap_handler(int signum, siginfo_t *sinfo, void *ctx)
{
	/* Check if this fault originated from the expected address */
	if (sinfo->si_addr != (void *) fault_addr)
		sigsafe_err("got a fault for an unexpected address\n");
	_exit(1);
}
/*
 * SIGSEGV handler: validate that the fault matches what the current test
 * case expects (address, pkey and remaining-fault budget), then restore
 * permissions so the test can continue:
 *  - SEGV_ACCERR: page-permission fault; re-grant read/write via mprotect.
 *  - SEGV_PKUERR: pkey fault; clear the pkey's access rights, or for
 *    execute-denial re-run mprotect(PROT_EXEC) to reassociate the
 *    exec-only pkey (IAMR cannot be written directly from userspace).
 * Any unexpected fault terminates the test with failure.
 */
static void segv_handler(int signum, siginfo_t *sinfo, void *ctx)
{
	int signal_pkey;
	signal_pkey = siginfo_pkey(sinfo);
	fault_code = sinfo->si_code;
	/* Check if this fault originated from the expected address */
	if (sinfo->si_addr != (void *) fault_addr) {
		sigsafe_err("got a fault for an unexpected address\n");
		_exit(1);
	}
	/* Check if too many faults have occurred for a single test case */
	if (!remaining_faults) {
		sigsafe_err("got too many faults for the same address\n");
		_exit(1);
	}
	/* Restore permissions in order to continue */
	switch (fault_code) {
	case SEGV_ACCERR:
		if (mprotect(insns, pgsize, PROT_READ | PROT_WRITE)) {
			sigsafe_err("failed to set access permissions\n");
			_exit(1);
		}
		break;
	case SEGV_PKUERR:
		if (signal_pkey != fault_pkey) {
			sigsafe_err("got a fault for an unexpected pkey\n");
			_exit(1);
		}
		switch (fault_type) {
		case PKEY_DISABLE_ACCESS:
			pkey_set_rights(fault_pkey, 0);
			break;
		case PKEY_DISABLE_EXECUTE:
			/*
			 * Reassociate the exec-only pkey with the region
			 * to be able to continue. Unlike AMR, we cannot
			 * set IAMR directly from userspace to restore the
			 * permissions.
			 */
			if (mprotect(insns, pgsize, PROT_EXEC)) {
				sigsafe_err("failed to set execute permissions\n");
				_exit(1);
			}
			break;
		default:
			sigsafe_err("got a fault with an unexpected type\n");
			_exit(1);
		}
		break;
	default:
		sigsafe_err("got a fault with an unexpected code\n");
		_exit(1);
	}
	remaining_faults--;
}
/*
 * Core test body: verify that memory protection keys restrict reads,
 * writes and execution as expected on a page of instructions.
 *
 * Walks through read/write accesses with and without AMR restrictions
 * (expecting SEGV_ACCERR or SEGV_PKUERR as appropriate), then executes
 * the region under every valid combination of pkey rights, expecting a
 * pkey fault only when PKEY_DISABLE_EXECUTE is set.  Returns 0 on
 * success; FAIL_IF reports failures through the selftest harness.
 */
static int test(void)
{
	struct sigaction segv_act, trap_act;
	unsigned long rights;
	int pkey, ret, i;
	/* Skip (not fail) on kernels/CPUs without pkey support */
	ret = pkeys_unsupported();
	if (ret)
		return ret;
	/* Setup SIGSEGV handler */
	segv_act.sa_handler = 0;
	segv_act.sa_sigaction = segv_handler;
	FAIL_IF(sigprocmask(SIG_SETMASK, 0, &segv_act.sa_mask) != 0);
	segv_act.sa_flags = SA_SIGINFO;
	segv_act.sa_restorer = 0;
	FAIL_IF(sigaction(SIGSEGV, &segv_act, NULL) != 0);
	/* Setup SIGTRAP handler */
	trap_act.sa_handler = 0;
	trap_act.sa_sigaction = trap_handler;
	FAIL_IF(sigprocmask(SIG_SETMASK, 0, &trap_act.sa_mask) != 0);
	trap_act.sa_flags = SA_SIGINFO;
	trap_act.sa_restorer = 0;
	FAIL_IF(sigaction(SIGTRAP, &trap_act, NULL) != 0);
	/* Setup executable region */
	pgsize = getpagesize();
	numinsns = pgsize / sizeof(unsigned int);
	insns = (unsigned int *) mmap(NULL, pgsize, PROT_READ | PROT_WRITE,
				      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	FAIL_IF(insns == MAP_FAILED);
	/* Write the instruction words */
	for (i = 1; i < numinsns - 1; i++)
		insns[i] = PPC_INST_NOP;
	/*
	 * Set the first instruction as an unconditional trap. If
	 * the last write to this address succeeds, this should
	 * get overwritten by a no-op.
	 */
	insns[0] = PPC_INST_TRAP;
	/*
	 * Later, to jump to the executable region, we use a branch
	 * and link instruction (bctrl) which sets the return address
	 * automatically in LR. Use that to return back.
	 */
	insns[numinsns - 1] = PPC_INST_BLR;
	/* Allocate a pkey that restricts execution */
	rights = PKEY_DISABLE_EXECUTE;
	pkey = sys_pkey_alloc(0, rights);
	FAIL_IF(pkey < 0);
	/*
	 * Pick the first instruction's address from the executable
	 * region.
	 */
	fault_addr = insns;
	/* The following two cases will avoid SEGV_PKUERR */
	fault_type = -1;
	fault_pkey = -1;
	/*
	 * Read an instruction word from the address when AMR bits
	 * are not set i.e. the pkey permits both read and write
	 * access.
	 *
	 * This should not generate a fault as having PROT_EXEC
	 * implies PROT_READ on GNU systems. The pkey currently
	 * restricts execution only based on the IAMR bits. The
	 * AMR bits are cleared.
	 */
	remaining_faults = 0;
	FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);
	printf("read from %p, pkey permissions are %s\n", fault_addr,
	       pkey_rights(rights));
	i = *fault_addr;
	FAIL_IF(remaining_faults != 0);
	/*
	 * Write an instruction word to the address when AMR bits
	 * are not set i.e. the pkey permits both read and write
	 * access.
	 *
	 * This should generate an access fault as having just
	 * PROT_EXEC also restricts writes. The pkey currently
	 * restricts execution only based on the IAMR bits. The
	 * AMR bits are cleared.
	 */
	remaining_faults = 1;
	FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);
	printf("write to %p, pkey permissions are %s\n", fault_addr,
	       pkey_rights(rights));
	*fault_addr = PPC_INST_TRAP;
	FAIL_IF(remaining_faults != 0 || fault_code != SEGV_ACCERR);
	/* The following three cases will generate SEGV_PKUERR */
	rights |= PKEY_DISABLE_ACCESS;
	fault_type = PKEY_DISABLE_ACCESS;
	fault_pkey = pkey;
	/*
	 * Read an instruction word from the address when AMR bits
	 * are set i.e. the pkey permits neither read nor write
	 * access.
	 *
	 * This should generate a pkey fault based on AMR bits only
	 * as having PROT_EXEC implicitly allows reads.
	 */
	remaining_faults = 1;
	FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);
	pkey_set_rights(pkey, rights);
	printf("read from %p, pkey permissions are %s\n", fault_addr,
	       pkey_rights(rights));
	i = *fault_addr;
	FAIL_IF(remaining_faults != 0 || fault_code != SEGV_PKUERR);
	/*
	 * Write an instruction word to the address when AMR bits
	 * are set i.e. the pkey permits neither read nor write
	 * access.
	 *
	 * This should generate two faults. First, a pkey fault
	 * based on AMR bits and then an access fault since
	 * PROT_EXEC does not allow writes.
	 */
	remaining_faults = 2;
	FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);
	pkey_set_rights(pkey, rights);
	printf("write to %p, pkey permissions are %s\n", fault_addr,
	       pkey_rights(rights));
	*fault_addr = PPC_INST_NOP;
	FAIL_IF(remaining_faults != 0 || fault_code != SEGV_ACCERR);
	/* Free the current pkey */
	sys_pkey_free(pkey);
	rights = 0;
	do {
		/*
		 * Allocate pkeys with all valid combinations of read,
		 * write and execute restrictions.
		 */
		pkey = sys_pkey_alloc(0, rights);
		FAIL_IF(pkey < 0);
		/*
		 * Jump to the executable region. AMR bits may or may not
		 * be set but they should not affect execution.
		 *
		 * This should generate pkey faults based on IAMR bits which
		 * may be set to restrict execution.
		 *
		 * The first iteration also checks if the overwrite of the
		 * first instruction word from a trap to a no-op succeeded.
		 */
		fault_pkey = pkey;
		fault_type = -1;
		remaining_faults = 0;
		if (rights & PKEY_DISABLE_EXECUTE) {
			fault_type = PKEY_DISABLE_EXECUTE;
			remaining_faults = 1;
		}
		FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);
		printf("execute at %p, pkey permissions are %s\n", fault_addr,
		       pkey_rights(rights));
		asm volatile("mtctr %0; bctrl" : : "r"(insns));
		FAIL_IF(remaining_faults != 0);
		if (rights & PKEY_DISABLE_EXECUTE)
			FAIL_IF(fault_code != SEGV_PKUERR);
		/* Free the current pkey */
		sys_pkey_free(pkey);
		/* Find next valid combination of pkey rights */
		rights = next_pkey_rights(rights);
	} while (rights);
	/* Cleanup */
	munmap((void *) insns, pgsize);
	return 0;
}
int main(void)
{
	/* Run under the powerpc selftest harness; it prints pass/skip/fail */
	return test_harness(test, "pkey_exec_prot");
}
| linux-master | tools/testing/selftests/powerpc/mm/pkey_exec_prot.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020, Sandipan Das, IBM Corp.
*
* Test if the signal information reports the correct memory protection
* key upon getting a key access violation fault for a page that was
* attempted to be protected by two different keys from two competing
* threads at the same time.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/mman.h>
#include "pkeys.h"
#define PPC_INST_NOP 0x60000000
#define PPC_INST_BLR 0x4e800020
#define PROT_RWX (PROT_READ | PROT_WRITE | PROT_EXEC)
#define NUM_ITERATIONS 1000000
static volatile sig_atomic_t perm_pkey, rest_pkey;
static volatile sig_atomic_t rights, fault_count;
static volatile unsigned int *volatile fault_addr;
static pthread_barrier_t iteration_barrier;
/*
 * SIGSEGV handler for the pkey_siginfo race test.
 *
 * Requires the fault to be a pkey violation (SEGV_PKUERR) at the
 * expected address, attributed to the restrictive pkey — never the
 * permissive one — and to occur at most once per iteration.  It then
 * relaxes the offending restriction so the interrupted access can be
 * retried.
 */
static void segv_handler(int signum, siginfo_t *sinfo, void *ctx)
{
	void *pgstart;
	size_t pgsize;
	int pkey;
	pkey = siginfo_pkey(sinfo);
	/* Check if this fault originated from a pkey access violation */
	if (sinfo->si_code != SEGV_PKUERR) {
		sigsafe_err("got a fault for an unexpected reason\n");
		_exit(1);
	}
	/* Check if this fault originated from the expected address */
	if (sinfo->si_addr != (void *) fault_addr) {
		sigsafe_err("got a fault for an unexpected address\n");
		_exit(1);
	}
	/* Check if this fault originated from the restrictive pkey */
	if (pkey != rest_pkey) {
		sigsafe_err("got a fault for an unexpected pkey\n");
		_exit(1);
	}
	/* Check if too many faults have occurred for the same iteration */
	if (fault_count > 0) {
		sigsafe_err("got too many faults for the same address\n");
		_exit(1);
	}
	/* Round the faulting address down to its page boundary */
	pgsize = getpagesize();
	pgstart = (void *) ((unsigned long) fault_addr & ~(pgsize - 1));
	/*
	 * If the current fault occurred due to lack of execute rights,
	 * reassociate the page with the exec-only pkey since execute
	 * rights cannot be changed directly for the faulting pkey as
	 * IAMR is inaccessible from userspace.
	 *
	 * Otherwise, if the current fault occurred due to lack of
	 * read-write rights, change the AMR permission bits for the
	 * pkey.
	 *
	 * This will let the test continue.
	 */
	if (rights == PKEY_DISABLE_EXECUTE &&
	    mprotect(pgstart, pgsize, PROT_EXEC))
		_exit(1);
	else
		pkey_set_rights(pkey, 0);
	fault_count++;
}
/* Shared mapping that the two racing threads operate on. */
struct region {
	unsigned long rights;	/* pkey restriction under test (PKEY_DISABLE_*) */
	unsigned int *base;	/* start of the mapped instruction region */
	size_t size;		/* size of the region in bytes */
};
/*
 * Thread body: repeatedly associate the shared region with a fully
 * permissive pkey, racing against the protect_access() thread.  The
 * two threads synchronise once per iteration via iteration_barrier.
 * @p points at a struct region; always returns NULL.
 */
static void *protect(void *p)
{
	unsigned long rights;
	unsigned int *base;
	size_t size;
	int tid, i;
	tid = gettid();
	base = ((struct region *) p)->base;
	size = ((struct region *) p)->size;
	FAIL_IF_EXIT(!base);
	/* No read, write and execute restrictions */
	rights = 0;
	printf("tid %d, pkey permissions are %s\n", tid, pkey_rights(rights));
	/* Allocate the permissive pkey */
	perm_pkey = sys_pkey_alloc(0, rights);
	FAIL_IF_EXIT(perm_pkey < 0);
	/*
	 * Repeatedly try to protect the common region with a permissive
	 * pkey
	 */
	for (i = 0; i < NUM_ITERATIONS; i++) {
		/*
		 * Wait until the other thread has finished allocating the
		 * restrictive pkey or until the next iteration has begun
		 */
		pthread_barrier_wait(&iteration_barrier);
		/* Try to associate the permissive pkey with the region */
		FAIL_IF_EXIT(sys_pkey_mprotect(base, size, PROT_RWX,
					       perm_pkey));
	}
	/* Free the permissive pkey */
	sys_pkey_free(perm_pkey);
	return NULL;
}
/*
 * Thread body: repeatedly associate the shared region with a
 * restrictive pkey and then read, write or execute a randomly chosen
 * instruction word inside it (selected by p->rights), racing against
 * the protect() thread.  A pkey fault handled by segv_handler() is
 * expected on each access; the handler checks the fault is attributed
 * to this thread's restrictive pkey.  @p points at a struct region;
 * always returns NULL.
 */
static void *protect_access(void *p)
{
	size_t size, numinsns;
	unsigned int *base;
	int tid, i;
	tid = gettid();
	base = ((struct region *) p)->base;
	size = ((struct region *) p)->size;
	rights = ((struct region *) p)->rights;
	numinsns = size / sizeof(base[0]);
	FAIL_IF_EXIT(!base);
	/* Allocate the restrictive pkey */
	rest_pkey = sys_pkey_alloc(0, rights);
	FAIL_IF_EXIT(rest_pkey < 0);
	printf("tid %d, pkey permissions are %s\n", tid, pkey_rights(rights));
	printf("tid %d, %s randomly in range [%p, %p]\n", tid,
	       (rights == PKEY_DISABLE_EXECUTE) ? "execute" :
	       (rights == PKEY_DISABLE_WRITE) ? "write" : "read",
	       base, base + numinsns);
	/*
	 * Repeatedly try to protect the common region with a restrictive
	 * pkey and read, write or execute from it
	 */
	for (i = 0; i < NUM_ITERATIONS; i++) {
		/*
		 * Wait until the other thread has finished allocating the
		 * permissive pkey or until the next iteration has begun
		 */
		pthread_barrier_wait(&iteration_barrier);
		/* Try to associate the restrictive pkey with the region */
		FAIL_IF_EXIT(sys_pkey_mprotect(base, size, PROT_RWX,
					       rest_pkey));
		/* Choose a random instruction word address from the region */
		fault_addr = base + (rand() % numinsns);
		fault_count = 0;
		switch (rights) {
		/* Read protection test */
		case PKEY_DISABLE_ACCESS:
			/*
			 * Read an instruction word from the region and
			 * verify if it has not been overwritten to
			 * something unexpected
			 */
			FAIL_IF_EXIT(*fault_addr != PPC_INST_NOP &&
				     *fault_addr != PPC_INST_BLR);
			break;
		/* Write protection test */
		case PKEY_DISABLE_WRITE:
			/*
			 * Write an instruction word to the region and
			 * verify if the overwrite has succeeded
			 */
			*fault_addr = PPC_INST_BLR;
			FAIL_IF_EXIT(*fault_addr != PPC_INST_BLR);
			break;
		/* Execute protection test */
		case PKEY_DISABLE_EXECUTE:
			/* Jump to the region and execute instructions */
			asm volatile(
				"mtctr	%0; bctrl"
				: : "r"(fault_addr) : "ctr", "lr");
			break;
		}
		/*
		 * Restore the restrictions originally imposed by the
		 * restrictive pkey as the signal handler would have
		 * cleared out the corresponding AMR bits
		 */
		pkey_set_rights(rest_pkey, rights);
	}
	/* Free restrictive pkey */
	sys_pkey_free(rest_pkey);
	return NULL;
}
/*
 * Cycle through every available pkey: allocate each one with the given
 * rights and then free it again, so all pkeys end up (re)initialised
 * with those rights before the test threads start.
 */
static void reset_pkeys(unsigned long rights)
{
	int allocated[NR_PKEYS];
	int idx;
	/* Exhaust the pkey space so every key gets the requested rights */
	for (idx = 0; idx < NR_PKEYS; idx++)
		allocated[idx] = sys_pkey_alloc(0, rights);
	/* Hand them all back */
	for (idx = 0; idx < NR_PKEYS; idx++)
		sys_pkey_free(allocated[idx]);
}
/*
 * Driver: map an executable page of no-ops ending in a blr, clear all
 * pkey restrictions, then run three thread pairs (protect vs
 * protect-and-{read,write,execute}) that race to re-protect the page,
 * verifying via segv_handler() that siginfo always attributes pkey
 * faults to the restrictive pkey.  Returns 0 on success.
 */
static int test(void)
{
	pthread_t prot_thread, pacc_thread;
	struct sigaction act;
	pthread_attr_t attr;
	size_t numinsns;
	struct region r;
	int ret, i;
	srand(time(NULL));
	/* Skip (not fail) on kernels/CPUs without pkey support */
	ret = pkeys_unsupported();
	if (ret)
		return ret;
	/* Allocate the region */
	r.size = getpagesize();
	r.base = mmap(NULL, r.size, PROT_RWX,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	FAIL_IF(r.base == MAP_FAILED);
	/*
	 * Fill the region with no-ops with a branch at the end
	 * for returning to the caller
	 */
	numinsns = r.size / sizeof(r.base[0]);
	for (i = 0; i < numinsns - 1; i++)
		r.base[i] = PPC_INST_NOP;
	r.base[i] = PPC_INST_BLR;
	/* Setup SIGSEGV handler */
	act.sa_handler = 0;
	act.sa_sigaction = segv_handler;
	FAIL_IF(sigprocmask(SIG_SETMASK, 0, &act.sa_mask) != 0);
	act.sa_flags = SA_SIGINFO;
	act.sa_restorer = 0;
	FAIL_IF(sigaction(SIGSEGV, &act, NULL) != 0);
	/*
	 * For these tests, the parent process should clear all bits of
	 * AMR and IAMR, i.e. impose no restrictions, for all available
	 * pkeys. This will be the base for the initial AMR and IAMR
	 * values for all the test thread pairs.
	 *
	 * If the AMR and IAMR bits of all available pkeys are cleared
	 * before running the tests and a fault is generated when
	 * attempting to read, write or execute instructions from a
	 * pkey protected region, the pkey responsible for this must be
	 * the one from the protect-and-access thread since the other
	 * one is fully permissive. Despite that, if the pkey reported
	 * by siginfo is not the restrictive pkey, then there must be a
	 * kernel bug.
	 */
	reset_pkeys(0);
	/* Setup barrier for protect and protect-and-access threads */
	FAIL_IF(pthread_attr_init(&attr) != 0);
	FAIL_IF(pthread_barrier_init(&iteration_barrier, NULL, 2) != 0);
	/* Setup and start protect and protect-and-read threads */
	puts("starting thread pair (protect, protect-and-read)");
	r.rights = PKEY_DISABLE_ACCESS;
	FAIL_IF(pthread_create(&prot_thread, &attr, &protect, &r) != 0);
	FAIL_IF(pthread_create(&pacc_thread, &attr, &protect_access, &r) != 0);
	FAIL_IF(pthread_join(prot_thread, NULL) != 0);
	FAIL_IF(pthread_join(pacc_thread, NULL) != 0);
	/* Setup and start protect and protect-and-write threads */
	puts("starting thread pair (protect, protect-and-write)");
	r.rights = PKEY_DISABLE_WRITE;
	FAIL_IF(pthread_create(&prot_thread, &attr, &protect, &r) != 0);
	FAIL_IF(pthread_create(&pacc_thread, &attr, &protect_access, &r) != 0);
	FAIL_IF(pthread_join(prot_thread, NULL) != 0);
	FAIL_IF(pthread_join(pacc_thread, NULL) != 0);
	/* Setup and start protect and protect-and-execute threads */
	puts("starting thread pair (protect, protect-and-execute)");
	r.rights = PKEY_DISABLE_EXECUTE;
	FAIL_IF(pthread_create(&prot_thread, &attr, &protect, &r) != 0);
	FAIL_IF(pthread_create(&pacc_thread, &attr, &protect_access, &r) != 0);
	FAIL_IF(pthread_join(prot_thread, NULL) != 0);
	FAIL_IF(pthread_join(pacc_thread, NULL) != 0);
	/* Cleanup */
	FAIL_IF(pthread_attr_destroy(&attr) != 0);
	FAIL_IF(pthread_barrier_destroy(&iteration_barrier) != 0);
	munmap(r.base, r.size);
	return 0;
}
int main(void)
{
	/* Run under the powerpc selftest harness; it prints pass/skip/fail */
	return test_harness(test, "pkey_siginfo");
}
| linux-master | tools/testing/selftests/powerpc/mm/pkey_siginfo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test that loads/stores expand the stack segment, or trigger a SEGV, in
* various conditions.
*
* Based on test code by Tom Lane.
*/
#undef NDEBUG
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#define _KB (1024)
#define _MB (1024 * 1024)
volatile char *stack_top_ptr;
volatile unsigned long stack_top_sp;
volatile char c;
enum access_type {
LOAD,
STORE,
};
/*
* Consume stack until the stack pointer is below @target_sp, then do an access
* (load or store) at offset @delta from either the base of the stack or the
* current stack pointer.
*/
__attribute__ ((noinline))
int consume_stack(unsigned long target_sp, unsigned long stack_high, int delta, enum access_type type)
{
	unsigned long target;
	char stack_cur;
	/* Recurse until this frame's local sits at or below target_sp */
	if ((unsigned long)&stack_cur > target_sp)
		return consume_stack(target_sp, stack_high, delta, type);
	else {
		// We don't really need this, but without it GCC might not
		// generate a recursive call above.
		stack_top_ptr = &stack_cur;
#ifdef __powerpc__
		/* Record the actual stack pointer (r1 on powerpc, rsp on x86) */
		asm volatile ("mr %[sp], %%r1" : [sp] "=r" (stack_top_sp));
#else
		asm volatile ("mov %%rsp, %[sp]" : [sp] "=r" (stack_top_sp));
#endif
		/* Access the byte @delta below the top of the stack region */
		target = stack_high - delta + 1;
		volatile char *p = (char *)target;
		if (type == STORE)
			*p = c;
		else
			c = *p;
		// Do something to prevent the stack frame being popped prior to
		// our access above.
		getpid();
	}
	return 0;
}
static int search_proc_maps(char *needle, unsigned long *low, unsigned long *high)
{
unsigned long start, end;
static char buf[4096];
char name[128];
FILE *f;
int rc;
f = fopen("/proc/self/maps", "r");
if (!f) {
perror("fopen");
return -1;
}
while (fgets(buf, sizeof(buf), f)) {
rc = sscanf(buf, "%lx-%lx %*c%*c%*c%*c %*x %*d:%*d %*d %127s\n",
&start, &end, name);
if (rc == 2)
continue;
if (rc != 3) {
printf("sscanf errored\n");
rc = -1;
break;
}
if (strstr(name, needle)) {
*low = start;
*high = end - 1;
rc = 0;
break;
}
}
fclose(f);
return rc;
}
/*
 * Child process body: locate the stack VMA, burn @stack_used bytes of
 * stack, then access @delta bytes below the stack top.  Prints a
 * success line and returns 0 if the access did not fault (a faulting
 * access kills the child with a signal instead).
 */
int child(unsigned int stack_used, int delta, enum access_type type)
{
	unsigned long low, stack_high;
	assert(search_proc_maps("[stack]", &low, &stack_high) == 0);
	assert(consume_stack(stack_high - stack_used, stack_high, delta, type) == 0);
	printf("Access OK: %s delta %-7d used size 0x%06x stack high 0x%lx top_ptr %p top sp 0x%lx actual used 0x%lx\n",
	       type == LOAD ? "load" : "store", delta, stack_used, stack_high,
	       stack_top_ptr, stack_top_sp, stack_high - stack_top_sp + 1);
	return 0;
}
/*
 * Fork a child to perform one access and classify the outcome:
 * returns 0 if the access succeeded, 1 if the child died from a
 * signal (i.e. it faulted).  A non-zero *exit* is asserted against —
 * the child is only allowed to succeed or be killed by a signal.
 */
static int test_one(unsigned int stack_used, int delta, enum access_type type)
{
	pid_t pid;
	int rc;
	pid = fork();
	if (pid == 0)
		exit(child(stack_used, delta, type));
	assert(waitpid(pid, &rc, 0) != -1);
	if (WIFEXITED(rc) && WEXITSTATUS(rc) == 0)
		return 0;
	// We don't expect a non-zero exit that's not a signal
	assert(!WIFEXITED(rc));
	printf("Faulted: %s delta %-7d used size 0x%06x signal %d\n",
	       type == LOAD ? "load" : "store", delta, stack_used,
	       WTERMSIG(rc));
	return 1;
}
// This is fairly arbitrary but is well below any of the targets below,
// so that the delta between the stack pointer and the target is large.
#define DEFAULT_SIZE (32 * _KB)
/*
 * Exercise one access type across the whole stack rlimit: every
 * page-aligned delta up to the limit must succeed, and one byte past
 * the limit must fault.
 */
static void test_one_type(enum access_type type, unsigned long page_size, unsigned long rlim_cur)
{
	unsigned long delta;
	// We should be able to access anywhere within the rlimit
	for (delta = page_size; delta <= rlim_cur; delta += page_size)
		assert(test_one(DEFAULT_SIZE, delta, type) == 0);
	/* Re-check the exact boundary (loop may skip it if rlim_cur isn't page-aligned) */
	assert(test_one(DEFAULT_SIZE, rlim_cur, type) == 0);
	// But if we go past the rlimit it should fail
	assert(test_one(DEFAULT_SIZE, rlim_cur + 1, type) != 0);
}
/*
 * Top-level driver: read the stack rlimit and verify both loads and
 * stores expand the stack up to (and fault beyond) that limit.
 */
static int test(void)
{
	unsigned long page_size;
	struct rlimit rlimit;
	page_size = getpagesize();
	getrlimit(RLIMIT_STACK, &rlimit);
	printf("Stack rlimit is 0x%lx\n", rlimit.rlim_cur);
	printf("Testing loads ...\n");
	test_one_type(LOAD, page_size, rlimit.rlim_cur);
	printf("Testing stores ...\n");
	test_one_type(STORE, page_size, rlimit.rlim_cur);
	printf("All OK\n");
	return 0;
}
#ifdef __powerpc__
#include "utils.h"
/* On powerpc, run under the selftest harness so results are reported */
int main(void)
{
	return test_harness(test, "stack_expansion_ldst");
}
#else
/* Elsewhere (the test is portable), just run it directly */
int main(void)
{
	return test();
}
#endif
| linux-master | tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2018, Michael Ellerman, IBM Corp.
*
* Test that an out-of-bounds branch to counter behaves as expected.
*/
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <ucontext.h>
#include <unistd.h>
#include "utils.h"
#define BAD_NIP 0x788c545a18000000ull
static struct pt_regs signal_regs;
static jmp_buf setjmp_env;
/* Snapshot the GPR state from a signal's ucontext into signal_regs. */
static void save_regs(ucontext_t *ctxt)
{
	struct pt_regs *regs = ctxt->uc_mcontext.regs;
	memcpy(&signal_regs, regs, sizeof(signal_regs));
}
/* SIGSEGV: save the faulting register state, then resume via longjmp. */
static void segv_handler(int signum, siginfo_t *info, void *ctxt_v)
{
	save_regs(ctxt_v);
	longjmp(setjmp_env, 1);
}
/* SIGUSR2: capture a baseline register snapshot (sent before the test). */
static void usr2_handler(int signum, siginfo_t *info, void *ctxt_v)
{
	save_regs(ctxt_v);
}
/* Harmless branch target: proves the indirect call path works at all. */
static int ok(void)
{
	int rc = 0;
	printf("Everything is OK in here.\n");
	return rc;
}
/*
 * Fill the non-volatile GPRs r15-r29 with recognisable poison values
 * (REG_POISON in the top and low halfwords with the register number in
 * between) so check_regs() can later verify they survived the SEGV.
 */
#define REG_POISON	0x5a5a
#define POISONED_REG(n)	((((unsigned long)REG_POISON) << 48) | ((n) << 32) | \
			 (((unsigned long)REG_POISON) << 16) | (n))
static inline void poison_regs(void)
{
	#define POISON_REG(n)	\
	  "lis  " __stringify(n) "," __stringify(REG_POISON) ";" \
	  "addi " __stringify(n) "," __stringify(n) "," __stringify(n) ";" \
	  "sldi " __stringify(n) "," __stringify(n) ", 32 ;" \
	  "oris " __stringify(n) "," __stringify(n) "," __stringify(REG_POISON) ";" \
	  "addi " __stringify(n) "," __stringify(n) "," __stringify(n) ";"
	asm (POISON_REG(15)
	     POISON_REG(16)
	     POISON_REG(17)
	     POISON_REG(18)
	     POISON_REG(19)
	     POISON_REG(20)
	     POISON_REG(21)
	     POISON_REG(22)
	     POISON_REG(23)
	     POISON_REG(24)
	     POISON_REG(25)
	     POISON_REG(26)
	     POISON_REG(27)
	     POISON_REG(28)
	     POISON_REG(29)
	     : // inputs
	     : // outputs
	     : "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25",
	       "26", "27", "28", "29"
	);
	#undef POISON_REG
}
/* Verify the poisoned GPRs r15-r29 were preserved in the signal frame. */
static int check_regs(void)
{
	unsigned long i;
	for (i = 15; i <= 29; i++)
		FAIL_IF(signal_regs.gpr[i] != POISONED_REG(i));
	printf("Regs OK\n");
	return 0;
}
/* Print all 32 GPRs from the captured signal frame, four per line. */
static void dump_regs(void)
{
	int reg = 0;
	while (reg < 32) {
		printf("r%02d 0x%016lx  r%02d 0x%016lx  " \
		       "r%02d 0x%016lx  r%02d 0x%016lx\n",
		       reg, signal_regs.gpr[reg],
		       reg + 1, signal_regs.gpr[reg + 1],
		       reg + 2, signal_regs.gpr[reg + 2],
		       reg + 3, signal_regs.gpr[reg + 3]);
		reg += 4;
	}
}
#ifdef _CALL_AIXDESC
/*
 * ELFv1 (AIX descriptor) ABI: a function pointer points at a
 * descriptor, so wrap the bogus address in one.
 */
struct opd {
	unsigned long ip;
	unsigned long toc;
	unsigned long env;
};
static struct opd bad_opd = {
	.ip = BAD_NIP,
};
#define BAD_FUNC (&bad_opd)
#else
/* ELFv2: a function pointer is just the entry address itself */
#define BAD_FUNC BAD_NIP
#endif
/*
 * Branch to a wild address (BAD_NIP) through a function pointer and
 * verify that the resulting SEGV reports BAD_NIP as the NIP and that
 * the poisoned non-volatile GPRs were preserved across the fault.
 */
int test_wild_bctr(void)
{
	int (*func_ptr)(void);
	struct sigaction segv = {
		.sa_sigaction = segv_handler,
		.sa_flags = SA_SIGINFO
	};
	struct sigaction usr2 = {
		.sa_sigaction = usr2_handler,
		.sa_flags = SA_SIGINFO
	};
	FAIL_IF(sigaction(SIGSEGV, &segv, NULL));
	FAIL_IF(sigaction(SIGUSR2, &usr2, NULL));
	bzero(&signal_regs, sizeof(signal_regs));
	/* First pass: sanity-check a good call, then take the wild branch */
	if (setjmp(setjmp_env) == 0) {
		func_ptr = ok;
		func_ptr();
		/* Capture a baseline register dump via the USR2 handler */
		kill(getpid(), SIGUSR2);
		printf("Regs before:\n");
		dump_regs();
		bzero(&signal_regs, sizeof(signal_regs));
		poison_regs();
		func_ptr = (int (*)(void))BAD_FUNC;
		func_ptr();
		FAIL_IF(1); /* we didn't segv? */
	}
	/* We longjmp'd here from the SEGV handler */
	FAIL_IF(signal_regs.nip != BAD_NIP);
	printf("All good - took SEGV as expected branching to 0x%llx\n", BAD_NIP);
	dump_regs();
	FAIL_IF(check_regs());
	return 0;
}
int main(void)
{
	/* Run under the powerpc selftest harness; it prints pass/skip/fail */
	return test_harness(test_wild_bctr, "wild_bctr");
}
| linux-master | tools/testing/selftests/powerpc/mm/wild_bctr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2022, Nicholas Miehlbradt, IBM Corporation
* based on pkey_exec_prot.c
*
* Test if applying execute protection on pages works as expected.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <sys/mman.h>
#include "pkeys.h"
#define PPC_INST_NOP 0x60000000
#define PPC_INST_TRAP 0x7fe00008
#define PPC_INST_BLR 0x4e800020
static volatile sig_atomic_t fault_code;
static volatile sig_atomic_t remaining_faults;
static volatile unsigned int *fault_addr;
static unsigned long pgsize, numinsns;
static unsigned int *insns;
static bool pkeys_supported;
/*
 * Decide whether a SIGSEGV si_code is acceptable for this test: a
 * plain access error always is, and a pkey error is too when the
 * system supports pkeys (detailed pkey behaviour is covered by the
 * pkey_exec_prot selftest).
 */
static bool is_fault_expected(int fault_code)
{
	switch (fault_code) {
	case SEGV_ACCERR:
		return true;
	case SEGV_PKUERR:
		return pkeys_supported;
	default:
		return false;
	}
}
/*
 * SIGTRAP handler: reached only if the trap planted at insns[0] was
 * executed, i.e. the expected overwrite with a no-op never happened.
 * Always a fatal test failure.
 */
static void trap_handler(int signum, siginfo_t *sinfo, void *ctx)
{
	/* Check if this fault originated from the expected address */
	if (sinfo->si_addr != (void *)fault_addr)
		sigsafe_err("got a fault for an unexpected address\n");
	/* Hitting the trap at all means the test failed */
	_exit(1);
}
/*
 * SIGSEGV handler: validates the fault address and code against the
 * expectations in the globals, then restores full rwx permissions on
 * the region so the interrupted access can resume, and decrements
 * remaining_faults.  Mismatches are fatal via _exit(1).
 */
static void segv_handler(int signum, siginfo_t *sinfo, void *ctx)
{
	fault_code = sinfo->si_code;
	/* Check if this fault originated from the expected address */
	if (sinfo->si_addr != (void *)fault_addr) {
		sigsafe_err("got a fault for an unexpected address\n");
		_exit(1);
	}
	/* Check if too many faults have occurred for a single test case */
	if (!remaining_faults) {
		sigsafe_err("got too many faults for the same address\n");
		_exit(1);
	}
	/* Restore permissions in order to continue */
	if (is_fault_expected(fault_code)) {
		if (mprotect(insns, pgsize, PROT_READ | PROT_WRITE | PROT_EXEC)) {
			sigsafe_err("failed to set access permissions\n");
			_exit(1);
		}
	} else {
		sigsafe_err("got a fault with an unexpected code\n");
		_exit(1);
	}
	remaining_faults--;
}
/*
 * mprotect() the region to @rights and branch into it via bctrl.
 * Expects exactly one (handled) fault when PROT_EXEC is absent from
 * @rights and none when it is present.  Returns 0 on the expected
 * outcome, non-zero otherwise.
 */
static int check_exec_fault(int rights)
{
	/*
	 * Jump to the executable region.
	 *
	 * The first iteration also checks if the overwrite of the
	 * first instruction word from a trap to a no-op succeeded.
	 */
	fault_code = -1;
	remaining_faults = 0;
	if (!(rights & PROT_EXEC))
		remaining_faults = 1;
	FAIL_IF(mprotect(insns, pgsize, rights) != 0);
	asm volatile("mtctr %0; bctrl" : : "r"(insns));
	/* The handler must have consumed exactly the expected faults */
	FAIL_IF(remaining_faults != 0);
	if (!(rights & PROT_EXEC))
		FAIL_IF(!is_fault_expected(fault_code));
	return 0;
}
/*
 * Core test body: on a Radix MMU, verify that mprotect() execute
 * permission behaves correctly for a page of instructions — reads and
 * writes fault on an execute-only page, and execution faults for every
 * protection combination lacking PROT_EXEC while succeeding for every
 * combination that includes it.  Returns 0 on success.
 */
static int test(void)
{
	struct sigaction segv_act, trap_act;
	int i;
	/* Skip the test if the CPU doesn't support Radix */
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00));
	/* Check if pkeys are supported */
	pkeys_supported = pkeys_unsupported() == 0;
	/* Setup SIGSEGV handler */
	segv_act.sa_handler = 0;
	segv_act.sa_sigaction = segv_handler;
	FAIL_IF(sigprocmask(SIG_SETMASK, 0, &segv_act.sa_mask) != 0);
	segv_act.sa_flags = SA_SIGINFO;
	segv_act.sa_restorer = 0;
	FAIL_IF(sigaction(SIGSEGV, &segv_act, NULL) != 0);
	/* Setup SIGTRAP handler */
	trap_act.sa_handler = 0;
	trap_act.sa_sigaction = trap_handler;
	FAIL_IF(sigprocmask(SIG_SETMASK, 0, &trap_act.sa_mask) != 0);
	trap_act.sa_flags = SA_SIGINFO;
	trap_act.sa_restorer = 0;
	FAIL_IF(sigaction(SIGTRAP, &trap_act, NULL) != 0);
	/* Setup executable region */
	pgsize = getpagesize();
	numinsns = pgsize / sizeof(unsigned int);
	insns = (unsigned int *)mmap(NULL, pgsize, PROT_READ | PROT_WRITE,
				     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	FAIL_IF(insns == MAP_FAILED);
	/* Write the instruction words */
	for (i = 1; i < numinsns - 1; i++)
		insns[i] = PPC_INST_NOP;
	/*
	 * Set the first instruction as an unconditional trap. If
	 * the last write to this address succeeds, this should
	 * get overwritten by a no-op.
	 */
	insns[0] = PPC_INST_TRAP;
	/*
	 * Later, to jump to the executable region, we use a branch
	 * and link instruction (bctrl) which sets the return address
	 * automatically in LR. Use that to return back.
	 */
	insns[numinsns - 1] = PPC_INST_BLR;
	/*
	 * Pick the first instruction's address from the executable
	 * region.
	 */
	fault_addr = insns;
	/*
	 * Read an instruction word from the address when the page
	 * is execute only. This should generate an access fault.
	 */
	fault_code = -1;
	remaining_faults = 1;
	printf("Testing read on --x, should fault...");
	FAIL_IF(mprotect(insns, pgsize, PROT_EXEC) != 0);
	i = *fault_addr;
	FAIL_IF(remaining_faults != 0 || !is_fault_expected(fault_code));
	printf("ok!\n");
	/*
	 * Write an instruction word to the address when the page
	 * execute only. This should also generate an access fault.
	 */
	fault_code = -1;
	remaining_faults = 1;
	printf("Testing write on --x, should fault...");
	FAIL_IF(mprotect(insns, pgsize, PROT_EXEC) != 0);
	*fault_addr = PPC_INST_NOP;
	FAIL_IF(remaining_faults != 0 || !is_fault_expected(fault_code));
	printf("ok!\n");
	/* Execution must fault for every combination lacking PROT_EXEC ... */
	printf("Testing exec on ---, should fault...");
	FAIL_IF(check_exec_fault(PROT_NONE));
	printf("ok!\n");
	printf("Testing exec on r--, should fault...");
	FAIL_IF(check_exec_fault(PROT_READ));
	printf("ok!\n");
	printf("Testing exec on -w-, should fault...");
	FAIL_IF(check_exec_fault(PROT_WRITE));
	printf("ok!\n");
	printf("Testing exec on rw-, should fault...");
	FAIL_IF(check_exec_fault(PROT_READ | PROT_WRITE));
	printf("ok!\n");
	/* ... and succeed for every combination that includes it */
	printf("Testing exec on --x, should succeed...");
	FAIL_IF(check_exec_fault(PROT_EXEC));
	printf("ok!\n");
	printf("Testing exec on r-x, should succeed...");
	FAIL_IF(check_exec_fault(PROT_READ | PROT_EXEC));
	printf("ok!\n");
	printf("Testing exec on -wx, should succeed...");
	FAIL_IF(check_exec_fault(PROT_WRITE | PROT_EXEC));
	printf("ok!\n");
	printf("Testing exec on rwx, should succeed...");
	FAIL_IF(check_exec_fault(PROT_READ | PROT_WRITE | PROT_EXEC));
	printf("ok!\n");
	/* Cleanup */
	FAIL_IF(munmap((void *)insns, pgsize));
	return 0;
}
int main(void)
{
	/* Run under the powerpc selftest harness; it prints pass/skip/fail */
	return test_harness(test, "exec_prot");
}
| linux-master | tools/testing/selftests/powerpc/mm/exec_prot.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2019, Michael Ellerman, IBM Corp.
//
// Test that out-of-bounds reads/writes behave as expected.
#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "utils.h"
// Old distros (Ubuntu 16.04 at least) don't define this
#ifndef SEGV_BNDERR
#define SEGV_BNDERR 3
#endif
// 64-bit kernel is always here
#define PAGE_OFFSET (0xcul << 60)
static unsigned long kernel_virt_end;
static volatile int fault_code;
static volatile unsigned long fault_addr;
static jmp_buf setjmp_env;
/* SIGSEGV: record the fault code/address, then resume via siglongjmp. */
static void segv_handler(int n, siginfo_t *info, void *ctxt_v)
{
	fault_code = info->si_code;
	fault_addr = (unsigned long)info->si_addr;
	siglongjmp(setjmp_env, 1);
}
/*
 * Perform a load or store at @p and verify it SEGVs with a sensible
 * si_code: SEGV_MAPERR is only acceptable inside the valid kernel
 * address range (a page fault rather than an SLB miss), otherwise
 * SEGV_BNDERR is expected.  Returns non-zero if no fault occurred or
 * the fault was misclassified.
 */
int bad_access(char *p, bool write)
{
	char x = 0;
	fault_code = 0;
	fault_addr = 0;
	/* Returns non-zero here when resumed from the SEGV handler */
	if (sigsetjmp(setjmp_env, 1) == 0) {
		if (write)
			*p = 1;
		else
			x = *p;
		printf("Bad - no SEGV! (%c)\n", x);
		return 1;
	}
	// If we see MAPERR that means we took a page fault rather than an SLB
	// miss. We only expect to take page faults for addresses within the
	// valid kernel range.
	FAIL_IF(fault_code == SEGV_MAPERR && \
		(fault_addr < PAGE_OFFSET || fault_addr >= kernel_virt_end));
	FAIL_IF(fault_code != SEGV_MAPERR && fault_code != SEGV_BNDERR);
	return 0;
}
/*
 * Probe a grid of kernel-space addresses (one per region/offset
 * combination, page-aligned) and verify every access faults with an
 * acceptable si_code via bad_access().  The region geometry depends on
 * the MMU type (hash vs radix) and page size.
 */
static int test(void)
{
	unsigned long i, j, addr, region_shift, page_shift, page_size;
	struct sigaction sig;
	bool hash_mmu;
	sig = (struct sigaction) {
		.sa_sigaction = segv_handler,
		.sa_flags = SA_SIGINFO,
	};
	FAIL_IF(sigaction(SIGSEGV, &sig, NULL) != 0);
	FAIL_IF(using_hash_mmu(&hash_mmu));
	page_size = sysconf(_SC_PAGESIZE);
	if (page_size == (64 * 1024))
		page_shift = 16;
	else
		page_shift = 12;
	if (page_size == (64 * 1024) || !hash_mmu) {
		region_shift = 52;
		// We have 7 512T regions (4 kernel linear, vmalloc, io, vmemmap)
		kernel_virt_end = PAGE_OFFSET + (7 * (512ul << 40));
	} else if (page_size == (4 * 1024) && hash_mmu) {
		region_shift = 46;
		// We have 7 64T regions (4 kernel linear, vmalloc, io, vmemmap)
		kernel_virt_end = PAGE_OFFSET + (7 * (64ul << 40));
	} else
		FAIL_IF(true);
	printf("Using %s MMU, PAGE_SIZE = %dKB start address 0x%016lx\n",
	       hash_mmu ? "hash" : "radix",
	       (1 << page_shift) >> 10,
	       1ul << region_shift);
	// This generates access patterns like:
	//   0x0010000000000000
	//   0x0010000000010000
	//   0x0010000000020000
	//   ...
	//   0x0014000000000000
	//   0x0018000000000000
	//   0x0020000000000000
	//   0x0020000000010000
	//   0x0020000000020000
	//   ...
	//   0xf400000000000000
	//   0xf800000000000000
	for (i = 1; i <= ((0xful << 60) >> region_shift); i++) {
		for (j = page_shift - 1; j < 60; j++) {
			unsigned long base, delta;
			base = i << region_shift;
			delta = 1ul << j;
			if (delta >= base)
				break;
			addr = (base | delta) & ~((1 << page_shift) - 1);
			/* Both a load and a store must fault cleanly */
			FAIL_IF(bad_access((char *)addr, false));
			FAIL_IF(bad_access((char *)addr, true));
		}
	}
	return 0;
}
int main(void)
{
	/* The address sweep can be slow; allow a generous timeout */
	test_harness_set_timeout(300);
	return test_harness(test, "bad_accesses");
}
| linux-master | tools/testing/selftests/powerpc/mm/bad_accesses.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2016, Michael Ellerman, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <asm/cputable.h>
#include "utils.h"
#define SIZE (64 * 1024)
/*
 * Verify the kernel accepts an mmap() with PROT_SAO (Strong Access
 * Ordering) on CPUs where it exists, and that the mapping is usable.
 */
int test_prot_sao(void)
{
	char *p;
	/*
	 * SAO was introduced in 2.06 and removed in 3.1. It's disabled in
	 * guests/LPARs by default, so also skip if we are running in a guest.
	 */
	SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06) ||
		have_hwcap2(PPC_FEATURE2_ARCH_3_1) ||
		access("/proc/device-tree/rtas/ibm,hypertas-functions", F_OK) == 0);
	/*
	 * Ensure we can ask for PROT_SAO.
	 * We can't really verify that it does the right thing, but at least we
	 * confirm the kernel will accept it.
	 */
	p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO,
		 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	FAIL_IF(p == MAP_FAILED);
	/* Write to the mapping, to at least cause a fault */
	memset(p, 0xaa, SIZE);
	return 0;
}
int main(void)
{
	/* Run under the powerpc selftest harness; it prints pass/skip/fail */
	return test_harness(test_prot_sao, "prot-sao");
}
| linux-master | tools/testing/selftests/powerpc/mm/prot_sao.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2017, Michael Ellerman, IBM Corp.
*/
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <link.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "utils.h"
#ifndef AT_L1I_CACHESIZE
#define AT_L1I_CACHESIZE 40
#define AT_L1I_CACHEGEOMETRY 41
#define AT_L1D_CACHESIZE 42
#define AT_L1D_CACHEGEOMETRY 43
#define AT_L2_CACHESIZE 44
#define AT_L2_CACHEGEOMETRY 45
#define AT_L3_CACHESIZE 46
#define AT_L3_CACHEGEOMETRY 47
#endif
/* Print one auxv cache-size value in hex, bytes and KiB. */
static void print_size(const char *label, uint32_t val)
{
	uint32_t kib = val / 1024;

	printf("%s cache size: %#10x %10dB %10dK\n", label, val, val, kib);
}
/*
 * Print one auxv cache-geometry value: low 16 bits are the line size,
 * high 16 bits the associativity (0 means fully associative).
 */
static void print_geo(const char *label, uint32_t val)
{
	uint16_t ways = val >> 16;

	printf("%s line size: %#10x ", label, val & 0xFFFF);

	if (ways == 0)
		printf("fully");
	else
		printf("%u-way", ways);

	printf(" associative\n");
}
static int test_cache_shape()
{
static char buffer[4096];
ElfW(auxv_t) *p;
int found;
FAIL_IF(read_auxv(buffer, sizeof(buffer)));
found = 0;
p = find_auxv_entry(AT_L1I_CACHESIZE, buffer);
if (p) {
found++;
print_size("L1I ", (uint32_t)p->a_un.a_val);
}
p = find_auxv_entry(AT_L1I_CACHEGEOMETRY, buffer);
if (p) {
found++;
print_geo("L1I ", (uint32_t)p->a_un.a_val);
}
p = find_auxv_entry(AT_L1D_CACHESIZE, buffer);
if (p) {
found++;
print_size("L1D ", (uint32_t)p->a_un.a_val);
}
p = find_auxv_entry(AT_L1D_CACHEGEOMETRY, buffer);
if (p) {
found++;
print_geo("L1D ", (uint32_t)p->a_un.a_val);
}
p = find_auxv_entry(AT_L2_CACHESIZE, buffer);
if (p) {
found++;
print_size("L2 ", (uint32_t)p->a_un.a_val);
}
p = find_auxv_entry(AT_L2_CACHEGEOMETRY, buffer);
if (p) {
found++;
print_geo("L2 ", (uint32_t)p->a_un.a_val);
}
p = find_auxv_entry(AT_L3_CACHESIZE, buffer);
if (p) {
found++;
print_size("L3 ", (uint32_t)p->a_un.a_val);
}
p = find_auxv_entry(AT_L3_CACHEGEOMETRY, buffer);
if (p) {
found++;
print_geo("L3 ", (uint32_t)p->a_un.a_val);
}
/* If we found none we're probably on a system where they don't exist */
SKIP_IF(found == 0);
/* But if we found any, we expect to find them all */
FAIL_IF(found != 8);
return 0;
}
int main(void)
{
	/* Run the cache-shape auxv check under the selftest harness */
	return test_harness(test_cache_shape, "cache_shape");
}
| linux-master | tools/testing/selftests/powerpc/cache_shape/cache_shape.c |
// SPDX-License-Identifier: GPL-2.0
#include <malloc.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <time.h>
#include "utils.h"
#define SIZE 256
#define ITERATIONS 10000
#define LARGE_SIZE (5 * 1024)
#define LARGE_ITERATIONS 1000
#define LARGE_MAX_OFFSET 32
#define LARGE_SIZE_START 4096
/* This is big enough to fit LARGE_SIZE and works on 4K & 64K kernels */
#define MAP_SIZE (64 * 1024)
#define MAX_OFFSET_DIFF_S1_S2 48
int vmx_count;
/*
 * Presumably called by the memcmp implementation under test when it
 * starts using VMX — TODO confirm against memcmp_64.S.  Counts the
 * call so test_one() can verify enter/exit stay balanced; returns
 * non-zero to indicate VMX use is allowed.
 */
int enter_vmx_ops(void)
{
	vmx_count++;
	return 1;
}
/*
 * Counterpart of enter_vmx_ops(); decrements the balance counter
 * checked by test_one() after every comparison.
 */
void exit_vmx_ops(void)
{
	vmx_count--;
}
int test_memcmp(const void *s1, const void *s2, size_t n);
/* test all offsets and lengths */
/*
 * Compare test_memcmp() against the C library memcmp() for every
 * offset in [0, max_offset) and every length in
 * [size_start, max_size - offset).
 *
 * Only the sign of the results must agree (or both be zero), since
 * memcmp() guarantees no more than that.  On a mismatch the compared
 * bytes are dumped and the test aborts.  After each comparison we also
 * check that enter_vmx_ops()/exit_vmx_ops() calls remained balanced.
 *
 * Fix: the hex dump now casts each byte to unsigned char; with a
 * signed char, %02x sign-extends negative bytes and prints e.g.
 * "ffffffaa" instead of "aa".
 */
static void test_one(char *s1, char *s2, unsigned long max_offset,
		unsigned long size_start, unsigned long max_size)
{
	unsigned long offset, size;

	for (offset = 0; offset < max_offset; offset++) {
		for (size = size_start; size < (max_size - offset); size++) {
			int x, y;
			unsigned long i;

			y = memcmp(s1+offset, s2+offset, size);
			x = test_memcmp(s1+offset, s2+offset, size);

			if (((x ^ y) < 0) &&	/* Trick to compare sign */
				((x | y) != 0)) {	/* check for zero */
				printf("memcmp returned %d, should have returned %d (offset %ld size %ld)\n", x, y, offset, size);

				for (i = offset; i < offset+size; i++)
					printf("%02x ", (unsigned char)s1[i]);
				printf("\n");

				for (i = offset; i < offset+size; i++)
					printf("%02x ", (unsigned char)s2[i]);
				printf("\n");

				abort();
			}

			if (vmx_count != 0) {
				printf("vmx enter/exit not paired.(offset:%ld size:%ld s1:%p s2:%p vc:%d\n",
					offset, size, s1, s2, vmx_count);
				printf("\n");
				abort();
			}
		}
	}
}
/*
 * Run one pass of the memcmp fuzz test.
 *
 * @islarge: selects the small (SIZE/ITERATIONS) or large
 *	     (LARGE_SIZE/LARGE_ITERATIONS) configuration.
 *
 * The two buffers are placed so that each one ends exactly at an
 * unmapped page, so any overread by the memcmp under test faults.
 * Each iteration fills s1 with random bytes, copies a window into s2,
 * then introduces differences: a single changed byte in the first
 * phase, roughly 1/8 of the bytes in the second.
 */
static int testcase(bool islarge)
{
	unsigned long i, comp_size, alloc_size;
	char *p, *s1, *s2;
	int iterations;
	comp_size = (islarge ? LARGE_SIZE : SIZE);
	/* Extra room so the compared windows can start at random offsets */
	alloc_size = comp_size + MAX_OFFSET_DIFF_S1_S2;
	iterations = islarge ? LARGE_ITERATIONS : ITERATIONS;
	p = mmap(NULL, 4 * MAP_SIZE, PROT_READ | PROT_WRITE,
	MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	FAIL_IF(p == MAP_FAILED);
	/* Put s1/s2 at the end of a page */
	s1 = p + MAP_SIZE - alloc_size;
	s2 = p + 3 * MAP_SIZE - alloc_size;
	/* And unmap the subsequent page to force a fault if we overread */
	munmap(p + MAP_SIZE, MAP_SIZE);
	munmap(p + 3 * MAP_SIZE, MAP_SIZE);
	srandom(time(0));
	/* Phase 1: buffers differing in exactly one byte */
	for (i = 0; i < iterations; i++) {
		unsigned long j;
		unsigned long change;
		char *rand_s1 = s1;
		char *rand_s2 = s2;
		for (j = 0; j < alloc_size; j++)
			s1[j] = random();
		/* Compare from random (possibly different) starting offsets */
		rand_s1 += random() % MAX_OFFSET_DIFF_S1_S2;
		rand_s2 += random() % MAX_OFFSET_DIFF_S1_S2;
		memcpy(rand_s2, rand_s1, comp_size);
		/* change one byte */
		change = random() % comp_size;
		rand_s2[change] = random() & 0xff;
		if (islarge)
			test_one(rand_s1, rand_s2, LARGE_MAX_OFFSET,
					LARGE_SIZE_START, comp_size);
		else
			test_one(rand_s1, rand_s2, SIZE, 0, comp_size);
	}
	srandom(time(0));
	/* Phase 2: buffers with many differing bytes */
	for (i = 0; i < iterations; i++) {
		unsigned long j;
		unsigned long change;
		char *rand_s1 = s1;
		char *rand_s2 = s2;
		for (j = 0; j < alloc_size; j++)
			s1[j] = random();
		rand_s1 += random() % MAX_OFFSET_DIFF_S1_S2;
		rand_s2 += random() % MAX_OFFSET_DIFF_S1_S2;
		memcpy(rand_s2, rand_s1, comp_size);
		/* change multiple bytes, 1/8 of total */
		for (j = 0; j < comp_size / 8; j++) {
			change = random() % comp_size;
			/*
			 * NOTE(review): indexes from s2 rather than rand_s2,
			 * so some changes can land before the compared window
			 * and the buffers may occasionally compare equal -
			 * confirm whether that is intentional.
			 */
			s2[change] = random() & 0xff;
		}
		if (islarge)
			test_one(rand_s1, rand_s2, LARGE_MAX_OFFSET,
					LARGE_SIZE_START, comp_size);
		else
			test_one(rand_s1, rand_s2, SIZE, 0, comp_size);
	}
	return 0;
}
/*
 * Run both memcmp fuzz configurations (small and large).
 *
 * Fix: the return values of testcase() were previously discarded, so a
 * failure inside it (e.g. mmap failing) was silently ignored and the
 * harness still reported success.  Propagate them via FAIL_IF.
 */
static int testcases(void)
{
#ifdef __powerpc64__
	// vcmpequd used in memcmp_64.S is v2.07
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
#endif

	FAIL_IF(testcase(0));
	FAIL_IF(testcase(1));

	return 0;
}
int main(void)
{
	/* The fuzz loops take a while; extend the default timeout */
	test_harness_set_timeout(300);
	return test_harness(testcases, "memcmp");
}
| linux-master | tools/testing/selftests/powerpc/stringloops/memcmp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copied from linux/lib/string.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <stddef.h>
/**
 * test_strlen - Find the length of a string
 * @s: The string to be sized
 *
 * Returns the number of characters before the terminating NUL.
 */
size_t test_strlen(const char *s)
{
	const char *end = s;

	while (*end != '\0')
		end++;

	return end - s;
}
| linux-master | tools/testing/selftests/powerpc/stringloops/string.c |
// SPDX-License-Identifier: GPL-2.0
#include <malloc.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "utils.h"
#define SIZE 256
#define ITERATIONS 1000
#define ITERATIONS_BENCH 100000
int test_strlen(const void *s);
/*
 * Compare test_strlen() (the implementation under test) against the C
 * library strlen() at every starting offset within the SIZE-byte
 * buffer @s.
 *
 * Fixes: on a mismatch the function previously only printed a message
 * and returned, so the harness still reported the test as passing;
 * abort() now makes mismatches fatal (matching the sibling memcmp
 * test).  The hex dump also casts to unsigned char so negative bytes
 * don't print sign-extended as "ffffffxx".
 */
static void test_one(char *s)
{
	unsigned long offset;

	for (offset = 0; offset < SIZE; offset++) {
		int x, y;
		unsigned long i;

		y = strlen(s + offset);
		x = test_strlen(s + offset);

		if (x != y) {
			printf("strlen() returned %d, should have returned %d (%p offset %ld)\n", x, y, s, offset);

			for (i = offset; i < SIZE; i++)
				printf("%02x ", (unsigned char)s[i]);
			printf("\n");

			abort();
		}
	}
}
/*
 * Time ITERATIONS_BENCH calls of test_strlen() on @s and print the
 * string length and the elapsed wall-clock time.
 */
static void bench_test(char *s)
{
	struct timespec start, end;
	double elapsed;
	int i;

	clock_gettime(CLOCK_MONOTONIC, &start);

	for (i = 0; i < ITERATIONS_BENCH; i++)
		test_strlen(s);

	clock_gettime(CLOCK_MONOTONIC, &end);

	elapsed = end.tv_sec - start.tv_sec +
		  (end.tv_nsec - start.tv_nsec) / 1e9;

	printf("len %3.3d : time = %.6f\n", test_strlen(s), elapsed);
}
/*
 * Exercise test_strlen() against a 128-byte-aligned buffer in three
 * phases:
 *  1. grow a string one random non-NUL character at a time, checking
 *     every offset after each step;
 *  2. ITERATIONS rounds of random non-NUL fill, walking the NUL
 *     terminator back one byte at a time across the final word;
 *  3. timed runs at full length and at several short lengths.
 *
 * NOTE(review): memalign() is obsolete; aligned_alloc()/posix_memalign()
 * are the modern replacements.  The buffer is never freed, which is
 * acceptable for a short-lived selftest.
 */
static int testcase(void)
{
	char *s;
	unsigned long i;
	s = memalign(128, SIZE);
	if (!s) {
		perror("memalign");
		exit(1);
	}
	srandom(1);
	memset(s, 0, SIZE);
	/* Phase 1: grow the string one non-NUL character at a time */
	for (i = 0; i < SIZE; i++) {
		char c;
		do {
			c = random() & 0x7f;
		} while (!c);
		s[i] = c;
		/*
		 * NOTE(review): at i == SIZE-1 the buffer contains no NUL
		 * within SIZE bytes, so strlen()/test_strlen() read past the
		 * allocation - confirm this is benign on this platform.
		 */
		test_one(s);
	}
	/* Phase 2: random fill, terminator walked across the last word */
	for (i = 0; i < ITERATIONS; i++) {
		unsigned long j;
		for (j = 0; j < SIZE; j++) {
			char c;
			do {
				c = random() & 0x7f;
			} while (!c);
			s[j] = c;
		}
		for (j = 0; j < sizeof(long); j++) {
			s[SIZE - 1 - j] = 0;
			test_one(s);
		}
	}
	/* Phase 3: benchmarks at decreasing string lengths */
	for (i = 0; i < SIZE; i++) {
		char c;
		do {
			c = random() & 0x7f;
		} while (!c);
		s[i] = c;
	}
	bench_test(s);
	s[16] = 0;
	bench_test(s);
	s[8] = 0;
	bench_test(s);
	s[4] = 0;
	bench_test(s);
	s[3] = 0;
	bench_test(s);
	s[2] = 0;
	bench_test(s);
	s[1] = 0;
	bench_test(s);
	return 0;
}
int main(void)
{
	/* Run the strlen correctness + benchmark suite under the harness */
	return test_harness(testcase, "strlen");
}
| linux-master | tools/testing/selftests/powerpc/stringloops/strlen.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PAPR Energy attributes sniff test
* This checks if the papr folders and contents are populated relating to
* the energy and frequency attributes
*
* Copyright 2022, Pratik Rajesh Sampat, IBM Corp.
*/
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <dirent.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdlib.h>
#include "utils.h"
enum energy_freq_attrs {
	POWER_PERFORMANCE_MODE = 1,
	IDLE_POWER_SAVER_STATUS = 2,
	MIN_FREQ = 3,
	STAT_FREQ = 4,
	MAX_FREQ = 6,
	PROC_FOLDING_STATUS = 8
};
enum type {
	INVALID,
	STR_VAL,
	NUM_VAL
};
/*
 * Map a PAPR energy/frequency attribute id to the kind of value its
 * sysfs files hold: a string, a number, or INVALID for unknown ids.
 */
static int value_type(int id)
{
	switch (id) {
	case POWER_PERFORMANCE_MODE:
	case IDLE_POWER_SAVER_STATUS:
		return STR_VAL;
	case MIN_FREQ:
	case STAT_FREQ:
	case MAX_FREQ:
	case PROC_FOLDING_STATUS:
		return NUM_VAL;
	default:
		return INVALID;
	}
}
/*
 * Walk /sys/firmware/papr/energy_scale_info and verify each attribute
 * directory contains non-empty desc and value files (plus value_desc
 * for string-valued attributes).  Skips if the directory is absent.
 *
 * Fixes over the original: opendir() result is now checked before use
 * (it was dereferenced unconditionally), each fopen()'d stream is
 * fclose()'d on the success path (they were all leaked), the directory
 * handle is closed, and the path is built with snprintf() instead of
 * sprintf().  Streams still leak on FAIL_IF early returns, which is
 * acceptable since the test process exits immediately.
 */
static int verify_energy_info(void)
{
	const char *path = "/sys/firmware/papr/energy_scale_info";
	struct dirent *entry;
	struct stat s;
	DIR *dirp;

	errno = 0;
	if (stat(path, &s)) {
		SKIP_IF(errno == ENOENT);
		FAIL_IF(errno);
	}
	FAIL_IF(!S_ISDIR(s.st_mode));

	dirp = opendir(path);
	FAIL_IF(!dirp);

	while ((entry = readdir(dirp)) != NULL) {
		char file_name[64];
		int id, attr_type;
		FILE *f;

		if (strcmp(entry->d_name, ".") == 0 ||
		    strcmp(entry->d_name, "..") == 0)
			continue;

		id = atoi(entry->d_name);
		attr_type = value_type(id);
		FAIL_IF(attr_type == INVALID);

		/* Check if the files exist and have data in them */
		snprintf(file_name, sizeof(file_name), "%s/%d/desc", path, id);
		f = fopen(file_name, "r");
		FAIL_IF(!f);
		FAIL_IF(fgetc(f) == EOF);
		fclose(f);

		snprintf(file_name, sizeof(file_name), "%s/%d/value", path, id);
		f = fopen(file_name, "r");
		FAIL_IF(!f);
		FAIL_IF(fgetc(f) == EOF);
		fclose(f);

		if (attr_type == STR_VAL) {
			snprintf(file_name, sizeof(file_name),
				 "%s/%d/value_desc", path, id);
			f = fopen(file_name, "r");
			FAIL_IF(!f);
			FAIL_IF(fgetc(f) == EOF);
			fclose(f);
		}
	}
	closedir(dirp);

	return 0;
}
int main(void)
{
	/* Run the PAPR energy-scale sysfs sanity check under the harness */
	return test_harness(verify_energy_info, "papr_attributes");
}
| linux-master | tools/testing/selftests/powerpc/papr_attributes/attr_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test that we can't sigreturn to kernel addresses, or to kernel mode.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "utils.h"
#define MSR_PR (1ul << 14)
static volatile unsigned long long sigreturn_addr;
static volatile unsigned long long sigreturn_msr_mask;
/*
 * SIGUSR1 handler: tamper with the saved user context so that the
 * subsequent sigreturn tries to resume at sigreturn_addr (if set)
 * and/or with saved-MSR bits cleared by sigreturn_msr_mask (if set).
 * The test then checks how the kernel handles the forged context.
 */
static void sigusr1_handler(int signo, siginfo_t *si, void *uc_ptr)
{
	ucontext_t *uc = (ucontext_t *)uc_ptr;
	if (sigreturn_addr)
		UCONTEXT_NIA(uc) = sigreturn_addr;
	if (sigreturn_msr_mask)
		UCONTEXT_MSR(uc) &= sigreturn_msr_mask;
}
/*
 * Fork a child that immediately sends itself SIGUSR1 and, if the
 * handler returns normally, exits with status 0.  Returns the child's
 * pid (or fork()'s error return) to the parent.
 */
static pid_t fork_child(void)
{
	pid_t child;

	child = fork();
	if (child != 0)
		return child;

	/* child: deliver SIGUSR1 to ourselves, then exit cleanly */
	raise(SIGUSR1);
	exit(0);
}
/*
 * Reap @pid and check it was killed by SIGSEGV rather than exiting
 * normally or dying from some other signal.
 *
 * Fixes: the waitpid() return value was previously ignored, so on a
 * waitpid failure the status word was read uninitialized (undefined
 * behaviour); the magic number 11 is replaced by SIGSEGV.
 */
static int expect_segv(pid_t pid)
{
	int status;

	FAIL_IF(waitpid(pid, &status, 0) != pid);

	FAIL_IF(WIFEXITED(status));
	FAIL_IF(!WIFSIGNALED(status));
	FAIL_IF(WTERMSIG(status) != SIGSEGV);

	return 0;
}
/*
 * Fork children whose SIGUSR1 handler forges the saved NIP to a kernel
 * or otherwise invalid address, and check the kernel refuses the
 * sigreturn: each child must die with SIGSEGV instead of executing at
 * the forged address.
 *
 * The loop runs twice: the first pass forges only the address, the
 * second additionally tries to clear MSR_PR (i.e. sigreturn into
 * kernel mode).  Finally, clearing MSR_PR alone - with a legitimate
 * address - must still return to userspace normally.
 */
int test_sigreturn_kernel(void)
{
	struct sigaction act;
	int child_ret, i;
	pid_t pid;
	act.sa_sigaction = sigusr1_handler;
	act.sa_flags = SA_SIGINFO;
	sigemptyset(&act.sa_mask);
	FAIL_IF(sigaction(SIGUSR1, &act, NULL));
	/* Pass 0: forged NIP only; pass 1: forged NIP with MSR_PR cleared */
	for (i = 0; i < 2; i++) {
		// Return to kernel
		sigreturn_addr = 0xcull << 60;
		pid = fork_child();
		expect_segv(pid);
		// Return to kernel virtual
		sigreturn_addr = 0xc008ull << 48;
		pid = fork_child();
		expect_segv(pid);
		// Return out of range
		sigreturn_addr = 0xc010ull << 48;
		pid = fork_child();
		expect_segv(pid);
		// Return to no-man's land, just below PAGE_OFFSET
		sigreturn_addr = (0xcull << 60) - (64 * 1024);
		pid = fork_child();
		expect_segv(pid);
		// Return to no-man's land, above TASK_SIZE_4PB
		sigreturn_addr = 0x1ull << 52;
		pid = fork_child();
		expect_segv(pid);
		// Return to 0xd space
		sigreturn_addr = 0xdull << 60;
		pid = fork_child();
		expect_segv(pid);
		// Return to 0xe space
		sigreturn_addr = 0xeull << 60;
		pid = fork_child();
		expect_segv(pid);
		// Return to 0xf space
		sigreturn_addr = 0xfull << 60;
		pid = fork_child();
		expect_segv(pid);
		// Attempt to set PR=0 for 2nd loop (should be blocked by kernel)
		sigreturn_msr_mask = ~MSR_PR;
	}
	printf("All children killed as expected\n");
	// Don't change address, just MSR, should return to user as normal
	sigreturn_addr = 0;
	sigreturn_msr_mask = ~MSR_PR;
	pid = fork_child();
	waitpid(pid, &child_ret, 0);
	FAIL_IF(!WIFEXITED(child_ret));
	FAIL_IF(WIFSIGNALED(child_ret));
	FAIL_IF(WEXITSTATUS(child_ret) != 0);
	return 0;
}
int main(void)
{
	/* Run the forged-sigreturn checks under the selftest harness */
	return test_harness(test_sigreturn_kernel, "sigreturn_kernel");
}
| linux-master | tools/testing/selftests/powerpc/signal/sigreturn_kernel.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
* Sending one self a signal should always get delivered.
*/
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <altivec.h>
#include "utils.h"
#include "../tm/tm.h"
#define MAX_ATTEMPT 500000
#define TIMEOUT 10
extern long tm_signal_self(pid_t pid, int sig, long *ret);
static sig_atomic_t signaled;
static sig_atomic_t fail;
/*
 * Shared handler for SIGUSR1 and SIGALRM.  It must never run inside an
 * active transaction (fail = 2 if it does).  SIGUSR1 marks successful
 * delivery; anything else (the SIGALRM watchdog) marks failure.
 */
static void signal_handler(int sig)
{
	/* Handlers must not execute transactionally */
	if (tcheck_active()) {
		fail = 2;
		return;
	}

	if (sig != SIGUSR1)
		fail = 1;
	else
		signaled = 1;
}
/*
 * Repeatedly send ourselves SIGUSR1 from inside a suspended
 * transaction (via the tm_signal_self() asm helper) and verify that:
 *  - the signal is actually delivered (signaled gets set),
 *  - the handler does not run transactionally (fail == 2 otherwise),
 *  - no unexpected signal fires (fail == 1, e.g. the SIGALRM watchdog).
 *
 * The transaction can abort before the helper even reaches the
 * suspend, in which case we simply retry; a TIMEOUT-second alarm
 * bounds each attempt so we cannot spin forever.
 */
static int test_signal_tm()
{
	int i;
	struct sigaction act;
	act.sa_handler = signal_handler;
	act.sa_flags = 0;
	sigemptyset(&act.sa_mask);
	if (sigaction(SIGUSR1, &act, NULL) < 0) {
		perror("sigaction SIGUSR1");
		exit(1);
	}
	if (sigaction(SIGALRM, &act, NULL) < 0) {
		perror("sigaction SIGALRM");
		exit(1);
	}
	SKIP_IF(!have_htm());
	SKIP_IF(htm_is_synthetic());
	for (i = 0; i < MAX_ATTEMPT; i++) {
		/*
		 * If anything bad happens in ASM and we fail to set ret
		 * because *handwave* TM this will cause failure
		 */
		long ret = 0xdead;
		long rc = 0xbeef;
		alarm(0); /* Disable any pending */
		signaled = 0;
		alarm(TIMEOUT);
		FAIL_IF(tcheck_transactional());
		rc = tm_signal_self(getpid(), SIGUSR1, &ret);
		if (ret == 0xdead)
			/*
			 * This basically means the transaction aborted before we
			 * even got to the suspend... this is crazy but it
			 * happens.
			 * Yes this also means we might never make forward
			 * progress... the alarm() will trip eventually...
			 */
			continue;
		if (rc || ret) {
			/* Ret is actually an errno */
			printf("TEXASR 0x%016lx, TFIAR 0x%016lx\n",
				__builtin_get_texasr(), __builtin_get_tfiar());
			fprintf(stderr, "(%d) Fail reason: %d rc=0x%lx ret=0x%lx\n",
				i, fail, rc, ret);
			FAIL_IF(ret);
		}
		/* Busy-wait until the handler runs or flags a failure */
		while(!signaled && !fail)
			asm volatile("": : :"memory");
		if (!signaled) {
			fprintf(stderr, "(%d) Fail reason: %d rc=0x%lx ret=0x%lx\n",
				i, fail, rc, ret);
			FAIL_IF(fail); /* For the line number */
		}
	}
	return 0;
}
int main(void)
{
	/* Run the TM signal-delivery stress test under the harness */
	return test_harness(test_signal_tm, "signal_tm");
}
| linux-master | tools/testing/selftests/powerpc/signal/signal_tm.c |
Subsets and Splits