python_code (string, 0–1.8M) | repo_name (7 classes) | file_path (string, 5–99) |
---|---|---|
#include "core_reloc_types.h"
void f(struct core_reloc_nesting___err_missing_container x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting___err_missing_container.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include "xdp_metadata.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
enum xdp_rss_hash_type *rss_type) __ksym;
int called;
SEC("freplace/rx")
int freplace_rx(struct xdp_md *ctx)
{
enum xdp_rss_hash_type type = 0;
u32 hash = 0;
/* Call _any_ metadata function to make sure we don't crash. */
bpf_xdp_metadata_rx_hash(ctx, &hash, &type);
called++;
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/xdp_metadata2.c |
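The `freplace/rx` program above only takes effect once user space attaches it over the target program's `rx` function. A minimal libbpf loader sketch, assuming a hypothetical object name `xdp_metadata2.bpf.o` and an already-loaded target program fd in `target_fd` (neither is part of the selftest itself):

```c
#include <bpf/libbpf.h>

/* Hypothetical sketch: attach freplace_rx over the "rx" subprogram of an
 * already-loaded target program. Object path and target_fd are assumptions. */
int attach_freplace_rx(int target_fd)
{
	struct bpf_object *obj = bpf_object__open_file("xdp_metadata2.bpf.o", NULL);
	struct bpf_program *prog;
	struct bpf_link *link;

	if (!obj)
		return -1;
	prog = bpf_object__find_program_by_name(obj, "freplace_rx");
	/* The attach target must be set before load so the verifier can check
	 * the replacement against the target's BTF. */
	if (!prog || bpf_program__set_attach_target(prog, target_fd, "rx") ||
	    bpf_object__load(obj))
		goto err;
	link = bpf_program__attach_freplace(prog, target_fd, "rx");
	if (!link)
		goto err;
	return 0;
err:
	bpf_object__close(obj);
	return -1;
}
```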
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
SEC("raw_tracepoint/consume_skb")
int while_true(struct pt_regs *ctx)
{
volatile __u64 i = 0, sum = 0;
do {
i++;
sum += PT_REGS_RC(ctx);
} while (i < 0x100000000ULL);
return sum;
}
| linux-master | tools/testing/selftests/bpf/progs/loop3.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/value_illegal_alu.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
SEC("socket")
__description("map element value illegal alu op, 1")
__failure __msg("R0 bitwise operator &= on pointer")
__failure_unpriv
__naked void value_illegal_alu_op_1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 &= 8; \
r1 = 22; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("socket")
__description("map element value illegal alu op, 2")
__failure __msg("R0 32-bit pointer arithmetic prohibited")
__failure_unpriv
__naked void value_illegal_alu_op_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
w0 += 0; \
r1 = 22; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("socket")
__description("map element value illegal alu op, 3")
__failure __msg("R0 pointer arithmetic with /= operator")
__failure_unpriv
__naked void value_illegal_alu_op_3(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 /= 42; \
r1 = 22; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("socket")
__description("map element value illegal alu op, 4")
__failure __msg("invalid mem access 'scalar'")
__failure_unpriv __msg_unpriv("R0 pointer arithmetic prohibited")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void value_illegal_alu_op_4(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 = be64 r0; \
r1 = 22; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("socket")
__description("map element value illegal alu op, 5")
__failure __msg("R0 invalid mem access 'scalar'")
__msg_unpriv("leaking pointer from stack off -8")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void value_illegal_alu_op_5(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r3 = 4096; \
r2 = r10; \
r2 += -8; \
*(u64*)(r2 + 0) = r0; \
lock *(u64 *)(r2 + 0) += r3; \
r0 = *(u64*)(r2 + 0); \
r1 = 22; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_tracing_net.h"
__be16 serv_port = 0;
int bpf_sock_destroy(struct sock_common *sk) __ksym;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} tcp_conn_sockets SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} udp_conn_sockets SEC(".maps");
SEC("cgroup/connect6")
int sock_connect(struct bpf_sock_addr *ctx)
{
__u64 sock_cookie = 0;
int key = 0;
__u32 keyc = 0;
if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
return 1;
sock_cookie = bpf_get_socket_cookie(ctx);
if (ctx->protocol == IPPROTO_TCP)
bpf_map_update_elem(&tcp_conn_sockets, &key, &sock_cookie, 0);
else if (ctx->protocol == IPPROTO_UDP)
bpf_map_update_elem(&udp_conn_sockets, &keyc, &sock_cookie, 0);
else
return 1;
return 1;
}
SEC("iter/tcp")
int iter_tcp6_client(struct bpf_iter__tcp *ctx)
{
struct sock_common *sk_common = ctx->sk_common;
__u64 sock_cookie = 0;
__u64 *val;
int key = 0;
if (!sk_common)
return 0;
if (sk_common->skc_family != AF_INET6)
return 0;
sock_cookie = bpf_get_socket_cookie(sk_common);
val = bpf_map_lookup_elem(&tcp_conn_sockets, &key);
if (!val)
return 0;
/* Destroy connected client sockets. */
if (sock_cookie == *val)
bpf_sock_destroy(sk_common);
return 0;
}
SEC("iter/tcp")
int iter_tcp6_server(struct bpf_iter__tcp *ctx)
{
struct sock_common *sk_common = ctx->sk_common;
const struct inet_connection_sock *icsk;
const struct inet_sock *inet;
struct tcp6_sock *tcp_sk;
__be16 srcp;
if (!sk_common)
return 0;
if (sk_common->skc_family != AF_INET6)
return 0;
tcp_sk = bpf_skc_to_tcp6_sock(sk_common);
if (!tcp_sk)
return 0;
icsk = &tcp_sk->tcp.inet_conn;
inet = &icsk->icsk_inet;
srcp = inet->inet_sport;
/* Destroy server sockets. */
if (srcp == serv_port)
bpf_sock_destroy(sk_common);
return 0;
}
SEC("iter/udp")
int iter_udp6_client(struct bpf_iter__udp *ctx)
{
struct udp_sock *udp_sk = ctx->udp_sk;
struct sock *sk = (struct sock *) udp_sk;
__u64 sock_cookie = 0, *val;
int key = 0;
if (!sk)
return 0;
sock_cookie = bpf_get_socket_cookie(sk);
val = bpf_map_lookup_elem(&udp_conn_sockets, &key);
if (!val)
return 0;
/* Destroy connected client sockets. */
if (sock_cookie == *val)
bpf_sock_destroy((struct sock_common *)sk);
return 0;
}
SEC("iter/udp")
int iter_udp6_server(struct bpf_iter__udp *ctx)
{
struct udp_sock *udp_sk = ctx->udp_sk;
struct sock *sk = (struct sock *) udp_sk;
struct inet_sock *inet;
__be16 srcp;
if (!sk)
return 0;
inet = &udp_sk->inet;
srcp = inet->inet_sport;
if (srcp == serv_port)
bpf_sock_destroy((struct sock_common *)sk);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/sock_destroy_prog.c |
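The iterators above only execute when user space creates an iterator fd from the attached link and reads it to completion. A minimal driver sketch (it assumes `prog` is one of the loaded `iter/tcp` or `iter/udp` programs; error handling is abbreviated):

```c
#include <unistd.h>
#include <bpf/libbpf.h>

/* Hypothetical sketch: one pass over the socket iterator, during which
 * bpf_sock_destroy() fires for every matching socket. */
int run_destroy_iter(struct bpf_program *prog)
{
	struct bpf_link *link = bpf_program__attach_iter(prog, NULL);
	char buf[64];
	int iter_fd, len;

	if (!link)
		return -1;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0) {
		bpf_link__destroy(link);
		return -1;
	}
	/* read() drives the kernel-side iteration; EOF ends the pass. */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	close(iter_fd);
	bpf_link__destroy(link);
	return len;
}
```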
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"
struct node_data {
int key;
int data;
struct bpf_rb_node node;
};
struct node_data2 {
int key;
struct bpf_rb_node node;
int data;
};
static bool less2(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
struct node_data2 *node_a;
struct node_data2 *node_b;
node_a = container_of(a, struct node_data2, node);
node_b = container_of(b, struct node_data2, node);
return node_a->key < node_b->key;
}
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);
SEC("tc")
long rbtree_api_add__add_wrong_type(void *ctx)
{
struct node_data2 *n;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less2);
bpf_spin_unlock(&glock);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c |
#include "core_reloc_types.h"
void f(struct core_reloc_type_based___fn_wrong_args x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___fn_wrong_args.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
u32 nr_loops;
long hits;
static int empty_callback(__u32 index, void *data)
{
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int benchmark(void *ctx)
{
for (int i = 0; i < 1000; i++) {
bpf_loop(nr_loops, empty_callback, NULL, 0);
__sync_add_and_fetch(&hits, nr_loops);
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_loop_bench.c |
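Because the benchmark is an fentry hook on `sys_getpgid`, user space drives it simply by issuing that syscall in a loop; a trigger sketch (the helper name and iteration count are illustrative assumptions):

```c
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical trigger loop: each getpgid() entry fires the fentry program
 * once, which in turn runs bpf_loop() another 1000 times. */
static void trigger_bpf_loop_bench(long n)
{
	for (long i = 0; i < n; i++)
		syscall(SYS_getpgid);
}
```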
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
extern int LINUX_KERNEL_VERSION __kconfig;
/* when an extern is defined as both strong and weak, the resulting symbol will be strong */
extern bool CONFIG_BPF_SYSCALL __kconfig;
extern const void __start_BTF __ksym;
int input_bss2;
int input_data2 = 2;
const volatile int input_rodata2 = 22;
int input_bss_weak __weak;
/* these two weak variables should lose */
int input_data_weak __weak = 20;
const volatile int input_rodata_weak __weak = 200;
extern int input_bss1;
extern int input_data1;
extern const int input_rodata1;
int output_bss2;
int output_data2;
int output_rodata2;
int output_sink2;
static __noinline int get_data_res(void)
{
/* just make sure all the relocations work against .text as well */
return input_data1 + input_data2 + input_data_weak;
}
SEC("raw_tp/sys_enter")
int BPF_PROG(handler2)
{
output_bss2 = input_bss1 + input_bss2 + input_bss_weak;
output_data2 = get_data_res();
output_rodata2 = input_rodata1 + input_rodata2 + input_rodata_weak;
/* make sure we actually use above special externs, otherwise compiler
* will optimize them out
*/
output_sink2 = LINUX_KERNEL_VERSION
+ CONFIG_BPF_SYSCALL
+ (long)&__start_BTF;
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/linked_vars2.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
*/
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_misc.h"
/* llvm will optimize both subprograms into exactly the same BPF assembly
*
* Disassembly of section .text:
*
* 0000000000000000 test_pkt_access_subprog1:
* ; return skb->len * 2;
* 0: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0)
* 1: 64 00 00 00 01 00 00 00 w0 <<= 1
* 2: 95 00 00 00 00 00 00 00 exit
*
* 0000000000000018 test_pkt_access_subprog2:
* ; return skb->len * val;
* 3: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0)
* 4: 64 00 00 00 01 00 00 00 w0 <<= 1
* 5: 95 00 00 00 00 00 00 00 exit
*
* Which makes it an interesting test for BTF-enabled verifier.
*/
static __attribute__ ((noinline))
int test_pkt_access_subprog1(volatile struct __sk_buff *skb)
{
return skb->len * 2;
}
static __attribute__ ((noinline))
int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
{
return skb->len * val;
}
#define MAX_STACK (512 - 2 * 32)
__attribute__ ((noinline))
int get_skb_len(struct __sk_buff *skb)
{
volatile char buf[MAX_STACK] = {};
__sink(buf[MAX_STACK - 1]);
return skb->len;
}
__attribute__ ((noinline))
int get_constant(long val)
{
return val - 122;
}
int get_skb_ifindex(int, struct __sk_buff *skb, int);
__attribute__ ((noinline))
int test_pkt_access_subprog3(int val, struct __sk_buff *skb)
{
return get_skb_len(skb) * get_skb_ifindex(val, skb, get_constant(123));
}
__attribute__ ((noinline))
int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
{
volatile char buf[MAX_STACK] = {};
__sink(buf[MAX_STACK - 1]);
return skb->ifindex * val * var;
}
__attribute__ ((noinline))
int test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
{
void *data = (void *)(long)skb->data;
void *data_end = (void *)(long)skb->data_end;
struct tcphdr *tcp = NULL;
if (off > sizeof(struct ethhdr) + sizeof(struct ipv6hdr))
return -1;
tcp = data + off;
if (tcp + 1 > data_end)
return -1;
/* make modification to the packet data */
tcp->check++;
return 0;
}
SEC("tc")
int test_pkt_access(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
struct ethhdr *eth = (struct ethhdr *)(data);
struct tcphdr *tcp = NULL;
__u8 proto = 255;
__u64 ihl_len;
if (eth + 1 > data_end)
return TC_ACT_SHOT;
if (eth->h_proto == bpf_htons(ETH_P_IP)) {
struct iphdr *iph = (struct iphdr *)(eth + 1);
if (iph + 1 > data_end)
return TC_ACT_SHOT;
ihl_len = iph->ihl * 4;
proto = iph->protocol;
tcp = (struct tcphdr *)((void *)(iph) + ihl_len);
} else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(eth + 1);
if (ip6h + 1 > data_end)
return TC_ACT_SHOT;
ihl_len = sizeof(*ip6h);
proto = ip6h->nexthdr;
tcp = (struct tcphdr *)((void *)(ip6h) + ihl_len);
}
if (test_pkt_access_subprog1(skb) != skb->len * 2)
return TC_ACT_SHOT;
if (test_pkt_access_subprog2(2, skb) != skb->len * 2)
return TC_ACT_SHOT;
if (test_pkt_access_subprog3(3, skb) != skb->len * 3 * skb->ifindex)
return TC_ACT_SHOT;
if (tcp) {
if (test_pkt_write_access_subprog(skb, (void *)tcp - data))
return TC_ACT_SHOT;
if (((void *)(tcp) + 20) > data_end || proto != 6)
return TC_ACT_SHOT;
barrier(); /* to force ordering of checks */
if (((void *)(tcp) + 18) > data_end)
return TC_ACT_SHOT;
if (tcp->urg_ptr == 123)
return TC_ACT_OK;
}
return TC_ACT_UNSPEC;
}
| linux-master | tools/testing/selftests/bpf/progs/test_pkt_access.c |
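A classifier like this is usually exercised through BPF_PROG_TEST_RUN rather than a live interface. A minimal sketch, assuming `prog_fd` is the loaded program's fd and `pkt`/`pkt_len` describe a prebuilt Ethernet + IPv4/IPv6 TCP frame (packet construction is omitted):

```c
#include <bpf/bpf.h>

/* Hypothetical sketch: run the classifier once over a crafted packet and
 * return its TC verdict (TC_ACT_OK, TC_ACT_SHOT, ...). */
int run_pkt_access(int prog_fd, void *pkt, __u32 pkt_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.repeat = 1,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &topts);

	return err ?: topts.retval;
}
```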
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
#define BPF_SK_LOOKUP(func) \
/* struct bpf_sock_tuple tuple = {} */ \
"r2 = 0;" \
"*(u32*)(r10 - 8) = r2;" \
"*(u64*)(r10 - 16) = r2;" \
"*(u64*)(r10 - 24) = r2;" \
"*(u64*)(r10 - 32) = r2;" \
"*(u64*)(r10 - 40) = r2;" \
"*(u64*)(r10 - 48) = r2;" \
/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
"r2 = r10;" \
"r2 += -48;" \
"r3 = %[sizeof_bpf_sock_tuple];"\
"r4 = 0;" \
"r5 = 0;" \
"call %[" #func "];"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
void dummy_prog_42_socket(void);
void dummy_prog_24_socket(void);
void dummy_prog_loop1_socket(void);
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 4);
__uint(key_size, sizeof(int));
__array(values, void (void));
} map_prog1_socket SEC(".maps") = {
.values = {
[0] = (void *)&dummy_prog_42_socket,
[1] = (void *)&dummy_prog_loop1_socket,
[2] = (void *)&dummy_prog_24_socket,
},
};
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_42_socket(void)
{
asm volatile ("r0 = 42; exit;");
}
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_24_socket(void)
{
asm volatile ("r0 = 24; exit;");
}
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_loop1_socket(void)
{
asm volatile (" \
r3 = 1; \
r2 = %[map_prog1_socket] ll; \
call %[bpf_tail_call]; \
r0 = 41; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket)
: __clobber_all);
}
SEC("socket")
__description("unpriv: return pointer")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(POINTER_VALUE)
__naked void unpriv_return_pointer(void)
{
asm volatile (" \
r0 = r10; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: add const to pointer")
__success __success_unpriv __retval(0)
__naked void unpriv_add_const_to_pointer(void)
{
asm volatile (" \
r1 += 8; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: add pointer to pointer")
__failure __msg("R1 pointer += pointer")
__failure_unpriv
__naked void unpriv_add_pointer_to_pointer(void)
{
asm volatile (" \
r1 += r10; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: neg pointer")
__success __failure_unpriv __msg_unpriv("R1 pointer arithmetic")
__retval(0)
__naked void unpriv_neg_pointer(void)
{
asm volatile (" \
r1 = -r1; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: cmp pointer with const")
__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
__retval(0)
__naked void unpriv_cmp_pointer_with_const(void)
{
asm volatile (" \
if r1 == 0 goto l0_%=; \
l0_%=: r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: cmp pointer with pointer")
__success __failure_unpriv __msg_unpriv("R10 pointer comparison")
__retval(0)
__naked void unpriv_cmp_pointer_with_pointer(void)
{
asm volatile (" \
if r1 == r10 goto l0_%=; \
l0_%=: r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("tracepoint")
__description("unpriv: check that printk is disallowed")
__success
__naked void check_that_printk_is_disallowed(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r1 = r10; \
r1 += -8; \
r2 = 8; \
r3 = r1; \
call %[bpf_trace_printk]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_trace_printk)
: __clobber_all);
}
SEC("socket")
__description("unpriv: pass pointer to helper function")
__success __failure_unpriv __msg_unpriv("R4 leaks addr")
__retval(0)
__naked void pass_pointer_to_helper_function(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
r3 = r2; \
r4 = r2; \
call %[bpf_map_update_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_update_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("unpriv: indirectly pass pointer on stack to helper function")
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -8+0 size 8")
__retval(0)
__naked void on_stack_to_helper_function(void)
{
asm volatile (" \
*(u64*)(r10 - 8) = r10; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("unpriv: mangle pointer on stack 1")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(0)
__naked void mangle_pointer_on_stack_1(void)
{
asm volatile (" \
*(u64*)(r10 - 8) = r10; \
r0 = 0; \
*(u32*)(r10 - 8) = r0; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: mangle pointer on stack 2")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(0)
__naked void mangle_pointer_on_stack_2(void)
{
asm volatile (" \
*(u64*)(r10 - 8) = r10; \
r0 = 0; \
*(u8*)(r10 - 1) = r0; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: read pointer from stack in small chunks")
__failure __msg("invalid size")
__failure_unpriv
__naked void from_stack_in_small_chunks(void)
{
asm volatile (" \
*(u64*)(r10 - 8) = r10; \
r0 = *(u32*)(r10 - 8); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: write pointer into ctx")
__failure __msg("invalid bpf_context access")
__failure_unpriv __msg_unpriv("R1 leaks addr")
__naked void unpriv_write_pointer_into_ctx(void)
{
asm volatile (" \
*(u64*)(r1 + 0) = r1; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: spill/fill of ctx")
__success __success_unpriv __retval(0)
__naked void unpriv_spill_fill_of_ctx(void)
{
asm volatile (" \
r6 = r10; \
r6 += -8; \
*(u64*)(r6 + 0) = r1; \
r1 = *(u64*)(r6 + 0); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("unpriv: spill/fill of ctx 2")
__success __retval(0)
__naked void spill_fill_of_ctx_2(void)
{
asm volatile (" \
r6 = r10; \
r6 += -8; \
*(u64*)(r6 + 0) = r1; \
r1 = *(u64*)(r6 + 0); \
call %[bpf_get_hash_recalc]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_hash_recalc)
: __clobber_all);
}
SEC("tc")
__description("unpriv: spill/fill of ctx 3")
__failure __msg("R1 type=fp expected=ctx")
__naked void spill_fill_of_ctx_3(void)
{
asm volatile (" \
r6 = r10; \
r6 += -8; \
*(u64*)(r6 + 0) = r1; \
*(u64*)(r6 + 0) = r10; \
r1 = *(u64*)(r6 + 0); \
call %[bpf_get_hash_recalc]; \
exit; \
" :
: __imm(bpf_get_hash_recalc)
: __clobber_all);
}
SEC("tc")
__description("unpriv: spill/fill of ctx 4")
__failure __msg("R1 type=scalar expected=ctx")
__naked void spill_fill_of_ctx_4(void)
{
asm volatile (" \
r6 = r10; \
r6 += -8; \
*(u64*)(r6 + 0) = r1; \
r0 = 1; \
lock *(u64 *)(r10 - 8) += r0; \
r1 = *(u64*)(r6 + 0); \
call %[bpf_get_hash_recalc]; \
exit; \
" :
: __imm(bpf_get_hash_recalc)
: __clobber_all);
}
SEC("tc")
__description("unpriv: spill/fill of different pointers stx")
__failure __msg("same insn cannot be used with different pointers")
__naked void fill_of_different_pointers_stx(void)
{
asm volatile (" \
r3 = 42; \
r6 = r10; \
r6 += -8; \
if r1 == 0 goto l0_%=; \
r2 = r10; \
r2 += -16; \
*(u64*)(r6 + 0) = r2; \
l0_%=: if r1 != 0 goto l1_%=; \
*(u64*)(r6 + 0) = r1; \
l1_%=: r1 = *(u64*)(r6 + 0); \
*(u32*)(r1 + %[__sk_buff_mark]) = r3; \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
/* Same as above, but use BPF_ST_MEM to save 42
* instead of BPF_STX_MEM.
*/
SEC("tc")
__description("unpriv: spill/fill of different pointers st")
__failure __msg("same insn cannot be used with different pointers")
__naked void fill_of_different_pointers_st(void)
{
asm volatile (" \
r6 = r10; \
r6 += -8; \
if r1 == 0 goto l0_%=; \
r2 = r10; \
r2 += -16; \
*(u64*)(r6 + 0) = r2; \
l0_%=: if r1 != 0 goto l1_%=; \
*(u64*)(r6 + 0) = r1; \
l1_%=: r1 = *(u64*)(r6 + 0); \
.8byte %[st_mem]; \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_insn(st_mem,
BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42))
: __clobber_all);
}
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - ctx and sock")
__failure __msg("type=ctx expected=sock")
__naked void pointers_stx_ctx_and_sock(void)
{
asm volatile (" \
r8 = r1; \
/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r2 = r0; \
/* u64 foo; */ \
/* void *target = &foo; */ \
r6 = r10; \
r6 += -8; \
r1 = r8; \
/* if (skb == NULL) *target = sock; */ \
if r1 == 0 goto l0_%=; \
*(u64*)(r6 + 0) = r2; \
l0_%=: /* else *target = skb; */ \
if r1 != 0 goto l1_%=; \
*(u64*)(r6 + 0) = r1; \
l1_%=: /* struct __sk_buff *skb = *target; */ \
r1 = *(u64*)(r6 + 0); \
/* skb->mark = 42; */ \
r3 = 42; \
*(u32*)(r1 + %[__sk_buff_mark]) = r3; \
/* if (sk) bpf_sk_release(sk) */ \
if r1 == 0 goto l2_%=; \
call %[bpf_sk_release]; \
l2_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - leak sock")
__failure
//.errstr = "same insn cannot be used with different pointers",
__msg("Unreleased reference")
__naked void different_pointers_stx_leak_sock(void)
{
asm volatile (" \
r8 = r1; \
/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r2 = r0; \
/* u64 foo; */ \
/* void *target = &foo; */ \
r6 = r10; \
r6 += -8; \
r1 = r8; \
/* if (skb == NULL) *target = sock; */ \
if r1 == 0 goto l0_%=; \
*(u64*)(r6 + 0) = r2; \
l0_%=: /* else *target = skb; */ \
if r1 != 0 goto l1_%=; \
*(u64*)(r6 + 0) = r1; \
l1_%=: /* struct __sk_buff *skb = *target; */ \
r1 = *(u64*)(r6 + 0); \
/* skb->mark = 42; */ \
r3 = 42; \
*(u32*)(r1 + %[__sk_buff_mark]) = r3; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - sock and ctx (read)")
__failure __msg("same insn cannot be used with different pointers")
__naked void stx_sock_and_ctx_read(void)
{
asm volatile (" \
r8 = r1; \
/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r2 = r0; \
/* u64 foo; */ \
/* void *target = &foo; */ \
r6 = r10; \
r6 += -8; \
r1 = r8; \
/* if (skb) *target = skb */ \
if r1 == 0 goto l0_%=; \
*(u64*)(r6 + 0) = r1; \
l0_%=: /* else *target = sock */ \
if r1 != 0 goto l1_%=; \
*(u64*)(r6 + 0) = r2; \
l1_%=: /* struct bpf_sock *sk = *target; */ \
r1 = *(u64*)(r6 + 0); \
/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */\
if r1 == 0 goto l2_%=; \
r3 = *(u32*)(r1 + %[bpf_sock_mark]); \
call %[bpf_sk_release]; \
l2_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - sock and ctx (write)")
__failure
//.errstr = "same insn cannot be used with different pointers",
__msg("cannot write into sock")
__naked void stx_sock_and_ctx_write(void)
{
asm volatile (" \
r8 = r1; \
/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r2 = r0; \
/* u64 foo; */ \
/* void *target = &foo; */ \
r6 = r10; \
r6 += -8; \
r1 = r8; \
/* if (skb) *target = skb */ \
if r1 == 0 goto l0_%=; \
*(u64*)(r6 + 0) = r1; \
l0_%=: /* else *target = sock */ \
if r1 != 0 goto l1_%=; \
*(u64*)(r6 + 0) = r2; \
l1_%=: /* struct bpf_sock *sk = *target; */ \
r1 = *(u64*)(r6 + 0); \
/* if (sk) sk->mark = 42; bpf_sk_release(sk); */\
if r1 == 0 goto l2_%=; \
r3 = 42; \
*(u32*)(r1 + %[bpf_sock_mark]) = r3; \
call %[bpf_sk_release]; \
l2_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("socket")
__description("unpriv: write pointer into map elem value")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void pointer_into_map_elem_value(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
*(u64*)(r0 + 0) = r0; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("alu32: mov u32 const")
__success __failure_unpriv __msg_unpriv("R7 invalid mem access 'scalar'")
__retval(0)
__naked void alu32_mov_u32_const(void)
{
asm volatile (" \
w7 = 0; \
w7 &= 1; \
w0 = w7; \
if r0 == 0 goto l0_%=; \
r0 = *(u64*)(r7 + 0); \
l0_%=: exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: partial copy of pointer")
__success __failure_unpriv __msg_unpriv("R10 partial copy")
__retval(0)
__naked void unpriv_partial_copy_of_pointer(void)
{
asm volatile (" \
w1 = w10; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: pass pointer to tail_call")
__success __failure_unpriv __msg_unpriv("R3 leaks addr into helper")
__retval(0)
__naked void pass_pointer_to_tail_call(void)
{
asm volatile (" \
r3 = r1; \
r2 = %[map_prog1_socket] ll; \
call %[bpf_tail_call]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket)
: __clobber_all);
}
SEC("socket")
__description("unpriv: cmp map pointer with zero")
__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
__retval(0)
__naked void cmp_map_pointer_with_zero(void)
{
asm volatile (" \
r1 = 0; \
r1 = %[map_hash_8b] ll; \
if r1 == 0 goto l0_%=; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("unpriv: write into frame pointer")
__failure __msg("frame pointer is read only")
__failure_unpriv
__naked void unpriv_write_into_frame_pointer(void)
{
asm volatile (" \
r10 = r1; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: spill/fill frame pointer")
__failure __msg("frame pointer is read only")
__failure_unpriv
__naked void unpriv_spill_fill_frame_pointer(void)
{
asm volatile (" \
r6 = r10; \
r6 += -8; \
*(u64*)(r6 + 0) = r10; \
r10 = *(u64*)(r6 + 0); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: cmp of frame pointer")
__success __failure_unpriv __msg_unpriv("R10 pointer comparison")
__retval(0)
__naked void unpriv_cmp_of_frame_pointer(void)
{
asm volatile (" \
if r10 == 0 goto l0_%=; \
l0_%=: r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: adding of fp, reg")
__success __failure_unpriv
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__retval(0)
__naked void unpriv_adding_of_fp_reg(void)
{
asm volatile (" \
r0 = 0; \
r1 = 0; \
r1 += r10; \
*(u64*)(r1 - 8) = r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: adding of fp, imm")
__success __failure_unpriv
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__retval(0)
__naked void unpriv_adding_of_fp_imm(void)
{
asm volatile (" \
r0 = 0; \
r1 = r10; \
r1 += 0; \
*(u64*)(r1 - 8) = r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unpriv: cmp of stack pointer")
__success __failure_unpriv __msg_unpriv("R2 pointer comparison")
__retval(0)
__naked void unpriv_cmp_of_stack_pointer(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
if r2 == 0 goto l0_%=; \
l0_%=: r0 = 0; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_unpriv.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct bpf_iter_testmod_seq {
u64 :64;
u64 :64;
};
extern int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) __ksym;
extern s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it) __ksym;
extern void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) __ksym;
const volatile __s64 exp_empty = 0 + 1;
__s64 res_empty;
SEC("raw_tp/sys_enter")
__success __log_level(2)
__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)")
__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
__msg("call bpf_iter_testmod_seq_destroy")
int testmod_seq_empty(const void *ctx)
{
__s64 sum = 0, *i;
bpf_for_each(testmod_seq, i, 1000, 0) sum += *i;
res_empty = 1 + sum;
return 0;
}
const volatile __s64 exp_full = 1000000;
__s64 res_full;
SEC("raw_tp/sys_enter")
__success __log_level(2)
__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)")
__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
__msg("call bpf_iter_testmod_seq_destroy")
int testmod_seq_full(const void *ctx)
{
__s64 sum = 0, *i;
bpf_for_each(testmod_seq, i, 1000, 1000) sum += *i;
res_full = sum;
return 0;
}
const volatile __s64 exp_truncated = 10 * 1000000;
__s64 res_truncated;
static volatile int zero = 0;
SEC("raw_tp/sys_enter")
__success __log_level(2)
__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)")
__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
__msg("call bpf_iter_testmod_seq_destroy")
int testmod_seq_truncated(const void *ctx)
{
__s64 sum = 0, *i;
int cnt = zero;
bpf_for_each(testmod_seq, i, 10, 2000000) {
sum += *i;
cnt++;
if (cnt >= 1000000)
break;
}
res_truncated = sum;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/iters_testmod_seq.c |
#include "core_reloc_types.h"
void f(struct core_reloc_primitives___err_non_enum x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_primitives___err_non_enum.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define STACK_MAX_LEN 50
#define SUBPROGS
#include "pyperf.h"
| linux-master | tools/testing/selftests/bpf/progs/pyperf_subprogs.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Google LLC. */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} sk_stg_map SEC(".maps");
SEC("iter/bpf_sk_storage_map")
int delete_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
{
if (ctx->sk)
bpf_sk_storage_delete(&sk_stg_map, ctx->sk);
return 0;
}
SEC("iter/task_file")
int fill_socket_owner(struct bpf_iter__task_file *ctx)
{
struct task_struct *task = ctx->task;
struct file *file = ctx->file;
struct socket *sock;
int *sock_tgid;
if (!task || !file)
return 0;
sock = bpf_sock_from_file(file);
if (!sock)
return 0;
sock_tgid = bpf_sk_storage_get(&sk_stg_map, sock->sk, 0, 0);
if (!sock_tgid)
return 0;
*sock_tgid = task->tgid;
return 0;
}
SEC("iter/tcp")
int negate_socket_local_storage(struct bpf_iter__tcp *ctx)
{
struct sock_common *sk_common = ctx->sk_common;
int *sock_tgid;
if (!sk_common)
return 0;
sock_tgid = bpf_sk_storage_get(&sk_stg_map, sk_common, 0, 0);
if (!sock_tgid)
return 0;
*sock_tgid = -*sock_tgid;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
static struct sockaddr_in old;
static int handle_sys_connect_common(struct sockaddr_in *uservaddr)
{
struct sockaddr_in new;
bpf_probe_read_user(&old, sizeof(old), uservaddr);
__builtin_memset(&new, 0xab, sizeof(new));
bpf_probe_write_user(uservaddr, &new, sizeof(new));
return 0;
}
SEC("ksyscall/connect")
int BPF_KSYSCALL(handle_sys_connect, int fd, struct sockaddr_in *uservaddr,
int addrlen)
{
return handle_sys_connect_common(uservaddr);
}
#if defined(bpf_target_s390)
#ifndef SYS_CONNECT
#define SYS_CONNECT 3
#endif
SEC("ksyscall/socketcall")
int BPF_KSYSCALL(handle_sys_socketcall, int call, unsigned long *args)
{
if (call == SYS_CONNECT) {
struct sockaddr_in *uservaddr;
bpf_probe_read_user(&uservaddr, sizeof(uservaddr), &args[1]);
return handle_sys_connect_common(uservaddr);
}
return 0;
}
#endif
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_probe_user.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define __unused __attribute__((unused))
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
int done = 0;
SEC("tc")
int classifier_0(struct __sk_buff *skb __unused)
{
done = 1;
return 0;
}
static __noinline
int subprog_tail(struct __sk_buff *skb)
{
/* Don't propagate the constant to the caller */
volatile int ret = 1;
bpf_tail_call_static(skb, &jmp_table, 0);
return ret;
}
SEC("tc")
int entry(struct __sk_buff *skb)
{
/* Have data on stack which size is not a multiple of 8 */
volatile char arr[1] = {};
__sink(arr[0]);
return subprog_tail(skb);
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c |
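For `subprog_tail()`'s tail call to reach `classifier_0`, user space must first install that program's fd into slot 0 of `jmp_table`; a sketch assuming the object is already loaded (the helper itself is hypothetical, the program and map names come from the file above):

```c
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Hypothetical sketch: point jmp_table[0] at classifier_0 so that
 * bpf_tail_call_static(skb, &jmp_table, 0) has a target. */
int setup_jmp_table(struct bpf_object *obj)
{
	struct bpf_program *cls = bpf_object__find_program_by_name(obj, "classifier_0");
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "jmp_table");
	int key = 0, prog_fd;

	if (!cls || !map)
		return -1;
	prog_fd = bpf_program__fd(cls);
	return bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
}
```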
// SPDX-License-Identifier: GPL-2.0+
/* This testcase operates with the test_fpu kernel driver.
* It modifies the FPU control register in user mode and calls the kernel
* module to perform floating point operations in the kernel. The control
* register value should be independent between kernel and user mode.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fenv.h>
#include <unistd.h>
#include <fcntl.h>
const char *test_fpu_path = "/sys/kernel/debug/selftest_helpers/test_fpu";
int main(void)
{
char dummy[1];
int fd = open(test_fpu_path, O_RDONLY);
if (fd < 0) {
printf("[SKIP]\tcan't access %s: %s\n",
test_fpu_path, strerror(errno));
return 0;
}
if (read(fd, dummy, 1) < 0) {
printf("[FAIL]\taccess with default rounding mode failed\n");
return 1;
}
fesetround(FE_DOWNWARD);
if (read(fd, dummy, 1) < 0) {
printf("[FAIL]\taccess with downward rounding mode failed\n");
return 2;
}
if (fegetround() != FE_DOWNWARD) {
printf("[FAIL]\tusermode rounding mode clobbered\n");
return 3;
}
/* Note: the tests up to this point are quite safe and will only return
 * an error. But the exception mask setting can cause a misbehaving kernel
 * to crash.
 */
feclearexcept(FE_ALL_EXCEPT);
feenableexcept(FE_ALL_EXCEPT);
if (read(fd, dummy, 1) < 0) {
printf("[FAIL]\taccess with fpu exceptions unmasked failed\n");
return 4;
}
if (fegetexcept() != FE_ALL_EXCEPT) {
printf("[FAIL]\tusermode fpu exception mask clobbered\n");
return 5;
}
printf("[OK]\ttest_fpu\n");
return 0;
}
| linux-master | tools/testing/selftests/fpu/test_fpu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author: SeongJae Park <[email protected]>
*/
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#pragma GCC diagnostic push
#if __GNUC__ >= 11 && __GNUC_MINOR__ >= 1
/* Ignore read(2) overflow and write(2) overread compile warnings */
#pragma GCC diagnostic ignored "-Wstringop-overread"
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
void write_read_with_huge_count(char *file)
{
int filedesc = open(file, O_RDWR);
char buf[25];
int ret;
printf("%s %s\n", __func__, file);
if (filedesc < 0) {
fprintf(stderr, "failed opening %s\n", file);
exit(1);
}
write(filedesc, "", 0xfffffffful);
perror("after write: ");
ret = read(filedesc, buf, 0xfffffffful);
perror("after read: ");
close(filedesc);
}
#pragma GCC diagnostic pop
int main(int argc, char *argv[])
{
if (argc != 2) {
fprintf(stderr, "Usage: %s <file>\n", argv[0]);
exit(1);
}
write_read_with_huge_count(argv[1]);
return 0;
}
| linux-master | tools/testing/selftests/damon/huge_count_read_write.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GPIO character device helper for reading chip information.
*
* Copyright (C) 2021 Bartosz Golaszewski <[email protected]>
*/
#include <fcntl.h>
#include <linux/gpio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
static void print_usage(void)
{
printf("usage:\n");
printf(" gpio-chip-info <chip path> [name|label|num-lines]\n");
}
int main(int argc, char **argv)
{
struct gpiochip_info info;
int fd, ret;
if (argc != 3) {
print_usage();
return EXIT_FAILURE;
}
fd = open(argv[1], O_RDWR);
if (fd < 0) {
perror("unable to open the GPIO chip");
return EXIT_FAILURE;
}
memset(&info, 0, sizeof(info));
ret = ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &info);
if (ret) {
perror("chip info ioctl failed");
return EXIT_FAILURE;
}
if (strcmp(argv[2], "name") == 0) {
printf("%s\n", info.name);
} else if (strcmp(argv[2], "label") == 0) {
printf("%s\n", info.label);
} else if (strcmp(argv[2], "num-lines") == 0) {
printf("%u\n", info.lines);
} else {
fprintf(stderr, "unknown command: %s\n", argv[2]);
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
| linux-master | tools/testing/selftests/gpio/gpio-chip-info.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GPIO character device helper for reading line names.
*
* Copyright (C) 2021 Bartosz Golaszewski <[email protected]>
*/
#include <fcntl.h>
#include <linux/gpio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
static void print_usage(void)
{
printf("usage:\n");
printf(" gpio-line-name <chip path> <line offset>\n");
}
int main(int argc, char **argv)
{
struct gpio_v2_line_info info;
int fd, ret;
char *endp;
if (argc != 3) {
print_usage();
return EXIT_FAILURE;
}
fd = open(argv[1], O_RDWR);
if (fd < 0) {
perror("unable to open the GPIO chip");
return EXIT_FAILURE;
}
memset(&info, 0, sizeof(info));
info.offset = strtoul(argv[2], &endp, 10);
if (*endp != '\0') {
print_usage();
return EXIT_FAILURE;
}
ret = ioctl(fd, GPIO_V2_GET_LINEINFO_IOCTL, &info);
if (ret) {
perror("line info ioctl failed");
return EXIT_FAILURE;
}
printf("%s\n", info.name);
return EXIT_SUCCESS;
}
| linux-master | tools/testing/selftests/gpio/gpio-line-name.c |
// SPDX-License-Identifier: GPL-2.0
/*
* GPIO mockup cdev test helper
*
* Copyright (C) 2020 Kent Gibson
*/
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>
#define CONSUMER "gpio-mockup-cdev"
static int request_line_v2(int cfd, unsigned int offset,
uint64_t flags, unsigned int val)
{
struct gpio_v2_line_request req;
int ret;
memset(&req, 0, sizeof(req));
req.num_lines = 1;
req.offsets[0] = offset;
req.config.flags = flags;
strcpy(req.consumer, CONSUMER);
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
req.config.num_attrs = 1;
req.config.attrs[0].mask = 1;
req.config.attrs[0].attr.id = GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES;
if (val)
req.config.attrs[0].attr.values = 1;
}
ret = ioctl(cfd, GPIO_V2_GET_LINE_IOCTL, &req);
if (ret == -1)
return -errno;
return req.fd;
}
static int get_value_v2(int lfd)
{
struct gpio_v2_line_values vals;
int ret;
memset(&vals, 0, sizeof(vals));
vals.mask = 1;
ret = ioctl(lfd, GPIO_V2_LINE_GET_VALUES_IOCTL, &vals);
if (ret == -1)
return -errno;
return vals.bits & 0x1;
}
static int request_line_v1(int cfd, unsigned int offset,
uint32_t flags, unsigned int val)
{
struct gpiohandle_request req;
int ret;
memset(&req, 0, sizeof(req));
req.lines = 1;
req.lineoffsets[0] = offset;
req.flags = flags;
strcpy(req.consumer_label, CONSUMER);
if (flags & GPIOHANDLE_REQUEST_OUTPUT)
req.default_values[0] = val;
ret = ioctl(cfd, GPIO_GET_LINEHANDLE_IOCTL, &req);
if (ret == -1)
return -errno;
return req.fd;
}
static int get_value_v1(int lfd)
{
struct gpiohandle_data vals;
int ret;
memset(&vals, 0, sizeof(vals));
ret = ioctl(lfd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, &vals);
if (ret == -1)
return -errno;
return vals.values[0];
}
static void usage(char *prog)
{
printf("Usage: %s [-l] [-b <bias>] [-s <value>] [-u <uAPI>] <gpiochip> <offset>\n", prog);
printf(" -b: set line bias to one of pull-down, pull-up, disabled\n");
printf(" (default is to leave bias unchanged):\n");
printf(" -l: set line active low (default is active high)\n");
printf(" -s: set line value (default is to get line value)\n");
printf(" -u: uAPI version to use (default is 2)\n");
exit(-1);
}
static int wait_signal(void)
{
int sig;
sigset_t wset;
sigemptyset(&wset);
sigaddset(&wset, SIGHUP);
sigaddset(&wset, SIGINT);
sigaddset(&wset, SIGTERM);
sigwait(&wset, &sig);
return sig;
}
int main(int argc, char *argv[])
{
char *chip;
int opt, ret, cfd, lfd;
unsigned int offset, val = 0, abiv;
uint32_t flags_v1;
uint64_t flags_v2;
abiv = 2;
ret = 0;
flags_v1 = GPIOHANDLE_REQUEST_INPUT;
flags_v2 = GPIO_V2_LINE_FLAG_INPUT;
while ((opt = getopt(argc, argv, "lb:s:u:")) != -1) {
switch (opt) {
case 'l':
flags_v1 |= GPIOHANDLE_REQUEST_ACTIVE_LOW;
flags_v2 |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
break;
case 'b':
if (strcmp("pull-up", optarg) == 0) {
flags_v1 |= GPIOHANDLE_REQUEST_BIAS_PULL_UP;
flags_v2 |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
} else if (strcmp("pull-down", optarg) == 0) {
flags_v1 |= GPIOHANDLE_REQUEST_BIAS_PULL_DOWN;
flags_v2 |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
} else if (strcmp("disabled", optarg) == 0) {
flags_v1 |= GPIOHANDLE_REQUEST_BIAS_DISABLE;
flags_v2 |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
}
break;
case 's':
val = atoi(optarg);
flags_v1 &= ~GPIOHANDLE_REQUEST_INPUT;
flags_v1 |= GPIOHANDLE_REQUEST_OUTPUT;
flags_v2 &= ~GPIO_V2_LINE_FLAG_INPUT;
flags_v2 |= GPIO_V2_LINE_FLAG_OUTPUT;
break;
case 'u':
abiv = atoi(optarg);
break;
default:
usage(argv[0]);
}
}
if (argc < optind + 2)
usage(argv[0]);
chip = argv[optind];
offset = atoi(argv[optind + 1]);
cfd = open(chip, 0);
if (cfd == -1) {
fprintf(stderr, "Failed to open %s: %s\n", chip, strerror(errno));
return -errno;
}
if (abiv == 1)
lfd = request_line_v1(cfd, offset, flags_v1, val);
else
lfd = request_line_v2(cfd, offset, flags_v2, val);
close(cfd);
if (lfd < 0) {
fprintf(stderr, "Failed to request %s:%d: %s\n", chip, offset, strerror(-lfd));
return lfd;
}
if (flags_v2 & GPIO_V2_LINE_FLAG_OUTPUT) {
wait_signal();
} else {
if (abiv == 1)
ret = get_value_v1(lfd);
else
ret = get_value_v2(lfd);
}
close(lfd);
return ret;
}
| linux-master | tools/testing/selftests/gpio/gpio-mockup-cdev.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Resctrl tests
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <[email protected]>,
* Fenghua Yu <[email protected]>
*/
#include "resctrl.h"
#define BENCHMARK_ARGS 64
#define BENCHMARK_ARG_SIZE 64
static int detect_vendor(void)
{
FILE *inf = fopen("/proc/cpuinfo", "r");
int vendor_id = 0;
char *s = NULL;
char *res;
if (!inf)
return vendor_id;
res = fgrep(inf, "vendor_id");
if (res)
s = strchr(res, ':');
if (s && !strcmp(s, ": GenuineIntel\n"))
vendor_id = ARCH_INTEL;
else if (s && !strcmp(s, ": AuthenticAMD\n"))
vendor_id = ARCH_AMD;
fclose(inf);
free(res);
return vendor_id;
}
int get_vendor(void)
{
static int vendor = -1;
if (vendor == -1)
vendor = detect_vendor();
if (vendor == 0)
ksft_print_msg("Can not get vendor info...\n");
return vendor;
}
static void cmd_help(void)
{
printf("usage: resctrl_tests [-h] [-b \"benchmark_cmd [options]\"] [-t test list] [-n no_of_bits]\n");
printf("\t-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CMT\n");
printf("\t default benchmark is builtin fill_buf\n");
printf("\t-t test list: run tests specified in the test list, ");
printf("e.g. -t mbm,mba,cmt,cat\n");
printf("\t-n no_of_bits: run cache tests using specified no of bits in cache bit mask\n");
printf("\t-p cpu_no: specify CPU number to run the test. 1 is default\n");
printf("\t-h: help\n");
}
void tests_cleanup(void)
{
mbm_test_cleanup();
mba_test_cleanup();
cmt_test_cleanup();
cat_test_cleanup();
}
static void run_mbm_test(char **benchmark_cmd, size_t span,
int cpu_no, char *bw_report)
{
int res;
ksft_print_msg("Starting MBM BW change ...\n");
res = mount_resctrlfs();
if (res) {
ksft_exit_fail_msg("Failed to mount resctrl FS\n");
return;
}
if (!validate_resctrl_feature_request(MBM_STR) || (get_vendor() != ARCH_INTEL)) {
ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n");
goto umount;
}
res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
ksft_test_result(!res, "MBM: bw change\n");
if ((get_vendor() == ARCH_INTEL) && res)
ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
umount:
umount_resctrlfs();
}
static void run_mba_test(char **benchmark_cmd, int cpu_no, char *bw_report)
{
int res;
ksft_print_msg("Starting MBA Schemata change ...\n");
res = mount_resctrlfs();
if (res) {
ksft_exit_fail_msg("Failed to mount resctrl FS\n");
return;
}
if (!validate_resctrl_feature_request(MBA_STR) || (get_vendor() != ARCH_INTEL)) {
ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n");
goto umount;
}
res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd);
ksft_test_result(!res, "MBA: schemata change\n");
umount:
umount_resctrlfs();
}
static void run_cmt_test(char **benchmark_cmd, int cpu_no)
{
int res;
ksft_print_msg("Starting CMT test ...\n");
res = mount_resctrlfs();
if (res) {
ksft_exit_fail_msg("Failed to mount resctrl FS\n");
return;
}
if (!validate_resctrl_feature_request(CMT_STR)) {
ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n");
goto umount;
}
res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd);
ksft_test_result(!res, "CMT: test\n");
if ((get_vendor() == ARCH_INTEL) && res)
ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
umount:
umount_resctrlfs();
}
static void run_cat_test(int cpu_no, int no_of_bits)
{
int res;
ksft_print_msg("Starting CAT test ...\n");
res = mount_resctrlfs();
if (res) {
ksft_exit_fail_msg("Failed to mount resctrl FS\n");
return;
}
if (!validate_resctrl_feature_request(CAT_STR)) {
ksft_test_result_skip("Hardware does not support CAT or CAT is disabled\n");
goto umount;
}
res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
ksft_test_result(!res, "CAT: test\n");
umount:
umount_resctrlfs();
}
int main(int argc, char **argv)
{
bool has_ben = false, mbm_test = true, mba_test = true, cmt_test = true;
char *benchmark_cmd[BENCHMARK_ARGS], bw_report[64], bm_type[64];
char benchmark_cmd_area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE];
int c, cpu_no = 1, argc_new = argc, i, no_of_bits = 0;
int ben_ind, ben_count, tests = 0;
size_t span = 250 * MB;
bool cat_test = true;
for (i = 0; i < argc; i++) {
if (strcmp(argv[i], "-b") == 0) {
ben_ind = i + 1;
ben_count = argc - ben_ind;
argc_new = ben_ind - 1;
has_ben = true;
break;
}
}
while ((c = getopt(argc_new, argv, "ht:b:n:p:")) != -1) {
char *token;
switch (c) {
case 't':
token = strtok(optarg, ",");
mbm_test = false;
mba_test = false;
cmt_test = false;
cat_test = false;
while (token) {
if (!strncmp(token, MBM_STR, sizeof(MBM_STR))) {
mbm_test = true;
tests++;
} else if (!strncmp(token, MBA_STR, sizeof(MBA_STR))) {
mba_test = true;
tests++;
} else if (!strncmp(token, CMT_STR, sizeof(CMT_STR))) {
cmt_test = true;
tests++;
} else if (!strncmp(token, CAT_STR, sizeof(CAT_STR))) {
cat_test = true;
tests++;
} else {
printf("invalid argument\n");
return -1;
}
token = strtok(NULL, ",");
}
break;
case 'p':
cpu_no = atoi(optarg);
break;
case 'n':
no_of_bits = atoi(optarg);
if (no_of_bits <= 0) {
printf("Bail out! invalid argument for no_of_bits\n");
return -1;
}
break;
case 'h':
cmd_help();
return 0;
default:
printf("invalid argument\n");
return -1;
}
}
ksft_print_header();
/*
* Typically we need root privileges, because:
* 1. We write to resctrl FS
* 2. We execute perf commands
*/
if (geteuid() != 0)
return ksft_exit_skip("Not running as root. Skipping...\n");
if (has_ben) {
/* Extract benchmark command from command line. */
for (i = ben_ind; i < argc; i++) {
benchmark_cmd[i - ben_ind] = benchmark_cmd_area[i];
sprintf(benchmark_cmd[i - ben_ind], "%s", argv[i]);
}
benchmark_cmd[ben_count] = NULL;
} else {
/* If no benchmark is given by "-b" argument, use fill_buf. */
for (i = 0; i < 5; i++)
benchmark_cmd[i] = benchmark_cmd_area[i];
strcpy(benchmark_cmd[0], "fill_buf");
sprintf(benchmark_cmd[1], "%zu", span);
strcpy(benchmark_cmd[2], "1");
strcpy(benchmark_cmd[3], "0");
strcpy(benchmark_cmd[4], "false");
benchmark_cmd[5] = NULL;
}
sprintf(bw_report, "reads");
sprintf(bm_type, "fill_buf");
if (!check_resctrlfs_support())
return ksft_exit_skip("resctrl FS does not exist. Enable X86_CPU_RESCTRL config option.\n");
if (umount_resctrlfs())
return ksft_exit_skip("resctrl FS unmount failed.\n");
filter_dmesg();
ksft_set_plan(tests ? : 4);
if (mbm_test)
run_mbm_test(benchmark_cmd, span, cpu_no, bw_report);
if (mba_test)
run_mba_test(benchmark_cmd, cpu_no, bw_report);
if (cmt_test)
run_cmt_test(benchmark_cmd, cpu_no);
if (cat_test)
run_cat_test(cpu_no, no_of_bits);
ksft_finished();
}
| linux-master | tools/testing/selftests/resctrl/resctrl_tests.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cache Monitoring Technology (CMT) test
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <[email protected]>,
* Fenghua Yu <[email protected]>
*/
#include "resctrl.h"
#include <unistd.h>
#define RESULT_FILE_NAME "result_cmt"
#define NUM_OF_RUNS 5
#define MAX_DIFF 2000000
#define MAX_DIFF_PERCENT 15
static int cmt_setup(struct resctrl_val_param *p)
{
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
return END_OF_TESTS;
p->num_of_runs++;
return 0;
}
static int check_results(struct resctrl_val_param *param, int no_of_bits)
{
char *token_array[8], temp[512];
unsigned long sum_llc_occu_resc = 0;
int runs = 0;
FILE *fp;
ksft_print_msg("Checking for pass/fail\n");
fp = fopen(param->filename, "r");
if (!fp) {
perror("# Error in opening file\n");
return errno;
}
while (fgets(temp, sizeof(temp), fp)) {
char *token = strtok(temp, ":\t");
int fields = 0;
while (token) {
token_array[fields++] = token;
token = strtok(NULL, ":\t");
}
/* Field 3 is llc occ resc value */
if (runs > 0)
sum_llc_occu_resc += strtoul(token_array[3], NULL, 0);
runs++;
}
fclose(fp);
return show_cache_info(sum_llc_occu_resc, no_of_bits, param->span,
MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
true, true);
}
void cmt_test_cleanup(void)
{
remove(RESULT_FILE_NAME);
}
int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
{
unsigned long cache_size = 0;
unsigned long long_mask;
char cbm_mask[256];
int count_of_bits;
int ret;
if (!validate_resctrl_feature_request(CMT_STR))
return -1;
ret = get_cbm_mask("L3", cbm_mask);
if (ret)
return ret;
long_mask = strtoul(cbm_mask, NULL, 16);
ret = get_cache_size(cpu_no, "L3", &cache_size);
if (ret)
return ret;
ksft_print_msg("Cache size :%lu\n", cache_size);
count_of_bits = count_bits(long_mask);
if (n < 1 || n > count_of_bits) {
ksft_print_msg("Invalid input value for numbr_of_bits n!\n");
ksft_print_msg("Please enter value in range 1 to %d\n", count_of_bits);
return -1;
}
struct resctrl_val_param param = {
.resctrl_val = CMT_STR,
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
.filename = RESULT_FILE_NAME,
.mask = ~(long_mask << n) & long_mask,
.span = cache_size * n / count_of_bits,
.num_of_runs = 0,
.setup = cmt_setup,
};
if (strcmp(benchmark_cmd[0], "fill_buf") == 0)
sprintf(benchmark_cmd[1], "%zu", param.span);
remove(RESULT_FILE_NAME);
ret = resctrl_val(benchmark_cmd, &param);
if (ret)
goto out;
ret = check_results(&param, n);
out:
cmt_test_cleanup();
return ret;
}
| linux-master | tools/testing/selftests/resctrl/cmt_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Memory Bandwidth Allocation (MBA) test
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <[email protected]>,
* Fenghua Yu <[email protected]>
*/
#include "resctrl.h"
#define RESULT_FILE_NAME "result_mba"
#define NUM_OF_RUNS 5
#define MAX_DIFF_PERCENT 5
#define ALLOCATION_MAX 100
#define ALLOCATION_MIN 10
#define ALLOCATION_STEP 10
/*
 * Change schemata percentage from 100% to 10%. Write schemata to the specified
* con_mon grp, mon_grp in resctrl FS.
* For each allocation, run 5 times in order to get average values.
*/
static int mba_setup(struct resctrl_val_param *p)
{
static int runs_per_allocation, allocation = 100;
char allocation_str[64];
int ret;
if (runs_per_allocation >= NUM_OF_RUNS)
runs_per_allocation = 0;
/* Only set up schemata once every NUM_OF_RUNS of allocations */
if (runs_per_allocation++ != 0)
return 0;
if (allocation < ALLOCATION_MIN || allocation > ALLOCATION_MAX)
return END_OF_TESTS;
sprintf(allocation_str, "%d", allocation);
ret = write_schemata(p->ctrlgrp, allocation_str, p->cpu_no,
p->resctrl_val);
if (ret < 0)
return ret;
allocation -= ALLOCATION_STEP;
return 0;
}
static bool show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc)
{
int allocation, runs;
bool ret = false;
ksft_print_msg("Results are displayed in (MB)\n");
/* Memory bandwidth from 100% down to 10% */
for (allocation = 0; allocation < ALLOCATION_MAX / ALLOCATION_STEP;
allocation++) {
unsigned long avg_bw_imc, avg_bw_resc;
unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
int avg_diff_per;
float avg_diff;
/*
* The first run is discarded due to inaccurate value from
* phase transition.
*/
for (runs = NUM_OF_RUNS * allocation + 1;
runs < NUM_OF_RUNS * allocation + NUM_OF_RUNS ; runs++) {
sum_bw_imc += bw_imc[runs];
sum_bw_resc += bw_resc[runs];
}
avg_bw_imc = sum_bw_imc / (NUM_OF_RUNS - 1);
avg_bw_resc = sum_bw_resc / (NUM_OF_RUNS - 1);
avg_diff = (float)labs(avg_bw_resc - avg_bw_imc) / avg_bw_imc;
avg_diff_per = (int)(avg_diff * 100);
ksft_print_msg("%s Check MBA diff within %d%% for schemata %u\n",
avg_diff_per > MAX_DIFF_PERCENT ?
"Fail:" : "Pass:",
MAX_DIFF_PERCENT,
ALLOCATION_MAX - ALLOCATION_STEP * allocation);
ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per);
ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc);
ksft_print_msg("avg_bw_resc: %lu\n", avg_bw_resc);
if (avg_diff_per > MAX_DIFF_PERCENT)
ret = true;
}
ksft_print_msg("%s Check schemata change using MBA\n",
ret ? "Fail:" : "Pass:");
if (ret)
ksft_print_msg("At least one test failed\n");
return ret;
}
static int check_results(void)
{
char *token_array[8], output[] = RESULT_FILE_NAME, temp[512];
unsigned long bw_imc[1024], bw_resc[1024];
int runs;
FILE *fp;
fp = fopen(output, "r");
if (!fp) {
perror(output);
return errno;
}
runs = 0;
while (fgets(temp, sizeof(temp), fp)) {
char *token = strtok(temp, ":\t");
int fields = 0;
while (token) {
token_array[fields++] = token;
token = strtok(NULL, ":\t");
}
/* Field 3 is perf imc value */
bw_imc[runs] = strtoul(token_array[3], NULL, 0);
/* Field 5 is resctrl value */
bw_resc[runs] = strtoul(token_array[5], NULL, 0);
runs++;
}
fclose(fp);
return show_mba_info(bw_imc, bw_resc);
}
void mba_test_cleanup(void)
{
remove(RESULT_FILE_NAME);
}
int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
{
struct resctrl_val_param param = {
.resctrl_val = MBA_STR,
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
.filename = RESULT_FILE_NAME,
.bw_report = bw_report,
.setup = mba_setup
};
int ret;
remove(RESULT_FILE_NAME);
ret = resctrl_val(benchmark_cmd, ¶m);
if (ret)
goto out;
ret = check_results();
out:
mba_test_cleanup();
return ret;
}
| linux-master | tools/testing/selftests/resctrl/mba_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cache Allocation Technology (CAT) test
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <[email protected]>,
* Fenghua Yu <[email protected]>
*/
#include "resctrl.h"
#include <unistd.h>
#define RESULT_FILE_NAME1 "result_cat1"
#define RESULT_FILE_NAME2 "result_cat2"
#define NUM_OF_RUNS 5
#define MAX_DIFF_PERCENT 4
#define MAX_DIFF 1000000
/*
 * Change schemata. Write schemata to the specified
 * con_mon grp in resctrl FS.
 * Run NUM_OF_RUNS times in order to get average values.
*/
static int cat_setup(struct resctrl_val_param *p)
{
char schemata[64];
int ret = 0;
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
return END_OF_TESTS;
if (p->num_of_runs == 0) {
sprintf(schemata, "%lx", p->mask);
ret = write_schemata(p->ctrlgrp, schemata, p->cpu_no,
p->resctrl_val);
}
p->num_of_runs++;
return ret;
}
static int check_results(struct resctrl_val_param *param)
{
char *token_array[8], temp[512];
unsigned long sum_llc_perf_miss = 0;
int runs = 0, no_of_bits = 0;
FILE *fp;
ksft_print_msg("Checking for pass/fail\n");
fp = fopen(param->filename, "r");
if (!fp) {
perror("# Cannot open file");
return errno;
}
while (fgets(temp, sizeof(temp), fp)) {
char *token = strtok(temp, ":\t");
int fields = 0;
while (token) {
token_array[fields++] = token;
token = strtok(NULL, ":\t");
}
/*
* Discard the first value which is inaccurate due to monitoring
* setup transition phase.
*/
if (runs > 0)
sum_llc_perf_miss += strtoul(token_array[3], NULL, 0);
runs++;
}
fclose(fp);
no_of_bits = count_bits(param->mask);
return show_cache_info(sum_llc_perf_miss, no_of_bits, param->span / 64,
MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
get_vendor() == ARCH_INTEL, false);
}
void cat_test_cleanup(void)
{
remove(RESULT_FILE_NAME1);
remove(RESULT_FILE_NAME2);
}
int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
{
unsigned long l_mask, l_mask_1;
int ret, pipefd[2], sibling_cpu_no;
unsigned long cache_size = 0;
unsigned long long_mask;
char cbm_mask[256];
int count_of_bits;
char pipe_message;
/* Get default cbm mask for L3/L2 cache */
ret = get_cbm_mask(cache_type, cbm_mask);
if (ret)
return ret;
long_mask = strtoul(cbm_mask, NULL, 16);
/* Get L3/L2 cache size */
ret = get_cache_size(cpu_no, cache_type, &cache_size);
if (ret)
return ret;
ksft_print_msg("Cache size :%lu\n", cache_size);
	/* Get max number of bits from the default cbm mask */
count_of_bits = count_bits(long_mask);
if (!n)
n = count_of_bits / 2;
if (n > count_of_bits - 1) {
ksft_print_msg("Invalid input value for no_of_bits n!\n");
ksft_print_msg("Please enter value in range 1 to %d\n",
count_of_bits - 1);
return -1;
}
/* Get core id from same socket for running another thread */
sibling_cpu_no = get_core_sibling(cpu_no);
if (sibling_cpu_no < 0)
return -1;
struct resctrl_val_param param = {
.resctrl_val = CAT_STR,
.cpu_no = cpu_no,
.setup = cat_setup,
};
l_mask = long_mask >> n;
l_mask_1 = ~l_mask & long_mask;
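	/*
	 * Worked example of the mask split above (hypothetical values): for
	 * long_mask = 0xfff (count_of_bits = 12) and n = 4, l_mask = 0xff
	 * gives the parent 8 bits while l_mask_1 = 0xf00 gives the child the
	 * 4 remaining, non-overlapping bits.
	 */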
	/*
	 * Set param values for the parent process, which will be allocated a
	 * bitmask with (max_bits - n) bits
	 */
param.span = cache_size * (count_of_bits - n) / count_of_bits;
strcpy(param.ctrlgrp, "c2");
strcpy(param.mongrp, "m2");
strcpy(param.filename, RESULT_FILE_NAME2);
param.mask = l_mask;
param.num_of_runs = 0;
if (pipe(pipefd)) {
perror("# Unable to create pipe");
return errno;
}
fflush(stdout);
bm_pid = fork();
	/*
	 * Set param values for the child process, which will be allocated a
	 * bitmask with n bits
	 */
if (bm_pid == 0) {
param.mask = l_mask_1;
strcpy(param.ctrlgrp, "c1");
strcpy(param.mongrp, "m1");
param.span = cache_size * n / count_of_bits;
strcpy(param.filename, RESULT_FILE_NAME1);
param.num_of_runs = 0;
param.cpu_no = sibling_cpu_no;
} else {
ret = signal_handler_register();
if (ret) {
kill(bm_pid, SIGKILL);
goto out;
}
}
remove(param.filename);
ret = cat_val(¶m);
if (ret == 0)
ret = check_results(¶m);
if (bm_pid == 0) {
/* Tell parent that child is ready */
close(pipefd[0]);
pipe_message = 1;
if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) <
sizeof(pipe_message))
/*
* Just print the error message.
* Let while(1) run and wait for itself to be killed.
*/
perror("# failed signaling parent process");
close(pipefd[1]);
while (1)
;
} else {
/* Parent waits for child to be ready. */
close(pipefd[1]);
pipe_message = 0;
while (pipe_message != 1) {
if (read(pipefd[0], &pipe_message,
sizeof(pipe_message)) < sizeof(pipe_message)) {
perror("# failed reading from child process");
break;
}
}
close(pipefd[0]);
kill(bm_pid, SIGKILL);
signal_handler_unregister();
}
out:
cat_test_cleanup();
return ret;
}
| linux-master | tools/testing/selftests/resctrl/cat_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Memory Bandwidth Monitoring (MBM) test
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <[email protected]>,
* Fenghua Yu <[email protected]>
*/
#include "resctrl.h"
#define RESULT_FILE_NAME "result_mbm"
#define MAX_DIFF_PERCENT 5
#define NUM_OF_RUNS 5
static int
show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, size_t span)
{
unsigned long avg_bw_imc = 0, avg_bw_resc = 0;
unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
int runs, ret, avg_diff_per;
float avg_diff = 0;
/*
* Discard the first value which is inaccurate due to monitoring setup
* transition phase.
*/
for (runs = 1; runs < NUM_OF_RUNS ; runs++) {
sum_bw_imc += bw_imc[runs];
sum_bw_resc += bw_resc[runs];
}
		avg_bw_imc = sum_bw_imc / (NUM_OF_RUNS - 1);
		avg_bw_resc = sum_bw_resc / (NUM_OF_RUNS - 1);
avg_diff = (float)labs(avg_bw_resc - avg_bw_imc) / avg_bw_imc;
avg_diff_per = (int)(avg_diff * 100);
ret = avg_diff_per > MAX_DIFF_PERCENT;
ksft_print_msg("%s Check MBM diff within %d%%\n",
ret ? "Fail:" : "Pass:", MAX_DIFF_PERCENT);
ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per);
ksft_print_msg("Span (MB): %zu\n", span / MB);
ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc);
ksft_print_msg("avg_bw_resc: %lu\n", avg_bw_resc);
return ret;
}
static int check_results(size_t span)
{
unsigned long bw_imc[NUM_OF_RUNS], bw_resc[NUM_OF_RUNS];
char temp[1024], *token_array[8];
char output[] = RESULT_FILE_NAME;
int runs, ret;
FILE *fp;
ksft_print_msg("Checking for pass/fail\n");
fp = fopen(output, "r");
if (!fp) {
perror(output);
return errno;
}
runs = 0;
while (fgets(temp, sizeof(temp), fp)) {
char *token = strtok(temp, ":\t");
int i = 0;
while (token) {
token_array[i++] = token;
token = strtok(NULL, ":\t");
}
bw_resc[runs] = strtoul(token_array[5], NULL, 0);
bw_imc[runs] = strtoul(token_array[3], NULL, 0);
runs++;
}
ret = show_bw_info(bw_imc, bw_resc, span);
fclose(fp);
return ret;
}
static int mbm_setup(struct resctrl_val_param *p)
{
int ret = 0;
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
return END_OF_TESTS;
	/* Set up schemata with 100% allocation on the first run. */
if (p->num_of_runs == 0)
ret = write_schemata(p->ctrlgrp, "100", p->cpu_no,
p->resctrl_val);
p->num_of_runs++;
return ret;
}
void mbm_test_cleanup(void)
{
remove(RESULT_FILE_NAME);
}
int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd)
{
struct resctrl_val_param param = {
.resctrl_val = MBM_STR,
.ctrlgrp = "c1",
.mongrp = "m1",
.span = span,
.cpu_no = cpu_no,
.filename = RESULT_FILE_NAME,
.bw_report = bw_report,
.setup = mbm_setup
};
int ret;
remove(RESULT_FILE_NAME);
ret = resctrl_val(benchmark_cmd, ¶m);
if (ret)
goto out;
ret = check_results(span);
out:
mbm_test_cleanup();
return ret;
}
| linux-master | tools/testing/selftests/resctrl/mbm_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdint.h>
#include "resctrl.h"
struct read_format {
__u64 nr; /* The number of events */
struct {
__u64 value; /* The value of the event */
} values[2];
};
static struct perf_event_attr pea_llc_miss;
static struct read_format rf_cqm;
static int fd_lm;
char llc_occup_path[1024];
static void initialize_perf_event_attr(void)
{
pea_llc_miss.type = PERF_TYPE_HARDWARE;
pea_llc_miss.size = sizeof(struct perf_event_attr);
pea_llc_miss.read_format = PERF_FORMAT_GROUP;
pea_llc_miss.exclude_kernel = 1;
pea_llc_miss.exclude_hv = 1;
pea_llc_miss.exclude_idle = 1;
pea_llc_miss.exclude_callchain_kernel = 1;
pea_llc_miss.inherit = 1;
pea_llc_miss.exclude_guest = 1;
pea_llc_miss.disabled = 1;
}
static void ioctl_perf_event_ioc_reset_enable(void)
{
ioctl(fd_lm, PERF_EVENT_IOC_RESET, 0);
ioctl(fd_lm, PERF_EVENT_IOC_ENABLE, 0);
}
static int perf_event_open_llc_miss(pid_t pid, int cpu_no)
{
fd_lm = perf_event_open(&pea_llc_miss, pid, cpu_no, -1,
PERF_FLAG_FD_CLOEXEC);
if (fd_lm == -1) {
perror("Error opening leader");
ctrlc_handler(0, NULL, NULL);
return -1;
}
return 0;
}
static void initialize_llc_perf(void)
{
memset(&pea_llc_miss, 0, sizeof(struct perf_event_attr));
memset(&rf_cqm, 0, sizeof(struct read_format));
/* Initialize perf_event_attr structures for HW_CACHE_MISSES */
initialize_perf_event_attr();
pea_llc_miss.config = PERF_COUNT_HW_CACHE_MISSES;
rf_cqm.nr = 1;
}
static int reset_enable_llc_perf(pid_t pid, int cpu_no)
{
int ret = 0;
ret = perf_event_open_llc_miss(pid, cpu_no);
if (ret < 0)
return ret;
/* Start counters to log values */
ioctl_perf_event_ioc_reset_enable();
return 0;
}
/*
* get_llc_perf: llc cache miss through perf events
* @llc_perf_miss: LLC miss counter that is filled on success
*
* Perf events like HW_CACHE_MISSES could be used to validate number of
* cache lines allocated.
*
* Return: =0 on success. <0 on failure.
*/
static int get_llc_perf(unsigned long *llc_perf_miss)
{
__u64 total_misses;
int ret;
/* Stop counters after one span to get miss rate */
ioctl(fd_lm, PERF_EVENT_IOC_DISABLE, 0);
ret = read(fd_lm, &rf_cqm, sizeof(struct read_format));
if (ret == -1) {
perror("Could not get llc misses through perf");
return -1;
}
total_misses = rf_cqm.values[0].value;
*llc_perf_miss = total_misses;
return 0;
}
/*
* Get LLC Occupancy as reported by RESCTRL FS
* For CMT,
* 1. If con_mon grp and mon grp given, then read from mon grp in
* con_mon grp
* 2. If only con_mon grp given, then read from con_mon grp
* 3. If both not given, then read from root con_mon grp
* For CAT,
* 1. If con_mon grp given, then read from it
* 2. If con_mon grp not given, then read from root con_mon grp
*
* Return: =0 on success. <0 on failure.
*/
static int get_llc_occu_resctrl(unsigned long *llc_occupancy)
{
FILE *fp;
fp = fopen(llc_occup_path, "r");
if (!fp) {
perror("Failed to open results file");
return errno;
}
if (fscanf(fp, "%lu", llc_occupancy) <= 0) {
perror("Could not get llc occupancy");
fclose(fp);
return -1;
}
fclose(fp);
return 0;
}
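/*
 * For reference, llc_occup_path as built by set_cmt_path() looks like
 * (hypothetical group names):
 *
 *	/sys/fs/resctrl/c1/mon_groups/m1/mon_data/mon_L3_00/llc_occupancy
 *
 * and the file holds a single decimal byte count.
 */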
/*
* print_results_cache: the cache results are stored in a file
* @filename: file that stores the results
* @bm_pid: child pid that runs benchmark
 * @llc_value:	perf miss value / llc occupancy value reported by resctrl FS
*
* Return: 0 on success. non-zero on failure.
*/
static int print_results_cache(char *filename, int bm_pid,
unsigned long llc_value)
{
FILE *fp;
if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
printf("Pid: %d \t LLC_value: %lu\n", bm_pid,
llc_value);
} else {
fp = fopen(filename, "a");
if (!fp) {
perror("Cannot open results file");
return errno;
}
fprintf(fp, "Pid: %d \t llc_value: %lu\n", bm_pid, llc_value);
fclose(fp);
}
return 0;
}
int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
{
unsigned long llc_perf_miss = 0, llc_occu_resc = 0, llc_value = 0;
int ret;
/*
* Measure cache miss from perf.
*/
if (!strncmp(param->resctrl_val, CAT_STR, sizeof(CAT_STR))) {
ret = get_llc_perf(&llc_perf_miss);
if (ret < 0)
return ret;
llc_value = llc_perf_miss;
}
/*
* Measure llc occupancy from resctrl.
*/
if (!strncmp(param->resctrl_val, CMT_STR, sizeof(CMT_STR))) {
ret = get_llc_occu_resctrl(&llc_occu_resc);
if (ret < 0)
return ret;
llc_value = llc_occu_resc;
}
ret = print_results_cache(param->filename, bm_pid, llc_value);
if (ret)
return ret;
return 0;
}
/*
 * cat_val: execute benchmark and measure LLC occupancy from resctrl
 * and perf cache misses for the benchmark
 * @param: parameters passed to cat_val()
*
* Return: 0 on success. non-zero on failure.
*/
int cat_val(struct resctrl_val_param *param)
{
int memflush = 1, operation = 0, ret = 0;
char *resctrl_val = param->resctrl_val;
pid_t bm_pid;
if (strcmp(param->filename, "") == 0)
sprintf(param->filename, "stdio");
bm_pid = getpid();
/* Taskset benchmark to specified cpu */
ret = taskset_benchmark(bm_pid, param->cpu_no);
if (ret)
return ret;
/* Write benchmark to specified con_mon grp, mon_grp in resctrl FS*/
ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
resctrl_val);
if (ret)
return ret;
initialize_llc_perf();
/* Test runs until the callback setup() tells the test to stop. */
while (1) {
ret = param->setup(param);
if (ret == END_OF_TESTS) {
ret = 0;
break;
}
if (ret < 0)
break;
ret = reset_enable_llc_perf(bm_pid, param->cpu_no);
if (ret)
break;
if (run_fill_buf(param->span, memflush, operation, true)) {
fprintf(stderr, "Error-running fill buffer\n");
ret = -1;
goto pe_close;
}
sleep(1);
ret = measure_cache_vals(param, bm_pid);
if (ret)
goto pe_close;
}
return ret;
pe_close:
close(fd_lm);
return ret;
}
/*
* show_cache_info: show cache test result information
* @sum_llc_val: sum of LLC cache result data
* @no_of_bits: number of bits
* @cache_span: cache span in bytes for CMT or in lines for CAT
* @max_diff: max difference
* @max_diff_percent: max difference percentage
* @num_of_runs: number of runs
* @platform: show test information on this platform
* @cmt: CMT test or CAT test
*
* Return: 0 on success. non-zero on failure.
*/
int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
size_t cache_span, unsigned long max_diff,
unsigned long max_diff_percent, unsigned long num_of_runs,
bool platform, bool cmt)
{
unsigned long avg_llc_val = 0;
float diff_percent;
long avg_diff = 0;
int ret;
avg_llc_val = sum_llc_val / num_of_runs;
avg_diff = (long)abs(cache_span - avg_llc_val);
diff_percent = ((float)cache_span - avg_llc_val) / cache_span * 100;
ret = platform && abs((int)diff_percent) > max_diff_percent &&
(cmt ? (abs(avg_diff) > max_diff) : true);
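	/*
	 * Worked example of the check above (hypothetical numbers): for a
	 * CAT run on Intel with cache_span = 1000 lines and avg_llc_val =
	 * 950, diff_percent is 5, which exceeds the CAT test's
	 * max_diff_percent of 4 and therefore fails; the avg_diff term is
	 * only consulted for CMT.
	 */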
ksft_print_msg("%s Check cache miss rate within %d%%\n",
ret ? "Fail:" : "Pass:", max_diff_percent);
ksft_print_msg("Percent diff=%d\n", abs((int)diff_percent));
ksft_print_msg("Number of bits: %d\n", no_of_bits);
ksft_print_msg("Average LLC val: %lu\n", avg_llc_val);
ksft_print_msg("Cache span (%s): %zu\n", cmt ? "bytes" : "lines",
cache_span);
return ret;
}
| linux-master | tools/testing/selftests/resctrl/cache.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Memory bandwidth monitoring and allocation library
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <[email protected]>,
* Fenghua Yu <[email protected]>
*/
#include "resctrl.h"
#define UNCORE_IMC "uncore_imc"
#define READ_FILE_NAME "events/cas_count_read"
#define WRITE_FILE_NAME "events/cas_count_write"
#define DYN_PMU_PATH "/sys/bus/event_source/devices"
#define SCALE 0.00006103515625
#define MAX_IMCS 20
#define MAX_TOKENS 5
#define READ 0
#define WRITE 1
#define CON_MON_MBM_LOCAL_BYTES_PATH \
"%s/%s/mon_groups/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
#define CON_MBM_LOCAL_BYTES_PATH \
"%s/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
#define MON_MBM_LOCAL_BYTES_PATH \
"%s/mon_groups/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
#define MBM_LOCAL_BYTES_PATH \
"%s/mon_data/mon_L3_%02d/mbm_local_bytes"
#define CON_MON_LCC_OCCUP_PATH \
"%s/%s/mon_groups/%s/mon_data/mon_L3_%02d/llc_occupancy"
#define CON_LCC_OCCUP_PATH \
"%s/%s/mon_data/mon_L3_%02d/llc_occupancy"
#define MON_LCC_OCCUP_PATH \
"%s/mon_groups/%s/mon_data/mon_L3_%02d/llc_occupancy"
#define LCC_OCCUP_PATH \
"%s/mon_data/mon_L3_%02d/llc_occupancy"
struct membw_read_format {
__u64 value; /* The value of the event */
__u64 time_enabled; /* if PERF_FORMAT_TOTAL_TIME_ENABLED */
__u64 time_running; /* if PERF_FORMAT_TOTAL_TIME_RUNNING */
__u64 id; /* if PERF_FORMAT_ID */
};
struct imc_counter_config {
__u32 type;
__u64 event;
__u64 umask;
struct perf_event_attr pe;
struct membw_read_format return_value;
int fd;
};
static char mbm_total_path[1024];
static int imcs;
static struct imc_counter_config imc_counters_config[MAX_IMCS][2];
void membw_initialize_perf_event_attr(int i, int j)
{
memset(&imc_counters_config[i][j].pe, 0,
sizeof(struct perf_event_attr));
imc_counters_config[i][j].pe.type = imc_counters_config[i][j].type;
imc_counters_config[i][j].pe.size = sizeof(struct perf_event_attr);
imc_counters_config[i][j].pe.disabled = 1;
imc_counters_config[i][j].pe.inherit = 1;
imc_counters_config[i][j].pe.exclude_guest = 0;
imc_counters_config[i][j].pe.config =
imc_counters_config[i][j].umask << 8 |
imc_counters_config[i][j].event;
imc_counters_config[i][j].pe.sample_type = PERF_SAMPLE_IDENTIFIER;
imc_counters_config[i][j].pe.read_format =
PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
}
void membw_ioctl_perf_event_ioc_reset_enable(int i, int j)
{
ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_RESET, 0);
ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_ENABLE, 0);
}
void membw_ioctl_perf_event_ioc_disable(int i, int j)
{
ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_DISABLE, 0);
}
/*
* get_event_and_umask: Parse config into event and umask
* @cas_count_cfg: Config
* @count: iMC number
* @op: Operation (read/write)
*/
void get_event_and_umask(char *cas_count_cfg, int count, bool op)
{
char *token[MAX_TOKENS];
int i = 0;
strcat(cas_count_cfg, ",");
token[0] = strtok(cas_count_cfg, "=,");
for (i = 1; i < MAX_TOKENS; i++)
token[i] = strtok(NULL, "=,");
for (i = 0; i < MAX_TOKENS; i++) {
if (!token[i])
break;
if (strcmp(token[i], "event") == 0) {
if (op == READ)
imc_counters_config[count][READ].event =
strtol(token[i + 1], NULL, 16);
else
imc_counters_config[count][WRITE].event =
strtol(token[i + 1], NULL, 16);
}
if (strcmp(token[i], "umask") == 0) {
if (op == READ)
imc_counters_config[count][READ].umask =
strtol(token[i + 1], NULL, 16);
else
imc_counters_config[count][WRITE].umask =
strtol(token[i + 1], NULL, 16);
}
}
}
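/*
 * Worked example (hypothetical sysfs contents): for a config string of
 * "event=0x04,umask=0x03", event is parsed as 0x04 and umask as 0x03, which
 * membw_initialize_perf_event_attr() later packs into
 * pe.config = (0x03 << 8) | 0x04 = 0x304.
 */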
static int open_perf_event(int i, int cpu_no, int j)
{
imc_counters_config[i][j].fd =
perf_event_open(&imc_counters_config[i][j].pe, -1, cpu_no, -1,
PERF_FLAG_FD_CLOEXEC);
if (imc_counters_config[i][j].fd == -1) {
fprintf(stderr, "Error opening leader %llx\n",
imc_counters_config[i][j].pe.config);
return -1;
}
return 0;
}
/* Get type and config (read and write) of an iMC counter */
static int read_from_imc_dir(char *imc_dir, int count)
{
char cas_count_cfg[1024], imc_counter_cfg[1024], imc_counter_type[1024];
FILE *fp;
/* Get type of iMC counter */
sprintf(imc_counter_type, "%s%s", imc_dir, "type");
fp = fopen(imc_counter_type, "r");
if (!fp) {
perror("Failed to open imc counter type file");
return -1;
}
if (fscanf(fp, "%u", &imc_counters_config[count][READ].type) <= 0) {
perror("Could not get imc type");
fclose(fp);
return -1;
}
fclose(fp);
imc_counters_config[count][WRITE].type =
imc_counters_config[count][READ].type;
/* Get read config */
sprintf(imc_counter_cfg, "%s%s", imc_dir, READ_FILE_NAME);
fp = fopen(imc_counter_cfg, "r");
if (!fp) {
perror("Failed to open imc config file");
return -1;
}
if (fscanf(fp, "%s", cas_count_cfg) <= 0) {
perror("Could not get imc cas count read");
fclose(fp);
return -1;
}
fclose(fp);
get_event_and_umask(cas_count_cfg, count, READ);
/* Get write config */
sprintf(imc_counter_cfg, "%s%s", imc_dir, WRITE_FILE_NAME);
fp = fopen(imc_counter_cfg, "r");
if (!fp) {
perror("Failed to open imc config file");
return -1;
}
if (fscanf(fp, "%s", cas_count_cfg) <= 0) {
perror("Could not get imc cas count write");
fclose(fp);
return -1;
}
fclose(fp);
get_event_and_umask(cas_count_cfg, count, WRITE);
return 0;
}
/*
 * A system can have 'n' iMC (Integrated Memory Controller)
 * counters; get that 'n'. For each iMC counter get its type and config.
* Also, each counter has two configs, one for read and the other for write.
* A config again has two parts, event and umask.
* Enumerate all these details into an array of structures.
*
* Return: >= 0 on success. < 0 on failure.
*/
static int num_of_imcs(void)
{
char imc_dir[512], *temp;
unsigned int count = 0;
struct dirent *ep;
int ret;
DIR *dp;
dp = opendir(DYN_PMU_PATH);
if (dp) {
while ((ep = readdir(dp))) {
temp = strstr(ep->d_name, UNCORE_IMC);
if (!temp)
continue;
			/*
			 * iMC counters are named "uncore_imc_<n>", hence
			 * increment the pointer to point to <n>. Note that
			 * sizeof(UNCORE_IMC) counts the NUL terminator as
			 * well, so the trailing underscore in "uncore_imc_"
			 * need not be counted separately.
			 */
temp = temp + sizeof(UNCORE_IMC);
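			/*
			 * Example: for d_name "uncore_imc_3",
			 * sizeof(UNCORE_IMC) is 11 (10 characters plus
			 * the NUL), so temp now points at "3".
			 */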
/*
* Some directories under "DYN_PMU_PATH" could have
* names like "uncore_imc_free_running", hence, check if
* first character is a numerical digit or not.
*/
if (temp[0] >= '0' && temp[0] <= '9') {
sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
ep->d_name);
ret = read_from_imc_dir(imc_dir, count);
if (ret) {
closedir(dp);
return ret;
}
count++;
}
}
closedir(dp);
if (count == 0) {
perror("Unable find iMC counters!\n");
return -1;
}
} else {
perror("Unable to open PMU directory!\n");
return -1;
}
return count;
}
static int initialize_mem_bw_imc(void)
{
int imc, j;
imcs = num_of_imcs();
if (imcs <= 0)
return imcs;
/* Initialize perf_event_attr structures for all iMC's */
for (imc = 0; imc < imcs; imc++) {
for (j = 0; j < 2; j++)
membw_initialize_perf_event_attr(imc, j);
}
return 0;
}
/*
 * get_mem_bw_imc: Memory bandwidth as reported by iMC counters
 * @cpu_no: CPU number that the benchmark PID is bound to
* @bw_report: Bandwidth report type (reads, writes)
*
* Memory B/W utilized by a process on a socket can be calculated using
* iMC counters. Perf events are used to read these counters.
*
* Return: = 0 on success. < 0 on failure.
*/
static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
{
float reads, writes, of_mul_read, of_mul_write;
int imc, j, ret;
/* Start all iMC counters to log values (both read and write) */
reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1;
for (imc = 0; imc < imcs; imc++) {
for (j = 0; j < 2; j++) {
ret = open_perf_event(imc, cpu_no, j);
if (ret)
return -1;
}
for (j = 0; j < 2; j++)
membw_ioctl_perf_event_ioc_reset_enable(imc, j);
}
sleep(1);
/* Stop counters after a second to get results (both read and write) */
for (imc = 0; imc < imcs; imc++) {
for (j = 0; j < 2; j++)
membw_ioctl_perf_event_ioc_disable(imc, j);
}
/*
* Get results which are stored in struct type imc_counter_config
	 * Take overflow into consideration before calculating total b/w
*/
for (imc = 0; imc < imcs; imc++) {
struct imc_counter_config *r =
&imc_counters_config[imc][READ];
struct imc_counter_config *w =
&imc_counters_config[imc][WRITE];
if (read(r->fd, &r->return_value,
sizeof(struct membw_read_format)) == -1) {
perror("Couldn't get read b/w through iMC");
return -1;
}
if (read(w->fd, &w->return_value,
sizeof(struct membw_read_format)) == -1) {
perror("Couldn't get write bw through iMC");
return -1;
}
__u64 r_time_enabled = r->return_value.time_enabled;
__u64 r_time_running = r->return_value.time_running;
if (r_time_enabled != r_time_running)
of_mul_read = (float)r_time_enabled /
(float)r_time_running;
__u64 w_time_enabled = w->return_value.time_enabled;
__u64 w_time_running = w->return_value.time_running;
if (w_time_enabled != w_time_running)
of_mul_write = (float)w_time_enabled /
(float)w_time_running;
reads += r->return_value.value * of_mul_read * SCALE;
writes += w->return_value.value * of_mul_write * SCALE;
}
for (imc = 0; imc < imcs; imc++) {
close(imc_counters_config[imc][READ].fd);
close(imc_counters_config[imc][WRITE].fd);
}
if (strcmp(bw_report, "reads") == 0) {
*bw_imc = reads;
return 0;
}
if (strcmp(bw_report, "writes") == 0) {
*bw_imc = writes;
return 0;
}
*bw_imc = reads + writes;
return 0;
}
void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
{
if (ctrlgrp && mongrp)
sprintf(mbm_total_path, CON_MON_MBM_LOCAL_BYTES_PATH,
RESCTRL_PATH, ctrlgrp, mongrp, resource_id);
else if (!ctrlgrp && mongrp)
sprintf(mbm_total_path, MON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
mongrp, resource_id);
else if (ctrlgrp && !mongrp)
sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
ctrlgrp, resource_id);
else if (!ctrlgrp && !mongrp)
sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
resource_id);
}
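/*
 * For reference, with ctrlgrp "c1", mongrp "m1" and resource_id 0
 * (hypothetical values), mbm_total_path expands to:
 *
 *	/sys/fs/resctrl/c1/mon_groups/m1/mon_data/mon_L3_00/mbm_local_bytes
 */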
/*
* initialize_mem_bw_resctrl: Appropriately populate "mbm_total_path"
* @ctrlgrp: Name of the control monitor group (con_mon grp)
* @mongrp: Name of the monitor group (mon grp)
 * @cpu_no: CPU number that the benchmark PID is bound to
* @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc)
*/
static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
int cpu_no, char *resctrl_val)
{
int resource_id;
if (get_resource_id(cpu_no, &resource_id) < 0) {
perror("Could not get resource_id");
return;
}
if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
set_mbm_path(ctrlgrp, mongrp, resource_id);
if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
if (ctrlgrp)
sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
RESCTRL_PATH, ctrlgrp, resource_id);
else
sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH,
RESCTRL_PATH, resource_id);
}
}
/*
* Get MBM Local bytes as reported by resctrl FS
* For MBM,
* 1. If con_mon grp and mon grp are given, then read from con_mon grp's mon grp
* 2. If only con_mon grp is given, then read from con_mon grp
* 3. If both are not given, then read from root con_mon grp
* For MBA,
* 1. If con_mon grp is given, then read from it
* 2. If con_mon grp is not given, then read from root con_mon grp
*/
static int get_mem_bw_resctrl(unsigned long *mbm_total)
{
FILE *fp;
fp = fopen(mbm_total_path, "r");
if (!fp) {
perror("Failed to open total bw file");
return -1;
}
if (fscanf(fp, "%lu", mbm_total) <= 0) {
perror("Could not get mbm local bytes");
fclose(fp);
return -1;
}
fclose(fp);
return 0;
}
pid_t bm_pid, ppid;
void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
{
kill(bm_pid, SIGKILL);
umount_resctrlfs();
tests_cleanup();
ksft_print_msg("Ending\n\n");
exit(EXIT_SUCCESS);
}
/*
* Register CTRL-C handler for parent, as it has to kill
* child process before exiting.
*/
int signal_handler_register(void)
{
struct sigaction sigact;
int ret = 0;
sigact.sa_sigaction = ctrlc_handler;
sigemptyset(&sigact.sa_mask);
sigact.sa_flags = SA_SIGINFO;
if (sigaction(SIGINT, &sigact, NULL) ||
sigaction(SIGTERM, &sigact, NULL) ||
sigaction(SIGHUP, &sigact, NULL)) {
perror("# sigaction");
ret = -1;
}
return ret;
}
/*
* Reset signal handler to SIG_DFL.
 * Returns void because the caller should keep
 * the error code of the other path even if sigaction fails.
*/
void signal_handler_unregister(void)
{
struct sigaction sigact;
sigact.sa_handler = SIG_DFL;
sigemptyset(&sigact.sa_mask);
if (sigaction(SIGINT, &sigact, NULL) ||
sigaction(SIGTERM, &sigact, NULL) ||
sigaction(SIGHUP, &sigact, NULL)) {
perror("# sigaction");
}
}
/*
* print_results_bw: the memory bandwidth results are stored in a file
* @filename: file that stores the results
* @bm_pid: child pid that runs benchmark
* @bw_imc: perf imc counter value
* @bw_resc: memory bandwidth value
*
* Return: 0 on success. non-zero on failure.
*/
static int print_results_bw(char *filename, int bm_pid, float bw_imc,
unsigned long bw_resc)
{
unsigned long diff = fabs(bw_imc - bw_resc);
FILE *fp;
if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
printf("Pid: %d \t Mem_BW_iMC: %f \t ", bm_pid, bw_imc);
printf("Mem_BW_resc: %lu \t Difference: %lu\n", bw_resc, diff);
} else {
fp = fopen(filename, "a");
if (!fp) {
perror("Cannot open results file");
return errno;
}
if (fprintf(fp, "Pid: %d \t Mem_BW_iMC: %f \t Mem_BW_resc: %lu \t Difference: %lu\n",
bm_pid, bw_imc, bw_resc, diff) <= 0) {
fclose(fp);
perror("Could not log results.");
return errno;
}
fclose(fp);
}
return 0;
}
static void set_cmt_path(const char *ctrlgrp, const char *mongrp, int sock_num)
{
if (strlen(ctrlgrp) && strlen(mongrp))
sprintf(llc_occup_path, CON_MON_LCC_OCCUP_PATH, RESCTRL_PATH,
ctrlgrp, mongrp, sock_num);
else if (!strlen(ctrlgrp) && strlen(mongrp))
sprintf(llc_occup_path, MON_LCC_OCCUP_PATH, RESCTRL_PATH,
mongrp, sock_num);
else if (strlen(ctrlgrp) && !strlen(mongrp))
sprintf(llc_occup_path, CON_LCC_OCCUP_PATH, RESCTRL_PATH,
ctrlgrp, sock_num);
else if (!strlen(ctrlgrp) && !strlen(mongrp))
sprintf(llc_occup_path, LCC_OCCUP_PATH, RESCTRL_PATH, sock_num);
}
/*
* initialize_llc_occu_resctrl: Appropriately populate "llc_occup_path"
* @ctrlgrp: Name of the control monitor group (con_mon grp)
* @mongrp: Name of the monitor group (mon grp)
 * @cpu_no: CPU number that the benchmark PID is bound to
* @resctrl_val: Resctrl feature (Eg: cat, cmt.. etc)
*/
static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
int cpu_no, char *resctrl_val)
{
int resource_id;
if (get_resource_id(cpu_no, &resource_id) < 0) {
perror("# Unable to resource_id");
return;
}
if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
set_cmt_path(ctrlgrp, mongrp, resource_id);
}
static int
measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
{
unsigned long bw_resc, bw_resc_end;
float bw_imc;
int ret;
/*
* Measure memory bandwidth from resctrl and from
* another source which is perf imc value or could
* be something else if perf imc event is not available.
* Compare the two values to validate resctrl value.
* It takes 1sec to measure the data.
*/
ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc);
if (ret < 0)
return ret;
ret = get_mem_bw_resctrl(&bw_resc_end);
if (ret < 0)
return ret;
bw_resc = (bw_resc_end - *bw_resc_start) / MB;
ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
if (ret)
return ret;
*bw_resc_start = bw_resc_end;
return 0;
}
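/*
 * Sketch of the math in measure_vals() (hypothetical counter values): if
 * mbm_local_bytes advances from 1048576000 to 2097152000 over the roughly
 * one second spent in get_mem_bw_imc(), bw_resc is
 * (2097152000 - 1048576000) / MB = 1000 MB, which is then compared against
 * the iMC-derived bw_imc for the same window.
 */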
/*
* resctrl_val: execute benchmark and measure memory bandwidth on
* the benchmark
* @benchmark_cmd: benchmark command and its arguments
* @param: parameters passed to resctrl_val()
*
* Return: 0 on success. non-zero on failure.
*/
int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
{
char *resctrl_val = param->resctrl_val;
unsigned long bw_resc_start = 0;
struct sigaction sigact;
int ret = 0, pipefd[2];
char pipe_message = 0;
union sigval value;
if (strcmp(param->filename, "") == 0)
sprintf(param->filename, "stdio");
if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
ret = validate_bw_report_request(param->bw_report);
if (ret)
return ret;
}
/*
* If benchmark wasn't successfully started by child, then child should
* kill parent, so save parent's pid
*/
ppid = getpid();
if (pipe(pipefd)) {
perror("# Unable to create pipe");
return -1;
}
/*
* Fork to start benchmark, save child's pid so that it can be killed
* when needed
*/
fflush(stdout);
bm_pid = fork();
if (bm_pid == -1) {
perror("# Unable to fork");
return -1;
}
if (bm_pid == 0) {
/*
* Mask all signals except SIGUSR1, parent uses SIGUSR1 to
* start benchmark
*/
sigfillset(&sigact.sa_mask);
sigdelset(&sigact.sa_mask, SIGUSR1);
sigact.sa_sigaction = run_benchmark;
sigact.sa_flags = SA_SIGINFO;
/* Register for "SIGUSR1" signal from parent */
if (sigaction(SIGUSR1, &sigact, NULL))
PARENT_EXIT("Can't register child for signal");
/* Tell parent that child is ready */
close(pipefd[0]);
pipe_message = 1;
if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) <
sizeof(pipe_message)) {
perror("# failed signaling parent process");
close(pipefd[1]);
return -1;
}
close(pipefd[1]);
/* Suspend child until delivery of "SIGUSR1" from parent */
sigsuspend(&sigact.sa_mask);
PARENT_EXIT("Child is done");
}
ksft_print_msg("Benchmark PID: %d\n", bm_pid);
ret = signal_handler_register();
if (ret)
goto out;
value.sival_ptr = benchmark_cmd;
/* Taskset benchmark to specified cpu */
ret = taskset_benchmark(bm_pid, param->cpu_no);
if (ret)
goto unregister;
/* Write benchmark to specified control&monitoring grp in resctrl FS */
ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
resctrl_val);
if (ret)
goto unregister;
if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
ret = initialize_mem_bw_imc();
if (ret)
goto unregister;
initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
param->cpu_no, resctrl_val);
} else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
param->cpu_no, resctrl_val);
/* Parent waits for child to be ready. */
close(pipefd[1]);
while (pipe_message != 1) {
if (read(pipefd[0], &pipe_message, sizeof(pipe_message)) <
sizeof(pipe_message)) {
perror("# failed reading message from child process");
close(pipefd[0]);
goto unregister;
}
}
close(pipefd[0]);
/* Signal child to start benchmark */
if (sigqueue(bm_pid, SIGUSR1, value) == -1) {
perror("# sigqueue SIGUSR1 to child");
ret = errno;
goto unregister;
}
/* Give benchmark enough time to fully run */
sleep(1);
/* Test runs until the callback setup() tells the test to stop. */
while (1) {
ret = param->setup(param);
if (ret == END_OF_TESTS) {
ret = 0;
break;
}
if (ret < 0)
break;
if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
ret = measure_vals(param, &bw_resc_start);
if (ret)
break;
} else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
sleep(1);
ret = measure_cache_vals(param, bm_pid);
if (ret)
break;
}
}
unregister:
signal_handler_unregister();
out:
kill(bm_pid, SIGKILL);
return ret;
}
| linux-master | tools/testing/selftests/resctrl/resctrl_val.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Basic resctrl file system operations
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <[email protected]>,
* Fenghua Yu <[email protected]>
*/
#include "resctrl.h"
static int find_resctrl_mount(char *buffer)
{
FILE *mounts;
char line[256], *fs, *mntpoint;
mounts = fopen("/proc/mounts", "r");
if (!mounts) {
perror("/proc/mounts");
return -ENXIO;
}
while (!feof(mounts)) {
if (!fgets(line, 256, mounts))
break;
fs = strtok(line, " \t");
if (!fs)
continue;
mntpoint = strtok(NULL, " \t");
if (!mntpoint)
continue;
fs = strtok(NULL, " \t");
if (!fs)
continue;
if (strcmp(fs, "resctrl"))
continue;
fclose(mounts);
if (buffer)
strncpy(buffer, mntpoint, 256);
return 0;
}
fclose(mounts);
return -ENOENT;
}
/*
* mount_resctrlfs - Mount resctrl FS at /sys/fs/resctrl
*
* Mounts resctrl FS. Fails if resctrl FS is already mounted to avoid
* pre-existing settings interfering with the test results.
*
* Return: 0 on success, non-zero on failure
*/
int mount_resctrlfs(void)
{
int ret;
ret = find_resctrl_mount(NULL);
if (ret != -ENOENT)
return -1;
ksft_print_msg("Mounting resctrl to \"%s\"\n", RESCTRL_PATH);
ret = mount("resctrl", RESCTRL_PATH, "resctrl", 0, NULL);
if (ret)
perror("# mount");
return ret;
}
int umount_resctrlfs(void)
{
char mountpoint[256];
int ret;
ret = find_resctrl_mount(mountpoint);
if (ret == -ENOENT)
return 0;
if (ret)
return ret;
if (umount(mountpoint)) {
perror("# Unable to umount resctrl");
return errno;
}
return 0;
}
/*
* get_resource_id - Get socket number/l3 id for a specified CPU
* @cpu_no: CPU number
* @resource_id: Socket number or l3_id
*
* Return: >= 0 on success, < 0 on failure.
*/
int get_resource_id(int cpu_no, int *resource_id)
{
char phys_pkg_path[1024];
FILE *fp;
if (get_vendor() == ARCH_AMD)
sprintf(phys_pkg_path, "%s%d/cache/index3/id",
PHYS_ID_PATH, cpu_no);
else
sprintf(phys_pkg_path, "%s%d/topology/physical_package_id",
PHYS_ID_PATH, cpu_no);
fp = fopen(phys_pkg_path, "r");
if (!fp) {
perror("Failed to open physical_package_id");
return -1;
}
if (fscanf(fp, "%d", resource_id) <= 0) {
perror("Could not get socket number or l3 id");
fclose(fp);
return -1;
}
fclose(fp);
return 0;
}
/*
* get_cache_size - Get cache size for a specified CPU
* @cpu_no: CPU number
* @cache_type: Cache level L2/L3
* @cache_size: pointer to cache_size
*
* Return: = 0 on success, < 0 on failure.
*/
int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
{
char cache_path[1024], cache_str[64];
int length, i, cache_num;
FILE *fp;
if (!strcmp(cache_type, "L3")) {
cache_num = 3;
} else if (!strcmp(cache_type, "L2")) {
cache_num = 2;
} else {
perror("Invalid cache level");
return -1;
}
sprintf(cache_path, "/sys/bus/cpu/devices/cpu%d/cache/index%d/size",
cpu_no, cache_num);
fp = fopen(cache_path, "r");
if (!fp) {
perror("Failed to open cache size");
return -1;
}
if (fscanf(fp, "%s", cache_str) <= 0) {
perror("Could not get cache_size");
fclose(fp);
return -1;
}
fclose(fp);
length = (int)strlen(cache_str);
*cache_size = 0;
for (i = 0; i < length; i++) {
if ((cache_str[i] >= '0') && (cache_str[i] <= '9'))
*cache_size = *cache_size * 10 + (cache_str[i] - '0');
else if (cache_str[i] == 'K')
*cache_size = *cache_size * 1024;
else if (cache_str[i] == 'M')
*cache_size = *cache_size * 1024 * 1024;
else
break;
}
return 0;
}
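/*
 * Worked example of the parse loop above: a sysfs size string of "512K"
 * yields 512 * 1024 = 524288 and "16M" yields 16777216; parsing stops at
 * the first character that is neither a digit nor a 'K'/'M' suffix.
 */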
#define CORE_SIBLINGS_PATH "/sys/bus/cpu/devices/cpu"
/*
* get_cbm_mask - Get cbm mask for given cache
* @cache_type: Cache level L2/L3
* @cbm_mask: cbm_mask returned as a string
*
* Return: = 0 on success, < 0 on failure.
*/
int get_cbm_mask(char *cache_type, char *cbm_mask)
{
char cbm_mask_path[1024];
FILE *fp;
if (!cbm_mask)
return -1;
sprintf(cbm_mask_path, "%s/%s/cbm_mask", INFO_PATH, cache_type);
fp = fopen(cbm_mask_path, "r");
if (!fp) {
perror("Failed to open cache level");
return -1;
}
if (fscanf(fp, "%s", cbm_mask) <= 0) {
perror("Could not get max cbm_mask");
fclose(fp);
return -1;
}
fclose(fp);
return 0;
}
/*
* get_core_sibling - Get sibling core id from the same socket for given CPU
* @cpu_no: CPU number
*
* Return: > 0 on success, < 0 on failure.
*/
int get_core_sibling(int cpu_no)
{
char core_siblings_path[1024], cpu_list_str[64];
int sibling_cpu_no = -1;
FILE *fp;
sprintf(core_siblings_path, "%s%d/topology/core_siblings_list",
CORE_SIBLINGS_PATH, cpu_no);
fp = fopen(core_siblings_path, "r");
if (!fp) {
perror("Failed to open core siblings path");
return -1;
}
if (fscanf(fp, "%s", cpu_list_str) <= 0) {
perror("Could not get core_siblings list");
fclose(fp);
return -1;
}
fclose(fp);
char *token = strtok(cpu_list_str, "-,");
while (token) {
sibling_cpu_no = atoi(token);
/* Skipping core 0 as we don't want to run test on core 0 */
if (sibling_cpu_no != 0 && sibling_cpu_no != cpu_no)
break;
token = strtok(NULL, "-,");
}
return sibling_cpu_no;
}
/*
* taskset_benchmark - Taskset PID (i.e. benchmark) to a specified cpu
 * @bm_pid: PID that should be bound
 * @cpu_no: CPU number to which the PID should be bound
*
* Return: 0 on success, non-zero on failure
*/
int taskset_benchmark(pid_t bm_pid, int cpu_no)
{
cpu_set_t my_set;
CPU_ZERO(&my_set);
CPU_SET(cpu_no, &my_set);
if (sched_setaffinity(bm_pid, sizeof(cpu_set_t), &my_set)) {
perror("Unable to taskset benchmark");
return -1;
}
return 0;
}
/*
* run_benchmark - Run a specified benchmark or fill_buf (default benchmark)
 * as the handler of the specified signal. Direct benchmark stdio to /dev/null.
* @signum: signal number
* @info: signal info
* @ucontext: user context in signal handling
*
* Return: void
*/
void run_benchmark(int signum, siginfo_t *info, void *ucontext)
{
int operation, ret, memflush;
char **benchmark_cmd;
size_t span;
bool once;
FILE *fp;
benchmark_cmd = info->si_ptr;
/*
* Direct stdio of child to /dev/null, so that only parent writes to
* stdio (console)
*/
fp = freopen("/dev/null", "w", stdout);
if (!fp)
PARENT_EXIT("Unable to direct benchmark status to /dev/null");
if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
/* Execute default fill_buf benchmark */
span = strtoul(benchmark_cmd[1], NULL, 10);
memflush = atoi(benchmark_cmd[2]);
operation = atoi(benchmark_cmd[3]);
if (!strcmp(benchmark_cmd[4], "true"))
once = true;
else if (!strcmp(benchmark_cmd[4], "false"))
once = false;
else
PARENT_EXIT("Invalid once parameter");
if (run_fill_buf(span, memflush, operation, once))
fprintf(stderr, "Error in running fill buffer\n");
} else {
/* Execute specified benchmark */
ret = execvp(benchmark_cmd[0], benchmark_cmd);
if (ret)
perror("wrong\n");
}
fclose(stdout);
PARENT_EXIT("Unable to run specified benchmark");
}
/*
* create_grp - Create a group only if one doesn't exist
* @grp_name: Name of the group
* @grp: Full path and name of the group
* @parent_grp: Full path and name of the parent group
*
* Return: 0 on success, non-zero on failure
*/
static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
{
int found_grp = 0;
struct dirent *ep;
DIR *dp;
/*
* At this point, we are guaranteed to have resctrl FS mounted and if
 * length of grp_name == 0, it means the user wants to use the root con_mon
 * grp, so do nothing
*/
if (strlen(grp_name) == 0)
return 0;
/* Check if requested grp exists or not */
dp = opendir(parent_grp);
if (dp) {
while ((ep = readdir(dp)) != NULL) {
if (strcmp(ep->d_name, grp_name) == 0)
found_grp = 1;
}
closedir(dp);
} else {
perror("Unable to open resctrl for group");
return -1;
}
/* Requested grp doesn't exist, hence create it */
if (found_grp == 0) {
if (mkdir(grp, 0) == -1) {
perror("Unable to create group");
return -1;
}
}
return 0;
}
static int write_pid_to_tasks(char *tasks, pid_t pid)
{
FILE *fp;
fp = fopen(tasks, "w");
if (!fp) {
perror("Failed to open tasks file");
return -1;
}
if (fprintf(fp, "%d\n", pid) < 0) {
perror("Failed to wr pid to tasks file");
fclose(fp);
return -1;
}
fclose(fp);
return 0;
}
/*
* write_bm_pid_to_resctrl - Write a PID (i.e. benchmark) to resctrl FS
* @bm_pid: PID that should be written
* @ctrlgrp: Name of the control monitor group (con_mon grp)
* @mongrp: Name of the monitor group (mon grp)
* @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc)
*
* If a con_mon grp is requested, create it and write pid to it, otherwise
* write pid to root con_mon grp.
* If a mon grp is requested, create it and write pid to it, otherwise
* pid is not written, this means that pid is in con_mon grp and hence
* should consult con_mon grp's mon_data directory for results.
*
* Return: 0 on success, non-zero on failure
*/
int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
char *resctrl_val)
{
char controlgroup[128], monitorgroup[512], monitorgroup_p[256];
char tasks[1024];
int ret = 0;
if (strlen(ctrlgrp))
sprintf(controlgroup, "%s/%s", RESCTRL_PATH, ctrlgrp);
else
sprintf(controlgroup, "%s", RESCTRL_PATH);
/* Create control and monitoring group and write pid into it */
ret = create_grp(ctrlgrp, controlgroup, RESCTRL_PATH);
if (ret)
goto out;
sprintf(tasks, "%s/tasks", controlgroup);
ret = write_pid_to_tasks(tasks, bm_pid);
if (ret)
goto out;
/* Create mon grp and write pid into it for "mbm" and "cmt" test */
if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)) ||
!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
if (strlen(mongrp)) {
sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
ret = create_grp(mongrp, monitorgroup, monitorgroup_p);
if (ret)
goto out;
sprintf(tasks, "%s/mon_groups/%s/tasks",
controlgroup, mongrp);
ret = write_pid_to_tasks(tasks, bm_pid);
if (ret)
goto out;
}
}
out:
ksft_print_msg("Writing benchmark parameters to resctrl FS\n");
if (ret)
perror("# writing to resctrlfs");
return ret;
}
/*
* write_schemata - Update schemata of a con_mon grp
* @ctrlgrp: Name of the con_mon grp
 * @schemata: Schemata that should be written
 * @cpu_no: CPU number that the benchmark PID is bound to
* @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc)
*
* Update schemata of a con_mon grp *only* if requested resctrl feature is
* allocation type
*
* Return: 0 on success, non-zero on failure
*/
int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
{
char controlgroup[1024], schema[1024], reason[64];
int resource_id, ret = 0;
FILE *fp;
if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) &&
strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) &&
strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) &&
strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
return -ENOENT;
if (!schemata) {
ksft_print_msg("Skipping empty schemata update\n");
return -1;
}
if (get_resource_id(cpu_no, &resource_id) < 0) {
sprintf(reason, "Failed to get resource id");
ret = -1;
goto out;
}
if (strlen(ctrlgrp) != 0)
sprintf(controlgroup, "%s/%s/schemata", RESCTRL_PATH, ctrlgrp);
else
sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) ||
!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
sprintf(schema, "%s%d%c%s", "L3:", resource_id, '=', schemata);
if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
sprintf(schema, "%s%d%c%s", "MB:", resource_id, '=', schemata);
fp = fopen(controlgroup, "w");
if (!fp) {
sprintf(reason, "Failed to open control group");
ret = -1;
goto out;
}
if (fprintf(fp, "%s\n", schema) < 0) {
sprintf(reason, "Failed to write schemata in control group");
fclose(fp);
ret = -1;
goto out;
}
fclose(fp);
out:
ksft_print_msg("Write schema \"%s\" to resctrl FS%s%s\n",
schema, ret ? " # " : "",
ret ? reason : "");
return ret;
}
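/*
 * For reference, the schema strings written above look like (assuming
 * resource_id 0): "L3:0=7f" for the CAT/CMT tests, where the value is a
 * capacity bitmask, and "MB:0=50" for the MBA/MBM tests, where the value
 * is a bandwidth percentage.
 */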
bool check_resctrlfs_support(void)
{
FILE *inf = fopen("/proc/filesystems", "r");
DIR *dp;
char *res;
bool ret = false;
if (!inf)
return false;
res = fgrep(inf, "nodev\tresctrl\n");
if (res) {
ret = true;
free(res);
}
fclose(inf);
ksft_print_msg("%s Check kernel supports resctrl filesystem\n",
ret ? "Pass:" : "Fail:");
if (!ret)
return ret;
dp = opendir(RESCTRL_PATH);
ksft_print_msg("%s Check resctrl mountpoint \"%s\" exists\n",
dp ? "Pass:" : "Fail:", RESCTRL_PATH);
if (dp)
closedir(dp);
ksft_print_msg("resctrl filesystem %s mounted\n",
find_resctrl_mount(NULL) ? "not" : "is");
return ret;
}
char *fgrep(FILE *inf, const char *str)
{
char line[256];
int slen = strlen(str);
while (!feof(inf)) {
if (!fgets(line, 256, inf))
break;
if (strncmp(line, str, slen))
continue;
return strdup(line);
}
return NULL;
}
/*
* validate_resctrl_feature_request - Check if requested feature is valid.
* @resctrl_val: Requested feature
*
* Return: True if the feature is supported, else false. False is also
* returned if resctrl FS is not mounted.
*/
bool validate_resctrl_feature_request(const char *resctrl_val)
{
struct stat statbuf;
bool found = false;
char *res;
FILE *inf;
int ret;
if (!resctrl_val)
return false;
ret = find_resctrl_mount(NULL);
if (ret)
return false;
if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
if (!stat(L3_PATH, &statbuf))
return true;
} else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
if (!stat(MB_PATH, &statbuf))
return true;
} else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
if (!stat(L3_MON_PATH, &statbuf)) {
inf = fopen(L3_MON_FEATURES_PATH, "r");
if (!inf)
return false;
if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
res = fgrep(inf, "llc_occupancy");
if (res) {
found = true;
free(res);
}
}
if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
res = fgrep(inf, "mbm_total_bytes");
if (res) {
free(res);
res = fgrep(inf, "mbm_local_bytes");
if (res) {
found = true;
free(res);
}
}
}
fclose(inf);
}
}
return found;
}
int filter_dmesg(void)
{
char line[1024];
FILE *fp;
int pipefds[2];
pid_t pid;
int ret;
ret = pipe(pipefds);
if (ret) {
perror("pipe");
return ret;
}
fflush(stdout);
pid = fork();
if (pid == 0) {
close(pipefds[0]);
dup2(pipefds[1], STDOUT_FILENO);
execlp("dmesg", "dmesg", NULL);
perror("executing dmesg");
exit(1);
}
close(pipefds[1]);
fp = fdopen(pipefds[0], "r");
if (!fp) {
perror("fdopen(pipe)");
kill(pid, SIGTERM);
return -1;
}
while (fgets(line, 1024, fp)) {
if (strstr(line, "intel_rdt:"))
ksft_print_msg("dmesg: %s", line);
if (strstr(line, "resctrl:"))
ksft_print_msg("dmesg: %s", line);
}
fclose(fp);
waitpid(pid, NULL, 0);
return 0;
}
int validate_bw_report_request(char *bw_report)
{
if (strcmp(bw_report, "reads") == 0)
return 0;
if (strcmp(bw_report, "writes") == 0)
return 0;
if (strcmp(bw_report, "nt-writes") == 0) {
strcpy(bw_report, "writes");
return 0;
}
if (strcmp(bw_report, "total") == 0)
return 0;
fprintf(stderr, "Requested iMC B/W report type unavailable\n");
return -1;
}
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
int group_fd, unsigned long flags)
{
int ret;
ret = syscall(__NR_perf_event_open, hw_event, pid, cpu,
group_fd, flags);
return ret;
}
unsigned int count_bits(unsigned long n)
{
unsigned int count = 0;
while (n) {
count += n & 1;
n >>= 1;
}
return count;
}
| linux-master | tools/testing/selftests/resctrl/resctrlfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* fill_buf benchmark
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <[email protected]>,
* Fenghua Yu <[email protected]>
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <inttypes.h>
#include <string.h>
#include "resctrl.h"
#define CL_SIZE (64)
#define PAGE_SIZE (4 * 1024)
#define MB (1024 * 1024)
static void sb(void)
{
#if defined(__i386) || defined(__x86_64)
asm volatile("sfence\n\t"
: : : "memory");
#endif
}
static void cl_flush(void *p)
{
#if defined(__i386) || defined(__x86_64)
asm volatile("clflush (%0)\n\t"
: : "r"(p) : "memory");
#endif
}
static void mem_flush(unsigned char *buf, size_t buf_size)
{
unsigned char *cp = buf;
size_t i = 0;
buf_size = buf_size / CL_SIZE; /* mem size in cache lines */
for (i = 0; i < buf_size; i++)
cl_flush(&cp[i * CL_SIZE]);
sb();
}
static void *malloc_and_init_memory(size_t buf_size)
{
void *p = NULL;
uint64_t *p64;
size_t s64;
int ret;
ret = posix_memalign(&p, PAGE_SIZE, buf_size);
if (ret < 0)
return NULL;
p64 = (uint64_t *)p;
s64 = buf_size / sizeof(uint64_t);
while (s64 > 0) {
*p64 = (uint64_t)rand();
p64 += (CL_SIZE / sizeof(uint64_t));
s64 -= (CL_SIZE / sizeof(uint64_t));
}
return p;
}
static int fill_one_span_read(unsigned char *buf, size_t buf_size)
{
unsigned char *end_ptr = buf + buf_size;
unsigned char sum, *p;
sum = 0;
p = buf;
while (p < end_ptr) {
sum += *p;
p += (CL_SIZE / 2);
}
return sum;
}
static void fill_one_span_write(unsigned char *buf, size_t buf_size)
{
unsigned char *end_ptr = buf + buf_size;
unsigned char *p;
p = buf;
while (p < end_ptr) {
*p = '1';
p += (CL_SIZE / 2);
}
}
static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
{
int ret = 0;
FILE *fp;
while (1) {
ret = fill_one_span_read(buf, buf_size);
if (once)
break;
}
/* Consume read result so that reading memory is not optimized out. */
fp = fopen("/dev/null", "w");
if (!fp) {
perror("Unable to write to /dev/null");
return -1;
}
fprintf(fp, "Sum: %d ", ret);
fclose(fp);
return 0;
}
static int fill_cache_write(unsigned char *buf, size_t buf_size, bool once)
{
while (1) {
fill_one_span_write(buf, buf_size);
if (once)
break;
}
return 0;
}
static int fill_cache(size_t buf_size, int memflush, int op, bool once)
{
unsigned char *buf;
int ret;
buf = malloc_and_init_memory(buf_size);
if (!buf)
return -1;
/* Flush the memory before using to avoid "cache hot pages" effect */
if (memflush)
mem_flush(buf, buf_size);
if (op == 0)
ret = fill_cache_read(buf, buf_size, once);
else
ret = fill_cache_write(buf, buf_size, once);
free(buf);
if (ret) {
printf("\n Error in fill cache read/write...\n");
return -1;
}
return 0;
}
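/*
 * Usage sketch for the wrapper below (hypothetical arguments):
 * run_fill_buf(256 * MB, 1, 0, false) allocates and initializes a 256 MB
 * buffer, flushes it from the cache, then loops reading it until the
 * process is killed; op != 0 selects the write loop and once = true does
 * a single pass instead.
 */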
int run_fill_buf(size_t span, int memflush, int op, bool once)
{
size_t cache_size = span;
int ret;
ret = fill_cache(cache_size, memflush, op, once);
if (ret) {
printf("\n Error in fill cache\n");
return -1;
}
return 0;
}
| linux-master | tools/testing/selftests/resctrl/fill_buf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test TDX guest features
*
* Copyright (C) 2022 Intel Corporation.
*
* Author: Kuppuswamy Sathyanarayanan <[email protected]>
*/
#include <sys/ioctl.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/tdx-guest.h>
#include "../kselftest_harness.h"
#define TDX_GUEST_DEVNAME "/dev/tdx_guest"
#define HEX_DUMP_SIZE 8
#define DEBUG 0
/**
* struct tdreport_type - Type header of TDREPORT_STRUCT.
* @type: Type of the TDREPORT (0 - SGX, 81 - TDX, rest are reserved)
* @sub_type: Subtype of the TDREPORT (Default value is 0).
* @version: TDREPORT version (Default value is 0).
* @reserved: Added for future extension.
*
* More details can be found in TDX v1.0 module specification, sec
* titled "REPORTTYPE".
*/
struct tdreport_type {
__u8 type;
__u8 sub_type;
__u8 version;
__u8 reserved;
};
/**
* struct reportmac - TDX guest report data, MAC and TEE hashes.
* @type: TDREPORT type header.
* @reserved1: Reserved for future extension.
* @cpu_svn: CPU security version.
* @tee_tcb_info_hash: SHA384 hash of TEE TCB INFO.
* @tee_td_info_hash: SHA384 hash of TDINFO_STRUCT.
* @reportdata: User defined unique data passed in TDG.MR.REPORT request.
* @reserved2: Reserved for future extension.
* @mac: CPU MAC ID.
*
* It is MAC-protected and contains hashes of the remainder of the
* report structure along with user provided report data. More details can
* be found in TDX v1.0 Module specification, sec titled "REPORTMACSTRUCT"
*/
struct reportmac {
struct tdreport_type type;
__u8 reserved1[12];
__u8 cpu_svn[16];
__u8 tee_tcb_info_hash[48];
__u8 tee_td_info_hash[48];
__u8 reportdata[64];
__u8 reserved2[32];
__u8 mac[32];
};
/**
* struct td_info - TDX guest measurements and configuration.
* @attr: TDX Guest attributes (like debug, spet_disable, etc).
* @xfam: Extended features allowed mask.
* @mrtd: Build time measurement register.
* @mrconfigid: Software-defined ID for non-owner-defined configuration
* of the guest - e.g., run-time or OS configuration.
* @mrowner: Software-defined ID for the guest owner.
* @mrownerconfig: Software-defined ID for owner-defined configuration of
* the guest - e.g., specific to the workload.
* @rtmr: Run time measurement registers.
* @reserved: Added for future extension.
*
* It contains the measurements and initial configuration of the TDX guest
* that was locked at initialization and a set of measurement registers
* that are run-time extendable. More details can be found in TDX v1.0
* Module specification, sec titled "TDINFO_STRUCT".
*/
struct td_info {
__u8 attr[8];
__u64 xfam;
__u64 mrtd[6];
__u64 mrconfigid[6];
__u64 mrowner[6];
__u64 mrownerconfig[6];
__u64 rtmr[24];
__u64 reserved[14];
};
/**
* struct tdreport - Output of TDCALL[TDG.MR.REPORT].
* @reportmac: Mac protected header of size 256 bytes.
* @tee_tcb_info: Additional attestable elements in the TCB are not
* reflected in the reportmac.
* @reserved: Added for future extension.
* @tdinfo: Measurements and configuration data of size 512 bytes.
*
* More details can be found in TDX v1.0 Module specification, sec
* titled "TDREPORT_STRUCT".
*/
struct tdreport {
struct reportmac reportmac;
__u8 tee_tcb_info[239];
__u8 reserved[17];
struct td_info tdinfo;
};
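/*
 * Size check (derived from the layout above): 256 bytes of reportmac,
 * 239 bytes of tee_tcb_info, 17 reserved bytes and 512 bytes of tdinfo
 * add up to the expected 1024-byte TDREPORT.
 */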
static void print_array_hex(const char *title, const char *prefix_str,
const void *buf, int len)
{
int i, j, line_len, rowsize = HEX_DUMP_SIZE;
const __u8 *ptr = buf;
printf("\t\t%s", title);
for (j = 0; j < len; j += rowsize) {
line_len = rowsize < (len - j) ? rowsize : (len - j);
printf("%s%.8x:", prefix_str, j);
for (i = 0; i < line_len; i++)
printf(" %.2x", ptr[j + i]);
printf("\n");
}
printf("\n");
}
TEST(verify_report)
{
struct tdx_report_req req;
struct tdreport *tdreport;
int devfd, i;
devfd = open(TDX_GUEST_DEVNAME, O_RDWR | O_SYNC);
ASSERT_LT(0, devfd);
/* Generate sample report data */
for (i = 0; i < TDX_REPORTDATA_LEN; i++)
req.reportdata[i] = i;
/* Get TDREPORT */
ASSERT_EQ(0, ioctl(devfd, TDX_CMD_GET_REPORT0, &req));
if (DEBUG) {
print_array_hex("\n\t\tTDX report data\n", "",
req.reportdata, sizeof(req.reportdata));
print_array_hex("\n\t\tTDX tdreport data\n", "",
req.tdreport, sizeof(req.tdreport));
}
/* Make sure TDREPORT data includes the REPORTDATA passed */
tdreport = (struct tdreport *)req.tdreport;
ASSERT_EQ(0, memcmp(&tdreport->reportmac.reportdata[0],
req.reportdata, sizeof(req.reportdata)));
ASSERT_EQ(0, close(devfd));
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/tdx/tdx_guest_test.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <stdio.h>
#include "rdvl.h"
int main(void)
{
int vl = rdvl_sme();
printf("%d\n", vl);
return 0;
}
| linux-master | tools/testing/selftests/arm64/fp/rdvl-sme.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 ARM Limited.
*/
#define _GNU_SOURCE
#define _POSIX_C_SOURCE 199309L
#include <errno.h>
#include <getopt.h>
#include <poll.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/epoll.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <asm/hwcap.h>
#include "../../kselftest.h"
#define MAX_VLS 16
struct child_data {
char *name, *output;
pid_t pid;
int stdout;
bool output_seen;
bool exited;
int exit_status;
};
static int epoll_fd;
static struct child_data *children;
static struct epoll_event *evs;
static int tests;
static int num_children;
static bool terminate;
static int startup_pipe[2];
static int num_processors(void)
{
long nproc = sysconf(_SC_NPROCESSORS_CONF);
if (nproc < 0) {
perror("Unable to read number of processors\n");
exit(EXIT_FAILURE);
}
return nproc;
}
static void child_start(struct child_data *child, const char *program)
{
int ret, pipefd[2], i;
struct epoll_event ev;
ret = pipe(pipefd);
if (ret != 0)
ksft_exit_fail_msg("Failed to create stdout pipe: %s (%d)\n",
strerror(errno), errno);
child->pid = fork();
if (child->pid == -1)
ksft_exit_fail_msg("fork() failed: %s (%d)\n",
strerror(errno), errno);
if (!child->pid) {
/*
* In child, replace stdout with the pipe, errors to
* stderr from here as kselftest prints to stdout.
*/
ret = dup2(pipefd[1], 1);
if (ret == -1) {
fprintf(stderr, "dup2() %d\n", errno);
exit(EXIT_FAILURE);
}
/*
* Duplicate the read side of the startup pipe to
* FD 3 so we can close everything else.
*/
ret = dup2(startup_pipe[0], 3);
if (ret == -1) {
fprintf(stderr, "dup2() %d\n", errno);
exit(EXIT_FAILURE);
}
/*
* Very dumb mechanism to clean open FDs other than
* stdio. We don't want O_CLOEXEC for the pipes...
*/
for (i = 4; i < 8192; i++)
close(i);
/*
* Read from the startup pipe, there should be no data
* and we should block until it is closed. We just
* carry on on error since this isn't super critical.
*/
ret = read(3, &i, sizeof(i));
if (ret < 0)
fprintf(stderr, "read(startp pipe) failed: %s (%d)\n",
strerror(errno), errno);
if (ret > 0)
fprintf(stderr, "%d bytes of data on startup pipe\n",
ret);
close(3);
ret = execl(program, program, NULL);
fprintf(stderr, "execl(%s) failed: %d (%s)\n",
program, errno, strerror(errno));
exit(EXIT_FAILURE);
} else {
/*
* In parent, remember the child and close our copy of the
* write side of stdout.
*/
close(pipefd[1]);
child->stdout = pipefd[0];
child->output = NULL;
child->exited = false;
child->output_seen = false;
ev.events = EPOLLIN | EPOLLHUP;
ev.data.ptr = child;
ret = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, child->stdout, &ev);
if (ret < 0) {
ksft_exit_fail_msg("%s EPOLL_CTL_ADD failed: %s (%d)\n",
child->name, strerror(errno), errno);
}
}
}
static bool child_output_read(struct child_data *child)
{
char read_data[1024];
char work[1024];
int ret, len, cur_work, cur_read;
ret = read(child->stdout, read_data, sizeof(read_data));
if (ret < 0) {
if (errno == EINTR)
return true;
ksft_print_msg("%s: read() failed: %s (%d)\n",
child->name, strerror(errno),
errno);
return false;
}
len = ret;
child->output_seen = true;
/* Pick up any partial read */
if (child->output) {
strncpy(work, child->output, sizeof(work) - 1);
cur_work = strnlen(work, sizeof(work));
free(child->output);
child->output = NULL;
} else {
cur_work = 0;
}
cur_read = 0;
while (cur_read < len) {
work[cur_work] = read_data[cur_read++];
if (work[cur_work] == '\n') {
work[cur_work] = '\0';
ksft_print_msg("%s: %s\n", child->name, work);
cur_work = 0;
} else {
cur_work++;
}
}
if (cur_work) {
work[cur_work] = '\0';
ret = asprintf(&child->output, "%s", work);
if (ret == -1)
ksft_exit_fail_msg("Out of memory\n");
}
return false;
}
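/*
 * child_output_read() reassembles complete lines: for example, a read
 * returning "PASS 1\nPA" prints "name: PASS 1" immediately and
 * stashes the partial "PA" in child->output until a later read
 * completes the line.
 */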
static void child_output(struct child_data *child, uint32_t events,
bool flush)
{
bool read_more;
if (events & EPOLLIN) {
do {
read_more = child_output_read(child);
} while (read_more);
}
if (events & EPOLLHUP) {
close(child->stdout);
child->stdout = -1;
flush = true;
}
if (flush && child->output) {
ksft_print_msg("%s: %s<EOF>\n", child->name, child->output);
free(child->output);
child->output = NULL;
}
}
static void child_tickle(struct child_data *child)
{
if (child->output_seen && !child->exited)
kill(child->pid, SIGUSR2);
}
static void child_stop(struct child_data *child)
{
if (!child->exited)
kill(child->pid, SIGTERM);
}
static void child_cleanup(struct child_data *child)
{
pid_t ret;
int status;
bool fail = false;
if (!child->exited) {
do {
ret = waitpid(child->pid, &status, 0);
if (ret == -1 && errno == EINTR)
continue;
if (ret == -1) {
ksft_print_msg("waitpid(%d) failed: %s (%d)\n",
child->pid, strerror(errno),
errno);
fail = true;
break;
}
} while (!WIFEXITED(status));
child->exit_status = WEXITSTATUS(status);
}
if (!child->output_seen) {
ksft_print_msg("%s no output seen\n", child->name);
fail = true;
}
if (child->exit_status != 0) {
ksft_print_msg("%s exited with error code %d\n",
child->name, child->exit_status);
fail = true;
}
ksft_test_result(!fail, "%s\n", child->name);
}
static void handle_child_signal(int sig, siginfo_t *info, void *context)
{
int i;
bool found = false;
for (i = 0; i < num_children; i++) {
if (children[i].pid == info->si_pid) {
children[i].exited = true;
children[i].exit_status = info->si_status;
found = true;
break;
}
}
if (!found)
ksft_print_msg("SIGCHLD for unknown PID %d with status %d\n",
info->si_pid, info->si_status);
}
static void handle_exit_signal(int sig, siginfo_t *info, void *context)
{
int i;
/* If we're already exiting then don't signal again */
if (terminate)
return;
ksft_print_msg("Got signal, exiting...\n");
terminate = true;
/*
* This should be redundant, the main loop should clean up
* after us, but for safety stop everything we can here.
*/
for (i = 0; i < num_children; i++)
child_stop(&children[i]);
}
static void start_fpsimd(struct child_data *child, int cpu, int copy)
{
int ret;
ret = asprintf(&child->name, "FPSIMD-%d-%d", cpu, copy);
if (ret == -1)
ksft_exit_fail_msg("asprintf() failed\n");
child_start(child, "./fpsimd-test");
ksft_print_msg("Started %s\n", child->name);
}
static void start_sve(struct child_data *child, int vl, int cpu)
{
int ret;
ret = prctl(PR_SVE_SET_VL, vl | PR_SVE_VL_INHERIT);
if (ret < 0)
ksft_exit_fail_msg("Failed to set SVE VL %d\n", vl);
ret = asprintf(&child->name, "SVE-VL-%d-%d", vl, cpu);
if (ret == -1)
ksft_exit_fail_msg("asprintf() failed\n");
child_start(child, "./sve-test");
ksft_print_msg("Started %s\n", child->name);
}
static void start_ssve(struct child_data *child, int vl, int cpu)
{
int ret;
ret = asprintf(&child->name, "SSVE-VL-%d-%d", vl, cpu);
if (ret == -1)
ksft_exit_fail_msg("asprintf() failed\n");
ret = prctl(PR_SME_SET_VL, vl | PR_SME_VL_INHERIT);
if (ret < 0)
ksft_exit_fail_msg("Failed to set SME VL %d\n", ret);
child_start(child, "./ssve-test");
ksft_print_msg("Started %s\n", child->name);
}
static void start_za(struct child_data *child, int vl, int cpu)
{
int ret;
	ret = prctl(PR_SME_SET_VL, vl | PR_SME_VL_INHERIT);
	if (ret < 0)
		ksft_exit_fail_msg("Failed to set SME VL %d\n", vl);
ret = asprintf(&child->name, "ZA-VL-%d-%d", vl, cpu);
if (ret == -1)
ksft_exit_fail_msg("asprintf() failed\n");
child_start(child, "./za-test");
ksft_print_msg("Started %s\n", child->name);
}
static void start_zt(struct child_data *child, int cpu)
{
int ret;
ret = asprintf(&child->name, "ZT-%d", cpu);
if (ret == -1)
ksft_exit_fail_msg("asprintf() failed\n");
child_start(child, "./zt-test");
ksft_print_msg("Started %s\n", child->name);
}
static void probe_vls(int vls[], int *vl_count, int set_vl)
{
unsigned int vq;
int vl;
*vl_count = 0;
for (vq = SVE_VQ_MAX; vq > 0; vq /= 2) {
vl = prctl(set_vl, vq * 16);
if (vl == -1)
ksft_exit_fail_msg("SET_VL failed: %s (%d)\n",
strerror(errno), errno);
vl &= PR_SVE_VL_LEN_MASK;
if (*vl_count && (vl == vls[*vl_count - 1]))
break;
vq = sve_vq_from_vl(vl);
vls[*vl_count] = vl;
*vl_count += 1;
}
}
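/*
 * probe_vls() enumerates by halving the requested VQ. On hypothetical
 * hardware whose largest VL is 256 bytes, successive prctl() calls
 * are constrained to 256, 128, 64, 32 and 16 bytes, and the loop
 * stops early if the kernel returns the same VL twice in a row.
 */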
/* Handle any pending output without blocking */
static void drain_output(bool flush)
{
int ret = 1;
int i;
while (ret > 0) {
ret = epoll_wait(epoll_fd, evs, tests, 0);
if (ret < 0) {
if (errno == EINTR)
continue;
ksft_print_msg("epoll_wait() failed: %s (%d)\n",
strerror(errno), errno);
}
for (i = 0; i < ret; i++)
child_output(evs[i].data.ptr, evs[i].events, flush);
}
}
static const struct option options[] = {
{ "timeout", required_argument, NULL, 't' },
{ }
};
int main(int argc, char **argv)
{
int ret;
int timeout = 10;
int cpus, i, j, c;
int sve_vl_count, sme_vl_count, fpsimd_per_cpu;
bool all_children_started = false;
int seen_children;
int sve_vls[MAX_VLS], sme_vls[MAX_VLS];
bool have_sme2;
struct sigaction sa;
while ((c = getopt_long(argc, argv, "t:", options, NULL)) != -1) {
switch (c) {
case 't':
ret = sscanf(optarg, "%d", &timeout);
if (ret != 1)
ksft_exit_fail_msg("Failed to parse timeout %s\n",
optarg);
break;
default:
ksft_exit_fail_msg("Unknown argument\n");
}
}
cpus = num_processors();
tests = 0;
if (getauxval(AT_HWCAP) & HWCAP_SVE) {
probe_vls(sve_vls, &sve_vl_count, PR_SVE_SET_VL);
tests += sve_vl_count * cpus;
} else {
sve_vl_count = 0;
}
if (getauxval(AT_HWCAP2) & HWCAP2_SME) {
probe_vls(sme_vls, &sme_vl_count, PR_SME_SET_VL);
tests += sme_vl_count * cpus * 2;
} else {
sme_vl_count = 0;
}
if (getauxval(AT_HWCAP2) & HWCAP2_SME2) {
tests += cpus;
have_sme2 = true;
} else {
have_sme2 = false;
}
/* Force context switching if we only have FPSIMD */
if (!sve_vl_count && !sme_vl_count)
fpsimd_per_cpu = 2;
else
fpsimd_per_cpu = 1;
tests += cpus * fpsimd_per_cpu;
ksft_print_header();
ksft_set_plan(tests);
ksft_print_msg("%d CPUs, %d SVE VLs, %d SME VLs, SME2 %s\n",
cpus, sve_vl_count, sme_vl_count,
have_sme2 ? "present" : "absent");
if (timeout > 0)
ksft_print_msg("Will run for %ds\n", timeout);
else
ksft_print_msg("Will run until terminated\n");
	children = calloc(tests, sizeof(*children));
if (!children)
ksft_exit_fail_msg("Unable to allocate child data\n");
ret = epoll_create1(EPOLL_CLOEXEC);
if (ret < 0)
ksft_exit_fail_msg("epoll_create1() failed: %s (%d)\n",
strerror(errno), ret);
epoll_fd = ret;
/* Create a pipe which children will block on before execing */
ret = pipe(startup_pipe);
if (ret != 0)
ksft_exit_fail_msg("Failed to create startup pipe: %s (%d)\n",
strerror(errno), errno);
	/* Get signal handlers ready before we start any children */
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handle_exit_signal;
sa.sa_flags = SA_RESTART | SA_SIGINFO;
sigemptyset(&sa.sa_mask);
ret = sigaction(SIGINT, &sa, NULL);
if (ret < 0)
ksft_print_msg("Failed to install SIGINT handler: %s (%d)\n",
strerror(errno), errno);
ret = sigaction(SIGTERM, &sa, NULL);
if (ret < 0)
ksft_print_msg("Failed to install SIGTERM handler: %s (%d)\n",
strerror(errno), errno);
sa.sa_sigaction = handle_child_signal;
ret = sigaction(SIGCHLD, &sa, NULL);
if (ret < 0)
ksft_print_msg("Failed to install SIGCHLD handler: %s (%d)\n",
strerror(errno), errno);
evs = calloc(tests, sizeof(*evs));
if (!evs)
ksft_exit_fail_msg("Failed to allocated %d epoll events\n",
tests);
for (i = 0; i < cpus; i++) {
for (j = 0; j < fpsimd_per_cpu; j++)
start_fpsimd(&children[num_children++], i, j);
for (j = 0; j < sve_vl_count; j++)
start_sve(&children[num_children++], sve_vls[j], i);
for (j = 0; j < sme_vl_count; j++) {
start_ssve(&children[num_children++], sme_vls[j], i);
start_za(&children[num_children++], sme_vls[j], i);
}
if (have_sme2)
start_zt(&children[num_children++], i);
}
/*
* All children started, close the startup pipe and let them
* run.
*/
close(startup_pipe[0]);
close(startup_pipe[1]);
for (;;) {
/* Did we get a signal asking us to exit? */
if (terminate)
break;
/*
* Timeout is counted in seconds with no output, the
* tests print during startup then are silent when
* running so this should ensure they all ran enough
* to install the signal handler, this is especially
* useful in emulation where we will both be slow and
* likely to have a large set of VLs.
*/
ret = epoll_wait(epoll_fd, evs, tests, 1000);
if (ret < 0) {
if (errno == EINTR)
continue;
ksft_exit_fail_msg("epoll_wait() failed: %s (%d)\n",
strerror(errno), errno);
}
/* Output? */
if (ret > 0) {
for (i = 0; i < ret; i++) {
child_output(evs[i].data.ptr, evs[i].events,
false);
}
continue;
}
/* Otherwise epoll_wait() timed out */
/*
* If the child processes have not produced output they
		 * aren't actually running the tests yet.
*/
if (!all_children_started) {
seen_children = 0;
for (i = 0; i < num_children; i++)
if (children[i].output_seen ||
children[i].exited)
seen_children++;
if (seen_children != num_children) {
ksft_print_msg("Waiting for %d children\n",
num_children - seen_children);
continue;
}
all_children_started = true;
}
ksft_print_msg("Sending signals, timeout remaining: %d\n",
timeout);
for (i = 0; i < num_children; i++)
child_tickle(&children[i]);
/* Negative timeout means run indefinitely */
if (timeout < 0)
continue;
if (--timeout == 0)
break;
}
ksft_print_msg("Finishing up...\n");
terminate = true;
for (i = 0; i < tests; i++)
child_stop(&children[i]);
drain_output(false);
for (i = 0; i < tests; i++)
child_cleanup(&children[i]);
drain_output(true);
ksft_print_cnts();
return 0;
}
| linux-master | tools/testing/selftests/arm64/fp/fp-stress.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015-2021 ARM Limited.
* Original author: Dave Martin <[email protected]>
*/
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <asm/sigcontext.h>
#include <asm/ptrace.h>
#include "../../kselftest.h"
/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_SVE
#define NT_ARM_SVE 0x405
#endif
#ifndef NT_ARM_SSVE
#define NT_ARM_SSVE 0x40b
#endif
/*
* The architecture defines the maximum VQ as 16 but for extensibility
* the kernel specifies the SVE_VQ_MAX as 512 resulting in us running
* a *lot* more tests than are useful if we use it. Until the
* architecture is extended let's limit our coverage to what is
* currently allowed, plus one extra to ensure we cover constraining
* the VL as expected.
*/
#define TEST_VQ_MAX 17
struct vec_type {
const char *name;
unsigned long hwcap_type;
unsigned long hwcap;
int regset;
int prctl_set;
};
static const struct vec_type vec_types[] = {
{
.name = "SVE",
.hwcap_type = AT_HWCAP,
.hwcap = HWCAP_SVE,
.regset = NT_ARM_SVE,
.prctl_set = PR_SVE_SET_VL,
},
{
.name = "Streaming SVE",
.hwcap_type = AT_HWCAP2,
.hwcap = HWCAP2_SME,
.regset = NT_ARM_SSVE,
.prctl_set = PR_SME_SET_VL,
},
};
#define VL_TESTS (((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 4)
#define FLAG_TESTS 2
#define FPSIMD_TESTS 2
#define EXPECTED_TESTS ((VL_TESTS + FLAG_TESTS + FPSIMD_TESTS) * ARRAY_SIZE(vec_types))
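/*
 * With SVE_VQ_MIN == 1 this works out to ((17 - 1) + 1) * 4 = 68 VL
 * tests plus 2 flag and 2 FPSIMD tests per vector type, i.e. 72 tests
 * per type and 144 in total.
 */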
static void fill_buf(char *buf, size_t size)
{
int i;
for (i = 0; i < size; i++)
buf[i] = random();
}
static int do_child(void)
{
	if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
		ksft_exit_fail_msg("PTRACE_TRACEME: %s\n", strerror(errno));
	if (raise(SIGSTOP))
		ksft_exit_fail_msg("raise(SIGSTOP): %s\n", strerror(errno));
return EXIT_SUCCESS;
}
static int get_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
struct iovec iov;
iov.iov_base = fpsimd;
iov.iov_len = sizeof(*fpsimd);
return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
}
static int set_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
struct iovec iov;
iov.iov_base = fpsimd;
iov.iov_len = sizeof(*fpsimd);
return ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov);
}
static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type,
void **buf, size_t *size)
{
struct user_sve_header *sve;
void *p;
size_t sz = sizeof *sve;
struct iovec iov;
while (1) {
if (*size < sz) {
p = realloc(*buf, sz);
if (!p) {
errno = ENOMEM;
goto error;
}
*buf = p;
*size = sz;
}
iov.iov_base = *buf;
iov.iov_len = sz;
if (ptrace(PTRACE_GETREGSET, pid, type->regset, &iov))
goto error;
sve = *buf;
if (sve->size <= sz)
break;
sz = sve->size;
}
return sve;
error:
return NULL;
}
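/*
 * Sketch of typical get_sve() usage (hypothetical caller, for
 * illustration only):
 *
 *	void *buf = NULL;
 *	size_t size = 0;
 *	struct user_sve_header *sve;
 *
 *	sve = get_sve(pid, &vec_types[0], &buf, &size);
 *	if (sve)
 *		printf("VL %u, flags %x\n", sve->vl, sve->flags);
 *	free(buf);
 *
 * The realloc() loop retries until the buffer is large enough for
 * whatever regset size the kernel reports.
 */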
static int set_sve(pid_t pid, const struct vec_type *type,
const struct user_sve_header *sve)
{
struct iovec iov;
iov.iov_base = (void *)sve;
iov.iov_len = sve->size;
return ptrace(PTRACE_SETREGSET, pid, type->regset, &iov);
}
/* Validate setting and getting the inherit flag */
static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
{
struct user_sve_header sve;
struct user_sve_header *new_sve = NULL;
size_t new_sve_size = 0;
int ret;
/* First set the flag */
memset(&sve, 0, sizeof(sve));
sve.size = sizeof(sve);
sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
sve.flags = SVE_PT_VL_INHERIT;
ret = set_sve(child, type, &sve);
if (ret != 0) {
ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
type->name);
return;
}
/*
* Read back the new register state and verify that we have
* set the flags we expected.
*/
if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
ksft_test_result_fail("Failed to read %s SVE flags\n",
type->name);
return;
}
ksft_test_result(new_sve->flags & SVE_PT_VL_INHERIT,
"%s SVE_PT_VL_INHERIT set\n", type->name);
/* Now clear */
sve.flags &= ~SVE_PT_VL_INHERIT;
ret = set_sve(child, type, &sve);
if (ret != 0) {
ksft_test_result_fail("Failed to clear %s SVE_PT_VL_INHERIT\n",
type->name);
return;
}
if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
ksft_test_result_fail("Failed to read %s SVE flags\n",
type->name);
return;
}
ksft_test_result(!(new_sve->flags & SVE_PT_VL_INHERIT),
"%s SVE_PT_VL_INHERIT cleared\n", type->name);
free(new_sve);
}
/* Validate attempting to set the specified VL via ptrace */
static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
unsigned int vl, bool *supported)
{
struct user_sve_header sve;
struct user_sve_header *new_sve = NULL;
size_t new_sve_size = 0;
int ret, prctl_vl;
*supported = false;
/* Check if the VL is supported in this process */
prctl_vl = prctl(type->prctl_set, vl);
if (prctl_vl == -1)
ksft_exit_fail_msg("prctl(PR_%s_SET_VL) failed: %s (%d)\n",
type->name, strerror(errno), errno);
/* If the VL is not supported then a supported VL will be returned */
*supported = (prctl_vl == vl);
/* Set the VL by doing a set with no register payload */
memset(&sve, 0, sizeof(sve));
sve.size = sizeof(sve);
sve.vl = vl;
ret = set_sve(child, type, &sve);
if (ret != 0) {
ksft_test_result_fail("Failed to set %s VL %u\n",
type->name, vl);
return;
}
/*
* Read back the new register state and verify that we have the
* same VL that we got from prctl() on ourselves.
*/
if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
ksft_test_result_fail("Failed to read %s VL %u\n",
type->name, vl);
return;
}
ksft_test_result(new_sve->vl = prctl_vl, "Set %s VL %u\n",
type->name, vl);
free(new_sve);
}
static void check_u32(unsigned int vl, const char *reg,
uint32_t *in, uint32_t *out, int *errors)
{
if (*in != *out) {
printf("# VL %d %s wrote %x read %x\n",
vl, reg, *in, *out);
(*errors)++;
}
}
/* Access the FPSIMD registers via the SVE regset */
static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type)
{
void *svebuf;
struct user_sve_header *sve;
struct user_fpsimd_state *fpsimd, new_fpsimd;
unsigned int i, j;
unsigned char *p;
int ret;
svebuf = malloc(SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
if (!svebuf) {
ksft_test_result_fail("Failed to allocate FPSIMD buffer\n");
return;
}
memset(svebuf, 0, SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
sve = svebuf;
sve->flags = SVE_PT_REGS_FPSIMD;
sve->size = SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD);
sve->vl = 16; /* We don't care what the VL is */
/* Try to set a known FPSIMD state via PT_REGS_SVE */
fpsimd = (struct user_fpsimd_state *)((char *)sve +
SVE_PT_FPSIMD_OFFSET);
for (i = 0; i < 32; ++i) {
p = (unsigned char *)&fpsimd->vregs[i];
for (j = 0; j < sizeof(fpsimd->vregs[i]); ++j)
p[j] = j;
}
ret = set_sve(child, type, sve);
ksft_test_result(ret == 0, "%s FPSIMD set via SVE: %d\n",
type->name, ret);
if (ret)
goto out;
/* Verify via the FPSIMD regset */
if (get_fpsimd(child, &new_fpsimd)) {
ksft_test_result_fail("get_fpsimd(): %s\n",
strerror(errno));
goto out;
}
if (memcmp(fpsimd, &new_fpsimd, sizeof(*fpsimd)) == 0)
ksft_test_result_pass("%s get_fpsimd() gave same state\n",
type->name);
else
ksft_test_result_fail("%s get_fpsimd() gave different state\n",
type->name);
out:
free(svebuf);
}
/* Validate attempting to set SVE data and read SVE data */
static void ptrace_set_sve_get_sve_data(pid_t child,
const struct vec_type *type,
unsigned int vl)
{
void *write_buf;
void *read_buf = NULL;
struct user_sve_header *write_sve;
struct user_sve_header *read_sve;
size_t read_sve_size = 0;
unsigned int vq = sve_vq_from_vl(vl);
int ret, i;
size_t data_size;
int errors = 0;
data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
write_buf = malloc(data_size);
if (!write_buf) {
ksft_test_result_fail("Error allocating %d byte buffer for %s VL %u\n",
data_size, type->name, vl);
return;
}
write_sve = write_buf;
/* Set up some data and write it out */
memset(write_sve, 0, data_size);
write_sve->size = data_size;
write_sve->vl = vl;
write_sve->flags = SVE_PT_REGS_SVE;
for (i = 0; i < __SVE_NUM_ZREGS; i++)
fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
SVE_PT_SVE_ZREG_SIZE(vq));
for (i = 0; i < __SVE_NUM_PREGS; i++)
fill_buf(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
SVE_PT_SVE_PREG_SIZE(vq));
fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);
/* TODO: Generate a valid FFR pattern */
ret = set_sve(child, type, write_sve);
if (ret != 0) {
ksft_test_result_fail("Failed to set %s VL %u data\n",
type->name, vl);
goto out;
}
/* Read the data back */
if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
ksft_test_result_fail("Failed to read %s VL %u data\n",
type->name, vl);
goto out;
}
read_sve = read_buf;
/* We might read more data if there's extensions we don't know */
if (read_sve->size < write_sve->size) {
ksft_test_result_fail("%s wrote %d bytes, only read %d\n",
type->name, write_sve->size,
read_sve->size);
goto out_read;
}
for (i = 0; i < __SVE_NUM_ZREGS; i++) {
if (memcmp(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
SVE_PT_SVE_ZREG_SIZE(vq)) != 0) {
printf("# Mismatch in %u Z%d\n", vl, i);
errors++;
}
}
for (i = 0; i < __SVE_NUM_PREGS; i++) {
if (memcmp(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
read_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
SVE_PT_SVE_PREG_SIZE(vq)) != 0) {
printf("# Mismatch in %u P%d\n", vl, i);
errors++;
}
}
check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);
ksft_test_result(errors == 0, "Set and get %s data for VL %u\n",
type->name, vl);
out_read:
free(read_buf);
out:
free(write_buf);
}
/* Validate attempting to set SVE data and read it via the FPSIMD regset */
static void ptrace_set_sve_get_fpsimd_data(pid_t child,
const struct vec_type *type,
unsigned int vl)
{
void *write_buf;
struct user_sve_header *write_sve;
unsigned int vq = sve_vq_from_vl(vl);
struct user_fpsimd_state fpsimd_state;
int ret, i;
size_t data_size;
int errors = 0;
if (__BYTE_ORDER == __BIG_ENDIAN) {
ksft_test_result_skip("Big endian not supported\n");
return;
}
data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
write_buf = malloc(data_size);
if (!write_buf) {
ksft_test_result_fail("Error allocating %d byte buffer for %s VL %u\n",
data_size, type->name, vl);
return;
}
write_sve = write_buf;
/* Set up some data and write it out */
memset(write_sve, 0, data_size);
write_sve->size = data_size;
write_sve->vl = vl;
write_sve->flags = SVE_PT_REGS_SVE;
for (i = 0; i < __SVE_NUM_ZREGS; i++)
fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
SVE_PT_SVE_ZREG_SIZE(vq));
fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);
ret = set_sve(child, type, write_sve);
if (ret != 0) {
ksft_test_result_fail("Failed to set %s VL %u data\n",
type->name, vl);
goto out;
}
/* Read the data back */
if (get_fpsimd(child, &fpsimd_state)) {
ksft_test_result_fail("Failed to read %s VL %u FPSIMD data\n",
type->name, vl);
goto out;
}
for (i = 0; i < __SVE_NUM_ZREGS; i++) {
__uint128_t tmp = 0;
/*
* Z regs are stored endianness invariant, this won't
* work for big endian
*/
memcpy(&tmp, write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
sizeof(tmp));
if (tmp != fpsimd_state.vregs[i]) {
printf("# Mismatch in FPSIMD for %s VL %u Z%d\n",
type->name, vl, i);
errors++;
}
}
check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
&fpsimd_state.fpsr, &errors);
check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
&fpsimd_state.fpcr, &errors);
ksft_test_result(errors == 0, "Set and get FPSIMD data for %s VL %u\n",
type->name, vl);
out:
free(write_buf);
}
/* Validate attempting to set FPSIMD data and read it via the SVE regset */
static void ptrace_set_fpsimd_get_sve_data(pid_t child,
const struct vec_type *type,
unsigned int vl)
{
void *read_buf = NULL;
unsigned char *p;
struct user_sve_header *read_sve;
unsigned int vq = sve_vq_from_vl(vl);
struct user_fpsimd_state write_fpsimd;
int ret, i, j;
size_t read_sve_size = 0;
size_t expected_size;
int errors = 0;
if (__BYTE_ORDER == __BIG_ENDIAN) {
ksft_test_result_skip("Big endian not supported\n");
return;
}
for (i = 0; i < 32; ++i) {
p = (unsigned char *)&write_fpsimd.vregs[i];
for (j = 0; j < sizeof(write_fpsimd.vregs[i]); ++j)
p[j] = j;
}
ret = set_fpsimd(child, &write_fpsimd);
if (ret != 0) {
ksft_test_result_fail("Failed to set FPSIMD state: %d\n)",
ret);
return;
}
if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
ksft_test_result_fail("Failed to read %s VL %u data\n",
type->name, vl);
return;
}
read_sve = read_buf;
if (read_sve->vl != vl) {
ksft_test_result_fail("Child VL != expected VL %d\n",
read_sve->vl, vl);
goto out;
}
/* The kernel may return either SVE or FPSIMD format */
switch (read_sve->flags & SVE_PT_REGS_MASK) {
case SVE_PT_REGS_FPSIMD:
expected_size = SVE_PT_FPSIMD_SIZE(vq, SVE_PT_REGS_FPSIMD);
if (read_sve_size < expected_size) {
ksft_test_result_fail("Read %d bytes, expected %d\n",
read_sve_size, expected_size);
goto out;
}
ret = memcmp(&write_fpsimd, read_buf + SVE_PT_FPSIMD_OFFSET,
sizeof(write_fpsimd));
if (ret != 0) {
ksft_print_msg("Read FPSIMD data mismatch\n");
errors++;
}
break;
case SVE_PT_REGS_SVE:
expected_size = SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
if (read_sve_size < expected_size) {
ksft_test_result_fail("Read %d bytes, expected %d\n",
read_sve_size, expected_size);
goto out;
}
for (i = 0; i < __SVE_NUM_ZREGS; i++) {
__uint128_t tmp = 0;
/*
* Z regs are stored endianness invariant, this won't
* work for big endian
*/
memcpy(&tmp, read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
sizeof(tmp));
if (tmp != write_fpsimd.vregs[i]) {
ksft_print_msg("Mismatch in FPSIMD for %s VL %u Z%d/V%d\n",
type->name, vl, i, i);
errors++;
}
}
check_u32(vl, "FPSR", &write_fpsimd.fpsr,
read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
check_u32(vl, "FPCR", &write_fpsimd.fpcr,
read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);
break;
default:
ksft_print_msg("Unexpected regs type %d\n",
read_sve->flags & SVE_PT_REGS_MASK);
errors++;
break;
}
ksft_test_result(errors == 0, "Set FPSIMD, read via SVE for %s VL %u\n",
type->name, vl);
out:
free(read_buf);
}
static int do_parent(pid_t child)
{
int ret = EXIT_FAILURE;
pid_t pid;
int status, i;
siginfo_t si;
unsigned int vq, vl;
bool vl_supported;
ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);
/* Attach to the child */
while (1) {
int sig;
pid = wait(&status);
if (pid == -1) {
perror("wait");
goto error;
}
/*
* This should never happen but it's hard to flag in
* the framework.
*/
if (pid != child)
continue;
if (WIFEXITED(status) || WIFSIGNALED(status))
ksft_exit_fail_msg("Child died unexpectedly\n");
if (!WIFSTOPPED(status))
goto error;
sig = WSTOPSIG(status);
if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
if (errno == ESRCH)
goto disappeared;
if (errno == EINVAL) {
sig = 0; /* bust group-stop */
goto cont;
}
ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
strerror(errno));
goto error;
}
if (sig == SIGSTOP && si.si_code == SI_TKILL &&
si.si_pid == pid)
break;
cont:
if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
if (errno == ESRCH)
goto disappeared;
ksft_test_result_fail("PTRACE_CONT: %s\n",
strerror(errno));
goto error;
}
}
for (i = 0; i < ARRAY_SIZE(vec_types); i++) {
/* FPSIMD via SVE regset */
if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
ptrace_sve_fpsimd(child, &vec_types[i]);
} else {
ksft_test_result_skip("%s FPSIMD set via SVE\n",
vec_types[i].name);
ksft_test_result_skip("%s FPSIMD read\n",
vec_types[i].name);
}
/* prctl() flags */
if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
ptrace_set_get_inherit(child, &vec_types[i]);
} else {
ksft_test_result_skip("%s SVE_PT_VL_INHERIT set\n",
vec_types[i].name);
ksft_test_result_skip("%s SVE_PT_VL_INHERIT cleared\n",
vec_types[i].name);
}
/* Step through every possible VQ */
for (vq = SVE_VQ_MIN; vq <= TEST_VQ_MAX; vq++) {
vl = sve_vl_from_vq(vq);
/* First, try to set this vector length */
if (getauxval(vec_types[i].hwcap_type) &
vec_types[i].hwcap) {
ptrace_set_get_vl(child, &vec_types[i], vl,
&vl_supported);
} else {
ksft_test_result_skip("%s get/set VL %d\n",
vec_types[i].name, vl);
vl_supported = false;
}
/* If the VL is supported validate data set/get */
if (vl_supported) {
ptrace_set_sve_get_sve_data(child, &vec_types[i], vl);
ptrace_set_sve_get_fpsimd_data(child, &vec_types[i], vl);
ptrace_set_fpsimd_get_sve_data(child, &vec_types[i], vl);
} else {
ksft_test_result_skip("%s set SVE get SVE for VL %d\n",
vec_types[i].name, vl);
ksft_test_result_skip("%s set SVE get FPSIMD for VL %d\n",
vec_types[i].name, vl);
ksft_test_result_skip("%s set FPSIMD get SVE for VL %d\n",
vec_types[i].name, vl);
}
}
}
ret = EXIT_SUCCESS;
error:
kill(child, SIGKILL);
disappeared:
return ret;
}
int main(void)
{
int ret = EXIT_SUCCESS;
pid_t child;
srandom(getpid());
ksft_print_header();
ksft_set_plan(EXPECTED_TESTS);
if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
ksft_exit_skip("SVE not available\n");
child = fork();
if (!child)
return do_child();
if (do_parent(child))
ret = EXIT_FAILURE;
ksft_print_cnts();
return ret;
}
| linux-master | tools/testing/selftests/arm64/fp/sve-ptrace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 ARM Limited.
*/
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <asm/sigcontext.h>
#include <asm/ptrace.h>
#include "../../kselftest.h"
/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_ZA
#define NT_ARM_ZA 0x40c
#endif
#ifndef NT_ARM_ZT
#define NT_ARM_ZT 0x40d
#endif
#define EXPECTED_TESTS 3
static int sme_vl;
static void fill_buf(char *buf, size_t size)
{
int i;
for (i = 0; i < size; i++)
buf[i] = random();
}
static int do_child(void)
{
	if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
		ksft_exit_fail_msg("PTRACE_TRACEME: %s\n", strerror(errno));
	if (raise(SIGSTOP))
		ksft_exit_fail_msg("raise(SIGSTOP): %s\n", strerror(errno));
return EXIT_SUCCESS;
}
static struct user_za_header *get_za(pid_t pid, void **buf, size_t *size)
{
struct user_za_header *za;
void *p;
size_t sz = sizeof(*za);
struct iovec iov;
while (1) {
if (*size < sz) {
p = realloc(*buf, sz);
if (!p) {
errno = ENOMEM;
goto error;
}
*buf = p;
*size = sz;
}
iov.iov_base = *buf;
iov.iov_len = sz;
if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov))
goto error;
za = *buf;
if (za->size <= sz)
break;
sz = za->size;
}
return za;
error:
return NULL;
}
static int set_za(pid_t pid, const struct user_za_header *za)
{
struct iovec iov;
iov.iov_base = (void *)za;
iov.iov_len = za->size;
return ptrace(PTRACE_SETREGSET, pid, NT_ARM_ZA, &iov);
}
static int get_zt(pid_t pid, char zt[ZT_SIG_REG_BYTES])
{
struct iovec iov;
iov.iov_base = zt;
iov.iov_len = ZT_SIG_REG_BYTES;
return ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZT, &iov);
}
static int set_zt(pid_t pid, const char zt[ZT_SIG_REG_BYTES])
{
struct iovec iov;
iov.iov_base = (void *)zt;
iov.iov_len = ZT_SIG_REG_BYTES;
return ptrace(PTRACE_SETREGSET, pid, NT_ARM_ZT, &iov);
}
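/*
 * ZT_SIG_REG_BYTES is 64: ZT0 is a single 512-bit register, so unlike
 * ZA its regset size does not depend on the vector length.
 */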
/* Reading with ZA disabled returns all zeros */
static void ptrace_za_disabled_read_zt(pid_t child)
{
struct user_za_header za;
char zt[ZT_SIG_REG_BYTES];
int ret, i;
bool fail = false;
/* Disable PSTATE.ZA using the ZA interface */
memset(&za, 0, sizeof(za));
za.vl = sme_vl;
za.size = sizeof(za);
ret = set_za(child, &za);
if (ret != 0) {
ksft_print_msg("Failed to disable ZA\n");
fail = true;
}
/* Read back ZT */
ret = get_zt(child, zt);
if (ret != 0) {
ksft_print_msg("Failed to read ZT\n");
fail = true;
}
for (i = 0; i < ARRAY_SIZE(zt); i++) {
if (zt[i]) {
ksft_print_msg("zt[%d]: 0x%x != 0\n", i, zt[i]);
fail = true;
}
}
ksft_test_result(!fail, "ptrace_za_disabled_read_zt\n");
}
/* Writing then reading ZT should return the data written */
static void ptrace_set_get_zt(pid_t child)
{
char zt_in[ZT_SIG_REG_BYTES];
char zt_out[ZT_SIG_REG_BYTES];
int ret, i;
bool fail = false;
fill_buf(zt_in, sizeof(zt_in));
ret = set_zt(child, zt_in);
if (ret != 0) {
ksft_print_msg("Failed to set ZT\n");
fail = true;
}
ret = get_zt(child, zt_out);
if (ret != 0) {
ksft_print_msg("Failed to read ZT\n");
fail = true;
}
for (i = 0; i < ARRAY_SIZE(zt_in); i++) {
if (zt_in[i] != zt_out[i]) {
ksft_print_msg("zt[%d]: 0x%x != 0x%x\n", i,
zt_in[i], zt_out[i]);
fail = true;
}
}
ksft_test_result(!fail, "ptrace_set_get_zt\n");
}
/* Writing ZT should set PSTATE.ZA */
static void ptrace_enable_za_via_zt(pid_t child)
{
struct user_za_header za_in;
struct user_za_header *za_out;
char zt[ZT_SIG_REG_BYTES];
char *za_data;
size_t za_out_size;
int ret, i, vq;
bool fail = false;
/* Disable PSTATE.ZA using the ZA interface */
memset(&za_in, 0, sizeof(za_in));
za_in.vl = sme_vl;
za_in.size = sizeof(za_in);
ret = set_za(child, &za_in);
if (ret != 0) {
ksft_print_msg("Failed to disable ZA\n");
fail = true;
}
/* Write ZT */
fill_buf(zt, sizeof(zt));
ret = set_zt(child, zt);
if (ret != 0) {
ksft_print_msg("Failed to set ZT\n");
fail = true;
}
/* Read back ZA and check for register data */
za_out = NULL;
za_out_size = 0;
if (get_za(child, (void **)&za_out, &za_out_size)) {
/* Should have an unchanged VL */
if (za_out->vl != sme_vl) {
ksft_print_msg("VL changed from %d to %d\n",
sme_vl, za_out->vl);
fail = true;
}
vq = __sve_vq_from_vl(za_out->vl);
za_data = (char *)za_out + ZA_PT_ZA_OFFSET;
/* Should have register data */
if (za_out->size < ZA_PT_SIZE(vq)) {
ksft_print_msg("ZA data less than expected: %u < %u\n",
za_out->size, ZA_PT_SIZE(vq));
fail = true;
vq = 0;
}
		/* That register data should be zero: newly enabled ZA is zeroed */
for (i = 0; i < ZA_PT_ZA_SIZE(vq); i++) {
if (za_data[i]) {
ksft_print_msg("ZA byte %d is %x\n",
i, za_data[i]);
fail = true;
}
}
} else {
ksft_print_msg("Failed to read ZA\n");
fail = true;
}
ksft_test_result(!fail, "ptrace_enable_za_via_zt\n");
}
static int do_parent(pid_t child)
{
int ret = EXIT_FAILURE;
pid_t pid;
int status;
siginfo_t si;
/* Attach to the child */
while (1) {
int sig;
pid = wait(&status);
if (pid == -1) {
perror("wait");
goto error;
}
/*
* This should never happen but it's hard to flag in
* the framework.
*/
if (pid != child)
continue;
if (WIFEXITED(status) || WIFSIGNALED(status))
ksft_exit_fail_msg("Child died unexpectedly\n");
if (!WIFSTOPPED(status))
goto error;
sig = WSTOPSIG(status);
if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
if (errno == ESRCH)
goto disappeared;
if (errno == EINVAL) {
sig = 0; /* bust group-stop */
goto cont;
}
ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
strerror(errno));
goto error;
}
if (sig == SIGSTOP && si.si_code == SI_TKILL &&
si.si_pid == pid)
break;
cont:
if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
if (errno == ESRCH)
goto disappeared;
ksft_test_result_fail("PTRACE_CONT: %s\n",
strerror(errno));
goto error;
}
}
ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);
ptrace_za_disabled_read_zt(child);
ptrace_set_get_zt(child);
ptrace_enable_za_via_zt(child);
ret = EXIT_SUCCESS;
error:
kill(child, SIGKILL);
disappeared:
return ret;
}
int main(void)
{
int ret = EXIT_SUCCESS;
pid_t child;
srandom(getpid());
ksft_print_header();
if (!(getauxval(AT_HWCAP2) & HWCAP2_SME2)) {
ksft_set_plan(1);
ksft_exit_skip("SME2 not available\n");
}
/* We need a valid SME VL to enable/disable ZA */
sme_vl = prctl(PR_SME_GET_VL);
if (sme_vl == -1) {
ksft_set_plan(1);
ksft_exit_skip("Failed to read SME VL: %d (%s)\n",
errno, strerror(errno));
}
ksft_set_plan(EXPECTED_TESTS);
child = fork();
if (!child)
return do_child();
if (do_parent(child))
ret = EXIT_FAILURE;
ksft_print_cnts();
return ret;
}
| linux-master | tools/testing/selftests/arm64/fp/zt-ptrace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 ARM Limited.
*/
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <asm/sigcontext.h>
#include <asm/ptrace.h>
#include "../../kselftest.h"
/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_ZA
#define NT_ARM_ZA 0x40c
#endif
/*
* The architecture defines the maximum VQ as 16 but for extensibility
* the kernel specifies the SVE_VQ_MAX as 512 resulting in us running
* a *lot* more tests than are useful if we use it. Until the
* architecture is extended let's limit our coverage to what is
* currently allowed, plus one extra to ensure we cover constraining
* the VL as expected.
*/
#define TEST_VQ_MAX 17
#define EXPECTED_TESTS (((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 3)
static void fill_buf(char *buf, size_t size)
{
int i;
for (i = 0; i < size; i++)
buf[i] = random();
}
static int do_child(void)
{
	if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
		ksft_exit_fail_msg("PTRACE_TRACEME: %s\n", strerror(errno));
	if (raise(SIGSTOP))
		ksft_exit_fail_msg("raise(SIGSTOP): %s\n", strerror(errno));
return EXIT_SUCCESS;
}
static struct user_za_header *get_za(pid_t pid, void **buf, size_t *size)
{
struct user_za_header *za;
void *p;
size_t sz = sizeof(*za);
struct iovec iov;
while (1) {
if (*size < sz) {
p = realloc(*buf, sz);
if (!p) {
errno = ENOMEM;
goto error;
}
*buf = p;
*size = sz;
}
iov.iov_base = *buf;
iov.iov_len = sz;
if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov))
goto error;
za = *buf;
if (za->size <= sz)
break;
sz = za->size;
}
return za;
error:
return NULL;
}
static int set_za(pid_t pid, const struct user_za_header *za)
{
struct iovec iov;
iov.iov_base = (void *)za;
iov.iov_len = za->size;
return ptrace(PTRACE_SETREGSET, pid, NT_ARM_ZA, &iov);
}
/* Validate attempting to set the specified VL via ptrace */
static void ptrace_set_get_vl(pid_t child, unsigned int vl, bool *supported)
{
struct user_za_header za;
struct user_za_header *new_za = NULL;
size_t new_za_size = 0;
int ret, prctl_vl;
*supported = false;
/* Check if the VL is supported in this process */
prctl_vl = prctl(PR_SME_SET_VL, vl);
if (prctl_vl == -1)
ksft_exit_fail_msg("prctl(PR_SME_SET_VL) failed: %s (%d)\n",
strerror(errno), errno);
/* If the VL is not supported then a supported VL will be returned */
*supported = (prctl_vl == vl);
/* Set the VL by doing a set with no register payload */
memset(&za, 0, sizeof(za));
za.size = sizeof(za);
za.vl = vl;
ret = set_za(child, &za);
if (ret != 0) {
ksft_test_result_fail("Failed to set VL %u\n", vl);
return;
}
/*
* Read back the new register state and verify that we have the
* same VL that we got from prctl() on ourselves.
*/
if (!get_za(child, (void **)&new_za, &new_za_size)) {
ksft_test_result_fail("Failed to read VL %u\n", vl);
return;
}
ksft_test_result(new_za->vl = prctl_vl, "Set VL %u\n", vl);
free(new_za);
}
/* Validate attempting to set no ZA data and read it back */
static void ptrace_set_no_data(pid_t child, unsigned int vl)
{
void *read_buf = NULL;
struct user_za_header write_za;
struct user_za_header *read_za;
size_t read_za_size = 0;
int ret;
/* Set up some data and write it out */
memset(&write_za, 0, sizeof(write_za));
write_za.size = ZA_PT_ZA_OFFSET;
write_za.vl = vl;
ret = set_za(child, &write_za);
if (ret != 0) {
ksft_test_result_fail("Failed to set VL %u no data\n", vl);
return;
}
/* Read the data back */
if (!get_za(child, (void **)&read_buf, &read_za_size)) {
ksft_test_result_fail("Failed to read VL %u no data\n", vl);
return;
}
read_za = read_buf;
/* We might read more data if there's extensions we don't know */
if (read_za->size < write_za.size) {
ksft_test_result_fail("VL %u wrote %d bytes, only read %d\n",
vl, write_za.size, read_za->size);
goto out_read;
}
ksft_test_result(read_za->size == write_za.size,
"Disabled ZA for VL %u\n", vl);
out_read:
free(read_buf);
}
/* Validate attempting to set data and read it back */
static void ptrace_set_get_data(pid_t child, unsigned int vl)
{
void *write_buf;
void *read_buf = NULL;
struct user_za_header *write_za;
struct user_za_header *read_za;
size_t read_za_size = 0;
unsigned int vq = sve_vq_from_vl(vl);
int ret;
size_t data_size;
data_size = ZA_PT_SIZE(vq);
write_buf = malloc(data_size);
if (!write_buf) {
ksft_test_result_fail("Error allocating %d byte buffer for VL %u\n",
data_size, vl);
return;
}
write_za = write_buf;
/* Set up some data and write it out */
memset(write_za, 0, data_size);
write_za->size = data_size;
write_za->vl = vl;
fill_buf(write_buf + ZA_PT_ZA_OFFSET, ZA_PT_ZA_SIZE(vq));
ret = set_za(child, write_za);
if (ret != 0) {
ksft_test_result_fail("Failed to set VL %u data\n", vl);
goto out;
}
/* Read the data back */
if (!get_za(child, (void **)&read_buf, &read_za_size)) {
ksft_test_result_fail("Failed to read VL %u data\n", vl);
goto out;
}
read_za = read_buf;
/* We might read more data if there's extensions we don't know */
if (read_za->size < write_za->size) {
ksft_test_result_fail("VL %u wrote %d bytes, only read %d\n",
vl, write_za->size, read_za->size);
goto out_read;
}
ksft_test_result(memcmp(write_buf + ZA_PT_ZA_OFFSET,
read_buf + ZA_PT_ZA_OFFSET,
ZA_PT_ZA_SIZE(vq)) == 0,
"Data match for VL %u\n", vl);
out_read:
free(read_buf);
out:
free(write_buf);
}
static int do_parent(pid_t child)
{
int ret = EXIT_FAILURE;
pid_t pid;
int status;
siginfo_t si;
unsigned int vq, vl;
bool vl_supported;
/* Attach to the child */
while (1) {
int sig;
pid = wait(&status);
if (pid == -1) {
perror("wait");
goto error;
}
/*
* This should never happen but it's hard to flag in
* the framework.
*/
if (pid != child)
continue;
if (WIFEXITED(status) || WIFSIGNALED(status))
ksft_exit_fail_msg("Child died unexpectedly\n");
if (!WIFSTOPPED(status))
goto error;
sig = WSTOPSIG(status);
if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
if (errno == ESRCH)
goto disappeared;
if (errno == EINVAL) {
sig = 0; /* bust group-stop */
goto cont;
}
ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
strerror(errno));
goto error;
}
if (sig == SIGSTOP && si.si_code == SI_TKILL &&
si.si_pid == pid)
break;
cont:
if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
if (errno == ESRCH)
goto disappeared;
ksft_test_result_fail("PTRACE_CONT: %s\n",
strerror(errno));
goto error;
}
}
ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);
/* Step through every possible VQ */
for (vq = SVE_VQ_MIN; vq <= TEST_VQ_MAX; vq++) {
vl = sve_vl_from_vq(vq);
/* First, try to set this vector length */
ptrace_set_get_vl(child, vl, &vl_supported);
/* If the VL is supported validate data set/get */
if (vl_supported) {
ptrace_set_no_data(child, vl);
ptrace_set_get_data(child, vl);
} else {
ksft_test_result_skip("Disabled ZA for VL %u\n", vl);
ksft_test_result_skip("Get and set data for VL %u\n",
vl);
}
}
ret = EXIT_SUCCESS;
error:
kill(child, SIGKILL);
disappeared:
return ret;
}
int main(void)
{
int ret = EXIT_SUCCESS;
pid_t child;
srandom(getpid());
ksft_print_header();
if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) {
ksft_set_plan(1);
ksft_exit_skip("SME not available\n");
}
ksft_set_plan(EXPECTED_TESTS);
child = fork();
if (!child)
return do_child();
if (do_parent(child))
ret = EXIT_FAILURE;
ksft_print_cnts();
return ret;
}
| linux-master | tools/testing/selftests/arm64/fp/za-ptrace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015-2020 ARM Limited.
* Original author: Dave Martin <[email protected]>
*/
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <asm/sigcontext.h>
#include "../../kselftest.h"
#include "rdvl.h"
int main(int argc, char **argv)
{
unsigned int vq;
int vl;
static unsigned int vqs[SVE_VQ_MAX];
unsigned int nvqs = 0;
ksft_print_header();
ksft_set_plan(2);
if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
ksft_exit_skip("SVE not available\n");
/*
* Enumerate up to SVE_VQ_MAX vector lengths
*/
for (vq = SVE_VQ_MAX; vq > 0; --vq) {
vl = prctl(PR_SVE_SET_VL, vq * 16);
if (vl == -1)
ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n",
strerror(errno), errno);
vl &= PR_SVE_VL_LEN_MASK;
if (rdvl_sve() != vl)
ksft_exit_fail_msg("PR_SVE_SET_VL reports %d, RDVL %d\n",
vl, rdvl_sve());
if (!sve_vl_valid(vl))
ksft_exit_fail_msg("VL %d invalid\n", vl);
vq = sve_vq_from_vl(vl);
if (!(nvqs < SVE_VQ_MAX))
ksft_exit_fail_msg("Too many VLs %u >= SVE_VQ_MAX\n",
nvqs);
vqs[nvqs++] = vq;
}
ksft_test_result_pass("Enumerated %d vector lengths\n", nvqs);
ksft_test_result_pass("All vector lengths valid\n");
/* Print out the vector lengths in ascending order: */
while (nvqs--)
ksft_print_msg("%u\n", 16 * vqs[nvqs]);
ksft_exit_pass();
}
| linux-master | tools/testing/selftests/arm64/fp/sve-probe-vls.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 ARM Limited.
* Original author: Mark Brown <[email protected]>
*/
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <asm/sigcontext.h>
#include <asm/hwcap.h>
#include "../../kselftest.h"
#include "rdvl.h"
#define ARCH_MIN_VL SVE_VL_MIN
struct vec_data {
const char *name;
unsigned long hwcap_type;
unsigned long hwcap;
const char *rdvl_binary;
int (*rdvl)(void);
int prctl_get;
int prctl_set;
const char *default_vl_file;
int default_vl;
int min_vl;
int max_vl;
};
#define VEC_SVE 0
#define VEC_SME 1
static struct vec_data vec_data[] = {
[VEC_SVE] = {
.name = "SVE",
.hwcap_type = AT_HWCAP,
.hwcap = HWCAP_SVE,
.rdvl = rdvl_sve,
.rdvl_binary = "./rdvl-sve",
.prctl_get = PR_SVE_GET_VL,
.prctl_set = PR_SVE_SET_VL,
.default_vl_file = "/proc/sys/abi/sve_default_vector_length",
},
[VEC_SME] = {
.name = "SME",
.hwcap_type = AT_HWCAP2,
.hwcap = HWCAP2_SME,
.rdvl = rdvl_sme,
.rdvl_binary = "./rdvl-sme",
.prctl_get = PR_SME_GET_VL,
.prctl_set = PR_SME_SET_VL,
.default_vl_file = "/proc/sys/abi/sme_default_vector_length",
},
};
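/*
 * Sketch of the prctl() encoding these tests rely on (values
 * illustrative): the return value packs the granted VL together with
 * any flag bits.
 *
 *	int ret = prctl(PR_SVE_SET_VL, 32);
 *	int vl = ret & PR_SVE_VL_LEN_MASK;	// VL actually granted
 *	int inherit = ret & PR_SVE_VL_INHERIT;	// flags ride along
 */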
static int stdio_read_integer(FILE *f, const char *what, int *val)
{
int n = 0;
int ret;
ret = fscanf(f, "%d%*1[\n]%n", val, &n);
if (ret < 1 || n < 1) {
ksft_print_msg("failed to parse integer from %s\n", what);
return -1;
}
return 0;
}
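/*
 * The "%d%*1[\n]%n" format above reads an integer, consumes exactly
 * one newline without storing it, and records in n how many
 * characters were matched - so n >= 1 only if both the number and the
 * trailing newline were present.
 */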
/* Start a new process and return the vector length it sees */
static int get_child_rdvl(struct vec_data *data)
{
FILE *out;
int pipefd[2];
pid_t pid, child;
int read_vl, ret;
ret = pipe(pipefd);
if (ret == -1) {
ksft_print_msg("pipe() failed: %d (%s)\n",
errno, strerror(errno));
return -1;
}
fflush(stdout);
child = fork();
if (child == -1) {
ksft_print_msg("fork() failed: %d (%s)\n",
errno, strerror(errno));
close(pipefd[0]);
close(pipefd[1]);
return -1;
}
/* Child: put vector length on the pipe */
if (child == 0) {
/*
* Replace stdout with the pipe, errors to stderr from
* here as kselftest prints to stdout.
*/
ret = dup2(pipefd[1], 1);
if (ret == -1) {
fprintf(stderr, "dup2() %d\n", errno);
exit(EXIT_FAILURE);
}
/* exec() a new binary which puts the VL on stdout */
ret = execl(data->rdvl_binary, data->rdvl_binary, NULL);
fprintf(stderr, "execl(%s) failed: %d (%s)\n",
data->rdvl_binary, errno, strerror(errno));
exit(EXIT_FAILURE);
}
close(pipefd[1]);
/* Parent; wait for the exit status from the child & verify it */
do {
pid = wait(&ret);
if (pid == -1) {
ksft_print_msg("wait() failed: %d (%s)\n",
errno, strerror(errno));
close(pipefd[0]);
return -1;
}
} while (pid != child);
assert(pid == child);
if (!WIFEXITED(ret)) {
ksft_print_msg("child exited abnormally\n");
close(pipefd[0]);
return -1;
}
if (WEXITSTATUS(ret) != 0) {
ksft_print_msg("child returned error %d\n",
WEXITSTATUS(ret));
close(pipefd[0]);
return -1;
}
out = fdopen(pipefd[0], "r");
if (!out) {
ksft_print_msg("failed to open child stdout\n");
close(pipefd[0]);
return -1;
}
ret = stdio_read_integer(out, "child", &read_vl);
fclose(out);
if (ret != 0)
return ret;
return read_vl;
}
static int file_read_integer(const char *name, int *val)
{
FILE *f;
int ret;
f = fopen(name, "r");
if (!f) {
ksft_test_result_fail("Unable to open %s: %d (%s)\n",
name, errno,
strerror(errno));
return -1;
}
ret = stdio_read_integer(f, name, val);
fclose(f);
return ret;
}
static int file_write_integer(const char *name, int val)
{
FILE *f;
f = fopen(name, "w");
if (!f) {
ksft_test_result_fail("Unable to open %s: %d (%s)\n",
name, errno,
strerror(errno));
return -1;
}
fprintf(f, "%d", val);
fclose(f);
return 0;
}
/*
* Verify that we can read the default VL via proc, checking that it
* is set in a freshly spawned child.
*/
static void proc_read_default(struct vec_data *data)
{
int default_vl, child_vl, ret;
ret = file_read_integer(data->default_vl_file, &default_vl);
if (ret != 0)
return;
/* Is this the actual default seen by new processes? */
child_vl = get_child_rdvl(data);
if (child_vl != default_vl) {
ksft_test_result_fail("%s is %d but child VL is %d\n",
data->default_vl_file,
default_vl, child_vl);
return;
}
ksft_test_result_pass("%s default vector length %d\n", data->name,
default_vl);
data->default_vl = default_vl;
}
/* Verify that we can write a minimum value and have it take effect */
static void proc_write_min(struct vec_data *data)
{
int ret, new_default, child_vl;
if (geteuid() != 0) {
ksft_test_result_skip("Need to be root to write to /proc\n");
return;
}
ret = file_write_integer(data->default_vl_file, ARCH_MIN_VL);
if (ret != 0)
return;
/* What was the new value? */
ret = file_read_integer(data->default_vl_file, &new_default);
if (ret != 0)
return;
/* Did it take effect in a new process? */
child_vl = get_child_rdvl(data);
if (child_vl != new_default) {
ksft_test_result_fail("%s is %d but child VL is %d\n",
data->default_vl_file,
new_default, child_vl);
return;
}
ksft_test_result_pass("%s minimum vector length %d\n", data->name,
new_default);
data->min_vl = new_default;
file_write_integer(data->default_vl_file, data->default_vl);
}
/* Verify that we can write a maximum value and have it take effect */
static void proc_write_max(struct vec_data *data)
{
int ret, new_default, child_vl;
if (geteuid() != 0) {
ksft_test_result_skip("Need to be root to write to /proc\n");
return;
}
/* -1 is accepted by the /proc interface as the maximum VL */
ret = file_write_integer(data->default_vl_file, -1);
if (ret != 0)
return;
/* What was the new value? */
ret = file_read_integer(data->default_vl_file, &new_default);
if (ret != 0)
return;
/* Did it take effect in a new process? */
child_vl = get_child_rdvl(data);
if (child_vl != new_default) {
ksft_test_result_fail("%s is %d but child VL is %d\n",
data->default_vl_file,
new_default, child_vl);
return;
}
ksft_test_result_pass("%s maximum vector length %d\n", data->name,
new_default);
data->max_vl = new_default;
file_write_integer(data->default_vl_file, data->default_vl);
}
/* Can we read back a VL from prctl? */
static void prctl_get(struct vec_data *data)
{
int ret;
ret = prctl(data->prctl_get);
if (ret == -1) {
ksft_test_result_fail("%s prctl() read failed: %d (%s)\n",
data->name, errno, strerror(errno));
return;
}
/* Mask out any flags */
ret &= PR_SVE_VL_LEN_MASK;
/* Is that what we can read back directly? */
if (ret == data->rdvl())
ksft_test_result_pass("%s current VL is %d\n",
data->name, ret);
else
ksft_test_result_fail("%s prctl() VL %d but RDVL is %d\n",
data->name, ret, data->rdvl());
}
/* Does the prctl let us set the VL we already have? */
static void prctl_set_same(struct vec_data *data)
{
int cur_vl = data->rdvl();
int ret;
ret = prctl(data->prctl_set, cur_vl);
if (ret < 0) {
ksft_test_result_fail("%s prctl set failed: %d (%s)\n",
data->name, errno, strerror(errno));
return;
}
ksft_test_result(cur_vl == data->rdvl(),
"%s set VL %d and have VL %d\n",
data->name, cur_vl, data->rdvl());
}
/* Can we set a new VL for this process? */
static void prctl_set(struct vec_data *data)
{
int ret;
if (data->min_vl == data->max_vl) {
ksft_test_result_skip("%s only one VL supported\n",
data->name);
return;
}
/* Try to set the minimum VL */
ret = prctl(data->prctl_set, data->min_vl);
if (ret < 0) {
ksft_test_result_fail("%s prctl set failed for %d: %d (%s)\n",
data->name, data->min_vl,
errno, strerror(errno));
return;
}
if ((ret & PR_SVE_VL_LEN_MASK) != data->min_vl) {
ksft_test_result_fail("%s prctl set %d but return value is %d\n",
data->name, data->min_vl, data->rdvl());
return;
}
if (data->rdvl() != data->min_vl) {
ksft_test_result_fail("%s set %d but RDVL is %d\n",
data->name, data->min_vl, data->rdvl());
return;
}
/* Try to set the maximum VL */
ret = prctl(data->prctl_set, data->max_vl);
if (ret < 0) {
ksft_test_result_fail("%s prctl set failed for %d: %d (%s)\n",
data->name, data->max_vl,
errno, strerror(errno));
return;
}
if ((ret & PR_SVE_VL_LEN_MASK) != data->max_vl) {
ksft_test_result_fail("%s prctl() set %d but return value is %d\n",
data->name, data->max_vl, data->rdvl());
return;
}
/* The _INHERIT flag should not be present when we read the VL */
ret = prctl(data->prctl_get);
if (ret == -1) {
ksft_test_result_fail("%s prctl() read failed: %d (%s)\n",
data->name, errno, strerror(errno));
return;
}
if (ret & PR_SVE_VL_INHERIT) {
ksft_test_result_fail("%s prctl() reports _INHERIT\n",
data->name);
return;
}
ksft_test_result_pass("%s prctl() set min/max\n", data->name);
}
/* If we didn't request it, a new VL shouldn't affect the child */
static void prctl_set_no_child(struct vec_data *data)
{
int ret, child_vl;
if (data->min_vl == data->max_vl) {
ksft_test_result_skip("%s only one VL supported\n",
data->name);
return;
}
ret = prctl(data->prctl_set, data->min_vl);
if (ret < 0) {
ksft_test_result_fail("%s prctl set failed for %d: %d (%s)\n",
data->name, data->min_vl,
errno, strerror(errno));
return;
}
/* Ensure the default VL is different */
ret = file_write_integer(data->default_vl_file, data->max_vl);
if (ret != 0)
return;
/* Check that the child has the default we just set */
child_vl = get_child_rdvl(data);
if (child_vl != data->max_vl) {
ksft_test_result_fail("%s is %d but child VL is %d\n",
data->default_vl_file,
data->max_vl, child_vl);
return;
}
ksft_test_result_pass("%s vector length used default\n", data->name);
file_write_integer(data->default_vl_file, data->default_vl);
}
/* If we did request it, the child should inherit the new VL */
static void prctl_set_for_child(struct vec_data *data)
{
int ret, child_vl;
if (data->min_vl == data->max_vl) {
ksft_test_result_skip("%s only one VL supported\n",
data->name);
return;
}
ret = prctl(data->prctl_set, data->min_vl | PR_SVE_VL_INHERIT);
if (ret < 0) {
ksft_test_result_fail("%s prctl set failed for %d: %d (%s)\n",
data->name, data->min_vl,
errno, strerror(errno));
return;
}
/* The _INHERIT flag should be present when we read the VL */
ret = prctl(data->prctl_get);
if (ret == -1) {
ksft_test_result_fail("%s prctl() read failed: %d (%s)\n",
data->name, errno, strerror(errno));
return;
}
if (!(ret & PR_SVE_VL_INHERIT)) {
ksft_test_result_fail("%s prctl() does not report _INHERIT\n",
data->name);
return;
}
/* Ensure the default VL is different */
ret = file_write_integer(data->default_vl_file, data->max_vl);
if (ret != 0)
return;
/* Check that the child inherited our VL */
child_vl = get_child_rdvl(data);
if (child_vl != data->min_vl) {
ksft_test_result_fail("%s is %d but child VL is %d\n",
data->default_vl_file,
data->min_vl, child_vl);
return;
}
ksft_test_result_pass("%s vector length was inherited\n", data->name);
file_write_integer(data->default_vl_file, data->default_vl);
}
/* _ONEXEC takes effect only in the child process */
static void prctl_set_onexec(struct vec_data *data)
{
int ret, child_vl;
if (data->min_vl == data->max_vl) {
ksft_test_result_skip("%s only one VL supported\n",
data->name);
return;
}
/* Set a known value for the default and our current VL */
ret = file_write_integer(data->default_vl_file, data->max_vl);
if (ret != 0)
return;
ret = prctl(data->prctl_set, data->max_vl);
if (ret < 0) {
ksft_test_result_fail("%s prctl set failed for %d: %d (%s)\n",
				      data->name, data->max_vl,
errno, strerror(errno));
return;
}
/* Set a different value for the child to have on exec */
ret = prctl(data->prctl_set, data->min_vl | PR_SVE_SET_VL_ONEXEC);
if (ret < 0) {
ksft_test_result_fail("%s prctl set failed for %d: %d (%s)\n",
data->name, data->min_vl,
errno, strerror(errno));
return;
}
/* Our current VL should stay the same */
if (data->rdvl() != data->max_vl) {
ksft_test_result_fail("%s VL changed by _ONEXEC prctl()\n",
data->name);
return;
}
	/* Check that the child started with the _ONEXEC value */
child_vl = get_child_rdvl(data);
if (child_vl != data->min_vl) {
ksft_test_result_fail("Set %d _ONEXEC but child VL is %d\n",
data->min_vl, child_vl);
return;
}
ksft_test_result_pass("%s vector length set on exec\n", data->name);
file_write_integer(data->default_vl_file, data->default_vl);
}
/* For each VQ verify that setting via prctl() does the right thing */
static void prctl_set_all_vqs(struct vec_data *data)
{
int ret, vq, vl, new_vl, i;
int orig_vls[ARRAY_SIZE(vec_data)];
int errors = 0;
if (!data->min_vl || !data->max_vl) {
ksft_test_result_skip("%s Failed to enumerate VLs, not testing VL setting\n",
data->name);
return;
}
for (i = 0; i < ARRAY_SIZE(vec_data); i++)
orig_vls[i] = vec_data[i].rdvl();
for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) {
vl = sve_vl_from_vq(vq);
/* Attempt to set the VL */
ret = prctl(data->prctl_set, vl);
if (ret < 0) {
errors++;
ksft_print_msg("%s prctl set failed for %d: %d (%s)\n",
data->name, vl,
errno, strerror(errno));
continue;
}
new_vl = ret & PR_SVE_VL_LEN_MASK;
/* Check that we actually have the reported new VL */
if (data->rdvl() != new_vl) {
ksft_print_msg("Set %s VL %d but RDVL reports %d\n",
data->name, new_vl, data->rdvl());
errors++;
}
/* Did any other VLs change? */
for (i = 0; i < ARRAY_SIZE(vec_data); i++) {
if (&vec_data[i] == data)
continue;
if (!(getauxval(vec_data[i].hwcap_type) & vec_data[i].hwcap))
continue;
if (vec_data[i].rdvl() != orig_vls[i]) {
ksft_print_msg("%s VL changed from %d to %d\n",
vec_data[i].name, orig_vls[i],
vec_data[i].rdvl());
errors++;
}
}
/* Was that the VL we asked for? */
if (new_vl == vl)
continue;
/* Should round up to the minimum VL if below it */
if (vl < data->min_vl) {
if (new_vl != data->min_vl) {
ksft_print_msg("%s VL %d returned %d not minimum %d\n",
data->name, vl, new_vl,
data->min_vl);
errors++;
}
continue;
}
/* Should round down to maximum VL if above it */
if (vl > data->max_vl) {
if (new_vl != data->max_vl) {
ksft_print_msg("%s VL %d returned %d not maximum %d\n",
data->name, vl, new_vl,
data->max_vl);
errors++;
}
continue;
}
/* Otherwise we should've rounded down */
if (!(new_vl < vl)) {
ksft_print_msg("%s VL %d returned %d, did not round down\n",
data->name, vl, new_vl);
errors++;
continue;
}
}
ksft_test_result(errors == 0, "%s prctl() set all VLs, %d errors\n",
data->name, errors);
}
typedef void (*test_type)(struct vec_data *);
static const test_type tests[] = {
/*
* The default/min/max tests must be first and in this order
* to provide data for other tests.
*/
proc_read_default,
proc_write_min,
proc_write_max,
prctl_get,
prctl_set_same,
prctl_set,
prctl_set_no_child,
prctl_set_for_child,
prctl_set_onexec,
prctl_set_all_vqs,
};
static inline void smstart(void)
{
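	/* SMSTART: raw encoding enabling both streaming mode and ZA */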
asm volatile("msr S0_3_C4_C7_3, xzr");
}
static inline void smstart_sm(void)
{
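	/* SMSTART SM: raw encoding entering streaming mode only */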
asm volatile("msr S0_3_C4_C3_3, xzr");
}
static inline void smstop(void)
{
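	/* SMSTOP: raw encoding leaving streaming mode and disabling ZA */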
asm volatile("msr S0_3_C4_C6_3, xzr");
}
/*
* Verify we can change the SVE vector length while SME is active and
* continue to use SME afterwards.
*/
static void change_sve_with_za(void)
{
struct vec_data *sve_data = &vec_data[VEC_SVE];
bool pass = true;
int ret, i;
if (sve_data->min_vl == sve_data->max_vl) {
ksft_print_msg("Only one SVE VL supported, can't change\n");
ksft_test_result_skip("change_sve_while_sme\n");
return;
}
/* Ensure we will trigger a change when we set the maximum */
ret = prctl(sve_data->prctl_set, sve_data->min_vl);
if (ret != sve_data->min_vl) {
ksft_print_msg("Failed to set SVE VL %d: %d\n",
sve_data->min_vl, ret);
pass = false;
}
/* Enable SM and ZA */
smstart();
/* Trigger another VL change */
ret = prctl(sve_data->prctl_set, sve_data->max_vl);
if (ret != sve_data->max_vl) {
ksft_print_msg("Failed to set SVE VL %d: %d\n",
sve_data->max_vl, ret);
pass = false;
}
/*
* Spin for a bit with SM enabled to try to trigger another
* save/restore. We can't use syscalls without exiting
* streaming mode.
*/
for (i = 0; i < 100000000; i++)
smstart_sm();
/*
* TODO: Verify that ZA was preserved over the VL change and
* spin.
*/
/* Clean up after ourselves */
smstop();
ret = prctl(sve_data->prctl_set, sve_data->default_vl);
if (ret != sve_data->default_vl) {
ksft_print_msg("Failed to restore SVE VL %d: %d\n",
sve_data->default_vl, ret);
pass = false;
}
ksft_test_result(pass, "change_sve_with_za\n");
}
typedef void (*test_all_type)(void);
static const struct {
const char *name;
test_all_type test;
} all_types_tests[] = {
{ "change_sve_with_za", change_sve_with_za },
};
int main(void)
{
bool all_supported = true;
int i, j;
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(tests) * ARRAY_SIZE(vec_data) +
ARRAY_SIZE(all_types_tests));
for (i = 0; i < ARRAY_SIZE(vec_data); i++) {
struct vec_data *data = &vec_data[i];
unsigned long supported;
supported = getauxval(data->hwcap_type) & data->hwcap;
if (!supported)
all_supported = false;
for (j = 0; j < ARRAY_SIZE(tests); j++) {
if (supported)
tests[j](data);
else
ksft_test_result_skip("%s not supported\n",
data->name);
}
}
for (i = 0; i < ARRAY_SIZE(all_types_tests); i++) {
if (all_supported)
all_types_tests[i].test();
else
ksft_test_result_skip("%s\n", all_types_tests[i].name);
}
ksft_exit_pass();
}
| linux-master | tools/testing/selftests/arm64/fp/vec-syscfg.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <stdio.h>
#include "rdvl.h"
int main(void)
{
int vl = rdvl_sve();
printf("%d\n", vl);
return 0;
}
| linux-master | tools/testing/selftests/arm64/fp/rdvl-sve.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 ARM Limited.
* Original author: Mark Brown <[email protected]>
*/
#include <linux/sched.h>
#include <linux/wait.h>
#include "kselftest.h"
#define EXPECTED_TESTS 1
int fork_test(void);
int verify_fork(void);
/*
 * If we fork, the ZA state in the parent should be unchanged and the
 * child should start with the same state. This is called from the
 * fork_test() asm function.
 */
int fork_test_c(void)
{
pid_t newpid, waiting;
int child_status, parent_result;
newpid = fork();
if (newpid == 0) {
/* In child */
if (!verify_fork()) {
ksft_print_msg("ZA state invalid in child\n");
exit(0);
} else {
exit(1);
}
}
if (newpid < 0) {
ksft_print_msg("fork() failed: %d\n", newpid);
return 0;
}
parent_result = verify_fork();
if (!parent_result)
ksft_print_msg("ZA state invalid in parent\n");
for (;;) {
waiting = waitpid(newpid, &child_status, 0);
if (waiting < 0) {
if (errno == EINTR)
continue;
ksft_print_msg("waitpid() failed: %d\n", errno);
return 0;
}
if (waiting != newpid) {
ksft_print_msg("waitpid() returned wrong PID\n");
return 0;
}
if (!WIFEXITED(child_status)) {
ksft_print_msg("child did not exit\n");
return 0;
}
return WEXITSTATUS(child_status) && parent_result;
}
}
int main(int argc, char **argv)
{
int ret, i;
ksft_print_header();
ksft_set_plan(EXPECTED_TESTS);
ksft_print_msg("PID: %d\n", getpid());
/*
* This test is run with nolibc which doesn't support hwcap and
* it's probably disproportionate to implement so instead check
* for the default vector length configuration in /proc.
*/
ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0);
if (ret >= 0) {
		ksft_test_result(fork_test(), "fork_test\n");
} else {
ksft_print_msg("SME not supported\n");
for (i = 0; i < EXPECTED_TESTS; i++) {
ksft_test_result_skip("fork_test\n");
}
}
ksft_finished();
return 0;
}
| linux-master | tools/testing/selftests/arm64/fp/za-fork.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015-2019 ARM Limited.
* Original author: Dave Martin <[email protected]>
*/
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <asm/hwcap.h>
#include <asm/sigcontext.h>
static int inherit = 0;
static int no_inherit = 0;
static int force = 0;
static unsigned long vl;
static int set_ctl = PR_SVE_SET_VL;
static int get_ctl = PR_SVE_GET_VL;
static const struct option options[] = {
{ "force", no_argument, NULL, 'f' },
{ "inherit", no_argument, NULL, 'i' },
{ "max", no_argument, NULL, 'M' },
{ "no-inherit", no_argument, &no_inherit, 1 },
{ "sme", no_argument, NULL, 's' },
{ "help", no_argument, NULL, '?' },
{}
};
static char const *program_name;
static int parse_options(int argc, char **argv)
{
int c;
char *rest;
program_name = strrchr(argv[0], '/');
if (program_name)
++program_name;
else
program_name = argv[0];
while ((c = getopt_long(argc, argv, "Mfhi", options, NULL)) != -1)
switch (c) {
case 'M': vl = SVE_VL_MAX; break;
case 'f': force = 1; break;
case 'i': inherit = 1; break;
case 's': set_ctl = PR_SME_SET_VL;
get_ctl = PR_SME_GET_VL;
break;
case 0: break;
default: goto error;
}
if (inherit && no_inherit)
goto error;
if (!vl) {
/* vector length */
if (optind >= argc)
goto error;
errno = 0;
vl = strtoul(argv[optind], &rest, 0);
if (*rest) {
vl = ULONG_MAX;
errno = EINVAL;
}
if (vl == ULONG_MAX && errno) {
fprintf(stderr, "%s: %s: %s\n",
program_name, argv[optind], strerror(errno));
goto error;
}
++optind;
}
/* command */
if (optind >= argc)
goto error;
return 0;
error:
fprintf(stderr,
"Usage: %s [-f | --force] "
"[-i | --inherit | --no-inherit] "
"{-M | --max | <vector length>} "
"<command> [<arguments> ...]\n",
program_name);
return -1;
}
int main(int argc, char **argv)
{
int ret = 126; /* same as sh(1) command-not-executable error */
long flags;
char *path;
int t, e;
if (parse_options(argc, argv))
return 2; /* same as sh(1) builtin incorrect-usage */
if (vl & ~(vl & PR_SVE_VL_LEN_MASK)) {
fprintf(stderr, "%s: Invalid vector length %lu\n",
program_name, vl);
return 2; /* same as sh(1) builtin incorrect-usage */
}
if (!(getauxval(AT_HWCAP) & HWCAP_SVE)) {
fprintf(stderr, "%s: Scalable Vector Extension not present\n",
program_name);
if (!force)
goto error;
fputs("Going ahead anyway (--force): "
"This is a debug option. Don't rely on it.\n",
stderr);
}
flags = PR_SVE_SET_VL_ONEXEC;
if (inherit)
flags |= PR_SVE_VL_INHERIT;
t = prctl(set_ctl, vl | flags);
if (t < 0) {
fprintf(stderr, "%s: PR_SVE_SET_VL: %s\n",
program_name, strerror(errno));
goto error;
}
t = prctl(get_ctl);
if (t == -1) {
fprintf(stderr, "%s: PR_SVE_GET_VL: %s\n",
program_name, strerror(errno));
goto error;
}
flags = PR_SVE_VL_LEN_MASK;
flags = t & ~flags;
assert(optind < argc);
path = argv[optind];
execvp(path, &argv[optind]);
e = errno;
if (errno == ENOENT)
ret = 127; /* same as sh(1) not-found error */
fprintf(stderr, "%s: %s: %s\n", program_name, path, strerror(e));
error:
return ret; /* same as sh(1) not-executable error */
}
| linux-master | tools/testing/selftests/arm64/fp/vlset.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 ARM Limited.
*/
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <asm/hwcap.h>
#include <asm/sigcontext.h>
#include <asm/unistd.h>
#include "../../kselftest.h"
#include "syscall-abi.h"
/*
 * The kernel defines a much larger SVE_VQ_MAX than is expressible in
* the architecture, this creates a *lot* of overhead filling the
* buffers (especially ZA) on emulated platforms so use the actual
* architectural maximum instead.
*/
#define ARCH_SVE_VQ_MAX 16
static int default_sme_vl;
static int sve_vl_count;
static unsigned int sve_vls[ARCH_SVE_VQ_MAX];
static int sme_vl_count;
static unsigned int sme_vls[ARCH_SVE_VQ_MAX];
extern void do_syscall(int sve_vl, int sme_vl);
static void fill_random(void *buf, size_t size)
{
int i;
uint32_t *lbuf = buf;
/* random() returns a 32 bit number regardless of the size of long */
for (i = 0; i < size / sizeof(uint32_t); i++)
lbuf[i] = random();
}
/*
 * The tests are repeated for several syscalls to try to expose
 * different behaviour.
 */
static struct syscall_cfg {
int syscall_nr;
const char *name;
} syscalls[] = {
{ __NR_getpid, "getpid()" },
{ __NR_sched_yield, "sched_yield()" },
};
#define NUM_GPR 31
uint64_t gpr_in[NUM_GPR];
uint64_t gpr_out[NUM_GPR];
static void setup_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
fill_random(gpr_in, sizeof(gpr_in));
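	/* x8 carries the syscall number in the AArch64 syscall ABI */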
gpr_in[8] = cfg->syscall_nr;
memset(gpr_out, 0, sizeof(gpr_out));
}
static int check_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, uint64_t svcr)
{
int errors = 0;
int i;
	/*
	 * x0-x7 may be clobbered by the syscall and x8 carries the
	 * syscall number, so all other GPRs should be preserved.
	 */
for (i = 9; i < ARRAY_SIZE(gpr_in); i++) {
if (gpr_in[i] != gpr_out[i]) {
ksft_print_msg("%s SVE VL %d mismatch in GPR %d: %llx != %llx\n",
cfg->name, sve_vl, i,
gpr_in[i], gpr_out[i]);
errors++;
}
}
return errors;
}
#define NUM_FPR 32
uint64_t fpr_in[NUM_FPR * 2];
uint64_t fpr_out[NUM_FPR * 2];
uint64_t fpr_zero[NUM_FPR * 2];
static void setup_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
fill_random(fpr_in, sizeof(fpr_in));
memset(fpr_out, 0, sizeof(fpr_out));
}
static int check_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
int errors = 0;
int i;
if (!sve_vl && !(svcr & SVCR_SM_MASK)) {
for (i = 0; i < ARRAY_SIZE(fpr_in); i++) {
if (fpr_in[i] != fpr_out[i]) {
ksft_print_msg("%s Q%d/%d mismatch %llx != %llx\n",
cfg->name,
i / 2, i % 2,
fpr_in[i], fpr_out[i]);
errors++;
}
}
}
/*
* In streaming mode the whole register set should be cleared
* by the transition out of streaming mode.
*/
if (svcr & SVCR_SM_MASK) {
if (memcmp(fpr_zero, fpr_out, sizeof(fpr_out)) != 0) {
ksft_print_msg("%s FPSIMD registers non-zero exiting SM\n",
cfg->name);
errors++;
}
}
return errors;
}
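/* The low 128 bits of each Z register are shared with the FPSIMD V registers */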
#define SVE_Z_SHARED_BYTES (128 / 8)
static uint8_t z_zero[__SVE_ZREG_SIZE(ARCH_SVE_VQ_MAX)];
uint8_t z_in[SVE_NUM_ZREGS * __SVE_ZREG_SIZE(ARCH_SVE_VQ_MAX)];
uint8_t z_out[SVE_NUM_ZREGS * __SVE_ZREG_SIZE(ARCH_SVE_VQ_MAX)];
static void setup_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
fill_random(z_in, sizeof(z_in));
fill_random(z_out, sizeof(z_out));
}
static int check_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
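	/* Each Z register is VL bytes wide */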
size_t reg_size = sve_vl;
int errors = 0;
int i;
if (!sve_vl)
return 0;
for (i = 0; i < SVE_NUM_ZREGS; i++) {
uint8_t *in = &z_in[reg_size * i];
uint8_t *out = &z_out[reg_size * i];
if (svcr & SVCR_SM_MASK) {
/*
* In streaming mode the whole register should
* be cleared by the transition out of
* streaming mode.
*/
if (memcmp(z_zero, out, reg_size) != 0) {
ksft_print_msg("%s SVE VL %d Z%d non-zero\n",
cfg->name, sve_vl, i);
errors++;
}
} else {
/*
* For standard SVE the low 128 bits should be
* preserved and any additional bits cleared.
*/
if (memcmp(in, out, SVE_Z_SHARED_BYTES) != 0) {
ksft_print_msg("%s SVE VL %d Z%d low 128 bits changed\n",
cfg->name, sve_vl, i);
errors++;
}
if (reg_size > SVE_Z_SHARED_BYTES &&
(memcmp(z_zero, out + SVE_Z_SHARED_BYTES,
reg_size - SVE_Z_SHARED_BYTES) != 0)) {
ksft_print_msg("%s SVE VL %d Z%d high bits non-zero\n",
cfg->name, sve_vl, i);
errors++;
}
}
}
return errors;
}
uint8_t p_in[SVE_NUM_PREGS * __SVE_PREG_SIZE(ARCH_SVE_VQ_MAX)];
uint8_t p_out[SVE_NUM_PREGS * __SVE_PREG_SIZE(ARCH_SVE_VQ_MAX)];
static void setup_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
fill_random(p_in, sizeof(p_in));
fill_random(p_out, sizeof(p_out));
}
static int check_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */
int errors = 0;
int i;
if (!sve_vl)
return 0;
/* After a syscall the P registers should be zeroed */
for (i = 0; i < SVE_NUM_PREGS * reg_size; i++)
if (p_out[i])
errors++;
if (errors)
ksft_print_msg("%s SVE VL %d predicate registers non-zero\n",
cfg->name, sve_vl);
return errors;
}
uint8_t ffr_in[__SVE_PREG_SIZE(ARCH_SVE_VQ_MAX)];
uint8_t ffr_out[__SVE_PREG_SIZE(ARCH_SVE_VQ_MAX)];
static void setup_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
/*
* If we are in streaming mode and do not have FA64 then FFR
* is unavailable.
*/
if ((svcr & SVCR_SM_MASK) &&
!(getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)) {
memset(&ffr_in, 0, sizeof(ffr_in));
return;
}
/*
* It is only valid to set a contiguous set of bits starting
* at 0. For now since we're expecting this to be cleared by
* a syscall just set all bits.
*/
memset(ffr_in, 0xff, sizeof(ffr_in));
fill_random(ffr_out, sizeof(ffr_out));
}
static int check_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */
int errors = 0;
int i;
if (!sve_vl)
return 0;
if ((svcr & SVCR_SM_MASK) &&
!(getauxval(AT_HWCAP2) & HWCAP2_SME_FA64))
return 0;
/* After a syscall FFR should be zeroed */
for (i = 0; i < reg_size; i++)
if (ffr_out[i])
errors++;
if (errors)
ksft_print_msg("%s SVE VL %d FFR non-zero\n",
cfg->name, sve_vl);
return errors;
}
uint64_t svcr_in, svcr_out;
static void setup_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
svcr_in = svcr;
}
static int check_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
int errors = 0;
if (svcr_out & SVCR_SM_MASK) {
ksft_print_msg("%s Still in SM, SVCR %llx\n",
cfg->name, svcr_out);
errors++;
}
if ((svcr_in & SVCR_ZA_MASK) != (svcr_out & SVCR_ZA_MASK)) {
ksft_print_msg("%s PSTATE.ZA changed, SVCR %llx != %llx\n",
cfg->name, svcr_in, svcr_out);
errors++;
}
return errors;
}
uint8_t za_in[ZA_SIG_REGS_SIZE(ARCH_SVE_VQ_MAX)];
uint8_t za_out[ZA_SIG_REGS_SIZE(ARCH_SVE_VQ_MAX)];
static void setup_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
fill_random(za_in, sizeof(za_in));
memset(za_out, 0, sizeof(za_out));
}
static int check_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
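	/* ZA is a VL x VL byte matrix at the current SME VL */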
size_t reg_size = sme_vl * sme_vl;
int errors = 0;
if (!(svcr & SVCR_ZA_MASK))
return 0;
if (memcmp(za_in, za_out, reg_size) != 0) {
ksft_print_msg("SME VL %d ZA does not match\n", sme_vl);
errors++;
}
return errors;
}
uint8_t zt_in[ZT_SIG_REG_BYTES] __attribute__((aligned(16)));
uint8_t zt_out[ZT_SIG_REG_BYTES] __attribute__((aligned(16)));
static void setup_zt(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
fill_random(zt_in, sizeof(zt_in));
memset(zt_out, 0, sizeof(zt_out));
}
static int check_zt(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
int errors = 0;
if (!(getauxval(AT_HWCAP2) & HWCAP2_SME2))
return 0;
if (!(svcr & SVCR_ZA_MASK))
return 0;
if (memcmp(zt_in, zt_out, sizeof(zt_in)) != 0) {
ksft_print_msg("SME VL %d ZT does not match\n", sme_vl);
errors++;
}
return errors;
}
typedef void (*setup_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr);
typedef int (*check_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr);
/*
* Each set of registers has a setup function which is called before
* the syscall to fill values in a global variable for loading by the
* test code and a check function which validates that the results are
* as expected. Vector lengths are passed everywhere, a vector length
* of 0 should be treated as do not test.
*/
static struct {
setup_fn setup;
check_fn check;
} regset[] = {
{ setup_gpr, check_gpr },
{ setup_fpr, check_fpr },
{ setup_z, check_z },
{ setup_p, check_p },
{ setup_ffr, check_ffr },
{ setup_svcr, check_svcr },
{ setup_za, check_za },
{ setup_zt, check_zt },
};
static bool do_test(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
{
int errors = 0;
int i;
for (i = 0; i < ARRAY_SIZE(regset); i++)
regset[i].setup(cfg, sve_vl, sme_vl, svcr);
do_syscall(sve_vl, sme_vl);
for (i = 0; i < ARRAY_SIZE(regset); i++)
errors += regset[i].check(cfg, sve_vl, sme_vl, svcr);
return errors == 0;
}
static void test_one_syscall(struct syscall_cfg *cfg)
{
int sve, sme;
int ret;
/* FPSIMD only case */
ksft_test_result(do_test(cfg, 0, default_sme_vl, 0),
"%s FPSIMD\n", cfg->name);
for (sve = 0; sve < sve_vl_count; sve++) {
ret = prctl(PR_SVE_SET_VL, sve_vls[sve]);
if (ret == -1)
ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n",
strerror(errno), errno);
ksft_test_result(do_test(cfg, sve_vls[sve], default_sme_vl, 0),
"%s SVE VL %d\n", cfg->name, sve_vls[sve]);
for (sme = 0; sme < sme_vl_count; sme++) {
ret = prctl(PR_SME_SET_VL, sme_vls[sme]);
if (ret == -1)
ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
strerror(errno), errno);
ksft_test_result(do_test(cfg, sve_vls[sve],
sme_vls[sme],
SVCR_ZA_MASK | SVCR_SM_MASK),
"%s SVE VL %d/SME VL %d SM+ZA\n",
cfg->name, sve_vls[sve],
sme_vls[sme]);
ksft_test_result(do_test(cfg, sve_vls[sve],
sme_vls[sme], SVCR_SM_MASK),
"%s SVE VL %d/SME VL %d SM\n",
cfg->name, sve_vls[sve],
sme_vls[sme]);
ksft_test_result(do_test(cfg, sve_vls[sve],
sme_vls[sme], SVCR_ZA_MASK),
"%s SVE VL %d/SME VL %d ZA\n",
cfg->name, sve_vls[sve],
sme_vls[sme]);
}
}
for (sme = 0; sme < sme_vl_count; sme++) {
ret = prctl(PR_SME_SET_VL, sme_vls[sme]);
if (ret == -1)
ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
strerror(errno), errno);
ksft_test_result(do_test(cfg, 0, sme_vls[sme],
SVCR_ZA_MASK | SVCR_SM_MASK),
"%s SME VL %d SM+ZA\n",
cfg->name, sme_vls[sme]);
ksft_test_result(do_test(cfg, 0, sme_vls[sme], SVCR_SM_MASK),
"%s SME VL %d SM\n",
cfg->name, sme_vls[sme]);
ksft_test_result(do_test(cfg, 0, sme_vls[sme], SVCR_ZA_MASK),
"%s SME VL %d ZA\n",
cfg->name, sme_vls[sme]);
}
}
void sve_count_vls(void)
{
unsigned int vq;
int vl;
if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
return;
/*
* Enumerate up to ARCH_SVE_VQ_MAX vector lengths
*/
for (vq = ARCH_SVE_VQ_MAX; vq > 0; vq /= 2) {
vl = prctl(PR_SVE_SET_VL, vq * 16);
if (vl == -1)
ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n",
strerror(errno), errno);
vl &= PR_SVE_VL_LEN_MASK;
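		/* Resync vq with the VL the kernel actually granted */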
if (vq != sve_vq_from_vl(vl))
vq = sve_vq_from_vl(vl);
sve_vls[sve_vl_count++] = vl;
}
}
void sme_count_vls(void)
{
unsigned int vq;
int vl;
if (!(getauxval(AT_HWCAP2) & HWCAP2_SME))
return;
/*
* Enumerate up to ARCH_SVE_VQ_MAX vector lengths
*/
for (vq = ARCH_SVE_VQ_MAX; vq > 0; vq /= 2) {
vl = prctl(PR_SME_SET_VL, vq * 16);
if (vl == -1)
ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
strerror(errno), errno);
vl &= PR_SME_VL_LEN_MASK;
/* Found lowest VL */
if (sve_vq_from_vl(vl) > vq)
break;
if (vq != sve_vq_from_vl(vl))
vq = sve_vq_from_vl(vl);
sme_vls[sme_vl_count++] = vl;
}
/* Ensure we configure a SME VL, used to flag if SVCR is set */
default_sme_vl = sme_vls[0];
}
int main(void)
{
int i;
int tests = 1; /* FPSIMD */
int sme_ver;
srandom(getpid());
ksft_print_header();
sve_count_vls();
sme_count_vls();
tests += sve_vl_count;
tests += sme_vl_count * 3;
tests += (sve_vl_count * sme_vl_count) * 3;
ksft_set_plan(ARRAY_SIZE(syscalls) * tests);
if (getauxval(AT_HWCAP2) & HWCAP2_SME2)
sme_ver = 2;
else
sme_ver = 1;
if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)
ksft_print_msg("SME%d with FA64\n", sme_ver);
else if (getauxval(AT_HWCAP2) & HWCAP2_SME)
ksft_print_msg("SME%d without FA64\n", sme_ver);
for (i = 0; i < ARRAY_SIZE(syscalls); i++)
test_one_syscall(&syscalls[i]);
ksft_print_cnts();
return 0;
}
| linux-master | tools/testing/selftests/arm64/abi/syscall-abi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 ARM Limited.
*/
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <asm/sigcontext.h>
#include <asm/ptrace.h>
#include "../../kselftest.h"
#define EXPECTED_TESTS 11
#define MAX_TPIDRS 2
static bool have_sme(void)
{
return getauxval(AT_HWCAP2) & HWCAP2_SME;
}
static void test_tpidr(pid_t child)
{
uint64_t read_val[MAX_TPIDRS];
uint64_t write_val[MAX_TPIDRS];
struct iovec read_iov, write_iov;
bool test_tpidr2 = false;
int ret, i;
read_iov.iov_base = read_val;
write_iov.iov_base = write_val;
/* Should be able to read a single TPIDR... */
read_iov.iov_len = sizeof(uint64_t);
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
ksft_test_result(ret == 0, "read_tpidr_one\n");
/* ...write a new value.. */
write_iov.iov_len = sizeof(uint64_t);
	write_val[0] = read_val[0] + 1;
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
ksft_test_result(ret == 0, "write_tpidr_one\n");
/* ...then read it back */
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
ksft_test_result(ret == 0 && write_val[0] == read_val[0],
"verify_tpidr_one\n");
/* If we have TPIDR2 we should be able to read it */
read_iov.iov_len = sizeof(read_val);
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
if (ret == 0) {
/* If we have SME there should be two TPIDRs */
if (read_iov.iov_len >= sizeof(read_val))
test_tpidr2 = true;
if (have_sme() && test_tpidr2) {
ksft_test_result(test_tpidr2, "count_tpidrs\n");
} else {
ksft_test_result(read_iov.iov_len % sizeof(uint64_t) == 0,
"count_tpidrs\n");
}
} else {
ksft_test_result_fail("count_tpidrs\n");
}
if (test_tpidr2) {
/* Try to write new values to all known TPIDRs... */
write_iov.iov_len = sizeof(write_val);
for (i = 0; i < MAX_TPIDRS; i++)
write_val[i] = read_val[i] + 1;
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
ksft_test_result(ret == 0 &&
write_iov.iov_len == sizeof(write_val),
"tpidr2_write\n");
/* ...then read them back */
read_iov.iov_len = sizeof(read_val);
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
if (have_sme()) {
/* Should read back the written value */
ksft_test_result(ret == 0 &&
read_iov.iov_len >= sizeof(read_val) &&
memcmp(read_val, write_val,
sizeof(read_val)) == 0,
"tpidr2_read\n");
} else {
/* TPIDR2 should read as zero */
ksft_test_result(ret == 0 &&
read_iov.iov_len >= sizeof(read_val) &&
read_val[0] == write_val[0] &&
read_val[1] == 0,
"tpidr2_read\n");
}
/* Writing only TPIDR... */
write_iov.iov_len = sizeof(uint64_t);
memcpy(write_val, read_val, sizeof(read_val));
write_val[0] += 1;
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
if (ret == 0) {
/* ...should leave TPIDR2 untouched */
read_iov.iov_len = sizeof(read_val);
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS,
&read_iov);
ksft_test_result(ret == 0 &&
read_iov.iov_len >= sizeof(read_val) &&
memcmp(read_val, write_val,
sizeof(read_val)) == 0,
"write_tpidr_only\n");
} else {
ksft_test_result_fail("write_tpidr_only\n");
}
} else {
ksft_test_result_skip("tpidr2_write\n");
ksft_test_result_skip("tpidr2_read\n");
ksft_test_result_skip("write_tpidr_only\n");
}
}
static void test_hw_debug(pid_t child, int type, const char *type_name)
{
struct user_hwdebug_state state;
struct iovec iov;
int slots, arch, ret;
iov.iov_len = sizeof(state);
iov.iov_base = &state;
/* Should be able to read the values */
ret = ptrace(PTRACE_GETREGSET, child, type, &iov);
ksft_test_result(ret == 0, "read_%s\n", type_name);
if (ret == 0) {
/* Low 8 bits is the number of slots, next 4 bits the arch */
slots = state.dbg_info & 0xff;
arch = (state.dbg_info >> 8) & 0xf;
ksft_print_msg("%s version %d with %d slots\n", type_name,
arch, slots);
/* Zero is not currently architecturally valid */
ksft_test_result(arch, "%s_arch_set\n", type_name);
} else {
ksft_test_result_skip("%s_arch_set\n");
}
}
static int do_child(void)
{
	if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
		ksft_exit_fail_msg("PTRACE_TRACEME: %s\n", strerror(errno));
	if (raise(SIGSTOP))
		ksft_exit_fail_msg("raise(SIGSTOP): %s\n", strerror(errno));
return EXIT_SUCCESS;
}
static int do_parent(pid_t child)
{
int ret = EXIT_FAILURE;
pid_t pid;
int status;
siginfo_t si;
/* Attach to the child */
while (1) {
int sig;
pid = wait(&status);
if (pid == -1) {
perror("wait");
goto error;
}
/*
* This should never happen but it's hard to flag in
* the framework.
*/
if (pid != child)
continue;
if (WIFEXITED(status) || WIFSIGNALED(status))
ksft_exit_fail_msg("Child died unexpectedly\n");
if (!WIFSTOPPED(status))
goto error;
sig = WSTOPSIG(status);
if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
if (errno == ESRCH)
goto disappeared;
if (errno == EINVAL) {
sig = 0; /* bust group-stop */
goto cont;
}
ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
strerror(errno));
goto error;
}
if (sig == SIGSTOP && si.si_code == SI_TKILL &&
si.si_pid == pid)
break;
cont:
if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
if (errno == ESRCH)
goto disappeared;
ksft_test_result_fail("PTRACE_CONT: %s\n",
strerror(errno));
goto error;
}
}
ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);
test_tpidr(child);
test_hw_debug(child, NT_ARM_HW_WATCH, "NT_ARM_HW_WATCH");
test_hw_debug(child, NT_ARM_HW_BREAK, "NT_ARM_HW_BREAK");
ret = EXIT_SUCCESS;
error:
kill(child, SIGKILL);
disappeared:
return ret;
}
int main(void)
{
int ret = EXIT_SUCCESS;
pid_t child;
srandom(getpid());
ksft_print_header();
ksft_set_plan(EXPECTED_TESTS);
child = fork();
if (!child)
return do_child();
if (do_parent(child))
ret = EXIT_FAILURE;
ksft_print_cnts();
return ret;
}
| linux-master | tools/testing/selftests/arm64/abi/ptrace.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/sched.h>
#include <linux/wait.h>
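/* TPIDR2_EL0 sysreg encoding, spelled out for toolchains without SME support */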
#define SYS_TPIDR2 "S3_3_C13_C0_5"
#define EXPECTED_TESTS 5
static void putstr(const char *str)
{
write(1, str, strlen(str));
}
static void putnum(unsigned int num)
{
char c;
if (num / 10)
putnum(num / 10);
c = '0' + (num % 10);
write(1, &c, 1);
}
static int tests_run;
static int tests_passed;
static int tests_failed;
static int tests_skipped;
static void set_tpidr2(uint64_t val)
{
asm volatile (
"msr " SYS_TPIDR2 ", %0\n"
:
: "r"(val)
: "cc");
}
static uint64_t get_tpidr2(void)
{
uint64_t val;
asm volatile (
"mrs %0, " SYS_TPIDR2 "\n"
: "=r"(val)
:
: "cc");
return val;
}
static void print_summary(void)
{
if (tests_passed + tests_failed + tests_skipped != EXPECTED_TESTS)
putstr("# UNEXPECTED TEST COUNT: ");
putstr("# Totals: pass:");
putnum(tests_passed);
putstr(" fail:");
putnum(tests_failed);
putstr(" xfail:0 xpass:0 skip:");
putnum(tests_skipped);
putstr(" error:0\n");
}
/* Processes should start with TPIDR2 == 0 */
static int default_value(void)
{
return get_tpidr2() == 0;
}
/* If we set TPIDR2 we should read that value */
static int write_read(void)
{
set_tpidr2(getpid());
return getpid() == get_tpidr2();
}
/* If we set a value we should read the same value after scheduling out */
static int write_sleep_read(void)
{
set_tpidr2(getpid());
msleep(100);
return getpid() == get_tpidr2();
}
/*
* If we fork the value in the parent should be unchanged and the
* child should start with the same value and be able to set its own
* value.
*/
static int write_fork_read(void)
{
pid_t newpid, waiting, oldpid;
int status;
set_tpidr2(getpid());
oldpid = getpid();
newpid = fork();
if (newpid == 0) {
/* In child */
if (get_tpidr2() != oldpid) {
putstr("# TPIDR2 changed in child: ");
putnum(get_tpidr2());
putstr("\n");
exit(0);
}
set_tpidr2(getpid());
if (get_tpidr2() == getpid()) {
exit(1);
} else {
putstr("# Failed to set TPIDR2 in child\n");
exit(0);
}
}
if (newpid < 0) {
putstr("# fork() failed: -");
putnum(-newpid);
putstr("\n");
return 0;
}
for (;;) {
waiting = waitpid(newpid, &status, 0);
if (waiting < 0) {
if (errno == EINTR)
continue;
putstr("# waitpid() failed: ");
putnum(errno);
putstr("\n");
return 0;
}
if (waiting != newpid) {
putstr("# waitpid() returned wrong PID\n");
return 0;
}
if (!WIFEXITED(status)) {
putstr("# child did not exit\n");
return 0;
}
if (getpid() != get_tpidr2()) {
putstr("# TPIDR2 corrupted in parent\n");
return 0;
}
return WEXITSTATUS(status);
}
}
/*
* sys_clone() has a lot of per architecture variation so just define
* it here rather than adding it to nolibc, plus the raw API is a
* little more convenient for this test.
*/
static int sys_clone(unsigned long clone_flags, unsigned long newsp,
int *parent_tidptr, unsigned long tls,
int *child_tidptr)
{
return my_syscall5(__NR_clone, clone_flags, newsp, parent_tidptr, tls,
child_tidptr);
}
/*
* If we clone with CLONE_SETTLS then the value in the parent should
* be unchanged and the child should start with zero and be able to
* set its own value.
*/
static int write_clone_read(void)
{
int parent_tid, child_tid;
pid_t parent, waiting;
int ret, status;
parent = getpid();
set_tpidr2(parent);
ret = sys_clone(CLONE_SETTLS, 0, &parent_tid, 0, &child_tid);
if (ret == -1) {
putstr("# clone() failed\n");
putnum(errno);
putstr("\n");
return 0;
}
if (ret == 0) {
/* In child */
if (get_tpidr2() != 0) {
putstr("# TPIDR2 non-zero in child: ");
putnum(get_tpidr2());
putstr("\n");
exit(0);
}
if (gettid() == 0)
putstr("# Child TID==0\n");
set_tpidr2(gettid());
if (get_tpidr2() == gettid()) {
exit(1);
} else {
putstr("# Failed to set TPIDR2 in child\n");
exit(0);
}
}
for (;;) {
waiting = wait4(ret, &status, __WCLONE, NULL);
if (waiting < 0) {
if (errno == EINTR)
continue;
putstr("# wait4() failed: ");
putnum(errno);
putstr("\n");
return 0;
}
if (waiting != ret) {
putstr("# wait4() returned wrong PID ");
putnum(waiting);
putstr("\n");
return 0;
}
if (!WIFEXITED(status)) {
putstr("# child did not exit\n");
return 0;
}
if (parent != get_tpidr2()) {
putstr("# TPIDR2 corrupted in parent\n");
return 0;
}
return WEXITSTATUS(status);
}
}
#define run_test(name) \
if (name()) { \
tests_passed++; \
} else { \
tests_failed++; \
putstr("not "); \
} \
putstr("ok "); \
putnum(++tests_run); \
putstr(" " #name "\n");
int main(int argc, char **argv)
{
int ret, i;
putstr("TAP version 13\n");
putstr("1..");
putnum(EXPECTED_TESTS);
putstr("\n");
putstr("# PID: ");
putnum(getpid());
putstr("\n");
/*
* This test is run with nolibc which doesn't support hwcap and
* it's probably disproportionate to implement so instead check
* for the default vector length configuration in /proc.
*/
ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0);
if (ret >= 0) {
run_test(default_value);
run_test(write_read);
run_test(write_sleep_read);
run_test(write_fork_read);
run_test(write_clone_read);
} else {
putstr("# SME support not present\n");
for (i = 0; i < EXPECTED_TESTS; i++) {
putstr("ok ");
			putnum(i + 1);
putstr(" skipped, TPIDR2 not supported\n");
}
tests_skipped += EXPECTED_TESTS;
}
print_summary();
return 0;
}
| linux-master | tools/testing/selftests/arm64/abi/tpidr2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 ARM Limited.
*/
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <asm/hwcap.h>
#include <asm/sigcontext.h>
#include <asm/unistd.h>
#include "../../kselftest.h"
#define TESTS_PER_HWCAP 3
/*
* Function expected to generate exception when the feature is not
* supported and return when it is supported. If the specific exception
* is generated then the handler must be able to skip over the
* instruction safely.
*
* Note that it is expected that for many architecture extensions
* there are no specific traps due to no architecture state being
* added so we may not fault if running on a kernel which doesn't know
* to add the hwcap.
*/
typedef void (*sig_fn)(void);
static void aes_sigill(void)
{
/* AESE V0.16B, V0.16B */
asm volatile(".inst 0x4e284800" : : : );
}
static void atomics_sigill(void)
{
/* STADD W0, [SP] */
asm volatile(".inst 0xb82003ff" : : : );
}
static void crc32_sigill(void)
{
/* CRC32W W0, W0, W1 */
asm volatile(".inst 0x1ac14800" : : : );
}
static void cssc_sigill(void)
{
/* CNT x0, x0 */
asm volatile(".inst 0xdac01c00" : : : "x0");
}
static void fp_sigill(void)
{
asm volatile("fmov s0, #1");
}
static void ilrcpc_sigill(void)
{
/* LDAPUR W0, [SP, #8] */
asm volatile(".inst 0x994083e0" : : : );
}
static void jscvt_sigill(void)
{
/* FJCVTZS W0, D0 */
asm volatile(".inst 0x1e7e0000" : : : );
}
static void lrcpc_sigill(void)
{
/* LDAPR W0, [SP, #0] */
asm volatile(".inst 0xb8bfc3e0" : : : );
}
static void mops_sigill(void)
{
char dst[1], src[1];
register char *dstp asm ("x0") = dst;
register char *srcp asm ("x1") = src;
register long size asm ("x2") = 1;
/* CPYP [x0]!, [x1]!, x2! */
asm volatile(".inst 0x1d010440"
: "+r" (dstp), "+r" (srcp), "+r" (size)
:
: "cc", "memory");
}
static void pmull_sigill(void)
{
/* PMULL V0.1Q, V0.1D, V0.1D */
asm volatile(".inst 0x0ee0e000" : : : );
}
static void rng_sigill(void)
{
asm volatile("mrs x0, S3_3_C2_C4_0" : : : "x0");
}
static void sha1_sigill(void)
{
/* SHA1H S0, S0 */
asm volatile(".inst 0x5e280800" : : : );
}
static void sha2_sigill(void)
{
/* SHA256H Q0, Q0, V0.4S */
asm volatile(".inst 0x5e004000" : : : );
}
static void sha512_sigill(void)
{
/* SHA512H Q0, Q0, V0.2D */
asm volatile(".inst 0xce608000" : : : );
}
static void sme_sigill(void)
{
/* RDSVL x0, #0 */
asm volatile(".inst 0x04bf5800" : : : "x0");
}
static void sme2_sigill(void)
{
/* SMSTART ZA */
asm volatile("msr S0_3_C4_C5_3, xzr" : : : );
/* ZERO ZT0 */
asm volatile(".inst 0xc0480001" : : : );
/* SMSTOP */
asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
}
static void sme2p1_sigill(void)
{
/* SMSTART SM */
asm volatile("msr S0_3_C4_C3_3, xzr" : : : );
/* BFCLAMP { Z0.H - Z1.H }, Z0.H, Z0.H */
asm volatile(".inst 0xc120C000" : : : );
/* SMSTOP */
asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
}
static void smei16i32_sigill(void)
{
/* SMSTART */
asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
/* SMOPA ZA0.S, P0/M, P0/M, Z0.B, Z0.B */
asm volatile(".inst 0xa0800000" : : : );
/* SMSTOP */
asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
}
static void smebi32i32_sigill(void)
{
/* SMSTART */
asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
/* BMOPA ZA0.S, P0/M, P0/M, Z0.B, Z0.B */
asm volatile(".inst 0x80800008" : : : );
/* SMSTOP */
asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
}
static void smeb16b16_sigill(void)
{
/* SMSTART */
asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
/* BFADD ZA.H[W0, 0], {Z0.H-Z1.H} */
asm volatile(".inst 0xC1E41C00" : : : );
/* SMSTOP */
asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
}
static void smef16f16_sigill(void)
{
/* SMSTART */
asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
/* FADD ZA.H[W0, 0], { Z0.H-Z1.H } */
asm volatile(".inst 0xc1a41C00" : : : );
/* SMSTOP */
asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
}
static void sve_sigill(void)
{
/* RDVL x0, #0 */
asm volatile(".inst 0x04bf5000" : : : "x0");
}
static void sve2_sigill(void)
{
/* SQABS Z0.b, P0/M, Z0.B */
asm volatile(".inst 0x4408A000" : : : "z0");
}
static void sve2p1_sigill(void)
{
/* BFADD Z0.H, Z0.H, Z0.H */
asm volatile(".inst 0x65000000" : : : "z0");
}
static void sveaes_sigill(void)
{
/* AESD z0.b, z0.b, z0.b */
asm volatile(".inst 0x4522e400" : : : "z0");
}
static void svepmull_sigill(void)
{
/* PMULLB Z0.Q, Z0.D, Z0.D */
asm volatile(".inst 0x45006800" : : : "z0");
}
static void svebitperm_sigill(void)
{
/* BDEP Z0.B, Z0.B, Z0.B */
asm volatile(".inst 0x4500b400" : : : "z0");
}
static void svesha3_sigill(void)
{
/* EOR3 Z0.D, Z0.D, Z0.D, Z0.D */
asm volatile(".inst 0x4203800" : : : "z0");
}
static void svesm4_sigill(void)
{
/* SM4E Z0.S, Z0.S, Z0.S */
asm volatile(".inst 0x4523e000" : : : "z0");
}
static void svei8mm_sigill(void)
{
/* USDOT Z0.S, Z0.B, Z0.B[0] */
asm volatile(".inst 0x44a01800" : : : "z0");
}
static void svef32mm_sigill(void)
{
/* FMMLA Z0.S, Z0.S, Z0.S */
asm volatile(".inst 0x64a0e400" : : : "z0");
}
static void svef64mm_sigill(void)
{
/* FMMLA Z0.D, Z0.D, Z0.D */
asm volatile(".inst 0x64e0e400" : : : "z0");
}
static void svebf16_sigill(void)
{
/* BFCVT Z0.H, P0/M, Z0.S */
asm volatile(".inst 0x658aa000" : : : "z0");
}
static void hbc_sigill(void)
{
/* BC.EQ +4 */
asm volatile("cmp xzr, xzr\n"
".inst 0x54000030" : : : "cc");
}
static void uscat_sigbus(void)
{
/* unaligned atomic access */
asm volatile("ADD x1, sp, #2" : : : );
/* STADD W0, [X1] */
asm volatile(".inst 0xb820003f" : : : );
}
static const struct hwcap_data {
const char *name;
unsigned long at_hwcap;
unsigned long hwcap_bit;
const char *cpuinfo;
sig_fn sigill_fn;
bool sigill_reliable;
sig_fn sigbus_fn;
bool sigbus_reliable;
} hwcaps[] = {
{
.name = "AES",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_AES,
.cpuinfo = "aes",
.sigill_fn = aes_sigill,
},
{
.name = "CRC32",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_CRC32,
.cpuinfo = "crc32",
.sigill_fn = crc32_sigill,
},
{
.name = "CSSC",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_CSSC,
.cpuinfo = "cssc",
.sigill_fn = cssc_sigill,
},
{
.name = "FP",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_FP,
.cpuinfo = "fp",
.sigill_fn = fp_sigill,
},
{
.name = "JSCVT",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_JSCVT,
.cpuinfo = "jscvt",
.sigill_fn = jscvt_sigill,
},
{
.name = "LRCPC",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_LRCPC,
.cpuinfo = "lrcpc",
.sigill_fn = lrcpc_sigill,
},
{
.name = "LRCPC2",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_ILRCPC,
.cpuinfo = "ilrcpc",
.sigill_fn = ilrcpc_sigill,
},
{
.name = "LSE",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_ATOMICS,
.cpuinfo = "atomics",
.sigill_fn = atomics_sigill,
},
{
.name = "LSE2",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_USCAT,
.cpuinfo = "uscat",
.sigill_fn = atomics_sigill,
.sigbus_fn = uscat_sigbus,
.sigbus_reliable = true,
},
{
.name = "MOPS",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_MOPS,
.cpuinfo = "mops",
.sigill_fn = mops_sigill,
.sigill_reliable = true,
},
{
.name = "PMULL",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_PMULL,
.cpuinfo = "pmull",
.sigill_fn = pmull_sigill,
},
{
.name = "RNG",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_RNG,
.cpuinfo = "rng",
.sigill_fn = rng_sigill,
},
{
.name = "RPRFM",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_RPRFM,
.cpuinfo = "rprfm",
},
{
.name = "SHA1",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_SHA1,
.cpuinfo = "sha1",
.sigill_fn = sha1_sigill,
},
{
.name = "SHA2",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_SHA2,
.cpuinfo = "sha2",
.sigill_fn = sha2_sigill,
},
{
.name = "SHA512",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_SHA512,
.cpuinfo = "sha512",
.sigill_fn = sha512_sigill,
},
{
.name = "SME",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SME,
.cpuinfo = "sme",
.sigill_fn = sme_sigill,
.sigill_reliable = true,
},
{
.name = "SME2",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SME2,
.cpuinfo = "sme2",
.sigill_fn = sme2_sigill,
.sigill_reliable = true,
},
{
.name = "SME 2.1",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SME2P1,
.cpuinfo = "sme2p1",
.sigill_fn = sme2p1_sigill,
},
{
.name = "SME I16I32",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SME_I16I32,
.cpuinfo = "smei16i32",
.sigill_fn = smei16i32_sigill,
},
{
.name = "SME BI32I32",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SME_BI32I32,
.cpuinfo = "smebi32i32",
.sigill_fn = smebi32i32_sigill,
},
{
.name = "SME B16B16",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SME_B16B16,
.cpuinfo = "smeb16b16",
.sigill_fn = smeb16b16_sigill,
},
{
.name = "SME F16F16",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SME_F16F16,
.cpuinfo = "smef16f16",
.sigill_fn = smef16f16_sigill,
},
{
.name = "SVE",
.at_hwcap = AT_HWCAP,
.hwcap_bit = HWCAP_SVE,
.cpuinfo = "sve",
.sigill_fn = sve_sigill,
.sigill_reliable = true,
},
{
.name = "SVE 2",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SVE2,
.cpuinfo = "sve2",
.sigill_fn = sve2_sigill,
},
{
.name = "SVE 2.1",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SVE2P1,
.cpuinfo = "sve2p1",
.sigill_fn = sve2p1_sigill,
},
{
.name = "SVE AES",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SVEAES,
.cpuinfo = "sveaes",
.sigill_fn = sveaes_sigill,
},
{
.name = "SVE2 PMULL",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SVEPMULL,
.cpuinfo = "svepmull",
.sigill_fn = svepmull_sigill,
},
{
.name = "SVE2 BITPERM",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SVEBITPERM,
.cpuinfo = "svebitperm",
.sigill_fn = svebitperm_sigill,
},
{
.name = "SVE2 SHA3",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SVESHA3,
.cpuinfo = "svesha3",
.sigill_fn = svesha3_sigill,
},
{
.name = "SVE2 SM4",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SVESM4,
.cpuinfo = "svesm4",
.sigill_fn = svesm4_sigill,
},
{
.name = "SVE2 I8MM",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SVEI8MM,
.cpuinfo = "svei8mm",
.sigill_fn = svei8mm_sigill,
},
{
.name = "SVE2 F32MM",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SVEF32MM,
.cpuinfo = "svef32mm",
.sigill_fn = svef32mm_sigill,
},
{
.name = "SVE2 F64MM",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SVEF64MM,
.cpuinfo = "svef64mm",
.sigill_fn = svef64mm_sigill,
},
{
.name = "SVE2 BF16",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SVEBF16,
.cpuinfo = "svebf16",
.sigill_fn = svebf16_sigill,
},
{
.name = "SVE2 EBF16",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SVE_EBF16,
.cpuinfo = "sveebf16",
},
{
.name = "HBC",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_HBC,
.cpuinfo = "hbc",
.sigill_fn = hbc_sigill,
.sigill_reliable = true,
},
};
typedef void (*sighandler_fn)(int, siginfo_t *, void *);
#define DEF_SIGHANDLER_FUNC(SIG, NUM) \
static bool seen_##SIG; \
static void handle_##SIG(int sig, siginfo_t *info, void *context) \
{ \
ucontext_t *uc = context; \
\
seen_##SIG = true; \
/* Skip over the offending instruction */ \
uc->uc_mcontext.pc += 4; \
}
DEF_SIGHANDLER_FUNC(sigill, SIGILL);
DEF_SIGHANDLER_FUNC(sigbus, SIGBUS);
bool cpuinfo_present(const char *name)
{
FILE *f;
char buf[2048], name_space[30], name_newline[30];
char *s;
/*
* The feature should appear with a leading space and either a
* trailing space or a newline.
*/
snprintf(name_space, sizeof(name_space), " %s ", name);
snprintf(name_newline, sizeof(name_newline), " %s\n", name);
f = fopen("/proc/cpuinfo", "r");
if (!f) {
ksft_print_msg("Failed to open /proc/cpuinfo\n");
return false;
}
while (fgets(buf, sizeof(buf), f)) {
/* Features: line? */
if (strncmp(buf, "Features\t:", strlen("Features\t:")) != 0)
continue;
/* All CPUs should be symmetric, don't read any more */
fclose(f);
s = strstr(buf, name_space);
if (s)
return true;
s = strstr(buf, name_newline);
if (s)
return true;
return false;
}
ksft_print_msg("Failed to find Features in /proc/cpuinfo\n");
fclose(f);
return false;
}
static int install_sigaction(int signum, sighandler_fn handler)
{
int ret;
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_RESTART | SA_SIGINFO;
sigemptyset(&sa.sa_mask);
ret = sigaction(signum, &sa, NULL);
if (ret < 0)
ksft_exit_fail_msg("Failed to install SIGNAL handler: %s (%d)\n",
strerror(errno), errno);
return ret;
}
static void uninstall_sigaction(int signum)
{
if (sigaction(signum, NULL, NULL) < 0)
ksft_exit_fail_msg("Failed to uninstall SIGNAL handler: %s (%d)\n",
strerror(errno), errno);
}
#define DEF_INST_RAISE_SIG(SIG, NUM) \
static bool inst_raise_##SIG(const struct hwcap_data *hwcap, \
bool have_hwcap) \
{ \
if (!hwcap->SIG##_fn) { \
ksft_test_result_skip(#SIG"_%s\n", hwcap->name); \
		/* assume it would raise an exception by default */ \
return true; \
} \
\
install_sigaction(NUM, handle_##SIG); \
\
seen_##SIG = false; \
hwcap->SIG##_fn(); \
\
if (have_hwcap) { \
/* Should be able to use the extension */ \
ksft_test_result(!seen_##SIG, \
#SIG"_%s\n", hwcap->name); \
} else if (hwcap->SIG##_reliable) { \
/* Guaranteed a SIGNAL */ \
ksft_test_result(seen_##SIG, \
#SIG"_%s\n", hwcap->name); \
} else { \
/* Missing SIGNAL might be fine */ \
ksft_print_msg(#SIG"_%sreported for %s\n", \
seen_##SIG ? "" : "not ", \
hwcap->name); \
ksft_test_result_skip(#SIG"_%s\n", \
hwcap->name); \
} \
\
uninstall_sigaction(NUM); \
return seen_##SIG; \
}
DEF_INST_RAISE_SIG(sigill, SIGILL);
DEF_INST_RAISE_SIG(sigbus, SIGBUS);
int main(void)
{
int i;
const struct hwcap_data *hwcap;
bool have_cpuinfo, have_hwcap, raise_sigill;
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(hwcaps) * TESTS_PER_HWCAP);
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
hwcap = &hwcaps[i];
have_hwcap = getauxval(hwcap->at_hwcap) & hwcap->hwcap_bit;
have_cpuinfo = cpuinfo_present(hwcap->cpuinfo);
if (have_hwcap)
ksft_print_msg("%s present\n", hwcap->name);
ksft_test_result(have_hwcap == have_cpuinfo,
"cpuinfo_match_%s\n", hwcap->name);
		/*
		 * Testing for SIGBUS only makes sense after making sure
		 * that the instruction does not cause a SIGILL signal.
		 */
raise_sigill = inst_raise_sigill(hwcap, have_hwcap);
if (!raise_sigill)
inst_raise_sigbus(hwcap, have_hwcap);
else
ksft_test_result_skip("sigbus_%s\n", hwcap->name);
}
ksft_print_cnts();
return 0;
}
| linux-master | tools/testing/selftests/arm64/abi/hwcap.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/prctl.h>
#include <sys/utsname.h>
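/* With TBI the top byte of a pointer is ignored on access, so it can hold a tag */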
#define SHIFT_TAG(tag) ((uint64_t)(tag) << 56)
#define SET_TAG(ptr, tag) (((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \
SHIFT_TAG(tag))
int main(void)
{
static int tbi_enabled = 0;
unsigned long tag = 0;
struct utsname *ptr;
int err;
if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0)
tbi_enabled = 1;
ptr = (struct utsname *)malloc(sizeof(*ptr));
if (tbi_enabled)
tag = 0x42;
ptr = (struct utsname *)SET_TAG(ptr, tag);
err = uname(ptr);
free(ptr);
return err;
}
| linux-master | tools/testing/selftests/arm64/tags/tags_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Arm Limited
* Original author: Dave Martin <[email protected]>
*/
#include "system.h"
#include <asm/unistd.h>
void __noreturn exit(int n)
{
syscall(__NR_exit, n);
unreachable();
}
ssize_t write(int fd, const void *buf, size_t size)
{
return syscall(__NR_write, fd, buf, size);
}
| linux-master | tools/testing/selftests/arm64/bti/system.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Arm Limited
* Original author: Dave Martin <[email protected]>
*/
#include "system.h"
#include "signal.h"
int sigemptyset(sigset_t *s)
{
unsigned int i;
for (i = 0; i < _NSIG_WORDS; ++i)
s->sig[i] = 0;
return 0;
}
int sigaddset(sigset_t *s, int n)
{
if (n < 1 || n > _NSIG)
return -EINVAL;
s->sig[(n - 1) / _NSIG_BPW] |= 1UL << (n - 1) % _NSIG_BPW;
return 0;
}
int sigaction(int n, struct sigaction *sa, const struct sigaction *old)
{
return syscall(__NR_rt_sigaction, n, sa, old, sizeof(sa->sa_mask));
}
int sigprocmask(int how, const sigset_t *mask, sigset_t *old)
{
return syscall(__NR_rt_sigprocmask, how, mask, old, sizeof(*mask));
}
| linux-master | tools/testing/selftests/arm64/bti/signal.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019,2021 Arm Limited
* Original author: Dave Martin <[email protected]>
*/
#include "system.h"
#include <stdbool.h>
#include <stddef.h>
#include <linux/errno.h>
#include <linux/auxvec.h>
#include <linux/signal.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
typedef struct ucontext ucontext_t;
#include "btitest.h"
#include "signal.h"
#define EXPECTED_TESTS 18
static volatile unsigned int test_num = 1;
static unsigned int test_passed;
static unsigned int test_failed;
static unsigned int test_skipped;
static void fdputs(int fd, const char *str)
{
size_t len = 0;
const char *p = str;
while (*p++)
++len;
write(fd, str, len);
}
static void putstr(const char *str)
{
fdputs(1, str);
}
static void putnum(unsigned int num)
{
char c;
if (num / 10)
putnum(num / 10);
c = '0' + (num % 10);
write(1, &c, 1);
}
#define puttestname(test_name, trampoline_name) do { \
putstr(test_name); \
putstr("/"); \
putstr(trampoline_name); \
} while (0)
void print_summary(void)
{
putstr("# Totals: pass:");
putnum(test_passed);
putstr(" fail:");
putnum(test_failed);
putstr(" xfail:0 xpass:0 skip:");
putnum(test_skipped);
putstr(" error:0\n");
}
static const char *volatile current_test_name;
static const char *volatile current_trampoline_name;
static volatile int sigill_expected, sigill_received;
static void handler(int n, siginfo_t *si __always_unused,
void *uc_ __always_unused)
{
ucontext_t *uc = uc_;
putstr("# \t[SIGILL in ");
puttestname(current_test_name, current_trampoline_name);
putstr(", BTYPE=");
write(1, &"00011011"[((uc->uc_mcontext.pstate & PSR_BTYPE_MASK)
>> PSR_BTYPE_SHIFT) * 2], 2);
if (!sigill_expected) {
putstr("]\n");
putstr("not ok ");
putnum(test_num);
putstr(" ");
puttestname(current_test_name, current_trampoline_name);
putstr("(unexpected SIGILL)\n");
print_summary();
exit(128 + n);
}
putstr(" (expected)]\n");
sigill_received = 1;
/* zap BTYPE so that resuming the faulting code will work */
uc->uc_mcontext.pstate &= ~PSR_BTYPE_MASK;
}
/* Does the system have BTI? */
static bool have_bti;
static void __do_test(void (*trampoline)(void (*)(void)),
void (*fn)(void),
const char *trampoline_name,
const char *name,
int expect_sigill)
{
/*
* Branch Target exceptions should only happen for BTI
* binaries running on a system with BTI:
*/
if (!BTI || !have_bti)
expect_sigill = 0;
sigill_expected = expect_sigill;
sigill_received = 0;
current_test_name = name;
current_trampoline_name = trampoline_name;
trampoline(fn);
if (expect_sigill && !sigill_received) {
putstr("not ok ");
test_failed++;
} else {
putstr("ok ");
test_passed++;
}
putnum(test_num++);
putstr(" ");
puttestname(name, trampoline_name);
putstr("\n");
}
#define do_test(expect_sigill_br_x0, \
expect_sigill_br_x16, \
expect_sigill_blr, \
name) \
do { \
__do_test(call_using_br_x0, name, "call_using_br_x0", #name, \
expect_sigill_br_x0); \
__do_test(call_using_br_x16, name, "call_using_br_x16", #name, \
expect_sigill_br_x16); \
__do_test(call_using_blr, name, "call_using_blr", #name, \
expect_sigill_blr); \
} while (0)
void start(int *argcp)
{
struct sigaction sa;
void *const *p;
const struct auxv_entry {
unsigned long type;
unsigned long val;
} *auxv;
unsigned long hwcap = 0, hwcap2 = 0;
putstr("TAP version 13\n");
putstr("1..");
putnum(EXPECTED_TESTS);
putstr("\n");
/* Gross hack for finding AT_HWCAP2 from the initial process stack: */
p = (void *const *)argcp + 1 + *argcp + 1; /* start of environment */
/* step over environment */
while (*p++)
;
for (auxv = (const struct auxv_entry *)p; auxv->type != AT_NULL; ++auxv) {
switch (auxv->type) {
case AT_HWCAP:
hwcap = auxv->val;
break;
case AT_HWCAP2:
hwcap2 = auxv->val;
break;
default:
break;
}
}
if (hwcap & HWCAP_PACA)
putstr("# HWCAP_PACA present\n");
else
putstr("# HWCAP_PACA not present\n");
if (hwcap2 & HWCAP2_BTI) {
putstr("# HWCAP2_BTI present\n");
if (!(hwcap & HWCAP_PACA))
putstr("# Bad hardware? Expect problems.\n");
have_bti = true;
} else {
putstr("# HWCAP2_BTI not present\n");
have_bti = false;
}
putstr("# Test binary");
if (!BTI)
putstr(" not");
putstr(" built for BTI\n");
sa.sa_handler = (sighandler_t)(void *)handler;
sa.sa_flags = SA_SIGINFO;
sigemptyset(&sa.sa_mask);
sigaction(SIGILL, &sa, NULL);
sigaddset(&sa.sa_mask, SIGILL);
sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);
do_test(1, 1, 1, nohint_func);
do_test(1, 1, 1, bti_none_func);
do_test(1, 0, 0, bti_c_func);
do_test(0, 0, 1, bti_j_func);
do_test(0, 0, 0, bti_jc_func);
do_test(1, 0, 0, paciasp_func);
print_summary();
if (test_num - 1 != EXPECTED_TESTS)
putstr("# WARNING - EXPECTED TEST COUNT WRONG\n");
if (test_failed)
exit(1);
else
exit(0);
}
| linux-master | tools/testing/selftests/arm64/bti/test.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited
#define _GNU_SOURCE
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>
#include <sys/wait.h>
#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"
#define BUFFER_SIZE (5 * MT_GRANULE_SIZE)
#define RUNS (MT_TAG_COUNT)
#define UNDERFLOW MT_GRANULE_SIZE
#define OVERFLOW MT_GRANULE_SIZE
static size_t page_size;
static int sizes[] = {
1, 537, 989, 1269, MT_GRANULE_SIZE - 1, MT_GRANULE_SIZE,
	/* page size - 1 */ 0, /* page size */ 0, /* page size + 1 */ 0
};
static int check_child_tag_inheritance(char *ptr, int size, int mode)
{
int i, parent_tag, child_tag, fault, child_status;
pid_t child;
parent_tag = MT_FETCH_TAG((uintptr_t)ptr);
fault = 0;
child = fork();
if (child == -1) {
ksft_print_msg("FAIL: child process creation\n");
return KSFT_FAIL;
} else if (child == 0) {
mte_initialize_current_context(mode, (uintptr_t)ptr, size);
/* Do copy on write */
memset(ptr, '1', size);
mte_wait_after_trig();
if (cur_mte_cxt.fault_valid == true) {
fault = 1;
goto check_child_tag_inheritance_err;
}
for (i = 0 ; i < size ; i += MT_GRANULE_SIZE) {
child_tag = MT_FETCH_TAG((uintptr_t)(mte_get_tag_address(ptr + i)));
if (parent_tag != child_tag) {
ksft_print_msg("FAIL: child mte tag mismatch\n");
fault = 1;
goto check_child_tag_inheritance_err;
}
}
mte_initialize_current_context(mode, (uintptr_t)ptr, -UNDERFLOW);
memset(ptr - UNDERFLOW, '2', UNDERFLOW);
mte_wait_after_trig();
if (cur_mte_cxt.fault_valid == false) {
fault = 1;
goto check_child_tag_inheritance_err;
}
mte_initialize_current_context(mode, (uintptr_t)ptr, size + OVERFLOW);
memset(ptr + size, '3', OVERFLOW);
mte_wait_after_trig();
if (cur_mte_cxt.fault_valid == false) {
fault = 1;
goto check_child_tag_inheritance_err;
}
check_child_tag_inheritance_err:
_exit(fault);
}
/* Wait for child process to terminate */
wait(&child_status);
if (WIFEXITED(child_status))
fault = WEXITSTATUS(child_status);
else
fault = 1;
return (fault) ? KSFT_FAIL : KSFT_PASS;
}
static int check_child_memory_mapping(int mem_type, int mode, int mapping)
{
char *ptr;
int run, result;
	int item = ARRAY_SIZE(sizes);
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
for (run = 0; run < item; run++) {
ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,
UNDERFLOW, OVERFLOW);
if (check_allocated_memory_range(ptr, sizes[run], mem_type,
UNDERFLOW, OVERFLOW) != KSFT_PASS)
return KSFT_FAIL;
result = check_child_tag_inheritance(ptr, sizes[run], mode);
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW);
if (result == KSFT_FAIL)
return result;
}
return KSFT_PASS;
}
static int check_child_file_mapping(int mem_type, int mode, int mapping)
{
char *ptr, *map_ptr;
int run, fd, map_size, result = KSFT_PASS;
int total = ARRAY_SIZE(sizes);
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
for (run = 0; run < total; run++) {
fd = create_temp_file();
if (fd == -1)
return KSFT_FAIL;
map_size = sizes[run] + OVERFLOW + UNDERFLOW;
map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);
if (check_allocated_memory(map_ptr, map_size, mem_type, false) != KSFT_PASS) {
close(fd);
return KSFT_FAIL;
}
ptr = map_ptr + UNDERFLOW;
mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[run]);
/* Only mte enabled memory will allow tag insertion */
ptr = mte_insert_tags((void *)ptr, sizes[run]);
if (!ptr || cur_mte_cxt.fault_valid == true) {
ksft_print_msg("FAIL: Insert tags on file based memory\n");
munmap((void *)map_ptr, map_size);
close(fd);
return KSFT_FAIL;
}
result = check_child_tag_inheritance(ptr, sizes[run], mode);
mte_clear_tags((void *)ptr, sizes[run]);
munmap((void *)map_ptr, map_size);
close(fd);
if (result != KSFT_PASS)
return KSFT_FAIL;
}
return KSFT_PASS;
}
int main(int argc, char *argv[])
{
int err;
int item = ARRAY_SIZE(sizes);
page_size = getpagesize();
if (!page_size) {
ksft_print_msg("ERR: Unable to get page size\n");
return KSFT_FAIL;
}
sizes[item - 3] = page_size - 1;
sizes[item - 2] = page_size;
sizes[item - 1] = page_size + 1;
err = mte_default_setup();
if (err)
return err;
/* Register SIGSEGV handler */
mte_register_signal(SIGSEGV, mte_default_handler);
mte_register_signal(SIGBUS, mte_default_handler);
/* Set test plan */
ksft_set_plan(12);
evaluate_test(check_child_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
"Check child anonymous memory with private mapping, precise mode and mmap memory\n");
evaluate_test(check_child_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
"Check child anonymous memory with shared mapping, precise mode and mmap memory\n");
evaluate_test(check_child_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE),
"Check child anonymous memory with private mapping, imprecise mode and mmap memory\n");
evaluate_test(check_child_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED),
"Check child anonymous memory with shared mapping, imprecise mode and mmap memory\n");
evaluate_test(check_child_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE),
"Check child anonymous memory with private mapping, precise mode and mmap/mprotect memory\n");
evaluate_test(check_child_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED),
"Check child anonymous memory with shared mapping, precise mode and mmap/mprotect memory\n");
evaluate_test(check_child_file_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
"Check child file memory with private mapping, precise mode and mmap memory\n");
evaluate_test(check_child_file_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
"Check child file memory with shared mapping, precise mode and mmap memory\n");
	evaluate_test(check_child_file_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE),
		"Check child file memory with private mapping, imprecise mode and mmap memory\n");
	evaluate_test(check_child_file_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED),
		"Check child file memory with shared mapping, imprecise mode and mmap memory\n");
	evaluate_test(check_child_file_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE),
		"Check child file memory with private mapping, precise mode and mmap/mprotect memory\n");
	evaluate_test(check_child_file_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED),
		"Check child file memory with shared mapping, precise mode and mmap/mprotect memory\n");
mte_restore_setup();
ksft_print_cnts();
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}
| linux-master | tools/testing/selftests/arm64/mte/check_child_memory.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"
#define RUNS (MT_TAG_COUNT)
#define UNDERFLOW MT_GRANULE_SIZE
#define OVERFLOW MT_GRANULE_SIZE
#define TAG_CHECK_ON 0
#define TAG_CHECK_OFF 1
static size_t page_size;
static int sizes[] = {
1, 537, 989, 1269, MT_GRANULE_SIZE - 1, MT_GRANULE_SIZE,
	/* page size - 1 */ 0, /* page size */ 0, /* page size + 1 */ 0
};
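/*
 * Core check used below: an in-bounds write must never fault, while
 * writes to the differently tagged granules just before and after the
 * buffer must fault if and only if tag checking is enabled.
 */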
static int check_mte_memory(char *ptr, int size, int mode, int tag_check)
{
mte_initialize_current_context(mode, (uintptr_t)ptr, size);
memset(ptr, '1', size);
mte_wait_after_trig();
if (cur_mte_cxt.fault_valid == true)
return KSFT_FAIL;
mte_initialize_current_context(mode, (uintptr_t)ptr, -UNDERFLOW);
memset(ptr - UNDERFLOW, '2', UNDERFLOW);
mte_wait_after_trig();
if (cur_mte_cxt.fault_valid == false && tag_check == TAG_CHECK_ON)
return KSFT_FAIL;
if (cur_mte_cxt.fault_valid == true && tag_check == TAG_CHECK_OFF)
return KSFT_FAIL;
mte_initialize_current_context(mode, (uintptr_t)ptr, size + OVERFLOW);
memset(ptr + size, '3', OVERFLOW);
mte_wait_after_trig();
if (cur_mte_cxt.fault_valid == false && tag_check == TAG_CHECK_ON)
return KSFT_FAIL;
if (cur_mte_cxt.fault_valid == true && tag_check == TAG_CHECK_OFF)
return KSFT_FAIL;
return KSFT_PASS;
}
static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, int tag_check)
{
char *ptr, *map_ptr;
int run, result, map_size;
int item = ARRAY_SIZE(sizes);
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
for (run = 0; run < item; run++) {
map_size = sizes[run] + OVERFLOW + UNDERFLOW;
map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);
if (check_allocated_memory(map_ptr, map_size, mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
ptr = map_ptr + UNDERFLOW;
mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[run]);
/* Only mte enabled memory will allow tag insertion */
ptr = mte_insert_tags((void *)ptr, sizes[run]);
if (!ptr || cur_mte_cxt.fault_valid == true) {
ksft_print_msg("FAIL: Insert tags on anonymous mmap memory\n");
munmap((void *)map_ptr, map_size);
return KSFT_FAIL;
}
result = check_mte_memory(ptr, sizes[run], mode, tag_check);
mte_clear_tags((void *)ptr, sizes[run]);
mte_free_memory((void *)map_ptr, map_size, mem_type, false);
if (result == KSFT_FAIL)
return KSFT_FAIL;
}
return KSFT_PASS;
}
static int check_file_memory_mapping(int mem_type, int mode, int mapping, int tag_check)
{
char *ptr, *map_ptr;
int run, fd, map_size;
int total = ARRAY_SIZE(sizes);
int result = KSFT_PASS;
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
for (run = 0; run < total; run++) {
fd = create_temp_file();
if (fd == -1)
return KSFT_FAIL;
map_size = sizes[run] + UNDERFLOW + OVERFLOW;
map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);
if (check_allocated_memory(map_ptr, map_size, mem_type, false) != KSFT_PASS) {
close(fd);
return KSFT_FAIL;
}
ptr = map_ptr + UNDERFLOW;
mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[run]);
/* Only mte enabled memory will allow tag insertion */
ptr = mte_insert_tags((void *)ptr, sizes[run]);
if (!ptr || cur_mte_cxt.fault_valid == true) {
ksft_print_msg("FAIL: Insert tags on file based memory\n");
munmap((void *)map_ptr, map_size);
close(fd);
return KSFT_FAIL;
}
result = check_mte_memory(ptr, sizes[run], mode, tag_check);
mte_clear_tags((void *)ptr, sizes[run]);
munmap((void *)map_ptr, map_size);
close(fd);
if (result == KSFT_FAIL)
break;
}
return result;
}
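/*
 * PROT_MTE is sticky: an mprotect() call that leaves it out must not
 * disable tag checking on the range, so tag-check faults are expected
 * to keep firing afterwards.
 */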
static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
{
char *ptr, *map_ptr;
int run, prot_flag, result, fd, map_size;
int total = ARRAY_SIZE(sizes);
prot_flag = PROT_READ | PROT_WRITE;
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
for (run = 0; run < total; run++) {
map_size = sizes[run] + OVERFLOW + UNDERFLOW;
ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,
UNDERFLOW, OVERFLOW);
if (check_allocated_memory_range(ptr, sizes[run], mem_type,
UNDERFLOW, OVERFLOW) != KSFT_PASS)
return KSFT_FAIL;
map_ptr = ptr - UNDERFLOW;
/* Try to clear PROT_MTE property and verify it by tag checking */
if (mprotect(map_ptr, map_size, prot_flag)) {
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type,
UNDERFLOW, OVERFLOW);
ksft_print_msg("FAIL: mprotect not ignoring clear PROT_MTE property\n");
return KSFT_FAIL;
}
result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON);
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW);
if (result != KSFT_PASS)
return KSFT_FAIL;
fd = create_temp_file();
if (fd == -1)
return KSFT_FAIL;
ptr = (char *)mte_allocate_file_memory_tag_range(sizes[run], mem_type, mapping,
UNDERFLOW, OVERFLOW, fd);
if (check_allocated_memory_range(ptr, sizes[run], mem_type,
UNDERFLOW, OVERFLOW) != KSFT_PASS) {
close(fd);
return KSFT_FAIL;
}
map_ptr = ptr - UNDERFLOW;
/* Try to clear PROT_MTE property and verify it by tag checking */
if (mprotect(map_ptr, map_size, prot_flag)) {
ksft_print_msg("FAIL: mprotect not ignoring clear PROT_MTE property\n");
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type,
UNDERFLOW, OVERFLOW);
close(fd);
return KSFT_FAIL;
}
result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON);
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW);
close(fd);
if (result != KSFT_PASS)
return KSFT_FAIL;
}
return KSFT_PASS;
}
int main(int argc, char *argv[])
{
int err;
int item = ARRAY_SIZE(sizes);
err = mte_default_setup();
if (err)
return err;
page_size = getpagesize();
if (!page_size) {
ksft_print_msg("ERR: Unable to get page size\n");
return KSFT_FAIL;
}
sizes[item - 3] = page_size - 1;
sizes[item - 2] = page_size;
sizes[item - 1] = page_size + 1;
/* Register signal handlers */
mte_register_signal(SIGBUS, mte_default_handler);
mte_register_signal(SIGSEGV, mte_default_handler);
/* Set test plan */
ksft_set_plan(22);
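	/*
	 * PSTATE.TCO (Tag Check Override) suppresses tag checking for
	 * loads and stores, so while it is set even sync error mode must
	 * not fault on a tag mismatch.
	 */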
mte_enable_pstate_tco();
evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
"Check anonymous memory with private mapping, sync error mode, mmap memory and tag check off\n");
evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
"Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check off\n");
mte_disable_pstate_tco();
evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_NONE_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
"Check anonymous memory with private mapping, no error mode, mmap memory and tag check off\n");
evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_NONE_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
"Check file memory with private mapping, no error mode, mmap/mprotect memory and tag check off\n");
evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
"Check anonymous memory with private mapping, sync error mode, mmap memory and tag check on\n");
evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
"Check anonymous memory with private mapping, sync error mode, mmap/mprotect memory and tag check on\n");
evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
"Check anonymous memory with shared mapping, sync error mode, mmap memory and tag check on\n");
evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
"Check anonymous memory with shared mapping, sync error mode, mmap/mprotect memory and tag check on\n");
evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
"Check anonymous memory with private mapping, async error mode, mmap memory and tag check on\n");
evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
"Check anonymous memory with private mapping, async error mode, mmap/mprotect memory and tag check on\n");
evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
"Check anonymous memory with shared mapping, async error mode, mmap memory and tag check on\n");
evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
"Check anonymous memory with shared mapping, async error mode, mmap/mprotect memory and tag check on\n");
evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
"Check file memory with private mapping, sync error mode, mmap memory and tag check on\n");
evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
"Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check on\n");
evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
"Check file memory with shared mapping, sync error mode, mmap memory and tag check on\n");
evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
"Check file memory with shared mapping, sync error mode, mmap/mprotect memory and tag check on\n");
evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
"Check file memory with private mapping, async error mode, mmap memory and tag check on\n");
evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
"Check file memory with private mapping, async error mode, mmap/mprotect memory and tag check on\n");
evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
"Check file memory with shared mapping, async error mode, mmap memory and tag check on\n");
evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
"Check file memory with shared mapping, async error mode, mmap/mprotect memory and tag check on\n");
evaluate_test(check_clear_prot_mte_flag(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
"Check clear PROT_MTE flags with private mapping, sync error mode and mmap memory\n");
evaluate_test(check_clear_prot_mte_flag(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE),
"Check clear PROT_MTE flags with private mapping and sync error mode and mmap/mprotect memory\n");
mte_restore_setup();
ksft_print_cnts();
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}
| linux-master | tools/testing/selftests/arm64/mte/check_mmap_options.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>
#include <sys/mman.h>
#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"
#define TEST_UNIT 10
#define PATH_KSM "/sys/kernel/mm/ksm/"
#define MAX_LOOP 4
static size_t page_sz;
static unsigned long ksm_sysfs[5];
static unsigned long read_sysfs(char *str)
{
FILE *f;
unsigned long val = 0;
f = fopen(str, "r");
if (!f) {
ksft_print_msg("ERR: missing %s\n", str);
return 0;
}
if (fscanf(f, "%lu", &val) != 1) {
ksft_print_msg("ERR: parsing %s\n", str);
val = 0;
}
fclose(f);
return val;
}
static void write_sysfs(char *str, unsigned long val)
{
FILE *f;
f = fopen(str, "w");
if (!f) {
ksft_print_msg("ERR: missing %s\n", str);
return;
}
fprintf(f, "%lu", val);
fclose(f);
}
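/*
 * Save the current KSM sysfs tunables and crank them up so that merge
 * passes run quickly during the test; mte_ksm_restore() puts the
 * original values back.
 */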
static void mte_ksm_setup(void)
{
ksm_sysfs[0] = read_sysfs(PATH_KSM "merge_across_nodes");
write_sysfs(PATH_KSM "merge_across_nodes", 1);
ksm_sysfs[1] = read_sysfs(PATH_KSM "sleep_millisecs");
write_sysfs(PATH_KSM "sleep_millisecs", 0);
ksm_sysfs[2] = read_sysfs(PATH_KSM "run");
write_sysfs(PATH_KSM "run", 1);
ksm_sysfs[3] = read_sysfs(PATH_KSM "max_page_sharing");
write_sysfs(PATH_KSM "max_page_sharing", ksm_sysfs[3] + TEST_UNIT);
ksm_sysfs[4] = read_sysfs(PATH_KSM "pages_to_scan");
write_sysfs(PATH_KSM "pages_to_scan", ksm_sysfs[4] + TEST_UNIT);
}
static void mte_ksm_restore(void)
{
write_sysfs(PATH_KSM "merge_across_nodes", ksm_sysfs[0]);
write_sysfs(PATH_KSM "sleep_millisecs", ksm_sysfs[1]);
write_sysfs(PATH_KSM "run", ksm_sysfs[2]);
write_sysfs(PATH_KSM "max_page_sharing", ksm_sysfs[3]);
write_sysfs(PATH_KSM "pages_to_scan", ksm_sysfs[4]);
}
static void mte_ksm_scan(void)
{
int cur_count = read_sysfs(PATH_KSM "full_scans");
int scan_count = cur_count + 1;
int max_loop_count = MAX_LOOP;
while ((cur_count < scan_count) && max_loop_count) {
sleep(1);
cur_count = read_sysfs(PATH_KSM "full_scans");
max_loop_count--;
}
#ifdef DEBUG
ksft_print_msg("INFO: pages_shared=%lu pages_sharing=%lu\n",
read_sysfs(PATH_KSM "pages_shared"),
read_sysfs(PATH_KSM "pages_sharing"));
#endif
}
static int check_madvise_options(int mem_type, int mode, int mapping)
{
char *ptr;
int err, ret;
err = KSFT_FAIL;
if (access(PATH_KSM, F_OK) == -1) {
ksft_print_msg("ERR: Kernel KSM config not enabled\n");
return err;
}
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
ptr = mte_allocate_memory(TEST_UNIT * page_sz, mem_type, mapping, true);
if (check_allocated_memory(ptr, TEST_UNIT * page_sz, mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
/* Insert same data in all the pages */
memset(ptr, 'A', TEST_UNIT * page_sz);
ret = madvise(ptr, TEST_UNIT * page_sz, MADV_MERGEABLE);
if (ret) {
ksft_print_msg("ERR: madvise failed to set MADV_UNMERGEABLE\n");
goto madvise_err;
}
mte_ksm_scan();
/* Tagged pages should not merge */
if ((read_sysfs(PATH_KSM "pages_shared") < 1) ||
(read_sysfs(PATH_KSM "pages_sharing") < (TEST_UNIT - 1)))
err = KSFT_PASS;
madvise_err:
mte_free_memory(ptr, TEST_UNIT * page_sz, mem_type, true);
return err;
}
int main(int argc, char *argv[])
{
int err;
err = mte_default_setup();
if (err)
return err;
page_sz = getpagesize();
if (!page_sz) {
ksft_print_msg("ERR: Unable to get page size\n");
return KSFT_FAIL;
}
/* Register signal handlers */
mte_register_signal(SIGBUS, mte_default_handler);
mte_register_signal(SIGSEGV, mte_default_handler);
/* Set test plan */
ksft_set_plan(4);
/* Enable KSM */
mte_ksm_setup();
evaluate_test(check_madvise_options(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
"Check KSM mte page merge for private mapping, sync mode and mmap memory\n");
evaluate_test(check_madvise_options(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE),
"Check KSM mte page merge for private mapping, async mode and mmap memory\n");
evaluate_test(check_madvise_options(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
"Check KSM mte page merge for shared mapping, sync mode and mmap memory\n");
evaluate_test(check_madvise_options(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED),
"Check KSM mte page merge for shared mapping, async mode and mmap memory\n");
mte_ksm_restore();
mte_restore_setup();
ksft_print_cnts();
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}
| linux-master | tools/testing/selftests/arm64/mte/check_ksm_options.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2022 ARM Limited
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <asm/hwcap.h>
#include "kselftest.h"
static int set_tagged_addr_ctrl(int val)
{
int ret;
ret = prctl(PR_SET_TAGGED_ADDR_CTRL, val, 0, 0, 0);
if (ret < 0)
ksft_print_msg("PR_SET_TAGGED_ADDR_CTRL: failed %d %d (%s)\n",
ret, errno, strerror(errno));
return ret;
}
static int get_tagged_addr_ctrl(void)
{
int ret;
ret = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
if (ret < 0)
ksft_print_msg("PR_GET_TAGGED_ADDR_CTRL failed: %d %d (%s)\n",
ret, errno, strerror(errno));
return ret;
}
/*
* Read the current mode without having done any configuration, should
* run first.
*/
void check_basic_read(void)
{
int ret;
ret = get_tagged_addr_ctrl();
if (ret < 0) {
ksft_test_result_fail("check_basic_read\n");
return;
}
if (ret & PR_MTE_TCF_SYNC)
ksft_print_msg("SYNC enabled\n");
if (ret & PR_MTE_TCF_ASYNC)
ksft_print_msg("ASYNC enabled\n");
/* Any configuration is valid */
ksft_test_result_pass("check_basic_read\n");
}
/*
* Attempt to set a specified combination of modes.
*/
void set_mode_test(const char *name, int hwcap2, int mask)
{
int ret;
if ((getauxval(AT_HWCAP2) & hwcap2) != hwcap2) {
ksft_test_result_skip("%s\n", name);
return;
}
ret = set_tagged_addr_ctrl(mask);
if (ret < 0) {
ksft_test_result_fail("%s\n", name);
return;
}
ret = get_tagged_addr_ctrl();
if (ret < 0) {
ksft_test_result_fail("%s\n", name);
return;
}
if ((ret & PR_MTE_TCF_MASK) == mask) {
ksft_test_result_pass("%s\n", name);
} else {
ksft_print_msg("Got %x, expected %x\n",
(ret & PR_MTE_TCF_MASK), mask);
ksft_test_result_fail("%s\n", name);
}
}
struct mte_mode {
int mask;
int hwcap2;
const char *name;
} mte_modes[] = {
{ PR_MTE_TCF_NONE, 0, "NONE" },
{ PR_MTE_TCF_SYNC, HWCAP2_MTE, "SYNC" },
{ PR_MTE_TCF_ASYNC, HWCAP2_MTE, "ASYNC" },
{ PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC, HWCAP2_MTE, "SYNC+ASYNC" },
};
int main(void)
{
int i;
ksft_print_header();
ksft_set_plan(5);
check_basic_read();
for (i = 0; i < ARRAY_SIZE(mte_modes); i++)
set_mode_test(mte_modes[i].name, mte_modes[i].hwcap2,
mte_modes[i].mask);
ksft_print_cnts();
return 0;
}
| linux-master | tools/testing/selftests/arm64/mte/check_prctl.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited
#define _GNU_SOURCE
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"
#define OVERFLOW_RANGE MT_GRANULE_SIZE
static int sizes[] = {
1, 555, 1033, MT_GRANULE_SIZE - 1, MT_GRANULE_SIZE,
	/* page size - 1 */ 0, /* page size */ 0, /* page size + 1 */ 0
};
enum mte_block_test_alloc {
UNTAGGED_TAGGED,
TAGGED_UNTAGGED,
TAGGED_TAGGED,
BLOCK_ALLOC_MAX,
};
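/*
 * The buffer sizes above deliberately straddle the MTE granule and page
 * boundaries; the three trailing zeroes are patched with page-size
 * derived values in main().
 */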
static int check_buffer_by_byte(int mem_type, int mode)
{
char *ptr;
int i, j, item;
bool err;
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
item = ARRAY_SIZE(sizes);
for (i = 0; i < item; i++) {
ptr = (char *)mte_allocate_memory(sizes[i], mem_type, 0, true);
if (check_allocated_memory(ptr, sizes[i], mem_type, true) != KSFT_PASS)
return KSFT_FAIL;
mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[i]);
/* Set some value in tagged memory */
for (j = 0; j < sizes[i]; j++)
ptr[j] = '1';
mte_wait_after_trig();
err = cur_mte_cxt.fault_valid;
		/* Check whether the buffer is filled. */
for (j = 0; j < sizes[i] && !err; j++) {
if (ptr[j] != '1')
err = true;
}
mte_free_memory((void *)ptr, sizes[i], mem_type, true);
if (err)
break;
}
if (!err)
return KSFT_PASS;
else
return KSFT_FAIL;
}
static int check_buffer_underflow_by_byte(int mem_type, int mode,
int underflow_range)
{
char *ptr;
int i, j, item, last_index;
bool err;
char *und_ptr = NULL;
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
item = ARRAY_SIZE(sizes);
for (i = 0; i < item; i++) {
ptr = (char *)mte_allocate_memory_tag_range(sizes[i], mem_type, 0,
underflow_range, 0);
if (check_allocated_memory_range(ptr, sizes[i], mem_type,
underflow_range, 0) != KSFT_PASS)
return KSFT_FAIL;
mte_initialize_current_context(mode, (uintptr_t)ptr, -underflow_range);
last_index = 0;
/* Set some value in tagged memory and make the buffer underflow */
for (j = sizes[i] - 1; (j >= -underflow_range) &&
(!cur_mte_cxt.fault_valid); j--) {
ptr[j] = '1';
last_index = j;
}
mte_wait_after_trig();
err = false;
/* Check whether the buffer is filled */
for (j = 0; j < sizes[i]; j++) {
if (ptr[j] != '1') {
err = true;
ksft_print_msg("Buffer is not filled at index:%d of ptr:0x%lx\n",
j, ptr);
break;
}
}
if (err)
goto check_buffer_underflow_by_byte_err;
switch (mode) {
case MTE_NONE_ERR:
if (cur_mte_cxt.fault_valid == true || last_index != -underflow_range) {
err = true;
break;
}
			/* There was no fault, so the underflow area should be filled */
und_ptr = (char *) MT_CLEAR_TAG((size_t) ptr - underflow_range);
for (j = 0 ; j < underflow_range; j++) {
if (und_ptr[j] != '1') {
err = true;
break;
}
}
break;
case MTE_ASYNC_ERR:
/* Imprecise fault should occur otherwise return error */
if (cur_mte_cxt.fault_valid == false) {
err = true;
break;
}
/*
* The imprecise fault is checked after the write to the buffer,
* so the underflow area before the fault should be filled.
*/
und_ptr = (char *) MT_CLEAR_TAG((size_t) ptr);
for (j = last_index ; j < 0 ; j++) {
if (und_ptr[j] != '1') {
err = true;
break;
}
}
break;
case MTE_SYNC_ERR:
/* Precise fault should occur otherwise return error */
if (!cur_mte_cxt.fault_valid || (last_index != (-1))) {
err = true;
break;
}
/* Underflow area should not be filled */
und_ptr = (char *) MT_CLEAR_TAG((size_t) ptr);
if (und_ptr[-1] == '1')
err = true;
break;
default:
err = true;
break;
}
check_buffer_underflow_by_byte_err:
mte_free_memory_tag_range((void *)ptr, sizes[i], mem_type, underflow_range, 0);
if (err)
break;
}
return (err ? KSFT_FAIL : KSFT_PASS);
}
static int check_buffer_overflow_by_byte(int mem_type, int mode,
int overflow_range)
{
char *ptr;
int i, j, item, last_index;
bool err;
size_t tagged_size, overflow_size;
char *over_ptr = NULL;
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
item = ARRAY_SIZE(sizes);
for (i = 0; i < item; i++) {
ptr = (char *)mte_allocate_memory_tag_range(sizes[i], mem_type, 0,
0, overflow_range);
if (check_allocated_memory_range(ptr, sizes[i], mem_type,
0, overflow_range) != KSFT_PASS)
return KSFT_FAIL;
tagged_size = MT_ALIGN_UP(sizes[i]);
mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[i] + overflow_range);
		/* Set some value in tagged memory and make the buffer overflow */
for (j = 0, last_index = 0 ; (j < (sizes[i] + overflow_range)) &&
(cur_mte_cxt.fault_valid == false); j++) {
ptr[j] = '1';
last_index = j;
}
mte_wait_after_trig();
err = false;
/* Check whether the buffer is filled */
for (j = 0; j < sizes[i]; j++) {
if (ptr[j] != '1') {
err = true;
ksft_print_msg("Buffer is not filled at index:%d of ptr:0x%lx\n",
j, ptr);
break;
}
}
if (err)
goto check_buffer_overflow_by_byte_err;
overflow_size = overflow_range - (tagged_size - sizes[i]);
switch (mode) {
case MTE_NONE_ERR:
if ((cur_mte_cxt.fault_valid == true) ||
(last_index != (sizes[i] + overflow_range - 1))) {
err = true;
break;
}
			/* There was no fault, so the overflow area should be filled */
over_ptr = (char *) MT_CLEAR_TAG((size_t) ptr + tagged_size);
for (j = 0 ; j < overflow_size; j++) {
if (over_ptr[j] != '1') {
err = true;
break;
}
}
break;
case MTE_ASYNC_ERR:
/* Imprecise fault should occur otherwise return error */
if (cur_mte_cxt.fault_valid == false) {
err = true;
break;
}
/*
* The imprecise fault is checked after the write to the buffer,
* so the overflow area should be filled before the fault.
*/
over_ptr = (char *) MT_CLEAR_TAG((size_t) ptr);
for (j = tagged_size ; j < last_index; j++) {
if (over_ptr[j] != '1') {
err = true;
break;
}
}
break;
case MTE_SYNC_ERR:
/* Precise fault should occur otherwise return error */
if (!cur_mte_cxt.fault_valid || (last_index != tagged_size)) {
err = true;
break;
}
			/* Overflow area should not be filled */
over_ptr = (char *) MT_CLEAR_TAG((size_t) ptr + tagged_size);
for (j = 0 ; j < overflow_size; j++) {
if (over_ptr[j] == '1')
err = true;
}
break;
default:
err = true;
break;
}
check_buffer_overflow_by_byte_err:
mte_free_memory_tag_range((void *)ptr, sizes[i], mem_type, 0, overflow_range);
if (err)
break;
}
return (err ? KSFT_FAIL : KSFT_PASS);
}
static int check_buffer_by_block_iterate(int mem_type, int mode, size_t size)
{
char *src, *dst;
int j, result = KSFT_PASS;
enum mte_block_test_alloc alloc_type = UNTAGGED_TAGGED;
for (alloc_type = UNTAGGED_TAGGED; alloc_type < (int) BLOCK_ALLOC_MAX; alloc_type++) {
switch (alloc_type) {
case UNTAGGED_TAGGED:
src = (char *)mte_allocate_memory(size, mem_type, 0, false);
if (check_allocated_memory(src, size, mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
dst = (char *)mte_allocate_memory(size, mem_type, 0, true);
if (check_allocated_memory(dst, size, mem_type, true) != KSFT_PASS) {
mte_free_memory((void *)src, size, mem_type, false);
return KSFT_FAIL;
}
break;
case TAGGED_UNTAGGED:
dst = (char *)mte_allocate_memory(size, mem_type, 0, false);
if (check_allocated_memory(dst, size, mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
src = (char *)mte_allocate_memory(size, mem_type, 0, true);
if (check_allocated_memory(src, size, mem_type, true) != KSFT_PASS) {
mte_free_memory((void *)dst, size, mem_type, false);
return KSFT_FAIL;
}
break;
case TAGGED_TAGGED:
src = (char *)mte_allocate_memory(size, mem_type, 0, true);
if (check_allocated_memory(src, size, mem_type, true) != KSFT_PASS)
return KSFT_FAIL;
dst = (char *)mte_allocate_memory(size, mem_type, 0, true);
if (check_allocated_memory(dst, size, mem_type, true) != KSFT_PASS) {
mte_free_memory((void *)src, size, mem_type, true);
return KSFT_FAIL;
}
break;
default:
return KSFT_FAIL;
}
cur_mte_cxt.fault_valid = false;
result = KSFT_PASS;
mte_initialize_current_context(mode, (uintptr_t)dst, size);
/* Set some value in memory and copy*/
memset((void *)src, (int)'1', size);
memcpy((void *)dst, (void *)src, size);
mte_wait_after_trig();
if (cur_mte_cxt.fault_valid) {
result = KSFT_FAIL;
goto check_buffer_by_block_err;
}
		/* Check whether the buffer is filled. */
for (j = 0; j < size; j++) {
if (src[j] != dst[j] || src[j] != '1') {
result = KSFT_FAIL;
break;
}
}
check_buffer_by_block_err:
mte_free_memory((void *)src, size, mem_type,
MT_FETCH_TAG((uintptr_t)src) ? true : false);
mte_free_memory((void *)dst, size, mem_type,
MT_FETCH_TAG((uintptr_t)dst) ? true : false);
if (result != KSFT_PASS)
return result;
}
return result;
}
static int check_buffer_by_block(int mem_type, int mode)
{
int i, item, result = KSFT_PASS;
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
item = ARRAY_SIZE(sizes);
cur_mte_cxt.fault_valid = false;
for (i = 0; i < item; i++) {
result = check_buffer_by_block_iterate(mem_type, mode, sizes[i]);
if (result != KSFT_PASS)
break;
}
return result;
}
static int compare_memory_tags(char *ptr, size_t size, int tag)
{
int i, new_tag;
for (i = 0 ; i < size ; i += MT_GRANULE_SIZE) {
new_tag = MT_FETCH_TAG((uintptr_t)(mte_get_tag_address(ptr + i)));
if (tag != new_tag) {
ksft_print_msg("FAIL: child mte tag mismatch\n");
return KSFT_FAIL;
}
}
return KSFT_PASS;
}
static int check_memory_initial_tags(int mem_type, int mode, int mapping)
{
char *ptr;
int run, fd;
int total = ARRAY_SIZE(sizes);
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
for (run = 0; run < total; run++) {
/* check initial tags for anonymous mmap */
ptr = (char *)mte_allocate_memory(sizes[run], mem_type, mapping, false);
if (check_allocated_memory(ptr, sizes[run], mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
if (compare_memory_tags(ptr, sizes[run], 0) != KSFT_PASS) {
mte_free_memory((void *)ptr, sizes[run], mem_type, false);
return KSFT_FAIL;
}
mte_free_memory((void *)ptr, sizes[run], mem_type, false);
/* check initial tags for file mmap */
fd = create_temp_file();
if (fd == -1)
return KSFT_FAIL;
ptr = (char *)mte_allocate_file_memory(sizes[run], mem_type, mapping, false, fd);
if (check_allocated_memory(ptr, sizes[run], mem_type, false) != KSFT_PASS) {
close(fd);
return KSFT_FAIL;
}
if (compare_memory_tags(ptr, sizes[run], 0) != KSFT_PASS) {
mte_free_memory((void *)ptr, sizes[run], mem_type, false);
close(fd);
return KSFT_FAIL;
}
mte_free_memory((void *)ptr, sizes[run], mem_type, false);
close(fd);
}
return KSFT_PASS;
}
int main(int argc, char *argv[])
{
int err;
size_t page_size = getpagesize();
int item = ARRAY_SIZE(sizes);
sizes[item - 3] = page_size - 1;
sizes[item - 2] = page_size;
sizes[item - 1] = page_size + 1;
err = mte_default_setup();
if (err)
return err;
/* Register SIGSEGV handler */
mte_register_signal(SIGSEGV, mte_default_handler);
/* Set test plan */
ksft_set_plan(20);
/* Buffer by byte tests */
evaluate_test(check_buffer_by_byte(USE_MMAP, MTE_SYNC_ERR),
"Check buffer correctness by byte with sync err mode and mmap memory\n");
evaluate_test(check_buffer_by_byte(USE_MMAP, MTE_ASYNC_ERR),
"Check buffer correctness by byte with async err mode and mmap memory\n");
evaluate_test(check_buffer_by_byte(USE_MPROTECT, MTE_SYNC_ERR),
"Check buffer correctness by byte with sync err mode and mmap/mprotect memory\n");
evaluate_test(check_buffer_by_byte(USE_MPROTECT, MTE_ASYNC_ERR),
"Check buffer correctness by byte with async err mode and mmap/mprotect memory\n");
/* Check buffer underflow with underflow size as 16 */
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_SYNC_ERR, MT_GRANULE_SIZE),
"Check buffer write underflow by byte with sync mode and mmap memory\n");
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_ASYNC_ERR, MT_GRANULE_SIZE),
"Check buffer write underflow by byte with async mode and mmap memory\n");
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_NONE_ERR, MT_GRANULE_SIZE),
"Check buffer write underflow by byte with tag check fault ignore and mmap memory\n");
/* Check buffer underflow with underflow size as page size */
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_SYNC_ERR, page_size),
"Check buffer write underflow by byte with sync mode and mmap memory\n");
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_ASYNC_ERR, page_size),
"Check buffer write underflow by byte with async mode and mmap memory\n");
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_NONE_ERR, page_size),
"Check buffer write underflow by byte with tag check fault ignore and mmap memory\n");
/* Check buffer overflow with overflow size as 16 */
evaluate_test(check_buffer_overflow_by_byte(USE_MMAP, MTE_SYNC_ERR, MT_GRANULE_SIZE),
"Check buffer write overflow by byte with sync mode and mmap memory\n");
evaluate_test(check_buffer_overflow_by_byte(USE_MMAP, MTE_ASYNC_ERR, MT_GRANULE_SIZE),
"Check buffer write overflow by byte with async mode and mmap memory\n");
evaluate_test(check_buffer_overflow_by_byte(USE_MMAP, MTE_NONE_ERR, MT_GRANULE_SIZE),
"Check buffer write overflow by byte with tag fault ignore mode and mmap memory\n");
/* Buffer by block tests */
evaluate_test(check_buffer_by_block(USE_MMAP, MTE_SYNC_ERR),
"Check buffer write correctness by block with sync mode and mmap memory\n");
evaluate_test(check_buffer_by_block(USE_MMAP, MTE_ASYNC_ERR),
"Check buffer write correctness by block with async mode and mmap memory\n");
evaluate_test(check_buffer_by_block(USE_MMAP, MTE_NONE_ERR),
"Check buffer write correctness by block with tag fault ignore and mmap memory\n");
/* Initial tags are supposed to be 0 */
evaluate_test(check_memory_initial_tags(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
"Check initial tags with private mapping, sync error mode and mmap memory\n");
evaluate_test(check_memory_initial_tags(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE),
"Check initial tags with private mapping, sync error mode and mmap/mprotect memory\n");
evaluate_test(check_memory_initial_tags(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
"Check initial tags with shared mapping, sync error mode and mmap memory\n");
evaluate_test(check_memory_initial_tags(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED),
"Check initial tags with shared mapping, sync error mode and mmap/mprotect memory\n");
mte_restore_setup();
ksft_print_cnts();
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}
| linux-master | tools/testing/selftests/arm64/mte/check_buffer_fill.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>
#include <sys/uio.h>
#include <sys/mman.h>
#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"
static size_t page_sz;
#define TEST_NAME_MAX 100
enum test_type {
READ_TEST,
WRITE_TEST,
READV_TEST,
WRITEV_TEST,
LAST_TEST,
};
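/*
 * Map a two-page buffer, retag part of it, then have the kernel access
 * it on our behalf via read/write/readv/writev. A sync mode tag
 * mismatch is expected to surface as a short syscall result (no signal
 * reaches this process), while in async mode the access may complete.
 */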
static int check_usermem_access_fault(int mem_type, int mode, int mapping,
int tag_offset, int tag_len,
enum test_type test_type)
{
int fd, i, err;
char val = 'A';
ssize_t len, syscall_len;
void *ptr, *ptr_next;
int fileoff, ptroff, size;
int sizes[] = {1, 2, 3, 8, 16, 32, 4096, page_sz};
err = KSFT_PASS;
len = 2 * page_sz;
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
fd = create_temp_file();
if (fd == -1)
return KSFT_FAIL;
for (i = 0; i < len; i++)
if (write(fd, &val, sizeof(val)) != sizeof(val))
return KSFT_FAIL;
	lseek(fd, 0, SEEK_SET);
ptr = mte_allocate_memory(len, mem_type, mapping, true);
if (check_allocated_memory(ptr, len, mem_type, true) != KSFT_PASS) {
close(fd);
return KSFT_FAIL;
}
mte_initialize_current_context(mode, (uintptr_t)ptr, len);
/* Copy from file into buffer with valid tag */
syscall_len = read(fd, ptr, len);
mte_wait_after_trig();
if (cur_mte_cxt.fault_valid || syscall_len < len)
goto usermem_acc_err;
/* Verify same pattern is read */
for (i = 0; i < len; i++)
if (*(char *)(ptr + i) != val)
break;
if (i < len)
goto usermem_acc_err;
if (!tag_len)
tag_len = len - tag_offset;
	/* Tag part of the buffer with a different tag value */
ptr_next = (void *)((unsigned long)ptr + tag_offset);
ptr_next = mte_insert_new_tag(ptr_next);
mte_set_tag_address_range(ptr_next, tag_len);
for (fileoff = 0; fileoff < 16; fileoff++) {
for (ptroff = 0; ptroff < 16; ptroff++) {
for (i = 0; i < ARRAY_SIZE(sizes); i++) {
size = sizes[i];
				lseek(fd, 0, SEEK_SET);
/* perform file operation on buffer with invalid tag */
switch (test_type) {
case READ_TEST:
syscall_len = read(fd, ptr + ptroff, size);
break;
case WRITE_TEST:
syscall_len = write(fd, ptr + ptroff, size);
break;
case READV_TEST: {
struct iovec iov[1];
iov[0].iov_base = ptr + ptroff;
iov[0].iov_len = size;
syscall_len = readv(fd, iov, 1);
break;
}
case WRITEV_TEST: {
struct iovec iov[1];
iov[0].iov_base = ptr + ptroff;
iov[0].iov_len = size;
syscall_len = writev(fd, iov, 1);
break;
}
case LAST_TEST:
goto usermem_acc_err;
}
mte_wait_after_trig();
				/*
				 * A kernel access through a pointer with a
				 * mismatched tag should fail in sync mode,
				 * surfacing as a short syscall result rather
				 * than a fault in this process; in async mode
				 * the access may still complete in full, per
				 * the arm64 kernel's MTE userspace support.
				 */
if (cur_mte_cxt.fault_valid) {
goto usermem_acc_err;
}
if (mode == MTE_SYNC_ERR && syscall_len < len) {
/* test passed */
} else if (mode == MTE_ASYNC_ERR && syscall_len == size) {
/* test passed */
} else {
goto usermem_acc_err;
}
}
}
}
goto exit;
usermem_acc_err:
err = KSFT_FAIL;
exit:
mte_free_memory((void *)ptr, len, mem_type, true);
close(fd);
return err;
}
static void format_test_name(char *name, int name_len, int type, int sync,
			     int map, int len, int offset)
{
	const char *test_type;
	const char *mte_type;
	const char *map_type;
switch (type) {
case READ_TEST:
test_type = "read";
break;
case WRITE_TEST:
test_type = "write";
break;
case READV_TEST:
test_type = "readv";
break;
case WRITEV_TEST:
test_type = "writev";
break;
default:
assert(0);
break;
}
switch (sync) {
case MTE_SYNC_ERR:
mte_type = "MTE_SYNC_ERR";
break;
case MTE_ASYNC_ERR:
mte_type = "MTE_ASYNC_ERR";
break;
default:
assert(0);
break;
}
switch (map) {
case MAP_SHARED:
map_type = "MAP_SHARED";
break;
case MAP_PRIVATE:
map_type = "MAP_PRIVATE";
break;
default:
assert(0);
break;
}
snprintf(name, name_len,
"test type: %s, %s, %s, tag len: %d, tag offset: %d\n",
test_type, mte_type, map_type, len, offset);
}
int main(int argc, char *argv[])
{
int err;
int t, s, m, l, o;
int mte_sync[] = {MTE_SYNC_ERR, MTE_ASYNC_ERR};
int maps[] = {MAP_SHARED, MAP_PRIVATE};
int tag_lens[] = {0, MT_GRANULE_SIZE};
	int tag_offsets[2];
	char test_name[TEST_NAME_MAX];
	page_sz = getpagesize();
	if (!page_sz) {
		ksft_print_msg("ERR: Unable to get page size\n");
		return KSFT_FAIL;
	}
	/*
	 * page_sz is only known at run time, so an initializer cannot
	 * use it; fill tag_offsets once it is valid.
	 */
	tag_offsets[0] = page_sz;
	tag_offsets[1] = MT_GRANULE_SIZE;
err = mte_default_setup();
if (err)
return err;
/* Register signal handlers */
mte_register_signal(SIGSEGV, mte_default_handler);
/* Set test plan */
ksft_set_plan(64);
for (t = 0; t < LAST_TEST; t++) {
for (s = 0; s < ARRAY_SIZE(mte_sync); s++) {
for (m = 0; m < ARRAY_SIZE(maps); m++) {
for (l = 0; l < ARRAY_SIZE(tag_lens); l++) {
for (o = 0; o < ARRAY_SIZE(tag_offsets); o++) {
int sync = mte_sync[s];
int map = maps[m];
int offset = tag_offsets[o];
int tag_len = tag_lens[l];
int res = check_usermem_access_fault(USE_MMAP, sync,
map, offset,
tag_len, t);
format_test_name(test_name, TEST_NAME_MAX,
t, sync, map, tag_len, offset);
evaluate_test(res, test_name);
}
}
}
}
}
mte_restore_setup();
ksft_print_cnts();
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}
| linux-master | tools/testing/selftests/arm64/mte/check_user_mem.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited
#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"
#define NUM_ITERATIONS 1024
#define MAX_THREADS 5
#define THREAD_ITERATIONS 1000
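/*
 * Each thread picks a random tag inclusion mask and tag check mode and
 * repeatedly writes it with prctl() and reads it back. The setting is
 * per thread, so a readback mismatch means the state (held in GCR_EL1
 * and friends) was not preserved across a context switch.
 */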
void *execute_thread(void *x)
{
pid_t pid = *((pid_t *)x);
pid_t tid = gettid();
uint64_t prctl_tag_mask;
uint64_t prctl_set;
uint64_t prctl_get;
uint64_t prctl_tcf;
srand(time(NULL) ^ (pid << 16) ^ (tid << 16));
prctl_tag_mask = rand() & 0xffff;
if (prctl_tag_mask % 2)
prctl_tcf = PR_MTE_TCF_SYNC;
else
prctl_tcf = PR_MTE_TCF_ASYNC;
prctl_set = PR_TAGGED_ADDR_ENABLE | prctl_tcf | (prctl_tag_mask << PR_MTE_TAG_SHIFT);
for (int j = 0; j < THREAD_ITERATIONS; j++) {
if (prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_set, 0, 0, 0)) {
perror("prctl() failed");
goto fail;
}
prctl_get = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
if (prctl_set != prctl_get) {
ksft_print_msg("Error: prctl_set: 0x%lx != prctl_get: 0x%lx\n",
prctl_set, prctl_get);
goto fail;
}
}
return (void *)KSFT_PASS;
fail:
return (void *)KSFT_FAIL;
}
int execute_test(pid_t pid)
{
pthread_t thread_id[MAX_THREADS];
	void *thread_ret[MAX_THREADS];
for (int i = 0; i < MAX_THREADS; i++)
pthread_create(&thread_id[i], NULL,
execute_thread, (void *)&pid);
for (int i = 0; i < MAX_THREADS; i++)
		pthread_join(thread_id[i], &thread_ret[i]);
for (int i = 0; i < MAX_THREADS; i++)
		if ((intptr_t)thread_ret[i] == KSFT_FAIL)
return KSFT_FAIL;
return KSFT_PASS;
}
int mte_gcr_fork_test(void)
{
pid_t pid;
int results[NUM_ITERATIONS];
pid_t cpid;
int res;
for (int i = 0; i < NUM_ITERATIONS; i++) {
pid = fork();
if (pid < 0)
return KSFT_FAIL;
if (pid == 0) {
cpid = getpid();
res = execute_test(cpid);
exit(res);
}
}
for (int i = 0; i < NUM_ITERATIONS; i++) {
wait(&res);
if (WIFEXITED(res))
results[i] = WEXITSTATUS(res);
else
--i;
}
for (int i = 0; i < NUM_ITERATIONS; i++)
if (results[i] == KSFT_FAIL)
return KSFT_FAIL;
return KSFT_PASS;
}
int main(int argc, char *argv[])
{
int err;
err = mte_default_setup();
if (err)
return err;
ksft_set_plan(1);
evaluate_test(mte_gcr_fork_test(),
"Verify that GCR_EL1 is set correctly on context switch\n");
mte_restore_setup();
ksft_print_cnts();
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}
| linux-master | tools/testing/selftests/arm64/mte/check_gcr_el1_cswitch.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited
#include <fcntl.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/auxvec.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <asm/hwcap.h>
#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"
#define INIT_BUFFER_SIZE 256
struct mte_fault_cxt cur_mte_cxt;
static unsigned int mte_cur_mode;
static unsigned int mte_cur_pstate_tco;
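/*
 * Common SIGSEGV/SIGBUS handler: a fault is only recorded as valid when
 * its si_code matches the one armed via mte_initialize_current_context()
 * and the faulting address lies within trig_range of trig_addr (the
 * range may be negative for underflow checks).
 */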
void mte_default_handler(int signum, siginfo_t *si, void *uc)
{
unsigned long addr = (unsigned long)si->si_addr;
if (signum == SIGSEGV) {
#ifdef DEBUG
ksft_print_msg("INFO: SIGSEGV signal at pc=%lx, fault addr=%lx, si_code=%lx\n",
((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
#endif
if (si->si_code == SEGV_MTEAERR) {
if (cur_mte_cxt.trig_si_code == si->si_code)
cur_mte_cxt.fault_valid = true;
else
ksft_print_msg("Got unexpected SEGV_MTEAERR at pc=$lx, fault addr=%lx\n",
((ucontext_t *)uc)->uc_mcontext.pc,
addr);
return;
}
/* Compare the context for precise error */
else if (si->si_code == SEGV_MTESERR) {
if (cur_mte_cxt.trig_si_code == si->si_code &&
((cur_mte_cxt.trig_range >= 0 &&
addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
(cur_mte_cxt.trig_range < 0 &&
addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)))) {
cur_mte_cxt.fault_valid = true;
				/* Skip the faulting instruction; A64 instructions are 4 bytes */
((ucontext_t *)uc)->uc_mcontext.pc += 4;
} else {
ksft_print_msg("Invalid MTE synchronous exception caught!\n");
exit(1);
}
} else {
ksft_print_msg("Unknown SIGSEGV exception caught!\n");
exit(1);
}
} else if (signum == SIGBUS) {
ksft_print_msg("INFO: SIGBUS signal at pc=%lx, fault addr=%lx, si_code=%lx\n",
((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
if ((cur_mte_cxt.trig_range >= 0 &&
addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
(cur_mte_cxt.trig_range < 0 &&
addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range))) {
cur_mte_cxt.fault_valid = true;
			/* Skip the faulting instruction; A64 instructions are 4 bytes */
((ucontext_t *)uc)->uc_mcontext.pc += 4;
}
}
}
void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *))
{
struct sigaction sa;
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO;
sigemptyset(&sa.sa_mask);
sigaction(signal, &sa, NULL);
}
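/*
 * In async mode tag check faults are accumulated and only reported at
 * certain points, such as the next context switch; yield the CPU so any
 * pending SEGV_MTEAERR is delivered before the caller inspects
 * cur_mte_cxt.
 */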
void mte_wait_after_trig(void)
{
sched_yield();
}
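/*
 * Tag a granule-aligned range: derive a new logical tag for the pointer
 * (the helper honours the tag inclusion mask configured via prctl) and
 * propagate it to every granule of the allocation.
 */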
void *mte_insert_tags(void *ptr, size_t size)
{
void *tag_ptr;
int align_size;
if (!ptr || (unsigned long)(ptr) & MT_ALIGN_GRANULE) {
ksft_print_msg("FAIL: Addr=%lx: invalid\n", ptr);
return NULL;
}
align_size = MT_ALIGN_UP(size);
tag_ptr = mte_insert_random_tag(ptr);
mte_set_tag_address_range(tag_ptr, align_size);
return tag_ptr;
}
void mte_clear_tags(void *ptr, size_t size)
{
if (!ptr || (unsigned long)(ptr) & MT_ALIGN_GRANULE) {
ksft_print_msg("FAIL: Addr=%lx: invalid\n", ptr);
return;
}
size = MT_ALIGN_UP(size);
ptr = (void *)MT_CLEAR_TAG((unsigned long)ptr);
mte_clear_tag_address_range(ptr, size);
}
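/*
 * Common allocator: USE_MMAP maps the region with PROT_MTE directly,
 * USE_MPROTECT maps it first and turns PROT_MTE on afterwards, and the
 * optional ranges before/after the buffer stay accessible so the tests
 * can deliberately under/overflow into them.
 */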
static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping,
size_t range_before, size_t range_after,
bool tags, int fd)
{
void *ptr;
int prot_flag, map_flag;
size_t entire_size = size + range_before + range_after;
switch (mem_type) {
	case USE_MALLOC:
		ptr = malloc(entire_size);
		if (!ptr)
			return NULL;
		return ptr + range_before;
case USE_MMAP:
case USE_MPROTECT:
break;
default:
ksft_print_msg("FAIL: Invalid allocate request\n");
return NULL;
}
prot_flag = PROT_READ | PROT_WRITE;
if (mem_type == USE_MMAP)
prot_flag |= PROT_MTE;
map_flag = mapping;
if (fd == -1)
map_flag = MAP_ANONYMOUS | map_flag;
if (!(mapping & MAP_SHARED))
map_flag |= MAP_PRIVATE;
ptr = mmap(NULL, entire_size, prot_flag, map_flag, fd, 0);
if (ptr == MAP_FAILED) {
ksft_print_msg("FAIL: mmap allocation\n");
return NULL;
}
if (mem_type == USE_MPROTECT) {
if (mprotect(ptr, entire_size, prot_flag | PROT_MTE)) {
			munmap(ptr, entire_size);
ksft_print_msg("FAIL: mprotect PROT_MTE property\n");
return NULL;
}
}
if (tags)
ptr = mte_insert_tags(ptr + range_before, size);
return ptr;
}
void *mte_allocate_memory_tag_range(size_t size, int mem_type, int mapping,
size_t range_before, size_t range_after)
{
return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
range_after, true, -1);
}
void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags)
{
return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, -1);
}
void *mte_allocate_file_memory(size_t size, int mem_type, int mapping, bool tags, int fd)
{
int index;
char buffer[INIT_BUFFER_SIZE];
if (mem_type != USE_MPROTECT && mem_type != USE_MMAP) {
ksft_print_msg("FAIL: Invalid mmap file request\n");
return NULL;
}
/* Initialize the file for mappable size */
lseek(fd, 0, SEEK_SET);
for (index = INIT_BUFFER_SIZE; index < size; index += INIT_BUFFER_SIZE) {
if (write(fd, buffer, INIT_BUFFER_SIZE) != INIT_BUFFER_SIZE) {
perror("initialising buffer");
return NULL;
}
}
index -= INIT_BUFFER_SIZE;
if (write(fd, buffer, size - index) != size - index) {
perror("initialising buffer");
return NULL;
}
return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, fd);
}
void *mte_allocate_file_memory_tag_range(size_t size, int mem_type, int mapping,
size_t range_before, size_t range_after, int fd)
{
int index;
char buffer[INIT_BUFFER_SIZE];
int map_size = size + range_before + range_after;
if (mem_type != USE_MPROTECT && mem_type != USE_MMAP) {
ksft_print_msg("FAIL: Invalid mmap file request\n");
return NULL;
}
/* Initialize the file for mappable size */
lseek(fd, 0, SEEK_SET);
for (index = INIT_BUFFER_SIZE; index < map_size; index += INIT_BUFFER_SIZE)
if (write(fd, buffer, INIT_BUFFER_SIZE) != INIT_BUFFER_SIZE) {
perror("initialising buffer");
return NULL;
}
index -= INIT_BUFFER_SIZE;
if (write(fd, buffer, map_size - index) != map_size - index) {
perror("initialising buffer");
return NULL;
}
return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
range_after, true, fd);
}
static void __mte_free_memory_range(void *ptr, size_t size, int mem_type,
size_t range_before, size_t range_after, bool tags)
{
switch (mem_type) {
case USE_MALLOC:
free(ptr - range_before);
break;
case USE_MMAP:
case USE_MPROTECT:
if (tags)
mte_clear_tags(ptr, size);
munmap(ptr - range_before, size + range_before + range_after);
break;
default:
ksft_print_msg("FAIL: Invalid free request\n");
break;
}
}
void mte_free_memory_tag_range(void *ptr, size_t size, int mem_type,
size_t range_before, size_t range_after)
{
__mte_free_memory_range(ptr, size, mem_type, range_before, range_after, true);
}
void mte_free_memory(void *ptr, size_t size, int mem_type, bool tags)
{
__mte_free_memory_range(ptr, size, mem_type, 0, 0, tags);
}
void mte_initialize_current_context(int mode, uintptr_t ptr, ssize_t range)
{
cur_mte_cxt.fault_valid = false;
cur_mte_cxt.trig_addr = ptr;
cur_mte_cxt.trig_range = range;
if (mode == MTE_SYNC_ERR)
cur_mte_cxt.trig_si_code = SEGV_MTESERR;
else if (mode == MTE_ASYNC_ERR)
cur_mte_cxt.trig_si_code = SEGV_MTEAERR;
else
cur_mte_cxt.trig_si_code = 0;
}
int mte_switch_mode(int mte_option, unsigned long incl_mask)
{
unsigned long en = 0;
switch (mte_option) {
case MTE_NONE_ERR:
case MTE_SYNC_ERR:
case MTE_ASYNC_ERR:
break;
default:
ksft_print_msg("FAIL: Invalid MTE option %x\n", mte_option);
return -EINVAL;
}
if (incl_mask & ~MT_INCLUDE_TAG_MASK) {
ksft_print_msg("FAIL: Invalid incl_mask %lx\n", incl_mask);
return -EINVAL;
}
en = PR_TAGGED_ADDR_ENABLE;
switch (mte_option) {
case MTE_SYNC_ERR:
en |= PR_MTE_TCF_SYNC;
break;
case MTE_ASYNC_ERR:
en |= PR_MTE_TCF_ASYNC;
break;
case MTE_NONE_ERR:
en |= PR_MTE_TCF_NONE;
break;
}
en |= (incl_mask << PR_MTE_TAG_SHIFT);
/* Enable address tagging ABI, mte error reporting mode and tag inclusion mask. */
if (prctl(PR_SET_TAGGED_ADDR_CTRL, en, 0, 0, 0) != 0) {
ksft_print_msg("FAIL:prctl PR_SET_TAGGED_ADDR_CTRL for mte mode\n");
return -EINVAL;
}
return 0;
}
int mte_default_setup(void)
{
unsigned long hwcaps2 = getauxval(AT_HWCAP2);
unsigned long en = 0;
int ret;
if (!(hwcaps2 & HWCAP2_MTE)) {
ksft_print_msg("SKIP: MTE features unavailable\n");
return KSFT_SKIP;
}
/* Get current mte mode */
ret = prctl(PR_GET_TAGGED_ADDR_CTRL, en, 0, 0, 0);
if (ret < 0) {
ksft_print_msg("FAIL:prctl PR_GET_TAGGED_ADDR_CTRL with error =%d\n", ret);
return KSFT_FAIL;
}
if (ret & PR_MTE_TCF_SYNC)
mte_cur_mode = MTE_SYNC_ERR;
else if (ret & PR_MTE_TCF_ASYNC)
mte_cur_mode = MTE_ASYNC_ERR;
else if (ret & PR_MTE_TCF_NONE)
mte_cur_mode = MTE_NONE_ERR;
mte_cur_pstate_tco = mte_get_pstate_tco();
/* Disable PSTATE.TCO */
mte_disable_pstate_tco();
return 0;
}
void mte_restore_setup(void)
{
mte_switch_mode(mte_cur_mode, MTE_ALLOW_NON_ZERO_TAG);
if (mte_cur_pstate_tco == MT_PSTATE_TCO_EN)
mte_enable_pstate_tco();
else if (mte_cur_pstate_tco == MT_PSTATE_TCO_DIS)
mte_disable_pstate_tco();
}
int create_temp_file(void)
{
int fd;
char filename[] = "/dev/shm/tmp_XXXXXX";
/* Create a file in the tmpfs filesystem */
fd = mkstemp(&filename[0]);
if (fd == -1) {
perror(filename);
ksft_print_msg("FAIL: Unable to open temporary file\n");
		return -1;
}
unlink(&filename[0]);
return fd;
}
| linux-master | tools/testing/selftests/arm64/mte/mte_common_util.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited
#define _GNU_SOURCE
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>
#include <sys/wait.h>
#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"
#define BUFFER_SIZE (5 * MT_GRANULE_SIZE)
#define RUNS (MT_TAG_COUNT * 2)
#define MTE_LAST_TAG_MASK (0x7FFF)
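/*
 * A freshly tagged pointer is usable if in-bounds writes do not fault
 * and, for a nonzero tag, the first byte past the buffer (left with a
 * different tag) does fault.
 */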
static int verify_mte_pointer_validity(char *ptr, int mode)
{
mte_initialize_current_context(mode, (uintptr_t)ptr, BUFFER_SIZE);
/* Check the validity of the tagged pointer */
memset(ptr, '1', BUFFER_SIZE);
mte_wait_after_trig();
if (cur_mte_cxt.fault_valid) {
ksft_print_msg("Unexpected fault recorded for %p-%p in mode %x\n",
ptr, ptr + BUFFER_SIZE, mode);
return KSFT_FAIL;
}
/* Proceed further for nonzero tags */
if (!MT_FETCH_TAG((uintptr_t)ptr))
return KSFT_PASS;
mte_initialize_current_context(mode, (uintptr_t)ptr, BUFFER_SIZE + 1);
/* Check the validity outside the range */
ptr[BUFFER_SIZE] = '2';
mte_wait_after_trig();
if (!cur_mte_cxt.fault_valid) {
ksft_print_msg("No valid fault recorded for %p in mode %x\n",
ptr, mode);
return KSFT_FAIL;
} else {
return KSFT_PASS;
}
}
static int check_single_included_tags(int mem_type, int mode)
{
char *ptr;
int tag, run, ret, result = KSFT_PASS;
ptr = mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false);
if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE,
mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
for (tag = 0; (tag < MT_TAG_COUNT) && (result == KSFT_PASS); tag++) {
ret = mte_switch_mode(mode, MT_INCLUDE_VALID_TAG(tag));
if (ret != 0)
result = KSFT_FAIL;
		/* Try to catch an excluded tag over a number of attempts. */
for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {
ptr = mte_insert_tags(ptr, BUFFER_SIZE);
/* Check tag value */
if (MT_FETCH_TAG((uintptr_t)ptr) == tag) {
ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n",
MT_FETCH_TAG((uintptr_t)ptr),
MT_INCLUDE_VALID_TAG(tag));
result = KSFT_FAIL;
break;
}
result = verify_mte_pointer_validity(ptr, mode);
}
}
mte_free_memory_tag_range(ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE);
return result;
}
static int check_multiple_included_tags(int mem_type, int mode)
{
char *ptr;
int tag, run, result = KSFT_PASS;
unsigned long excl_mask = 0;
ptr = mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false);
if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE,
mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
for (tag = 0; (tag < MT_TAG_COUNT - 1) && (result == KSFT_PASS); tag++) {
excl_mask |= 1 << tag;
mte_switch_mode(mode, MT_INCLUDE_VALID_TAGS(excl_mask));
		/* Try to catch an excluded tag over a number of attempts. */
for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {
ptr = mte_insert_tags(ptr, BUFFER_SIZE);
/* Check tag value */
if (MT_FETCH_TAG((uintptr_t)ptr) < tag) {
ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n",
MT_FETCH_TAG((uintptr_t)ptr),
MT_INCLUDE_VALID_TAGS(excl_mask));
result = KSFT_FAIL;
break;
}
result = verify_mte_pointer_validity(ptr, mode);
}
}
mte_free_memory_tag_range(ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE);
return result;
}
static int check_all_included_tags(int mem_type, int mode)
{
char *ptr;
int run, ret, result = KSFT_PASS;
ptr = mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false);
if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE,
mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
ret = mte_switch_mode(mode, MT_INCLUDE_TAG_MASK);
if (ret != 0)
return KSFT_FAIL;
	/* Try to catch an excluded tag in a number of tries. */
for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {
ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE);
/*
	 * Here the tag nibble can be anything from 0x0 to 0xF (the full
	 * allowed range), so there is no value to match; just verify that
	 * the buffer is writable.
*/
result = verify_mte_pointer_validity(ptr, mode);
}
mte_free_memory_tag_range(ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE);
return result;
}
static int check_none_included_tags(int mem_type, int mode)
{
char *ptr;
int run, ret;
ptr = mte_allocate_memory(BUFFER_SIZE, mem_type, 0, false);
if (check_allocated_memory(ptr, BUFFER_SIZE, mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
ret = mte_switch_mode(mode, MT_EXCLUDE_TAG_MASK);
if (ret != 0)
return KSFT_FAIL;
	/* Try to catch an excluded tag in a number of tries. */
for (run = 0; run < RUNS; run++) {
ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE);
		/* All tags are excluded here, so the generated tag value should be 0 */
if (MT_FETCH_TAG((uintptr_t)ptr)) {
ksft_print_msg("FAIL: included tag value found\n");
mte_free_memory((void *)ptr, BUFFER_SIZE, mem_type, true);
return KSFT_FAIL;
}
mte_initialize_current_context(mode, (uintptr_t)ptr, BUFFER_SIZE);
/* Check the write validity of the untagged pointer */
memset(ptr, '1', BUFFER_SIZE);
mte_wait_after_trig();
if (cur_mte_cxt.fault_valid)
break;
}
mte_free_memory(ptr, BUFFER_SIZE, mem_type, false);
if (cur_mte_cxt.fault_valid)
return KSFT_FAIL;
else
return KSFT_PASS;
}
int main(int argc, char *argv[])
{
int err;
err = mte_default_setup();
if (err)
return err;
/* Register SIGSEGV handler */
mte_register_signal(SIGSEGV, mte_default_handler);
/* Set test plan */
ksft_set_plan(4);
evaluate_test(check_single_included_tags(USE_MMAP, MTE_SYNC_ERR),
"Check an included tag value with sync mode\n");
evaluate_test(check_multiple_included_tags(USE_MMAP, MTE_SYNC_ERR),
"Check different included tags value with sync mode\n");
evaluate_test(check_none_included_tags(USE_MMAP, MTE_SYNC_ERR),
"Check none included tags value with sync mode\n");
evaluate_test(check_all_included_tags(USE_MMAP, MTE_SYNC_ERR),
"Check all included tags value with sync mode\n");
mte_restore_setup();
ksft_print_cnts();
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}
| linux-master | tools/testing/selftests/arm64/mte/check_tags_inclusion.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited
#include "helper.h"
size_t keyia_sign(size_t ptr)
{
asm volatile("paciza %0" : "+r" (ptr));
return ptr;
}
size_t keyib_sign(size_t ptr)
{
asm volatile("pacizb %0" : "+r" (ptr));
return ptr;
}
size_t keyda_sign(size_t ptr)
{
asm volatile("pacdza %0" : "+r" (ptr));
return ptr;
}
size_t keydb_sign(size_t ptr)
{
asm volatile("pacdzb %0" : "+r" (ptr));
return ptr;
}
size_t keyg_sign(size_t ptr)
{
/* output is encoded in the upper 32 bits */
size_t dest = 0;
size_t modifier = 0;
asm volatile("pacga %0, %1, %2" : "=r" (dest) : "r" (ptr), "r" (modifier));
return dest;
}
| linux-master | tools/testing/selftests/arm64/pauth/helper.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited
#define _GNU_SOURCE
#include <sys/auxv.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <setjmp.h>
#include <sched.h>
#include "../../kselftest_harness.h"
#include "helper.h"
#define PAC_COLLISION_ATTEMPTS 10
/*
* The kernel sets TBID by default. So bits 55 and above should remain
* untouched no matter what.
* The VA space size is 48 bits. Bigger is opt-in.
*/
#define PAC_MASK (~0xff80ffffffffffff)
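/*
 * With the layout above PAC_MASK works out to 0x007f000000000000,
 * i.e. bits [54:48] -- the PAC field for a 48-bit VA with TBID set.
 */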
#define ARBITRARY_VALUE (0x1234)
#define ASSERT_PAUTH_ENABLED() \
do { \
unsigned long hwcaps = getauxval(AT_HWCAP); \
/* data key instructions are not in NOP space. This prevents a SIGILL */ \
if (!(hwcaps & HWCAP_PACA)) \
SKIP(return, "PAUTH not enabled"); \
} while (0)
#define ASSERT_GENERIC_PAUTH_ENABLED() \
do { \
unsigned long hwcaps = getauxval(AT_HWCAP); \
/* generic key instructions are not in NOP space. This prevents a SIGILL */ \
if (!(hwcaps & HWCAP_PACG)) \
SKIP(return, "Generic PAUTH not enabled"); \
} while (0)
void sign_specific(struct signatures *sign, size_t val)
{
sign->keyia = keyia_sign(val);
sign->keyib = keyib_sign(val);
sign->keyda = keyda_sign(val);
sign->keydb = keydb_sign(val);
}
void sign_all(struct signatures *sign, size_t val)
{
sign->keyia = keyia_sign(val);
sign->keyib = keyib_sign(val);
sign->keyda = keyda_sign(val);
sign->keydb = keydb_sign(val);
sign->keyg = keyg_sign(val);
}
int n_same(struct signatures *old, struct signatures *new, int nkeys)
{
int res = 0;
res += old->keyia == new->keyia;
res += old->keyib == new->keyib;
res += old->keyda == new->keyda;
res += old->keydb == new->keydb;
if (nkeys == NKEYS)
res += old->keyg == new->keyg;
return res;
}
int n_same_single_set(struct signatures *sign, int nkeys)
{
size_t vals[nkeys];
int same = 0;
vals[0] = sign->keyia & PAC_MASK;
vals[1] = sign->keyib & PAC_MASK;
vals[2] = sign->keyda & PAC_MASK;
vals[3] = sign->keydb & PAC_MASK;
	/* keyg is only sampled when all NKEYS are in play; with nkeys == 4
	 * the VLA has no vals[4] slot.
	 */
	if (nkeys == NKEYS)
		vals[4] = sign->keyg & PAC_MASK;
for (int i = 0; i < nkeys - 1; i++) {
for (int j = i + 1; j < nkeys; j++) {
if (vals[i] == vals[j])
same += 1;
}
}
return same;
}
int exec_sign_all(struct signatures *signed_vals, size_t val)
{
int new_stdin[2];
int new_stdout[2];
int status;
int i;
ssize_t ret;
pid_t pid;
cpu_set_t mask;
ret = pipe(new_stdin);
if (ret == -1) {
perror("pipe returned error");
return -1;
}
ret = pipe(new_stdout);
if (ret == -1) {
perror("pipe returned error");
return -1;
}
/*
* pin this process and all its children to a single CPU, so it can also
* guarantee a context switch with its child
*/
sched_getaffinity(0, sizeof(mask), &mask);
	for (i = 0; i < CPU_SETSIZE; i++)
if (CPU_ISSET(i, &mask))
break;
CPU_ZERO(&mask);
CPU_SET(i, &mask);
sched_setaffinity(0, sizeof(mask), &mask);
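	/*
	 * Rough data flow: the parent writes val into new_stdin, the
	 * exec'd worker (exec_target) signs it with every key and writes
	 * a struct signatures back through new_stdout.
	 */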
pid = fork();
// child
if (pid == 0) {
		ret = dup2(new_stdin[0], STDIN_FILENO);
if (ret == -1) {
perror("dup2 returned error");
exit(1);
}
		ret = dup2(new_stdout[1], STDOUT_FILENO);
if (ret == -1) {
perror("dup2 returned error");
exit(1);
}
close(new_stdin[0]);
close(new_stdin[1]);
close(new_stdout[0]);
close(new_stdout[1]);
ret = execl("exec_target", "exec_target", (char *)NULL);
if (ret == -1) {
perror("exec returned error");
exit(1);
}
}
close(new_stdin[0]);
close(new_stdout[1]);
ret = write(new_stdin[1], &val, sizeof(size_t));
if (ret == -1) {
perror("write returned error");
return -1;
}
/*
* wait for the worker to finish, so that read() reads all data
* will also context switch with worker so that this function can be used
* for context switch tests
*/
waitpid(pid, &status, 0);
if (WIFEXITED(status) == 0) {
fprintf(stderr, "worker exited unexpectedly\n");
return -1;
}
if (WEXITSTATUS(status) != 0) {
fprintf(stderr, "worker exited with error\n");
return -1;
}
ret = read(new_stdout[0], signed_vals, sizeof(struct signatures));
if (ret == -1) {
perror("read returned error");
return -1;
}
return 0;
}
sigjmp_buf jmpbuf;
void pac_signal_handler(int signum, siginfo_t *si, void *uc)
{
if (signum == SIGSEGV || signum == SIGILL)
siglongjmp(jmpbuf, 1);
}
/* check that a corrupted PAC results in SIGSEGV or SIGILL */
TEST(corrupt_pac)
{
struct sigaction sa;
ASSERT_PAUTH_ENABLED();
if (sigsetjmp(jmpbuf, 1) == 0) {
sa.sa_sigaction = pac_signal_handler;
sa.sa_flags = SA_SIGINFO | SA_RESETHAND;
sigemptyset(&sa.sa_mask);
sigaction(SIGSEGV, &sa, NULL);
sigaction(SIGILL, &sa, NULL);
pac_corruptor();
ASSERT_TRUE(0) TH_LOG("SIGSEGV/SIGILL signal did not occur");
}
}
/*
* There are no separate pac* and aut* controls so checking only the pac*
* instructions is sufficient
*/
TEST(pac_instructions_not_nop)
{
size_t keyia = 0;
size_t keyib = 0;
size_t keyda = 0;
size_t keydb = 0;
ASSERT_PAUTH_ENABLED();
for (int i = 0; i < PAC_COLLISION_ATTEMPTS; i++) {
keyia |= keyia_sign(i) & PAC_MASK;
keyib |= keyib_sign(i) & PAC_MASK;
keyda |= keyda_sign(i) & PAC_MASK;
keydb |= keydb_sign(i) & PAC_MASK;
}
ASSERT_NE(0, keyia) TH_LOG("keyia instructions did nothing");
ASSERT_NE(0, keyib) TH_LOG("keyib instructions did nothing");
ASSERT_NE(0, keyda) TH_LOG("keyda instructions did nothing");
ASSERT_NE(0, keydb) TH_LOG("keydb instructions did nothing");
}
TEST(pac_instructions_not_nop_generic)
{
size_t keyg = 0;
ASSERT_GENERIC_PAUTH_ENABLED();
for (int i = 0; i < PAC_COLLISION_ATTEMPTS; i++)
keyg |= keyg_sign(i) & PAC_MASK;
ASSERT_NE(0, keyg) TH_LOG("keyg instructions did nothing");
}
TEST(single_thread_different_keys)
{
int same = 10;
int nkeys = NKEYS;
int tmp;
struct signatures signed_vals;
unsigned long hwcaps = getauxval(AT_HWCAP);
/* generic and data key instructions are not in NOP space. This prevents a SIGILL */
ASSERT_PAUTH_ENABLED();
if (!(hwcaps & HWCAP_PACG)) {
TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
nkeys = NKEYS - 1;
}
/*
* In Linux the PAC field can be up to 7 bits wide. Even if keys are
* different, there is about 5% chance for PACs to collide with
* different addresses. This chance rapidly increases with fewer bits
* allocated for the PAC (e.g. wider address). A comparison of the keys
* directly will be more reliable.
	 * All signed values need to differ at least once out of n attempts
	 * for us to be certain that the keys are different.
*/
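	/*
	 * (Rough estimate: with 4 keys there are 6 key pairs; at about
	 * 2^-7 per pair that comes to roughly 6/128 ~= 5%.)
	 */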
for (int i = 0; i < PAC_COLLISION_ATTEMPTS; i++) {
if (nkeys == NKEYS)
sign_all(&signed_vals, i);
else
sign_specific(&signed_vals, i);
tmp = n_same_single_set(&signed_vals, nkeys);
if (tmp < same)
same = tmp;
}
ASSERT_EQ(0, same) TH_LOG("%d keys clashed every time", same);
}
/*
 * fork() does not change keys; only exec() does, so call a worker program.
 * Its only job is to sign a value and report back the results.
*/
TEST(exec_changed_keys)
{
struct signatures new_keys;
struct signatures old_keys;
int ret;
int same = 10;
int nkeys = NKEYS;
unsigned long hwcaps = getauxval(AT_HWCAP);
/* generic and data key instructions are not in NOP space. This prevents a SIGILL */
ASSERT_PAUTH_ENABLED();
if (!(hwcaps & HWCAP_PACG)) {
TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
nkeys = NKEYS - 1;
}
for (int i = 0; i < PAC_COLLISION_ATTEMPTS; i++) {
ret = exec_sign_all(&new_keys, i);
ASSERT_EQ(0, ret) TH_LOG("failed to run worker");
if (nkeys == NKEYS)
sign_all(&old_keys, i);
else
sign_specific(&old_keys, i);
ret = n_same(&old_keys, &new_keys, nkeys);
if (ret < same)
same = ret;
}
ASSERT_EQ(0, same) TH_LOG("exec() did not change %d keys", same);
}
TEST(context_switch_keep_keys)
{
int ret;
struct signatures trash;
struct signatures before;
struct signatures after;
ASSERT_PAUTH_ENABLED();
sign_specific(&before, ARBITRARY_VALUE);
/* will context switch with a process with different keys at least once */
ret = exec_sign_all(&trash, ARBITRARY_VALUE);
ASSERT_EQ(0, ret) TH_LOG("failed to run worker");
sign_specific(&after, ARBITRARY_VALUE);
ASSERT_EQ(before.keyia, after.keyia) TH_LOG("keyia changed after context switching");
ASSERT_EQ(before.keyib, after.keyib) TH_LOG("keyib changed after context switching");
ASSERT_EQ(before.keyda, after.keyda) TH_LOG("keyda changed after context switching");
ASSERT_EQ(before.keydb, after.keydb) TH_LOG("keydb changed after context switching");
}
TEST(context_switch_keep_keys_generic)
{
int ret;
struct signatures trash;
size_t before;
size_t after;
ASSERT_GENERIC_PAUTH_ENABLED();
before = keyg_sign(ARBITRARY_VALUE);
/* will context switch with a process with different keys at least once */
ret = exec_sign_all(&trash, ARBITRARY_VALUE);
ASSERT_EQ(0, ret) TH_LOG("failed to run worker");
after = keyg_sign(ARBITRARY_VALUE);
ASSERT_EQ(before, after) TH_LOG("keyg changed after context switching");
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/arm64/pauth/pac.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited
#include <stdio.h>
#include <stdlib.h>
#include <sys/auxv.h>
#include "helper.h"
int main(void)
{
struct signatures signed_vals;
unsigned long hwcaps;
size_t val;
	if (fread(&val, sizeof(size_t), 1, stdin) != 1)
		return 1;
	/*
	 * Don't try to execute illegal (unimplemented) instructions; the
	 * caller should have checked this and kept the worker simple.
	 */
hwcaps = getauxval(AT_HWCAP);
if (hwcaps & HWCAP_PACA) {
signed_vals.keyia = keyia_sign(val);
signed_vals.keyib = keyib_sign(val);
signed_vals.keyda = keyda_sign(val);
signed_vals.keydb = keydb_sign(val);
}
signed_vals.keyg = (hwcaps & HWCAP_PACG) ? keyg_sign(val) : 0;
fwrite(&signed_vals, sizeof(struct signatures), 1, stdout);
return 0;
}
| linux-master | tools/testing/selftests/arm64/pauth/exec_target.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Generic test wrapper for arm64 signal tests.
*
* Each test provides its own tde struct tdescr descriptor to link with
* this wrapper. Framework provides common helpers.
*/
#include <kselftest.h>
#include "test_signals.h"
#include "test_signals_utils.h"
struct tdescr *current = &tde;
int main(int argc, char *argv[])
{
ksft_print_msg("%s :: %s\n", current->name, current->descr);
if (test_setup(current) && test_init(current)) {
test_run(current);
test_cleanup(current);
}
test_result(current);
return current->result;
}
| linux-master | tools/testing/selftests/arm64/signal/test_signals.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 ARM Limited */
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <sys/auxv.h>
#include <linux/auxvec.h>
#include <ucontext.h>
#include <asm/unistd.h>
#include <kselftest.h>
#include "test_signals.h"
#include "test_signals_utils.h"
#include "testcases/testcases.h"
extern struct tdescr *current;
static int sig_copyctx = SIGTRAP;
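/*
 * sig_copyctx is reserved by this framework to grab a live ucontext_t
 * copy from a signal handler; test_init() rejects any test trying to
 * use it as its trigger.
 */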
static char const *const feats_names[FMAX_END] = {
" SSBS ",
" SVE ",
" SME ",
" FA64 ",
" SME2 ",
};
#define MAX_FEATS_SZ 128
static char feats_string[MAX_FEATS_SZ];
static inline char *feats_to_string(unsigned long feats)
{
size_t flen = MAX_FEATS_SZ - 1;
feats_string[0] = '\0';
for (int i = 0; i < FMAX_END; i++) {
if (feats & (1UL << i)) {
size_t tlen = strlen(feats_names[i]);
assert(flen > tlen);
flen -= tlen;
strncat(feats_string, feats_names[i], flen);
}
}
return feats_string;
}
static void unblock_signal(int signum)
{
sigset_t sset;
sigemptyset(&sset);
sigaddset(&sset, signum);
sigprocmask(SIG_UNBLOCK, &sset, NULL);
}
static void default_result(struct tdescr *td, bool force_exit)
{
if (td->result == KSFT_SKIP) {
fprintf(stderr, "==>> completed. SKIP.\n");
} else if (td->pass) {
fprintf(stderr, "==>> completed. PASS(1)\n");
td->result = KSFT_PASS;
} else {
fprintf(stdout, "==>> completed. FAIL(0)\n");
td->result = KSFT_FAIL;
}
if (force_exit)
exit(td->result);
}
/*
* The following handle_signal_* helpers are used by main default_handler
* and are meant to return true when signal is handled successfully:
* when false is returned instead, it means that the signal was somehow
* unexpected in that context and it was NOT handled; default_handler will
* take care of such unexpected situations.
*/
static bool handle_signal_unsupported(struct tdescr *td,
siginfo_t *si, void *uc)
{
if (feats_ok(td))
return false;
/* Mangling PC to avoid loops on original SIGILL */
((ucontext_t *)uc)->uc_mcontext.pc += 4;
if (!td->initialized) {
fprintf(stderr,
"Got SIG_UNSUPP @test_init. Ignore.\n");
} else {
fprintf(stderr,
"-- RX SIG_UNSUPP on unsupported feat...OK\n");
td->pass = 1;
default_result(current, 1);
}
return true;
}
static bool handle_signal_trigger(struct tdescr *td,
siginfo_t *si, void *uc)
{
td->triggered = 1;
/* ->run was asserted NON-NULL in test_setup() already */
td->run(td, si, uc);
return true;
}
static bool handle_signal_ok(struct tdescr *td,
siginfo_t *si, void *uc)
{
/*
	 * it's a bug in the test code when this assert fails:
* if sig_trig was defined, it must have been used before getting here.
*/
assert(!td->sig_trig || td->triggered);
fprintf(stderr,
"SIG_OK -- SP:0x%llX si_addr@:%p si_code:%d token@:%p offset:%ld\n",
((ucontext_t *)uc)->uc_mcontext.sp,
si->si_addr, si->si_code, td->token, td->token - si->si_addr);
/*
	 * fake_sigreturn tests (which keep the sanity checks enabled) set,
	 * at the very last moment, the token field to the SP address used
	 * to place the fake sigframe: so token==0 means we never made it
	 * to the end, segfaulting well before, and the test is possibly
	 * broken.
*/
if (!td->sanity_disabled && !td->token) {
fprintf(stdout,
"current->token ZEROED...test is probably broken!\n");
abort();
}
/*
* Trying to narrow down the SEGV to the ones generated by Kernel itself
* via arm64_notify_segfault(). This is a best-effort check anyway, and
* the si_code check may need to change if this aspect of the kernel
* ABI changes.
*/
if (td->sig_ok == SIGSEGV && si->si_code != SEGV_ACCERR) {
fprintf(stdout,
"si_code != SEGV_ACCERR...test is probably broken!\n");
abort();
}
td->pass = 1;
/*
* Some tests can lead to SEGV loops: in such a case we want to
	 * terminate immediately, exiting straight away; some others are not
* supposed to outlive the signal handler code, due to the content of
* the fake sigframe which caused the signal itself.
*/
default_result(current, 1);
return true;
}
static bool handle_signal_copyctx(struct tdescr *td,
siginfo_t *si, void *uc_in)
{
ucontext_t *uc = uc_in;
struct _aarch64_ctx *head;
struct extra_context *extra, *copied_extra;
size_t offset = 0;
size_t to_copy;
ASSERT_GOOD_CONTEXT(uc);
/* Mangling PC to avoid loops on original BRK instr */
uc->uc_mcontext.pc += 4;
/*
	 * Check for, and preserve, any extra data with fixups.
*/
head = (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
head = get_header(head, EXTRA_MAGIC, td->live_sz, &offset);
if (head) {
extra = (struct extra_context *)head;
/*
		 * The extra buffer must sit immediately after the
		 * extra_context record and its 16 byte terminator.
		 * Include it in the copy; this was previously
		 * validated in ASSERT_GOOD_CONTEXT().
*/
to_copy = __builtin_offsetof(ucontext_t,
uc_mcontext.__reserved);
to_copy += offset + sizeof(struct extra_context) + 16;
to_copy += extra->size;
copied_extra = (struct extra_context *)&(td->live_uc->uc_mcontext.__reserved[offset]);
} else {
copied_extra = NULL;
to_copy = sizeof(ucontext_t);
}
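	/*
	 * to_copy now covers either a plain ucontext_t or everything up
	 * to the end of the EXTRA_CONTEXT buffer.
	 */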
if (to_copy > td->live_sz) {
fprintf(stderr,
"Not enough space to grab context, %lu/%lu bytes\n",
td->live_sz, to_copy);
return false;
}
memcpy(td->live_uc, uc, to_copy);
/*
* If there was any EXTRA_CONTEXT fix up the size to be the
* struct extra_context and the following terminator record,
* this means that the rest of the code does not need to have
* special handling for the record and we don't need to fix up
* datap for the new location.
*/
if (copied_extra)
copied_extra->head.size = sizeof(*copied_extra) + 16;
td->live_uc_valid = 1;
fprintf(stderr,
"%lu byte GOOD CONTEXT grabbed from sig_copyctx handler\n",
to_copy);
return true;
}
static void default_handler(int signum, siginfo_t *si, void *uc)
{
if (current->sig_unsupp && signum == current->sig_unsupp &&
handle_signal_unsupported(current, si, uc)) {
fprintf(stderr, "Handled SIG_UNSUPP\n");
} else if (current->sig_trig && signum == current->sig_trig &&
handle_signal_trigger(current, si, uc)) {
fprintf(stderr, "Handled SIG_TRIG\n");
} else if (current->sig_ok && signum == current->sig_ok &&
handle_signal_ok(current, si, uc)) {
fprintf(stderr, "Handled SIG_OK\n");
} else if (signum == sig_copyctx && current->live_uc &&
handle_signal_copyctx(current, si, uc)) {
fprintf(stderr, "Handled SIG_COPYCTX\n");
} else {
if (signum == SIGALRM && current->timeout) {
fprintf(stderr, "-- Timeout !\n");
} else {
fprintf(stderr,
"-- RX UNEXPECTED SIGNAL: %d code %d address %p\n",
signum, si->si_code, si->si_addr);
}
default_result(current, 1);
}
}
static int default_setup(struct tdescr *td)
{
struct sigaction sa;
sa.sa_sigaction = default_handler;
sa.sa_flags = SA_SIGINFO | SA_RESTART;
sa.sa_flags |= td->sa_flags;
sigemptyset(&sa.sa_mask);
/* uncatchable signals naturally skipped ... */
for (int sig = 1; sig < 32; sig++)
sigaction(sig, &sa, NULL);
/*
* RT Signals default disposition is Term but they cannot be
* generated by the Kernel in response to our tests; so just catch
* them all and report them as UNEXPECTED signals.
*/
for (int sig = SIGRTMIN; sig <= SIGRTMAX; sig++)
sigaction(sig, &sa, NULL);
/* just in case...unblock explicitly all we need */
if (td->sig_trig)
unblock_signal(td->sig_trig);
if (td->sig_ok)
unblock_signal(td->sig_ok);
if (td->sig_unsupp)
unblock_signal(td->sig_unsupp);
if (td->timeout) {
unblock_signal(SIGALRM);
alarm(td->timeout);
}
fprintf(stderr, "Registered handlers for all signals.\n");
return 1;
}
static inline int default_trigger(struct tdescr *td)
{
return !raise(td->sig_trig);
}
int test_init(struct tdescr *td)
{
if (td->sig_trig == sig_copyctx) {
fprintf(stdout,
"Signal %d is RESERVED, cannot be used as a trigger. Aborting\n",
sig_copyctx);
return 0;
}
/* just in case */
unblock_signal(sig_copyctx);
td->minsigstksz = getauxval(AT_MINSIGSTKSZ);
if (!td->minsigstksz)
td->minsigstksz = MINSIGSTKSZ;
fprintf(stderr, "Detected MINSTKSIGSZ:%d\n", td->minsigstksz);
if (td->feats_required || td->feats_incompatible) {
td->feats_supported = 0;
/*
		 * Check for the required CPU features using the HWCAP
		 * auxiliary vectors.
*/
if (getauxval(AT_HWCAP) & HWCAP_SSBS)
td->feats_supported |= FEAT_SSBS;
if (getauxval(AT_HWCAP) & HWCAP_SVE)
td->feats_supported |= FEAT_SVE;
if (getauxval(AT_HWCAP2) & HWCAP2_SME)
td->feats_supported |= FEAT_SME;
if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)
td->feats_supported |= FEAT_SME_FA64;
if (getauxval(AT_HWCAP2) & HWCAP2_SME2)
td->feats_supported |= FEAT_SME2;
if (feats_ok(td)) {
if (td->feats_required & td->feats_supported)
fprintf(stderr,
"Required Features: [%s] supported\n",
feats_to_string(td->feats_required &
td->feats_supported));
if (!(td->feats_incompatible & td->feats_supported))
fprintf(stderr,
"Incompatible Features: [%s] absent\n",
feats_to_string(td->feats_incompatible));
} else {
if ((td->feats_required & td->feats_supported) !=
td->feats_supported)
fprintf(stderr,
"Required Features: [%s] NOT supported\n",
feats_to_string(td->feats_required &
~td->feats_supported));
if (td->feats_incompatible & td->feats_supported)
fprintf(stderr,
"Incompatible Features: [%s] supported\n",
feats_to_string(td->feats_incompatible &
~td->feats_supported));
td->result = KSFT_SKIP;
return 0;
}
}
/* Perform test specific additional initialization */
if (td->init && !td->init(td)) {
fprintf(stderr, "FAILED Testcase initialization.\n");
return 0;
}
td->initialized = 1;
fprintf(stderr, "Testcase initialized.\n");
return 1;
}
int test_setup(struct tdescr *td)
{
/* assert core invariants symptom of a rotten testcase */
assert(current);
assert(td);
assert(td->name);
assert(td->run);
/* Default result is FAIL if test setup fails */
td->result = KSFT_FAIL;
if (td->setup)
return td->setup(td);
else
return default_setup(td);
}
int test_run(struct tdescr *td)
{
if (td->trigger)
return td->trigger(td);
else if (td->sig_trig)
return default_trigger(td);
else
return td->run(td, NULL, NULL);
}
void test_result(struct tdescr *td)
{
if (td->initialized && td->result != KSFT_SKIP && td->check_result)
td->check_result(td);
default_result(td, 0);
}
void test_cleanup(struct tdescr *td)
{
if (td->cleanup)
td->cleanup(td);
}
| linux-master | tools/testing/selftests/arm64/signal/test_signals_utils.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Place a fake sigframe on the stack including a BAD Unknown magic
* record: on sigreturn Kernel must spot this attempt and the test
* case is expected to be terminated via SEGV.
*/
#include <signal.h>
#include <ucontext.h>
#include "test_signals_utils.h"
#include "testcases.h"
struct fake_sigframe sf;
static int fake_sigreturn_bad_magic_run(struct tdescr *td,
siginfo_t *si, ucontext_t *uc)
{
struct _aarch64_ctx *shead = GET_SF_RESV_HEAD(sf), *head;
/* just to fill the ucontext_t with something real */
if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
/* need at least 2*HDR_SZ space: KSFT_BAD_MAGIC + terminator. */
head = get_starting_head(shead, HDR_SZ * 2, GET_SF_RESV_SIZE(sf), NULL);
if (!head)
return 0;
/*
	 * use a well-known non-existent bad magic...something we can be
	 * pretty sure will never be defined in the Kernel
*/
head->magic = KSFT_BAD_MAGIC;
head->size = HDR_SZ;
write_terminator_record(GET_RESV_NEXT_HEAD(head));
ASSERT_BAD_CONTEXT(&sf.uc);
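	/*
	 * The last argument is an extra SP misalignment to apply when
	 * placing the frame; 0 keeps it 16-byte aligned (assumed
	 * semantics of fake_sigreturn(), defined outside this file).
	 */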
fake_sigreturn(&sf, sizeof(sf), 0);
return 1;
}
struct tdescr tde = {
.name = "FAKE_SIGRETURN_BAD_MAGIC",
.descr = "Trigger a sigreturn with a sigframe with a bad magic",
.sig_ok = SIGSEGV,
.timeout = 3,
.run = fake_sigreturn_bad_magic_run,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_magic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Try to mangle the ucontext from inside a signal handler, mangling the
* DAIF bits in an illegal manner: this attempt must be spotted by Kernel
* and the test case is expected to be terminated via SEGV.
*
*/
#include "test_signals_utils.h"
#include "testcases.h"
static int mangle_invalid_pstate_run(struct tdescr *td, siginfo_t *si,
ucontext_t *uc)
{
ASSERT_GOOD_CONTEXT(uc);
/*
* This config should trigger a SIGSEGV by Kernel when it checks
* the sigframe consistency in valid_user_regs() routine.
*/
uc->uc_mcontext.pstate |= PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT;
return 1;
}
struct tdescr tde = {
.sanity_disabled = true,
.name = "MANGLE_PSTATE_INVALID_DAIF_BITS",
.descr = "Mangling uc_mcontext with INVALID DAIF_BITS",
.sig_trig = SIGUSR1,
.sig_ok = SIGSEGV,
.run = mangle_invalid_pstate_run,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_daif_bits.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
* Verify that both the streaming SVE and ZA register context in
* signal frames is set up as expected when enabled simultaneously.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
static union {
ucontext_t uc;
char buf[1024 * 128];
} context;
static unsigned int vls[SVE_VQ_MAX];
unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td)
{
int vq, vl;
/*
* Enumerate up to SVE_VQ_MAX vector lengths
*/
for (vq = SVE_VQ_MAX; vq > 0; --vq) {
vl = prctl(PR_SME_SET_VL, vq * 16);
if (vl == -1)
return false;
vl &= PR_SME_VL_LEN_MASK;
/* Did we find the lowest supported VL? */
if (vq < sve_vq_from_vl(vl))
break;
/* Skip missing VLs */
vq = sve_vq_from_vl(vl);
vls[nvls++] = vl;
}
/* We need at least one VL */
if (nvls < 1) {
fprintf(stderr, "Only %d VL supported\n", nvls);
return false;
}
return true;
}
static void setup_regs(void)
{
/* smstart sm; real data is TODO */
asm volatile(".inst 0xd503437f" : : : );
/* smstart za; real data is TODO */
asm volatile(".inst 0xd503457f" : : : );
}
static char zeros[ZA_SIG_REGS_SIZE(SVE_VQ_MAX)];
static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc,
unsigned int vl)
{
size_t offset;
struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
struct _aarch64_ctx *regs;
struct sve_context *ssve;
struct za_context *za;
int ret;
fprintf(stderr, "Testing VL %d\n", vl);
ret = prctl(PR_SME_SET_VL, vl);
if (ret != vl) {
fprintf(stderr, "Failed to set VL, got %d\n", ret);
return 1;
}
/*
* Get a signal context which should have the SVE and ZA
* frames in it.
*/
setup_regs();
if (!get_current_context(td, &context.uc, sizeof(context)))
return 1;
regs = get_header(head, SVE_MAGIC, GET_BUF_RESV_SIZE(context),
&offset);
if (!regs) {
fprintf(stderr, "No SVE context\n");
return 1;
}
ssve = (struct sve_context *)regs;
if (ssve->vl != vl) {
fprintf(stderr, "Got SSVE VL %d, expected %d\n", ssve->vl, vl);
return 1;
}
if (!(ssve->flags & SVE_SIG_FLAG_SM)) {
fprintf(stderr, "SVE_SIG_FLAG_SM not set in SVE record\n");
return 1;
}
fprintf(stderr, "Got expected SSVE size %u and VL %d\n",
regs->size, ssve->vl);
regs = get_header(head, ZA_MAGIC, GET_BUF_RESV_SIZE(context),
&offset);
if (!regs) {
fprintf(stderr, "No ZA context\n");
return 1;
}
za = (struct za_context *)regs;
if (za->vl != vl) {
fprintf(stderr, "Got ZA VL %d, expected %d\n", za->vl, vl);
return 1;
}
fprintf(stderr, "Got expected ZA size %u and VL %d\n",
regs->size, za->vl);
/* We didn't load any data into ZA so it should be all zeros */
if (memcmp(zeros, (char *)za + ZA_SIG_REGS_OFFSET,
ZA_SIG_REGS_SIZE(sve_vq_from_vl(za->vl))) != 0) {
fprintf(stderr, "ZA data invalid\n");
return 1;
}
return 0;
}
static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
int i;
for (i = 0; i < nvls; i++) {
if (do_one_sme_vl(td, si, uc, vls[i]))
return 1;
}
td->pass = 1;
return 0;
}
struct tdescr tde = {
.name = "Streaming SVE registers",
.descr = "Check that we get the right Streaming SVE registers reported",
.feats_required = FEAT_SME,
.timeout = 3,
.init = sme_get_vls,
.run = sme_regs,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
* Verify that the ZA register context in signal frames is set up as
* expected.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
static union {
ucontext_t uc;
char buf[1024 * 128];
} context;
static unsigned int vls[SVE_VQ_MAX];
unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td)
{
int vq, vl;
/*
	 * Enumerate up to SVE_VQ_MAX vector lengths
*/
for (vq = SVE_VQ_MAX; vq > 0; --vq) {
vl = prctl(PR_SME_SET_VL, vq * 16);
if (vl == -1)
return false;
vl &= PR_SME_VL_LEN_MASK;
/* Did we find the lowest supported VL? */
if (vq < sve_vq_from_vl(vl))
break;
/* Skip missing VLs */
vq = sve_vq_from_vl(vl);
vls[nvls++] = vl;
}
/* We need at least one VL */
if (nvls < 1) {
fprintf(stderr, "Only %d VL supported\n", nvls);
return false;
}
return true;
}
static void setup_za_regs(void)
{
/* smstart za; real data is TODO */
asm volatile(".inst 0xd503457f" : : : );
}
static char zeros[ZA_SIG_REGS_SIZE(SVE_VQ_MAX)];
static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc,
unsigned int vl)
{
size_t offset;
struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
struct za_context *za;
fprintf(stderr, "Testing VL %d\n", vl);
if (prctl(PR_SME_SET_VL, vl) != vl) {
fprintf(stderr, "Failed to set VL\n");
return 1;
}
/*
	 * Get a signal context which should have a ZA frame and registers
* in it.
*/
setup_za_regs();
if (!get_current_context(td, &context.uc, sizeof(context)))
return 1;
head = get_header(head, ZA_MAGIC, GET_BUF_RESV_SIZE(context), &offset);
if (!head) {
fprintf(stderr, "No ZA context\n");
return 1;
}
za = (struct za_context *)head;
if (za->vl != vl) {
fprintf(stderr, "Got VL %d, expected %d\n", za->vl, vl);
return 1;
}
if (head->size != ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(vl))) {
fprintf(stderr, "ZA context size %u, expected %lu\n",
head->size, ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(vl)));
return 1;
}
fprintf(stderr, "Got expected size %u and VL %d\n",
head->size, za->vl);
/* We didn't load any data into ZA so it should be all zeros */
if (memcmp(zeros, (char *)za + ZA_SIG_REGS_OFFSET,
ZA_SIG_REGS_SIZE(sve_vq_from_vl(za->vl))) != 0) {
fprintf(stderr, "ZA data invalid\n");
return 1;
}
return 0;
}
static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
int i;
for (i = 0; i < nvls; i++) {
if (do_one_sme_vl(td, si, uc, vls[i]))
return 1;
}
td->pass = 1;
return 0;
}
struct tdescr tde = {
.name = "ZA register",
.descr = "Check that we get the right ZA registers reported",
.feats_required = FEAT_SME,
.timeout = 3,
.init = sme_get_vls,
.run = sme_regs,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/za_regs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
* Check that the SME vector length reported in signal contexts is the
* expected one.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
struct fake_sigframe sf;
unsigned int vl;
static bool get_sme_vl(struct tdescr *td)
{
int ret = prctl(PR_SME_GET_VL);
if (ret == -1)
return false;
vl = ret;
return true;
}
static int sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
size_t resv_sz, offset;
struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
struct za_context *za;
/* Get a signal context which should have a ZA frame in it */
if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
resv_sz = GET_SF_RESV_SIZE(sf);
head = get_header(head, ZA_MAGIC, resv_sz, &offset);
if (!head) {
fprintf(stderr, "No ZA context\n");
return 1;
}
za = (struct za_context *)head;
if (za->vl != vl) {
fprintf(stderr, "ZA sigframe VL %u, expected %u\n",
za->vl, vl);
return 1;
} else {
fprintf(stderr, "got expected VL %u\n", vl);
}
td->pass = 1;
return 0;
}
struct tdescr tde = {
.name = "SME VL",
.descr = "Check that we get the right SME VL reported",
.feats_required = FEAT_SME,
.timeout = 3,
.init = get_sme_vl,
.run = sme_vl,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/sme_vl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
* Check that the SVE vector length reported in signal contexts is the
* expected one.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
struct fake_sigframe sf;
unsigned int vl;
static bool get_sve_vl(struct tdescr *td)
{
int ret = prctl(PR_SVE_GET_VL);
if (ret == -1)
return false;
vl = ret;
return true;
}
static int sve_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
size_t resv_sz, offset;
struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
struct sve_context *sve;
/* Get a signal context which should have a SVE frame in it */
if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
resv_sz = GET_SF_RESV_SIZE(sf);
head = get_header(head, SVE_MAGIC, resv_sz, &offset);
if (!head) {
fprintf(stderr, "No SVE context\n");
return 1;
}
sve = (struct sve_context *)head;
if (sve->vl != vl) {
fprintf(stderr, "sigframe VL %u, expected %u\n",
sve->vl, vl);
return 1;
} else {
fprintf(stderr, "got expected VL %u\n", vl);
}
td->pass = 1;
return 0;
}
struct tdescr tde = {
.name = "SVE VL",
.descr = "Check that we get the right SVE VL reported",
.feats_required = FEAT_SVE,
.timeout = 3,
.init = get_sve_vl,
.run = sve_vl,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/sve_vl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Place a fake sigframe on the stack including a bad record overflowing
* the __reserved space: on sigreturn Kernel must spot this attempt and
* the test case is expected to be terminated via SEGV.
*/
#include <signal.h>
#include <ucontext.h>
#include "test_signals_utils.h"
#include "testcases.h"
struct fake_sigframe sf;
#define MIN_SZ_ALIGN 16
static int fake_sigreturn_bad_size_run(struct tdescr *td,
siginfo_t *si, ucontext_t *uc)
{
size_t resv_sz, need_sz, offset;
struct _aarch64_ctx *shead = GET_SF_RESV_HEAD(sf), *head;
/* just to fill the ucontext_t with something real */
if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
resv_sz = GET_SF_RESV_SIZE(sf);
/* at least HDR_SZ + bad sized esr_context needed */
need_sz = sizeof(struct esr_context) + HDR_SZ;
head = get_starting_head(shead, need_sz, resv_sz, &offset);
if (!head)
return 0;
/*
* Use an esr_context to build a fake header with a
	 * size greater than the free __reserved area minus HDR_SZ;
* using ESR_MAGIC here since it is not checked for size nor
* is limited to one instance.
*
* At first inject an additional normal esr_context
*/
head->magic = ESR_MAGIC;
head->size = sizeof(struct esr_context);
/* and terminate properly */
write_terminator_record(GET_RESV_NEXT_HEAD(head));
ASSERT_GOOD_CONTEXT(&sf.uc);
/*
* now mess with fake esr_context size: leaving less space than
* needed while keeping size value 16-aligned
*
* It must trigger a SEGV from Kernel on:
*
* resv_sz - offset < sizeof(*head)
*/
/* at first set the maximum good 16-aligned size */
head->size = (resv_sz - offset - need_sz + MIN_SZ_ALIGN) & ~0xfUL;
/* plus a bit more of 16-aligned sized stuff */
head->size += MIN_SZ_ALIGN;
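	/*
	 * head->size now overruns the free __reserved space by
	 * MIN_SZ_ALIGN bytes while staying 16-byte aligned.
	 */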
/* and terminate properly */
write_terminator_record(GET_RESV_NEXT_HEAD(head));
ASSERT_BAD_CONTEXT(&sf.uc);
fake_sigreturn(&sf, sizeof(sf), 0);
return 1;
}
struct tdescr tde = {
.name = "FAKE_SIGRETURN_BAD_SIZE",
.descr = "Triggers a sigreturn with a overrun __reserved area",
.sig_ok = SIGSEGV,
.timeout = 3,
.run = fake_sigreturn_bad_size_run,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 ARM Limited
*
* Verify that the TPIDR2 register context in signal frames is set up as
* expected.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <unistd.h>
#include <asm/sigcontext.h>
#include "test_signals_utils.h"
#include "testcases.h"
static union {
ucontext_t uc;
char buf[1024 * 128];
} context;
#define SYS_TPIDR2 "S3_3_C13_C0_5"
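/*
 * S3_3_C13_C0_5 is the raw system register encoding of TPIDR2_EL0,
 * spelled out so the test also builds with assemblers that do not
 * know the register by name.
 */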
static uint64_t get_tpidr2(void)
{
uint64_t val;
asm volatile (
"mrs %0, " SYS_TPIDR2 "\n"
: "=r"(val)
:
: "cc");
return val;
}
int tpidr2_present(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
struct tpidr2_context *tpidr2_ctx;
size_t offset;
bool in_sigframe;
bool have_sme;
__u64 orig_tpidr2;
have_sme = getauxval(AT_HWCAP2) & HWCAP2_SME;
if (have_sme)
orig_tpidr2 = get_tpidr2();
if (!get_current_context(td, &context.uc, sizeof(context)))
return 1;
tpidr2_ctx = (struct tpidr2_context *)
get_header(head, TPIDR2_MAGIC, td->live_sz, &offset);
in_sigframe = tpidr2_ctx != NULL;
fprintf(stderr, "TPIDR2 sigframe %s on system %s SME\n",
in_sigframe ? "present" : "absent",
have_sme ? "with" : "without");
td->pass = (in_sigframe == have_sme);
/*
* Check that the value we read back was the one present at
* the time that the signal was triggered. TPIDR2 is owned by
* libc so we can't safely choose the value and it is possible
* that we may need to revisit this in future if something
* starts deciding to set a new TPIDR2 between us reading and
* the signal.
*/
if (have_sme && tpidr2_ctx) {
if (tpidr2_ctx->tpidr2 != orig_tpidr2) {
fprintf(stderr, "TPIDR2 in frame is %llx, was %llx\n",
tpidr2_ctx->tpidr2, orig_tpidr2);
td->pass = false;
}
}
return 0;
}
struct tdescr tde = {
.name = "TPIDR2",
.descr = "Validate that TPIDR2 is present as expected",
.timeout = 3,
.run = tpidr2_present,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/tpidr2_siginfo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
 * Attempt to change the SVE vector length in a signal handler; this is not
* supported and is expected to segfault.
*/
#include <kselftest.h>
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
struct fake_sigframe sf;
static unsigned int vls[SVE_VQ_MAX];
unsigned int nvls = 0;
static bool sve_get_vls(struct tdescr *td)
{
int vq, vl;
/*
* Enumerate up to SVE_VQ_MAX vector lengths
*/
for (vq = SVE_VQ_MAX; vq > 0; --vq) {
vl = prctl(PR_SVE_SET_VL, vq * 16);
if (vl == -1)
return false;
vl &= PR_SVE_VL_LEN_MASK;
/* Skip missing VLs */
vq = sve_vq_from_vl(vl);
vls[nvls++] = vl;
}
/* We need at least two VLs */
if (nvls < 2) {
fprintf(stderr, "Only %d VL supported\n", nvls);
td->result = KSFT_SKIP;
return false;
}
return true;
}
static int fake_sigreturn_sve_change_vl(struct tdescr *td,
siginfo_t *si, ucontext_t *uc)
{
size_t resv_sz, offset;
struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
struct sve_context *sve;
/* Get a signal context with a SVE frame in it */
if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
resv_sz = GET_SF_RESV_SIZE(sf);
head = get_header(head, SVE_MAGIC, resv_sz, &offset);
if (!head) {
fprintf(stderr, "No SVE context\n");
return 1;
}
if (head->size != sizeof(struct sve_context)) {
fprintf(stderr, "SVE register state active, skipping\n");
return 1;
}
sve = (struct sve_context *)head;
/* No changes are supported; init left us at minimum VL so go to max */
fprintf(stderr, "Attempting to change VL from %d to %d\n",
sve->vl, vls[0]);
sve->vl = vls[0];
fake_sigreturn(&sf, sizeof(sf), 0);
return 1;
}
struct tdescr tde = {
.name = "FAKE_SIGRETURN_SVE_CHANGE",
.descr = "Attempt to change SVE VL",
.feats_required = FEAT_SVE,
.sig_ok = SIGSEGV,
.timeout = 3,
.init = sve_get_vls,
.run = fake_sigreturn_sve_change_vl,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Place a fake sigframe on the stack missing the mandatory FPSIMD
* record: on sigreturn Kernel must spot this attempt and the test
* case is expected to be terminated via SEGV.
*/
#include <stdio.h>
#include <signal.h>
#include <ucontext.h>
#include "test_signals_utils.h"
#include "testcases.h"
struct fake_sigframe sf;
static int fake_sigreturn_missing_fpsimd_run(struct tdescr *td,
siginfo_t *si, ucontext_t *uc)
{
size_t resv_sz, offset;
struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
/* just to fill the ucontext_t with something real */
if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
resv_sz = GET_SF_RESV_SIZE(sf);
head = get_header(head, FPSIMD_MAGIC, resv_sz, &offset);
if (head && resv_sz - offset >= HDR_SZ) {
fprintf(stderr, "Mangling template header. Spare space:%zd\n",
resv_sz - offset);
		/* Just overwrite fpsimd_context */
write_terminator_record(head);
ASSERT_BAD_CONTEXT(&sf.uc);
fake_sigreturn(&sf, sizeof(sf), 0);
}
return 1;
}
struct tdescr tde = {
.name = "FAKE_SIGRETURN_MISSING_FPSIMD",
.descr = "Triggers a sigreturn with a missing fpsimd_context",
.sig_ok = SIGSEGV,
.timeout = 3,
.run = fake_sigreturn_missing_fpsimd_run,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_missing_fpsimd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Place a fake sigframe on the stack including an additional FPSIMD
* record: on sigreturn Kernel must spot this attempt and the test
* case is expected to be terminated via SEGV.
*/
#include <signal.h>
#include <ucontext.h>
#include "test_signals_utils.h"
#include "testcases.h"
struct fake_sigframe sf;
static int fake_sigreturn_duplicated_fpsimd_run(struct tdescr *td,
siginfo_t *si, ucontext_t *uc)
{
struct _aarch64_ctx *shead = GET_SF_RESV_HEAD(sf), *head;
/* just to fill the ucontext_t with something real */
if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
head = get_starting_head(shead, sizeof(struct fpsimd_context) + HDR_SZ,
GET_SF_RESV_SIZE(sf), NULL);
if (!head)
return 0;
/* Add a spurious fpsimd_context */
head->magic = FPSIMD_MAGIC;
head->size = sizeof(struct fpsimd_context);
/* and terminate */
write_terminator_record(GET_RESV_NEXT_HEAD(head));
ASSERT_BAD_CONTEXT(&sf.uc);
fake_sigreturn(&sf, sizeof(sf), 0);
return 1;
}
struct tdescr tde = {
.name = "FAKE_SIGRETURN_DUPLICATED_FPSIMD",
.descr = "Triggers a sigreturn including two fpsimd_context",
.sig_ok = SIGSEGV,
.timeout = 3,
.run = fake_sigreturn_duplicated_fpsimd_run,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_duplicated_fpsimd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Try to mangle the ucontext from inside a signal handler, toggling
* the mode bit to escalate exception level: this attempt must be spotted
 * by Kernel and the test case is expected to be terminated via SEGV.
*/
#include "test_signals_utils.h"
#include "testcases.h"
#include "mangle_pstate_invalid_mode_template.h"
DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(1t);
| linux-master | tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el1t.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Try to mangle the ucontext from inside a signal handler, toggling
* the execution state bit: this attempt must be spotted by Kernel and
* the test case is expected to be terminated via SEGV.
*/
#include "test_signals_utils.h"
#include "testcases.h"
static int mangle_invalid_pstate_run(struct tdescr *td, siginfo_t *si,
ucontext_t *uc)
{
ASSERT_GOOD_CONTEXT(uc);
/* This config should trigger a SIGSEGV by Kernel */
uc->uc_mcontext.pstate ^= PSR_MODE32_BIT;
return 1;
}
struct tdescr tde = {
.sanity_disabled = true,
.name = "MANGLE_PSTATE_INVALID_STATE_TOGGLE",
.descr = "Mangling uc_mcontext with INVALID STATE_TOGGLE",
.sig_trig = SIGUSR1,
.sig_ok = SIGSEGV,
.run = mangle_invalid_pstate_run,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_compat_toggle.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Try to mangle the ucontext from inside a signal handler, toggling
* the mode bit to escalate exception level: this attempt must be spotted
 * by Kernel and the test case is expected to be terminated via SEGV.
*/
#include "test_signals_utils.h"
#include "testcases.h"
#include "mangle_pstate_invalid_mode_template.h"
DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(1h);
| linux-master | tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el1h.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
* Verify that the SVE register context in signal frames is set up as
* expected.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
static union {
ucontext_t uc;
char buf[1024 * 64];
} context;
static unsigned int vls[SVE_VQ_MAX];
unsigned int nvls = 0;
static bool sve_get_vls(struct tdescr *td)
{
int vq, vl;
/*
* Enumerate up to SVE_VQ_MAX vector lengths
*/
for (vq = SVE_VQ_MAX; vq > 0; --vq) {
vl = prctl(PR_SVE_SET_VL, vq * 16);
if (vl == -1)
return false;
vl &= PR_SVE_VL_LEN_MASK;
/* Skip missing VLs */
vq = sve_vq_from_vl(vl);
vls[nvls++] = vl;
}
/* We need at least one VL */
if (nvls < 1) {
fprintf(stderr, "Only %d VL supported\n", nvls);
return false;
}
return true;
}
static void setup_sve_regs(void)
{
/* RDVL x16, #1 so we should have SVE regs; real data is TODO */
asm volatile(".inst 0x04bf5030" : : : "x16" );
}
static int do_one_sve_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc,
unsigned int vl)
{
size_t offset;
struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
struct sve_context *sve;
fprintf(stderr, "Testing VL %d\n", vl);
if (prctl(PR_SVE_SET_VL, vl) == -1) {
fprintf(stderr, "Failed to set VL\n");
return 1;
}
/*
* Get a signal context which should have a SVE frame and registers
* in it.
*/
setup_sve_regs();
if (!get_current_context(td, &context.uc, sizeof(context)))
return 1;
head = get_header(head, SVE_MAGIC, GET_BUF_RESV_SIZE(context),
&offset);
if (!head) {
fprintf(stderr, "No SVE context\n");
return 1;
}
sve = (struct sve_context *)head;
if (sve->vl != vl) {
fprintf(stderr, "Got VL %d, expected %d\n", sve->vl, vl);
return 1;
}
/* The actual size validation is done in get_current_context() */
fprintf(stderr, "Got expected size %u and VL %d\n",
head->size, sve->vl);
return 0;
}
static int sve_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
int i;
for (i = 0; i < nvls; i++) {
if (do_one_sve_vl(td, si, uc, vls[i]))
return 1;
}
td->pass = 1;
return 0;
}
struct tdescr tde = {
.name = "SVE registers",
.descr = "Check that we get the right SVE registers reported",
.feats_required = FEAT_SVE,
.timeout = 3,
.init = sve_get_vls,
.run = sve_regs,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/sve_regs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
 * Verify that the ZA register context in signal frames is set up as
 * expected when ZA is disabled.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
static union {
ucontext_t uc;
char buf[1024 * 128];
} context;
static unsigned int vls[SVE_VQ_MAX];
unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td)
{
int vq, vl;
/*
	 * Enumerate up to SVE_VQ_MAX vector lengths
*/
for (vq = SVE_VQ_MAX; vq > 0; --vq) {
vl = prctl(PR_SME_SET_VL, vq * 16);
if (vl == -1)
return false;
vl &= PR_SME_VL_LEN_MASK;
/* Skip missing VLs */
vq = sve_vq_from_vl(vl);
vls[nvls++] = vl;
}
/* We need at least one VL */
if (nvls < 1) {
fprintf(stderr, "Only %d VL supported\n", nvls);
return false;
}
return true;
}
static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc,
unsigned int vl)
{
size_t offset;
struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
struct za_context *za;
fprintf(stderr, "Testing VL %d\n", vl);
if (prctl(PR_SME_SET_VL, vl) != vl) {
fprintf(stderr, "Failed to set VL\n");
return 1;
}
/*
	 * Get a signal context which should have a ZA frame in it, but no
	 * ZA register data since ZA is disabled.
*/
if (!get_current_context(td, &context.uc, sizeof(context)))
return 1;
head = get_header(head, ZA_MAGIC, GET_BUF_RESV_SIZE(context), &offset);
if (!head) {
fprintf(stderr, "No ZA context\n");
return 1;
}
za = (struct za_context *)head;
if (za->vl != vl) {
fprintf(stderr, "Got VL %d, expected %d\n", za->vl, vl);
return 1;
}
if (head->size != ZA_SIG_REGS_OFFSET) {
fprintf(stderr, "Context size %u, expected %lu\n",
head->size, ZA_SIG_REGS_OFFSET);
return 1;
}
/* The actual size validation is done in get_current_context() */
fprintf(stderr, "Got expected size %u and VL %d\n",
head->size, za->vl);
return 0;
}
static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
int i;
for (i = 0; i < nvls; i++) {
if (do_one_sme_vl(td, si, uc, vls[i]))
return 1;
}
td->pass = 1;
return 0;
}
struct tdescr tde = {
.name = "ZA registers - ZA disabled",
.descr = "Check ZA context with ZA disabled",
.feats_required = FEAT_SME,
.timeout = 3,
.init = sme_get_vls,
.run = sme_regs,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/za_no_regs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
* Verify that using a streaming mode instruction without enabling it
* generates a SIGILL.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
int sme_trap_no_sm_trigger(struct tdescr *td)
{
/* SMSTART ZA ; ADDHA ZA0.S, P0/M, P0/M, Z0.S */
asm volatile(".inst 0xd503457f ; .inst 0xc0900000");
return 0;
}
int sme_trap_no_sm_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
return 1;
}
struct tdescr tde = {
.name = "SME trap without SM",
.descr = "Check that we get a SIGILL if we use streaming mode without enabling it",
.timeout = 3,
.feats_required = FEAT_SME, /* We need a SMSTART ZA */
.sanity_disabled = true,
.trigger = sme_trap_no_sm_trigger,
.run = sme_trap_no_sm_run,
.sig_ok = SIGILL,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
 * Verify that no ZT register context is present in signal frames
 * when ZA is disabled.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
static union {
ucontext_t uc;
char buf[1024 * 128];
} context;
int zt_no_regs_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
size_t offset;
struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
/*
* Get a signal context which should not have a ZT frame and
* registers in it.
*/
if (!get_current_context(td, &context.uc, sizeof(context)))
return 1;
head = get_header(head, ZT_MAGIC, GET_BUF_RESV_SIZE(context), &offset);
if (head) {
fprintf(stderr, "Got unexpected ZT context\n");
return 1;
}
td->pass = 1;
return 0;
}
struct tdescr tde = {
.name = "ZT register data not present",
.descr = "Validate that ZT is not present when ZA is disabled",
.feats_required = FEAT_SME2,
.timeout = 3,
.sanity_disabled = true,
.run = zt_no_regs_run,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/zt_no_regs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Try to mangle the ucontext from inside a signal handler, toggling
* the mode bit to escalate exception level: this attempt must be spotted
 * by Kernel and the test case is expected to be terminated via SEGV.
*/
#include "test_signals_utils.h"
#include "testcases.h"
#include "mangle_pstate_invalid_mode_template.h"
DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(3t);
| linux-master | tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el3t.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Try to mangle the ucontext from inside a signal handler, toggling
* the mode bit to escalate exception level: this attempt must be spotted
 * by Kernel and the test case is expected to be terminated via SEGV.
*/
#include "test_signals_utils.h"
#include "testcases.h"
#include "mangle_pstate_invalid_mode_template.h"
DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(2h);
| linux-master | tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el2h.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2023 ARM Limited
*
* Verify that the TPIDR2 register context in signal frames is restored.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <unistd.h>
#include <asm/sigcontext.h>
#include "test_signals_utils.h"
#include "testcases.h"
#define SYS_TPIDR2 "S3_3_C13_C0_5"
static uint64_t get_tpidr2(void)
{
uint64_t val;
asm volatile (
"mrs %0, " SYS_TPIDR2 "\n"
: "=r"(val)
:
: "cc");
return val;
}
static void set_tpidr2(uint64_t val)
{
asm volatile (
"msr " SYS_TPIDR2 ", %0\n"
:
: "r"(val)
: "cc");
}
static uint64_t initial_tpidr2;
static bool save_tpidr2(struct tdescr *td)
{
initial_tpidr2 = get_tpidr2();
fprintf(stderr, "Initial TPIDR2: %lx\n", initial_tpidr2);
return true;
}
static int modify_tpidr2(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
uint64_t my_tpidr2 = get_tpidr2();
my_tpidr2++;
fprintf(stderr, "Setting TPIDR2 to %lx\n", my_tpidr2);
set_tpidr2(my_tpidr2);
return 0;
}
static void check_tpidr2(struct tdescr *td)
{
uint64_t tpidr2 = get_tpidr2();
td->pass = tpidr2 == initial_tpidr2;
if (td->pass)
fprintf(stderr, "TPIDR2 restored\n");
else
fprintf(stderr, "TPIDR2 was %lx but is now %lx\n",
initial_tpidr2, tpidr2);
}
struct tdescr tde = {
.name = "TPIDR2 restore",
.descr = "Validate that TPIDR2 is restored from the sigframe",
.feats_required = FEAT_SME,
.timeout = 3,
.sig_trig = SIGUSR1,
.init = save_tpidr2,
.run = modify_tpidr2,
.check_result = check_tpidr2,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/tpidr2_restore.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Place a fake sigframe on the stack including a badly sized terminator
* record: on sigreturn Kernel must spot this attempt and the test case
* is expected to be terminated via SEGV.
*/
#include <signal.h>
#include <ucontext.h>
#include "test_signals_utils.h"
#include "testcases.h"
struct fake_sigframe sf;
static int fake_sigreturn_bad_size_for_magic0_run(struct tdescr *td,
siginfo_t *si, ucontext_t *uc)
{
struct _aarch64_ctx *shead = GET_SF_RESV_HEAD(sf), *head;
/* just to fill the ucontext_t with something real */
if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
/* at least HDR_SZ for the badly sized terminator. */
head = get_starting_head(shead, HDR_SZ, GET_SF_RESV_SIZE(sf), NULL);
if (!head)
return 0;
head->magic = 0;
head->size = HDR_SZ;
ASSERT_BAD_CONTEXT(&sf.uc);
fake_sigreturn(&sf, sizeof(sf), 0);
return 1;
}
struct tdescr tde = {
.name = "FAKE_SIGRETURN_BAD_SIZE_FOR_TERMINATOR",
.descr = "Trigger a sigreturn using non-zero size terminator",
.sig_ok = SIGSEGV,
.timeout = 3,
.run = fake_sigreturn_bad_size_for_magic0_run,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size_for_magic0.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Try to mangle the ucontext from inside a signal handler, toggling
* the mode bit to escalate exception level: this attempt must be spotted
 * by Kernel and the test case is expected to be terminated via SEGV.
*/
#include "test_signals_utils.h"
#include "testcases.h"
#include "mangle_pstate_invalid_mode_template.h"
DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(2t);
| linux-master | tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el2t.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
 * Verify that the ZT register context in signal frames is present and
 * zero-filled when ZA is enabled.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
static union {
ucontext_t uc;
char buf[1024 * 128];
} context;
static void enable_za(void)
{
/* smstart za; real data is TODO */
asm volatile(".inst 0xd503457f" : : : );
}
int zt_regs_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
size_t offset;
struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
struct zt_context *zt;
char *zeros;
/*
* Get a signal context which should have a ZT frame and registers
* in it.
*/
enable_za();
if (!get_current_context(td, &context.uc, sizeof(context)))
return 1;
head = get_header(head, ZT_MAGIC, GET_BUF_RESV_SIZE(context), &offset);
if (!head) {
fprintf(stderr, "No ZT context\n");
return 1;
}
zt = (struct zt_context *)head;
if (zt->nregs == 0) {
fprintf(stderr, "Got context with no registers\n");
return 1;
}
fprintf(stderr, "Got expected size %u for %d registers\n",
head->size, zt->nregs);
/* We didn't load any data into ZT so it should be all zeros */
zeros = malloc(ZT_SIG_REGS_SIZE(zt->nregs));
if (!zeros) {
fprintf(stderr, "Out of memory, nregs=%u\n", zt->nregs);
return 1;
}
memset(zeros, 0, ZT_SIG_REGS_SIZE(zt->nregs));
if (memcmp(zeros, (char *)zt + ZT_SIG_REGS_OFFSET,
ZT_SIG_REGS_SIZE(zt->nregs)) != 0) {
fprintf(stderr, "ZT data invalid\n");
free(zeros);
return 1;
}
free(zeros);
td->pass = 1;
return 0;
}
struct tdescr tde = {
.name = "ZT register data",
.descr = "Validate that ZT is present and has data when ZA is enabled",
.feats_required = FEAT_SME2,
.timeout = 3,
.sanity_disabled = true,
.run = zt_regs_run,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/zt_regs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Try to mangle the ucontext from inside a signal handler, toggling
* the mode bit to escalate exception level: this attempt must be spotted
 * by Kernel and the test case is expected to be terminated via SEGV.
*/
#include "test_signals_utils.h"
#include "testcases.h"
#include "mangle_pstate_invalid_mode_template.h"
DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(3h);
| linux-master | tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el3h.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
* Verify that accessing ZA without enabling it generates a SIGILL.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
int sme_trap_za_trigger(struct tdescr *td)
{
/* ZERO ZA */
asm volatile(".inst 0xc00800ff");
return 0;
}
int sme_trap_za_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
return 1;
}
struct tdescr tde = {
.name = "SME ZA trap",
.descr = "Check that we get a SIGILL if we access ZA without enabling",
.timeout = 3,
.sanity_disabled = true,
.trigger = sme_trap_za_trigger,
.run = sme_trap_za_run,
.sig_ok = SIGILL,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
* Attempt to change the streaming SVE vector length in a signal
* handler, this is not supported and is expected to segfault.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
struct fake_sigframe sf;
static unsigned int vls[SVE_VQ_MAX];
unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td)
{
int vq, vl;
/*
* Enumerate up to SVE_VQ_MAX vector lengths
*/
for (vq = SVE_VQ_MAX; vq > 0; --vq) {
		vl = prctl(PR_SME_SET_VL, vq * 16);
if (vl == -1)
return false;
vl &= PR_SME_VL_LEN_MASK;
/* Skip missing VLs */
vq = sve_vq_from_vl(vl);
vls[nvls++] = vl;
}
/* We need at least two VLs */
if (nvls < 2) {
fprintf(stderr, "Only %d VL supported\n", nvls);
return false;
}
return true;
}
static int fake_sigreturn_ssve_change_vl(struct tdescr *td,
siginfo_t *si, ucontext_t *uc)
{
size_t resv_sz, offset;
struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
struct sve_context *sve;
	/* Get a signal context with an SVE frame in it */
if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
resv_sz = GET_SF_RESV_SIZE(sf);
head = get_header(head, SVE_MAGIC, resv_sz, &offset);
if (!head) {
fprintf(stderr, "No SVE context\n");
return 1;
}
if (head->size != sizeof(struct sve_context)) {
fprintf(stderr, "Register data present, aborting\n");
return 1;
}
sve = (struct sve_context *)head;
/* No changes are supported; init left us at minimum VL so go to max */
fprintf(stderr, "Attempting to change VL from %d to %d\n",
sve->vl, vls[0]);
sve->vl = vls[0];
fake_sigreturn(&sf, sizeof(sf), 0);
return 1;
}
struct tdescr tde = {
.name = "FAKE_SIGRETURN_SSVE_CHANGE",
.descr = "Attempt to change Streaming SVE VL",
.feats_required = FEAT_SME,
.sig_ok = SIGSEGV,
.timeout = 3,
.init = sme_get_vls,
.run = fake_sigreturn_ssve_change_vl,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 ARM Limited */
#include <ctype.h>
#include <string.h>
#include "testcases.h"
struct _aarch64_ctx *get_header(struct _aarch64_ctx *head, uint32_t magic,
size_t resv_sz, size_t *offset)
{
size_t offs = 0;
struct _aarch64_ctx *found = NULL;
if (!head || resv_sz < HDR_SZ)
return found;
while (offs <= resv_sz - HDR_SZ &&
head->magic != magic && head->magic) {
offs += head->size;
head = GET_RESV_NEXT_HEAD(head);
}
if (head->magic == magic) {
found = head;
if (offset)
*offset = offs;
}
return found;
}
bool validate_extra_context(struct extra_context *extra, char **err,
void **extra_data, size_t *extra_size)
{
struct _aarch64_ctx *term;
if (!extra || !err)
return false;
fprintf(stderr, "Validating EXTRA...\n");
term = GET_RESV_NEXT_HEAD(&extra->head);
if (!term || term->magic || term->size) {
*err = "Missing terminator after EXTRA context";
return false;
}
if (extra->datap & 0x0fUL)
*err = "Extra DATAP misaligned";
else if (extra->size & 0x0fUL)
*err = "Extra SIZE misaligned";
else if (extra->datap != (uint64_t)term + 0x10UL)
*err = "Extra DATAP misplaced (not contiguous)";
if (*err)
return false;
*extra_data = (void *)extra->datap;
*extra_size = extra->size;
return true;
}
bool validate_sve_context(struct sve_context *sve, char **err)
{
/* Size will be rounded up to a multiple of 16 bytes */
size_t regs_size
= ((SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve->vl)) + 15) / 16) * 16;
if (!sve || !err)
return false;
/* Either a bare sve_context or a sve_context followed by regs data */
if ((sve->head.size != sizeof(struct sve_context)) &&
(sve->head.size != regs_size)) {
*err = "bad size for SVE context";
return false;
}
if (!sve_vl_valid(sve->vl)) {
*err = "SVE VL invalid";
return false;
}
return true;
}
bool validate_za_context(struct za_context *za, char **err)
{
/* Size will be rounded up to a multiple of 16 bytes */
size_t regs_size
= ((ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za->vl)) + 15) / 16) * 16;
if (!za || !err)
return false;
/* Either a bare za_context or a za_context followed by regs data */
if ((za->head.size != sizeof(struct za_context)) &&
(za->head.size != regs_size)) {
*err = "bad size for ZA context";
return false;
}
if (!sve_vl_valid(za->vl)) {
*err = "SME VL in ZA context invalid";
return false;
}
return true;
}
bool validate_zt_context(struct zt_context *zt, char **err)
{
if (!zt || !err)
return false;
/* If the context is present there should be at least one register */
if (zt->nregs == 0) {
*err = "no registers";
return false;
}
/* Size should agree with the number of registers */
if (zt->head.size != ZT_SIG_CONTEXT_SIZE(zt->nregs)) {
*err = "register count does not match size";
return false;
}
return true;
}
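/*
 * Walk every record in the __reserved area of the given ucontext,
 * checking alignment and the size of each known record type. An
 * EXTRA record redirects the walk to its out-of-line data. The frame
 * is valid only if it is properly terminated, carries the mandatory
 * FPSIMD record, and has a ZA record whenever a ZT record is present.
 */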
bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err)
{
bool terminated = false;
size_t offs = 0;
int flags = 0;
int new_flags, i;
struct extra_context *extra = NULL;
struct sve_context *sve = NULL;
struct za_context *za = NULL;
struct zt_context *zt = NULL;
struct _aarch64_ctx *head =
(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
void *extra_data = NULL;
size_t extra_sz = 0;
char magic[4];
if (!err)
return false;
/* Walk till the end terminator verifying __reserved contents */
while (head && !terminated && offs < resv_sz) {
if ((uint64_t)head & 0x0fUL) {
*err = "Misaligned HEAD";
return false;
}
new_flags = 0;
switch (head->magic) {
case 0:
if (head->size) {
*err = "Bad size for terminator";
} else if (extra_data) {
/* End of main data, walking the extra data */
head = extra_data;
resv_sz = extra_sz;
offs = 0;
extra_data = NULL;
extra_sz = 0;
continue;
} else {
terminated = true;
}
break;
case FPSIMD_MAGIC:
if (flags & FPSIMD_CTX)
*err = "Multiple FPSIMD_MAGIC";
else if (head->size !=
sizeof(struct fpsimd_context))
*err = "Bad size for fpsimd_context";
new_flags |= FPSIMD_CTX;
break;
case ESR_MAGIC:
if (head->size != sizeof(struct esr_context))
*err = "Bad size for esr_context";
break;
case TPIDR2_MAGIC:
if (head->size != sizeof(struct tpidr2_context))
*err = "Bad size for tpidr2_context";
break;
case SVE_MAGIC:
if (flags & SVE_CTX)
*err = "Multiple SVE_MAGIC";
/* Size is validated in validate_sve_context() */
sve = (struct sve_context *)head;
new_flags |= SVE_CTX;
break;
case ZA_MAGIC:
if (flags & ZA_CTX)
*err = "Multiple ZA_MAGIC";
/* Size is validated in validate_za_context() */
za = (struct za_context *)head;
new_flags |= ZA_CTX;
break;
case ZT_MAGIC:
if (flags & ZT_CTX)
*err = "Multiple ZT_MAGIC";
			/* Size is validated in validate_zt_context() */
zt = (struct zt_context *)head;
new_flags |= ZT_CTX;
break;
case EXTRA_MAGIC:
if (flags & EXTRA_CTX)
*err = "Multiple EXTRA_MAGIC";
else if (head->size !=
sizeof(struct extra_context))
*err = "Bad size for extra_context";
new_flags |= EXTRA_CTX;
extra = (struct extra_context *)head;
break;
case KSFT_BAD_MAGIC:
/*
* This is a BAD magic header defined
* artificially by a testcase and surely
* unknown to the Kernel parse_user_sigframe().
* It MUST cause a Kernel induced SEGV
*/
*err = "BAD MAGIC !";
break;
default:
/*
* A still unknown Magic: potentially freshly added
* to the Kernel code and still unknown to the
* tests. Magic numbers are supposed to be allocated
* as somewhat meaningful ASCII strings so try to
* print as such as well as the raw number.
*/
memcpy(magic, &head->magic, sizeof(magic));
for (i = 0; i < sizeof(magic); i++)
if (!isalnum(magic[i]))
magic[i] = '?';
fprintf(stdout,
"SKIP Unknown MAGIC: 0x%X (%c%c%c%c) - Is KSFT arm64/signal up to date ?\n",
head->magic,
magic[3], magic[2], magic[1], magic[0]);
break;
}
if (*err)
return false;
offs += head->size;
if (resv_sz < offs + sizeof(*head)) {
*err = "HEAD Overrun";
return false;
}
if (new_flags & EXTRA_CTX)
if (!validate_extra_context(extra, err,
&extra_data, &extra_sz))
return false;
if (new_flags & SVE_CTX)
if (!validate_sve_context(sve, err))
return false;
if (new_flags & ZA_CTX)
if (!validate_za_context(za, err))
return false;
if (new_flags & ZT_CTX)
if (!validate_zt_context(zt, err))
return false;
flags |= new_flags;
head = GET_RESV_NEXT_HEAD(head);
}
if (terminated && !(flags & FPSIMD_CTX)) {
*err = "Missing FPSIMD";
return false;
}
if (terminated && (flags & ZT_CTX) && !(flags & ZA_CTX)) {
*err = "ZT context but no ZA context";
return false;
}
return true;
}
/*
* This function walks through the records inside the provided reserved area
* trying to find enough space to fit @need_sz bytes: if not enough space is
* available and an extra_context record is present, it throws away the
* extra_context record.
*
* It returns a pointer to a new header where it is possible to start storing
* our need_sz bytes.
*
* @shead: points to the start of reserved area
* @need_sz: needed bytes
* @resv_sz: reserved area size in bytes
* @offset: if not null, this will be filled with the offset of the return
* head pointer from @shead
*
* @return: pointer to a new head where to start storing need_sz bytes, or
* NULL if space could not be made available.
*/
struct _aarch64_ctx *get_starting_head(struct _aarch64_ctx *shead,
size_t need_sz, size_t resv_sz,
size_t *offset)
{
size_t offs = 0;
struct _aarch64_ctx *head;
head = get_terminator(shead, resv_sz, &offs);
	/* no terminator found: bail out without touching the offset */
if (!head)
return head;
if (resv_sz - offs < need_sz) {
fprintf(stderr, "Low on space:%zd. Discarding extra_context.\n",
resv_sz - offs);
head = get_header(shead, EXTRA_MAGIC, resv_sz, &offs);
if (!head || resv_sz - offs < need_sz) {
fprintf(stderr,
"Failed to reclaim space on sigframe.\n");
return NULL;
}
}
fprintf(stderr, "Available space:%zd\n", resv_sz - offs);
if (offset)
*offset = offs;
return head;
}
| linux-master | tools/testing/selftests/arm64/signal/testcases/testcases.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Place a fake sigframe on the stack at a misaligned SP: on sigreturn
* Kernel must spot this attempt and the test case is expected to be
* terminated via SEGV.
*/
#include <signal.h>
#include <ucontext.h>
#include "test_signals_utils.h"
#include "testcases.h"
struct fake_sigframe sf;
static int fake_sigreturn_misaligned_run(struct tdescr *td,
siginfo_t *si, ucontext_t *uc)
{
/* just to fill the ucontext_t with something real */
if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
/* Forcing sigframe on misaligned SP (16 + 3) */
fake_sigreturn(&sf, sizeof(sf), 3);
return 1;
}
struct tdescr tde = {
.name = "FAKE_SIGRETURN_MISALIGNED_SP",
.descr = "Triggers a sigreturn with a misaligned sigframe",
.sig_ok = SIGSEGV,
.timeout = 3,
.run = fake_sigreturn_misaligned_run,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_misaligned_sp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
* Verify that the streaming SVE register context in signal frames is
* set up as expected.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
static union {
ucontext_t uc;
char buf[1024 * 64];
} context;
static unsigned int vls[SVE_VQ_MAX];
unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td)
{
int vq, vl;
/*
* Enumerate up to SVE_VQ_MAX vector lengths
*/
for (vq = SVE_VQ_MAX; vq > 0; --vq) {
vl = prctl(PR_SME_SET_VL, vq * 16);
if (vl == -1)
return false;
vl &= PR_SME_VL_LEN_MASK;
/* Did we find the lowest supported VL? */
if (vq < sve_vq_from_vl(vl))
break;
/* Skip missing VLs */
vq = sve_vq_from_vl(vl);
vls[nvls++] = vl;
}
/* We need at least one VL */
if (nvls < 1) {
fprintf(stderr, "Only %d VL supported\n", nvls);
return false;
}
return true;
}
static void setup_ssve_regs(void)
{
/* smstart sm; real data is TODO */
asm volatile(".inst 0xd503437f" : : : );
}
static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc,
unsigned int vl)
{
size_t offset;
struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
struct sve_context *ssve;
int ret;
fprintf(stderr, "Testing VL %d\n", vl);
ret = prctl(PR_SME_SET_VL, vl);
if (ret != vl) {
fprintf(stderr, "Failed to set VL, got %d\n", ret);
return 1;
}
/*
* Get a signal context which should have a SVE frame and registers
* in it.
*/
setup_ssve_regs();
if (!get_current_context(td, &context.uc, sizeof(context)))
return 1;
head = get_header(head, SVE_MAGIC, GET_BUF_RESV_SIZE(context),
&offset);
if (!head) {
fprintf(stderr, "No SVE context\n");
return 1;
}
ssve = (struct sve_context *)head;
if (ssve->vl != vl) {
fprintf(stderr, "Got VL %d, expected %d\n", ssve->vl, vl);
return 1;
}
if (!(ssve->flags & SVE_SIG_FLAG_SM)) {
fprintf(stderr, "SVE_SIG_FLAG_SM not set in SVE record\n");
return 1;
}
/* The actual size validation is done in get_current_context() */
fprintf(stderr, "Got expected size %u and VL %d\n",
head->size, ssve->vl);
return 0;
}
static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
int i;
for (i = 0; i < nvls; i++) {
if (do_one_sme_vl(td, si, uc, vls[i]))
return 1;
}
td->pass = 1;
return 0;
}
struct tdescr tde = {
.name = "Streaming SVE registers",
.descr = "Check that we get the right Streaming SVE registers reported",
.feats_required = FEAT_SME,
.timeout = 3,
.init = sme_get_vls,
.run = sme_regs,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/ssve_regs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Limited
*
* Verify that using an instruction not supported in streaming mode
* traps when in streaming mode.
*/
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
#include "test_signals_utils.h"
#include "testcases.h"
int sme_trap_non_streaming_trigger(struct tdescr *td)
{
/*
* The framework will handle SIGILL so we need to exit SM to
* stop any other code triggering a further SIGILL down the
* line from using a streaming-illegal instruction.
*/
asm volatile(".inst 0xd503437f; /* SMSTART ZA */ \
cnt v0.16b, v0.16b; \
.inst 0xd503447f /* SMSTOP ZA */");
return 0;
}
int sme_trap_non_streaming_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
{
return 1;
}
struct tdescr tde = {
.name = "SME SM trap unsupported instruction",
.descr = "Check that we get a SIGILL if we use an unsupported instruction in streaming mode",
.feats_required = FEAT_SME,
.feats_incompatible = FEAT_SME_FA64,
.timeout = 3,
.sanity_disabled = true,
.trigger = sme_trap_non_streaming_trigger,
.run = sme_trap_non_streaming_run,
.sig_ok = SIGILL,
};
| linux-master | tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <asm/ioctls.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include "../kselftest.h"
static bool terminal_dup2(int duplicate, int original)
{
int ret;
ret = dup2(duplicate, original);
if (ret < 0)
return false;
return true;
}
static int terminal_set_stdfds(int fd)
{
int i;
if (fd < 0)
return 0;
for (i = 0; i < 3; i++)
if (!terminal_dup2(fd, (int[]){STDIN_FILENO, STDOUT_FILENO,
STDERR_FILENO}[i]))
return -1;
return 0;
}
static int login_pty(int fd)
{
int ret;
setsid();
ret = ioctl(fd, TIOCSCTTY, NULL);
if (ret < 0)
return -1;
ret = terminal_set_stdfds(fd);
if (ret < 0)
return -1;
if (fd > STDERR_FILENO)
close(fd);
return 0;
}
static int wait_for_pid(pid_t pid)
{
int status, ret;
again:
ret = waitpid(pid, &status, 0);
if (ret == -1) {
if (errno == EINTR)
goto again;
return -1;
}
if (ret != pid)
goto again;
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
return -1;
return 0;
}
static int resolve_procfd_symlink(int fd, char *buf, size_t buflen)
{
int ret;
char procfd[4096];
ret = snprintf(procfd, 4096, "/proc/self/fd/%d", fd);
if (ret < 0 || ret >= 4096)
return -1;
ret = readlink(procfd, buf, buflen);
if (ret < 0 || (size_t)ret >= buflen)
return -1;
buf[ret] = '\0';
return 0;
}
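/*
 * Open the pty master at @ptmx and obtain the slave end through the
 * TIOCGPTPEER ioctl, then check from a child process that the
 * /proc/<pid>/fd symlink for the slave resolves to a path under
 * @expected_procfd_contents.
 */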
static int do_tiocgptpeer(char *ptmx, char *expected_procfd_contents)
{
int ret;
int master = -1, slave = -1, fret = -1;
master = open(ptmx, O_RDWR | O_NOCTTY | O_CLOEXEC);
if (master < 0) {
fprintf(stderr, "Failed to open \"%s\": %s\n", ptmx,
strerror(errno));
return -1;
}
/*
* grantpt() makes assumptions about /dev/pts/ so ignore it. It's also
* not really needed.
*/
ret = unlockpt(master);
if (ret < 0) {
fprintf(stderr, "Failed to unlock terminal\n");
goto do_cleanup;
}
#ifdef TIOCGPTPEER
slave = ioctl(master, TIOCGPTPEER, O_RDWR | O_NOCTTY | O_CLOEXEC);
#endif
if (slave < 0) {
if (errno == EINVAL) {
fprintf(stderr, "TIOCGPTPEER is not supported. "
"Skipping test.\n");
fret = KSFT_SKIP;
} else {
fprintf(stderr,
"Failed to perform TIOCGPTPEER ioctl\n");
fret = EXIT_FAILURE;
}
goto do_cleanup;
}
pid_t pid = fork();
if (pid < 0)
goto do_cleanup;
if (pid == 0) {
char buf[4096];
ret = login_pty(slave);
if (ret < 0) {
fprintf(stderr, "Failed to setup terminal\n");
_exit(EXIT_FAILURE);
}
ret = resolve_procfd_symlink(STDIN_FILENO, buf, sizeof(buf));
if (ret < 0) {
fprintf(stderr, "Failed to retrieve pathname of pts "
"slave file descriptor\n");
_exit(EXIT_FAILURE);
}
if (strncmp(expected_procfd_contents, buf,
strlen(expected_procfd_contents)) != 0) {
fprintf(stderr, "Received invalid contents for "
"\"/proc/<pid>/fd/%d\" symlink: %s\n",
STDIN_FILENO, buf);
			_exit(EXIT_FAILURE);
}
fprintf(stderr, "Contents of \"/proc/<pid>/fd/%d\" "
"symlink are valid: %s\n", STDIN_FILENO, buf);
_exit(EXIT_SUCCESS);
}
ret = wait_for_pid(pid);
if (ret < 0)
goto do_cleanup;
fret = EXIT_SUCCESS;
do_cleanup:
if (master >= 0)
close(master);
if (slave >= 0)
close(slave);
return fret;
}
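/*
 * Mount a fresh "newinstance" devpts on a private mountpoint so that
 * TIOCGPTPEER has to resolve the slave inside that instance rather
 * than in the system-wide /dev/pts.
 */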
static int verify_non_standard_devpts_mount(void)
{
char *mntpoint;
int ret = -1;
char devpts[] = P_tmpdir "/devpts_fs_XXXXXX";
char ptmx[] = P_tmpdir "/devpts_fs_XXXXXX/ptmx";
ret = umount("/dev/pts");
if (ret < 0) {
fprintf(stderr, "Failed to unmount \"/dev/pts\": %s\n",
strerror(errno));
return -1;
}
(void)umount("/dev/ptmx");
mntpoint = mkdtemp(devpts);
if (!mntpoint) {
fprintf(stderr, "Failed to create temporary mountpoint: %s\n",
strerror(errno));
return -1;
}
ret = mount("devpts", mntpoint, "devpts", MS_NOSUID | MS_NOEXEC,
"newinstance,ptmxmode=0666,mode=0620,gid=5");
if (ret < 0) {
fprintf(stderr, "Failed to mount devpts fs to \"%s\" in new "
"mount namespace: %s\n", mntpoint,
strerror(errno));
		rmdir(mntpoint);
return -1;
}
ret = snprintf(ptmx, sizeof(ptmx), "%s/ptmx", devpts);
if (ret < 0 || (size_t)ret >= sizeof(ptmx)) {
		umount(mntpoint);
		rmdir(mntpoint);
return -1;
}
ret = do_tiocgptpeer(ptmx, mntpoint);
	umount(mntpoint);
	rmdir(mntpoint);
if (ret < 0)
return -1;
return 0;
}
static int verify_ptmx_bind_mount(void)
{
int ret;
ret = mount("/dev/pts/ptmx", "/dev/ptmx", NULL, MS_BIND, NULL);
if (ret < 0) {
fprintf(stderr, "Failed to bind mount \"/dev/pts/ptmx\" to "
"\"/dev/ptmx\" mount namespace\n");
return -1;
}
ret = do_tiocgptpeer("/dev/ptmx", "/dev/pts/");
if (ret < 0)
return -1;
return 0;
}
static int verify_invalid_ptmx_bind_mount(void)
{
int ret;
	int mntpoint_fd;
char ptmx[] = P_tmpdir "/devpts_ptmx_XXXXXX";
mntpoint_fd = mkstemp(ptmx);
if (mntpoint_fd < 0) {
fprintf(stderr, "Failed to create temporary directory: %s\n",
strerror(errno));
return -1;
}
ret = mount("/dev/pts/ptmx", ptmx, NULL, MS_BIND, NULL);
close(mntpoint_fd);
if (ret < 0) {
fprintf(stderr, "Failed to bind mount \"/dev/pts/ptmx\" to "
"\"%s\" mount namespace\n", ptmx);
return -1;
}
ret = do_tiocgptpeer(ptmx, "/dev/pts/");
if (ret == 0)
return -1;
return 0;
}
int main(int argc, char *argv[])
{
int ret;
if (!isatty(STDIN_FILENO)) {
fprintf(stderr, "Standard input file descriptor is not attached "
"to a terminal. Skipping test\n");
exit(KSFT_SKIP);
}
ret = unshare(CLONE_NEWNS);
if (ret < 0) {
fprintf(stderr, "Failed to unshare mount namespace\n");
exit(EXIT_FAILURE);
}
ret = mount("", "/", NULL, MS_PRIVATE | MS_REC, 0);
if (ret < 0) {
fprintf(stderr, "Failed to make \"/\" MS_PRIVATE in new mount "
"namespace\n");
exit(EXIT_FAILURE);
}
ret = verify_ptmx_bind_mount();
if (ret < 0)
exit(EXIT_FAILURE);
ret = verify_invalid_ptmx_bind_mount();
if (ret < 0)
exit(EXIT_FAILURE);
ret = verify_non_standard_devpts_mount();
if (ret < 0)
exit(EXIT_FAILURE);
exit(EXIT_SUCCESS);
}
| linux-master | tools/testing/selftests/filesystems/devpts_pts.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE /* needed to get the defines */
#include <fcntl.h> /* in glibc 2.2 this has the needed
values defined */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
static volatile int event_fd;
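/* With F_SETSIG in effect the kernel reports the watched fd in si_fd. */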
static void handler(int sig, siginfo_t *si, void *data)
{
event_fd = si->si_fd;
}
int main(void)
{
struct sigaction act;
int fd;
act.sa_sigaction = handler;
sigemptyset(&act.sa_mask);
act.sa_flags = SA_SIGINFO;
sigaction(SIGRTMIN + 1, &act, NULL);
fd = open(".", O_RDONLY);
fcntl(fd, F_SETSIG, SIGRTMIN + 1);
fcntl(fd, F_NOTIFY, DN_MODIFY|DN_CREATE|DN_MULTISHOT);
/* we will now be notified if any of the files
in "." is modified or new files are created */
while (1) {
pause();
printf("Got event on fd=%d\n", event_fd);
}
}
| linux-master | tools/testing/selftests/filesystems/dnotify_test.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <asm/unistd.h>
#include <linux/time_types.h>
#include <poll.h>
#include <unistd.h>
#include <assert.h>
#include <signal.h>
#include <pthread.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <sys/eventfd.h>
#include "../../kselftest_harness.h"
struct epoll_mtcontext {
int efd[3];
int sfd[4];
volatile int count;
pthread_t main;
pthread_t waiter;
};
#ifndef __NR_epoll_pwait2
#define __NR_epoll_pwait2 -1
#endif
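/*
 * Call epoll_pwait2() via the raw syscall in case the libc in use
 * does not provide a wrapper; the kernel expects the sigset size as
 * an explicit argument.
 */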
static inline int sys_epoll_pwait2(int fd, struct epoll_event *events,
int maxevents,
const struct __kernel_timespec *timeout,
const sigset_t *sigset, size_t sigsetsize)
{
return syscall(__NR_epoll_pwait2, fd, events, maxevents, timeout,
sigset, sigsetsize);
}
static void signal_handler(int signum)
{
}
static void kill_timeout(struct epoll_mtcontext *ctx)
{
usleep(1000000);
pthread_kill(ctx->main, SIGUSR1);
pthread_kill(ctx->waiter, SIGUSR1);
}
static void *waiter_entry1a(void *data)
{
struct epoll_event e;
struct epoll_mtcontext *ctx = data;
if (epoll_wait(ctx->efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx->count, 1);
return NULL;
}
static void *waiter_entry1ap(void *data)
{
struct pollfd pfd;
struct epoll_event e;
struct epoll_mtcontext *ctx = data;
pfd.fd = ctx->efd[0];
pfd.events = POLLIN;
if (poll(&pfd, 1, -1) > 0) {
if (epoll_wait(ctx->efd[0], &e, 1, 0) > 0)
__sync_fetch_and_add(&ctx->count, 1);
}
return NULL;
}
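/*
 * The "o"/"op" waiter variants OR bit 0 into the shared counter while
 * the main thread ORs bit 1, letting a test tell which of the two
 * waiters actually observed an edge-triggered event.
 */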
static void *waiter_entry1o(void *data)
{
struct epoll_event e;
struct epoll_mtcontext *ctx = data;
if (epoll_wait(ctx->efd[0], &e, 1, -1) > 0)
__sync_fetch_and_or(&ctx->count, 1);
return NULL;
}
static void *waiter_entry1op(void *data)
{
struct pollfd pfd;
struct epoll_event e;
struct epoll_mtcontext *ctx = data;
pfd.fd = ctx->efd[0];
pfd.events = POLLIN;
if (poll(&pfd, 1, -1) > 0) {
if (epoll_wait(ctx->efd[0], &e, 1, 0) > 0)
__sync_fetch_and_or(&ctx->count, 1);
}
return NULL;
}
static void *waiter_entry2a(void *data)
{
struct epoll_event events[2];
struct epoll_mtcontext *ctx = data;
if (epoll_wait(ctx->efd[0], events, 2, -1) > 0)
__sync_fetch_and_add(&ctx->count, 1);
return NULL;
}
static void *waiter_entry2ap(void *data)
{
struct pollfd pfd;
struct epoll_event events[2];
struct epoll_mtcontext *ctx = data;
pfd.fd = ctx->efd[0];
pfd.events = POLLIN;
if (poll(&pfd, 1, -1) > 0) {
if (epoll_wait(ctx->efd[0], events, 2, 0) > 0)
__sync_fetch_and_add(&ctx->count, 1);
}
return NULL;
}
static void *emitter_entry1(void *data)
{
struct epoll_mtcontext *ctx = data;
usleep(100000);
write(ctx->sfd[1], "w", 1);
kill_timeout(ctx);
return NULL;
}
static void *emitter_entry2(void *data)
{
struct epoll_mtcontext *ctx = data;
usleep(100000);
write(ctx->sfd[1], "w", 1);
write(ctx->sfd[3], "w", 1);
kill_timeout(ctx);
return NULL;
}
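/*
 * Legend for the diagrams below:
 *   t0/t1 - threads, e0/e1 - epoll instances, s0/s2 - socket pairs
 *   (ew) - epoll_wait, (p) - poll
 *   (lt) - level-triggered, (et) - edge-triggered
 */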
/*
* t0
* | (ew)
* e0
* | (lt)
* s0
*/
TEST(epoll1)
{
int efd;
int sfd[2];
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sfd), 0);
efd = epoll_create(1);
ASSERT_GE(efd, 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[0], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
EXPECT_EQ(epoll_wait(efd, &e, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd, &e, 1, 0), 1);
close(efd);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0
* | (ew)
* e0
* | (et)
* s0
*/
TEST(epoll2)
{
int efd;
int sfd[2];
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sfd), 0);
efd = epoll_create(1);
ASSERT_GE(efd, 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[0], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
EXPECT_EQ(epoll_wait(efd, &e, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd, &e, 1, 0), 0);
close(efd);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0
* | (ew)
* e0
* (lt) / \ (lt)
* s0 s2
*/
TEST(epoll3)
{
int efd;
int sfd[4];
struct epoll_event events[2];
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[2]), 0);
efd = epoll_create(1);
ASSERT_GE(efd, 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[0], events), 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[2], events), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
ASSERT_EQ(write(sfd[3], "w", 1), 1);
EXPECT_EQ(epoll_wait(efd, events, 2, 0), 2);
EXPECT_EQ(epoll_wait(efd, events, 2, 0), 2);
close(efd);
close(sfd[0]);
close(sfd[1]);
close(sfd[2]);
close(sfd[3]);
}
/*
* t0
* | (ew)
* e0
* (et) / \ (et)
* s0 s2
*/
TEST(epoll4)
{
int efd;
int sfd[4];
struct epoll_event events[2];
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[2]), 0);
efd = epoll_create(1);
ASSERT_GE(efd, 0);
events[0].events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[0], events), 0);
events[0].events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[2], events), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
ASSERT_EQ(write(sfd[3], "w", 1), 1);
EXPECT_EQ(epoll_wait(efd, events, 2, 0), 2);
EXPECT_EQ(epoll_wait(efd, events, 2, 0), 0);
close(efd);
close(sfd[0]);
close(sfd[1]);
close(sfd[2]);
close(sfd[3]);
}
/*
* t0
* | (p)
* e0
* | (lt)
* s0
*/
TEST(epoll5)
{
int efd;
int sfd[2];
struct pollfd pfd;
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[0]), 0);
efd = epoll_create(1);
ASSERT_GE(efd, 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[0], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
pfd.fd = efd;
pfd.events = POLLIN;
ASSERT_EQ(poll(&pfd, 1, 0), 1);
ASSERT_EQ(epoll_wait(efd, &e, 1, 0), 1);
pfd.fd = efd;
pfd.events = POLLIN;
ASSERT_EQ(poll(&pfd, 1, 0), 1);
ASSERT_EQ(epoll_wait(efd, &e, 1, 0), 1);
close(efd);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0
* | (p)
* e0
* | (et)
* s0
*/
TEST(epoll6)
{
int efd;
int sfd[2];
struct pollfd pfd;
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[0]), 0);
efd = epoll_create(1);
ASSERT_GE(efd, 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[0], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
pfd.fd = efd;
pfd.events = POLLIN;
ASSERT_EQ(poll(&pfd, 1, 0), 1);
ASSERT_EQ(epoll_wait(efd, &e, 1, 0), 1);
pfd.fd = efd;
pfd.events = POLLIN;
ASSERT_EQ(poll(&pfd, 1, 0), 0);
ASSERT_EQ(epoll_wait(efd, &e, 1, 0), 0);
close(efd);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0
* | (p)
* e0
* (lt) / \ (lt)
* s0 s2
*/
TEST(epoll7)
{
int efd;
int sfd[4];
struct pollfd pfd;
struct epoll_event events[2];
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[2]), 0);
efd = epoll_create(1);
ASSERT_GE(efd, 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[0], events), 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[2], events), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
ASSERT_EQ(write(sfd[3], "w", 1), 1);
pfd.fd = efd;
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd, events, 2, 0), 2);
pfd.fd = efd;
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd, events, 2, 0), 2);
close(efd);
close(sfd[0]);
close(sfd[1]);
close(sfd[2]);
close(sfd[3]);
}
/*
* t0
* | (p)
* e0
* (et) / \ (et)
* s0 s2
*/
TEST(epoll8)
{
int efd;
int sfd[4];
struct pollfd pfd;
struct epoll_event events[2];
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[2]), 0);
efd = epoll_create(1);
ASSERT_GE(efd, 0);
events[0].events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[0], events), 0);
events[0].events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[2], events), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
ASSERT_EQ(write(sfd[3], "w", 1), 1);
pfd.fd = efd;
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd, events, 2, 0), 2);
pfd.fd = efd;
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 0);
EXPECT_EQ(epoll_wait(efd, events, 2, 0), 0);
close(efd);
close(sfd[0]);
close(sfd[1]);
close(sfd[2]);
close(sfd[3]);
}
/*
* t0 t1
* (ew) \ / (ew)
* e0
* | (lt)
* s0
*/
TEST(epoll9)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) \ / (ew)
* e0
* | (et)
* s0
*/
TEST(epoll10)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 1);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) \ / (ew)
* e0
* (lt) / \ (lt)
* s0 s2
*/
TEST(epoll11)
{
pthread_t emitter;
struct epoll_event events[2];
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[2]), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[0], events), 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[2], events), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry2a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry2, &ctx), 0);
if (epoll_wait(ctx.efd[0], events, 2, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
close(ctx.sfd[2]);
close(ctx.sfd[3]);
}
/*
* t0 t1
* (ew) \ / (ew)
* e0
* (et) / \ (et)
* s0 s2
*/
TEST(epoll12)
{
pthread_t emitter;
struct epoll_event events[2];
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[2]), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
events[0].events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[0], events), 0);
events[0].events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[2], events), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry2, &ctx), 0);
if (epoll_wait(ctx.efd[0], events, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
close(ctx.sfd[2]);
close(ctx.sfd[3]);
}
/*
* t0 t1
* (ew) \ / (p)
* e0
* | (lt)
* s0
*/
TEST(epoll13)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) \ / (p)
* e0
* | (et)
* s0
*/
TEST(epoll14)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 1);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) \ / (p)
* e0
* (lt) / \ (lt)
* s0 s2
*/
TEST(epoll15)
{
pthread_t emitter;
struct epoll_event events[2];
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[2]), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[0], events), 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[2], events), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry2ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry2, &ctx), 0);
if (epoll_wait(ctx.efd[0], events, 2, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
close(ctx.sfd[2]);
close(ctx.sfd[3]);
}
/*
* t0 t1
* (ew) \ / (p)
* e0
* (et) / \ (et)
* s0 s2
*/
TEST(epoll16)
{
pthread_t emitter;
struct epoll_event events[2];
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[2]), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
events[0].events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[0], events), 0);
events[0].events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[2], events), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry2, &ctx), 0);
if (epoll_wait(ctx.efd[0], events, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
close(ctx.sfd[2]);
close(ctx.sfd[3]);
}
/*
* t0
* | (ew)
* e0
* | (lt)
* e1
* | (lt)
* s0
*/
TEST(epoll17)
{
int efd[2];
int sfd[2];
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sfd), 0);
efd[0] = epoll_create(1);
ASSERT_GE(efd[0], 0);
efd[1] = epoll_create(1);
ASSERT_GE(efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 1);
close(efd[0]);
close(efd[1]);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0
* | (ew)
* e0
* | (lt)
* e1
* | (et)
* s0
*/
TEST(epoll18)
{
int efd[2];
int sfd[2];
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sfd), 0);
efd[0] = epoll_create(1);
ASSERT_GE(efd[0], 0);
efd[1] = epoll_create(1);
ASSERT_GE(efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 1);
close(efd[0]);
close(efd[1]);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0
* | (ew)
* e0
* | (et)
* e1
* | (lt)
* s0
*/
TEST(epoll19)
{
int efd[2];
int sfd[2];
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sfd), 0);
efd[0] = epoll_create(1);
ASSERT_GE(efd[0], 0);
efd[1] = epoll_create(1);
ASSERT_GE(efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 0);
close(efd[0]);
close(efd[1]);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0
* | (ew)
* e0
* | (et)
* e1
* | (et)
* s0
*/
TEST(epoll20)
{
int efd[2];
int sfd[2];
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sfd), 0);
efd[0] = epoll_create(1);
ASSERT_GE(efd[0], 0);
efd[1] = epoll_create(1);
ASSERT_GE(efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 0);
close(efd[0]);
close(efd[1]);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0
* | (p)
* e0
* | (lt)
* e1
* | (lt)
* s0
*/
TEST(epoll21)
{
int efd[2];
int sfd[2];
struct pollfd pfd;
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sfd), 0);
efd[0] = epoll_create(1);
ASSERT_GE(efd[0], 0);
efd[1] = epoll_create(1);
ASSERT_GE(efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
pfd.fd = efd[0];
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 1);
pfd.fd = efd[0];
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 1);
close(efd[0]);
close(efd[1]);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0
* | (p)
* e0
* | (lt)
* e1
* | (et)
* s0
*/
TEST(epoll22)
{
int efd[2];
int sfd[2];
struct pollfd pfd;
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sfd), 0);
efd[0] = epoll_create(1);
ASSERT_GE(efd[0], 0);
efd[1] = epoll_create(1);
ASSERT_GE(efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
pfd.fd = efd[0];
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 1);
pfd.fd = efd[0];
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 1);
close(efd[0]);
close(efd[1]);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0
* | (p)
* e0
* | (et)
* e1
* | (lt)
* s0
*/
TEST(epoll23)
{
int efd[2];
int sfd[2];
struct pollfd pfd;
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sfd), 0);
efd[0] = epoll_create(1);
ASSERT_GE(efd[0], 0);
efd[1] = epoll_create(1);
ASSERT_GE(efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
pfd.fd = efd[0];
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 1);
pfd.fd = efd[0];
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 0);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 0);
close(efd[0]);
close(efd[1]);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0
* | (p)
* e0
* | (et)
* e1
* | (et)
* s0
*/
TEST(epoll24)
{
int efd[2];
int sfd[2];
struct pollfd pfd;
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sfd), 0);
efd[0] = epoll_create(1);
ASSERT_GE(efd[0], 0);
efd[1] = epoll_create(1);
ASSERT_GE(efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
pfd.fd = efd[0];
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 1);
pfd.fd = efd[0];
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 0);
EXPECT_EQ(epoll_wait(efd[0], &e, 1, 0), 0);
close(efd[0]);
close(efd[1]);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0 t1
* (ew) \ / (ew)
* e0
* | (lt)
* e1
* | (lt)
* s0
*/
TEST(epoll25)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) \ / (ew)
* e0
* | (lt)
* e1
* | (et)
* s0
*/
TEST(epoll26)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) \ / (ew)
* e0
* | (et)
* e1
* | (lt)
* s0
*/
TEST(epoll27)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 1);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) \ / (ew)
* e0
* | (et)
* e1
* | (et)
* s0
*/
TEST(epoll28)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 1);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) \ / (p)
* e0
* | (lt)
* e1
* | (lt)
* s0
*/
TEST(epoll29)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
	close(ctx.efd[0]);
	close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) \ / (p)
* e0
* | (lt)
* e1
* | (et)
* s0
*/
TEST(epoll30)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
	close(ctx.efd[0]);
	close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) \ / (p)
* e0
* | (et)
* e1
* | (lt)
* s0
*/
TEST(epoll31)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 1);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
	close(ctx.efd[0]);
	close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) \ / (p)
* e0
* | (et)
* e1
* | (et)
* s0
*/
TEST(epoll32)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 1);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
	close(ctx.efd[0]);
	close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) | | (ew)
* | e0
* \ / (lt)
* e1
* | (lt)
* s0
*/
TEST(epoll33)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[1], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) | | (ew)
* | e0
* \ / (lt)
* e1
* | (et)
* s0
*/
TEST(epoll34)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1o, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[1], &e, 1, -1) > 0)
__sync_fetch_and_or(&ctx.count, 2);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_TRUE((ctx.count == 2) || (ctx.count == 3));
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) | | (ew)
* | e0
* \ / (et)
* e1
* | (lt)
* s0
*/
TEST(epoll35)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[1], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
if (pthread_tryjoin_np(emitter, NULL) < 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) | | (ew)
* | e0
* \ / (et)
* e1
* | (et)
* s0
*/
TEST(epoll36)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1o, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[1], &e, 1, -1) > 0)
__sync_fetch_and_or(&ctx.count, 2);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_TRUE((ctx.count == 2) || (ctx.count == 3));
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (p) | | (ew)
* | e0
* \ / (lt)
* e1
* | (lt)
* s0
*/
TEST(epoll37)
{
pthread_t emitter;
struct pollfd pfd;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
pfd.fd = ctx.efd[1];
pfd.events = POLLIN;
if (poll(&pfd, 1, -1) > 0) {
if (epoll_wait(ctx.efd[1], &e, 1, 0) > 0)
__sync_fetch_and_add(&ctx.count, 1);
}
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (p) | | (ew)
* | e0
* \ / (lt)
* e1
* | (et)
* s0
*/
TEST(epoll38)
{
pthread_t emitter;
struct pollfd pfd;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1o, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
pfd.fd = ctx.efd[1];
pfd.events = POLLIN;
if (poll(&pfd, 1, -1) > 0) {
if (epoll_wait(ctx.efd[1], &e, 1, 0) > 0)
__sync_fetch_and_or(&ctx.count, 2);
}
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_TRUE((ctx.count == 2) || (ctx.count == 3));
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (p) | | (ew)
* | e0
* \ / (et)
* e1
* | (lt)
* s0
*/
TEST(epoll39)
{
pthread_t emitter;
struct pollfd pfd;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
pfd.fd = ctx.efd[1];
pfd.events = POLLIN;
if (poll(&pfd, 1, -1) > 0) {
if (epoll_wait(ctx.efd[1], &e, 1, 0) > 0)
__sync_fetch_and_add(&ctx.count, 1);
}
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (p) | | (ew)
* | e0
* \ / (et)
* e1
* | (et)
* s0
*/
TEST(epoll40)
{
pthread_t emitter;
struct pollfd pfd;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1o, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
pfd.fd = ctx.efd[1];
pfd.events = POLLIN;
if (poll(&pfd, 1, -1) > 0) {
if (epoll_wait(ctx.efd[1], &e, 1, 0) > 0)
__sync_fetch_and_or(&ctx.count, 2);
}
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_TRUE((ctx.count == 2) || (ctx.count == 3));
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) | | (p)
* | e0
* \ / (lt)
* e1
* | (lt)
* s0
*/
TEST(epoll41)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[1], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) | | (p)
* | e0
* \ / (lt)
* e1
* | (et)
* s0
*/
TEST(epoll42)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1op, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[1], &e, 1, -1) > 0)
__sync_fetch_and_or(&ctx.count, 2);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_TRUE((ctx.count == 2) || (ctx.count == 3));
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) | | (p)
* | e0
* \ / (et)
* e1
* | (lt)
* s0
*/
TEST(epoll43)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[1], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (ew) | | (p)
* | e0
* \ / (et)
* e1
* | (et)
* s0
*/
TEST(epoll44)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1op, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[1], &e, 1, -1) > 0)
__sync_fetch_and_or(&ctx.count, 2);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_TRUE((ctx.count == 2) || (ctx.count == 3));
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (p) | | (p)
* | e0
* \ / (lt)
* e1
* | (lt)
* s0
*/
TEST(epoll45)
{
pthread_t emitter;
struct pollfd pfd;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
pfd.fd = ctx.efd[1];
pfd.events = POLLIN;
if (poll(&pfd, 1, -1) > 0) {
if (epoll_wait(ctx.efd[1], &e, 1, 0) > 0)
__sync_fetch_and_add(&ctx.count, 1);
}
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (p) | | (p)
* | e0
* \ / (lt)
* e1
* | (et)
* s0
*/
TEST(epoll46)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1op, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[1], &e, 1, -1) > 0)
__sync_fetch_and_or(&ctx.count, 2);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_TRUE((ctx.count == 2) || (ctx.count == 3));
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (p) | | (p)
* | e0
* \ / (et)
* e1
* | (lt)
* s0
*/
TEST(epoll47)
{
pthread_t emitter;
struct pollfd pfd;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
pfd.fd = ctx.efd[1];
pfd.events = POLLIN;
if (poll(&pfd, 1, -1) > 0) {
if (epoll_wait(ctx.efd[1], &e, 1, 0) > 0)
__sync_fetch_and_add(&ctx.count, 1);
}
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0 t1
* (p) | | (p)
* | e0
* \ / (et)
* e1
* | (et)
* s0
*/
TEST(epoll48)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1op, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry1, &ctx), 0);
if (epoll_wait(ctx.efd[1], &e, 1, -1) > 0)
__sync_fetch_and_or(&ctx.count, 2);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_TRUE((ctx.count == 2) || (ctx.count == 3));
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
/*
* t0
* | (ew)
* e0
* (lt) / \ (lt)
* e1 e2
* (lt) | | (lt)
* s0 s2
*/
TEST(epoll49)
{
int efd[3];
int sfd[4];
struct epoll_event events[2];
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[2]), 0);
efd[0] = epoll_create(1);
ASSERT_GE(efd[0], 0);
efd[1] = epoll_create(1);
ASSERT_GE(efd[1], 0);
efd[2] = epoll_create(1);
ASSERT_GE(efd[2], 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], events), 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[2], EPOLL_CTL_ADD, sfd[2], events), 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], events), 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[2], events), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
ASSERT_EQ(write(sfd[3], "w", 1), 1);
EXPECT_EQ(epoll_wait(efd[0], events, 2, 0), 2);
EXPECT_EQ(epoll_wait(efd[0], events, 2, 0), 2);
close(efd[0]);
close(efd[1]);
close(efd[2]);
close(sfd[0]);
close(sfd[1]);
close(sfd[2]);
close(sfd[3]);
}
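/*
 * epoll49 keeps every link level-triggered, so the unread byte on each
 * socket keeps the whole chain ready and the second epoll_wait() again
 * reports two events. epoll50 below switches the outer links to
 * edge-triggered, so the first wait consumes the readiness and the
 * second returns zero.
 */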
/*
* t0
* | (ew)
* e0
* (et) / \ (et)
* e1 e2
* (lt) | | (lt)
* s0 s2
*/
TEST(epoll50)
{
int efd[3];
int sfd[4];
struct epoll_event events[2];
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[2]), 0);
efd[0] = epoll_create(1);
ASSERT_GE(efd[0], 0);
efd[1] = epoll_create(1);
ASSERT_GE(efd[1], 0);
efd[2] = epoll_create(1);
ASSERT_GE(efd[2], 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], events), 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[2], EPOLL_CTL_ADD, sfd[2], events), 0);
events[0].events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], events), 0);
events[0].events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[2], events), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
ASSERT_EQ(write(sfd[3], "w", 1), 1);
EXPECT_EQ(epoll_wait(efd[0], events, 2, 0), 2);
EXPECT_EQ(epoll_wait(efd[0], events, 2, 0), 0);
close(efd[0]);
close(efd[1]);
close(efd[2]);
close(sfd[0]);
close(sfd[1]);
close(sfd[2]);
close(sfd[3]);
}
/*
* t0
* | (p)
* e0
* (lt) / \ (lt)
* e1 e2
* (lt) | | (lt)
* s0 s2
*/
TEST(epoll51)
{
int efd[3];
int sfd[4];
struct pollfd pfd;
struct epoll_event events[2];
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[2]), 0);
efd[0] = epoll_create(1);
ASSERT_GE(efd[0], 0);
efd[1] = epoll_create(1);
ASSERT_GE(efd[1], 0);
efd[2] = epoll_create(1);
ASSERT_GE(efd[2], 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], events), 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[2], EPOLL_CTL_ADD, sfd[2], events), 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], events), 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[2], events), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
ASSERT_EQ(write(sfd[3], "w", 1), 1);
pfd.fd = efd[0];
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], events, 2, 0), 2);
pfd.fd = efd[0];
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], events, 2, 0), 2);
close(efd[0]);
close(efd[1]);
close(efd[2]);
close(sfd[0]);
close(sfd[1]);
close(sfd[2]);
close(sfd[3]);
}
/*
* t0
* | (p)
* e0
* (et) / \ (et)
* e1 e2
* (lt) | | (lt)
* s0 s2
*/
TEST(epoll52)
{
int efd[3];
int sfd[4];
struct pollfd pfd;
struct epoll_event events[2];
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &sfd[2]), 0);
efd[0] = epoll_create(1);
ASSERT_GE(efd[0], 0);
efd[1] = epoll_create(1);
ASSERT_GE(efd[1], 0);
efd[2] = epoll_create(1);
ASSERT_GE(efd[2], 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[1], EPOLL_CTL_ADD, sfd[0], events), 0);
events[0].events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd[2], EPOLL_CTL_ADD, sfd[2], events), 0);
events[0].events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[1], events), 0);
events[0].events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[2], events), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
ASSERT_EQ(write(sfd[3], "w", 1), 1);
pfd.fd = efd[0];
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 1);
EXPECT_EQ(epoll_wait(efd[0], events, 2, 0), 2);
pfd.fd = efd[0];
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 0), 0);
EXPECT_EQ(epoll_wait(efd[0], events, 2, 0), 0);
close(efd[0]);
close(efd[1]);
close(efd[2]);
close(sfd[0]);
close(sfd[1]);
close(sfd[2]);
close(sfd[3]);
}
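/*
 * epoll51 and epoll52 repeat the same contrast through poll(): with
 * level-triggered outer links the epoll fd stays readable across
 * repeated polls, while with edge-triggered outer links the second
 * poll() and epoll_wait() both report nothing until new data arrives.
 */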
/*
* t0 t1
* (ew) \ / (ew)
* e0
* (lt) / \ (lt)
* e1 e2
* (lt) | | (lt)
* s0 s2
*/
TEST(epoll53)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[2]), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
ctx.efd[2] = epoll_create(1);
ASSERT_GE(ctx.efd[2], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[2], EPOLL_CTL_ADD, ctx.sfd[2], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[2], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry2, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.efd[2]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
close(ctx.sfd[2]);
close(ctx.sfd[3]);
}
/*
* t0 t1
* (ew) \ / (ew)
* e0
* (et) / \ (et)
* e1 e2
* (lt) | | (lt)
* s0 s2
*/
TEST(epoll54)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[2]), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
ctx.efd[2] = epoll_create(1);
ASSERT_GE(ctx.efd[2], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[2], EPOLL_CTL_ADD, ctx.sfd[2], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[2], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry2, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.efd[2]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
close(ctx.sfd[2]);
close(ctx.sfd[3]);
}
/*
* t0 t1
* (ew) \ / (p)
* e0
* (lt) / \ (lt)
* e1 e2
* (lt) | | (lt)
* s0 s2
*/
TEST(epoll55)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[2]), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
ctx.efd[2] = epoll_create(1);
ASSERT_GE(ctx.efd[2], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[2], EPOLL_CTL_ADD, ctx.sfd[2], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[2], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry2, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.efd[2]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
close(ctx.sfd[2]);
close(ctx.sfd[3]);
}
/*
* t0 t1
* (ew) \ / (p)
* e0
* (et) / \ (et)
* e1 e2
* (lt) | | (lt)
* s0 s2
*/
TEST(epoll56)
{
pthread_t emitter;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[2]), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
ctx.efd[2] = epoll_create(1);
ASSERT_GE(ctx.efd[2], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[2], EPOLL_CTL_ADD, ctx.sfd[2], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[2], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry2, &ctx), 0);
if (epoll_wait(ctx.efd[0], &e, 1, -1) > 0)
__sync_fetch_and_add(&ctx.count, 1);
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.efd[2]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
close(ctx.sfd[2]);
close(ctx.sfd[3]);
}
/*
* t0 t1
* (p) \ / (p)
* e0
* (lt) / \ (lt)
* e1 e2
* (lt) | | (lt)
* s0 s2
*/
TEST(epoll57)
{
pthread_t emitter;
struct pollfd pfd;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[2]), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
ctx.efd[2] = epoll_create(1);
ASSERT_GE(ctx.efd[2], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[2], EPOLL_CTL_ADD, ctx.sfd[2], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[2], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry2, &ctx), 0);
pfd.fd = ctx.efd[0];
pfd.events = POLLIN;
if (poll(&pfd, 1, -1) > 0) {
if (epoll_wait(ctx.efd[0], &e, 1, 0) > 0)
__sync_fetch_and_add(&ctx.count, 1);
}
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.efd[2]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
close(ctx.sfd[2]);
close(ctx.sfd[3]);
}
/*
* t0 t1
* (p) \ / (p)
* e0
* (et) / \ (et)
* e1 e2
* (lt) | | (lt)
* s0 s2
*/
TEST(epoll58)
{
pthread_t emitter;
struct pollfd pfd;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[0]), 0);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx.sfd[2]), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
ctx.efd[1] = epoll_create(1);
ASSERT_GE(ctx.efd[1], 0);
ctx.efd[2] = epoll_create(1);
ASSERT_GE(ctx.efd[2], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[1], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[2], EPOLL_CTL_ADD, ctx.sfd[2], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[1], &e), 0);
e.events = EPOLLIN | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.efd[2], &e), 0);
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1ap, &ctx), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, emitter_entry2, &ctx), 0);
pfd.fd = ctx.efd[0];
pfd.events = POLLIN;
if (poll(&pfd, 1, -1) > 0) {
if (epoll_wait(ctx.efd[0], &e, 1, 0) > 0)
__sync_fetch_and_add(&ctx.count, 1);
}
ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0);
EXPECT_EQ(ctx.count, 2);
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.efd[1]);
close(ctx.efd[2]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
close(ctx.sfd[2]);
close(ctx.sfd[3]);
}
static void *epoll59_thread(void *ctx_)
{
struct epoll_mtcontext *ctx = ctx_;
struct epoll_event e;
int i;
for (i = 0; i < 100000; i++) {
while (ctx->count == 0)
;
e.events = EPOLLIN | EPOLLERR | EPOLLET;
epoll_ctl(ctx->efd[0], EPOLL_CTL_MOD, ctx->sfd[0], &e);
ctx->count = 0;
}
return NULL;
}
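/*
 * epoll59_thread spins until the main loop signals a consumed wakeup
 * (ctx->count != 0), re-arms the edge-triggered eventfd via
 * EPOLL_CTL_MOD and hands the turn back by clearing the counter,
 * racing the MOD against the concurrent epoll_wait() in the test body.
 */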
/*
* t0
* (p) \
* e0
* (et) /
* e0
*
* Based on https://bugzilla.kernel.org/show_bug.cgi?id=205933
*/
TEST(epoll59)
{
pthread_t emitter;
struct pollfd pfd;
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
int i, ret;
signal(SIGUSR1, signal_handler);
ctx.efd[0] = epoll_create1(0);
ASSERT_GE(ctx.efd[0], 0);
ctx.sfd[0] = eventfd(1, 0);
ASSERT_GE(ctx.sfd[0], 0);
e.events = EPOLLIN | EPOLLERR | EPOLLET;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
ASSERT_EQ(pthread_create(&emitter, NULL, epoll59_thread, &ctx), 0);
for (i = 0; i < 100000; i++) {
ret = epoll_wait(ctx.efd[0], &e, 1, 1000);
ASSERT_GT(ret, 0);
while (ctx.count != 0)
;
ctx.count = 1;
}
	if (pthread_tryjoin_np(emitter, NULL) != 0) {
pthread_kill(emitter, SIGUSR1);
pthread_join(emitter, NULL);
}
close(ctx.efd[0]);
close(ctx.sfd[0]);
}
enum {
EPOLL60_EVENTS_NR = 10,
};
struct epoll60_ctx {
volatile int stopped;
int ready;
int waiters;
int epfd;
int evfd[EPOLL60_EVENTS_NR];
};
static void *epoll60_wait_thread(void *ctx_)
{
struct epoll60_ctx *ctx = ctx_;
struct epoll_event e;
sigset_t sigmask;
uint64_t v;
int ret;
/* Block SIGUSR1 */
sigemptyset(&sigmask);
sigaddset(&sigmask, SIGUSR1);
sigprocmask(SIG_SETMASK, &sigmask, NULL);
/* Prepare empty mask for epoll_pwait() */
sigemptyset(&sigmask);
while (!ctx->stopped) {
/* Mark we are ready */
__atomic_fetch_add(&ctx->ready, 1, __ATOMIC_ACQUIRE);
/* Start when all are ready */
while (__atomic_load_n(&ctx->ready, __ATOMIC_ACQUIRE) &&
!ctx->stopped);
/* Account this waiter */
__atomic_fetch_add(&ctx->waiters, 1, __ATOMIC_ACQUIRE);
ret = epoll_pwait(ctx->epfd, &e, 1, 2000, &sigmask);
if (ret != 1) {
/* We expect only signal delivery on stop */
assert(ret < 0 && errno == EINTR && "Lost wakeup!\n");
assert(ctx->stopped);
break;
}
ret = read(e.data.fd, &v, sizeof(v));
		/* Since we are in ET mode, each thread gets its own fd. */
assert(ret == sizeof(v));
__atomic_fetch_sub(&ctx->waiters, 1, __ATOMIC_RELEASE);
}
return NULL;
}
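/*
 * Handshake used above: every waiter bumps ctx->ready and spins until
 * the main thread zeroes the counter, releasing the whole group at
 * once; each waiter then registers in ctx->waiters before blocking in
 * epoll_pwait(). The main loop in epoll60 relies on ctx->waiters
 * draining back to zero to detect that no wakeup was lost.
 */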
static inline unsigned long long msecs(void)
{
struct timespec ts;
unsigned long long msecs;
clock_gettime(CLOCK_REALTIME, &ts);
msecs = ts.tv_sec * 1000ull;
msecs += ts.tv_nsec / 1000000ull;
return msecs;
}
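/*
 * msecs() is a coarse wall-clock helper; CLOCK_REALTIME can jump if the
 * system clock is stepped, which is tolerable for the loose bounds it
 * backs in epoll60 and epoll63.
 */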
static inline int count_waiters(struct epoll60_ctx *ctx)
{
return __atomic_load_n(&ctx->waiters, __ATOMIC_ACQUIRE);
}
TEST(epoll60)
{
struct epoll60_ctx ctx = { 0 };
pthread_t waiters[ARRAY_SIZE(ctx.evfd)];
struct epoll_event e;
int i, n, ret;
signal(SIGUSR1, signal_handler);
ctx.epfd = epoll_create1(0);
ASSERT_GE(ctx.epfd, 0);
/* Create event fds */
for (i = 0; i < ARRAY_SIZE(ctx.evfd); i++) {
ctx.evfd[i] = eventfd(0, EFD_NONBLOCK);
ASSERT_GE(ctx.evfd[i], 0);
e.events = EPOLLIN | EPOLLET;
e.data.fd = ctx.evfd[i];
ASSERT_EQ(epoll_ctl(ctx.epfd, EPOLL_CTL_ADD, ctx.evfd[i], &e), 0);
}
/* Create waiter threads */
for (i = 0; i < ARRAY_SIZE(waiters); i++)
ASSERT_EQ(pthread_create(&waiters[i], NULL,
epoll60_wait_thread, &ctx), 0);
for (i = 0; i < 300; i++) {
uint64_t v = 1, ms;
/* Wait for all to be ready */
while (__atomic_load_n(&ctx.ready, __ATOMIC_ACQUIRE) !=
ARRAY_SIZE(ctx.evfd))
;
/* Steady, go */
__atomic_fetch_sub(&ctx.ready, ARRAY_SIZE(ctx.evfd),
__ATOMIC_ACQUIRE);
/* Wait all have gone to kernel */
while (count_waiters(&ctx) != ARRAY_SIZE(ctx.evfd))
;
/* 1ms should be enough to schedule away */
usleep(1000);
/* Quickly signal all handles at once */
for (n = 0; n < ARRAY_SIZE(ctx.evfd); n++) {
ret = write(ctx.evfd[n], &v, sizeof(v));
ASSERT_EQ(ret, sizeof(v));
}
/* Busy loop for 1s and wait for all waiters to wake up */
ms = msecs();
while (count_waiters(&ctx) && msecs() < ms + 1000)
;
ASSERT_EQ(count_waiters(&ctx), 0);
}
ctx.stopped = 1;
/* Stop waiters */
for (i = 0; i < ARRAY_SIZE(waiters); i++)
		pthread_kill(waiters[i], SIGUSR1);
for (i = 0; i < ARRAY_SIZE(waiters); i++)
pthread_join(waiters[i], NULL);
	for (i = 0; i < ARRAY_SIZE(ctx.evfd); i++)
close(ctx.evfd[i]);
close(ctx.epfd);
}
struct epoll61_ctx {
int epfd;
int evfd;
};
static void *epoll61_write_eventfd(void *ctx_)
{
struct epoll61_ctx *ctx = ctx_;
int64_t l = 1;
usleep(10950);
write(ctx->evfd, &l, sizeof(l));
return NULL;
}
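/*
 * The 10.95 ms sleep places the eventfd write just before the 11 ms
 * epoll timeout in the second thread expires, maximising the window for
 * the wakeup-versus-timeout race this test targets.
 */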
static void *epoll61_epoll_with_timeout(void *ctx_)
{
struct epoll61_ctx *ctx = ctx_;
struct epoll_event events[1];
int n;
n = epoll_wait(ctx->epfd, events, 1, 11);
/*
* If epoll returned the eventfd, write on the eventfd to wake up the
* blocking poller.
*/
if (n == 1) {
int64_t l = 1;
write(ctx->evfd, &l, sizeof(l));
}
return NULL;
}
static void *epoll61_blocking_epoll(void *ctx_)
{
struct epoll61_ctx *ctx = ctx_;
struct epoll_event events[1];
epoll_wait(ctx->epfd, events, 1, -1);
return NULL;
}
TEST(epoll61)
{
struct epoll61_ctx ctx;
struct epoll_event ev;
int i, r;
ctx.epfd = epoll_create1(0);
ASSERT_GE(ctx.epfd, 0);
ctx.evfd = eventfd(0, EFD_NONBLOCK);
ASSERT_GE(ctx.evfd, 0);
ev.events = EPOLLIN | EPOLLET | EPOLLERR | EPOLLHUP;
ev.data.ptr = NULL;
r = epoll_ctl(ctx.epfd, EPOLL_CTL_ADD, ctx.evfd, &ev);
ASSERT_EQ(r, 0);
/*
* We are testing a race. Repeat the test case 1000 times to make it
* more likely to fail in case of a bug.
*/
for (i = 0; i < 1000; i++) {
pthread_t threads[3];
int n;
/*
* Start 3 threads:
		 * Thread 1 sleeps for 10.9ms and writes to the eventfd.
* Thread 2 calls epoll with a timeout of 11ms.
* Thread 3 calls epoll with a timeout of -1.
*
* The eventfd write by Thread 1 should either wakeup Thread 2
* or Thread 3. If it wakes up Thread 2, Thread 2 writes on the
* eventfd to wake up Thread 3.
*
* If no events are missed, all three threads should eventually
* be joinable.
*/
ASSERT_EQ(pthread_create(&threads[0], NULL,
epoll61_write_eventfd, &ctx), 0);
ASSERT_EQ(pthread_create(&threads[1], NULL,
epoll61_epoll_with_timeout, &ctx), 0);
ASSERT_EQ(pthread_create(&threads[2], NULL,
epoll61_blocking_epoll, &ctx), 0);
for (n = 0; n < ARRAY_SIZE(threads); ++n)
ASSERT_EQ(pthread_join(threads[n], NULL), 0);
}
close(ctx.epfd);
close(ctx.evfd);
}
/* Equivalent to basic test epoll1, but exercising epoll_pwait2. */
TEST(epoll62)
{
int efd;
int sfd[2];
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sfd), 0);
efd = epoll_create(1);
ASSERT_GE(efd, 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[0], &e), 0);
ASSERT_EQ(write(sfd[1], "w", 1), 1);
EXPECT_EQ(sys_epoll_pwait2(efd, &e, 1, NULL, NULL, 0), 1);
EXPECT_EQ(sys_epoll_pwait2(efd, &e, 1, NULL, NULL, 0), 1);
close(efd);
close(sfd[0]);
close(sfd[1]);
}
/* Epoll_pwait2 basic timeout test. */
TEST(epoll63)
{
const int cfg_delay_ms = 10;
unsigned long long tdiff;
struct __kernel_timespec ts;
int efd;
int sfd[2];
struct epoll_event e;
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sfd), 0);
efd = epoll_create(1);
ASSERT_GE(efd, 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(efd, EPOLL_CTL_ADD, sfd[0], &e), 0);
ts.tv_sec = 0;
ts.tv_nsec = cfg_delay_ms * 1000 * 1000;
tdiff = msecs();
EXPECT_EQ(sys_epoll_pwait2(efd, &e, 1, &ts, NULL, 0), 0);
tdiff = msecs() - tdiff;
EXPECT_GE(tdiff, cfg_delay_ms);
close(efd);
close(sfd[0]);
close(sfd[1]);
}
/*
* t0 t1
* (ew) \ / (ew)
* e0
* | (lt)
* s0
*/
TEST(epoll64)
{
pthread_t waiter[2];
struct epoll_event e;
struct epoll_mtcontext ctx = { 0 };
signal(SIGUSR1, signal_handler);
ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0);
ctx.efd[0] = epoll_create(1);
ASSERT_GE(ctx.efd[0], 0);
e.events = EPOLLIN;
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
/*
	 * main will act as the emitter once both waiter threads are
	 * blocked; both waiters are expected to be awoken by the single
	 * ready event.
*/
ctx.main = pthread_self();
ASSERT_EQ(pthread_create(&waiter[0], NULL, waiter_entry1a, &ctx), 0);
ASSERT_EQ(pthread_create(&waiter[1], NULL, waiter_entry1a, &ctx), 0);
usleep(100000);
ASSERT_EQ(write(ctx.sfd[1], "w", 1), 1);
ASSERT_EQ(pthread_join(waiter[0], NULL), 0);
ASSERT_EQ(pthread_join(waiter[1], NULL), 0);
EXPECT_EQ(ctx.count, 2);
close(ctx.efd[0]);
close(ctx.sfd[0]);
close(ctx.sfd[1]);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/fsuid.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <linux/android/binder.h>
#include <linux/android/binderfs.h>
#include "../../kselftest_harness.h"
#define DEFAULT_THREADS 4
#define PTR_TO_INT(p) ((int)((intptr_t)(p)))
#define INT_TO_PTR(u) ((void *)((intptr_t)(u)))
#define close_prot_errno_disarm(fd) \
if (fd >= 0) { \
int _e_ = errno; \
close(fd); \
errno = _e_; \
fd = -EBADF; \
}
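/*
 * close_prot_errno_disarm() closes an fd without clobbering errno, so
 * callers can still report the original failure, and poisons the
 * variable with -EBADF to catch accidental reuse.
 */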
static void change_mountns(struct __test_metadata *_metadata)
{
int ret;
ret = unshare(CLONE_NEWNS);
ASSERT_EQ(ret, 0) {
TH_LOG("%s - Failed to unshare mount namespace",
strerror(errno));
}
ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
ASSERT_EQ(ret, 0) {
TH_LOG("%s - Failed to mount / as private",
strerror(errno));
}
}
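/*
 * change_mountns() detaches the test from the host's mount table:
 * after unsharing the mount namespace, remounting / with
 * MS_REC | MS_PRIVATE keeps the binderfs mounts created below from
 * propagating back to the host.
 */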
static int __do_binderfs_test(struct __test_metadata *_metadata)
{
int fd, ret, saved_errno, result = 1;
struct binderfs_device device = { 0 };
struct binder_version version = { 0 };
char binderfs_mntpt[] = P_tmpdir "/binderfs_XXXXXX",
device_path[sizeof(P_tmpdir "/binderfs_XXXXXX/") + BINDERFS_MAX_NAME];
static const char * const binder_features[] = {
"oneway_spam_detection",
"extended_error",
};
change_mountns(_metadata);
EXPECT_NE(mkdtemp(binderfs_mntpt), NULL) {
TH_LOG("%s - Failed to create binderfs mountpoint",
strerror(errno));
goto out;
}
ret = mount(NULL, binderfs_mntpt, "binder", 0, 0);
EXPECT_EQ(ret, 0) {
if (errno == ENODEV)
SKIP(goto out, "binderfs missing");
TH_LOG("%s - Failed to mount binderfs", strerror(errno));
goto rmdir;
}
/* success: binderfs mounted */
memcpy(device.name, "my-binder", strlen("my-binder"));
snprintf(device_path, sizeof(device_path), "%s/binder-control", binderfs_mntpt);
fd = open(device_path, O_RDONLY | O_CLOEXEC);
EXPECT_GE(fd, 0) {
TH_LOG("%s - Failed to open binder-control device",
strerror(errno));
goto umount;
}
ret = ioctl(fd, BINDER_CTL_ADD, &device);
saved_errno = errno;
close(fd);
errno = saved_errno;
EXPECT_GE(ret, 0) {
TH_LOG("%s - Failed to allocate new binder device",
strerror(errno));
goto umount;
}
TH_LOG("Allocated new binder device with major %d, minor %d, and name %s",
device.major, device.minor, device.name);
/* success: binder device allocation */
snprintf(device_path, sizeof(device_path), "%s/my-binder", binderfs_mntpt);
fd = open(device_path, O_CLOEXEC | O_RDONLY);
EXPECT_GE(fd, 0) {
TH_LOG("%s - Failed to open my-binder device",
strerror(errno));
goto umount;
}
ret = ioctl(fd, BINDER_VERSION, &version);
saved_errno = errno;
close(fd);
errno = saved_errno;
EXPECT_GE(ret, 0) {
TH_LOG("%s - Failed to open perform BINDER_VERSION request",
strerror(errno));
goto umount;
}
TH_LOG("Detected binder version: %d", version.protocol_version);
/* success: binder transaction with binderfs binder device */
ret = unlink(device_path);
EXPECT_EQ(ret, 0) {
TH_LOG("%s - Failed to delete binder device",
strerror(errno));
goto umount;
}
/* success: binder device removal */
snprintf(device_path, sizeof(device_path), "%s/binder-control", binderfs_mntpt);
ret = unlink(device_path);
EXPECT_NE(ret, 0) {
TH_LOG("Managed to delete binder-control device");
goto umount;
}
EXPECT_EQ(errno, EPERM) {
TH_LOG("%s - Failed to delete binder-control device but exited with unexpected error code",
strerror(errno));
goto umount;
}
/* success: binder-control device removal failed as expected */
for (int i = 0; i < ARRAY_SIZE(binder_features); i++) {
snprintf(device_path, sizeof(device_path), "%s/features/%s",
binderfs_mntpt, binder_features[i]);
fd = open(device_path, O_CLOEXEC | O_RDONLY);
EXPECT_GE(fd, 0) {
TH_LOG("%s - Failed to open binder feature: %s",
strerror(errno), binder_features[i]);
goto umount;
}
close(fd);
}
/* success: binder feature files found */
result = 0;
umount:
ret = umount2(binderfs_mntpt, MNT_DETACH);
EXPECT_EQ(ret, 0) {
TH_LOG("%s - Failed to unmount binderfs", strerror(errno));
}
rmdir:
ret = rmdir(binderfs_mntpt);
EXPECT_EQ(ret, 0) {
TH_LOG("%s - Failed to rmdir binderfs mount", strerror(errno));
}
out:
return result;
}
static int wait_for_pid(pid_t pid)
{
int status, ret;
again:
ret = waitpid(pid, &status, 0);
if (ret == -1) {
if (errno == EINTR)
goto again;
return -1;
}
if (!WIFEXITED(status))
return -1;
return WEXITSTATUS(status);
}
static int setid_userns_root(void)
{
if (setuid(0))
return -1;
if (setgid(0))
return -1;
setfsuid(0);
setfsgid(0);
return 0;
}
enum idmap_type {
UID_MAP,
GID_MAP,
};
static ssize_t read_nointr(int fd, void *buf, size_t count)
{
ssize_t ret;
again:
ret = read(fd, buf, count);
if (ret < 0 && errno == EINTR)
goto again;
return ret;
}
static ssize_t write_nointr(int fd, const void *buf, size_t count)
{
ssize_t ret;
again:
ret = write(fd, buf, count);
if (ret < 0 && errno == EINTR)
goto again;
return ret;
}
static int write_id_mapping(enum idmap_type type, pid_t pid, const char *buf,
size_t buf_size)
{
int fd;
int ret;
char path[4096];
if (type == GID_MAP) {
int setgroups_fd;
snprintf(path, sizeof(path), "/proc/%d/setgroups", pid);
setgroups_fd = open(path, O_WRONLY | O_CLOEXEC | O_NOFOLLOW);
if (setgroups_fd < 0 && errno != ENOENT)
return -1;
if (setgroups_fd >= 0) {
ret = write_nointr(setgroups_fd, "deny", sizeof("deny") - 1);
close_prot_errno_disarm(setgroups_fd);
if (ret != sizeof("deny") - 1)
return -1;
}
}
switch (type) {
case UID_MAP:
ret = snprintf(path, sizeof(path), "/proc/%d/uid_map", pid);
break;
case GID_MAP:
ret = snprintf(path, sizeof(path), "/proc/%d/gid_map", pid);
break;
default:
return -1;
}
if (ret < 0 || ret >= sizeof(path))
return -E2BIG;
fd = open(path, O_WRONLY | O_CLOEXEC | O_NOFOLLOW);
if (fd < 0)
return -1;
ret = write_nointr(fd, buf, buf_size);
close_prot_errno_disarm(fd);
if (ret != buf_size)
return -1;
return 0;
}
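/*
 * For GID_MAP the kernel requires an unprivileged writer to disable
 * setgroups(2) first, hence "deny" is written to /proc/<pid>/setgroups
 * before gid_map; the ENOENT fallback keeps the helper working on
 * kernels that predate that file.
 */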
static void change_userns(struct __test_metadata *_metadata, int syncfds[2])
{
int ret;
char buf;
close_prot_errno_disarm(syncfds[1]);
ret = unshare(CLONE_NEWUSER);
ASSERT_EQ(ret, 0) {
TH_LOG("%s - Failed to unshare user namespace",
strerror(errno));
}
ret = write_nointr(syncfds[0], "1", 1);
ASSERT_EQ(ret, 1) {
TH_LOG("write_nointr() failed");
}
ret = read_nointr(syncfds[0], &buf, 1);
ASSERT_EQ(ret, 1) {
TH_LOG("read_nointr() failed");
}
close_prot_errno_disarm(syncfds[0]);
ASSERT_EQ(setid_userns_root(), 0) {
TH_LOG("setid_userns_root() failed");
}
}
static void change_idmaps(struct __test_metadata *_metadata, int syncfds[2], pid_t pid)
{
int ret;
char buf;
char id_map[4096];
close_prot_errno_disarm(syncfds[0]);
ret = read_nointr(syncfds[1], &buf, 1);
ASSERT_EQ(ret, 1) {
TH_LOG("read_nointr() failed");
}
snprintf(id_map, sizeof(id_map), "0 %d 1\n", getuid());
ret = write_id_mapping(UID_MAP, pid, id_map, strlen(id_map));
ASSERT_EQ(ret, 0) {
TH_LOG("write_id_mapping(UID_MAP) failed");
}
snprintf(id_map, sizeof(id_map), "0 %d 1\n", getgid());
ret = write_id_mapping(GID_MAP, pid, id_map, strlen(id_map));
ASSERT_EQ(ret, 0) {
TH_LOG("write_id_mapping(GID_MAP) failed");
}
ret = write_nointr(syncfds[1], "1", 1);
ASSERT_EQ(ret, 1) {
TH_LOG("write_nointr() failed");
}
close_prot_errno_disarm(syncfds[1]);
}
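/*
 * change_userns() and change_idmaps() pair up across the socketpair:
 * the child unshares its user namespace and signals the parent, the
 * parent writes the uid/gid mappings from outside (where it still has
 * the privilege to do so) and signals back, and only then does the
 * child assume uid/gid 0 inside the new namespace.
 */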
struct __test_metadata *_thread_metadata;
static void *binder_version_thread(void *data)
{
struct __test_metadata *_metadata = _thread_metadata;
int fd = PTR_TO_INT(data);
struct binder_version version = { 0 };
int ret;
ret = ioctl(fd, BINDER_VERSION, &version);
if (ret < 0)
TH_LOG("%s - Failed to open perform BINDER_VERSION request\n",
strerror(errno));
pthread_exit(data);
}
/*
* Regression test:
* 2669b8b0c798 ("binder: prevent UAF for binderfs devices")
* f0fe2c0f050d ("binder: prevent UAF for binderfs devices II")
* 211b64e4b5b6 ("binderfs: use refcount for binder control devices too")
*/
TEST(binderfs_stress)
{
int fds[1000];
int syncfds[2];
pid_t pid;
int fd, ret;
struct binderfs_device device = { 0 };
char binderfs_mntpt[] = P_tmpdir "/binderfs_XXXXXX",
device_path[sizeof(P_tmpdir "/binderfs_XXXXXX/") + BINDERFS_MAX_NAME];
ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, syncfds);
ASSERT_EQ(ret, 0) {
TH_LOG("%s - Failed to create socket pair", strerror(errno));
}
pid = fork();
ASSERT_GE(pid, 0) {
TH_LOG("%s - Failed to fork", strerror(errno));
close_prot_errno_disarm(syncfds[0]);
close_prot_errno_disarm(syncfds[1]);
}
if (pid == 0) {
int i, j, k, nthreads;
pthread_attr_t attr;
pthread_t threads[DEFAULT_THREADS];
change_userns(_metadata, syncfds);
change_mountns(_metadata);
ASSERT_NE(mkdtemp(binderfs_mntpt), NULL) {
TH_LOG("%s - Failed to create binderfs mountpoint",
strerror(errno));
}
ret = mount(NULL, binderfs_mntpt, "binder", 0, 0);
ASSERT_EQ(ret, 0) {
TH_LOG("%s - Failed to mount binderfs, check if CONFIG_ANDROID_BINDERFS is enabled in the running kernel",
strerror(errno));
}
		for (i = 0; i < ARRAY_SIZE(fds); i++) {
snprintf(device_path, sizeof(device_path),
"%s/binder-control", binderfs_mntpt);
fd = open(device_path, O_RDONLY | O_CLOEXEC);
ASSERT_GE(fd, 0) {
TH_LOG("%s - Failed to open binder-control device",
strerror(errno));
}
memset(&device, 0, sizeof(device));
snprintf(device.name, sizeof(device.name), "%d", i);
ret = ioctl(fd, BINDER_CTL_ADD, &device);
close_prot_errno_disarm(fd);
ASSERT_EQ(ret, 0) {
TH_LOG("%s - Failed to allocate new binder device",
strerror(errno));
}
snprintf(device_path, sizeof(device_path), "%s/%d",
binderfs_mntpt, i);
fds[i] = open(device_path, O_RDONLY | O_CLOEXEC);
ASSERT_GE(fds[i], 0) {
TH_LOG("%s - Failed to open binder device", strerror(errno));
}
}
ret = umount2(binderfs_mntpt, MNT_DETACH);
ASSERT_EQ(ret, 0) {
TH_LOG("%s - Failed to unmount binderfs", strerror(errno));
rmdir(binderfs_mntpt);
}
nthreads = get_nprocs_conf();
if (nthreads > DEFAULT_THREADS)
nthreads = DEFAULT_THREADS;
_thread_metadata = _metadata;
pthread_attr_init(&attr);
for (k = 0; k < ARRAY_SIZE(fds); k++) {
for (i = 0; i < nthreads; i++) {
ret = pthread_create(&threads[i], &attr, binder_version_thread, INT_TO_PTR(fds[k]));
if (ret) {
TH_LOG("%s - Failed to create thread %d",
strerror(errno), i);
break;
}
}
for (j = 0; j < i; j++) {
void *fdptr = NULL;
ret = pthread_join(threads[j], &fdptr);
if (ret)
TH_LOG("%s - Failed to join thread %d for fd %d",
strerror(errno), j, PTR_TO_INT(fdptr));
}
}
pthread_attr_destroy(&attr);
for (k = 0; k < ARRAY_SIZE(fds); k++)
close(fds[k]);
exit(EXIT_SUCCESS);
}
change_idmaps(_metadata, syncfds, pid);
ret = wait_for_pid(pid);
ASSERT_EQ(ret, 0) {
TH_LOG("wait_for_pid() failed");
}
}
TEST(binderfs_test_privileged)
{
if (geteuid() != 0)
SKIP(return, "Tests are not run as root. Skipping privileged tests");
if (__do_binderfs_test(_metadata))
SKIP(return, "The Android binderfs filesystem is not available");
}
TEST(binderfs_test_unprivileged)
{
int ret;
int syncfds[2];
pid_t pid;
ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, syncfds);
ASSERT_EQ(ret, 0) {
TH_LOG("%s - Failed to create socket pair", strerror(errno));
}
pid = fork();
ASSERT_GE(pid, 0) {
close_prot_errno_disarm(syncfds[0]);
close_prot_errno_disarm(syncfds[1]);
TH_LOG("%s - Failed to fork", strerror(errno));
}
if (pid == 0) {
change_userns(_metadata, syncfds);
if (__do_binderfs_test(_metadata))
exit(2);
exit(EXIT_SUCCESS);
}
change_idmaps(_metadata, syncfds, pid);
ret = wait_for_pid(pid);
if (ret) {
if (ret == 2)
SKIP(return, "The Android binderfs filesystem is not available");
ASSERT_EQ(ret, 0) {
TH_LOG("wait_for_pid() failed");
}
}
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/filesystems/binderfs/binderfs_test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Program that atomically exchanges two paths using
* the renameat2() system call RENAME_EXCHANGE flag.
*
* Copyright 2022 Red Hat Inc.
* Author: Javier Martinez Canillas <[email protected]>
*/
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
void print_usage(const char *program)
{
printf("Usage: %s [oldpath] [newpath]\n", program);
printf("Atomically exchange oldpath and newpath\n");
}
int main(int argc, char *argv[])
{
int ret;
if (argc != 3) {
print_usage(argv[0]);
exit(EXIT_FAILURE);
}
ret = renameat2(AT_FDCWD, argv[1], AT_FDCWD, argv[2], RENAME_EXCHANGE);
if (ret) {
perror("rename exchange failed");
exit(EXIT_FAILURE);
}
exit(EXIT_SUCCESS);
}
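/*
 * Example invocation (hypothetical paths). Both paths must already
 * exist, since RENAME_EXCHANGE atomically swaps two existing entries:
 *
 *   $ touch /tmp/a /tmp/b
 *   $ ./rename_exchange /tmp/a /tmp/b
 */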
| linux-master | tools/testing/selftests/filesystems/fat/rename_exchange.c |