// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#define _GNU_SOURCE
#include <test_progs.h>
#include "test_core_retro.skel.h"
void test_core_retro(void)
{
int err, zero = 0, res, my_pid = getpid();
struct test_core_retro *skel;
/* load program */
skel = test_core_retro__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto out_close;
err = bpf_map__update_elem(skel->maps.exp_tgid_map, &zero, sizeof(zero),
&my_pid, sizeof(my_pid), 0);
if (!ASSERT_OK(err, "map_update"))
goto out_close;
/* attach probe */
err = test_core_retro__attach(skel);
if (!ASSERT_OK(err, "attach_kprobe"))
goto out_close;
/* trigger */
usleep(1);
err = bpf_map__lookup_elem(skel->maps.results, &zero, sizeof(zero), &res, sizeof(res), 0);
if (!ASSERT_OK(err, "map_lookup"))
goto out_close;
ASSERT_EQ(res, my_pid, "pid_check");
out_close:
test_core_retro__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/core_retro.c |
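/*
 * Editorial sketch (not part of the kernel tree): a minimal standalone
 * program doing the same update/lookup round trip that the test above
 * performs through the skeleton's bpf_map__update_elem()/lookup_elem(),
 * here against an anonymous array map created with bpf_map_create().
 * Assumes libbpf >= 0.7 and CAP_BPF (or root); error handling is minimal.
 */
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	int key = 0, val = getpid(), out = 0;
	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo_map",
				sizeof(key), sizeof(val), 1, NULL);

	if (fd < 0) {
		perror("bpf_map_create");
		return 1;
	}
	/* store our pid at index 0, then read it back */
	if (bpf_map_update_elem(fd, &key, &val, BPF_ANY) ||
	    bpf_map_lookup_elem(fd, &key, &out))
		perror("map access");
	else
		printf("stored %d, read back %d\n", val, out);
	close(fd);
	return 0;
}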
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <bpf/btf.h>
void test_libbpf_probe_prog_types(void)
{
struct btf *btf;
const struct btf_type *t;
const struct btf_enum *e;
int i, n, id;
btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
if (!ASSERT_OK_PTR(btf, "btf_parse"))
return;
/* find enum bpf_prog_type and enumerate each value */
id = btf__find_by_name_kind(btf, "bpf_prog_type", BTF_KIND_ENUM);
if (!ASSERT_GT(id, 0, "bpf_prog_type_id"))
goto cleanup;
t = btf__type_by_id(btf, id);
if (!ASSERT_OK_PTR(t, "bpf_prog_type_enum"))
goto cleanup;
for (e = btf_enum(t), i = 0, n = btf_vlen(t); i < n; e++, i++) {
const char *prog_type_name = btf__str_by_offset(btf, e->name_off);
enum bpf_prog_type prog_type = (enum bpf_prog_type)e->val;
int res;
if (prog_type == BPF_PROG_TYPE_UNSPEC)
continue;
if (!test__start_subtest(prog_type_name))
continue;
res = libbpf_probe_bpf_prog_type(prog_type, NULL);
ASSERT_EQ(res, 1, prog_type_name);
}
cleanup:
btf__free(btf);
}
void test_libbpf_probe_map_types(void)
{
struct btf *btf;
const struct btf_type *t;
const struct btf_enum *e;
int i, n, id;
btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
if (!ASSERT_OK_PTR(btf, "btf_parse"))
return;
/* find enum bpf_map_type and enumerate each value */
id = btf__find_by_name_kind(btf, "bpf_map_type", BTF_KIND_ENUM);
if (!ASSERT_GT(id, 0, "bpf_map_type_id"))
goto cleanup;
t = btf__type_by_id(btf, id);
if (!ASSERT_OK_PTR(t, "bpf_map_type_enum"))
goto cleanup;
for (e = btf_enum(t), i = 0, n = btf_vlen(t); i < n; e++, i++) {
const char *map_type_name = btf__str_by_offset(btf, e->name_off);
enum bpf_map_type map_type = (enum bpf_map_type)e->val;
int res;
if (map_type == BPF_MAP_TYPE_UNSPEC)
continue;
if (!test__start_subtest(map_type_name))
continue;
res = libbpf_probe_bpf_map_type(map_type, NULL);
ASSERT_EQ(res, 1, map_type_name);
}
cleanup:
btf__free(btf);
}
void test_libbpf_probe_helpers(void)
{
#define CASE(prog, helper, supp) { \
.prog_type_name = "BPF_PROG_TYPE_" # prog, \
.helper_name = "bpf_" # helper, \
.prog_type = BPF_PROG_TYPE_ ## prog, \
.helper_id = BPF_FUNC_ ## helper, \
.supported = supp, \
}
const struct case_def {
const char *prog_type_name;
const char *helper_name;
enum bpf_prog_type prog_type;
enum bpf_func_id helper_id;
bool supported;
} cases[] = {
CASE(KPROBE, unspec, false),
CASE(KPROBE, map_lookup_elem, true),
CASE(KPROBE, loop, true),
CASE(KPROBE, ktime_get_coarse_ns, false),
CASE(SOCKET_FILTER, ktime_get_coarse_ns, true),
CASE(KPROBE, sys_bpf, false),
CASE(SYSCALL, sys_bpf, true),
};
size_t case_cnt = ARRAY_SIZE(cases), i;
char buf[128];
for (i = 0; i < case_cnt; i++) {
const struct case_def *d = &cases[i];
int res;
snprintf(buf, sizeof(buf), "%s+%s", d->prog_type_name, d->helper_name);
if (!test__start_subtest(buf))
continue;
res = libbpf_probe_bpf_helper(d->prog_type, d->helper_id, NULL);
ASSERT_EQ(res, d->supported, buf);
}
}
| linux-master | tools/testing/selftests/bpf/prog_tests/libbpf_probes.c |
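/*
 * Editorial sketch (not part of the kernel tree): probing kernel support for
 * a program type, a map type and a helper from a plain userspace program,
 * using the same libbpf feature-probing API the tests above iterate over.
 * Assumes libbpf >= 0.8. A return value of 1 means supported, 0 means not
 * supported, and a negative value is an error.
 */
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	int prog = libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_KPROBE, NULL);
	int map = libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL);
	int helper = libbpf_probe_bpf_helper(BPF_PROG_TYPE_KPROBE,
					     BPF_FUNC_map_lookup_elem, NULL);

	printf("kprobe progs:                %d\n", prog);
	printf("ringbuf maps:                %d\n", map);
	printf("map_lookup_elem from kprobe: %d\n", helper);
	return 0;
}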
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <[email protected]> */
#include <string.h>
#include <linux/bpf.h>
#include <linux/limits.h>
#include <test_progs.h>
#include "trace_helpers.h"
#include "test_fill_link_info.skel.h"
#define TP_CAT "sched"
#define TP_NAME "sched_switch"
static const char *kmulti_syms[] = {
"bpf_fentry_test2",
"bpf_fentry_test1",
"bpf_fentry_test3",
};
#define KMULTI_CNT ARRAY_SIZE(kmulti_syms)
static __u64 kmulti_addrs[KMULTI_CNT];
#define KPROBE_FUNC "bpf_fentry_test1"
static __u64 kprobe_addr;
#define UPROBE_FILE "/proc/self/exe"
static ssize_t uprobe_offset;
/* uprobe attach point */
static noinline void uprobe_func(void)
{
asm volatile ("");
}
static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
ssize_t offset, ssize_t entry_offset)
{
struct bpf_link_info info;
__u32 len = sizeof(info);
char buf[PATH_MAX];
int err;
memset(&info, 0, sizeof(info));
buf[0] = '\0';
again:
err = bpf_link_get_info_by_fd(fd, &info, &len);
if (!ASSERT_OK(err, "get_link_info"))
return -1;
if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type"))
return -1;
if (!ASSERT_EQ(info.perf_event.type, type, "perf_type_match"))
return -1;
switch (info.perf_event.type) {
case BPF_PERF_EVENT_KPROBE:
case BPF_PERF_EVENT_KRETPROBE:
ASSERT_EQ(info.perf_event.kprobe.offset, offset, "kprobe_offset");
/* addr may be 0 if kernel.kptr_restrict prevents resolving it or MAX_SYMS was reached */
if (addr)
ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset,
"kprobe_addr");
if (!info.perf_event.kprobe.func_name) {
ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
info.perf_event.kprobe.name_len = sizeof(buf);
goto again;
}
err = strncmp(u64_to_ptr(info.perf_event.kprobe.func_name), KPROBE_FUNC,
strlen(KPROBE_FUNC));
ASSERT_EQ(err, 0, "cmp_kprobe_func_name");
break;
case BPF_PERF_EVENT_TRACEPOINT:
if (!info.perf_event.tracepoint.tp_name) {
ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len");
info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
info.perf_event.tracepoint.name_len = sizeof(buf);
goto again;
}
err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,
strlen(TP_NAME));
ASSERT_EQ(err, 0, "cmp_tp_name");
break;
case BPF_PERF_EVENT_UPROBE:
case BPF_PERF_EVENT_URETPROBE:
ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");
if (!info.perf_event.uprobe.file_name) {
ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len");
info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
info.perf_event.uprobe.name_len = sizeof(buf);
goto again;
}
err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,
strlen(UPROBE_FILE));
ASSERT_EQ(err, 0, "cmp_file_name");
break;
default:
err = -1;
break;
}
return err;
}
static void kprobe_fill_invalid_user_buffer(int fd)
{
struct bpf_link_info info;
__u32 len = sizeof(info);
int err;
memset(&info, 0, sizeof(info));
info.perf_event.kprobe.func_name = 0x1; /* invalid address */
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, -EINVAL, "invalid_buff_and_len");
info.perf_event.kprobe.name_len = 64;
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, -EFAULT, "invalid_buff");
info.perf_event.kprobe.func_name = 0;
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, -EINVAL, "invalid_len");
ASSERT_EQ(info.perf_event.kprobe.addr, 0, "func_addr");
ASSERT_EQ(info.perf_event.kprobe.offset, 0, "func_offset");
ASSERT_EQ(info.perf_event.type, 0, "type");
}
static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
enum bpf_perf_event_type type,
bool invalid)
{
DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
.attach_mode = PROBE_ATTACH_MODE_LINK,
.retprobe = type == BPF_PERF_EVENT_KRETPROBE,
);
ssize_t entry_offset = 0;
int link_fd, err;
skel->links.kprobe_run = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run,
KPROBE_FUNC, &opts);
if (!ASSERT_OK_PTR(skel->links.kprobe_run, "attach_kprobe"))
return;
link_fd = bpf_link__fd(skel->links.kprobe_run);
if (!invalid) {
/* See also arch_adjust_kprobe_addr(). */
if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
entry_offset = 4;
err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset);
ASSERT_OK(err, "verify_perf_link_info");
} else {
kprobe_fill_invalid_user_buffer(link_fd);
}
bpf_link__detach(skel->links.kprobe_run);
}
static void test_tp_fill_link_info(struct test_fill_link_info *skel)
{
int link_fd, err;
skel->links.tp_run = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
if (!ASSERT_OK_PTR(skel->links.tp_run, "attach_tp"))
return;
link_fd = bpf_link__fd(skel->links.tp_run);
err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0);
ASSERT_OK(err, "verify_perf_link_info");
bpf_link__detach(skel->links.tp_run);
}
static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
enum bpf_perf_event_type type)
{
int link_fd, err;
skel->links.uprobe_run = bpf_program__attach_uprobe(skel->progs.uprobe_run,
type == BPF_PERF_EVENT_URETPROBE,
0, /* self pid */
UPROBE_FILE, uprobe_offset);
if (!ASSERT_OK_PTR(skel->links.uprobe_run, "attach_uprobe"))
return;
link_fd = bpf_link__fd(skel->links.uprobe_run);
err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, 0);
ASSERT_OK(err, "verify_perf_link_info");
bpf_link__detach(skel->links.uprobe_run);
}
static int verify_kmulti_link_info(int fd, bool retprobe)
{
struct bpf_link_info info;
__u32 len = sizeof(info);
__u64 addrs[KMULTI_CNT];
int flags, i, err;
memset(&info, 0, sizeof(info));
again:
err = bpf_link_get_info_by_fd(fd, &info, &len);
if (!ASSERT_OK(err, "get_link_info"))
return -1;
if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_KPROBE_MULTI, "kmulti_type"))
return -1;
ASSERT_EQ(info.kprobe_multi.count, KMULTI_CNT, "func_cnt");
flags = info.kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN;
if (!retprobe)
ASSERT_EQ(flags, 0, "kmulti_flags");
else
ASSERT_NEQ(flags, 0, "kretmulti_flags");
if (!info.kprobe_multi.addrs) {
info.kprobe_multi.addrs = ptr_to_u64(addrs);
goto again;
}
for (i = 0; i < KMULTI_CNT; i++)
ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
return 0;
}
static void verify_kmulti_invalid_user_buffer(int fd)
{
struct bpf_link_info info;
__u32 len = sizeof(info);
__u64 addrs[KMULTI_CNT];
int err, i;
memset(&info, 0, sizeof(info));
info.kprobe_multi.count = KMULTI_CNT;
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, -EINVAL, "no_addr");
info.kprobe_multi.addrs = ptr_to_u64(addrs);
info.kprobe_multi.count = 0;
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, -EINVAL, "no_cnt");
for (i = 0; i < KMULTI_CNT; i++)
addrs[i] = 0;
info.kprobe_multi.count = KMULTI_CNT - 1;
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, -ENOSPC, "smaller_cnt");
for (i = 0; i < KMULTI_CNT - 1; i++)
ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
ASSERT_EQ(addrs[i], 0, "kmulti_addrs");
for (i = 0; i < KMULTI_CNT; i++)
addrs[i] = 0;
info.kprobe_multi.count = KMULTI_CNT + 1;
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, 0, "bigger_cnt");
for (i = 0; i < KMULTI_CNT; i++)
ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
info.kprobe_multi.count = KMULTI_CNT;
info.kprobe_multi.addrs = 0x1; /* invalid addr */
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, -EFAULT, "invalid_buff");
}
static int symbols_cmp_r(const void *a, const void *b)
{
const char **str_a = (const char **) a;
const char **str_b = (const char **) b;
return strcmp(*str_a, *str_b);
}
static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
bool retprobe, bool invalid)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
int link_fd, err;
opts.syms = kmulti_syms;
opts.cnt = KMULTI_CNT;
opts.retprobe = retprobe;
skel->links.kmulti_run = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run,
NULL, &opts);
if (!ASSERT_OK_PTR(skel->links.kmulti_run, "attach_kprobe_multi"))
return;
link_fd = bpf_link__fd(skel->links.kmulti_run);
if (!invalid) {
err = verify_kmulti_link_info(link_fd, retprobe);
ASSERT_OK(err, "verify_kmulti_link_info");
} else {
verify_kmulti_invalid_user_buffer(link_fd);
}
bpf_link__detach(skel->links.kmulti_run);
}
void test_fill_link_info(void)
{
struct test_fill_link_info *skel;
int i;
skel = test_fill_link_info__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
/* load kallsyms to compare the addr */
if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
goto cleanup;
kprobe_addr = ksym_get_addr(KPROBE_FUNC);
if (test__start_subtest("kprobe_link_info"))
test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false);
if (test__start_subtest("kretprobe_link_info"))
test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KRETPROBE, false);
if (test__start_subtest("kprobe_invalid_ubuff"))
test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true);
if (test__start_subtest("tracepoint_link_info"))
test_tp_fill_link_info(skel);
uprobe_offset = get_uprobe_offset(&uprobe_func);
if (test__start_subtest("uprobe_link_info"))
test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_UPROBE);
if (test__start_subtest("uretprobe_link_info"))
test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_URETPROBE);
qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r);
for (i = 0; i < KMULTI_CNT; i++)
kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]);
if (test__start_subtest("kprobe_multi_link_info"))
test_kprobe_multi_fill_link_info(skel, false, false);
if (test__start_subtest("kretprobe_multi_link_info"))
test_kprobe_multi_fill_link_info(skel, true, false);
if (test__start_subtest("kprobe_multi_invalid_ubuff"))
test_kprobe_multi_fill_link_info(skel, true, true);
cleanup:
test_fill_link_info__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/fill_link_info.c |
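/*
 * Editorial sketch (not part of the kernel tree): the two-pass pattern that
 * verify_perf_link_info() above relies on. The first
 * bpf_link_get_info_by_fd() call is made without a name buffer, so the
 * kernel only reports sizes/ids; a second call with a user buffer retrieves
 * the kprobe function name. Assumes libbpf >= 1.2 and a kernel with
 * perf_event link info support (6.6+); link_fd is any kprobe perf-event
 * link fd supplied by the caller.
 */
#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

static int print_kprobe_link_func(int link_fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	char name[256];
	int err;

	memset(&info, 0, sizeof(info));
	err = bpf_link_get_info_by_fd(link_fd, &info, &len);
	if (err || info.type != BPF_LINK_TYPE_PERF_EVENT)
		return err ? err : -1;

	/* second pass: hand the kernel a buffer for the function name */
	memset(&info, 0, sizeof(info));
	info.perf_event.kprobe.func_name = (__u64)(unsigned long)name;
	info.perf_event.kprobe.name_len = sizeof(name);
	err = bpf_link_get_info_by_fd(link_fd, &info, &len);
	if (err)
		return err;

	printf("attached to %s+%u\n", name, info.perf_event.kprobe.offset);
	return 0;
}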
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "kprobe_multi.skel.h"
#include "trace_helpers.h"
#include "kprobe_multi_empty.skel.h"
#include "kprobe_multi_override.skel.h"
#include "bpf/libbpf_internal.h"
#include "bpf/hashmap.h"
static void kprobe_multi_test_run(struct kprobe_multi *skel, bool test_return)
{
LIBBPF_OPTS(bpf_test_run_opts, topts);
int err, prog_fd;
prog_fd = bpf_program__fd(skel->progs.trigger);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");
if (test_return) {
ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
}
}
static void test_skel_api(void)
{
struct kprobe_multi *skel = NULL;
int err;
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load"))
goto cleanup;
skel->bss->pid = getpid();
err = kprobe_multi__attach(skel);
if (!ASSERT_OK(err, "kprobe_multi__attach"))
goto cleanup;
kprobe_multi_test_run(skel, true);
cleanup:
kprobe_multi__destroy(skel);
}
static void test_link_api(struct bpf_link_create_opts *opts)
{
int prog_fd, link1_fd = -1, link2_fd = -1;
struct kprobe_multi *skel = NULL;
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
goto cleanup;
skel->bss->pid = getpid();
prog_fd = bpf_program__fd(skel->progs.test_kprobe);
link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts);
if (!ASSERT_GE(link1_fd, 0, "link_fd"))
goto cleanup;
opts->kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts);
if (!ASSERT_GE(link2_fd, 0, "link_fd"))
goto cleanup;
kprobe_multi_test_run(skel, true);
cleanup:
if (link1_fd != -1)
close(link1_fd);
if (link2_fd != -1)
close(link2_fd);
kprobe_multi__destroy(skel);
}
#define GET_ADDR(__sym, __addr) ({ \
__addr = ksym_get_addr(__sym); \
if (!ASSERT_NEQ(__addr, 0, "kallsyms load failed for " #__sym)) \
return; \
})
static void test_link_api_addrs(void)
{
LIBBPF_OPTS(bpf_link_create_opts, opts);
unsigned long long addrs[8];
GET_ADDR("bpf_fentry_test1", addrs[0]);
GET_ADDR("bpf_fentry_test2", addrs[1]);
GET_ADDR("bpf_fentry_test3", addrs[2]);
GET_ADDR("bpf_fentry_test4", addrs[3]);
GET_ADDR("bpf_fentry_test5", addrs[4]);
GET_ADDR("bpf_fentry_test6", addrs[5]);
GET_ADDR("bpf_fentry_test7", addrs[6]);
GET_ADDR("bpf_fentry_test8", addrs[7]);
opts.kprobe_multi.addrs = (const unsigned long*) addrs;
opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
test_link_api(&opts);
}
static void test_link_api_syms(void)
{
LIBBPF_OPTS(bpf_link_create_opts, opts);
const char *syms[8] = {
"bpf_fentry_test1",
"bpf_fentry_test2",
"bpf_fentry_test3",
"bpf_fentry_test4",
"bpf_fentry_test5",
"bpf_fentry_test6",
"bpf_fentry_test7",
"bpf_fentry_test8",
};
opts.kprobe_multi.syms = syms;
opts.kprobe_multi.cnt = ARRAY_SIZE(syms);
test_link_api(&opts);
}
static void
test_attach_api(const char *pattern, struct bpf_kprobe_multi_opts *opts)
{
struct bpf_link *link1 = NULL, *link2 = NULL;
struct kprobe_multi *skel = NULL;
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
goto cleanup;
skel->bss->pid = getpid();
link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
pattern, opts);
if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
if (opts) {
opts->retprobe = true;
link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe_manual,
pattern, opts);
if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
}
kprobe_multi_test_run(skel, !!opts);
cleanup:
bpf_link__destroy(link2);
bpf_link__destroy(link1);
kprobe_multi__destroy(skel);
}
static void test_attach_api_pattern(void)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
test_attach_api("bpf_fentry_test*", &opts);
test_attach_api("bpf_fentry_test?", NULL);
}
static void test_attach_api_addrs(void)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
unsigned long long addrs[8];
GET_ADDR("bpf_fentry_test1", addrs[0]);
GET_ADDR("bpf_fentry_test2", addrs[1]);
GET_ADDR("bpf_fentry_test3", addrs[2]);
GET_ADDR("bpf_fentry_test4", addrs[3]);
GET_ADDR("bpf_fentry_test5", addrs[4]);
GET_ADDR("bpf_fentry_test6", addrs[5]);
GET_ADDR("bpf_fentry_test7", addrs[6]);
GET_ADDR("bpf_fentry_test8", addrs[7]);
opts.addrs = (const unsigned long *) addrs;
opts.cnt = ARRAY_SIZE(addrs);
test_attach_api(NULL, &opts);
}
static void test_attach_api_syms(void)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
const char *syms[8] = {
"bpf_fentry_test1",
"bpf_fentry_test2",
"bpf_fentry_test3",
"bpf_fentry_test4",
"bpf_fentry_test5",
"bpf_fentry_test6",
"bpf_fentry_test7",
"bpf_fentry_test8",
};
opts.syms = syms;
opts.cnt = ARRAY_SIZE(syms);
test_attach_api(NULL, &opts);
}
static void test_attach_api_fails(void)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
struct kprobe_multi *skel = NULL;
struct bpf_link *link = NULL;
unsigned long long addrs[2];
const char *syms[2] = {
"bpf_fentry_test1",
"bpf_fentry_test2",
};
__u64 cookies[2];
addrs[0] = ksym_get_addr("bpf_fentry_test1");
addrs[1] = ksym_get_addr("bpf_fentry_test2");
if (!ASSERT_FALSE(!addrs[0] || !addrs[1], "ksym_get_addr"))
goto cleanup;
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
goto cleanup;
skel->bss->pid = getpid();
/* fail_1 - pattern and opts NULL */
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
NULL, NULL);
if (!ASSERT_ERR_PTR(link, "fail_1"))
goto cleanup;
if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_1_error"))
goto cleanup;
/* fail_2 - both addrs and syms set */
opts.addrs = (const unsigned long *) addrs;
opts.syms = syms;
opts.cnt = ARRAY_SIZE(syms);
opts.cookies = NULL;
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
NULL, &opts);
if (!ASSERT_ERR_PTR(link, "fail_2"))
goto cleanup;
if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_2_error"))
goto cleanup;
/* fail_3 - pattern and addrs set */
opts.addrs = (const unsigned long *) addrs;
opts.syms = NULL;
opts.cnt = ARRAY_SIZE(syms);
opts.cookies = NULL;
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
"ksys_*", &opts);
if (!ASSERT_ERR_PTR(link, "fail_3"))
goto cleanup;
if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_3_error"))
goto cleanup;
/* fail_4 - pattern and cnt set */
opts.addrs = NULL;
opts.syms = NULL;
opts.cnt = ARRAY_SIZE(syms);
opts.cookies = NULL;
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
"ksys_*", &opts);
if (!ASSERT_ERR_PTR(link, "fail_4"))
goto cleanup;
if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_4_error"))
goto cleanup;
/* fail_5 - pattern and cookies */
opts.addrs = NULL;
opts.syms = NULL;
opts.cnt = 0;
opts.cookies = cookies;
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
"ksys_*", &opts);
if (!ASSERT_ERR_PTR(link, "fail_5"))
goto cleanup;
if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_5_error"))
goto cleanup;
cleanup:
bpf_link__destroy(link);
kprobe_multi__destroy(skel);
}
static size_t symbol_hash(long key, void *ctx __maybe_unused)
{
return str_hash((const char *) key);
}
static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
{
return strcmp((const char *) key1, (const char *) key2) == 0;
}
static int get_syms(char ***symsp, size_t *cntp, bool kernel)
{
size_t cap = 0, cnt = 0, i;
char *name = NULL, **syms = NULL;
struct hashmap *map;
char buf[256];
FILE *f;
int err = 0;
/*
* The available_filter_functions file contains many duplicates,
* but other than that all symbols are usable with the kprobe
* multi interface.
* Filter out the duplicates with hashmap__add, which refuses to
* add an entry that already exists.
*/
if (access("/sys/kernel/tracing/trace", F_OK) == 0)
f = fopen("/sys/kernel/tracing/available_filter_functions", "r");
else
f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
if (!f)
return -EINVAL;
map = hashmap__new(symbol_hash, symbol_equal, NULL);
if (IS_ERR(map)) {
err = libbpf_get_error(map);
goto error;
}
while (fgets(buf, sizeof(buf), f)) {
if (kernel && strchr(buf, '['))
continue;
if (!kernel && !strchr(buf, '['))
continue;
free(name);
if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
continue;
/*
* We attach to almost all kernel functions and some of them
* will cause 'suspicious RCU usage' warnings when fprobe is
* attached to them. Filter out the current culprits: arch_cpu_idle,
* default_idle and the rcu_* functions.
*/
if (!strcmp(name, "arch_cpu_idle"))
continue;
if (!strcmp(name, "default_idle"))
continue;
if (!strncmp(name, "rcu_", 4))
continue;
if (!strcmp(name, "bpf_dispatcher_xdp_func"))
continue;
if (!strncmp(name, "__ftrace_invalid_address__",
sizeof("__ftrace_invalid_address__") - 1))
continue;
err = hashmap__add(map, name, 0);
if (err == -EEXIST) {
err = 0;
continue;
}
if (err)
goto error;
err = libbpf_ensure_mem((void **) &syms, &cap,
sizeof(*syms), cnt + 1);
if (err)
goto error;
syms[cnt++] = name;
name = NULL;
}
*symsp = syms;
*cntp = cnt;
error:
free(name);
fclose(f);
hashmap__free(map);
if (err) {
for (i = 0; i < cnt; i++)
free(syms[i]);
free(syms);
}
return err;
}
static void test_kprobe_multi_bench_attach(bool kernel)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
struct kprobe_multi_empty *skel = NULL;
long attach_start_ns, attach_end_ns;
long detach_start_ns, detach_end_ns;
double attach_delta, detach_delta;
struct bpf_link *link = NULL;
char **syms = NULL;
size_t cnt = 0, i;
if (!ASSERT_OK(get_syms(&syms, &cnt, kernel), "get_syms"))
return;
skel = kprobe_multi_empty__open_and_load();
if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
goto cleanup;
opts.syms = (const char **) syms;
opts.cnt = cnt;
attach_start_ns = get_time_ns();
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_empty,
NULL, &opts);
attach_end_ns = get_time_ns();
if (!ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
detach_start_ns = get_time_ns();
bpf_link__destroy(link);
detach_end_ns = get_time_ns();
attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
printf("%s: found %lu functions\n", __func__, cnt);
printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
cleanup:
kprobe_multi_empty__destroy(skel);
if (syms) {
for (i = 0; i < cnt; i++)
free(syms[i]);
free(syms);
}
}
static void test_attach_override(void)
{
struct kprobe_multi_override *skel = NULL;
struct bpf_link *link = NULL;
skel = kprobe_multi_override__open_and_load();
if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
goto cleanup;
/* The test_override program calls bpf_override_return, so it should
* fail to attach to bpf_fentry_test1, which is not on the error
* injection list.
*/
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override,
"bpf_fentry_test1", NULL);
if (!ASSERT_ERR_PTR(link, "override_attached_bpf_fentry_test1")) {
bpf_link__destroy(link);
goto cleanup;
}
/* The should_fail_bio function is on the error injection list, so
* the attach should succeed.
*/
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override,
"should_fail_bio", NULL);
if (!ASSERT_OK_PTR(link, "override_attached_should_fail_bio"))
goto cleanup;
bpf_link__destroy(link);
cleanup:
kprobe_multi_override__destroy(skel);
}
void serial_test_kprobe_multi_bench_attach(void)
{
if (test__start_subtest("kernel"))
test_kprobe_multi_bench_attach(true);
if (test__start_subtest("modules"))
test_kprobe_multi_bench_attach(false);
}
void test_kprobe_multi_test(void)
{
if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
return;
if (test__start_subtest("skel_api"))
test_skel_api();
if (test__start_subtest("link_api_addrs"))
test_link_api_syms();
if (test__start_subtest("link_api_syms"))
test_link_api_addrs();
if (test__start_subtest("attach_api_pattern"))
test_attach_api_pattern();
if (test__start_subtest("attach_api_addrs"))
test_attach_api_addrs();
if (test__start_subtest("attach_api_syms"))
test_attach_api_syms();
if (test__start_subtest("attach_api_fails"))
test_attach_api_fails();
if (test__start_subtest("attach_override"))
test_attach_override();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c |
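/*
 * Editorial sketch (not part of the kernel tree): attaching a kprobe.multi
 * program by glob pattern, the same libbpf call exercised by
 * test_attach_api() above. The bpf_program pointer is assumed to come from
 * an already loaded object or skeleton; assumes libbpf >= 0.8 and a kernel
 * with fprobe support.
 */
#include <bpf/libbpf.h>

static struct bpf_link *attach_by_pattern(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.retprobe = false,	/* set to true for a kretprobe.multi */
	);

	/* attach one BPF program to every bpf_fentry_test* symbol at once */
	return bpf_program__attach_kprobe_multi_opts(prog, "bpf_fentry_test*",
						     &opts);
}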
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates.*/
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <test_progs.h>
#include <bpf/btf.h>
#include "rcu_read_lock.skel.h"
#include "cgroup_helpers.h"
static unsigned long long cgroup_id;
static void test_success(void)
{
struct rcu_read_lock *skel;
int err;
skel = rcu_read_lock__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->bss->target_pid = syscall(SYS_gettid);
bpf_program__set_autoload(skel->progs.get_cgroup_id, true);
bpf_program__set_autoload(skel->progs.task_succ, true);
bpf_program__set_autoload(skel->progs.two_regions, true);
bpf_program__set_autoload(skel->progs.non_sleepable_1, true);
bpf_program__set_autoload(skel->progs.non_sleepable_2, true);
bpf_program__set_autoload(skel->progs.task_trusted_non_rcuptr, true);
err = rcu_read_lock__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto out;
err = rcu_read_lock__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
syscall(SYS_getpgid);
ASSERT_EQ(skel->bss->task_storage_val, 2, "task_storage_val");
ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
out:
rcu_read_lock__destroy(skel);
}
static void test_rcuptr_acquire(void)
{
struct rcu_read_lock *skel;
int err;
skel = rcu_read_lock__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->bss->target_pid = syscall(SYS_gettid);
bpf_program__set_autoload(skel->progs.task_acquire, true);
err = rcu_read_lock__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto out;
err = rcu_read_lock__attach(skel);
ASSERT_OK(err, "skel_attach");
out:
rcu_read_lock__destroy(skel);
}
static const char * const inproper_region_tests[] = {
"miss_lock",
"no_lock",
"miss_unlock",
"non_sleepable_rcu_mismatch",
"inproper_sleepable_helper",
"inproper_sleepable_kfunc",
"nested_rcu_region",
};
static void test_inproper_region(void)
{
struct rcu_read_lock *skel;
struct bpf_program *prog;
int i, err;
for (i = 0; i < ARRAY_SIZE(inproper_region_tests); i++) {
skel = rcu_read_lock__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
prog = bpf_object__find_program_by_name(skel->obj, inproper_region_tests[i]);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto out;
bpf_program__set_autoload(prog, true);
err = rcu_read_lock__load(skel);
ASSERT_ERR(err, "skel_load");
out:
rcu_read_lock__destroy(skel);
}
}
static const char * const rcuptr_misuse_tests[] = {
"task_untrusted_rcuptr",
"cross_rcu_region",
};
static void test_rcuptr_misuse(void)
{
struct rcu_read_lock *skel;
struct bpf_program *prog;
int i, err;
for (i = 0; i < ARRAY_SIZE(rcuptr_misuse_tests); i++) {
skel = rcu_read_lock__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
prog = bpf_object__find_program_by_name(skel->obj, rcuptr_misuse_tests[i]);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto out;
bpf_program__set_autoload(prog, true);
err = rcu_read_lock__load(skel);
ASSERT_ERR(err, "skel_load");
out:
rcu_read_lock__destroy(skel);
}
}
void test_rcu_read_lock(void)
{
int cgroup_fd;
cgroup_fd = test__join_cgroup("/rcu_read_lock");
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /rcu_read_lock"))
goto out;
cgroup_id = get_cgroup_id("/rcu_read_lock");
if (test__start_subtest("success"))
test_success();
if (test__start_subtest("rcuptr_acquire"))
test_rcuptr_acquire();
if (test__start_subtest("negative_tests_inproper_region"))
test_inproper_region();
if (test__start_subtest("negative_tests_rcuptr_misuse"))
test_rcuptr_misuse();
close(cgroup_fd);
out:;
}
| linux-master | tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c |
// SPDX-License-Identifier: GPL-2.0
#include <regex.h>
#include <test_progs.h>
#include <network_helpers.h>
#include "test_spin_lock.skel.h"
#include "test_spin_lock_fail.skel.h"
static char log_buf[1024 * 1024];
static struct {
const char *prog_name;
const char *err_msg;
} spin_lock_fail_tests[] = {
{ "lock_id_kptr_preserve",
"5: (bf) r1 = r0 ; R0_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) "
"R1_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=ptr_ expected=percpu_ptr_" },
{ "lock_id_global_zero",
"; R1_w=map_value(off=0,ks=4,vs=4,imm=0)\n2: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=map_value expected=percpu_ptr_" },
{ "lock_id_mapval_preserve",
"[0-9]\\+: (bf) r1 = r0 ;"
" R0_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)"
" R1_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)\n"
"[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=map_value expected=percpu_ptr_" },
{ "lock_id_innermapval_preserve",
"[0-9]\\+: (bf) r1 = r0 ;"
" R0=map_value(id=2,off=0,ks=4,vs=8,imm=0)"
" R1_w=map_value(id=2,off=0,ks=4,vs=8,imm=0)\n"
"[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=map_value expected=percpu_ptr_" },
{ "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_kptr_global", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_kptr_mapval", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_kptr_innermapval", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_global_global", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_global_kptr", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_global_mapval", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_global_innermapval", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_mapval_mapval", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_mapval_kptr", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_mapval_global", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_mapval_innermapval", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_innermapval_innermapval1", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_innermapval_innermapval2", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_innermapval_kptr", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_innermapval_global", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" },
};
static int match_regex(const char *pattern, const char *string)
{
int err, rc;
regex_t re;
err = regcomp(&re, pattern, REG_NOSUB);
if (err) {
char errbuf[512];
regerror(err, &re, errbuf, sizeof(errbuf));
PRINT_FAIL("Can't compile regex: %s\n", errbuf);
return -1;
}
rc = regexec(&re, string, 0, NULL, 0);
regfree(&re);
return rc == 0 ? 1 : 0;
}
static void test_spin_lock_fail_prog(const char *prog_name, const char *err_msg)
{
LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
.kernel_log_size = sizeof(log_buf),
.kernel_log_level = 1);
struct test_spin_lock_fail *skel;
struct bpf_program *prog;
int ret;
skel = test_spin_lock_fail__open_opts(&opts);
if (!ASSERT_OK_PTR(skel, "test_spin_lock_fail__open_opts"))
return;
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto end;
bpf_program__set_autoload(prog, true);
ret = test_spin_lock_fail__load(skel);
if (!ASSERT_ERR(ret, "test_spin_lock_fail__load must fail"))
goto end;
/* Skip check if JIT does not support kfuncs */
if (strstr(log_buf, "JIT does not support calling kernel function")) {
test__skip();
goto end;
}
ret = match_regex(err_msg, log_buf);
if (!ASSERT_GE(ret, 0, "match_regex"))
goto end;
if (!ASSERT_TRUE(ret, "no match for expected error message")) {
fprintf(stderr, "Expected: %s\n", err_msg);
fprintf(stderr, "Verifier: %s\n", log_buf);
}
end:
test_spin_lock_fail__destroy(skel);
}
static void *spin_lock_thread(void *arg)
{
int err, prog_fd = *(u32 *) arg;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 10000,
);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_OK(topts.retval, "test_run retval");
pthread_exit(arg);
}
void test_spin_lock_success(void)
{
struct test_spin_lock *skel;
pthread_t thread_id[4];
int prog_fd, i;
void *ret;
skel = test_spin_lock__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_spin_lock__open_and_load"))
return;
prog_fd = bpf_program__fd(skel->progs.bpf_spin_lock_test);
for (i = 0; i < 4; i++) {
int err;
err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
if (!ASSERT_OK(err, "pthread_create"))
goto end;
}
for (i = 0; i < 4; i++) {
if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
goto end;
if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
goto end;
}
end:
test_spin_lock__destroy(skel);
}
void test_spin_lock(void)
{
int i;
test_spin_lock_success();
for (i = 0; i < ARRAY_SIZE(spin_lock_fail_tests); i++) {
if (!test__start_subtest(spin_lock_fail_tests[i].prog_name))
continue;
test_spin_lock_fail_prog(spin_lock_fail_tests[i].prog_name,
spin_lock_fail_tests[i].err_msg);
}
}
| linux-master | tools/testing/selftests/bpf/prog_tests/spin_lock.c |
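/*
 * Editorial sketch (not part of the kernel tree): capturing the verifier log
 * of a failing load through bpf_object_open_opts, the mechanism the
 * spin_lock_fail tests above use to grep for expected error messages.
 * "prog_fail.bpf.o" is a hypothetical object file path; assumes libbpf >= 1.0
 * (NULL return on open error).
 */
#include <stdio.h>
#include <bpf/libbpf.h>

static char vlog[1024 * 1024];

int main(void)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.kernel_log_buf = vlog,
		.kernel_log_size = sizeof(vlog),
		.kernel_log_level = 1,
	);
	struct bpf_object *obj;

	obj = bpf_object__open_file("prog_fail.bpf.o", &opts);
	if (!obj)
		return 1;
	if (bpf_object__load(obj))
		fprintf(stderr, "verifier said:\n%s\n", vlog);
	bpf_object__close(obj);
	return 0;
}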
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Google LLC.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>
#include <test_progs.h>
#include <linux/ring_buffer.h>
#include "ima.skel.h"
#define MAX_SAMPLES 4
static int _run_measured_process(const char *measured_dir, u32 *monitored_pid,
const char *cmd)
{
int child_pid, child_status;
child_pid = fork();
if (child_pid == 0) {
*monitored_pid = getpid();
execlp("./ima_setup.sh", "./ima_setup.sh", cmd, measured_dir,
NULL);
exit(errno);
} else if (child_pid > 0) {
waitpid(child_pid, &child_status, 0);
return WEXITSTATUS(child_status);
}
return -EINVAL;
}
static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
{
return _run_measured_process(measured_dir, monitored_pid, "run");
}
static u64 ima_hash_from_bpf[MAX_SAMPLES];
static int ima_hash_from_bpf_idx;
static int process_sample(void *ctx, void *data, size_t len)
{
if (ima_hash_from_bpf_idx >= MAX_SAMPLES)
return -ENOSPC;
ima_hash_from_bpf[ima_hash_from_bpf_idx++] = *((u64 *)data);
return 0;
}
static void test_init(struct ima__bss *bss)
{
ima_hash_from_bpf_idx = 0;
bss->use_ima_file_hash = false;
bss->enable_bprm_creds_for_exec = false;
bss->enable_kernel_read_file = false;
bss->test_deny = false;
}
void test_test_ima(void)
{
char measured_dir_template[] = "/tmp/ima_measuredXXXXXX";
struct ring_buffer *ringbuf = NULL;
const char *measured_dir;
u64 bin_true_sample;
char cmd[256];
int err, duration = 0, fresh_digest_idx = 0;
struct ima *skel = NULL;
skel = ima__open_and_load();
if (CHECK(!skel, "skel_load", "skeleton failed\n"))
goto close_prog;
ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf),
process_sample, NULL, NULL);
if (!ASSERT_OK_PTR(ringbuf, "ringbuf"))
goto close_prog;
err = ima__attach(skel);
if (CHECK(err, "attach", "attach failed: %d\n", err))
goto close_prog;
measured_dir = mkdtemp(measured_dir_template);
if (CHECK(measured_dir == NULL, "mkdtemp", "err %d\n", errno))
goto close_prog;
snprintf(cmd, sizeof(cmd), "./ima_setup.sh setup %s", measured_dir);
err = system(cmd);
if (CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno))
goto close_clean;
/*
* Test #1
* - Goal: obtain a sample with the bpf_ima_inode_hash() helper
* - Expected result: 1 sample (/bin/true)
*/
test_init(skel->bss);
err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
if (CHECK(err, "run_measured_process #1", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_EQ(err, 1, "num_samples_or_err");
ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
/*
* Test #2
* - Goal: obtain samples with the bpf_ima_file_hash() helper
* - Expected result: 2 samples (./ima_setup.sh, /bin/true)
*/
test_init(skel->bss);
skel->bss->use_ima_file_hash = true;
err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
if (CHECK(err, "run_measured_process #2", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_EQ(err, 2, "num_samples_or_err");
ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
bin_true_sample = ima_hash_from_bpf[1];
/*
* Test #3
* - Goal: confirm that bpf_ima_inode_hash() returns a non-fresh digest
* - Expected result:
* 1 sample (/bin/true: fresh) if commit 62622dab0a28 applied
* 2 samples (/bin/true: non-fresh, fresh) if commit 62622dab0a28 is
* not applied
*
* If commit 62622dab0a28 ("ima: return IMA digest value only when
* IMA_COLLECTED flag is set") is applied, bpf_ima_inode_hash() refuses
* to give a non-fresh digest, hence the correct result is 1 instead of
* 2.
*/
test_init(skel->bss);
err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
"modify-bin");
if (CHECK(err, "modify-bin #3", "err = %d\n", err))
goto close_clean;
skel->bss->enable_bprm_creds_for_exec = true;
err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
if (CHECK(err, "run_measured_process #3", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_GE(err, 1, "num_samples_or_err");
if (err == 2) {
ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample,
"sample_equal_or_err");
fresh_digest_idx = 1;
}
ASSERT_NEQ(ima_hash_from_bpf[fresh_digest_idx], 0, "ima_hash");
/* IMA refreshed the digest. */
ASSERT_NEQ(ima_hash_from_bpf[fresh_digest_idx], bin_true_sample,
"sample_equal_or_err");
/*
* Test #4
* - Goal: verify that bpf_ima_file_hash() returns a fresh digest
* - Expected result: 4 samples (./ima_setup.sh: fresh, fresh;
* /bin/true: fresh, fresh)
*/
test_init(skel->bss);
skel->bss->use_ima_file_hash = true;
skel->bss->enable_bprm_creds_for_exec = true;
err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
if (CHECK(err, "run_measured_process #4", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_EQ(err, 4, "num_samples_or_err");
ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[2], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[3], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[2], bin_true_sample,
"sample_different_or_err");
ASSERT_EQ(ima_hash_from_bpf[3], ima_hash_from_bpf[2],
"sample_equal_or_err");
skel->bss->use_ima_file_hash = false;
skel->bss->enable_bprm_creds_for_exec = false;
err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
"restore-bin");
if (CHECK(err, "restore-bin #3", "err = %d\n", err))
goto close_clean;
/*
* Test #5
* - Goal: obtain a sample from the kernel_read_file hook
* - Expected result: 2 samples (./ima_setup.sh, policy_test)
*/
test_init(skel->bss);
skel->bss->use_ima_file_hash = true;
skel->bss->enable_kernel_read_file = true;
err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
"load-policy");
if (CHECK(err, "run_measured_process #5", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_EQ(err, 2, "num_samples_or_err");
ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
/*
* Test #6
* - Goal: ensure that the kernel_read_file hook denies an operation
* - Expected result: 0 samples
*/
test_init(skel->bss);
skel->bss->enable_kernel_read_file = true;
skel->bss->test_deny = true;
err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
"load-policy");
if (CHECK(!err, "run_measured_process #6", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_EQ(err, 0, "num_samples_or_err");
close_clean:
snprintf(cmd, sizeof(cmd), "./ima_setup.sh cleanup %s", measured_dir);
err = system(cmd);
CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno);
close_prog:
ring_buffer__free(ringbuf);
ima__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_ima.c |
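/*
 * Editorial sketch (not part of the kernel tree): a generic libbpf ring
 * buffer consumer, the same ring_buffer__new()/__consume() machinery the IMA
 * test above uses to pull u64 hash samples out of the BPF side. The ringbuf
 * map fd is assumed to come from an already loaded object/skeleton; only the
 * userspace half is shown. Assumes libbpf >= 0.6.
 */
#include <stdio.h>
#include <bpf/libbpf.h>

static int on_sample(void *ctx, void *data, size_t len)
{
	if (len >= sizeof(__u64))
		printf("sample: %llu\n", *(unsigned long long *)data);
	return 0;	/* a non-zero return would stop consumption */
}

static int drain_ringbuf(int ringbuf_map_fd)
{
	struct ring_buffer *rb;
	int n;

	rb = ring_buffer__new(ringbuf_map_fd, on_sample, NULL, NULL);
	if (!rb)
		return -1;
	n = ring_buffer__consume(rb);	/* number of records consumed, or error */
	ring_buffer__free(rb);
	return n;
}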
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
#include "test_progs.h"
#include "testing_helpers.h"
static void clear_test_state(struct test_state *state)
{
state->error_cnt = 0;
state->sub_succ_cnt = 0;
state->skip_cnt = 0;
}
void test_prog_tests_framework(void)
{
struct test_state *state = env.test_state;
/* In all the ASSERT calls below we need to return on the first
* error, because the test state is cleared after each dummy
* subtest.
*/
/* test we properly count skipped tests with subtests */
if (test__start_subtest("test_good_subtest"))
test__end_subtest();
if (!ASSERT_EQ(state->skip_cnt, 0, "skip_cnt_check"))
return;
if (!ASSERT_EQ(state->error_cnt, 0, "error_cnt_check"))
return;
if (!ASSERT_EQ(state->subtest_num, 1, "subtest_num_check"))
return;
clear_test_state(state);
if (test__start_subtest("test_skip_subtest")) {
test__skip();
test__end_subtest();
}
if (test__start_subtest("test_skip_subtest")) {
test__skip();
test__end_subtest();
}
if (!ASSERT_EQ(state->skip_cnt, 2, "skip_cnt_check"))
return;
if (!ASSERT_EQ(state->subtest_num, 3, "subtest_num_check"))
return;
clear_test_state(state);
if (test__start_subtest("test_fail_subtest")) {
test__fail();
test__end_subtest();
}
if (!ASSERT_EQ(state->error_cnt, 1, "error_cnt_check"))
return;
if (!ASSERT_EQ(state->subtest_num, 4, "subtest_num_check"))
return;
clear_test_state(state);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c |
// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
#include <test_progs.h>
#include "uprobe_multi.skel.h"
#include "uprobe_multi_bench.skel.h"
#include "uprobe_multi_usdt.skel.h"
#include "bpf/libbpf_internal.h"
#include "testing_helpers.h"
static char test_data[] = "test_data";
noinline void uprobe_multi_func_1(void)
{
asm volatile ("");
}
noinline void uprobe_multi_func_2(void)
{
asm volatile ("");
}
noinline void uprobe_multi_func_3(void)
{
asm volatile ("");
}
struct child {
int go[2];
int pid;
};
static void release_child(struct child *child)
{
int child_status;
if (!child)
return;
close(child->go[1]);
close(child->go[0]);
if (child->pid > 0)
waitpid(child->pid, &child_status, 0);
}
static void kick_child(struct child *child)
{
char c = 1;
if (child) {
write(child->go[1], &c, 1);
release_child(child);
}
fflush(NULL);
}
static struct child *spawn_child(void)
{
static struct child child;
int err;
int c;
/* pipe to notify child to execute the trigger functions */
if (pipe(child.go))
return NULL;
child.pid = fork();
if (child.pid < 0) {
release_child(&child);
errno = EINVAL;
return NULL;
}
/* child */
if (child.pid == 0) {
close(child.go[1]);
/* wait for parent's kick */
err = read(child.go[0], &c, 1);
if (err != 1)
exit(err);
uprobe_multi_func_1();
uprobe_multi_func_2();
uprobe_multi_func_3();
exit(errno);
}
return &child;
}
static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
{
skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;
skel->bss->user_ptr = test_data;
/*
* Disable the pid check in the BPF program when running the pid
* filter test, because the probe should be triggered only by
* child->pid, which was passed in at probe attach time.
*/
skel->bss->pid = child ? 0 : getpid();
if (child)
kick_child(child);
/* trigger all probes */
uprobe_multi_func_1();
uprobe_multi_func_2();
uprobe_multi_func_3();
/*
* There are 2 entry and 2 exit probes called for each uprobe_multi_func_[123]
* function, and each sleepable probe (6 in total) increments uprobe_multi_sleep_result.
*/
ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result");
ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result");
ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result");
ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result");
ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 2, "uretprobe_multi_func_2_result");
ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result");
ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result");
if (child)
ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid");
}
static void test_skel_api(void)
{
struct uprobe_multi *skel = NULL;
int err;
skel = uprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
goto cleanup;
err = uprobe_multi__attach(skel);
if (!ASSERT_OK(err, "uprobe_multi__attach"))
goto cleanup;
uprobe_multi_test_run(skel, NULL);
cleanup:
uprobe_multi__destroy(skel);
}
static void
__test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts,
struct child *child)
{
pid_t pid = child ? child->pid : -1;
struct uprobe_multi *skel = NULL;
skel = uprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
goto cleanup;
opts->retprobe = false;
skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, pid,
binary, pattern, opts);
if (!ASSERT_OK_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi"))
goto cleanup;
opts->retprobe = true;
skel->links.uretprobe = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, pid,
binary, pattern, opts);
if (!ASSERT_OK_PTR(skel->links.uretprobe, "bpf_program__attach_uprobe_multi"))
goto cleanup;
opts->retprobe = false;
skel->links.uprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uprobe_sleep, pid,
binary, pattern, opts);
if (!ASSERT_OK_PTR(skel->links.uprobe_sleep, "bpf_program__attach_uprobe_multi"))
goto cleanup;
opts->retprobe = true;
skel->links.uretprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uretprobe_sleep,
pid, binary, pattern, opts);
if (!ASSERT_OK_PTR(skel->links.uretprobe_sleep, "bpf_program__attach_uprobe_multi"))
goto cleanup;
opts->retprobe = false;
skel->links.uprobe_extra = bpf_program__attach_uprobe_multi(skel->progs.uprobe_extra, -1,
binary, pattern, opts);
if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi"))
goto cleanup;
uprobe_multi_test_run(skel, child);
cleanup:
uprobe_multi__destroy(skel);
}
static void
test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts)
{
struct child *child;
/* no pid filter */
__test_attach_api(binary, pattern, opts, NULL);
/* pid filter */
child = spawn_child();
if (!ASSERT_OK_PTR(child, "spawn_child"))
return;
__test_attach_api(binary, pattern, opts, child);
}
static void test_attach_api_pattern(void)
{
LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
test_attach_api("/proc/self/exe", "uprobe_multi_func_*", &opts);
test_attach_api("/proc/self/exe", "uprobe_multi_func_?", &opts);
}
static void test_attach_api_syms(void)
{
LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
const char *syms[3] = {
"uprobe_multi_func_1",
"uprobe_multi_func_2",
"uprobe_multi_func_3",
};
opts.syms = syms;
opts.cnt = ARRAY_SIZE(syms);
test_attach_api("/proc/self/exe", NULL, &opts);
}
static void __test_link_api(struct child *child)
{
int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1;
LIBBPF_OPTS(bpf_link_create_opts, opts);
const char *path = "/proc/self/exe";
struct uprobe_multi *skel = NULL;
unsigned long *offsets = NULL;
const char *syms[3] = {
"uprobe_multi_func_1",
"uprobe_multi_func_2",
"uprobe_multi_func_3",
};
int link_extra_fd = -1;
int err;
err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets);
if (!ASSERT_OK(err, "elf_resolve_syms_offsets"))
return;
opts.uprobe_multi.path = path;
opts.uprobe_multi.offsets = offsets;
opts.uprobe_multi.cnt = ARRAY_SIZE(syms);
opts.uprobe_multi.pid = child ? child->pid : 0;
skel = uprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
goto cleanup;
opts.uprobe_multi.flags = 0;
prog_fd = bpf_program__fd(skel->progs.uprobe);
link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
goto cleanup;
opts.uprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
prog_fd = bpf_program__fd(skel->progs.uretprobe);
link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
goto cleanup;
opts.uprobe_multi.flags = 0;
prog_fd = bpf_program__fd(skel->progs.uprobe_sleep);
link3_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
if (!ASSERT_GE(link3_fd, 0, "link3_fd"))
goto cleanup;
opts.uprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
prog_fd = bpf_program__fd(skel->progs.uretprobe_sleep);
link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
if (!ASSERT_GE(link4_fd, 0, "link4_fd"))
goto cleanup;
opts.uprobe_multi.flags = 0;
opts.uprobe_multi.pid = 0;
prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
link_extra_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
if (!ASSERT_GE(link_extra_fd, 0, "link_extra_fd"))
goto cleanup;
uprobe_multi_test_run(skel, child);
cleanup:
if (link1_fd >= 0)
close(link1_fd);
if (link2_fd >= 0)
close(link2_fd);
if (link3_fd >= 0)
close(link3_fd);
if (link4_fd >= 0)
close(link4_fd);
if (link_extra_fd >= 0)
close(link_extra_fd);
uprobe_multi__destroy(skel);
free(offsets);
}
static void test_link_api(void)
{
struct child *child;
/* no pid filter */
__test_link_api(NULL);
/* pid filter */
child = spawn_child();
if (!ASSERT_OK_PTR(child, "spawn_child"))
return;
__test_link_api(child);
}
static void test_bench_attach_uprobe(void)
{
long attach_start_ns = 0, attach_end_ns = 0;
struct uprobe_multi_bench *skel = NULL;
long detach_start_ns, detach_end_ns;
double attach_delta, detach_delta;
int err;
skel = uprobe_multi_bench__open_and_load();
if (!ASSERT_OK_PTR(skel, "uprobe_multi_bench__open_and_load"))
goto cleanup;
attach_start_ns = get_time_ns();
err = uprobe_multi_bench__attach(skel);
if (!ASSERT_OK(err, "uprobe_multi_bench__attach"))
goto cleanup;
attach_end_ns = get_time_ns();
system("./uprobe_multi bench");
ASSERT_EQ(skel->bss->count, 50000, "uprobes_count");
cleanup:
detach_start_ns = get_time_ns();
uprobe_multi_bench__destroy(skel);
detach_end_ns = get_time_ns();
attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
}
static void test_bench_attach_usdt(void)
{
long attach_start_ns = 0, attach_end_ns = 0;
struct uprobe_multi_usdt *skel = NULL;
long detach_start_ns, detach_end_ns;
double attach_delta, detach_delta;
skel = uprobe_multi_usdt__open_and_load();
if (!ASSERT_OK_PTR(skel, "uprobe_multi__open"))
goto cleanup;
attach_start_ns = get_time_ns();
skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, -1, "./uprobe_multi",
"test", "usdt", NULL);
if (!ASSERT_OK_PTR(skel->links.usdt0, "bpf_program__attach_usdt"))
goto cleanup;
attach_end_ns = get_time_ns();
system("./uprobe_multi usdt");
ASSERT_EQ(skel->bss->count, 50000, "usdt_count");
cleanup:
detach_start_ns = get_time_ns();
uprobe_multi_usdt__destroy(skel);
detach_end_ns = get_time_ns();
attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
}
void test_uprobe_multi_test(void)
{
if (test__start_subtest("skel_api"))
test_skel_api();
if (test__start_subtest("attach_api_pattern"))
test_attach_api_pattern();
if (test__start_subtest("attach_api_syms"))
test_attach_api_syms();
if (test__start_subtest("link_api"))
test_link_api();
if (test__start_subtest("bench_uprobe"))
test_bench_attach_uprobe();
if (test__start_subtest("bench_usdt"))
test_bench_attach_usdt();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c |
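/*
 * Editorial sketch (not part of the kernel tree): attaching a uprobe.multi
 * program to every function in the running binary that matches a glob,
 * mirroring test_attach_api_pattern() above. The bpf_program pointer is
 * assumed to come from an already loaded skeleton; assumes libbpf >= 1.3 and
 * a kernel with uprobe_multi link support (6.6+).
 */
#include <bpf/libbpf.h>

static struct bpf_link *attach_self_uprobes(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
		.retprobe = false,	/* true would make these uretprobes */
	);

	/* pid -1: no pid filter, as in the tests above */
	return bpf_program__attach_uprobe_multi(prog, -1, "/proc/self/exe",
						"uprobe_multi_func_*", &opts);
}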
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "recursion.skel.h"
void test_recursion(void)
{
struct bpf_prog_info prog_info = {};
__u32 prog_info_len = sizeof(prog_info);
struct recursion *skel;
int key = 0;
int err;
skel = recursion__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
err = recursion__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
ASSERT_EQ(skel->bss->pass1, 0, "pass1 == 0");
bpf_map_delete_elem(bpf_map__fd(skel->maps.hash1), &key);
ASSERT_EQ(skel->bss->pass1, 1, "pass1 == 1");
bpf_map_delete_elem(bpf_map__fd(skel->maps.hash1), &key);
ASSERT_EQ(skel->bss->pass1, 2, "pass1 == 2");
ASSERT_EQ(skel->bss->pass2, 0, "pass2 == 0");
bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key);
ASSERT_EQ(skel->bss->pass2, 1, "pass2 == 1");
bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key);
ASSERT_EQ(skel->bss->pass2, 2, "pass2 == 2");
err = bpf_prog_get_info_by_fd(bpf_program__fd(skel->progs.on_delete),
&prog_info, &prog_info_len);
if (!ASSERT_OK(err, "get_prog_info"))
goto out;
ASSERT_EQ(prog_info.recursion_misses, 2, "recursion_misses");
out:
recursion__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/recursion.c |
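/*
 * Editorial sketch (not part of the kernel tree): reading recursion_misses
 * for an arbitrary program fd, the same bpf_prog_get_info_by_fd() query the
 * recursion test above performs on its skeleton program. Assumes libbpf >=
 * 1.2 (for the _by_fd name) and a kernel >= 5.12 that reports the counter.
 */
#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

static void print_recursion_misses(int prog_fd)
{
	struct bpf_prog_info info;
	__u32 len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (bpf_prog_get_info_by_fd(prog_fd, &info, &len))
		return;
	printf("prog id %u: %llu recursion misses\n",
	       info.id, (unsigned long long)info.recursion_misses);
}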
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "test_parse_tcp_hdr_opt.skel.h"
#include "test_parse_tcp_hdr_opt_dynptr.skel.h"
#include "test_tcp_hdr_options.h"
struct test_pkt {
struct ipv6_packet pk6_v6;
u8 options[16];
} __packed;
struct test_pkt pkt = {
.pk6_v6.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.pk6_v6.iph.nexthdr = IPPROTO_TCP,
.pk6_v6.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.pk6_v6.tcp.urg_ptr = 123,
.pk6_v6.tcp.doff = 9, /* 16 bytes of options */
.options = {
TCPOPT_MSS, 4, 0x05, 0xB4, TCPOPT_NOP, TCPOPT_NOP,
0, 6, 0xBB, 0xBB, 0xBB, 0xBB, TCPOPT_EOL
},
};
static void test_parse_opt(void)
{
struct test_parse_tcp_hdr_opt *skel;
struct bpf_program *prog;
char buf[128];
int err;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt,
.data_size_in = sizeof(pkt),
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = 3,
);
skel = test_parse_tcp_hdr_opt__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
pkt.options[6] = skel->rodata->tcp_hdr_opt_kind_tpr;
prog = skel->progs.xdp_ingress_v6;
err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);
ASSERT_OK(err, "ipv6 test_run");
ASSERT_EQ(topts.retval, XDP_PASS, "ipv6 test_run retval");
ASSERT_EQ(skel->bss->server_id, 0xBBBBBBBB, "server id");
test_parse_tcp_hdr_opt__destroy(skel);
}
static void test_parse_opt_dynptr(void)
{
struct test_parse_tcp_hdr_opt_dynptr *skel;
struct bpf_program *prog;
char buf[128];
int err;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt,
.data_size_in = sizeof(pkt),
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = 3,
);
skel = test_parse_tcp_hdr_opt_dynptr__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
pkt.options[6] = skel->rodata->tcp_hdr_opt_kind_tpr;
prog = skel->progs.xdp_ingress_v6;
err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);
ASSERT_OK(err, "ipv6 test_run");
ASSERT_EQ(topts.retval, XDP_PASS, "ipv6 test_run retval");
ASSERT_EQ(skel->bss->server_id, 0xBBBBBBBB, "server id");
test_parse_tcp_hdr_opt_dynptr__destroy(skel);
}
void test_parse_tcp_hdr_opt(void)
{
if (test__start_subtest("parse_tcp_hdr_opt"))
test_parse_opt();
if (test__start_subtest("parse_tcp_hdr_opt_dynptr"))
test_parse_opt_dynptr();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <sys/syscall.h>
#include "linked_funcs.skel.h"
void test_linked_funcs(void)
{
int err;
struct linked_funcs *skel;
skel = linked_funcs__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
/* handler1 and handler2 are marked as SEC("?raw_tp/sys_enter") and
* are set to not autoload by default
*/
bpf_program__set_autoload(skel->progs.handler1, true);
bpf_program__set_autoload(skel->progs.handler2, true);
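/*
* For reference, a non-autoloaded program is declared on the BPF side
* roughly as in this sketch (illustration only; the real programs live in
* the linked_funcs object files and differ in detail):
*
*	SEC("?raw_tp/sys_enter")
*	int handler1(const void *ctx)
*	{
*		...
*	}
*
* The leading '?' tells libbpf to skip the program at load time unless
* bpf_program__set_autoload(prog, true) was called first, as done above.
*/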
skel->rodata->my_tid = syscall(SYS_gettid);
skel->bss->syscall_id = SYS_getpgid;
err = linked_funcs__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
err = linked_funcs__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
/* trigger */
syscall(SYS_getpgid);
ASSERT_EQ(skel->bss->output_val1, 2000 + 2000, "output_val1");
ASSERT_EQ(skel->bss->output_ctx1, SYS_getpgid, "output_ctx1");
ASSERT_EQ(skel->bss->output_weak1, 42, "output_weak1");
ASSERT_EQ(skel->bss->output_val2, 2 * 1000 + 2 * (2 * 1000), "output_val2");
ASSERT_EQ(skel->bss->output_ctx2, SYS_getpgid, "output_ctx2");
/* output_weak2 should never be updated */
ASSERT_EQ(skel->bss->output_weak2, 0, "output_weak2");
cleanup:
linked_funcs__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/linked_funcs.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* End-to-end eBPF tunnel test suite
* This file tests the BPF network tunnel implementation.
*
* Topology:
* ---------
* root namespace | at_ns0 namespace
* |
* ----------- | -----------
* | tnl dev | | | tnl dev | (overlay network)
* ----------- | -----------
* metadata-mode | metadata-mode
* with bpf | with bpf
* |
* ---------- | ----------
* | veth1 | --------- | veth0 | (underlay network)
* ---------- peer ----------
*
*
* Device Configuration
* --------------------
* root namespace with metadata-mode tunnel + BPF
* Device names and addresses:
* veth1 IP 1: 172.16.1.200, IPv6: 00::22 (underlay)
* IP 2: 172.16.1.20, IPv6: 00::bb (underlay)
* tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200, IPv6: 1::22 (overlay)
*
* Namespace at_ns0 with native tunnel
* Device names and addresses:
* veth0 IPv4: 172.16.1.100, IPv6: 00::11 (underlay)
* tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100, IPv6: 1::11 (overlay)
*
*
* End-to-end ping packet flow
* ---------------------------
* Most of the tests start with namespace creation and device configuration,
* then ping the underlay and overlay networks. When doing 'ping 10.1.1.100'
* from the root namespace, the following operations happen:
* 1) Route lookup shows 10.1.1.100/24 belongs to the tnl dev, so the packet
*    is forwarded to the tnl dev.
* 2) The tnl device's egress BPF program is triggered and sets the tunnel
*    metadata, with local_ip=172.16.1.200, remote_ip=172.16.1.100. The BPF
*    program chooses either the primary or the secondary IP of veth1 as the
*    local IP of the tunnel, based on the value of the bpf map local_ip_map.
* 3) The outer tunnel header is prepended and the packet is routed to
*    veth1's egress.
* 4) veth0's ingress queue receives the tunneled packet in namespace at_ns0.
* 5) The tunnel protocol handler, e.g. vxlan_rcv, decapsulates the packet.
* 6) The packet is forwarded to the overlay tnl dev.
*/
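/*
* For orientation, the "egress BPF program" mentioned in step 2 conceptually
* looks like the sketch below. This is an illustration only, not the actual
* test_tunnel_kern source; the program name is made up, while local_ip_map
* and the addresses match the description above.
*
*	SEC("tc")
*	int sketch_vxlan_set_tunnel_src(struct __sk_buff *skb)
*	{
*		struct bpf_tunnel_key key = {};
*		__u32 index = 0, *local_ip;
*
*		local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
*		if (!local_ip)
*			return TC_ACT_SHOT;
*
*		key.local_ipv4 = *local_ip;
*		key.remote_ipv4 = 0xac100164;	// 172.16.1.100
*		key.tunnel_id = 2;
*		key.tunnel_ttl = 64;
*
*		if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
*					   BPF_F_ZERO_CSUM_TX) < 0)
*			return TC_ACT_SHOT;
*		return TC_ACT_OK;
*	}
*/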
#include <arpa/inet.h>
#include <linux/if_tun.h>
#include <linux/limits.h>
#include <linux/sysctl.h>
#include <linux/time_types.h>
#include <linux/net_tstamp.h>
#include <net/if.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>
#include "test_progs.h"
#include "network_helpers.h"
#include "test_tunnel_kern.skel.h"
#define IP4_ADDR_VETH0 "172.16.1.100"
#define IP4_ADDR1_VETH1 "172.16.1.200"
#define IP4_ADDR2_VETH1 "172.16.1.20"
#define IP4_ADDR_TUNL_DEV0 "10.1.1.100"
#define IP4_ADDR_TUNL_DEV1 "10.1.1.200"
#define IP6_ADDR_VETH0 "::11"
#define IP6_ADDR1_VETH1 "::22"
#define IP6_ADDR2_VETH1 "::bb"
#define IP4_ADDR1_HEX_VETH1 0xac1001c8
#define IP4_ADDR2_HEX_VETH1 0xac100114
#define IP6_ADDR1_HEX_VETH1 0x22
#define IP6_ADDR2_HEX_VETH1 0xbb
#define MAC_TUNL_DEV0 "52:54:00:d9:01:00"
#define MAC_TUNL_DEV1 "52:54:00:d9:02:00"
#define MAC_VETH1 "52:54:00:d9:03:00"
#define VXLAN_TUNL_DEV0 "vxlan00"
#define VXLAN_TUNL_DEV1 "vxlan11"
#define IP6VXLAN_TUNL_DEV0 "ip6vxlan00"
#define IP6VXLAN_TUNL_DEV1 "ip6vxlan11"
#define IPIP_TUNL_DEV0 "ipip00"
#define IPIP_TUNL_DEV1 "ipip11"
#define PING_ARGS "-i 0.01 -c 3 -w 10 -q"
static int config_device(void)
{
SYS(fail, "ip netns add at_ns0");
SYS(fail, "ip link add veth0 address " MAC_VETH1 " type veth peer name veth1");
SYS(fail, "ip link set veth0 netns at_ns0");
SYS(fail, "ip addr add " IP4_ADDR1_VETH1 "/24 dev veth1");
SYS(fail, "ip link set dev veth1 up mtu 1500");
SYS(fail, "ip netns exec at_ns0 ip addr add " IP4_ADDR_VETH0 "/24 dev veth0");
SYS(fail, "ip netns exec at_ns0 ip link set dev veth0 up mtu 1500");
return 0;
fail:
return -1;
}
static void cleanup(void)
{
SYS_NOFAIL("test -f /var/run/netns/at_ns0 && ip netns delete at_ns0");
SYS_NOFAIL("ip link del veth1 2> /dev/null");
SYS_NOFAIL("ip link del %s 2> /dev/null", VXLAN_TUNL_DEV1);
SYS_NOFAIL("ip link del %s 2> /dev/null", IP6VXLAN_TUNL_DEV1);
}
static int add_vxlan_tunnel(void)
{
/* at_ns0 namespace */
SYS(fail, "ip netns exec at_ns0 ip link add dev %s type vxlan external gbp dstport 4789",
VXLAN_TUNL_DEV0);
SYS(fail, "ip netns exec at_ns0 ip link set dev %s address %s up",
VXLAN_TUNL_DEV0, MAC_TUNL_DEV0);
SYS(fail, "ip netns exec at_ns0 ip addr add dev %s %s/24",
VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0);
SYS(fail, "ip netns exec at_ns0 ip neigh add %s lladdr %s dev %s",
IP4_ADDR_TUNL_DEV1, MAC_TUNL_DEV1, VXLAN_TUNL_DEV0);
SYS(fail, "ip netns exec at_ns0 ip neigh add %s lladdr %s dev veth0",
IP4_ADDR2_VETH1, MAC_VETH1);
/* root namespace */
SYS(fail, "ip link add dev %s type vxlan external gbp dstport 4789",
VXLAN_TUNL_DEV1);
SYS(fail, "ip link set dev %s address %s up", VXLAN_TUNL_DEV1, MAC_TUNL_DEV1);
SYS(fail, "ip addr add dev %s %s/24", VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1);
SYS(fail, "ip neigh add %s lladdr %s dev %s",
IP4_ADDR_TUNL_DEV0, MAC_TUNL_DEV0, VXLAN_TUNL_DEV1);
return 0;
fail:
return -1;
}
static void delete_vxlan_tunnel(void)
{
SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s",
VXLAN_TUNL_DEV0);
SYS_NOFAIL("ip link delete dev %s", VXLAN_TUNL_DEV1);
}
static int add_ip6vxlan_tunnel(void)
{
SYS(fail, "ip netns exec at_ns0 ip -6 addr add %s/96 dev veth0",
IP6_ADDR_VETH0);
SYS(fail, "ip netns exec at_ns0 ip link set dev veth0 up");
SYS(fail, "ip -6 addr add %s/96 dev veth1", IP6_ADDR1_VETH1);
SYS(fail, "ip -6 addr add %s/96 dev veth1", IP6_ADDR2_VETH1);
SYS(fail, "ip link set dev veth1 up");
/* at_ns0 namespace */
SYS(fail, "ip netns exec at_ns0 ip link add dev %s type vxlan external dstport 4789",
IP6VXLAN_TUNL_DEV0);
SYS(fail, "ip netns exec at_ns0 ip addr add dev %s %s/24",
IP6VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0);
SYS(fail, "ip netns exec at_ns0 ip link set dev %s address %s up",
IP6VXLAN_TUNL_DEV0, MAC_TUNL_DEV0);
/* root namespace */
SYS(fail, "ip link add dev %s type vxlan external dstport 4789",
IP6VXLAN_TUNL_DEV1);
SYS(fail, "ip addr add dev %s %s/24", IP6VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1);
SYS(fail, "ip link set dev %s address %s up",
IP6VXLAN_TUNL_DEV1, MAC_TUNL_DEV1);
return 0;
fail:
return -1;
}
static void delete_ip6vxlan_tunnel(void)
{
SYS_NOFAIL("ip netns exec at_ns0 ip -6 addr delete %s/96 dev veth0",
IP6_ADDR_VETH0);
SYS_NOFAIL("ip -6 addr delete %s/96 dev veth1", IP6_ADDR1_VETH1);
SYS_NOFAIL("ip -6 addr delete %s/96 dev veth1", IP6_ADDR2_VETH1);
SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s",
IP6VXLAN_TUNL_DEV0);
SYS_NOFAIL("ip link delete dev %s", IP6VXLAN_TUNL_DEV1);
}
enum ipip_encap {
NONE = 0,
FOU = 1,
GUE = 2,
};
static int set_ipip_encap(const char *ipproto, const char *type)
{
SYS(fail, "ip -n at_ns0 fou add port 5555 %s", ipproto);
SYS(fail, "ip -n at_ns0 link set dev %s type ipip encap %s",
IPIP_TUNL_DEV0, type);
SYS(fail, "ip -n at_ns0 link set dev %s type ipip encap-dport 5555",
IPIP_TUNL_DEV0);
return 0;
fail:
return -1;
}
static int add_ipip_tunnel(enum ipip_encap encap)
{
int err;
const char *ipproto, *type;
switch (encap) {
case FOU:
ipproto = "ipproto 4";
type = "fou";
break;
case GUE:
ipproto = "gue";
type = ipproto;
break;
default:
ipproto = NULL;
type = ipproto;
}
/* at_ns0 namespace */
SYS(fail, "ip -n at_ns0 link add dev %s type ipip local %s remote %s",
IPIP_TUNL_DEV0, IP4_ADDR_VETH0, IP4_ADDR1_VETH1);
if (type && ipproto) {
err = set_ipip_encap(ipproto, type);
if (!ASSERT_OK(err, "set_ipip_encap"))
goto fail;
}
SYS(fail, "ip -n at_ns0 link set dev %s up", IPIP_TUNL_DEV0);
SYS(fail, "ip -n at_ns0 addr add dev %s %s/24",
IPIP_TUNL_DEV0, IP4_ADDR_TUNL_DEV0);
/* root namespace */
if (type && ipproto)
SYS(fail, "ip fou add port 5555 %s", ipproto);
SYS(fail, "ip link add dev %s type ipip external", IPIP_TUNL_DEV1);
SYS(fail, "ip link set dev %s up", IPIP_TUNL_DEV1);
SYS(fail, "ip addr add dev %s %s/24", IPIP_TUNL_DEV1,
IP4_ADDR_TUNL_DEV1);
return 0;
fail:
return -1;
}
static void delete_ipip_tunnel(void)
{
SYS_NOFAIL("ip -n at_ns0 link delete dev %s", IPIP_TUNL_DEV0);
SYS_NOFAIL("ip -n at_ns0 fou del port 5555 2> /dev/null");
SYS_NOFAIL("ip link delete dev %s", IPIP_TUNL_DEV1);
SYS_NOFAIL("ip fou del port 5555 2> /dev/null");
}
static int test_ping(int family, const char *addr)
{
SYS(fail, "%s %s %s > /dev/null", ping_command(family), PING_ARGS, addr);
return 0;
fail:
return -1;
}
static int attach_tc_prog(struct bpf_tc_hook *hook, int igr_fd, int egr_fd)
{
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1,
.priority = 1, .prog_fd = igr_fd);
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1,
.priority = 1, .prog_fd = egr_fd);
int ret;
ret = bpf_tc_hook_create(hook);
if (!ASSERT_OK(ret, "create tc hook"))
return ret;
if (igr_fd >= 0) {
hook->attach_point = BPF_TC_INGRESS;
ret = bpf_tc_attach(hook, &opts1);
if (!ASSERT_OK(ret, "bpf_tc_attach")) {
bpf_tc_hook_destroy(hook);
return ret;
}
}
if (egr_fd >= 0) {
hook->attach_point = BPF_TC_EGRESS;
ret = bpf_tc_attach(hook, &opts2);
if (!ASSERT_OK(ret, "bpf_tc_attach")) {
bpf_tc_hook_destroy(hook);
return ret;
}
}
return 0;
}
static void test_vxlan_tunnel(void)
{
struct test_tunnel_kern *skel = NULL;
struct nstoken *nstoken;
int local_ip_map_fd = -1;
int set_src_prog_fd, get_src_prog_fd;
int set_dst_prog_fd;
int key = 0, ifindex = -1;
uint local_ip;
int err;
DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
.attach_point = BPF_TC_INGRESS);
/* add vxlan tunnel */
err = add_vxlan_tunnel();
if (!ASSERT_OK(err, "add vxlan tunnel"))
goto done;
/* load and attach bpf prog to tunnel dev tc hook point */
skel = test_tunnel_kern__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
goto done;
ifindex = if_nametoindex(VXLAN_TUNL_DEV1);
if (!ASSERT_NEQ(ifindex, 0, "vxlan11 ifindex"))
goto done;
tc_hook.ifindex = ifindex;
get_src_prog_fd = bpf_program__fd(skel->progs.vxlan_get_tunnel_src);
set_src_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_src);
if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd"))
goto done;
if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd"))
goto done;
if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd))
goto done;
/* load and attach bpf prog to veth dev tc hook point */
ifindex = if_nametoindex("veth1");
if (!ASSERT_NEQ(ifindex, 0, "veth1 ifindex"))
goto done;
tc_hook.ifindex = ifindex;
set_dst_prog_fd = bpf_program__fd(skel->progs.veth_set_outer_dst);
if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd"))
goto done;
if (attach_tc_prog(&tc_hook, set_dst_prog_fd, -1))
goto done;
/* load and attach prog set_md to tunnel dev tc hook point at_ns0 */
nstoken = open_netns("at_ns0");
if (!ASSERT_OK_PTR(nstoken, "setns src"))
goto done;
ifindex = if_nametoindex(VXLAN_TUNL_DEV0);
if (!ASSERT_NEQ(ifindex, 0, "vxlan00 ifindex"))
goto done;
tc_hook.ifindex = ifindex;
set_dst_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_dst);
if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd"))
goto done;
if (attach_tc_prog(&tc_hook, -1, set_dst_prog_fd))
goto done;
close_netns(nstoken);
/* use veth1 ip 2 as tunnel source ip */
local_ip_map_fd = bpf_map__fd(skel->maps.local_ip_map);
if (!ASSERT_GE(local_ip_map_fd, 0, "bpf_map__fd"))
goto done;
local_ip = IP4_ADDR2_HEX_VETH1;
err = bpf_map_update_elem(local_ip_map_fd, &key, &local_ip, BPF_ANY);
if (!ASSERT_OK(err, "update bpf local_ip_map"))
goto done;
/* ping test */
err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0);
if (!ASSERT_OK(err, "test_ping"))
goto done;
done:
/* delete vxlan tunnel */
delete_vxlan_tunnel();
if (local_ip_map_fd >= 0)
close(local_ip_map_fd);
if (skel)
test_tunnel_kern__destroy(skel);
}
static void test_ip6vxlan_tunnel(void)
{
struct test_tunnel_kern *skel = NULL;
struct nstoken *nstoken;
int local_ip_map_fd = -1;
int set_src_prog_fd, get_src_prog_fd;
int set_dst_prog_fd;
int key = 0, ifindex = -1;
uint local_ip;
int err;
DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
.attach_point = BPF_TC_INGRESS);
/* add vxlan tunnel */
err = add_ip6vxlan_tunnel();
if (!ASSERT_OK(err, "add_ip6vxlan_tunnel"))
goto done;
/* load and attach bpf prog to tunnel dev tc hook point */
skel = test_tunnel_kern__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
goto done;
ifindex = if_nametoindex(IP6VXLAN_TUNL_DEV1);
if (!ASSERT_NEQ(ifindex, 0, "ip6vxlan11 ifindex"))
goto done;
tc_hook.ifindex = ifindex;
get_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_get_tunnel_src);
set_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_src);
if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd"))
goto done;
if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd"))
goto done;
if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd))
goto done;
/* load and attach prog set_md to tunnel dev tc hook point at_ns0 */
nstoken = open_netns("at_ns0");
if (!ASSERT_OK_PTR(nstoken, "setns src"))
goto done;
ifindex = if_nametoindex(IP6VXLAN_TUNL_DEV0);
if (!ASSERT_NEQ(ifindex, 0, "ip6vxlan00 ifindex"))
goto done;
tc_hook.ifindex = ifindex;
set_dst_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_dst);
if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd"))
goto done;
if (attach_tc_prog(&tc_hook, -1, set_dst_prog_fd))
goto done;
close_netns(nstoken);
/* use veth1 ip 2 as tunnel source ip */
local_ip_map_fd = bpf_map__fd(skel->maps.local_ip_map);
if (!ASSERT_GE(local_ip_map_fd, 0, "get local_ip_map fd"))
goto done;
local_ip = IP6_ADDR2_HEX_VETH1;
err = bpf_map_update_elem(local_ip_map_fd, &key, &local_ip, BPF_ANY);
if (!ASSERT_OK(err, "update bpf local_ip_map"))
goto done;
/* ping test */
err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0);
if (!ASSERT_OK(err, "test_ping"))
goto done;
done:
/* delete ipv6 vxlan tunnel */
delete_ip6vxlan_tunnel();
if (local_ip_map_fd >= 0)
close(local_ip_map_fd);
if (skel)
test_tunnel_kern__destroy(skel);
}
static void test_ipip_tunnel(enum ipip_encap encap)
{
struct test_tunnel_kern *skel = NULL;
struct nstoken *nstoken;
int set_src_prog_fd, get_src_prog_fd;
int ifindex = -1;
int err;
DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
.attach_point = BPF_TC_INGRESS);
/* add ipip tunnel */
err = add_ipip_tunnel(encap);
if (!ASSERT_OK(err, "add_ipip_tunnel"))
goto done;
/* load and attach bpf prog to tunnel dev tc hook point */
skel = test_tunnel_kern__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
goto done;
ifindex = if_nametoindex(IPIP_TUNL_DEV1);
if (!ASSERT_NEQ(ifindex, 0, "ipip11 ifindex"))
goto done;
tc_hook.ifindex = ifindex;
switch (encap) {
case FOU:
get_src_prog_fd = bpf_program__fd(
skel->progs.ipip_encap_get_tunnel);
set_src_prog_fd = bpf_program__fd(
skel->progs.ipip_fou_set_tunnel);
break;
case GUE:
get_src_prog_fd = bpf_program__fd(
skel->progs.ipip_encap_get_tunnel);
set_src_prog_fd = bpf_program__fd(
skel->progs.ipip_gue_set_tunnel);
break;
default:
get_src_prog_fd = bpf_program__fd(
skel->progs.ipip_get_tunnel);
set_src_prog_fd = bpf_program__fd(
skel->progs.ipip_set_tunnel);
}
if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd"))
goto done;
if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd"))
goto done;
if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd))
goto done;
/* ping from root namespace test */
err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0);
if (!ASSERT_OK(err, "test_ping"))
goto done;
/* ping from at_ns0 namespace test */
nstoken = open_netns("at_ns0");
err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV1);
if (!ASSERT_OK(err, "test_ping"))
goto done;
close_netns(nstoken);
done:
/* delete ipip tunnel */
delete_ipip_tunnel();
if (skel)
test_tunnel_kern__destroy(skel);
}
#define RUN_TEST(name, ...) \
({ \
if (test__start_subtest(#name)) { \
test_ ## name(__VA_ARGS__); \
} \
})
static void *test_tunnel_run_tests(void *arg)
{
cleanup();
config_device();
RUN_TEST(vxlan_tunnel);
RUN_TEST(ip6vxlan_tunnel);
RUN_TEST(ipip_tunnel, NONE);
RUN_TEST(ipip_tunnel, FOU);
RUN_TEST(ipip_tunnel, GUE);
cleanup();
return NULL;
}
void test_tunnel(void)
{
pthread_t test_thread;
int err;
/* Run the tests in their own thread to isolate the namespace changes
* so they do not affect the environment of other tests.
* (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
*/
err = pthread_create(&test_thread, NULL, &test_tunnel_run_tests, NULL);
if (ASSERT_OK(err, "pthread_create"))
ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_tunnel.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <network_helpers.h>
#include "type_cast.skel.h"
static void test_xdp(void)
{
struct type_cast *skel;
int err, prog_fd;
char buf[128];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = 1,
);
skel = type_cast__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
bpf_program__set_autoload(skel->progs.md_xdp, true);
err = type_cast__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto out;
prog_fd = bpf_program__fd(skel->progs.md_xdp);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, XDP_PASS, "xdp test_run retval");
ASSERT_EQ(skel->bss->ifindex, 1, "xdp_md ifindex");
ASSERT_EQ(skel->bss->ifindex, skel->bss->ingress_ifindex, "xdp_md ingress_ifindex");
ASSERT_STREQ(skel->bss->name, "lo", "xdp_md name");
ASSERT_NEQ(skel->bss->inum, 0, "xdp_md inum");
out:
type_cast__destroy(skel);
}
static void test_tc(void)
{
struct type_cast *skel;
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
skel = type_cast__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
bpf_program__set_autoload(skel->progs.md_skb, true);
err = type_cast__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto out;
prog_fd = bpf_program__fd(skel->progs.md_skb);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "tc test_run retval");
ASSERT_EQ(skel->bss->meta_len, 0, "skb meta_len");
ASSERT_EQ(skel->bss->frag0_len, 0, "skb frag0_len");
ASSERT_NEQ(skel->bss->kskb_len, 0, "skb len");
ASSERT_NEQ(skel->bss->kskb2_len, 0, "skb2 len");
ASSERT_EQ(skel->bss->kskb_len, skel->bss->kskb2_len, "skb len compare");
out:
type_cast__destroy(skel);
}
static const char * const negative_tests[] = {
"untrusted_ptr",
"kctx_u64",
};
static void test_negative(void)
{
struct bpf_program *prog;
struct type_cast *skel;
int i, err;
for (i = 0; i < ARRAY_SIZE(negative_tests); i++) {
skel = type_cast__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
prog = bpf_object__find_program_by_name(skel->obj, negative_tests[i]);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto out;
bpf_program__set_autoload(prog, true);
err = type_cast__load(skel);
ASSERT_ERR(err, "skel_load");
out:
type_cast__destroy(skel);
}
}
void test_type_cast(void)
{
if (test__start_subtest("xdp"))
test_xdp();
if (test__start_subtest("tc"))
test_tc();
if (test__start_subtest("negative"))
test_negative();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/type_cast.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "kfree_skb.skel.h"
struct meta {
int ifindex;
__u32 cb32_0;
__u8 cb8_0;
};
static union {
__u32 cb32[5];
__u8 cb8[20];
} cb = {
.cb32[0] = 0x81828384,
};
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
struct meta *meta = (struct meta *)data;
struct ipv6_packet *pkt_v6 = data + sizeof(*meta);
int duration = 0;
if (CHECK(size != 72 + sizeof(*meta), "check_size", "size %u != %zu\n",
size, 72 + sizeof(*meta)))
return;
if (CHECK(meta->ifindex != 1, "check_meta_ifindex",
"meta->ifindex = %d\n", meta->ifindex))
/* spurious kfree_skb not on loopback device */
return;
if (CHECK(meta->cb8_0 != cb.cb8[0], "check_cb8_0", "cb8_0 %x != %x\n",
meta->cb8_0, cb.cb8[0]))
return;
if (CHECK(meta->cb32_0 != cb.cb32[0], "check_cb32_0",
"cb32_0 %x != %x\n",
meta->cb32_0, cb.cb32[0]))
return;
if (CHECK(pkt_v6->eth.h_proto != htons(ETH_P_IPV6), "check_eth",
"h_proto %x\n", pkt_v6->eth.h_proto))
return;
if (CHECK(pkt_v6->iph.nexthdr != 6, "check_ip",
"iph.nexthdr %x\n", pkt_v6->iph.nexthdr))
return;
if (CHECK(pkt_v6->tcp.doff != 5, "check_tcp",
"tcp.doff %x\n", pkt_v6->tcp.doff))
return;
*(bool *)ctx = true;
}
/* TODO: fix kernel panic caused by this test in parallel mode */
void serial_test_kfree_skb(void)
{
struct __sk_buff skb = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v6,
.data_size_in = sizeof(pkt_v6),
.ctx_in = &skb,
.ctx_size_in = sizeof(skb),
);
struct kfree_skb *skel = NULL;
struct bpf_link *link;
struct bpf_object *obj;
struct perf_buffer *pb = NULL;
int err, prog_fd;
bool passed = false;
__u32 duration = 0;
const int zero = 0;
bool test_ok[2];
err = bpf_prog_test_load("./test_pkt_access.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd);
if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
return;
skel = kfree_skb__open_and_load();
if (!ASSERT_OK_PTR(skel, "kfree_skb_skel"))
goto close_prog;
link = bpf_program__attach_raw_tracepoint(skel->progs.trace_kfree_skb, NULL);
if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto close_prog;
skel->links.trace_kfree_skb = link;
link = bpf_program__attach_trace(skel->progs.fentry_eth_type_trans);
if (!ASSERT_OK_PTR(link, "attach fentry"))
goto close_prog;
skel->links.fentry_eth_type_trans = link;
link = bpf_program__attach_trace(skel->progs.fexit_eth_type_trans);
if (!ASSERT_OK_PTR(link, "attach fexit"))
goto close_prog;
skel->links.fexit_eth_type_trans = link;
/* set up perf buffer */
pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1,
on_sample, NULL, &passed, NULL);
if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto close_prog;
memcpy(skb.cb, &cb, sizeof(cb));
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "ipv6 test_run");
ASSERT_OK(topts.retval, "ipv6 test_run retval");
/* read perf buffer */
err = perf_buffer__poll(pb, 100);
if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
goto close_prog;
/* make sure kfree_skb program was triggered
* and it sent expected skb into ring buffer
*/
ASSERT_TRUE(passed, "passed");
err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.bss), &zero, test_ok);
if (CHECK(err, "get_result",
"failed to get output data: %d\n", err))
goto close_prog;
CHECK_FAIL(!test_ok[0] || !test_ok[1]);
close_prog:
perf_buffer__free(pb);
bpf_object__close(obj);
kfree_skb__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/kfree_skb.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "progs/profiler.h"
#include "profiler1.skel.h"
#include "profiler2.skel.h"
#include "profiler3.skel.h"
static int sanity_run(struct bpf_program *prog)
{
LIBBPF_OPTS(bpf_test_run_opts, test_attr);
__u64 args[] = {1, 2, 3};
int err, prog_fd;
prog_fd = bpf_program__fd(prog);
test_attr.ctx_in = args;
test_attr.ctx_size_in = sizeof(args);
err = bpf_prog_test_run_opts(prog_fd, &test_attr);
if (!ASSERT_OK(err, "test_run"))
return -1;
if (!ASSERT_OK(test_attr.retval, "test_run retval"))
return -1;
return 0;
}
void test_test_profiler(void)
{
struct profiler1 *profiler1_skel = NULL;
struct profiler2 *profiler2_skel = NULL;
struct profiler3 *profiler3_skel = NULL;
__u32 duration = 0;
int err;
profiler1_skel = profiler1__open_and_load();
if (CHECK(!profiler1_skel, "profiler1_skel_load", "profiler1 skeleton failed\n"))
goto cleanup;
err = profiler1__attach(profiler1_skel);
if (CHECK(err, "profiler1_attach", "profiler1 attach failed: %d\n", err))
goto cleanup;
if (sanity_run(profiler1_skel->progs.raw_tracepoint__sched_process_exec))
goto cleanup;
profiler2_skel = profiler2__open_and_load();
if (CHECK(!profiler2_skel, "profiler2_skel_load", "profiler2 skeleton failed\n"))
goto cleanup;
err = profiler2__attach(profiler2_skel);
if (CHECK(err, "profiler2_attach", "profiler2 attach failed: %d\n", err))
goto cleanup;
if (sanity_run(profiler2_skel->progs.raw_tracepoint__sched_process_exec))
goto cleanup;
profiler3_skel = profiler3__open_and_load();
if (CHECK(!profiler3_skel, "profiler3_skel_load", "profiler3 skeleton failed\n"))
goto cleanup;
err = profiler3__attach(profiler3_skel);
if (CHECK(err, "profiler3_attach", "profiler3 attach failed: %d\n", err))
goto cleanup;
if (sanity_run(profiler3_skel->progs.raw_tracepoint__sched_process_exec))
goto cleanup;
cleanup:
profiler1__destroy(profiler1_skel);
profiler2__destroy(profiler2_skel);
profiler3__destroy(profiler3_skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_profiler.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "udp_limit.skel.h"
#include <sys/types.h>
#include <sys/socket.h>
void test_udp_limit(void)
{
struct udp_limit *skel;
int fd1 = -1, fd2 = -1;
int cgroup_fd;
cgroup_fd = test__join_cgroup("/udp_limit");
if (!ASSERT_GE(cgroup_fd, 0, "cg-join"))
return;
skel = udp_limit__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel-load"))
goto close_cgroup_fd;
skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.sock, "cg_attach_sock"))
goto close_skeleton;
skel->links.sock_release = bpf_program__attach_cgroup(skel->progs.sock_release, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.sock_release, "cg_attach_sock_release"))
goto close_skeleton;
/* BPF program enforces a single UDP socket per cgroup,
* verify that.
*/
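/*
* Sketch of the enforcing BPF side (illustration only; the real logic is in
* progs/udp_limit.c and may differ in detail):
*
*	SEC("cgroup/sock_create")
*	int sock(struct bpf_sock *ctx)
*	{
*		if (ctx->type != SOCK_DGRAM)
*			return 1;
*		__sync_fetch_and_add(&invocations, 1);
*		if (in_use > 0)
*			return 0;
*		__sync_fetch_and_add(&in_use, 1);
*		return 1;
*	}
*
*	SEC("cgroup/sock_release")
*	int sock_release(struct bpf_sock *ctx)
*	{
*		if (ctx->type != SOCK_DGRAM)
*			return 1;
*		__sync_fetch_and_add(&invocations, 1);
*		__sync_fetch_and_add(&in_use, -1);
*		return 1;
*	}
*
* Returning 0 from the sock_create hook makes socket(AF_INET, SOCK_DGRAM, 0)
* fail (with EPERM), which is what the fd2 check below relies on.
*/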
fd1 = socket(AF_INET, SOCK_DGRAM, 0);
if (!ASSERT_GE(fd1, 0, "socket(fd1)"))
goto close_skeleton;
fd2 = socket(AF_INET, SOCK_DGRAM, 0);
if (!ASSERT_LT(fd2, 0, "socket(fd2)"))
goto close_skeleton;
/* We can reopen again after close. */
close(fd1);
fd1 = -1;
fd1 = socket(AF_INET, SOCK_DGRAM, 0);
if (!ASSERT_GE(fd1, 0, "socket(fd1-again)"))
goto close_skeleton;
/* Make sure the program was invoked the expected
* number of times:
* - open fd1 - BPF_CGROUP_INET_SOCK_CREATE
* - attempt to open fd2 - BPF_CGROUP_INET_SOCK_CREATE
* - close fd1 - BPF_CGROUP_INET_SOCK_RELEASE
* - open fd1 again - BPF_CGROUP_INET_SOCK_CREATE
*/
if (!ASSERT_EQ(skel->bss->invocations, 4, "bss-invocations"))
goto close_skeleton;
/* We should still have a single socket in use */
if (!ASSERT_EQ(skel->bss->in_use, 1, "bss-in_use"))
goto close_skeleton;
close_skeleton:
if (fd1 >= 0)
close(fd1);
if (fd2 >= 0)
close(fd2);
udp_limit__destroy(skel);
close_cgroup_fd:
close(cgroup_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/udp_limit.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <sys/types.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "test_progs.h"
#include "network_helpers.h"
#include "test_sk_storage_trace_itself.skel.h"
#include "test_sk_storage_tracing.skel.h"
#define LO_ADDR6 "::1"
#define TEST_COMM "test_progs"
struct sk_stg {
__u32 pid;
__u32 last_notclose_state;
char comm[16];
};
static struct test_sk_storage_tracing *skel;
static __u32 duration;
static pid_t my_pid;
static int check_sk_stg(int sk_fd, __u32 expected_state)
{
struct sk_stg sk_stg;
int err;
err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.sk_stg_map), &sk_fd,
&sk_stg);
if (!ASSERT_OK(err, "map_lookup(sk_stg_map)"))
return -1;
if (!ASSERT_EQ(sk_stg.last_notclose_state, expected_state,
"last_notclose_state"))
return -1;
if (!ASSERT_EQ(sk_stg.pid, my_pid, "pid"))
return -1;
if (!ASSERT_STREQ(sk_stg.comm, skel->bss->task_comm, "task_comm"))
return -1;
return 0;
}
static void do_test(void)
{
int listen_fd = -1, passive_fd = -1, active_fd = -1, value = 1, err;
char abyte;
listen_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0);
if (CHECK(listen_fd == -1, "start_server",
"listen_fd:%d errno:%d\n", listen_fd, errno))
return;
active_fd = connect_to_fd(listen_fd, 0);
if (CHECK(active_fd == -1, "connect_to_fd", "active_fd:%d errno:%d\n",
active_fd, errno))
goto out;
err = bpf_map_update_elem(bpf_map__fd(skel->maps.del_sk_stg_map),
&active_fd, &value, 0);
if (!ASSERT_OK(err, "map_update(del_sk_stg_map)"))
goto out;
passive_fd = accept(listen_fd, NULL, 0);
if (CHECK(passive_fd == -1, "accept", "passive_fd:%d errno:%d\n",
passive_fd, errno))
goto out;
shutdown(active_fd, SHUT_WR);
err = read(passive_fd, &abyte, 1);
if (!ASSERT_OK(err, "read(passive_fd)"))
goto out;
shutdown(passive_fd, SHUT_WR);
err = read(active_fd, &abyte, 1);
if (!ASSERT_OK(err, "read(active_fd)"))
goto out;
err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.del_sk_stg_map),
&active_fd, &value);
if (!ASSERT_ERR(err, "map_lookup(del_sk_stg_map)"))
goto out;
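/* Expected per-socket states recorded as last_notclose_state (the last
* state other than TCP_CLOSE): the listener stays in LISTEN, the active
* socket ends in FIN_WAIT2 once its shutdown(SHUT_WR) has been ACKed, and
* the passive socket ends in LAST_ACK after sending its own FIN.
*/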
err = check_sk_stg(listen_fd, BPF_TCP_LISTEN);
if (!ASSERT_OK(err, "listen_fd sk_stg"))
goto out;
err = check_sk_stg(active_fd, BPF_TCP_FIN_WAIT2);
if (!ASSERT_OK(err, "active_fd sk_stg"))
goto out;
err = check_sk_stg(passive_fd, BPF_TCP_LAST_ACK);
ASSERT_OK(err, "passive_fd sk_stg");
out:
if (active_fd != -1)
close(active_fd);
if (passive_fd != -1)
close(passive_fd);
if (listen_fd != -1)
close(listen_fd);
}
void serial_test_sk_storage_tracing(void)
{
struct test_sk_storage_trace_itself *skel_itself;
int err;
my_pid = getpid();
skel_itself = test_sk_storage_trace_itself__open_and_load();
if (!ASSERT_NULL(skel_itself, "test_sk_storage_trace_itself")) {
test_sk_storage_trace_itself__destroy(skel_itself);
return;
}
skel = test_sk_storage_tracing__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_sk_storage_tracing"))
return;
err = test_sk_storage_tracing__attach(skel);
if (!ASSERT_OK(err, "test_sk_storage_tracing__attach")) {
test_sk_storage_tracing__destroy(skel);
return;
}
do_test();
test_sk_storage_tracing__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2022 Sony Group Corporation */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/prctl.h>
#include <test_progs.h>
#include "bpf_syscall_macro.skel.h"
void test_bpf_syscall_macro(void)
{
struct bpf_syscall_macro *skel = NULL;
int err;
int exp_arg1 = 1001;
unsigned long exp_arg2 = 12;
unsigned long exp_arg3 = 13;
unsigned long exp_arg4 = 14;
unsigned long exp_arg5 = 15;
loff_t off_in, off_out;
ssize_t r;
/* check whether it can open program */
skel = bpf_syscall_macro__open();
if (!ASSERT_OK_PTR(skel, "bpf_syscall_macro__open"))
return;
skel->rodata->filter_pid = getpid();
/* check whether it can load program */
err = bpf_syscall_macro__load(skel);
if (!ASSERT_OK(err, "bpf_syscall_macro__load"))
goto cleanup;
/* check whether it can attach kprobe */
err = bpf_syscall_macro__attach(skel);
if (!ASSERT_OK(err, "bpf_syscall_macro__attach"))
goto cleanup;
/* check whether args of syscall are copied correctly */
prctl(exp_arg1, exp_arg2, exp_arg3, exp_arg4, exp_arg5);
#if defined(__aarch64__) || defined(__s390__)
ASSERT_NEQ(skel->bss->arg1, exp_arg1, "syscall_arg1");
#else
ASSERT_EQ(skel->bss->arg1, exp_arg1, "syscall_arg1");
#endif
ASSERT_EQ(skel->bss->arg2, exp_arg2, "syscall_arg2");
ASSERT_EQ(skel->bss->arg3, exp_arg3, "syscall_arg3");
/* arg4 cannot be read via PT_REGS_PARM4 on x86_64: the syscall ABI passes it in r10, while PT_REGS_PARM4 reads rcx */
#ifdef __x86_64__
ASSERT_NEQ(skel->bss->arg4_cx, exp_arg4, "syscall_arg4_from_cx");
#else
ASSERT_EQ(skel->bss->arg4_cx, exp_arg4, "syscall_arg4_from_cx");
#endif
ASSERT_EQ(skel->bss->arg4, exp_arg4, "syscall_arg4");
ASSERT_EQ(skel->bss->arg5, exp_arg5, "syscall_arg5");
/* check whether args of syscall are copied correctly for CORE variants */
ASSERT_EQ(skel->bss->arg1_core, exp_arg1, "syscall_arg1_core_variant");
ASSERT_EQ(skel->bss->arg2_core, exp_arg2, "syscall_arg2_core_variant");
ASSERT_EQ(skel->bss->arg3_core, exp_arg3, "syscall_arg3_core_variant");
/* likewise, arg4 cannot be read via PT_REGS_PARM4_CORE on x86_64 */
#ifdef __x86_64__
ASSERT_NEQ(skel->bss->arg4_core_cx, exp_arg4, "syscall_arg4_from_cx_core_variant");
#else
ASSERT_EQ(skel->bss->arg4_core_cx, exp_arg4, "syscall_arg4_from_cx_core_variant");
#endif
ASSERT_EQ(skel->bss->arg4_core, exp_arg4, "syscall_arg4_core_variant");
ASSERT_EQ(skel->bss->arg5_core, exp_arg5, "syscall_arg5_core_variant");
ASSERT_EQ(skel->bss->option_syscall, exp_arg1, "BPF_KPROBE_SYSCALL_option");
ASSERT_EQ(skel->bss->arg2_syscall, exp_arg2, "BPF_KPROBE_SYSCALL_arg2");
ASSERT_EQ(skel->bss->arg3_syscall, exp_arg3, "BPF_KPROBE_SYSCALL_arg3");
ASSERT_EQ(skel->bss->arg4_syscall, exp_arg4, "BPF_KPROBE_SYSCALL_arg4");
ASSERT_EQ(skel->bss->arg5_syscall, exp_arg5, "BPF_KPROBE_SYSCALL_arg5");
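/*
* For reference, the BPF side pulls the prctl arguments roughly as in this
* sketch (illustration only; the real bpf_syscall_macro program may differ
* in attach point and naming):
*
*	SEC("ksyscall/prctl")
*	int BPF_KSYSCALL(prctl_enter, int option, unsigned long arg2,
*			 unsigned long arg3, unsigned long arg4,
*			 unsigned long arg5)
*	{
*		option_syscall = option;
*		arg2_syscall = arg2;
*		arg3_syscall = arg3;
*		arg4_syscall = arg4;
*		arg5_syscall = arg5;
*		return 0;
*	}
*
* The syscall-aware macro reads the arguments from the in-kernel syscall
* convention, which is why arg4 comes out right here even on x86_64.
*/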
r = splice(-42, &off_in, 42, &off_out, 0x12340000, SPLICE_F_NONBLOCK);
err = -errno;
ASSERT_EQ(r, -1, "splice_res");
ASSERT_EQ(err, -EBADF, "splice_err");
ASSERT_EQ(skel->bss->splice_fd_in, -42, "splice_arg1");
ASSERT_EQ(skel->bss->splice_off_in, (__u64)&off_in, "splice_arg2");
ASSERT_EQ(skel->bss->splice_fd_out, 42, "splice_arg3");
ASSERT_EQ(skel->bss->splice_off_out, (__u64)&off_out, "splice_arg4");
ASSERT_EQ(skel->bss->splice_len, 0x12340000, "splice_arg5");
ASSERT_EQ(skel->bss->splice_flags, SPLICE_F_NONBLOCK, "splice_arg6");
cleanup:
bpf_syscall_macro__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <test_progs.h>
#define TDIR "/sys/kernel/debug"
static int read_iter(char *file)
{
/* 1024 should be enough to get contiguous 4 "iter" letters at some point */
char buf[1024];
int fd, len;
fd = open(file, 0);
if (fd < 0)
return -1;
while ((len = read(fd, buf, sizeof(buf))) > 0) {
buf[sizeof(buf) - 1] = '\0';
if (strstr(buf, "iter")) {
close(fd);
return 0;
}
}
close(fd);
return -1;
}
static int fn(void)
{
struct stat a, b, c;
int err, map;
err = unshare(CLONE_NEWNS);
if (!ASSERT_OK(err, "unshare"))
goto out;
err = mount("", "/", "", MS_REC | MS_PRIVATE, NULL);
if (!ASSERT_OK(err, "mount /"))
goto out;
err = umount(TDIR);
if (!ASSERT_OK(err, "umount " TDIR))
goto out;
err = mount("none", TDIR, "tmpfs", 0, NULL);
if (!ASSERT_OK(err, "mount tmpfs"))
goto out;
err = mkdir(TDIR "/fs1", 0777);
if (!ASSERT_OK(err, "mkdir " TDIR "/fs1"))
goto out;
err = mkdir(TDIR "/fs2", 0777);
if (!ASSERT_OK(err, "mkdir " TDIR "/fs2"))
goto out;
err = mount("bpf", TDIR "/fs1", "bpf", 0, NULL);
if (!ASSERT_OK(err, "mount bpffs " TDIR "/fs1"))
goto out;
err = mount("bpf", TDIR "/fs2", "bpf", 0, NULL);
if (!ASSERT_OK(err, "mount bpffs " TDIR "/fs2"))
goto out;
err = read_iter(TDIR "/fs1/maps.debug");
if (!ASSERT_OK(err, "reading " TDIR "/fs1/maps.debug"))
goto out;
err = read_iter(TDIR "/fs2/progs.debug");
if (!ASSERT_OK(err, "reading " TDIR "/fs2/progs.debug"))
goto out;
err = mkdir(TDIR "/fs1/a", 0777);
if (!ASSERT_OK(err, "creating " TDIR "/fs1/a"))
goto out;
err = mkdir(TDIR "/fs1/a/1", 0777);
if (!ASSERT_OK(err, "creating " TDIR "/fs1/a/1"))
goto out;
err = mkdir(TDIR "/fs1/b", 0777);
if (!ASSERT_OK(err, "creating " TDIR "/fs1/b"))
goto out;
map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL);
if (!ASSERT_GT(map, 0, "create_map(ARRAY)"))
goto out;
err = bpf_obj_pin(map, TDIR "/fs1/c");
if (!ASSERT_OK(err, "pin map"))
goto out;
close(map);
/* Check that RENAME_EXCHANGE works for directories. */
err = stat(TDIR "/fs1/a", &a);
if (!ASSERT_OK(err, "stat(" TDIR "/fs1/a)"))
goto out;
err = renameat2(0, TDIR "/fs1/a", 0, TDIR "/fs1/b", RENAME_EXCHANGE);
if (!ASSERT_OK(err, "renameat2(/fs1/a, /fs1/b, RENAME_EXCHANGE)"))
goto out;
err = stat(TDIR "/fs1/b", &b);
if (!ASSERT_OK(err, "stat(" TDIR "/fs1/b)"))
goto out;
if (!ASSERT_EQ(a.st_ino, b.st_ino, "b should have a's inode"))
goto out;
err = access(TDIR "/fs1/b/1", F_OK);
if (!ASSERT_OK(err, "access(" TDIR "/fs1/b/1)"))
goto out;
/* Check that RENAME_EXCHANGE works for mixed file types. */
err = stat(TDIR "/fs1/c", &c);
if (!ASSERT_OK(err, "stat(" TDIR "/fs1/map)"))
goto out;
err = renameat2(0, TDIR "/fs1/c", 0, TDIR "/fs1/b", RENAME_EXCHANGE);
if (!ASSERT_OK(err, "renameat2(/fs1/c, /fs1/b, RENAME_EXCHANGE)"))
goto out;
err = stat(TDIR "/fs1/b", &b);
if (!ASSERT_OK(err, "stat(" TDIR "/fs1/b)"))
goto out;
if (!ASSERT_EQ(c.st_ino, b.st_ino, "b should have c's inode"))
goto out;
err = access(TDIR "/fs1/c/1", F_OK);
if (!ASSERT_OK(err, "access(" TDIR "/fs1/c/1)"))
goto out;
/* Check that RENAME_NOREPLACE works. */
err = renameat2(0, TDIR "/fs1/b", 0, TDIR "/fs1/a", RENAME_NOREPLACE);
if (!ASSERT_ERR(err, "renameat2(RENAME_NOREPLACE)")) {
err = -EINVAL;
goto out;
}
err = access(TDIR "/fs1/b", F_OK);
if (!ASSERT_OK(err, "access(" TDIR "/fs1/b)"))
goto out;
out:
umount(TDIR "/fs1");
umount(TDIR "/fs2");
rmdir(TDIR "/fs1");
rmdir(TDIR "/fs2");
umount(TDIR);
exit(err);
}
void test_test_bpffs(void)
{
int err, duration = 0, status = 0;
pid_t pid;
pid = fork();
if (CHECK(pid == -1, "clone", "clone failed %d", errno))
return;
if (pid == 0)
fn();
err = waitpid(pid, &status, 0);
if (CHECK(err == -1 && errno != ECHILD, "waitpid", "failed %d", errno))
return;
if (CHECK(WEXITSTATUS(status), "bpffs test ", "failed %d", WEXITSTATUS(status)))
return;
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_bpffs.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <linux/rtnetlink.h>
#include <sys/types.h>
#include <net/if.h>
#include "test_progs.h"
#include "network_helpers.h"
#include "fib_lookup.skel.h"
#define NS_TEST "fib_lookup_ns"
#define IPV6_IFACE_ADDR "face::face"
#define IPV6_NUD_FAILED_ADDR "face::1"
#define IPV6_NUD_STALE_ADDR "face::2"
#define IPV4_IFACE_ADDR "10.0.0.254"
#define IPV4_NUD_FAILED_ADDR "10.0.0.1"
#define IPV4_NUD_STALE_ADDR "10.0.0.2"
#define IPV4_TBID_ADDR "172.0.0.254"
#define IPV4_TBID_NET "172.0.0.0"
#define IPV4_TBID_DST "172.0.0.2"
#define IPV6_TBID_ADDR "fd00::FFFF"
#define IPV6_TBID_NET "fd00::"
#define IPV6_TBID_DST "fd00::2"
#define DMAC "11:11:11:11:11:11"
#define DMAC_INIT { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, }
#define DMAC2 "01:01:01:01:01:01"
#define DMAC_INIT2 { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, }
struct fib_lookup_test {
const char *desc;
const char *daddr;
int expected_ret;
int lookup_flags;
__u32 tbid;
__u8 dmac[6];
};
static const struct fib_lookup_test tests[] = {
{ .desc = "IPv6 failed neigh",
.daddr = IPV6_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_NO_NEIGH, },
{ .desc = "IPv6 stale neigh",
.daddr = IPV6_NUD_STALE_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
.dmac = DMAC_INIT, },
{ .desc = "IPv6 skip neigh",
.daddr = IPV6_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
.lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, },
{ .desc = "IPv4 failed neigh",
.daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_NO_NEIGH, },
{ .desc = "IPv4 stale neigh",
.daddr = IPV4_NUD_STALE_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
.dmac = DMAC_INIT, },
{ .desc = "IPv4 skip neigh",
.daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
.lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, },
{ .desc = "IPv4 TBID lookup failure",
.daddr = IPV4_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_NOT_FWDED,
.lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID,
.tbid = RT_TABLE_MAIN, },
{ .desc = "IPv4 TBID lookup success",
.daddr = IPV4_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
.lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, .tbid = 100,
.dmac = DMAC_INIT2, },
{ .desc = "IPv6 TBID lookup failure",
.daddr = IPV6_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_NOT_FWDED,
.lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID,
.tbid = RT_TABLE_MAIN, },
{ .desc = "IPv6 TBID lookup success",
.daddr = IPV6_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
.lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, .tbid = 100,
.dmac = DMAC_INIT2, },
};
static int ifindex;
static int setup_netns(void)
{
int err;
SYS(fail, "ip link add veth1 type veth peer name veth2");
SYS(fail, "ip link set dev veth1 up");
SYS(fail, "ip link set dev veth2 up");
err = write_sysctl("/proc/sys/net/ipv4/neigh/veth1/gc_stale_time", "900");
if (!ASSERT_OK(err, "write_sysctl(net.ipv4.neigh.veth1.gc_stale_time)"))
goto fail;
err = write_sysctl("/proc/sys/net/ipv6/neigh/veth1/gc_stale_time", "900");
if (!ASSERT_OK(err, "write_sysctl(net.ipv6.neigh.veth1.gc_stale_time)"))
goto fail;
SYS(fail, "ip addr add %s/64 dev veth1 nodad", IPV6_IFACE_ADDR);
SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV6_NUD_FAILED_ADDR);
SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV6_NUD_STALE_ADDR, DMAC);
SYS(fail, "ip addr add %s/24 dev veth1", IPV4_IFACE_ADDR);
SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR);
SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC);
/* Setup for tbid lookup tests */
SYS(fail, "ip addr add %s/24 dev veth2", IPV4_TBID_ADDR);
SYS(fail, "ip route del %s/24 dev veth2", IPV4_TBID_NET);
SYS(fail, "ip route add table 100 %s/24 dev veth2", IPV4_TBID_NET);
SYS(fail, "ip neigh add %s dev veth2 lladdr %s nud stale", IPV4_TBID_DST, DMAC2);
SYS(fail, "ip addr add %s/64 dev veth2", IPV6_TBID_ADDR);
SYS(fail, "ip -6 route del %s/64 dev veth2", IPV6_TBID_NET);
SYS(fail, "ip -6 route add table 100 %s/64 dev veth2", IPV6_TBID_NET);
SYS(fail, "ip neigh add %s dev veth2 lladdr %s nud stale", IPV6_TBID_DST, DMAC2);
err = write_sysctl("/proc/sys/net/ipv4/conf/veth1/forwarding", "1");
if (!ASSERT_OK(err, "write_sysctl(net.ipv4.conf.veth1.forwarding)"))
goto fail;
err = write_sysctl("/proc/sys/net/ipv6/conf/veth1/forwarding", "1");
if (!ASSERT_OK(err, "write_sysctl(net.ipv6.conf.veth1.forwarding)"))
goto fail;
return 0;
fail:
return -1;
}
static int set_lookup_params(struct bpf_fib_lookup *params, const struct fib_lookup_test *test)
{
int ret;
memset(params, 0, sizeof(*params));
params->l4_protocol = IPPROTO_TCP;
params->ifindex = ifindex;
params->tbid = test->tbid;
if (inet_pton(AF_INET6, test->daddr, params->ipv6_dst) == 1) {
params->family = AF_INET6;
ret = inet_pton(AF_INET6, IPV6_IFACE_ADDR, params->ipv6_src);
if (!ASSERT_EQ(ret, 1, "inet_pton(IPV6_IFACE_ADDR)"))
return -1;
return 0;
}
ret = inet_pton(AF_INET, test->daddr, &params->ipv4_dst);
if (!ASSERT_EQ(ret, 1, "convert IP[46] address"))
return -1;
params->family = AF_INET;
ret = inet_pton(AF_INET, IPV4_IFACE_ADDR, &params->ipv4_src);
if (!ASSERT_EQ(ret, 1, "inet_pton(IPV4_IFACE_ADDR)"))
return -1;
return 0;
}
static void mac_str(char *b, const __u8 *mac)
{
sprintf(b, "%02X:%02X:%02X:%02X:%02X:%02X",
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
}
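/*
* For context, the BPF program exercised below essentially does (sketch;
* the details live in the fib_lookup BPF source):
*
*	fib_lookup_ret = bpf_fib_lookup(skb, &fib_params,
*					sizeof(fib_params), lookup_flags);
*
* so this loop only has to fill fib_params, pick lookup_flags, and compare
* fib_lookup_ret and the resolved dmac against the expectations in tests[].
*/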
void test_fib_lookup(void)
{
struct bpf_fib_lookup *fib_params;
struct nstoken *nstoken = NULL;
struct __sk_buff skb = { };
struct fib_lookup *skel;
int prog_fd, err, ret, i;
/* The test does not use skb->data, so pkt_v6 is used for
* both the v6 and v4 test cases.
*/
LIBBPF_OPTS(bpf_test_run_opts, run_opts,
.data_in = &pkt_v6,
.data_size_in = sizeof(pkt_v6),
.ctx_in = &skb,
.ctx_size_in = sizeof(skb),
);
skel = fib_lookup__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
return;
prog_fd = bpf_program__fd(skel->progs.fib_lookup);
SYS(fail, "ip netns add %s", NS_TEST);
nstoken = open_netns(NS_TEST);
if (!ASSERT_OK_PTR(nstoken, "open_netns"))
goto fail;
if (setup_netns())
goto fail;
ifindex = if_nametoindex("veth1");
skb.ifindex = ifindex;
fib_params = &skel->bss->fib_params;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
printf("Testing %s ", tests[i].desc);
if (set_lookup_params(fib_params, &tests[i]))
continue;
skel->bss->fib_lookup_ret = -1;
skel->bss->lookup_flags = tests[i].lookup_flags;
err = bpf_prog_test_run_opts(prog_fd, &run_opts);
if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
continue;
ASSERT_EQ(skel->bss->fib_lookup_ret, tests[i].expected_ret,
"fib_lookup_ret");
ret = memcmp(tests[i].dmac, fib_params->dmac, sizeof(tests[i].dmac));
if (!ASSERT_EQ(ret, 0, "dmac not match")) {
char expected[18], actual[18];
mac_str(expected, tests[i].dmac);
mac_str(actual, fib_params->dmac);
printf("dmac expected %s actual %s ", expected, actual);
}
/* ensure tbid is zeroed out after the fib lookup */
if (tests[i].lookup_flags & BPF_FIB_LOOKUP_DIRECT) {
if (!ASSERT_EQ(skel->bss->fib_params.tbid, 0,
"expected fib_params.tbid to be zero"))
goto fail;
}
}
fail:
if (nstoken)
close_netns(nstoken);
SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
fib_lookup__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/fib_lookup.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <bpf/btf.h>
static char *dump_buf;
static size_t dump_buf_sz;
static FILE *dump_buf_file;
static void btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
vfprintf(ctx, fmt, args);
}
void test_btf_split() {
struct btf_dump *d = NULL;
const struct btf_type *t;
struct btf *btf1, *btf2;
int str_off, i, err;
btf1 = btf__new_empty();
if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
return;
btf__set_pointer_size(btf1, 8); /* enforce 64-bit arch */
btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
btf__add_ptr(btf1, 1); /* [2] ptr to int */
btf__add_struct(btf1, "s1", 4); /* [3] struct s1 { */
btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
/* } */
btf2 = btf__new_empty_split(btf1);
if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
goto cleanup;
/* pointer size should be "inherited" from main BTF */
ASSERT_EQ(btf__pointer_size(btf2), 8, "inherit_ptr_sz");
str_off = btf__find_str(btf2, "int");
ASSERT_NEQ(str_off, -ENOENT, "str_int_missing");
t = btf__type_by_id(btf2, 1);
if (!ASSERT_OK_PTR(t, "int_type"))
goto cleanup;
ASSERT_EQ(btf_is_int(t), true, "int_kind");
ASSERT_STREQ(btf__str_by_offset(btf2, t->name_off), "int", "int_name");
btf__add_struct(btf2, "s2", 16); /* [4] struct s2 { */
btf__add_field(btf2, "f1", 3, 0, 0); /* struct s1 f1; */
btf__add_field(btf2, "f2", 1, 32, 0); /* int f2; */
btf__add_field(btf2, "f3", 2, 64, 0); /* int *f3; */
/* } */
t = btf__type_by_id(btf1, 4);
ASSERT_NULL(t, "split_type_in_main");
t = btf__type_by_id(btf2, 4);
if (!ASSERT_OK_PTR(t, "split_struct_type"))
goto cleanup;
ASSERT_EQ(btf_is_struct(t), true, "split_struct_kind");
ASSERT_EQ(btf_vlen(t), 3, "split_struct_vlen");
ASSERT_STREQ(btf__str_by_offset(btf2, t->name_off), "s2", "split_struct_name");
/* BTF-to-C dump of split BTF */
dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz);
if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream"))
return;
d = btf_dump__new(btf2, btf_dump_printf, dump_buf_file, NULL);
if (!ASSERT_OK_PTR(d, "btf_dump__new"))
goto cleanup;
for (i = 1; i < btf__type_cnt(btf2); i++) {
err = btf_dump__dump_type(d, i);
ASSERT_OK(err, "dump_type_ok");
}
fflush(dump_buf_file);
dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */
ASSERT_STREQ(dump_buf,
"struct s1 {\n"
" int f1;\n"
"};\n"
"\n"
"struct s2 {\n"
" struct s1 f1;\n"
" int f2;\n"
" int *f3;\n"
"};\n\n", "c_dump");
cleanup:
if (dump_buf_file)
fclose(dump_buf_file);
free(dump_buf);
btf_dump__free(d);
btf__free(btf1);
btf__free(btf2);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/btf_split.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "lru_bug.skel.h"
void test_lru_bug(void)
{
struct lru_bug *skel;
int ret;
skel = lru_bug__open_and_load();
if (!ASSERT_OK_PTR(skel, "lru_bug__open_and_load"))
return;
ret = lru_bug__attach(skel);
if (!ASSERT_OK(ret, "lru_bug__attach"))
goto end;
usleep(1);
ASSERT_OK(skel->data->result, "prealloc_lru_pop doesn't call check_and_init_map_value");
end:
lru_bug__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/lru_bug.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
void test_skb_helpers(void)
{
struct __sk_buff skb = {
.wire_len = 100,
.gso_segs = 8,
.gso_size = 10,
};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.ctx_in = &skb,
.ctx_size_in = sizeof(skb),
.ctx_out = &skb,
.ctx_size_out = sizeof(skb),
);
struct bpf_object *obj;
int err, prog_fd;
err = bpf_prog_test_load("./test_skb_helpers.bpf.o",
BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (!ASSERT_OK(err, "load"))
return;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/skb_helpers.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include <linux/bpf.h>
#include "bpf/libbpf_internal.h"
#include "test_raw_tp_test_run.skel.h"
void test_raw_tp_test_run(void)
{
int comm_fd = -1, err, nr_online, i, prog_fd;
__u64 args[2] = {0x1234ULL, 0x5678ULL};
int expected_retval = 0x1234 + 0x5678;
struct test_raw_tp_test_run *skel;
char buf[] = "new_name";
bool *online = NULL;
LIBBPF_OPTS(bpf_test_run_opts, opts,
.ctx_in = args,
.ctx_size_in = sizeof(args),
.flags = BPF_F_TEST_RUN_ON_CPU,
);
err = parse_cpu_mask_file("/sys/devices/system/cpu/online", &online,
&nr_online);
if (!ASSERT_OK(err, "parse_cpu_mask_file"))
return;
skel = test_raw_tp_test_run__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
err = test_raw_tp_test_run__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
comm_fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
if (!ASSERT_GE(comm_fd, 0, "open /proc/self/comm"))
goto cleanup;
err = write(comm_fd, buf, sizeof(buf));
ASSERT_GE(err, 0, "task rename");
ASSERT_NEQ(skel->bss->count, 0, "check_count");
ASSERT_EQ(skel->data->on_cpu, 0xffffffff, "check_on_cpu");
prog_fd = bpf_program__fd(skel->progs.rename);
opts.ctx_in = args;
opts.ctx_size_in = sizeof(__u64);
err = bpf_prog_test_run_opts(prog_fd, &opts);
ASSERT_NEQ(err, 0, "test_run should fail for too small ctx");
opts.ctx_size_in = sizeof(args);
err = bpf_prog_test_run_opts(prog_fd, &opts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(opts.retval, expected_retval, "check_retval");
for (i = 0; i < nr_online; i++) {
if (!online[i])
continue;
opts.cpu = i;
opts.retval = 0;
err = bpf_prog_test_run_opts(prog_fd, &opts);
ASSERT_OK(err, "test_run_opts");
ASSERT_EQ(skel->data->on_cpu, i, "check_on_cpu");
ASSERT_EQ(opts.retval, expected_retval, "check_retval");
}
/* invalid cpu ID should fail with ENXIO */
opts.cpu = 0xffffffff;
err = bpf_prog_test_run_opts(prog_fd, &opts);
ASSERT_EQ(errno, ENXIO, "test_run_opts should fail with ENXIO");
ASSERT_ERR(err, "test_run_opts_fail");
/* non-zero cpu w/o BPF_F_TEST_RUN_ON_CPU should fail with EINVAL */
opts.cpu = 1;
opts.flags = 0;
err = bpf_prog_test_run_opts(prog_fd, &opts);
ASSERT_EQ(errno, EINVAL, "test_run_opts should fail with EINVAL");
ASSERT_ERR(err, "test_run_opts_fail");
cleanup:
close(comm_fd);
test_raw_tp_test_run__destroy(skel);
free(online);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <netinet/in.h>
#include <linux/netfilter.h>
#include "test_progs.h"
#include "test_netfilter_link_attach.skel.h"
struct nf_link_test {
__u32 pf;
__u32 hooknum;
__s32 priority;
__u32 flags;
bool expect_success;
const char * const name;
};
static const struct nf_link_test nf_hook_link_tests[] = {
{ .name = "allzero", },
{ .pf = NFPROTO_NUMPROTO, .name = "invalid-pf", },
{ .pf = NFPROTO_IPV4, .hooknum = 42, .name = "invalid-hooknum", },
{ .pf = NFPROTO_IPV4, .priority = INT_MIN, .name = "invalid-priority-min", },
{ .pf = NFPROTO_IPV4, .priority = INT_MAX, .name = "invalid-priority-max", },
{ .pf = NFPROTO_IPV4, .flags = UINT_MAX, .name = "invalid-flags", },
{ .pf = NFPROTO_INET, .priority = 1, .name = "invalid-inet-not-supported", },
{ .pf = NFPROTO_IPV4, .priority = -10000, .expect_success = true, .name = "attach ipv4", },
{ .pf = NFPROTO_IPV6, .priority = 10001, .expect_success = true, .name = "attach ipv6", },
};
void test_netfilter_link_attach(void)
{
struct test_netfilter_link_attach *skel;
struct bpf_program *prog;
LIBBPF_OPTS(bpf_netfilter_opts, opts);
int i;
skel = test_netfilter_link_attach__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_netfilter_link_attach__open_and_load"))
goto out;
prog = skel->progs.nf_link_attach_test;
if (!ASSERT_OK_PTR(prog, "attach program"))
goto out;
for (i = 0; i < ARRAY_SIZE(nf_hook_link_tests); i++) {
struct bpf_link *link;
if (!test__start_subtest(nf_hook_link_tests[i].name))
continue;
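/* Copy this test case's pf/hooknum/priority/flags into the attach opts */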
#define X(opts, m, i) opts.m = nf_hook_link_tests[(i)].m
X(opts, pf, i);
X(opts, hooknum, i);
X(opts, priority, i);
X(opts, flags, i);
#undef X
link = bpf_program__attach_netfilter(prog, &opts);
if (nf_hook_link_tests[i].expect_success) {
struct bpf_link *link2;
if (!ASSERT_OK_PTR(link, "program attach successful"))
continue;
link2 = bpf_program__attach_netfilter(prog, &opts);
ASSERT_ERR_PTR(link2, "attach program with same pf/hook/priority");
if (!ASSERT_OK(bpf_link__destroy(link), "link destroy"))
break;
link2 = bpf_program__attach_netfilter(prog, &opts);
if (!ASSERT_OK_PTR(link2, "program reattach successful"))
continue;
if (!ASSERT_OK(bpf_link__destroy(link2), "link destroy"))
break;
} else {
ASSERT_ERR_PTR(link, "program load failure");
}
}
out:
test_netfilter_link_attach__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "test_send_signal_kern.skel.h"
static void sigusr1_handler(int signum)
{
}
#define THREAD_COUNT 100
static void *worker(void *p)
{
int i;
for (i = 0; i < 1000; i++)
usleep(1);
return NULL;
}
/* NOTE: this test causes event loss */
void serial_test_send_signal_sched_switch(void)
{
struct test_send_signal_kern *skel;
pthread_t threads[THREAD_COUNT];
u32 duration = 0;
int i, err;
signal(SIGUSR1, sigusr1_handler);
skel = test_send_signal_kern__open_and_load();
if (CHECK(!skel, "skel_open_and_load", "skeleton open_and_load failed\n"))
return;
skel->bss->pid = getpid();
skel->bss->sig = SIGUSR1;
err = test_send_signal_kern__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed\n"))
goto destroy_skel;
for (i = 0; i < THREAD_COUNT; i++) {
err = pthread_create(threads + i, NULL, worker, NULL);
if (CHECK(err, "pthread_create", "Error creating thread, %s\n",
strerror(errno)))
goto destroy_skel;
}
for (i = 0; i < THREAD_COUNT; i++)
pthread_join(threads[i], NULL);
destroy_skel:
test_send_signal_kern__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 Google LLC.
*/
#include <test_progs.h>
#include "modify_return.skel.h"
#define LOWER(x) ((x) & 0xffff)
#define UPPER(x) ((x) >> 16)
static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 want_ret)
{
struct modify_return *skel = NULL;
int err, prog_fd;
__u16 side_effect;
__s16 ret;
LIBBPF_OPTS(bpf_test_run_opts, topts);
skel = modify_return__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
err = modify_return__attach(skel);
if (!ASSERT_OK(err, "modify_return__attach failed"))
goto cleanup;
skel->bss->input_retval = input_retval;
prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
side_effect = UPPER(topts.retval);
ret = LOWER(topts.retval);
ASSERT_EQ(ret, want_ret, "test_run ret");
ASSERT_EQ(side_effect, want_side_effect, "modify_return side_effect");
ASSERT_EQ(skel->bss->fentry_result, 1, "modify_return fentry_result");
ASSERT_EQ(skel->bss->fexit_result, 1, "modify_return fexit_result");
ASSERT_EQ(skel->bss->fmod_ret_result, 1, "modify_return fmod_ret_result");
ASSERT_EQ(skel->bss->fentry_result2, 1, "modify_return fentry_result2");
ASSERT_EQ(skel->bss->fexit_result2, 1, "modify_return fexit_result2");
ASSERT_EQ(skel->bss->fmod_ret_result2, 1, "modify_return fmod_ret_result2");
cleanup:
modify_return__destroy(skel);
}
/* TODO: conflict with get_func_ip_test */
void serial_test_modify_return(void)
{
run_test(0 /* input_retval */,
2 /* want_side_effect */,
33 /* want_ret */);
run_test(-EINVAL /* input_retval */,
0 /* want_side_effect */,
-EINVAL * 2 /* want_ret */);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/modify_return.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <linux/nbd.h>
/* NOTE: conflict with other tests. */
void serial_test_raw_tp_writable_test_run(void)
{
__u32 duration = 0;
char error[4096];
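/* Writable raw tracepoint program: load the pointer in ctx->args[0] and
 * overwrite the u32 it points to with 42.
 */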
const struct bpf_insn trace_program[] = {
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
LIBBPF_OPTS(bpf_prog_load_opts, trace_opts,
.log_level = 2,
.log_buf = error,
.log_size = sizeof(error),
);
int bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2",
trace_program, sizeof(trace_program) / sizeof(struct bpf_insn),
&trace_opts);
if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable loaded",
"failed: %d errno %d\n", bpf_fd, errno))
return;
const struct bpf_insn skb_program[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
LIBBPF_OPTS(bpf_prog_load_opts, skb_opts,
.log_buf = error,
.log_size = sizeof(error),
);
int filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL v2",
skb_program, sizeof(skb_program) / sizeof(struct bpf_insn),
&skb_opts);
if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n",
filter_fd, errno))
goto out_bpffd;
int tp_fd = bpf_raw_tracepoint_open("bpf_test_finish", bpf_fd);
if (CHECK(tp_fd < 0, "bpf_raw_tracepoint_writable opened",
"failed: %d errno %d\n", tp_fd, errno))
goto out_filterfd;
char test_skb[128] = {
0,
};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = test_skb,
.data_size_in = sizeof(test_skb),
.repeat = 1,
);
int err = bpf_prog_test_run_opts(filter_fd, &topts);
CHECK(err != 42, "test_run",
"tracepoint did not modify return value\n");
CHECK(topts.retval != 0, "test_run_ret",
"socket_filter did not return 0\n");
close(tp_fd);
err = bpf_prog_test_run_opts(filter_fd, &topts);
CHECK(err != 0, "test_run_notrace",
"test_run failed with %d errno %d\n", err, errno);
CHECK(topts.retval != 0, "test_run_ret_notrace",
"socket_filter did not return 0\n");
out_filterfd:
close(filter_fd);
out_bpffd:
close(bpf_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "bpf/libbpf_internal.h"
#include "test_perf_branches.skel.h"
static void check_good_sample(struct test_perf_branches *skel)
{
int written_global = skel->bss->written_global_out;
int required_size = skel->bss->required_size_out;
int written_stack = skel->bss->written_stack_out;
int pbe_size = sizeof(struct perf_branch_entry);
int duration = 0;
if (CHECK(!skel->bss->valid, "output not valid",
"no valid sample from prog"))
return;
/*
* It's hard to validate the contents of the branch entries b/c it
* would require some kind of disassembler and also encoding the
* valid jump instructions for supported architectures. So just check
* the easy stuff for now.
*/
CHECK(required_size <= 0, "read_branches_size", "err %d\n", required_size);
CHECK(written_stack < 0, "read_branches_stack", "err %d\n", written_stack);
CHECK(written_stack % pbe_size != 0, "read_branches_stack",
"stack bytes written=%d not multiple of struct size=%d\n",
written_stack, pbe_size);
CHECK(written_global < 0, "read_branches_global", "err %d\n", written_global);
CHECK(written_global % pbe_size != 0, "read_branches_global",
"global bytes written=%d not multiple of struct size=%d\n",
written_global, pbe_size);
CHECK(written_global < written_stack, "read_branches_size",
"written_global=%d < written_stack=%d\n", written_global, written_stack);
}
static void check_bad_sample(struct test_perf_branches *skel)
{
int written_global = skel->bss->written_global_out;
int required_size = skel->bss->required_size_out;
int written_stack = skel->bss->written_stack_out;
int duration = 0;
if (CHECK(!skel->bss->valid, "output not valid",
"no valid sample from prog"))
return;
CHECK((required_size != -EINVAL && required_size != -ENOENT),
"read_branches_size", "err %d\n", required_size);
CHECK((written_stack != -EINVAL && written_stack != -ENOENT),
"read_branches_stack", "written %d\n", written_stack);
CHECK((written_global != -EINVAL && written_global != -ENOENT),
"read_branches_global", "written %d\n", written_global);
}
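/* Attach the BPF program to the given perf event fd, pin to CPU 0 and spin
 * to generate branch samples, then run the supplied checker on the
 * skeleton's output.
 */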
static void test_perf_branches_common(int perf_fd,
void (*cb)(struct test_perf_branches *))
{
struct test_perf_branches *skel;
int err, i, duration = 0;
bool detached = false;
struct bpf_link *link;
volatile int j = 0;
cpu_set_t cpu_set;
skel = test_perf_branches__open_and_load();
if (CHECK(!skel, "test_perf_branches_load",
"perf_branches skeleton failed\n"))
return;
/* attach perf_event */
link = bpf_program__attach_perf_event(skel->progs.perf_branches, perf_fd);
if (!ASSERT_OK_PTR(link, "attach_perf_event"))
goto out_destroy_skel;
/* generate some branches on cpu 0 */
CPU_ZERO(&cpu_set);
CPU_SET(0, &cpu_set);
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
if (CHECK(err, "set_affinity", "cpu #0, err %d\n", err))
goto out_destroy;
/* spin the loop for a while (random high number) */
for (i = 0; i < 1000000; ++i)
++j;
test_perf_branches__detach(skel);
detached = true;
cb(skel);
out_destroy:
bpf_link__destroy(link);
out_destroy_skel:
if (!detached)
test_perf_branches__detach(skel);
test_perf_branches__destroy(skel);
}
static void test_perf_branches_hw(void)
{
struct perf_event_attr attr = {0};
int duration = 0;
int pfd;
/* create perf event */
attr.size = sizeof(attr);
attr.type = PERF_TYPE_HARDWARE;
attr.config = PERF_COUNT_HW_CPU_CYCLES;
attr.freq = 1;
attr.sample_freq = 1000;
attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
/*
* Some setups don't support branch records (virtual machines, !x86),
* so skip test in this case.
*/
if (pfd < 0) {
if (errno == ENOENT || errno == EOPNOTSUPP) {
printf("%s:SKIP:no PERF_SAMPLE_BRANCH_STACK\n",
__func__);
test__skip();
return;
}
if (CHECK(pfd < 0, "perf_event_open", "err %d errno %d\n",
pfd, errno))
return;
}
test_perf_branches_common(pfd, check_good_sample);
close(pfd);
}
/*
* Tests negative case -- run bpf_read_branch_records() on improperly configured
* perf event.
*/
static void test_perf_branches_no_hw(void)
{
struct perf_event_attr attr = {0};
int duration = 0;
int pfd;
/* create perf event */
attr.size = sizeof(attr);
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (CHECK(pfd < 0, "perf_event_open", "err %d\n", pfd))
return;
test_perf_branches_common(pfd, check_bad_sample);
close(pfd);
}
void test_perf_branches(void)
{
if (test__start_subtest("perf_branches_hw"))
test_perf_branches_hw();
if (test__start_subtest("perf_branches_no_hw"))
test_perf_branches_no_hw();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/perf_branches.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <network_helpers.h>
#include "refcounted_kptr.skel.h"
#include "refcounted_kptr_fail.skel.h"
void test_refcounted_kptr(void)
{
RUN_TESTS(refcounted_kptr);
}
void test_refcounted_kptr_fail(void)
{
RUN_TESTS(refcounted_kptr_fail);
}
void test_refcounted_kptr_wrong_owner(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct refcounted_kptr *skel;
int ret;
skel = refcounted_kptr__open_and_load();
if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a1), &opts);
ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a1");
ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a1 retval");
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_b), &opts);
ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_b");
ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_b retval");
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a2), &opts);
ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a2");
ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
refcounted_kptr__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <bpf/bpf_endian.h>
#include "sock_destroy_prog.skel.h"
#include "sock_destroy_prog_fail.skel.h"
#include "network_helpers.h"
#define TEST_NS "sock_destroy_netns"
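/* Attach the given socket iterator program and read the iterator to
 * completion; matching sockets are destroyed as the iterator visits them.
 */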
static void start_iter_sockets(struct bpf_program *prog)
{
struct bpf_link *link;
char buf[50] = {};
int iter_fd, len;
link = bpf_program__attach_iter(prog, NULL);
if (!ASSERT_OK_PTR(link, "attach_iter"))
return;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
ASSERT_GE(len, 0, "read");
close(iter_fd);
free_link:
bpf_link__destroy(link);
}
static void test_tcp_client(struct sock_destroy_prog *skel)
{
int serv = -1, clien = -1, accept_serv = -1, n;
serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_GE(serv, 0, "start_server"))
goto cleanup;
clien = connect_to_fd(serv, 0);
if (!ASSERT_GE(clien, 0, "connect_to_fd"))
goto cleanup;
accept_serv = accept(serv, NULL, NULL);
if (!ASSERT_GE(accept_serv, 0, "serv accept"))
goto cleanup;
n = send(clien, "t", 1, 0);
if (!ASSERT_EQ(n, 1, "client send"))
goto cleanup;
/* Run iterator program that destroys connected client sockets. */
start_iter_sockets(skel->progs.iter_tcp6_client);
n = send(clien, "t", 1, 0);
if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
goto cleanup;
ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket");
cleanup:
if (clien != -1)
close(clien);
if (accept_serv != -1)
close(accept_serv);
if (serv != -1)
close(serv);
}
static void test_tcp_server(struct sock_destroy_prog *skel)
{
int serv = -1, clien = -1, accept_serv = -1, n, serv_port;
serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_GE(serv, 0, "start_server"))
goto cleanup;
serv_port = get_socket_local_port(serv);
if (!ASSERT_GE(serv_port, 0, "get_sock_local_port"))
goto cleanup;
skel->bss->serv_port = (__be16) serv_port;
clien = connect_to_fd(serv, 0);
if (!ASSERT_GE(clien, 0, "connect_to_fd"))
goto cleanup;
accept_serv = accept(serv, NULL, NULL);
if (!ASSERT_GE(accept_serv, 0, "serv accept"))
goto cleanup;
n = send(clien, "t", 1, 0);
if (!ASSERT_EQ(n, 1, "client send"))
goto cleanup;
/* Run iterator program that destroys server sockets. */
start_iter_sockets(skel->progs.iter_tcp6_server);
n = send(clien, "t", 1, 0);
if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
goto cleanup;
ASSERT_EQ(errno, ECONNRESET, "error code on destroyed socket");
cleanup:
if (clien != -1)
close(clien);
if (accept_serv != -1)
close(accept_serv);
if (serv != -1)
close(serv);
}
static void test_udp_client(struct sock_destroy_prog *skel)
{
int serv = -1, clien = -1, n = 0;
serv = start_server(AF_INET6, SOCK_DGRAM, NULL, 0, 0);
if (!ASSERT_GE(serv, 0, "start_server"))
goto cleanup;
clien = connect_to_fd(serv, 0);
if (!ASSERT_GE(clien, 0, "connect_to_fd"))
goto cleanup;
n = send(clien, "t", 1, 0);
if (!ASSERT_EQ(n, 1, "client send"))
goto cleanup;
/* Run iterator program that destroys sockets. */
start_iter_sockets(skel->progs.iter_udp6_client);
n = send(clien, "t", 1, 0);
if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
goto cleanup;
/* UDP sockets have an overriding error code after they are disconnected,
* so we don't check for ECONNABORTED error code.
*/
cleanup:
if (clien != -1)
close(clien);
if (serv != -1)
close(serv);
}
static void test_udp_server(struct sock_destroy_prog *skel)
{
int *listen_fds = NULL, n, i, serv_port;
unsigned int num_listens = 5;
char buf[1];
/* Start reuseport servers. */
listen_fds = start_reuseport_server(AF_INET6, SOCK_DGRAM,
"::1", 0, 0, num_listens);
if (!ASSERT_OK_PTR(listen_fds, "start_reuseport_server"))
goto cleanup;
serv_port = get_socket_local_port(listen_fds[0]);
if (!ASSERT_GE(serv_port, 0, "get_sock_local_port"))
goto cleanup;
skel->bss->serv_port = (__be16) serv_port;
/* Run iterator program that destroys server sockets. */
start_iter_sockets(skel->progs.iter_udp6_server);
for (i = 0; i < num_listens; ++i) {
n = read(listen_fds[i], buf, sizeof(buf));
if (!ASSERT_EQ(n, -1, "read") ||
!ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket"))
break;
}
ASSERT_EQ(i, num_listens, "server socket");
cleanup:
free_fds(listen_fds, num_listens);
}
void test_sock_destroy(void)
{
struct sock_destroy_prog *skel;
struct nstoken *nstoken = NULL;
int cgroup_fd;
skel = sock_destroy_prog__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
cgroup_fd = test__join_cgroup("/sock_destroy");
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
goto cleanup;
skel->links.sock_connect = bpf_program__attach_cgroup(
skel->progs.sock_connect, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.sock_connect, "prog_attach"))
goto cleanup;
SYS(cleanup, "ip netns add %s", TEST_NS);
SYS(cleanup, "ip -net %s link set dev lo up", TEST_NS);
nstoken = open_netns(TEST_NS);
if (!ASSERT_OK_PTR(nstoken, "open_netns"))
goto cleanup;
if (test__start_subtest("tcp_client"))
test_tcp_client(skel);
if (test__start_subtest("tcp_server"))
test_tcp_server(skel);
if (test__start_subtest("udp_client"))
test_udp_client(skel);
if (test__start_subtest("udp_server"))
test_udp_server(skel);
RUN_TESTS(sock_destroy_prog_fail);
cleanup:
if (nstoken)
close_netns(nstoken);
SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null");
if (cgroup_fd >= 0)
close(cgroup_fd);
sock_destroy_prog__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sock_destroy.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <uapi/linux/if_link.h>
#include <test_progs.h>
#include "test_xdp_link.skel.h"
#define IFINDEX_LO 1
void serial_test_xdp_link(void)
{
struct test_xdp_link *skel1 = NULL, *skel2 = NULL;
__u32 id1, id2, id0 = 0, prog_fd1, prog_fd2;
LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
struct bpf_link_info link_info;
struct bpf_prog_info prog_info;
struct bpf_link *link;
int err;
__u32 link_info_len = sizeof(link_info);
__u32 prog_info_len = sizeof(prog_info);
skel1 = test_xdp_link__open_and_load();
if (!ASSERT_OK_PTR(skel1, "skel_load"))
goto cleanup;
prog_fd1 = bpf_program__fd(skel1->progs.xdp_handler);
skel2 = test_xdp_link__open_and_load();
if (!ASSERT_OK_PTR(skel2, "skel_load"))
goto cleanup;
prog_fd2 = bpf_program__fd(skel2->progs.xdp_handler);
memset(&prog_info, 0, sizeof(prog_info));
err = bpf_prog_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len);
if (!ASSERT_OK(err, "fd_info1"))
goto cleanup;
id1 = prog_info.id;
memset(&prog_info, 0, sizeof(prog_info));
err = bpf_prog_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len);
if (!ASSERT_OK(err, "fd_info2"))
goto cleanup;
id2 = prog_info.id;
/* set initial prog attachment */
err = bpf_xdp_attach(IFINDEX_LO, prog_fd1, XDP_FLAGS_REPLACE, &opts);
if (!ASSERT_OK(err, "fd_attach"))
goto cleanup;
/* validate prog ID */
err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
if (!ASSERT_OK(err, "id1_check_err") || !ASSERT_EQ(id0, id1, "id1_check_val"))
goto cleanup;
/* BPF link is not allowed to replace prog attachment */
link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
/* best-effort detach prog */
opts.old_prog_fd = prog_fd1;
bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_REPLACE, &opts);
goto cleanup;
}
/* detach BPF program */
opts.old_prog_fd = prog_fd1;
err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_REPLACE, &opts);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
/* now BPF link should attach successfully */
link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel1->links.xdp_handler = link;
/* validate prog ID */
err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
if (!ASSERT_OK(err, "id1_check_err") || !ASSERT_EQ(id0, id1, "id1_check_val"))
goto cleanup;
/* BPF prog attach is not allowed to replace BPF link */
opts.old_prog_fd = prog_fd1;
err = bpf_xdp_attach(IFINDEX_LO, prog_fd2, XDP_FLAGS_REPLACE, &opts);
if (!ASSERT_ERR(err, "prog_attach_fail"))
goto cleanup;
/* Can't force-update when BPF link is active */
err = bpf_xdp_attach(IFINDEX_LO, prog_fd2, 0, NULL);
if (!ASSERT_ERR(err, "prog_update_fail"))
goto cleanup;
/* Can't force-detach when BPF link is active */
err = bpf_xdp_detach(IFINDEX_LO, 0, NULL);
if (!ASSERT_ERR(err, "prog_detach_fail"))
goto cleanup;
/* BPF link is not allowed to replace another BPF link */
link = bpf_program__attach_xdp(skel2->progs.xdp_handler, IFINDEX_LO);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
bpf_link__destroy(skel1->links.xdp_handler);
skel1->links.xdp_handler = NULL;
/* new link attach should succeed */
link = bpf_program__attach_xdp(skel2->progs.xdp_handler, IFINDEX_LO);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel2->links.xdp_handler = link;
err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
if (!ASSERT_OK(err, "id2_check_err") || !ASSERT_EQ(id0, id2, "id2_check_val"))
goto cleanup;
/* updating program under active BPF link works as expected */
err = bpf_link__update_program(link, skel1->progs.xdp_handler);
if (!ASSERT_OK(err, "link_upd"))
goto cleanup;
memset(&link_info, 0, sizeof(link_info));
err = bpf_link_get_info_by_fd(bpf_link__fd(link),
&link_info, &link_info_len);
if (!ASSERT_OK(err, "link_info"))
goto cleanup;
ASSERT_EQ(link_info.type, BPF_LINK_TYPE_XDP, "link_type");
ASSERT_EQ(link_info.prog_id, id1, "link_prog_id");
ASSERT_EQ(link_info.xdp.ifindex, IFINDEX_LO, "link_ifindex");
/* updating program under active BPF link with different type fails */
err = bpf_link__update_program(link, skel1->progs.tc_handler);
if (!ASSERT_ERR(err, "link_upd_invalid"))
goto cleanup;
err = bpf_link__detach(link);
if (!ASSERT_OK(err, "link_detach"))
goto cleanup;
memset(&link_info, 0, sizeof(link_info));
err = bpf_link_get_info_by_fd(bpf_link__fd(link),
&link_info, &link_info_len);
ASSERT_OK(err, "link_info");
ASSERT_EQ(link_info.prog_id, id1, "link_prog_id");
/* ifindex should be zeroed out */
ASSERT_EQ(link_info.xdp.ifindex, 0, "link_ifindex");
cleanup:
test_xdp_link__destroy(skel1);
test_xdp_link__destroy(skel2);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_link.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC.
*/
#include <test_progs.h>
#include <cgroup_helpers.h>
#include <network_helpers.h>
#include "progs/cg_storage_multi.h"
#include "cg_storage_multi_egress_only.skel.h"
#include "cg_storage_multi_isolated.skel.h"
#include "cg_storage_multi_shared.skel.h"
#define PARENT_CGROUP "/cgroup_storage"
#define CHILD_CGROUP "/cgroup_storage/child"
static int duration;
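/* Look up key in map and compare against expected; returns true on failure
 * so that callers can bail out.
 */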
static bool assert_storage(struct bpf_map *map, const void *key,
struct cgroup_value *expected)
{
struct cgroup_value value;
int map_fd;
map_fd = bpf_map__fd(map);
if (CHECK(bpf_map_lookup_elem(map_fd, key, &value) < 0,
"map-lookup", "errno %d", errno))
return true;
if (CHECK(memcmp(&value, expected, sizeof(struct cgroup_value)),
"assert-storage", "storages differ"))
return true;
return false;
}
static bool assert_storage_noexist(struct bpf_map *map, const void *key)
{
struct cgroup_value value;
int map_fd;
map_fd = bpf_map__fd(map);
if (CHECK(bpf_map_lookup_elem(map_fd, key, &value) == 0,
"map-lookup", "succeeded, expected ENOENT"))
return true;
if (CHECK(errno != ENOENT,
"map-lookup", "errno %d, expected ENOENT", errno))
return true;
return false;
}
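/* Join cgroup_path, then send a UDP datagram to a loopback server and read
 * it back, exercising the attached egress/ingress programs.
 * Returns true on error.
 */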
static bool connect_send(const char *cgroup_path)
{
int server_fd = -1, client_fd = -1;
char message[] = "message";
bool res = true;
if (join_cgroup(cgroup_path))
goto out_clean;
server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 0, 0);
if (server_fd < 0)
goto out_clean;
client_fd = connect_to_fd(server_fd, 0);
if (client_fd < 0)
goto out_clean;
if (send(client_fd, &message, sizeof(message), 0) < 0)
goto out_clean;
if (read(server_fd, &message, sizeof(message)) < 0)
goto out_clean;
res = false;
out_clean:
close(client_fd);
close(server_fd);
return res;
}
static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd)
{
struct cg_storage_multi_egress_only *obj;
struct cgroup_value expected_cgroup_value;
struct bpf_cgroup_storage_key key;
struct bpf_link *parent_link = NULL, *child_link = NULL;
bool err;
key.attach_type = BPF_CGROUP_INET_EGRESS;
obj = cg_storage_multi_egress_only__open_and_load();
if (CHECK(!obj, "skel-load", "errno %d", errno))
return;
/* Attach to parent cgroup, trigger packet from child.
* Assert that there is only one run and in that run the storage is
* parent cgroup's storage.
* Also assert that child cgroup's storage does not exist
*/
parent_link = bpf_program__attach_cgroup(obj->progs.egress,
parent_cgroup_fd);
if (!ASSERT_OK_PTR(parent_link, "parent-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "first-connect-send", "errno %d", errno))
goto close_bpf_object;
if (CHECK(obj->bss->invocations != 1,
"first-invoke", "invocations=%d", obj->bss->invocations))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP);
expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 1 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP);
if (assert_storage_noexist(obj->maps.cgroup_storage, &key))
goto close_bpf_object;
/* Attach to parent and child cgroup, trigger packet from child.
* Assert that there are two additional runs, one that run with parent
* cgroup's storage and one with child cgroup's storage.
*/
child_link = bpf_program__attach_cgroup(obj->progs.egress,
child_cgroup_fd);
if (!ASSERT_OK_PTR(child_link, "child-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "second-connect-send", "errno %d", errno))
goto close_bpf_object;
if (CHECK(obj->bss->invocations != 3,
"second-invoke", "invocations=%d", obj->bss->invocations))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP);
expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP);
expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 1 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(parent_link);
bpf_link__destroy(child_link);
cg_storage_multi_egress_only__destroy(obj);
}
static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd)
{
struct cg_storage_multi_isolated *obj;
struct cgroup_value expected_cgroup_value;
struct bpf_cgroup_storage_key key;
struct bpf_link *parent_egress1_link = NULL, *parent_egress2_link = NULL;
struct bpf_link *child_egress1_link = NULL, *child_egress2_link = NULL;
struct bpf_link *parent_ingress_link = NULL, *child_ingress_link = NULL;
bool err;
obj = cg_storage_multi_isolated__open_and_load();
if (CHECK(!obj, "skel-load", "errno %d", errno))
return;
/* Attach to parent cgroup, trigger packet from child.
* Assert that there are three runs, two with parent cgroup egress and
* one with parent cgroup ingress, stored in separate parent storages.
* Also assert that child cgroup's storages do not exist.
*/
parent_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
parent_cgroup_fd);
if (!ASSERT_OK_PTR(parent_egress1_link, "parent-egress1-cg-attach"))
goto close_bpf_object;
parent_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
parent_cgroup_fd);
if (!ASSERT_OK_PTR(parent_egress2_link, "parent-egress2-cg-attach"))
goto close_bpf_object;
parent_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
parent_cgroup_fd);
if (!ASSERT_OK_PTR(parent_ingress_link, "parent-ingress-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "first-connect-send", "errno %d", errno))
goto close_bpf_object;
if (CHECK(obj->bss->invocations != 3,
"first-invoke", "invocations=%d", obj->bss->invocations))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP);
key.attach_type = BPF_CGROUP_INET_EGRESS;
expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.attach_type = BPF_CGROUP_INET_INGRESS;
expected_cgroup_value = (struct cgroup_value) { .ingress_pkts = 1 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP);
key.attach_type = BPF_CGROUP_INET_EGRESS;
if (assert_storage_noexist(obj->maps.cgroup_storage, &key))
goto close_bpf_object;
key.attach_type = BPF_CGROUP_INET_INGRESS;
if (assert_storage_noexist(obj->maps.cgroup_storage, &key))
goto close_bpf_object;
/* Attach to parent and child cgroup, trigger packet from child.
* Assert that there are six additional runs, parent cgroup egresses and
* ingress, child cgroup egresses and ingress.
* Assert that egress and ingress storages are separate.
*/
child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
child_cgroup_fd);
if (!ASSERT_OK_PTR(child_egress1_link, "child-egress1-cg-attach"))
goto close_bpf_object;
child_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
child_cgroup_fd);
if (!ASSERT_OK_PTR(child_egress2_link, "child-egress2-cg-attach"))
goto close_bpf_object;
child_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
child_cgroup_fd);
if (!ASSERT_OK_PTR(child_ingress_link, "child-ingress-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "second-connect-send", "errno %d", errno))
goto close_bpf_object;
if (CHECK(obj->bss->invocations != 9,
"second-invoke", "invocations=%d", obj->bss->invocations))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP);
key.attach_type = BPF_CGROUP_INET_EGRESS;
expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 4 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.attach_type = BPF_CGROUP_INET_INGRESS;
expected_cgroup_value = (struct cgroup_value) { .ingress_pkts = 2 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP);
key.attach_type = BPF_CGROUP_INET_EGRESS;
expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.attach_type = BPF_CGROUP_INET_INGRESS;
expected_cgroup_value = (struct cgroup_value) { .ingress_pkts = 1 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(parent_egress1_link);
bpf_link__destroy(parent_egress2_link);
bpf_link__destroy(parent_ingress_link);
bpf_link__destroy(child_egress1_link);
bpf_link__destroy(child_egress2_link);
bpf_link__destroy(child_ingress_link);
cg_storage_multi_isolated__destroy(obj);
}
static void test_shared(int parent_cgroup_fd, int child_cgroup_fd)
{
struct cg_storage_multi_shared *obj;
struct cgroup_value expected_cgroup_value;
__u64 key;
struct bpf_link *parent_egress1_link = NULL, *parent_egress2_link = NULL;
struct bpf_link *child_egress1_link = NULL, *child_egress2_link = NULL;
struct bpf_link *parent_ingress_link = NULL, *child_ingress_link = NULL;
bool err;
obj = cg_storage_multi_shared__open_and_load();
if (CHECK(!obj, "skel-load", "errno %d", errno))
return;
/* Attach to parent cgroup, trigger packet from child.
* Assert that there are three runs, two with parent cgroup egress and
* one with parent cgroup ingress.
* Also assert that child cgroup's storage does not exist.
*/
parent_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
parent_cgroup_fd);
if (!ASSERT_OK_PTR(parent_egress1_link, "parent-egress1-cg-attach"))
goto close_bpf_object;
parent_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
parent_cgroup_fd);
if (!ASSERT_OK_PTR(parent_egress2_link, "parent-egress2-cg-attach"))
goto close_bpf_object;
parent_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
parent_cgroup_fd);
if (!ASSERT_OK_PTR(parent_ingress_link, "parent-ingress-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "first-connect-send", "errno %d", errno))
goto close_bpf_object;
if (CHECK(obj->bss->invocations != 3,
"first-invoke", "invocations=%d", obj->bss->invocations))
goto close_bpf_object;
key = get_cgroup_id(PARENT_CGROUP);
expected_cgroup_value = (struct cgroup_value) {
.egress_pkts = 2,
.ingress_pkts = 1,
};
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key = get_cgroup_id(CHILD_CGROUP);
if (assert_storage_noexist(obj->maps.cgroup_storage, &key))
goto close_bpf_object;
/* Attach to parent and child cgroup, trigger packet from child.
* Assert that there are six additional runs, parent cgroup egresses and
* ingress, child cgroup egresses and ingress.
*/
child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
child_cgroup_fd);
if (!ASSERT_OK_PTR(child_egress1_link, "child-egress1-cg-attach"))
goto close_bpf_object;
child_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
child_cgroup_fd);
if (!ASSERT_OK_PTR(child_egress2_link, "child-egress2-cg-attach"))
goto close_bpf_object;
child_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
child_cgroup_fd);
if (!ASSERT_OK_PTR(child_ingress_link, "child-ingress-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "second-connect-send", "errno %d", errno))
goto close_bpf_object;
if (CHECK(obj->bss->invocations != 9,
"second-invoke", "invocations=%d", obj->bss->invocations))
goto close_bpf_object;
key = get_cgroup_id(PARENT_CGROUP);
expected_cgroup_value = (struct cgroup_value) {
.egress_pkts = 4,
.ingress_pkts = 2,
};
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key = get_cgroup_id(CHILD_CGROUP);
expected_cgroup_value = (struct cgroup_value) {
.egress_pkts = 2,
.ingress_pkts = 1,
};
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(parent_egress1_link);
bpf_link__destroy(parent_egress2_link);
bpf_link__destroy(parent_ingress_link);
bpf_link__destroy(child_egress1_link);
bpf_link__destroy(child_egress2_link);
bpf_link__destroy(child_ingress_link);
cg_storage_multi_shared__destroy(obj);
}
void serial_test_cg_storage_multi(void)
{
int parent_cgroup_fd = -1, child_cgroup_fd = -1;
parent_cgroup_fd = test__join_cgroup(PARENT_CGROUP);
if (CHECK(parent_cgroup_fd < 0, "cg-create-parent", "errno %d", errno))
goto close_cgroup_fd;
child_cgroup_fd = create_and_get_cgroup(CHILD_CGROUP);
if (CHECK(child_cgroup_fd < 0, "cg-create-child", "errno %d", errno))
goto close_cgroup_fd;
if (test__start_subtest("egress_only"))
test_egress_only(parent_cgroup_fd, child_cgroup_fd);
if (test__start_subtest("isolated"))
test_isolated(parent_cgroup_fd, child_cgroup_fd);
if (test__start_subtest("shared"))
test_shared(parent_cgroup_fd, child_cgroup_fd);
close_cgroup_fd:
close(child_cgroup_fd);
close(parent_cgroup_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "xdp_metadata.skel.h"
#include "xdp_metadata2.skel.h"
#include "xdp_metadata.h"
#include "xsk.h"
#include <bpf/btf.h>
#include <linux/errqueue.h>
#include <linux/if_link.h>
#include <linux/net_tstamp.h>
#include <linux/udp.h>
#include <sys/mman.h>
#include <net/if.h>
#include <poll.h>
#define TX_NAME "veTX"
#define RX_NAME "veRX"
#define UDP_PAYLOAD_BYTES 4
#define AF_XDP_SOURCE_PORT 1234
#define AF_XDP_CONSUMER_PORT 8080
#define UMEM_NUM 16
#define UMEM_FRAME_SIZE XSK_UMEM__DEFAULT_FRAME_SIZE
#define UMEM_SIZE (UMEM_FRAME_SIZE * UMEM_NUM)
#define XDP_FLAGS XDP_FLAGS_DRV_MODE
#define QUEUE_ID 0
#define TX_ADDR "10.0.0.1"
#define RX_ADDR "10.0.0.2"
#define PREFIX_LEN "8"
#define FAMILY AF_INET
struct xsk {
void *umem_area;
struct xsk_umem *umem;
struct xsk_ring_prod fill;
struct xsk_ring_cons comp;
struct xsk_ring_prod tx;
struct xsk_ring_cons rx;
struct xsk_socket *socket;
};
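/* Create a UMEM and an AF_XDP socket bound to ifindex; the first half of
 * the UMEM is used for TX frames, the second half is posted to the fill
 * ring for RX.
 */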
static int open_xsk(int ifindex, struct xsk *xsk)
{
int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
const struct xsk_socket_config socket_config = {
.rx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
.bind_flags = XDP_COPY,
};
const struct xsk_umem_config umem_config = {
.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
.frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG,
};
__u32 idx;
u64 addr;
int ret;
int i;
xsk->umem_area = mmap(NULL, UMEM_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
if (!ASSERT_NEQ(xsk->umem_area, MAP_FAILED, "mmap"))
return -1;
ret = xsk_umem__create(&xsk->umem,
xsk->umem_area, UMEM_SIZE,
&xsk->fill,
&xsk->comp,
&umem_config);
if (!ASSERT_OK(ret, "xsk_umem__create"))
return ret;
ret = xsk_socket__create(&xsk->socket, ifindex, QUEUE_ID,
xsk->umem,
&xsk->rx,
&xsk->tx,
&socket_config);
if (!ASSERT_OK(ret, "xsk_socket__create"))
return ret;
/* First half of umem is for TX. This way address matches 1-to-1
* to the completion queue index.
*/
for (i = 0; i < UMEM_NUM / 2; i++) {
addr = i * UMEM_FRAME_SIZE;
printf("%p: tx_desc[%d] -> %lx\n", xsk, i, addr);
}
/* Second half of umem is for RX. */
ret = xsk_ring_prod__reserve(&xsk->fill, UMEM_NUM / 2, &idx);
if (!ASSERT_EQ(UMEM_NUM / 2, ret, "xsk_ring_prod__reserve"))
return ret;
if (!ASSERT_EQ(idx, 0, "fill idx != 0"))
return -1;
for (i = 0; i < UMEM_NUM / 2; i++) {
addr = (UMEM_NUM / 2 + i) * UMEM_FRAME_SIZE;
printf("%p: rx_desc[%d] -> %lx\n", xsk, i, addr);
*xsk_ring_prod__fill_addr(&xsk->fill, i) = addr;
}
xsk_ring_prod__submit(&xsk->fill, ret);
return 0;
}
static void close_xsk(struct xsk *xsk)
{
if (xsk->umem)
xsk_umem__delete(xsk->umem);
if (xsk->socket)
xsk_socket__delete(xsk->socket);
munmap(xsk->umem_area, UMEM_SIZE);
}
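/* Compute the IPv4 header checksum: one's complement fold of the 16-bit
 * words of the header.
 */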
static void ip_csum(struct iphdr *iph)
{
__u32 sum = 0;
__u16 *p;
int i;
iph->check = 0;
p = (void *)iph;
for (i = 0; i < sizeof(*iph) / sizeof(*p); i++)
sum += p[i];
while (sum >> 16)
sum = (sum & 0xffff) + (sum >> 16);
iph->check = ~sum;
}
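/* Build a minimal Ethernet/IPv4/UDP frame in the TX UMEM and kick the
 * kernel with a zero-length sendto().
 */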
static int generate_packet(struct xsk *xsk, __u16 dst_port)
{
struct xdp_desc *tx_desc;
struct udphdr *udph;
struct ethhdr *eth;
struct iphdr *iph;
void *data;
__u32 idx;
int ret;
ret = xsk_ring_prod__reserve(&xsk->tx, 1, &idx);
if (!ASSERT_EQ(ret, 1, "xsk_ring_prod__reserve"))
return -1;
tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx);
tx_desc->addr = idx % (UMEM_NUM / 2) * UMEM_FRAME_SIZE;
printf("%p: tx_desc[%u]->addr=%llx\n", xsk, idx, tx_desc->addr);
data = xsk_umem__get_data(xsk->umem_area, tx_desc->addr);
eth = data;
iph = (void *)(eth + 1);
udph = (void *)(iph + 1);
memcpy(eth->h_dest, "\x00\x00\x00\x00\x00\x02", ETH_ALEN);
memcpy(eth->h_source, "\x00\x00\x00\x00\x00\x01", ETH_ALEN);
eth->h_proto = htons(ETH_P_IP);
iph->version = 0x4;
iph->ihl = 0x5;
iph->tos = 0x9;
iph->tot_len = htons(sizeof(*iph) + sizeof(*udph) + UDP_PAYLOAD_BYTES);
iph->id = 0;
iph->frag_off = 0;
iph->ttl = 0;
iph->protocol = IPPROTO_UDP;
ASSERT_EQ(inet_pton(FAMILY, TX_ADDR, &iph->saddr), 1, "inet_pton(TX_ADDR)");
ASSERT_EQ(inet_pton(FAMILY, RX_ADDR, &iph->daddr), 1, "inet_pton(RX_ADDR)");
ip_csum(iph);
udph->source = htons(AF_XDP_SOURCE_PORT);
udph->dest = htons(dst_port);
udph->len = htons(sizeof(*udph) + UDP_PAYLOAD_BYTES);
udph->check = 0;
memset(udph + 1, 0xAA, UDP_PAYLOAD_BYTES);
tx_desc->len = sizeof(*eth) + sizeof(*iph) + sizeof(*udph) + UDP_PAYLOAD_BYTES;
xsk_ring_prod__submit(&xsk->tx, 1);
ret = sendto(xsk_socket__fd(xsk->socket), NULL, 0, MSG_DONTWAIT, NULL, 0);
if (!ASSERT_GE(ret, 0, "sendto"))
return ret;
return 0;
}
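/* Reclaim one completed TX descriptor so its UMEM frame can be reused. */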
static void complete_tx(struct xsk *xsk)
{
__u32 idx;
__u64 addr;
if (ASSERT_EQ(xsk_ring_cons__peek(&xsk->comp, 1, &idx), 1, "xsk_ring_cons__peek")) {
addr = *xsk_ring_cons__comp_addr(&xsk->comp, idx);
printf("%p: complete tx idx=%u addr=%llx\n", xsk, idx, addr);
xsk_ring_cons__release(&xsk->comp, 1);
}
}
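/* Return a consumed RX frame to the fill ring. */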
static void refill_rx(struct xsk *xsk, __u64 addr)
{
__u32 idx;
if (ASSERT_EQ(xsk_ring_prod__reserve(&xsk->fill, 1, &idx), 1, "xsk_ring_prod__reserve")) {
printf("%p: complete idx=%u addr=%llx\n", xsk, idx, addr);
*xsk_ring_prod__fill_addr(&xsk->fill, idx) = addr;
xsk_ring_prod__submit(&xsk->fill, 1);
}
}
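/* Receive the forwarded packet on the AF_XDP socket and check both the
 * packet headers and the custom metadata area prepended by the XDP program.
 */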
static int verify_xsk_metadata(struct xsk *xsk)
{
const struct xdp_desc *rx_desc;
struct pollfd fds = {};
struct xdp_meta *meta;
struct ethhdr *eth;
struct iphdr *iph;
__u64 comp_addr;
void *data;
__u64 addr;
__u32 idx;
int ret;
ret = recvfrom(xsk_socket__fd(xsk->socket), NULL, 0, MSG_DONTWAIT, NULL, NULL);
if (!ASSERT_EQ(ret, 0, "recvfrom"))
return -1;
fds.fd = xsk_socket__fd(xsk->socket);
fds.events = POLLIN;
ret = poll(&fds, 1, 1000);
if (!ASSERT_GT(ret, 0, "poll"))
return -1;
ret = xsk_ring_cons__peek(&xsk->rx, 1, &idx);
if (!ASSERT_EQ(ret, 1, "xsk_ring_cons__peek"))
return -2;
rx_desc = xsk_ring_cons__rx_desc(&xsk->rx, idx);
comp_addr = xsk_umem__extract_addr(rx_desc->addr);
addr = xsk_umem__add_offset_to_addr(rx_desc->addr);
printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx\n",
xsk, idx, rx_desc->addr, addr, comp_addr);
data = xsk_umem__get_data(xsk->umem_area, addr);
/* Make sure we got the packet offset correctly. */
eth = data;
ASSERT_EQ(eth->h_proto, htons(ETH_P_IP), "eth->h_proto");
iph = (void *)(eth + 1);
ASSERT_EQ((int)iph->version, 4, "iph->version");
/* custom metadata */
meta = data - sizeof(struct xdp_meta);
if (!ASSERT_NEQ(meta->rx_timestamp, 0, "rx_timestamp"))
return -1;
if (!ASSERT_NEQ(meta->rx_hash, 0, "rx_hash"))
return -1;
ASSERT_EQ(meta->rx_hash_type, 0, "rx_hash_type");
xsk_ring_cons__release(&xsk->rx, 1);
refill_rx(xsk, comp_addr);
return 0;
}
void test_xdp_metadata(void)
{
struct xdp_metadata2 *bpf_obj2 = NULL;
struct xdp_metadata *bpf_obj = NULL;
struct bpf_program *new_prog, *prog;
struct nstoken *tok = NULL;
__u32 queue_id = QUEUE_ID;
struct bpf_map *prog_arr;
struct xsk tx_xsk = {};
struct xsk rx_xsk = {};
__u32 val, key = 0;
int retries = 10;
int rx_ifindex;
int tx_ifindex;
int sock_fd;
int ret;
/* Setup new networking namespace, with a veth pair. */
SYS(out, "ip netns add xdp_metadata");
tok = open_netns("xdp_metadata");
SYS(out, "ip link add numtxqueues 1 numrxqueues 1 " TX_NAME
" type veth peer " RX_NAME " numtxqueues 1 numrxqueues 1");
SYS(out, "ip link set dev " TX_NAME " address 00:00:00:00:00:01");
SYS(out, "ip link set dev " RX_NAME " address 00:00:00:00:00:02");
SYS(out, "ip link set dev " TX_NAME " up");
SYS(out, "ip link set dev " RX_NAME " up");
SYS(out, "ip addr add " TX_ADDR "/" PREFIX_LEN " dev " TX_NAME);
SYS(out, "ip addr add " RX_ADDR "/" PREFIX_LEN " dev " RX_NAME);
rx_ifindex = if_nametoindex(RX_NAME);
tx_ifindex = if_nametoindex(TX_NAME);
/* Setup separate AF_XDP for TX and RX interfaces. */
ret = open_xsk(tx_ifindex, &tx_xsk);
if (!ASSERT_OK(ret, "open_xsk(TX_NAME)"))
goto out;
ret = open_xsk(rx_ifindex, &rx_xsk);
if (!ASSERT_OK(ret, "open_xsk(RX_NAME)"))
goto out;
bpf_obj = xdp_metadata__open();
if (!ASSERT_OK_PTR(bpf_obj, "open skeleton"))
goto out;
prog = bpf_object__find_program_by_name(bpf_obj->obj, "rx");
bpf_program__set_ifindex(prog, rx_ifindex);
bpf_program__set_flags(prog, BPF_F_XDP_DEV_BOUND_ONLY);
if (!ASSERT_OK(xdp_metadata__load(bpf_obj), "load skeleton"))
goto out;
/* Make sure we can't add dev-bound programs to prog maps. */
prog_arr = bpf_object__find_map_by_name(bpf_obj->obj, "prog_arr");
if (!ASSERT_OK_PTR(prog_arr, "no prog_arr map"))
goto out;
val = bpf_program__fd(prog);
if (!ASSERT_ERR(bpf_map__update_elem(prog_arr, &key, sizeof(key),
&val, sizeof(val), BPF_ANY),
"update prog_arr"))
goto out;
/* Attach BPF program to RX interface. */
ret = bpf_xdp_attach(rx_ifindex,
bpf_program__fd(bpf_obj->progs.rx),
XDP_FLAGS, NULL);
if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
goto out;
sock_fd = xsk_socket__fd(rx_xsk.socket);
ret = bpf_map_update_elem(bpf_map__fd(bpf_obj->maps.xsk), &queue_id, &sock_fd, 0);
if (!ASSERT_GE(ret, 0, "bpf_map_update_elem"))
goto out;
/* Send packet destined to RX AF_XDP socket. */
if (!ASSERT_GE(generate_packet(&tx_xsk, AF_XDP_CONSUMER_PORT), 0,
"generate AF_XDP_CONSUMER_PORT"))
goto out;
/* Verify AF_XDP RX packet has proper metadata. */
if (!ASSERT_GE(verify_xsk_metadata(&rx_xsk), 0,
"verify_xsk_metadata"))
goto out;
complete_tx(&tx_xsk);
/* Make sure freplace correctly picks up original bound device
* and doesn't crash.
*/
bpf_obj2 = xdp_metadata2__open();
if (!ASSERT_OK_PTR(bpf_obj2, "open skeleton"))
goto out;
new_prog = bpf_object__find_program_by_name(bpf_obj2->obj, "freplace_rx");
bpf_program__set_attach_target(new_prog, bpf_program__fd(prog), "rx");
if (!ASSERT_OK(xdp_metadata2__load(bpf_obj2), "load freplace skeleton"))
goto out;
if (!ASSERT_OK(xdp_metadata2__attach(bpf_obj2), "attach freplace"))
goto out;
/* Send packet to trigger the freplace program. */
if (!ASSERT_GE(generate_packet(&tx_xsk, AF_XDP_CONSUMER_PORT), 0,
"generate freplace packet"))
goto out;
while (retries--) {
if (bpf_obj2->bss->called)
break;
usleep(10);
}
ASSERT_GT(bpf_obj2->bss->called, 0, "not called");
out:
close_xsk(&rx_xsk);
close_xsk(&tx_xsk);
xdp_metadata2__destroy(bpf_obj2);
xdp_metadata__destroy(bpf_obj);
if (tok)
close_netns(tok);
SYS_NOFAIL("ip netns del xdp_metadata");
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_metadata.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
void test_xdp_perf(void)
{
const char *file = "./xdp_dummy.bpf.o";
struct bpf_object *obj;
char in[128], out[128];
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = in,
.data_size_in = sizeof(in),
.data_out = out,
.data_size_out = sizeof(out),
.repeat = 1000000,
);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, XDP_PASS, "test_run retval");
ASSERT_EQ(topts.data_size_out, 128, "test_run data_size_out");
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_perf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <network_helpers.h>
#include "local_kptr_stash.skel.h"
#include "local_kptr_stash_fail.skel.h"
static void test_local_kptr_stash_simple(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct local_kptr_stash *skel;
int ret;
skel = local_kptr_stash__open_and_load();
if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_rb_nodes), &opts);
ASSERT_OK(ret, "local_kptr_stash_add_nodes run");
ASSERT_OK(opts.retval, "local_kptr_stash_add_nodes retval");
local_kptr_stash__destroy(skel);
}
static void test_local_kptr_stash_plain(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct local_kptr_stash *skel;
int ret;
skel = local_kptr_stash__open_and_load();
if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_plain), &opts);
ASSERT_OK(ret, "local_kptr_stash_add_plain run");
ASSERT_OK(opts.retval, "local_kptr_stash_add_plain retval");
local_kptr_stash__destroy(skel);
}
static void test_local_kptr_stash_unstash(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct local_kptr_stash *skel;
int ret;
skel = local_kptr_stash__open_and_load();
if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_rb_nodes), &opts);
ASSERT_OK(ret, "local_kptr_stash_add_nodes run");
ASSERT_OK(opts.retval, "local_kptr_stash_add_nodes retval");
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.unstash_rb_node), &opts);
ASSERT_OK(ret, "local_kptr_stash_add_nodes run");
ASSERT_EQ(opts.retval, 42, "local_kptr_stash_add_nodes retval");
local_kptr_stash__destroy(skel);
}
static void test_local_kptr_stash_fail(void)
{
RUN_TESTS(local_kptr_stash_fail);
}
void test_local_kptr_stash(void)
{
if (test__start_subtest("local_kptr_stash_simple"))
test_local_kptr_stash_simple();
if (test__start_subtest("local_kptr_stash_plain"))
test_local_kptr_stash_plain();
if (test__start_subtest("local_kptr_stash_unstash"))
test_local_kptr_stash_unstash();
if (test__start_subtest("local_kptr_stash_fail"))
test_local_kptr_stash_fail();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
/*
* Tests for sockmap/sockhash holding kTLS sockets.
*/
#include <netinet/tcp.h>
#include "test_progs.h"
#define MAX_TEST_NAME 80
#define TCP_ULP 31
static int tcp_server(int family)
{
int err, s;
s = socket(family, SOCK_STREAM, 0);
if (!ASSERT_GE(s, 0, "socket"))
return -1;
err = listen(s, SOMAXCONN);
if (!ASSERT_OK(err, "listen"))
return -1;
return s;
}
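/* connect() to AF_UNSPEC disconnects and unhashes the TCP socket. */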
static int disconnect(int fd)
{
struct sockaddr unspec = { AF_UNSPEC };
return connect(fd, &unspec, sizeof(unspec));
}
/* Disconnect (unhash) a kTLS socket after removing it from sockmap. */
static void test_sockmap_ktls_disconnect_after_delete(int family, int map)
{
struct sockaddr_storage addr = {0};
socklen_t len = sizeof(addr);
int err, cli, srv, zero = 0;
srv = tcp_server(family);
if (srv == -1)
return;
err = getsockname(srv, (struct sockaddr *)&addr, &len);
if (!ASSERT_OK(err, "getsockopt"))
goto close_srv;
cli = socket(family, SOCK_STREAM, 0);
if (!ASSERT_GE(cli, 0, "socket"))
goto close_srv;
err = connect(cli, (struct sockaddr *)&addr, len);
if (!ASSERT_OK(err, "connect"))
goto close_cli;
err = bpf_map_update_elem(map, &zero, &cli, 0);
if (!ASSERT_OK(err, "bpf_map_update_elem"))
goto close_cli;
err = setsockopt(cli, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
if (!ASSERT_OK(err, "setsockopt(TCP_ULP)"))
goto close_cli;
err = bpf_map_delete_elem(map, &zero);
if (!ASSERT_OK(err, "bpf_map_delete_elem"))
goto close_cli;
err = disconnect(cli);
ASSERT_OK(err, "disconnect");
close_cli:
close(cli);
close_srv:
close(srv);
}
static void test_sockmap_ktls_update_fails_when_sock_has_ulp(int family, int map)
{
struct sockaddr_storage addr = {};
socklen_t len = sizeof(addr);
struct sockaddr_in6 *v6;
struct sockaddr_in *v4;
int err, s, zero = 0;
switch (family) {
case AF_INET:
v4 = (struct sockaddr_in *)&addr;
v4->sin_family = AF_INET;
break;
case AF_INET6:
v6 = (struct sockaddr_in6 *)&addr;
v6->sin6_family = AF_INET6;
break;
default:
PRINT_FAIL("unsupported socket family %d", family);
return;
}
s = socket(family, SOCK_STREAM, 0);
if (!ASSERT_GE(s, 0, "socket"))
return;
err = bind(s, (struct sockaddr *)&addr, len);
if (!ASSERT_OK(err, "bind"))
goto close;
err = getsockname(s, (struct sockaddr *)&addr, &len);
if (!ASSERT_OK(err, "getsockname"))
goto close;
err = connect(s, (struct sockaddr *)&addr, len);
if (!ASSERT_OK(err, "connect"))
goto close;
/* save sk->sk_prot and set it to tls_prots */
err = setsockopt(s, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
if (!ASSERT_OK(err, "setsockopt(TCP_ULP)"))
goto close;
/* sockmap update should not affect saved sk_prot */
err = bpf_map_update_elem(map, &zero, &s, BPF_ANY);
if (!ASSERT_ERR(err, "sockmap update elem"))
goto close;
/* call sk->sk_prot->setsockopt to dispatch to saved sk_prot */
err = setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &zero, sizeof(zero));
ASSERT_OK(err, "setsockopt(TCP_NODELAY)");
close:
close(s);
}
static const char *fmt_test_name(const char *subtest_name, int family,
enum bpf_map_type map_type)
{
const char *map_type_str = map_type == BPF_MAP_TYPE_SOCKMAP ? "SOCKMAP" : "SOCKHASH";
const char *family_str = family == AF_INET ? "IPv4" : "IPv6";
static char test_name[MAX_TEST_NAME];
snprintf(test_name, MAX_TEST_NAME,
"sockmap_ktls %s %s %s",
subtest_name, family_str, map_type_str);
return test_name;
}
static void run_tests(int family, enum bpf_map_type map_type)
{
int map;
map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
if (!ASSERT_GE(map, 0, "bpf_map_create"))
return;
if (test__start_subtest(fmt_test_name("disconnect_after_delete", family, map_type)))
test_sockmap_ktls_disconnect_after_delete(family, map);
if (test__start_subtest(fmt_test_name("update_fails_when_sock_has_ulp", family, map_type)))
test_sockmap_ktls_update_fails_when_sock_has_ulp(family, map);
close(map);
}
void test_sockmap_ktls(void)
{
run_tests(AF_INET, BPF_MAP_TYPE_SOCKMAP);
run_tests(AF_INET, BPF_MAP_TYPE_SOCKHASH);
run_tests(AF_INET6, BPF_MAP_TYPE_SOCKMAP);
run_tests(AF_INET6, BPF_MAP_TYPE_SOCKHASH);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include "test_static_linked.skel.h"
void test_static_linked(void)
{
int err;
struct test_static_linked *skel;
skel = test_static_linked__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->rodata->rovar1 = 1;
skel->rodata->rovar2 = 4;
err = test_static_linked__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
err = test_static_linked__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
/* trigger */
usleep(1);
ASSERT_EQ(skel->data->var1, 1 * 2 + 2 + 3, "var1");
ASSERT_EQ(skel->data->var2, 4 * 3 + 5 + 6, "var2");
cleanup:
test_static_linked__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/static_linked.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/if_link.h>
#include <test_progs.h>
#define IFINDEX_LO 1
void serial_test_xdp_info(void)
{
__u32 len = sizeof(struct bpf_prog_info), duration = 0, prog_id;
const char *file = "./xdp_dummy.bpf.o";
LIBBPF_OPTS(bpf_xdp_query_opts, opts);
struct bpf_prog_info info = {};
struct bpf_object *obj;
int err, prog_fd;
/* Get prog_id for XDP_ATTACHED_NONE mode */
err = bpf_xdp_query_id(IFINDEX_LO, 0, &prog_id);
if (CHECK(err, "get_xdp_none", "errno=%d\n", errno))
return;
if (CHECK(prog_id, "prog_id_none", "unexpected prog_id=%u\n", prog_id))
return;
err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_SKB_MODE, &prog_id);
if (CHECK(err, "get_xdp_none_skb", "errno=%d\n", errno))
return;
if (CHECK(prog_id, "prog_id_none_skb", "unexpected prog_id=%u\n",
prog_id))
return;
/* Setup prog */
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
if (CHECK(err, "get_prog_info", "errno=%d\n", errno))
goto out_close;
err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
if (CHECK(err, "set_xdp_skb", "errno=%d\n", errno))
goto out_close;
/* Get prog_id for single prog mode */
err = bpf_xdp_query_id(IFINDEX_LO, 0, &prog_id);
if (CHECK(err, "get_xdp", "errno=%d\n", errno))
goto out;
if (CHECK(prog_id != info.id, "prog_id", "prog_id not available\n"))
goto out;
err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_SKB_MODE, &prog_id);
if (CHECK(err, "get_xdp_skb", "errno=%d\n", errno))
goto out;
if (CHECK(prog_id != info.id, "prog_id_skb", "prog_id not available\n"))
goto out;
err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_DRV_MODE, &prog_id);
if (CHECK(err, "get_xdp_drv", "errno=%d\n", errno))
goto out;
if (CHECK(prog_id, "prog_id_drv", "unexpected prog_id=%u\n", prog_id))
goto out;
/* Check xdp features supported by lo device */
opts.feature_flags = ~0;
err = bpf_xdp_query(IFINDEX_LO, XDP_FLAGS_DRV_MODE, &opts);
if (!ASSERT_OK(err, "bpf_xdp_query"))
goto out;
ASSERT_EQ(opts.feature_flags, 0, "opts.feature_flags");
out:
bpf_xdp_detach(IFINDEX_LO, 0, NULL);
out_close:
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_info.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Google LLC.
*/
#include <asm-generic/errno-base.h>
#include <sys/stat.h>
#include <test_progs.h>
#include <linux/limits.h>
#include "local_storage.skel.h"
#include "network_helpers.h"
#include "task_local_storage_helpers.h"
#define TEST_STORAGE_VALUE 0xbeefdead
struct storage {
void *inode;
unsigned int value;
};
/* Fork and exec the provided rm binary and return the exit status of the
 * forked process.
 */
static int run_self_unlink(struct local_storage *skel, const char *rm_path)
{
int child_pid, child_status, ret;
int null_fd;
child_pid = fork();
if (child_pid == 0) {
null_fd = open("/dev/null", O_WRONLY);
dup2(null_fd, STDOUT_FILENO);
dup2(null_fd, STDERR_FILENO);
close(null_fd);
skel->bss->monitored_pid = getpid();
/* Use the copied rm binary to delete itself:
 * <tmp_dir>/copy_of_rm <tmp_dir>/copy_of_rm.
 */
ret = execlp(rm_path, rm_path, rm_path, NULL);
if (ret)
exit(errno);
} else if (child_pid > 0) {
waitpid(child_pid, &child_status, 0);
ASSERT_EQ(skel->data->task_storage_result, 0, "task_storage_result");
return WEXITSTATUS(child_status);
}
return -EINVAL;
}
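/* Exercise lookup, update and delete on a local storage map from user
 * space, keyed by the given object fd.
 */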
static bool check_syscall_operations(int map_fd, int obj_fd)
{
struct storage val = { .value = TEST_STORAGE_VALUE },
lookup_val = { .value = 0 };
int err;
/* Looking up an existing element should fail initially */
err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0);
if (!ASSERT_EQ(err, -ENOENT, "bpf_map_lookup_elem"))
return false;
/* Create a new element */
err = bpf_map_update_elem(map_fd, &obj_fd, &val, BPF_NOEXIST);
if (!ASSERT_OK(err, "bpf_map_update_elem"))
return false;
/* Lookup the newly created element */
err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0);
if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
return false;
/* Check the value of the newly created element */
if (!ASSERT_EQ(lookup_val.value, val.value, "bpf_map_lookup_elem"))
return false;
err = bpf_map_delete_elem(map_fd, &obj_fd);
if (!ASSERT_OK(err, "bpf_map_delete_elem()"))
return false;
/* The lookup should fail, now that the element has been deleted */
err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0);
if (!ASSERT_EQ(err, -ENOENT, "bpf_map_lookup_elem"))
return false;
return true;
}
void test_test_local_storage(void)
{
char tmp_dir_path[] = "/tmp/local_storageXXXXXX";
int err, serv_sk = -1, task_fd = -1, rm_fd = -1;
struct local_storage *skel = NULL;
char tmp_exec_path[64];
char cmd[256];
skel = local_storage__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto close_prog;
err = local_storage__attach(skel);
if (!ASSERT_OK(err, "attach"))
goto close_prog;
task_fd = sys_pidfd_open(getpid(), 0);
if (!ASSERT_GE(task_fd, 0, "pidfd_open"))
goto close_prog;
if (!check_syscall_operations(bpf_map__fd(skel->maps.task_storage_map),
task_fd))
goto close_prog;
if (!ASSERT_OK_PTR(mkdtemp(tmp_dir_path), "mkdtemp"))
goto close_prog;
snprintf(tmp_exec_path, sizeof(tmp_exec_path), "%s/copy_of_rm",
tmp_dir_path);
snprintf(cmd, sizeof(cmd), "cp /bin/rm %s", tmp_exec_path);
if (!ASSERT_OK(system(cmd), "system(cp)"))
goto close_prog_rmdir;
rm_fd = open(tmp_exec_path, O_RDONLY);
if (!ASSERT_GE(rm_fd, 0, "open(tmp_exec_path)"))
goto close_prog_rmdir;
if (!check_syscall_operations(bpf_map__fd(skel->maps.inode_storage_map),
rm_fd))
goto close_prog_rmdir;
/* run_self_unlink() forks a child that sets skel->bss->monitored_pid to
 * its own pid and then executes tmp_exec_path to unlink its own
 * executable. This operation should be denied by the loaded LSM program.
 */
err = run_self_unlink(skel, tmp_exec_path);
if (!ASSERT_EQ(err, EPERM, "run_self_unlink"))
goto close_prog_rmdir;
/* Set the process being monitored to be the current process */
skel->bss->monitored_pid = getpid();
/* Move copy_of_rm to a new location so that it triggers the
* inode_rename LSM hook with a new_dentry that has a NULL inode ptr.
*/
snprintf(cmd, sizeof(cmd), "mv %s/copy_of_rm %s/check_null_ptr",
tmp_dir_path, tmp_dir_path);
if (!ASSERT_OK(system(cmd), "system(mv)"))
goto close_prog_rmdir;
ASSERT_EQ(skel->data->inode_storage_result, 0, "inode_storage_result");
serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_GE(serv_sk, 0, "start_server"))
goto close_prog_rmdir;
ASSERT_EQ(skel->data->sk_storage_result, 0, "sk_storage_result");
if (!check_syscall_operations(bpf_map__fd(skel->maps.sk_storage_map),
serv_sk))
goto close_prog_rmdir;
close_prog_rmdir:
snprintf(cmd, sizeof(cmd), "rm -rf %s", tmp_dir_path);
system(cmd);
close_prog:
close(serv_sk);
close(rm_fd);
close(task_fd);
local_storage__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_local_storage.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "syscall.skel.h"
struct args {
__u64 log_buf;
__u32 log_size;
int max_entries;
int map_fd;
int prog_fd;
int btf_fd;
};
void test_syscall(void)
{
static char verifier_log[8192];
struct args ctx = {
.max_entries = 1024,
.log_buf = (uintptr_t) verifier_log,
.log_size = sizeof(verifier_log),
};
LIBBPF_OPTS(bpf_test_run_opts, tattr,
.ctx_in = &ctx,
.ctx_size_in = sizeof(ctx),
);
struct syscall *skel = NULL;
__u64 key = 12, value = 0;
int err, prog_fd;
skel = syscall__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
prog_fd = bpf_program__fd(skel->progs.bpf_prog);
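/* The syscall skeleton's bpf_prog is expected to create a map and load a
 * program from inside BPF and hand their fds back through ctx; the
 * assertions below check those fds, the verifier log and that key 12 was
 * set to value 34 in the new map.
 */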
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_EQ(err, 0, "err");
ASSERT_EQ(tattr.retval, 1, "retval");
ASSERT_GT(ctx.map_fd, 0, "ctx.map_fd");
ASSERT_GT(ctx.prog_fd, 0, "ctx.prog_fd");
ASSERT_OK(memcmp(verifier_log, "processed", sizeof("processed") - 1),
"verifier_log");
err = bpf_map_lookup_elem(ctx.map_fd, &key, &value);
ASSERT_EQ(err, 0, "map_lookup");
ASSERT_EQ(value, 34, "map lookup value");
cleanup:
syscall__destroy(skel);
if (ctx.prog_fd > 0)
close(ctx.prog_fd);
if (ctx.map_fd > 0)
close(ctx.map_fd);
if (ctx.btf_fd > 0)
close(ctx.btf_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/syscall.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include "fentry_test.lskel.h"
#include "fexit_test.lskel.h"
void test_fentry_fexit(void)
{
struct fentry_test_lskel *fentry_skel = NULL;
struct fexit_test_lskel *fexit_skel = NULL;
__u64 *fentry_res, *fexit_res;
int err, prog_fd, i;
LIBBPF_OPTS(bpf_test_run_opts, topts);
fentry_skel = fentry_test_lskel__open_and_load();
if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load"))
goto close_prog;
fexit_skel = fexit_test_lskel__open_and_load();
if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load"))
goto close_prog;
err = fentry_test_lskel__attach(fentry_skel);
if (!ASSERT_OK(err, "fentry_attach"))
goto close_prog;
err = fexit_test_lskel__attach(fexit_skel);
if (!ASSERT_OK(err, "fexit_attach"))
goto close_prog;
prog_fd = fexit_skel->progs.test1.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "ipv6 test_run");
ASSERT_OK(topts.retval, "ipv6 test retval");
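/* Both skeletons expose eight consecutive __u64 result counters in .bss
 * (test1_result and friends), so each section is scanned as an array.
 */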
fentry_res = (__u64 *)fentry_skel->bss;
fexit_res = (__u64 *)fexit_skel->bss;
printf("%lld\n", fentry_skel->bss->test1_result);
for (i = 0; i < 8; i++) {
ASSERT_EQ(fentry_res[i], 1, "fentry result");
ASSERT_EQ(fexit_res[i], 1, "fexit result");
}
close_prog:
fentry_test_lskel__destroy(fentry_skel);
fexit_test_lskel__destroy(fexit_skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/fentry_fexit.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */
#include <uapi/linux/if_link.h>
#include <net/if.h>
#include <test_progs.h>
#define loopback 1
#define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null"
#include "test_tc_link.skel.h"
#include "tc_helpers.h"
void serial_test_tc_opts_basic(void)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
__u32 fd1, fd2, id1, id2;
struct test_tc_link *skel;
__u32 prog_ids[2];
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
assert_mprog_count(BPF_TCX_INGRESS, 0);
assert_mprog_count(BPF_TCX_EGRESS, 0);
ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
err = bpf_prog_attach_opts(fd1, loopback, BPF_TCX_INGRESS, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count(BPF_TCX_INGRESS, 1);
assert_mprog_count(BPF_TCX_EGRESS, 0);
optq.prog_ids = prog_ids;
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_in;
ASSERT_EQ(optq.count, 1, "count");
ASSERT_EQ(optq.revision, 2, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
err = bpf_prog_attach_opts(fd2, loopback, BPF_TCX_EGRESS, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_in;
assert_mprog_count(BPF_TCX_INGRESS, 1);
assert_mprog_count(BPF_TCX_EGRESS, 1);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, BPF_TCX_EGRESS, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_eg;
ASSERT_EQ(optq.count, 1, "count");
ASSERT_EQ(optq.revision, 2, "revision");
ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
cleanup_eg:
err = bpf_prog_detach_opts(fd2, loopback, BPF_TCX_EGRESS, &optd);
ASSERT_OK(err, "prog_detach_eg");
assert_mprog_count(BPF_TCX_INGRESS, 1);
assert_mprog_count(BPF_TCX_EGRESS, 0);
cleanup_in:
err = bpf_prog_detach_opts(fd1, loopback, BPF_TCX_INGRESS, &optd);
ASSERT_OK(err, "prog_detach_in");
assert_mprog_count(BPF_TCX_INGRESS, 0);
assert_mprog_count(BPF_TCX_EGRESS, 0);
cleanup:
test_tc_link__destroy(skel);
}
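/* BPF_F_BEFORE with relative_fd/relative_id inserts the new program directly
 * in front of the given anchor (fd3 ends up before fd2, fd4 before fd1).
 */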
static void test_tc_opts_before_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
struct test_tc_link *skel;
__u32 prog_ids[5];
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
fd3 = bpf_program__fd(skel->progs.tc3);
fd4 = bpf_program__fd(skel->progs.tc4);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
id3 = id_from_prog_fd(fd3);
id4 = id_from_prog_fd(fd4);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
ASSERT_NEQ(id3, id4, "prog_ids_3_4");
ASSERT_NEQ(id2, id3, "prog_ids_2_3");
assert_mprog_count(target, 0);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count(target, 1);
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target;
assert_mprog_count(target, 2);
optq.prog_ids = prog_ids;
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target2;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 3, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_BEFORE,
.relative_fd = fd2,
);
err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target2;
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target3;
ASSERT_EQ(optq.count, 3, "count");
ASSERT_EQ(optq.revision, 4, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_BEFORE,
.relative_id = id1,
);
err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target3;
assert_mprog_count(target, 4);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target4;
ASSERT_EQ(optq.count, 4, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], id2, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
cleanup_target4:
err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 3);
cleanup_target3:
err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 2);
cleanup_target2:
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
cleanup_target:
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
cleanup:
test_tc_link__destroy(skel);
}
void serial_test_tc_opts_before(void)
{
test_tc_opts_before_target(BPF_TCX_INGRESS);
test_tc_opts_before_target(BPF_TCX_EGRESS);
}
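/* BPF_F_AFTER with relative_fd/relative_id places the new program directly
 * behind the given anchor (fd3 ends up after fd1, fd4 after fd2).
 */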
static void test_tc_opts_after_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
struct test_tc_link *skel;
__u32 prog_ids[5];
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
fd3 = bpf_program__fd(skel->progs.tc3);
fd4 = bpf_program__fd(skel->progs.tc4);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
id3 = id_from_prog_fd(fd3);
id4 = id_from_prog_fd(fd4);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
ASSERT_NEQ(id3, id4, "prog_ids_3_4");
ASSERT_NEQ(id2, id3, "prog_ids_2_3");
assert_mprog_count(target, 0);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count(target, 1);
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target;
assert_mprog_count(target, 2);
optq.prog_ids = prog_ids;
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target2;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 3, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_AFTER,
.relative_fd = fd1,
);
err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target2;
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target3;
ASSERT_EQ(optq.count, 3, "count");
ASSERT_EQ(optq.revision, 4, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_AFTER,
.relative_id = id2,
);
err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target3;
assert_mprog_count(target, 4);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target4;
ASSERT_EQ(optq.count, 4, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
cleanup_target4:
err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 3);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target3;
ASSERT_EQ(optq.count, 3, "count");
ASSERT_EQ(optq.revision, 6, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
cleanup_target3:
err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 2);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target2;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 7, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
cleanup_target2:
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target;
ASSERT_EQ(optq.count, 1, "count");
ASSERT_EQ(optq.revision, 8, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
cleanup_target:
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
cleanup:
test_tc_link__destroy(skel);
}
void serial_test_tc_opts_after(void)
{
test_tc_opts_after_target(BPF_TCX_INGRESS);
test_tc_opts_after_target(BPF_TCX_EGRESS);
}
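/* expected_revision must match the attach point's current revision;
 * stale values are rejected with -ESTALE for both attach and detach.
 */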
static void test_tc_opts_revision_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
__u32 fd1, fd2, id1, id2;
struct test_tc_link *skel;
__u32 prog_ids[3];
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(opta,
.expected_revision = 1,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(opta,
.expected_revision = 1,
);
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
if (!ASSERT_EQ(err, -ESTALE, "prog_attach"))
goto cleanup_target;
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(opta,
.expected_revision = 2,
);
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target;
assert_mprog_count(target, 2);
optq.prog_ids = prog_ids;
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target2;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 3, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
LIBBPF_OPTS_RESET(optd,
.expected_revision = 2,
);
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_EQ(err, -ESTALE, "prog_detach");
assert_mprog_count(target, 2);
cleanup_target2:
LIBBPF_OPTS_RESET(optd,
.expected_revision = 3,
);
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
cleanup_target:
LIBBPF_OPTS_RESET(optd);
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
cleanup:
test_tc_link__destroy(skel);
}
void serial_test_tc_opts_revision(void)
{
test_tc_opts_revision_target(BPF_TCX_INGRESS);
test_tc_opts_revision_target(BPF_TCX_EGRESS);
}
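/* Combine tcx programs with a classic cls_bpf filter (when chain_tc_old is
 * set) and verify that both layers see the traffic and detach independently.
 */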
static void test_tc_chain_classic(int target, bool chain_tc_old)
{
LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
bool hook_created = false, tc_attached = false;
__u32 fd1, fd2, fd3, id1, id2, id3;
struct test_tc_link *skel;
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
fd3 = bpf_program__fd(skel->progs.tc3);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
id3 = id_from_prog_fd(fd3);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
ASSERT_NEQ(id2, id3, "prog_ids_2_3");
assert_mprog_count(target, 0);
if (chain_tc_old) {
tc_hook.attach_point = target == BPF_TCX_INGRESS ?
BPF_TC_INGRESS : BPF_TC_EGRESS;
err = bpf_tc_hook_create(&tc_hook);
if (err == 0)
hook_created = true;
err = err == -EEXIST ? 0 : err;
if (!ASSERT_OK(err, "bpf_tc_hook_create"))
goto cleanup;
tc_opts.prog_fd = fd3;
err = bpf_tc_attach(&tc_hook, &tc_opts);
if (!ASSERT_OK(err, "bpf_tc_attach"))
goto cleanup;
tc_attached = true;
}
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_detach;
assert_mprog_count(target, 2);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
skel->bss->seen_tc1 = false;
skel->bss->seen_tc2 = false;
skel->bss->seen_tc3 = false;
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup_detach;
assert_mprog_count(target, 1);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
cleanup_detach:
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
__assert_mprog_count(target, 0, chain_tc_old, loopback);
cleanup:
if (tc_attached) {
tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
err = bpf_tc_detach(&tc_hook, &tc_opts);
ASSERT_OK(err, "bpf_tc_detach");
}
if (hook_created) {
tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
bpf_tc_hook_destroy(&tc_hook);
}
test_tc_link__destroy(skel);
assert_mprog_count(target, 0);
}
void serial_test_tc_opts_chain_classic(void)
{
test_tc_chain_classic(BPF_TCX_INGRESS, false);
test_tc_chain_classic(BPF_TCX_EGRESS, false);
test_tc_chain_classic(BPF_TCX_INGRESS, true);
test_tc_chain_classic(BPF_TCX_EGRESS, true);
}
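/* BPF_F_REPLACE swaps a program in place without changing the chain length;
 * replacing with an already attached program fails with -EEXIST, and
 * position constraints that do not match the current slot fail with -ERANGE.
 */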
static void test_tc_opts_replace_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
__u32 fd1, fd2, fd3, id1, id2, id3, detach_fd;
__u32 prog_ids[4], prog_flags[4];
struct test_tc_link *skel;
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
fd3 = bpf_program__fd(skel->progs.tc3);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
id3 = id_from_prog_fd(fd3);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
ASSERT_NEQ(id2, id3, "prog_ids_2_3");
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(opta,
.expected_revision = 1,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_BEFORE,
.relative_id = id1,
.expected_revision = 2,
);
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target;
detach_fd = fd2;
assert_mprog_count(target, 2);
optq.prog_attach_flags = prog_flags;
optq.prog_ids = prog_ids;
memset(prog_flags, 0, sizeof(prog_flags));
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target2;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 3, "revision");
ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]");
ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]");
ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
skel->bss->seen_tc1 = false;
skel->bss->seen_tc2 = false;
skel->bss->seen_tc3 = false;
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = fd2,
.expected_revision = 3,
);
err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target2;
detach_fd = fd3;
assert_mprog_count(target, 2);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target2;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 4, "revision");
ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
skel->bss->seen_tc1 = false;
skel->bss->seen_tc2 = false;
skel->bss->seen_tc3 = false;
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE | BPF_F_BEFORE,
.replace_prog_fd = fd3,
.relative_fd = fd1,
.expected_revision = 4,
);
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target2;
detach_fd = fd2;
assert_mprog_count(target, 2);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target2;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = fd2,
);
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
ASSERT_EQ(err, -EEXIST, "prog_attach");
assert_mprog_count(target, 2);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE | BPF_F_AFTER,
.replace_prog_fd = fd2,
.relative_fd = fd1,
.expected_revision = 5,
);
err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
ASSERT_EQ(err, -ERANGE, "prog_attach");
assert_mprog_count(target, 2);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_BEFORE | BPF_F_AFTER | BPF_F_REPLACE,
.replace_prog_fd = fd2,
.relative_fd = fd1,
.expected_revision = 5,
);
err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
ASSERT_EQ(err, -ERANGE, "prog_attach");
assert_mprog_count(target, 2);
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_BEFORE,
.relative_id = id1,
.expected_revision = 5,
);
cleanup_target2:
err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
cleanup_target:
LIBBPF_OPTS_RESET(optd);
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
cleanup:
test_tc_link__destroy(skel);
}
void serial_test_tc_opts_replace(void)
{
test_tc_opts_replace_target(BPF_TCX_INGRESS);
test_tc_opts_replace_target(BPF_TCX_EGRESS);
}
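/* Invalid flag and relative_fd/relative_id combinations must be rejected
 * without modifying the attach point (the program count stays unchanged).
 */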
static void test_tc_opts_invalid_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
__u32 fd1, fd2, id1, id2;
struct test_tc_link *skel;
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_BEFORE | BPF_F_AFTER,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -ERANGE, "prog_attach");
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_BEFORE | BPF_F_ID,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -ENOENT, "prog_attach");
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_AFTER | BPF_F_ID,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -ENOENT, "prog_attach");
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(opta,
.relative_fd = fd2,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -EINVAL, "prog_attach");
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_BEFORE | BPF_F_AFTER,
.relative_fd = fd2,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -ENOENT, "prog_attach");
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_ID,
.relative_id = id2,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -EINVAL, "prog_attach");
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_BEFORE,
.relative_fd = fd1,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -ENOENT, "prog_attach");
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_AFTER,
.relative_fd = fd1,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -ENOENT, "prog_attach");
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(opta);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(opta);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -EEXIST, "prog_attach");
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_BEFORE,
.relative_fd = fd1,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -EEXIST, "prog_attach");
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_AFTER,
.relative_fd = fd1,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -EEXIST, "prog_attach");
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.relative_fd = fd1,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -EINVAL, "prog_attach_x1");
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = fd1,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
ASSERT_EQ(err, -EEXIST, "prog_attach");
assert_mprog_count(target, 1);
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
cleanup:
test_tc_link__destroy(skel);
}
void serial_test_tc_opts_invalid(void)
{
test_tc_opts_invalid_target(BPF_TCX_INGRESS);
test_tc_opts_invalid_target(BPF_TCX_EGRESS);
}
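/* BPF_F_BEFORE without a relative fd/id prepends to the head of the chain,
 * so the most recently attached program runs first.
 */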
static void test_tc_opts_prepend_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
struct test_tc_link *skel;
__u32 prog_ids[5];
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
fd3 = bpf_program__fd(skel->progs.tc3);
fd4 = bpf_program__fd(skel->progs.tc4);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
id3 = id_from_prog_fd(fd3);
id4 = id_from_prog_fd(fd4);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
ASSERT_NEQ(id3, id4, "prog_ids_3_4");
ASSERT_NEQ(id2, id3, "prog_ids_2_3");
assert_mprog_count(target, 0);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_BEFORE,
);
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target;
assert_mprog_count(target, 2);
optq.prog_ids = prog_ids;
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target2;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 3, "revision");
ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_BEFORE,
);
err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target2;
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_BEFORE,
);
err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target3;
assert_mprog_count(target, 4);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target4;
ASSERT_EQ(optq.count, 4, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], id1, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
cleanup_target4:
err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 3);
cleanup_target3:
err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 2);
cleanup_target2:
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
cleanup_target:
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
cleanup:
test_tc_link__destroy(skel);
}
void serial_test_tc_opts_prepend(void)
{
test_tc_opts_prepend_target(BPF_TCX_INGRESS);
test_tc_opts_prepend_target(BPF_TCX_EGRESS);
}
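/* BPF_F_AFTER without a relative fd/id appends to the tail of the chain,
 * so programs execute in attachment order.
 */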
static void test_tc_opts_append_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
struct test_tc_link *skel;
__u32 prog_ids[5];
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
fd3 = bpf_program__fd(skel->progs.tc3);
fd4 = bpf_program__fd(skel->progs.tc4);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
id3 = id_from_prog_fd(fd3);
id4 = id_from_prog_fd(fd4);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
ASSERT_NEQ(id3, id4, "prog_ids_3_4");
ASSERT_NEQ(id2, id3, "prog_ids_2_3");
assert_mprog_count(target, 0);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_AFTER,
);
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target;
assert_mprog_count(target, 2);
optq.prog_ids = prog_ids;
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target2;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 3, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_AFTER,
);
err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target2;
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_AFTER,
);
err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_target3;
assert_mprog_count(target, 4);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup_target4;
ASSERT_EQ(optq.count, 4, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
cleanup_target4:
err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 3);
cleanup_target3:
err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 2);
cleanup_target2:
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
cleanup_target:
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
cleanup:
test_tc_link__destroy(skel);
}
void serial_test_tc_opts_append(void)
{
test_tc_opts_append_target(BPF_TCX_INGRESS);
test_tc_opts_append_target(BPF_TCX_EGRESS);
}
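/* Attach programs to a veth device and then delete the device while they
 * are still attached, exercising kernel-side cleanup of the tcx entries.
 */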
static void test_tc_opts_dev_cleanup_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
struct test_tc_link *skel;
int err, ifindex;
ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
ifindex = if_nametoindex("tcx_opts1");
ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
fd3 = bpf_program__fd(skel->progs.tc3);
fd4 = bpf_program__fd(skel->progs.tc4);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
id3 = id_from_prog_fd(fd3);
id4 = id_from_prog_fd(fd4);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
ASSERT_NEQ(id3, id4, "prog_ids_3_4");
ASSERT_NEQ(id2, id3, "prog_ids_2_3");
assert_mprog_count_ifindex(ifindex, target, 0);
err = bpf_prog_attach_opts(fd1, ifindex, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count_ifindex(ifindex, target, 1);
err = bpf_prog_attach_opts(fd2, ifindex, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup1;
assert_mprog_count_ifindex(ifindex, target, 2);
err = bpf_prog_attach_opts(fd3, ifindex, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup2;
assert_mprog_count_ifindex(ifindex, target, 3);
err = bpf_prog_attach_opts(fd4, ifindex, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup3;
assert_mprog_count_ifindex(ifindex, target, 4);
ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
return;
cleanup3:
err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count_ifindex(ifindex, target, 2);
cleanup2:
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count_ifindex(ifindex, target, 1);
cleanup1:
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count_ifindex(ifindex, target, 0);
cleanup:
test_tc_link__destroy(skel);
ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
}
void serial_test_tc_opts_dev_cleanup(void)
{
test_tc_opts_dev_cleanup_target(BPF_TCX_INGRESS);
test_tc_opts_dev_cleanup_target(BPF_TCX_EGRESS);
}
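/* Mix opts-based (prog fd) and bpf_link-based attachments: BPF_F_REPLACE
 * may only replace another opts-attached entry (-EBUSY for link-owned
 * slots, -EEXIST if the replacement program is already attached), and the
 * query reports link_ids for the link-backed entries.
 */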
static void test_tc_opts_mixed_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
LIBBPF_OPTS(bpf_tcx_opts, optl);
__u32 pid1, pid2, pid3, pid4, lid2, lid4;
__u32 prog_flags[4], link_flags[4];
__u32 prog_ids[4], link_ids[4];
struct test_tc_link *skel;
struct bpf_link *link;
int err, detach_fd;
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
0, "tc3_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
0, "tc4_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
assert_mprog_count(target, 0);
err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1),
loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
detach_fd = bpf_program__fd(skel->progs.tc1);
assert_mprog_count(target, 1);
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup1;
skel->links.tc2 = link;
lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
assert_mprog_count(target, 2);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = bpf_program__fd(skel->progs.tc1),
);
err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2),
loopback, target, &opta);
ASSERT_EQ(err, -EEXIST, "prog_attach");
assert_mprog_count(target, 2);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = bpf_program__fd(skel->progs.tc2),
);
err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1),
loopback, target, &opta);
ASSERT_EQ(err, -EEXIST, "prog_attach");
assert_mprog_count(target, 2);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = bpf_program__fd(skel->progs.tc2),
);
err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3),
loopback, target, &opta);
ASSERT_EQ(err, -EBUSY, "prog_attach");
assert_mprog_count(target, 2);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = bpf_program__fd(skel->progs.tc1),
);
err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3),
loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup1;
detach_fd = bpf_program__fd(skel->progs.tc3);
assert_mprog_count(target, 2);
link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup1;
skel->links.tc4 = link;
lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4));
assert_mprog_count(target, 3);
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = bpf_program__fd(skel->progs.tc4),
);
err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2),
loopback, target, &opta);
ASSERT_EQ(err, -EEXIST, "prog_attach");
optq.prog_ids = prog_ids;
optq.prog_attach_flags = prog_flags;
optq.link_ids = link_ids;
optq.link_attach_flags = link_flags;
memset(prog_ids, 0, sizeof(prog_ids));
memset(prog_flags, 0, sizeof(prog_flags));
memset(link_ids, 0, sizeof(link_ids));
memset(link_flags, 0, sizeof(link_flags));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup1;
ASSERT_EQ(optq.count, 3, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], pid3, "prog_ids[0]");
ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]");
ASSERT_EQ(optq.link_ids[0], 0, "link_ids[0]");
ASSERT_EQ(optq.link_attach_flags[0], 0, "link_flags[0]");
ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]");
ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
ASSERT_EQ(optq.link_attach_flags[1], 0, "link_flags[1]");
ASSERT_EQ(optq.prog_ids[2], pid4, "prog_ids[2]");
ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]");
ASSERT_EQ(optq.link_ids[2], lid4, "link_ids[2]");
ASSERT_EQ(optq.link_attach_flags[2], 0, "link_flags[2]");
ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
ASSERT_EQ(optq.prog_attach_flags[3], 0, "prog_flags[3]");
ASSERT_EQ(optq.link_ids[3], 0, "link_ids[3]");
ASSERT_EQ(optq.link_attach_flags[3], 0, "link_flags[3]");
ASSERT_OK(system(ping_cmd), ping_cmd);
cleanup1:
err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 2);
cleanup:
test_tc_link__destroy(skel);
assert_mprog_count(target, 0);
}
void serial_test_tc_opts_mixed(void)
{
test_tc_opts_mixed_target(BPF_TCX_INGRESS);
test_tc_opts_mixed_target(BPF_TCX_EGRESS);
}
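/* With a link-backed program at the tail, detaching the last entry via
 * BPF_F_AFTER fails with -EBUSY, while detaching the opts-attached head
 * via BPF_F_BEFORE succeeds.
 */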
static void test_tc_opts_demixed_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_tcx_opts, optl);
struct test_tc_link *skel;
struct bpf_link *link;
__u32 pid1, pid2;
int err;
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
assert_mprog_count(target, 0);
err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1),
loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count(target, 1);
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup1;
skel->links.tc2 = link;
assert_mprog_count(target, 2);
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_AFTER,
);
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_EQ(err, -EBUSY, "prog_detach");
assert_mprog_count(target, 2);
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_BEFORE,
);
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
goto cleanup;
cleanup1:
err = bpf_prog_detach_opts(bpf_program__fd(skel->progs.tc1),
loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 2);
cleanup:
test_tc_link__destroy(skel);
assert_mprog_count(target, 0);
}
void serial_test_tc_opts_demixed(void)
{
test_tc_opts_demixed_target(BPF_TCX_INGRESS);
test_tc_opts_demixed_target(BPF_TCX_EGRESS);
}
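/* Detaching with prog fd 0 and BPF_F_BEFORE/BPF_F_AFTER removes the head or
 * tail of the chain; on an empty chain both variants return -ENOENT.
 */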
static void test_tc_opts_detach_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
struct test_tc_link *skel;
__u32 prog_ids[5];
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
fd3 = bpf_program__fd(skel->progs.tc3);
fd4 = bpf_program__fd(skel->progs.tc4);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
id3 = id_from_prog_fd(fd3);
id4 = id_from_prog_fd(fd4);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
ASSERT_NEQ(id3, id4, "prog_ids_3_4");
ASSERT_NEQ(id2, id3, "prog_ids_2_3");
assert_mprog_count(target, 0);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count(target, 1);
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup1;
assert_mprog_count(target, 2);
err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup2;
assert_mprog_count(target, 3);
err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup3;
assert_mprog_count(target, 4);
optq.prog_ids = prog_ids;
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup4;
ASSERT_EQ(optq.count, 4, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_BEFORE,
);
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 3);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup4;
ASSERT_EQ(optq.count, 3, "count");
ASSERT_EQ(optq.revision, 6, "revision");
ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_AFTER,
);
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 2);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup4;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 7, "revision");
ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
LIBBPF_OPTS_RESET(optd);
err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_BEFORE,
);
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_EQ(err, -ENOENT, "prog_detach");
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_AFTER,
);
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_EQ(err, -ENOENT, "prog_detach");
goto cleanup;
cleanup4:
err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 3);
cleanup3:
err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 2);
cleanup2:
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
cleanup1:
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
cleanup:
test_tc_link__destroy(skel);
}
void serial_test_tc_opts_detach(void)
{
test_tc_opts_detach_target(BPF_TCX_INGRESS);
test_tc_opts_detach_target(BPF_TCX_EGRESS);
}
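/* BPF_F_BEFORE detach requires the detached program to sit directly in front
 * of the anchor: a non-adjacent anchor yields -ERANGE, a missing program or
 * anchor -ENOENT, and prog fd 0 removes the entry preceding the anchor (or
 * the head when no anchor is given).
 */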
static void test_tc_opts_detach_before_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
struct test_tc_link *skel;
__u32 prog_ids[5];
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
fd3 = bpf_program__fd(skel->progs.tc3);
fd4 = bpf_program__fd(skel->progs.tc4);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
id3 = id_from_prog_fd(fd3);
id4 = id_from_prog_fd(fd4);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
ASSERT_NEQ(id3, id4, "prog_ids_3_4");
ASSERT_NEQ(id2, id3, "prog_ids_2_3");
assert_mprog_count(target, 0);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count(target, 1);
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup1;
assert_mprog_count(target, 2);
err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup2;
assert_mprog_count(target, 3);
err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup3;
assert_mprog_count(target, 4);
optq.prog_ids = prog_ids;
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup4;
ASSERT_EQ(optq.count, 4, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_BEFORE,
.relative_fd = fd2,
);
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 3);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup4;
ASSERT_EQ(optq.count, 3, "count");
ASSERT_EQ(optq.revision, 6, "revision");
ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_BEFORE,
.relative_fd = fd2,
);
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_EQ(err, -ENOENT, "prog_detach");
assert_mprog_count(target, 3);
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_BEFORE,
.relative_fd = fd4,
);
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_EQ(err, -ERANGE, "prog_detach");
assert_mprog_count(target, 3);
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_BEFORE,
.relative_fd = fd1,
);
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_EQ(err, -ENOENT, "prog_detach");
assert_mprog_count(target, 3);
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_BEFORE,
.relative_fd = fd3,
);
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 2);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup4;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 7, "revision");
ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_BEFORE,
.relative_fd = fd4,
);
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup4;
ASSERT_EQ(optq.count, 1, "count");
ASSERT_EQ(optq.revision, 8, "revision");
ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_BEFORE,
);
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
goto cleanup;
cleanup4:
err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 3);
cleanup3:
err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 2);
cleanup2:
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
cleanup1:
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
cleanup:
test_tc_link__destroy(skel);
}
void serial_test_tc_opts_detach_before(void)
{
test_tc_opts_detach_before_target(BPF_TCX_INGRESS);
test_tc_opts_detach_before_target(BPF_TCX_EGRESS);
}
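/* BPF_F_AFTER detach mirrors the before case: the detached program must
 * directly follow the anchor, otherwise -ERANGE (wrong position) or
 * -ENOENT (already removed) is returned.
 */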
static void test_tc_opts_detach_after_target(int target)
{
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
struct test_tc_link *skel;
__u32 prog_ids[5];
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc1);
fd2 = bpf_program__fd(skel->progs.tc2);
fd3 = bpf_program__fd(skel->progs.tc3);
fd4 = bpf_program__fd(skel->progs.tc4);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
id3 = id_from_prog_fd(fd3);
id4 = id_from_prog_fd(fd4);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
ASSERT_NEQ(id3, id4, "prog_ids_3_4");
ASSERT_NEQ(id2, id3, "prog_ids_2_3");
assert_mprog_count(target, 0);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup;
assert_mprog_count(target, 1);
err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup1;
assert_mprog_count(target, 2);
err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup2;
assert_mprog_count(target, 3);
err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup3;
assert_mprog_count(target, 4);
optq.prog_ids = prog_ids;
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup4;
ASSERT_EQ(optq.count, 4, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_AFTER,
.relative_fd = fd1,
);
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 3);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup4;
ASSERT_EQ(optq.count, 3, "count");
ASSERT_EQ(optq.revision, 6, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]");
ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_AFTER,
.relative_fd = fd1,
);
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_EQ(err, -ENOENT, "prog_detach");
assert_mprog_count(target, 3);
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_AFTER,
.relative_fd = fd4,
);
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_EQ(err, -ERANGE, "prog_detach");
assert_mprog_count(target, 3);
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_AFTER,
.relative_fd = fd3,
);
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_EQ(err, -ERANGE, "prog_detach");
assert_mprog_count(target, 3);
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_AFTER,
.relative_fd = fd1,
);
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_EQ(err, -ERANGE, "prog_detach");
assert_mprog_count(target, 3);
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_AFTER,
.relative_fd = fd1,
);
err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 2);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup4;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 7, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_AFTER,
.relative_fd = fd1,
);
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
memset(prog_ids, 0, sizeof(prog_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup4;
ASSERT_EQ(optq.count, 1, "count");
ASSERT_EQ(optq.revision, 8, "revision");
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
LIBBPF_OPTS_RESET(optd,
.flags = BPF_F_AFTER,
);
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
goto cleanup;
cleanup4:
err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 3);
cleanup3:
err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 2);
cleanup2:
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 1);
cleanup1:
err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
assert_mprog_count(target, 0);
cleanup:
test_tc_link__destroy(skel);
}
void serial_test_tc_opts_detach_after(void)
{
test_tc_opts_detach_after_target(BPF_TCX_INGRESS);
test_tc_opts_detach_after_target(BPF_TCX_EGRESS);
}
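/* Detaching from an empty tcx chain must fail with -ENOENT, both with and
 * without a pre-existing tc qdisc on the hook (chain_tc_old).
 */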
static void test_tc_opts_delete_empty(int target, bool chain_tc_old)
{
LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
int err;
assert_mprog_count(target, 0);
if (chain_tc_old) {
tc_hook.attach_point = target == BPF_TCX_INGRESS ?
BPF_TC_INGRESS : BPF_TC_EGRESS;
err = bpf_tc_hook_create(&tc_hook);
ASSERT_OK(err, "bpf_tc_hook_create");
__assert_mprog_count(target, 0, true, loopback);
}
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_EQ(err, -ENOENT, "prog_detach");
if (chain_tc_old) {
tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
bpf_tc_hook_destroy(&tc_hook);
}
assert_mprog_count(target, 0);
}
void serial_test_tc_opts_delete_empty(void)
{
test_tc_opts_delete_empty(BPF_TCX_INGRESS, false);
test_tc_opts_delete_empty(BPF_TCX_EGRESS, false);
test_tc_opts_delete_empty(BPF_TCX_INGRESS, true);
test_tc_opts_delete_empty(BPF_TCX_EGRESS, true);
}
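/* Mix a legacy tc filter (tc5) with a tcx program on the same hook: with tc6
 * attached only tc6 is seen, after replacing it with tc4 both tc4 and the
 * legacy tc5 run, and once the tcx prog is detached only tc5 keeps running.
 */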
static void test_tc_chain_mixed(int target)
{
LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
__u32 fd1, fd2, fd3, id1, id2, id3;
struct test_tc_link *skel;
int err, detach_fd;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc4);
fd2 = bpf_program__fd(skel->progs.tc5);
fd3 = bpf_program__fd(skel->progs.tc6);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
id3 = id_from_prog_fd(fd3);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
ASSERT_NEQ(id2, id3, "prog_ids_2_3");
assert_mprog_count(target, 0);
tc_hook.attach_point = target == BPF_TCX_INGRESS ?
BPF_TC_INGRESS : BPF_TC_EGRESS;
err = bpf_tc_hook_create(&tc_hook);
err = err == -EEXIST ? 0 : err;
if (!ASSERT_OK(err, "bpf_tc_hook_create"))
goto cleanup;
tc_opts.prog_fd = fd2;
err = bpf_tc_attach(&tc_hook, &tc_opts);
if (!ASSERT_OK(err, "bpf_tc_attach"))
goto cleanup_hook;
err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_filter;
detach_fd = fd3;
assert_mprog_count(target, 1);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");
skel->bss->seen_tc4 = false;
skel->bss->seen_tc5 = false;
skel->bss->seen_tc6 = false;
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = fd3,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_opts;
detach_fd = fd1;
assert_mprog_count(target, 1);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
skel->bss->seen_tc4 = false;
skel->bss->seen_tc5 = false;
skel->bss->seen_tc6 = false;
cleanup_opts:
err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
__assert_mprog_count(target, 0, true, loopback);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
cleanup_filter:
tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
err = bpf_tc_detach(&tc_hook, &tc_opts);
ASSERT_OK(err, "bpf_tc_detach");
cleanup_hook:
tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
bpf_tc_hook_destroy(&tc_hook);
cleanup:
test_tc_link__destroy(skel);
}
void serial_test_tc_opts_chain_mixed(void)
{
test_tc_chain_mixed(BPF_TCX_INGRESS);
test_tc_chain_mixed(BPF_TCX_EGRESS);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/tc_opts.c |
// SPDX-License-Identifier: GPL-2.0
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <regex.h>
#include <test_progs.h>
#include "bpf/btf.h"
#include "bpf_util.h"
#include "linux/filter.h"
#include "disasm.h"
#define MAX_PROG_TEXT_SZ (32 * 1024)
/* The code in this file serves the sole purpose of executing test cases
* specified in the test_cases array. Each test case specifies a program
* type, context field offset, and disassembly patterns that correspond
* to read and write instructions generated by
* verifier.c:convert_ctx_access() for accessing that field.
*
* For each test case, up to three programs are created:
* - One that uses BPF_LDX_MEM to read the context field.
* - One that uses BPF_STX_MEM to write to the context field.
* - One that uses BPF_ST_MEM to write to the context field.
*
* The disassembly of each program is then compared with the pattern
* specified in the test case.
*/
struct test_case {
char *name;
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
int field_offset;
int field_sz;
/* Program generated for BPF_ST_MEM uses value 42 by default,
* this field allows specifying a custom value.
*/
struct {
bool use;
int value;
} st_value;
/* Pattern for BPF_LDX_MEM(field_sz, dst, ctx, field_offset) */
char *read;
/* Pattern for BPF_STX_MEM(field_sz, ctx, src, field_offset) and
* BPF_ST_MEM (field_sz, ctx, src, field_offset)
*/
char *write;
/* Pattern for BPF_ST_MEM(field_sz, ctx, src, field_offset),
* takes priority over `write`.
*/
char *write_st;
/* Pattern for BPF_STX_MEM (field_sz, ctx, src, field_offset),
* takes priority over `write`.
*/
char *write_stx;
};
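/* Fill in the common test_case fields from a context struct and field name */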
#define N(_prog_type, type, field, name_extra...) \
.name = #_prog_type "." #field name_extra, \
.prog_type = BPF_PROG_TYPE_##_prog_type, \
.field_offset = offsetof(type, field), \
.field_sz = sizeof(typeof(((type *)NULL)->field))
static struct test_case test_cases[] = {
/* Sign extension on s390 changes the pattern */
#if defined(__x86_64__) || defined(__aarch64__)
{
N(SCHED_CLS, struct __sk_buff, tstamp),
.read = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);"
"w11 &= 3;"
"if w11 != 0x3 goto pc+2;"
"$dst = 0;"
"goto pc+1;"
"$dst = *(u64 *)($ctx + sk_buff::tstamp);",
.write = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);"
"if w11 & 0x2 goto pc+1;"
"goto pc+2;"
"w11 &= -2;"
"*(u8 *)($ctx + sk_buff::__mono_tc_offset) = r11;"
"*(u64 *)($ctx + sk_buff::tstamp) = $src;",
},
#endif
{
N(SCHED_CLS, struct __sk_buff, priority),
.read = "$dst = *(u32 *)($ctx + sk_buff::priority);",
.write = "*(u32 *)($ctx + sk_buff::priority) = $src;",
},
{
N(SCHED_CLS, struct __sk_buff, mark),
.read = "$dst = *(u32 *)($ctx + sk_buff::mark);",
.write = "*(u32 *)($ctx + sk_buff::mark) = $src;",
},
{
N(SCHED_CLS, struct __sk_buff, cb[0]),
.read = "$dst = *(u32 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::data));",
.write = "*(u32 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::data)) = $src;",
},
{
N(SCHED_CLS, struct __sk_buff, tc_classid),
.read = "$dst = *(u16 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::tc_classid));",
.write = "*(u16 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::tc_classid)) = $src;",
},
{
N(SCHED_CLS, struct __sk_buff, tc_index),
.read = "$dst = *(u16 *)($ctx + sk_buff::tc_index);",
.write = "*(u16 *)($ctx + sk_buff::tc_index) = $src;",
},
{
N(SCHED_CLS, struct __sk_buff, queue_mapping),
.read = "$dst = *(u16 *)($ctx + sk_buff::queue_mapping);",
.write_stx = "if $src >= 0xffff goto pc+1;"
"*(u16 *)($ctx + sk_buff::queue_mapping) = $src;",
.write_st = "*(u16 *)($ctx + sk_buff::queue_mapping) = $src;",
},
{
/* This is a corner case in filter.c:bpf_convert_ctx_access() */
N(SCHED_CLS, struct __sk_buff, queue_mapping, ".ushrt_max"),
.st_value = { true, USHRT_MAX },
.write_st = "goto pc+0;",
},
{
N(CGROUP_SOCK, struct bpf_sock, bound_dev_if),
.read = "$dst = *(u32 *)($ctx + sock_common::skc_bound_dev_if);",
.write = "*(u32 *)($ctx + sock_common::skc_bound_dev_if) = $src;",
},
{
N(CGROUP_SOCK, struct bpf_sock, mark),
.read = "$dst = *(u32 *)($ctx + sock::sk_mark);",
.write = "*(u32 *)($ctx + sock::sk_mark) = $src;",
},
{
N(CGROUP_SOCK, struct bpf_sock, priority),
.read = "$dst = *(u32 *)($ctx + sock::sk_priority);",
.write = "*(u32 *)($ctx + sock::sk_priority) = $src;",
},
{
N(SOCK_OPS, struct bpf_sock_ops, replylong[0]),
.read = "$dst = *(u32 *)($ctx + bpf_sock_ops_kern::replylong);",
.write = "*(u32 *)($ctx + bpf_sock_ops_kern::replylong) = $src;",
},
{
N(CGROUP_SYSCTL, struct bpf_sysctl, file_pos),
#if __BYTE_ORDER == __LITTLE_ENDIAN
.read = "$dst = *(u64 *)($ctx + bpf_sysctl_kern::ppos);"
"$dst = *(u32 *)($dst +0);",
.write = "*(u64 *)($ctx + bpf_sysctl_kern::tmp_reg) = r9;"
"r9 = *(u64 *)($ctx + bpf_sysctl_kern::ppos);"
"*(u32 *)(r9 +0) = $src;"
"r9 = *(u64 *)($ctx + bpf_sysctl_kern::tmp_reg);",
#else
.read = "$dst = *(u64 *)($ctx + bpf_sysctl_kern::ppos);"
"$dst = *(u32 *)($dst +4);",
.write = "*(u64 *)($ctx + bpf_sysctl_kern::tmp_reg) = r9;"
"r9 = *(u64 *)($ctx + bpf_sysctl_kern::ppos);"
"*(u32 *)(r9 +4) = $src;"
"r9 = *(u64 *)($ctx + bpf_sysctl_kern::tmp_reg);",
#endif
},
{
N(CGROUP_SOCKOPT, struct bpf_sockopt, sk),
.read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::sk);",
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
},
{
N(CGROUP_SOCKOPT, struct bpf_sockopt, level),
.read = "$dst = *(u32 *)($ctx + bpf_sockopt_kern::level);",
.write = "*(u32 *)($ctx + bpf_sockopt_kern::level) = $src;",
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
},
{
N(CGROUP_SOCKOPT, struct bpf_sockopt, optname),
.read = "$dst = *(u32 *)($ctx + bpf_sockopt_kern::optname);",
.write = "*(u32 *)($ctx + bpf_sockopt_kern::optname) = $src;",
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
},
{
N(CGROUP_SOCKOPT, struct bpf_sockopt, optlen),
.read = "$dst = *(u32 *)($ctx + bpf_sockopt_kern::optlen);",
.write = "*(u32 *)($ctx + bpf_sockopt_kern::optlen) = $src;",
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
},
{
N(CGROUP_SOCKOPT, struct bpf_sockopt, retval),
.read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::current_task);"
"$dst = *(u64 *)($dst + task_struct::bpf_ctx);"
"$dst = *(u32 *)($dst + bpf_cg_run_ctx::retval);",
.write = "*(u64 *)($ctx + bpf_sockopt_kern::tmp_reg) = r9;"
"r9 = *(u64 *)($ctx + bpf_sockopt_kern::current_task);"
"r9 = *(u64 *)(r9 + task_struct::bpf_ctx);"
"*(u32 *)(r9 + bpf_cg_run_ctx::retval) = $src;"
"r9 = *(u64 *)($ctx + bpf_sockopt_kern::tmp_reg);",
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
},
{
N(CGROUP_SOCKOPT, struct bpf_sockopt, optval),
.read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::optval);",
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
},
{
N(CGROUP_SOCKOPT, struct bpf_sockopt, optval_end),
.read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::optval_end);",
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
},
};
#undef N
static regex_t *ident_regex;
static regex_t *field_regex;
static char *skip_space(char *str)
{
while (*str && isspace(*str))
++str;
return str;
}
static char *skip_space_and_semi(char *str)
{
while (*str && (isspace(*str) || *str == ';'))
++str;
return str;
}
static char *match_str(char *str, char *prefix)
{
while (*str && *prefix && *str == *prefix) {
++str;
++prefix;
}
if (*prefix)
return NULL;
return str;
}
static char *match_number(char *str, int num)
{
char *next;
int snum = strtol(str, &next, 10);
if (next - str == 0 || num != snum)
return NULL;
return next;
}
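/* Recursively search struct/union `btf_id` for `field_name`, descending into
 * anonymous members; returns the field's byte offset or -1 if not found.
 */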
static int find_field_offset_aux(struct btf *btf, int btf_id, char *field_name, int off)
{
const struct btf_type *type = btf__type_by_id(btf, btf_id);
const struct btf_member *m;
__u16 mnum;
int i;
if (!type) {
PRINT_FAIL("Can't find btf_type for id %d\n", btf_id);
return -1;
}
if (!btf_is_struct(type) && !btf_is_union(type)) {
PRINT_FAIL("BTF id %d is not struct or union\n", btf_id);
return -1;
}
m = btf_members(type);
mnum = btf_vlen(type);
for (i = 0; i < mnum; ++i, ++m) {
const char *mname = btf__name_by_offset(btf, m->name_off);
if (strcmp(mname, "") == 0) {
int msize = find_field_offset_aux(btf, m->type, field_name,
off + m->offset);
if (msize >= 0)
return msize;
}
if (strcmp(mname, field_name))
continue;
return (off + m->offset) / 8;
}
return -1;
}
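/* Resolve a "<type>::<field>" reference captured by `field_regex` into a
 * byte offset using kernel BTF.
 */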
static int find_field_offset(struct btf *btf, char *pattern, regmatch_t *matches)
{
int type_sz = matches[1].rm_eo - matches[1].rm_so;
int field_sz = matches[2].rm_eo - matches[2].rm_so;
char *type = pattern + matches[1].rm_so;
char *field = pattern + matches[2].rm_so;
char field_str[128] = {};
char type_str[128] = {};
int btf_id, field_offset;
if (type_sz >= sizeof(type_str)) {
PRINT_FAIL("Malformed pattern: type ident is too long: %d\n", type_sz);
return -1;
}
if (field_sz >= sizeof(field_str)) {
PRINT_FAIL("Malformed pattern: field ident is too long: %d\n", field_sz);
return -1;
}
strncpy(type_str, type, type_sz);
strncpy(field_str, field, field_sz);
btf_id = btf__find_by_name(btf, type_str);
if (btf_id < 0) {
PRINT_FAIL("No BTF info for type %s\n", type_str);
return -1;
}
field_offset = find_field_offset_aux(btf, btf_id, field_str, 0);
if (field_offset < 0) {
PRINT_FAIL("No BTF info for field %s::%s\n", type_str, field_str);
return -1;
}
return field_offset;
}
static regex_t *compile_regex(char *pat)
{
regex_t *re;
int err;
re = malloc(sizeof(regex_t));
if (!re) {
PRINT_FAIL("Can't alloc regex\n");
return NULL;
}
err = regcomp(re, pat, REG_EXTENDED);
if (err) {
char errbuf[512];
regerror(err, re, errbuf, sizeof(errbuf));
PRINT_FAIL("Can't compile regex: %s\n", errbuf);
free(re);
return NULL;
}
return re;
}
static void free_regex(regex_t *re)
{
if (!re)
return;
regfree(re);
free(re);
}
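/* Longest line length in `str`, capped at 60 columns; used to align the
 * pattern column in print_match_error().
 */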
static u32 max_line_len(char *str)
{
u32 max_line = 0;
char *next = str;
while (next) {
next = strchr(str, '\n');
if (next) {
max_line = max_t(u32, max_line, (next - str));
str = next + 1;
} else {
max_line = max_t(u32, max_line, strlen(str));
}
}
return min(max_line, 60u);
}
/* Print strings `pattern_origin` and `text_origin` side by side,
 * assuming `pattern_pos` and `text_pos` designate the location within the
 * corresponding origin string where the match diverges.
* The output should look like:
*
* Can't match disassembly(left) with pattern(right):
* r2 = *(u64 *)(r1 +0) ; $dst = *(u64 *)($ctx + bpf_sockopt_kern::sk1)
* ^ ^
* r0 = 0 ;
* exit ;
*/
static void print_match_error(FILE *out,
char *pattern_origin, char *text_origin,
char *pattern_pos, char *text_pos)
{
char *pattern = pattern_origin;
char *text = text_origin;
int middle = max_line_len(text) + 2;
fprintf(out, "Can't match disassembly(left) with pattern(right):\n");
while (*pattern || *text) {
int column = 0;
int mark1 = -1;
int mark2 = -1;
/* Print one line from text */
while (*text && *text != '\n') {
if (text == text_pos)
mark1 = column;
fputc(*text, out);
++text;
++column;
}
if (text == text_pos)
mark1 = column;
/* Pad to the middle */
while (column < middle) {
fputc(' ', out);
++column;
}
fputs("; ", out);
column += 3;
/* Print one line from pattern, pattern lines are terminated by ';' */
while (*pattern && *pattern != ';') {
if (pattern == pattern_pos)
mark2 = column;
fputc(*pattern, out);
++pattern;
++column;
}
if (pattern == pattern_pos)
mark2 = column;
fputc('\n', out);
if (*pattern)
++pattern;
if (*text)
++text;
/* If pattern and text diverge at this line, print an
* additional line with '^' marks, highlighting
* positions where match fails.
*/
if (mark1 > 0 || mark2 > 0) {
for (column = 0; column <= max(mark1, mark2); ++column) {
if (column == mark1 || column == mark2)
fputc('^', out);
else
fputc(' ', out);
}
fputc('\n', out);
}
}
}
/* Test if `text` matches `pattern`. Pattern consists of the following elements:
*
* - Field offset references:
*
* <type>::<field>
*
 * When such a reference is encountered, BTF is used to compute the numerical
 * value of the offset of <field> in <type>. The `text` is expected to
 * contain the matching numerical value.
*
* - Field groups:
*
* $(<type>::<field> [+ <type>::<field>]*)
*
 * Allows specifying an offset that is the sum of multiple field offsets.
 * The `text` is expected to contain the matching numerical value.
*
* - Variable references, e.g. `$src`, `$dst`, `$ctx`.
* These are substitutions specified in `reg_map` array.
* If a substring of pattern is equal to `reg_map[i][0]` the `text` is
* expected to contain `reg_map[i][1]` in the matching position.
*
* - Whitespace is ignored, ';' counts as whitespace for `pattern`.
*
 * - Any other characters in `pattern` and `text` must match one-to-one.
*
* Example of a pattern:
*
* __________ fields group ________________
* ' '
* *(u16 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::tc_classid)) = $src;
* ^^^^ '______________________'
* variable reference field offset reference
*/
static bool match_pattern(struct btf *btf, char *pattern, char *text, char *reg_map[][2])
{
char *pattern_origin = pattern;
char *text_origin = text;
regmatch_t matches[3];
_continue:
while (*pattern) {
if (!*text)
goto err;
/* Skip whitespace */
if (isspace(*pattern) || *pattern == ';') {
if (!isspace(*text) && text != text_origin && isalnum(text[-1]))
goto err;
pattern = skip_space_and_semi(pattern);
text = skip_space(text);
continue;
}
/* Check for variable references */
for (int i = 0; reg_map[i][0]; ++i) {
char *pattern_next, *text_next;
pattern_next = match_str(pattern, reg_map[i][0]);
if (!pattern_next)
continue;
text_next = match_str(text, reg_map[i][1]);
if (!text_next)
goto err;
pattern = pattern_next;
text = text_next;
goto _continue;
}
/* Match field group:
* $(sk_buff::cb + qdisc_skb_cb::tc_classid)
*/
if (strncmp(pattern, "$(", 2) == 0) {
char *group_start = pattern, *text_next;
int acc_offset = 0;
pattern += 2;
for (;;) {
int field_offset;
pattern = skip_space(pattern);
if (!*pattern) {
PRINT_FAIL("Unexpected end of pattern\n");
goto err;
}
if (*pattern == ')') {
++pattern;
break;
}
if (*pattern == '+') {
++pattern;
continue;
}
if (regexec(field_regex, pattern, 3, matches, 0) != 0) {
PRINT_FAIL("Field reference expected\n");
goto err;
}
field_offset = find_field_offset(btf, pattern, matches);
if (field_offset < 0)
goto err;
pattern += matches[0].rm_eo;
acc_offset += field_offset;
}
text_next = match_number(text, acc_offset);
if (!text_next) {
PRINT_FAIL("No match for group offset %.*s (%d)\n",
(int)(pattern - group_start),
group_start,
acc_offset);
goto err;
}
text = text_next;
}
/* Match field reference:
* sk_buff::cb
*/
if (regexec(field_regex, pattern, 3, matches, 0) == 0) {
int field_offset;
char *text_next;
field_offset = find_field_offset(btf, pattern, matches);
if (field_offset < 0)
goto err;
text_next = match_number(text, field_offset);
if (!text_next) {
PRINT_FAIL("No match for field offset %.*s (%d)\n",
(int)matches[0].rm_eo, pattern, field_offset);
goto err;
}
pattern += matches[0].rm_eo;
text = text_next;
continue;
}
/* If pattern points to identifier not followed by '::'
* skip the identifier to avoid n^2 application of the
* field reference rule.
*/
if (regexec(ident_regex, pattern, 1, matches, 0) == 0) {
if (strncmp(pattern, text, matches[0].rm_eo) != 0)
goto err;
pattern += matches[0].rm_eo;
text += matches[0].rm_eo;
continue;
}
/* Match literally */
if (*pattern != *text)
goto err;
++pattern;
++text;
}
return true;
err:
test__fail();
print_match_error(stdout, pattern_origin, text_origin, pattern, text);
return false;
}
/* Request BPF program instructions after all rewrites are applied,
* e.g. verifier.c:convert_ctx_access() is done.
*/
static int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt)
{
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
__u32 xlated_prog_len;
__u32 buf_element_size = sizeof(struct bpf_insn);
if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
perror("bpf_prog_get_info_by_fd failed");
return -1;
}
xlated_prog_len = info.xlated_prog_len;
if (xlated_prog_len % buf_element_size) {
printf("Program length %d is not multiple of %d\n",
xlated_prog_len, buf_element_size);
return -1;
}
*cnt = xlated_prog_len / buf_element_size;
*buf = calloc(*cnt, buf_element_size);
if (!*buf) {
perror("can't allocate xlated program buffer");
return -ENOMEM;
}
bzero(&info, sizeof(info));
info.xlated_prog_len = xlated_prog_len;
info.xlated_prog_insns = (__u64)(unsigned long)*buf;
if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
perror("second bpf_prog_get_info_by_fd failed");
goto out_free_buf;
}
return 0;
out_free_buf:
free(*buf);
return -1;
}
static void print_insn(void *private_data, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vfprintf((FILE *)private_data, fmt, args);
va_end(args);
}
/* Disassemble instructions to a stream */
static void print_xlated(FILE *out, struct bpf_insn *insn, __u32 len)
{
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn,
.cb_call = NULL,
.cb_imm = NULL,
.private_data = out,
};
bool double_insn = false;
int i;
for (i = 0; i < len; i++) {
if (double_insn) {
double_insn = false;
continue;
}
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
print_bpf_insn(&cbs, insn + i, true);
}
}
/* We share code with the kernel BPF disassembler, which adds a '(FF) ' prefix
 * to each instruction (FF stands for the instruction `code` byte).
 * This function removes the prefix in place for each line in `str`.
*/
static void remove_insn_prefix(char *str, int size)
{
const int prefix_size = 5;
int write_pos = 0, read_pos = prefix_size;
int len = strlen(str);
char c;
size = min(size, len);
while (read_pos < size) {
c = str[read_pos++];
if (c == 0)
break;
str[write_pos++] = c;
if (c == '\n')
read_pos += prefix_size;
}
str[write_pos] = 0;
}
struct prog_info {
char *prog_kind;
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
struct bpf_insn *prog;
u32 prog_len;
};
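/* Load the test program, dump its xlated (post-verifier-rewrite) instructions
 * and match their disassembly against `pattern`.
 */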
static void match_program(struct btf *btf,
struct prog_info *pinfo,
char *pattern,
char *reg_map[][2],
bool skip_first_insn)
{
struct bpf_insn *buf = NULL;
int err = 0, prog_fd = 0;
FILE *prog_out = NULL;
char *text = NULL;
__u32 cnt = 0;
text = calloc(MAX_PROG_TEXT_SZ, 1);
if (!text) {
PRINT_FAIL("Can't allocate %d bytes\n", MAX_PROG_TEXT_SZ);
goto out;
}
// TODO: log level
LIBBPF_OPTS(bpf_prog_load_opts, opts);
opts.log_buf = text;
opts.log_size = MAX_PROG_TEXT_SZ;
opts.log_level = 1 | 2 | 4;
opts.expected_attach_type = pinfo->expected_attach_type;
prog_fd = bpf_prog_load(pinfo->prog_type, NULL, "GPL",
pinfo->prog, pinfo->prog_len, &opts);
if (prog_fd < 0) {
PRINT_FAIL("Can't load program, errno %d (%s), verifier log:\n%s\n",
errno, strerror(errno), text);
goto out;
}
memset(text, 0, MAX_PROG_TEXT_SZ);
err = get_xlated_program(prog_fd, &buf, &cnt);
if (err) {
PRINT_FAIL("Can't load back BPF program\n");
goto out;
}
prog_out = fmemopen(text, MAX_PROG_TEXT_SZ - 1, "w");
if (!prog_out) {
PRINT_FAIL("Can't open memory stream\n");
goto out;
}
if (skip_first_insn)
print_xlated(prog_out, buf + 1, cnt - 1);
else
print_xlated(prog_out, buf, cnt);
fclose(prog_out);
remove_insn_prefix(text, MAX_PROG_TEXT_SZ);
ASSERT_TRUE(match_pattern(btf, pattern, text, reg_map),
pinfo->prog_kind);
out:
if (prog_fd > 0)
close(prog_fd);
free(buf);
free(text);
}
static void run_one_testcase(struct btf *btf, struct test_case *test)
{
struct prog_info pinfo = {};
int bpf_sz;
if (!test__start_subtest(test->name))
return;
switch (test->field_sz) {
case 8:
bpf_sz = BPF_DW;
break;
case 4:
bpf_sz = BPF_W;
break;
case 2:
bpf_sz = BPF_H;
break;
case 1:
bpf_sz = BPF_B;
break;
default:
PRINT_FAIL("Unexpected field size: %d, want 8,4,2 or 1\n", test->field_sz);
return;
}
pinfo.prog_type = test->prog_type;
pinfo.expected_attach_type = test->expected_attach_type;
if (test->read) {
struct bpf_insn ldx_prog[] = {
BPF_LDX_MEM(bpf_sz, BPF_REG_2, BPF_REG_1, test->field_offset),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
char *reg_map[][2] = {
{ "$ctx", "r1" },
{ "$dst", "r2" },
{}
};
pinfo.prog_kind = "LDX";
pinfo.prog = ldx_prog;
pinfo.prog_len = ARRAY_SIZE(ldx_prog);
match_program(btf, &pinfo, test->read, reg_map, false);
}
if (test->write || test->write_st || test->write_stx) {
struct bpf_insn stx_prog[] = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_STX_MEM(bpf_sz, BPF_REG_1, BPF_REG_2, test->field_offset),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
char *stx_reg_map[][2] = {
{ "$ctx", "r1" },
{ "$src", "r2" },
{}
};
struct bpf_insn st_prog[] = {
BPF_ST_MEM(bpf_sz, BPF_REG_1, test->field_offset,
test->st_value.use ? test->st_value.value : 42),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
char *st_reg_map[][2] = {
{ "$ctx", "r1" },
{ "$src", "42" },
{}
};
if (test->write || test->write_stx) {
char *pattern = test->write_stx ? test->write_stx : test->write;
pinfo.prog_kind = "STX";
pinfo.prog = stx_prog;
pinfo.prog_len = ARRAY_SIZE(stx_prog);
match_program(btf, &pinfo, pattern, stx_reg_map, true);
}
if (test->write || test->write_st) {
char *pattern = test->write_st ? test->write_st : test->write;
pinfo.prog_kind = "ST";
pinfo.prog = st_prog;
pinfo.prog_len = ARRAY_SIZE(st_prog);
match_program(btf, &pinfo, pattern, st_reg_map, false);
}
}
test__end_subtest();
}
void test_ctx_rewrite(void)
{
struct btf *btf;
int i;
field_regex = compile_regex("^([[:alpha:]_][[:alnum:]_]+)::([[:alpha:]_][[:alnum:]_]+)");
ident_regex = compile_regex("^[[:alpha:]_][[:alnum:]_]+");
if (!field_regex || !ident_regex)
return;
btf = btf__load_vmlinux_btf();
if (!btf) {
PRINT_FAIL("Can't load vmlinux BTF, errno %d (%s)\n", errno, strerror(errno));
goto out;
}
for (i = 0; i < ARRAY_SIZE(test_cases); ++i)
run_one_testcase(btf, &test_cases[i]);
out:
btf__free(btf);
free_regex(field_regex);
free_regex(ident_regex);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include <error.h>
#include <netinet/tcp.h>
#include <sys/epoll.h>
#include "test_progs.h"
#include "test_skmsg_load_helpers.skel.h"
#include "test_sockmap_update.skel.h"
#include "test_sockmap_invalid_update.skel.h"
#include "test_sockmap_skb_verdict_attach.skel.h"
#include "test_sockmap_progs_query.skel.h"
#include "test_sockmap_pass_prog.skel.h"
#include "test_sockmap_drop_prog.skel.h"
#include "bpf_iter_sockmap.skel.h"
#include "sockmap_helpers.h"
#define TCP_REPAIR 19 /* TCP sock is under repair right now */
#define TCP_REPAIR_ON 1
#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
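/* Use TCP_REPAIR mode so connect() succeeds without a live peer, giving us an
 * established-looking TCP socket that can be inserted into a sockmap.
 */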
static int connected_socket_v4(void)
{
struct sockaddr_in addr = {
.sin_family = AF_INET,
.sin_port = htons(80),
.sin_addr = { inet_addr("127.0.0.1") },
};
socklen_t len = sizeof(addr);
int s, repair, err;
s = socket(AF_INET, SOCK_STREAM, 0);
if (!ASSERT_GE(s, 0, "socket"))
goto error;
repair = TCP_REPAIR_ON;
err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
goto error;
err = connect(s, (struct sockaddr *)&addr, len);
if (!ASSERT_OK(err, "connect"))
goto error;
repair = TCP_REPAIR_OFF_NO_WP;
err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
goto error;
return s;
error:
perror(__func__);
close(s);
return -1;
}
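/* Check that `dst` mirrors `src`: every index must yield the same socket
 * cookie, and indices missing from `src` must also be missing from `dst`.
 */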
static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
{
__u32 i, max_entries = bpf_map__max_entries(src);
int err, src_fd, dst_fd;
src_fd = bpf_map__fd(src);
dst_fd = bpf_map__fd(dst);
for (i = 0; i < max_entries; i++) {
__u64 src_cookie, dst_cookie;
err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
if (err && errno == ENOENT) {
err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
ASSERT_ERR(err, "map_lookup_elem(dst)");
ASSERT_EQ(errno, ENOENT, "map_lookup_elem(dst)");
continue;
}
if (!ASSERT_OK(err, "lookup_elem(src)"))
continue;
err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
if (!ASSERT_OK(err, "lookup_elem(dst)"))
continue;
ASSERT_EQ(dst_cookie, src_cookie, "cookie mismatch");
}
}
/* Create a map, populate it with one socket, and free the map. */
static void test_sockmap_create_update_free(enum bpf_map_type map_type)
{
const int zero = 0;
int s, map, err;
s = connected_socket_v4();
if (!ASSERT_GE(s, 0, "connected_socket_v4"))
return;
map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
if (!ASSERT_GE(map, 0, "bpf_map_create"))
goto out;
err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
if (!ASSERT_OK(err, "bpf_map_update"))
goto out;
out:
close(map);
close(s);
}
static void test_skmsg_helpers(enum bpf_map_type map_type)
{
struct test_skmsg_load_helpers *skel;
int err, map, verdict;
skel = test_skmsg_load_helpers__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
return;
verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
map = bpf_map__fd(skel->maps.sock_map);
err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
if (!ASSERT_OK(err, "bpf_prog_attach"))
goto out;
err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT);
if (!ASSERT_OK(err, "bpf_prog_detach2"))
goto out;
out:
test_skmsg_load_helpers__destroy(skel);
}
static void test_sockmap_update(enum bpf_map_type map_type)
{
int err, prog, src;
struct test_sockmap_update *skel;
struct bpf_map *dst_map;
const __u32 zero = 0;
char dummy[14] = {0};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = dummy,
.data_size_in = sizeof(dummy),
.repeat = 1,
);
__s64 sk;
sk = connected_socket_v4();
if (!ASSERT_NEQ(sk, -1, "connected_socket_v4"))
return;
skel = test_sockmap_update__open_and_load();
if (!ASSERT_OK_PTR(skel, "open_and_load"))
goto close_sk;
prog = bpf_program__fd(skel->progs.copy_sock_map);
src = bpf_map__fd(skel->maps.src);
if (map_type == BPF_MAP_TYPE_SOCKMAP)
dst_map = skel->maps.dst_sock_map;
else
dst_map = skel->maps.dst_sock_hash;
err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
if (!ASSERT_OK(err, "update_elem(src)"))
goto out;
err = bpf_prog_test_run_opts(prog, &topts);
if (!ASSERT_OK(err, "test_run"))
goto out;
if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
goto out;
compare_cookies(skel->maps.src, dst_map);
out:
test_sockmap_update__destroy(skel);
close_sk:
close(sk);
}
static void test_sockmap_invalid_update(void)
{
struct test_sockmap_invalid_update *skel;
skel = test_sockmap_invalid_update__open_and_load();
if (!ASSERT_NULL(skel, "open_and_load"))
test_sockmap_invalid_update__destroy(skel);
}
static void test_sockmap_copy(enum bpf_map_type map_type)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
int err, len, src_fd, iter_fd;
union bpf_iter_link_info linfo = {};
__u32 i, num_sockets, num_elems;
struct bpf_iter_sockmap *skel;
__s64 *sock_fd = NULL;
struct bpf_link *link;
struct bpf_map *src;
char buf[64];
skel = bpf_iter_sockmap__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
return;
if (map_type == BPF_MAP_TYPE_SOCKMAP) {
src = skel->maps.sockmap;
num_elems = bpf_map__max_entries(src);
num_sockets = num_elems - 1;
} else {
src = skel->maps.sockhash;
num_elems = bpf_map__max_entries(src) - 1;
num_sockets = num_elems;
}
sock_fd = calloc(num_sockets, sizeof(*sock_fd));
if (!ASSERT_OK_PTR(sock_fd, "calloc(sock_fd)"))
goto out;
for (i = 0; i < num_sockets; i++)
sock_fd[i] = -1;
src_fd = bpf_map__fd(src);
for (i = 0; i < num_sockets; i++) {
sock_fd[i] = connected_socket_v4();
if (!ASSERT_NEQ(sock_fd[i], -1, "connected_socket_v4"))
goto out;
err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
if (!ASSERT_OK(err, "map_update"))
goto out;
}
linfo.map.map_fd = src_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.copy, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* drain the iterator output so the copy program visits every entry */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
if (!ASSERT_EQ(skel->bss->elems, num_elems, "elems"))
goto close_iter;
if (!ASSERT_EQ(skel->bss->socks, num_sockets, "socks"))
goto close_iter;
compare_cookies(src, skel->maps.dst);
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
for (i = 0; sock_fd && i < num_sockets; i++)
if (sock_fd[i] >= 0)
close(sock_fd[i]);
if (sock_fd)
free(sock_fd);
bpf_iter_sockmap__destroy(skel);
}
static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
enum bpf_attach_type second)
{
struct test_sockmap_skb_verdict_attach *skel;
int err, map, verdict;
skel = test_sockmap_skb_verdict_attach__open_and_load();
if (!ASSERT_OK_PTR(skel, "open_and_load"))
return;
verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
map = bpf_map__fd(skel->maps.sock_map);
err = bpf_prog_attach(verdict, map, first, 0);
if (!ASSERT_OK(err, "bpf_prog_attach"))
goto out;
err = bpf_prog_attach(verdict, map, second, 0);
ASSERT_EQ(err, -EBUSY, "prog_attach_fail");
err = bpf_prog_detach2(verdict, map, first);
if (!ASSERT_OK(err, "bpf_prog_detach2"))
goto out;
out:
test_sockmap_skb_verdict_attach__destroy(skel);
}
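/* Return the program ID for a given prog fd, or 0 on failure */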
static __u32 query_prog_id(int prog_fd)
{
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
int err;
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
!ASSERT_EQ(info_len, sizeof(info), "bpf_prog_get_info_by_fd"))
return 0;
return info.id;
}
static void test_sockmap_progs_query(enum bpf_attach_type attach_type)
{
struct test_sockmap_progs_query *skel;
int err, map_fd, verdict_fd;
__u32 attach_flags = 0;
__u32 prog_ids[3] = {};
__u32 prog_cnt = 3;
skel = test_sockmap_progs_query__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load"))
return;
map_fd = bpf_map__fd(skel->maps.sock_map);
if (attach_type == BPF_SK_MSG_VERDICT)
verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict);
else
verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict);
err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
&attach_flags, prog_ids, &prog_cnt);
ASSERT_OK(err, "bpf_prog_query failed");
ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
ASSERT_EQ(prog_cnt, 0, "wrong program count on query");
err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0);
if (!ASSERT_OK(err, "bpf_prog_attach failed"))
goto out;
prog_cnt = 1;
err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
&attach_flags, prog_ids, &prog_cnt);
ASSERT_OK(err, "bpf_prog_query failed");
ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
ASSERT_EQ(prog_cnt, 1, "wrong program count on query");
ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd),
"wrong prog_ids on query");
bpf_prog_detach2(verdict_fd, map_fd, attach_type);
out:
test_sockmap_progs_query__destroy(skel);
}
#define MAX_EVENTS 10
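/* A peer shutdown (FIN) must wake up epoll on the sockmap socket and make
 * recv() return 0 (EOF) even with a stream verdict program attached.
 */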
static void test_sockmap_skb_verdict_shutdown(void)
{
struct epoll_event ev, events[MAX_EVENTS];
int n, err, map, verdict, s, c1, p1;
struct test_sockmap_pass_prog *skel;
int epollfd;
int zero = 0;
char b;
skel = test_sockmap_pass_prog__open_and_load();
if (!ASSERT_OK_PTR(skel, "open_and_load"))
return;
verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
map = bpf_map__fd(skel->maps.sock_map_rx);
err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
if (!ASSERT_OK(err, "bpf_prog_attach"))
goto out;
s = socket_loopback(AF_INET, SOCK_STREAM);
if (s < 0)
goto out;
err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
if (err < 0)
goto out;
err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
if (err < 0)
goto out_close;
shutdown(p1, SHUT_WR);
ev.events = EPOLLIN;
ev.data.fd = c1;
epollfd = epoll_create1(0);
if (!ASSERT_GT(epollfd, -1, "epoll_create(0)"))
goto out_close;
err = epoll_ctl(epollfd, EPOLL_CTL_ADD, c1, &ev);
if (!ASSERT_OK(err, "epoll_ctl(EPOLL_CTL_ADD)"))
goto out_close;
err = epoll_wait(epollfd, events, MAX_EVENTS, -1);
if (!ASSERT_EQ(err, 1, "epoll_wait(fd)"))
goto out_close;
n = recv(c1, &b, 1, SOCK_NONBLOCK);
ASSERT_EQ(n, 0, "recv_timeout(fin)");
out_close:
close(c1);
close(p1);
out:
test_sockmap_pass_prog__destroy(skel);
}
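/* FIONREAD must report the queued bytes when the verdict program passes data
 * to the socket and zero when it drops the data.
 */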
static void test_sockmap_skb_verdict_fionread(bool pass_prog)
{
int expected, zero = 0, sent, recvd, avail;
int err, map, verdict, s, c0, c1, p0, p1;
struct test_sockmap_pass_prog *pass;
struct test_sockmap_drop_prog *drop;
char buf[256] = "0123456789";
if (pass_prog) {
pass = test_sockmap_pass_prog__open_and_load();
if (!ASSERT_OK_PTR(pass, "open_and_load"))
return;
verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
map = bpf_map__fd(pass->maps.sock_map_rx);
expected = sizeof(buf);
} else {
drop = test_sockmap_drop_prog__open_and_load();
if (!ASSERT_OK_PTR(drop, "open_and_load"))
return;
verdict = bpf_program__fd(drop->progs.prog_skb_verdict);
map = bpf_map__fd(drop->maps.sock_map_rx);
/* On drop, data is consumed immediately and copied_seq is advanced */
expected = 0;
}
err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
if (!ASSERT_OK(err, "bpf_prog_attach"))
goto out;
s = socket_loopback(AF_INET, SOCK_STREAM);
if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
goto out;
err = create_socket_pairs(s, AF_INET, SOCK_STREAM, &c0, &c1, &p0, &p1);
if (!ASSERT_OK(err, "create_socket_pairs(s)"))
goto out;
err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
goto out_close;
sent = xsend(p1, &buf, sizeof(buf), 0);
ASSERT_EQ(sent, sizeof(buf), "xsend(p0)");
err = ioctl(c1, FIONREAD, &avail);
ASSERT_OK(err, "ioctl(FIONREAD) error");
ASSERT_EQ(avail, expected, "ioctl(FIONREAD)");
/* In the drop case there is no data left to read */
if (pass_prog) {
recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC);
ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)");
}
out_close:
close(c0);
close(p0);
close(c1);
close(p1);
out:
if (pass_prog)
test_sockmap_pass_prog__destroy(pass);
else
test_sockmap_drop_prog__destroy(drop);
}
void test_sockmap_basic(void)
{
if (test__start_subtest("sockmap create_update_free"))
test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash create_update_free"))
test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
if (test__start_subtest("sockmap sk_msg load helpers"))
test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash sk_msg load helpers"))
test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
if (test__start_subtest("sockmap update"))
test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash update"))
test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
if (test__start_subtest("sockmap update in unsafe context"))
test_sockmap_invalid_update();
if (test__start_subtest("sockmap copy"))
test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash copy"))
test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
if (test__start_subtest("sockmap skb_verdict attach")) {
test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT,
BPF_SK_SKB_STREAM_VERDICT);
test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
BPF_SK_SKB_VERDICT);
}
if (test__start_subtest("sockmap msg_verdict progs query"))
test_sockmap_progs_query(BPF_SK_MSG_VERDICT);
if (test__start_subtest("sockmap stream_parser progs query"))
test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER);
if (test__start_subtest("sockmap stream_verdict progs query"))
test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT);
if (test__start_subtest("sockmap skb_verdict progs query"))
test_sockmap_progs_query(BPF_SK_SKB_VERDICT);
if (test__start_subtest("sockmap skb_verdict shutdown"))
test_sockmap_skb_verdict_shutdown();
if (test__start_subtest("sockmap skb_verdict fionread"))
test_sockmap_skb_verdict_fionread(true);
if (test__start_subtest("sockmap skb_verdict fionread on drop"))
test_sockmap_skb_verdict_fionread(false);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sockmap_basic.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <time.h>
#include "test_varlen.skel.h"
#define CHECK_VAL(got, exp) \
CHECK((got) != (exp), "check", "got %ld != exp %ld\n", \
(long)(got), (long)(exp))
void test_varlen(void)
{
int duration = 0, err;
struct test_varlen *skel;
struct test_varlen__bss *bss;
struct test_varlen__data *data;
const char str1[] = "Hello, ";
const char str2[] = "World!";
const char exp_str[] = "Hello, \0World!\0";
const int size1 = sizeof(str1);
const int size2 = sizeof(str2);
skel = test_varlen__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
bss = skel->bss;
data = skel->data;
err = test_varlen__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
bss->test_pid = getpid();
/* trigger everything */
memcpy(bss->buf_in1, str1, size1);
memcpy(bss->buf_in2, str2, size2);
bss->capture = true;
usleep(1);
bss->capture = false;
CHECK_VAL(bss->payload1_len1, size1);
CHECK_VAL(bss->payload1_len2, size2);
CHECK_VAL(bss->total1, size1 + size2);
CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check",
"doesn't match!\n");
CHECK_VAL(data->payload2_len1, size1);
CHECK_VAL(data->payload2_len2, size2);
CHECK_VAL(data->total2, size1 + size2);
CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check",
"doesn't match!\n");
CHECK_VAL(data->payload3_len1, size1);
CHECK_VAL(data->payload3_len2, size2);
CHECK_VAL(data->total3, size1 + size2);
CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check",
"doesn't match!\n");
CHECK_VAL(data->payload4_len1, size1);
CHECK_VAL(data->payload4_len2, size2);
CHECK_VAL(data->total4, size1 + size2);
CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
"doesn't match!\n");
CHECK_VAL(bss->ret_bad_read, -EFAULT);
CHECK_VAL(data->payload_bad[0], 0x42);
CHECK_VAL(data->payload_bad[1], 0x42);
CHECK_VAL(data->payload_bad[2], 0);
CHECK_VAL(data->payload_bad[3], 0x42);
CHECK_VAL(data->payload_bad[4], 0x42);
cleanup:
test_varlen__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/varlen.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <test_progs.h>
#include "test_deny_namespace.skel.h"
#include <sched.h>
#include "cap_helpers.h"
#include <stdio.h>
static int wait_for_pid(pid_t pid)
{
int status, ret;
again:
ret = waitpid(pid, &status, 0);
if (ret == -1) {
if (errno == EINTR)
goto again;
return -1;
}
if (!WIFEXITED(status))
return -1;
return WEXITSTATUS(status);
}
/* negative return value -> some internal error
* positive return value -> userns creation failed
* 0 -> userns creation succeeded
*/
static int create_user_ns(void)
{
pid_t pid;
pid = fork();
if (pid < 0)
return -1;
if (pid == 0) {
if (unshare(CLONE_NEWUSER))
_exit(EXIT_FAILURE);
_exit(EXIT_SUCCESS);
}
return wait_for_pid(pid);
}
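/* With the LSM program attached, user namespace creation is expected to
 * succeed with CAP_SYS_ADMIN and fail with EPERM without it.
 */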
static void test_userns_create_bpf(void)
{
__u32 cap_mask = 1ULL << CAP_SYS_ADMIN;
__u64 old_caps = 0;
cap_enable_effective(cap_mask, &old_caps);
ASSERT_OK(create_user_ns(), "priv new user ns");
cap_disable_effective(cap_mask, &old_caps);
ASSERT_EQ(create_user_ns(), EPERM, "unpriv new user ns");
if (cap_mask & old_caps)
cap_enable_effective(cap_mask, NULL);
}
static void test_unpriv_userns_create_no_bpf(void)
{
__u32 cap_mask = 1ULL << CAP_SYS_ADMIN;
__u64 old_caps = 0;
cap_disable_effective(cap_mask, &old_caps);
ASSERT_OK(create_user_ns(), "no-bpf unpriv new user ns");
if (cap_mask & old_caps)
cap_enable_effective(cap_mask, NULL);
}
void test_deny_namespace(void)
{
struct test_deny_namespace *skel = NULL;
int err;
if (test__start_subtest("unpriv_userns_create_no_bpf"))
test_unpriv_userns_create_no_bpf();
skel = test_deny_namespace__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel load"))
goto close_prog;
err = test_deny_namespace__attach(skel);
if (!ASSERT_OK(err, "attach"))
goto close_prog;
if (test__start_subtest("userns_create_bpf"))
test_userns_create_bpf();
test_deny_namespace__detach(skel);
close_prog:
test_deny_namespace__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/deny_namespace.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <task_local_storage_helpers.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vma.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_unix.skel.h"
#include "bpf_iter_vma_offset.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"
static int duration;
static void test_btf_id_or_null(void)
{
struct bpf_iter_test_kern3 *skel;
skel = bpf_iter_test_kern3__open_and_load();
if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
bpf_iter_test_kern3__destroy(skel);
return;
}
}
static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
{
struct bpf_link *link;
char buf[16] = {};
int iter_fd, len;
link = bpf_program__attach_iter(prog, opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
return;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* don't check the contents, but ensure read() ends without error */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));
close(iter_fd);
free_link:
bpf_link__destroy(link);
}
static void do_dummy_read(struct bpf_program *prog)
{
do_dummy_read_opts(prog, NULL);
}
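/* Attach a map iterator, then close the link and destroy the skeleton (and
 * thus the map fd) before reading, to verify the iterator fd keeps working
 * after both are gone.
 */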
static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
struct bpf_map *map)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo;
struct bpf_link *link;
char buf[16] = {};
int iter_fd, len;
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = bpf_map__fd(map);
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(prog, &opts);
if (!ASSERT_OK_PTR(link, "attach_map_iter"))
return;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
bpf_link__destroy(link);
return;
}
/* Close link and map fd prematurely */
bpf_link__destroy(link);
bpf_object__destroy_skeleton(*skel);
*skel = NULL;
/* Give the deferred map-free work a chance to run first if the map was freed */
usleep(100);
/* Memory used by both the sock map and the sock local storage map is
 * freed after two synchronize_rcu() calls, so wait for that
*/
kern_sync_rcu();
kern_sync_rcu();
/* Read after both map fd and link fd are closed */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
ASSERT_GE(len, 0, "read_iterator");
close(iter_fd);
}
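/* Read from `fd` until EOF or `size` bytes are consumed; returns the number
 * of bytes read or a negative error.
 */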
static int read_fd_into_buffer(int fd, char *buf, int size)
{
int bufleft = size;
int len;
do {
len = read(fd, buf, bufleft);
if (len > 0) {
buf += len;
bufleft -= len;
}
} while (len > 0);
return len < 0 ? len : size - bufleft;
}
static void test_ipv6_route(void)
{
struct bpf_iter_ipv6_route *skel;
skel = bpf_iter_ipv6_route__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
return;
do_dummy_read(skel->progs.dump_ipv6_route);
bpf_iter_ipv6_route__destroy(skel);
}
static void test_netlink(void)
{
struct bpf_iter_netlink *skel;
skel = bpf_iter_netlink__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
return;
do_dummy_read(skel->progs.dump_netlink);
bpf_iter_netlink__destroy(skel);
}
static void test_bpf_map(void)
{
struct bpf_iter_bpf_map *skel;
skel = bpf_iter_bpf_map__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
return;
do_dummy_read(skel->progs.dump_bpf_map);
bpf_iter_bpf_map__destroy(skel);
}
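/* Create a tid-scoped task iterator link and verify that
 * bpf_link_get_info_by_fd() reports the tid back in the link info.
 */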
static void check_bpf_link_info(const struct bpf_program *prog)
{
LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo;
struct bpf_link_info info = {};
struct bpf_link *link;
__u32 info_len;
int err;
memset(&linfo, 0, sizeof(linfo));
linfo.task.tid = getpid();
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(prog, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
return;
info_len = sizeof(info);
err = bpf_link_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
ASSERT_OK(err, "bpf_link_get_info_by_fd");
ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");
bpf_link__destroy(link);
}
static pthread_mutex_t do_nothing_mutex;
static void *do_nothing_wait(void *arg)
{
pthread_mutex_lock(&do_nothing_mutex);
pthread_mutex_unlock(&do_nothing_mutex);
pthread_exit(arg);
}
static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
int *num_unknown, int *num_known)
{
struct bpf_iter_task *skel;
pthread_t thread_id;
void *ret;
skel = bpf_iter_task__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
return;
ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");
ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
"pthread_create");
skel->bss->tid = getpid();
do_dummy_read_opts(skel->progs.dump_task, opts);
*num_unknown = skel->bss->num_unknown_tid;
*num_known = skel->bss->num_known_tid;
ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
"pthread_join");
bpf_iter_task__destroy(skel);
}
static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
{
int num_unknown_tid, num_known_tid;
test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
}
static void test_task_tid(void)
{
LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo;
int num_unknown_tid, num_known_tid;
memset(&linfo, 0, sizeof(linfo));
linfo.task.tid = getpid();
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
test_task_common(&opts, 0, 1);
linfo.task.tid = 0;
linfo.task.pid = getpid();
test_task_common(&opts, 1, 1);
test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
}
static void test_task_pid(void)
{
LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo;
memset(&linfo, 0, sizeof(linfo));
linfo.task.pid = getpid();
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
test_task_common(&opts, 1, 1);
}
static void test_task_pidfd(void)
{
LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo;
int pidfd;
pidfd = sys_pidfd_open(getpid(), 0);
if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open"))
return;
memset(&linfo, 0, sizeof(linfo));
linfo.task.pid_fd = pidfd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
test_task_common(&opts, 1, 1);
close(pidfd);
}
static void test_task_sleepable(void)
{
struct bpf_iter_task *skel;
skel = bpf_iter_task__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
return;
do_dummy_read(skel->progs.dump_task_sleepable);
ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
"num_expected_failure_copy_from_user_task");
ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
"num_success_copy_from_user_task");
bpf_iter_task__destroy(skel);
}
static void test_task_stack(void)
{
struct bpf_iter_task_stack *skel;
skel = bpf_iter_task_stack__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
return;
do_dummy_read(skel->progs.dump_task_stack);
do_dummy_read(skel->progs.get_task_user_stacks);
bpf_iter_task_stack__destroy(skel);
}
static void test_task_file(void)
{
LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_task_file *skel;
union bpf_iter_link_info linfo;
pthread_t thread_id;
void *ret;
skel = bpf_iter_task_file__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
return;
skel->bss->tgid = getpid();
ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");
ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
"pthread_create");
memset(&linfo, 0, sizeof(linfo));
linfo.task.tid = getpid();
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
do_dummy_read_opts(skel->progs.dump_task_file, &opts);
ASSERT_EQ(skel->bss->count, 0, "check_count");
ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
skel->bss->last_tgid = 0;
skel->bss->count = 0;
skel->bss->unique_tgid_count = 0;
do_dummy_read(skel->progs.dump_task_file);
ASSERT_EQ(skel->bss->count, 0, "check_count");
ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
check_bpf_link_info(skel->progs.dump_task_file);
ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
ASSERT_NULL(ret, "pthread_join");
bpf_iter_task_file__destroy(skel);
}
#define TASKBUFSZ 32768
static char taskbuf[TASKBUFSZ];
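/* Attach the dump_task_struct iterator and read its output into
* taskbuf. Returns 1 (and skips the test) when the BPF program
* reports that __builtin_btf_type_id is unavailable, 0 otherwise.
*/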
static int do_btf_read(struct bpf_iter_task_btf *skel)
{
struct bpf_program *prog = skel->progs.dump_task_struct;
struct bpf_iter_task_btf__bss *bss = skel->bss;
int iter_fd = -1, err;
struct bpf_link *link;
char *buf = taskbuf;
int ret = 0;
link = bpf_program__attach_iter(prog, NULL);
if (!ASSERT_OK_PTR(link, "attach_iter"))
return ret;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
if (bss->skip) {
printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
ret = 1;
test__skip();
goto free_link;
}
if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
goto free_link;
ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
"check for btf representation of task_struct in iter data");
free_link:
if (iter_fd > 0)
close(iter_fd);
bpf_link__destroy(link);
return ret;
}
static void test_task_btf(void)
{
struct bpf_iter_task_btf__bss *bss;
struct bpf_iter_task_btf *skel;
int ret;
skel = bpf_iter_task_btf__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
return;
bss = skel->bss;
ret = do_btf_read(skel);
if (ret)
goto cleanup;
if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
goto cleanup;
ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");
cleanup:
bpf_iter_task_btf__destroy(skel);
}
static void test_tcp4(void)
{
struct bpf_iter_tcp4 *skel;
skel = bpf_iter_tcp4__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
return;
do_dummy_read(skel->progs.dump_tcp4);
bpf_iter_tcp4__destroy(skel);
}
static void test_tcp6(void)
{
struct bpf_iter_tcp6 *skel;
skel = bpf_iter_tcp6__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
return;
do_dummy_read(skel->progs.dump_tcp6);
bpf_iter_tcp6__destroy(skel);
}
static void test_udp4(void)
{
struct bpf_iter_udp4 *skel;
skel = bpf_iter_udp4__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
return;
do_dummy_read(skel->progs.dump_udp4);
bpf_iter_udp4__destroy(skel);
}
static void test_udp6(void)
{
struct bpf_iter_udp6 *skel;
skel = bpf_iter_udp6__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
return;
do_dummy_read(skel->progs.dump_udp6);
bpf_iter_udp6__destroy(skel);
}
static void test_unix(void)
{
struct bpf_iter_unix *skel;
skel = bpf_iter_unix__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
return;
do_dummy_read(skel->progs.dump_unix);
bpf_iter_unix__destroy(skel);
}
/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
bool read_one_char)
{
int len, read_buf_len, start;
char buf[16] = {};
read_buf_len = read_one_char ? 1 : 16;
start = 0;
while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
start += len;
if (CHECK(start >= 16, "read", "read len %d\n", len))
return -1;
read_buf_len = read_one_char ? 1 : 16 - start;
}
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
return -1;
if (!ASSERT_STREQ(buf, expected, "read"))
return -1;
return 0;
}
static void test_anon_iter(bool read_one_char)
{
struct bpf_iter_test_kern1 *skel;
struct bpf_link *link;
int iter_fd, err;
skel = bpf_iter_test_kern1__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
return;
err = bpf_iter_test_kern1__attach(skel);
if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach")) {
goto out;
}
link = skel->links.dump_task;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto out;
do_read_with_fd(iter_fd, "abcd", read_one_char);
close(iter_fd);
out:
bpf_iter_test_kern1__destroy(skel);
}
static int do_read(const char *path, const char *expected)
{
int err, iter_fd;
iter_fd = open(path, O_RDONLY);
if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
path, strerror(errno)))
return -1;
err = do_read_with_fd(iter_fd, expected, false);
close(iter_fd);
return err;
}
static void test_file_iter(void)
{
const char *path = "/sys/fs/bpf/bpf_iter_test1";
struct bpf_iter_test_kern1 *skel1;
struct bpf_iter_test_kern2 *skel2;
struct bpf_link *link;
int err;
skel1 = bpf_iter_test_kern1__open_and_load();
if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
return;
link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
/* unlink this path if it exists. */
unlink(path);
err = bpf_link__pin(link, path);
if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
goto free_link;
err = do_read(path, "abcd");
if (err)
goto unlink_path;
/* The file-based iterator seems to be working fine. Let us do a
* link update of the underlying link and `cat` the iterator again;
* its content should change.
*/
skel2 = bpf_iter_test_kern2__open_and_load();
if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
goto unlink_path;
err = bpf_link__update_program(link, skel2->progs.dump_task);
if (!ASSERT_OK(err, "update_prog"))
goto destroy_skel2;
do_read(path, "ABCD");
destroy_skel2:
bpf_iter_test_kern2__destroy(skel2);
unlink_path:
unlink(path);
free_link:
bpf_link__destroy(link);
out:
bpf_iter_test_kern1__destroy(skel1);
}
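/* Exercise seq_file buffer handling for the bpf_map iterator. With
* test_e2big_overflow, each map's output is made slightly larger
* than the seq_file buffer, so the read is expected to fail with
* E2BIG. With ret1, the iterator program's return value is set to 1
* and only 2 * 8 bytes of output are expected.
*/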
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
__u32 map_info_len, total_read_len, expected_read_len;
int err, iter_fd, map1_fd, map2_fd, len;
struct bpf_map_info map_info = {};
struct bpf_iter_test_kern4 *skel;
struct bpf_link *link;
__u32 iter_size;
char *buf;
skel = bpf_iter_test_kern4__open();
if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
return;
/* create two maps: the bpf program will only do bpf_seq_write
* for these two maps. The goal is that one map's output almost
* fills the seq_file buffer and the other then triggers an
* overflow and needs a restart.
*/
map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
if (CHECK(map1_fd < 0, "bpf_map_create",
"map_creation failed: %s\n", strerror(errno)))
goto out;
map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
if (CHECK(map2_fd < 0, "bpf_map_create",
"map_creation failed: %s\n", strerror(errno)))
goto free_map1;
/* The bpf_seq_printf kernel buffer is 8 pages, so one map's
* bpf_seq_write output will mostly fill it, and the other map's
* output will partially fill it and then trigger an overflow,
* needing a bpf_seq_read restart.
*/
iter_size = sysconf(_SC_PAGE_SIZE) << 3;
if (test_e2big_overflow) {
skel->rodata->print_len = (iter_size + 8) / 8;
expected_read_len = 2 * (iter_size + 8);
} else if (!ret1) {
skel->rodata->print_len = (iter_size - 8) / 8;
expected_read_len = 2 * (iter_size - 8);
} else {
skel->rodata->print_len = 1;
expected_read_len = 2 * 8;
}
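/* For example, with a 4KB page size iter_size is 32768, so the
* e2big case writes (32768 + 8) = 32776 bytes per map and expects
* 65552 bytes in total, the non-ret1 case writes 32760 bytes per
* map (65520 total), and the ret1 case expects just 2 * 8 bytes.
*/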
skel->rodata->ret1 = ret1;
if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
"bpf_iter_test_kern4__load"))
goto free_map2;
/* setup filtering map_id in bpf program */
map_info_len = sizeof(map_info);
err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len);
if (CHECK(err, "get_map_info", "get map info failed: %s\n",
strerror(errno)))
goto free_map2;
skel->bss->map1_id = map_info.id;
err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len);
if (CHECK(err, "get_map_info", "get map info failed: %s\n",
strerror(errno)))
goto free_map2;
skel->bss->map2_id = map_info.id;
link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto free_map2;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
buf = malloc(expected_read_len);
if (!buf)
goto close_iter;
/* do read */
total_read_len = 0;
if (test_e2big_overflow) {
while ((len = read(iter_fd, buf, expected_read_len)) > 0)
total_read_len += len;
CHECK(len != -1 || errno != E2BIG, "read",
"expected ret -1, errno E2BIG, but get ret %d, error %s\n",
len, strerror(errno));
goto free_buf;
} else if (!ret1) {
while ((len = read(iter_fd, buf, expected_read_len)) > 0)
total_read_len += len;
if (CHECK(len < 0, "read", "read failed: %s\n",
strerror(errno)))
goto free_buf;
} else {
do {
len = read(iter_fd, buf, expected_read_len);
if (len > 0)
total_read_len += len;
} while (len > 0 || len == -EAGAIN);
if (CHECK(len < 0, "read", "read failed: %s\n",
strerror(errno)))
goto free_buf;
}
if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
goto free_buf;
if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
goto free_buf;
if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
goto free_buf;
ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");
free_buf:
free(buf);
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
free_map2:
close(map2_fd);
free_map1:
close(map1_fd);
out:
bpf_iter_test_kern4__destroy(skel);
}
static void test_bpf_hash_map(void)
{
__u32 expected_key_a = 0, expected_key_b = 0;
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_bpf_hash_map *skel;
int err, i, len, map_fd, iter_fd;
union bpf_iter_link_info linfo;
__u64 val, expected_val = 0;
struct bpf_link *link;
struct key_t {
int a;
int b;
int c;
} key;
char buf[64];
skel = bpf_iter_bpf_hash_map__open();
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
return;
skel->bss->in_test_mode = true;
err = bpf_iter_bpf_hash_map__load(skel);
if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
goto out;
/* iterator with hashmap2 and hashmap3 should fail */
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (!ASSERT_ERR_PTR(link, "attach_iter"))
goto out;
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (!ASSERT_ERR_PTR(link, "attach_iter"))
goto out;
/* hashmap1 should be good, update map values here */
map_fd = bpf_map__fd(skel->maps.hashmap1);
for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
key.a = i + 1;
key.b = i + 2;
key.c = i + 3;
val = i + 4;
expected_key_a += key.a;
expected_key_b += key.b;
expected_val += val;
err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
if (!ASSERT_OK(err, "map_update"))
goto out;
}
/* A sleepable program is prohibited for the hash map iterator */
linfo.map.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
goto out;
linfo.map.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
goto close_iter;
/* test results */
if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
goto close_iter;
if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
goto close_iter;
if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_hash_map__destroy(skel);
}
static void test_bpf_percpu_hash_map(void)
{
__u32 expected_key_a = 0, expected_key_b = 0;
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_bpf_percpu_hash_map *skel;
int err, i, j, len, map_fd, iter_fd;
union bpf_iter_link_info linfo;
__u32 expected_val = 0;
struct bpf_link *link;
struct key_t {
int a;
int b;
int c;
} key;
char buf[64];
void *val;
skel = bpf_iter_bpf_percpu_hash_map__open();
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
return;
skel->rodata->num_cpus = bpf_num_possible_cpus();
val = malloc(8 * bpf_num_possible_cpus());
err = bpf_iter_bpf_percpu_hash_map__load(skel);
if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_hash_map__load"))
goto out;
/* update map values here */
map_fd = bpf_map__fd(skel->maps.hashmap1);
for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
key.a = i + 1;
key.b = i + 2;
key.c = i + 3;
expected_key_a += key.a;
expected_key_b += key.b;
for (j = 0; j < bpf_num_possible_cpus(); j++) {
*(__u32 *)(val + j * 8) = i + j;
expected_val += i + j;
}
err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
if (!ASSERT_OK(err, "map_update"))
goto out;
}
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = map_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
goto close_iter;
/* test results */
if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
goto close_iter;
if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
goto close_iter;
if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_percpu_hash_map__destroy(skel);
free(val);
}
static void test_bpf_array_map(void)
{
__u64 val, expected_val = 0, res_first_val, first_val = 0;
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
__u32 key, expected_key = 0, res_first_key;
int err, i, map_fd, hash_fd, iter_fd;
struct bpf_iter_bpf_array_map *skel;
union bpf_iter_link_info linfo;
struct bpf_link *link;
char buf[64] = {};
int len, start;
skel = bpf_iter_bpf_array_map__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
return;
map_fd = bpf_map__fd(skel->maps.arraymap1);
for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
val = i + 4;
expected_key += i;
expected_val += val;
if (i == 0)
first_val = val;
err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
if (!ASSERT_OK(err, "map_update"))
goto out;
}
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = map_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
start = 0;
while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
start += len;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
goto close_iter;
/* test results */
res_first_key = *(__u32 *)buf;
res_first_val = *(__u64 *)(buf + sizeof(__u32));
if (CHECK(res_first_key != 0 || res_first_val != first_val,
"bpf_seq_write",
"seq_write failure: first key %u vs expected 0, "
" first value %llu vs expected %llu\n",
res_first_key, res_first_val, first_val))
goto close_iter;
if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
goto close_iter;
if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
hash_fd = bpf_map__fd(skel->maps.hashmap1);
for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
err = bpf_map_lookup_elem(map_fd, &i, &val);
if (!ASSERT_OK(err, "map_lookup arraymap1"))
goto close_iter;
if (!ASSERT_EQ(i, val, "invalid_val arraymap1"))
goto close_iter;
val = i + 4;
err = bpf_map_lookup_elem(hash_fd, &val, &key);
if (!ASSERT_OK(err, "map_lookup hashmap1"))
goto close_iter;
if (!ASSERT_EQ(key, val - 4, "invalid_val hashmap1"))
goto close_iter;
}
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_array_map__destroy(skel);
}
static void test_bpf_array_map_iter_fd(void)
{
struct bpf_iter_bpf_array_map *skel;
skel = bpf_iter_bpf_array_map__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
return;
do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
skel->maps.arraymap1);
bpf_iter_bpf_array_map__destroy(skel);
}
static void test_bpf_percpu_array_map(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_bpf_percpu_array_map *skel;
__u32 expected_key = 0, expected_val = 0;
union bpf_iter_link_info linfo;
int err, i, j, map_fd, iter_fd;
struct bpf_link *link;
char buf[64];
void *val;
int len;
skel = bpf_iter_bpf_percpu_array_map__open();
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
return;
skel->rodata->num_cpus = bpf_num_possible_cpus();
val = malloc(8 * bpf_num_possible_cpus());
err = bpf_iter_bpf_percpu_array_map__load(skel);
if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_array_map__load"))
goto out;
/* update map values here */
map_fd = bpf_map__fd(skel->maps.arraymap1);
for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
expected_key += i;
for (j = 0; j < bpf_num_possible_cpus(); j++) {
*(__u32 *)(val + j * 8) = i + j;
expected_val += i + j;
}
err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
if (!ASSERT_OK(err, "map_update"))
goto out;
}
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = map_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
goto close_iter;
/* test results */
if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
goto close_iter;
if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_percpu_array_map__destroy(skel);
free(val);
}
/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_bpf_sk_storage_helpers *skel;
union bpf_iter_link_info linfo;
int err, len, map_fd, iter_fd;
struct bpf_link *link;
int sock_fd = -1;
__u32 val = 42;
char buf[64];
skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
return;
map_fd = bpf_map__fd(skel->maps.sk_stg_map);
sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
if (!ASSERT_GE(sock_fd, 0, "socket"))
goto out;
err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
if (!ASSERT_OK(err, "map_update"))
goto out;
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = map_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
&opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
goto close_iter;
/* test results */
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
"map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
goto close_iter;
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
if (sock_fd >= 0)
close(sock_fd);
bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
/* This creates a socket and its local storage. It then runs a task_iter BPF
* program that replaces the existing socket local storage with the tgid of the
* only task owning a file descriptor to this socket, this process, prog_tests.
* It then runs a tcp socket iterator that negates the value in the existing
* socket local storage; the test verifies that the resulting value is -pid.
*/
static void test_bpf_sk_storage_get(void)
{
struct bpf_iter_bpf_sk_storage_helpers *skel;
int err, map_fd, val = -1;
int sock_fd = -1;
skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
return;
sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
if (!ASSERT_GE(sock_fd, 0, "socket"))
goto out;
err = listen(sock_fd, 1);
if (!ASSERT_OK(err, "listen"))
goto close_socket;
map_fd = bpf_map__fd(skel->maps.sk_stg_map);
err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
if (!ASSERT_OK(err, "bpf_map_update_elem"))
goto close_socket;
do_dummy_read(skel->progs.fill_socket_owner);
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
"map value wasn't set correctly (expected %d, got %d, err=%d)\n",
getpid(), val, err))
goto close_socket;
do_dummy_read(skel->progs.negate_socket_local_storage);
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
"map value wasn't set correctly (expected %d, got %d, err=%d)\n",
-getpid(), val, err);
close_socket:
close(sock_fd);
out:
bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
static void test_bpf_sk_storage_map_iter_fd(void)
{
struct bpf_iter_bpf_sk_storage_map *skel;
skel = bpf_iter_bpf_sk_storage_map__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
return;
do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
skel->maps.sk_stg_map);
bpf_iter_bpf_sk_storage_map__destroy(skel);
}
static void test_bpf_sk_storage_map(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
int err, i, len, map_fd, iter_fd, num_sockets;
struct bpf_iter_bpf_sk_storage_map *skel;
union bpf_iter_link_info linfo;
int sock_fd[3] = {-1, -1, -1};
__u32 val, expected_val = 0;
struct bpf_link *link;
char buf[64];
skel = bpf_iter_bpf_sk_storage_map__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
return;
map_fd = bpf_map__fd(skel->maps.sk_stg_map);
num_sockets = ARRAY_SIZE(sock_fd);
for (i = 0; i < num_sockets; i++) {
sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
if (!ASSERT_GE(sock_fd[i], 0, "socket"))
goto out;
val = i + 1;
expected_val += val;
err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
BPF_NOEXIST);
if (!ASSERT_OK(err, "map_update"))
goto out;
}
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = map_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
err = libbpf_get_error(link);
if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
if (!err)
bpf_link__destroy(link);
goto out;
}
link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
skel->bss->to_add_val = time(NULL);
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
goto close_iter;
/* test results */
if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
goto close_iter;
if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
for (i = 0; i < num_sockets; i++) {
err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
if (!ASSERT_OK(err, "map_lookup") ||
!ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
break;
}
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
for (i = 0; i < num_sockets; i++) {
if (sock_fd[i] >= 0)
close(sock_fd[i]);
}
bpf_iter_bpf_sk_storage_map__destroy(skel);
}
static void test_rdonly_buf_out_of_bound(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_test_kern5 *skel;
union bpf_iter_link_info linfo;
struct bpf_link *link;
skel = bpf_iter_test_kern5__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
return;
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (!ASSERT_ERR_PTR(link, "attach_iter"))
bpf_link__destroy(link);
bpf_iter_test_kern5__destroy(skel);
}
static void test_buf_neg_offset(void)
{
struct bpf_iter_test_kern6 *skel;
skel = bpf_iter_test_kern6__open_and_load();
if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
bpf_iter_test_kern6__destroy(skel);
}
static void test_link_iter(void)
{
struct bpf_iter_bpf_link *skel;
skel = bpf_iter_bpf_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
return;
do_dummy_read(skel->progs.dump_bpf_link);
bpf_iter_bpf_link__destroy(skel);
}
static void test_ksym_iter(void)
{
struct bpf_iter_ksym *skel;
skel = bpf_iter_ksym__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
return;
do_dummy_read(skel->progs.dump_ksym);
bpf_iter_ksym__destroy(skel);
}
#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];
/* remove ' ' and '\t' from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
char *dst = str, *src = str;
do {
if (*src == ' ' || *src == '\t')
src++;
else
*(dst++) = *(src++);
} while (*src != '\0' && *src != '\n');
*dst = '\0';
}
static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
{
int err, iter_fd = -1, proc_maps_fd = -1;
struct bpf_iter_task_vma *skel;
int len, read_size = 4;
char maps_path[64];
skel = bpf_iter_task_vma__open();
if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
return;
skel->bss->pid = getpid();
skel->bss->one_task = opts ? 1 : 0;
err = bpf_iter_task_vma__load(skel);
if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
goto out;
skel->links.proc_maps = bpf_program__attach_iter(
skel->progs.proc_maps, opts);
if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
skel->links.proc_maps = NULL;
goto out;
}
iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto out;
/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
* to trigger seq_file corner cases.
*/
len = 0;
while (len < CMP_BUFFER_SIZE) {
err = read_fd_into_buffer(iter_fd, task_vma_output + len,
MIN(read_size, CMP_BUFFER_SIZE - len));
if (!err)
break;
if (!ASSERT_GE(err, 0, "read_iter_fd"))
goto out;
len += err;
}
if (opts)
ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");
/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
proc_maps_fd = open(maps_path, O_RDONLY);
if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
goto out;
err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
goto out;
/* strip and compare the first line of the two files */
str_strip_first_line(task_vma_output);
str_strip_first_line(proc_maps_output);
ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");
check_bpf_link_info(skel->progs.proc_maps);
out:
close(proc_maps_fd);
close(iter_fd);
bpf_iter_task_vma__destroy(skel);
}
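/* Repeatedly create and drain task_vma iterators while a forked
* child keeps spawning short-lived processes, so iteration races
* with tasks exiting.
*/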
static void test_task_vma_dead_task(void)
{
struct bpf_iter_task_vma *skel;
int wstatus, child_pid = -1;
time_t start_tm, cur_tm;
int err, iter_fd = -1;
int wait_sec = 3;
skel = bpf_iter_task_vma__open();
if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
return;
skel->bss->pid = getpid();
err = bpf_iter_task_vma__load(skel);
if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
goto out;
skel->links.proc_maps = bpf_program__attach_iter(
skel->progs.proc_maps, NULL);
if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
skel->links.proc_maps = NULL;
goto out;
}
start_tm = time(NULL);
cur_tm = start_tm;
child_pid = fork();
if (child_pid == 0) {
/* Fork short-lived processes in the background. */
while (cur_tm < start_tm + wait_sec) {
system("echo > /dev/null");
cur_tm = time(NULL);
}
exit(0);
}
if (!ASSERT_GE(child_pid, 0, "fork_child"))
goto out;
while (cur_tm < start_tm + wait_sec) {
iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto out;
/* Drain all data from iter_fd. */
while (cur_tm < start_tm + wait_sec) {
err = read_fd_into_buffer(iter_fd, task_vma_output, CMP_BUFFER_SIZE);
if (!ASSERT_GE(err, 0, "read_iter_fd"))
goto out;
cur_tm = time(NULL);
if (err == 0)
break;
}
close(iter_fd);
iter_fd = -1;
}
check_bpf_link_info(skel->progs.proc_maps);
out:
waitpid(child_pid, &wstatus, 0);
close(iter_fd);
bpf_iter_task_vma__destroy(skel);
}
void test_bpf_sockmap_map_iter_fd(void)
{
struct bpf_iter_sockmap *skel;
skel = bpf_iter_sockmap__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
return;
do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);
bpf_iter_sockmap__destroy(skel);
}
static void test_task_vma(void)
{
LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo;
memset(&linfo, 0, sizeof(linfo));
linfo.task.tid = getpid();
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
test_task_vma_common(&opts);
test_task_vma_common(NULL);
}
/* uprobe attach point */
static noinline int trigger_func(int arg)
{
asm volatile ("");
return arg + 1;
}
static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
{
struct bpf_iter_vma_offset *skel;
char buf[16] = {};
int iter_fd, len;
int pgsz, shift;
skel = bpf_iter_vma_offset__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
return;
skel->bss->pid = getpid();
skel->bss->address = (uintptr_t)trigger_func;
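/* compute page_shift = log2(page size) */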
for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
;
skel->bss->page_shift = shift;
skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
goto exit;
iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
if (!ASSERT_GT(iter_fd, 0, "create_iter"))
goto exit;
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
buf[15] = 0;
ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");
ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
if (one_proc)
ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
else
ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
close(iter_fd);
exit:
bpf_iter_vma_offset__destroy(skel);
}
static void test_task_vma_offset(void)
{
LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo;
memset(&linfo, 0, sizeof(linfo));
linfo.task.pid = getpid();
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
test_task_vma_offset_common(&opts, true);
linfo.task.pid = 0;
linfo.task.tid = getpid();
test_task_vma_offset_common(&opts, true);
test_task_vma_offset_common(NULL, false);
}
void test_bpf_iter(void)
{
ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");
if (test__start_subtest("btf_id_or_null"))
test_btf_id_or_null();
if (test__start_subtest("ipv6_route"))
test_ipv6_route();
if (test__start_subtest("netlink"))
test_netlink();
if (test__start_subtest("bpf_map"))
test_bpf_map();
if (test__start_subtest("task_tid"))
test_task_tid();
if (test__start_subtest("task_pid"))
test_task_pid();
if (test__start_subtest("task_pidfd"))
test_task_pidfd();
if (test__start_subtest("task_sleepable"))
test_task_sleepable();
if (test__start_subtest("task_stack"))
test_task_stack();
if (test__start_subtest("task_file"))
test_task_file();
if (test__start_subtest("task_vma"))
test_task_vma();
if (test__start_subtest("task_vma_dead_task"))
test_task_vma_dead_task();
if (test__start_subtest("task_btf"))
test_task_btf();
if (test__start_subtest("tcp4"))
test_tcp4();
if (test__start_subtest("tcp6"))
test_tcp6();
if (test__start_subtest("udp4"))
test_udp4();
if (test__start_subtest("udp6"))
test_udp6();
if (test__start_subtest("unix"))
test_unix();
if (test__start_subtest("anon"))
test_anon_iter(false);
if (test__start_subtest("anon-read-one-char"))
test_anon_iter(true);
if (test__start_subtest("file"))
test_file_iter();
if (test__start_subtest("overflow"))
test_overflow(false, false);
if (test__start_subtest("overflow-e2big"))
test_overflow(true, false);
if (test__start_subtest("prog-ret-1"))
test_overflow(false, true);
if (test__start_subtest("bpf_hash_map"))
test_bpf_hash_map();
if (test__start_subtest("bpf_percpu_hash_map"))
test_bpf_percpu_hash_map();
if (test__start_subtest("bpf_array_map"))
test_bpf_array_map();
if (test__start_subtest("bpf_array_map_iter_fd"))
test_bpf_array_map_iter_fd();
if (test__start_subtest("bpf_percpu_array_map"))
test_bpf_percpu_array_map();
if (test__start_subtest("bpf_sk_storage_map"))
test_bpf_sk_storage_map();
if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
test_bpf_sk_storage_map_iter_fd();
if (test__start_subtest("bpf_sk_storage_delete"))
test_bpf_sk_storage_delete();
if (test__start_subtest("bpf_sk_storage_get"))
test_bpf_sk_storage_get();
if (test__start_subtest("rdonly-buf-out-of-bound"))
test_rdonly_buf_out_of_bound();
if (test__start_subtest("buf-neg-offset"))
test_buf_neg_offset();
if (test__start_subtest("link-iter"))
test_link_iter();
if (test__start_subtest("ksym"))
test_ksym_iter();
if (test__start_subtest("bpf_sockmap_map_iter_fd"))
test_bpf_sockmap_map_iter_fd();
if (test__start_subtest("vma_offset"))
test_task_vma_offset();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/bpf_iter.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#define MAX_INSNS 512
#define MAX_MATCHES 24
struct bpf_reg_match {
unsigned int line;
const char *match;
};
struct bpf_align_test {
const char *descr;
struct bpf_insn insns[MAX_INSNS];
enum {
UNDEF,
ACCEPT,
REJECT
} result;
enum bpf_prog_type prog_type;
/* Matches must be in order of increasing line */
struct bpf_reg_match matches[MAX_MATCHES];
};
static struct bpf_align_test tests[] = {
/* Four tests of known constants. These aren't staggeringly
* interesting since we track exact values now.
*/
{
.descr = "mov",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_3, 4),
BPF_MOV64_IMM(BPF_REG_3, 8),
BPF_MOV64_IMM(BPF_REG_3, 16),
BPF_MOV64_IMM(BPF_REG_3, 32),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
{0, "R3_w=2"},
{1, "R3_w=4"},
{2, "R3_w=8"},
{3, "R3_w=16"},
{4, "R3_w=32"},
},
},
{
.descr = "shift",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
BPF_MOV64_IMM(BPF_REG_4, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
{0, "R3_w=1"},
{1, "R3_w=2"},
{2, "R3_w=4"},
{3, "R3_w=8"},
{4, "R3_w=16"},
{5, "R3_w=1"},
{6, "R4_w=32"},
{7, "R4_w=16"},
{8, "R4_w=8"},
{9, "R4_w=4"},
{10, "R4_w=2"},
},
},
{
.descr = "addsub",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
{0, "R3_w=4"},
{1, "R3_w=8"},
{2, "R3_w=10"},
{3, "R4_w=8"},
{4, "R4_w=12"},
{5, "R4_w=14"},
},
},
{
.descr = "mul",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 7),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
{0, "R3_w=7"},
{1, "R3_w=7"},
{2, "R3_w=14"},
{3, "R3_w=56"},
},
},
/* Tests using unknown values */
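/* PREP_PKT_POINTERS loads skb->data into R2 and skb->data_end into R3. */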
#define PREP_PKT_POINTERS \
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
offsetof(struct __sk_buff, data)), \
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
offsetof(struct __sk_buff, data_end))
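/* LOAD_UNKNOWN(DST_REG) sets up the packet pointers, bounds-checks
* that at least 8 bytes are readable, then loads a single packet
* byte into DST_REG, giving the verifier an unknown value with
* umax=255.
*/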
#define LOAD_UNKNOWN(DST_REG) \
PREP_PKT_POINTERS, \
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
BPF_EXIT_INSN(), \
BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)
{
.descr = "unknown shift",
.insns = {
LOAD_UNKNOWN(BPF_REG_3),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
LOAD_UNKNOWN(BPF_REG_4),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{6, "R0_w=pkt(off=8,r=8,imm=0)"},
{6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{7, "R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
{8, "R3_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{9, "R3_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
{10, "R3_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
{12, "R3_w=pkt_end(off=0,imm=0)"},
{17, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{18, "R4_w=scalar(umax=8160,var_off=(0x0; 0x1fe0))"},
{19, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
{20, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
{21, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{22, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
},
},
{
.descr = "unknown mul",
.insns = {
LOAD_UNKNOWN(BPF_REG_3),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{7, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
{8, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{9, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
{10, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
{11, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
{12, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{13, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
{14, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
{15, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
},
},
{
.descr = "packet const offset",
.insns = {
PREP_PKT_POINTERS,
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
BPF_MOV64_IMM(BPF_REG_0, 0),
/* Skip over ethernet header. */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{2, "R5_w=pkt(off=0,r=0,imm=0)"},
{4, "R5_w=pkt(off=14,r=0,imm=0)"},
{5, "R4_w=pkt(off=14,r=0,imm=0)"},
{9, "R2=pkt(off=0,r=18,imm=0)"},
{10, "R5=pkt(off=14,r=18,imm=0)"},
{10, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{13, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
{14, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
},
},
{
.descr = "packet variable offset",
.insns = {
LOAD_UNKNOWN(BPF_REG_6),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
/* First, add a constant to the R5 packet pointer,
* then a variable with a known alignment.
*/
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
/* Now, test in the other direction. Adding first
* the variable offset to R5, then the constant.
*/
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
/* Test multiple accumulations of unknown values
* into a packet pointer.
*/
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w=pkt(off=0,r=8,imm=0)"},
{7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6.
*/
{11, "R5_w=pkt(id=1,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* At the time the word size load is performed from R5,
* its total offset is NET_IP_ALIGN + reg->off (0) +
* reg->aux_off (14) which is 16. Then the variable
* offset is considered using reg->aux_off_align which
* is 4 and meets the load's requirements.
*/
{15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
{15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
/* Variable offset is added to R5 packet pointer,
* resulting in auxiliary alignment of 4. To keep the BPF
* verifier's precision backtracking logging from
* interfering, we also have a no-op R4 = R5 instruction
* to validate R5's state. We also check that R4 is what
* it should be in such a case.
*/
{18, "R4_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
{18, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* Constant offset is added to R5, resulting in
* reg->off of 14.
*/
{19, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off
* (14) which is 16. Then the variable offset is 4-byte
* aligned, so the total offset is 4-byte aligned and
* meets the load's requirements.
*/
{24, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
{24, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
/* Constant offset is added to R5 packet pointer,
* resulting in reg->off value of 14.
*/
{26, "R5_w=pkt(off=14,r=8"},
/* Variable offset is added to R5, resulting in a
* variable offset of (4n). See comment for insn #18
* for R4 = R5 trick.
*/
{28, "R4_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
{28, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* Constant is added to R5 again, setting reg->off to 18. */
{29, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* And once more we add a variable; resulting var_off
* is still (4n), fixed offset is not changed.
* Also, we create a new reg->id.
*/
{31, "R4_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
{31, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
* which is 20. Then the variable offset is (4n), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{35, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
{35, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
},
},
{
.descr = "packet variable offset 2",
.insns = {
/* Create an unknown offset, (4n+2)-aligned */
LOAD_UNKNOWN(BPF_REG_6),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
/* Add it to the packet pointer */
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
/* Check bounds and perform a read */
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
/* Make a (4n) offset from the value we just read */
BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
/* Add it to the packet pointer */
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
/* Check bounds and perform a read */
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w=pkt(off=0,r=8,imm=0)"},
{7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Adding 14 makes R6 be (4n+2) */
{8, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
/* Packet pointer has (4n+2) offset */
{11, "R5_w=pkt(id=1,off=0,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
{12, "R4=pkt(id=1,off=4,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{15, "R5=pkt(id=1,off=0,r=4,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
/* Newly read value in R6 was shifted left by 2, so has
* known alignment of 4.
*/
{17, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Added (4n) to packet pointer's (4n+2) var_off, giving
* another (4n+2).
*/
{19, "R5_w=pkt(id=2,off=0,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
{20, "R4=pkt(id=2,off=4,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{23, "R5=pkt(id=2,off=0,r=4,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
},
},
{
.descr = "dubious pointer arithmetic",
.insns = {
PREP_PKT_POINTERS,
BPF_MOV64_IMM(BPF_REG_0, 0),
/* (ptr - ptr) << 2 */
BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
/* We have a (4n) value. Let's make a packet offset
* out of it. First add 14, to make it a (4n+2)
*/
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
/* Then make sure it's nonnegative */
BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
BPF_EXIT_INSN(),
/* Add it to packet pointer */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
/* Check bounds and perform a read */
BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.matches = {
{3, "R5_w=pkt_end(off=0,imm=0)"},
/* (ptr - ptr) << 2 == unknown, (4n) */
{5, "R5_w=scalar(smax=9223372036854775804,umax=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
/* (4n) + 14 == (4n+2). We blow our bounds, because
* the add could overflow.
*/
{6, "R5_w=scalar(smin=-9223372036854775806,smax=9223372036854775806,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>=0 */
{9, "R5=scalar(umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
/* packet pointer + nonnegative (4n+2) */
{11, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
{12, "R4_w=pkt(id=1,off=4,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
* We checked the bounds, but it might have been able
* to overflow if the packet pointer started in the
* upper half of the address space.
* So we did not get a 'range' on R6, and the access
* attempt will fail.
*/
{15, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
}
},
{
.descr = "variable subtraction",
.insns = {
/* Create an unknown offset, (4n+2)-aligned */
LOAD_UNKNOWN(BPF_REG_6),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
/* Create another unknown, (4n)-aligned, and subtract
* it from the first one
*/
BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
/* Bounds-check the result */
BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
BPF_EXIT_INSN(),
/* Add it to the packet pointer */
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
/* Check bounds and perform a read */
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w=pkt(off=0,r=8,imm=0)"},
{8, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Adding 14 makes R6 be (4n+2) */
{9, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
/* New unknown value in R7 is (4n) */
{10, "R7_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Subtracting it from R6 blows our unsigned bounds */
{11, "R6=scalar(smin=-1006,smax=1034,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>= 0 */
{14, "R6=scalar(umin=2,umax=1034,var_off=(0x2; 0x7fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{20, "R5=pkt(id=2,off=0,r=4,umin=2,umax=1034,var_off=(0x2; 0x7fc)"},
},
},
{
.descr = "pointer variable subtraction",
.insns = {
/* Create an unknown offset, (4n+2)-aligned and bounded
* to [14,74]
*/
LOAD_UNKNOWN(BPF_REG_6),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
/* Subtract it from the packet pointer */
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
/* Create another unknown, (4n)-aligned and >= 74.
* That in fact means >= 76, since 74 % 4 == 2
*/
BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
/* Add it to the packet pointer */
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
/* Check bounds and perform a read */
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w=pkt(off=0,r=8,imm=0)"},
{9, "R6_w=scalar(umax=60,var_off=(0x0; 0x3c))"},
/* Adding 14 makes R6 be (4n+2) */
{10, "R6_w=scalar(umin=14,umax=74,var_off=(0x2; 0x7c))"},
/* Subtracting from packet pointer overflows ubounds */
{13, "R5_w=pkt(id=2,off=0,r=8,umin=18446744073709551542,umax=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
/* New unknown value in R7 is (4n), >= 76 */
{14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"},
/* Adding it to packet pointer gives nice bounds again */
{16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
},
},
};
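/* Determine the program length by scanning backward for the last
* instruction that is not all zeroes.
*/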
static int probe_filter_length(const struct bpf_insn *fp)
{
int len;
for (len = MAX_INSNS - 1; len > 0; --len)
if (fp[len].code != 0 || fp[len].imm != 0)
break;
return len + 1;
}
static char bpf_vlog[32768];
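/* Load a single test program with BPF_F_STRICT_ALIGNMENT and
* log_level 2, then walk the verifier log and check that each
* expected register state appears on (or right after) its line.
*/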
static int do_test_single(struct bpf_align_test *test)
{
struct bpf_insn *prog = test->insns;
int prog_type = test->prog_type;
char bpf_vlog_copy[32768];
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.prog_flags = BPF_F_STRICT_ALIGNMENT,
.log_buf = bpf_vlog,
.log_size = sizeof(bpf_vlog),
.log_level = 2,
);
const char *line_ptr;
int cur_line = -1;
int prog_len, i;
int fd_prog;
int ret;
prog_len = probe_filter_length(prog);
fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
prog, prog_len, &opts);
if (fd_prog < 0 && test->result != REJECT) {
printf("Failed to load program.\n");
printf("%s", bpf_vlog);
ret = 1;
} else if (fd_prog >= 0 && test->result == REJECT) {
printf("Unexpected success to load!\n");
printf("%s", bpf_vlog);
ret = 1;
close(fd_prog);
} else {
ret = 0;
/* We make a local copy so that we can strtok() it */
strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
line_ptr = strtok(bpf_vlog_copy, "\n");
for (i = 0; i < MAX_MATCHES; i++) {
struct bpf_reg_match m = test->matches[i];
int tmp;
if (!m.match)
break;
while (line_ptr) {
cur_line = -1;
sscanf(line_ptr, "%u: ", &cur_line);
if (cur_line == -1)
sscanf(line_ptr, "from %u to %u: ", &tmp, &cur_line);
if (cur_line == m.line)
break;
line_ptr = strtok(NULL, "\n");
}
if (!line_ptr) {
printf("Failed to find line %u for match: %s\n",
m.line, m.match);
ret = 1;
printf("%s", bpf_vlog);
break;
}
/* Check the next line as well in case the previous line
* did not have a corresponding bpf insn. Example:
* func#0 @0
* 0: R1=ctx(off=0,imm=0) R10=fp0
* 0: (b7) r3 = 2 ; R3_w=2
*
* Sometimes it's actually two lines below, e.g. when
* searching for "6: R3_w=scalar(umax=255,var_off=(0x0; 0xff))":
* from 4 to 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0
* 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0
* 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(off=0,r=8,imm=0) R3_w=scalar(umax=255,var_off=(0x0; 0xff))
*/
while (!strstr(line_ptr, m.match)) {
cur_line = -1;
line_ptr = strtok(NULL, "\n");
sscanf(line_ptr ?: "", "%u: ", &cur_line);
if (!line_ptr || cur_line != m.line)
break;
}
if (cur_line != m.line || !line_ptr || !strstr(line_ptr, m.match)) {
printf("Failed to find match %u: %s\n", m.line, m.match);
ret = 1;
printf("%s", bpf_vlog);
break;
}
}
if (fd_prog >= 0)
close(fd_prog);
}
return ret;
}
void test_align(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct bpf_align_test *test = &tests[i];
if (!test__start_subtest(test->descr))
continue;
ASSERT_OK(do_test_single(test), test->descr);
}
}
| linux-master | tools/testing/selftests/bpf/prog_tests/align.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <time.h>
#include "test_autoload.skel.h"
void test_autoload(void)
{
int duration = 0, err;
struct test_autoload *skel;
skel = test_autoload__open_and_load();
/* prog3 should be broken */
if (CHECK(skel, "skel_open_and_load", "unexpected success\n"))
goto cleanup;
skel = test_autoload__open();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
goto cleanup;
/* don't load prog3 */
bpf_program__set_autoload(skel->progs.prog3, false);
err = test_autoload__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
goto cleanup;
err = test_autoload__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
usleep(1);
CHECK(!skel->bss->prog1_called, "prog1", "not called\n");
CHECK(!skel->bss->prog2_called, "prog2", "not called\n");
CHECK(skel->bss->prog3_called, "prog3", "called?!\n");
cleanup:
test_autoload__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/autoload.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Oracle and/or its affiliates. */
#include <test_progs.h>
#include <bpf/btf.h>
#include "test_unpriv_bpf_disabled.skel.h"
#include "cap_helpers.h"
/* Using CAP_LAST_CAP is risky here, since it can get pulled in from
* an old /usr/include/linux/capability.h and be < CAP_BPF; as a result
* CAP_BPF would not be included in ALL_CAPS. Instead use CAP_BPF as
* we know its value is correct since it is explicitly defined in
* cap_helpers.h.
*/
#define ALL_CAPS ((2ULL << CAP_BPF) - 1)
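/* With CAP_BPF == 39 (its current UAPI value), this works out to
 * (2ULL << 39) - 1 == (1ULL << 40) - 1, i.e. a mask with bits 0..39 set,
 * covering every capability up to and including CAP_BPF.
 */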
#define PINPATH "/sys/fs/bpf/unpriv_bpf_disabled_"
#define NUM_MAPS 7
static __u32 got_perfbuf_val;
static __u32 got_ringbuf_val;
static int process_ringbuf(void *ctx, void *data, size_t len)
{
if (ASSERT_EQ(len, sizeof(__u32), "ringbuf_size_valid"))
got_ringbuf_val = *(__u32 *)data;
return 0;
}
static void process_perfbuf(void *ctx, int cpu, void *data, __u32 len)
{
if (ASSERT_EQ(len, sizeof(__u32), "perfbuf_size_valid"))
got_perfbuf_val = *(__u32 *)data;
}
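/* Read the current value of sysctl_path into old_val (when non-NULL) and
 * write new_val only if it differs; returns 0 on success and a negative
 * errno on failure.
 */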
static int sysctl_set(const char *sysctl_path, char *old_val, const char *new_val)
{
int ret = 0;
FILE *fp;
fp = fopen(sysctl_path, "r+");
if (!fp)
return -errno;
if (old_val && fscanf(fp, "%s", old_val) <= 0) {
ret = -ENOENT;
} else if (!old_val || strcmp(old_val, new_val) != 0) {
fseek(fp, 0, SEEK_SET);
if (fprintf(fp, "%s", new_val) < 0)
ret = -errno;
}
fclose(fp);
return ret;
}
static void test_unpriv_bpf_disabled_positive(struct test_unpriv_bpf_disabled *skel,
__u32 prog_id, int prog_fd, int perf_fd,
char **map_paths, int *map_fds)
{
struct perf_buffer *perfbuf = NULL;
struct ring_buffer *ringbuf = NULL;
int i, nr_cpus, link_fd = -1;
nr_cpus = bpf_num_possible_cpus();
skel->bss->perfbuf_val = 1;
skel->bss->ringbuf_val = 2;
/* Positive tests for unprivileged BPF disabled. Verify we can
* - retrieve and interact with pinned maps;
* - set up and interact with perf buffer;
* - set up and interact with ring buffer;
* - create a link
*/
perfbuf = perf_buffer__new(bpf_map__fd(skel->maps.perfbuf), 8, process_perfbuf, NULL, NULL,
NULL);
if (!ASSERT_OK_PTR(perfbuf, "perf_buffer__new"))
goto cleanup;
ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), process_ringbuf, NULL, NULL);
if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
goto cleanup;
/* trigger & validate perf event, ringbuf output */
usleep(1);
ASSERT_GT(perf_buffer__poll(perfbuf, 100), -1, "perf_buffer__poll");
ASSERT_EQ(got_perfbuf_val, skel->bss->perfbuf_val, "check_perfbuf_val");
ASSERT_EQ(ring_buffer__consume(ringbuf), 1, "ring_buffer__consume");
ASSERT_EQ(got_ringbuf_val, skel->bss->ringbuf_val, "check_ringbuf_val");
for (i = 0; i < NUM_MAPS; i++) {
map_fds[i] = bpf_obj_get(map_paths[i]);
if (!ASSERT_GT(map_fds[i], -1, "obj_get"))
goto cleanup;
}
for (i = 0; i < NUM_MAPS; i++) {
bool prog_array = strstr(map_paths[i], "prog_array") != NULL;
bool array = strstr(map_paths[i], "array") != NULL;
bool buf = strstr(map_paths[i], "buf") != NULL;
__u32 key = 0, vals[nr_cpus], lookup_vals[nr_cpus];
__u32 expected_val = 1;
int j;
/* skip ringbuf, perfbuf */
if (buf)
continue;
for (j = 0; j < nr_cpus; j++)
vals[j] = expected_val;
if (prog_array) {
/* need valid prog array value */
vals[0] = prog_fd;
/* prog array lookup returns prog id, not fd */
expected_val = prog_id;
}
ASSERT_OK(bpf_map_update_elem(map_fds[i], &key, vals, 0), "map_update_elem");
ASSERT_OK(bpf_map_lookup_elem(map_fds[i], &key, &lookup_vals), "map_lookup_elem");
ASSERT_EQ(lookup_vals[0], expected_val, "map_lookup_elem_values");
if (!array)
ASSERT_OK(bpf_map_delete_elem(map_fds[i], &key), "map_delete_elem");
}
link_fd = bpf_link_create(bpf_program__fd(skel->progs.handle_perf_event), perf_fd,
BPF_PERF_EVENT, NULL);
ASSERT_GT(link_fd, 0, "link_create");
cleanup:
if (link_fd >= 0)
close(link_fd);
if (perfbuf)
perf_buffer__free(perfbuf);
if (ringbuf)
ring_buffer__free(ringbuf);
}
static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *skel,
__u32 prog_id, int prog_fd, int perf_fd,
char **map_paths, int *map_fds)
{
const struct bpf_insn prog_insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
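/* A minimal "r0 = 0; exit" program: even this trivial load must be
 * rejected with -EPERM once all capabilities have been dropped.
 */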
const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
LIBBPF_OPTS(bpf_prog_load_opts, load_opts);
struct bpf_map_info map_info = {};
__u32 map_info_len = sizeof(map_info);
struct bpf_link_info link_info = {};
__u32 link_info_len = sizeof(link_info);
struct btf *btf = NULL;
__u32 attach_flags = 0;
__u32 prog_ids[3] = {};
__u32 prog_cnt = 3;
__u32 next;
int i;
/* Negative tests for unprivileged BPF disabled. Verify we cannot
* - load BPF programs;
* - create BPF maps;
* - get a prog/map/link fd by id;
* - get next prog/map/link id
* - query prog
* - BTF load
*/
ASSERT_EQ(bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "simple_prog", "GPL",
prog_insns, prog_insn_cnt, &load_opts),
-EPERM, "prog_load_fails");
/* some map types require particular correct parameters which could be
* sanity-checked before enforcing -EPERM, so only validate that
* the simple ARRAY and HASH maps are failing with -EPERM
*/
for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_ARRAY; i++)
ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL),
-EPERM, "map_create_fails");
ASSERT_EQ(bpf_prog_get_fd_by_id(prog_id), -EPERM, "prog_get_fd_by_id_fails");
ASSERT_EQ(bpf_prog_get_next_id(prog_id, &next), -EPERM, "prog_get_next_id_fails");
ASSERT_EQ(bpf_prog_get_next_id(0, &next), -EPERM, "prog_get_next_id_fails");
if (ASSERT_OK(bpf_map_get_info_by_fd(map_fds[0], &map_info, &map_info_len),
"obj_get_info_by_fd")) {
ASSERT_EQ(bpf_map_get_fd_by_id(map_info.id), -EPERM, "map_get_fd_by_id_fails");
ASSERT_EQ(bpf_map_get_next_id(map_info.id, &next), -EPERM,
"map_get_next_id_fails");
}
ASSERT_EQ(bpf_map_get_next_id(0, &next), -EPERM, "map_get_next_id_fails");
if (ASSERT_OK(bpf_link_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter),
&link_info, &link_info_len),
"obj_get_info_by_fd")) {
ASSERT_EQ(bpf_link_get_fd_by_id(link_info.id), -EPERM, "link_get_fd_by_id_fails");
ASSERT_EQ(bpf_link_get_next_id(link_info.id, &next), -EPERM,
"link_get_next_id_fails");
}
ASSERT_EQ(bpf_link_get_next_id(0, &next), -EPERM, "link_get_next_id_fails");
ASSERT_EQ(bpf_prog_query(prog_fd, BPF_TRACE_FENTRY, 0, &attach_flags, prog_ids,
&prog_cnt), -EPERM, "prog_query_fails");
btf = btf__new_empty();
if (ASSERT_OK_PTR(btf, "empty_btf") &&
ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "unpriv_int_type")) {
const void *raw_btf_data;
__u32 raw_btf_size;
raw_btf_data = btf__raw_data(btf, &raw_btf_size);
if (ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good"))
ASSERT_EQ(bpf_btf_load(raw_btf_data, raw_btf_size, NULL), -EPERM,
"bpf_btf_load_fails");
}
btf__free(btf);
}
void test_unpriv_bpf_disabled(void)
{
char *map_paths[NUM_MAPS] = { PINPATH "array",
PINPATH "percpu_array",
PINPATH "hash",
PINPATH "percpu_hash",
PINPATH "perfbuf",
PINPATH "ringbuf",
PINPATH "prog_array" };
int map_fds[NUM_MAPS];
struct test_unpriv_bpf_disabled *skel;
char unprivileged_bpf_disabled_orig[32] = {};
char perf_event_paranoid_orig[32] = {};
struct bpf_prog_info prog_info = {};
__u32 prog_info_len = sizeof(prog_info);
struct perf_event_attr attr = {};
int prog_fd, perf_fd = -1, i, ret;
__u64 save_caps = 0;
__u32 prog_id;
skel = test_unpriv_bpf_disabled__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->bss->test_pid = getpid();
map_fds[0] = bpf_map__fd(skel->maps.array);
map_fds[1] = bpf_map__fd(skel->maps.percpu_array);
map_fds[2] = bpf_map__fd(skel->maps.hash);
map_fds[3] = bpf_map__fd(skel->maps.percpu_hash);
map_fds[4] = bpf_map__fd(skel->maps.perfbuf);
map_fds[5] = bpf_map__fd(skel->maps.ringbuf);
map_fds[6] = bpf_map__fd(skel->maps.prog_array);
for (i = 0; i < NUM_MAPS; i++)
ASSERT_OK(bpf_obj_pin(map_fds[i], map_paths[i]), "pin map_fd");
/* allow user without caps to use perf events */
if (!ASSERT_OK(sysctl_set("/proc/sys/kernel/perf_event_paranoid", perf_event_paranoid_orig,
"-1"),
"set_perf_event_paranoid"))
goto cleanup;
/* ensure unprivileged bpf disabled is set */
ret = sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled",
unprivileged_bpf_disabled_orig, "2");
if (ret == -EPERM) {
/* if unprivileged_bpf_disabled=1, we get -EPERM back; that's okay. */
if (!ASSERT_OK(strcmp(unprivileged_bpf_disabled_orig, "1"),
"unprivileged_bpf_disabled_on"))
goto cleanup;
} else {
if (!ASSERT_OK(ret, "set unprivileged_bpf_disabled"))
goto cleanup;
}
prog_fd = bpf_program__fd(skel->progs.sys_nanosleep_enter);
ASSERT_OK(bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len),
"obj_get_info_by_fd");
prog_id = prog_info.id;
ASSERT_GT(prog_id, 0, "valid_prog_id");
attr.size = sizeof(attr);
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
attr.sample_freq = 1000;
perf_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(perf_fd, 0, "perf_fd"))
goto cleanup;
if (!ASSERT_OK(test_unpriv_bpf_disabled__attach(skel), "skel_attach"))
goto cleanup;
if (!ASSERT_OK(cap_disable_effective(ALL_CAPS, &save_caps), "disable caps"))
goto cleanup;
if (test__start_subtest("unpriv_bpf_disabled_positive"))
test_unpriv_bpf_disabled_positive(skel, prog_id, prog_fd, perf_fd, map_paths,
map_fds);
if (test__start_subtest("unpriv_bpf_disabled_negative"))
test_unpriv_bpf_disabled_negative(skel, prog_id, prog_fd, perf_fd, map_paths,
map_fds);
cleanup:
close(perf_fd);
if (save_caps)
cap_enable_effective(save_caps, NULL);
if (strlen(perf_event_paranoid_orig) > 0)
sysctl_set("/proc/sys/kernel/perf_event_paranoid", NULL, perf_event_paranoid_orig);
if (strlen(unprivileged_bpf_disabled_orig) > 0)
sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled", NULL,
unprivileged_bpf_disabled_orig);
for (i = 0; i < NUM_MAPS; i++)
unlink(map_paths[i]);
test_unpriv_bpf_disabled__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
static int verify_ports(int family, int fd,
__u16 expected_local, __u16 expected_peer)
{
struct sockaddr_storage addr;
socklen_t len = sizeof(addr);
__u16 port;
if (getsockname(fd, (struct sockaddr *)&addr, &len)) {
log_err("Failed to get server addr");
return -1;
}
if (family == AF_INET)
port = ((struct sockaddr_in *)&addr)->sin_port;
else
port = ((struct sockaddr_in6 *)&addr)->sin6_port;
if (ntohs(port) != expected_local) {
log_err("Unexpected local port %d, expected %d", ntohs(port),
expected_local);
return -1;
}
if (getpeername(fd, (struct sockaddr *)&addr, &len)) {
log_err("Failed to get peer addr");
return -1;
}
if (family == AF_INET)
port = ((struct sockaddr_in *)&addr)->sin_port;
else
port = ((struct sockaddr_in6 *)&addr)->sin6_port;
if (ntohs(port) != expected_peer) {
log_err("Unexpected peer port %d, expected %d", ntohs(port),
expected_peer);
return -1;
}
return 0;
}
static int run_test(int cgroup_fd, int server_fd, int family, int type)
{
bool v4 = family == AF_INET;
__u16 expected_local_port = v4 ? 22222 : 22223;
__u16 expected_peer_port = 60000;
struct bpf_program *prog;
struct bpf_object *obj;
const char *obj_file = v4 ? "connect_force_port4.bpf.o" : "connect_force_port6.bpf.o";
int fd, err;
__u32 duration = 0;
obj = bpf_object__open_file(obj_file, NULL);
if (!ASSERT_OK_PTR(obj, "bpf_obj_open"))
return -1;
err = bpf_object__load(obj);
if (!ASSERT_OK(err, "bpf_obj_load")) {
err = -EIO;
goto close_bpf_object;
}
prog = bpf_object__find_program_by_name(obj, v4 ?
"connect4" :
"connect6");
if (CHECK(!prog, "find_prog", "connect prog not found\n")) {
err = -EIO;
goto close_bpf_object;
}
err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, v4 ?
BPF_CGROUP_INET4_CONNECT :
BPF_CGROUP_INET6_CONNECT, 0);
if (err) {
log_err("Failed to attach BPF program");
goto close_bpf_object;
}
prog = bpf_object__find_program_by_name(obj, v4 ?
"getpeername4" :
"getpeername6");
if (CHECK(!prog, "find_prog", "getpeername prog not found\n")) {
err = -EIO;
goto close_bpf_object;
}
err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, v4 ?
BPF_CGROUP_INET4_GETPEERNAME :
BPF_CGROUP_INET6_GETPEERNAME, 0);
if (err) {
log_err("Failed to attach BPF program");
goto close_bpf_object;
}
prog = bpf_object__find_program_by_name(obj, v4 ?
"getsockname4" :
"getsockname6");
if (CHECK(!prog, "find_prog", "getsockname prog not found\n")) {
err = -EIO;
goto close_bpf_object;
}
err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, v4 ?
BPF_CGROUP_INET4_GETSOCKNAME :
BPF_CGROUP_INET6_GETSOCKNAME, 0);
if (err) {
log_err("Failed to attach BPF program");
goto close_bpf_object;
}
fd = connect_to_fd(server_fd, 0);
if (fd < 0) {
err = -1;
goto close_bpf_object;
}
err = verify_ports(family, fd, expected_local_port,
expected_peer_port);
close(fd);
close_bpf_object:
bpf_object__close(obj);
return err;
}
void test_connect_force_port(void)
{
int server_fd, cgroup_fd;
cgroup_fd = test__join_cgroup("/connect_force_port");
if (CHECK_FAIL(cgroup_fd < 0))
return;
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 60123, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_STREAM));
close(server_fd);
server_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 60124, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_STREAM));
close(server_fd);
server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 60123, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_DGRAM));
close(server_fd);
server_fd = start_server(AF_INET6, SOCK_DGRAM, NULL, 60124, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_DGRAM));
close(server_fd);
close_cgroup_fd:
close(cgroup_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/connect_force_port.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include <linux/bpf.h>
#include "test_pe_preserve_elems.skel.h"
static int duration;
static void test_one_map(struct bpf_map *map, struct bpf_program *prog,
bool has_share_pe)
{
int err, key = 0, pfd = -1, mfd = bpf_map__fd(map);
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts);
struct perf_event_attr attr = {
.size = sizeof(struct perf_event_attr),
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
};
pfd = syscall(__NR_perf_event_open, &attr, 0 /* pid */,
-1 /* any cpu */, -1 /* group id */, 0 /* flags */);
if (CHECK(pfd < 0, "perf_event_open", "failed\n"))
return;
err = bpf_map_update_elem(mfd, &key, &pfd, BPF_ANY);
close(pfd);
if (CHECK(err < 0, "bpf_map_update_elem", "failed\n"))
return;
err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
if (CHECK(err < 0, "bpf_prog_test_run_opts", "failed\n"))
return;
if (CHECK(opts.retval != 0, "bpf_perf_event_read_value",
"failed with %d\n", opts.retval))
return;
/* closing mfd, prog still holds a reference on map */
close(mfd);
err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
if (CHECK(err < 0, "bpf_prog_test_run_opts", "failed\n"))
return;
if (has_share_pe) {
CHECK(opts.retval != 0, "bpf_perf_event_read_value",
"failed with %d\n", opts.retval);
} else {
CHECK(opts.retval != -ENOENT, "bpf_perf_event_read_value",
"should have failed with %d, but got %d\n", -ENOENT,
opts.retval);
}
}
void test_pe_preserve_elems(void)
{
struct test_pe_preserve_elems *skel;
skel = test_pe_preserve_elems__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
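/* array_1 and array_2 differ only in how the skeleton defines them:
 * array_2 is expected to carry BPF_F_PRESERVE_ELEMS, so its perf event
 * entries must survive closing the map FD, while array_1's entry is
 * cleared (hence the -ENOENT case above).
 */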
test_one_map(skel->maps.array_1, skel->progs.read_array_1, false);
test_one_map(skel->maps.array_2, skel->progs.read_array_2, true);
test_pe_preserve_elems__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/pe_preserve_elems.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_probe_read_user_str.skel.h"
static const char str1[] = "mestring";
static const char str2[] = "mestringalittlebigger";
static const char str3[] = "mestringblubblubblubblubblub";
static int test_one_str(struct test_probe_read_user_str *skel, const char *str,
size_t len)
{
int err, duration = 0;
char buf[256];
/* Ensure bytes after string are ones */
memset(buf, 1, sizeof(buf));
memcpy(buf, str, len);
/* Give prog our userspace pointer */
skel->bss->user_ptr = buf;
/* Trigger tracepoint */
usleep(1);
/* Did helper fail? */
if (CHECK(skel->bss->ret < 0, "prog_ret", "prog returned: %ld\n",
skel->bss->ret))
return 1;
/* Check that string was copied correctly */
err = memcmp(skel->bss->buf, str, len);
if (CHECK(err, "memcmp", "prog copied wrong string"))
return 1;
/* Now check that no extra trailing bytes were copied */
memset(buf, 0, sizeof(buf));
err = memcmp(skel->bss->buf + len, buf, sizeof(buf) - len);
if (CHECK(err, "memcmp", "trailing bytes were not stripped"))
return 1;
return 0;
}
void test_probe_read_user_str(void)
{
struct test_probe_read_user_str *skel;
int err, duration = 0;
skel = test_probe_read_user_str__open_and_load();
if (CHECK(!skel, "test_probe_read_user_str__open_and_load",
"skeleton open and load failed\n"))
return;
/* Give pid to bpf prog so it doesn't read from anyone else */
skel->bss->pid = getpid();
err = test_probe_read_user_str__attach(skel);
if (CHECK(err, "test_probe_read_user_str__attach",
"skeleton attach failed: %d\n", err))
goto out;
if (test_one_str(skel, str1, sizeof(str1)))
goto out;
if (test_one_str(skel, str2, sizeof(str2)))
goto out;
if (test_one_str(skel, str3, sizeof(str3)))
goto out;
out:
test_probe_read_user_str__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/probe_read_user_str.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
void test_obj_name(void)
{
struct {
const char *name;
int success;
int expected_errno;
} tests[] = {
{ "", 1, 0 },
{ "_123456789ABCDE", 1, 0 },
{ "_123456789ABCDEF", 0, EINVAL },
{ "_123456789ABCD\n", 0, EINVAL },
};
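/* BPF object names are limited to BPF_OBJ_NAME_LEN (16) bytes including
 * the terminating NUL and may only contain alphanumerics, '_' and '.',
 * so the 15-character name above is accepted while the 16-character and
 * embedded-newline variants must fail with EINVAL.
 */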
struct bpf_insn prog[] = {
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
__u32 duration = 0;
int i;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
size_t name_len = strlen(tests[i].name) + 1;
union bpf_attr attr;
size_t ncopy;
int fd;
/* test different attr.prog_name during BPF_PROG_LOAD */
ncopy = name_len < sizeof(attr.prog_name) ?
name_len : sizeof(attr.prog_name);
bzero(&attr, sizeof(attr));
attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
attr.insn_cnt = 2;
attr.insns = ptr_to_u64(prog);
attr.license = ptr_to_u64("");
memcpy(attr.prog_name, tests[i].name, ncopy);
fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
CHECK((tests[i].success && fd < 0) ||
(!tests[i].success && fd >= 0) ||
(!tests[i].success && errno != tests[i].expected_errno),
"check-bpf-prog-name",
"fd %d(%d) errno %d(%d)\n",
fd, tests[i].success, errno, tests[i].expected_errno);
if (fd >= 0)
close(fd);
/* test different attr.map_name during BPF_MAP_CREATE */
ncopy = name_len < sizeof(attr.map_name) ?
name_len : sizeof(attr.map_name);
bzero(&attr, sizeof(attr));
attr.map_type = BPF_MAP_TYPE_ARRAY;
attr.key_size = 4;
attr.value_size = 4;
attr.max_entries = 1;
attr.map_flags = 0;
memcpy(attr.map_name, tests[i].name, ncopy);
fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
CHECK((tests[i].success && fd < 0) ||
(!tests[i].success && fd >= 0) ||
(!tests[i].success && errno != tests[i].expected_errno),
"check-bpf-map-name",
"fd %d(%d) errno %d(%d)\n",
fd, tests[i].success, errno, tests[i].expected_errno);
if (fd >= 0)
close(fd);
}
}
| linux-master | tools/testing/selftests/bpf/prog_tests/obj_name.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include <error.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <sys/uio.h>
#include "bpf_flow.skel.h"
#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */
#ifndef IP_MF
#define IP_MF 0x2000
#endif
#define CHECK_FLOW_KEYS(desc, got, expected) \
_CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \
desc, \
topts.duration, \
"nhoff=%u/%u " \
"thoff=%u/%u " \
"addr_proto=0x%x/0x%x " \
"is_frag=%u/%u " \
"is_first_frag=%u/%u " \
"is_encap=%u/%u " \
"ip_proto=0x%x/0x%x " \
"n_proto=0x%x/0x%x " \
"flow_label=0x%x/0x%x " \
"sport=%u/%u " \
"dport=%u/%u\n", \
got.nhoff, expected.nhoff, \
got.thoff, expected.thoff, \
got.addr_proto, expected.addr_proto, \
got.is_frag, expected.is_frag, \
got.is_first_frag, expected.is_first_frag, \
got.is_encap, expected.is_encap, \
got.ip_proto, expected.ip_proto, \
got.n_proto, expected.n_proto, \
got.flow_label, expected.flow_label, \
got.sport, expected.sport, \
got.dport, expected.dport)
struct ipv4_pkt {
struct ethhdr eth;
struct iphdr iph;
struct tcphdr tcp;
} __packed;
struct ipip_pkt {
struct ethhdr eth;
struct iphdr iph;
struct iphdr iph_inner;
struct tcphdr tcp;
} __packed;
struct svlan_ipv4_pkt {
struct ethhdr eth;
__u16 vlan_tci;
__u16 vlan_proto;
struct iphdr iph;
struct tcphdr tcp;
} __packed;
struct ipv6_pkt {
struct ethhdr eth;
struct ipv6hdr iph;
struct tcphdr tcp;
} __packed;
struct ipv6_frag_pkt {
struct ethhdr eth;
struct ipv6hdr iph;
struct frag_hdr {
__u8 nexthdr;
__u8 reserved;
__be16 frag_off;
__be32 identification;
} ipf;
struct tcphdr tcp;
} __packed;
struct dvlan_ipv6_pkt {
struct ethhdr eth;
__u16 vlan_tci;
__u16 vlan_proto;
__u16 vlan_tci2;
__u16 vlan_proto2;
struct ipv6hdr iph;
struct tcphdr tcp;
} __packed;
struct test {
const char *name;
union {
struct ipv4_pkt ipv4;
struct svlan_ipv4_pkt svlan_ipv4;
struct ipip_pkt ipip;
struct ipv6_pkt ipv6;
struct ipv6_frag_pkt ipv6_frag;
struct dvlan_ipv6_pkt dvlan_ipv6;
} pkt;
struct bpf_flow_keys keys;
__u32 flags;
__u32 retval;
};
#define VLAN_HLEN 4
static __u32 duration;
struct test tests[] = {
{
.name = "ipv4",
.pkt.ipv4 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = IPPROTO_TCP,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct iphdr),
.addr_proto = ETH_P_IP,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IP),
.sport = 80,
.dport = 8080,
},
.retval = BPF_OK,
},
{
.name = "ipv6",
.pkt.ipv6 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.iph.nexthdr = IPPROTO_TCP,
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
.addr_proto = ETH_P_IPV6,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IPV6),
.sport = 80,
.dport = 8080,
},
.retval = BPF_OK,
},
{
.name = "802.1q-ipv4",
.pkt.svlan_ipv4 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
.vlan_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = IPPROTO_TCP,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.nhoff = ETH_HLEN + VLAN_HLEN,
.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
.addr_proto = ETH_P_IP,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IP),
.sport = 80,
.dport = 8080,
},
.retval = BPF_OK,
},
{
.name = "802.1ad-ipv6",
.pkt.dvlan_ipv6 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
.iph.nexthdr = IPPROTO_TCP,
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.nhoff = ETH_HLEN + VLAN_HLEN * 2,
.thoff = ETH_HLEN + VLAN_HLEN * 2 +
sizeof(struct ipv6hdr),
.addr_proto = ETH_P_IPV6,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IPV6),
.sport = 80,
.dport = 8080,
},
.retval = BPF_OK,
},
{
.name = "ipv4-frag",
.pkt.ipv4 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = IPPROTO_TCP,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.iph.frag_off = __bpf_constant_htons(IP_MF),
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct iphdr),
.addr_proto = ETH_P_IP,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IP),
.is_frag = true,
.is_first_frag = true,
.sport = 80,
.dport = 8080,
},
.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
.retval = BPF_OK,
},
{
.name = "ipv4-no-frag",
.pkt.ipv4 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = IPPROTO_TCP,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.iph.frag_off = __bpf_constant_htons(IP_MF),
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct iphdr),
.addr_proto = ETH_P_IP,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IP),
.is_frag = true,
.is_first_frag = true,
},
.retval = BPF_OK,
},
{
.name = "ipv6-frag",
.pkt.ipv6_frag = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.iph.nexthdr = IPPROTO_FRAGMENT,
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.ipf.nexthdr = IPPROTO_TCP,
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
sizeof(struct frag_hdr),
.addr_proto = ETH_P_IPV6,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IPV6),
.is_frag = true,
.is_first_frag = true,
.sport = 80,
.dport = 8080,
},
.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
.retval = BPF_OK,
},
{
.name = "ipv6-no-frag",
.pkt.ipv6_frag = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.iph.nexthdr = IPPROTO_FRAGMENT,
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.ipf.nexthdr = IPPROTO_TCP,
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
sizeof(struct frag_hdr),
.addr_proto = ETH_P_IPV6,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IPV6),
.is_frag = true,
.is_first_frag = true,
},
.retval = BPF_OK,
},
{
.name = "ipv6-flow-label",
.pkt.ipv6 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.iph.nexthdr = IPPROTO_TCP,
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.iph.flow_lbl = { 0xb, 0xee, 0xef },
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
.addr_proto = ETH_P_IPV6,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IPV6),
.sport = 80,
.dport = 8080,
.flow_label = __bpf_constant_htonl(0xbeeef),
},
.retval = BPF_OK,
},
{
.name = "ipv6-no-flow-label",
.pkt.ipv6 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.iph.nexthdr = IPPROTO_TCP,
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.iph.flow_lbl = { 0xb, 0xee, 0xef },
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
.addr_proto = ETH_P_IPV6,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IPV6),
.flow_label = __bpf_constant_htonl(0xbeeef),
},
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
.retval = BPF_OK,
},
{
.name = "ipv6-empty-flow-label",
.pkt.ipv6 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.iph.nexthdr = IPPROTO_TCP,
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.iph.flow_lbl = { 0x00, 0x00, 0x00 },
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
.addr_proto = ETH_P_IPV6,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IPV6),
.sport = 80,
.dport = 8080,
},
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
.retval = BPF_OK,
},
{
.name = "ipip-encap",
.pkt.ipip = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = IPPROTO_IPIP,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.iph_inner.ihl = 5,
.iph_inner.protocol = IPPROTO_TCP,
.iph_inner.tot_len =
__bpf_constant_htons(MAGIC_BYTES) -
sizeof(struct iphdr),
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct iphdr) +
sizeof(struct iphdr),
.addr_proto = ETH_P_IP,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IP),
.is_encap = true,
.sport = 80,
.dport = 8080,
},
.retval = BPF_OK,
},
{
.name = "ipip-no-encap",
.pkt.ipip = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = IPPROTO_IPIP,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.iph_inner.ihl = 5,
.iph_inner.protocol = IPPROTO_TCP,
.iph_inner.tot_len =
__bpf_constant_htons(MAGIC_BYTES) -
sizeof(struct iphdr),
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct iphdr),
.addr_proto = ETH_P_IP,
.ip_proto = IPPROTO_IPIP,
.n_proto = __bpf_constant_htons(ETH_P_IP),
.is_encap = true,
},
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
.retval = BPF_OK,
},
{
.name = "ipip-encap-dissector-continue",
.pkt.ipip = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = IPPROTO_IPIP,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.iph.saddr = __bpf_constant_htonl(FLOW_CONTINUE_SADDR),
.iph_inner.ihl = 5,
.iph_inner.protocol = IPPROTO_TCP,
.iph_inner.tot_len =
__bpf_constant_htons(MAGIC_BYTES) -
sizeof(struct iphdr),
.tcp.doff = 5,
.tcp.source = 99,
.tcp.dest = 9090,
},
.retval = BPF_FLOW_DISSECTOR_CONTINUE,
},
};
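/* Create a tap device with IFF_NAPI/IFF_NAPI_FRAGS so that frames written
 * to it go through the tun driver path that calls eth_get_headlen(), the
 * skb-less flow dissector entry point exercised by run_tests_skb_less().
 */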
static int create_tap(const char *ifname)
{
struct ifreq ifr = {
.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
};
int fd, ret;
strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
fd = open("/dev/net/tun", O_RDWR);
if (fd < 0)
return -1;
ret = ioctl(fd, TUNSETIFF, &ifr);
if (ret)
return -1;
return fd;
}
static int tx_tap(int fd, void *pkt, size_t len)
{
struct iovec iov[] = {
{
.iov_len = len,
.iov_base = pkt,
},
};
return writev(fd, iov, ARRAY_SIZE(iov));
}
static int ifup(const char *ifname)
{
struct ifreq ifr = {};
int sk, ret;
strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
sk = socket(PF_INET, SOCK_DGRAM, 0);
if (sk < 0)
return -1;
ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
if (ret) {
close(sk);
return -1;
}
ifr.ifr_flags |= IFF_UP;
ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
if (ret) {
close(sk);
return -1;
}
close(sk);
return 0;
}
static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
{
int i, err, map_fd, prog_fd;
struct bpf_program *prog;
char prog_name[32];
map_fd = bpf_map__fd(prog_array);
if (map_fd < 0)
return -1;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (!prog)
return -1;
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0)
return -1;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (err)
return -1;
}
return 0;
}
static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
{
int i, err, keys_fd;
keys_fd = bpf_map__fd(keys);
if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
return;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
/* Keep in sync with 'flags' from eth_get_headlen. */
__u32 eth_get_headlen_flags =
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct bpf_flow_keys flow_keys = {};
__u32 key = (__u32)(tests[i].keys.sport) << 16 |
tests[i].keys.dport;
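/* The skb-less dissector program (bpf_flow.c) presumably records its
 * result in 'last_dissection' keyed by (sport << 16 | dport), so the
 * lookup and delete below must use the same encoding.
 */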
/* For skb-less case we can't pass input flags; run
* only the tests that have a matching set of flags.
*/
if (tests[i].flags != eth_get_headlen_flags)
continue;
err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
/* check the stored flow_keys only if BPF_OK expected */
if (tests[i].retval != BPF_OK)
continue;
err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
ASSERT_OK(err, "bpf_map_lookup_elem");
CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
err = bpf_map_delete_elem(keys_fd, &key);
ASSERT_OK(err, "bpf_map_delete_elem");
}
}
static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
{
int err, prog_fd;
prog_fd = bpf_program__fd(skel->progs._dissect);
if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
return;
err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
return;
run_tests_skb_less(tap_fd, skel->maps.last_dissection);
err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
}
static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
{
struct bpf_link *link;
int err, net_fd;
net_fd = open("/proc/self/ns/net", O_RDONLY);
if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
return;
link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
if (!ASSERT_OK_PTR(link, "attach_netns"))
goto out_close;
run_tests_skb_less(tap_fd, skel->maps.last_dissection);
err = bpf_link__destroy(link);
CHECK(err, "bpf_link__destroy", "err %d\n", err);
out_close:
close(net_fd);
}
void test_flow_dissector(void)
{
int i, err, prog_fd, keys_fd = -1, tap_fd;
struct bpf_flow *skel;
skel = bpf_flow__open_and_load();
if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
return;
prog_fd = bpf_program__fd(skel->progs._dissect);
if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
goto out_destroy_skel;
keys_fd = bpf_map__fd(skel->maps.last_dissection);
if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
goto out_destroy_skel;
err = init_prog_array(skel->obj, skel->maps.jmp_table);
if (CHECK(err, "init_prog_array", "err %d\n", err))
goto out_destroy_skel;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct bpf_flow_keys flow_keys;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &tests[i].pkt,
.data_size_in = sizeof(tests[i].pkt),
.data_out = &flow_keys,
);
static struct bpf_flow_keys ctx = {};
if (tests[i].flags) {
topts.ctx_in = &ctx;
topts.ctx_size_in = sizeof(ctx);
ctx.flags = tests[i].flags;
}
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, tests[i].retval, "test_run retval");
/* check the resulting flow_keys only if BPF_OK returned */
if (topts.retval != BPF_OK)
continue;
ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
"test_run data_size_out");
CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
}
/* Do the same tests but for skb-less flow dissector.
* We use a known path in the net/tun driver that calls
* eth_get_headlen and we manually export bpf_flow_keys
* via BPF map in this case.
*/
tap_fd = create_tap("tap0");
CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
err = ifup("tap0");
CHECK(err, "ifup", "err %d errno %d\n", err, errno);
/* Test direct prog attachment */
test_skb_less_prog_attach(skel, tap_fd);
/* Test indirect prog attachment via link */
test_skb_less_link_create(skel, tap_fd);
close(tap_fd);
out_destroy_skel:
bpf_flow__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/flow_dissector.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <bpf/btf.h>
#include "bpf/libbpf_internal.h"
static int duration = 0;
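/* 'expect' encodes the required mask one character per CPU index: '1'
 * means that CPU must be set, any other character (and any index past the
 * end of the string) means it must be clear.
 */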
static void validate_mask(int case_nr, const char *exp, bool *mask, int n)
{
int i;
for (i = 0; exp[i]; i++) {
if (exp[i] == '1') {
if (CHECK(i + 1 > n, "mask_short",
"case #%d: mask too short, got n=%d, need at least %d\n",
case_nr, n, i + 1))
return;
CHECK(!mask[i], "cpu_not_set",
"case #%d: mask differs, expected cpu#%d SET\n",
case_nr, i);
} else {
CHECK(i < n && mask[i], "cpu_set",
"case #%d: mask differs, expected cpu#%d UNSET\n",
case_nr, i);
}
}
CHECK(i < n, "mask_long",
"case #%d: mask too long, got n=%d, expected at most %d\n",
case_nr, n, i);
}
static struct {
const char *cpu_mask;
const char *expect;
bool fails;
} test_cases[] = {
{ "0\n", "1", false },
{ "0,2\n", "101", false },
{ "0-2\n", "111", false },
{ "0-2,3-4\n", "11111", false },
{ "0", "1", false },
{ "0-2", "111", false },
{ "0,2", "101", false },
{ "0,1-3", "1111", false },
{ "0,1,2,3", "1111", false },
{ "0,2-3,5", "101101", false },
{ "3-3", "0001", false },
{ "2-4,6,9-10", "00111010011", false },
/* failure cases */
{ "", "", true },
{ "0-", "", true },
{ "0 ", "", true },
{ "0_1", "", true },
{ "1-0", "", true },
{ "-1", "", true },
};
void test_cpu_mask(void)
{
int i, err, n;
bool *mask;
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
mask = NULL;
err = parse_cpu_mask_str(test_cases[i].cpu_mask, &mask, &n);
if (test_cases[i].fails) {
CHECK(!err, "should_fail",
"case #%d: parsing should fail!\n", i + 1);
} else {
if (CHECK(err, "parse_err",
"case #%d: cpu mask parsing failed: %d\n",
i + 1, err))
continue;
validate_mask(i + 1, test_cases[i].expect, mask, n);
}
free(mask);
}
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cpu_mask.c |
// SPDX-License-Identifier: GPL-2.0
#include <sys/types.h>
#include <sys/socket.h>
#include <test_progs.h>
#include <bpf/btf.h>
#include "lsm_cgroup.skel.h"
#include "lsm_cgroup_nonvoid.skel.h"
#include "cgroup_helpers.h"
#include "network_helpers.h"
#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif
static struct btf *btf;
static __u32 query_prog_cnt(int cgroup_fd, const char *attach_func)
{
LIBBPF_OPTS(bpf_prog_query_opts, p);
int cnt = 0;
int i;
ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query");
if (!attach_func)
return p.prog_cnt;
/* When attach_func is provided, count the number of progs that
* attach to the given symbol.
*/
if (!btf)
btf = btf__load_vmlinux_btf();
if (!ASSERT_OK(libbpf_get_error(btf), "btf_vmlinux"))
return -1;
p.prog_ids = malloc(sizeof(u32) * p.prog_cnt);
p.prog_attach_flags = malloc(sizeof(u32) * p.prog_cnt);
ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query");
for (i = 0; i < p.prog_cnt; i++) {
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
int fd;
fd = bpf_prog_get_fd_by_id(p.prog_ids[i]);
ASSERT_GE(fd, 0, "prog_get_fd_by_id");
ASSERT_OK(bpf_prog_get_info_by_fd(fd, &info, &info_len),
"prog_info_by_fd");
close(fd);
if (info.attach_btf_id ==
btf__find_by_name_kind(btf, attach_func, BTF_KIND_FUNC))
cnt++;
}
free(p.prog_ids);
free(p.prog_attach_flags);
return cnt;
}
static void test_lsm_cgroup_functional(void)
{
DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts);
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
int cgroup_fd = -1, cgroup_fd2 = -1, cgroup_fd3 = -1;
int listen_fd, client_fd, accepted_fd;
struct lsm_cgroup *skel = NULL;
int post_create_prog_fd2 = -1;
int post_create_prog_fd = -1;
int bind_link_fd2 = -1;
int bind_prog_fd2 = -1;
int alloc_prog_fd = -1;
int bind_prog_fd = -1;
int bind_link_fd = -1;
int clone_prog_fd = -1;
int err, fd, prio;
socklen_t socklen;
cgroup_fd3 = test__join_cgroup("/sock_policy_empty");
if (!ASSERT_GE(cgroup_fd3, 0, "create empty cgroup"))
goto close_cgroup;
cgroup_fd2 = test__join_cgroup("/sock_policy_reuse");
if (!ASSERT_GE(cgroup_fd2, 0, "create cgroup for reuse"))
goto close_cgroup;
cgroup_fd = test__join_cgroup("/sock_policy");
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
goto close_cgroup;
skel = lsm_cgroup__open_and_load();
if (!ASSERT_OK_PTR(skel, "open_and_load"))
goto close_cgroup;
post_create_prog_fd = bpf_program__fd(skel->progs.socket_post_create);
post_create_prog_fd2 = bpf_program__fd(skel->progs.socket_post_create2);
bind_prog_fd = bpf_program__fd(skel->progs.socket_bind);
bind_prog_fd2 = bpf_program__fd(skel->progs.socket_bind2);
alloc_prog_fd = bpf_program__fd(skel->progs.socket_alloc);
clone_prog_fd = bpf_program__fd(skel->progs.socket_clone);
ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 0, "prog count");
ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 0, "total prog count");
err = bpf_prog_attach(alloc_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0);
if (err == -ENOTSUPP) {
test__skip();
goto close_cgroup;
}
if (!ASSERT_OK(err, "attach alloc_prog_fd"))
goto detach_cgroup;
ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 1, "prog count");
ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 1, "total prog count");
ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 0, "prog count");
err = bpf_prog_attach(clone_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0);
if (!ASSERT_OK(err, "attach clone_prog_fd"))
goto detach_cgroup;
ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 1, "prog count");
ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 2, "total prog count");
/* Make sure replacing works. */
ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 0, "prog count");
err = bpf_prog_attach(post_create_prog_fd, cgroup_fd,
BPF_LSM_CGROUP, 0);
if (!ASSERT_OK(err, "attach post_create_prog_fd"))
goto detach_cgroup;
ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count");
ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count");
attach_opts.replace_prog_fd = post_create_prog_fd;
err = bpf_prog_attach_opts(post_create_prog_fd2, cgroup_fd,
BPF_LSM_CGROUP, &attach_opts);
if (!ASSERT_OK(err, "prog replace post_create_prog_fd"))
goto detach_cgroup;
ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count");
ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count");
/* Try the same attach/replace via link API. */
ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 0, "prog count");
bind_link_fd = bpf_link_create(bind_prog_fd, cgroup_fd,
BPF_LSM_CGROUP, NULL);
if (!ASSERT_GE(bind_link_fd, 0, "link create bind_prog_fd"))
goto detach_cgroup;
ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
update_opts.old_prog_fd = bind_prog_fd;
update_opts.flags = BPF_F_REPLACE;
err = bpf_link_update(bind_link_fd, bind_prog_fd2, &update_opts);
if (!ASSERT_OK(err, "link update bind_prog_fd"))
goto detach_cgroup;
ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
/* Attach another instance of bind program to another cgroup.
* This should trigger the reuse of the trampoline shim (two
* programs attaching to the same btf_id).
*/
ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 0, "prog count");
bind_link_fd2 = bpf_link_create(bind_prog_fd2, cgroup_fd2,
BPF_LSM_CGROUP, NULL);
if (!ASSERT_GE(bind_link_fd2, 0, "link create bind_prog_fd2"))
goto detach_cgroup;
ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 1, "prog count");
ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
ASSERT_EQ(query_prog_cnt(cgroup_fd2, NULL), 1, "total prog count");
fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (!(skel->kconfig->CONFIG_SECURITY_APPARMOR
|| skel->kconfig->CONFIG_SECURITY_SELINUX
|| skel->kconfig->CONFIG_SECURITY_SMACK))
/* AF_UNIX is prohibited. */
ASSERT_LT(fd, 0, "socket(AF_UNIX)");
close(fd);
/* AF_INET6 gets default policy (sk_priority). */
fd = socket(AF_INET6, SOCK_STREAM, 0);
if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)"))
goto detach_cgroup;
prio = 0;
socklen = sizeof(prio);
ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
"getsockopt");
ASSERT_EQ(prio, 123, "sk_priority");
close(fd);
/* RX-capable AF_PACKET (ETH_P_ALL) is prohibited; TX-only AF_PACKET is allowed. */
ASSERT_LT(socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)), 0,
"socket(AF_PACKET, ..., ETH_P_ALL)");
fd = socket(AF_PACKET, SOCK_RAW, 0);
ASSERT_GE(fd, 0, "socket(AF_PACKET, ..., 0)");
/* TX-only AF_PACKET can not be rebound. */
struct sockaddr_ll sa = {
.sll_family = AF_PACKET,
.sll_protocol = htons(ETH_P_ALL),
};
ASSERT_LT(bind(fd, (struct sockaddr *)&sa, sizeof(sa)), 0,
"bind(ETH_P_ALL)");
close(fd);
/* Trigger passive open. */
listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
ASSERT_GE(listen_fd, 0, "start_server");
client_fd = connect_to_fd(listen_fd, 0);
ASSERT_GE(client_fd, 0, "connect_to_fd");
accepted_fd = accept(listen_fd, NULL, NULL);
ASSERT_GE(accepted_fd, 0, "accept");
prio = 0;
socklen = sizeof(prio);
ASSERT_GE(getsockopt(accepted_fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
"getsockopt");
ASSERT_EQ(prio, 234, "sk_priority");
/* These are replaced and never called. */
ASSERT_EQ(skel->bss->called_socket_post_create, 0, "called_create");
ASSERT_EQ(skel->bss->called_socket_bind, 0, "called_bind");
/* AF_INET6+SOCK_STREAM
* AF_PACKET+SOCK_RAW
* AF_UNIX+SOCK_STREAM if a non-bpf LSM is already installed
* listen_fd
* client_fd
* accepted_fd
*/
if (skel->kconfig->CONFIG_SECURITY_APPARMOR
|| skel->kconfig->CONFIG_SECURITY_SELINUX
|| skel->kconfig->CONFIG_SECURITY_SMACK)
/* AF_UNIX+SOCK_STREAM if a non-bpf LSM is already installed */
ASSERT_EQ(skel->bss->called_socket_post_create2, 6, "called_create2");
else
ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2");
/* start_server
* bind(ETH_P_ALL)
*/
ASSERT_EQ(skel->bss->called_socket_bind2, 2, "called_bind2");
/* Single accept(). */
ASSERT_EQ(skel->bss->called_socket_clone, 1, "called_clone");
/* AF_UNIX+SOCK_STREAM (failed)
* AF_INET6+SOCK_STREAM
* AF_PACKET+SOCK_RAW (failed)
* AF_PACKET+SOCK_RAW
* listen_fd
* client_fd
* accepted_fd
*/
ASSERT_EQ(skel->bss->called_socket_alloc, 7, "called_alloc");
close(listen_fd);
close(client_fd);
close(accepted_fd);
/* Make sure other cgroup doesn't trigger the programs. */
if (!ASSERT_OK(join_cgroup("/sock_policy_empty"), "join root cgroup"))
goto detach_cgroup;
fd = socket(AF_INET6, SOCK_STREAM, 0);
if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)"))
goto detach_cgroup;
prio = 0;
socklen = sizeof(prio);
ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
"getsockopt");
ASSERT_EQ(prio, 0, "sk_priority");
close(fd);
detach_cgroup:
ASSERT_GE(bpf_prog_detach2(post_create_prog_fd2, cgroup_fd,
BPF_LSM_CGROUP), 0, "detach_create");
close(bind_link_fd);
/* Don't close bind_link_fd2, exercise cgroup release cleanup. */
ASSERT_GE(bpf_prog_detach2(alloc_prog_fd, cgroup_fd,
BPF_LSM_CGROUP), 0, "detach_alloc");
ASSERT_GE(bpf_prog_detach2(clone_prog_fd, cgroup_fd,
BPF_LSM_CGROUP), 0, "detach_clone");
close_cgroup:
close(cgroup_fd);
close(cgroup_fd2);
close(cgroup_fd3);
lsm_cgroup__destroy(skel);
}
static void test_lsm_cgroup_nonvoid(void)
{
struct lsm_cgroup_nonvoid *skel = NULL;
skel = lsm_cgroup_nonvoid__open_and_load();
ASSERT_NULL(skel, "open succeeds");
lsm_cgroup_nonvoid__destroy(skel);
}
void test_lsm_cgroup(void)
{
if (test__start_subtest("functional"))
test_lsm_cgroup_functional();
if (test__start_subtest("nonvoid"))
test_lsm_cgroup_nonvoid();
btf__free(btf);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
void serial_test_flow_dissector_load_bytes(void)
{
struct bpf_flow_keys flow_keys;
struct bpf_insn prog[] = {
// BPF_REG_1 - 1st argument: context
// BPF_REG_2 - 2nd argument: offset, start at first byte
BPF_MOV64_IMM(BPF_REG_2, 0),
// BPF_REG_3 - 3rd argument: destination, reserve byte on stack
BPF_ALU64_REG(BPF_MOV, BPF_REG_3, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -1),
// BPF_REG_4 - 4th argument: copy one byte
BPF_MOV64_IMM(BPF_REG_4, 1),
// bpf_skb_load_bytes(ctx, 0, ptr, 1)
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
// if (ret == 0) return BPF_DROP (2)
BPF_MOV64_IMM(BPF_REG_0, BPF_DROP),
BPF_EXIT_INSN(),
// if (ret != 0) return BPF_OK (0)
BPF_MOV64_IMM(BPF_REG_0, BPF_OK),
BPF_EXIT_INSN(),
};
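/* Roughly equivalent C for the hand-assembled program above (a sketch,
 * not generated code):
 *
 * int dissect(struct __sk_buff *skb)
 * {
 * char byte;
 *
 * if (bpf_skb_load_bytes(skb, 0, &byte, 1))
 * return BPF_OK;
 * return BPF_DROP;
 * }
 */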
int fd, err;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = &flow_keys,
.data_size_out = sizeof(flow_keys),
.repeat = 1,
);
/* make sure bpf_skb_load_bytes is not allowed from skb-less context
*/
fd = bpf_test_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,
ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
ASSERT_GE(fd, 0, "bpf_test_load_program good fd");
err = bpf_prog_test_run_opts(fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
"test_run data_size_out");
ASSERT_EQ(topts.retval, BPF_OK, "test_run retval");
if (fd >= 0)
close(fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Oracle and/or its affiliates. */
#include <test_progs.h>
#include "trace_printk.lskel.h"
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"
#define SEARCHMSG "testing,testing"
void serial_test_trace_printk(void)
{
struct trace_printk_lskel__bss *bss;
int err = 0, iter = 0, found = 0;
struct trace_printk_lskel *skel;
char *buf = NULL;
FILE *fp = NULL;
size_t buflen;
skel = trace_printk_lskel__open();
if (!ASSERT_OK_PTR(skel, "trace_printk__open"))
return;
ASSERT_EQ(skel->rodata->fmt[0], 'T', "skel->rodata->fmt[0]");
skel->rodata->fmt[0] = 't';
err = trace_printk_lskel__load(skel);
if (!ASSERT_OK(err, "trace_printk__load"))
goto cleanup;
bss = skel->bss;
err = trace_printk_lskel__attach(skel);
if (!ASSERT_OK(err, "trace_printk__attach"))
goto cleanup;
if (access(TRACEFS_PIPE, F_OK) == 0)
fp = fopen(TRACEFS_PIPE, "r");
else
fp = fopen(DEBUGFS_PIPE, "r");
if (!ASSERT_OK_PTR(fp, "fopen(TRACE_PIPE)"))
goto cleanup;
/* We do not want to wait forever if this test fails... */
fcntl(fileno(fp), F_SETFL, O_NONBLOCK);
/* wait for tracepoint to trigger */
usleep(1);
trace_printk_lskel__detach(skel);
if (!ASSERT_GT(bss->trace_printk_ran, 0, "bss->trace_printk_ran"))
goto cleanup;
if (!ASSERT_GT(bss->trace_printk_ret, 0, "bss->trace_printk_ret"))
goto cleanup;
/* verify our search string is in the trace buffer */
while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) {
if (strstr(buf, SEARCHMSG) != NULL)
found++;
if (found == bss->trace_printk_ran)
break;
if (++iter > 1000)
break;
}
if (!ASSERT_EQ(found, bss->trace_printk_ran, "found"))
goto cleanup;
cleanup:
trace_printk_lskel__destroy(skel);
free(buf);
if (fp)
fclose(fp);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/trace_printk.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <test_progs.h>
#include <network_helpers.h>
#include <sys/stat.h>
#include <linux/sched.h>
#include <sys/syscall.h>
#include "test_pkt_md_access.skel.h"
#include "test_trace_ext.skel.h"
#include "test_trace_ext_tracing.skel.h"
static __u32 duration;
void test_trace_ext(void)
{
struct test_pkt_md_access *skel_pkt = NULL;
struct test_trace_ext_tracing *skel_trace = NULL;
struct test_trace_ext_tracing__bss *bss_trace;
struct test_trace_ext *skel_ext = NULL;
struct test_trace_ext__bss *bss_ext;
int err, pkt_fd, ext_fd;
struct bpf_program *prog;
char buf[100];
__u64 len;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
/* open/load/attach test_pkt_md_access */
skel_pkt = test_pkt_md_access__open_and_load();
if (CHECK(!skel_pkt, "setup", "classifier/test_pkt_md_access open failed\n"))
goto cleanup;
err = test_pkt_md_access__attach(skel_pkt);
if (CHECK(err, "setup", "classifier/test_pkt_md_access attach failed: %d\n", err))
goto cleanup;
prog = skel_pkt->progs.test_pkt_md_access;
pkt_fd = bpf_program__fd(prog);
/* open extension */
skel_ext = test_trace_ext__open();
if (CHECK(!skel_ext, "setup", "freplace/test_pkt_md_access open failed\n"))
goto cleanup;
/* set extension's attach target - test_pkt_md_access */
prog = skel_ext->progs.test_pkt_md_access_new;
bpf_program__set_attach_target(prog, pkt_fd, "test_pkt_md_access");
/* load/attach extension */
err = test_trace_ext__load(skel_ext);
if (CHECK(err, "setup", "freplace/test_pkt_md_access load failed\n")) {
libbpf_strerror(err, buf, sizeof(buf));
fprintf(stderr, "%s\n", buf);
goto cleanup;
}
err = test_trace_ext__attach(skel_ext);
if (CHECK(err, "setup", "freplace/test_pkt_md_access attach failed: %d\n", err))
goto cleanup;
prog = skel_ext->progs.test_pkt_md_access_new;
ext_fd = bpf_program__fd(prog);
/* open tracing */
skel_trace = test_trace_ext_tracing__open();
if (CHECK(!skel_trace, "setup", "tracing/test_pkt_md_access_new open failed\n"))
goto cleanup;
/* set tracing's attach target - fentry */
prog = skel_trace->progs.fentry;
bpf_program__set_attach_target(prog, ext_fd, "test_pkt_md_access_new");
/* set tracing's attach target - fexit */
prog = skel_trace->progs.fexit;
bpf_program__set_attach_target(prog, ext_fd, "test_pkt_md_access_new");
/* load/attach tracing */
err = test_trace_ext_tracing__load(skel_trace);
if (!ASSERT_OK(err, "tracing/test_pkt_md_access_new load")) {
libbpf_strerror(err, buf, sizeof(buf));
fprintf(stderr, "%s\n", buf);
goto cleanup;
}
err = test_trace_ext_tracing__attach(skel_trace);
if (!ASSERT_OK(err, "tracing/test_pkt_md_access_new attach"))
goto cleanup;
/* trigger the test */
err = bpf_prog_test_run_opts(pkt_fd, &topts);
ASSERT_OK(err, "test_run_opts err");
ASSERT_OK(topts.retval, "test_run_opts retval");
bss_ext = skel_ext->bss;
bss_trace = skel_trace->bss;
len = bss_ext->ext_called;
ASSERT_NEQ(bss_ext->ext_called, 0,
"failed to trigger freplace/test_pkt_md_access");
ASSERT_EQ(bss_trace->fentry_called, len,
"failed to trigger fentry/test_pkt_md_access_new");
ASSERT_EQ(bss_trace->fexit_called, len,
"failed to trigger fexit/test_pkt_md_access_new");
cleanup:
test_trace_ext_tracing__destroy(skel_trace);
test_trace_ext__destroy(skel_ext);
test_pkt_md_access__destroy(skel_pkt);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/trace_ext.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "test_progs.h"
#include "xdpwall.skel.h"
void test_xdpwall(void)
{
struct xdpwall *skel;
skel = xdpwall__open_and_load();
ASSERT_OK_PTR(skel, "Does LLMV have https://reviews.llvm.org/D109073?");
xdpwall__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdpwall.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include "test_ksyms.skel.h"
#include <sys/stat.h>
static int duration;
void test_ksyms(void)
{
const char *btf_path = "/sys/kernel/btf/vmlinux";
struct test_ksyms *skel;
struct test_ksyms__data *data;
__u64 link_fops_addr, per_cpu_start_addr;
struct stat st;
__u64 btf_size;
int err;
err = kallsyms_find("bpf_link_fops", &link_fops_addr);
if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
return;
if (CHECK(err == -ENOENT, "ksym_find", "symbol 'bpf_link_fops' not found\n"))
return;
err = kallsyms_find("__per_cpu_start", &per_cpu_start_addr);
if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
return;
if (CHECK(err == -ENOENT, "ksym_find", "symbol 'per_cpu_start' not found\n"))
return;
if (CHECK(stat(btf_path, &st), "stat_btf", "err %d\n", errno))
return;
btf_size = st.st_size;
skel = test_ksyms__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n"))
return;
err = test_ksyms__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
/* trigger tracepoint */
usleep(1);
data = skel->data;
CHECK(data->out__bpf_link_fops != link_fops_addr, "bpf_link_fops",
"got 0x%llx, exp 0x%llx\n",
data->out__bpf_link_fops, link_fops_addr);
CHECK(data->out__bpf_link_fops1 != 0, "bpf_link_fops1",
"got %llu, exp %llu\n", data->out__bpf_link_fops1, (__u64)0);
CHECK(data->out__btf_size != btf_size, "btf_size",
"got %llu, exp %llu\n", data->out__btf_size, btf_size);
CHECK(data->out__per_cpu_start != per_cpu_start_addr, "__per_cpu_start",
"got %llu, exp %llu\n", data->out__per_cpu_start,
per_cpu_start_addr);
cleanup:
test_ksyms__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/ksyms.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "test_subprogs.skel.h"
#include "test_subprogs_unused.skel.h"
struct toggler_ctx {
int fd;
bool stop;
};
static void *toggle_jit_harden(void *arg)
{
struct toggler_ctx *ctx = arg;
char two = '2';
char zero = '0';
while (!ctx->stop) {
lseek(ctx->fd, SEEK_SET, 0);
write(ctx->fd, &two, sizeof(two));
lseek(ctx->fd, SEEK_SET, 0);
write(ctx->fd, &zero, sizeof(zero));
}
return NULL;
}
static void test_subprogs_with_jit_harden_toggling(void)
{
struct toggler_ctx ctx;
pthread_t toggler;
int err;
unsigned int i, loop = 10;
ctx.fd = open("/proc/sys/net/core/bpf_jit_harden", O_RDWR);
if (!ASSERT_GE(ctx.fd, 0, "open bpf_jit_harden"))
return;
ctx.stop = false;
err = pthread_create(&toggler, NULL, toggle_jit_harden, &ctx);
if (!ASSERT_OK(err, "new toggler"))
goto out;
	/* Give the toggler thread a chance to run */
usleep(1);
for (i = 0; i < loop; i++) {
struct test_subprogs *skel = test_subprogs__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel open"))
break;
test_subprogs__destroy(skel);
}
ctx.stop = true;
pthread_join(toggler, NULL);
out:
close(ctx.fd);
}
static void test_subprogs_alone(void)
{
struct test_subprogs *skel;
struct test_subprogs_unused *skel2;
int err;
skel = test_subprogs__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
err = test_subprogs__attach(skel);
if (!ASSERT_OK(err, "skel attach"))
goto cleanup;
usleep(1);
ASSERT_EQ(skel->bss->res1, 12, "res1");
ASSERT_EQ(skel->bss->res2, 17, "res2");
ASSERT_EQ(skel->bss->res3, 19, "res3");
ASSERT_EQ(skel->bss->res4, 36, "res4");
skel2 = test_subprogs_unused__open_and_load();
ASSERT_OK_PTR(skel2, "unused_progs_skel");
test_subprogs_unused__destroy(skel2);
cleanup:
test_subprogs__destroy(skel);
}
void test_subprogs(void)
{
if (test__start_subtest("subprogs_alone"))
test_subprogs_alone();
if (test__start_subtest("subprogs_and_jit_harden"))
test_subprogs_with_jit_harden_toggling();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/subprogs.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <bpf/btf.h>
#include "test_log_fixup.skel.h"
enum trunc_type {
TRUNC_NONE,
TRUNC_PARTIAL,
TRUNC_FULL,
};
static void bad_core_relo(size_t log_buf_size, enum trunc_type trunc_type)
{
char log_buf[8 * 1024];
struct test_log_fixup* skel;
int err;
skel = test_log_fixup__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
bpf_program__set_autoload(skel->progs.bad_relo, true);
memset(log_buf, 0, sizeof(log_buf));
bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, log_buf_size ?: sizeof(log_buf));
bpf_program__set_log_level(skel->progs.bad_relo, 1 | 8); /* BPF_LOG_FIXED to force truncation */
err = test_log_fixup__load(skel);
if (!ASSERT_ERR(err, "load_fail"))
goto cleanup;
ASSERT_HAS_SUBSTR(log_buf,
"0: <invalid CO-RE relocation>\n"
"failed to resolve CO-RE relocation <byte_sz> ",
"log_buf_part1");
switch (trunc_type) {
case TRUNC_NONE:
ASSERT_HAS_SUBSTR(log_buf,
"struct task_struct___bad.fake_field (0:1 @ offset 4)\n",
"log_buf_part2");
ASSERT_HAS_SUBSTR(log_buf,
"max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n",
"log_buf_end");
break;
case TRUNC_PARTIAL:
/* we should get full libbpf message patch */
ASSERT_HAS_SUBSTR(log_buf,
"struct task_struct___bad.fake_field (0:1 @ offset 4)\n",
"log_buf_part2");
/* we shouldn't get full end of BPF verifier log */
ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"),
"log_buf_end");
break;
case TRUNC_FULL:
/* we shouldn't get second part of libbpf message patch */
ASSERT_NULL(strstr(log_buf, "struct task_struct___bad.fake_field (0:1 @ offset 4)\n"),
"log_buf_part2");
/* we shouldn't get full end of BPF verifier log */
ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"),
"log_buf_end");
break;
}
if (env.verbosity > VERBOSE_NONE)
printf("LOG: \n=================\n%s=================\n", log_buf);
cleanup:
test_log_fixup__destroy(skel);
}
static void bad_core_relo_subprog(void)
{
char log_buf[8 * 1024];
struct test_log_fixup* skel;
int err;
skel = test_log_fixup__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
bpf_program__set_autoload(skel->progs.bad_relo_subprog, true);
bpf_program__set_log_buf(skel->progs.bad_relo_subprog, log_buf, sizeof(log_buf));
err = test_log_fixup__load(skel);
if (!ASSERT_ERR(err, "load_fail"))
goto cleanup;
ASSERT_HAS_SUBSTR(log_buf,
": <invalid CO-RE relocation>\n"
"failed to resolve CO-RE relocation <byte_off> ",
"log_buf");
ASSERT_HAS_SUBSTR(log_buf,
"struct task_struct___bad.fake_field_subprog (0:2 @ offset 8)\n",
"log_buf");
if (env.verbosity > VERBOSE_NONE)
printf("LOG: \n=================\n%s=================\n", log_buf);
cleanup:
test_log_fixup__destroy(skel);
}
static void missing_map(void)
{
char log_buf[8 * 1024];
struct test_log_fixup* skel;
int err;
skel = test_log_fixup__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
bpf_map__set_autocreate(skel->maps.missing_map, false);
bpf_program__set_autoload(skel->progs.use_missing_map, true);
bpf_program__set_log_buf(skel->progs.use_missing_map, log_buf, sizeof(log_buf));
err = test_log_fixup__load(skel);
if (!ASSERT_ERR(err, "load_fail"))
goto cleanup;
ASSERT_TRUE(bpf_map__autocreate(skel->maps.existing_map), "existing_map_autocreate");
ASSERT_FALSE(bpf_map__autocreate(skel->maps.missing_map), "missing_map_autocreate");
ASSERT_HAS_SUBSTR(log_buf,
": <invalid BPF map reference>\n"
"BPF map 'missing_map' is referenced but wasn't created\n",
"log_buf");
if (env.verbosity > VERBOSE_NONE)
printf("LOG: \n=================\n%s=================\n", log_buf);
cleanup:
test_log_fixup__destroy(skel);
}
static void missing_kfunc(void)
{
char log_buf[8 * 1024];
struct test_log_fixup* skel;
int err;
skel = test_log_fixup__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
bpf_program__set_autoload(skel->progs.use_missing_kfunc, true);
bpf_program__set_log_buf(skel->progs.use_missing_kfunc, log_buf, sizeof(log_buf));
err = test_log_fixup__load(skel);
if (!ASSERT_ERR(err, "load_fail"))
goto cleanup;
ASSERT_HAS_SUBSTR(log_buf,
"0: <invalid kfunc call>\n"
"kfunc 'bpf_nonexistent_kfunc' is referenced but wasn't resolved\n",
"log_buf");
if (env.verbosity > VERBOSE_NONE)
printf("LOG: \n=================\n%s=================\n", log_buf);
cleanup:
test_log_fixup__destroy(skel);
}
void test_log_fixup(void)
{
if (test__start_subtest("bad_core_relo_trunc_none"))
bad_core_relo(0, TRUNC_NONE /* full buf */);
if (test__start_subtest("bad_core_relo_trunc_partial"))
bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */);
if (test__start_subtest("bad_core_relo_trunc_full"))
bad_core_relo(210, TRUNC_FULL /* truncate also libbpf's message patch */);
if (test__start_subtest("bad_core_relo_subprog"))
bad_core_relo_subprog();
if (test__start_subtest("missing_map"))
missing_map();
if (test__start_subtest("missing_kfunc"))
missing_kfunc();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/log_fixup.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <test_progs.h>
#include <network_helpers.h>
#include "map_ptr_kern.lskel.h"
void test_map_ptr(void)
{
struct map_ptr_kern_lskel *skel;
char buf[128];
int err;
int page_size = getpagesize();
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = 1,
);
skel = map_ptr_kern_lskel__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
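	/* ringbuf size depends on the runtime page size, so set it before load */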
skel->maps.m_ringbuf.max_entries = page_size;
err = map_ptr_kern_lskel__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
skel->bss->page_size = page_size;
err = bpf_prog_test_run_opts(skel->progs.cg_skb.prog_fd, &topts);
if (!ASSERT_OK(err, "test_run"))
goto cleanup;
if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
goto cleanup;
cleanup:
map_ptr_kern_lskel__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/map_ptr.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <test_progs.h>
#include "test_task_pt_regs.skel.h"
/* uprobe attach point */
static noinline void trigger_func(void)
{
asm volatile ("");
}
void test_task_pt_regs(void)
{
struct test_task_pt_regs *skel;
struct bpf_link *uprobe_link;
ssize_t uprobe_offset;
bool match;
uprobe_offset = get_uprobe_offset(&trigger_func);
if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
return;
skel = test_task_pt_regs__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
goto cleanup;
uprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uprobe,
false /* retprobe */,
0 /* self pid */,
"/proc/self/exe",
uprobe_offset);
if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe"))
goto cleanup;
skel->links.handle_uprobe = uprobe_link;
/* trigger & validate uprobe */
trigger_func();
if (!ASSERT_EQ(skel->bss->uprobe_res, 1, "check_uprobe_res"))
goto cleanup;
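	/* the uprobe handler recorded both bpf_task_pt_regs(current) and its
	 * own ctx registers; the two snapshots should be identical
	 */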
match = !memcmp(&skel->bss->current_regs, &skel->bss->ctx_regs,
sizeof(skel->bss->current_regs));
ASSERT_TRUE(match, "check_regs_match");
cleanup:
test_task_pt_regs__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/task_pt_regs.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <bpf/btf.h>
static int duration = 0;
void btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
vfprintf(ctx, fmt, args);
}
static struct btf_dump_test_case {
const char *name;
const char *file;
bool known_ptr_sz;
} btf_dump_test_cases[] = {
{"btf_dump: syntax", "btf_dump_test_case_syntax", true},
{"btf_dump: ordering", "btf_dump_test_case_ordering", false},
{"btf_dump: padding", "btf_dump_test_case_padding", true},
{"btf_dump: packing", "btf_dump_test_case_packing", true},
{"btf_dump: bitfields", "btf_dump_test_case_bitfields", true},
{"btf_dump: multidim", "btf_dump_test_case_multidim", false},
{"btf_dump: namespacing", "btf_dump_test_case_namespacing", false},
};
static int btf_dump_all_types(const struct btf *btf, void *ctx)
{
size_t type_cnt = btf__type_cnt(btf);
struct btf_dump *d;
int err = 0, id;
d = btf_dump__new(btf, btf_dump_printf, ctx, NULL);
err = libbpf_get_error(d);
if (err)
return err;
for (id = 1; id < type_cnt; id++) {
err = btf_dump__dump_type(d, id);
if (err)
goto done;
}
done:
btf_dump__free(d);
return err;
}
static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
{
char test_file[256], out_file[256], diff_cmd[1024];
struct btf *btf = NULL;
int err = 0, fd = -1;
FILE *f = NULL;
snprintf(test_file, sizeof(test_file), "%s.bpf.o", t->file);
btf = btf__parse_elf(test_file, NULL);
if (!ASSERT_OK_PTR(btf, "btf_parse_elf")) {
err = -PTR_ERR(btf);
btf = NULL;
goto done;
}
/* tests with t->known_ptr_sz have no "long" or "unsigned long" type,
* so it's impossible to determine correct pointer size; but if they
* do, it should be 8 regardless of host architecture, becaues BPF
* target is always 64-bit
*/
if (!t->known_ptr_sz) {
btf__set_pointer_size(btf, 8);
} else {
CHECK(btf__pointer_size(btf) != 8, "ptr_sz", "exp %d, got %zu\n",
8, btf__pointer_size(btf));
}
snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
fd = mkstemp(out_file);
if (!ASSERT_GE(fd, 0, "create_tmp")) {
err = fd;
goto done;
}
f = fdopen(fd, "w");
if (CHECK(f == NULL, "open_tmp", "failed to open file: %s(%d)\n",
strerror(errno), errno)) {
close(fd);
goto done;
}
err = btf_dump_all_types(btf, f);
fclose(f);
close(fd);
if (CHECK(err, "btf_dump", "failure during C dumping: %d\n", err)) {
goto done;
}
snprintf(test_file, sizeof(test_file), "progs/%s.c", t->file);
if (access(test_file, R_OK) == -1)
/*
* When the test is run with O=, kselftest copies TEST_FILES
* without preserving the directory structure.
*/
snprintf(test_file, sizeof(test_file), "%s.c", t->file);
/*
* Diff test output and expected test output, contained between
* START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
* For expected output lines, everything before '*' is stripped out.
* Also lines containing comment start and comment end markers are
* ignored.
*/
snprintf(diff_cmd, sizeof(diff_cmd),
"awk '/START-EXPECTED-OUTPUT/{out=1;next} "
"/END-EXPECTED-OUTPUT/{out=0} "
"/\\/\\*|\\*\\//{next} " /* ignore comment start/end lines */
"out {sub(/^[ \\t]*\\*/, \"\"); print}' '%s' | diff -u - '%s'",
test_file, out_file);
err = system(diff_cmd);
if (CHECK(err, "diff",
"differing test output, output=%s, err=%d, diff cmd:\n%s\n",
out_file, err, diff_cmd))
goto done;
remove(out_file);
done:
btf__free(btf);
return err;
}
static char *dump_buf;
static size_t dump_buf_sz;
static FILE *dump_buf_file;
static void test_btf_dump_incremental(void)
{
struct btf *btf = NULL;
struct btf_dump *d = NULL;
int id, err, i;
dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz);
if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream"))
return;
btf = btf__new_empty();
if (!ASSERT_OK_PTR(btf, "new_empty"))
goto err_out;
d = btf_dump__new(btf, btf_dump_printf, dump_buf_file, NULL);
if (!ASSERT_OK(libbpf_get_error(d), "btf_dump__new"))
goto err_out;
/* First, generate BTF corresponding to the following C code:
*
* enum x;
*
* enum x { X = 1 };
*
* enum { Y = 1 };
*
* struct s;
*
* struct s { int x; };
*
*/
id = btf__add_enum(btf, "x", 4);
ASSERT_EQ(id, 1, "enum_declaration_id");
id = btf__add_enum(btf, "x", 4);
ASSERT_EQ(id, 2, "named_enum_id");
err = btf__add_enum_value(btf, "X", 1);
ASSERT_OK(err, "named_enum_val_ok");
id = btf__add_enum(btf, NULL, 4);
ASSERT_EQ(id, 3, "anon_enum_id");
err = btf__add_enum_value(btf, "Y", 1);
ASSERT_OK(err, "anon_enum_val_ok");
id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
ASSERT_EQ(id, 4, "int_id");
id = btf__add_fwd(btf, "s", BTF_FWD_STRUCT);
ASSERT_EQ(id, 5, "fwd_id");
id = btf__add_struct(btf, "s", 4);
ASSERT_EQ(id, 6, "struct_id");
err = btf__add_field(btf, "x", 4, 0, 0);
ASSERT_OK(err, "field_ok");
for (i = 1; i < btf__type_cnt(btf); i++) {
err = btf_dump__dump_type(d, i);
ASSERT_OK(err, "dump_type_ok");
}
fflush(dump_buf_file);
dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */
ASSERT_STREQ(dump_buf,
"enum x;\n"
"\n"
"enum x {\n"
" X = 1,\n"
"};\n"
"\n"
"enum {\n"
" Y = 1,\n"
"};\n"
"\n"
"struct s;\n"
"\n"
"struct s {\n"
" int x;\n"
"};\n\n", "c_dump1");
	/* Now, after dumping original BTF, append another struct that embeds
	 * an anonymous enum. It also has a name conflict with the first struct:
	 *
	 * struct s___2 {
	 *	enum x x;
	 *	enum { Y___2 = 1 } y;
	 *	struct s s;
	 * };
	 *
	 * This tests that the btf_dump'er maintains its internal state
	 * properly. Note the Y___2 enum value: we've already emitted that enum
	 * as a global anonymous enum, so btf_dump ensures that emitted enum
	 * values don't conflict.
	 */
fseek(dump_buf_file, 0, SEEK_SET);
id = btf__add_struct(btf, "s", 4);
ASSERT_EQ(id, 7, "struct_id");
err = btf__add_field(btf, "x", 2, 0, 0);
ASSERT_OK(err, "field_ok");
err = btf__add_field(btf, "y", 3, 32, 0);
ASSERT_OK(err, "field_ok");
err = btf__add_field(btf, "s", 6, 64, 0);
ASSERT_OK(err, "field_ok");
for (i = 1; i < btf__type_cnt(btf); i++) {
err = btf_dump__dump_type(d, i);
ASSERT_OK(err, "dump_type_ok");
}
fflush(dump_buf_file);
dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */
ASSERT_STREQ(dump_buf,
"struct s___2 {\n"
" enum x x;\n"
" enum {\n"
" Y___2 = 1,\n"
" } y;\n"
" struct s s;\n"
"};\n\n" , "c_dump1");
err_out:
fclose(dump_buf_file);
free(dump_buf);
btf_dump__free(d);
btf__free(btf);
}
#define STRSIZE 4096
static void btf_dump_snprintf(void *ctx, const char *fmt, va_list args)
{
char *s = ctx, new[STRSIZE];
vsnprintf(new, STRSIZE, fmt, args);
if (strlen(s) < STRSIZE)
strncat(s, new, STRSIZE - strlen(s) - 1);
}
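/* Render 'ptr' as the BTF type named 'name' (optionally stripping 'prefix'
 * such as "struct" or "enum"), translating BTF_F_* flags into dump options,
 * and compare the string written into 'str' against 'expected_val'.
 */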
static int btf_dump_data(struct btf *btf, struct btf_dump *d,
char *name, char *prefix, __u64 flags, void *ptr,
size_t ptr_sz, char *str, const char *expected_val)
{
DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts);
size_t type_sz;
__s32 type_id;
int ret = 0;
if (flags & BTF_F_COMPACT)
opts.compact = true;
if (flags & BTF_F_NONAME)
opts.skip_names = true;
if (flags & BTF_F_ZERO)
opts.emit_zeroes = true;
if (prefix) {
ASSERT_STRNEQ(name, prefix, strlen(prefix),
"verify prefix match");
name += strlen(prefix) + 1;
}
type_id = btf__find_by_name(btf, name);
if (!ASSERT_GE(type_id, 0, "find type id"))
return -ENOENT;
type_sz = btf__resolve_size(btf, type_id);
str[0] = '\0';
ret = btf_dump__dump_type_data(d, type_id, ptr, ptr_sz, &opts);
if (type_sz <= ptr_sz) {
if (!ASSERT_EQ(ret, type_sz, "failed/unexpected type_sz"))
return -EINVAL;
} else {
if (!ASSERT_EQ(ret, -E2BIG, "failed to return -E2BIG"))
return -EINVAL;
}
if (!ASSERT_STREQ(str, expected_val, "ensure expected/actual match"))
return -EFAULT;
return 0;
}
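/* Dump a value of _type using _flags and check the rendered string against
 * _expected.
 */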
#define TEST_BTF_DUMP_DATA(_b, _d, _prefix, _str, _type, _flags, \
_expected, ...) \
do { \
char __ptrtype[64] = #_type; \
char *_ptrtype = (char *)__ptrtype; \
_type _ptrdata = __VA_ARGS__; \
void *_ptr = &_ptrdata; \
\
(void) btf_dump_data(_b, _d, _ptrtype, _prefix, _flags, \
_ptr, sizeof(_type), _str, \
_expected); \
} while (0)
/* Use where expected data string matches its stringified declaration */
#define TEST_BTF_DUMP_DATA_C(_b, _d, _prefix, _str, _type, _flags, \
...) \
TEST_BTF_DUMP_DATA(_b, _d, _prefix, _str, _type, _flags, \
"(" #_type ")" #__VA_ARGS__, __VA_ARGS__)
/* overflow test; pass typesize < expected type size, ensure E2BIG returned */
#define TEST_BTF_DUMP_DATA_OVER(_b, _d, _prefix, _str, _type, _type_sz, \
_expected, ...) \
do { \
char __ptrtype[64] = #_type; \
char *_ptrtype = (char *)__ptrtype; \
_type _ptrdata = __VA_ARGS__; \
void *_ptr = &_ptrdata; \
\
(void) btf_dump_data(_b, _d, _ptrtype, _prefix, 0, \
_ptr, _type_sz, _str, _expected); \
} while (0)
#define TEST_BTF_DUMP_VAR(_b, _d, _prefix, _str, _var, _type, _flags, \
_expected, ...) \
do { \
_type _ptrdata = __VA_ARGS__; \
void *_ptr = &_ptrdata; \
\
(void) btf_dump_data(_b, _d, _var, _prefix, _flags, \
_ptr, sizeof(_type), _str, \
_expected); \
} while (0)
static void test_btf_dump_int_data(struct btf *btf, struct btf_dump *d,
char *str)
{
#ifdef __SIZEOF_INT128__
unsigned __int128 i = 0xffffffffffffffff;
/* this dance is required because we cannot directly initialize
* a 128-bit value to anything larger than a 64-bit value.
*/
i = (i << 64) | (i - 1);
#endif
/* simple int */
TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, int, BTF_F_COMPACT, 1234);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_NONAME,
"1234", 1234);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, 0, "(int)1234", 1234);
/* zero value should be printed at toplevel */
TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT, "(int)0", 0);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_NONAME,
"0", 0);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_ZERO,
"(int)0", 0);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, int,
BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
"0", 0);
TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, int, BTF_F_COMPACT, -4567);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_NONAME,
"-4567", -4567);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, 0, "(int)-4567", -4567);
TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, int, sizeof(int)-1, "", 1);
#ifdef __SIZEOF_INT128__
	/* gcc encodes the unsigned __int128 type with name "__int128 unsigned"
	 * in DWARF, while clang encodes it as "unsigned __int128". Check which
	 * variant is available before running the actual test.
	 */
if (btf__find_by_name(btf, "unsigned __int128") > 0) {
TEST_BTF_DUMP_DATA(btf, d, NULL, str, unsigned __int128, BTF_F_COMPACT,
"(unsigned __int128)0xffffffffffffffff",
0xffffffffffffffff);
ASSERT_OK(btf_dump_data(btf, d, "unsigned __int128", NULL, 0, &i, 16, str,
"(unsigned __int128)0xfffffffffffffffffffffffffffffffe"),
"dump unsigned __int128");
} else if (btf__find_by_name(btf, "__int128 unsigned") > 0) {
TEST_BTF_DUMP_DATA(btf, d, NULL, str, __int128 unsigned, BTF_F_COMPACT,
"(__int128 unsigned)0xffffffffffffffff",
0xffffffffffffffff);
ASSERT_OK(btf_dump_data(btf, d, "__int128 unsigned", NULL, 0, &i, 16, str,
"(__int128 unsigned)0xfffffffffffffffffffffffffffffffe"),
"dump unsigned __int128");
} else {
ASSERT_TRUE(false, "unsigned_int128_not_found");
}
#endif
}
static void test_btf_dump_float_data(struct btf *btf, struct btf_dump *d,
char *str)
{
float t1 = 1.234567;
float t2 = -1.234567;
float t3 = 0.0;
double t4 = 5.678912;
double t5 = -5.678912;
double t6 = 0.0;
long double t7 = 9.876543;
long double t8 = -9.876543;
long double t9 = 0.0;
	/* the kernel likely has no float types in its BTF, so add some of
	 * various sizes ourselves.
	 */
ASSERT_GT(btf__add_float(btf, "test_float", 4), 0, "add float");
ASSERT_OK(btf_dump_data(btf, d, "test_float", NULL, 0, &t1, 4, str,
"(test_float)1.234567"), "dump float");
ASSERT_OK(btf_dump_data(btf, d, "test_float", NULL, 0, &t2, 4, str,
"(test_float)-1.234567"), "dump float");
ASSERT_OK(btf_dump_data(btf, d, "test_float", NULL, 0, &t3, 4, str,
"(test_float)0.000000"), "dump float");
ASSERT_GT(btf__add_float(btf, "test_double", 8), 0, "add_double");
ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t4, 8, str,
"(test_double)5.678912"), "dump double");
ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t5, 8, str,
"(test_double)-5.678912"), "dump double");
ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t6, 8, str,
"(test_double)0.000000"), "dump double");
ASSERT_GT(btf__add_float(btf, "test_long_double", 16), 0, "add long double");
ASSERT_OK(btf_dump_data(btf, d, "test_long_double", NULL, 0, &t7, 16,
str, "(test_long_double)9.876543"),
"dump long_double");
ASSERT_OK(btf_dump_data(btf, d, "test_long_double", NULL, 0, &t8, 16,
str, "(test_long_double)-9.876543"),
"dump long_double");
ASSERT_OK(btf_dump_data(btf, d, "test_long_double", NULL, 0, &t9, 16,
str, "(test_long_double)0.000000"),
"dump long_double");
}
static void test_btf_dump_char_data(struct btf *btf, struct btf_dump *d,
char *str)
{
/* simple char */
TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, char, BTF_F_COMPACT, 100);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_NONAME,
"100", 100);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, 0, "(char)100", 100);
/* zero value should be printed at toplevel */
TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT,
"(char)0", 0);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_NONAME,
"0", 0);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_ZERO,
"(char)0", 0);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
"0", 0);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, 0, "(char)0", 0);
TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, char, sizeof(char)-1, "", 100);
}
static void test_btf_dump_typedef_data(struct btf *btf, struct btf_dump *d,
char *str)
{
/* simple typedef */
TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, uint64_t, BTF_F_COMPACT, 100);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT | BTF_F_NONAME,
"1", 1);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, 0, "(u64)1", 1);
/* zero value should be printed at toplevel */
TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT, "(u64)0", 0);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT | BTF_F_NONAME,
"0", 0);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT | BTF_F_ZERO,
"(u64)0", 0);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64,
BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
"0", 0);
TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, 0, "(u64)0", 0);
/* typedef struct */
TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, atomic_t, BTF_F_COMPACT,
{.counter = (int)1,});
TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT | BTF_F_NONAME,
"{1,}", { .counter = 1 });
TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, 0,
"(atomic_t){\n"
" .counter = (int)1,\n"
"}",
{.counter = 1,});
/* typedef with 0 value should be printed at toplevel */
TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT, "(atomic_t){}",
{.counter = 0,});
TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT | BTF_F_NONAME,
"{}", {.counter = 0,});
TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, 0,
"(atomic_t){\n"
"}",
{.counter = 0,});
TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT | BTF_F_ZERO,
"(atomic_t){.counter = (int)0,}",
{.counter = 0,});
TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t,
BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
"{0,}", {.counter = 0,});
TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_ZERO,
"(atomic_t){\n"
" .counter = (int)0,\n"
"}",
{ .counter = 0,});
/* overflow should show type but not value since it overflows */
TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, atomic_t, sizeof(atomic_t)-1,
"(atomic_t){\n", { .counter = 1});
}
static void test_btf_dump_enum_data(struct btf *btf, struct btf_dump *d,
char *str)
{
/* enum where enum value does (and does not) exist */
TEST_BTF_DUMP_DATA_C(btf, d, "enum", str, enum bpf_cmd, BTF_F_COMPACT,
BPF_MAP_CREATE);
TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, BTF_F_COMPACT,
"(enum bpf_cmd)BPF_MAP_CREATE", 0);
TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd,
BTF_F_COMPACT | BTF_F_NONAME,
"BPF_MAP_CREATE",
BPF_MAP_CREATE);
TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, 0,
"(enum bpf_cmd)BPF_MAP_CREATE",
BPF_MAP_CREATE);
TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd,
BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
"BPF_MAP_CREATE", 0);
TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd,
BTF_F_COMPACT | BTF_F_ZERO,
"(enum bpf_cmd)BPF_MAP_CREATE",
BPF_MAP_CREATE);
TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd,
BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
"BPF_MAP_CREATE", BPF_MAP_CREATE);
TEST_BTF_DUMP_DATA_C(btf, d, "enum", str, enum bpf_cmd, BTF_F_COMPACT, 2000);
TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd,
BTF_F_COMPACT | BTF_F_NONAME,
"2000", 2000);
TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, 0,
"(enum bpf_cmd)2000", 2000);
TEST_BTF_DUMP_DATA_OVER(btf, d, "enum", str, enum bpf_cmd,
sizeof(enum bpf_cmd) - 1, "", BPF_MAP_CREATE);
}
static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d,
char *str)
{
DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts);
char zero_data[512] = { };
char type_data[512];
void *fops = type_data;
void *skb = type_data;
size_t type_sz;
__s32 type_id;
char *cmpstr;
int ret;
memset(type_data, 255, sizeof(type_data));
/* simple struct */
TEST_BTF_DUMP_DATA_C(btf, d, "struct", str, struct btf_enum, BTF_F_COMPACT,
{.name_off = (__u32)3,.val = (__s32)-1,});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum,
BTF_F_COMPACT | BTF_F_NONAME,
"{3,-1,}",
{ .name_off = 3, .val = -1,});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, 0,
"(struct btf_enum){\n"
" .name_off = (__u32)3,\n"
" .val = (__s32)-1,\n"
"}",
{ .name_off = 3, .val = -1,});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum,
BTF_F_COMPACT | BTF_F_NONAME,
"{-1,}",
{ .name_off = 0, .val = -1,});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum,
BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
"{0,-1,}",
{ .name_off = 0, .val = -1,});
/* empty struct should be printed */
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, BTF_F_COMPACT,
"(struct btf_enum){}",
{ .name_off = 0, .val = 0,});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum,
BTF_F_COMPACT | BTF_F_NONAME,
"{}",
{ .name_off = 0, .val = 0,});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, 0,
"(struct btf_enum){\n"
"}",
{ .name_off = 0, .val = 0,});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum,
BTF_F_COMPACT | BTF_F_ZERO,
"(struct btf_enum){.name_off = (__u32)0,.val = (__s32)0,}",
{ .name_off = 0, .val = 0,});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum,
BTF_F_ZERO,
"(struct btf_enum){\n"
" .name_off = (__u32)0,\n"
" .val = (__s32)0,\n"
"}",
{ .name_off = 0, .val = 0,});
/* struct with pointers */
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, BTF_F_COMPACT,
"(struct list_head){.next = (struct list_head *)0x1,}",
{ .next = (struct list_head *)1 });
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, 0,
"(struct list_head){\n"
" .next = (struct list_head *)0x1,\n"
"}",
{ .next = (struct list_head *)1 });
/* NULL pointer should not be displayed */
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, BTF_F_COMPACT,
"(struct list_head){}",
{ .next = (struct list_head *)0 });
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, 0,
"(struct list_head){\n"
"}",
{ .next = (struct list_head *)0 });
/* struct with function pointers */
type_id = btf__find_by_name(btf, "file_operations");
if (ASSERT_GT(type_id, 0, "find type id")) {
type_sz = btf__resolve_size(btf, type_id);
str[0] = '\0';
ret = btf_dump__dump_type_data(d, type_id, fops, type_sz, &opts);
ASSERT_EQ(ret, type_sz,
"unexpected return value dumping file_operations");
cmpstr =
"(struct file_operations){\n"
" .owner = (struct module *)0xffffffffffffffff,\n"
" .llseek = (loff_t (*)(struct file *, loff_t, int))0xffffffffffffffff,";
ASSERT_STRNEQ(str, cmpstr, strlen(cmpstr), "file_operations");
}
/* struct with char array */
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, BTF_F_COMPACT,
"(struct bpf_prog_info){.name = (char[16])['f','o','o',],}",
{ .name = "foo",});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info,
BTF_F_COMPACT | BTF_F_NONAME,
"{['f','o','o',],}",
{.name = "foo",});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, 0,
"(struct bpf_prog_info){\n"
" .name = (char[16])[\n"
" 'f',\n"
" 'o',\n"
" 'o',\n"
" ],\n"
"}",
{.name = "foo",});
/* leading null char means do not display string */
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, BTF_F_COMPACT,
"(struct bpf_prog_info){}",
{.name = {'\0', 'f', 'o', 'o'}});
/* handle non-printable characters */
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, BTF_F_COMPACT,
"(struct bpf_prog_info){.name = (char[16])[1,2,3,],}",
{ .name = {1, 2, 3, 0}});
/* struct with non-char array */
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, BTF_F_COMPACT,
"(struct __sk_buff){.cb = (__u32[5])[1,2,3,4,5,],}",
{ .cb = {1, 2, 3, 4, 5,},});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff,
BTF_F_COMPACT | BTF_F_NONAME,
"{[1,2,3,4,5,],}",
{ .cb = { 1, 2, 3, 4, 5},});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, 0,
"(struct __sk_buff){\n"
" .cb = (__u32[5])[\n"
" 1,\n"
" 2,\n"
" 3,\n"
" 4,\n"
" 5,\n"
" ],\n"
"}",
{ .cb = { 1, 2, 3, 4, 5},});
	/* For non-char arrays, show non-zero values only */
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, BTF_F_COMPACT,
"(struct __sk_buff){.cb = (__u32[5])[0,0,1,0,0,],}",
{ .cb = { 0, 0, 1, 0, 0},});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, 0,
"(struct __sk_buff){\n"
" .cb = (__u32[5])[\n"
" 0,\n"
" 0,\n"
" 1,\n"
" 0,\n"
" 0,\n"
" ],\n"
"}",
{ .cb = { 0, 0, 1, 0, 0},});
/* struct with bitfields */
TEST_BTF_DUMP_DATA_C(btf, d, "struct", str, struct bpf_insn, BTF_F_COMPACT,
{.code = (__u8)1,.dst_reg = (__u8)0x2,.src_reg = (__u8)0x3,.off = (__s16)4,.imm = (__s32)5,});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_insn,
BTF_F_COMPACT | BTF_F_NONAME,
"{1,0x2,0x3,4,5,}",
{ .code = 1, .dst_reg = 0x2, .src_reg = 0x3, .off = 4,
.imm = 5,});
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_insn, 0,
"(struct bpf_insn){\n"
" .code = (__u8)1,\n"
" .dst_reg = (__u8)0x2,\n"
" .src_reg = (__u8)0x3,\n"
" .off = (__s16)4,\n"
" .imm = (__s32)5,\n"
"}",
{.code = 1, .dst_reg = 2, .src_reg = 3, .off = 4, .imm = 5});
/* zeroed bitfields should not be displayed */
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_insn, BTF_F_COMPACT,
"(struct bpf_insn){.dst_reg = (__u8)0x1,}",
{ .code = 0, .dst_reg = 1});
/* struct with enum bitfield */
type_id = btf__find_by_name(btf, "fs_context");
if (ASSERT_GT(type_id, 0, "find fs_context")) {
type_sz = btf__resolve_size(btf, type_id);
str[0] = '\0';
opts.emit_zeroes = true;
ret = btf_dump__dump_type_data(d, type_id, zero_data, type_sz, &opts);
ASSERT_EQ(ret, type_sz,
"unexpected return value dumping fs_context");
ASSERT_NEQ(strstr(str, "FS_CONTEXT_FOR_MOUNT"), NULL,
"bitfield value not present");
}
/* struct with nested anon union */
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_sock_ops, BTF_F_COMPACT,
"(struct bpf_sock_ops){.op = (__u32)1,(union){.args = (__u32[4])[1,2,3,4,],.reply = (__u32)1,.replylong = (__u32[4])[1,2,3,4,],},}",
{ .op = 1, .args = { 1, 2, 3, 4}});
/* union with nested struct */
TEST_BTF_DUMP_DATA(btf, d, "union", str, union bpf_iter_link_info, BTF_F_COMPACT,
"(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.cgroup = (struct){.order = (enum bpf_cgroup_iter_order)BPF_CGROUP_ITER_SELF_ONLY,.cgroup_fd = (__u32)1,},.task = (struct){.tid = (__u32)1,.pid = (__u32)1,},}",
{ .cgroup = { .order = 1, .cgroup_fd = 1, }});
/* struct skb with nested structs/unions; because type output is so
* complex, we don't do a string comparison, just verify we return
* the type size as the amount of data displayed.
*/
type_id = btf__find_by_name(btf, "sk_buff");
if (ASSERT_GT(type_id, 0, "find struct sk_buff")) {
type_sz = btf__resolve_size(btf, type_id);
str[0] = '\0';
ret = btf_dump__dump_type_data(d, type_id, skb, type_sz, &opts);
ASSERT_EQ(ret, type_sz,
"unexpected return value dumping sk_buff");
}
/* overflow bpf_sock_ops struct with final element nonzero/zero.
* Regardless of the value of the final field, we don't have all the
* data we need to display it, so we should trigger an overflow.
* In other words overflow checking should trump "is field zero?"
* checks because if we've overflowed, it shouldn't matter what the
* field is - we can't trust its value so shouldn't display it.
*/
TEST_BTF_DUMP_DATA_OVER(btf, d, "struct", str, struct bpf_sock_ops,
sizeof(struct bpf_sock_ops) - 1,
"(struct bpf_sock_ops){\n\t.op = (__u32)1,\n",
{ .op = 1, .skb_hwtstamp = 2});
TEST_BTF_DUMP_DATA_OVER(btf, d, "struct", str, struct bpf_sock_ops,
sizeof(struct bpf_sock_ops) - 1,
"(struct bpf_sock_ops){\n\t.op = (__u32)1,\n",
{ .op = 1, .skb_hwtstamp = 0});
}
static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d,
char *str)
{
#if 0
TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_number", int, BTF_F_COMPACT,
"int cpu_number = (int)100", 100);
#endif
TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_profile_flip", int, BTF_F_COMPACT,
"static int cpu_profile_flip = (int)2", 2);
}
static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str,
const char *name, const char *expected_val,
void *data, size_t data_sz)
{
DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts);
int ret = 0, cmp;
size_t secsize;
__s32 type_id;
opts.compact = true;
type_id = btf__find_by_name(btf, name);
if (!ASSERT_GT(type_id, 0, "find type id"))
return;
secsize = btf__resolve_size(btf, type_id);
ASSERT_EQ(secsize, 0, "verify section size");
str[0] = '\0';
ret = btf_dump__dump_type_data(d, type_id, data, data_sz, &opts);
ASSERT_EQ(ret, 0, "unexpected return value");
cmp = strcmp(str, expected_val);
ASSERT_EQ(cmp, 0, "ensure expected/actual match");
}
static void test_btf_dump_datasec_data(char *str)
{
struct btf *btf;
char license[4] = "GPL";
struct btf_dump *d;
btf = btf__parse("xdping_kern.bpf.o", NULL);
if (!ASSERT_OK_PTR(btf, "xdping_kern.bpf.o BTF not found"))
return;
d = btf_dump__new(btf, btf_dump_snprintf, str, NULL);
if (!ASSERT_OK_PTR(d, "could not create BTF dump"))
goto out;
test_btf_datasec(btf, d, str, "license",
"SEC(\"license\") char[4] _license = (char[4])['G','P','L',];",
license, sizeof(license));
out:
btf_dump__free(d);
btf__free(btf);
}
void test_btf_dump() {
char str[STRSIZE];
struct btf_dump *d;
struct btf *btf;
int i;
for (i = 0; i < ARRAY_SIZE(btf_dump_test_cases); i++) {
struct btf_dump_test_case *t = &btf_dump_test_cases[i];
if (!test__start_subtest(t->name))
continue;
test_btf_dump_case(i, &btf_dump_test_cases[i]);
}
if (test__start_subtest("btf_dump: incremental"))
test_btf_dump_incremental();
btf = libbpf_find_kernel_btf();
if (!ASSERT_OK_PTR(btf, "no kernel BTF found"))
return;
d = btf_dump__new(btf, btf_dump_snprintf, str, NULL);
if (!ASSERT_OK_PTR(d, "could not create BTF dump"))
return;
/* Verify type display for various types. */
if (test__start_subtest("btf_dump: int_data"))
test_btf_dump_int_data(btf, d, str);
if (test__start_subtest("btf_dump: float_data"))
test_btf_dump_float_data(btf, d, str);
if (test__start_subtest("btf_dump: char_data"))
test_btf_dump_char_data(btf, d, str);
if (test__start_subtest("btf_dump: typedef_data"))
test_btf_dump_typedef_data(btf, d, str);
if (test__start_subtest("btf_dump: enum_data"))
test_btf_dump_enum_data(btf, d, str);
if (test__start_subtest("btf_dump: struct_data"))
test_btf_dump_struct_data(btf, d, str);
if (test__start_subtest("btf_dump: var_data"))
test_btf_dump_var_data(btf, d, str);
btf_dump__free(d);
btf__free(btf);
if (test__start_subtest("btf_dump: datasec_data"))
test_btf_dump_datasec_data(str);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/btf_dump.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "for_each_hash_map_elem.skel.h"
#include "for_each_array_map_elem.skel.h"
#include "for_each_map_elem_write_key.skel.h"
static unsigned int duration;
static void test_hash_map(void)
{
int i, err, max_entries;
struct for_each_hash_map_elem *skel;
__u64 *percpu_valbuf = NULL;
size_t percpu_val_sz;
__u32 key, num_cpus;
__u64 val;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
skel = for_each_hash_map_elem__open_and_load();
if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
return;
max_entries = bpf_map__max_entries(skel->maps.hashmap);
for (i = 0; i < max_entries; i++) {
key = i;
val = i + 1;
err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
&val, sizeof(val), BPF_ANY);
if (!ASSERT_OK(err, "map_update"))
goto out;
}
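	/* seed the per-CPU map at key 1 with a distinct value per possible CPU */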
num_cpus = bpf_num_possible_cpus();
percpu_val_sz = sizeof(__u64) * num_cpus;
percpu_valbuf = malloc(percpu_val_sz);
if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
goto out;
key = 1;
for (i = 0; i < num_cpus; i++)
percpu_valbuf[i] = i + 1;
err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
percpu_valbuf, percpu_val_sz, BPF_ANY);
if (!ASSERT_OK(err, "percpu_map_update"))
goto out;
err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
duration = topts.duration;
if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
err, errno, topts.retval))
goto out;
ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output");
ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");
key = 1;
err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);
ASSERT_ERR(err, "hashmap_lookup");
ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
ASSERT_LT(skel->bss->cpu, num_cpus, "num_cpus");
ASSERT_EQ(skel->bss->percpu_map_elems, 1, "percpu_map_elems");
ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
out:
free(percpu_valbuf);
for_each_hash_map_elem__destroy(skel);
}
static void test_array_map(void)
{
__u32 key, num_cpus, max_entries;
int i, err;
struct for_each_array_map_elem *skel;
__u64 *percpu_valbuf = NULL;
size_t percpu_val_sz;
__u64 val, expected_total;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
skel = for_each_array_map_elem__open_and_load();
if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
return;
expected_total = 0;
max_entries = bpf_map__max_entries(skel->maps.arraymap);
for (i = 0; i < max_entries; i++) {
key = i;
val = i + 1;
/* skip the last iteration for expected total */
if (i != max_entries - 1)
expected_total += val;
err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
&val, sizeof(val), BPF_ANY);
if (!ASSERT_OK(err, "map_update"))
goto out;
}
num_cpus = bpf_num_possible_cpus();
percpu_val_sz = sizeof(__u64) * num_cpus;
percpu_valbuf = malloc(percpu_val_sz);
if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
goto out;
key = 0;
for (i = 0; i < num_cpus; i++)
percpu_valbuf[i] = i + 1;
err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
percpu_valbuf, percpu_val_sz, BPF_ANY);
if (!ASSERT_OK(err, "percpu_map_update"))
goto out;
err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
duration = topts.duration;
if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
err, errno, topts.retval))
goto out;
ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output");
ASSERT_EQ(skel->bss->cpu + 1, skel->bss->percpu_val, "percpu_val");
out:
free(percpu_valbuf);
for_each_array_map_elem__destroy(skel);
}
static void test_write_map_key(void)
{
struct for_each_map_elem_write_key *skel;
skel = for_each_map_elem_write_key__open_and_load();
if (!ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load"))
for_each_map_elem_write_key__destroy(skel);
}
void test_for_each(void)
{
if (test__start_subtest("hash_map"))
test_hash_map();
if (test__start_subtest("array_map"))
test_array_map();
if (test__start_subtest("write_map_key"))
test_write_map_key();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/for_each.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_helper_restricted.skel.h"
void test_helper_restricted(void)
{
int prog_i = 0, prog_cnt;
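	/* every program in the skeleton calls a helper that is restricted for
	 * its program type, so each load attempt must fail
	 */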
do {
struct test_helper_restricted *test;
int err;
test = test_helper_restricted__open();
if (!ASSERT_OK_PTR(test, "open"))
return;
prog_cnt = test->skeleton->prog_cnt;
for (int j = 0; j < prog_cnt; ++j) {
struct bpf_program *prog = *test->skeleton->progs[j].prog;
bpf_program__set_autoload(prog, true);
}
err = test_helper_restricted__load(test);
ASSERT_ERR(err, "load_should_fail");
test_helper_restricted__destroy(test);
} while (++prog_i < prog_cnt);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/helper_restricted.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "uninit_stack.skel.h"
void test_uninit_stack(void)
{
RUN_TESTS(uninit_stack);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/uninit_stack.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
void test_xdp(void)
{
struct vip key4 = {.protocol = 6, .family = AF_INET};
struct vip key6 = {.protocol = 6, .family = AF_INET6};
struct iptnl_info value4 = {.family = AF_INET};
struct iptnl_info value6 = {.family = AF_INET6};
const char *file = "./test_xdp.bpf.o";
struct bpf_object *obj;
char buf[128];
struct ipv6hdr iph6;
struct iphdr iph;
int err, prog_fd, map_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = 1,
);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
map_fd = bpf_find_map(__func__, obj, "vip2tnl");
if (map_fd < 0)
goto out;
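	/* install IPv4 and IPv6 VIP entries so the XDP program tunnel-encapsulates
	 * the test packets
	 */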
bpf_map_update_elem(map_fd, &key4, &value4, 0);
bpf_map_update_elem(map_fd, &key6, &value6, 0);
err = bpf_prog_test_run_opts(prog_fd, &topts);
memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph));
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, XDP_TX, "ipv4 test_run retval");
ASSERT_EQ(topts.data_size_out, 74, "ipv4 test_run data_size_out");
ASSERT_EQ(iph.protocol, IPPROTO_IPIP, "ipv4 test_run iph.protocol");
topts.data_in = &pkt_v6;
topts.data_size_in = sizeof(pkt_v6);
topts.data_size_out = sizeof(buf);
err = bpf_prog_test_run_opts(prog_fd, &topts);
memcpy(&iph6, buf + sizeof(struct ethhdr), sizeof(iph6));
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, XDP_TX, "ipv6 test_run retval");
ASSERT_EQ(topts.data_size_out, 114, "ipv6 test_run data_size_out");
ASSERT_EQ(iph6.nexthdr, IPPROTO_IPV6, "ipv6 test_run iph6.nexthdr");
out:
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "stacktrace_map_skip.skel.h"
#define TEST_STACK_DEPTH 2
void test_stacktrace_map_skip(void)
{
struct stacktrace_map_skip *skel;
int stackid_hmap_fd, stackmap_fd, stack_amap_fd;
int err, stack_trace_len;
skel = stacktrace_map_skip__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
/* find map fds */
stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
if (!ASSERT_GE(stackid_hmap_fd, 0, "stackid_hmap fd"))
goto out;
stackmap_fd = bpf_map__fd(skel->maps.stackmap);
if (!ASSERT_GE(stackmap_fd, 0, "stackmap fd"))
goto out;
stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
if (!ASSERT_GE(stack_amap_fd, 0, "stack_amap fd"))
goto out;
skel->bss->pid = getpid();
err = stacktrace_map_skip__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
/* give some time for bpf program run */
sleep(1);
/* disable stack trace collection */
skel->bss->control = 1;
	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (!ASSERT_OK(err, "compare_map_keys stackid_hmap vs. stackmap"))
goto out;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (!ASSERT_OK(err, "compare_map_keys stackmap vs. stackid_hmap"))
goto out;
stack_trace_len = TEST_STACK_DEPTH * sizeof(__u64);
err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
if (!ASSERT_OK(err, "compare_stack_ips stackmap vs. stack_amap"))
goto out;
if (!ASSERT_EQ(skel->bss->failed, 0, "skip_failed"))
goto out;
out:
stacktrace_map_skip__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Google LLC. */
#include <test_progs.h>
#include "test_snprintf.skel.h"
#include "test_snprintf_single.skel.h"
#define EXP_NUM_OUT "-8 9 96 -424242 1337 DABBAD00"
#define EXP_NUM_RET sizeof(EXP_NUM_OUT)
#define EXP_IP_OUT "127.000.000.001 0000:0000:0000:0000:0000:0000:0000:0001"
#define EXP_IP_RET sizeof(EXP_IP_OUT)
/* The third specifier, %pB, depends on compiler inlining so don't check it */
#define EXP_SYM_OUT "schedule schedule+0x0/"
#define MIN_SYM_RET sizeof(EXP_SYM_OUT)
/* The third specifier, %p, is a hashed pointer which changes on every reboot */
#define EXP_ADDR_OUT "0000000000000000 ffff00000add4e55 "
#define EXP_ADDR_RET sizeof(EXP_ADDR_OUT "unknownhashedptr")
#define EXP_STR_OUT "str1 a b c d e longstr"
#define EXP_STR_RET sizeof(EXP_STR_OUT)
#define EXP_OVER_OUT "%over"
#define EXP_OVER_RET 10
#define EXP_PAD_OUT " 4 000"
#define EXP_PAD_RET 900007
#define EXP_NO_ARG_OUT "simple case"
#define EXP_NO_ARG_RET 12
#define EXP_NO_BUF_RET 29
static void test_snprintf_positive(void)
{
char exp_addr_out[] = EXP_ADDR_OUT;
char exp_sym_out[] = EXP_SYM_OUT;
struct test_snprintf *skel;
skel = test_snprintf__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->bss->pid = getpid();
if (!ASSERT_OK(test_snprintf__attach(skel), "skel_attach"))
goto cleanup;
/* trigger tracepoint */
usleep(1);
ASSERT_STREQ(skel->bss->num_out, EXP_NUM_OUT, "num_out");
ASSERT_EQ(skel->bss->num_ret, EXP_NUM_RET, "num_ret");
ASSERT_STREQ(skel->bss->ip_out, EXP_IP_OUT, "ip_out");
ASSERT_EQ(skel->bss->ip_ret, EXP_IP_RET, "ip_ret");
ASSERT_OK(memcmp(skel->bss->sym_out, exp_sym_out,
sizeof(exp_sym_out) - 1), "sym_out");
ASSERT_LT(MIN_SYM_RET, skel->bss->sym_ret, "sym_ret");
ASSERT_OK(memcmp(skel->bss->addr_out, exp_addr_out,
sizeof(exp_addr_out) - 1), "addr_out");
ASSERT_EQ(skel->bss->addr_ret, EXP_ADDR_RET, "addr_ret");
ASSERT_STREQ(skel->bss->str_out, EXP_STR_OUT, "str_out");
ASSERT_EQ(skel->bss->str_ret, EXP_STR_RET, "str_ret");
ASSERT_STREQ(skel->bss->over_out, EXP_OVER_OUT, "over_out");
ASSERT_EQ(skel->bss->over_ret, EXP_OVER_RET, "over_ret");
ASSERT_STREQ(skel->bss->pad_out, EXP_PAD_OUT, "pad_out");
ASSERT_EQ(skel->bss->pad_ret, EXP_PAD_RET, "pad_ret");
ASSERT_STREQ(skel->bss->noarg_out, EXP_NO_ARG_OUT, "no_arg_out");
ASSERT_EQ(skel->bss->noarg_ret, EXP_NO_ARG_RET, "no_arg_ret");
ASSERT_EQ(skel->bss->nobuf_ret, EXP_NO_BUF_RET, "no_buf_ret");
cleanup:
test_snprintf__destroy(skel);
}
/* Loads an eBPF object calling bpf_snprintf with up to 10 characters of fmt */
static int load_single_snprintf(char *fmt)
{
struct test_snprintf_single *skel;
int ret;
skel = test_snprintf_single__open();
if (!skel)
return -EINVAL;
memcpy(skel->rodata->fmt, fmt, MIN(strlen(fmt) + 1, 10));
ret = test_snprintf_single__load(skel);
test_snprintf_single__destroy(skel);
return ret;
}
static void test_snprintf_negative(void)
{
ASSERT_OK(load_single_snprintf("valid %d"), "valid usage");
ASSERT_ERR(load_single_snprintf("0123456789"), "no terminating zero");
ASSERT_ERR(load_single_snprintf("%d %d"), "too many specifiers");
ASSERT_ERR(load_single_snprintf("%pi5"), "invalid specifier 1");
ASSERT_ERR(load_single_snprintf("%a"), "invalid specifier 2");
ASSERT_ERR(load_single_snprintf("%"), "invalid specifier 3");
ASSERT_ERR(load_single_snprintf("%12345678"), "invalid specifier 4");
ASSERT_ERR(load_single_snprintf("%--------"), "invalid specifier 5");
ASSERT_ERR(load_single_snprintf("%lc"), "invalid specifier 6");
ASSERT_ERR(load_single_snprintf("%llc"), "invalid specifier 7");
ASSERT_ERR(load_single_snprintf("\x80"), "non ascii character");
ASSERT_ERR(load_single_snprintf("\x1"), "non printable character");
}
void test_snprintf(void)
{
if (test__start_subtest("snprintf_positive"))
test_snprintf_positive();
if (test__start_subtest("snprintf_negative"))
test_snprintf_negative();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/snprintf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Facebook */
#include <test_progs.h>
#include "test_custom_sec_handlers.skel.h"
#define COOKIE_ABC1 1
#define COOKIE_ABC2 2
#define COOKIE_CUSTOM 3
#define COOKIE_FALLBACK 4
#define COOKIE_KPROBE 5
static int custom_setup_prog(struct bpf_program *prog, long cookie)
{
if (cookie == COOKIE_ABC1)
bpf_program__set_autoload(prog, false);
return 0;
}
static int custom_prepare_load_prog(struct bpf_program *prog,
struct bpf_prog_load_opts *opts, long cookie)
{
if (cookie == COOKIE_FALLBACK)
opts->prog_flags |= BPF_F_SLEEPABLE;
else if (cookie == COOKIE_ABC1)
ASSERT_FALSE(true, "unexpected preload for abc");
return 0;
}
static int custom_attach_prog(const struct bpf_program *prog, long cookie,
struct bpf_link **link)
{
switch (cookie) {
case COOKIE_ABC2:
*link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
return libbpf_get_error(*link);
case COOKIE_CUSTOM:
*link = bpf_program__attach_tracepoint(prog, "syscalls", "sys_enter_nanosleep");
return libbpf_get_error(*link);
case COOKIE_KPROBE:
case COOKIE_FALLBACK:
/* no auto-attach for SEC("xyz") and SEC("kprobe") */
*link = NULL;
return 0;
default:
ASSERT_FALSE(true, "unexpected cookie");
return -EINVAL;
}
}
static int abc1_id;
static int abc2_id;
static int custom_id;
static int fallback_id;
static int kprobe_id;
__attribute__((constructor))
static void register_sec_handlers(void)
{
LIBBPF_OPTS(libbpf_prog_handler_opts, abc1_opts,
.cookie = COOKIE_ABC1,
.prog_setup_fn = custom_setup_prog,
.prog_prepare_load_fn = custom_prepare_load_prog,
.prog_attach_fn = NULL,
);
LIBBPF_OPTS(libbpf_prog_handler_opts, abc2_opts,
.cookie = COOKIE_ABC2,
.prog_setup_fn = custom_setup_prog,
.prog_prepare_load_fn = custom_prepare_load_prog,
.prog_attach_fn = custom_attach_prog,
);
LIBBPF_OPTS(libbpf_prog_handler_opts, custom_opts,
.cookie = COOKIE_CUSTOM,
.prog_setup_fn = NULL,
.prog_prepare_load_fn = NULL,
.prog_attach_fn = custom_attach_prog,
);
abc1_id = libbpf_register_prog_handler("abc", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc1_opts);
abc2_id = libbpf_register_prog_handler("abc/", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc2_opts);
custom_id = libbpf_register_prog_handler("custom+", BPF_PROG_TYPE_TRACEPOINT, 0, &custom_opts);
}
__attribute__((destructor))
static void unregister_sec_handlers(void)
{
libbpf_unregister_prog_handler(abc1_id);
libbpf_unregister_prog_handler(abc2_id);
libbpf_unregister_prog_handler(custom_id);
}
void test_custom_sec_handlers(void)
{
LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
.prog_setup_fn = custom_setup_prog,
.prog_prepare_load_fn = custom_prepare_load_prog,
.prog_attach_fn = custom_attach_prog,
);
struct test_custom_sec_handlers* skel;
int err;
ASSERT_GT(abc1_id, 0, "abc1_id");
ASSERT_GT(abc2_id, 0, "abc2_id");
ASSERT_GT(custom_id, 0, "custom_id");
/* override libbpf's handle of SEC("kprobe/...") but also allow pure
* SEC("kprobe") due to "kprobe+" specifier. Register it as
* TRACEPOINT, just for fun.
*/
opts.cookie = COOKIE_KPROBE;
kprobe_id = libbpf_register_prog_handler("kprobe+", BPF_PROG_TYPE_TRACEPOINT, 0, &opts);
/* fallback treats everything as BPF_PROG_TYPE_SYSCALL program to test
* setting custom BPF_F_SLEEPABLE bit in preload handler
*/
opts.cookie = COOKIE_FALLBACK;
fallback_id = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_SYSCALL, 0, &opts);
if (!ASSERT_GT(fallback_id, 0, "fallback_id") /* || !ASSERT_GT(kprobe_id, 0, "kprobe_id")*/) {
if (fallback_id > 0)
libbpf_unregister_prog_handler(fallback_id);
if (kprobe_id > 0)
libbpf_unregister_prog_handler(kprobe_id);
return;
}
/* open skeleton and validate assumptions */
skel = test_custom_sec_handlers__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__type(skel->progs.abc1), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc1_type");
ASSERT_FALSE(bpf_program__autoload(skel->progs.abc1), "abc1_autoload");
ASSERT_EQ(bpf_program__type(skel->progs.abc2), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc2_type");
ASSERT_EQ(bpf_program__type(skel->progs.custom1), BPF_PROG_TYPE_TRACEPOINT, "custom1_type");
ASSERT_EQ(bpf_program__type(skel->progs.custom2), BPF_PROG_TYPE_TRACEPOINT, "custom2_type");
ASSERT_EQ(bpf_program__type(skel->progs.kprobe1), BPF_PROG_TYPE_TRACEPOINT, "kprobe1_type");
ASSERT_EQ(bpf_program__type(skel->progs.xyz), BPF_PROG_TYPE_SYSCALL, "xyz_type");
skel->rodata->my_pid = getpid();
/* now attempt to load everything */
err = test_custom_sec_handlers__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
/* now try to auto-attach everything */
err = test_custom_sec_handlers__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
skel->links.xyz = bpf_program__attach(skel->progs.kprobe1);
ASSERT_EQ(errno, EOPNOTSUPP, "xyz_attach_err");
ASSERT_ERR_PTR(skel->links.xyz, "xyz_attach");
/* trigger programs */
usleep(1);
/* SEC("abc") is set to not auto-loaded */
ASSERT_FALSE(skel->bss->abc1_called, "abc1_called");
ASSERT_TRUE(skel->bss->abc2_called, "abc2_called");
ASSERT_TRUE(skel->bss->custom1_called, "custom1_called");
ASSERT_TRUE(skel->bss->custom2_called, "custom2_called");
/* SEC("kprobe") shouldn't be auto-attached */
ASSERT_FALSE(skel->bss->kprobe1_called, "kprobe1_called");
/* SEC("xyz") shouldn't be auto-attached */
ASSERT_FALSE(skel->bss->xyz_called, "xyz_called");
cleanup:
test_custom_sec_handlers__destroy(skel);
ASSERT_OK(libbpf_unregister_prog_handler(fallback_id), "unregister_fallback");
ASSERT_OK(libbpf_unregister_prog_handler(kprobe_id), "unregister_kprobe");
}
| linux-master | tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c |
// SPDX-License-Identifier: GPL-2.0
#include "test_progs.h"
#include "network_helpers.h"
static __u32 duration;
static void test_global_func_args0(struct bpf_object *obj)
{
int err, i, map_fd, actual_value;
const char *map_name = "values";
map_fd = bpf_find_map(__func__, obj, map_name);
if (CHECK(map_fd < 0, "bpf_find_map", "cannot find BPF map %s: %s\n",
map_name, strerror(errno)))
return;
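	/* each index in the "values" map is written by a different
	 * global-function scenario in the BPF program
	 */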
struct {
const char *descr;
int expected_value;
} tests[] = {
{"passing NULL pointer", 0},
{"returning value", 1},
{"reading local variable", 100 },
{"writing local variable", 101 },
{"reading global variable", 42 },
{"writing global variable", 43 },
{"writing to pointer-to-pointer", 1 },
};
for (i = 0; i < ARRAY_SIZE(tests); ++i) {
const int expected_value = tests[i].expected_value;
err = bpf_map_lookup_elem(map_fd, &i, &actual_value);
CHECK(err || actual_value != expected_value, tests[i].descr,
"err %d result %d expected %d\n", err, actual_value, expected_value);
}
}
void test_global_func_args(void)
{
const char *file = "./test_global_func_args.bpf.o";
struct bpf_object *obj;
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
if (CHECK(err, "load program", "error %d loading %s\n", err, file))
return;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_OK(topts.retval, "test_run retval");
test_global_func_args0(obj);
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/global_func_args.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <time.h>
#include "test_vmlinux.skel.h"
#define MY_TV_NSEC 1337
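/* Issue a nanosleep syscall with a recognizable tv_nsec so every attached
 * program in the skeleton fires.
 */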
static void nsleep(void)
{
struct timespec ts = { .tv_nsec = MY_TV_NSEC };
(void)syscall(__NR_nanosleep, &ts, NULL);
}
void test_vmlinux(void)
{
int duration = 0, err;
struct test_vmlinux* skel;
struct test_vmlinux__bss *bss;
skel = test_vmlinux__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
bss = skel->bss;
err = test_vmlinux__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
/* trigger everything */
nsleep();
CHECK(!bss->tp_called, "tp", "not called\n");
CHECK(!bss->raw_tp_called, "raw_tp", "not called\n");
CHECK(!bss->tp_btf_called, "tp_btf", "not called\n");
CHECK(!bss->kprobe_called, "kprobe", "not called\n");
CHECK(!bss->fentry_called, "fentry", "not called\n");
cleanup:
test_vmlinux__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/vmlinux.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>
#include "test_map_ops.skel.h"
#include "test_progs.h"
static void map_update(void)
{
(void)syscall(__NR_getpid);
}
static void map_delete(void)
{
(void)syscall(__NR_getppid);
}
static void map_push(void)
{
(void)syscall(__NR_getuid);
}
static void map_pop(void)
{
(void)syscall(__NR_geteuid);
}
static void map_peek(void)
{
(void)syscall(__NR_getgid);
}
static void map_for_each_pass(void)
{
(void)syscall(__NR_gettid);
}
static void map_for_each_fail(void)
{
(void)syscall(__NR_getpgid);
}
static int setup(struct test_map_ops **skel)
{
int err = 0;
if (!skel)
return -1;
*skel = test_map_ops__open();
if (!ASSERT_OK_PTR(*skel, "test_map_ops__open"))
return -1;
(*skel)->rodata->pid = getpid();
err = test_map_ops__load(*skel);
if (!ASSERT_OK(err, "test_map_ops__load"))
return err;
err = test_map_ops__attach(*skel);
if (!ASSERT_OK(err, "test_map_ops__attach"))
return err;
return err;
}
static void teardown(struct test_map_ops **skel)
{
if (skel && *skel)
test_map_ops__destroy(*skel);
}
static void map_ops_update_delete_subtest(void)
{
struct test_map_ops *skel;
if (setup(&skel))
goto teardown;
map_update();
ASSERT_OK(skel->bss->err, "map_update_initial");
map_update();
ASSERT_LT(skel->bss->err, 0, "map_update_existing");
ASSERT_EQ(skel->bss->err, -EEXIST, "map_update_existing");
map_delete();
ASSERT_OK(skel->bss->err, "map_delete_existing");
map_delete();
ASSERT_LT(skel->bss->err, 0, "map_delete_non_existing");
ASSERT_EQ(skel->bss->err, -ENOENT, "map_delete_non_existing");
teardown:
teardown(&skel);
}
static void map_ops_push_peek_pop_subtest(void)
{
struct test_map_ops *skel;
if (setup(&skel))
goto teardown;
map_push();
ASSERT_OK(skel->bss->err, "map_push_initial");
map_push();
ASSERT_LT(skel->bss->err, 0, "map_push_when_full");
ASSERT_EQ(skel->bss->err, -E2BIG, "map_push_when_full");
map_peek();
ASSERT_OK(skel->bss->err, "map_peek");
map_pop();
ASSERT_OK(skel->bss->err, "map_pop");
map_peek();
ASSERT_LT(skel->bss->err, 0, "map_peek_when_empty");
ASSERT_EQ(skel->bss->err, -ENOENT, "map_peek_when_empty");
map_pop();
ASSERT_LT(skel->bss->err, 0, "map_pop_when_empty");
ASSERT_EQ(skel->bss->err, -ENOENT, "map_pop_when_empty");
teardown:
teardown(&skel);
}
static void map_ops_for_each_subtest(void)
{
struct test_map_ops *skel;
if (setup(&skel))
goto teardown;
map_for_each_pass();
/* expect to iterate over 1 element */
ASSERT_EQ(skel->bss->err, 1, "map_for_each_no_flags");
map_for_each_fail();
ASSERT_LT(skel->bss->err, 0, "map_for_each_with_flags");
ASSERT_EQ(skel->bss->err, -EINVAL, "map_for_each_with_flags");
teardown:
teardown(&skel);
}
void test_map_ops(void)
{
if (test__start_subtest("map_ops_update_delete"))
map_ops_update_delete_subtest();
if (test__start_subtest("map_ops_push_peek_pop"))
map_ops_push_peek_pop_subtest();
if (test__start_subtest("map_ops_for_each"))
map_ops_for_each_subtest();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/map_ops.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "dynptr_fail.skel.h"
#include "dynptr_success.skel.h"
enum test_setup_type {
SETUP_SYSCALL_SLEEP,
SETUP_SKB_PROG,
};
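/* SETUP_SYSCALL_SLEEP programs are attached and triggered via usleep();
 * SETUP_SKB_PROG programs are run directly with bpf_prog_test_run_opts()
 * on a dummy IPv4 packet
 */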
static struct {
const char *prog_name;
enum test_setup_type type;
} success_tests[] = {
{"test_read_write", SETUP_SYSCALL_SLEEP},
{"test_dynptr_data", SETUP_SYSCALL_SLEEP},
{"test_ringbuf", SETUP_SYSCALL_SLEEP},
{"test_skb_readonly", SETUP_SKB_PROG},
{"test_dynptr_skb_data", SETUP_SKB_PROG},
{"test_adjust", SETUP_SYSCALL_SLEEP},
{"test_adjust_err", SETUP_SYSCALL_SLEEP},
{"test_zero_size_dynptr", SETUP_SYSCALL_SLEEP},
{"test_dynptr_is_null", SETUP_SYSCALL_SLEEP},
{"test_dynptr_is_rdonly", SETUP_SKB_PROG},
{"test_dynptr_clone", SETUP_SKB_PROG},
{"test_dynptr_skb_no_buff", SETUP_SKB_PROG},
{"test_dynptr_skb_strcmp", SETUP_SKB_PROG},
};
static void verify_success(const char *prog_name, enum test_setup_type setup_type)
{
struct dynptr_success *skel;
struct bpf_program *prog;
struct bpf_link *link;
int err;
skel = dynptr_success__open();
if (!ASSERT_OK_PTR(skel, "dynptr_success__open"))
return;
skel->bss->pid = getpid();
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto cleanup;
bpf_program__set_autoload(prog, true);
err = dynptr_success__load(skel);
if (!ASSERT_OK(err, "dynptr_success__load"))
goto cleanup;
switch (setup_type) {
case SETUP_SYSCALL_SLEEP:
link = bpf_program__attach(prog);
if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
goto cleanup;
usleep(1);
bpf_link__destroy(link);
break;
case SETUP_SKB_PROG:
{
int prog_fd;
char buf[64];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = 1,
);
prog_fd = bpf_program__fd(prog);
if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
goto cleanup;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run"))
goto cleanup;
break;
}
}
ASSERT_EQ(skel->bss->err, 0, "err");
cleanup:
dynptr_success__destroy(skel);
}
void test_dynptr(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
if (!test__start_subtest(success_tests[i].prog_name))
continue;
verify_success(success_tests[i].prog_name, success_tests[i].type);
}
RUN_TESTS(dynptr_fail);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/dynptr.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <test_progs.h>
static int libbpf_debug_print(enum libbpf_print_level level,
const char *format, va_list args)
{
if (level != LIBBPF_DEBUG) {
vprintf(format, args);
return 0;
}
if (!strstr(format, "verifier log"))
return 0;
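	/* the debug-level verifier log message is expected to carry the log
	 * text as its first vararg; print just that buffer
	 */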
vprintf("%s", args);
return 0;
}
extern int extra_prog_load_log_flags;
static int check_load(const char *file, enum bpf_prog_type type)
{
struct bpf_object *obj = NULL;
struct bpf_program *prog;
int err;
obj = bpf_object__open_file(file, NULL);
err = libbpf_get_error(obj);
if (err)
return err;
prog = bpf_object__next_program(obj, NULL);
if (!prog) {
err = -ENOENT;
goto err_out;
}
bpf_program__set_type(prog, type);
bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32);
bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags);
err = bpf_object__load(obj);
err_out:
bpf_object__close(obj);
return err;
}
struct scale_test_def {
const char *file;
enum bpf_prog_type attach_type;
bool fails;
};
static void scale_test(const char *file,
enum bpf_prog_type attach_type,
bool should_fail)
{
libbpf_print_fn_t old_print_fn = NULL;
int err;
if (env.verifier_stats) {
test__force_log();
old_print_fn = libbpf_set_print(libbpf_debug_print);
}
err = check_load(file, attach_type);
if (should_fail)
ASSERT_ERR(err, "expect_error");
else
ASSERT_OK(err, "expect_success");
if (env.verifier_stats)
libbpf_set_print(old_print_fn);
}
void test_verif_scale1()
{
scale_test("test_verif_scale1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false);
}
void test_verif_scale2()
{
scale_test("test_verif_scale2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false);
}
void test_verif_scale3()
{
scale_test("test_verif_scale3.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false);
}
void test_verif_scale_pyperf_global()
{
scale_test("pyperf_global.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_pyperf_subprogs()
{
scale_test("pyperf_subprogs.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_pyperf50()
{
/* full unroll by llvm */
scale_test("pyperf50.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_pyperf100()
{
/* full unroll by llvm */
scale_test("pyperf100.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_pyperf180()
{
/* full unroll by llvm */
scale_test("pyperf180.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_pyperf600()
{
/* partial unroll. llvm will unroll loop ~150 times.
* C loop count -> 600.
* Asm loop count -> 4.
* 16k insns in loop body.
* Total of 5 such loops. Total program size ~82k insns.
*/
scale_test("pyperf600.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_pyperf600_bpf_loop(void)
{
	/* use the bpf_loop helper */
scale_test("pyperf600_bpf_loop.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_pyperf600_nounroll()
{
/* no unroll at all.
* C loop count -> 600.
* ASM loop count -> 600.
* ~110 insns in loop body.
* Total of 5 such loops. Total program size ~1500 insns.
*/
scale_test("pyperf600_nounroll.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_pyperf600_iter()
{
/* open-coded BPF iterator version */
scale_test("pyperf600_iter.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_loop1()
{
scale_test("loop1.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_loop2()
{
scale_test("loop2.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_loop3_fail()
{
scale_test("loop3.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */);
}
void test_verif_scale_loop4()
{
scale_test("loop4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false);
}
void test_verif_scale_loop5()
{
scale_test("loop5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false);
}
void test_verif_scale_loop6()
{
scale_test("loop6.bpf.o", BPF_PROG_TYPE_KPROBE, false);
}
void test_verif_scale_strobemeta()
{
/* partial unroll. 19k insn in a loop.
* Total program size 20.8k insn.
* ~350k processed_insns
*/
scale_test("strobemeta.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_strobemeta_bpf_loop(void)
{
	/* use the bpf_loop helper */
scale_test("strobemeta_bpf_loop.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_strobemeta_nounroll1()
{
/* no unroll, tiny loops */
scale_test("strobemeta_nounroll1.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_strobemeta_nounroll2()
{
/* no unroll, tiny loops */
scale_test("strobemeta_nounroll2.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_strobemeta_subprogs()
{
/* non-inlined subprogs */
scale_test("strobemeta_subprogs.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
}
void test_verif_scale_sysctl_loop1()
{
scale_test("test_sysctl_loop1.bpf.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false);
}
void test_verif_scale_sysctl_loop2()
{
scale_test("test_sysctl_loop2.bpf.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false);
}
void test_verif_scale_xdp_loop()
{
scale_test("test_xdp_loop.bpf.o", BPF_PROG_TYPE_XDP, false);
}
void test_verif_scale_seg6_loop()
{
scale_test("test_seg6_loop.bpf.o", BPF_PROG_TYPE_LWT_SEG6LOCAL, false);
}
void test_verif_twfw()
{
scale_test("twfw.bpf.o", BPF_PROG_TYPE_CGROUP_SKB, false);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
static char bpf_log_buf[4096];
static bool verbose;
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
enum sockopt_test_error {
OK = 0,
DENY_LOAD,
DENY_ATTACH,
EOPNOTSUPP_GETSOCKOPT,
EPERM_GETSOCKOPT,
EFAULT_GETSOCKOPT,
EPERM_SETSOCKOPT,
EFAULT_SETSOCKOPT,
};
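/* Each entry below pairs a small hand-assembled BPF_PROG_TYPE_CGROUP_SOCKOPT
 * program with the set/getsockopt calls used to exercise it. run_test() loads
 * the program, attaches it to the test cgroup and checks both the syscall
 * results and the returned optval/optlen.
 */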
static struct sockopt_test {
const char *descr;
const struct bpf_insn insns[64];
enum bpf_attach_type attach_type;
enum bpf_attach_type expected_attach_type;
int set_optname;
int set_level;
const char set_optval[64];
socklen_t set_optlen;
int get_optname;
int get_level;
const char get_optval[64];
socklen_t get_optlen;
socklen_t get_optlen_ret;
enum sockopt_test_error error;
} tests[] = {
/* ==================== getsockopt ==================== */
{
.descr = "getsockopt: no expected_attach_type",
.insns = {
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = 0,
.error = DENY_LOAD,
},
{
.descr = "getsockopt: wrong expected_attach_type",
.insns = {
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.error = DENY_ATTACH,
},
{
.descr = "getsockopt: bypass bpf hook",
.insns = {
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.get_level = SOL_IP,
.set_level = SOL_IP,
.get_optname = IP_TOS,
.set_optname = IP_TOS,
.set_optval = { 1 << 3 },
.set_optlen = 1,
.get_optval = { 1 << 3 },
.get_optlen = 1,
},
{
.descr = "getsockopt: return EPERM from bpf hook",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.get_level = SOL_IP,
.get_optname = IP_TOS,
.get_optlen = 1,
.error = EPERM_GETSOCKOPT,
},
{
.descr = "getsockopt: no optval bounds check, deny loading",
.insns = {
/* r6 = ctx->optval */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, optval)),
/* ctx->optval[0] = 0x80 */
BPF_MOV64_IMM(BPF_REG_0, 0x80),
BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_0, 0),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.error = DENY_LOAD,
},
{
.descr = "getsockopt: read ctx->level",
.insns = {
/* r6 = ctx->level */
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, level)),
/* if (ctx->level == 123) { */
BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 123, 4),
/* ctx->retval = 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, retval)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* } else { */
/* return 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
/* } */
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.get_level = 123,
.get_optlen = 1,
},
{
.descr = "getsockopt: deny writing to ctx->level",
.insns = {
/* ctx->level = 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, level)),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.error = DENY_LOAD,
},
{
.descr = "getsockopt: read ctx->optname",
.insns = {
/* r6 = ctx->optname */
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, optname)),
/* if (ctx->optname == 123) { */
BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 123, 4),
/* ctx->retval = 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, retval)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* } else { */
/* return 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
/* } */
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.get_optname = 123,
.get_optlen = 1,
},
{
.descr = "getsockopt: read ctx->retval",
.insns = {
/* r6 = ctx->retval */
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, retval)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.get_level = SOL_IP,
.get_optname = IP_TOS,
.get_optlen = 1,
},
{
.descr = "getsockopt: deny writing to ctx->optname",
.insns = {
/* ctx->optname = 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optname)),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.error = DENY_LOAD,
},
{
.descr = "getsockopt: read ctx->optlen",
.insns = {
/* r6 = ctx->optlen */
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, optlen)),
/* if (ctx->optlen == 64) { */
BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 64, 4),
/* ctx->retval = 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, retval)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* } else { */
/* return 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
/* } */
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.get_optlen = 64,
},
{
.descr = "getsockopt: deny bigger ctx->optlen",
.insns = {
/* ctx->optlen = 65 */
BPF_MOV64_IMM(BPF_REG_0, 65),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optlen)),
/* ctx->retval = 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, retval)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.get_optlen = 64,
.error = EFAULT_GETSOCKOPT,
},
{
.descr = "getsockopt: ignore >PAGE_SIZE optlen",
.insns = {
/* write 0xFF to the first optval byte */
/* r6 = ctx->optval */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, optval)),
/* r2 = ctx->optval */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
/* r6 = ctx->optval + 1 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
/* r7 = ctx->optval_end */
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sockopt, optval_end)),
/* if (ctx->optval + 1 <= ctx->optval_end) { */
BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
			/* ctx->optval[0] = 0xFF */
BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xFF),
/* } */
/* retval changes are ignored */
/* ctx->retval = 5 */
BPF_MOV64_IMM(BPF_REG_0, 5),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, retval)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.get_level = 1234,
.get_optname = 5678,
.get_optval = {}, /* the changes are ignored */
.get_optlen = PAGE_SIZE + 1,
.error = EOPNOTSUPP_GETSOCKOPT,
},
{
.descr = "getsockopt: support smaller ctx->optlen",
.insns = {
/* ctx->optlen = 32 */
BPF_MOV64_IMM(BPF_REG_0, 32),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optlen)),
/* ctx->retval = 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, retval)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.get_optlen = 64,
.get_optlen_ret = 32,
},
{
.descr = "getsockopt: deny writing to ctx->optval",
.insns = {
/* ctx->optval = 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optval)),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.error = DENY_LOAD,
},
{
.descr = "getsockopt: deny writing to ctx->optval_end",
.insns = {
/* ctx->optval_end = 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optval_end)),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.error = DENY_LOAD,
},
{
.descr = "getsockopt: rewrite value",
.insns = {
/* r6 = ctx->optval */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, optval)),
/* r2 = ctx->optval */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
/* r6 = ctx->optval + 1 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
/* r7 = ctx->optval_end */
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sockopt, optval_end)),
/* if (ctx->optval + 1 <= ctx->optval_end) { */
BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
/* ctx->optval[0] = 0xF0 */
BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xF0),
/* } */
/* ctx->retval = 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, retval)),
			/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.get_level = SOL_IP,
.get_optname = IP_TOS,
.get_optval = { 0xF0 },
.get_optlen = 1,
},
/* ==================== setsockopt ==================== */
{
.descr = "setsockopt: no expected_attach_type",
.insns = {
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = 0,
.error = DENY_LOAD,
},
{
.descr = "setsockopt: wrong expected_attach_type",
.insns = {
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
.error = DENY_ATTACH,
},
{
.descr = "setsockopt: bypass bpf hook",
.insns = {
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.get_level = SOL_IP,
.set_level = SOL_IP,
.get_optname = IP_TOS,
.set_optname = IP_TOS,
.set_optval = { 1 << 3 },
.set_optlen = 1,
.get_optval = { 1 << 3 },
.get_optlen = 1,
},
{
.descr = "setsockopt: return EPERM from bpf hook",
.insns = {
/* return 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.set_level = SOL_IP,
.set_optname = IP_TOS,
.set_optlen = 1,
.error = EPERM_SETSOCKOPT,
},
{
.descr = "setsockopt: no optval bounds check, deny loading",
.insns = {
/* r6 = ctx->optval */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, optval)),
/* r0 = ctx->optval[0] */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.error = DENY_LOAD,
},
{
.descr = "setsockopt: read ctx->level",
.insns = {
/* r6 = ctx->level */
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, level)),
/* if (ctx->level == 123) { */
BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 123, 4),
/* ctx->optlen = -1 */
BPF_MOV64_IMM(BPF_REG_0, -1),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optlen)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* } else { */
/* return 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
/* } */
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.set_level = 123,
.set_optlen = 1,
},
{
.descr = "setsockopt: allow changing ctx->level",
.insns = {
/* ctx->level = SOL_IP */
BPF_MOV64_IMM(BPF_REG_0, SOL_IP),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, level)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.get_level = SOL_IP,
.set_level = 234, /* should be rewritten to SOL_IP */
.get_optname = IP_TOS,
.set_optname = IP_TOS,
.set_optval = { 1 << 3 },
.set_optlen = 1,
.get_optval = { 1 << 3 },
.get_optlen = 1,
},
{
.descr = "setsockopt: read ctx->optname",
.insns = {
/* r6 = ctx->optname */
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, optname)),
/* if (ctx->optname == 123) { */
BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 123, 4),
/* ctx->optlen = -1 */
BPF_MOV64_IMM(BPF_REG_0, -1),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optlen)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* } else { */
/* return 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
/* } */
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.set_optname = 123,
.set_optlen = 1,
},
{
.descr = "setsockopt: allow changing ctx->optname",
.insns = {
/* ctx->optname = IP_TOS */
BPF_MOV64_IMM(BPF_REG_0, IP_TOS),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optname)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.get_level = SOL_IP,
.set_level = SOL_IP,
.get_optname = IP_TOS,
.set_optname = 456, /* should be rewritten to IP_TOS */
.set_optval = { 1 << 3 },
.set_optlen = 1,
.get_optval = { 1 << 3 },
.get_optlen = 1,
},
{
.descr = "setsockopt: read ctx->optlen",
.insns = {
/* r6 = ctx->optlen */
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, optlen)),
/* if (ctx->optlen == 64) { */
BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 64, 4),
/* ctx->optlen = -1 */
BPF_MOV64_IMM(BPF_REG_0, -1),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optlen)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* } else { */
/* return 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
/* } */
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.set_optlen = 64,
},
{
.descr = "setsockopt: ctx->optlen == -1 is ok",
.insns = {
/* ctx->optlen = -1 */
BPF_MOV64_IMM(BPF_REG_0, -1),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optlen)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.set_optlen = 64,
},
{
.descr = "setsockopt: deny ctx->optlen < 0 (except -1)",
.insns = {
/* ctx->optlen = -2 */
BPF_MOV64_IMM(BPF_REG_0, -2),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optlen)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.set_optlen = 4,
.error = EFAULT_SETSOCKOPT,
},
{
.descr = "setsockopt: deny ctx->optlen > input optlen",
.insns = {
/* ctx->optlen = 65 */
BPF_MOV64_IMM(BPF_REG_0, 65),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optlen)),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.set_optlen = 64,
.error = EFAULT_SETSOCKOPT,
},
{
.descr = "setsockopt: ignore >PAGE_SIZE optlen",
.insns = {
			/* write 0xF0 to the first optval byte */
/* r6 = ctx->optval */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, optval)),
/* r2 = ctx->optval */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
/* r6 = ctx->optval + 1 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
/* r7 = ctx->optval_end */
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sockopt, optval_end)),
/* if (ctx->optval + 1 <= ctx->optval_end) { */
BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
/* ctx->optval[0] = 0xF0 */
BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xF0),
/* } */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.set_level = SOL_IP,
.set_optname = IP_TOS,
.set_optval = {},
.set_optlen = PAGE_SIZE + 1,
.get_level = SOL_IP,
.get_optname = IP_TOS,
.get_optval = {}, /* the changes are ignored */
.get_optlen = 4,
},
{
.descr = "setsockopt: allow changing ctx->optlen within bounds",
.insns = {
/* r6 = ctx->optval */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, optval)),
/* r2 = ctx->optval */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
/* r6 = ctx->optval + 1 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
/* r7 = ctx->optval_end */
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sockopt, optval_end)),
/* if (ctx->optval + 1 <= ctx->optval_end) { */
BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
/* ctx->optval[0] = 1 << 3 */
BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 1 << 3),
/* } */
/* ctx->optlen = 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optlen)),
			/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.get_level = SOL_IP,
.set_level = SOL_IP,
.get_optname = IP_TOS,
.set_optname = IP_TOS,
.set_optval = { 1, 1, 1, 1 },
.set_optlen = 4,
.get_optval = { 1 << 3 },
.get_optlen = 1,
},
{
.descr = "setsockopt: deny write ctx->retval",
.insns = {
/* ctx->retval = 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, retval)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.error = DENY_LOAD,
},
{
.descr = "setsockopt: deny read ctx->retval",
.insns = {
/* r6 = ctx->retval */
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, retval)),
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.error = DENY_LOAD,
},
{
.descr = "setsockopt: deny writing to ctx->optval",
.insns = {
/* ctx->optval = 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optval)),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.error = DENY_LOAD,
},
{
.descr = "setsockopt: deny writing to ctx->optval_end",
.insns = {
/* ctx->optval_end = 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, optval_end)),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.error = DENY_LOAD,
},
{
.descr = "setsockopt: allow IP_TOS <= 128",
.insns = {
/* r6 = ctx->optval */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, optval)),
/* r7 = ctx->optval + 1 */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1),
/* r8 = ctx->optval_end */
BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_1,
offsetof(struct bpf_sockopt, optval_end)),
/* if (ctx->optval + 1 <= ctx->optval_end) { */
BPF_JMP_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 4),
/* r9 = ctx->optval[0] */
BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_6, 0),
			/* if (ctx->optval[0] <= 128) */
BPF_JMP_IMM(BPF_JGT, BPF_REG_9, 128, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* } */
/* } else { */
BPF_MOV64_IMM(BPF_REG_0, 0),
/* } */
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.get_level = SOL_IP,
.set_level = SOL_IP,
.get_optname = IP_TOS,
.set_optname = IP_TOS,
.set_optval = { 0x80 },
.set_optlen = 1,
.get_optval = { 0x80 },
.get_optlen = 1,
},
{
.descr = "setsockopt: deny IP_TOS > 128",
.insns = {
/* r6 = ctx->optval */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
offsetof(struct bpf_sockopt, optval)),
/* r7 = ctx->optval + 1 */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1),
/* r8 = ctx->optval_end */
BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_1,
offsetof(struct bpf_sockopt, optval_end)),
/* if (ctx->optval + 1 <= ctx->optval_end) { */
BPF_JMP_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 4),
/* r9 = ctx->optval[0] */
BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_6, 0),
			/* if (ctx->optval[0] <= 128) */
BPF_JMP_IMM(BPF_JGT, BPF_REG_9, 128, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* } */
/* } else { */
BPF_MOV64_IMM(BPF_REG_0, 0),
/* } */
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SETSOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.get_level = SOL_IP,
.set_level = SOL_IP,
.get_optname = IP_TOS,
.set_optname = IP_TOS,
.set_optval = { 0x81 },
.set_optlen = 1,
.get_optval = { 0x00 },
.get_optlen = 1,
.error = EPERM_SETSOCKOPT,
},
};
static int load_prog(const struct bpf_insn *insns,
enum bpf_attach_type expected_attach_type)
{
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.expected_attach_type = expected_attach_type,
.log_level = 2,
.log_buf = bpf_log_buf,
.log_size = sizeof(bpf_log_buf),
);
int fd, insns_cnt = 0;
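	/* count instructions up to and including the first BPF_EXIT */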
for (;
insns[insns_cnt].code != (BPF_JMP | BPF_EXIT);
insns_cnt++) {
}
insns_cnt++;
fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCKOPT, NULL, "GPL", insns, insns_cnt, &opts);
if (verbose && fd < 0)
fprintf(stderr, "%s\n", bpf_log_buf);
return fd;
}
static int run_test(int cgroup_fd, struct sockopt_test *test)
{
int sock_fd, err, prog_fd;
void *optval = NULL;
int ret = 0;
prog_fd = load_prog(test->insns, test->expected_attach_type);
if (prog_fd < 0) {
if (test->error == DENY_LOAD)
return 0;
log_err("Failed to load BPF program");
return -1;
}
err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0);
if (err < 0) {
if (test->error == DENY_ATTACH)
goto close_prog_fd;
log_err("Failed to attach BPF program");
ret = -1;
goto close_prog_fd;
}
sock_fd = socket(AF_INET, SOCK_STREAM, 0);
if (sock_fd < 0) {
log_err("Failed to create AF_INET socket");
ret = -1;
goto detach_prog;
}
if (test->set_optlen) {
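		/* the tests assume 4096-byte pages; rescale optlen to the
		 * kernel's actual page size
		 */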
if (test->set_optlen >= PAGE_SIZE) {
int num_pages = test->set_optlen / PAGE_SIZE;
int remainder = test->set_optlen % PAGE_SIZE;
test->set_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder;
}
err = setsockopt(sock_fd, test->set_level, test->set_optname,
test->set_optval, test->set_optlen);
if (err) {
if (errno == EPERM && test->error == EPERM_SETSOCKOPT)
goto close_sock_fd;
if (errno == EFAULT && test->error == EFAULT_SETSOCKOPT)
goto free_optval;
log_err("Failed to call setsockopt");
ret = -1;
goto close_sock_fd;
}
}
if (test->get_optlen) {
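		/* same page-size rescaling as for setsockopt above */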
if (test->get_optlen >= PAGE_SIZE) {
int num_pages = test->get_optlen / PAGE_SIZE;
int remainder = test->get_optlen % PAGE_SIZE;
test->get_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder;
}
		optval = malloc(test->get_optlen);
		if (!optval) {
			log_err("Failed to allocate memory for optval");
			ret = -1;
			goto close_sock_fd;
		}
		memset(optval, 0, test->get_optlen);
socklen_t optlen = test->get_optlen;
socklen_t expected_get_optlen = test->get_optlen_ret ?:
test->get_optlen;
err = getsockopt(sock_fd, test->get_level, test->get_optname,
optval, &optlen);
if (err) {
if (errno == EOPNOTSUPP && test->error == EOPNOTSUPP_GETSOCKOPT)
goto free_optval;
if (errno == EPERM && test->error == EPERM_GETSOCKOPT)
goto free_optval;
if (errno == EFAULT && test->error == EFAULT_GETSOCKOPT)
goto free_optval;
log_err("Failed to call getsockopt");
ret = -1;
goto free_optval;
}
if (optlen != expected_get_optlen) {
errno = 0;
log_err("getsockopt returned unexpected optlen");
ret = -1;
goto free_optval;
}
if (memcmp(optval, test->get_optval, optlen) != 0) {
errno = 0;
log_err("getsockopt returned unexpected optval");
ret = -1;
goto free_optval;
}
}
ret = test->error != OK;
free_optval:
free(optval);
close_sock_fd:
close(sock_fd);
detach_prog:
bpf_prog_detach2(prog_fd, cgroup_fd, test->attach_type);
close_prog_fd:
close(prog_fd);
return ret;
}
void test_sockopt(void)
{
int cgroup_fd, i;
cgroup_fd = test__join_cgroup("/sockopt");
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
return;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
if (!test__start_subtest(tests[i].descr))
continue;
ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr);
}
close(cgroup_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sockopt.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "test_global_func1.skel.h"
#include "test_global_func2.skel.h"
#include "test_global_func3.skel.h"
#include "test_global_func4.skel.h"
#include "test_global_func5.skel.h"
#include "test_global_func6.skel.h"
#include "test_global_func7.skel.h"
#include "test_global_func8.skel.h"
#include "test_global_func9.skel.h"
#include "test_global_func10.skel.h"
#include "test_global_func11.skel.h"
#include "test_global_func12.skel.h"
#include "test_global_func13.skel.h"
#include "test_global_func14.skel.h"
#include "test_global_func15.skel.h"
#include "test_global_func16.skel.h"
#include "test_global_func17.skel.h"
#include "test_global_func_ctx_args.skel.h"
void test_test_global_funcs(void)
{
RUN_TESTS(test_global_func1);
RUN_TESTS(test_global_func2);
RUN_TESTS(test_global_func3);
RUN_TESTS(test_global_func4);
RUN_TESTS(test_global_func5);
RUN_TESTS(test_global_func6);
RUN_TESTS(test_global_func7);
RUN_TESTS(test_global_func8);
RUN_TESTS(test_global_func9);
RUN_TESTS(test_global_func10);
RUN_TESTS(test_global_func11);
RUN_TESTS(test_global_func12);
RUN_TESTS(test_global_func13);
RUN_TESTS(test_global_func14);
RUN_TESTS(test_global_func15);
RUN_TESTS(test_global_func16);
RUN_TESTS(test_global_func17);
RUN_TESTS(test_global_func_ctx_args);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_global_funcs.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#define _SDT_HAS_SEMAPHORES 1
#include "../sdt.h"
#include "test_usdt.skel.h"
#include "test_urandom_usdt.skel.h"
int lets_test_this(int);
static volatile int idx = 2;
static volatile __u64 bla = 0xFEDCBA9876543210ULL;
static volatile short nums[] = {-1, -2, -3, -4};
static volatile struct {
int x;
signed char y;
} t1 = { 1, -127 };
#define SEC(name) __attribute__((section(name), used))
unsigned short test_usdt0_semaphore SEC(".probes");
unsigned short test_usdt3_semaphore SEC(".probes");
unsigned short test_usdt12_semaphore SEC(".probes");
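/* USDT semaphores in the .probes section are bumped while the corresponding
 * probe is attached, so the checks below skip probes that nobody is tracing
 */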
static __always_inline void trigger_func(int x)
{
long y = 42;
if (test_usdt0_semaphore)
STAP_PROBE(test, usdt0);
if (test_usdt3_semaphore)
STAP_PROBE3(test, usdt3, x, y, &bla);
if (test_usdt12_semaphore) {
STAP_PROBE12(test, usdt12,
x, x + 1, y, x + y, 5,
y / 7, bla, &bla, -9, nums[x],
nums[idx], t1.y);
}
}
static void subtest_basic_usdt(void)
{
LIBBPF_OPTS(bpf_usdt_opts, opts);
struct test_usdt *skel;
struct test_usdt__bss *bss;
int err;
skel = test_usdt__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
bss = skel->bss;
bss->my_pid = getpid();
err = test_usdt__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
/* usdt0 won't be auto-attached */
opts.usdt_cookie = 0xcafedeadbeeffeed;
skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0,
0 /*self*/, "/proc/self/exe",
"test", "usdt0", &opts);
if (!ASSERT_OK_PTR(skel->links.usdt0, "usdt0_link"))
goto cleanup;
trigger_func(1);
ASSERT_EQ(bss->usdt0_called, 1, "usdt0_called");
ASSERT_EQ(bss->usdt3_called, 1, "usdt3_called");
ASSERT_EQ(bss->usdt12_called, 1, "usdt12_called");
ASSERT_EQ(bss->usdt0_cookie, 0xcafedeadbeeffeed, "usdt0_cookie");
ASSERT_EQ(bss->usdt0_arg_cnt, 0, "usdt0_arg_cnt");
ASSERT_EQ(bss->usdt0_arg_ret, -ENOENT, "usdt0_arg_ret");
/* auto-attached usdt3 gets default zero cookie value */
ASSERT_EQ(bss->usdt3_cookie, 0, "usdt3_cookie");
ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt");
ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret");
ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret");
ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret");
ASSERT_EQ(bss->usdt3_args[0], 1, "usdt3_arg1");
ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2");
ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3");
/* auto-attached usdt12 gets default zero cookie value */
ASSERT_EQ(bss->usdt12_cookie, 0, "usdt12_cookie");
ASSERT_EQ(bss->usdt12_arg_cnt, 12, "usdt12_arg_cnt");
ASSERT_EQ(bss->usdt12_args[0], 1, "usdt12_arg1");
ASSERT_EQ(bss->usdt12_args[1], 1 + 1, "usdt12_arg2");
ASSERT_EQ(bss->usdt12_args[2], 42, "usdt12_arg3");
ASSERT_EQ(bss->usdt12_args[3], 42 + 1, "usdt12_arg4");
ASSERT_EQ(bss->usdt12_args[4], 5, "usdt12_arg5");
ASSERT_EQ(bss->usdt12_args[5], 42 / 7, "usdt12_arg6");
ASSERT_EQ(bss->usdt12_args[6], bla, "usdt12_arg7");
ASSERT_EQ(bss->usdt12_args[7], (uintptr_t)&bla, "usdt12_arg8");
ASSERT_EQ(bss->usdt12_args[8], -9, "usdt12_arg9");
ASSERT_EQ(bss->usdt12_args[9], nums[1], "usdt12_arg10");
ASSERT_EQ(bss->usdt12_args[10], nums[idx], "usdt12_arg11");
ASSERT_EQ(bss->usdt12_args[11], t1.y, "usdt12_arg12");
/* trigger_func() is marked __always_inline, so USDT invocations will be
* inlined in two different places, meaning that each USDT will have
* at least 2 different places to be attached to. This verifies that
* bpf_program__attach_usdt() handles this properly and attaches to
* all possible places of USDT invocation.
*/
trigger_func(2);
ASSERT_EQ(bss->usdt0_called, 2, "usdt0_called");
ASSERT_EQ(bss->usdt3_called, 2, "usdt3_called");
ASSERT_EQ(bss->usdt12_called, 2, "usdt12_called");
/* only check values that depend on trigger_func()'s input value */
ASSERT_EQ(bss->usdt3_args[0], 2, "usdt3_arg1");
ASSERT_EQ(bss->usdt12_args[0], 2, "usdt12_arg1");
ASSERT_EQ(bss->usdt12_args[1], 2 + 1, "usdt12_arg2");
ASSERT_EQ(bss->usdt12_args[3], 42 + 2, "usdt12_arg4");
ASSERT_EQ(bss->usdt12_args[9], nums[2], "usdt12_arg10");
/* detach and re-attach usdt3 */
bpf_link__destroy(skel->links.usdt3);
opts.usdt_cookie = 0xBADC00C51E;
skel->links.usdt3 = bpf_program__attach_usdt(skel->progs.usdt3, -1 /* any pid */,
"/proc/self/exe", "test", "usdt3", &opts);
if (!ASSERT_OK_PTR(skel->links.usdt3, "usdt3_reattach"))
goto cleanup;
trigger_func(3);
ASSERT_EQ(bss->usdt3_called, 3, "usdt3_called");
/* this time usdt3 has custom cookie */
ASSERT_EQ(bss->usdt3_cookie, 0xBADC00C51E, "usdt3_cookie");
ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt");
ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret");
ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret");
ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret");
ASSERT_EQ(bss->usdt3_args[0], 3, "usdt3_arg1");
ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2");
ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3");
cleanup:
test_usdt__destroy(skel);
}
unsigned short test_usdt_100_semaphore SEC(".probes");
unsigned short test_usdt_300_semaphore SEC(".probes");
unsigned short test_usdt_400_semaphore SEC(".probes");
#define R10(F, X) F(X+0); F(X+1);F(X+2); F(X+3); F(X+4); \
F(X+5); F(X+6); F(X+7); F(X+8); F(X+9);
#define R100(F, X) R10(F,X+ 0);R10(F,X+10);R10(F,X+20);R10(F,X+30);R10(F,X+40); \
R10(F,X+50);R10(F,X+60);R10(F,X+70);R10(F,X+80);R10(F,X+90);
/* carefully control that we get exactly 100 inlined call sites: f100() is
 * __always_inline, while trigger_100_usdts() is __weak so the compiler can't
 * merge or optimize the call sites away
 */
static void __always_inline f100(int x)
{
STAP_PROBE1(test, usdt_100, x);
}
__weak void trigger_100_usdts(void)
{
R100(f100, 0);
}
/* we shouldn't be able to attach to test:usdt_300 USDT as we don't have that
 * many slots for specs. It's important that each STAP_PROBE1() invocation
 * (after unrolling) gets a different arg spec due to the compiler inlining x
 * as a constant
 */
static void __always_inline f300(int x)
{
STAP_PROBE1(test, usdt_300, x);
}
__weak void trigger_300_usdts(void)
{
R100(f300, 0);
R100(f300, 100);
R100(f300, 200);
}
static void __always_inline f400(int x __attribute__((unused)))
{
STAP_PROBE1(test, usdt_400, 400);
}
/* this time we have 400 different USDT call sites, but they have uniform
* argument location, so libbpf's spec string deduplication logic should keep
* spec count use very small and so we should be able to attach to all 400
* call sites
*/
__weak void trigger_400_usdts(void)
{
R100(f400, 0);
R100(f400, 100);
R100(f400, 200);
R100(f400, 300);
}
static void subtest_multispec_usdt(void)
{
LIBBPF_OPTS(bpf_usdt_opts, opts);
struct test_usdt *skel;
struct test_usdt__bss *bss;
int err, i;
skel = test_usdt__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
bss = skel->bss;
bss->my_pid = getpid();
err = test_usdt__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
/* usdt_100 is auto-attached and there are 100 inlined call sites,
* let's validate that all of them are properly attached to and
* handled from BPF side
*/
trigger_100_usdts();
ASSERT_EQ(bss->usdt_100_called, 100, "usdt_100_called");
ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum");
/* Stress test free spec ID tracking. By default libbpf allows up to
* 256 specs to be used, so if we don't return free spec IDs back
* after few detachments and re-attachments we should run out of
* available spec IDs.
*/
for (i = 0; i < 2; i++) {
bpf_link__destroy(skel->links.usdt_100);
skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1,
"/proc/self/exe",
"test", "usdt_100", NULL);
if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_100_reattach"))
goto cleanup;
bss->usdt_100_sum = 0;
trigger_100_usdts();
ASSERT_EQ(bss->usdt_100_called, (i + 1) * 100 + 100, "usdt_100_called");
ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum");
}
/* Now let's step it up and try to attach USDT that requires more than
* 256 attach points with different specs for each.
* Note that we need trigger_300_usdts() only to actually have 300
* USDT call sites, we are not going to actually trace them.
*/
trigger_300_usdts();
/* we'll reuse usdt_100 BPF program for usdt_300 test */
bpf_link__destroy(skel->links.usdt_100);
skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, "/proc/self/exe",
"test", "usdt_300", NULL);
err = -errno;
if (!ASSERT_ERR_PTR(skel->links.usdt_100, "usdt_300_bad_attach"))
goto cleanup;
ASSERT_EQ(err, -E2BIG, "usdt_300_attach_err");
/* let's check that there are no "dangling" BPF programs attached due
* to partial success of the above test:usdt_300 attachment
*/
bss->usdt_100_called = 0;
bss->usdt_100_sum = 0;
f300(777); /* this is 301st instance of usdt_300 */
ASSERT_EQ(bss->usdt_100_called, 0, "usdt_301_called");
ASSERT_EQ(bss->usdt_100_sum, 0, "usdt_301_sum");
/* This time we have USDT with 400 inlined invocations, but arg specs
* should be the same across all sites, so libbpf will only need to
* use one spec and thus we'll be able to attach 400 uprobes
* successfully.
*
* Again, we are reusing usdt_100 BPF program.
*/
skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1,
"/proc/self/exe",
"test", "usdt_400", NULL);
if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_400_attach"))
goto cleanup;
trigger_400_usdts();
ASSERT_EQ(bss->usdt_100_called, 400, "usdt_400_called");
ASSERT_EQ(bss->usdt_100_sum, 400 * 400, "usdt_400_sum");
cleanup:
test_usdt__destroy(skel);
}
static FILE *urand_spawn(int *pid)
{
FILE *f;
/* urandom_read's stdout is wired into f */
f = popen("./urandom_read 1 report-pid", "r");
if (!f)
return NULL;
if (fscanf(f, "%d", pid) != 1) {
pclose(f);
errno = EINVAL;
return NULL;
}
return f;
}
static int urand_trigger(FILE **urand_pipe)
{
int exit_code;
/* pclose() waits for child process to exit and returns their exit code */
exit_code = pclose(*urand_pipe);
*urand_pipe = NULL;
return exit_code;
}
static void subtest_urandom_usdt(bool auto_attach)
{
struct test_urandom_usdt *skel;
struct test_urandom_usdt__bss *bss;
struct bpf_link *l;
FILE *urand_pipe = NULL;
int err, urand_pid = 0;
skel = test_urandom_usdt__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
urand_pipe = urand_spawn(&urand_pid);
if (!ASSERT_OK_PTR(urand_pipe, "urand_spawn"))
goto cleanup;
bss = skel->bss;
bss->urand_pid = urand_pid;
if (auto_attach) {
err = test_urandom_usdt__attach(skel);
if (!ASSERT_OK(err, "skel_auto_attach"))
goto cleanup;
} else {
l = bpf_program__attach_usdt(skel->progs.urand_read_without_sema,
urand_pid, "./urandom_read",
"urand", "read_without_sema", NULL);
if (!ASSERT_OK_PTR(l, "urand_without_sema_attach"))
goto cleanup;
skel->links.urand_read_without_sema = l;
l = bpf_program__attach_usdt(skel->progs.urand_read_with_sema,
urand_pid, "./urandom_read",
"urand", "read_with_sema", NULL);
if (!ASSERT_OK_PTR(l, "urand_with_sema_attach"))
goto cleanup;
skel->links.urand_read_with_sema = l;
l = bpf_program__attach_usdt(skel->progs.urandlib_read_without_sema,
urand_pid, "./liburandom_read.so",
"urandlib", "read_without_sema", NULL);
if (!ASSERT_OK_PTR(l, "urandlib_without_sema_attach"))
goto cleanup;
skel->links.urandlib_read_without_sema = l;
l = bpf_program__attach_usdt(skel->progs.urandlib_read_with_sema,
urand_pid, "./liburandom_read.so",
"urandlib", "read_with_sema", NULL);
if (!ASSERT_OK_PTR(l, "urandlib_with_sema_attach"))
goto cleanup;
skel->links.urandlib_read_with_sema = l;
}
/* trigger urandom_read USDTs */
ASSERT_OK(urand_trigger(&urand_pipe), "urand_exit_code");
ASSERT_EQ(bss->urand_read_without_sema_call_cnt, 1, "urand_wo_sema_cnt");
ASSERT_EQ(bss->urand_read_without_sema_buf_sz_sum, 256, "urand_wo_sema_sum");
ASSERT_EQ(bss->urand_read_with_sema_call_cnt, 1, "urand_w_sema_cnt");
ASSERT_EQ(bss->urand_read_with_sema_buf_sz_sum, 256, "urand_w_sema_sum");
ASSERT_EQ(bss->urandlib_read_without_sema_call_cnt, 1, "urandlib_wo_sema_cnt");
ASSERT_EQ(bss->urandlib_read_without_sema_buf_sz_sum, 256, "urandlib_wo_sema_sum");
ASSERT_EQ(bss->urandlib_read_with_sema_call_cnt, 1, "urandlib_w_sema_cnt");
ASSERT_EQ(bss->urandlib_read_with_sema_buf_sz_sum, 256, "urandlib_w_sema_sum");
cleanup:
if (urand_pipe)
pclose(urand_pipe);
test_urandom_usdt__destroy(skel);
}
void test_usdt(void)
{
if (test__start_subtest("basic"))
subtest_basic_usdt();
if (test__start_subtest("multispec"))
subtest_multispec_usdt();
if (test__start_subtest("urand_auto_attach"))
subtest_urandom_usdt(true /* auto_attach */);
if (test__start_subtest("urand_pid_attach"))
subtest_urandom_usdt(false /* auto_attach */);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/usdt.c |
// SPDX-License-Identifier: GPL-2.0
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <test_progs.h>
__u32 get_map_id(struct bpf_object *obj, const char *name)
{
struct bpf_map_info map_info = {};
__u32 map_info_len, duration = 0;
struct bpf_map *map;
int err;
map_info_len = sizeof(map_info);
map = bpf_object__find_map_by_name(obj, name);
if (CHECK(!map, "find map", "NULL map"))
return 0;
err = bpf_map_get_info_by_fd(bpf_map__fd(map),
&map_info, &map_info_len);
CHECK(err, "get map info", "err %d errno %d", err, errno);
return map_info.id;
}
void test_pinning(void)
{
const char *file_invalid = "./test_pinning_invalid.bpf.o";
const char *custpinpath = "/sys/fs/bpf/custom/pinmap";
const char *nopinpath = "/sys/fs/bpf/nopinmap";
const char *nopinpath2 = "/sys/fs/bpf/nopinmap2";
const char *custpath = "/sys/fs/bpf/custom";
const char *pinpath = "/sys/fs/bpf/pinmap";
const char *file = "./test_pinning.bpf.o";
__u32 map_id, map_id2, duration = 0;
struct stat statbuf = {};
struct bpf_object *obj;
struct bpf_map *map;
int err, map_fd;
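	/* open opts with a custom pin root path, used by the auto-pinning
	 * test further below
	 */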
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
.pin_root_path = custpath,
);
/* check that opening fails with invalid pinning value in map def */
obj = bpf_object__open_file(file_invalid, NULL);
err = libbpf_get_error(obj);
if (CHECK(err != -EINVAL, "invalid open", "err %d errno %d\n", err, errno)) {
obj = NULL;
goto out;
}
/* open the valid object file */
obj = bpf_object__open_file(file, NULL);
err = libbpf_get_error(obj);
if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) {
obj = NULL;
goto out;
}
err = bpf_object__load(obj);
if (CHECK(err, "default load", "err %d errno %d\n", err, errno))
goto out;
/* check that pinmap was pinned */
err = stat(pinpath, &statbuf);
if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno))
goto out;
/* check that nopinmap was *not* pinned */
err = stat(nopinpath, &statbuf);
if (CHECK(!err || errno != ENOENT, "stat nopinpath",
"err %d errno %d\n", err, errno))
goto out;
/* check that nopinmap2 was *not* pinned */
err = stat(nopinpath2, &statbuf);
if (CHECK(!err || errno != ENOENT, "stat nopinpath2",
"err %d errno %d\n", err, errno))
goto out;
map_id = get_map_id(obj, "pinmap");
if (!map_id)
goto out;
bpf_object__close(obj);
obj = bpf_object__open_file(file, NULL);
if (CHECK_FAIL(libbpf_get_error(obj))) {
obj = NULL;
goto out;
}
err = bpf_object__load(obj);
if (CHECK(err, "default load", "err %d errno %d\n", err, errno))
goto out;
/* check that same map ID was reused for second load */
map_id2 = get_map_id(obj, "pinmap");
if (CHECK(map_id != map_id2, "check reuse",
"err %d errno %d id %d id2 %d\n", err, errno, map_id, map_id2))
goto out;
/* should be no-op to re-pin same map */
map = bpf_object__find_map_by_name(obj, "pinmap");
if (CHECK(!map, "find map", "NULL map"))
goto out;
err = bpf_map__pin(map, NULL);
if (CHECK(err, "re-pin map", "err %d errno %d\n", err, errno))
goto out;
/* but error to pin at different location */
err = bpf_map__pin(map, "/sys/fs/bpf/other");
if (CHECK(!err, "pin map different", "err %d errno %d\n", err, errno))
goto out;
/* unpin maps with a pin_path set */
err = bpf_object__unpin_maps(obj, NULL);
if (CHECK(err, "unpin maps", "err %d errno %d\n", err, errno))
goto out;
/* and re-pin them... */
err = bpf_object__pin_maps(obj, NULL);
if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno))
goto out;
/* get pinning path */
if (!ASSERT_STREQ(bpf_map__pin_path(map), pinpath, "get pin path"))
goto out;
/* set pinning path of other map and re-pin all */
map = bpf_object__find_map_by_name(obj, "nopinmap");
if (CHECK(!map, "find map", "NULL map"))
goto out;
err = bpf_map__set_pin_path(map, custpinpath);
if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno))
goto out;
/* get pinning path after set */
if (!ASSERT_STREQ(bpf_map__pin_path(map), custpinpath,
"get pin path after set"))
goto out;
/* should only pin the one unpinned map */
err = bpf_object__pin_maps(obj, NULL);
if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno))
goto out;
/* check that nopinmap was pinned at the custom path */
err = stat(custpinpath, &statbuf);
if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno))
goto out;
/* remove the custom pin path to re-test it with auto-pinning below */
err = unlink(custpinpath);
if (CHECK(err, "unlink custpinpath", "err %d errno %d\n", err, errno))
goto out;
err = rmdir(custpath);
if (CHECK(err, "rmdir custpindir", "err %d errno %d\n", err, errno))
goto out;
bpf_object__close(obj);
/* open the valid object file again */
obj = bpf_object__open_file(file, NULL);
err = libbpf_get_error(obj);
if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) {
obj = NULL;
goto out;
}
/* set pin paths so that nopinmap2 will attempt to reuse the map at
* pinpath (which will fail), but not before pinmap has already been
* reused
*/
bpf_object__for_each_map(map, obj) {
if (!strcmp(bpf_map__name(map), "nopinmap"))
err = bpf_map__set_pin_path(map, nopinpath2);
else if (!strcmp(bpf_map__name(map), "nopinmap2"))
err = bpf_map__set_pin_path(map, pinpath);
else
continue;
if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno))
goto out;
}
/* should fail because of map parameter mismatch */
err = bpf_object__load(obj);
if (CHECK(err != -EINVAL, "param mismatch load", "err %d errno %d\n", err, errno))
goto out;
/* nopinmap2 should have been pinned and cleaned up again */
err = stat(nopinpath2, &statbuf);
if (CHECK(!err || errno != ENOENT, "stat nopinpath2",
"err %d errno %d\n", err, errno))
goto out;
/* pinmap should still be there */
err = stat(pinpath, &statbuf);
if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno))
goto out;
bpf_object__close(obj);
/* test auto-pinning at custom path with open opt */
obj = bpf_object__open_file(file, &opts);
if (CHECK_FAIL(libbpf_get_error(obj))) {
obj = NULL;
goto out;
}
err = bpf_object__load(obj);
if (CHECK(err, "custom load", "err %d errno %d\n", err, errno))
goto out;
/* check that pinmap was pinned at the custom path */
err = stat(custpinpath, &statbuf);
if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno))
goto out;
/* remove the custom pin path to re-test it with reuse fd below */
err = unlink(custpinpath);
if (CHECK(err, "unlink custpinpath", "err %d errno %d\n", err, errno))
goto out;
err = rmdir(custpath);
if (CHECK(err, "rmdir custpindir", "err %d errno %d\n", err, errno))
goto out;
bpf_object__close(obj);
/* test pinning at custom path with reuse fd */
obj = bpf_object__open_file(file, NULL);
err = libbpf_get_error(obj);
if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) {
obj = NULL;
goto out;
}
map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(__u32),
sizeof(__u64), 1, NULL);
if (CHECK(map_fd < 0, "create pinmap manually", "fd %d\n", map_fd))
goto out;
map = bpf_object__find_map_by_name(obj, "pinmap");
if (CHECK(!map, "find map", "NULL map"))
goto close_map_fd;
err = bpf_map__reuse_fd(map, map_fd);
if (CHECK(err, "reuse pinmap fd", "err %d errno %d\n", err, errno))
goto close_map_fd;
err = bpf_map__set_pin_path(map, custpinpath);
if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno))
goto close_map_fd;
err = bpf_object__load(obj);
if (CHECK(err, "custom load", "err %d errno %d\n", err, errno))
goto close_map_fd;
/* check that pinmap was pinned at the custom path */
err = stat(custpinpath, &statbuf);
if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno))
goto close_map_fd;
close_map_fd:
close(map_fd);
out:
unlink(pinpath);
unlink(nopinpath);
unlink(nopinpath2);
unlink(custpinpath);
rmdir(custpath);
if (obj)
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/pinning.c |
// SPDX-License-Identifier: GPL-2.0
#include <bpf/btf.h>
#include <test_btf.h>
#include <linux/btf.h>
#include <test_progs.h>
#include <network_helpers.h>
#include "linked_list.skel.h"
#include "linked_list_fail.skel.h"
static char log_buf[1024 * 1024];
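/* map each intentionally failing program in linked_list_fail.c to the
 * verifier error message it is expected to produce
 */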
static struct {
const char *prog_name;
const char *err_msg;
} linked_list_fail_tests[] = {
#define TEST(test, off) \
{ #test "_missing_lock_push_front", \
"bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
{ #test "_missing_lock_push_back", \
"bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
{ #test "_missing_lock_pop_front", \
"bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
{ #test "_missing_lock_pop_back", \
"bpf_spin_lock at off=" #off " must be held for bpf_list_head" },
TEST(kptr, 40)
TEST(global, 16)
TEST(map, 0)
TEST(inner_map, 0)
#undef TEST
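/* The second TEST() macro expands each invocation into four entries, one per
* lock location (kptr, global, map, inner_map), all expecting the "held lock
* and object are not in the same allocation" error for the given operation.
*/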
#define TEST(test, op) \
{ #test "_kptr_incorrect_lock_" #op, \
"held lock and object are not in the same allocation\n" \
"bpf_spin_lock at off=40 must be held for bpf_list_head" }, \
{ #test "_global_incorrect_lock_" #op, \
"held lock and object are not in the same allocation\n" \
"bpf_spin_lock at off=16 must be held for bpf_list_head" }, \
{ #test "_map_incorrect_lock_" #op, \
"held lock and object are not in the same allocation\n" \
"bpf_spin_lock at off=0 must be held for bpf_list_head" }, \
{ #test "_inner_map_incorrect_lock_" #op, \
"held lock and object are not in the same allocation\n" \
"bpf_spin_lock at off=0 must be held for bpf_list_head" },
TEST(kptr, push_front)
TEST(kptr, push_back)
TEST(kptr, pop_front)
TEST(kptr, pop_back)
TEST(global, push_front)
TEST(global, push_back)
TEST(global, pop_front)
TEST(global, pop_back)
TEST(map, push_front)
TEST(map, push_back)
TEST(map, pop_front)
TEST(map, pop_back)
TEST(inner_map, push_front)
TEST(inner_map, push_back)
TEST(inner_map, pop_front)
TEST(inner_map, pop_back)
#undef TEST
{ "map_compat_kprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
{ "map_compat_kretprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
{ "map_compat_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
{ "map_compat_perf", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
{ "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
{ "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" },
{ "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" },
{ "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
{ "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
{ "obj_new_acq", "Unreleased reference id=" },
{ "use_after_drop", "invalid mem access 'scalar'" },
{ "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" },
{ "direct_read_lock", "direct access to bpf_spin_lock is disallowed" },
{ "direct_write_lock", "direct access to bpf_spin_lock is disallowed" },
{ "direct_read_head", "direct access to bpf_list_head is disallowed" },
{ "direct_write_head", "direct access to bpf_list_head is disallowed" },
{ "direct_read_node", "direct access to bpf_list_node is disallowed" },
{ "direct_write_node", "direct access to bpf_list_node is disallowed" },
{ "use_after_unlock_push_front", "invalid mem access 'scalar'" },
{ "use_after_unlock_push_back", "invalid mem access 'scalar'" },
{ "double_push_front", "arg#1 expected pointer to allocated object" },
{ "double_push_back", "arg#1 expected pointer to allocated object" },
{ "no_node_value_type", "bpf_list_node not found at offset=0" },
{ "incorrect_value_type",
"operation on bpf_list_head expects arg#1 bpf_list_node at offset=48 in struct foo, "
"but arg is at offset=0 in struct bar" },
{ "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
{ "incorrect_node_off1", "bpf_list_node not found at offset=49" },
{ "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=48 in struct foo" },
{ "no_head_type", "bpf_list_head not found at offset=0" },
{ "incorrect_head_var_off1", "R1 doesn't have constant offset" },
{ "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
{ "incorrect_head_off1", "bpf_list_head not found at offset=25" },
{ "incorrect_head_off2", "bpf_list_head not found at offset=1" },
{ "pop_front_off",
"15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
"R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
"16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
{ "pop_back_off",
"15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
"R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
"16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
};
static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
{
LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
.kernel_log_size = sizeof(log_buf),
.kernel_log_level = 1);
struct linked_list_fail *skel;
struct bpf_program *prog;
int ret;
skel = linked_list_fail__open_opts(&opts);
if (!ASSERT_OK_PTR(skel, "linked_list_fail__open_opts"))
return;
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto end;
bpf_program__set_autoload(prog, true);
ret = linked_list_fail__load(skel);
if (!ASSERT_ERR(ret, "linked_list_fail__load must fail"))
goto end;
if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
fprintf(stderr, "Expected: %s\n", err_msg);
fprintf(stderr, "Verifier: %s\n", log_buf);
}
end:
linked_list_fail__destroy(skel);
}
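/* Reset the map element from userspace between runs; the update presumably
* lets the kernel check and free any list nodes left in the element by the
* previous test (hence the "check_and_free_fields" assert name).
*/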
static void clear_fields(struct bpf_map *map)
{
char buf[24];
int key = 0;
memset(buf, 0xff, sizeof(buf));
ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0), "check_and_free_fields");
}
enum {
TEST_ALL,
PUSH_POP,
PUSH_POP_MULT,
LIST_IN_LIST,
};
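/* TEST_ALL runs all three groups below; PUSH_POP runs only the single
* push/pop programs, PUSH_POP_MULT only the multiple push/pop programs and
* LIST_IN_LIST only the list-in-list programs (selected via the gotos).
*/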
static void test_linked_list_success(int mode, bool leave_in_map)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct linked_list *skel;
int ret;
skel = linked_list__open_and_load();
if (!ASSERT_OK_PTR(skel, "linked_list__open_and_load"))
return;
if (mode == LIST_IN_LIST)
goto lil;
if (mode == PUSH_POP_MULT)
goto ppm;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop), &opts);
ASSERT_OK(ret, "map_list_push_pop");
ASSERT_OK(opts.retval, "map_list_push_pop retval");
if (!leave_in_map)
clear_fields(skel->maps.array_map);
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop), &opts);
ASSERT_OK(ret, "inner_map_list_push_pop");
ASSERT_OK(opts.retval, "inner_map_list_push_pop retval");
if (!leave_in_map)
clear_fields(skel->maps.inner_map);
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop), &opts);
ASSERT_OK(ret, "global_list_push_pop");
ASSERT_OK(opts.retval, "global_list_push_pop retval");
if (!leave_in_map)
clear_fields(skel->maps.bss_A);
if (mode == PUSH_POP)
goto end;
ppm:
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop_multiple), &opts);
ASSERT_OK(ret, "map_list_push_pop_multiple");
ASSERT_OK(opts.retval, "map_list_push_pop_multiple retval");
if (!leave_in_map)
clear_fields(skel->maps.array_map);
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop_multiple), &opts);
ASSERT_OK(ret, "inner_map_list_push_pop_multiple");
ASSERT_OK(opts.retval, "inner_map_list_push_pop_multiple retval");
if (!leave_in_map)
clear_fields(skel->maps.inner_map);
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_multiple), &opts);
ASSERT_OK(ret, "global_list_push_pop_multiple");
ASSERT_OK(opts.retval, "global_list_push_pop_multiple retval");
if (!leave_in_map)
clear_fields(skel->maps.bss_A);
if (mode == PUSH_POP_MULT)
goto end;
lil:
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_in_list), &opts);
ASSERT_OK(ret, "map_list_in_list");
ASSERT_OK(opts.retval, "map_list_in_list retval");
if (!leave_in_map)
clear_fields(skel->maps.array_map);
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_in_list), &opts);
ASSERT_OK(ret, "inner_map_list_in_list");
ASSERT_OK(opts.retval, "inner_map_list_in_list retval");
if (!leave_in_map)
clear_fields(skel->maps.inner_map);
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_in_list), &opts);
ASSERT_OK(ret, "global_list_in_list");
ASSERT_OK(opts.retval, "global_list_in_list retval");
if (!leave_in_map)
clear_fields(skel->maps.bss_A);
end:
linked_list__destroy(skel);
}
#define SPIN_LOCK 2
#define LIST_HEAD 3
#define LIST_NODE 4
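/* These constants mirror the BTF type IDs that init_btf() below assigns:
* int = 1, bpf_spin_lock = 2, bpf_list_head = 3, bpf_list_node = 4.
*/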
static struct btf *init_btf(void)
{
int id, lid, hid, nid;
struct btf *btf;
btf = btf__new_empty();
if (!ASSERT_OK_PTR(btf, "btf__new_empty"))
return NULL;
id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
if (!ASSERT_EQ(id, 1, "btf__add_int"))
goto end;
lid = btf__add_struct(btf, "bpf_spin_lock", 4);
if (!ASSERT_EQ(lid, SPIN_LOCK, "btf__add_struct bpf_spin_lock"))
goto end;
hid = btf__add_struct(btf, "bpf_list_head", 16);
if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head"))
goto end;
nid = btf__add_struct(btf, "bpf_list_node", 24);
if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node"))
goto end;
return btf;
end:
btf__free(btf);
return NULL;
}
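/* Build a struct "bar" that contains both a bpf_list_node and a bpf_rb_node.
* The kernel accepts this combination only when a bpf_refcount field is also
* present, so loading the BTF is expected to succeed with refcount_field set
* and to fail with -EINVAL otherwise.
*/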
static void list_and_rb_node_same_struct(bool refcount_field)
{
int bpf_rb_node_btf_id, bpf_refcount_btf_id, foo_btf_id;
struct btf *btf;
int id, err;
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
return;
bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 32);
if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node"))
return;
if (refcount_field) {
bpf_refcount_btf_id = btf__add_struct(btf, "bpf_refcount", 4);
if (!ASSERT_GT(bpf_refcount_btf_id, 0, "btf__add_struct bpf_refcount"))
return;
}
id = btf__add_struct(btf, "bar", refcount_field ? 60 : 56);
if (!ASSERT_GT(id, 0, "btf__add_struct bar"))
return;
err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
if (!ASSERT_OK(err, "btf__add_field bar::a"))
return;
err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 192, 0);
if (!ASSERT_OK(err, "btf__add_field bar::c"))
return;
if (refcount_field) {
err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 448, 0);
if (!ASSERT_OK(err, "btf__add_field bar::ref"))
return;
}
foo_btf_id = btf__add_struct(btf, "foo", 20);
if (!ASSERT_GT(foo_btf_id, 0, "btf__add_struct foo"))
return;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
return;
err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
return;
id = btf__add_decl_tag(btf, "contains:bar:a", foo_btf_id, 0);
if (!ASSERT_GT(id, 0, "btf__add_decl_tag contains:bar:a"))
return;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, refcount_field ? 0 : -EINVAL, "check btf");
btf__free(btf);
}
static void test_btf(void)
{
struct btf *btf = NULL;
int id, err;
while (test__start_subtest("btf: too many locks")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 24);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
if (!ASSERT_OK(err, "btf__add_struct foo::a"))
break;
err = btf__add_field(btf, "b", SPIN_LOCK, 32, 0);
if (!ASSERT_OK(err, "btf__add_struct foo::a"))
break;
err = btf__add_field(btf, "c", LIST_HEAD, 64, 0);
if (!ASSERT_OK(err, "btf__add_struct foo::a"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, -E2BIG, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: missing lock")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 16);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_struct foo::a"))
break;
id = btf__add_decl_tag(btf, "contains:baz:a", 5, 0);
if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:baz:a"))
break;
id = btf__add_struct(btf, "baz", 16);
if (!ASSERT_EQ(id, 7, "btf__add_struct baz"))
break;
err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
if (!ASSERT_OK(err, "btf__add_field baz::a"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, -EINVAL, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: bad offset")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 36);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
break;
err = btf__add_field(btf, "b", LIST_NODE, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
break;
err = btf__add_field(btf, "c", SPIN_LOCK, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::c"))
break;
id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, -EEXIST, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: missing contains:")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 24);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
break;
err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, -EINVAL, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: missing struct")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 24);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
break;
err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
break;
id = btf__add_decl_tag(btf, "contains:bar:bar", 5, 1);
if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:bar"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, -ENOENT, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: missing node")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 24);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
break;
err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
break;
id = btf__add_decl_tag(btf, "contains:foo:c", 5, 1);
if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:c"))
break;
err = btf__load_into_kernel(btf);
btf__free(btf);
ASSERT_EQ(err, -ENOENT, "check btf");
break;
}
while (test__start_subtest("btf: node incorrect type")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 20);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
break;
err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
break;
id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
break;
id = btf__add_struct(btf, "bar", 4);
if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
break;
err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
if (!ASSERT_OK(err, "btf__add_field bar::a"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, -EINVAL, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: multiple bpf_list_node with name b")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 52);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
break;
err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
break;
err = btf__add_field(btf, "b", LIST_NODE, 256, 0);
if (!ASSERT_OK(err, "btf__add_field foo::c"))
break;
err = btf__add_field(btf, "d", SPIN_LOCK, 384, 0);
if (!ASSERT_OK(err, "btf__add_field foo::d"))
break;
id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, -EINVAL, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: owning | owned AA cycle")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 44);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
break;
err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
break;
err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
if (!ASSERT_OK(err, "btf__add_field foo::c"))
break;
id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, -ELOOP, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: owning | owned ABA cycle")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 44);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
break;
err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
break;
err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
if (!ASSERT_OK(err, "btf__add_field foo::c"))
break;
id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
break;
id = btf__add_struct(btf, "bar", 44);
if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field bar::a"))
break;
err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
if (!ASSERT_OK(err, "btf__add_field bar::b"))
break;
err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
if (!ASSERT_OK(err, "btf__add_field bar::c"))
break;
id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0);
if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:foo:b"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, -ELOOP, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: owning -> owned")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 28);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
break;
err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
break;
id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
break;
id = btf__add_struct(btf, "bar", 24);
if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
break;
err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
if (!ASSERT_OK(err, "btf__add_field bar::a"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, 0, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: owning -> owning | owned -> owned")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 28);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
break;
err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
break;
id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
break;
id = btf__add_struct(btf, "bar", 44);
if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field bar::a"))
break;
err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
if (!ASSERT_OK(err, "btf__add_field bar::b"))
break;
err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
if (!ASSERT_OK(err, "btf__add_field bar::c"))
break;
id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
break;
id = btf__add_struct(btf, "baz", 24);
if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
break;
err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
if (!ASSERT_OK(err, "btf__add_field baz:a"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, 0, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: owning | owned -> owning | owned -> owned")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 44);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
break;
err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
break;
err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
if (!ASSERT_OK(err, "btf__add_field foo::c"))
break;
id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
break;
id = btf__add_struct(btf, "bar", 44);
if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field bar:a"))
break;
err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
if (!ASSERT_OK(err, "btf__add_field bar:b"))
break;
err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
if (!ASSERT_OK(err, "btf__add_field bar:c"))
break;
id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
break;
id = btf__add_struct(btf, "baz", 24);
if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
break;
err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
if (!ASSERT_OK(err, "btf__add_field baz:a"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, -ELOOP, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: owning -> owning | owned -> owning | owned -> owned")) {
btf = init_btf();
if (!ASSERT_OK_PTR(btf, "init_btf"))
break;
id = btf__add_struct(btf, "foo", 20);
if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field foo::a"))
break;
err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
if (!ASSERT_OK(err, "btf__add_field foo::b"))
break;
id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
break;
id = btf__add_struct(btf, "bar", 44);
if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field bar::a"))
break;
err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
if (!ASSERT_OK(err, "btf__add_field bar::b"))
break;
err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
if (!ASSERT_OK(err, "btf__add_field bar::c"))
break;
id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0);
if (!ASSERT_EQ(id, 8, "btf__add_decl_tag"))
break;
id = btf__add_struct(btf, "baz", 44);
if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
break;
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
if (!ASSERT_OK(err, "btf__add_field bar::a"))
break;
err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
if (!ASSERT_OK(err, "btf__add_field bar::b"))
break;
err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
if (!ASSERT_OK(err, "btf__add_field bar::c"))
break;
id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0);
if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a"))
break;
id = btf__add_struct(btf, "bam", 24);
if (!ASSERT_EQ(id, 11, "btf__add_struct bam"))
break;
err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
if (!ASSERT_OK(err, "btf__add_field bam::a"))
break;
err = btf__load_into_kernel(btf);
ASSERT_EQ(err, -ELOOP, "check btf");
btf__free(btf);
break;
}
while (test__start_subtest("btf: list_node and rb_node in same struct")) {
list_and_rb_node_same_struct(true);
break;
}
while (test__start_subtest("btf: list_node and rb_node in same struct, no bpf_refcount")) {
list_and_rb_node_same_struct(false);
break;
}
}
void test_linked_list(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(linked_list_fail_tests); i++) {
if (!test__start_subtest(linked_list_fail_tests[i].prog_name))
continue;
test_linked_list_fail_prog(linked_list_fail_tests[i].prog_name,
linked_list_fail_tests[i].err_msg);
}
test_btf();
test_linked_list_success(PUSH_POP, false);
test_linked_list_success(PUSH_POP, true);
test_linked_list_success(PUSH_POP_MULT, false);
test_linked_list_success(PUSH_POP_MULT, true);
test_linked_list_success(LIST_IN_LIST, false);
test_linked_list_success(LIST_IN_LIST, true);
test_linked_list_success(TEST_ALL, false);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/linked_list.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
*
* Author: Roberto Sassu <[email protected]>
*/
#include <linux/keyctl.h>
#include <test_progs.h>
#include "test_lookup_key.skel.h"
#define KEY_LOOKUP_CREATE 0x01
#define KEY_LOOKUP_PARTIAL 0x02
static bool kfunc_not_supported;
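/* Intercept libbpf's "ksym not found" message so the test can detect kernels
* that lack the bpf_lookup_user_key()/bpf_lookup_system_key()/bpf_key_put()
* kfuncs and skip instead of failing.
*/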
static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt,
va_list args)
{
char *func;
if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n"))
return 0;
func = va_arg(args, char *);
if (strcmp(func, "bpf_lookup_user_key") && strcmp(func, "bpf_key_put") &&
strcmp(func, "bpf_lookup_system_key"))
return 0;
kfunc_not_supported = true;
return 0;
}
void test_lookup_key(void)
{
libbpf_print_fn_t old_print_cb;
struct test_lookup_key *skel;
__u32 next_id;
int ret;
skel = test_lookup_key__open();
if (!ASSERT_OK_PTR(skel, "test_lookup_key__open"))
return;
old_print_cb = libbpf_set_print(libbpf_print_cb);
ret = test_lookup_key__load(skel);
libbpf_set_print(old_print_cb);
if (ret < 0 && kfunc_not_supported) {
printf("%s:SKIP:bpf_lookup_*_key(), bpf_key_put() kfuncs not supported\n",
__func__);
test__skip();
goto close_prog;
}
if (!ASSERT_OK(ret, "test_lookup_key__load"))
goto close_prog;
ret = test_lookup_key__attach(skel);
if (!ASSERT_OK(ret, "test_lookup_key__attach"))
goto close_prog;
skel->bss->monitored_pid = getpid();
skel->bss->key_serial = KEY_SPEC_THREAD_KEYRING;
/* The thread-specific keyring does not exist, so this lookup is expected to fail. */
skel->bss->flags = 0;
ret = bpf_prog_get_next_id(0, &next_id);
if (!ASSERT_LT(ret, 0, "bpf_prog_get_next_id"))
goto close_prog;
/* Force creation of the thread-specific keyring; this lookup is expected to succeed. */
skel->bss->flags = KEY_LOOKUP_CREATE;
ret = bpf_prog_get_next_id(0, &next_id);
if (!ASSERT_OK(ret, "bpf_prog_get_next_id"))
goto close_prog;
/* Pass both lookup flags for parameter validation. */
skel->bss->flags = KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL;
ret = bpf_prog_get_next_id(0, &next_id);
if (!ASSERT_OK(ret, "bpf_prog_get_next_id"))
goto close_prog;
/* Pass invalid flags. */
skel->bss->flags = UINT64_MAX;
ret = bpf_prog_get_next_id(0, &next_id);
if (!ASSERT_LT(ret, 0, "bpf_prog_get_next_id"))
goto close_prog;
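/* key_serial == 0 presumably switches the BPF program to the system-keyring
* lookup path; key_id = 1 is expected to be accepted, while UINT32_MAX below
* is expected to be rejected.
*/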
skel->bss->key_serial = 0;
skel->bss->key_id = 1;
ret = bpf_prog_get_next_id(0, &next_id);
if (!ASSERT_OK(ret, "bpf_prog_get_next_id"))
goto close_prog;
skel->bss->key_id = UINT32_MAX;
ret = bpf_prog_get_next_id(0, &next_id);
ASSERT_LT(ret, 0, "bpf_prog_get_next_id");
close_prog:
skel->bss->monitored_pid = 0;
test_lookup_key__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/lookup_key.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "test_btf_map_in_map.skel.h"
static int duration;
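/* Return the kernel-assigned ID of @map, or 0 if the info query fails. */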
static __u32 bpf_map_id(struct bpf_map *map)
{
struct bpf_map_info info;
__u32 info_len = sizeof(info);
int err;
memset(&info, 0, info_len);
err = bpf_map_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
if (err)
return 0;
return info.id;
}
static void test_lookup_update(void)
{
int map1_fd, map2_fd, map3_fd, map4_fd, map5_fd, map1_id, map2_id;
int outer_arr_fd, outer_hash_fd, outer_arr_dyn_fd;
struct test_btf_map_in_map *skel;
int err, key = 0, val, i, fd;
skel = test_btf_map_in_map__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
return;
err = test_btf_map_in_map__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
map1_fd = bpf_map__fd(skel->maps.inner_map1);
map2_fd = bpf_map__fd(skel->maps.inner_map2);
map3_fd = bpf_map__fd(skel->maps.inner_map3);
map4_fd = bpf_map__fd(skel->maps.inner_map4);
map5_fd = bpf_map__fd(skel->maps.inner_map5);
outer_arr_dyn_fd = bpf_map__fd(skel->maps.outer_arr_dyn);
outer_arr_fd = bpf_map__fd(skel->maps.outer_arr);
outer_hash_fd = bpf_map__fd(skel->maps.outer_hash);
/* inner1 = input, inner2 = input + 1, inner3 = input + 2 */
bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0);
bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0);
bpf_map_update_elem(outer_arr_dyn_fd, &key, &map3_fd, 0);
skel->bss->input = 1;
usleep(1);
bpf_map_lookup_elem(map1_fd, &key, &val);
CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1);
bpf_map_lookup_elem(map2_fd, &key, &val);
CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2);
bpf_map_lookup_elem(map3_fd, &key, &val);
CHECK(val != 3, "inner3", "got %d != exp %d\n", val, 3);
/* inner2 = input, inner1 = input + 1, inner4 = input + 2 */
bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0);
bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0);
bpf_map_update_elem(outer_arr_dyn_fd, &key, &map4_fd, 0);
skel->bss->input = 3;
usleep(1);
bpf_map_lookup_elem(map1_fd, &key, &val);
CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4);
bpf_map_lookup_elem(map2_fd, &key, &val);
CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3);
bpf_map_lookup_elem(map4_fd, &key, &val);
CHECK(val != 5, "inner4", "got %d != exp %d\n", val, 5);
/* inner5 = input + 2 */
bpf_map_update_elem(outer_arr_dyn_fd, &key, &map5_fd, 0);
skel->bss->input = 5;
usleep(1);
bpf_map_lookup_elem(map5_fd, &key, &val);
CHECK(val != 7, "inner5", "got %d != exp %d\n", val, 7);
for (i = 0; i < 5; i++) {
val = i % 2 ? map1_fd : map2_fd;
err = bpf_map_update_elem(outer_hash_fd, &key, &val, 0);
if (CHECK_FAIL(err)) {
printf("failed to update hash_of_maps on iter #%d\n", i);
goto cleanup;
}
err = bpf_map_update_elem(outer_arr_fd, &key, &val, 0);
if (CHECK_FAIL(err)) {
printf("failed to update array_of_maps on iter #%d\n", i);
goto cleanup;
}
val = i % 2 ? map4_fd : map5_fd;
err = bpf_map_update_elem(outer_arr_dyn_fd, &key, &val, 0);
if (CHECK_FAIL(err)) {
printf("failed to update array_of_maps (dyn) on iter #%d\n", i);
goto cleanup;
}
}
map1_id = bpf_map_id(skel->maps.inner_map1);
map2_id = bpf_map_id(skel->maps.inner_map2);
CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
test_btf_map_in_map__destroy(skel);
skel = NULL;
/* We need to either wait for or force synchronize_rcu() before
* checking the "still exists" condition; otherwise the map could still
* be resolvable by ID, causing false positives.
*
* Older kernels (5.8 and earlier) freed the map only after two
* synchronize_rcu()s, so trigger two to be entirely sure.
*/
CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
fd = bpf_map_get_fd_by_id(map1_id);
if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
close(fd);
goto cleanup;
}
fd = bpf_map_get_fd_by_id(map2_id);
if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
close(fd);
goto cleanup;
}
cleanup:
test_btf_map_in_map__destroy(skel);
}
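/* An inner sock array of a different size is accepted by outer_sockarr,
* while outer_arr must reject an inner array whose size differs.
*/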
static void test_diff_size(void)
{
struct test_btf_map_in_map *skel;
int err, inner_map_fd, zero = 0;
skel = test_btf_map_in_map__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
return;
inner_map_fd = bpf_map__fd(skel->maps.sockarr_sz2);
err = bpf_map_update_elem(bpf_map__fd(skel->maps.outer_sockarr), &zero,
&inner_map_fd, 0);
CHECK(err, "outer_sockarr inner map size check",
"cannot use a different size inner_map\n");
inner_map_fd = bpf_map__fd(skel->maps.inner_map_sz2);
err = bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &zero,
&inner_map_fd, 0);
CHECK(!err, "outer_arr inner map size check",
"incorrectly updated with a different size inner_map\n");
test_btf_map_in_map__destroy(skel);
}
void test_btf_map_in_map(void)
{
if (test__start_subtest("lookup_update"))
test_lookup_update();
if (test__start_subtest("diff_size"))
test_diff_size();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#define _GNU_SOURCE
#include <sched.h>
#include <sys/prctl.h>
#include <test_progs.h>
#define MAX_CNT 100000
static __u64 time_get_ns(void)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts.tv_sec * 1000000000ull + ts.tv_nsec;
}
static int test_task_rename(const char *prog)
{
int i, fd, duration = 0, err;
char buf[] = "test_overhead";
__u64 start_time;
fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
if (CHECK(fd < 0, "open /proc", "err %d", errno))
return -1;
start_time = time_get_ns();
for (i = 0; i < MAX_CNT; i++) {
err = write(fd, buf, sizeof(buf));
if (err < 0) {
CHECK(err < 0, "task rename", "err %d", errno);
close(fd);
return -1;
}
}
printf("task_rename %s\t%lluK events per sec\n", prog,
MAX_CNT * 1000000ll / (time_get_ns() - start_time));
close(fd);
return 0;
}
static void test_run(const char *prog)
{
test_task_rename(prog);
}
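/* Pin the benchmark to CPU 0, presumably so the events/sec numbers are not
* skewed by the task migrating between CPUs.
*/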
static void setaffinity(void)
{
cpu_set_t cpuset;
int cpu = 0;
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
sched_setaffinity(0, sizeof(cpuset), &cpuset);
}
void test_test_overhead(void)
{
const char *kprobe_name = "prog1";
const char *kretprobe_name = "prog2";
const char *raw_tp_name = "prog3";
const char *fentry_name = "prog4";
const char *fexit_name = "prog5";
const char *kprobe_func = "__set_task_comm";
struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog;
struct bpf_program *fentry_prog, *fexit_prog;
struct bpf_object *obj;
struct bpf_link *link;
int err, duration = 0;
char comm[16] = {};
if (CHECK_FAIL(prctl(PR_GET_NAME, comm, 0L, 0L, 0L)))
return;
obj = bpf_object__open_file("./test_overhead.bpf.o", NULL);
if (!ASSERT_OK_PTR(obj, "obj_open_file"))
return;
kprobe_prog = bpf_object__find_program_by_name(obj, kprobe_name);
if (CHECK(!kprobe_prog, "find_probe",
"prog '%s' not found\n", kprobe_name))
goto cleanup;
kretprobe_prog = bpf_object__find_program_by_name(obj, kretprobe_name);
if (CHECK(!kretprobe_prog, "find_probe",
"prog '%s' not found\n", kretprobe_name))
goto cleanup;
raw_tp_prog = bpf_object__find_program_by_name(obj, raw_tp_name);
if (CHECK(!raw_tp_prog, "find_probe",
"prog '%s' not found\n", raw_tp_name))
goto cleanup;
fentry_prog = bpf_object__find_program_by_name(obj, fentry_name);
if (CHECK(!fentry_prog, "find_probe",
"prog '%s' not found\n", fentry_name))
goto cleanup;
fexit_prog = bpf_object__find_program_by_name(obj, fexit_name);
if (CHECK(!fexit_prog, "find_probe",
"prog '%s' not found\n", fexit_name))
goto cleanup;
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
goto cleanup;
setaffinity();
/* base line run */
test_run("base");
/* attach kprobe */
link = bpf_program__attach_kprobe(kprobe_prog, false /* retprobe */,
kprobe_func);
if (!ASSERT_OK_PTR(link, "attach_kprobe"))
goto cleanup;
test_run("kprobe");
bpf_link__destroy(link);
/* attach kretprobe */
link = bpf_program__attach_kprobe(kretprobe_prog, true /* retprobe */,
kprobe_func);
if (!ASSERT_OK_PTR(link, "attach_kretprobe"))
goto cleanup;
test_run("kretprobe");
bpf_link__destroy(link);
/* attach raw_tp */
link = bpf_program__attach_raw_tracepoint(raw_tp_prog, "task_rename");
if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto cleanup;
test_run("raw_tp");
bpf_link__destroy(link);
/* attach fentry */
link = bpf_program__attach_trace(fentry_prog);
if (!ASSERT_OK_PTR(link, "attach_fentry"))
goto cleanup;
test_run("fentry");
bpf_link__destroy(link);
/* attach fexit */
link = bpf_program__attach_trace(fexit_prog);
if (!ASSERT_OK_PTR(link, "attach_fexit"))
goto cleanup;
test_run("fexit");
bpf_link__destroy(link);
cleanup:
prctl(PR_SET_NAME, comm, 0L, 0L, 0L);
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_overhead.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
#include <test_progs.h>
#include "test_map_init.skel.h"
#define TEST_VALUE 0x1234
#define FILL_VALUE 0xdeadbeef
static int nr_cpus;
static int duration;
typedef unsigned long long map_key_t;
typedef unsigned long long map_value_t;
typedef struct {
map_value_t v; /* padding */
} __bpf_percpu_val_align pcpu_map_value_t;
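/* Insert keys 1..num from userspace, setting every CPU's slot of each value
* to FILL_VALUE.
*/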
static int map_populate(int map_fd, int num)
{
pcpu_map_value_t value[nr_cpus];
int i, err;
map_key_t key;
for (i = 0; i < nr_cpus; i++)
bpf_percpu(value, i) = FILL_VALUE;
for (key = 1; key <= num; key++) {
err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
if (!ASSERT_OK(err, "bpf_map_update_elem"))
return -1;
}
return 0;
}
static struct test_map_init *setup(enum bpf_map_type map_type, int map_sz,
int *map_fd, int populate)
{
struct test_map_init *skel;
int err;
skel = test_map_init__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return NULL;
err = bpf_map__set_type(skel->maps.hashmap1, map_type);
if (!ASSERT_OK(err, "bpf_map__set_type"))
goto error;
err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz);
if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
goto error;
err = test_map_init__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto error;
*map_fd = bpf_map__fd(skel->maps.hashmap1);
if (CHECK(*map_fd < 0, "bpf_map__fd", "failed\n"))
goto error;
err = map_populate(*map_fd, populate);
if (!ASSERT_OK(err, "map_populate"))
goto error_map;
return skel;
error_map:
close(*map_fd);
error:
test_map_init__destroy(skel);
return NULL;
}
/* executes bpf program that updates map with key, value */
static int prog_run_insert_elem(struct test_map_init *skel, map_key_t key,
map_value_t value)
{
struct test_map_init__bss *bss;
bss = skel->bss;
bss->inKey = key;
bss->inValue = value;
bss->inPid = getpid();
if (!ASSERT_OK(test_map_init__attach(skel), "skel_attach"))
return -1;
/* Let tracepoint trigger */
syscall(__NR_getpgid);
test_map_init__detach(skel);
return 0;
}
static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
{
int i, nzCnt = 0;
map_value_t val;
for (i = 0; i < nr_cpus; i++) {
val = bpf_percpu(value, i);
if (val) {
if (CHECK(val != expected, "map value",
"unexpected for cpu %d: 0x%llx\n", i, val))
return -1;
nzCnt++;
}
}
if (CHECK(nzCnt != 1, "map value", "set for %d CPUs instead of 1!\n",
nzCnt))
return -1;
return 0;
}
/* Add key=1 elem with values set for all CPUs
* Delete elem key=1
* Run bpf prog that inserts new key=1 elem with value=0x1234
* (bpf prog can only set value for current CPU)
* Lookup Key=1 and check value is as expected for all CPUs:
* value set by bpf prog for one CPU, 0 for all others
*/
static void test_pcpu_map_init(void)
{
pcpu_map_value_t value[nr_cpus];
struct test_map_init *skel;
int map_fd, err;
map_key_t key;
/* max 1 elem in map so insertion is forced to reuse freed entry */
skel = setup(BPF_MAP_TYPE_PERCPU_HASH, 1, &map_fd, 1);
if (!ASSERT_OK_PTR(skel, "prog_setup"))
return;
/* delete element so the entry can be re-used */
key = 1;
err = bpf_map_delete_elem(map_fd, &key);
if (!ASSERT_OK(err, "bpf_map_delete_elem"))
goto cleanup;
/* run bpf prog that inserts new elem, re-using the slot just freed */
err = prog_run_insert_elem(skel, key, TEST_VALUE);
if (!ASSERT_OK(err, "prog_run_insert_elem"))
goto cleanup;
/* check that key=1 was re-created by bpf prog */
err = bpf_map_lookup_elem(map_fd, &key, value);
if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
goto cleanup;
/* and has expected values */
check_values_one_cpu(value, TEST_VALUE);
cleanup:
test_map_init__destroy(skel);
}
/* Add key=1 and key=2 elems with values set for all CPUs
* Run bpf prog that inserts new key=3 elem
* (only for current cpu; other cpus should have initial value = 0)
* Lookup key=3 and check value is as expected for all CPUs
*/
static void test_pcpu_lru_map_init(void)
{
pcpu_map_value_t value[nr_cpus];
struct test_map_init *skel;
int map_fd, err;
map_key_t key;
/* Set up LRU map with 2 elements, values filled for all CPUs.
* With these 2 elements, the LRU map is full
*/
skel = setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, &map_fd, 2);
if (!ASSERT_OK_PTR(skel, "prog_setup"))
return;
/* run bpf prog that inserts new key=3 element, re-using LRU slot */
key = 3;
err = prog_run_insert_elem(skel, key, TEST_VALUE);
if (!ASSERT_OK(err, "prog_run_insert_elem"))
goto cleanup;
/* check that key=3 replaced one of earlier elements */
err = bpf_map_lookup_elem(map_fd, &key, value);
if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
goto cleanup;
/* and has expected values */
check_values_one_cpu(value, TEST_VALUE);
cleanup:
test_map_init__destroy(skel);
}
void test_map_init(void)
{
nr_cpus = bpf_num_possible_cpus();
if (nr_cpus <= 1) {
printf("%s:SKIP: >1 cpu needed for this test\n", __func__);
test__skip();
return;
}
if (test__start_subtest("pcpu_map_init"))
test_pcpu_map_init();
if (test__start_subtest("pcpu_lru_map_init"))
test_pcpu_lru_map_init();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/map_init.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
// Copyright (c) 2020 Cloudflare
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <string.h>
#include <linux/pkt_cls.h>
#include <netinet/tcp.h>
#include <test_progs.h>
#include "progs/test_cls_redirect.h"
#include "test_cls_redirect.skel.h"
#include "test_cls_redirect_dynptr.skel.h"
#include "test_cls_redirect_subprogs.skel.h"
#define ENCAP_IP INADDR_LOOPBACK
#define ENCAP_PORT (1234)
static int duration = 0;
struct addr_port {
in_port_t port;
union {
struct in_addr in_addr;
struct in6_addr in6_addr;
};
};
struct tuple {
int family;
struct addr_port src;
struct addr_port dst;
};
static int start_server(const struct sockaddr *addr, socklen_t len, int type)
{
int fd = socket(addr->sa_family, type, 0);
if (CHECK_FAIL(fd == -1))
return -1;
if (CHECK_FAIL(bind(fd, addr, len) == -1))
goto err;
if (type == SOCK_STREAM && CHECK_FAIL(listen(fd, 128) == -1))
goto err;
return fd;
err:
close(fd);
return -1;
}
static int connect_to_server(const struct sockaddr *addr, socklen_t len,
int type)
{
int fd = socket(addr->sa_family, type, 0);
if (CHECK_FAIL(fd == -1))
return -1;
if (CHECK_FAIL(connect(fd, addr, len)))
goto err;
return fd;
err:
close(fd);
return -1;
}
static bool fill_addr_port(const struct sockaddr *sa, struct addr_port *ap)
{
const struct sockaddr_in6 *in6;
const struct sockaddr_in *in;
switch (sa->sa_family) {
case AF_INET:
in = (const struct sockaddr_in *)sa;
ap->in_addr = in->sin_addr;
ap->port = in->sin_port;
return true;
case AF_INET6:
in6 = (const struct sockaddr_in6 *)sa;
ap->in6_addr = in6->sin6_addr;
ap->port = in6->sin6_port;
return true;
default:
return false;
}
}
static bool set_up_conn(const struct sockaddr *addr, socklen_t len, int type,
int *server, int *conn, struct tuple *tuple)
{
struct sockaddr_storage ss;
socklen_t slen = sizeof(ss);
struct sockaddr *sa = (struct sockaddr *)&ss;
*server = start_server(addr, len, type);
if (*server < 0)
return false;
if (CHECK_FAIL(getsockname(*server, sa, &slen)))
goto close_server;
*conn = connect_to_server(sa, slen, type);
if (*conn < 0)
goto close_server;
/* We want to simulate packets arriving at conn, so we have to
* swap src and dst.
*/
slen = sizeof(ss);
if (CHECK_FAIL(getsockname(*conn, sa, &slen)))
goto close_conn;
if (CHECK_FAIL(!fill_addr_port(sa, &tuple->dst)))
goto close_conn;
slen = sizeof(ss);
if (CHECK_FAIL(getpeername(*conn, sa, &slen)))
goto close_conn;
if (CHECK_FAIL(!fill_addr_port(sa, &tuple->src)))
goto close_conn;
tuple->family = ss.ss_family;
return true;
close_conn:
close(*conn);
*conn = -1;
close_server:
close(*server);
*server = -1;
return false;
}
static socklen_t prepare_addr(struct sockaddr_storage *addr, int family)
{
struct sockaddr_in *addr4;
struct sockaddr_in6 *addr6;
switch (family) {
case AF_INET:
addr4 = (struct sockaddr_in *)addr;
memset(addr4, 0, sizeof(*addr4));
addr4->sin_family = family;
addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
return sizeof(*addr4);
case AF_INET6:
addr6 = (struct sockaddr_in6 *)addr;
memset(addr6, 0, sizeof(*addr6));
addr6->sin6_family = family;
addr6->sin6_addr = in6addr_loopback;
return sizeof(*addr6);
default:
fprintf(stderr, "Invalid family %d", family);
return 0;
}
}
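/* When the program accepts a packet it strips the encapsulation headers, so
* an output smaller than the input is taken to mean the packet was
* decapsulated.
*/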
static bool was_decapsulated(struct bpf_test_run_opts *tattr)
{
return tattr->data_size_out < tattr->data_size_in;
}
enum type {
UDP,
TCP,
__NR_KIND,
};
enum hops {
NO_HOPS,
ONE_HOP,
};
enum flags {
NONE,
SYN,
ACK,
};
enum conn {
KNOWN_CONN,
UNKNOWN_CONN,
};
enum result {
ACCEPT,
FORWARD,
};
struct test_cfg {
enum type type;
enum result result;
enum conn conn;
enum hops hops;
enum flags flags;
};
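/* Render a human-readable subtest name such as
* "IPv4 TCP accept known (one hop, flags: ACK)".
*/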
static int test_str(void *buf, size_t len, const struct test_cfg *test,
int family)
{
const char *family_str, *type, *conn, *hops, *result, *flags;
family_str = "IPv4";
if (family == AF_INET6)
family_str = "IPv6";
type = "TCP";
if (test->type == UDP)
type = "UDP";
conn = "known";
if (test->conn == UNKNOWN_CONN)
conn = "unknown";
hops = "no hops";
if (test->hops == ONE_HOP)
hops = "one hop";
result = "accept";
if (test->result == FORWARD)
result = "forward";
flags = "none";
if (test->flags == SYN)
flags = "SYN";
else if (test->flags == ACK)
flags = "ACK";
return snprintf(buf, len, "%s %s %s %s (%s, flags: %s)", family_str,
type, result, conn, hops, flags);
}
static struct test_cfg tests[] = {
{ TCP, ACCEPT, UNKNOWN_CONN, NO_HOPS, SYN },
{ TCP, ACCEPT, UNKNOWN_CONN, NO_HOPS, ACK },
{ TCP, FORWARD, UNKNOWN_CONN, ONE_HOP, ACK },
{ TCP, ACCEPT, KNOWN_CONN, ONE_HOP, ACK },
{ UDP, ACCEPT, UNKNOWN_CONN, NO_HOPS, NONE },
{ UDP, FORWARD, UNKNOWN_CONN, ONE_HOP, NONE },
{ UDP, ACCEPT, KNOWN_CONN, ONE_HOP, NONE },
};
static void encap_init(encap_headers_t *encap, uint8_t hop_count, uint8_t proto)
{
const uint8_t hlen =
(sizeof(struct guehdr) / sizeof(uint32_t)) + hop_count;
*encap = (encap_headers_t){
.eth = { .h_proto = htons(ETH_P_IP) },
.ip = {
.ihl = 5,
.version = 4,
.ttl = IPDEFTTL,
.protocol = IPPROTO_UDP,
.daddr = htonl(ENCAP_IP)
},
.udp = {
.dest = htons(ENCAP_PORT),
},
.gue = {
.hlen = hlen,
.proto_ctype = proto
},
.unigue = {
.hop_count = hop_count
},
};
}
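/* Assemble the test packet in buf: the GUE encapsulation from encap_init(),
* an optional next-hop address when hops == ONE_HOP, the inner IPv4/IPv6
* header and the inner TCP/UDP header. For UNKNOWN_CONN the source port is
* decremented so the tuple does not match the established connection.
* Returns the number of bytes written, or 0 on error.
*/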
static size_t build_input(const struct test_cfg *test, void *const buf,
const struct tuple *tuple)
{
in_port_t sport = tuple->src.port;
encap_headers_t encap;
struct iphdr ip;
struct ipv6hdr ipv6;
struct tcphdr tcp;
struct udphdr udp;
struct in_addr next_hop;
uint8_t *p = buf;
int proto;
proto = IPPROTO_IPIP;
if (tuple->family == AF_INET6)
proto = IPPROTO_IPV6;
encap_init(&encap, test->hops == ONE_HOP ? 1 : 0, proto);
p = mempcpy(p, &encap, sizeof(encap));
if (test->hops == ONE_HOP) {
next_hop = (struct in_addr){ .s_addr = htonl(0x7f000002) };
p = mempcpy(p, &next_hop, sizeof(next_hop));
}
proto = IPPROTO_TCP;
if (test->type == UDP)
proto = IPPROTO_UDP;
switch (tuple->family) {
case AF_INET:
ip = (struct iphdr){
.ihl = 5,
.version = 4,
.ttl = IPDEFTTL,
.protocol = proto,
.saddr = tuple->src.in_addr.s_addr,
.daddr = tuple->dst.in_addr.s_addr,
};
p = mempcpy(p, &ip, sizeof(ip));
break;
case AF_INET6:
ipv6 = (struct ipv6hdr){
.version = 6,
.hop_limit = IPDEFTTL,
.nexthdr = proto,
.saddr = tuple->src.in6_addr,
.daddr = tuple->dst.in6_addr,
};
p = mempcpy(p, &ipv6, sizeof(ipv6));
break;
default:
return 0;
}
if (test->conn == UNKNOWN_CONN)
sport--;
switch (test->type) {
case TCP:
tcp = (struct tcphdr){
.source = sport,
.dest = tuple->dst.port,
};
if (test->flags == SYN)
tcp.syn = true;
if (test->flags == ACK)
tcp.ack = true;
p = mempcpy(p, &tcp, sizeof(tcp));
break;
case UDP:
udp = (struct udphdr){
.source = sport,
.dest = tuple->dst.port,
};
p = mempcpy(p, &udp, sizeof(udp));
break;
default:
return 0;
}
return (void *)p - buf;
}
static void close_fds(int *fds, int n)
{
int i;
for (i = 0; i < n; i++)
if (fds[i] > 0)
close(fds[i]);
}
static void test_cls_redirect_common(struct bpf_program *prog)
{
LIBBPF_OPTS(bpf_test_run_opts, tattr);
int families[] = { AF_INET, AF_INET6 };
struct sockaddr_storage ss;
struct sockaddr *addr;
socklen_t slen;
int i, j, err, prog_fd;
int servers[__NR_KIND][ARRAY_SIZE(families)] = {};
int conns[__NR_KIND][ARRAY_SIZE(families)] = {};
struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)];
addr = (struct sockaddr *)&ss;
for (i = 0; i < ARRAY_SIZE(families); i++) {
slen = prepare_addr(&ss, families[i]);
if (CHECK_FAIL(!slen))
goto cleanup;
if (CHECK_FAIL(!set_up_conn(addr, slen, SOCK_DGRAM,
&servers[UDP][i], &conns[UDP][i],
&tuples[UDP][i])))
goto cleanup;
if (CHECK_FAIL(!set_up_conn(addr, slen, SOCK_STREAM,
&servers[TCP][i], &conns[TCP][i],
&tuples[TCP][i])))
goto cleanup;
}
prog_fd = bpf_program__fd(prog);
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct test_cfg *test = &tests[i];
for (j = 0; j < ARRAY_SIZE(families); j++) {
struct tuple *tuple = &tuples[test->type][j];
char input[256];
char tmp[256];
test_str(tmp, sizeof(tmp), test, tuple->family);
if (!test__start_subtest(tmp))
continue;
tattr.data_out = tmp;
tattr.data_size_out = sizeof(tmp);
tattr.data_in = input;
tattr.data_size_in = build_input(test, input, tuple);
if (CHECK_FAIL(!tattr.data_size_in))
continue;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
if (CHECK_FAIL(err))
continue;
if (tattr.retval != TC_ACT_REDIRECT) {
PRINT_FAIL("expected TC_ACT_REDIRECT, got %d\n",
tattr.retval);
continue;
}
switch (test->result) {
case ACCEPT:
if (CHECK_FAIL(!was_decapsulated(&tattr)))
continue;
break;
case FORWARD:
if (CHECK_FAIL(was_decapsulated(&tattr)))
continue;
break;
default:
PRINT_FAIL("unknown result %d\n", test->result);
continue;
}
}
}
cleanup:
close_fds((int *)servers, sizeof(servers) / sizeof(servers[0][0]));
close_fds((int *)conns, sizeof(conns) / sizeof(conns[0][0]));
}
static void test_cls_redirect_dynptr(void)
{
struct test_cls_redirect_dynptr *skel;
int err;
skel = test_cls_redirect_dynptr__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);
err = test_cls_redirect_dynptr__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
test_cls_redirect_common(skel->progs.cls_redirect);
cleanup:
test_cls_redirect_dynptr__destroy(skel);
}
static void test_cls_redirect_inlined(void)
{
struct test_cls_redirect *skel;
int err;
skel = test_cls_redirect__open();
if (CHECK(!skel, "skel_open", "failed\n"))
return;
skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);
err = test_cls_redirect__load(skel);
if (CHECK(err, "skel_load", "failed: %d\n", err))
goto cleanup;
test_cls_redirect_common(skel->progs.cls_redirect);
cleanup:
test_cls_redirect__destroy(skel);
}
static void test_cls_redirect_subprogs(void)
{
struct test_cls_redirect_subprogs *skel;
int err;
skel = test_cls_redirect_subprogs__open();
if (CHECK(!skel, "skel_open", "failed\n"))
return;
skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);
err = test_cls_redirect_subprogs__load(skel);
if (CHECK(err, "skel_load", "failed: %d\n", err))
goto cleanup;
test_cls_redirect_common(skel->progs.cls_redirect);
cleanup:
test_cls_redirect_subprogs__destroy(skel);
}
void test_cls_redirect(void)
{
if (test__start_subtest("cls_redirect_inlined"))
test_cls_redirect_inlined();
if (test__start_subtest("cls_redirect_subprogs"))
test_cls_redirect_subprogs();
if (test__start_subtest("cls_redirect_dynptr"))
test_cls_redirect_dynptr();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cls_redirect.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2021 Google LLC.
*/
#include <test_progs.h>
#include <cgroup_helpers.h>
#include <network_helpers.h>
#include "cgroup_getset_retval_setsockopt.skel.h"
#include "cgroup_getset_retval_getsockopt.skel.h"
#include "cgroup_getset_retval_hooks.skel.h"
#define SOL_CUSTOM 0xdeadbeef
static int zero;
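/* The tests below call setsockopt(SOL_SOCKET, SO_REUSEADDR, &zero, ...) only
* to trigger the attached cgroup/setsockopt programs; the option value itself
* is irrelevant.
*/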
static void test_setsockopt_set(int cgroup_fd, int sock_fd)
{
struct cgroup_getset_retval_setsockopt *obj;
struct bpf_link *link_set_eunatch = NULL;
obj = cgroup_getset_retval_setsockopt__open_and_load();
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
obj->bss->page_size = sysconf(_SC_PAGESIZE);
/* Attach setsockopt that sets EUNATCH and assert that
* we actually get that error when we run setsockopt().
*/
link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch,
cgroup_fd);
if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch"))
goto close_bpf_object;
if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
&zero, sizeof(int)), "setsockopt"))
goto close_bpf_object;
if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations"))
goto close_bpf_object;
if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(link_set_eunatch);
cgroup_getset_retval_setsockopt__destroy(obj);
}
static void test_setsockopt_set_and_get(int cgroup_fd, int sock_fd)
{
struct cgroup_getset_retval_setsockopt *obj;
struct bpf_link *link_set_eunatch = NULL, *link_get_retval = NULL;
obj = cgroup_getset_retval_setsockopt__open_and_load();
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
obj->bss->page_size = sysconf(_SC_PAGESIZE);
/* Attach setsockopt that sets EUNATCH, and one that gets the
* previously set errno. Assert that we get the same errno back.
*/
link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch,
cgroup_fd);
if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch"))
goto close_bpf_object;
link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
cgroup_fd);
if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
goto close_bpf_object;
if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
&zero, sizeof(int)), "setsockopt"))
goto close_bpf_object;
if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations"))
goto close_bpf_object;
if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->retval_value, -EUNATCH, "retval_value"))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(link_set_eunatch);
bpf_link__destroy(link_get_retval);
cgroup_getset_retval_setsockopt__destroy(obj);
}
static void test_setsockopt_default_zero(int cgroup_fd, int sock_fd)
{
struct cgroup_getset_retval_setsockopt *obj;
struct bpf_link *link_get_retval = NULL;
obj = cgroup_getset_retval_setsockopt__open_and_load();
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
obj->bss->page_size = sysconf(_SC_PAGESIZE);
/* Attach setsockopt that gets the previously set errno.
* Assert that, without anything setting one, we get 0.
*/
link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
cgroup_fd);
if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
goto close_bpf_object;
if (!ASSERT_OK(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
&zero, sizeof(int)), "setsockopt"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations"))
goto close_bpf_object;
if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value"))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(link_get_retval);
cgroup_getset_retval_setsockopt__destroy(obj);
}
static void test_setsockopt_default_zero_and_set(int cgroup_fd, int sock_fd)
{
struct cgroup_getset_retval_setsockopt *obj;
struct bpf_link *link_get_retval = NULL, *link_set_eunatch = NULL;
obj = cgroup_getset_retval_setsockopt__open_and_load();
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
obj->bss->page_size = sysconf(_SC_PAGESIZE);
/* Attach setsockopt that gets the previously set errno, and then
* one that sets the errno to EUNATCH. Assert that the get does not
* see EUNATCH set later, and does not prevent EUNATCH from being set.
*/
link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
cgroup_fd);
if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
goto close_bpf_object;
link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch,
cgroup_fd);
if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch"))
goto close_bpf_object;
if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
&zero, sizeof(int)), "setsockopt"))
goto close_bpf_object;
if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations"))
goto close_bpf_object;
if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value"))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(link_get_retval);
bpf_link__destroy(link_set_eunatch);
cgroup_getset_retval_setsockopt__destroy(obj);
}
static void test_setsockopt_override(int cgroup_fd, int sock_fd)
{
struct cgroup_getset_retval_setsockopt *obj;
struct bpf_link *link_set_eunatch = NULL, *link_set_eisconn = NULL;
struct bpf_link *link_get_retval = NULL;
obj = cgroup_getset_retval_setsockopt__open_and_load();
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
obj->bss->page_size = sysconf(_SC_PAGESIZE);
/* Attach setsockopt that sets EUNATCH, then one that sets EISCONN,
	 * and then one that gets the exported errno. Assert that both the
	 * syscall and the helper see the errno that was set last.
*/
link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch,
cgroup_fd);
if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch"))
goto close_bpf_object;
link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn,
cgroup_fd);
if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn"))
goto close_bpf_object;
link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
cgroup_fd);
if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
goto close_bpf_object;
if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
&zero, sizeof(int)), "setsockopt"))
goto close_bpf_object;
if (!ASSERT_EQ(errno, EISCONN, "setsockopt-errno"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations"))
goto close_bpf_object;
if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->retval_value, -EISCONN, "retval_value"))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(link_set_eunatch);
bpf_link__destroy(link_set_eisconn);
bpf_link__destroy(link_get_retval);
cgroup_getset_retval_setsockopt__destroy(obj);
}
static void test_setsockopt_legacy_eperm(int cgroup_fd, int sock_fd)
{
struct cgroup_getset_retval_setsockopt *obj;
struct bpf_link *link_legacy_eperm = NULL, *link_get_retval = NULL;
obj = cgroup_getset_retval_setsockopt__open_and_load();
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
obj->bss->page_size = sysconf(_SC_PAGESIZE);
	/* Attach setsockopt that returns a reject without setting errno
	 * (legacy reject), and one that gets the errno. Assert that for
	 * backward compatibility the syscall results in EPERM, and that this
	 * is also visible to the helper.
*/
link_legacy_eperm = bpf_program__attach_cgroup(obj->progs.legacy_eperm,
cgroup_fd);
if (!ASSERT_OK_PTR(link_legacy_eperm, "cg-attach-legacy_eperm"))
goto close_bpf_object;
link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
cgroup_fd);
if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
goto close_bpf_object;
if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
&zero, sizeof(int)), "setsockopt"))
goto close_bpf_object;
if (!ASSERT_EQ(errno, EPERM, "setsockopt-errno"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations"))
goto close_bpf_object;
if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->retval_value, -EPERM, "retval_value"))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(link_legacy_eperm);
bpf_link__destroy(link_get_retval);
cgroup_getset_retval_setsockopt__destroy(obj);
}
static void test_setsockopt_legacy_no_override(int cgroup_fd, int sock_fd)
{
struct cgroup_getset_retval_setsockopt *obj;
struct bpf_link *link_set_eunatch = NULL, *link_legacy_eperm = NULL;
struct bpf_link *link_get_retval = NULL;
obj = cgroup_getset_retval_setsockopt__open_and_load();
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
obj->bss->page_size = sysconf(_SC_PAGESIZE);
	/* Attach setsockopt that sets EUNATCH, then one that returns a reject
	 * without setting errno, and then one that gets the exported errno.
	 * Assert that both the syscall and the helper's errno are unaffected
	 * by the second prog (i.e. a legacy reject does not override the errno
* to EPERM).
*/
link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch,
cgroup_fd);
if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch"))
goto close_bpf_object;
link_legacy_eperm = bpf_program__attach_cgroup(obj->progs.legacy_eperm,
cgroup_fd);
if (!ASSERT_OK_PTR(link_legacy_eperm, "cg-attach-legacy_eperm"))
goto close_bpf_object;
link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
cgroup_fd);
if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
goto close_bpf_object;
if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
&zero, sizeof(int)), "setsockopt"))
goto close_bpf_object;
if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations"))
goto close_bpf_object;
if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->retval_value, -EUNATCH, "retval_value"))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(link_set_eunatch);
bpf_link__destroy(link_legacy_eperm);
bpf_link__destroy(link_get_retval);
cgroup_getset_retval_setsockopt__destroy(obj);
}
static void test_getsockopt_get(int cgroup_fd, int sock_fd)
{
struct cgroup_getset_retval_getsockopt *obj;
struct bpf_link *link_get_retval = NULL;
int buf;
socklen_t optlen = sizeof(buf);
obj = cgroup_getset_retval_getsockopt__open_and_load();
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
obj->bss->page_size = sysconf(_SC_PAGESIZE);
/* Attach getsockopt that gets previously set errno. Assert that the
* error from kernel is in both ctx_retval_value and retval_value.
*/
link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
cgroup_fd);
if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
goto close_bpf_object;
if (!ASSERT_ERR(getsockopt(sock_fd, SOL_CUSTOM, 0,
&buf, &optlen), "getsockopt"))
goto close_bpf_object;
if (!ASSERT_EQ(errno, EOPNOTSUPP, "getsockopt-errno"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations"))
goto close_bpf_object;
if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->retval_value, -EOPNOTSUPP, "retval_value"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->ctx_retval_value, -EOPNOTSUPP, "ctx_retval_value"))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(link_get_retval);
cgroup_getset_retval_getsockopt__destroy(obj);
}
static void test_getsockopt_override(int cgroup_fd, int sock_fd)
{
struct cgroup_getset_retval_getsockopt *obj;
struct bpf_link *link_set_eisconn = NULL;
int buf;
socklen_t optlen = sizeof(buf);
obj = cgroup_getset_retval_getsockopt__open_and_load();
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
obj->bss->page_size = sysconf(_SC_PAGESIZE);
/* Attach getsockopt that sets retval to -EISCONN. Assert that this
* overrides the value from kernel.
*/
link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn,
cgroup_fd);
if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn"))
goto close_bpf_object;
if (!ASSERT_ERR(getsockopt(sock_fd, SOL_CUSTOM, 0,
&buf, &optlen), "getsockopt"))
goto close_bpf_object;
if (!ASSERT_EQ(errno, EISCONN, "getsockopt-errno"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations"))
goto close_bpf_object;
if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(link_set_eisconn);
cgroup_getset_retval_getsockopt__destroy(obj);
}
static void test_getsockopt_retval_sync(int cgroup_fd, int sock_fd)
{
struct cgroup_getset_retval_getsockopt *obj;
struct bpf_link *link_set_eisconn = NULL, *link_clear_retval = NULL;
struct bpf_link *link_get_retval = NULL;
int buf;
socklen_t optlen = sizeof(buf);
obj = cgroup_getset_retval_getsockopt__open_and_load();
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
obj->bss->page_size = sysconf(_SC_PAGESIZE);
/* Attach getsockopt that sets retval to -EISCONN, and one that clears
	 * ctx retval. Assert that clearing the ctx retval is synced to the
	 * helper and clears any errors from both the kernel and BPF.
*/
link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn,
cgroup_fd);
if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn"))
goto close_bpf_object;
link_clear_retval = bpf_program__attach_cgroup(obj->progs.clear_retval,
cgroup_fd);
if (!ASSERT_OK_PTR(link_clear_retval, "cg-attach-clear_retval"))
goto close_bpf_object;
link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
cgroup_fd);
if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
goto close_bpf_object;
if (!ASSERT_OK(getsockopt(sock_fd, SOL_CUSTOM, 0,
&buf, &optlen), "getsockopt"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations"))
goto close_bpf_object;
if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value"))
goto close_bpf_object;
if (!ASSERT_EQ(obj->bss->ctx_retval_value, 0, "ctx_retval_value"))
goto close_bpf_object;
close_bpf_object:
bpf_link__destroy(link_set_eisconn);
bpf_link__destroy(link_clear_retval);
bpf_link__destroy(link_get_retval);
cgroup_getset_retval_getsockopt__destroy(obj);
}
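/* Hooks generated from cgroup_getset_retval_hooks.h, each paired with the
 * load result expected for its corresponding program.
 */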
struct exposed_hook {
const char *name;
int expected_err;
} exposed_hooks[] = {
#define BPF_RETVAL_HOOK(NAME, SECTION, CTX, EXPECTED_ERR) \
{ \
.name = #NAME, \
.expected_err = EXPECTED_ERR, \
},
#include "cgroup_getset_retval_hooks.h"
#undef BPF_RETVAL_HOOK
};
static void test_exposed_hooks(int cgroup_fd, int sock_fd)
{
struct cgroup_getset_retval_hooks *skel;
struct bpf_program *prog;
int err;
int i;
for (i = 0; i < ARRAY_SIZE(exposed_hooks); i++) {
skel = cgroup_getset_retval_hooks__open();
if (!ASSERT_OK_PTR(skel, "cgroup_getset_retval_hooks__open"))
continue;
prog = bpf_object__find_program_by_name(skel->obj, exposed_hooks[i].name);
if (!ASSERT_NEQ(prog, NULL, "bpf_object__find_program_by_name"))
goto close_skel;
err = bpf_program__set_autoload(prog, true);
if (!ASSERT_OK(err, "bpf_program__set_autoload"))
goto close_skel;
err = cgroup_getset_retval_hooks__load(skel);
ASSERT_EQ(err, exposed_hooks[i].expected_err, "expected_err");
close_skel:
cgroup_getset_retval_hooks__destroy(skel);
}
}
void test_cgroup_getset_retval(void)
{
int cgroup_fd = -1;
int sock_fd = -1;
cgroup_fd = test__join_cgroup("/cgroup_getset_retval");
if (!ASSERT_GE(cgroup_fd, 0, "cg-create"))
goto close_fd;
sock_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 0, 0);
if (!ASSERT_GE(sock_fd, 0, "start-server"))
goto close_fd;
if (test__start_subtest("setsockopt-set"))
test_setsockopt_set(cgroup_fd, sock_fd);
if (test__start_subtest("setsockopt-set_and_get"))
test_setsockopt_set_and_get(cgroup_fd, sock_fd);
if (test__start_subtest("setsockopt-default_zero"))
test_setsockopt_default_zero(cgroup_fd, sock_fd);
if (test__start_subtest("setsockopt-default_zero_and_set"))
test_setsockopt_default_zero_and_set(cgroup_fd, sock_fd);
if (test__start_subtest("setsockopt-override"))
test_setsockopt_override(cgroup_fd, sock_fd);
if (test__start_subtest("setsockopt-legacy_eperm"))
test_setsockopt_legacy_eperm(cgroup_fd, sock_fd);
if (test__start_subtest("setsockopt-legacy_no_override"))
test_setsockopt_legacy_no_override(cgroup_fd, sock_fd);
if (test__start_subtest("getsockopt-get"))
test_getsockopt_get(cgroup_fd, sock_fd);
if (test__start_subtest("getsockopt-override"))
test_getsockopt_override(cgroup_fd, sock_fd);
if (test__start_subtest("getsockopt-retval_sync"))
test_getsockopt_retval_sync(cgroup_fd, sock_fd);
if (test__start_subtest("exposed_hooks"))
test_exposed_hooks(cgroup_fd, sock_fd);
close_fd:
close(cgroup_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <test_progs.h>
#include "htab_reuse.skel.h"
struct htab_op_ctx {
int fd;
int loop;
bool stop;
};
struct htab_val {
unsigned int lock;
unsigned int data;
};
static void *htab_lookup_fn(void *arg)
{
struct htab_op_ctx *ctx = arg;
int i = 0;
while (i++ < ctx->loop && !ctx->stop) {
struct htab_val value;
unsigned int key;
/* Use BPF_F_LOCK to use spin-lock in map value. */
key = 7;
bpf_map_lookup_elem_flags(ctx->fd, &key, &value, BPF_F_LOCK);
}
return NULL;
}
static void *htab_update_fn(void *arg)
{
struct htab_op_ctx *ctx = arg;
int i = 0;
while (i++ < ctx->loop && !ctx->stop) {
struct htab_val value;
unsigned int key;
key = 7;
value.lock = 0;
value.data = key;
bpf_map_update_elem(ctx->fd, &key, &value, BPF_F_LOCK);
bpf_map_delete_elem(ctx->fd, &key);
key = 24;
value.lock = 0;
value.data = key;
bpf_map_update_elem(ctx->fd, &key, &value, BPF_F_LOCK);
bpf_map_delete_elem(ctx->fd, &key);
}
return NULL;
}
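/* Stress element reuse: the writer thread keeps updating and deleting the
 * same keys with BPF_F_LOCK while reader threads look them up with
 * BPF_F_LOCK, so freed htab elements get reused while lookups are still in
 * flight.
 */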
void test_htab_reuse(void)
{
unsigned int i, wr_nr = 1, rd_nr = 4;
pthread_t tids[wr_nr + rd_nr];
struct htab_reuse *skel;
struct htab_op_ctx ctx;
int err;
skel = htab_reuse__open_and_load();
if (!ASSERT_OK_PTR(skel, "htab_reuse__open_and_load"))
return;
ctx.fd = bpf_map__fd(skel->maps.htab);
ctx.loop = 500;
ctx.stop = false;
memset(tids, 0, sizeof(tids));
for (i = 0; i < wr_nr; i++) {
err = pthread_create(&tids[i], NULL, htab_update_fn, &ctx);
if (!ASSERT_OK(err, "pthread_create")) {
ctx.stop = true;
goto reap;
}
}
for (i = 0; i < rd_nr; i++) {
err = pthread_create(&tids[i + wr_nr], NULL, htab_lookup_fn, &ctx);
if (!ASSERT_OK(err, "pthread_create")) {
ctx.stop = true;
goto reap;
}
}
reap:
for (i = 0; i < wr_nr + rd_nr; i++) {
if (!tids[i])
continue;
pthread_join(tids[i], NULL);
}
htab_reuse__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/htab_reuse.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "sockopt_multi.skel.h"
static int run_getsockopt_test(struct sockopt_multi *obj, int cg_parent,
int cg_child, int sock_fd)
{
struct bpf_link *link_parent = NULL;
struct bpf_link *link_child = NULL;
socklen_t optlen;
__u8 buf;
int err;
/* Set IP_TOS to the expected value (0x80). */
buf = 0x80;
err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1);
if (err < 0) {
log_err("Failed to call setsockopt(IP_TOS)");
goto detach;
}
buf = 0x00;
optlen = 1;
err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
if (err) {
log_err("Failed to call getsockopt(IP_TOS)");
goto detach;
}
if (buf != 0x80) {
log_err("Unexpected getsockopt 0x%x != 0x80 without BPF", buf);
err = -1;
goto detach;
}
/* Attach child program and make sure it returns new value:
* - kernel: -> 0x80
* - child: 0x80 -> 0x90
*/
link_child = bpf_program__attach_cgroup(obj->progs._getsockopt_child,
cg_child);
if (!ASSERT_OK_PTR(link_child, "cg-attach-getsockopt_child"))
goto detach;
buf = 0x00;
optlen = 1;
err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
if (err) {
log_err("Failed to call getsockopt(IP_TOS)");
goto detach;
}
if (buf != 0x90) {
log_err("Unexpected getsockopt 0x%x != 0x90", buf);
err = -1;
goto detach;
}
/* Attach parent program and make sure it returns new value:
* - kernel: -> 0x80
* - child: 0x80 -> 0x90
* - parent: 0x90 -> 0xA0
*/
link_parent = bpf_program__attach_cgroup(obj->progs._getsockopt_parent,
cg_parent);
if (!ASSERT_OK_PTR(link_parent, "cg-attach-getsockopt_parent"))
goto detach;
buf = 0x00;
optlen = 1;
err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
if (err) {
log_err("Failed to call getsockopt(IP_TOS)");
goto detach;
}
if (buf != 0xA0) {
log_err("Unexpected getsockopt 0x%x != 0xA0", buf);
err = -1;
goto detach;
}
/* Setting unexpected initial sockopt should return EPERM:
* - kernel: -> 0x40
* - child: unexpected 0x40, EPERM
* - parent: unexpected 0x40, EPERM
*/
buf = 0x40;
err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1);
if (err < 0) {
log_err("Failed to call setsockopt(IP_TOS)");
goto detach;
}
buf = 0x00;
optlen = 1;
err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
if (!err) {
log_err("Unexpected success from getsockopt(IP_TOS)");
goto detach;
}
/* Detach child program and make sure we still get EPERM:
* - kernel: -> 0x40
* - parent: unexpected 0x40, EPERM
*/
bpf_link__destroy(link_child);
link_child = NULL;
buf = 0x00;
optlen = 1;
err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
if (!err) {
log_err("Unexpected success from getsockopt(IP_TOS)");
goto detach;
}
/* Set initial value to the one the parent program expects:
* - kernel: -> 0x90
* - parent: 0x90 -> 0xA0
*/
buf = 0x90;
err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1);
if (err < 0) {
log_err("Failed to call setsockopt(IP_TOS)");
goto detach;
}
buf = 0x00;
optlen = 1;
err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
if (err) {
log_err("Failed to call getsockopt(IP_TOS)");
goto detach;
}
if (buf != 0xA0) {
log_err("Unexpected getsockopt 0x%x != 0xA0", buf);
err = -1;
goto detach;
}
detach:
bpf_link__destroy(link_child);
bpf_link__destroy(link_parent);
return err;
}
static int run_setsockopt_test(struct sockopt_multi *obj, int cg_parent,
int cg_child, int sock_fd)
{
struct bpf_link *link_parent = NULL;
struct bpf_link *link_child = NULL;
socklen_t optlen;
__u8 buf;
int err;
/* Set IP_TOS to the expected value (0x80). */
buf = 0x80;
err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1);
if (err < 0) {
log_err("Failed to call setsockopt(IP_TOS)");
goto detach;
}
buf = 0x00;
optlen = 1;
err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
if (err) {
log_err("Failed to call getsockopt(IP_TOS)");
goto detach;
}
if (buf != 0x80) {
log_err("Unexpected getsockopt 0x%x != 0x80 without BPF", buf);
err = -1;
goto detach;
}
/* Attach child program and make sure it adds 0x10. */
link_child = bpf_program__attach_cgroup(obj->progs._setsockopt,
cg_child);
if (!ASSERT_OK_PTR(link_child, "cg-attach-setsockopt_child"))
goto detach;
buf = 0x80;
err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1);
if (err < 0) {
log_err("Failed to call setsockopt(IP_TOS)");
goto detach;
}
buf = 0x00;
optlen = 1;
err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
if (err) {
log_err("Failed to call getsockopt(IP_TOS)");
goto detach;
}
if (buf != 0x80 + 0x10) {
log_err("Unexpected getsockopt 0x%x != 0x80 + 0x10", buf);
err = -1;
goto detach;
}
/* Attach parent program and make sure it adds another 0x10. */
link_parent = bpf_program__attach_cgroup(obj->progs._setsockopt,
cg_parent);
if (!ASSERT_OK_PTR(link_parent, "cg-attach-setsockopt_parent"))
goto detach;
buf = 0x80;
err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1);
if (err < 0) {
log_err("Failed to call setsockopt(IP_TOS)");
goto detach;
}
buf = 0x00;
optlen = 1;
err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
if (err) {
log_err("Failed to call getsockopt(IP_TOS)");
goto detach;
}
if (buf != 0x80 + 2 * 0x10) {
log_err("Unexpected getsockopt 0x%x != 0x80 + 2 * 0x10", buf);
err = -1;
goto detach;
}
detach:
bpf_link__destroy(link_child);
bpf_link__destroy(link_parent);
return err;
}
void test_sockopt_multi(void)
{
int cg_parent = -1, cg_child = -1;
struct sockopt_multi *obj = NULL;
int sock_fd = -1;
cg_parent = test__join_cgroup("/parent");
if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent"))
goto out;
cg_child = test__join_cgroup("/parent/child");
if (!ASSERT_GE(cg_child, 0, "join_cgroup /parent/child"))
goto out;
obj = sockopt_multi__open_and_load();
if (!ASSERT_OK_PTR(obj, "skel-load"))
goto out;
obj->bss->page_size = sysconf(_SC_PAGESIZE);
sock_fd = socket(AF_INET, SOCK_STREAM, 0);
if (!ASSERT_GE(sock_fd, 0, "socket"))
goto out;
ASSERT_OK(run_getsockopt_test(obj, cg_parent, cg_child, sock_fd), "getsockopt_test");
ASSERT_OK(run_setsockopt_test(obj, cg_parent, cg_child, sock_fd), "setsockopt_test");
out:
close(sock_fd);
sockopt_multi__destroy(obj);
close(cg_child);
close(cg_parent);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sockopt_multi.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "timer_mim.skel.h"
#include "timer_mim_reject.skel.h"
static int timer_mim(struct timer_mim *timer_skel)
{
__u64 cnt1, cnt2;
int err, prog_fd, key1 = 1;
LIBBPF_OPTS(bpf_test_run_opts, topts);
err = timer_mim__attach(timer_skel);
if (!ASSERT_OK(err, "timer_attach"))
return err;
prog_fd = bpf_program__fd(timer_skel->progs.test1);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
timer_mim__detach(timer_skel);
/* check that timer_cb[12] are incrementing 'cnt' */
cnt1 = READ_ONCE(timer_skel->bss->cnt);
for (int i = 0; i < 100; i++) {
cnt2 = READ_ONCE(timer_skel->bss->cnt);
if (cnt2 != cnt1)
break;
usleep(200); /* 100 times more than interval */
}
ASSERT_GT(cnt2, cnt1, "cnt");
ASSERT_EQ(timer_skel->bss->err, 0, "err");
/* check that code paths completed */
ASSERT_EQ(timer_skel->bss->ok, 1 | 2, "ok");
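	/* Drop all references to the inner map; freeing it should also stop
	 * the timers armed in it, which is checked below.
	 */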
close(bpf_map__fd(timer_skel->maps.inner_htab));
err = bpf_map__delete_elem(timer_skel->maps.outer_arr, &key1, sizeof(key1), 0);
ASSERT_EQ(err, 0, "delete inner map");
/* check that timer_cb[12] are no longer running */
cnt1 = READ_ONCE(timer_skel->bss->cnt);
for (int i = 0; i < 100; i++) {
usleep(200); /* 100 times more than interval */
cnt2 = READ_ONCE(timer_skel->bss->cnt);
if (cnt2 == cnt1)
break;
}
ASSERT_EQ(cnt2, cnt1, "cnt");
return 0;
}
void serial_test_timer_mim(void)
{
struct timer_mim_reject *timer_reject_skel = NULL;
libbpf_print_fn_t old_print_fn = NULL;
struct timer_mim *timer_skel = NULL;
int err;
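	/* timer_mim_reject is expected to fail to load, so silence libbpf
	 * logs while attempting it.
	 */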
old_print_fn = libbpf_set_print(NULL);
timer_reject_skel = timer_mim_reject__open_and_load();
libbpf_set_print(old_print_fn);
if (!ASSERT_ERR_PTR(timer_reject_skel, "timer_reject_skel_load"))
goto cleanup;
timer_skel = timer_mim__open_and_load();
if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
goto cleanup;
err = timer_mim(timer_skel);
ASSERT_OK(err, "timer_mim");
cleanup:
timer_mim__destroy(timer_skel);
timer_mim_reject__destroy(timer_reject_skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/timer_mim.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "test_ksyms_module.lskel.h"
#include "test_ksyms_module.skel.h"
static void test_ksyms_module_lskel(void)
{
struct test_ksyms_module_lskel *skel;
int err;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
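	/* The program references ksyms exported by bpf_testmod, so skip the
	 * test if the module is not loaded.
	 */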
if (!env.has_testmod) {
test__skip();
return;
}
skel = test_ksyms_module_lskel__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_ksyms_module_lskel__open_and_load"))
return;
err = bpf_prog_test_run_opts(skel->progs.load.prog_fd, &topts);
if (!ASSERT_OK(err, "bpf_prog_test_run"))
goto cleanup;
ASSERT_EQ(topts.retval, 0, "retval");
ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");
cleanup:
test_ksyms_module_lskel__destroy(skel);
}
static void test_ksyms_module_libbpf(void)
{
struct test_ksyms_module *skel;
int err;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
if (!env.has_testmod) {
test__skip();
return;
}
skel = test_ksyms_module__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open"))
return;
err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.load), &topts);
if (!ASSERT_OK(err, "bpf_prog_test_run"))
goto cleanup;
ASSERT_EQ(topts.retval, 0, "retval");
ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");
cleanup:
test_ksyms_module__destroy(skel);
}
void test_ksyms_module(void)
{
if (test__start_subtest("lskel"))
test_ksyms_module_lskel();
if (test__start_subtest("libbpf"))
test_ksyms_module_libbpf();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/ksyms_module.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <test_progs.h>
static int duration = 0;
struct sec_name_test {
const char sec_name[32];
struct {
int rc;
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
} expected_load;
struct {
int rc;
enum bpf_attach_type attach_type;
} expected_attach;
};
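/* Each entry maps an ELF section name to the program type and attach type
 * libbpf is expected to infer from it, both when loading a program and when
 * calling libbpf_attach_type_by_name().
 */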
static struct sec_name_test tests[] = {
{"InvAliD", {-ESRCH, 0, 0}, {-EINVAL, 0} },
{"cgroup", {-ESRCH, 0, 0}, {-EINVAL, 0} },
{"socket", {0, BPF_PROG_TYPE_SOCKET_FILTER, 0}, {-EINVAL, 0} },
{"kprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
{"uprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
{"kretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
{"uretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
{"classifier", {0, BPF_PROG_TYPE_SCHED_CLS, 0}, {-EINVAL, 0} },
{"action", {0, BPF_PROG_TYPE_SCHED_ACT, 0}, {-EINVAL, 0} },
{"tracepoint/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} },
{"tp/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} },
{
"raw_tracepoint/",
{0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0},
{-EINVAL, 0},
},
{"raw_tp/", {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0}, {-EINVAL, 0} },
{"xdp", {0, BPF_PROG_TYPE_XDP, BPF_XDP}, {0, BPF_XDP} },
{"perf_event", {0, BPF_PROG_TYPE_PERF_EVENT, 0}, {-EINVAL, 0} },
{"lwt_in", {0, BPF_PROG_TYPE_LWT_IN, 0}, {-EINVAL, 0} },
{"lwt_out", {0, BPF_PROG_TYPE_LWT_OUT, 0}, {-EINVAL, 0} },
{"lwt_xmit", {0, BPF_PROG_TYPE_LWT_XMIT, 0}, {-EINVAL, 0} },
{"lwt_seg6local", {0, BPF_PROG_TYPE_LWT_SEG6LOCAL, 0}, {-EINVAL, 0} },
{
"cgroup_skb/ingress",
{0, BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_INGRESS},
{0, BPF_CGROUP_INET_INGRESS},
},
{
"cgroup_skb/egress",
{0, BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_EGRESS},
{0, BPF_CGROUP_INET_EGRESS},
},
{"cgroup/skb", {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, {-EINVAL, 0} },
{
"cgroup/sock",
{0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE},
{0, BPF_CGROUP_INET_SOCK_CREATE},
},
{
"cgroup/post_bind4",
{0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND},
{0, BPF_CGROUP_INET4_POST_BIND},
},
{
"cgroup/post_bind6",
{0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND},
{0, BPF_CGROUP_INET6_POST_BIND},
},
{
"cgroup/dev",
{0, BPF_PROG_TYPE_CGROUP_DEVICE, BPF_CGROUP_DEVICE},
{0, BPF_CGROUP_DEVICE},
},
{
"sockops",
{0, BPF_PROG_TYPE_SOCK_OPS, BPF_CGROUP_SOCK_OPS},
{0, BPF_CGROUP_SOCK_OPS},
},
{
"sk_skb/stream_parser",
{0, BPF_PROG_TYPE_SK_SKB, BPF_SK_SKB_STREAM_PARSER},
{0, BPF_SK_SKB_STREAM_PARSER},
},
{
"sk_skb/stream_verdict",
{0, BPF_PROG_TYPE_SK_SKB, BPF_SK_SKB_STREAM_VERDICT},
{0, BPF_SK_SKB_STREAM_VERDICT},
},
{"sk_skb", {0, BPF_PROG_TYPE_SK_SKB, 0}, {-EINVAL, 0} },
{
"sk_msg",
{0, BPF_PROG_TYPE_SK_MSG, BPF_SK_MSG_VERDICT},
{0, BPF_SK_MSG_VERDICT},
},
{
"lirc_mode2",
{0, BPF_PROG_TYPE_LIRC_MODE2, BPF_LIRC_MODE2},
{0, BPF_LIRC_MODE2},
},
{
"flow_dissector",
{0, BPF_PROG_TYPE_FLOW_DISSECTOR, BPF_FLOW_DISSECTOR},
{0, BPF_FLOW_DISSECTOR},
},
{
"cgroup/bind4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND},
{0, BPF_CGROUP_INET4_BIND},
},
{
"cgroup/bind6",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND},
{0, BPF_CGROUP_INET6_BIND},
},
{
"cgroup/connect4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT},
{0, BPF_CGROUP_INET4_CONNECT},
},
{
"cgroup/connect6",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT},
{0, BPF_CGROUP_INET6_CONNECT},
},
{
"cgroup/sendmsg4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG},
{0, BPF_CGROUP_UDP4_SENDMSG},
},
{
"cgroup/sendmsg6",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG},
{0, BPF_CGROUP_UDP6_SENDMSG},
},
{
"cgroup/recvmsg4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG},
{0, BPF_CGROUP_UDP4_RECVMSG},
},
{
"cgroup/recvmsg6",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG},
{0, BPF_CGROUP_UDP6_RECVMSG},
},
{
"cgroup/sysctl",
{0, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL},
{0, BPF_CGROUP_SYSCTL},
},
{
"cgroup/getsockopt",
{0, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT},
{0, BPF_CGROUP_GETSOCKOPT},
},
{
"cgroup/setsockopt",
{0, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT},
{0, BPF_CGROUP_SETSOCKOPT},
},
};
static void test_prog_type_by_name(const struct sec_name_test *test)
{
enum bpf_attach_type expected_attach_type;
enum bpf_prog_type prog_type;
int rc;
rc = libbpf_prog_type_by_name(test->sec_name, &prog_type,
&expected_attach_type);
CHECK(rc != test->expected_load.rc, "check_code",
"prog: unexpected rc=%d for %s\n", rc, test->sec_name);
if (rc)
return;
CHECK(prog_type != test->expected_load.prog_type, "check_prog_type",
"prog: unexpected prog_type=%d for %s\n",
prog_type, test->sec_name);
CHECK(expected_attach_type != test->expected_load.expected_attach_type,
"check_attach_type", "prog: unexpected expected_attach_type=%d for %s\n",
expected_attach_type, test->sec_name);
}
static void test_attach_type_by_name(const struct sec_name_test *test)
{
enum bpf_attach_type attach_type;
int rc;
rc = libbpf_attach_type_by_name(test->sec_name, &attach_type);
CHECK(rc != test->expected_attach.rc, "check_ret",
"attach: unexpected rc=%d for %s\n", rc, test->sec_name);
if (rc)
return;
CHECK(attach_type != test->expected_attach.attach_type,
"check_attach_type", "attach: unexpected attach_type=%d for %s\n",
attach_type, test->sec_name);
}
void test_section_names(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(tests); ++i) {
struct sec_name_test *test = &tests[i];
test_prog_type_by_name(test);
test_attach_type_by_name(test);
}
}
| linux-master | tools/testing/selftests/bpf/prog_tests/section_names.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define _GNU_SOURCE /* See feature_test_macros(7) */
#include <unistd.h>
#include <sched.h>
#include <pthread.h>
#include <sys/syscall.h> /* For SYS_xxx definitions */
#include <sys/types.h>
#include <test_progs.h>
#include "task_local_storage_helpers.h"
#include "task_local_storage.skel.h"
#include "task_local_storage_exit_creds.skel.h"
#include "task_ls_recursion.skel.h"
#include "task_storage_nodeadlock.skel.h"
static void test_sys_enter_exit(void)
{
struct task_local_storage *skel;
int err;
skel = task_local_storage__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
skel->bss->target_pid = syscall(SYS_gettid);
err = task_local_storage__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
syscall(SYS_gettid);
syscall(SYS_gettid);
/* 3x syscalls: 1x attach and 2x gettid */
ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
out:
task_local_storage__destroy(skel);
}
static void test_exit_creds(void)
{
struct task_local_storage_exit_creds *skel;
int err, run_count, sync_rcu_calls = 0;
const int MAX_SYNC_RCU_CALLS = 1000;
skel = task_local_storage_exit_creds__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
err = task_local_storage_exit_creds__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
/* trigger at least one exit_creds() */
if (CHECK_FAIL(system("ls > /dev/null")))
goto out;
/* kern_sync_rcu is not enough on its own as the read section we want
* to wait for may start after we enter synchronize_rcu, so our call
* won't wait for the section to finish. Loop on the run counter
* as well to ensure the program has run.
*/
do {
kern_sync_rcu();
run_count = __atomic_load_n(&skel->bss->run_count, __ATOMIC_SEQ_CST);
} while (run_count == 0 && ++sync_rcu_calls < MAX_SYNC_RCU_CALLS);
ASSERT_NEQ(sync_rcu_calls, MAX_SYNC_RCU_CALLS,
"sync_rcu count too high");
ASSERT_NEQ(run_count, 0, "run_count");
ASSERT_EQ(skel->bss->valid_ptr_count, 0, "valid_ptr_count");
ASSERT_NEQ(skel->bss->null_ptr_count, 0, "null_ptr_count");
out:
task_local_storage_exit_creds__destroy(skel);
}
static void test_recursion(void)
{
int err, map_fd, prog_fd, task_fd;
struct task_ls_recursion *skel;
struct bpf_prog_info info;
__u32 info_len = sizeof(info);
long value;
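	/* From user space, task local storage maps are keyed by a pidfd of
	 * the target task, so grab one for the current process.
	 */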
task_fd = sys_pidfd_open(getpid(), 0);
if (!ASSERT_NEQ(task_fd, -1, "sys_pidfd_open"))
return;
skel = task_ls_recursion__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
goto out;
err = task_ls_recursion__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
/* trigger sys_enter, make sure it does not cause deadlock */
skel->bss->test_pid = getpid();
syscall(SYS_gettid);
skel->bss->test_pid = 0;
task_ls_recursion__detach(skel);
/* Refer to the comment in BPF_PROG(on_update) for
* the explanation on the value 201 and 100.
*/
map_fd = bpf_map__fd(skel->maps.map_a);
err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
ASSERT_OK(err, "lookup map_a");
ASSERT_EQ(value, 201, "map_a value");
ASSERT_EQ(skel->bss->nr_del_errs, 1, "bpf_task_storage_delete busy");
map_fd = bpf_map__fd(skel->maps.map_b);
err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
ASSERT_OK(err, "lookup map_b");
ASSERT_EQ(value, 100, "map_b value");
prog_fd = bpf_program__fd(skel->progs.on_lookup);
memset(&info, 0, sizeof(info));
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
ASSERT_OK(err, "get prog info");
ASSERT_GT(info.recursion_misses, 0, "on_lookup prog recursion");
prog_fd = bpf_program__fd(skel->progs.on_update);
memset(&info, 0, sizeof(info));
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
ASSERT_OK(err, "get prog info");
ASSERT_EQ(info.recursion_misses, 0, "on_update prog recursion");
prog_fd = bpf_program__fd(skel->progs.on_enter);
memset(&info, 0, sizeof(info));
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
ASSERT_OK(err, "get prog info");
ASSERT_EQ(info.recursion_misses, 0, "on_enter prog recursion");
out:
close(task_fd);
task_ls_recursion__destroy(skel);
}
static bool stop;
static void waitall(const pthread_t *tids, int nr)
{
int i;
stop = true;
for (i = 0; i < nr; i++)
pthread_join(tids[i], NULL);
}
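/* Each socket() call triggers the sleepable socket_post_create program,
 * which exercises bpf_task_storage_get()/delete() concurrently across all
 * the threads running this loop.
 */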
static void *sock_create_loop(void *arg)
{
struct task_storage_nodeadlock *skel = arg;
int fd;
while (!stop) {
fd = socket(AF_INET, SOCK_STREAM, 0);
close(fd);
if (skel->bss->nr_get_errs || skel->bss->nr_del_errs)
stop = true;
}
return NULL;
}
static void test_nodeadlock(void)
{
struct task_storage_nodeadlock *skel;
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
const int nr_threads = 32;
pthread_t tids[nr_threads];
int i, prog_fd, err;
cpu_set_t old, new;
/* Pin all threads to one cpu to increase the chance of preemption
* in a sleepable bpf prog.
*/
CPU_ZERO(&new);
CPU_SET(0, &new);
err = sched_getaffinity(getpid(), sizeof(old), &old);
if (!ASSERT_OK(err, "getaffinity"))
return;
err = sched_setaffinity(getpid(), sizeof(new), &new);
if (!ASSERT_OK(err, "setaffinity"))
return;
skel = task_storage_nodeadlock__open_and_load();
if (!ASSERT_OK_PTR(skel, "open_and_load"))
goto done;
/* Unnecessary recursion and deadlock detection are reproducible
* in the preemptible kernel.
*/
if (!skel->kconfig->CONFIG_PREEMPT) {
test__skip();
goto done;
}
err = task_storage_nodeadlock__attach(skel);
ASSERT_OK(err, "attach prog");
for (i = 0; i < nr_threads; i++) {
err = pthread_create(&tids[i], NULL, sock_create_loop, skel);
if (err) {
/* Only assert once here to avoid excessive
* PASS printing during test failure.
*/
ASSERT_OK(err, "pthread_create");
waitall(tids, i);
goto done;
}
}
/* With 32 threads, 1s is enough to reproduce the issue */
sleep(1);
waitall(tids, nr_threads);
info_len = sizeof(info);
prog_fd = bpf_program__fd(skel->progs.socket_post_create);
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
ASSERT_OK(err, "get prog info");
ASSERT_EQ(info.recursion_misses, 0, "prog recursion");
ASSERT_EQ(skel->bss->nr_get_errs, 0, "bpf_task_storage_get busy");
ASSERT_EQ(skel->bss->nr_del_errs, 0, "bpf_task_storage_delete busy");
done:
task_storage_nodeadlock__destroy(skel);
sched_setaffinity(getpid(), sizeof(old), &old);
}
void test_task_local_storage(void)
{
if (test__start_subtest("sys_enter_exit"))
test_sys_enter_exit();
if (test__start_subtest("exit_creds"))
test_exit_creds();
if (test__start_subtest("recursion"))
test_recursion();
if (test__start_subtest("nodeadlock"))
test_nodeadlock();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/task_local_storage.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
struct callback_head {
struct callback_head *next;
void (*func)(struct callback_head *head);
};
/* ___shuffled flavor is just an illusion for BPF code, it doesn't really
* exist and user-space needs to provide data in the memory layout that
* matches callback_head. We just defined ___shuffled flavor to make it easier
 * to work with the skeleton.
*/
struct callback_head___shuffled {
struct callback_head___shuffled *next;
void (*func)(struct callback_head *head);
};
#include "test_core_read_macros.skel.h"
void test_core_read_macros(void)
{
int duration = 0, err;
	struct test_core_read_macros *skel;
struct test_core_read_macros__bss *bss;
struct callback_head u_probe_in;
struct callback_head___shuffled u_core_in;
skel = test_core_read_macros__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
bss = skel->bss;
bss->my_pid = getpid();
/* next pointers have to be set from the kernel side */
bss->k_probe_in.func = (void *)(long)0x1234;
bss->k_core_in.func = (void *)(long)0xabcd;
u_probe_in.next = &u_probe_in;
u_probe_in.func = (void *)(long)0x5678;
bss->u_probe_in = &u_probe_in;
u_core_in.next = &u_core_in;
u_core_in.func = (void *)(long)0xdbca;
bss->u_core_in = &u_core_in;
err = test_core_read_macros__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
/* trigger tracepoint */
usleep(1);
ASSERT_EQ(bss->k_probe_out, 0x1234, "k_probe_out");
ASSERT_EQ(bss->k_core_out, 0xabcd, "k_core_out");
ASSERT_EQ(bss->u_probe_out, 0x5678, "u_probe_out");
ASSERT_EQ(bss->u_core_out, 0xdbca, "u_core_out");
cleanup:
test_core_read_macros__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/core_read_macros.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Test suite of lwt BPF programs that reroute packets.
 * The tests check not only that these programs work as expected under normal
 * conditions, but also that they handle abnormal situations gracefully. The
 * suite currently covers only the lwt_xmit hook; lwt_in tests have not been
 * implemented.
*
* WARNING
* -------
* This test suite can crash the kernel, thus should be run in a VM.
*
* Setup:
* ---------
 * All tests are performed in a single netns. An lwt encap route is set up
 * for each subtest:
*
* ip route add 10.0.0.0/24 encap bpf xmit <obj> sec "<section_N>" dev link_err
*
* Here <obj> is statically defined to test_lwt_reroute.bpf.o, and it contains
 * a single test program entry. This program sets the packet mark to the last
 * byte of the IPv4 daddr. For example, a packet going to 1.2.3.4 will receive
 * an skb mark of 4. A packet is only marked once, and IP x.x.x.0 is skipped
 * to avoid a route loop. A generated BPF skeleton is not used because libbpf
 * does not yet support attaching lwt programs.
*
 * The test program brings up a tun device and sets up the following
 * routes:
*
* ip rule add pref 100 from all fwmark <tun_index> lookup 100
* ip route add table 100 default dev tun0
*
 * For normal testing, a ping command is run in the test netns:
*
* ping 10.0.0.<tun_index> -c 1 -w 1 -s 100
*
* For abnormal testing, fq is used as the qdisc of the tun device. Then a UDP
 * socket tries to overflow the fq queue and trigger a qdisc drop error.
*
* Scenarios:
* --------------------------------
* 1. Reroute to a running tun device
* 2. Reroute to a device where qdisc drop
*
* For case 1, ping packets should be received by the tun device.
*
 * For case 2, UDP packets are forced to overflow the fq limit. As long as
 * the kernel does not crash, the test is considered successful.
*/
#include "lwt_helpers.h"
#include "network_helpers.h"
#include <linux/net_tstamp.h>
#define BPF_OBJECT "test_lwt_reroute.bpf.o"
#define LOCAL_SRC "10.0.0.1"
#define TEST_CIDR "10.0.0.0/24"
#define XMIT_HOOK "xmit"
#define XMIT_SECTION "lwt_xmit"
#define NSEC_PER_SEC 1000000000ULL
/* send a ping to be rerouted to the target device */
static void ping_once(const char *ip)
{
/* We won't get a reply. Don't fail here */
SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1",
ip, ICMP_PAYLOAD_SIZE);
}
/* Send snd_target UDP packets to overflow the fq queue and trigger qdisc drop
 * error. This is done via a TX timestamp that forces packets to be buffered.
*/
static int overflow_fq(int snd_target, const char *target_ip)
{
struct sockaddr_in addr = {
.sin_family = AF_INET,
.sin_port = htons(1234),
};
char data_buf[8]; /* only #pkts matter, so use a random small buffer */
char control_buf[CMSG_SPACE(sizeof(uint64_t))];
struct iovec iov = {
.iov_base = data_buf,
.iov_len = sizeof(data_buf),
};
int err = -1;
int s = -1;
struct sock_txtime txtime_on = {
.clockid = CLOCK_MONOTONIC,
.flags = 0,
};
struct msghdr msg = {
.msg_name = &addr,
.msg_namelen = sizeof(addr),
.msg_control = control_buf,
.msg_controllen = sizeof(control_buf),
.msg_iovlen = 1,
.msg_iov = &iov,
};
struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
memset(data_buf, 0, sizeof(data_buf));
s = socket(AF_INET, SOCK_DGRAM, 0);
if (!ASSERT_GE(s, 0, "socket"))
goto out;
err = setsockopt(s, SOL_SOCKET, SO_TXTIME, &txtime_on, sizeof(txtime_on));
if (!ASSERT_OK(err, "setsockopt(SO_TXTIME)"))
goto out;
err = inet_pton(AF_INET, target_ip, &addr.sin_addr);
if (!ASSERT_EQ(err, 1, "inet_pton"))
goto out;
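	/* Stamp every packet with a TX time about one second in the future so
	 * that fq buffers it instead of sending it right away.
	 */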
while (snd_target > 0) {
struct timespec now;
memset(control_buf, 0, sizeof(control_buf));
cmsg->cmsg_type = SCM_TXTIME;
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_len = CMSG_LEN(sizeof(uint64_t));
err = clock_gettime(CLOCK_MONOTONIC, &now);
if (!ASSERT_OK(err, "clock_gettime(CLOCK_MONOTONIC)")) {
err = -1;
goto out;
}
		*(uint64_t *)CMSG_DATA(cmsg) = (now.tv_sec + 1) * NSEC_PER_SEC +
now.tv_nsec;
/* we will intentionally send more than fq limit, so ignore
* the error here.
*/
sendmsg(s, &msg, MSG_NOSIGNAL);
snd_target--;
}
/* no kernel crash so far is considered success */
err = 0;
out:
if (s >= 0)
close(s);
return err;
}
static int setup(const char *tun_dev)
{
int target_index = -1;
int tap_fd = -1;
tap_fd = open_tuntap(tun_dev, false);
if (!ASSERT_GE(tap_fd, 0, "open_tun"))
return -1;
target_index = if_nametoindex(tun_dev);
if (!ASSERT_GE(target_index, 0, "if_nametoindex"))
return -1;
SYS(fail, "ip link add link_err type dummy");
SYS(fail, "ip link set lo up");
SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32");
SYS(fail, "ip link set link_err up");
SYS(fail, "ip link set %s up", tun_dev);
SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec lwt_xmit",
TEST_CIDR, BPF_OBJECT);
SYS(fail, "ip rule add pref 100 from all fwmark %d lookup 100",
target_index);
SYS(fail, "ip route add t 100 default dev %s", tun_dev);
return tap_fd;
fail:
if (tap_fd >= 0)
close(tap_fd);
return -1;
}
static void test_lwt_reroute_normal_xmit(void)
{
const char *tun_dev = "tun0";
int tun_fd = -1;
int ifindex = -1;
char ip[256];
struct timeval timeo = {
.tv_sec = 0,
.tv_usec = 250000,
};
tun_fd = setup(tun_dev);
if (!ASSERT_GE(tun_fd, 0, "setup_reroute"))
return;
ifindex = if_nametoindex(tun_dev);
if (!ASSERT_GE(ifindex, 0, "if_nametoindex"))
return;
snprintf(ip, 256, "10.0.0.%d", ifindex);
/* ping packets should be received by the tun device */
ping_once(ip);
if (!ASSERT_EQ(wait_for_packet(tun_fd, __expect_icmp_ipv4, &timeo), 1,
"wait_for_packet"))
log_err("%s xmit", __func__);
}
/*
* Test the failure case when the skb is dropped at the qdisc. This is a
* regression prevention at the xmit hook only.
*/
static void test_lwt_reroute_qdisc_dropped(void)
{
const char *tun_dev = "tun0";
int tun_fd = -1;
int ifindex = -1;
char ip[256];
tun_fd = setup(tun_dev);
if (!ASSERT_GE(tun_fd, 0, "setup_reroute"))
goto fail;
SYS(fail, "tc qdisc replace dev %s root fq limit 5 flow_limit 5", tun_dev);
ifindex = if_nametoindex(tun_dev);
if (!ASSERT_GE(ifindex, 0, "if_nametoindex"))
		goto fail;
snprintf(ip, 256, "10.0.0.%d", ifindex);
ASSERT_EQ(overflow_fq(10, ip), 0, "overflow_fq");
fail:
if (tun_fd >= 0)
close(tun_fd);
}
static void *test_lwt_reroute_run(void *arg)
{
netns_delete();
RUN_TEST(lwt_reroute_normal_xmit);
RUN_TEST(lwt_reroute_qdisc_dropped);
return NULL;
}
void test_lwt_reroute(void)
{
pthread_t test_thread;
int err;
/* Run the tests in their own thread to isolate the namespace changes
* so they do not affect the environment of other tests.
* (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
*/
err = pthread_create(&test_thread, NULL, &test_lwt_reroute_run, NULL);
if (ASSERT_OK(err, "pthread_create"))
ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
}
| linux-master | tools/testing/selftests/bpf/prog_tests/lwt_reroute.c |