// SPDX-License-Identifier: GPL-2.0-only
#include <stdbool.h>
#include <stdlib.h>
#include <error.h>
#include <stdio.h>
#include "unpriv_helpers.h"
bool get_unpriv_disabled(void)
{
bool disabled;
char buf[2];
FILE *fd;
fd = fopen("/proc/sys/" UNPRIV_SYSCTL, "r");
if (fd) {
disabled = (fgets(buf, 2, fd) == buf && atoi(buf));
fclose(fd);
} else {
perror("fopen /proc/sys/" UNPRIV_SYSCTL);
disabled = true;
}
return disabled;
}
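/* Usage sketch (illustrative, not part of this helper): the test harness
* calls this once at startup. Note that fgets(buf, 2, fd) reads a single
* character, so any non-zero sysctl setting (e.g. 1 or 2) is reported as
* disabled.
*/
#if 0
	if (get_unpriv_disabled())
		printf("unprivileged BPF is disabled via /proc/sys/" UNPRIV_SYSCTL "\n");
#endif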
| linux-master | tools/testing/selftests/bpf/unpriv_helpers.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "sdt.h"
void urandlib_read_without_sema(int iter_num, int iter_cnt, int read_sz)
{
STAP_PROBE3(urandlib, read_without_sema, iter_num, iter_cnt, read_sz);
}
| linux-master | tools/testing/selftests/bpf/urandom_read_lib2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Testsuite for eBPF verifier
*
* Copyright (c) 2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2017 Facebook
* Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
*/
#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>
#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>
#include <bpf/btf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "autoconf_helper.h"
#include "unpriv_helpers.h"
#include "cap_helpers.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "test_btf.h"
#include "../../../include/linux/filter.h"
#include "testing_helpers.h"
#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif
#define MAX_INSNS BPF_MAXINSNS
#define MAX_EXPECTED_INSNS 32
#define MAX_UNEXPECTED_INSNS 32
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
#define MAX_NR_MAPS 23
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
#define MAX_FUNC_INFOS 8
#define MAX_BTF_STRINGS 256
#define MAX_BTF_TYPES 256
#define INSN_OFF_MASK ((__s16)0xFFFF)
#define INSN_IMM_MASK ((__s32)0xFFFFFFFF)
#define SKIP_INSNS() BPF_RAW_INSN(0xde, 0xa, 0xd, 0xbeef, 0xdeadbeef)
#define DEFAULT_LIBBPF_LOG_LEVEL 4
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
#define ADMIN_CAPS (1ULL << CAP_NET_ADMIN | \
1ULL << CAP_PERFMON | \
1ULL << CAP_BPF)
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;
static bool verbose = false;
static int verif_log_level = 0;
struct kfunc_btf_id_pair {
const char *kfunc;
int insn_idx;
};
struct bpf_test {
const char *descr;
struct bpf_insn insns[MAX_INSNS];
struct bpf_insn *fill_insns;
/* If specified, the test engine looks for this sequence of
* instructions in the BPF program after loading. This allows
* testing rewrites applied by the verifier. Use the values
* INSN_OFF_MASK and INSN_IMM_MASK to mask the `off` and `imm`
* fields if their content does not matter. The test case fails
* if the specified instructions are not found.
*
* The sequence can be split into sub-sequences by adding a
* SKIP_INSNS instruction at the end of each sub-sequence. In
* that case the sub-sequences are searched for one after another.
*/
struct bpf_insn expected_insns[MAX_EXPECTED_INSNS];
/* If specified, the test engine applies the same pattern-matching
* logic as for `expected_insns`. If the specified pattern is
* matched, the test case is marked as failed.
*/
struct bpf_insn unexpected_insns[MAX_UNEXPECTED_INSNS];
int fixup_map_hash_8b[MAX_FIXUPS];
int fixup_map_hash_48b[MAX_FIXUPS];
int fixup_map_hash_16b[MAX_FIXUPS];
int fixup_map_array_48b[MAX_FIXUPS];
int fixup_map_sockmap[MAX_FIXUPS];
int fixup_map_sockhash[MAX_FIXUPS];
int fixup_map_xskmap[MAX_FIXUPS];
int fixup_map_stacktrace[MAX_FIXUPS];
int fixup_prog1[MAX_FIXUPS];
int fixup_prog2[MAX_FIXUPS];
int fixup_map_in_map[MAX_FIXUPS];
int fixup_cgroup_storage[MAX_FIXUPS];
int fixup_percpu_cgroup_storage[MAX_FIXUPS];
int fixup_map_spin_lock[MAX_FIXUPS];
int fixup_map_array_ro[MAX_FIXUPS];
int fixup_map_array_wo[MAX_FIXUPS];
int fixup_map_array_small[MAX_FIXUPS];
int fixup_sk_storage_map[MAX_FIXUPS];
int fixup_map_event_output[MAX_FIXUPS];
int fixup_map_reuseport_array[MAX_FIXUPS];
int fixup_map_ringbuf[MAX_FIXUPS];
int fixup_map_timer[MAX_FIXUPS];
int fixup_map_kptr[MAX_FIXUPS];
struct kfunc_btf_id_pair fixup_kfunc_btf_id[MAX_FIXUPS];
/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
* Can be a tab-separated sequence of expected strings. An empty string
* means no log verification.
*/
const char *errstr;
const char *errstr_unpriv;
uint32_t insn_processed;
int prog_len;
enum {
UNDEF,
ACCEPT,
REJECT,
VERBOSE_ACCEPT,
} result, result_unpriv;
enum bpf_prog_type prog_type;
uint8_t flags;
void (*fill_helper)(struct bpf_test *self);
int runs;
#define bpf_testdata_struct_t \
struct { \
uint32_t retval, retval_unpriv; \
union { \
__u8 data[TEST_DATA_LEN]; \
__u64 data64[TEST_DATA_LEN / 8]; \
}; \
}
union {
bpf_testdata_struct_t;
bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
};
enum bpf_attach_type expected_attach_type;
const char *kfunc;
struct bpf_func_info func_info[MAX_FUNC_INFOS];
int func_info_cnt;
char btf_strings[MAX_BTF_STRINGS];
/* A set of BTF types to load when specified; use the macro
* definitions from test_btf.h and terminate the list with
* BTF_END_RAW.
*/
__u32 btf_types[MAX_BTF_TYPES];
};
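/* Illustrative test entry (a sketch, not one of the real cases included from
* verifier/tests.h below): expected_insns pins down a verifier rewrite.
* INSN_IMM_MASK wildcards the imm field and SKIP_INSNS() separates
* independently matched sub-sequences.
*/
#if 0
	{
		.descr = "example: check xlated program shape",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.expected_insns = {
			BPF_MOV64_IMM(BPF_REG_0, INSN_IMM_MASK),
			SKIP_INSNS(),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
#endif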
/* Note we want this to be 64 bit aligned so that the end of our array is
* actually the end of the structure.
*/
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct other_val {
long long foo;
long long bar;
};
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
/* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
struct bpf_insn *insn = self->fill_insns;
int i = 0, j, k = 0;
insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
for (j = 0; j < PUSH_CNT; j++) {
insn[i++] = BPF_LD_ABS(BPF_B, 0);
/* jump to error label */
insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
i++;
insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_vlan_push);
insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
i++;
}
for (j = 0; j < PUSH_CNT; j++) {
insn[i++] = BPF_LD_ABS(BPF_B, 0);
insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
i++;
insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_vlan_pop);
insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
i++;
}
if (++k < 5)
goto loop;
for (; i < len - 3; i++)
insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
insn[len - 3] = BPF_JMP_A(1);
/* error label */
insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
insn[len - 1] = BPF_EXIT_INSN();
self->prog_len = len;
}
static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
struct bpf_insn *insn = self->fill_insns;
/* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
* but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
* to extend the error value of the inlined ld_abs sequence which then
* contains 7 insns. so, set the divisor to 7 so the testcase could
* work on all arches.
*/
unsigned int len = (1 << 15) / 7;
int i = 0;
insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
insn[i++] = BPF_LD_ABS(BPF_B, 0);
insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
i++;
while (i < len - 1)
insn[i++] = BPF_LD_ABS(BPF_B, 1);
insn[i] = BPF_EXIT_INSN();
self->prog_len = i + 1;
}
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
struct bpf_insn *insn = self->fill_insns;
uint64_t res = 0;
int i = 0;
insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
while (i < self->retval) {
uint64_t val = bpf_semi_rand_get();
struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
res ^= val;
insn[i++] = tmp[0];
insn[i++] = tmp[1];
insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
}
insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
insn[i] = BPF_EXIT_INSN();
self->prog_len = i + 1;
res ^= (res >> 32);
self->retval = (uint32_t)res;
}
#define MAX_JMP_SEQ 8192
/* test the sequence of 8k jumps */
static void bpf_fill_scale1(struct bpf_test *self)
{
struct bpf_insn *insn = self->fill_insns;
int i = 0, k = 0;
insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
/* test to check that the long sequence of jumps is acceptable */
while (k++ < MAX_JMP_SEQ) {
insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_prandom_u32);
insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
-8 * (k % 64 + 1));
}
/* is_state_visited() doesn't allocate state for pruning for every jump.
* Hence multiply jmps by 4 to accommodate that heuristic
*/
while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
insn[i] = BPF_EXIT_INSN();
self->prog_len = i + 1;
self->retval = 42;
}
/* test the sequence of 8k jumps in the innermost function (function depth 8) */
static void bpf_fill_scale2(struct bpf_test *self)
{
struct bpf_insn *insn = self->fill_insns;
int i = 0, k = 0;
#define FUNC_NEST 7
for (k = 0; k < FUNC_NEST; k++) {
insn[i++] = BPF_CALL_REL(1);
insn[i++] = BPF_EXIT_INSN();
}
insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
/* test to check that the long sequence of jumps is acceptable */
k = 0;
while (k++ < MAX_JMP_SEQ) {
insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_prandom_u32);
insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
-8 * (k % (64 - 4 * FUNC_NEST) + 1));
}
while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
insn[i] = BPF_EXIT_INSN();
self->prog_len = i + 1;
self->retval = 42;
}
static void bpf_fill_scale(struct bpf_test *self)
{
switch (self->retval) {
case 1:
return bpf_fill_scale1(self);
case 2:
return bpf_fill_scale2(self);
default:
self->prog_len = 0;
break;
}
}
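/* Illustrative entry (a sketch; the real scale tests are pulled in via
* verifier/tests.h): .retval doubles as the variant selector consumed by
* bpf_fill_scale() and is then overwritten by the fill helper with the
* expected return value (42).
*/
#if 0
	{
		.descr = "example: scale test variant 1",
		.fill_helper = bpf_fill_scale,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
#endif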
static int bpf_fill_torturous_jumps_insn_1(struct bpf_insn *insn)
{
unsigned int len = 259, hlen = 128;
int i;
insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
for (i = 1; i <= hlen; i++) {
insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, hlen);
insn[i + hlen] = BPF_JMP_A(hlen - i);
}
insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 1);
insn[len - 1] = BPF_EXIT_INSN();
return len;
}
static int bpf_fill_torturous_jumps_insn_2(struct bpf_insn *insn)
{
unsigned int len = 4100, jmp_off = 2048;
int i, j;
insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
for (i = 1; i <= jmp_off; i++) {
insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, jmp_off);
}
insn[i++] = BPF_JMP_A(jmp_off);
for (; i <= jmp_off * 2 + 1; i+=16) {
for (j = 0; j < 16; j++) {
insn[i + j] = BPF_JMP_A(16 - j - 1);
}
}
insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 2);
insn[len - 1] = BPF_EXIT_INSN();
return len;
}
static void bpf_fill_torturous_jumps(struct bpf_test *self)
{
struct bpf_insn *insn = self->fill_insns;
int i = 0;
switch (self->retval) {
case 1:
self->prog_len = bpf_fill_torturous_jumps_insn_1(insn);
return;
case 2:
self->prog_len = bpf_fill_torturous_jumps_insn_2(insn);
return;
case 3:
/* main */
insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4);
insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 262);
insn[i++] = BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0);
insn[i++] = BPF_MOV64_IMM(BPF_REG_0, 3);
insn[i++] = BPF_EXIT_INSN();
/* subprog 1 */
i += bpf_fill_torturous_jumps_insn_1(insn + i);
/* subprog 2 */
i += bpf_fill_torturous_jumps_insn_2(insn + i);
self->prog_len = i;
return;
default:
self->prog_len = 0;
break;
}
}
static void bpf_fill_big_prog_with_loop_1(struct bpf_test *self)
{
struct bpf_insn *insn = self->fill_insns;
/* This test was added to catch a specific use-after-free
* error, which happened upon BPF program reallocation.
* Reallocation is handled by core.c:bpf_prog_realloc, which
* reuses old memory if page boundary is not crossed. The
* value of `len` is chosen to cross this boundary on bpf_loop
* patching.
*/
const int len = getpagesize() - 25;
int callback_load_idx;
int callback_idx;
int i = 0;
insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1);
callback_load_idx = i;
insn[i++] = BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW,
BPF_REG_2, BPF_PSEUDO_FUNC, 0,
777 /* filled below */);
insn[i++] = BPF_RAW_INSN(0, 0, 0, 0, 0);
insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0);
insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0);
insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop);
while (i < len - 3)
insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
insn[i++] = BPF_EXIT_INSN();
callback_idx = i;
insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
insn[i++] = BPF_EXIT_INSN();
insn[callback_load_idx].imm = callback_idx - callback_load_idx - 1;
self->func_info[1].insn_off = callback_idx;
self->prog_len = i;
assert(i == len);
}
/* BPF_SK_LOOKUP expands to 13 instructions; account for them when fixing up map instruction indices */
#define BPF_SK_LOOKUP(func) \
/* struct bpf_sock_tuple tuple = {} */ \
BPF_MOV64_IMM(BPF_REG_2, 0), \
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
BPF_MOV64_IMM(BPF_REG_4, 0), \
BPF_MOV64_IMM(BPF_REG_5, 0), \
BPF_EMIT_CALL(BPF_FUNC_ ## func)
/* BPF_DIRECT_PKT_R2 contains 7 instructions. It initializes the default
* return value to 0 and does the necessary preparation for direct packet
* access through r2. The allowed access range is 8 bytes.
*/
#define BPF_DIRECT_PKT_R2 \
BPF_MOV64_IMM(BPF_REG_0, 0), \
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
offsetof(struct __sk_buff, data)), \
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
offsetof(struct __sk_buff, data_end)), \
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
BPF_EXIT_INSN()
/* BPF_RAND_UEXT_R7 contains 4 instructions. It initializes R7 to a random
* positive u32 and zero-extends it to 64 bits.
*/
#define BPF_RAND_UEXT_R7 \
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
BPF_FUNC_get_prandom_u32), \
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
/* BPF_RAND_SEXT_R7 contains 5 instructions. It initializes R7 to a random
* negative u32 and sign-extends it to 64 bits.
*/
#define BPF_RAND_SEXT_R7 \
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
BPF_FUNC_get_prandom_u32), \
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
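/* Illustrative use of the helper macros above inside a test's .insns array
* (a sketch, not a real test case). BPF_SK_LOOKUP() expands in place, so
* jump offsets and fixup indices after it must account for its 13
* instructions.
*/
#if 0
	.insns = {
		BPF_SK_LOOKUP(sk_lookup_tcp),
		/* if (sk) { sk_release(sk); } return 0; */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_EMIT_CALL(BPF_FUNC_sk_release),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
#endif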
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};
static int probe_filter_length(const struct bpf_insn *fp)
{
int len;
for (len = MAX_INSNS - 1; len > 0; --len)
if (fp[len].code != 0 || fp[len].imm != 0)
break;
return len + 1;
}
static bool skip_unsupported_map(enum bpf_map_type map_type)
{
if (!libbpf_probe_bpf_map_type(map_type, NULL)) {
printf("SKIP (unsupported map type %d)\n", map_type);
skips++;
return true;
}
return false;
}
static int __create_map(uint32_t type, uint32_t size_key,
uint32_t size_value, uint32_t max_elem,
uint32_t extra_flags)
{
LIBBPF_OPTS(bpf_map_create_opts, opts);
int fd;
opts.map_flags = (type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0) | extra_flags;
fd = bpf_map_create(type, NULL, size_key, size_value, max_elem, &opts);
if (fd < 0) {
if (skip_unsupported_map(type))
return -1;
printf("Failed to create hash map '%s'!\n", strerror(errno));
}
return fd;
}
static int create_map(uint32_t type, uint32_t size_key,
uint32_t size_value, uint32_t max_elem)
{
return __create_map(type, size_key, size_value, max_elem, 0);
}
static void update_map(int fd, int index)
{
struct test_val value = {
.index = (6 + 1) * sizeof(int),
.foo[6] = 0xabcdef12,
};
assert(!bpf_map_update_elem(fd, &index, &value, 0));
}
static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_0, ret),
BPF_EXIT_INSN(),
};
return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}
static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
int idx, int ret)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_3, idx),
BPF_LD_MAP_FD(BPF_REG_2, mfd),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, ret),
BPF_EXIT_INSN(),
};
return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
int p1key, int p2key, int p3key)
{
int mfd, p1fd, p2fd, p3fd;
mfd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, NULL, sizeof(int),
sizeof(int), max_elem, NULL);
if (mfd < 0) {
if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
return -1;
printf("Failed to create prog array '%s'!\n", strerror(errno));
return -1;
}
p1fd = create_prog_dummy_simple(prog_type, 42);
p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
p3fd = create_prog_dummy_simple(prog_type, 24);
if (p1fd < 0 || p2fd < 0 || p3fd < 0)
goto err;
if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
goto err;
if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
goto err;
if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
err:
close(mfd);
mfd = -1;
}
close(p3fd);
close(p2fd);
close(p1fd);
return mfd;
}
static int create_map_in_map(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts);
int inner_map_fd, outer_map_fd;
inner_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int),
sizeof(int), 1, NULL);
if (inner_map_fd < 0) {
if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
return -1;
printf("Failed to create array '%s'!\n", strerror(errno));
return inner_map_fd;
}
opts.inner_map_fd = inner_map_fd;
outer_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
sizeof(int), sizeof(int), 1, &opts);
if (outer_map_fd < 0) {
if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
return -1;
printf("Failed to create array of maps '%s'!\n",
strerror(errno));
}
close(inner_map_fd);
return outer_map_fd;
}
static int create_cgroup_storage(bool percpu)
{
enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
BPF_MAP_TYPE_CGROUP_STORAGE;
int fd;
fd = bpf_map_create(type, NULL, sizeof(struct bpf_cgroup_storage_key),
TEST_DATA_LEN, 0, NULL);
if (fd < 0) {
if (skip_unsupported_map(type))
return -1;
printf("Failed to create cgroup storage '%s'!\n",
strerror(errno));
}
return fd;
}
/* struct bpf_spin_lock {
* int val;
* };
* struct val {
* int cnt;
* struct bpf_spin_lock l;
* };
* struct bpf_timer {
* __u64 :64;
* __u64 :64;
* } __attribute__((aligned(8)));
* struct timer {
* struct bpf_timer t;
* };
* struct btf_ptr {
* struct prog_test_ref_kfunc __kptr_untrusted *ptr;
* struct prog_test_ref_kfunc __kptr *ptr;
* struct prog_test_member __kptr *ptr;
* }
*/
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"
"\0btf_ptr\0prog_test_ref_kfunc\0ptr\0kptr\0kptr_untrusted"
"\0prog_test_member";
static __u32 btf_raw_types[] = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* struct bpf_spin_lock */ /* [2] */
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
BTF_MEMBER_ENC(15, 1, 0), /* int val; */
/* struct val */ /* [3] */
BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
/* struct bpf_timer */ /* [4] */
BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
/* struct timer */ /* [5] */
BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
/* struct prog_test_ref_kfunc */ /* [6] */
BTF_STRUCT_ENC(51, 0, 0),
BTF_STRUCT_ENC(95, 0, 0), /* [7] */
/* type tag "kptr_untrusted" */
BTF_TYPE_TAG_ENC(80, 6), /* [8] */
/* type tag "kptr" */
BTF_TYPE_TAG_ENC(75, 6), /* [9] */
BTF_TYPE_TAG_ENC(75, 7), /* [10] */
BTF_PTR_ENC(8), /* [11] */
BTF_PTR_ENC(9), /* [12] */
BTF_PTR_ENC(10), /* [13] */
/* struct btf_ptr */ /* [14] */
BTF_STRUCT_ENC(43, 3, 24),
BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr_untrusted *ptr; */
BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr *ptr; */
BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr *ptr; */
};
static char bpf_vlog[UINT_MAX >> 8];
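/* load_btf_spec() below assembles an in-memory BTF image with the layout
* expected by bpf_btf_load():
*
*	+--------------------+  offset 0
*	| struct btf_header  |  hdr_len bytes
*	+--------------------+  type_off = 0 (relative to the end of the header)
*	| type section       |  type_len bytes (e.g. btf_raw_types[] above)
*	+--------------------+  str_off = type_len
*	| string section     |  str_len bytes of "\0"-separated names
*	+--------------------+
*/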
static int load_btf_spec(__u32 *types, int types_len,
const char *strings, int strings_len)
{
struct btf_header hdr = {
.magic = BTF_MAGIC,
.version = BTF_VERSION,
.hdr_len = sizeof(struct btf_header),
.type_len = types_len,
.str_off = types_len,
.str_len = strings_len,
};
void *ptr, *raw_btf;
int btf_fd;
LIBBPF_OPTS(bpf_btf_load_opts, opts,
.log_buf = bpf_vlog,
.log_size = sizeof(bpf_vlog),
.log_level = (verbose
? verif_log_level
: DEFAULT_LIBBPF_LOG_LEVEL),
);
raw_btf = malloc(sizeof(hdr) + types_len + strings_len);
ptr = raw_btf;
memcpy(ptr, &hdr, sizeof(hdr));
ptr += sizeof(hdr);
memcpy(ptr, types, hdr.type_len);
ptr += hdr.type_len;
memcpy(ptr, strings, hdr.str_len);
ptr += hdr.str_len;
btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, &opts);
if (btf_fd < 0)
printf("Failed to load BTF spec: '%s'\n", strerror(errno));
free(raw_btf);
return btf_fd < 0 ? -1 : btf_fd;
}
static int load_btf(void)
{
return load_btf_spec(btf_raw_types, sizeof(btf_raw_types),
btf_str_sec, sizeof(btf_str_sec));
}
static int load_btf_for_test(struct bpf_test *test)
{
int types_num = 0;
while (types_num < MAX_BTF_TYPES &&
test->btf_types[types_num] != BTF_END_RAW)
++types_num;
int types_len = types_num * sizeof(test->btf_types[0]);
return load_btf_spec(test->btf_types, types_len,
test->btf_strings, sizeof(test->btf_strings));
}
static int create_map_spin_lock(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts,
.btf_key_type_id = 1,
.btf_value_type_id = 3,
);
int fd, btf_fd;
btf_fd = load_btf();
if (btf_fd < 0)
return -1;
opts.btf_fd = btf_fd;
fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 8, 1, &opts);
if (fd < 0)
printf("Failed to create map with spin_lock\n");
return fd;
}
static int create_sk_storage_map(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts,
.map_flags = BPF_F_NO_PREALLOC,
.btf_key_type_id = 1,
.btf_value_type_id = 3,
);
int fd, btf_fd;
btf_fd = load_btf();
if (btf_fd < 0)
return -1;
opts.btf_fd = btf_fd;
fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "test_map", 4, 8, 0, &opts);
close(opts.btf_fd);
if (fd < 0)
printf("Failed to create sk_storage_map\n");
return fd;
}
static int create_map_timer(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts,
.btf_key_type_id = 1,
.btf_value_type_id = 5,
);
int fd, btf_fd;
btf_fd = load_btf();
if (btf_fd < 0)
return -1;
opts.btf_fd = btf_fd;
fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 16, 1, &opts);
if (fd < 0)
printf("Failed to create map with timer\n");
return fd;
}
static int create_map_kptr(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts,
.btf_key_type_id = 1,
.btf_value_type_id = 14,
);
int fd, btf_fd;
btf_fd = load_btf();
if (btf_fd < 0)
return -1;
opts.btf_fd = btf_fd;
fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 24, 1, &opts);
if (fd < 0)
printf("Failed to create map with btf_id pointer\n");
return fd;
}
static void set_root(bool set)
{
__u64 caps;
if (set) {
if (cap_enable_effective(1ULL << CAP_SYS_ADMIN, &caps))
perror("cap_disable_effective(CAP_SYS_ADMIN)");
} else {
if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps))
perror("cap_disable_effective(CAP_SYS_ADMIN)");
}
}
static __u64 ptr_to_u64(const void *ptr)
{
return (uintptr_t) ptr;
}
static struct btf *btf__load_testmod_btf(struct btf *vmlinux)
{
struct bpf_btf_info info;
__u32 len = sizeof(info);
struct btf *btf = NULL;
char name[64];
__u32 id = 0;
int err, fd;
/* Iterate over all loaded BTF objects to find bpf_testmod;
* we need the CAP_SYS_ADMIN capability for that.
*/
set_root(true);
while (true) {
err = bpf_btf_get_next_id(id, &id);
if (err) {
if (errno == ENOENT)
break;
perror("bpf_btf_get_next_id failed");
break;
}
fd = bpf_btf_get_fd_by_id(id);
if (fd < 0) {
if (errno == ENOENT)
continue;
perror("bpf_btf_get_fd_by_id failed");
break;
}
memset(&info, 0, sizeof(info));
info.name_len = sizeof(name);
info.name = ptr_to_u64(name);
len = sizeof(info);
err = bpf_obj_get_info_by_fd(fd, &info, &len);
if (err) {
close(fd);
perror("bpf_obj_get_info_by_fd failed");
break;
}
if (strcmp("bpf_testmod", name)) {
close(fd);
continue;
}
btf = btf__load_from_kernel_by_id_split(id, vmlinux);
if (!btf) {
close(fd);
break;
}
/* We need the fd to stay open so it can be used in fd_array.
* The final cleanup call to btf__free will free the btf object
* and close the file descriptor.
*/
btf__set_fd(btf, fd);
break;
}
set_root(false);
return btf;
}
static struct btf *testmod_btf;
static struct btf *vmlinux_btf;
static void kfuncs_cleanup(void)
{
btf__free(testmod_btf);
btf__free(vmlinux_btf);
}
static void fixup_prog_kfuncs(struct bpf_insn *prog, int *fd_array,
struct kfunc_btf_id_pair *fixup_kfunc_btf_id)
{
/* Patch in kfunc BTF IDs */
while (fixup_kfunc_btf_id->kfunc) {
int btf_id = 0;
/* try to find kfunc in kernel BTF */
vmlinux_btf = vmlinux_btf ?: btf__load_vmlinux_btf();
if (vmlinux_btf) {
btf_id = btf__find_by_name_kind(vmlinux_btf,
fixup_kfunc_btf_id->kfunc,
BTF_KIND_FUNC);
btf_id = btf_id < 0 ? 0 : btf_id;
}
/* kfunc not found in kernel BTF, try bpf_testmod BTF */
if (!btf_id) {
testmod_btf = testmod_btf ?: btf__load_testmod_btf(vmlinux_btf);
if (testmod_btf) {
btf_id = btf__find_by_name_kind(testmod_btf,
fixup_kfunc_btf_id->kfunc,
BTF_KIND_FUNC);
btf_id = btf_id < 0 ? 0 : btf_id;
if (btf_id) {
/* We put bpf_testmod module fd into fd_array
* and its index 1 into instruction 'off'.
*/
*fd_array = btf__fd(testmod_btf);
prog[fixup_kfunc_btf_id->insn_idx].off = 1;
}
}
}
prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
fixup_kfunc_btf_id++;
}
}
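/* Illustrative fixup request in a test case (a sketch): the test names the
* kfunc and the index of its call instruction; fixup_prog_kfuncs() patches
* insn->imm with the resolved BTF id and, for module kfuncs such as
* bpf_testmod_test_mod_kfunc, also sets insn->off to 1 (the fd_array slot
* holding the bpf_testmod BTF fd).
*/
#if 0
	.fixup_kfunc_btf_id = {
		{ "bpf_testmod_test_mod_kfunc", 2 },
	},
#endif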
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
struct bpf_insn *prog, int *map_fds, int *fd_array)
{
int *fixup_map_hash_8b = test->fixup_map_hash_8b;
int *fixup_map_hash_48b = test->fixup_map_hash_48b;
int *fixup_map_hash_16b = test->fixup_map_hash_16b;
int *fixup_map_array_48b = test->fixup_map_array_48b;
int *fixup_map_sockmap = test->fixup_map_sockmap;
int *fixup_map_sockhash = test->fixup_map_sockhash;
int *fixup_map_xskmap = test->fixup_map_xskmap;
int *fixup_map_stacktrace = test->fixup_map_stacktrace;
int *fixup_prog1 = test->fixup_prog1;
int *fixup_prog2 = test->fixup_prog2;
int *fixup_map_in_map = test->fixup_map_in_map;
int *fixup_cgroup_storage = test->fixup_cgroup_storage;
int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
int *fixup_map_spin_lock = test->fixup_map_spin_lock;
int *fixup_map_array_ro = test->fixup_map_array_ro;
int *fixup_map_array_wo = test->fixup_map_array_wo;
int *fixup_map_array_small = test->fixup_map_array_small;
int *fixup_sk_storage_map = test->fixup_sk_storage_map;
int *fixup_map_event_output = test->fixup_map_event_output;
int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
int *fixup_map_ringbuf = test->fixup_map_ringbuf;
int *fixup_map_timer = test->fixup_map_timer;
int *fixup_map_kptr = test->fixup_map_kptr;
if (test->fill_helper) {
test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
test->fill_helper(test);
}
/* Allocating HTs with 1 elem is fine here, since we only exercise
* the verifier and do not perform runtime lookups, so the only thing
* that really matters in this case is the value size.
*/
if (*fixup_map_hash_8b) {
map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
sizeof(long long), 1);
do {
prog[*fixup_map_hash_8b].imm = map_fds[0];
fixup_map_hash_8b++;
} while (*fixup_map_hash_8b);
}
if (*fixup_map_hash_48b) {
map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
sizeof(struct test_val), 1);
do {
prog[*fixup_map_hash_48b].imm = map_fds[1];
fixup_map_hash_48b++;
} while (*fixup_map_hash_48b);
}
if (*fixup_map_hash_16b) {
map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
sizeof(struct other_val), 1);
do {
prog[*fixup_map_hash_16b].imm = map_fds[2];
fixup_map_hash_16b++;
} while (*fixup_map_hash_16b);
}
if (*fixup_map_array_48b) {
map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
sizeof(struct test_val), 1);
update_map(map_fds[3], 0);
do {
prog[*fixup_map_array_48b].imm = map_fds[3];
fixup_map_array_48b++;
} while (*fixup_map_array_48b);
}
if (*fixup_prog1) {
map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
do {
prog[*fixup_prog1].imm = map_fds[4];
fixup_prog1++;
} while (*fixup_prog1);
}
if (*fixup_prog2) {
map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
do {
prog[*fixup_prog2].imm = map_fds[5];
fixup_prog2++;
} while (*fixup_prog2);
}
if (*fixup_map_in_map) {
map_fds[6] = create_map_in_map();
do {
prog[*fixup_map_in_map].imm = map_fds[6];
fixup_map_in_map++;
} while (*fixup_map_in_map);
}
if (*fixup_cgroup_storage) {
map_fds[7] = create_cgroup_storage(false);
do {
prog[*fixup_cgroup_storage].imm = map_fds[7];
fixup_cgroup_storage++;
} while (*fixup_cgroup_storage);
}
if (*fixup_percpu_cgroup_storage) {
map_fds[8] = create_cgroup_storage(true);
do {
prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
fixup_percpu_cgroup_storage++;
} while (*fixup_percpu_cgroup_storage);
}
if (*fixup_map_sockmap) {
map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
sizeof(int), 1);
do {
prog[*fixup_map_sockmap].imm = map_fds[9];
fixup_map_sockmap++;
} while (*fixup_map_sockmap);
}
if (*fixup_map_sockhash) {
map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
sizeof(int), 1);
do {
prog[*fixup_map_sockhash].imm = map_fds[10];
fixup_map_sockhash++;
} while (*fixup_map_sockhash);
}
if (*fixup_map_xskmap) {
map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
sizeof(int), 1);
do {
prog[*fixup_map_xskmap].imm = map_fds[11];
fixup_map_xskmap++;
} while (*fixup_map_xskmap);
}
if (*fixup_map_stacktrace) {
map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
sizeof(u64), 1);
do {
prog[*fixup_map_stacktrace].imm = map_fds[12];
fixup_map_stacktrace++;
} while (*fixup_map_stacktrace);
}
if (*fixup_map_spin_lock) {
map_fds[13] = create_map_spin_lock();
do {
prog[*fixup_map_spin_lock].imm = map_fds[13];
fixup_map_spin_lock++;
} while (*fixup_map_spin_lock);
}
if (*fixup_map_array_ro) {
map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
sizeof(struct test_val), 1,
BPF_F_RDONLY_PROG);
update_map(map_fds[14], 0);
do {
prog[*fixup_map_array_ro].imm = map_fds[14];
fixup_map_array_ro++;
} while (*fixup_map_array_ro);
}
if (*fixup_map_array_wo) {
map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
sizeof(struct test_val), 1,
BPF_F_WRONLY_PROG);
update_map(map_fds[15], 0);
do {
prog[*fixup_map_array_wo].imm = map_fds[15];
fixup_map_array_wo++;
} while (*fixup_map_array_wo);
}
if (*fixup_map_array_small) {
map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
1, 1, 0);
update_map(map_fds[16], 0);
do {
prog[*fixup_map_array_small].imm = map_fds[16];
fixup_map_array_small++;
} while (*fixup_map_array_small);
}
if (*fixup_sk_storage_map) {
map_fds[17] = create_sk_storage_map();
do {
prog[*fixup_sk_storage_map].imm = map_fds[17];
fixup_sk_storage_map++;
} while (*fixup_sk_storage_map);
}
if (*fixup_map_event_output) {
map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
sizeof(int), sizeof(int), 1, 0);
do {
prog[*fixup_map_event_output].imm = map_fds[18];
fixup_map_event_output++;
} while (*fixup_map_event_output);
}
if (*fixup_map_reuseport_array) {
map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
sizeof(u32), sizeof(u64), 1, 0);
do {
prog[*fixup_map_reuseport_array].imm = map_fds[19];
fixup_map_reuseport_array++;
} while (*fixup_map_reuseport_array);
}
if (*fixup_map_ringbuf) {
map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
0, getpagesize());
do {
prog[*fixup_map_ringbuf].imm = map_fds[20];
fixup_map_ringbuf++;
} while (*fixup_map_ringbuf);
}
if (*fixup_map_timer) {
map_fds[21] = create_map_timer();
do {
prog[*fixup_map_timer].imm = map_fds[21];
fixup_map_timer++;
} while (*fixup_map_timer);
}
if (*fixup_map_kptr) {
map_fds[22] = create_map_kptr();
do {
prog[*fixup_map_kptr].imm = map_fds[22];
fixup_map_kptr++;
} while (*fixup_map_kptr);
}
fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id);
}
struct libcap {
struct __user_cap_header_struct hdr;
struct __user_cap_data_struct data[2];
};
static int set_admin(bool admin)
{
int err;
if (admin) {
err = cap_enable_effective(ADMIN_CAPS, NULL);
if (err)
perror("cap_enable_effective(ADMIN_CAPS)");
} else {
err = cap_disable_effective(ADMIN_CAPS, NULL);
if (err)
perror("cap_disable_effective(ADMIN_CAPS)");
}
return err;
}
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
void *data, size_t size_data)
{
__u8 tmp[TEST_DATA_LEN << 2];
__u32 size_tmp = sizeof(tmp);
int err, saved_errno;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = data,
.data_size_in = size_data,
.data_out = tmp,
.data_size_out = size_tmp,
.repeat = 1,
);
if (unpriv)
set_admin(true);
err = bpf_prog_test_run_opts(fd_prog, &topts);
saved_errno = errno;
if (unpriv)
set_admin(false);
if (err) {
switch (saved_errno) {
case ENOTSUPP:
printf("Did not run the program (not supported) ");
return 0;
case EPERM:
if (unpriv) {
printf("Did not run the program (no permission) ");
return 0;
}
/* fallthrough; */
default:
printf("FAIL: Unexpected bpf_prog_test_run error (%s) ",
strerror(saved_errno));
return err;
}
}
if (topts.retval != expected_val && expected_val != POINTER_VALUE) {
printf("FAIL retval %d != %d ", topts.retval, expected_val);
return 1;
}
return 0;
}
/* Returns true if every part of exp (tab-separated) appears in log, in order.
*
* If exp is an empty string, returns true.
*/
static bool cmp_str_seq(const char *log, const char *exp)
{
char needle[200];
const char *p, *q;
int len;
do {
if (!strlen(exp))
break;
p = strchr(exp, '\t');
if (!p)
p = exp + strlen(exp);
len = p - exp;
if (len >= sizeof(needle) || !len) {
printf("FAIL\nTestcase bug\n");
return false;
}
strncpy(needle, exp, len);
needle[len] = 0;
q = strstr(log, needle);
if (!q) {
printf("FAIL\nUnexpected verifier log!\n"
"EXP: %s\nRES:\n", needle);
return false;
}
log = q + len;
exp = p + 1;
} while (*p);
return true;
}
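/* Worked example (illustrative): an expected string such as
* "invalid indirect read\tfrom stack" matches a log like
* "0: invalid indirect read from stack off -8" because
* "invalid indirect read" is located first and "from stack" is then
* searched for only in the remainder of the log after that match.
*/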
static struct bpf_insn *get_xlated_program(int fd_prog, int *cnt)
{
__u32 buf_element_size = sizeof(struct bpf_insn);
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
__u32 xlated_prog_len;
struct bpf_insn *buf;
if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
perror("bpf_prog_get_info_by_fd failed");
return NULL;
}
xlated_prog_len = info.xlated_prog_len;
if (xlated_prog_len % buf_element_size) {
printf("Program length %d is not multiple of %d\n",
xlated_prog_len, buf_element_size);
return NULL;
}
*cnt = xlated_prog_len / buf_element_size;
buf = calloc(*cnt, buf_element_size);
if (!buf) {
perror("can't allocate xlated program buffer");
return NULL;
}
bzero(&info, sizeof(info));
info.xlated_prog_len = xlated_prog_len;
info.xlated_prog_insns = (__u64)(unsigned long)buf;
if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
perror("second bpf_prog_get_info_by_fd failed");
goto out_free_buf;
}
return buf;
out_free_buf:
free(buf);
return NULL;
}
static bool is_null_insn(struct bpf_insn *insn)
{
struct bpf_insn null_insn = {};
return memcmp(insn, &null_insn, sizeof(null_insn)) == 0;
}
static bool is_skip_insn(struct bpf_insn *insn)
{
struct bpf_insn skip_insn = SKIP_INSNS();
return memcmp(insn, &skip_insn, sizeof(skip_insn)) == 0;
}
static int null_terminated_insn_len(struct bpf_insn *seq, int max_len)
{
int i;
for (i = 0; i < max_len; ++i) {
if (is_null_insn(&seq[i]))
return i;
}
return max_len;
}
static bool compare_masked_insn(struct bpf_insn *orig, struct bpf_insn *masked)
{
struct bpf_insn orig_masked;
memcpy(&orig_masked, orig, sizeof(orig_masked));
if (masked->imm == INSN_IMM_MASK)
orig_masked.imm = INSN_IMM_MASK;
if (masked->off == INSN_OFF_MASK)
orig_masked.off = INSN_OFF_MASK;
return memcmp(&orig_masked, masked, sizeof(orig_masked)) == 0;
}
static int find_insn_subseq(struct bpf_insn *seq, struct bpf_insn *subseq,
int seq_len, int subseq_len)
{
int i, j;
if (subseq_len > seq_len)
return -1;
for (i = 0; i < seq_len - subseq_len + 1; ++i) {
bool found = true;
for (j = 0; j < subseq_len; ++j) {
if (!compare_masked_insn(&seq[i + j], &subseq[j])) {
found = false;
break;
}
}
if (found)
return i;
}
return -1;
}
static int find_skip_insn_marker(struct bpf_insn *seq, int len)
{
int i;
for (i = 0; i < len; ++i)
if (is_skip_insn(&seq[i]))
return i;
return -1;
}
/* Return true if all sub-sequences in `subseqs` can be found in
* `seq` one after another. Sub-sequences are separated by a single
* SKIP_INSNS() marker instruction, and the overall list is terminated
* by a nil instruction.
*/
static bool find_all_insn_subseqs(struct bpf_insn *seq, struct bpf_insn *subseqs,
int seq_len, int max_subseqs_len)
{
int subseqs_len = null_terminated_insn_len(subseqs, max_subseqs_len);
while (subseqs_len > 0) {
int skip_idx = find_skip_insn_marker(subseqs, subseqs_len);
int cur_subseq_len = skip_idx < 0 ? subseqs_len : skip_idx;
int subseq_idx = find_insn_subseq(seq, subseqs,
seq_len, cur_subseq_len);
if (subseq_idx < 0)
return false;
seq += subseq_idx + cur_subseq_len;
seq_len -= subseq_idx + cur_subseq_len;
subseqs += cur_subseq_len + 1;
subseqs_len -= cur_subseq_len + 1;
}
return true;
}
static void print_insn(struct bpf_insn *buf, int cnt)
{
int i;
printf(" addr op d s off imm\n");
for (i = 0; i < cnt; ++i) {
struct bpf_insn *insn = &buf[i];
if (is_null_insn(insn))
break;
if (is_skip_insn(insn))
printf(" ...\n");
else
printf(" %04x: %02x %1x %x %04hx %08x\n",
i, insn->code, insn->dst_reg,
insn->src_reg, insn->off, insn->imm);
}
}
static bool check_xlated_program(struct bpf_test *test, int fd_prog)
{
struct bpf_insn *buf;
int cnt;
bool result = true;
bool check_expected = !is_null_insn(test->expected_insns);
bool check_unexpected = !is_null_insn(test->unexpected_insns);
if (!check_expected && !check_unexpected)
goto out;
buf = get_xlated_program(fd_prog, &cnt);
if (!buf) {
printf("FAIL: can't get xlated program\n");
result = false;
goto out;
}
if (check_expected &&
!find_all_insn_subseqs(buf, test->expected_insns,
cnt, MAX_EXPECTED_INSNS)) {
printf("FAIL: can't find expected subsequence of instructions\n");
result = false;
if (verbose) {
printf("Program:\n");
print_insn(buf, cnt);
printf("Expected subsequence:\n");
print_insn(test->expected_insns, MAX_EXPECTED_INSNS);
}
}
if (check_unexpected &&
find_all_insn_subseqs(buf, test->unexpected_insns,
cnt, MAX_UNEXPECTED_INSNS)) {
printf("FAIL: found unexpected subsequence of instructions\n");
result = false;
if (verbose) {
printf("Program:\n");
print_insn(buf, cnt);
printf("Un-expected subsequence:\n");
print_insn(test->unexpected_insns, MAX_UNEXPECTED_INSNS);
}
}
free(buf);
out:
return result;
}
static void do_test_single(struct bpf_test *test, bool unpriv,
int *passes, int *errors)
{
int fd_prog, btf_fd, expected_ret, alignment_prevented_execution;
int prog_len, prog_type = test->prog_type;
struct bpf_insn *prog = test->insns;
LIBBPF_OPTS(bpf_prog_load_opts, opts);
int run_errs, run_successes;
int map_fds[MAX_NR_MAPS];
const char *expected_err;
int fd_array[2] = { -1, -1 };
int saved_errno;
int fixup_skips;
__u32 pflags;
int i, err;
fd_prog = -1;
for (i = 0; i < MAX_NR_MAPS; i++)
map_fds[i] = -1;
btf_fd = -1;
if (!prog_type)
prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
fixup_skips = skips;
do_test_fixup(test, prog_type, prog, map_fds, &fd_array[1]);
if (test->fill_insns) {
prog = test->fill_insns;
prog_len = test->prog_len;
} else {
prog_len = probe_filter_length(prog);
}
/* If there were some map skips during fixup due to missing bpf
* features, skip this test.
*/
if (fixup_skips != skips)
return;
pflags = BPF_F_TEST_RND_HI32;
if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
pflags |= BPF_F_STRICT_ALIGNMENT;
if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
pflags |= BPF_F_ANY_ALIGNMENT;
if (test->flags & ~3)
pflags |= test->flags;
expected_ret = unpriv && test->result_unpriv != UNDEF ?
test->result_unpriv : test->result;
expected_err = unpriv && test->errstr_unpriv ?
test->errstr_unpriv : test->errstr;
opts.expected_attach_type = test->expected_attach_type;
if (verbose)
opts.log_level = verif_log_level | 4; /* force stats */
else if (expected_ret == VERBOSE_ACCEPT)
opts.log_level = 2;
else
opts.log_level = DEFAULT_LIBBPF_LOG_LEVEL;
opts.prog_flags = pflags;
if (fd_array[1] != -1)
opts.fd_array = &fd_array[0];
if ((prog_type == BPF_PROG_TYPE_TRACING ||
prog_type == BPF_PROG_TYPE_LSM) && test->kfunc) {
int attach_btf_id;
attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
opts.expected_attach_type);
if (attach_btf_id < 0) {
printf("FAIL\nFailed to find BTF ID for '%s'!\n",
test->kfunc);
(*errors)++;
return;
}
opts.attach_btf_id = attach_btf_id;
}
if (test->btf_types[0] != 0) {
btf_fd = load_btf_for_test(test);
if (btf_fd < 0)
goto fail_log;
opts.prog_btf_fd = btf_fd;
}
if (test->func_info_cnt != 0) {
opts.func_info = test->func_info;
opts.func_info_cnt = test->func_info_cnt;
opts.func_info_rec_size = sizeof(test->func_info[0]);
}
opts.log_buf = bpf_vlog;
opts.log_size = sizeof(bpf_vlog);
fd_prog = bpf_prog_load(prog_type, NULL, "GPL", prog, prog_len, &opts);
saved_errno = errno;
/* BPF_PROG_TYPE_TRACING requires more setup and
* libbpf_probe_bpf_prog_type() won't give a correct answer
*/
if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING &&
!libbpf_probe_bpf_prog_type(prog_type, NULL)) {
printf("SKIP (unsupported program type %d)\n", prog_type);
skips++;
goto close_fds;
}
if (fd_prog < 0 && saved_errno == ENOTSUPP) {
printf("SKIP (program uses an unsupported feature)\n");
skips++;
goto close_fds;
}
alignment_prevented_execution = 0;
if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
if (fd_prog < 0) {
printf("FAIL\nFailed to load prog '%s'!\n",
strerror(saved_errno));
goto fail_log;
}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (fd_prog >= 0 &&
(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
alignment_prevented_execution = 1;
#endif
if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) {
goto fail_log;
}
} else {
if (fd_prog >= 0) {
printf("FAIL\nUnexpected success to load!\n");
goto fail_log;
}
if (!expected_err || !cmp_str_seq(bpf_vlog, expected_err)) {
printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
expected_err, bpf_vlog);
goto fail_log;
}
}
if (!unpriv && test->insn_processed) {
uint32_t insn_processed;
char *proc;
proc = strstr(bpf_vlog, "processed ");
insn_processed = atoi(proc + 10);
if (test->insn_processed != insn_processed) {
printf("FAIL\nUnexpected insn_processed %u vs %u\n",
insn_processed, test->insn_processed);
goto fail_log;
}
}
if (verbose)
printf(", verifier log:\n%s", bpf_vlog);
if (!check_xlated_program(test, fd_prog))
goto fail_log;
run_errs = 0;
run_successes = 0;
if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
uint32_t expected_val;
int i;
if (!test->runs)
test->runs = 1;
for (i = 0; i < test->runs; i++) {
if (unpriv && test->retvals[i].retval_unpriv)
expected_val = test->retvals[i].retval_unpriv;
else
expected_val = test->retvals[i].retval;
err = do_prog_test_run(fd_prog, unpriv, expected_val,
test->retvals[i].data,
sizeof(test->retvals[i].data));
if (err) {
printf("(run %d/%d) ", i + 1, test->runs);
run_errs++;
} else {
run_successes++;
}
}
}
if (!run_errs) {
(*passes)++;
if (run_successes > 1)
printf("%d cases ", run_successes);
printf("OK");
if (alignment_prevented_execution)
printf(" (NOTE: not executed due to unknown alignment)");
printf("\n");
} else {
printf("\n");
goto fail_log;
}
close_fds:
if (test->fill_insns)
free(test->fill_insns);
close(fd_prog);
close(btf_fd);
for (i = 0; i < MAX_NR_MAPS; i++)
close(map_fds[i]);
sched_yield();
return;
fail_log:
(*errors)++;
printf("%s", bpf_vlog);
goto close_fds;
}
static bool is_admin(void)
{
__u64 caps;
/* The tests check for the finer-grained capabilities CAP_NET_ADMIN,
* CAP_PERFMON, and CAP_BPF instead of CAP_SYS_ADMIN.
* Thus, disable CAP_SYS_ADMIN at the beginning.
*/
if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps)) {
perror("cap_disable_effective(CAP_SYS_ADMIN)");
return false;
}
return (caps & ADMIN_CAPS) == ADMIN_CAPS;
}
static bool test_as_unpriv(struct bpf_test *test)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Some architectures have strict alignment requirements. In
* that case, the BPF verifier detects if a program has
* unaligned accesses and rejects them. A user can pass
* BPF_F_ANY_ALIGNMENT to a program to override this
* check. That, however, will only work when a privileged user
* loads a program. An unprivileged user loading a program
* with this flag will be rejected prior to entering the
* verifier.
*/
if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
return false;
#endif
return !test->prog_type ||
test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
int i, passes = 0, errors = 0;
/* ensure previous instance of the module is unloaded */
unload_bpf_testmod(verbose);
if (load_bpf_testmod(verbose))
return EXIT_FAILURE;
for (i = from; i < to; i++) {
struct bpf_test *test = &tests[i];
/* Skip right away program types that are not supported
* for non-root users.
*/
if (test_as_unpriv(test) && unpriv_disabled) {
printf("#%d/u %s SKIP\n", i, test->descr);
skips++;
} else if (test_as_unpriv(test)) {
if (!unpriv)
set_admin(false);
printf("#%d/u %s ", i, test->descr);
do_test_single(test, true, &passes, &errors);
if (!unpriv)
set_admin(true);
}
if (unpriv) {
printf("#%d/p %s SKIP\n", i, test->descr);
skips++;
} else {
printf("#%d/p %s ", i, test->descr);
do_test_single(test, false, &passes, &errors);
}
}
unload_bpf_testmod(verbose);
kfuncs_cleanup();
printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
skips, errors);
return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
unsigned int from = 0, to = ARRAY_SIZE(tests);
bool unpriv = !is_admin();
int arg = 1;
if (argc > 1 && strcmp(argv[1], "-v") == 0) {
arg++;
verbose = true;
verif_log_level = 1;
argc--;
}
if (argc > 1 && strcmp(argv[1], "-vv") == 0) {
arg++;
verbose = true;
verif_log_level = 2;
argc--;
}
if (argc == 3) {
unsigned int l = atoi(argv[arg]);
unsigned int u = atoi(argv[arg + 1]);
if (l < to && u < to) {
from = l;
to = u + 1;
}
} else if (argc == 2) {
unsigned int t = atoi(argv[arg]);
if (t < to) {
from = t;
to = t + 1;
}
}
unpriv_disabled = get_unpriv_disabled();
if (unpriv && unpriv_disabled) {
printf("Cannot run as unprivileged user with sysctl %s.\n",
UNPRIV_SYSCTL);
return EXIT_FAILURE;
}
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
bpf_semi_rand_init();
return do_test(unpriv, from, to);
}
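/* Typical invocations (illustrative):
*
*	./test_verifier             run all tests, privileged and unprivileged
*	./test_verifier 42          run only test #42
*	./test_verifier 10 20       run tests #10 through #20
*	./test_verifier -v 10 20    same, with verifier log level 1
*/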
| linux-master | tools/testing/selftests/bpf/test_verifier.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#include "testing_helpers.h"
#define CHECK(condition, tag, format...) ({ \
int __ret = !!(condition); \
if (__ret) { \
printf("%s:FAIL:%s ", __func__, tag); \
printf(format); \
} else { \
printf("%s:PASS:%s\n", __func__, tag); \
} \
__ret; \
})
static int bpf_find_map(const char *test, struct bpf_object *obj,
const char *name)
{
struct bpf_map *map;
map = bpf_object__find_map_by_name(obj, name);
if (!map)
return -1;
return bpf_map__fd(map);
}
#define TEST_CGROUP "/test-bpf-get-cgroup-id/"
int main(int argc, char **argv)
{
const char *probe_name = "syscalls/sys_enter_nanosleep";
const char *file = "get_cgroup_id_kern.bpf.o";
int err, bytes, efd, prog_fd, pmu_fd;
int cgroup_fd, cgidmap_fd, pidmap_fd;
struct perf_event_attr attr = {};
struct bpf_object *obj;
__u64 kcgid = 0, ucgid;
__u32 key = 0, pid;
int exit_code = 1;
char buf[256];
const struct timespec req = {
.tv_sec = 1,
.tv_nsec = 0,
};
cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
return 1;
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno))
goto cleanup_cgroup_env;
cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids");
if (CHECK(cgidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
cgidmap_fd, errno))
goto close_prog;
pidmap_fd = bpf_find_map(__func__, obj, "pidmap");
if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
pidmap_fd, errno))
goto close_prog;
pid = getpid();
bpf_map_update_elem(pidmap_fd, &key, &pid, 0);
if (access("/sys/kernel/tracing/trace", F_OK) == 0) {
snprintf(buf, sizeof(buf),
"/sys/kernel/tracing/events/%s/id", probe_name);
} else {
snprintf(buf, sizeof(buf),
"/sys/kernel/debug/tracing/events/%s/id", probe_name);
}
efd = open(buf, O_RDONLY, 0);
if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
goto close_prog;
bytes = read(efd, buf, sizeof(buf));
close(efd);
if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
"bytes %d errno %d\n", bytes, errno))
goto close_prog;
attr.config = strtol(buf, NULL, 0);
attr.type = PERF_TYPE_TRACEPOINT;
attr.sample_type = PERF_SAMPLE_RAW;
attr.sample_period = 1;
attr.wakeup_events = 1;
/* attach to this pid so that all bpf invocations will be in the
* cgroup associated with this pid.
*/
pmu_fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
errno))
goto close_prog;
err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
errno))
goto close_pmu;
err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
errno))
goto close_pmu;
/* trigger some syscalls */
syscall(__NR_nanosleep, &req, NULL);
err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid);
if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno))
goto close_pmu;
ucgid = get_cgroup_id(TEST_CGROUP);
if (CHECK(kcgid != ucgid, "compare_cgroup_id",
"kern cgid %llx user cgid %llx", kcgid, ucgid))
goto close_pmu;
exit_code = 0;
printf("%s:PASS\n", argv[0]);
close_pmu:
close(pmu_fd);
close_prog:
bpf_object__close(obj);
cleanup_cgroup_env:
cleanup_cgroup_environment();
return exit_code;
}
| linux-master | tools/testing/selftests/bpf/get_cgroup_id_user.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"
#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"
typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);
DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;
struct bpf_testmod_struct_arg_1 {
int a;
};
struct bpf_testmod_struct_arg_2 {
long a;
long b;
};
struct bpf_testmod_struct_arg_3 {
int a;
int b[];
};
struct bpf_testmod_struct_arg_4 {
u64 a;
int b;
};
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global functions as their definitions will be in bpf_testmod.ko BTF");
noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
return bpf_testmod_test_struct_arg_result;
}
noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
return bpf_testmod_test_struct_arg_result;
}
noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
return bpf_testmod_test_struct_arg_result;
}
noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
int c, int d, struct bpf_testmod_struct_arg_2 e) {
bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
return bpf_testmod_test_struct_arg_result;
}
noinline int
bpf_testmod_test_struct_arg_5(void) {
bpf_testmod_test_struct_arg_result = 1;
return bpf_testmod_test_struct_arg_result;
}
noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
bpf_testmod_test_struct_arg_result = a->b[0];
return bpf_testmod_test_struct_arg_result;
}
noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
struct bpf_testmod_struct_arg_4 f)
{
bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
(long)e + f.a + f.b;
return bpf_testmod_test_struct_arg_result;
}
noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
struct bpf_testmod_struct_arg_4 f, int g)
{
bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
(long)e + f.a + f.b + g;
return bpf_testmod_test_struct_arg_result;
}
noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
bpf_testmod_test_struct_arg_result = a->a;
return bpf_testmod_test_struct_arg_result;
}
__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
if (cnt < 0) {
it->cnt = 0;
return -EINVAL;
}
it->value = value;
it->cnt = cnt;
return 0;
}
__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it)
{
if (it->cnt <= 0)
return NULL;
it->cnt--;
return &it->value;
}
__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
it->cnt = 0;
}
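/* Illustrative BPF-side usage of the open-coded iterator kfuncs above
* (a sketch of what a selftest BPF program could do, not kernel code):
*
*	struct bpf_iter_testmod_seq it;
*	s64 *v, sum = 0;
*
*	bpf_iter_testmod_seq_new(&it, 100, 3);
*	while ((v = bpf_iter_testmod_seq_next(&it)))
*		sum += *v;
*	bpf_iter_testmod_seq_destroy(&it);
*
* The loop yields the value 100 three times, so sum ends up as 300.
*/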
struct bpf_testmod_btf_type_tag_1 {
int a;
};
struct bpf_testmod_btf_type_tag_2 {
struct bpf_testmod_btf_type_tag_1 __user *p;
};
struct bpf_testmod_btf_type_tag_3 {
struct bpf_testmod_btf_type_tag_1 __percpu *p;
};
noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
BTF_TYPE_EMIT(func_proto_typedef);
BTF_TYPE_EMIT(func_proto_typedef_nested1);
BTF_TYPE_EMIT(func_proto_typedef_nested2);
return arg->a;
}
noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
return arg->p->a;
}
noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
return arg->a;
}
noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
return arg->p->a;
}
noinline int bpf_testmod_loop_test(int n)
{
/* Make sum volatile, so smart compilers, such as clang, will not
* optimize the code by removing the loop.
*/
volatile int sum = 0;
int i;
/* the primary goal of this test is to test LBR. Create a lot of
* branches in the function, so we can catch it easily.
*/
for (i = 0; i < n; i++)
sum += i;
return sum;
}
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
static struct file f = {};
switch (arg) {
case 1: return (void *)EINVAL; /* user addr */
case 2: return (void *)0xcafe4a11; /* user addr */
case 3: return (void *)-EINVAL; /* canonical, but invalid */
case 4: return (void *)(1ull << 60); /* non-canonical and invalid */
case 5: return (void *)~(1ull << 30); /* trigger extable */
case 6: return &f; /* valid addr */
case 7: return (void *)((long)&f | 1); /* kernel tricks */
default: return NULL;
}
}
noinline int bpf_testmod_fentry_test1(int a)
{
return a + 1;
}
noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
return a + b;
}
noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
return a + b + c;
}
noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
void *e, char f, int g)
{
return a + (long)b + c + d + (long)e + f + g;
}
noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
void *e, char f, int g,
unsigned int h, long i, __u64 j,
unsigned long k)
{
return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}
int bpf_testmod_fentry_ok;
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t len)
{
struct bpf_testmod_test_read_ctx ctx = {
.buf = buf,
.off = off,
.len = len,
};
struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
struct bpf_testmod_struct_arg_3 *struct_arg3;
struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
int i = 1;
while (bpf_testmod_return_ptr(i))
i++;
(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
(void)bpf_testmod_test_struct_arg_5();
(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
(void *)20, struct_arg4);
(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
(void *)20, struct_arg4, 23);
(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
sizeof(int)), GFP_KERNEL);
if (struct_arg3 != NULL) {
struct_arg3->b[0] = 1;
(void)bpf_testmod_test_struct_arg_6(struct_arg3);
kfree(struct_arg3);
}
/* This is always true. Use the check to make sure the compiler
* doesn't remove bpf_testmod_loop_test.
*/
if (bpf_testmod_loop_test(101) > 100)
trace_bpf_testmod_test_read(current, &ctx);
/* Magic number to enable writable tp */
if (len == 64) {
struct bpf_testmod_test_writable_ctx writable = {
.val = 1024,
};
trace_bpf_testmod_test_writable_bare(&writable);
if (writable.early_ret)
return snprintf(buf, len, "%d\n", writable.val);
}
if (bpf_testmod_fentry_test1(1) != 2 ||
bpf_testmod_fentry_test2(2, 3) != 5 ||
bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
21, 22) != 133 ||
bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
21, 22, 23, 24, 25, 26) != 231)
goto out;
bpf_testmod_fentry_ok = 1;
out:
return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t len)
{
struct bpf_testmod_test_write_ctx ctx = {
.buf = buf,
.off = off,
.len = len,
};
trace_bpf_testmod_test_write_bare(current, &ctx);
return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);
noinline int bpf_fentry_shadow_test(int a)
{
return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
__diag_pop();
static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
.attr = { .name = "bpf_testmod", .mode = 0666, },
.read = bpf_testmod_test_read,
.write = bpf_testmod_test_write,
};
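/* The kfunc ID sets below are registered with the BTF kfunc infrastructure
 * in bpf_testmod_init() so that BPF selftest programs can call these kfuncs;
 * the KF_* flags describe acquire/release/iterator semantics to the verifier.
 */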
BTF_SET8_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_SET8_END(bpf_testmod_common_kfunc_ids)
static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_testmod_common_kfunc_ids,
};
__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
return a + b + c + d;
}
__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
return a + b;
}
__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
return sk;
}
__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
/* Provoke the compiler to assume that the caller has sign-extended a,
* b and c on platforms where this is required (e.g. s390x).
*/
return (long)a + (long)b + (long)c + d;
}
static struct prog_test_ref_kfunc prog_test_struct = {
.a = 42,
.b = 108,
.next = &prog_test_struct,
.cnt = REFCOUNT_INIT(1),
};
__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
refcount_inc(&prog_test_struct.cnt);
return &prog_test_struct;
}
__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
WARN_ON_ONCE(1);
}
__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
WARN_ON_ONCE(1);
return NULL;
}
__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
WARN_ON_ONCE(1);
}
static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
if (size > 2 * sizeof(int))
return NULL;
return (int *)p;
}
__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
const int rdwr_buf_size)
{
return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}
__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
const int rdonly_buf_size)
{
return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}
/* The next 2 functions can't really be used for testing, except to ensure
* that the verifier rejects the call.
* Acquire functions must return struct pointers, so these ones are
* failing.
*/
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
const int rdonly_buf_size)
{
return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}
__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}
__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}
__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}
__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}
__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}
__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}
__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}
__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}
__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}
__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}
__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
/* p != NULL, but p->cnt could be 0 */
}
__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}
__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
return arg;
}
BTF_SET8_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_SET8_END(bpf_testmod_check_kfunc_ids)
static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_testmod_check_kfunc_ids,
};
extern int bpf_fentry_test1(int a);
static int bpf_testmod_init(void)
{
int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
if (ret < 0)
return ret;
if (bpf_fentry_test1(0) < 0)
return -EINVAL;
return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}
static void bpf_testmod_exit(void)
{
return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}
module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);
MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c |
// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#define _GNU_SOURCE
#include <test_progs.h>
#include <network_helpers.h>
#include <ctype.h>
#define CMD_OUT_BUF_SIZE 1023
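/* Run a shell command and return its stdout as a FILE *.  On popen()
 * failure the surrounding function jumps to its "out" label, so SYS_OUT
 * may only be used where such a label exists.
 */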
#define SYS_OUT(cmd, ...) ({ \
char buf[1024]; \
snprintf(buf, sizeof(buf), (cmd), ##__VA_ARGS__); \
FILE *f = popen(buf, "r"); \
if (!ASSERT_OK_PTR(f, buf)) \
goto out; \
f; \
})
/* out must be at least `size * 4 + 1` bytes long */
static void escape_str(char *out, const char *in, size_t size)
{
static const char *hex = "0123456789ABCDEF";
size_t i;
for (i = 0; i < size; i++) {
if (isprint(in[i]) && in[i] != '\\' && in[i] != '\'') {
*out++ = in[i];
} else {
*out++ = '\\';
*out++ = 'x';
*out++ = hex[(in[i] >> 4) & 0xf];
*out++ = hex[in[i] & 0xf];
}
}
*out++ = '\0';
}
static bool expect_str(char *buf, size_t size, const char *str, const char *name)
{
static char escbuf_expected[CMD_OUT_BUF_SIZE * 4];
static char escbuf_actual[CMD_OUT_BUF_SIZE * 4];
static int duration = 0;
bool ok;
ok = size == strlen(str) && !memcmp(buf, str, size);
if (!ok) {
escape_str(escbuf_expected, str, strlen(str));
escape_str(escbuf_actual, buf, size);
}
CHECK(!ok, name, "unexpected %s: actual '%s' != expected '%s'\n",
name, escbuf_actual, escbuf_expected);
return ok;
}
static void test_synproxy(bool xdp)
{
int server_fd = -1, client_fd = -1, accept_fd = -1;
char *prog_id = NULL, *prog_id_end;
struct nstoken *ns = NULL;
FILE *ctrl_file = NULL;
char buf[CMD_OUT_BUF_SIZE];
size_t size;
SYS(out, "ip netns add synproxy");
SYS(out, "ip link add tmp0 type veth peer name tmp1");
SYS(out, "ip link set tmp1 netns synproxy");
SYS(out, "ip link set tmp0 up");
SYS(out, "ip addr replace 198.18.0.1/24 dev tmp0");
/* When checksum offload is enabled, the XDP program sees wrong
* checksums and drops packets.
*/
SYS(out, "ethtool -K tmp0 tx off");
if (xdp)
/* Workaround required for veth. */
SYS(out, "ip link set tmp0 xdp object xdp_dummy.bpf.o section xdp 2> /dev/null");
ns = open_netns("synproxy");
if (!ASSERT_OK_PTR(ns, "setns"))
goto out;
SYS(out, "ip link set lo up");
SYS(out, "ip link set tmp1 up");
SYS(out, "ip addr replace 198.18.0.2/24 dev tmp1");
SYS(out, "sysctl -w net.ipv4.tcp_syncookies=2");
SYS(out, "sysctl -w net.ipv4.tcp_timestamps=1");
SYS(out, "sysctl -w net.netfilter.nf_conntrack_tcp_loose=0");
SYS(out, "iptables-legacy -t raw -I PREROUTING \
-i tmp1 -p tcp -m tcp --syn --dport 8080 -j CT --notrack");
SYS(out, "iptables-legacy -t filter -A INPUT \
-i tmp1 -p tcp -m tcp --dport 8080 -m state --state INVALID,UNTRACKED \
-j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460");
SYS(out, "iptables-legacy -t filter -A INPUT \
-i tmp1 -m state --state INVALID -j DROP");
ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --ports 8080 \
--single --mss4 1460 --mss6 1440 \
--wscale 7 --ttl 64%s", xdp ? "" : " --tc");
size = fread(buf, 1, sizeof(buf), ctrl_file);
pclose(ctrl_file);
if (!expect_str(buf, size, "Total SYNACKs generated: 0\n",
"initial SYNACKs"))
goto out;
if (!xdp) {
ctrl_file = SYS_OUT("tc filter show dev tmp1 ingress");
size = fread(buf, 1, sizeof(buf), ctrl_file);
pclose(ctrl_file);
prog_id = memmem(buf, size, " id ", 4);
if (!ASSERT_OK_PTR(prog_id, "find prog id"))
goto out;
prog_id += 4;
if (!ASSERT_LT(prog_id, buf + size, "find prog id begin"))
goto out;
prog_id_end = prog_id;
while (prog_id_end < buf + size && *prog_id_end >= '0' &&
*prog_id_end <= '9')
prog_id_end++;
if (!ASSERT_LT(prog_id_end, buf + size, "find prog id end"))
goto out;
*prog_id_end = '\0';
}
server_fd = start_server(AF_INET, SOCK_STREAM, "198.18.0.2", 8080, 0);
if (!ASSERT_GE(server_fd, 0, "start_server"))
goto out;
close_netns(ns);
ns = NULL;
client_fd = connect_to_fd(server_fd, 10000);
if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
goto out;
accept_fd = accept(server_fd, NULL, NULL);
if (!ASSERT_GE(accept_fd, 0, "accept"))
goto out;
ns = open_netns("synproxy");
if (!ASSERT_OK_PTR(ns, "setns"))
goto out;
if (xdp)
ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --single");
else
ctrl_file = SYS_OUT("./xdp_synproxy --prog %s --single",
prog_id);
size = fread(buf, 1, sizeof(buf), ctrl_file);
pclose(ctrl_file);
if (!expect_str(buf, size, "Total SYNACKs generated: 1\n",
"SYNACKs after connection"))
goto out;
out:
if (accept_fd >= 0)
close(accept_fd);
if (client_fd >= 0)
close(client_fd);
if (server_fd >= 0)
close(server_fd);
if (ns)
close_netns(ns);
SYS_NOFAIL("ip link del tmp0");
SYS_NOFAIL("ip netns del synproxy");
}
void test_xdp_synproxy(void)
{
if (test__start_subtest("xdp"))
test_synproxy(true);
if (test__start_subtest("tc"))
test_synproxy(false);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "atomics.lskel.h"
static void test_add(struct atomics_lskel *skel)
{
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
/* No need to attach it, just run it directly */
prog_fd = skel->progs.add.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
return;
ASSERT_EQ(skel->data->add64_value, 3, "add64_value");
ASSERT_EQ(skel->bss->add64_result, 1, "add64_result");
ASSERT_EQ(skel->data->add32_value, 3, "add32_value");
ASSERT_EQ(skel->bss->add32_result, 1, "add32_result");
ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value");
ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result");
ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");
}
static void test_sub(struct atomics_lskel *skel)
{
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
/* No need to attach it, just run it directly */
prog_fd = skel->progs.sub.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
return;
ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value");
ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result");
ASSERT_EQ(skel->data->sub32_value, -1, "sub32_value");
ASSERT_EQ(skel->bss->sub32_result, 1, "sub32_result");
ASSERT_EQ(skel->bss->sub_stack_value_copy, -1, "sub_stack_value");
ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result");
ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");
}
static void test_and(struct atomics_lskel *skel)
{
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
/* No need to attach it, just run it directly */
prog_fd = skel->progs.and.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
return;
ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value");
ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result");
ASSERT_EQ(skel->data->and32_value, 0x010, "and32_value");
ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result");
ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");
}
static void test_or(struct atomics_lskel *skel)
{
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
/* No need to attach it, just run it directly */
prog_fd = skel->progs.or.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
return;
ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value");
ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result");
ASSERT_EQ(skel->data->or32_value, 0x111, "or32_value");
ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result");
ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");
}
static void test_xor(struct atomics_lskel *skel)
{
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
/* No need to attach it, just run it directly */
prog_fd = skel->progs.xor.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
return;
ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value");
ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result");
ASSERT_EQ(skel->data->xor32_value, 0x101, "xor32_value");
ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result");
	ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_noreturn_value");
}
static void test_cmpxchg(struct atomics_lskel *skel)
{
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
/* No need to attach it, just run it directly */
prog_fd = skel->progs.cmpxchg.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
return;
ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value");
ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg_result_fail");
ASSERT_EQ(skel->bss->cmpxchg64_result_succeed, 1, "cmpxchg_result_succeed");
	ASSERT_EQ(skel->data->cmpxchg32_value, 2, "cmpxchg32_value");
ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg_result_fail");
ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed");
}
static void test_xchg(struct atomics_lskel *skel)
{
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
/* No need to attach it, just run it directly */
prog_fd = skel->progs.xchg.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
return;
ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value");
ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result");
ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value");
ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
}
void test_atomics(void)
{
struct atomics_lskel *skel;
skel = atomics_lskel__open_and_load();
if (!ASSERT_OK_PTR(skel, "atomics skeleton load"))
return;
if (skel->data->skip_tests) {
printf("%s:SKIP:no ENABLE_ATOMICS_TESTS (missing Clang BPF atomics support)",
__func__);
test__skip();
goto cleanup;
}
skel->bss->pid = getpid();
if (test__start_subtest("add"))
test_add(skel);
if (test__start_subtest("sub"))
test_sub(skel);
if (test__start_subtest("and"))
test_and(skel);
if (test__start_subtest("or"))
test_or(skel);
if (test__start_subtest("xor"))
test_xor(skel);
if (test__start_subtest("cmpxchg"))
test_cmpxchg(skel);
if (test__start_subtest("xchg"))
test_xchg(skel);
cleanup:
atomics_lskel__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/atomics.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */
#include <stdlib.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/if_ether.h>
#include <sys/types.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
#include "test_progs.h"
#include "test_select_reuseport_common.h"
#define MAX_TEST_NAME 80
#define MIN_TCPHDR_LEN 20
#define UDPHDR_LEN 8
#define TCP_SYNCOOKIE_SYSCTL "/proc/sys/net/ipv4/tcp_syncookies"
#define TCP_FO_SYSCTL "/proc/sys/net/ipv4/tcp_fastopen"
#define REUSEPORT_ARRAY_SIZE 32
static int result_map, tmp_index_ovr_map, linum_map, data_check_map;
static __u32 expected_results[NR_RESULTS];
static int sk_fds[REUSEPORT_ARRAY_SIZE];
static int reuseport_array = -1, outer_map = -1;
static enum bpf_map_type inner_map_type;
static int select_by_skb_data_prog;
static int saved_tcp_syncookie = -1;
static struct bpf_object *obj;
static int saved_tcp_fo = -1;
static __u32 index_zero;
static int epfd;
static union sa46 {
struct sockaddr_in6 v6;
struct sockaddr_in v4;
sa_family_t family;
} srv_sa;
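/* Error helpers: on a failed condition they mark the test as failed via
 * CHECK_FAIL(), print the tag and message, and bail out of the calling
 * function.  RET_IF() returns void, RET_ERR() returns -1.
 */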
#define RET_IF(condition, tag, format...) ({ \
if (CHECK_FAIL(condition)) { \
printf(tag " " format); \
return; \
} \
})
#define RET_ERR(condition, tag, format...) ({ \
if (CHECK_FAIL(condition)) { \
printf(tag " " format); \
return -1; \
} \
})
static int create_maps(enum bpf_map_type inner_type)
{
LIBBPF_OPTS(bpf_map_create_opts, opts);
inner_map_type = inner_type;
/* Creating reuseport_array */
reuseport_array = bpf_map_create(inner_type, "reuseport_array",
sizeof(__u32), sizeof(__u32), REUSEPORT_ARRAY_SIZE, NULL);
RET_ERR(reuseport_array < 0, "creating reuseport_array",
"reuseport_array:%d errno:%d\n", reuseport_array, errno);
/* Creating outer_map */
opts.inner_map_fd = reuseport_array;
outer_map = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer_map",
sizeof(__u32), sizeof(__u32), 1, &opts);
RET_ERR(outer_map < 0, "creating outer_map",
"outer_map:%d errno:%d\n", outer_map, errno);
return 0;
}
static int prepare_bpf_obj(void)
{
struct bpf_program *prog;
struct bpf_map *map;
int err;
obj = bpf_object__open("test_select_reuseport_kern.bpf.o");
err = libbpf_get_error(obj);
RET_ERR(err, "open test_select_reuseport_kern.bpf.o",
"obj:%p PTR_ERR(obj):%d\n", obj, err);
map = bpf_object__find_map_by_name(obj, "outer_map");
RET_ERR(!map, "find outer_map", "!map\n");
err = bpf_map__reuse_fd(map, outer_map);
RET_ERR(err, "reuse outer_map", "err:%d\n", err);
err = bpf_object__load(obj);
RET_ERR(err, "load bpf_object", "err:%d\n", err);
prog = bpf_object__next_program(obj, NULL);
RET_ERR(!prog, "get first bpf_program", "!prog\n");
select_by_skb_data_prog = bpf_program__fd(prog);
RET_ERR(select_by_skb_data_prog < 0, "get prog fd",
"select_by_skb_data_prog:%d\n", select_by_skb_data_prog);
map = bpf_object__find_map_by_name(obj, "result_map");
RET_ERR(!map, "find result_map", "!map\n");
result_map = bpf_map__fd(map);
RET_ERR(result_map < 0, "get result_map fd",
"result_map:%d\n", result_map);
map = bpf_object__find_map_by_name(obj, "tmp_index_ovr_map");
RET_ERR(!map, "find tmp_index_ovr_map\n", "!map");
tmp_index_ovr_map = bpf_map__fd(map);
RET_ERR(tmp_index_ovr_map < 0, "get tmp_index_ovr_map fd",
"tmp_index_ovr_map:%d\n", tmp_index_ovr_map);
map = bpf_object__find_map_by_name(obj, "linum_map");
RET_ERR(!map, "find linum_map", "!map\n");
linum_map = bpf_map__fd(map);
RET_ERR(linum_map < 0, "get linum_map fd",
"linum_map:%d\n", linum_map);
map = bpf_object__find_map_by_name(obj, "data_check_map");
RET_ERR(!map, "find data_check_map", "!map\n");
data_check_map = bpf_map__fd(map);
RET_ERR(data_check_map < 0, "get data_check_map fd",
"data_check_map:%d\n", data_check_map);
return 0;
}
static void sa46_init_loopback(union sa46 *sa, sa_family_t family)
{
memset(sa, 0, sizeof(*sa));
sa->family = family;
if (sa->family == AF_INET6)
sa->v6.sin6_addr = in6addr_loopback;
else
sa->v4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
}
static void sa46_init_inany(union sa46 *sa, sa_family_t family)
{
memset(sa, 0, sizeof(*sa));
sa->family = family;
if (sa->family == AF_INET6)
sa->v6.sin6_addr = in6addr_any;
else
sa->v4.sin_addr.s_addr = INADDR_ANY;
}
static int read_int_sysctl(const char *sysctl)
{
char buf[16];
int fd, ret;
fd = open(sysctl, 0);
RET_ERR(fd == -1, "open(sysctl)",
"sysctl:%s fd:%d errno:%d\n", sysctl, fd, errno);
ret = read(fd, buf, sizeof(buf));
RET_ERR(ret <= 0, "read(sysctl)",
"sysctl:%s ret:%d errno:%d\n", sysctl, ret, errno);
close(fd);
return atoi(buf);
}
static int write_int_sysctl(const char *sysctl, int v)
{
int fd, ret, size;
char buf[16];
fd = open(sysctl, O_RDWR);
RET_ERR(fd == -1, "open(sysctl)",
"sysctl:%s fd:%d errno:%d\n", sysctl, fd, errno);
size = snprintf(buf, sizeof(buf), "%d", v);
ret = write(fd, buf, size);
RET_ERR(ret != size, "write(sysctl)",
"sysctl:%s ret:%d size:%d errno:%d\n",
sysctl, ret, size, errno);
close(fd);
return 0;
}
static void restore_sysctls(void)
{
if (saved_tcp_fo != -1)
write_int_sysctl(TCP_FO_SYSCTL, saved_tcp_fo);
if (saved_tcp_syncookie != -1)
write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, saved_tcp_syncookie);
}
static int enable_fastopen(void)
{
int fo;
fo = read_int_sysctl(TCP_FO_SYSCTL);
if (fo < 0)
return -1;
return write_int_sysctl(TCP_FO_SYSCTL, fo | 7);
}
static int enable_syncookie(void)
{
return write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 2);
}
static int disable_syncookie(void)
{
return write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 0);
}
static long get_linum(void)
{
__u32 linum;
int err;
err = bpf_map_lookup_elem(linum_map, &index_zero, &linum);
RET_ERR(err < 0, "lookup_elem(linum_map)", "err:%d errno:%d\n",
err, errno);
return linum;
}
static void check_data(int type, sa_family_t family, const struct cmd *cmd,
int cli_fd)
{
struct data_check expected = {}, result;
union sa46 cli_sa;
socklen_t addrlen;
int err;
addrlen = sizeof(cli_sa);
err = getsockname(cli_fd, (struct sockaddr *)&cli_sa,
&addrlen);
RET_IF(err < 0, "getsockname(cli_fd)", "err:%d errno:%d\n",
err, errno);
err = bpf_map_lookup_elem(data_check_map, &index_zero, &result);
RET_IF(err < 0, "lookup_elem(data_check_map)", "err:%d errno:%d\n",
err, errno);
if (type == SOCK_STREAM) {
expected.len = MIN_TCPHDR_LEN;
expected.ip_protocol = IPPROTO_TCP;
} else {
expected.len = UDPHDR_LEN;
expected.ip_protocol = IPPROTO_UDP;
}
if (family == AF_INET6) {
expected.eth_protocol = htons(ETH_P_IPV6);
expected.bind_inany = !srv_sa.v6.sin6_addr.s6_addr32[3] &&
!srv_sa.v6.sin6_addr.s6_addr32[2] &&
!srv_sa.v6.sin6_addr.s6_addr32[1] &&
!srv_sa.v6.sin6_addr.s6_addr32[0];
memcpy(&expected.skb_addrs[0], cli_sa.v6.sin6_addr.s6_addr32,
sizeof(cli_sa.v6.sin6_addr));
memcpy(&expected.skb_addrs[4], &in6addr_loopback,
sizeof(in6addr_loopback));
expected.skb_ports[0] = cli_sa.v6.sin6_port;
expected.skb_ports[1] = srv_sa.v6.sin6_port;
} else {
expected.eth_protocol = htons(ETH_P_IP);
expected.bind_inany = !srv_sa.v4.sin_addr.s_addr;
expected.skb_addrs[0] = cli_sa.v4.sin_addr.s_addr;
expected.skb_addrs[1] = htonl(INADDR_LOOPBACK);
expected.skb_ports[0] = cli_sa.v4.sin_port;
expected.skb_ports[1] = srv_sa.v4.sin_port;
}
if (memcmp(&result, &expected, offsetof(struct data_check,
equal_check_end))) {
printf("unexpected data_check\n");
printf(" result: (0x%x, %u, %u)\n",
result.eth_protocol, result.ip_protocol,
result.bind_inany);
printf("expected: (0x%x, %u, %u)\n",
expected.eth_protocol, expected.ip_protocol,
expected.bind_inany);
RET_IF(1, "data_check result != expected",
"bpf_prog_linum:%ld\n", get_linum());
}
RET_IF(!result.hash, "data_check result.hash empty",
"result.hash:%u", result.hash);
expected.len += cmd ? sizeof(*cmd) : 0;
if (type == SOCK_STREAM)
RET_IF(expected.len > result.len, "expected.len > result.len",
"expected.len:%u result.len:%u bpf_prog_linum:%ld\n",
expected.len, result.len, get_linum());
else
RET_IF(expected.len != result.len, "expected.len != result.len",
"expected.len:%u result.len:%u bpf_prog_linum:%ld\n",
expected.len, result.len, get_linum());
}
static const char *result_to_str(enum result res)
{
switch (res) {
case DROP_ERR_INNER_MAP:
return "DROP_ERR_INNER_MAP";
case DROP_ERR_SKB_DATA:
return "DROP_ERR_SKB_DATA";
case DROP_ERR_SK_SELECT_REUSEPORT:
return "DROP_ERR_SK_SELECT_REUSEPORT";
case DROP_MISC:
return "DROP_MISC";
case PASS:
return "PASS";
case PASS_ERR_SK_SELECT_REUSEPORT:
return "PASS_ERR_SK_SELECT_REUSEPORT";
default:
return "UNKNOWN";
}
}
static void check_results(void)
{
__u32 results[NR_RESULTS];
__u32 i, broken = 0;
int err;
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_lookup_elem(result_map, &i, &results[i]);
RET_IF(err < 0, "lookup_elem(result_map)",
"i:%u err:%d errno:%d\n", i, err, errno);
}
for (i = 0; i < NR_RESULTS; i++) {
if (results[i] != expected_results[i]) {
broken = i;
break;
}
}
if (i == NR_RESULTS)
return;
printf("unexpected result\n");
printf(" result: [");
printf("%u", results[0]);
for (i = 1; i < NR_RESULTS; i++)
printf(", %u", results[i]);
printf("]\n");
printf("expected: [");
printf("%u", expected_results[0]);
for (i = 1; i < NR_RESULTS; i++)
printf(", %u", expected_results[i]);
printf("]\n");
printf("mismatch on %s (bpf_prog_linum:%ld)\n", result_to_str(broken),
get_linum());
CHECK_FAIL(true);
}
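/* Create a client socket bound to the loopback address and send `data` to
 * srv_sa.  MSG_FASTOPEN lets the TCP case connect and transmit in the initial
 * SYN (TCP Fast Open is enabled in serial_test_select_reuseport()); for UDP
 * the flag is ignored and this is a plain datagram send.
 */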
static int send_data(int type, sa_family_t family, void *data, size_t len,
enum result expected)
{
union sa46 cli_sa;
int fd, err;
fd = socket(family, type, 0);
RET_ERR(fd == -1, "socket()", "fd:%d errno:%d\n", fd, errno);
sa46_init_loopback(&cli_sa, family);
err = bind(fd, (struct sockaddr *)&cli_sa, sizeof(cli_sa));
RET_ERR(fd == -1, "bind(cli_sa)", "err:%d errno:%d\n", err, errno);
err = sendto(fd, data, len, MSG_FASTOPEN, (struct sockaddr *)&srv_sa,
sizeof(srv_sa));
RET_ERR(err != len && expected >= PASS,
"sendto()", "family:%u err:%d errno:%d expected:%d\n",
family, err, errno, expected);
return fd;
}
static void do_test(int type, sa_family_t family, struct cmd *cmd,
enum result expected)
{
int nev, srv_fd, cli_fd;
struct epoll_event ev;
struct cmd rcv_cmd;
ssize_t nread;
cli_fd = send_data(type, family, cmd, cmd ? sizeof(*cmd) : 0,
expected);
if (cli_fd < 0)
return;
nev = epoll_wait(epfd, &ev, 1, expected >= PASS ? 5 : 0);
RET_IF((nev <= 0 && expected >= PASS) ||
(nev > 0 && expected < PASS),
"nev <> expected",
"nev:%d expected:%d type:%d family:%d data:(%d, %d)\n",
nev, expected, type, family,
cmd ? cmd->reuseport_index : -1,
cmd ? cmd->pass_on_failure : -1);
check_results();
check_data(type, family, cmd, cli_fd);
if (expected < PASS)
return;
RET_IF(expected != PASS_ERR_SK_SELECT_REUSEPORT &&
cmd->reuseport_index != ev.data.u32,
"check cmd->reuseport_index",
"cmd:(%u, %u) ev.data.u32:%u\n",
cmd->pass_on_failure, cmd->reuseport_index, ev.data.u32);
srv_fd = sk_fds[ev.data.u32];
if (type == SOCK_STREAM) {
int new_fd = accept(srv_fd, NULL, 0);
RET_IF(new_fd == -1, "accept(srv_fd)",
"ev.data.u32:%u new_fd:%d errno:%d\n",
ev.data.u32, new_fd, errno);
nread = recv(new_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT);
RET_IF(nread != sizeof(rcv_cmd),
"recv(new_fd)",
"ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n",
ev.data.u32, nread, sizeof(rcv_cmd), errno);
close(new_fd);
} else {
nread = recv(srv_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT);
RET_IF(nread != sizeof(rcv_cmd),
"recv(sk_fds)",
"ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n",
ev.data.u32, nread, sizeof(rcv_cmd), errno);
}
close(cli_fd);
}
static void test_err_inner_map(int type, sa_family_t family)
{
struct cmd cmd = {
.reuseport_index = 0,
.pass_on_failure = 0,
};
expected_results[DROP_ERR_INNER_MAP]++;
do_test(type, family, &cmd, DROP_ERR_INNER_MAP);
}
static void test_err_skb_data(int type, sa_family_t family)
{
expected_results[DROP_ERR_SKB_DATA]++;
do_test(type, family, NULL, DROP_ERR_SKB_DATA);
}
static void test_err_sk_select_port(int type, sa_family_t family)
{
struct cmd cmd = {
.reuseport_index = REUSEPORT_ARRAY_SIZE,
.pass_on_failure = 0,
};
expected_results[DROP_ERR_SK_SELECT_REUSEPORT]++;
do_test(type, family, &cmd, DROP_ERR_SK_SELECT_REUSEPORT);
}
static void test_pass(int type, sa_family_t family)
{
struct cmd cmd;
int i;
cmd.pass_on_failure = 0;
for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) {
expected_results[PASS]++;
cmd.reuseport_index = i;
do_test(type, family, &cmd, PASS);
}
}
static void test_syncookie(int type, sa_family_t family)
{
int err, tmp_index = 1;
struct cmd cmd = {
.reuseport_index = 0,
.pass_on_failure = 0,
};
/*
* +1 for TCP-SYN and
* +1 for the TCP-ACK (ack the syncookie)
*/
expected_results[PASS] += 2;
enable_syncookie();
/*
* Simulate TCP-SYN and TCP-ACK are handled by two different sk:
* TCP-SYN: select sk_fds[tmp_index = 1] tmp_index is from the
* tmp_index_ovr_map
* TCP-ACK: select sk_fds[reuseport_index = 0] reuseport_index
* is from the cmd.reuseport_index
*/
err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero,
&tmp_index, BPF_ANY);
RET_IF(err < 0, "update_elem(tmp_index_ovr_map, 0, 1)",
"err:%d errno:%d\n", err, errno);
do_test(type, family, &cmd, PASS);
err = bpf_map_lookup_elem(tmp_index_ovr_map, &index_zero,
&tmp_index);
RET_IF(err < 0 || tmp_index >= 0,
"lookup_elem(tmp_index_ovr_map)",
"err:%d errno:%d tmp_index:%d\n",
err, errno, tmp_index);
disable_syncookie();
}
static void test_pass_on_err(int type, sa_family_t family)
{
struct cmd cmd = {
.reuseport_index = REUSEPORT_ARRAY_SIZE,
.pass_on_failure = 1,
};
expected_results[PASS_ERR_SK_SELECT_REUSEPORT] += 1;
do_test(type, family, &cmd, PASS_ERR_SK_SELECT_REUSEPORT);
}
static void test_detach_bpf(int type, sa_family_t family)
{
#ifdef SO_DETACH_REUSEPORT_BPF
__u32 nr_run_before = 0, nr_run_after = 0, tmp, i;
struct epoll_event ev;
int cli_fd, err, nev;
struct cmd cmd = {};
int optvalue = 0;
err = setsockopt(sk_fds[0], SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
&optvalue, sizeof(optvalue));
RET_IF(err == -1, "setsockopt(SO_DETACH_REUSEPORT_BPF)",
"err:%d errno:%d\n", err, errno);
err = setsockopt(sk_fds[1], SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
&optvalue, sizeof(optvalue));
RET_IF(err == 0 || errno != ENOENT,
"setsockopt(SO_DETACH_REUSEPORT_BPF)",
"err:%d errno:%d\n", err, errno);
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_lookup_elem(result_map, &i, &tmp);
RET_IF(err < 0, "lookup_elem(result_map)",
"i:%u err:%d errno:%d\n", i, err, errno);
nr_run_before += tmp;
}
cli_fd = send_data(type, family, &cmd, sizeof(cmd), PASS);
if (cli_fd < 0)
return;
nev = epoll_wait(epfd, &ev, 1, 5);
RET_IF(nev <= 0, "nev <= 0",
"nev:%d expected:1 type:%d family:%d data:(0, 0)\n",
nev, type, family);
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_lookup_elem(result_map, &i, &tmp);
RET_IF(err < 0, "lookup_elem(result_map)",
"i:%u err:%d errno:%d\n", i, err, errno);
nr_run_after += tmp;
}
RET_IF(nr_run_before != nr_run_after,
"nr_run_before != nr_run_after",
"nr_run_before:%u nr_run_after:%u\n",
nr_run_before, nr_run_after);
close(cli_fd);
#else
test__skip();
#endif
}
static void prepare_sk_fds(int type, sa_family_t family, bool inany)
{
const int first = REUSEPORT_ARRAY_SIZE - 1;
int i, err, optval = 1;
struct epoll_event ev;
socklen_t addrlen;
if (inany)
sa46_init_inany(&srv_sa, family);
else
sa46_init_loopback(&srv_sa, family);
addrlen = sizeof(srv_sa);
/*
* The sk_fds[] is filled from the back such that the order
* is exactly opposite to the (struct sock_reuseport *)reuse->socks[].
*/
for (i = first; i >= 0; i--) {
sk_fds[i] = socket(family, type, 0);
RET_IF(sk_fds[i] == -1, "socket()", "sk_fds[%d]:%d errno:%d\n",
i, sk_fds[i], errno);
err = setsockopt(sk_fds[i], SOL_SOCKET, SO_REUSEPORT,
&optval, sizeof(optval));
RET_IF(err == -1, "setsockopt(SO_REUSEPORT)",
"sk_fds[%d] err:%d errno:%d\n",
i, err, errno);
if (i == first) {
err = setsockopt(sk_fds[i], SOL_SOCKET,
SO_ATTACH_REUSEPORT_EBPF,
&select_by_skb_data_prog,
sizeof(select_by_skb_data_prog));
RET_IF(err < 0, "setsockopt(SO_ATTACH_REUEPORT_EBPF)",
"err:%d errno:%d\n", err, errno);
}
err = bind(sk_fds[i], (struct sockaddr *)&srv_sa, addrlen);
RET_IF(err < 0, "bind()", "sk_fds[%d] err:%d errno:%d\n",
i, err, errno);
if (type == SOCK_STREAM) {
err = listen(sk_fds[i], 10);
RET_IF(err < 0, "listen()",
"sk_fds[%d] err:%d errno:%d\n",
i, err, errno);
}
err = bpf_map_update_elem(reuseport_array, &i, &sk_fds[i],
BPF_NOEXIST);
RET_IF(err < 0, "update_elem(reuseport_array)",
"sk_fds[%d] err:%d errno:%d\n", i, err, errno);
if (i == first) {
socklen_t addrlen = sizeof(srv_sa);
err = getsockname(sk_fds[i], (struct sockaddr *)&srv_sa,
&addrlen);
RET_IF(err == -1, "getsockname()",
"sk_fds[%d] err:%d errno:%d\n", i, err, errno);
}
}
epfd = epoll_create(1);
RET_IF(epfd == -1, "epoll_create(1)",
"epfd:%d errno:%d\n", epfd, errno);
ev.events = EPOLLIN;
for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) {
ev.data.u32 = i;
err = epoll_ctl(epfd, EPOLL_CTL_ADD, sk_fds[i], &ev);
RET_IF(err, "epoll_ctl(EPOLL_CTL_ADD)", "sk_fds[%d]\n", i);
}
}
static void setup_per_test(int type, sa_family_t family, bool inany,
bool no_inner_map)
{
int ovr = -1, err;
prepare_sk_fds(type, family, inany);
err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, &ovr,
BPF_ANY);
RET_IF(err < 0, "update_elem(tmp_index_ovr_map, 0, -1)",
"err:%d errno:%d\n", err, errno);
/* Install reuseport_array to outer_map? */
if (no_inner_map)
return;
err = bpf_map_update_elem(outer_map, &index_zero, &reuseport_array,
BPF_ANY);
RET_IF(err < 0, "update_elem(outer_map, 0, reuseport_array)",
"err:%d errno:%d\n", err, errno);
}
static void cleanup_per_test(bool no_inner_map)
{
int i, err, zero = 0;
memset(expected_results, 0, sizeof(expected_results));
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_update_elem(result_map, &i, &zero, BPF_ANY);
RET_IF(err, "reset elem in result_map",
"i:%u err:%d errno:%d\n", i, err, errno);
}
err = bpf_map_update_elem(linum_map, &zero, &zero, BPF_ANY);
RET_IF(err, "reset line number in linum_map", "err:%d errno:%d\n",
err, errno);
for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++)
close(sk_fds[i]);
close(epfd);
/* Delete reuseport_array from outer_map? */
if (no_inner_map)
return;
err = bpf_map_delete_elem(outer_map, &index_zero);
RET_IF(err < 0, "delete_elem(outer_map)",
"err:%d errno:%d\n", err, errno);
}
static void cleanup(void)
{
if (outer_map >= 0) {
close(outer_map);
outer_map = -1;
}
if (reuseport_array >= 0) {
close(reuseport_array);
reuseport_array = -1;
}
if (obj) {
bpf_object__close(obj);
obj = NULL;
}
memset(expected_results, 0, sizeof(expected_results));
}
static const char *maptype_str(enum bpf_map_type type)
{
switch (type) {
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
return "reuseport_sockarray";
case BPF_MAP_TYPE_SOCKMAP:
return "sockmap";
case BPF_MAP_TYPE_SOCKHASH:
return "sockhash";
default:
return "unknown";
}
}
static const char *family_str(sa_family_t family)
{
switch (family) {
case AF_INET:
return "IPv4";
case AF_INET6:
return "IPv6";
default:
return "unknown";
}
}
static const char *sotype_str(int sotype)
{
switch (sotype) {
case SOCK_STREAM:
return "TCP";
case SOCK_DGRAM:
return "UDP";
default:
return "unknown";
}
}
#define TEST_INIT(fn_, ...) { .fn = fn_, .name = #fn_, __VA_ARGS__ }
static void test_config(int sotype, sa_family_t family, bool inany)
{
const struct test {
void (*fn)(int sotype, sa_family_t family);
const char *name;
bool no_inner_map;
int need_sotype;
} tests[] = {
TEST_INIT(test_err_inner_map,
.no_inner_map = true),
TEST_INIT(test_err_skb_data),
TEST_INIT(test_err_sk_select_port),
TEST_INIT(test_pass),
TEST_INIT(test_syncookie,
.need_sotype = SOCK_STREAM),
TEST_INIT(test_pass_on_err),
TEST_INIT(test_detach_bpf),
};
char s[MAX_TEST_NAME];
const struct test *t;
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
if (t->need_sotype && t->need_sotype != sotype)
continue; /* test not compatible with socket type */
snprintf(s, sizeof(s), "%s %s/%s %s %s",
maptype_str(inner_map_type),
family_str(family), sotype_str(sotype),
inany ? "INANY" : "LOOPBACK", t->name);
if (!test__start_subtest(s))
continue;
setup_per_test(sotype, family, inany, t->no_inner_map);
t->fn(sotype, family);
cleanup_per_test(t->no_inner_map);
}
}
#define BIND_INANY true
static void test_all(void)
{
const struct config {
int sotype;
sa_family_t family;
bool inany;
} configs[] = {
{ SOCK_STREAM, AF_INET },
{ SOCK_STREAM, AF_INET, BIND_INANY },
{ SOCK_STREAM, AF_INET6 },
{ SOCK_STREAM, AF_INET6, BIND_INANY },
{ SOCK_DGRAM, AF_INET },
{ SOCK_DGRAM, AF_INET6 },
};
const struct config *c;
for (c = configs; c < configs + ARRAY_SIZE(configs); c++)
test_config(c->sotype, c->family, c->inany);
}
void test_map_type(enum bpf_map_type mt)
{
if (create_maps(mt))
goto out;
if (prepare_bpf_obj())
goto out;
test_all();
out:
cleanup();
}
void serial_test_select_reuseport(void)
{
saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL);
if (saved_tcp_fo < 0)
goto out;
saved_tcp_syncookie = read_int_sysctl(TCP_SYNCOOKIE_SYSCTL);
if (saved_tcp_syncookie < 0)
goto out;
if (enable_fastopen())
goto out;
if (disable_syncookie())
goto out;
test_map_type(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
test_map_type(BPF_MAP_TYPE_SOCKMAP);
test_map_type(BPF_MAP_TYPE_SOCKHASH);
out:
restore_sysctls();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/select_reuseport.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include "test_bpf_nf.skel.h"
#include "test_bpf_nf_fail.skel.h"
static char log_buf[1024 * 1024];
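/* Each entry names a program from the test_bpf_nf_fail skeleton that the
 * verifier must reject, together with the expected substring of the
 * verifier log.
 */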
struct {
const char *prog_name;
const char *err_msg;
} test_bpf_nf_fail_tests[] = {
{ "alloc_release", "kernel function bpf_ct_release args#0 expected pointer to STRUCT nf_conn but" },
{ "insert_insert", "kernel function bpf_ct_insert_entry args#0 expected pointer to STRUCT nf_conn___init but" },
{ "lookup_insert", "kernel function bpf_ct_insert_entry args#0 expected pointer to STRUCT nf_conn___init but" },
{ "set_timeout_after_insert", "kernel function bpf_ct_set_timeout args#0 expected pointer to STRUCT nf_conn___init but" },
{ "set_status_after_insert", "kernel function bpf_ct_set_status args#0 expected pointer to STRUCT nf_conn___init but" },
{ "change_timeout_after_alloc", "kernel function bpf_ct_change_timeout args#0 expected pointer to STRUCT nf_conn but" },
{ "change_status_after_alloc", "kernel function bpf_ct_change_status args#0 expected pointer to STRUCT nf_conn but" },
{ "write_not_allowlisted_field", "no write support to nf_conn at off" },
};
enum {
TEST_XDP,
TEST_TC_BPF,
};
#define TIMEOUT_MS 3000
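/* Conntrack status bits the BPF program is expected to have set on the
 * inserted entry; test_status is compared against this mask below.
 */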
#define IPS_STATUS_MASK (IPS_CONFIRMED | IPS_SEEN_REPLY | \
IPS_SRC_NAT_DONE | IPS_DST_NAT_DONE | \
IPS_SRC_NAT | IPS_DST_NAT)
static int connect_to_server(int srv_fd)
{
int fd = -1;
fd = socket(AF_INET, SOCK_STREAM, 0);
if (!ASSERT_GE(fd, 0, "socket"))
goto out;
if (!ASSERT_EQ(connect_fd_to_fd(fd, srv_fd, TIMEOUT_MS), 0, "connect_fd_to_fd")) {
close(fd);
fd = -1;
}
out:
return fd;
}
static void test_bpf_nf_ct(int mode)
{
const char *iptables = "iptables-legacy -t raw %s PREROUTING -j CONNMARK --set-mark 42/0";
int srv_fd = -1, client_fd = -1, srv_client_fd = -1;
struct sockaddr_in peer_addr = {};
struct test_bpf_nf *skel;
int prog_fd, err;
socklen_t len;
u16 srv_port;
char cmd[128];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
skel = test_bpf_nf__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load"))
return;
/* Enable connection tracking */
snprintf(cmd, sizeof(cmd), iptables, "-A");
if (!ASSERT_OK(system(cmd), cmd))
goto end;
srv_port = (mode == TEST_XDP) ? 5005 : 5006;
srv_fd = start_server(AF_INET, SOCK_STREAM, "127.0.0.1", srv_port, TIMEOUT_MS);
if (!ASSERT_GE(srv_fd, 0, "start_server"))
goto end;
client_fd = connect_to_server(srv_fd);
if (!ASSERT_GE(client_fd, 0, "connect_to_server"))
goto end;
len = sizeof(peer_addr);
srv_client_fd = accept(srv_fd, (struct sockaddr *)&peer_addr, &len);
if (!ASSERT_GE(srv_client_fd, 0, "accept"))
goto end;
if (!ASSERT_EQ(len, sizeof(struct sockaddr_in), "sockaddr len"))
goto end;
skel->bss->saddr = peer_addr.sin_addr.s_addr;
skel->bss->sport = peer_addr.sin_port;
skel->bss->daddr = peer_addr.sin_addr.s_addr;
skel->bss->dport = htons(srv_port);
if (mode == TEST_XDP)
prog_fd = bpf_program__fd(skel->progs.nf_xdp_ct_test);
else
prog_fd = bpf_program__fd(skel->progs.nf_skb_ct_test);
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "bpf_prog_test_run"))
goto end;
ASSERT_EQ(skel->bss->test_einval_bpf_tuple, -EINVAL, "Test EINVAL for NULL bpf_tuple");
ASSERT_EQ(skel->bss->test_einval_reserved, -EINVAL, "Test EINVAL for reserved not set to 0");
ASSERT_EQ(skel->bss->test_einval_netns_id, -EINVAL, "Test EINVAL for netns_id < -1");
ASSERT_EQ(skel->bss->test_einval_len_opts, -EINVAL, "Test EINVAL for len__opts != NF_BPF_CT_OPTS_SZ");
ASSERT_EQ(skel->bss->test_eproto_l4proto, -EPROTO, "Test EPROTO for l4proto != TCP or UDP");
ASSERT_EQ(skel->bss->test_enonet_netns_id, -ENONET, "Test ENONET for bad but valid netns_id");
ASSERT_EQ(skel->bss->test_enoent_lookup, -ENOENT, "Test ENOENT for failed lookup");
ASSERT_EQ(skel->bss->test_eafnosupport, -EAFNOSUPPORT, "Test EAFNOSUPPORT for invalid len__tuple");
ASSERT_EQ(skel->data->test_alloc_entry, 0, "Test for alloc new entry");
ASSERT_EQ(skel->data->test_insert_entry, 0, "Test for insert new entry");
ASSERT_EQ(skel->data->test_succ_lookup, 0, "Test for successful lookup");
/* allow some tolerance for test_delta_timeout value to avoid races. */
ASSERT_GT(skel->bss->test_delta_timeout, 8, "Test for min ct timeout update");
ASSERT_LE(skel->bss->test_delta_timeout, 10, "Test for max ct timeout update");
ASSERT_EQ(skel->bss->test_insert_lookup_mark, 77, "Test for insert and lookup mark value");
	ASSERT_EQ(skel->bss->test_status, IPS_STATUS_MASK, "Test for ct status update");
ASSERT_EQ(skel->data->test_exist_lookup, 0, "Test existing connection lookup");
ASSERT_EQ(skel->bss->test_exist_lookup_mark, 43, "Test existing connection lookup ctmark");
ASSERT_EQ(skel->data->test_snat_addr, 0, "Test for source natting");
ASSERT_EQ(skel->data->test_dnat_addr, 0, "Test for destination natting");
end:
if (client_fd != -1)
close(client_fd);
if (srv_client_fd != -1)
close(srv_client_fd);
if (srv_fd != -1)
close(srv_fd);
snprintf(cmd, sizeof(cmd), iptables, "-D");
system(cmd);
test_bpf_nf__destroy(skel);
}
static void test_bpf_nf_ct_fail(const char *prog_name, const char *err_msg)
{
LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
.kernel_log_size = sizeof(log_buf),
.kernel_log_level = 1);
struct test_bpf_nf_fail *skel;
struct bpf_program *prog;
int ret;
skel = test_bpf_nf_fail__open_opts(&opts);
if (!ASSERT_OK_PTR(skel, "test_bpf_nf_fail__open"))
return;
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto end;
bpf_program__set_autoload(prog, true);
ret = test_bpf_nf_fail__load(skel);
if (!ASSERT_ERR(ret, "test_bpf_nf_fail__load must fail"))
goto end;
if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
fprintf(stderr, "Expected: %s\n", err_msg);
fprintf(stderr, "Verifier: %s\n", log_buf);
}
end:
test_bpf_nf_fail__destroy(skel);
}
void test_bpf_nf(void)
{
int i;
if (test__start_subtest("xdp-ct"))
test_bpf_nf_ct(TEST_XDP);
if (test__start_subtest("tc-bpf-ct"))
test_bpf_nf_ct(TEST_TC_BPF);
for (i = 0; i < ARRAY_SIZE(test_bpf_nf_fail_tests); i++) {
if (test__start_subtest(test_bpf_nf_fail_tests[i].prog_name))
test_bpf_nf_ct_fail(test_bpf_nf_fail_tests[i].prog_name,
test_bpf_nf_fail_tests[i].err_msg);
}
}
| linux-master | tools/testing/selftests/bpf/prog_tests/bpf_nf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
#include <test_progs.h>
#include "strncmp_test.skel.h"
static int trigger_strncmp(const struct strncmp_test *skel)
{
int cmp;
usleep(1);
cmp = skel->bss->cmp_ret;
if (cmp > 0)
return 1;
if (cmp < 0)
return -1;
return 0;
}
/*
* Compare str and target after making str[i] != target[i].
* When exp is -1, make str[i] < target[i] and delta = -1.
*/
static void strncmp_full_str_cmp(struct strncmp_test *skel, const char *name,
int exp)
{
size_t nr = sizeof(skel->bss->str);
char *str = skel->bss->str;
int delta = exp;
int got;
size_t i;
memcpy(str, skel->rodata->target, nr);
for (i = 0; i < nr - 1; i++) {
str[i] += delta;
got = trigger_strncmp(skel);
ASSERT_EQ(got, exp, name);
str[i] -= delta;
}
}
static void test_strncmp_ret(void)
{
struct strncmp_test *skel;
int err, got;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
bpf_program__set_autoload(skel->progs.do_strncmp, true);
err = strncmp_test__load(skel);
if (!ASSERT_EQ(err, 0, "strncmp_test load"))
goto out;
err = strncmp_test__attach(skel);
if (!ASSERT_EQ(err, 0, "strncmp_test attach"))
goto out;
skel->bss->target_pid = getpid();
/* Empty str */
skel->bss->str[0] = '\0';
got = trigger_strncmp(skel);
ASSERT_EQ(got, -1, "strncmp: empty str");
/* Same string */
memcpy(skel->bss->str, skel->rodata->target, sizeof(skel->bss->str));
got = trigger_strncmp(skel);
ASSERT_EQ(got, 0, "strncmp: same str");
	/* Not-null-terminated string */
memcpy(skel->bss->str, skel->rodata->target, sizeof(skel->bss->str));
skel->bss->str[sizeof(skel->bss->str) - 1] = 'A';
got = trigger_strncmp(skel);
ASSERT_EQ(got, 1, "strncmp: not-null-term str");
strncmp_full_str_cmp(skel, "strncmp: less than", -1);
strncmp_full_str_cmp(skel, "strncmp: greater than", 1);
out:
strncmp_test__destroy(skel);
}
static void test_strncmp_bad_not_const_str_size(void)
{
struct strncmp_test *skel;
int err;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size, true);
err = strncmp_test__load(skel);
ASSERT_ERR(err, "strncmp_test load bad_not_const_str_size");
strncmp_test__destroy(skel);
}
static void test_strncmp_bad_writable_target(void)
{
struct strncmp_test *skel;
int err;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target, true);
err = strncmp_test__load(skel);
ASSERT_ERR(err, "strncmp_test load bad_writable_target");
strncmp_test__destroy(skel);
}
static void test_strncmp_bad_not_null_term_target(void)
{
struct strncmp_test *skel;
int err;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target, true);
err = strncmp_test__load(skel);
ASSERT_ERR(err, "strncmp_test load bad_not_null_term_target");
strncmp_test__destroy(skel);
}
void test_test_strncmp(void)
{
if (test__start_subtest("strncmp_ret"))
test_strncmp_ret();
if (test__start_subtest("strncmp_bad_not_const_str_size"))
test_strncmp_bad_not_const_str_size();
if (test__start_subtest("strncmp_bad_writable_target"))
test_strncmp_bad_writable_target();
if (test__start_subtest("strncmp_bad_not_null_term_target"))
test_strncmp_bad_not_null_term_target();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_strncmp.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "test_progs.h"
#include "core_kern.lskel.h"
void test_core_kern_lskel(void)
{
struct core_kern_lskel *skel;
int link_fd;
skel = core_kern_lskel__open_and_load();
if (!ASSERT_OK_PTR(skel, "open_and_load"))
return;
link_fd = core_kern_lskel__core_relo_proto__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(core_relo_proto)"))
goto cleanup;
/* trigger tracepoints */
usleep(1);
ASSERT_TRUE(skel->bss->proto_out[0], "bpf_core_type_exists");
ASSERT_FALSE(skel->bss->proto_out[1], "!bpf_core_type_exists");
ASSERT_TRUE(skel->bss->proto_out[2], "bpf_core_type_exists. nested");
cleanup:
core_kern_lskel__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/core_kern.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "trace_vprintk.lskel.h"
void test_verif_stats(void)
{
__u32 len = sizeof(struct bpf_prog_info);
struct trace_vprintk_lskel *skel;
struct bpf_prog_info info = {};
int err;
skel = trace_vprintk_lskel__open_and_load();
if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load"))
goto cleanup;
err = bpf_prog_get_info_by_fd(skel->progs.sys_enter.prog_fd,
&info, &len);
if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto cleanup;
if (!ASSERT_GT(info.verified_insns, 0, "verified_insns"))
goto cleanup;
cleanup:
trace_vprintk_lskel__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/verif_stats.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <netinet/tcp.h>
#include "sockopt_qos_to_cc.skel.h"
static void run_setsockopt_test(int cg_fd, int sock_fd)
{
socklen_t optlen;
char cc[16]; /* TCP_CA_NAME_MAX */
int buf;
int err = -1;
buf = 0x2D;
err = setsockopt(sock_fd, SOL_IPV6, IPV6_TCLASS, &buf, sizeof(buf));
if (!ASSERT_OK(err, "setsockopt(sock_fd, IPV6_TCLASS)"))
return;
/* Verify the setsockopt cc change */
optlen = sizeof(cc);
err = getsockopt(sock_fd, SOL_TCP, TCP_CONGESTION, cc, &optlen);
if (!ASSERT_OK(err, "getsockopt(sock_fd, TCP_CONGESTION)"))
return;
if (!ASSERT_STREQ(cc, "reno", "getsockopt(sock_fd, TCP_CONGESTION)"))
return;
}
void test_sockopt_qos_to_cc(void)
{
struct sockopt_qos_to_cc *skel;
char cc_cubic[16] = "cubic"; /* TCP_CA_NAME_MAX */
int cg_fd = -1;
int sock_fd = -1;
int err;
cg_fd = test__join_cgroup("/sockopt_qos_to_cc");
if (!ASSERT_GE(cg_fd, 0, "cg-join(sockopt_qos_to_cc)"))
return;
skel = sockopt_qos_to_cc__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel"))
goto done;
skel->bss->page_size = sysconf(_SC_PAGESIZE);
sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
if (!ASSERT_GE(sock_fd, 0, "v6 socket open"))
goto done;
err = setsockopt(sock_fd, SOL_TCP, TCP_CONGESTION, &cc_cubic,
sizeof(cc_cubic));
if (!ASSERT_OK(err, "setsockopt(sock_fd, TCP_CONGESTION)"))
goto done;
skel->links.sockopt_qos_to_cc =
bpf_program__attach_cgroup(skel->progs.sockopt_qos_to_cc,
cg_fd);
if (!ASSERT_OK_PTR(skel->links.sockopt_qos_to_cc,
"prog_attach(sockopt_qos_to_cc)"))
goto done;
run_setsockopt_test(cg_fd, sock_fd);
done:
if (sock_fd != -1)
close(sock_fd);
if (cg_fd != -1)
close(cg_fd);
/* destroy can take null and error pointer */
sockopt_qos_to_cc__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Facebook */
#include <test_progs.h>
#include <linux/in6.h>
#include <sys/socket.h>
#include <sched.h>
#include <unistd.h>
#include "cgroup_helpers.h"
#include "testing_helpers.h"
#include "cgroup_tcp_skb.skel.h"
#include "cgroup_tcp_skb.h"
#include "network_helpers.h"
#define CGROUP_TCP_SKB_PATH "/test_cgroup_tcp_skb"
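/* install_filters() attaches the egress and ingress cgroup_skb programs to
 * the test cgroup and resets the connection-tracking state (g_sock_state,
 * g_unexpected) those programs maintain.
 */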
static int install_filters(int cgroup_fd,
struct bpf_link **egress_link,
struct bpf_link **ingress_link,
struct bpf_program *egress_prog,
struct bpf_program *ingress_prog,
struct cgroup_tcp_skb *skel)
{
/* Prepare filters */
skel->bss->g_sock_state = 0;
skel->bss->g_unexpected = 0;
*egress_link =
bpf_program__attach_cgroup(egress_prog,
cgroup_fd);
if (!ASSERT_OK_PTR(egress_link, "egress_link"))
return -1;
*ingress_link =
bpf_program__attach_cgroup(ingress_prog,
cgroup_fd);
if (!ASSERT_OK_PTR(ingress_link, "ingress_link"))
return -1;
return 0;
}
static void uninstall_filters(struct bpf_link **egress_link,
struct bpf_link **ingress_link)
{
bpf_link__destroy(*egress_link);
*egress_link = NULL;
bpf_link__destroy(*ingress_link);
*ingress_link = NULL;
}
static int create_client_sock_v6(void)
{
int fd;
fd = socket(AF_INET6, SOCK_STREAM, 0);
if (fd < 0) {
perror("socket");
return -1;
}
return fd;
}
/* Connect to the server in a cgroup from the outside of the cgroup. */
static int talk_to_cgroup(int *client_fd, int *listen_fd, int *service_fd,
struct cgroup_tcp_skb *skel)
{
int err, cp;
char buf[5];
int port;
/* Create client & server socket */
err = join_root_cgroup();
if (!ASSERT_OK(err, "join_root_cgroup"))
return -1;
*client_fd = create_client_sock_v6();
if (!ASSERT_GE(*client_fd, 0, "client_fd"))
return -1;
err = join_cgroup(CGROUP_TCP_SKB_PATH);
if (!ASSERT_OK(err, "join_cgroup"))
return -1;
*listen_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_GE(*listen_fd, 0, "listen_fd"))
return -1;
port = get_socket_local_port(*listen_fd);
if (!ASSERT_GE(port, 0, "get_socket_local_port"))
return -1;
skel->bss->g_sock_port = ntohs(port);
/* Connect client to server */
err = connect_fd_to_fd(*client_fd, *listen_fd, 0);
if (!ASSERT_OK(err, "connect_fd_to_fd"))
return -1;
*service_fd = accept(*listen_fd, NULL, NULL);
if (!ASSERT_GE(*service_fd, 0, "service_fd"))
return -1;
err = join_root_cgroup();
if (!ASSERT_OK(err, "join_root_cgroup"))
return -1;
cp = write(*client_fd, "hello", 5);
if (!ASSERT_EQ(cp, 5, "write"))
return -1;
cp = read(*service_fd, buf, 5);
if (!ASSERT_EQ(cp, 5, "read"))
return -1;
return 0;
}
/* Connect to the server out of a cgroup from inside the cgroup. */
static int talk_to_outside(int *client_fd, int *listen_fd, int *service_fd,
struct cgroup_tcp_skb *skel)
{
int err, cp;
char buf[5];
int port;
/* Create client & server socket */
err = join_root_cgroup();
if (!ASSERT_OK(err, "join_root_cgroup"))
return -1;
*listen_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_GE(*listen_fd, 0, "listen_fd"))
return -1;
err = join_cgroup(CGROUP_TCP_SKB_PATH);
if (!ASSERT_OK(err, "join_cgroup"))
return -1;
*client_fd = create_client_sock_v6();
if (!ASSERT_GE(*client_fd, 0, "client_fd"))
return -1;
err = join_root_cgroup();
if (!ASSERT_OK(err, "join_root_cgroup"))
return -1;
port = get_socket_local_port(*listen_fd);
if (!ASSERT_GE(port, 0, "get_socket_local_port"))
return -1;
skel->bss->g_sock_port = ntohs(port);
/* Connect client to server */
err = connect_fd_to_fd(*client_fd, *listen_fd, 0);
if (!ASSERT_OK(err, "connect_fd_to_fd"))
return -1;
*service_fd = accept(*listen_fd, NULL, NULL);
if (!ASSERT_GE(*service_fd, 0, "service_fd"))
return -1;
cp = write(*client_fd, "hello", 5);
if (!ASSERT_EQ(cp, 5, "write"))
return -1;
cp = read(*service_fd, buf, 5);
if (!ASSERT_EQ(cp, 5, "read"))
return -1;
return 0;
}
static int close_connection(int *closing_fd, int *peer_fd, int *listen_fd,
struct cgroup_tcp_skb *skel)
{
__u32 saved_packet_count = 0;
int err;
int i;
/* Wait for ACKs to be sent */
saved_packet_count = skel->bss->g_packet_count;
usleep(100000); /* 0.1s */
for (i = 0;
skel->bss->g_packet_count != saved_packet_count && i < 10;
i++) {
saved_packet_count = skel->bss->g_packet_count;
usleep(100000); /* 0.1s */
}
if (!ASSERT_EQ(skel->bss->g_packet_count, saved_packet_count,
"packet_count"))
return -1;
skel->bss->g_packet_count = 0;
saved_packet_count = 0;
	/* Half-close the connection so the closing socket has a chance to
	 * receive a FIN from the peer.
	 */
err = shutdown(*closing_fd, SHUT_WR);
if (!ASSERT_OK(err, "shutdown closing_fd"))
return -1;
/* Wait for FIN and the ACK of the FIN to be observed */
for (i = 0;
skel->bss->g_packet_count < saved_packet_count + 2 && i < 10;
i++)
usleep(100000); /* 0.1s */
if (!ASSERT_GE(skel->bss->g_packet_count, saved_packet_count + 2,
"packet_count"))
return -1;
saved_packet_count = skel->bss->g_packet_count;
/* Fully shutdown the connection */
err = close(*peer_fd);
if (!ASSERT_OK(err, "close peer_fd"))
return -1;
*peer_fd = -1;
/* Wait for FIN and the ACK of the FIN to be observed */
for (i = 0;
skel->bss->g_packet_count < saved_packet_count + 2 && i < 10;
i++)
usleep(100000); /* 0.1s */
if (!ASSERT_GE(skel->bss->g_packet_count, saved_packet_count + 2,
"packet_count"))
return -1;
err = close(*closing_fd);
if (!ASSERT_OK(err, "close closing_fd"))
return -1;
*closing_fd = -1;
close(*listen_fd);
*listen_fd = -1;
return 0;
}
/* This test case includes four scenarios:
* 1. Connect to the server from outside the cgroup and close the connection
* from outside the cgroup.
* 2. Connect to the server from outside the cgroup and close the connection
* from inside the cgroup.
* 3. Connect to the server from inside the cgroup and close the connection
* from outside the cgroup.
* 4. Connect to the server from inside the cgroup and close the connection
* from inside the cgroup.
*
* The test case is to verify that cgroup_skb/{egress,ingress} filters
* receive expected packets including SYN, SYN/ACK, ACK, FIN, and FIN/ACK.
*/
void test_cgroup_tcp_skb(void)
{
struct bpf_link *ingress_link = NULL;
struct bpf_link *egress_link = NULL;
int client_fd = -1, listen_fd = -1;
struct cgroup_tcp_skb *skel;
int service_fd = -1;
int cgroup_fd = -1;
int err;
skel = cgroup_tcp_skb__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_load"))
return;
err = setup_cgroup_environment();
if (!ASSERT_OK(err, "setup_cgroup_environment"))
goto cleanup;
cgroup_fd = create_and_get_cgroup(CGROUP_TCP_SKB_PATH);
if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
goto cleanup;
/* Scenario 1 */
err = install_filters(cgroup_fd, &egress_link, &ingress_link,
skel->progs.server_egress,
skel->progs.server_ingress,
skel);
if (!ASSERT_OK(err, "install_filters"))
goto cleanup;
err = talk_to_cgroup(&client_fd, &listen_fd, &service_fd, skel);
if (!ASSERT_OK(err, "talk_to_cgroup"))
goto cleanup;
err = close_connection(&client_fd, &service_fd, &listen_fd, skel);
if (!ASSERT_OK(err, "close_connection"))
goto cleanup;
ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected");
ASSERT_EQ(skel->bss->g_sock_state, CLOSED, "g_sock_state");
uninstall_filters(&egress_link, &ingress_link);
/* Scenario 2 */
err = install_filters(cgroup_fd, &egress_link, &ingress_link,
skel->progs.server_egress_srv,
skel->progs.server_ingress_srv,
			      skel);
	if (!ASSERT_OK(err, "install_filters"))
		goto cleanup;
err = talk_to_cgroup(&client_fd, &listen_fd, &service_fd, skel);
if (!ASSERT_OK(err, "talk_to_cgroup"))
goto cleanup;
err = close_connection(&service_fd, &client_fd, &listen_fd, skel);
if (!ASSERT_OK(err, "close_connection"))
goto cleanup;
ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected");
ASSERT_EQ(skel->bss->g_sock_state, TIME_WAIT, "g_sock_state");
uninstall_filters(&egress_link, &ingress_link);
/* Scenario 3 */
err = install_filters(cgroup_fd, &egress_link, &ingress_link,
skel->progs.client_egress_srv,
skel->progs.client_ingress_srv,
			      skel);
	if (!ASSERT_OK(err, "install_filters"))
		goto cleanup;
err = talk_to_outside(&client_fd, &listen_fd, &service_fd, skel);
if (!ASSERT_OK(err, "talk_to_outside"))
goto cleanup;
err = close_connection(&service_fd, &client_fd, &listen_fd, skel);
if (!ASSERT_OK(err, "close_connection"))
goto cleanup;
ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected");
ASSERT_EQ(skel->bss->g_sock_state, CLOSED, "g_sock_state");
uninstall_filters(&egress_link, &ingress_link);
/* Scenario 4 */
err = install_filters(cgroup_fd, &egress_link, &ingress_link,
skel->progs.client_egress,
skel->progs.client_ingress,
			      skel);
	if (!ASSERT_OK(err, "install_filters"))
		goto cleanup;
err = talk_to_outside(&client_fd, &listen_fd, &service_fd, skel);
if (!ASSERT_OK(err, "talk_to_outside"))
goto cleanup;
err = close_connection(&client_fd, &service_fd, &listen_fd, skel);
if (!ASSERT_OK(err, "close_connection"))
goto cleanup;
ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected");
ASSERT_EQ(skel->bss->g_sock_state, TIME_WAIT, "g_sock_state");
uninstall_filters(&egress_link, &ingress_link);
cleanup:
close(client_fd);
close(listen_fd);
close(service_fd);
close(cgroup_fd);
bpf_link__destroy(egress_link);
bpf_link__destroy(ingress_link);
cleanup_cgroup_environment();
cgroup_tcp_skb__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cgroup_tcp_skb.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "get_func_ip_test.skel.h"
#include "get_func_ip_uprobe_test.skel.h"
static noinline void uprobe_trigger(void)
{
}
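/* Attach the get_func_ip_test skeleton, trigger its programs through
 * bpf_prog_test_run_opts() and uprobe_trigger(), and verify that every
 * per-program result counter was set as expected.
 */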
static void test_function_entry(void)
{
struct get_func_ip_test *skel = NULL;
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
skel = get_func_ip_test__open();
if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
return;
err = get_func_ip_test__load(skel);
if (!ASSERT_OK(err, "get_func_ip_test__load"))
goto cleanup;
err = get_func_ip_test__attach(skel);
if (!ASSERT_OK(err, "get_func_ip_test__attach"))
goto cleanup;
skel->bss->uprobe_trigger = (unsigned long) uprobe_trigger;
prog_fd = bpf_program__fd(skel->progs.test1);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
prog_fd = bpf_program__fd(skel->progs.test5);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
uprobe_trigger();
ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
ASSERT_EQ(skel->bss->test2_result, 1, "test2_result");
ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
ASSERT_EQ(skel->bss->test5_result, 1, "test5_result");
ASSERT_EQ(skel->bss->test7_result, 1, "test7_result");
ASSERT_EQ(skel->bss->test8_result, 1, "test8_result");
cleanup:
get_func_ip_test__destroy(skel);
}
#ifdef __x86_64__
extern void uprobe_trigger_body(void);
asm(
".globl uprobe_trigger_body\n"
".type uprobe_trigger_body, @function\n"
"uprobe_trigger_body:\n"
" nop\n"
" ret\n"
);
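/* Attach test6 as a kprobe at an offset inside the body of
 * bpf_fentry_test6 (5 bytes, or 9 when CONFIG_X86_KERNEL_IBT inserts an
 * endbr), then run test1 and check that test6 reported the expected result.
 */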
static void test_function_body_kprobe(void)
{
struct get_func_ip_test *skel = NULL;
LIBBPF_OPTS(bpf_test_run_opts, topts);
LIBBPF_OPTS(bpf_kprobe_opts, kopts);
struct bpf_link *link6 = NULL;
int err, prog_fd;
skel = get_func_ip_test__open();
if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
return;
	/* test6 is x86_64 specific and is disabled by default,
	 * enable it for the body test.
	 */
bpf_program__set_autoload(skel->progs.test6, true);
err = get_func_ip_test__load(skel);
if (!ASSERT_OK(err, "get_func_ip_test__load"))
goto cleanup;
kopts.offset = skel->kconfig->CONFIG_X86_KERNEL_IBT ? 9 : 5;
link6 = bpf_program__attach_kprobe_opts(skel->progs.test6, "bpf_fentry_test6", &kopts);
if (!ASSERT_OK_PTR(link6, "link6"))
goto cleanup;
prog_fd = bpf_program__fd(skel->progs.test1);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
ASSERT_EQ(skel->bss->test6_result, 1, "test6_result");
cleanup:
bpf_link__destroy(link6);
get_func_ip_test__destroy(skel);
}
static void test_function_body_uprobe(void)
{
struct get_func_ip_uprobe_test *skel = NULL;
int err;
skel = get_func_ip_uprobe_test__open_and_load();
if (!ASSERT_OK_PTR(skel, "get_func_ip_uprobe_test__open_and_load"))
return;
err = get_func_ip_uprobe_test__attach(skel);
if (!ASSERT_OK(err, "get_func_ip_test__attach"))
goto cleanup;
skel->bss->uprobe_trigger_body = (unsigned long) uprobe_trigger_body;
uprobe_trigger_body();
ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
cleanup:
get_func_ip_uprobe_test__destroy(skel);
}
static void test_function_body(void)
{
test_function_body_kprobe();
test_function_body_uprobe();
}
#else
#define test_function_body()
#endif
void test_get_func_ip_test(void)
{
test_function_entry();
test_function_body();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
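/* Attach a tracepoint program through perf_event_open() and verify that
 * bpf_task_fd_query() on the perf event fd reports BPF_FD_TYPE_TRACEPOINT
 * and the expected tracepoint name.
 */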
static void test_task_fd_query_tp_core(const char *probe_name,
const char *tp_name)
{
const char *file = "./test_tracepoint.bpf.o";
int err, bytes, efd, prog_fd, pmu_fd;
struct perf_event_attr attr = {};
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
struct bpf_object *obj = NULL;
__u32 duration = 0;
char buf[256];
err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno))
goto close_prog;
if (access("/sys/kernel/tracing/trace", F_OK) == 0) {
snprintf(buf, sizeof(buf),
"/sys/kernel/tracing/events/%s/id", probe_name);
} else {
snprintf(buf, sizeof(buf),
"/sys/kernel/debug/tracing/events/%s/id", probe_name);
}
efd = open(buf, O_RDONLY, 0);
if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
goto close_prog;
bytes = read(efd, buf, sizeof(buf));
close(efd);
if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
"bytes %d errno %d\n", bytes, errno))
goto close_prog;
	buf[bytes] = '\0';
	attr.config = strtol(buf, NULL, 0);
attr.type = PERF_TYPE_TRACEPOINT;
attr.sample_type = PERF_SAMPLE_RAW;
attr.sample_period = 1;
attr.wakeup_events = 1;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))
goto close_pmu;
err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
errno))
goto close_pmu;
err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
errno))
goto close_pmu;
/* query (getpid(), pmu_fd) */
len = sizeof(buf);
err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
&fd_type, &probe_offset, &probe_addr);
if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
errno))
goto close_pmu;
err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
fd_type, buf))
goto close_pmu;
close_pmu:
close(pmu_fd);
close_prog:
bpf_object__close(obj);
}
void test_task_fd_query_tp(void)
{
test_task_fd_query_tp_core("sched/sched_switch",
"sched_switch");
test_task_fd_query_tp_core("syscalls/sys_enter_read",
"sys_enter_read");
}
| linux-master | tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include "fentry_test.lskel.h"
#include "fentry_many_args.skel.h"
static int fentry_test_common(struct fentry_test_lskel *fentry_skel)
{
int err, prog_fd, i;
int link_fd;
__u64 *result;
LIBBPF_OPTS(bpf_test_run_opts, topts);
err = fentry_test_lskel__attach(fentry_skel);
if (!ASSERT_OK(err, "fentry_attach"))
return err;
/* Check that already linked program can't be attached again. */
link_fd = fentry_test_lskel__test1__attach(fentry_skel);
if (!ASSERT_LT(link_fd, 0, "fentry_attach_link"))
return -1;
prog_fd = fentry_skel->progs.test1.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
result = (__u64 *)fentry_skel->bss;
for (i = 0; i < sizeof(*fentry_skel->bss) / sizeof(__u64); i++) {
if (!ASSERT_EQ(result[i], 1, "fentry_result"))
return -1;
}
fentry_test_lskel__detach(fentry_skel);
/* zero results for re-attach test */
memset(fentry_skel->bss, 0, sizeof(*fentry_skel->bss));
return 0;
}
static void fentry_test(void)
{
struct fentry_test_lskel *fentry_skel = NULL;
int err;
fentry_skel = fentry_test_lskel__open_and_load();
if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load"))
goto cleanup;
err = fentry_test_common(fentry_skel);
if (!ASSERT_OK(err, "fentry_first_attach"))
goto cleanup;
err = fentry_test_common(fentry_skel);
ASSERT_OK(err, "fentry_second_attach");
cleanup:
fentry_test_lskel__destroy(fentry_skel);
}
static void fentry_many_args(void)
{
struct fentry_many_args *fentry_skel = NULL;
int err;
fentry_skel = fentry_many_args__open_and_load();
if (!ASSERT_OK_PTR(fentry_skel, "fentry_many_args_skel_load"))
goto cleanup;
err = fentry_many_args__attach(fentry_skel);
if (!ASSERT_OK(err, "fentry_many_args_attach"))
goto cleanup;
ASSERT_OK(trigger_module_test_read(1), "trigger_read");
ASSERT_EQ(fentry_skel->bss->test1_result, 1,
"fentry_many_args_result1");
ASSERT_EQ(fentry_skel->bss->test2_result, 1,
"fentry_many_args_result2");
ASSERT_EQ(fentry_skel->bss->test3_result, 1,
"fentry_many_args_result3");
cleanup:
fentry_many_args__destroy(fentry_skel);
}
void test_fentry_test(void)
{
if (test__start_subtest("fentry"))
fentry_test();
if (test__start_subtest("fentry_many_args"))
fentry_many_args();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/fentry_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "timer_crash.skel.h"
enum {
MODE_ARRAY,
MODE_HASH,
};
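/* Load the timer_crash skeleton with the requested map type, attach it,
 * and give the programs a chance to run (usleep); the test passes as long
 * as nothing crashes.
 */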
static void test_timer_crash_mode(int mode)
{
struct timer_crash *skel;
skel = timer_crash__open_and_load();
if (!ASSERT_OK_PTR(skel, "timer_crash__open_and_load"))
return;
skel->bss->pid = getpid();
skel->bss->crash_map = mode;
if (!ASSERT_OK(timer_crash__attach(skel), "timer_crash__attach"))
goto end;
usleep(1);
end:
timer_crash__destroy(skel);
}
void test_timer_crash(void)
{
if (test__start_subtest("array"))
test_timer_crash_mode(MODE_ARRAY);
if (test__start_subtest("hash"))
test_timer_crash_mode(MODE_HASH);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/timer_crash.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Bytedance */
#include <test_progs.h>
#include "test_access_variable_array.skel.h"
void test_access_variable_array(void)
{
struct test_access_variable_array *skel;
skel = test_access_variable_array__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_access_variable_array__open_and_load"))
return;
test_access_variable_array__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/access_variable_array.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#define _GNU_SOURCE
#include <linux/compiler.h>
#include <linux/ring_buffer.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <test_progs.h>
#include <uapi/linux/bpf.h>
#include <unistd.h>
#include "user_ringbuf_fail.skel.h"
#include "user_ringbuf_success.skel.h"
#include "../progs/test_user_ringbuf.h"
static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
static const long c_ringbuf_size = 1 << 12; /* 1 small page */
static const long c_max_entries = c_ringbuf_size / c_sample_size;
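/* The kernel-side program is triggered by this process's syscalls;
 * calling getpgid() kicks it so it drains any pending user ring buffer
 * samples.
 */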
static void drain_current_samples(void)
{
syscall(__NR_getpgid);
}
static int write_samples(struct user_ring_buffer *ringbuf, uint32_t num_samples)
{
int i, err = 0;
/* Write some number of samples to the ring buffer. */
for (i = 0; i < num_samples; i++) {
struct sample *entry;
int read;
entry = user_ring_buffer__reserve(ringbuf, sizeof(*entry));
if (!entry) {
err = -errno;
goto done;
}
entry->pid = getpid();
entry->seq = i;
entry->value = i * i;
read = snprintf(entry->comm, sizeof(entry->comm), "%u", i);
if (read <= 0) {
/* Assert on the error path to avoid spamming logs with
* mostly success messages.
*/
ASSERT_GT(read, 0, "snprintf_comm");
err = read;
user_ring_buffer__discard(ringbuf, entry);
goto done;
}
user_ring_buffer__submit(ringbuf, entry);
}
done:
drain_current_samples();
return err;
}
static struct user_ringbuf_success *open_load_ringbuf_skel(void)
{
struct user_ringbuf_success *skel;
int err;
skel = user_ringbuf_success__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return NULL;
err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size);
if (!ASSERT_OK(err, "set_max_entries"))
goto cleanup;
err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size);
if (!ASSERT_OK(err, "set_max_entries"))
goto cleanup;
err = user_ringbuf_success__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
return skel;
cleanup:
user_ringbuf_success__destroy(skel);
return NULL;
}
static void test_user_ringbuf_mappings(void)
{
int err, rb_fd;
int page_size = getpagesize();
void *mmap_ptr;
struct user_ringbuf_success *skel;
skel = open_load_ringbuf_skel();
if (!skel)
return;
rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
/* cons_pos can be mapped R/O, can't add +X with mprotect. */
mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
ASSERT_OK_PTR(mmap_ptr, "ro_cons_pos");
ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_cons_pos_protect");
ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "wr_prod_pos");
err = -errno;
ASSERT_ERR(err, "wr_prod_pos_err");
ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro_cons");
/* prod_pos can be mapped RW, can't add +X with mprotect. */
mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
rb_fd, page_size);
ASSERT_OK_PTR(mmap_ptr, "rw_prod_pos");
ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_prod_pos_protect");
err = -errno;
ASSERT_ERR(err, "wr_prod_pos_err");
ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_prod");
/* data pages can be mapped RW, can't add +X with mprotect. */
mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd,
2 * page_size);
ASSERT_OK_PTR(mmap_ptr, "rw_data");
ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_data_protect");
err = -errno;
ASSERT_ERR(err, "exec_data_err");
ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_data");
user_ringbuf_success__destroy(skel);
}
static int load_skel_create_ringbufs(struct user_ringbuf_success **skel_out,
struct ring_buffer **kern_ringbuf_out,
ring_buffer_sample_fn callback,
struct user_ring_buffer **user_ringbuf_out)
{
struct user_ringbuf_success *skel;
struct ring_buffer *kern_ringbuf = NULL;
struct user_ring_buffer *user_ringbuf = NULL;
int err = -ENOMEM, rb_fd;
skel = open_load_ringbuf_skel();
if (!skel)
return err;
/* only trigger BPF program for current process */
skel->bss->pid = getpid();
if (kern_ringbuf_out) {
rb_fd = bpf_map__fd(skel->maps.kernel_ringbuf);
kern_ringbuf = ring_buffer__new(rb_fd, callback, skel, NULL);
if (!ASSERT_OK_PTR(kern_ringbuf, "kern_ringbuf_create"))
goto cleanup;
*kern_ringbuf_out = kern_ringbuf;
}
if (user_ringbuf_out) {
rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
user_ringbuf = user_ring_buffer__new(rb_fd, NULL);
if (!ASSERT_OK_PTR(user_ringbuf, "user_ringbuf_create"))
goto cleanup;
*user_ringbuf_out = user_ringbuf;
ASSERT_EQ(skel->bss->read, 0, "no_reads_after_load");
}
err = user_ringbuf_success__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
*skel_out = skel;
return 0;
cleanup:
if (kern_ringbuf_out)
*kern_ringbuf_out = NULL;
if (user_ringbuf_out)
*user_ringbuf_out = NULL;
ring_buffer__free(kern_ringbuf);
user_ring_buffer__free(user_ringbuf);
user_ringbuf_success__destroy(skel);
return err;
}
static int load_skel_create_user_ringbuf(struct user_ringbuf_success **skel_out,
struct user_ring_buffer **ringbuf_out)
{
return load_skel_create_ringbufs(skel_out, NULL, NULL, ringbuf_out);
}
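/* Bypass libbpf and write a raw sample header directly through the
 * mmap'ed producer_pos and data pages, then verify the kernel rejects the
 * malformed sample with the expected error and reads nothing.
 */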
static void manually_write_test_invalid_sample(struct user_ringbuf_success *skel,
__u32 size, __u64 producer_pos, int err)
{
void *data_ptr;
__u64 *producer_pos_ptr;
int rb_fd, page_size = getpagesize();
rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
ASSERT_EQ(skel->bss->read, 0, "num_samples_before_bad_sample");
/* Map the producer_pos as RW. */
producer_pos_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
MAP_SHARED, rb_fd, page_size);
ASSERT_OK_PTR(producer_pos_ptr, "producer_pos_ptr");
/* Map the data pages as RW. */
data_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
ASSERT_OK_PTR(data_ptr, "rw_data");
memset(data_ptr, 0, BPF_RINGBUF_HDR_SZ);
*(__u32 *)data_ptr = size;
/* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in the kernel. */
smp_store_release(producer_pos_ptr, producer_pos + BPF_RINGBUF_HDR_SZ);
drain_current_samples();
ASSERT_EQ(skel->bss->read, 0, "num_samples_after_bad_sample");
ASSERT_EQ(skel->bss->err, err, "err_after_bad_sample");
ASSERT_OK(munmap(producer_pos_ptr, page_size), "unmap_producer_pos");
ASSERT_OK(munmap(data_ptr, page_size), "unmap_data_ptr");
}
static void test_user_ringbuf_post_misaligned(void)
{
struct user_ringbuf_success *skel;
struct user_ring_buffer *ringbuf;
int err;
__u32 size = (1 << 5) + 7;
err = load_skel_create_user_ringbuf(&skel, &ringbuf);
if (!ASSERT_OK(err, "misaligned_skel"))
return;
manually_write_test_invalid_sample(skel, size, size, -EINVAL);
user_ring_buffer__free(ringbuf);
user_ringbuf_success__destroy(skel);
}
static void test_user_ringbuf_post_producer_wrong_offset(void)
{
struct user_ringbuf_success *skel;
struct user_ring_buffer *ringbuf;
int err;
__u32 size = (1 << 5);
err = load_skel_create_user_ringbuf(&skel, &ringbuf);
if (!ASSERT_OK(err, "wrong_offset_skel"))
return;
manually_write_test_invalid_sample(skel, size, size - 8, -EINVAL);
user_ring_buffer__free(ringbuf);
user_ringbuf_success__destroy(skel);
}
static void test_user_ringbuf_post_larger_than_ringbuf_sz(void)
{
struct user_ringbuf_success *skel;
struct user_ring_buffer *ringbuf;
int err;
__u32 size = c_ringbuf_size;
err = load_skel_create_user_ringbuf(&skel, &ringbuf);
if (!ASSERT_OK(err, "huge_sample_skel"))
return;
manually_write_test_invalid_sample(skel, size, size, -E2BIG);
user_ring_buffer__free(ringbuf);
user_ringbuf_success__destroy(skel);
}
static void test_user_ringbuf_basic(void)
{
struct user_ringbuf_success *skel;
struct user_ring_buffer *ringbuf;
int err;
err = load_skel_create_user_ringbuf(&skel, &ringbuf);
if (!ASSERT_OK(err, "ringbuf_basic_skel"))
return;
ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
err = write_samples(ringbuf, 2);
if (!ASSERT_OK(err, "write_samples"))
goto cleanup;
ASSERT_EQ(skel->bss->read, 2, "num_samples_read_after");
cleanup:
user_ring_buffer__free(ringbuf);
user_ringbuf_success__destroy(skel);
}
static void test_user_ringbuf_sample_full_ring_buffer(void)
{
struct user_ringbuf_success *skel;
struct user_ring_buffer *ringbuf;
int err;
void *sample;
err = load_skel_create_user_ringbuf(&skel, &ringbuf);
if (!ASSERT_OK(err, "ringbuf_full_sample_skel"))
return;
sample = user_ring_buffer__reserve(ringbuf, c_ringbuf_size - BPF_RINGBUF_HDR_SZ);
if (!ASSERT_OK_PTR(sample, "full_sample"))
goto cleanup;
user_ring_buffer__submit(ringbuf, sample);
ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
drain_current_samples();
ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");
cleanup:
user_ring_buffer__free(ringbuf);
user_ringbuf_success__destroy(skel);
}
static void test_user_ringbuf_post_alignment_autoadjust(void)
{
struct user_ringbuf_success *skel;
struct user_ring_buffer *ringbuf;
struct sample *sample;
int err;
err = load_skel_create_user_ringbuf(&skel, &ringbuf);
if (!ASSERT_OK(err, "ringbuf_align_autoadjust_skel"))
return;
/* libbpf should automatically round any sample up to an 8-byte alignment. */
sample = user_ring_buffer__reserve(ringbuf, sizeof(*sample) + 1);
ASSERT_OK_PTR(sample, "reserve_autoaligned");
user_ring_buffer__submit(ringbuf, sample);
ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
drain_current_samples();
ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");
user_ring_buffer__free(ringbuf);
user_ringbuf_success__destroy(skel);
}
static void test_user_ringbuf_overfill(void)
{
struct user_ringbuf_success *skel;
struct user_ring_buffer *ringbuf;
int err;
err = load_skel_create_user_ringbuf(&skel, &ringbuf);
if (err)
return;
err = write_samples(ringbuf, c_max_entries * 5);
ASSERT_ERR(err, "write_samples");
ASSERT_EQ(skel->bss->read, c_max_entries, "max_entries");
user_ring_buffer__free(ringbuf);
user_ringbuf_success__destroy(skel);
}
static void test_user_ringbuf_discards_properly_ignored(void)
{
struct user_ringbuf_success *skel;
struct user_ring_buffer *ringbuf;
int err, num_discarded = 0;
__u64 *token;
err = load_skel_create_user_ringbuf(&skel, &ringbuf);
if (err)
return;
ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
while (1) {
/* Write samples until the buffer is full. */
token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
if (!token)
break;
user_ring_buffer__discard(ringbuf, token);
num_discarded++;
}
if (!ASSERT_GE(num_discarded, 0, "num_discarded"))
goto cleanup;
/* Should not read any samples, as they are all discarded. */
ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");
drain_current_samples();
ASSERT_EQ(skel->bss->read, 0, "num_post_kick");
/* Now that the ring buffer has been drained, we should be able to
* reserve another token.
*/
token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
if (!ASSERT_OK_PTR(token, "new_token"))
goto cleanup;
user_ring_buffer__discard(ringbuf, token);
cleanup:
user_ring_buffer__free(ringbuf);
user_ringbuf_success__destroy(skel);
}
static void test_user_ringbuf_loop(void)
{
struct user_ringbuf_success *skel;
struct user_ring_buffer *ringbuf;
uint32_t total_samples = 8192;
uint32_t remaining_samples = total_samples;
int err;
BUILD_BUG_ON(total_samples <= c_max_entries);
err = load_skel_create_user_ringbuf(&skel, &ringbuf);
if (err)
return;
do {
uint32_t curr_samples;
curr_samples = remaining_samples > c_max_entries
? c_max_entries : remaining_samples;
err = write_samples(ringbuf, curr_samples);
if (err != 0) {
/* Assert inside of if statement to avoid flooding logs
* on the success path.
*/
ASSERT_OK(err, "write_samples");
goto cleanup;
}
remaining_samples -= curr_samples;
ASSERT_EQ(skel->bss->read, total_samples - remaining_samples,
"current_batched_entries");
} while (remaining_samples > 0);
ASSERT_EQ(skel->bss->read, total_samples, "total_batched_entries");
cleanup:
user_ring_buffer__free(ringbuf);
user_ringbuf_success__destroy(skel);
}
static int send_test_message(struct user_ring_buffer *ringbuf,
enum test_msg_op op, s64 operand_64,
s32 operand_32)
{
struct test_msg *msg;
msg = user_ring_buffer__reserve(ringbuf, sizeof(*msg));
if (!msg) {
/* Assert on the error path to avoid spamming logs with mostly
* success messages.
*/
ASSERT_OK_PTR(msg, "reserve_msg");
return -ENOMEM;
}
msg->msg_op = op;
switch (op) {
case TEST_MSG_OP_INC64:
case TEST_MSG_OP_MUL64:
msg->operand_64 = operand_64;
break;
case TEST_MSG_OP_INC32:
case TEST_MSG_OP_MUL32:
msg->operand_32 = operand_32;
break;
default:
PRINT_FAIL("Invalid operand %d\n", op);
user_ring_buffer__discard(ringbuf, msg);
return -EINVAL;
}
user_ring_buffer__submit(ringbuf, msg);
return 0;
}
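/* prctl() is another syscall the kernel-side program is attached to; use
 * it to make the kernel consume the messages posted above.
 */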
static void kick_kernel_read_messages(void)
{
syscall(__NR_prctl);
}
static int handle_kernel_msg(void *ctx, void *data, size_t len)
{
struct user_ringbuf_success *skel = ctx;
struct test_msg *msg = data;
switch (msg->msg_op) {
case TEST_MSG_OP_INC64:
skel->bss->user_mutated += msg->operand_64;
return 0;
case TEST_MSG_OP_INC32:
skel->bss->user_mutated += msg->operand_32;
return 0;
case TEST_MSG_OP_MUL64:
skel->bss->user_mutated *= msg->operand_64;
return 0;
case TEST_MSG_OP_MUL32:
skel->bss->user_mutated *= msg->operand_32;
return 0;
default:
fprintf(stderr, "Invalid operand %d\n", msg->msg_op);
return -EINVAL;
}
}
static void drain_kernel_messages_buffer(struct ring_buffer *kern_ringbuf,
struct user_ringbuf_success *skel)
{
int cnt;
cnt = ring_buffer__consume(kern_ringbuf);
ASSERT_EQ(cnt, 8, "consume_kern_ringbuf");
ASSERT_OK(skel->bss->err, "consume_kern_ringbuf_err");
}
static void test_user_ringbuf_msg_protocol(void)
{
struct user_ringbuf_success *skel;
struct user_ring_buffer *user_ringbuf;
struct ring_buffer *kern_ringbuf;
int err, i;
__u64 expected_kern = 0;
err = load_skel_create_ringbufs(&skel, &kern_ringbuf, handle_kernel_msg, &user_ringbuf);
if (!ASSERT_OK(err, "create_ringbufs"))
return;
for (i = 0; i < 64; i++) {
enum test_msg_op op = i % TEST_MSG_OP_NUM_OPS;
__u64 operand_64 = TEST_OP_64;
__u32 operand_32 = TEST_OP_32;
err = send_test_message(user_ringbuf, op, operand_64, operand_32);
if (err) {
/* Only assert on a failure to avoid spamming success logs. */
ASSERT_OK(err, "send_test_message");
goto cleanup;
}
switch (op) {
case TEST_MSG_OP_INC64:
expected_kern += operand_64;
break;
case TEST_MSG_OP_INC32:
expected_kern += operand_32;
break;
case TEST_MSG_OP_MUL64:
expected_kern *= operand_64;
break;
case TEST_MSG_OP_MUL32:
expected_kern *= operand_32;
break;
default:
PRINT_FAIL("Unexpected op %d\n", op);
goto cleanup;
}
if (i % 8 == 0) {
kick_kernel_read_messages();
ASSERT_EQ(skel->bss->kern_mutated, expected_kern, "expected_kern");
ASSERT_EQ(skel->bss->err, 0, "bpf_prog_err");
drain_kernel_messages_buffer(kern_ringbuf, skel);
}
}
cleanup:
ring_buffer__free(kern_ringbuf);
user_ring_buffer__free(user_ringbuf);
user_ringbuf_success__destroy(skel);
}
static void *kick_kernel_cb(void *arg)
{
/* Kick the kernel, causing it to drain the ring buffer and then wake
* up the test thread waiting on epoll.
*/
syscall(__NR_prlimit64);
return NULL;
}
static int spawn_kick_thread_for_poll(void)
{
pthread_t thread;
return pthread_create(&thread, NULL, kick_kernel_cb, NULL);
}
static void test_user_ringbuf_blocking_reserve(void)
{
struct user_ringbuf_success *skel;
struct user_ring_buffer *ringbuf;
int err, num_written = 0;
__u64 *token;
err = load_skel_create_user_ringbuf(&skel, &ringbuf);
if (err)
return;
ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
while (1) {
/* Write samples until the buffer is full. */
token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
if (!token)
break;
*token = 0xdeadbeef;
user_ring_buffer__submit(ringbuf, token);
num_written++;
}
if (!ASSERT_GE(num_written, 0, "num_written"))
goto cleanup;
/* Should not have read any samples until the kernel is kicked. */
ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");
/* We correctly time out after 1 second, without a sample. */
token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 1000);
if (!ASSERT_EQ(token, NULL, "pre_kick_timeout_token"))
goto cleanup;
err = spawn_kick_thread_for_poll();
if (!ASSERT_EQ(err, 0, "deferred_kick_thread\n"))
goto cleanup;
	/* After spawning another thread that asynchronously kicks the kernel to
* drain the messages, we're able to block and successfully get a
* sample once we receive an event notification.
*/
token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 10000);
if (!ASSERT_OK_PTR(token, "block_token"))
goto cleanup;
ASSERT_GT(skel->bss->read, 0, "num_post_kill");
ASSERT_LE(skel->bss->read, num_written, "num_post_kill");
ASSERT_EQ(skel->bss->err, 0, "err_post_poll");
user_ring_buffer__discard(ringbuf, token);
cleanup:
user_ring_buffer__free(ringbuf);
user_ringbuf_success__destroy(skel);
}
#define SUCCESS_TEST(_func) { _func, #_func }
static struct {
void (*test_callback)(void);
const char *test_name;
} success_tests[] = {
SUCCESS_TEST(test_user_ringbuf_mappings),
SUCCESS_TEST(test_user_ringbuf_post_misaligned),
SUCCESS_TEST(test_user_ringbuf_post_producer_wrong_offset),
SUCCESS_TEST(test_user_ringbuf_post_larger_than_ringbuf_sz),
SUCCESS_TEST(test_user_ringbuf_basic),
SUCCESS_TEST(test_user_ringbuf_sample_full_ring_buffer),
SUCCESS_TEST(test_user_ringbuf_post_alignment_autoadjust),
SUCCESS_TEST(test_user_ringbuf_overfill),
SUCCESS_TEST(test_user_ringbuf_discards_properly_ignored),
SUCCESS_TEST(test_user_ringbuf_loop),
SUCCESS_TEST(test_user_ringbuf_msg_protocol),
SUCCESS_TEST(test_user_ringbuf_blocking_reserve),
};
void test_user_ringbuf(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
if (!test__start_subtest(success_tests[i].test_name))
continue;
success_tests[i].test_callback();
}
RUN_TESTS(user_ringbuf_fail);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/user_ringbuf.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
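/* Feed a __sk_buff context into bpf_prog_test_run_opts(), check that bogus
 * context sizes and non-writable fields are rejected, and verify that the
 * fields the program modifies round-trip back to user space.
 */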
void test_skb_ctx(void)
{
struct __sk_buff skb = {
.cb[0] = 1,
.cb[1] = 2,
.cb[2] = 3,
.cb[3] = 4,
.cb[4] = 5,
.priority = 6,
.ingress_ifindex = 11,
.ifindex = 1,
.tstamp = 7,
.wire_len = 100,
.gso_segs = 8,
.mark = 9,
.gso_size = 10,
.hwtstamp = 11,
};
LIBBPF_OPTS(bpf_test_run_opts, tattr,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.ctx_in = &skb,
.ctx_size_in = sizeof(skb),
.ctx_out = &skb,
.ctx_size_out = sizeof(skb),
);
struct bpf_object *obj;
int err, prog_fd, i;
err = bpf_prog_test_load("./test_skb_ctx.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd);
if (!ASSERT_OK(err, "load"))
return;
/* ctx_in != NULL, ctx_size_in == 0 */
tattr.ctx_size_in = 0;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_NEQ(err, 0, "ctx_size_in");
tattr.ctx_size_in = sizeof(skb);
/* ctx_out != NULL, ctx_size_out == 0 */
tattr.ctx_size_out = 0;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_NEQ(err, 0, "ctx_size_out");
tattr.ctx_size_out = sizeof(skb);
	/* non-zero [len, tc_index] fields should be rejected */
skb.len = 1;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_NEQ(err, 0, "len");
skb.len = 0;
skb.tc_index = 1;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_NEQ(err, 0, "tc_index");
skb.tc_index = 0;
/* non-zero [hash, sk] fields should be rejected */
skb.hash = 1;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_NEQ(err, 0, "hash");
skb.hash = 0;
skb.sk = (struct bpf_sock *)1;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_NEQ(err, 0, "sk");
skb.sk = 0;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_OK(err, "test_run");
ASSERT_OK(tattr.retval, "test_run retval");
ASSERT_EQ(tattr.ctx_size_out, sizeof(skb), "ctx_size_out");
for (i = 0; i < 5; i++)
ASSERT_EQ(skb.cb[i], i + 2, "ctx_out_cb");
ASSERT_EQ(skb.priority, 7, "ctx_out_priority");
ASSERT_EQ(skb.ifindex, 1, "ctx_out_ifindex");
ASSERT_EQ(skb.ingress_ifindex, 11, "ctx_out_ingress_ifindex");
ASSERT_EQ(skb.tstamp, 8, "ctx_out_tstamp");
ASSERT_EQ(skb.mark, 10, "ctx_out_mark");
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/skb_ctx.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "progs/core_reloc_types.h"
#include "bpf_testmod/bpf_testmod.h"
#include <linux/limits.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <bpf/btf.h>
static int duration = 0;
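/* The macros below expand into core_reloc_test_case initializers; each
 * relocation family (FLAVORS, NESTING, ARRAYS, ...) shares a *_CASE_COMMON
 * helper and defines both success and error-case variants.
 */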
#define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name)
#define MODULES_CASE(name, pg_name, tp_name) { \
.case_name = name, \
.bpf_obj_file = "test_core_reloc_module.bpf.o", \
.btf_src_file = NULL, /* find in kernel module BTFs */ \
.input = "", \
.input_len = 0, \
.output = STRUCT_TO_CHAR_PTR(core_reloc_module_output) { \
.read_ctx_sz = sizeof(struct bpf_testmod_test_read_ctx),\
.read_ctx_exists = true, \
.buf_exists = true, \
.len_exists = true, \
.off_exists = true, \
.len = 123, \
.off = 0, \
.comm = "test_progs", \
.comm_len = sizeof("test_progs"), \
}, \
.output_len = sizeof(struct core_reloc_module_output), \
.prog_name = pg_name, \
.raw_tp_name = tp_name, \
.trigger = __trigger_module_test_read, \
.needs_testmod = true, \
}
#define FLAVORS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
.a = 42, \
.b = 0xc001, \
.c = 0xbeef, \
}
#define FLAVORS_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_flavors.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_flavors" \
#define FLAVORS_CASE(name) { \
FLAVORS_CASE_COMMON(name), \
.input = FLAVORS_DATA(core_reloc_##name), \
.input_len = sizeof(struct core_reloc_##name), \
.output = FLAVORS_DATA(core_reloc_flavors), \
.output_len = sizeof(struct core_reloc_flavors), \
}
#define FLAVORS_ERR_CASE(name) { \
FLAVORS_CASE_COMMON(name), \
.fails = true, \
}
#define NESTING_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
.a = { .a = { .a = 42 } }, \
.b = { .b = { .b = 0xc001 } }, \
}
#define NESTING_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_nesting.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_nesting" \
#define NESTING_CASE(name) { \
NESTING_CASE_COMMON(name), \
.input = NESTING_DATA(core_reloc_##name), \
.input_len = sizeof(struct core_reloc_##name), \
.output = NESTING_DATA(core_reloc_nesting), \
.output_len = sizeof(struct core_reloc_nesting) \
}
#define NESTING_ERR_CASE(name) { \
NESTING_CASE_COMMON(name), \
.fails = true, \
.run_btfgen_fails = true, \
}
#define ARRAYS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
.a = { [2] = 1 }, \
.b = { [1] = { [2] = { [3] = 2 } } }, \
.c = { [1] = { .c = 3 } }, \
.d = { [0] = { [0] = { .d = 4 } } }, \
}
#define ARRAYS_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_arrays.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_arrays" \
#define ARRAYS_CASE(name) { \
ARRAYS_CASE_COMMON(name), \
.input = ARRAYS_DATA(core_reloc_##name), \
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_arrays_output) { \
.a2 = 1, \
.b123 = 2, \
.c1c = 3, \
.d00d = 4, \
.f10c = 0, \
}, \
.output_len = sizeof(struct core_reloc_arrays_output) \
}
#define ARRAYS_ERR_CASE(name) { \
ARRAYS_CASE_COMMON(name), \
.fails = true, \
}
#define PRIMITIVES_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
.a = 1, \
.b = 2, \
.c = 3, \
.d = (void *)4, \
.f = (void *)5, \
}
#define PRIMITIVES_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_primitives.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_primitives" \
#define PRIMITIVES_CASE(name) { \
PRIMITIVES_CASE_COMMON(name), \
.input = PRIMITIVES_DATA(core_reloc_##name), \
.input_len = sizeof(struct core_reloc_##name), \
.output = PRIMITIVES_DATA(core_reloc_primitives), \
.output_len = sizeof(struct core_reloc_primitives), \
}
#define PRIMITIVES_ERR_CASE(name) { \
PRIMITIVES_CASE_COMMON(name), \
.fails = true, \
}
#define MODS_CASE(name) { \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_mods.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) { \
.a = 1, \
.b = 2, \
.c = (void *)3, \
.d = (void *)4, \
.e = { [2] = 5 }, \
.f = { [1] = 6 }, \
.g = { .x = 7 }, \
.h = { .y = 8 }, \
}, \
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_mods_output) { \
.a = 1, .b = 2, .c = 3, .d = 4, \
.e = 5, .f = 6, .g = 7, .h = 8, \
}, \
.output_len = sizeof(struct core_reloc_mods_output), \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_mods", \
}
#define PTR_AS_ARR_CASE(name) { \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_ptr_as_arr.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.input = (const char *)&(struct core_reloc_##name []){ \
{ .a = 1 }, \
{ .a = 2 }, \
{ .a = 3 }, \
}, \
.input_len = 3 * sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_ptr_as_arr) { \
.a = 3, \
}, \
.output_len = sizeof(struct core_reloc_ptr_as_arr), \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_ptr_as_arr", \
}
#define INTS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
.u8_field = 1, \
.s8_field = 2, \
.u16_field = 3, \
.s16_field = 4, \
.u32_field = 5, \
.s32_field = 6, \
.u64_field = 7, \
.s64_field = 8, \
}
#define INTS_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_ints.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_ints"
#define INTS_CASE(name) { \
INTS_CASE_COMMON(name), \
.input = INTS_DATA(core_reloc_##name), \
.input_len = sizeof(struct core_reloc_##name), \
.output = INTS_DATA(core_reloc_ints), \
.output_len = sizeof(struct core_reloc_ints), \
}
#define INTS_ERR_CASE(name) { \
INTS_CASE_COMMON(name), \
.fails = true, \
}
#define FIELD_EXISTS_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_existence.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_existence"
#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \
.case_name = test_name_prefix#name, \
.bpf_obj_file = objfile, \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o"
#define BITFIELDS_CASE(name, ...) { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.bpf.o", \
"probed:", name), \
.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
__VA_ARGS__, \
.output_len = sizeof(struct core_reloc_bitfields_output), \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_bitfields", \
}, { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.bpf.o", \
"direct:", name), \
.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
__VA_ARGS__, \
.output_len = sizeof(struct core_reloc_bitfields_output), \
.prog_name = "test_core_bitfields_direct", \
}
#define BITFIELDS_ERR_CASE(name) { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.bpf.o", \
"probed:", name), \
.fails = true, \
.run_btfgen_fails = true, \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_bitfields", \
}, { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.bpf.o", \
"direct:", name), \
.fails = true, \
.run_btfgen_fails = true, \
.prog_name = "test_core_bitfields_direct", \
}
#define SIZE_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_size.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_size"
#define SIZE_OUTPUT_DATA(type) \
STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \
.int_sz = sizeof(((type *)0)->int_field), \
.int_off = offsetof(type, int_field), \
.struct_sz = sizeof(((type *)0)->struct_field), \
.struct_off = offsetof(type, struct_field), \
.union_sz = sizeof(((type *)0)->union_field), \
.union_off = offsetof(type, union_field), \
.arr_sz = sizeof(((type *)0)->arr_field), \
.arr_off = offsetof(type, arr_field), \
.arr_elem_sz = sizeof(((type *)0)->arr_field[1]), \
.arr_elem_off = offsetof(type, arr_field[1]), \
.ptr_sz = 8, /* always 8-byte pointer for BPF */ \
.ptr_off = offsetof(type, ptr_field), \
.enum_sz = sizeof(((type *)0)->enum_field), \
.enum_off = offsetof(type, enum_field), \
.float_sz = sizeof(((type *)0)->float_field), \
.float_off = offsetof(type, float_field), \
}
#define SIZE_CASE(name) { \
SIZE_CASE_COMMON(name), \
.input_len = 0, \
.output = SIZE_OUTPUT_DATA(struct core_reloc_##name), \
.output_len = sizeof(struct core_reloc_size_output), \
}
#define SIZE_ERR_CASE(name) { \
SIZE_CASE_COMMON(name), \
.fails = true, \
.run_btfgen_fails = true, \
}
#define TYPE_BASED_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_type_based.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_type_based"
#define TYPE_BASED_CASE(name, ...) { \
TYPE_BASED_CASE_COMMON(name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_type_based_output) \
__VA_ARGS__, \
.output_len = sizeof(struct core_reloc_type_based_output), \
}
#define TYPE_BASED_ERR_CASE(name) { \
TYPE_BASED_CASE_COMMON(name), \
.fails = true, \
}
#define TYPE_ID_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_type_id.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_type_id"
#define TYPE_ID_CASE(name, setup_fn) { \
TYPE_ID_CASE_COMMON(name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_type_id_output) {}, \
.output_len = sizeof(struct core_reloc_type_id_output), \
.setup = setup_fn, \
}
#define TYPE_ID_ERR_CASE(name) { \
TYPE_ID_CASE_COMMON(name), \
.fails = true, \
}
#define ENUMVAL_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_enumval.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_enumval"
#define ENUMVAL_CASE(name, ...) { \
ENUMVAL_CASE_COMMON(name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_enumval_output) \
__VA_ARGS__, \
.output_len = sizeof(struct core_reloc_enumval_output), \
}
#define ENUMVAL_ERR_CASE(name) { \
ENUMVAL_CASE_COMMON(name), \
.fails = true, \
}
#define ENUM64VAL_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_enum64val.bpf.o", \
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_enum64val"
#define ENUM64VAL_CASE(name, ...) { \
ENUM64VAL_CASE_COMMON(name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_enum64val_output) \
__VA_ARGS__, \
.output_len = sizeof(struct core_reloc_enum64val_output), \
}
#define ENUM64VAL_ERR_CASE(name) { \
ENUM64VAL_CASE_COMMON(name), \
.fails = true, \
}
struct core_reloc_test_case;
typedef int (*setup_test_fn)(struct core_reloc_test_case *test);
typedef int (*trigger_test_fn)(const struct core_reloc_test_case *test);
struct core_reloc_test_case {
const char *case_name;
const char *bpf_obj_file;
const char *btf_src_file;
const char *input;
int input_len;
const char *output;
int output_len;
bool fails;
bool run_btfgen_fails;
bool needs_testmod;
bool relaxed_core_relocs;
const char *prog_name;
const char *raw_tp_name;
setup_test_fn setup;
trigger_test_fn trigger;
};
static int find_btf_type(const struct btf *btf, const char *name, __u32 kind)
{
int id;
id = btf__find_by_name_kind(btf, name, kind);
if (CHECK(id <= 0, "find_type_id", "failed to find '%s', kind %d: %d\n", name, kind, id))
return -1;
return id;
}
static int setup_type_id_case_local(struct core_reloc_test_case *test)
{
struct core_reloc_type_id_output *exp = (void *)test->output;
struct btf *local_btf = btf__parse(test->bpf_obj_file, NULL);
struct btf *targ_btf = btf__parse(test->btf_src_file, NULL);
const struct btf_type *t;
const char *name;
int i;
if (!ASSERT_OK_PTR(local_btf, "local_btf") || !ASSERT_OK_PTR(targ_btf, "targ_btf")) {
btf__free(local_btf);
btf__free(targ_btf);
return -EINVAL;
}
exp->local_anon_struct = -1;
exp->local_anon_union = -1;
exp->local_anon_enum = -1;
exp->local_anon_func_proto_ptr = -1;
exp->local_anon_void_ptr = -1;
exp->local_anon_arr = -1;
	for (i = 1; i < btf__type_cnt(local_btf); i++) {
t = btf__type_by_id(local_btf, i);
/* we are interested only in anonymous types */
if (t->name_off)
continue;
if (btf_is_struct(t) && btf_vlen(t) &&
(name = btf__name_by_offset(local_btf, btf_members(t)[0].name_off)) &&
strcmp(name, "marker_field") == 0) {
exp->local_anon_struct = i;
} else if (btf_is_union(t) && btf_vlen(t) &&
(name = btf__name_by_offset(local_btf, btf_members(t)[0].name_off)) &&
strcmp(name, "marker_field") == 0) {
exp->local_anon_union = i;
} else if (btf_is_enum(t) && btf_vlen(t) &&
(name = btf__name_by_offset(local_btf, btf_enum(t)[0].name_off)) &&
strcmp(name, "MARKER_ENUM_VAL") == 0) {
exp->local_anon_enum = i;
} else if (btf_is_ptr(t) && (t = btf__type_by_id(local_btf, t->type))) {
if (btf_is_func_proto(t) && (t = btf__type_by_id(local_btf, t->type)) &&
btf_is_int(t) && (name = btf__name_by_offset(local_btf, t->name_off)) &&
strcmp(name, "_Bool") == 0) {
/* ptr -> func_proto -> _Bool */
exp->local_anon_func_proto_ptr = i;
} else if (btf_is_void(t)) {
/* ptr -> void */
exp->local_anon_void_ptr = i;
}
} else if (btf_is_array(t) && (t = btf__type_by_id(local_btf, btf_array(t)->type)) &&
btf_is_int(t) && (name = btf__name_by_offset(local_btf, t->name_off)) &&
strcmp(name, "_Bool") == 0) {
/* _Bool[] */
exp->local_anon_arr = i;
}
}
exp->local_struct = find_btf_type(local_btf, "a_struct", BTF_KIND_STRUCT);
exp->local_union = find_btf_type(local_btf, "a_union", BTF_KIND_UNION);
exp->local_enum = find_btf_type(local_btf, "an_enum", BTF_KIND_ENUM);
exp->local_int = find_btf_type(local_btf, "int", BTF_KIND_INT);
exp->local_struct_typedef = find_btf_type(local_btf, "named_struct_typedef", BTF_KIND_TYPEDEF);
exp->local_func_proto_typedef = find_btf_type(local_btf, "func_proto_typedef", BTF_KIND_TYPEDEF);
exp->local_arr_typedef = find_btf_type(local_btf, "arr_typedef", BTF_KIND_TYPEDEF);
btf__free(local_btf);
btf__free(targ_btf);
return 0;
}
static int setup_type_id_case_success(struct core_reloc_test_case *test)
{
struct core_reloc_type_id_output *exp = (void *)test->output;
struct btf *targ_btf;
int err;
err = setup_type_id_case_local(test);
if (err)
return err;
targ_btf = btf__parse(test->btf_src_file, NULL);
exp->targ_struct = find_btf_type(targ_btf, "a_struct", BTF_KIND_STRUCT);
exp->targ_union = find_btf_type(targ_btf, "a_union", BTF_KIND_UNION);
exp->targ_enum = find_btf_type(targ_btf, "an_enum", BTF_KIND_ENUM);
exp->targ_int = find_btf_type(targ_btf, "int", BTF_KIND_INT);
exp->targ_struct_typedef = find_btf_type(targ_btf, "named_struct_typedef", BTF_KIND_TYPEDEF);
exp->targ_func_proto_typedef = find_btf_type(targ_btf, "func_proto_typedef", BTF_KIND_TYPEDEF);
exp->targ_arr_typedef = find_btf_type(targ_btf, "arr_typedef", BTF_KIND_TYPEDEF);
btf__free(targ_btf);
return 0;
}
static int setup_type_id_case_failure(struct core_reloc_test_case *test)
{
struct core_reloc_type_id_output *exp = (void *)test->output;
int err;
err = setup_type_id_case_local(test);
if (err)
return err;
exp->targ_struct = 0;
exp->targ_union = 0;
exp->targ_enum = 0;
exp->targ_int = 0;
exp->targ_struct_typedef = 0;
exp->targ_func_proto_typedef = 0;
exp->targ_arr_typedef = 0;
return 0;
}
static int __trigger_module_test_read(const struct core_reloc_test_case *test)
{
struct core_reloc_module_output *exp = (void *)test->output;
trigger_module_test_read(exp->len);
return 0;
}
static const struct core_reloc_test_case test_cases[] = {
/* validate we can find kernel image and use its BTF for relocs */
{
.case_name = "kernel",
.bpf_obj_file = "test_core_reloc_kernel.bpf.o",
.btf_src_file = NULL, /* load from /lib/modules/$(uname -r) */
.input = "",
.input_len = 0,
.output = STRUCT_TO_CHAR_PTR(core_reloc_kernel_output) {
.valid = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
.comm = "test_progs",
.comm_len = sizeof("test_progs"),
.local_task_struct_matches = true,
},
.output_len = sizeof(struct core_reloc_kernel_output),
.raw_tp_name = "sys_enter",
.prog_name = "test_core_kernel",
},
/* validate we can find kernel module BTF types for relocs/attach */
MODULES_CASE("module_probed", "test_core_module_probed", "bpf_testmod_test_read"),
MODULES_CASE("module_direct", "test_core_module_direct", NULL),
/* validate BPF program can use multiple flavors to match against
* single target BTF type
*/
FLAVORS_CASE(flavors),
FLAVORS_ERR_CASE(flavors__err_wrong_name),
/* various struct/enum nesting and resolution scenarios */
NESTING_CASE(nesting),
NESTING_CASE(nesting___anon_embed),
NESTING_CASE(nesting___struct_union_mixup),
NESTING_CASE(nesting___extra_nesting),
NESTING_CASE(nesting___dup_compat_types),
NESTING_ERR_CASE(nesting___err_missing_field),
NESTING_ERR_CASE(nesting___err_array_field),
NESTING_ERR_CASE(nesting___err_missing_container),
NESTING_ERR_CASE(nesting___err_nonstruct_container),
NESTING_ERR_CASE(nesting___err_array_container),
NESTING_ERR_CASE(nesting___err_dup_incompat_types),
NESTING_ERR_CASE(nesting___err_partial_match_dups),
NESTING_ERR_CASE(nesting___err_too_deep),
/* various array access relocation scenarios */
ARRAYS_CASE(arrays),
ARRAYS_CASE(arrays___diff_arr_dim),
ARRAYS_CASE(arrays___diff_arr_val_sz),
ARRAYS_CASE(arrays___equiv_zero_sz_arr),
ARRAYS_CASE(arrays___fixed_arr),
ARRAYS_ERR_CASE(arrays___err_too_small),
ARRAYS_ERR_CASE(arrays___err_too_shallow),
ARRAYS_ERR_CASE(arrays___err_non_array),
ARRAYS_ERR_CASE(arrays___err_wrong_val_type),
ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr),
/* enum/ptr/int handling scenarios */
PRIMITIVES_CASE(primitives),
PRIMITIVES_CASE(primitives___diff_enum_def),
PRIMITIVES_CASE(primitives___diff_func_proto),
PRIMITIVES_CASE(primitives___diff_ptr_type),
PRIMITIVES_ERR_CASE(primitives___err_non_enum),
PRIMITIVES_ERR_CASE(primitives___err_non_int),
PRIMITIVES_ERR_CASE(primitives___err_non_ptr),
/* const/volatile/restrict and typedefs scenarios */
MODS_CASE(mods),
MODS_CASE(mods___mod_swap),
MODS_CASE(mods___typedefs),
/* handling "ptr is an array" semantics */
PTR_AS_ARR_CASE(ptr_as_arr),
PTR_AS_ARR_CASE(ptr_as_arr___diff_sz),
/* int signedness/sizing/bitfield handling */
INTS_CASE(ints),
INTS_CASE(ints___bool),
INTS_CASE(ints___reverse_sign),
/* validate edge cases of capturing relocations */
{
.case_name = "misc",
.bpf_obj_file = "test_core_reloc_misc.bpf.o",
.btf_src_file = "btf__core_reloc_misc.bpf.o",
.input = (const char *)&(struct core_reloc_misc_extensible[]){
{ .a = 1 },
{ .a = 2 }, /* not read */
{ .a = 3 },
},
.input_len = 4 * sizeof(int),
.output = STRUCT_TO_CHAR_PTR(core_reloc_misc_output) {
.a = 1,
.b = 1,
.c = 0, /* BUG in clang, should be 3 */
},
.output_len = sizeof(struct core_reloc_misc_output),
.raw_tp_name = "sys_enter",
.prog_name = "test_core_misc",
},
/* validate field existence checks */
{
FIELD_EXISTS_CASE_COMMON(existence),
.input = STRUCT_TO_CHAR_PTR(core_reloc_existence) {
.a = 1,
.b = 2,
.c = 3,
.arr = { 4 },
.s = { .x = 5 },
},
.input_len = sizeof(struct core_reloc_existence),
.output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
.a_exists = 1,
.b_exists = 1,
.c_exists = 1,
.arr_exists = 1,
.s_exists = 1,
.a_value = 1,
.b_value = 2,
.c_value = 3,
.arr_value = 4,
.s_value = 5,
},
.output_len = sizeof(struct core_reloc_existence_output),
},
{
FIELD_EXISTS_CASE_COMMON(existence___minimal),
.input = STRUCT_TO_CHAR_PTR(core_reloc_existence___minimal) {
.a = 42,
},
.input_len = sizeof(struct core_reloc_existence___minimal),
.output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
.a_exists = 1,
.b_exists = 0,
.c_exists = 0,
.arr_exists = 0,
.s_exists = 0,
.a_value = 42,
.b_value = 0xff000002u,
.c_value = 0xff000003u,
.arr_value = 0xff000004u,
.s_value = 0xff000005u,
},
.output_len = sizeof(struct core_reloc_existence_output),
},
{
FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs),
.input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) {
},
.input_len = sizeof(struct core_reloc_existence___wrong_field_defs),
.output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
.a_exists = 0,
.b_exists = 0,
.c_exists = 0,
.arr_exists = 0,
.s_exists = 0,
.a_value = 0xff000001u,
.b_value = 0xff000002u,
.c_value = 0xff000003u,
.arr_value = 0xff000004u,
.s_value = 0xff000005u,
},
.output_len = sizeof(struct core_reloc_existence_output),
},
/* bitfield relocation checks */
BITFIELDS_CASE(bitfields, {
.ub1 = 1,
.ub2 = 2,
.ub7 = 96,
.sb4 = -7,
.sb20 = -0x76543,
.u32 = 0x80000000,
.s32 = -0x76543210,
}),
BITFIELDS_CASE(bitfields___bit_sz_change, {
.ub1 = 6,
.ub2 = 0xABCDE,
.ub7 = 1,
.sb4 = -1,
.sb20 = -0x17654321,
.u32 = 0xBEEF,
.s32 = -0x3FEDCBA987654321LL,
}),
BITFIELDS_CASE(bitfields___bitfield_vs_int, {
.ub1 = 0xFEDCBA9876543210LL,
.ub2 = 0xA6,
.ub7 = -0x7EDCBA987654321LL,
.sb4 = -0x6123456789ABCDELL,
.sb20 = 0xD00DLL,
.u32 = -0x76543,
.s32 = 0x0ADEADBEEFBADB0BLL,
}),
BITFIELDS_CASE(bitfields___just_big_enough, {
.ub1 = 0xFLL,
.ub2 = 0x0812345678FEDCBALL,
}),
BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
/* field size and offset relocation checks */
SIZE_CASE(size),
SIZE_CASE(size___diff_sz),
SIZE_CASE(size___diff_offs),
SIZE_ERR_CASE(size___err_ambiguous),
/* validate type existence, match, and size relocations */
TYPE_BASED_CASE(type_based, {
.struct_exists = 1,
.complex_struct_exists = 1,
.union_exists = 1,
.enum_exists = 1,
.typedef_named_struct_exists = 1,
.typedef_anon_struct_exists = 1,
.typedef_struct_ptr_exists = 1,
.typedef_int_exists = 1,
.typedef_enum_exists = 1,
.typedef_void_ptr_exists = 1,
.typedef_restrict_ptr_exists = 1,
.typedef_func_proto_exists = 1,
.typedef_arr_exists = 1,
.struct_matches = 1,
.complex_struct_matches = 1,
.union_matches = 1,
.enum_matches = 1,
.typedef_named_struct_matches = 1,
.typedef_anon_struct_matches = 1,
.typedef_struct_ptr_matches = 1,
.typedef_int_matches = 1,
.typedef_enum_matches = 1,
.typedef_void_ptr_matches = 1,
.typedef_restrict_ptr_matches = 1,
.typedef_func_proto_matches = 1,
.typedef_arr_matches = 1,
.struct_sz = sizeof(struct a_struct),
.union_sz = sizeof(union a_union),
.enum_sz = sizeof(enum an_enum),
.typedef_named_struct_sz = sizeof(named_struct_typedef),
.typedef_anon_struct_sz = sizeof(anon_struct_typedef),
.typedef_struct_ptr_sz = sizeof(struct_ptr_typedef),
.typedef_int_sz = sizeof(int_typedef),
.typedef_enum_sz = sizeof(enum_typedef),
.typedef_void_ptr_sz = sizeof(void_ptr_typedef),
.typedef_func_proto_sz = sizeof(func_proto_typedef),
.typedef_arr_sz = sizeof(arr_typedef),
}),
TYPE_BASED_CASE(type_based___all_missing, {
/* all zeros */
}),
TYPE_BASED_CASE(type_based___diff, {
.struct_exists = 1,
.complex_struct_exists = 1,
.union_exists = 1,
.enum_exists = 1,
.typedef_named_struct_exists = 1,
.typedef_anon_struct_exists = 1,
.typedef_struct_ptr_exists = 1,
.typedef_int_exists = 1,
.typedef_enum_exists = 1,
.typedef_void_ptr_exists = 1,
.typedef_func_proto_exists = 1,
.typedef_arr_exists = 1,
.struct_matches = 1,
.complex_struct_matches = 1,
.union_matches = 1,
.enum_matches = 1,
.typedef_named_struct_matches = 1,
.typedef_anon_struct_matches = 1,
.typedef_struct_ptr_matches = 1,
.typedef_int_matches = 0,
.typedef_enum_matches = 1,
.typedef_void_ptr_matches = 1,
.typedef_func_proto_matches = 0,
.typedef_arr_matches = 0,
.struct_sz = sizeof(struct a_struct___diff),
.union_sz = sizeof(union a_union___diff),
.enum_sz = sizeof(enum an_enum___diff),
.typedef_named_struct_sz = sizeof(named_struct_typedef___diff),
.typedef_anon_struct_sz = sizeof(anon_struct_typedef___diff),
.typedef_struct_ptr_sz = sizeof(struct_ptr_typedef___diff),
.typedef_int_sz = sizeof(int_typedef___diff),
.typedef_enum_sz = sizeof(enum_typedef___diff),
.typedef_void_ptr_sz = sizeof(void_ptr_typedef___diff),
.typedef_func_proto_sz = sizeof(func_proto_typedef___diff),
.typedef_arr_sz = sizeof(arr_typedef___diff),
}),
TYPE_BASED_CASE(type_based___diff_sz, {
.struct_exists = 1,
.union_exists = 1,
.enum_exists = 1,
.typedef_named_struct_exists = 1,
.typedef_anon_struct_exists = 1,
.typedef_struct_ptr_exists = 1,
.typedef_int_exists = 1,
.typedef_enum_exists = 1,
.typedef_void_ptr_exists = 1,
.typedef_func_proto_exists = 1,
.typedef_arr_exists = 1,
.struct_matches = 0,
.union_matches = 0,
.enum_matches = 0,
.typedef_named_struct_matches = 0,
.typedef_anon_struct_matches = 0,
.typedef_struct_ptr_matches = 1,
.typedef_int_matches = 0,
.typedef_enum_matches = 0,
.typedef_void_ptr_matches = 1,
.typedef_func_proto_matches = 0,
.typedef_arr_matches = 0,
.struct_sz = sizeof(struct a_struct___diff_sz),
.union_sz = sizeof(union a_union___diff_sz),
.enum_sz = sizeof(enum an_enum___diff_sz),
.typedef_named_struct_sz = sizeof(named_struct_typedef___diff_sz),
.typedef_anon_struct_sz = sizeof(anon_struct_typedef___diff_sz),
.typedef_struct_ptr_sz = sizeof(struct_ptr_typedef___diff_sz),
.typedef_int_sz = sizeof(int_typedef___diff_sz),
.typedef_enum_sz = sizeof(enum_typedef___diff_sz),
.typedef_void_ptr_sz = sizeof(void_ptr_typedef___diff_sz),
.typedef_func_proto_sz = sizeof(func_proto_typedef___diff_sz),
.typedef_arr_sz = sizeof(arr_typedef___diff_sz),
}),
TYPE_BASED_CASE(type_based___incompat, {
.enum_exists = 1,
.enum_matches = 1,
.enum_sz = sizeof(enum an_enum),
}),
TYPE_BASED_CASE(type_based___fn_wrong_args, {
.struct_exists = 1,
.struct_matches = 1,
.struct_sz = sizeof(struct a_struct),
}),
/* BTF_TYPE_ID_LOCAL/BTF_TYPE_ID_TARGET tests */
TYPE_ID_CASE(type_id, setup_type_id_case_success),
TYPE_ID_CASE(type_id___missing_targets, setup_type_id_case_failure),
/* Enumerator value existence and value relocations */
ENUMVAL_CASE(enumval, {
.named_val1_exists = true,
.named_val2_exists = true,
.named_val3_exists = true,
.anon_val1_exists = true,
.anon_val2_exists = true,
.anon_val3_exists = true,
.named_val1 = 1,
.named_val2 = 2,
.anon_val1 = 0x10,
.anon_val2 = 0x20,
}),
ENUMVAL_CASE(enumval___diff, {
.named_val1_exists = true,
.named_val2_exists = true,
.named_val3_exists = true,
.anon_val1_exists = true,
.anon_val2_exists = true,
.anon_val3_exists = true,
.named_val1 = 101,
.named_val2 = 202,
.anon_val1 = 0x11,
.anon_val2 = 0x22,
}),
ENUMVAL_CASE(enumval___val3_missing, {
.named_val1_exists = true,
.named_val2_exists = true,
.named_val3_exists = false,
.anon_val1_exists = true,
.anon_val2_exists = true,
.anon_val3_exists = false,
.named_val1 = 111,
.named_val2 = 222,
.anon_val1 = 0x111,
.anon_val2 = 0x222,
}),
ENUMVAL_ERR_CASE(enumval___err_missing),
/* 64bit enumerator value existence and value relocations */
ENUM64VAL_CASE(enum64val, {
.unsigned_val1_exists = true,
.unsigned_val2_exists = true,
.unsigned_val3_exists = true,
.signed_val1_exists = true,
.signed_val2_exists = true,
.signed_val3_exists = true,
.unsigned_val1 = 0x1ffffffffULL,
.unsigned_val2 = 0x2,
.signed_val1 = 0x1ffffffffLL,
.signed_val2 = -2,
}),
ENUM64VAL_CASE(enum64val___diff, {
.unsigned_val1_exists = true,
.unsigned_val2_exists = true,
.unsigned_val3_exists = true,
.signed_val1_exists = true,
.signed_val2_exists = true,
.signed_val3_exists = true,
.unsigned_val1 = 0x101ffffffffULL,
.unsigned_val2 = 0x202ffffffffULL,
.signed_val1 = -101,
.signed_val2 = -202,
}),
ENUM64VAL_CASE(enum64val___val3_missing, {
.unsigned_val1_exists = true,
.unsigned_val2_exists = true,
.unsigned_val3_exists = false,
.signed_val1_exists = true,
.signed_val2_exists = true,
.signed_val3_exists = false,
.unsigned_val1 = 0x111ffffffffULL,
.unsigned_val2 = 0x222,
.signed_val1 = 0x111ffffffffLL,
.signed_val2 = -222,
}),
ENUM64VAL_ERR_CASE(enum64val___err_missing),
};
struct data {
char in[256];
char out[256];
bool skip;
uint64_t my_pid_tgid;
};
static size_t roundup_page(size_t sz)
{
long page_size = sysconf(_SC_PAGE_SIZE);
return (sz + page_size - 1) / page_size * page_size;
}
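/* Run bpftool's "gen min_core_btf" command to produce a BTF minimized for
 * the given BPF object file, writing the result to dst_btf.
 */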
static int run_btfgen(const char *src_btf, const char *dst_btf, const char *objpath)
{
char command[4096];
int n;
n = snprintf(command, sizeof(command),
"./bpftool gen min_core_btf %s %s %s",
src_btf, dst_btf, objpath);
if (n < 0 || n >= sizeof(command))
return -1;
return system(command);
}
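/* Driver for all CO-RE relocation test cases: optionally substitute a
 * bpftool-minimized BTF as the custom BTF source, load and attach each test
 * program, trigger it, and compare the .bss output against the expected data.
 */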
static void run_core_reloc_tests(bool use_btfgen)
{
const size_t mmap_sz = roundup_page(sizeof(struct data));
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
struct core_reloc_test_case *test_case, test_case_copy;
const char *tp_name, *probe_name;
int err, i, equal, fd;
struct bpf_link *link = NULL;
struct bpf_map *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
uint64_t my_pid_tgid;
struct data *data;
void *mmap_data = NULL;
my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32);
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
char btf_file[] = "/tmp/core_reloc.btf.XXXXXX";
test_case_copy = test_cases[i];
test_case = &test_case_copy;
if (!test__start_subtest(test_case->case_name))
continue;
if (test_case->needs_testmod && !env.has_testmod) {
test__skip();
continue;
}
/* generate a "minimal" BTF file and use it as source */
if (use_btfgen) {
if (!test_case->btf_src_file || test_case->run_btfgen_fails) {
test__skip();
continue;
}
fd = mkstemp(btf_file);
if (!ASSERT_GE(fd, 0, "btf_tmp"))
continue;
close(fd); /* we only need the path */
err = run_btfgen(test_case->btf_src_file, btf_file,
test_case->bpf_obj_file);
if (!ASSERT_OK(err, "run_btfgen"))
continue;
test_case->btf_src_file = btf_file;
}
if (test_case->setup) {
err = test_case->setup(test_case);
if (CHECK(err, "test_setup", "test #%d setup failed: %d\n", i, err))
continue;
}
if (test_case->btf_src_file) {
err = access(test_case->btf_src_file, R_OK);
if (!ASSERT_OK(err, "btf_src_file"))
continue;
}
open_opts.btf_custom_path = test_case->btf_src_file;
obj = bpf_object__open_file(test_case->bpf_obj_file, &open_opts);
if (!ASSERT_OK_PTR(obj, "obj_open"))
goto cleanup;
probe_name = test_case->prog_name;
tp_name = test_case->raw_tp_name; /* NULL for tp_btf */
prog = bpf_object__find_program_by_name(obj, probe_name);
if (CHECK(!prog, "find_probe",
"prog '%s' not found\n", probe_name))
goto cleanup;
err = bpf_object__load(obj);
if (err) {
if (!test_case->fails)
ASSERT_OK(err, "obj_load");
goto cleanup;
}
data_map = bpf_object__find_map_by_name(obj, ".bss");
if (CHECK(!data_map, "find_data_map", "data map not found\n"))
goto cleanup;
mmap_data = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
MAP_SHARED, bpf_map__fd(data_map), 0);
if (CHECK(mmap_data == MAP_FAILED, "mmap",
".bss mmap failed: %d", errno)) {
mmap_data = NULL;
goto cleanup;
}
data = mmap_data;
memset(mmap_data, 0, sizeof(*data));
if (test_case->input_len)
memcpy(data->in, test_case->input, test_case->input_len);
data->my_pid_tgid = my_pid_tgid;
link = bpf_program__attach_raw_tracepoint(prog, tp_name);
if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto cleanup;
/* trigger test run */
if (test_case->trigger) {
if (!ASSERT_OK(test_case->trigger(test_case), "test_trigger"))
goto cleanup;
} else {
usleep(1);
}
if (data->skip) {
test__skip();
goto cleanup;
}
if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail"))
goto cleanup;
equal = memcmp(data->out, test_case->output,
test_case->output_len) == 0;
if (CHECK(!equal, "check_result",
"input/output data don't match\n")) {
int j;
for (j = 0; j < test_case->input_len; j++) {
printf("input byte #%d: 0x%02hhx\n",
j, test_case->input[j]);
}
for (j = 0; j < test_case->output_len; j++) {
printf("output byte #%d: EXP 0x%02hhx GOT 0x%02hhx\n",
j, test_case->output[j], data->out[j]);
}
goto cleanup;
}
cleanup:
if (mmap_data) {
CHECK_FAIL(munmap(mmap_data, mmap_sz));
mmap_data = NULL;
}
if (use_btfgen)
remove(test_case->btf_src_file);
bpf_link__destroy(link);
link = NULL;
bpf_object__close(obj);
}
}
void test_core_reloc(void)
{
run_core_reloc_tests(false);
}
void test_core_reloc_btfgen(void)
{
run_core_reloc_tests(true);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/core_reloc.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include "fexit_test.lskel.h"
#include "fexit_many_args.skel.h"
static int fexit_test_common(struct fexit_test_lskel *fexit_skel)
{
int err, prog_fd, i;
int link_fd;
__u64 *result;
LIBBPF_OPTS(bpf_test_run_opts, topts);
err = fexit_test_lskel__attach(fexit_skel);
if (!ASSERT_OK(err, "fexit_attach"))
return err;
/* Check that already linked program can't be attached again. */
link_fd = fexit_test_lskel__test1__attach(fexit_skel);
if (!ASSERT_LT(link_fd, 0, "fexit_attach_link"))
return -1;
prog_fd = fexit_skel->progs.test1.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
result = (__u64 *)fexit_skel->bss;
for (i = 0; i < sizeof(*fexit_skel->bss) / sizeof(__u64); i++) {
if (!ASSERT_EQ(result[i], 1, "fexit_result"))
return -1;
}
fexit_test_lskel__detach(fexit_skel);
/* zero results for re-attach test */
memset(fexit_skel->bss, 0, sizeof(*fexit_skel->bss));
return 0;
}
static void fexit_test(void)
{
struct fexit_test_lskel *fexit_skel = NULL;
int err;
fexit_skel = fexit_test_lskel__open_and_load();
if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load"))
goto cleanup;
err = fexit_test_common(fexit_skel);
if (!ASSERT_OK(err, "fexit_first_attach"))
goto cleanup;
err = fexit_test_common(fexit_skel);
ASSERT_OK(err, "fexit_second_attach");
cleanup:
fexit_test_lskel__destroy(fexit_skel);
}
static void fexit_many_args(void)
{
struct fexit_many_args *fexit_skel = NULL;
int err;
fexit_skel = fexit_many_args__open_and_load();
if (!ASSERT_OK_PTR(fexit_skel, "fexit_many_args_skel_load"))
goto cleanup;
err = fexit_many_args__attach(fexit_skel);
if (!ASSERT_OK(err, "fexit_many_args_attach"))
goto cleanup;
ASSERT_OK(trigger_module_test_read(1), "trigger_read");
ASSERT_EQ(fexit_skel->bss->test1_result, 1,
"fexit_many_args_result1");
ASSERT_EQ(fexit_skel->bss->test2_result, 1,
"fexit_many_args_result2");
ASSERT_EQ(fexit_skel->bss->test3_result, 1,
"fexit_many_args_result3");
cleanup:
fexit_many_args__destroy(fexit_skel);
}
void test_fexit_test(void)
{
if (test__start_subtest("fexit"))
fexit_test();
if (test__start_subtest("fexit_many_args"))
fexit_many_args();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/fexit_test.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <network_helpers.h>
#include "rbtree.skel.h"
#include "rbtree_fail.skel.h"
#include "rbtree_btf_fail__wrong_node_type.skel.h"
#include "rbtree_btf_fail__add_wrong_type.skel.h"
static void test_rbtree_add_nodes(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct rbtree *skel;
int ret;
skel = rbtree__open_and_load();
if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes), &opts);
ASSERT_OK(ret, "rbtree_add_nodes run");
ASSERT_OK(opts.retval, "rbtree_add_nodes retval");
ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes less_callback_ran");
rbtree__destroy(skel);
}
static void test_rbtree_add_and_remove(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct rbtree *skel;
int ret;
skel = rbtree__open_and_load();
if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove), &opts);
ASSERT_OK(ret, "rbtree_add_and_remove");
ASSERT_OK(opts.retval, "rbtree_add_and_remove retval");
ASSERT_EQ(skel->data->removed_key, 5, "rbtree_add_and_remove first removed key");
rbtree__destroy(skel);
}
static void test_rbtree_first_and_remove(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct rbtree *skel;
int ret;
skel = rbtree__open_and_load();
if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_first_and_remove), &opts);
ASSERT_OK(ret, "rbtree_first_and_remove");
ASSERT_OK(opts.retval, "rbtree_first_and_remove retval");
ASSERT_EQ(skel->data->first_data[0], 2, "rbtree_first_and_remove first rbtree_first()");
ASSERT_EQ(skel->data->removed_key, 1, "rbtree_first_and_remove first removed key");
ASSERT_EQ(skel->data->first_data[1], 4, "rbtree_first_and_remove second rbtree_first()");
rbtree__destroy(skel);
}
static void test_rbtree_api_release_aliasing(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct rbtree *skel;
int ret;
skel = rbtree__open_and_load();
if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_api_release_aliasing), &opts);
ASSERT_OK(ret, "rbtree_api_release_aliasing");
ASSERT_OK(opts.retval, "rbtree_api_release_aliasing retval");
ASSERT_EQ(skel->data->first_data[0], 42, "rbtree_api_release_aliasing first rbtree_remove()");
ASSERT_EQ(skel->data->first_data[1], -1, "rbtree_api_release_aliasing second rbtree_remove()");
rbtree__destroy(skel);
}
void test_rbtree_success(void)
{
if (test__start_subtest("rbtree_add_nodes"))
test_rbtree_add_nodes();
if (test__start_subtest("rbtree_add_and_remove"))
test_rbtree_add_and_remove();
if (test__start_subtest("rbtree_first_and_remove"))
test_rbtree_first_and_remove();
if (test__start_subtest("rbtree_api_release_aliasing"))
test_rbtree_api_release_aliasing();
}
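/* Define a subtest expecting open_and_load of the given malformed-BTF rbtree
 * skeleton to fail; destroy the skeleton if it unexpectedly succeeds.
 */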
#define BTF_FAIL_TEST(suffix) \
void test_rbtree_btf_fail__##suffix(void) \
{ \
struct rbtree_btf_fail__##suffix *skel; \
\
skel = rbtree_btf_fail__##suffix##__open_and_load(); \
if (!ASSERT_ERR_PTR(skel, \
"rbtree_btf_fail__" #suffix "__open_and_load unexpected success")) \
rbtree_btf_fail__##suffix##__destroy(skel); \
}
#define RUN_BTF_FAIL_TEST(suffix) \
if (test__start_subtest("rbtree_btf_fail__" #suffix)) \
test_rbtree_btf_fail__##suffix();
BTF_FAIL_TEST(wrong_node_type);
BTF_FAIL_TEST(add_wrong_type);
void test_rbtree_btf_fail(void)
{
RUN_BTF_FAIL_TEST(wrong_node_type);
RUN_BTF_FAIL_TEST(add_wrong_type);
}
void test_rbtree_fail(void)
{
RUN_TESTS(rbtree_fail);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/rbtree.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
void test_tcp_estats(void)
{
const char *file = "./test_tcp_estats.bpf.o";
int err, prog_fd;
struct bpf_object *obj;
err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (!ASSERT_OK(err, "bpf_prog_test_load"))
return;
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/tcp_estats.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
void test_reference_tracking(void)
{
const char *file = "test_sk_lookup_kern.bpf.o";
const char *obj_name = "ref_track";
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
.object_name = obj_name,
.relaxed_maps = true,
);
struct bpf_object *obj_iter, *obj = NULL;
struct bpf_program *prog;
__u32 duration = 0;
int err = 0;
obj_iter = bpf_object__open_file(file, &open_opts);
if (!ASSERT_OK_PTR(obj_iter, "obj_iter_open_file"))
return;
if (CHECK(strcmp(bpf_object__name(obj_iter), obj_name), "obj_name",
"wrong obj name '%s', expected '%s'\n",
bpf_object__name(obj_iter), obj_name))
goto cleanup;
bpf_object__for_each_program(prog, obj_iter) {
struct bpf_program *p;
const char *name;
name = bpf_program__name(prog);
if (!test__start_subtest(name))
continue;
obj = bpf_object__open_file(file, &open_opts);
if (!ASSERT_OK_PTR(obj, "obj_open_file"))
goto cleanup;
/* no programs are loaded by default, so just set
* autoload to true for the single prog under test
*/
p = bpf_object__find_program_by_name(obj, name);
bpf_program__set_autoload(p, true);
/* Expect verifier failure if test name has 'err' */
if (strncmp(name, "err_", sizeof("err_") - 1) == 0) {
libbpf_print_fn_t old_print_fn;
old_print_fn = libbpf_set_print(NULL);
err = !bpf_object__load(obj);
libbpf_set_print(old_print_fn);
} else {
err = bpf_object__load(obj);
}
ASSERT_OK(err, name);
bpf_object__close(obj);
obj = NULL;
}
cleanup:
bpf_object__close(obj);
bpf_object__close(obj_iter);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/reference_tracking.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "map_kptr.skel.h"
#include "map_kptr_fail.skel.h"
#include "rcu_tasks_trace_gp.skel.h"
static void test_map_kptr_success(bool test_run)
{
LIBBPF_OPTS(bpf_test_run_opts, lopts);
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
int key = 0, ret, cpu;
struct map_kptr *skel;
char buf[16], *pbuf;
skel = map_kptr__open_and_load();
if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref1), &opts);
ASSERT_OK(ret, "test_map_kptr_ref1 refcount");
ASSERT_OK(opts.retval, "test_map_kptr_ref1 retval");
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts);
ASSERT_OK(ret, "test_map_kptr_ref2 refcount");
ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref1), &lopts);
ASSERT_OK(ret, "test_ls_map_kptr_ref1 refcount");
ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref1 retval");
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref2), &lopts);
ASSERT_OK(ret, "test_ls_map_kptr_ref2 refcount");
ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref2 retval");
if (test_run)
goto exit;
cpu = libbpf_num_possible_cpus();
if (!ASSERT_GT(cpu, 0, "libbpf_num_possible_cpus"))
goto exit;
pbuf = calloc(cpu, sizeof(buf));
if (!ASSERT_OK_PTR(pbuf, "calloc(pbuf)"))
goto exit;
ret = bpf_map__update_elem(skel->maps.array_map,
&key, sizeof(key), buf, sizeof(buf), 0);
ASSERT_OK(ret, "array_map update");
skel->data->ref--;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
ret = bpf_map__update_elem(skel->maps.pcpu_array_map,
&key, sizeof(key), pbuf, cpu * sizeof(buf), 0);
ASSERT_OK(ret, "pcpu_array_map update");
skel->data->ref--;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);
ASSERT_OK(ret, "hash_map delete");
skel->data->ref--;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
ret = bpf_map__delete_elem(skel->maps.pcpu_hash_map, &key, sizeof(key), 0);
ASSERT_OK(ret, "pcpu_hash_map delete");
skel->data->ref--;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);
ASSERT_OK(ret, "hash_malloc_map delete");
skel->data->ref--;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
ret = bpf_map__delete_elem(skel->maps.pcpu_hash_malloc_map, &key, sizeof(key), 0);
ASSERT_OK(ret, "pcpu_hash_malloc_map delete");
skel->data->ref--;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
ASSERT_OK(ret, "lru_hash_map delete");
skel->data->ref--;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
ret = bpf_map__delete_elem(skel->maps.lru_pcpu_hash_map, &key, sizeof(key), 0);
ASSERT_OK(ret, "lru_pcpu_hash_map delete");
skel->data->ref--;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref_del), &lopts);
ASSERT_OK(ret, "test_ls_map_kptr_ref_del delete");
skel->data->ref--;
ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref_del retval");
free(pbuf);
exit:
map_kptr__destroy(skel);
}
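/* Run the do_call_rcu_tasks_trace helper program and yield until gp_seq
 * advances, i.e. until an RCU tasks trace grace period has elapsed.
 */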
static int kern_sync_rcu_tasks_trace(struct rcu_tasks_trace_gp *rcu)
{
long gp_seq = READ_ONCE(rcu->bss->gp_seq);
LIBBPF_OPTS(bpf_test_run_opts, opts);
if (!ASSERT_OK(bpf_prog_test_run_opts(bpf_program__fd(rcu->progs.do_call_rcu_tasks_trace),
&opts), "do_call_rcu_tasks_trace"))
return -EFAULT;
if (!ASSERT_OK(opts.retval, "opts.retval == 0"))
return -EFAULT;
while (gp_seq == READ_ONCE(rcu->bss->gp_seq))
sched_yield();
return 0;
}
void serial_test_map_kptr(void)
{
struct rcu_tasks_trace_gp *skel;
RUN_TESTS(map_kptr_fail);
skel = rcu_tasks_trace_gp__open_and_load();
if (!ASSERT_OK_PTR(skel, "rcu_tasks_trace_gp__open_and_load"))
return;
if (!ASSERT_OK(rcu_tasks_trace_gp__attach(skel), "rcu_tasks_trace_gp__attach"))
goto end;
if (test__start_subtest("success-map")) {
test_map_kptr_success(true);
ASSERT_OK(kern_sync_rcu_tasks_trace(skel), "sync rcu_tasks_trace");
ASSERT_OK(kern_sync_rcu(), "sync rcu");
/* Observe refcount dropping to 1 on bpf_map_free_deferred */
test_map_kptr_success(false);
ASSERT_OK(kern_sync_rcu_tasks_trace(skel), "sync rcu_tasks_trace");
ASSERT_OK(kern_sync_rcu(), "sync rcu");
/* Observe refcount dropping to 1 on synchronous delete elem */
test_map_kptr_success(true);
}
end:
rcu_tasks_trace_gp__destroy(skel);
return;
}
| linux-master | tools/testing/selftests/bpf/prog_tests/map_kptr.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
static void sigalrm_handler(int s) {}
static struct sigaction sigalrm_action = {
.sa_handler = sigalrm_handler,
};
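/* Load a large program of trivial instructions and run it with a huge repeat
 * count, then verify that a pending SIGALRM terminates the run within 500ms.
 */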
static void test_signal_pending_by_type(enum bpf_prog_type prog_type)
{
struct bpf_insn prog[4096];
struct itimerval timeo = {
.it_value.tv_usec = 100000, /* 100ms */
};
int prog_fd;
int err;
int i;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 0xffffffff,
);
for (i = 0; i < ARRAY_SIZE(prog); i++)
prog[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
prog[ARRAY_SIZE(prog) - 1] = BPF_EXIT_INSN();
prog_fd = bpf_test_load_program(prog_type, prog, ARRAY_SIZE(prog),
"GPL", 0, NULL, 0);
ASSERT_GE(prog_fd, 0, "test-run load");
err = sigaction(SIGALRM, &sigalrm_action, NULL);
ASSERT_OK(err, "test-run-signal-sigaction");
err = setitimer(ITIMER_REAL, &timeo, NULL);
ASSERT_OK(err, "test-run-signal-timer");
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_LE(topts.duration, 500000000 /* 500ms */,
"test-run-signal-duration");
signal(SIGALRM, SIG_DFL);
}
void test_signal_pending(void)
{
test_signal_pending_by_type(BPF_PROG_TYPE_SOCKET_FILTER);
test_signal_pending_by_type(BPF_PROG_TYPE_FLOW_DISSECTOR);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/signal_pending.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <test_progs.h>
#include <sys/epoll.h>
#include "test_ringbuf_multi.skel.h"
static int duration = 0;
struct sample {
int pid;
int seq;
long value;
char comm[16];
};
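/* Ring buffer sample callback: ctx carries the ring number, so check that
 * each sample arrived on the expected ring with the expected value.
 */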
static int process_sample(void *ctx, void *data, size_t len)
{
int ring = (unsigned long)ctx;
struct sample *s = data;
switch (s->seq) {
case 0:
CHECK(ring != 1, "sample1_ring", "exp %d, got %d\n", 1, ring);
CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
333L, s->value);
break;
case 1:
CHECK(ring != 2, "sample2_ring", "exp %d, got %d\n", 2, ring);
CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
777L, s->value);
break;
default:
CHECK(true, "extra_sample", "unexpected sample seq %d, val %ld\n",
s->seq, s->value);
return -1;
}
return 0;
}
void test_ringbuf_multi(void)
{
struct test_ringbuf_multi *skel;
struct ring_buffer *ringbuf = NULL;
int err;
int page_size = getpagesize();
int proto_fd = -1;
skel = test_ringbuf_multi__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
/* validate ringbuf size adjustment logic */
ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_before");
ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size + 1), "rb1_resize");
ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), 2 * page_size, "rb1_size_after");
ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size), "rb1_reset");
ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_final");
proto_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, NULL, 0, 0, page_size, NULL);
if (CHECK(proto_fd < 0, "bpf_map_create", "bpf_map_create failed\n"))
goto cleanup;
err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd);
if (CHECK(err != 0, "bpf_map__set_inner_map_fd", "bpf_map__set_inner_map_fd failed\n"))
goto cleanup;
err = test_ringbuf_multi__load(skel);
if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
goto cleanup;
close(proto_fd);
proto_fd = -1;
/* make sure we can't resize ringbuf after object load */
if (!ASSERT_ERR(bpf_map__set_max_entries(skel->maps.ringbuf1, 3 * page_size), "rb1_resize_after_load"))
goto cleanup;
/* only trigger BPF program for current process */
skel->bss->pid = getpid();
ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf1),
process_sample, (void *)(long)1, NULL);
if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
goto cleanup;
err = ring_buffer__add(ringbuf, bpf_map__fd(skel->maps.ringbuf2),
process_sample, (void *)(long)2);
if (CHECK(err, "ringbuf_add", "failed to add another ring\n"))
goto cleanup;
err = test_ringbuf_multi__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
goto cleanup;
/* trigger a few samples, some will be skipped */
skel->bss->target_ring = 0;
skel->bss->value = 333;
syscall(__NR_getpgid);
/* skipped, no ringbuf in slot 1 */
skel->bss->target_ring = 1;
skel->bss->value = 555;
syscall(__NR_getpgid);
skel->bss->target_ring = 2;
skel->bss->value = 777;
syscall(__NR_getpgid);
/* poll for samples, should get 2 ringbufs back */
err = ring_buffer__poll(ringbuf, -1);
if (CHECK(err != 2, "poll_res", "expected 2 records, got %d\n", err))
goto cleanup;
/* expect extra polling to return nothing */
err = ring_buffer__poll(ringbuf, 0);
if (CHECK(err < 0, "extra_samples", "poll result: %d\n", err))
goto cleanup;
CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
0L, skel->bss->dropped);
CHECK(skel->bss->skipped != 1, "err_skipped", "exp %ld, got %ld\n",
1L, skel->bss->skipped);
CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
2L, skel->bss->total);
cleanup:
if (proto_fd >= 0)
close(proto_fd);
ring_buffer__free(ringbuf);
test_ringbuf_multi__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */
#include <uapi/linux/if_link.h>
#include <uapi/linux/pkt_sched.h>
#include <net/if.h>
#include <test_progs.h>
#define loopback 1
#define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null"
#include "test_tc_link.skel.h"
#include "tc_helpers.h"
void serial_test_tc_links_basic(void)
{
LIBBPF_OPTS(bpf_prog_query_opts, optq);
LIBBPF_OPTS(bpf_tcx_opts, optl);
__u32 prog_ids[2], link_ids[2];
__u32 pid1, pid2, lid1, lid2;
struct test_tc_link *skel;
struct bpf_link *link;
int err;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
assert_mprog_count(BPF_TCX_INGRESS, 0);
assert_mprog_count(BPF_TCX_EGRESS, 0);
ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc1 = link;
lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
assert_mprog_count(BPF_TCX_INGRESS, 1);
assert_mprog_count(BPF_TCX_EGRESS, 0);
optq.prog_ids = prog_ids;
optq.link_ids = link_ids;
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 1, "count");
ASSERT_EQ(optq.revision, 2, "revision");
ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc2 = link;
lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
ASSERT_NEQ(lid1, lid2, "link_ids_1_2");
assert_mprog_count(BPF_TCX_INGRESS, 1);
assert_mprog_count(BPF_TCX_EGRESS, 1);
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, BPF_TCX_EGRESS, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 1, "count");
ASSERT_EQ(optq.revision, 2, "revision");
ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
cleanup:
test_tc_link__destroy(skel);
assert_mprog_count(BPF_TCX_INGRESS, 0);
assert_mprog_count(BPF_TCX_EGRESS, 0);
}
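/* Attach tc1/tc2 as tcx links, then insert tc3 before tc2 (BPF_F_BEFORE by
 * prog fd) and tc4 before tc1 (BPF_F_BEFORE | BPF_F_LINK by link id), and
 * verify the resulting ordering via bpf_prog_query_opts().
 */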
static void test_tc_links_before_target(int target)
{
LIBBPF_OPTS(bpf_prog_query_opts, optq);
LIBBPF_OPTS(bpf_tcx_opts, optl);
__u32 prog_ids[5], link_ids[5];
__u32 pid1, pid2, pid3, pid4;
__u32 lid1, lid2, lid3, lid4;
struct test_tc_link *skel;
struct bpf_link *link;
int err;
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
0, "tc3_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
0, "tc4_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
assert_mprog_count(target, 0);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc1 = link;
lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
assert_mprog_count(target, 1);
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc2 = link;
lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
assert_mprog_count(target, 2);
optq.prog_ids = prog_ids;
optq.link_ids = link_ids;
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 3, "revision");
ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
skel->bss->seen_tc1 = false;
skel->bss->seen_tc2 = false;
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE,
.relative_fd = bpf_program__fd(skel->progs.tc2),
);
link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc3 = link;
lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3));
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE | BPF_F_LINK,
.relative_id = lid1,
);
link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc4 = link;
lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4));
assert_mprog_count(target, 4);
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 4, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], pid4, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid4, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]");
ASSERT_EQ(optq.prog_ids[2], pid3, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], lid3, "link_ids[2]");
ASSERT_EQ(optq.prog_ids[3], pid2, "prog_ids[3]");
ASSERT_EQ(optq.link_ids[3], lid2, "link_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
cleanup:
test_tc_link__destroy(skel);
assert_mprog_count(target, 0);
}
void serial_test_tc_links_before(void)
{
test_tc_links_before_target(BPF_TCX_INGRESS);
test_tc_links_before_target(BPF_TCX_EGRESS);
}
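/* Same as the "before" test but with BPF_F_AFTER: insert tc3 after tc1 and
 * tc4 after tc2's link, then verify ordering and that all programs ran.
 */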
static void test_tc_links_after_target(int target)
{
LIBBPF_OPTS(bpf_prog_query_opts, optq);
LIBBPF_OPTS(bpf_tcx_opts, optl);
__u32 prog_ids[5], link_ids[5];
__u32 pid1, pid2, pid3, pid4;
__u32 lid1, lid2, lid3, lid4;
struct test_tc_link *skel;
struct bpf_link *link;
int err;
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
0, "tc3_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
0, "tc4_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
assert_mprog_count(target, 0);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc1 = link;
lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
assert_mprog_count(target, 1);
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc2 = link;
lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
assert_mprog_count(target, 2);
optq.prog_ids = prog_ids;
optq.link_ids = link_ids;
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 3, "revision");
ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
skel->bss->seen_tc1 = false;
skel->bss->seen_tc2 = false;
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER,
.relative_fd = bpf_program__fd(skel->progs.tc1),
);
link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc3 = link;
lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3));
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER | BPF_F_LINK,
.relative_fd = bpf_link__fd(skel->links.tc2),
);
link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc4 = link;
lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4));
assert_mprog_count(target, 4);
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 4, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], pid3, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], lid3, "link_ids[1]");
ASSERT_EQ(optq.prog_ids[2], pid2, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], lid2, "link_ids[2]");
ASSERT_EQ(optq.prog_ids[3], pid4, "prog_ids[3]");
ASSERT_EQ(optq.link_ids[3], lid4, "link_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
cleanup:
test_tc_link__destroy(skel);
assert_mprog_count(target, 0);
}
void serial_test_tc_links_after(void)
{
test_tc_links_after_target(BPF_TCX_INGRESS);
test_tc_links_after_target(BPF_TCX_EGRESS);
}
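/* Verify expected_revision handling: attaching with a stale revision is
 * rejected, attaching with the current revision succeeds.
 */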
static void test_tc_links_revision_target(int target)
{
LIBBPF_OPTS(bpf_prog_query_opts, optq);
LIBBPF_OPTS(bpf_tcx_opts, optl);
__u32 prog_ids[3], link_ids[3];
__u32 pid1, pid2, lid1, lid2;
struct test_tc_link *skel;
struct bpf_link *link;
int err;
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
assert_mprog_count(target, 0);
optl.expected_revision = 1;
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc1 = link;
lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
assert_mprog_count(target, 1);
optl.expected_revision = 1;
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 1);
optl.expected_revision = 2;
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc2 = link;
lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
assert_mprog_count(target, 2);
optq.prog_ids = prog_ids;
optq.link_ids = link_ids;
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 3, "revision");
ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
cleanup:
test_tc_link__destroy(skel);
assert_mprog_count(target, 0);
}
void serial_test_tc_links_revision(void)
{
test_tc_links_revision_target(BPF_TCX_INGRESS);
test_tc_links_revision_target(BPF_TCX_EGRESS);
}
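/* Verify tcx links coexist with a legacy TC attachment: with chain_tc_old
 * set, tc3 is attached through the old bpf_tc_attach() API and must still
 * execute alongside the tcx-attached tc1/tc2, also after tc2 is detached.
 */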
static void test_tc_chain_classic(int target, bool chain_tc_old)
{
LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
bool hook_created = false, tc_attached = false;
LIBBPF_OPTS(bpf_tcx_opts, optl);
__u32 pid1, pid2, pid3;
struct test_tc_link *skel;
struct bpf_link *link;
int err;
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
assert_mprog_count(target, 0);
if (chain_tc_old) {
tc_hook.attach_point = target == BPF_TCX_INGRESS ?
BPF_TC_INGRESS : BPF_TC_EGRESS;
err = bpf_tc_hook_create(&tc_hook);
if (err == 0)
hook_created = true;
err = err == -EEXIST ? 0 : err;
if (!ASSERT_OK(err, "bpf_tc_hook_create"))
goto cleanup;
tc_opts.prog_fd = bpf_program__fd(skel->progs.tc3);
err = bpf_tc_attach(&tc_hook, &tc_opts);
if (!ASSERT_OK(err, "bpf_tc_attach"))
goto cleanup;
tc_attached = true;
}
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc1 = link;
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc2 = link;
assert_mprog_count(target, 2);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
skel->bss->seen_tc1 = false;
skel->bss->seen_tc2 = false;
skel->bss->seen_tc3 = false;
err = bpf_link__detach(skel->links.tc2);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
assert_mprog_count(target, 1);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
cleanup:
if (tc_attached) {
tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
err = bpf_tc_detach(&tc_hook, &tc_opts);
ASSERT_OK(err, "bpf_tc_detach");
}
if (hook_created) {
tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
bpf_tc_hook_destroy(&tc_hook);
}
assert_mprog_count(target, 1);
test_tc_link__destroy(skel);
assert_mprog_count(target, 0);
}
void serial_test_tc_links_chain_classic(void)
{
test_tc_chain_classic(BPF_TCX_INGRESS, false);
test_tc_chain_classic(BPF_TCX_EGRESS, false);
test_tc_chain_classic(BPF_TCX_INGRESS, true);
test_tc_chain_classic(BPF_TCX_EGRESS, true);
}
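/* Verify replace semantics: BPF_F_REPLACE is rejected for tcx link attach,
 * while bpf_link__update_program() swaps the program behind an existing link
 * in place (same slot, same link id); updating a link to the program it
 * already holds is a no-op.
 */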
static void test_tc_links_replace_target(int target)
{
LIBBPF_OPTS(bpf_prog_query_opts, optq);
LIBBPF_OPTS(bpf_tcx_opts, optl);
__u32 pid1, pid2, pid3, lid1, lid2;
__u32 prog_ids[4], link_ids[4];
struct test_tc_link *skel;
struct bpf_link *link;
int err;
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
0, "tc3_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
assert_mprog_count(target, 0);
optl.expected_revision = 1;
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc1 = link;
lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE,
.relative_id = pid1,
.expected_revision = 2,
);
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc2 = link;
lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
assert_mprog_count(target, 2);
optq.prog_ids = prog_ids;
optq.link_ids = link_ids;
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 3, "revision");
ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
skel->bss->seen_tc1 = false;
skel->bss->seen_tc2 = false;
skel->bss->seen_tc3 = false;
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_REPLACE,
.relative_fd = bpf_program__fd(skel->progs.tc2),
.expected_revision = 3,
);
link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 2);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_REPLACE | BPF_F_LINK,
.relative_fd = bpf_link__fd(skel->links.tc2),
.expected_revision = 3,
);
link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 2);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_REPLACE | BPF_F_LINK | BPF_F_AFTER,
.relative_id = lid2,
);
link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 2);
err = bpf_link__update_program(skel->links.tc2, skel->progs.tc3);
if (!ASSERT_OK(err, "link_update"))
goto cleanup;
assert_mprog_count(target, 2);
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 4, "revision");
ASSERT_EQ(optq.prog_ids[0], pid3, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
skel->bss->seen_tc1 = false;
skel->bss->seen_tc2 = false;
skel->bss->seen_tc3 = false;
err = bpf_link__detach(skel->links.tc2);
if (!ASSERT_OK(err, "link_detach"))
goto cleanup;
assert_mprog_count(target, 1);
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 1, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
skel->bss->seen_tc1 = false;
skel->bss->seen_tc2 = false;
skel->bss->seen_tc3 = false;
err = bpf_link__update_program(skel->links.tc1, skel->progs.tc1);
if (!ASSERT_OK(err, "link_update_self"))
goto cleanup;
assert_mprog_count(target, 1);
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 1, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
cleanup:
test_tc_link__destroy(skel);
assert_mprog_count(target, 0);
}
void serial_test_tc_links_replace(void)
{
test_tc_links_replace_target(BPF_TCX_INGRESS);
test_tc_links_replace_target(BPF_TCX_EGRESS);
}
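/* Exercise invalid flag and relative fd/id combinations for tcx link attach
 * and verify each one is rejected without changing the attached program set.
 */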
static void test_tc_links_invalid_target(int target)
{
LIBBPF_OPTS(bpf_prog_query_opts, optq);
LIBBPF_OPTS(bpf_tcx_opts, optl);
__u32 pid1, pid2, lid1;
struct test_tc_link *skel;
struct bpf_link *link;
int err;
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
assert_mprog_count(target, 0);
optl.flags = BPF_F_BEFORE | BPF_F_AFTER;
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE | BPF_F_ID,
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER | BPF_F_ID,
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_ID,
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_LINK,
.relative_fd = bpf_program__fd(skel->progs.tc2),
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_LINK,
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.relative_fd = bpf_program__fd(skel->progs.tc2),
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE | BPF_F_AFTER,
.relative_fd = bpf_program__fd(skel->progs.tc2),
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE,
.relative_fd = bpf_program__fd(skel->progs.tc1),
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_ID,
.relative_id = pid2,
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_ID,
.relative_id = 42,
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE,
.relative_fd = bpf_program__fd(skel->progs.tc1),
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE | BPF_F_LINK,
.relative_fd = bpf_program__fd(skel->progs.tc1),
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER,
.relative_fd = bpf_program__fd(skel->progs.tc1),
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl);
link = bpf_program__attach_tcx(skel->progs.tc1, 0, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER | BPF_F_LINK,
.relative_fd = bpf_program__fd(skel->progs.tc1),
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 0);
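	/* Plain attach with default opts is the first one expected to succeed. */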
LIBBPF_OPTS_RESET(optl);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc1 = link;
lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER | BPF_F_LINK,
.relative_fd = bpf_program__fd(skel->progs.tc1),
);
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID,
.relative_id = ~0,
);
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID,
.relative_id = lid1,
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE | BPF_F_ID,
.relative_id = pid1,
);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
assert_mprog_count(target, 1);
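	/* Relative placement against the existing link (by link ID) is valid,
	 * so attaching tc2 before tc1 succeeds and the chain grows to two.
	 */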
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID,
.relative_id = lid1,
);
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc2 = link;
assert_mprog_count(target, 2);
cleanup:
test_tc_link__destroy(skel);
assert_mprog_count(target, 0);
}
void serial_test_tc_links_invalid(void)
{
test_tc_links_invalid_target(BPF_TCX_INGRESS);
test_tc_links_invalid_target(BPF_TCX_EGRESS);
}
static void test_tc_links_prepend_target(int target)
{
LIBBPF_OPTS(bpf_prog_query_opts, optq);
LIBBPF_OPTS(bpf_tcx_opts, optl);
__u32 prog_ids[5], link_ids[5];
__u32 pid1, pid2, pid3, pid4;
__u32 lid1, lid2, lid3, lid4;
struct test_tc_link *skel;
struct bpf_link *link;
int err;
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
0, "tc3_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
0, "tc4_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
assert_mprog_count(target, 0);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc1 = link;
lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE,
);
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc2 = link;
lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
assert_mprog_count(target, 2);
optq.prog_ids = prog_ids;
optq.link_ids = link_ids;
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 3, "revision");
ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
skel->bss->seen_tc1 = false;
skel->bss->seen_tc2 = false;
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE,
);
link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc3 = link;
lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3));
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE,
);
link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc4 = link;
lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4));
assert_mprog_count(target, 4);
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 4, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], pid4, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid4, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], pid3, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], lid3, "link_ids[1]");
ASSERT_EQ(optq.prog_ids[2], pid2, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], lid2, "link_ids[2]");
ASSERT_EQ(optq.prog_ids[3], pid1, "prog_ids[3]");
ASSERT_EQ(optq.link_ids[3], lid1, "link_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
cleanup:
test_tc_link__destroy(skel);
assert_mprog_count(target, 0);
}
void serial_test_tc_links_prepend(void)
{
test_tc_links_prepend_target(BPF_TCX_INGRESS);
test_tc_links_prepend_target(BPF_TCX_EGRESS);
}
static void test_tc_links_append_target(int target)
{
LIBBPF_OPTS(bpf_prog_query_opts, optq);
LIBBPF_OPTS(bpf_tcx_opts, optl);
__u32 prog_ids[5], link_ids[5];
__u32 pid1, pid2, pid3, pid4;
__u32 lid1, lid2, lid3, lid4;
struct test_tc_link *skel;
struct bpf_link *link;
int err;
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
0, "tc3_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
0, "tc4_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
assert_mprog_count(target, 0);
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc1 = link;
lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
assert_mprog_count(target, 1);
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER,
);
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc2 = link;
lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
assert_mprog_count(target, 2);
optq.prog_ids = prog_ids;
optq.link_ids = link_ids;
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 2, "count");
ASSERT_EQ(optq.revision, 3, "revision");
ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
skel->bss->seen_tc1 = false;
skel->bss->seen_tc2 = false;
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER,
);
link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc3 = link;
lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3));
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER,
);
link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc4 = link;
lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4));
assert_mprog_count(target, 4);
memset(prog_ids, 0, sizeof(prog_ids));
memset(link_ids, 0, sizeof(link_ids));
optq.count = ARRAY_SIZE(prog_ids);
err = bpf_prog_query_opts(loopback, target, &optq);
if (!ASSERT_OK(err, "prog_query"))
goto cleanup;
ASSERT_EQ(optq.count, 4, "count");
ASSERT_EQ(optq.revision, 5, "revision");
ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
ASSERT_EQ(optq.prog_ids[2], pid3, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], lid3, "link_ids[2]");
ASSERT_EQ(optq.prog_ids[3], pid4, "prog_ids[3]");
ASSERT_EQ(optq.link_ids[3], lid4, "link_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
cleanup:
test_tc_link__destroy(skel);
assert_mprog_count(target, 0);
}
void serial_test_tc_links_append(void)
{
test_tc_links_append_target(BPF_TCX_INGRESS);
test_tc_links_append_target(BPF_TCX_EGRESS);
}
static void test_tc_links_dev_cleanup_target(int target)
{
LIBBPF_OPTS(bpf_tcx_opts, optl);
LIBBPF_OPTS(bpf_prog_query_opts, optq);
__u32 pid1, pid2, pid3, pid4;
struct test_tc_link *skel;
struct bpf_link *link;
int err, ifindex;
ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
ifindex = if_nametoindex("tcx_opts1");
ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
0, "tc3_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
0, "tc4_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
assert_mprog_count(target, 0);
link = bpf_program__attach_tcx(skel->progs.tc1, ifindex, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc1 = link;
assert_mprog_count_ifindex(ifindex, target, 1);
link = bpf_program__attach_tcx(skel->progs.tc2, ifindex, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc2 = link;
assert_mprog_count_ifindex(ifindex, target, 2);
link = bpf_program__attach_tcx(skel->progs.tc3, ifindex, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc3 = link;
assert_mprog_count_ifindex(ifindex, target, 3);
link = bpf_program__attach_tcx(skel->progs.tc4, ifindex, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc4 = link;
assert_mprog_count_ifindex(ifindex, target, 4);
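	/* Deleting the veth must auto-release all tcx links; each link then
	 * reports ifindex 0.
	 */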
ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc1)), 0, "tc1_ifindex");
ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc2)), 0, "tc2_ifindex");
ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc3)), 0, "tc3_ifindex");
ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc4)), 0, "tc4_ifindex");
test_tc_link__destroy(skel);
return;
cleanup:
test_tc_link__destroy(skel);
ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
}
void serial_test_tc_links_dev_cleanup(void)
{
test_tc_links_dev_cleanup_target(BPF_TCX_INGRESS);
test_tc_links_dev_cleanup_target(BPF_TCX_EGRESS);
}
static void test_tc_chain_mixed(int target)
{
LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
LIBBPF_OPTS(bpf_tcx_opts, optl);
struct test_tc_link *skel;
struct bpf_link *link;
__u32 pid1, pid2, pid3;
int err;
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
0, "tc4_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc5, target),
0, "tc5_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc6, target),
0, "tc6_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc5));
pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc6));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
assert_mprog_count(target, 0);
tc_hook.attach_point = target == BPF_TCX_INGRESS ?
BPF_TC_INGRESS : BPF_TC_EGRESS;
err = bpf_tc_hook_create(&tc_hook);
err = err == -EEXIST ? 0 : err;
if (!ASSERT_OK(err, "bpf_tc_hook_create"))
goto cleanup;
tc_opts.prog_fd = bpf_program__fd(skel->progs.tc5);
err = bpf_tc_attach(&tc_hook, &tc_opts);
if (!ASSERT_OK(err, "bpf_tc_attach"))
goto cleanup;
link = bpf_program__attach_tcx(skel->progs.tc6, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc6 = link;
assert_mprog_count(target, 1);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");
skel->bss->seen_tc4 = false;
skel->bss->seen_tc5 = false;
skel->bss->seen_tc6 = false;
err = bpf_link__update_program(skel->links.tc6, skel->progs.tc4);
if (!ASSERT_OK(err, "link_update"))
goto cleanup;
assert_mprog_count(target, 1);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
skel->bss->seen_tc4 = false;
skel->bss->seen_tc5 = false;
skel->bss->seen_tc6 = false;
err = bpf_link__detach(skel->links.tc6);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
__assert_mprog_count(target, 0, true, loopback);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
cleanup:
tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
err = bpf_tc_detach(&tc_hook, &tc_opts);
ASSERT_OK(err, "bpf_tc_detach");
tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
bpf_tc_hook_destroy(&tc_hook);
test_tc_link__destroy(skel);
}
void serial_test_tc_links_chain_mixed(void)
{
test_tc_chain_mixed(BPF_TCX_INGRESS);
test_tc_chain_mixed(BPF_TCX_EGRESS);
}
static void test_tc_links_ingress(int target, bool chain_tc_old,
bool tcx_teardown_first)
{
LIBBPF_OPTS(bpf_tc_opts, tc_opts,
.handle = 1,
.priority = 1,
);
LIBBPF_OPTS(bpf_tc_hook, tc_hook,
.ifindex = loopback,
.attach_point = BPF_TC_CUSTOM,
.parent = TC_H_INGRESS,
);
bool hook_created = false, tc_attached = false;
LIBBPF_OPTS(bpf_tcx_opts, optl);
__u32 pid1, pid2, pid3;
struct test_tc_link *skel;
struct bpf_link *link;
int err;
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
assert_mprog_count(target, 0);
if (chain_tc_old) {
ASSERT_OK(system("tc qdisc add dev lo ingress"), "add_ingress");
hook_created = true;
tc_opts.prog_fd = bpf_program__fd(skel->progs.tc3);
err = bpf_tc_attach(&tc_hook, &tc_opts);
if (!ASSERT_OK(err, "bpf_tc_attach"))
goto cleanup;
tc_attached = true;
}
link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc1 = link;
link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc2 = link;
assert_mprog_count(target, 2);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
skel->bss->seen_tc1 = false;
skel->bss->seen_tc2 = false;
skel->bss->seen_tc3 = false;
err = bpf_link__detach(skel->links.tc2);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
assert_mprog_count(target, 1);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
cleanup:
if (tc_attached) {
tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
err = bpf_tc_detach(&tc_hook, &tc_opts);
ASSERT_OK(err, "bpf_tc_detach");
}
ASSERT_OK(system(ping_cmd), ping_cmd);
assert_mprog_count(target, 1);
if (hook_created && tcx_teardown_first)
ASSERT_OK(system("tc qdisc del dev lo ingress"), "del_ingress");
ASSERT_OK(system(ping_cmd), ping_cmd);
test_tc_link__destroy(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
if (hook_created && !tcx_teardown_first)
ASSERT_OK(system("tc qdisc del dev lo ingress"), "del_ingress");
ASSERT_OK(system(ping_cmd), ping_cmd);
assert_mprog_count(target, 0);
}
void serial_test_tc_links_ingress(void)
{
test_tc_links_ingress(BPF_TCX_INGRESS, true, true);
test_tc_links_ingress(BPF_TCX_INGRESS, true, false);
test_tc_links_ingress(BPF_TCX_INGRESS, false, false);
}
static void test_tc_links_dev_mixed(int target)
{
LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
LIBBPF_OPTS(bpf_tc_hook, tc_hook);
LIBBPF_OPTS(bpf_tcx_opts, optl);
__u32 pid1, pid2, pid3, pid4;
struct test_tc_link *skel;
struct bpf_link *link;
int err, ifindex;
ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
ifindex = if_nametoindex("tcx_opts1");
ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
skel = test_tc_link__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
0, "tc1_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
0, "tc2_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
0, "tc3_attach_type");
ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
0, "tc4_attach_type");
err = test_tc_link__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
assert_mprog_count(target, 0);
link = bpf_program__attach_tcx(skel->progs.tc1, ifindex, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc1 = link;
assert_mprog_count_ifindex(ifindex, target, 1);
link = bpf_program__attach_tcx(skel->progs.tc2, ifindex, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc2 = link;
assert_mprog_count_ifindex(ifindex, target, 2);
link = bpf_program__attach_tcx(skel->progs.tc3, ifindex, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc3 = link;
assert_mprog_count_ifindex(ifindex, target, 3);
link = bpf_program__attach_tcx(skel->progs.tc4, ifindex, &optl);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel->links.tc4 = link;
assert_mprog_count_ifindex(ifindex, target, 4);
tc_hook.ifindex = ifindex;
tc_hook.attach_point = target == BPF_TCX_INGRESS ?
BPF_TC_INGRESS : BPF_TC_EGRESS;
err = bpf_tc_hook_create(&tc_hook);
err = err == -EEXIST ? 0 : err;
if (!ASSERT_OK(err, "bpf_tc_hook_create"))
goto cleanup;
tc_opts.prog_fd = bpf_program__fd(skel->progs.tc5);
err = bpf_tc_attach(&tc_hook, &tc_opts);
if (!ASSERT_OK(err, "bpf_tc_attach"))
goto cleanup;
ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc1)), 0, "tc1_ifindex");
ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc2)), 0, "tc2_ifindex");
ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc3)), 0, "tc3_ifindex");
ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc4)), 0, "tc4_ifindex");
test_tc_link__destroy(skel);
return;
cleanup:
test_tc_link__destroy(skel);
ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
}
void serial_test_tc_links_dev_mixed(void)
{
test_tc_links_dev_mixed(BPF_TCX_INGRESS);
test_tc_links_dev_mixed(BPF_TCX_EGRESS);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/tc_links.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_stacktrace_build_id.skel.h"
void test_stacktrace_build_id(void)
{
int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
struct test_stacktrace_build_id *skel;
int err, stack_trace_len, build_id_size;
__u32 key, prev_key, val, duration = 0;
char buf[BPF_BUILD_ID_SIZE];
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
int build_id_matches = 0;
int i, retry = 1;
retry:
skel = test_stacktrace_build_id__open_and_load();
if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
return;
err = test_stacktrace_build_id__attach(skel);
if (CHECK(err, "attach_tp", "err %d\n", err))
goto cleanup;
/* find map fds */
control_map_fd = bpf_map__fd(skel->maps.control_map);
stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
stackmap_fd = bpf_map__fd(skel->maps.stackmap);
stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
goto cleanup;
if (CHECK_FAIL(system("./urandom_read")))
goto cleanup;
/* disable stack trace collection */
key = 0;
val = 1;
bpf_map_update_elem(control_map_fd, &key, &val, 0);
	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
goto cleanup;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
goto cleanup;
build_id_size = read_build_id("urandom_read", buf, sizeof(buf));
err = build_id_size < 0 ? build_id_size : 0;
if (CHECK(err, "read_build_id",
"err %d errno %d\n", err, errno))
goto cleanup;
err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
if (CHECK(err, "get_next_key from stackmap",
"err %d, errno %d\n", err, errno))
goto cleanup;
do {
err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
if (CHECK(err, "lookup_elem from stackmap",
"err %d, errno %d\n", err, errno))
goto cleanup;
for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
id_offs[i].offset != 0) {
if (memcmp(buf, id_offs[i].build_id, build_id_size) == 0)
build_id_matches = 1;
}
prev_key = key;
} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
/* stack_map_get_build_id_offset() is racy and sometimes can return
* BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
* try it one more time.
*/
if (build_id_matches < 1 && retry--) {
test_stacktrace_build_id__destroy(skel);
printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
__func__);
goto retry;
}
if (CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map\n"))
goto cleanup;
stack_trace_len = PERF_MAX_STACK_DEPTH *
sizeof(struct bpf_stack_build_id);
err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
"err %d errno %d\n", err, errno);
cleanup:
test_stacktrace_build_id__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#define _GNU_SOURCE
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <linux/compiler.h>
#include "network_helpers.h"
#include "cgroup_helpers.h"
#include "test_progs.h"
#include "test_sock_fields.skel.h"
enum bpf_linum_array_idx {
EGRESS_LINUM_IDX,
INGRESS_LINUM_IDX,
READ_SK_DST_PORT_LINUM_IDX,
__NR_BPF_LINUM_ARRAY_IDX,
};
struct bpf_spinlock_cnt {
struct bpf_spin_lock lock;
__u32 cnt;
};
#define PARENT_CGROUP "/test-bpf-sock-fields"
#define CHILD_CGROUP "/test-bpf-sock-fields/child"
#define DATA "Hello BPF!"
#define DATA_LEN sizeof(DATA)
static struct sockaddr_in6 srv_sa6, cli_sa6;
static int sk_pkt_out_cnt10_fd;
static struct test_sock_fields *skel;
static int sk_pkt_out_cnt_fd;
static __u64 parent_cg_id;
static __u64 child_cg_id;
static int linum_map_fd;
static __u32 duration;
static bool create_netns(void)
{
if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
return false;
if (!ASSERT_OK(system("ip link set dev lo up"), "bring up lo"))
return false;
return true;
}
static void print_sk(const struct bpf_sock *sk, const char *prefix)
{
char src_ip4[24], dst_ip4[24];
char src_ip6[64], dst_ip6[64];
inet_ntop(AF_INET, &sk->src_ip4, src_ip4, sizeof(src_ip4));
inet_ntop(AF_INET6, &sk->src_ip6, src_ip6, sizeof(src_ip6));
inet_ntop(AF_INET, &sk->dst_ip4, dst_ip4, sizeof(dst_ip4));
inet_ntop(AF_INET6, &sk->dst_ip6, dst_ip6, sizeof(dst_ip6));
printf("%s: state:%u bound_dev_if:%u family:%u type:%u protocol:%u mark:%u priority:%u "
"src_ip4:%x(%s) src_ip6:%x:%x:%x:%x(%s) src_port:%u "
"dst_ip4:%x(%s) dst_ip6:%x:%x:%x:%x(%s) dst_port:%u\n",
prefix,
sk->state, sk->bound_dev_if, sk->family, sk->type, sk->protocol,
sk->mark, sk->priority,
sk->src_ip4, src_ip4,
sk->src_ip6[0], sk->src_ip6[1], sk->src_ip6[2], sk->src_ip6[3],
src_ip6, sk->src_port,
sk->dst_ip4, dst_ip4,
sk->dst_ip6[0], sk->dst_ip6[1], sk->dst_ip6[2], sk->dst_ip6[3],
dst_ip6, ntohs(sk->dst_port));
}
static void print_tp(const struct bpf_tcp_sock *tp, const char *prefix)
{
printf("%s: snd_cwnd:%u srtt_us:%u rtt_min:%u snd_ssthresh:%u rcv_nxt:%u "
"snd_nxt:%u snd:una:%u mss_cache:%u ecn_flags:%u "
"rate_delivered:%u rate_interval_us:%u packets_out:%u "
"retrans_out:%u total_retrans:%u segs_in:%u data_segs_in:%u "
"segs_out:%u data_segs_out:%u lost_out:%u sacked_out:%u "
"bytes_received:%llu bytes_acked:%llu\n",
prefix,
tp->snd_cwnd, tp->srtt_us, tp->rtt_min, tp->snd_ssthresh,
tp->rcv_nxt, tp->snd_nxt, tp->snd_una, tp->mss_cache,
tp->ecn_flags, tp->rate_delivered, tp->rate_interval_us,
tp->packets_out, tp->retrans_out, tp->total_retrans,
tp->segs_in, tp->data_segs_in, tp->segs_out,
tp->data_segs_out, tp->lost_out, tp->sacked_out,
tp->bytes_received, tp->bytes_acked);
}
static void check_result(void)
{
struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;
struct bpf_sock srv_sk, cli_sk, listen_sk;
__u32 idx, ingress_linum, egress_linum, linum;
int err;
idx = EGRESS_LINUM_IDX;
err = bpf_map_lookup_elem(linum_map_fd, &idx, &egress_linum);
CHECK(err < 0, "bpf_map_lookup_elem(linum_map_fd)",
"err:%d errno:%d\n", err, errno);
idx = INGRESS_LINUM_IDX;
err = bpf_map_lookup_elem(linum_map_fd, &idx, &ingress_linum);
CHECK(err < 0, "bpf_map_lookup_elem(linum_map_fd)",
"err:%d errno:%d\n", err, errno);
idx = READ_SK_DST_PORT_LINUM_IDX;
err = bpf_map_lookup_elem(linum_map_fd, &idx, &linum);
ASSERT_OK(err, "bpf_map_lookup_elem(linum_map_fd, READ_SK_DST_PORT_IDX)");
ASSERT_EQ(linum, 0, "failure in read_sk_dst_port on line");
memcpy(&srv_sk, &skel->bss->srv_sk, sizeof(srv_sk));
memcpy(&srv_tp, &skel->bss->srv_tp, sizeof(srv_tp));
memcpy(&cli_sk, &skel->bss->cli_sk, sizeof(cli_sk));
memcpy(&cli_tp, &skel->bss->cli_tp, sizeof(cli_tp));
memcpy(&listen_sk, &skel->bss->listen_sk, sizeof(listen_sk));
memcpy(&listen_tp, &skel->bss->listen_tp, sizeof(listen_tp));
print_sk(&listen_sk, "listen_sk");
print_sk(&srv_sk, "srv_sk");
print_sk(&cli_sk, "cli_sk");
print_tp(&listen_tp, "listen_tp");
print_tp(&srv_tp, "srv_tp");
print_tp(&cli_tp, "cli_tp");
CHECK(listen_sk.state != 10 ||
listen_sk.family != AF_INET6 ||
listen_sk.protocol != IPPROTO_TCP ||
memcmp(listen_sk.src_ip6, &in6addr_loopback,
sizeof(listen_sk.src_ip6)) ||
listen_sk.dst_ip6[0] || listen_sk.dst_ip6[1] ||
listen_sk.dst_ip6[2] || listen_sk.dst_ip6[3] ||
listen_sk.src_port != ntohs(srv_sa6.sin6_port) ||
listen_sk.dst_port,
"listen_sk",
"Unexpected. Check listen_sk output. ingress_linum:%u\n",
ingress_linum);
CHECK(srv_sk.state == 10 ||
!srv_sk.state ||
srv_sk.family != AF_INET6 ||
srv_sk.protocol != IPPROTO_TCP ||
memcmp(srv_sk.src_ip6, &in6addr_loopback,
sizeof(srv_sk.src_ip6)) ||
memcmp(srv_sk.dst_ip6, &in6addr_loopback,
sizeof(srv_sk.dst_ip6)) ||
srv_sk.src_port != ntohs(srv_sa6.sin6_port) ||
srv_sk.dst_port != cli_sa6.sin6_port,
"srv_sk", "Unexpected. Check srv_sk output. egress_linum:%u\n",
egress_linum);
CHECK(!skel->bss->lsndtime, "srv_tp", "Unexpected lsndtime:0\n");
CHECK(cli_sk.state == 10 ||
!cli_sk.state ||
cli_sk.family != AF_INET6 ||
cli_sk.protocol != IPPROTO_TCP ||
memcmp(cli_sk.src_ip6, &in6addr_loopback,
sizeof(cli_sk.src_ip6)) ||
memcmp(cli_sk.dst_ip6, &in6addr_loopback,
sizeof(cli_sk.dst_ip6)) ||
cli_sk.src_port != ntohs(cli_sa6.sin6_port) ||
cli_sk.dst_port != srv_sa6.sin6_port,
"cli_sk", "Unexpected. Check cli_sk output. egress_linum:%u\n",
egress_linum);
CHECK(listen_tp.data_segs_out ||
listen_tp.data_segs_in ||
listen_tp.total_retrans ||
listen_tp.bytes_acked,
"listen_tp",
"Unexpected. Check listen_tp output. ingress_linum:%u\n",
ingress_linum);
CHECK(srv_tp.data_segs_out != 2 ||
srv_tp.data_segs_in ||
srv_tp.snd_cwnd != 10 ||
srv_tp.total_retrans ||
srv_tp.bytes_acked < 2 * DATA_LEN,
"srv_tp", "Unexpected. Check srv_tp output. egress_linum:%u\n",
egress_linum);
CHECK(cli_tp.data_segs_out ||
cli_tp.data_segs_in != 2 ||
cli_tp.snd_cwnd != 10 ||
cli_tp.total_retrans ||
cli_tp.bytes_received < 2 * DATA_LEN,
"cli_tp", "Unexpected. Check cli_tp output. egress_linum:%u\n",
egress_linum);
CHECK(skel->bss->parent_cg_id != parent_cg_id,
"parent_cg_id", "%zu != %zu\n",
(size_t)skel->bss->parent_cg_id, (size_t)parent_cg_id);
CHECK(skel->bss->child_cg_id != child_cg_id,
"child_cg_id", "%zu != %zu\n",
(size_t)skel->bss->child_cg_id, (size_t)child_cg_id);
}
static void check_sk_pkt_out_cnt(int accept_fd, int cli_fd)
{
struct bpf_spinlock_cnt pkt_out_cnt = {}, pkt_out_cnt10 = {};
int err;
pkt_out_cnt.cnt = ~0;
pkt_out_cnt10.cnt = ~0;
err = bpf_map_lookup_elem(sk_pkt_out_cnt_fd, &accept_fd, &pkt_out_cnt);
if (!err)
err = bpf_map_lookup_elem(sk_pkt_out_cnt10_fd, &accept_fd,
&pkt_out_cnt10);
	/* The bpf prog only counts for a fullsock, and the passive
	 * connection did not become a fullsock until the 3WHS had
	 * finished, so the bpf prog only counted two data packets out.
	 */
CHECK(err || pkt_out_cnt.cnt < 0xeB9F + 2 ||
pkt_out_cnt10.cnt < 0xeB9F + 20,
"bpf_map_lookup_elem(sk_pkt_out_cnt, &accept_fd)",
"err:%d errno:%d pkt_out_cnt:%u pkt_out_cnt10:%u\n",
err, errno, pkt_out_cnt.cnt, pkt_out_cnt10.cnt);
pkt_out_cnt.cnt = ~0;
pkt_out_cnt10.cnt = ~0;
err = bpf_map_lookup_elem(sk_pkt_out_cnt_fd, &cli_fd, &pkt_out_cnt);
if (!err)
err = bpf_map_lookup_elem(sk_pkt_out_cnt10_fd, &cli_fd,
&pkt_out_cnt10);
	/* The active connection is a fullsock from the beginning:
	 * 1 SYN and 1 ACK during the 3WHS,
	 * 2 ACKs on the data packets.
	 *
	 * The bpf_prog initialized it to 0xeB9F.
	 */
CHECK(err || pkt_out_cnt.cnt < 0xeB9F + 4 ||
pkt_out_cnt10.cnt < 0xeB9F + 40,
"bpf_map_lookup_elem(sk_pkt_out_cnt, &cli_fd)",
"err:%d errno:%d pkt_out_cnt:%u pkt_out_cnt10:%u\n",
err, errno, pkt_out_cnt.cnt, pkt_out_cnt10.cnt);
}
static int init_sk_storage(int sk_fd, __u32 pkt_out_cnt)
{
struct bpf_spinlock_cnt scnt = {};
int err;
scnt.cnt = pkt_out_cnt;
err = bpf_map_update_elem(sk_pkt_out_cnt_fd, &sk_fd, &scnt,
BPF_NOEXIST);
if (CHECK(err, "bpf_map_update_elem(sk_pkt_out_cnt_fd)",
"err:%d errno:%d\n", err, errno))
return err;
err = bpf_map_update_elem(sk_pkt_out_cnt10_fd, &sk_fd, &scnt,
BPF_NOEXIST);
if (CHECK(err, "bpf_map_update_elem(sk_pkt_out_cnt10_fd)",
"err:%d errno:%d\n", err, errno))
return err;
return 0;
}
static void test(void)
{
int listen_fd = -1, cli_fd = -1, accept_fd = -1, err, i;
socklen_t addrlen = sizeof(struct sockaddr_in6);
char buf[DATA_LEN];
/* Prepare listen_fd */
listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0xcafe, 0);
/* start_server() has logged the error details */
if (CHECK_FAIL(listen_fd == -1))
goto done;
err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err,
errno))
goto done;
memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6));
cli_fd = connect_to_fd(listen_fd, 0);
if (CHECK_FAIL(cli_fd == -1))
goto done;
err = getsockname(cli_fd, (struct sockaddr *)&cli_sa6, &addrlen);
if (CHECK(err, "getsockname(cli_fd)", "err:%d errno:%d\n",
err, errno))
goto done;
accept_fd = accept(listen_fd, NULL, NULL);
if (CHECK(accept_fd == -1, "accept(listen_fd)",
"accept_fd:%d errno:%d\n",
accept_fd, errno))
goto done;
if (init_sk_storage(accept_fd, 0xeB9F))
goto done;
for (i = 0; i < 2; i++) {
/* Send some data from accept_fd to cli_fd.
* MSG_EOR to stop kernel from coalescing two pkts.
*/
err = send(accept_fd, DATA, DATA_LEN, MSG_EOR);
if (CHECK(err != DATA_LEN, "send(accept_fd)",
"err:%d errno:%d\n", err, errno))
goto done;
err = recv(cli_fd, buf, DATA_LEN, 0);
if (CHECK(err != DATA_LEN, "recv(cli_fd)", "err:%d errno:%d\n",
err, errno))
goto done;
}
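	/* Shut down both directions so FINs are exchanged and the sockets
	 * leave the established state before checking results.
	 */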
shutdown(cli_fd, SHUT_WR);
err = recv(accept_fd, buf, 1, 0);
if (CHECK(err, "recv(accept_fd) for fin", "err:%d errno:%d\n",
err, errno))
goto done;
shutdown(accept_fd, SHUT_WR);
err = recv(cli_fd, buf, 1, 0);
if (CHECK(err, "recv(cli_fd) for fin", "err:%d errno:%d\n",
err, errno))
goto done;
check_sk_pkt_out_cnt(accept_fd, cli_fd);
check_result();
done:
if (accept_fd != -1)
close(accept_fd);
if (cli_fd != -1)
close(cli_fd);
if (listen_fd != -1)
close(listen_fd);
}
void serial_test_sock_fields(void)
{
int parent_cg_fd = -1, child_cg_fd = -1;
struct bpf_link *link;
/* Use a dedicated netns to have a fixed listen port */
if (!create_netns())
return;
/* Create a cgroup, get fd, and join it */
parent_cg_fd = test__join_cgroup(PARENT_CGROUP);
if (CHECK_FAIL(parent_cg_fd < 0))
return;
parent_cg_id = get_cgroup_id(PARENT_CGROUP);
if (CHECK_FAIL(!parent_cg_id))
goto done;
child_cg_fd = test__join_cgroup(CHILD_CGROUP);
if (CHECK_FAIL(child_cg_fd < 0))
goto done;
child_cg_id = get_cgroup_id(CHILD_CGROUP);
if (CHECK_FAIL(!child_cg_id))
goto done;
skel = test_sock_fields__open_and_load();
if (CHECK(!skel, "test_sock_fields__open_and_load", "failed\n"))
goto done;
link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields, child_cg_fd);
if (!ASSERT_OK_PTR(link, "attach_cgroup(egress_read_sock_fields)"))
goto done;
skel->links.egress_read_sock_fields = link;
link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields, child_cg_fd);
if (!ASSERT_OK_PTR(link, "attach_cgroup(ingress_read_sock_fields)"))
goto done;
skel->links.ingress_read_sock_fields = link;
link = bpf_program__attach_cgroup(skel->progs.read_sk_dst_port, child_cg_fd);
if (!ASSERT_OK_PTR(link, "attach_cgroup(read_sk_dst_port"))
goto done;
skel->links.read_sk_dst_port = link;
linum_map_fd = bpf_map__fd(skel->maps.linum_map);
sk_pkt_out_cnt_fd = bpf_map__fd(skel->maps.sk_pkt_out_cnt);
sk_pkt_out_cnt10_fd = bpf_map__fd(skel->maps.sk_pkt_out_cnt10);
test();
done:
test_sock_fields__destroy(skel);
if (child_cg_fd >= 0)
close(child_cg_fd);
if (parent_cg_fd >= 0)
close(parent_cg_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sock_fields.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "test_perf_buffer.skel.h"
#include "bpf/libbpf_internal.h"
static int duration;
/* AddressSanitizer sometimes crashes on the data dereference below because
 * the buffer is mmap()'ed memory. Disable instrumentation with the
 * no_sanitize_address attribute.
 */
__attribute__((no_sanitize_address))
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
int cpu_data = *(int *)data, duration = 0;
cpu_set_t *cpu_seen = ctx;
if (cpu_data != cpu)
CHECK(cpu_data != cpu, "check_cpu_data",
"cpu_data %d != cpu %d\n", cpu_data, cpu);
CPU_SET(cpu, cpu_seen);
}
int trigger_on_cpu(int cpu)
{
cpu_set_t cpu_set;
int err;
CPU_ZERO(&cpu_set);
CPU_SET(cpu, &cpu_set);
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", cpu, err))
return err;
usleep(1);
return 0;
}
void serial_test_perf_buffer(void)
{
int err, on_len, nr_on_cpus = 0, nr_cpus, i, j;
int zero = 0, my_pid = getpid();
struct test_perf_buffer *skel;
cpu_set_t cpu_seen;
struct perf_buffer *pb;
int last_fd = -1, fd;
bool *online;
nr_cpus = libbpf_num_possible_cpus();
if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))
return;
err = parse_cpu_mask_file("/sys/devices/system/cpu/online",
&online, &on_len);
if (CHECK(err, "nr_on_cpus", "err %d\n", err))
return;
for (i = 0; i < on_len; i++)
if (online[i])
nr_on_cpus++;
/* load program */
skel = test_perf_buffer__open_and_load();
if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
goto out_close;
err = bpf_map_update_elem(bpf_map__fd(skel->maps.my_pid_map), &zero, &my_pid, 0);
if (!ASSERT_OK(err, "my_pid_update"))
goto out_close;
/* attach probe */
err = test_perf_buffer__attach(skel);
if (CHECK(err, "attach_kprobe", "err %d\n", err))
goto out_close;
/* set up perf buffer */
pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1,
on_sample, NULL, &cpu_seen, NULL);
if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto out_close;
CHECK(perf_buffer__epoll_fd(pb) < 0, "epoll_fd",
"bad fd: %d\n", perf_buffer__epoll_fd(pb));
/* trigger kprobe on every CPU */
CPU_ZERO(&cpu_seen);
for (i = 0; i < nr_cpus; i++) {
if (i >= on_len || !online[i]) {
printf("skipping offline CPU #%d\n", i);
continue;
}
if (trigger_on_cpu(i))
goto out_close;
}
/* read perf buffer */
err = perf_buffer__poll(pb, 100);
if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
goto out_free_pb;
if (CHECK(CPU_COUNT(&cpu_seen) != nr_on_cpus, "seen_cpu_cnt",
"expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen)))
goto out_free_pb;
if (CHECK(perf_buffer__buffer_cnt(pb) != nr_on_cpus, "buf_cnt",
"got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_on_cpus))
goto out_close;
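	/* Re-trigger the kprobe on each online CPU and consume its per-CPU
	 * buffer individually.
	 */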
for (i = 0, j = 0; i < nr_cpus; i++) {
if (i >= on_len || !online[i])
continue;
fd = perf_buffer__buffer_fd(pb, j);
CHECK(fd < 0 || last_fd == fd, "fd_check", "last fd %d == fd %d\n", last_fd, fd);
last_fd = fd;
err = perf_buffer__consume_buffer(pb, j);
if (CHECK(err, "drain_buf", "cpu %d, err %d\n", i, err))
goto out_close;
CPU_CLR(i, &cpu_seen);
if (trigger_on_cpu(i))
goto out_close;
err = perf_buffer__consume_buffer(pb, j);
if (CHECK(err, "consume_buf", "cpu %d, err %d\n", j, err))
goto out_close;
if (CHECK(!CPU_ISSET(i, &cpu_seen), "cpu_seen", "cpu %d not seen\n", i))
goto out_close;
j++;
}
out_free_pb:
perf_buffer__free(pb);
out_close:
test_perf_buffer__destroy(skel);
free(online);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/perf_buffer.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#define FOO "/foo"
#define BAR "/foo/bar/"
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
static char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int prog_load(int verdict)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
size_t insns_cnt = ARRAY_SIZE(prog);
return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
}
void serial_test_cgroup_attach_override(void)
{
int drop_prog = -1, allow_prog = -1, foo = -1, bar = -1;
__u32 duration = 0;
allow_prog = prog_load(1);
if (CHECK(allow_prog < 0, "prog_load_allow",
"verifier output:\n%s\n-------\n", bpf_log_buf))
goto err;
drop_prog = prog_load(0);
if (CHECK(drop_prog < 0, "prog_load_drop",
"verifier output:\n%s\n-------\n", bpf_log_buf))
goto err;
foo = test__join_cgroup(FOO);
if (CHECK(foo < 0, "cgroup_join_foo", "cgroup setup failed\n"))
goto err;
if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"prog_attach_drop_foo_override",
"attach prog to %s failed, errno=%d\n", FOO, errno))
goto err;
if (CHECK(!system(PING_CMD), "ping_fail",
"ping unexpectedly succeeded\n"))
goto err;
bar = test__join_cgroup(BAR);
if (CHECK(bar < 0, "cgroup_join_bar", "cgroup setup failed\n"))
goto err;
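	/* bar has no program of its own yet, so it inherits foo's drop
	 * program and the ping still fails.
	 */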
if (CHECK(!system(PING_CMD), "ping_fail",
"ping unexpectedly succeeded\n"))
goto err;
if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"prog_attach_allow_bar_override",
"attach prog to %s failed, errno=%d\n", BAR, errno))
goto err;
if (CHECK(system(PING_CMD), "ping_ok", "ping failed\n"))
goto err;
if (CHECK(bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS),
"prog_detach_bar",
"detach prog from %s failed, errno=%d\n", BAR, errno))
goto err;
if (CHECK(!system(PING_CMD), "ping_fail",
"ping unexpectedly succeeded\n"))
goto err;
if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"prog_attach_allow_bar_override",
"attach prog to %s failed, errno=%d\n", BAR, errno))
goto err;
if (CHECK(bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS),
"prog_detach_foo",
"detach prog from %s failed, errno=%d\n", FOO, errno))
goto err;
if (CHECK(system(PING_CMD), "ping_ok", "ping failed\n"))
goto err;
if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"prog_attach_allow_bar_override",
"attach prog to %s failed, errno=%d\n", BAR, errno))
goto err;
if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0),
"fail_prog_attach_allow_bar_none",
"attach prog to %s unexpectedly succeeded\n", BAR))
goto err;
if (CHECK(bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS),
"prog_detach_bar",
"detach prog from %s failed, errno=%d\n", BAR, errno))
goto err;
if (CHECK(!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS),
"fail_prog_detach_foo",
"double detach from %s unexpectedly succeeded\n", FOO))
goto err;
if (CHECK(bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0),
"prog_attach_allow_foo_none",
"attach prog to %s failed, errno=%d\n", FOO, errno))
goto err;
if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0),
"fail_prog_attach_allow_bar_none",
"attach prog to %s unexpectedly succeeded\n", BAR))
goto err;
if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"fail_prog_attach_allow_bar_override",
"attach prog to %s unexpectedly succeeded\n", BAR))
goto err;
if (CHECK(!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"fail_prog_attach_allow_foo_override",
"attach prog to %s unexpectedly succeeded\n", FOO))
goto err;
if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0),
"prog_attach_drop_foo_none",
"attach prog to %s failed, errno=%d\n", FOO, errno))
goto err;
err:
close(foo);
close(bar);
close(allow_prog);
close(drop_prog);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
#include "test_progs.h"
#include "testing_helpers.h"
static void init_test_filter_set(struct test_filter_set *set)
{
set->cnt = 0;
set->tests = NULL;
}
static void free_test_filter_set(struct test_filter_set *set)
{
int i, j;
for (i = 0; i < set->cnt; i++) {
for (j = 0; j < set->tests[i].subtest_cnt; j++)
free((void *)set->tests[i].subtests[j]);
free(set->tests[i].subtests);
free(set->tests[i].name);
}
free(set->tests);
init_test_filter_set(set);
}
static void test_parse_test_list(void)
{
struct test_filter_set set;
init_test_filter_set(&set);
ASSERT_OK(parse_test_list("arg_parsing", &set, true), "parsing");
if (!ASSERT_EQ(set.cnt, 1, "test filters count"))
goto error;
if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
goto error;
ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count");
ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "subtest name");
free_test_filter_set(&set);
ASSERT_OK(parse_test_list("arg_parsing,bpf_cookie", &set, true),
"parsing");
if (!ASSERT_EQ(set.cnt, 2, "count of test filters"))
goto error;
if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
goto error;
ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count");
ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
free_test_filter_set(&set);
ASSERT_OK(parse_test_list("arg_parsing/arg_parsing,bpf_cookie",
&set,
true),
"parsing");
if (!ASSERT_EQ(set.cnt, 2, "count of test filters"))
goto error;
if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
goto error;
if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
goto error;
ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]),
"subtest name");
ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
free_test_filter_set(&set);
ASSERT_OK(parse_test_list("arg_parsing/arg_parsing", &set, true),
"parsing");
ASSERT_OK(parse_test_list("bpf_cookie", &set, true), "parsing");
ASSERT_OK(parse_test_list("send_signal", &set, true), "parsing");
if (!ASSERT_EQ(set.cnt, 3, "count of test filters"))
goto error;
if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
goto error;
if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
goto error;
ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
ASSERT_EQ(set.tests[2].subtest_cnt, 0, "subtest filters count");
ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]),
"subtest name");
ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
ASSERT_OK(strcmp("send_signal", set.tests[2].name), "test name");
free_test_filter_set(&set);
ASSERT_OK(parse_test_list("bpf_cookie/trace", &set, false), "parsing");
if (!ASSERT_EQ(set.cnt, 1, "count of test filters"))
goto error;
if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
goto error;
if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
goto error;
ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name");
ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name");
free_test_filter_set(&set);
ASSERT_OK(parse_test_list("t/subtest1,t/subtest2", &set, true),
"parsing");
if (!ASSERT_EQ(set.cnt, 1, "count of test filters"))
goto error;
if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
goto error;
if (!ASSERT_EQ(set.tests[0].subtest_cnt, 2, "subtest filters count"))
goto error;
ASSERT_OK(strcmp("t", set.tests[0].name), "test name");
ASSERT_OK(strcmp("subtest1", set.tests[0].subtests[0]), "subtest name");
ASSERT_OK(strcmp("subtest2", set.tests[0].subtests[1]), "subtest name");
error:
free_test_filter_set(&set);
}
static void test_parse_test_list_file(void)
{
struct test_filter_set set;
char tmpfile[80];
FILE *fp;
int fd;
snprintf(tmpfile, sizeof(tmpfile), "/tmp/bpf_arg_parsing_test.XXXXXX");
fd = mkstemp(tmpfile);
if (!ASSERT_GE(fd, 0, "create tmp"))
return;
fp = fdopen(fd, "w");
if (!ASSERT_NEQ(fp, NULL, "fdopen tmp")) {
close(fd);
goto out_remove;
}
fprintf(fp, "# comment\n");
fprintf(fp, " test_with_spaces \n");
fprintf(fp, "testA/subtest # comment\n");
fprintf(fp, "testB#comment with no space\n");
fprintf(fp, "testB # duplicate\n");
fprintf(fp, "testA/subtest # subtest duplicate\n");
fprintf(fp, "testA/subtest2\n");
fprintf(fp, "testC_no_eof_newline");
fflush(fp);
if (!ASSERT_OK(ferror(fp), "prepare tmp"))
goto out_fclose;
init_test_filter_set(&set);
ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file");
ASSERT_EQ(set.cnt, 4, "test count");
ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name");
ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count");
ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name");
ASSERT_EQ(set.tests[1].subtest_cnt, 2, "test 1 subtest count");
ASSERT_OK(strcmp("subtest", set.tests[1].subtests[0]), "test 1 subtest 0");
ASSERT_OK(strcmp("subtest2", set.tests[1].subtests[1]), "test 1 subtest 1");
ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name");
ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name");
free_test_filter_set(&set);
out_fclose:
fclose(fp);
out_remove:
remove(tmpfile);
}
void test_arg_parsing(void)
{
if (test__start_subtest("test_parse_test_list"))
test_parse_test_list();
if (test__start_subtest("test_parse_test_list_file"))
test_parse_test_list_file();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/arg_parsing.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <sys/syscall.h>
#include <test_progs.h>
#include "bloom_filter_map.skel.h"
static void test_fail_cases(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts);
__u32 value;
int fd, err;
/* Invalid key size */
fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 4, sizeof(value), 100, NULL);
if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid key size"))
close(fd);
/* Invalid value size */
fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, 0, 100, NULL);
if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value size 0"))
close(fd);
/* Invalid max entries size */
fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 0, NULL);
if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid max entries size"))
close(fd);
/* Bloom filter maps do not support BPF_F_NO_PREALLOC */
opts.map_flags = BPF_F_NO_PREALLOC;
fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts);
if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid flags"))
close(fd);
fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, NULL);
if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter"))
return;
/* Test invalid flags */
err = bpf_map_update_elem(fd, NULL, &value, -1);
ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags");
err = bpf_map_update_elem(fd, NULL, &value, BPF_EXIST);
ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags");
err = bpf_map_update_elem(fd, NULL, &value, BPF_F_LOCK);
ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags");
err = bpf_map_update_elem(fd, NULL, &value, BPF_NOEXIST);
ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags");
err = bpf_map_update_elem(fd, NULL, &value, 10000);
ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags");
close(fd);
}
static void test_success_cases(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts);
char value[11];
int fd, err;
/* Create a map */
opts.map_flags = BPF_F_ZERO_SEED | BPF_F_NUMA_NODE;
fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts);
if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter success case"))
return;
/* Add a value to the bloom filter */
err = bpf_map_update_elem(fd, NULL, &value, 0);
if (!ASSERT_OK(err, "bpf_map_update_elem bloom filter success case"))
goto done;
/* Lookup a value in the bloom filter */
err = bpf_map_lookup_elem(fd, NULL, &value);
ASSERT_OK(err, "bpf_map_update_elem bloom filter success case");
done:
close(fd);
}
static void check_bloom(struct bloom_filter_map *skel)
{
struct bpf_link *link;
link = bpf_program__attach(skel->progs.check_bloom);
if (!ASSERT_OK_PTR(link, "link"))
return;
syscall(SYS_getpgid);
ASSERT_EQ(skel->bss->error, 0, "error");
bpf_link__destroy(link);
}
static void test_inner_map(struct bloom_filter_map *skel, const __u32 *rand_vals,
__u32 nr_rand_vals)
{
int outer_map_fd, inner_map_fd, err, i, key = 0;
struct bpf_link *link;
/* Create a bloom filter map that will be used as the inner map */
inner_map_fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(*rand_vals),
nr_rand_vals, NULL);
if (!ASSERT_GE(inner_map_fd, 0, "bpf_map_create bloom filter inner map"))
return;
for (i = 0; i < nr_rand_vals; i++) {
err = bpf_map_update_elem(inner_map_fd, NULL, rand_vals + i, BPF_ANY);
if (!ASSERT_OK(err, "Add random value to inner_map_fd"))
goto done;
}
/* Add the bloom filter map to the outer map */
outer_map_fd = bpf_map__fd(skel->maps.outer_map);
err = bpf_map_update_elem(outer_map_fd, &key, &inner_map_fd, BPF_ANY);
if (!ASSERT_OK(err, "Add bloom filter map to outer map"))
goto done;
/* Attach the bloom_filter_inner_map prog */
link = bpf_program__attach(skel->progs.inner_map);
if (!ASSERT_OK_PTR(link, "link"))
goto delete_inner_map;
syscall(SYS_getpgid);
ASSERT_EQ(skel->bss->error, 0, "error");
bpf_link__destroy(link);
delete_inner_map:
/* Ensure the inner bloom filter map can be deleted */
err = bpf_map_delete_elem(outer_map_fd, &key);
ASSERT_OK(err, "Delete inner bloom filter map");
done:
close(inner_map_fd);
}
static int setup_progs(struct bloom_filter_map **out_skel, __u32 **out_rand_vals,
__u32 *out_nr_rand_vals)
{
struct bloom_filter_map *skel;
int random_data_fd, bloom_fd;
__u32 *rand_vals = NULL;
__u32 map_size, val;
int err, i;
/* Set up a bloom filter map skeleton */
skel = bloom_filter_map__open_and_load();
if (!ASSERT_OK_PTR(skel, "bloom_filter_map__open_and_load"))
return -EINVAL;
/* Set up rand_vals */
map_size = bpf_map__max_entries(skel->maps.map_random_data);
rand_vals = malloc(sizeof(*rand_vals) * map_size);
if (!rand_vals) {
err = -ENOMEM;
goto error;
}
/* Generate random values and populate both skeletons */
random_data_fd = bpf_map__fd(skel->maps.map_random_data);
bloom_fd = bpf_map__fd(skel->maps.map_bloom);
for (i = 0; i < map_size; i++) {
val = rand();
err = bpf_map_update_elem(random_data_fd, &i, &val, BPF_ANY);
if (!ASSERT_OK(err, "Add random value to map_random_data"))
goto error;
err = bpf_map_update_elem(bloom_fd, NULL, &val, BPF_ANY);
if (!ASSERT_OK(err, "Add random value to map_bloom"))
goto error;
rand_vals[i] = val;
}
*out_skel = skel;
*out_rand_vals = rand_vals;
*out_nr_rand_vals = map_size;
return 0;
error:
bloom_filter_map__destroy(skel);
if (rand_vals)
free(rand_vals);
return err;
}
void test_bloom_filter_map(void)
{
__u32 *rand_vals, nr_rand_vals;
struct bloom_filter_map *skel;
int err;
test_fail_cases();
test_success_cases();
err = setup_progs(&skel, &rand_vals, &nr_rand_vals);
if (err)
return;
test_inner_map(skel, rand_vals, nr_rand_vals);
free(rand_vals);
check_bloom(skel);
bloom_filter_map__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "timer.skel.h"
static int timer(struct timer *timer_skel)
{
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
err = timer__attach(timer_skel);
if (!ASSERT_OK(err, "timer_attach"))
return err;
ASSERT_EQ(timer_skel->data->callback_check, 52, "callback_check1");
ASSERT_EQ(timer_skel->data->callback2_check, 52, "callback2_check1");
prog_fd = bpf_program__fd(timer_skel->progs.test1);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
timer__detach(timer_skel);
usleep(50); /* 10 usecs should be enough, but give it extra */
/* check that timer_cb1() was executed 10+10 times */
ASSERT_EQ(timer_skel->data->callback_check, 42, "callback_check2");
ASSERT_EQ(timer_skel->data->callback2_check, 42, "callback2_check2");
/* check that timer_cb2() was executed twice */
ASSERT_EQ(timer_skel->bss->bss_data, 10, "bss_data");
/* check that timer_cb3() was executed twice */
ASSERT_EQ(timer_skel->bss->abs_data, 12, "abs_data");
/* check that there were no errors in timer execution */
ASSERT_EQ(timer_skel->bss->err, 0, "err");
/* check that code paths completed */
ASSERT_EQ(timer_skel->bss->ok, 1 | 2 | 4, "ok");
return 0;
}
/* TODO: use pid filtering */
void serial_test_timer(void)
{
struct timer *timer_skel = NULL;
int err;
timer_skel = timer__open_and_load();
if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
goto cleanup;
err = timer(timer_skel);
ASSERT_OK(err, "timer");
cleanup:
timer__destroy(timer_skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/timer.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include <sys/mman.h>
struct s {
int a;
long long b;
} __attribute__((packed));
#include "test_skeleton.skel.h"
void test_skeleton(void)
{
int duration = 0, err;
struct test_skeleton* skel;
struct test_skeleton__bss *bss;
struct test_skeleton__data *data;
struct test_skeleton__data_dyn *data_dyn;
struct test_skeleton__rodata *rodata;
struct test_skeleton__rodata_dyn *rodata_dyn;
struct test_skeleton__kconfig *kcfg;
const void *elf_bytes;
size_t elf_bytes_sz = 0;
void *m;
int i, fd;
skel = test_skeleton__open();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
if (CHECK(skel->kconfig, "skel_kconfig", "kconfig is mmaped()!\n"))
goto cleanup;
bss = skel->bss;
data = skel->data;
data_dyn = skel->data_dyn;
rodata = skel->rodata;
rodata_dyn = skel->rodata_dyn;
ASSERT_STREQ(bpf_map__name(skel->maps.rodata_dyn), ".rodata.dyn", "rodata_dyn_name");
ASSERT_STREQ(bpf_map__name(skel->maps.data_dyn), ".data.dyn", "data_dyn_name");
/* validate values are pre-initialized correctly */
CHECK(data->in1 != -1, "in1", "got %d != exp %d\n", data->in1, -1);
CHECK(data->out1 != -1, "out1", "got %d != exp %d\n", data->out1, -1);
CHECK(data->in2 != -1, "in2", "got %lld != exp %lld\n", data->in2, -1LL);
CHECK(data->out2 != -1, "out2", "got %lld != exp %lld\n", data->out2, -1LL);
CHECK(bss->in3 != 0, "in3", "got %d != exp %d\n", bss->in3, 0);
CHECK(bss->out3 != 0, "out3", "got %d != exp %d\n", bss->out3, 0);
CHECK(bss->in4 != 0, "in4", "got %lld != exp %lld\n", bss->in4, 0LL);
CHECK(bss->out4 != 0, "out4", "got %lld != exp %lld\n", bss->out4, 0LL);
CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0);
CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0);
ASSERT_EQ(rodata_dyn->in_dynarr_sz, 0, "in_dynarr_sz");
for (i = 0; i < 4; i++)
ASSERT_EQ(rodata_dyn->in_dynarr[i], -(i + 1), "in_dynarr");
for (i = 0; i < 4; i++)
ASSERT_EQ(data_dyn->out_dynarr[i], i + 1, "out_dynarr");
/* validate we can pre-setup global variables, even in .bss */
data->in1 = 10;
data->in2 = 11;
bss->in3 = 12;
bss->in4 = 13;
rodata->in.in6 = 14;
rodata_dyn->in_dynarr_sz = 4;
for (i = 0; i < 4; i++)
rodata_dyn->in_dynarr[i] = i + 10;
err = test_skeleton__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
goto cleanup;
/* validate pre-setup values are still there */
CHECK(data->in1 != 10, "in1", "got %d != exp %d\n", data->in1, 10);
CHECK(data->in2 != 11, "in2", "got %lld != exp %lld\n", data->in2, 11LL);
CHECK(bss->in3 != 12, "in3", "got %d != exp %d\n", bss->in3, 12);
CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL);
CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14);
ASSERT_EQ(rodata_dyn->in_dynarr_sz, 4, "in_dynarr_sz");
for (i = 0; i < 4; i++)
ASSERT_EQ(rodata_dyn->in_dynarr[i], i + 10, "in_dynarr");
/* now set new values and attach to get them into outX variables */
data->in1 = 1;
data->in2 = 2;
bss->in3 = 3;
bss->in4 = 4;
bss->in5.a = 5;
bss->in5.b = 6;
kcfg = skel->kconfig;
skel->data_read_mostly->read_mostly_var = 123;
err = test_skeleton__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
/* trigger tracepoint */
usleep(1);
CHECK(data->out1 != 1, "res1", "got %d != exp %d\n", data->out1, 1);
CHECK(data->out2 != 2, "res2", "got %lld != exp %d\n", data->out2, 2);
CHECK(bss->out3 != 3, "res3", "got %d != exp %d\n", (int)bss->out3, 3);
CHECK(bss->out4 != 4, "res4", "got %lld != exp %d\n", bss->out4, 4);
CHECK(bss->out5.a != 5, "res5", "got %d != exp %d\n", bss->out5.a, 5);
CHECK(bss->out5.b != 6, "res6", "got %lld != exp %d\n", bss->out5.b, 6);
CHECK(bss->out6 != 14, "res7", "got %d != exp %d\n", bss->out6, 14);
CHECK(bss->bpf_syscall != kcfg->CONFIG_BPF_SYSCALL, "ext1",
"got %d != exp %d\n", bss->bpf_syscall, kcfg->CONFIG_BPF_SYSCALL);
CHECK(bss->kern_ver != kcfg->LINUX_KERNEL_VERSION, "ext2",
"got %d != exp %d\n", bss->kern_ver, kcfg->LINUX_KERNEL_VERSION);
for (i = 0; i < 4; i++)
ASSERT_EQ(data_dyn->out_dynarr[i], i + 10, "out_dynarr");
ASSERT_EQ(skel->bss->out_mostly_var, 123, "out_mostly_var");
ASSERT_EQ(bss->huge_arr[ARRAY_SIZE(bss->huge_arr) - 1], 123, "huge_arr");
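/* the non-mmapable .data section must reject mmap() and must not have
* BPF_F_MMAPABLE set in its map flags
*/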
fd = bpf_map__fd(skel->maps.data_non_mmapable);
m = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
if (!ASSERT_EQ(m, MAP_FAILED, "unexpected_mmap_success"))
munmap(m, getpagesize());
ASSERT_EQ(bpf_map__map_flags(skel->maps.data_non_mmapable), 0, "non_mmap_flags");
elf_bytes = test_skeleton__elf_bytes(&elf_bytes_sz);
ASSERT_OK_PTR(elf_bytes, "elf_bytes");
ASSERT_GT(elf_bytes_sz, 0, "elf_bytes_sz");
cleanup:
test_skeleton__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/skeleton.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include <net/if.h>
#include "test_xdp.skel.h"
#include "test_xdp_bpf2bpf.skel.h"
struct meta {
int ifindex;
int pkt_len;
};
struct test_ctx_s {
bool passed;
int pkt_size;
};
struct test_ctx_s test_ctx;
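/* Perf buffer callback: verify the metadata and the packet copy that
* bpf_xdp_output() pushed to the perf ring buffer.
*/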
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
struct meta *meta = (struct meta *)data;
struct ipv4_packet *trace_pkt_v4 = data + sizeof(*meta);
unsigned char *raw_pkt = data + sizeof(*meta);
struct test_ctx_s *tst_ctx = ctx;
ASSERT_GE(size, sizeof(pkt_v4) + sizeof(*meta), "check_size");
ASSERT_EQ(meta->ifindex, if_nametoindex("lo"), "check_meta_ifindex");
ASSERT_EQ(meta->pkt_len, tst_ctx->pkt_size, "check_meta_pkt_len");
ASSERT_EQ(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)), 0,
"check_packet_content");
if (meta->pkt_len > sizeof(pkt_v4)) {
for (int i = 0; i < meta->pkt_len - sizeof(pkt_v4); i++)
ASSERT_EQ(raw_pkt[i + sizeof(pkt_v4)], (unsigned char)i,
"check_packet_content");
}
tst_ctx->passed = true;
}
#define BUF_SZ 9000
static void run_xdp_bpf2bpf_pkt_size(int pkt_fd, struct perf_buffer *pb,
struct test_xdp_bpf2bpf *ftrace_skel,
int pkt_size)
{
__u8 *buf, *buf_in;
int err;
LIBBPF_OPTS(bpf_test_run_opts, topts);
if (!ASSERT_LE(pkt_size, BUF_SZ, "pkt_size") ||
!ASSERT_GE(pkt_size, sizeof(pkt_v4), "pkt_size"))
return;
buf_in = malloc(BUF_SZ);
if (!ASSERT_OK_PTR(buf_in, "buf_in malloc()"))
return;
buf = malloc(BUF_SZ);
if (!ASSERT_OK_PTR(buf, "buf malloc()")) {
free(buf_in);
return;
}
test_ctx.passed = false;
test_ctx.pkt_size = pkt_size;
memcpy(buf_in, &pkt_v4, sizeof(pkt_v4));
if (pkt_size > sizeof(pkt_v4)) {
for (int i = 0; i < (pkt_size - sizeof(pkt_v4)); i++)
buf_in[i + sizeof(pkt_v4)] = i;
}
/* Run test program */
topts.data_in = buf_in;
topts.data_size_in = pkt_size;
topts.data_out = buf;
topts.data_size_out = BUF_SZ;
err = bpf_prog_test_run_opts(pkt_fd, &topts);
ASSERT_OK(err, "ipv4");
ASSERT_EQ(topts.retval, XDP_PASS, "ipv4 retval");
ASSERT_EQ(topts.data_size_out, pkt_size, "ipv4 size");
/* Make sure bpf_xdp_output() was triggered and it sent the expected
* data to the perf ring buffer.
*/
err = perf_buffer__poll(pb, 100);
ASSERT_GE(err, 0, "perf_buffer__poll");
ASSERT_TRUE(test_ctx.passed, "test passed");
/* Verify test results */
ASSERT_EQ(ftrace_skel->bss->test_result_fentry, if_nametoindex("lo"),
"fentry result");
ASSERT_EQ(ftrace_skel->bss->test_result_fexit, XDP_PASS, "fexit result");
free(buf);
free(buf_in);
}
void test_xdp_bpf2bpf(void)
{
int err, pkt_fd, map_fd;
int pkt_sizes[] = {sizeof(pkt_v4), 1024, 4100, 8200};
struct iptnl_info value4 = {.family = AF_INET6};
struct test_xdp *pkt_skel = NULL;
struct test_xdp_bpf2bpf *ftrace_skel = NULL;
struct vip key4 = {.protocol = 6, .family = AF_INET};
struct bpf_program *prog;
struct perf_buffer *pb = NULL;
/* Load XDP program to introspect */
pkt_skel = test_xdp__open_and_load();
if (!ASSERT_OK_PTR(pkt_skel, "test_xdp__open_and_load"))
return;
pkt_fd = bpf_program__fd(pkt_skel->progs._xdp_tx_iptunnel);
map_fd = bpf_map__fd(pkt_skel->maps.vip2tnl);
bpf_map_update_elem(map_fd, &key4, &value4, 0);
/* Load trace program */
ftrace_skel = test_xdp_bpf2bpf__open();
if (!ASSERT_OK_PTR(ftrace_skel, "test_xdp_bpf2bpf__open"))
goto out;
/* Demonstrate the bpf_program__set_attach_target() API rather than
* the load with options, i.e. opts.attach_prog_fd.
*/
prog = ftrace_skel->progs.trace_on_entry;
bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY);
bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");
prog = ftrace_skel->progs.trace_on_exit;
bpf_program__set_expected_attach_type(prog, BPF_TRACE_FEXIT);
bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");
err = test_xdp_bpf2bpf__load(ftrace_skel);
if (!ASSERT_OK(err, "test_xdp_bpf2bpf__load"))
goto out;
err = test_xdp_bpf2bpf__attach(ftrace_skel);
if (!ASSERT_OK(err, "test_xdp_bpf2bpf__attach"))
goto out;
/* Set up perf buffer */
pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 8,
on_sample, NULL, &test_ctx, NULL);
if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto out;
for (int i = 0; i < ARRAY_SIZE(pkt_sizes); i++)
run_xdp_bpf2bpf_pkt_size(pkt_fd, pb, ftrace_skel,
pkt_sizes[i]);
out:
perf_buffer__free(pb);
test_xdp__destroy(pkt_skel);
test_xdp_bpf2bpf__destroy(ftrace_skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#define _GNU_SOURCE
#include <sched.h>
#include <linux/socket.h>
#include <linux/tls.h>
#include <net/if.h>
#include "test_progs.h"
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "setget_sockopt.skel.h"
#define CG_NAME "/setget-sockopt-test"
static const char addr4_str[] = "127.0.0.1";
static const char addr6_str[] = "::1";
static struct setget_sockopt *skel;
static int cg_fd;
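/* Run in a fresh netns: bring loopback up and add a veth pair
* (binddevtest1/binddevtest2) used by the bind-to-device checks.
*/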
static int create_netns(void)
{
if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
return -1;
if (!ASSERT_OK(system("ip link set dev lo up"), "set lo up"))
return -1;
if (!ASSERT_OK(system("ip link add dev binddevtest1 type veth peer name binddevtest2"),
"add veth"))
return -1;
if (!ASSERT_OK(system("ip link set dev binddevtest1 up"),
"bring veth up"))
return -1;
return 0;
}
static void test_tcp(int family)
{
struct setget_sockopt__bss *bss = skel->bss;
int sfd, cfd;
memset(bss, 0, sizeof(*bss));
sfd = start_server(family, SOCK_STREAM,
family == AF_INET6 ? addr6_str : addr4_str, 0, 0);
if (!ASSERT_GE(sfd, 0, "start_server"))
return;
cfd = connect_to_fd(sfd, 0);
if (!ASSERT_GE(cfd, 0, "connect_to_fd_server")) {
close(sfd);
return;
}
close(sfd);
close(cfd);
ASSERT_EQ(bss->nr_listen, 1, "nr_listen");
ASSERT_EQ(bss->nr_connect, 1, "nr_connect");
ASSERT_EQ(bss->nr_active, 1, "nr_active");
ASSERT_EQ(bss->nr_passive, 1, "nr_passive");
ASSERT_EQ(bss->nr_socket_post_create, 2, "nr_socket_post_create");
ASSERT_EQ(bss->nr_binddev, 2, "nr_bind");
}
static void test_udp(int family)
{
struct setget_sockopt__bss *bss = skel->bss;
int sfd;
memset(bss, 0, sizeof(*bss));
sfd = start_server(family, SOCK_DGRAM,
family == AF_INET6 ? addr6_str : addr4_str, 0, 0);
if (!ASSERT_GE(sfd, 0, "start_server"))
return;
close(sfd);
ASSERT_GE(bss->nr_socket_post_create, 1, "nr_socket_post_create");
ASSERT_EQ(bss->nr_binddev, 1, "nr_bind");
}
static void test_ktls(int family)
{
struct tls12_crypto_info_aes_gcm_128 aes128;
struct setget_sockopt__bss *bss = skel->bss;
int cfd = -1, sfd = -1, fd = -1, ret;
char buf;
memset(bss, 0, sizeof(*bss));
sfd = start_server(family, SOCK_STREAM,
family == AF_INET6 ? addr6_str : addr4_str, 0, 0);
if (!ASSERT_GE(sfd, 0, "start_server"))
return;
fd = connect_to_fd(sfd, 0);
if (!ASSERT_GE(fd, 0, "connect_to_fd"))
goto err_out;
cfd = accept(sfd, NULL, 0);
if (!ASSERT_GE(cfd, 0, "accept"))
goto err_out;
close(sfd);
sfd = -1;
/* Setup KTLS */
ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
if (!ASSERT_OK(ret, "setsockopt"))
goto err_out;
ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
if (!ASSERT_OK(ret, "setsockopt"))
goto err_out;
memset(&aes128, 0, sizeof(aes128));
aes128.info.version = TLS_1_2_VERSION;
aes128.info.cipher_type = TLS_CIPHER_AES_GCM_128;
ret = setsockopt(fd, SOL_TLS, TLS_TX, &aes128, sizeof(aes128));
if (!ASSERT_OK(ret, "setsockopt"))
goto err_out;
ret = setsockopt(cfd, SOL_TLS, TLS_RX, &aes128, sizeof(aes128));
if (!ASSERT_OK(ret, "setsockopt"))
goto err_out;
/* KTLS is enabled */
close(fd);
/* At this point, the cfd socket is in the CLOSE_WAIT state
* and still runs the TLS protocol. The test for
* BPF_TCP_CLOSE_WAIT should be run at this point.
*/
ret = read(cfd, &buf, sizeof(buf));
ASSERT_EQ(ret, 0, "read");
close(cfd);
ASSERT_EQ(bss->nr_listen, 1, "nr_listen");
ASSERT_EQ(bss->nr_connect, 1, "nr_connect");
ASSERT_EQ(bss->nr_active, 1, "nr_active");
ASSERT_EQ(bss->nr_passive, 1, "nr_passive");
ASSERT_EQ(bss->nr_socket_post_create, 2, "nr_socket_post_create");
ASSERT_EQ(bss->nr_binddev, 2, "nr_bind");
ASSERT_EQ(bss->nr_fin_wait1, 1, "nr_fin_wait1");
return;
err_out:
close(fd);
close(cfd);
close(sfd);
}
void test_setget_sockopt(void)
{
cg_fd = test__join_cgroup(CG_NAME);
if (cg_fd < 0)
return;
if (create_netns())
goto done;
skel = setget_sockopt__open();
if (!ASSERT_OK_PTR(skel, "open skel"))
goto done;
strcpy(skel->rodata->veth, "binddevtest1");
skel->rodata->veth_ifindex = if_nametoindex("binddevtest1");
if (!ASSERT_GT(skel->rodata->veth_ifindex, 0, "if_nametoindex"))
goto done;
if (!ASSERT_OK(setget_sockopt__load(skel), "load skel"))
goto done;
skel->links.skops_sockopt =
bpf_program__attach_cgroup(skel->progs.skops_sockopt, cg_fd);
if (!ASSERT_OK_PTR(skel->links.skops_sockopt, "attach cgroup"))
goto done;
skel->links.socket_post_create =
bpf_program__attach_cgroup(skel->progs.socket_post_create, cg_fd);
if (!ASSERT_OK_PTR(skel->links.socket_post_create, "attach_cgroup"))
goto done;
test_tcp(AF_INET6);
test_tcp(AF_INET);
test_udp(AF_INET6);
test_udp(AF_INET);
test_ktls(AF_INET6);
test_ktls(AF_INET);
done:
setget_sockopt__destroy(skel);
close(cg_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/setget_sockopt.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
void test_task_fd_query_rawtp(void)
{
const char *file = "./test_get_stack_rawtp.bpf.o";
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
struct bpf_object *obj;
int efd, err, prog_fd;
__u32 duration = 0;
char buf[256];
err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
goto close_prog;
/* query (getpid(), efd) */
len = sizeof(buf);
err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
&fd_type, &probe_offset, &probe_addr);
if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
errno))
goto close_prog;
err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
strcmp(buf, "sys_enter") == 0;
if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
fd_type, buf))
goto close_prog;
/* test zero len */
len = 0;
err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
&fd_type, &probe_offset, &probe_addr);
if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
err, errno))
goto close_prog;
err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
len == strlen("sys_enter");
if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
goto close_prog;
/* test empty buffer */
len = sizeof(buf);
err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
&fd_type, &probe_offset, &probe_addr);
if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
err, errno))
goto close_prog;
err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
len == strlen("sys_enter");
if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
goto close_prog;
/* test smaller buffer */
len = 3;
err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
&fd_type, &probe_offset, &probe_addr);
if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
"err %d errno %d\n", err, errno))
goto close_prog;
err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
len == strlen("sys_enter") &&
strcmp(buf, "sy") == 0;
if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
goto close_prog;
close_prog:
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "cpumask_failure.skel.h"
#include "cpumask_success.skel.h"
static const char * const cpumask_success_testcases[] = {
"test_alloc_free_cpumask",
"test_set_clear_cpu",
"test_setall_clear_cpu",
"test_first_firstzero_cpu",
"test_firstand_nocpu",
"test_test_and_set_clear",
"test_and_or_xor",
"test_intersects_subset",
"test_copy_any_anyand",
"test_insert_leave",
"test_insert_remove_release",
"test_global_mask_rcu",
};
static void verify_success(const char *prog_name)
{
struct cpumask_success *skel;
struct bpf_program *prog;
struct bpf_link *link = NULL;
pid_t child_pid;
int status;
skel = cpumask_success__open();
if (!ASSERT_OK_PTR(skel, "cpumask_success__open"))
return;
skel->bss->pid = getpid();
skel->bss->nr_cpus = libbpf_num_possible_cpus();
cpumask_success__load(skel);
if (!ASSERT_OK_PTR(skel, "cpumask_success__load"))
goto cleanup;
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto cleanup;
link = bpf_program__attach(prog);
if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
goto cleanup;
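/* Fork and reap a short-lived child to exercise the attached program,
* then check that the BPF side recorded no error.
*/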
child_pid = fork();
if (!ASSERT_GT(child_pid, -1, "child_pid"))
goto cleanup;
if (child_pid == 0)
_exit(0);
waitpid(child_pid, &status, 0);
ASSERT_OK(skel->bss->err, "post_wait_err");
cleanup:
bpf_link__destroy(link);
cpumask_success__destroy(skel);
}
void test_cpumask(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(cpumask_success_testcases); i++) {
if (!test__start_subtest(cpumask_success_testcases[i]))
continue;
verify_success(cpumask_success_testcases[i]);
}
RUN_TESTS(cpumask_success);
RUN_TESTS(cpumask_failure);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cpumask.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Google */
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include "cgroup_iter.skel.h"
#include "cgroup_helpers.h"
#define ROOT 0
#define PARENT 1
#define CHILD1 2
#define CHILD2 3
#define NUM_CGROUPS 4
#define PROLOGUE "prologue\n"
#define EPILOGUE "epilogue\n"
static const char *cg_path[] = {
"/", "/parent", "/parent/child1", "/parent/child2"
};
static int cg_fd[] = {-1, -1, -1, -1};
static unsigned long long cg_id[] = {0, 0, 0, 0};
static char expected_output[64];
static int setup_cgroups(void)
{
int fd, i = 0;
for (i = 0; i < NUM_CGROUPS; i++) {
fd = create_and_get_cgroup(cg_path[i]);
if (fd < 0)
return fd;
cg_fd[i] = fd;
cg_id[i] = get_cgroup_id(cg_path[i]);
}
return 0;
}
static void cleanup_cgroups(void)
{
int i;
for (i = 0; i < NUM_CGROUPS; i++)
close(cg_fd[i]);
}
static void read_from_cgroup_iter(struct bpf_program *prog, int cgroup_fd,
int order, const char *testname)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo;
struct bpf_link *link;
int len, iter_fd;
static char buf[128];
size_t left;
char *p;
memset(&linfo, 0, sizeof(linfo));
linfo.cgroup.cgroup_fd = cgroup_fd;
linfo.cgroup.order = order;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(prog, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
return;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (iter_fd < 0)
goto free_link;
memset(buf, 0, sizeof(buf));
left = ARRAY_SIZE(buf);
p = buf;
while ((len = read(iter_fd, p, left)) > 0) {
p += len;
left -= len;
}
ASSERT_STREQ(buf, expected_output, testname);
/* read() after iter finishes should be ok. */
if (len == 0)
ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read");
close(iter_fd);
free_link:
bpf_link__destroy(link);
}
/* Invalid cgroup. */
static void test_invalid_cgroup(struct cgroup_iter *skel)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo;
struct bpf_link *link;
memset(&linfo, 0, sizeof(linfo));
linfo.cgroup.cgroup_fd = (__u32)-1;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts);
ASSERT_ERR_PTR(link, "attach_iter");
bpf_link__destroy(link);
}
/* Specifying both cgroup_fd and cgroup_id is invalid. */
static void test_invalid_cgroup_spec(struct cgroup_iter *skel)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo;
struct bpf_link *link;
memset(&linfo, 0, sizeof(linfo));
linfo.cgroup.cgroup_fd = (__u32)cg_fd[PARENT];
linfo.cgroup.cgroup_id = (__u64)cg_id[PARENT];
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts);
ASSERT_ERR_PTR(link, "attach_iter");
bpf_link__destroy(link);
}
/* Preorder walk prints parent and child in order. */
static void test_walk_preorder(struct cgroup_iter *skel)
{
snprintf(expected_output, sizeof(expected_output),
PROLOGUE "%8llu\n%8llu\n%8llu\n" EPILOGUE,
cg_id[PARENT], cg_id[CHILD1], cg_id[CHILD2]);
read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
BPF_CGROUP_ITER_DESCENDANTS_PRE, "preorder");
}
/* Postorder walk prints child and parent in order. */
static void test_walk_postorder(struct cgroup_iter *skel)
{
snprintf(expected_output, sizeof(expected_output),
PROLOGUE "%8llu\n%8llu\n%8llu\n" EPILOGUE,
cg_id[CHILD1], cg_id[CHILD2], cg_id[PARENT]);
read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
BPF_CGROUP_ITER_DESCENDANTS_POST, "postorder");
}
/* Walking parents prints parent and then root. */
static void test_walk_ancestors_up(struct cgroup_iter *skel)
{
/* terminate the walk when ROOT is met. */
skel->bss->terminal_cgroup = cg_id[ROOT];
snprintf(expected_output, sizeof(expected_output),
PROLOGUE "%8llu\n%8llu\n" EPILOGUE,
cg_id[PARENT], cg_id[ROOT]);
read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
BPF_CGROUP_ITER_ANCESTORS_UP, "ancestors_up");
skel->bss->terminal_cgroup = 0;
}
/* Early termination prints parent only. */
static void test_early_termination(struct cgroup_iter *skel)
{
/* terminate the walk after the first element is processed. */
skel->bss->terminate_early = 1;
snprintf(expected_output, sizeof(expected_output),
PROLOGUE "%8llu\n" EPILOGUE, cg_id[PARENT]);
read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
BPF_CGROUP_ITER_DESCENDANTS_PRE, "early_termination");
skel->bss->terminate_early = 0;
}
/* Walking self prints self only. */
static void test_walk_self_only(struct cgroup_iter *skel)
{
snprintf(expected_output, sizeof(expected_output),
PROLOGUE "%8llu\n" EPILOGUE, cg_id[PARENT]);
read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
BPF_CGROUP_ITER_SELF_ONLY, "self_only");
}
static void test_walk_dead_self_only(struct cgroup_iter *skel)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
char expected_output[128], buf[128];
const char *cgrp_name = "/dead";
union bpf_iter_link_info linfo;
int len, cgrp_fd, iter_fd;
struct bpf_link *link;
size_t left;
char *p;
cgrp_fd = create_and_get_cgroup(cgrp_name);
if (!ASSERT_GE(cgrp_fd, 0, "create cgrp"))
return;
/* The cgroup will be dead during read() iteration, so it only has
* epilogue in the output
*/
snprintf(expected_output, sizeof(expected_output), EPILOGUE);
memset(&linfo, 0, sizeof(linfo));
linfo.cgroup.cgroup_fd = cgrp_fd;
linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto close_cgrp;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "iter_create"))
goto free_link;
/* Close link fd and cgroup fd */
bpf_link__destroy(link);
close(cgrp_fd);
/* Remove cgroup to mark it as dead */
remove_cgroup(cgrp_name);
/* Two kern_sync_rcu() and usleep() pairs are used to wait for the
* releases of cgroup css, and the last kern_sync_rcu() and usleep()
* pair is used to wait for the free of cgroup itself.
*/
kern_sync_rcu();
usleep(8000);
kern_sync_rcu();
usleep(8000);
kern_sync_rcu();
usleep(1000);
memset(buf, 0, sizeof(buf));
left = ARRAY_SIZE(buf);
p = buf;
while ((len = read(iter_fd, p, left)) > 0) {
p += len;
left -= len;
}
ASSERT_STREQ(buf, expected_output, "dead cgroup output");
/* read() after iter finishes should be ok. */
if (len == 0)
ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read");
close(iter_fd);
return;
free_link:
bpf_link__destroy(link);
close_cgrp:
close(cgrp_fd);
}
void test_cgroup_iter(void)
{
struct cgroup_iter *skel = NULL;
if (setup_cgroup_environment())
return;
if (setup_cgroups())
goto out;
skel = cgroup_iter__open_and_load();
if (!ASSERT_OK_PTR(skel, "cgroup_iter__open_and_load"))
goto out;
if (test__start_subtest("cgroup_iter__invalid_cgroup"))
test_invalid_cgroup(skel);
if (test__start_subtest("cgroup_iter__invalid_cgroup_spec"))
test_invalid_cgroup_spec(skel);
if (test__start_subtest("cgroup_iter__preorder"))
test_walk_preorder(skel);
if (test__start_subtest("cgroup_iter__postorder"))
test_walk_postorder(skel);
if (test__start_subtest("cgroup_iter__ancestors_up_walk"))
test_walk_ancestors_up(skel);
if (test__start_subtest("cgroup_iter__early_termination"))
test_early_termination(skel);
if (test__start_subtest("cgroup_iter__self_only"))
test_walk_self_only(skel);
if (test__start_subtest("cgroup_iter__dead_self_only"))
test_walk_dead_self_only(skel);
out:
cgroup_iter__destroy(skel);
cleanup_cgroups();
cleanup_cgroup_environment();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cgroup_iter.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define _GNU_SOURCE
#include <string.h>
#include <byteswap.h>
#include <test_progs.h>
#include <bpf/btf.h>
void test_btf_endian() {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
enum btf_endianness endian = BTF_LITTLE_ENDIAN;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
enum btf_endianness endian = BTF_BIG_ENDIAN;
#else
#error "Unrecognized __BYTE_ORDER__"
#endif
enum btf_endianness swap_endian = 1 - endian;
struct btf *btf = NULL, *swap_btf = NULL;
const void *raw_data, *swap_raw_data;
const struct btf_type *t;
const struct btf_header *hdr;
__u32 raw_sz, swap_raw_sz;
int var_id;
/* Load BTF in native endianness */
btf = btf__parse_elf("btf_dump_test_case_syntax.bpf.o", NULL);
if (!ASSERT_OK_PTR(btf, "parse_native_btf"))
goto err_out;
ASSERT_EQ(btf__endianness(btf), endian, "endian");
btf__set_endianness(btf, swap_endian);
ASSERT_EQ(btf__endianness(btf), swap_endian, "endian");
/* Get raw BTF data in non-native endianness... */
raw_data = btf__raw_data(btf, &raw_sz);
if (!ASSERT_OK_PTR(raw_data, "raw_data_inverted"))
goto err_out;
/* ...and open it as a new BTF instance */
swap_btf = btf__new(raw_data, raw_sz);
if (!ASSERT_OK_PTR(swap_btf, "parse_swap_btf"))
goto err_out;
ASSERT_EQ(btf__endianness(swap_btf), swap_endian, "endian");
ASSERT_EQ(btf__type_cnt(swap_btf), btf__type_cnt(btf), "nr_types");
swap_raw_data = btf__raw_data(swap_btf, &swap_raw_sz);
if (!ASSERT_OK_PTR(swap_raw_data, "swap_raw_data"))
goto err_out;
/* both raw data should be identical (with non-native endianness) */
ASSERT_OK(memcmp(raw_data, swap_raw_data, raw_sz), "mem_identical");
/* make sure that at least BTF header data is really swapped */
hdr = swap_raw_data;
ASSERT_EQ(bswap_16(hdr->magic), BTF_MAGIC, "btf_magic_swapped");
ASSERT_EQ(raw_sz, swap_raw_sz, "raw_sizes");
/* swap it back to native endianness */
btf__set_endianness(swap_btf, endian);
swap_raw_data = btf__raw_data(swap_btf, &swap_raw_sz);
if (!ASSERT_OK_PTR(swap_raw_data, "swap_raw_data"))
goto err_out;
/* now header should have native BTF_MAGIC */
hdr = swap_raw_data;
ASSERT_EQ(hdr->magic, BTF_MAGIC, "btf_magic_native");
ASSERT_EQ(raw_sz, swap_raw_sz, "raw_sizes");
/* now modify original BTF */
var_id = btf__add_var(btf, "some_var", BTF_VAR_GLOBAL_ALLOCATED, 1);
ASSERT_GT(var_id, 0, "var_id");
btf__free(swap_btf);
swap_btf = NULL;
btf__set_endianness(btf, swap_endian);
raw_data = btf__raw_data(btf, &raw_sz);
if (!ASSERT_OK_PTR(raw_data, "raw_data_inverted"))
goto err_out;
/* and re-open swapped raw data again */
swap_btf = btf__new(raw_data, raw_sz);
if (!ASSERT_OK_PTR(swap_btf, "parse_swap_btf"))
goto err_out;
ASSERT_EQ(btf__endianness(swap_btf), swap_endian, "endian");
ASSERT_EQ(btf__type_cnt(swap_btf), btf__type_cnt(btf), "nr_types");
/* the type should appear as if it was stored in native endianness */
t = btf__type_by_id(swap_btf, var_id);
ASSERT_STREQ(btf__str_by_offset(swap_btf, t->name_off), "some_var", "var_name");
ASSERT_EQ(btf_var(t)->linkage, BTF_VAR_GLOBAL_ALLOCATED, "var_linkage");
ASSERT_EQ(t->type, 1, "var_type");
err_out:
btf__free(btf);
btf__free(swap_btf);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/btf_endian.c |
// SPDX-License-Identifier: GPL-2.0
#include <sys/sysinfo.h>
#include <test_progs.h>
#include "network_helpers.h"
#include "netcnt_prog.skel.h"
#include "netcnt_common.h"
#define CG_NAME "/netcnt"
void serial_test_netcnt(void)
{
union percpu_net_cnt *percpu_netcnt = NULL;
struct bpf_cgroup_storage_key key;
int map_fd, percpu_map_fd;
struct netcnt_prog *skel;
unsigned long packets;
union net_cnt netcnt;
unsigned long bytes;
int cpu, nproc;
int cg_fd = -1;
char cmd[128];
skel = netcnt_prog__open_and_load();
if (!ASSERT_OK_PTR(skel, "netcnt_prog__open_and_load"))
return;
nproc = bpf_num_possible_cpus();
percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc);
if (!ASSERT_OK_PTR(percpu_netcnt, "malloc(percpu_netcnt)"))
goto err;
cg_fd = test__join_cgroup(CG_NAME);
if (!ASSERT_GE(cg_fd, 0, "test__join_cgroup"))
goto err;
skel->links.bpf_nextcnt = bpf_program__attach_cgroup(skel->progs.bpf_nextcnt, cg_fd);
if (!ASSERT_OK_PTR(skel->links.bpf_nextcnt,
"attach_cgroup(bpf_nextcnt)"))
goto err;
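/* Generate traffic accounted by the cgroup program: 10000 ICMPv6
* pings to ::1 over loopback.
*/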
snprintf(cmd, sizeof(cmd), "%s ::1 -A -c 10000 -q > /dev/null", ping_command(AF_INET6));
ASSERT_OK(system(cmd), cmd);
map_fd = bpf_map__fd(skel->maps.netcnt);
if (!ASSERT_OK(bpf_map_get_next_key(map_fd, NULL, &key), "bpf_map_get_next_key"))
goto err;
if (!ASSERT_OK(bpf_map_lookup_elem(map_fd, &key, &netcnt), "bpf_map_lookup_elem(netcnt)"))
goto err;
percpu_map_fd = bpf_map__fd(skel->maps.percpu_netcnt);
if (!ASSERT_OK(bpf_map_lookup_elem(percpu_map_fd, &key, &percpu_netcnt[0]),
"bpf_map_lookup_elem(percpu_netcnt)"))
goto err;
/* Some packets can still be in the per-cpu cache, but not more than
* MAX_PERCPU_PACKETS.
*/
packets = netcnt.packets;
bytes = netcnt.bytes;
for (cpu = 0; cpu < nproc; cpu++) {
ASSERT_LE(percpu_netcnt[cpu].packets, MAX_PERCPU_PACKETS, "MAX_PERCPU_PACKETS");
packets += percpu_netcnt[cpu].packets;
bytes += percpu_netcnt[cpu].bytes;
}
/* No packets should be lost */
ASSERT_GE(packets, 10000, "packets");
/* Check that the bytes counter matches the number of packets
* multiplied by the size of an IPv6 ICMP packet.
*/
ASSERT_GE(bytes, packets * 104, "bytes");
err:
if (cg_fd != -1)
close(cg_fd);
free(percpu_netcnt);
netcnt_prog__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/netcnt.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <linux/btf.h>
#include "netif_receive_skb.skel.h"
/* Demonstrate that bpf_snprintf_btf succeeds and that various data types
* are formatted correctly.
*/
void serial_test_snprintf_btf(void)
{
struct netif_receive_skb *skel;
struct netif_receive_skb__bss *bss;
int err, duration = 0;
skel = netif_receive_skb__open();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
err = netif_receive_skb__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
goto cleanup;
bss = skel->bss;
err = netif_receive_skb__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
/* generate receive event */
err = system("ping -c 1 127.0.0.1 > /dev/null");
if (CHECK(err, "system", "ping failed: %d\n", err))
goto cleanup;
if (bss->skip) {
printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
test__skip();
goto cleanup;
}
/*
* Make sure netif_receive_skb program was triggered
* and it set expected return values from bpf_trace_printk()s
* and all tests ran.
*/
if (!ASSERT_GT(bss->ret, 0, "bpf_snprintf_ret"))
goto cleanup;
if (CHECK(bss->ran_subtests == 0, "check if subtests ran",
"no subtests ran, did BPF program run?"))
goto cleanup;
if (CHECK(bss->num_subtests != bss->ran_subtests,
"check all subtests ran",
"only ran %d of %d tests\n", bss->num_subtests,
bss->ran_subtests))
goto cleanup;
cleanup:
netif_receive_skb__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/snprintf_btf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tests for attaching, detaching, and replacing flow_dissector BPF program.
*/
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include "test_progs.h"
static int init_net = -1;
static __u32 query_attached_prog_id(int netns)
{
__u32 prog_ids[1] = {};
__u32 prog_cnt = ARRAY_SIZE(prog_ids);
int err;
err = bpf_prog_query(netns, BPF_FLOW_DISSECTOR, 0, NULL,
prog_ids, &prog_cnt);
if (CHECK_FAIL(err)) {
perror("bpf_prog_query");
return 0;
}
return prog_cnt == 1 ? prog_ids[0] : 0;
}
static bool prog_is_attached(int netns)
{
return query_attached_prog_id(netns) > 0;
}
static int load_prog(enum bpf_prog_type type)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_0, BPF_OK),
BPF_EXIT_INSN(),
};
int fd;
fd = bpf_test_load_program(type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
if (CHECK_FAIL(fd < 0))
perror("bpf_test_load_program");
return fd;
}
static __u32 query_prog_id(int prog)
{
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
int err;
err = bpf_prog_get_info_by_fd(prog, &info, &info_len);
if (CHECK_FAIL(err || info_len != sizeof(info))) {
perror("bpf_prog_get_info_by_fd");
return 0;
}
return info.id;
}
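/* Switch to a new network namespace and return an FD referring to it;
* on failure, move back to the original netns.
*/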
static int unshare_net(int old_net)
{
int err, new_net;
err = unshare(CLONE_NEWNET);
if (CHECK_FAIL(err)) {
perror("unshare(CLONE_NEWNET)");
return -1;
}
new_net = open("/proc/self/ns/net", O_RDONLY);
if (CHECK_FAIL(new_net < 0)) {
perror("open(/proc/self/ns/net)");
setns(old_net, CLONE_NEWNET);
return -1;
}
return new_net;
}
static void test_prog_attach_prog_attach(int netns, int prog1, int prog2)
{
int err;
err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK_FAIL(err)) {
perror("bpf_prog_attach(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect success when attaching a different program */
err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK_FAIL(err)) {
perror("bpf_prog_attach(prog2) #1");
goto out_detach;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
/* Expect failure when attaching the same program twice */
err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK_FAIL(!err || errno != EINVAL))
perror("bpf_prog_attach(prog2) #2");
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
out_detach:
err = bpf_prog_detach2(prog2, 0, BPF_FLOW_DISSECTOR);
if (CHECK_FAIL(err))
perror("bpf_prog_detach");
CHECK_FAIL(prog_is_attached(netns));
}
static void test_link_create_link_create(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
int link1, link2;
link1 = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &opts);
if (CHECK_FAIL(link1 < 0)) {
perror("bpf_link_create(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect failure creating link when another link exists */
errno = 0;
link2 = bpf_link_create(prog2, netns, BPF_FLOW_DISSECTOR, &opts);
if (CHECK_FAIL(link2 >= 0 || errno != E2BIG))
perror("bpf_prog_attach(prog2) expected E2BIG");
if (link2 >= 0)
close(link2);
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
close(link1);
CHECK_FAIL(prog_is_attached(netns));
}
static void test_prog_attach_link_create(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
int err, link;
err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK_FAIL(err)) {
perror("bpf_prog_attach(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect failure creating link when prog attached */
errno = 0;
link = bpf_link_create(prog2, netns, BPF_FLOW_DISSECTOR, &opts);
if (CHECK_FAIL(link >= 0 || errno != EEXIST))
perror("bpf_link_create(prog2) expected EEXIST");
if (link >= 0)
close(link);
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
if (CHECK_FAIL(err))
perror("bpf_prog_detach");
CHECK_FAIL(prog_is_attached(netns));
}
static void test_link_create_prog_attach(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
int err, link;
link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &opts);
if (CHECK_FAIL(link < 0)) {
perror("bpf_link_create(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect failure attaching prog when link exists */
errno = 0;
err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK_FAIL(!err || errno != EEXIST))
perror("bpf_prog_attach(prog2) expected EEXIST");
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
close(link);
CHECK_FAIL(prog_is_attached(netns));
}
static void test_link_create_prog_detach(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
int err, link;
link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &opts);
if (CHECK_FAIL(link < 0)) {
perror("bpf_link_create(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect failure detaching prog when link exists */
errno = 0;
err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
if (CHECK_FAIL(!err || errno != EINVAL))
perror("bpf_prog_detach expected EINVAL");
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
close(link);
CHECK_FAIL(prog_is_attached(netns));
}
static void test_prog_attach_detach_query(int netns, int prog1, int prog2)
{
int err;
err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK_FAIL(err)) {
perror("bpf_prog_attach(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
if (CHECK_FAIL(err)) {
perror("bpf_prog_detach");
return;
}
/* Expect no prog attached after successful detach */
CHECK_FAIL(prog_is_attached(netns));
}
static void test_link_create_close_query(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
int link;
link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &opts);
if (CHECK_FAIL(link < 0)) {
perror("bpf_link_create(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
close(link);
/* Expect no prog attached after closing last link FD */
CHECK_FAIL(prog_is_attached(netns));
}
static void test_link_update_no_old_prog(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
int err, link;
link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
if (CHECK_FAIL(link < 0)) {
perror("bpf_link_create(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect success replacing the prog when old prog not specified */
update_opts.flags = 0;
update_opts.old_prog_fd = 0;
err = bpf_link_update(link, prog2, &update_opts);
if (CHECK_FAIL(err))
perror("bpf_link_update");
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
close(link);
CHECK_FAIL(prog_is_attached(netns));
}
static void test_link_update_replace_old_prog(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
int err, link;
link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
if (CHECK_FAIL(link < 0)) {
perror("bpf_link_create(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect replacing the prog to succeed when F_REPLACE and the old prog are specified */
update_opts.flags = BPF_F_REPLACE;
update_opts.old_prog_fd = prog1;
err = bpf_link_update(link, prog2, &update_opts);
if (CHECK_FAIL(err))
perror("bpf_link_update");
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
close(link);
CHECK_FAIL(prog_is_attached(netns));
}
static void test_link_update_same_prog(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
int err, link;
link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
if (CHECK_FAIL(link < 0)) {
perror("bpf_link_create(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect success updating the prog with the same one */
update_opts.flags = 0;
update_opts.old_prog_fd = 0;
err = bpf_link_update(link, prog1, &update_opts);
if (CHECK_FAIL(err))
perror("bpf_link_update");
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
close(link);
CHECK_FAIL(prog_is_attached(netns));
}
static void test_link_update_invalid_opts(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
int err, link;
link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
if (CHECK_FAIL(link < 0)) {
perror("bpf_link_create(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect update to fail w/ old prog FD but w/o F_REPLACE */
errno = 0;
update_opts.flags = 0;
update_opts.old_prog_fd = prog1;
err = bpf_link_update(link, prog2, &update_opts);
if (CHECK_FAIL(!err || errno != EINVAL)) {
perror("bpf_link_update expected EINVAL");
goto out_close;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect update to fail on old prog FD mismatch */
errno = 0;
update_opts.flags = BPF_F_REPLACE;
update_opts.old_prog_fd = prog2;
err = bpf_link_update(link, prog2, &update_opts);
if (CHECK_FAIL(!err || errno != EPERM)) {
perror("bpf_link_update expected EPERM");
goto out_close;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect update to fail for invalid old prog FD */
errno = 0;
update_opts.flags = BPF_F_REPLACE;
update_opts.old_prog_fd = -1;
err = bpf_link_update(link, prog2, &update_opts);
if (CHECK_FAIL(!err || errno != EBADF)) {
perror("bpf_link_update expected EBADF");
goto out_close;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect update to fail with invalid flags */
errno = 0;
update_opts.flags = BPF_F_ALLOW_MULTI;
update_opts.old_prog_fd = 0;
err = bpf_link_update(link, prog2, &update_opts);
if (CHECK_FAIL(!err || errno != EINVAL))
perror("bpf_link_update expected EINVAL");
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
out_close:
close(link);
CHECK_FAIL(prog_is_attached(netns));
}
static void test_link_update_invalid_prog(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
int err, link, prog3;
link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
if (CHECK_FAIL(link < 0)) {
perror("bpf_link_create(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect failure when new prog FD is not valid */
errno = 0;
update_opts.flags = 0;
update_opts.old_prog_fd = 0;
err = bpf_link_update(link, -1, &update_opts);
if (CHECK_FAIL(!err || errno != EBADF)) {
perror("bpf_link_update expected EINVAL");
goto out_close_link;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
prog3 = load_prog(BPF_PROG_TYPE_SOCKET_FILTER);
if (prog3 < 0)
goto out_close_link;
/* Expect failure when new prog FD type doesn't match */
errno = 0;
update_opts.flags = 0;
update_opts.old_prog_fd = 0;
err = bpf_link_update(link, prog3, &update_opts);
if (CHECK_FAIL(!err || errno != EINVAL))
perror("bpf_link_update expected EINVAL");
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
close(prog3);
out_close_link:
close(link);
CHECK_FAIL(prog_is_attached(netns));
}
static void test_link_update_netns_gone(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
int err, link, old_net;
old_net = netns;
netns = unshare_net(old_net);
if (netns < 0)
return;
link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
if (CHECK_FAIL(link < 0)) {
perror("bpf_link_create(prog1)");
return;
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
close(netns);
err = setns(old_net, CLONE_NEWNET);
if (CHECK_FAIL(err)) {
perror("setns(CLONE_NEWNET)");
close(link);
return;
}
/* Expect failure when netns destroyed */
errno = 0;
update_opts.flags = 0;
update_opts.old_prog_fd = 0;
err = bpf_link_update(link, prog2, &update_opts);
if (CHECK_FAIL(!err || errno != ENOLINK))
perror("bpf_link_update");
close(link);
}
static void test_link_get_info(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
struct bpf_link_info info = {};
struct stat netns_stat = {};
__u32 info_len, link_id;
int err, link, old_net;
old_net = netns;
netns = unshare_net(old_net);
if (netns < 0)
return;
err = fstat(netns, &netns_stat);
if (CHECK_FAIL(err)) {
perror("stat(netns)");
goto out_resetns;
}
link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
if (CHECK_FAIL(link < 0)) {
perror("bpf_link_create(prog1)");
goto out_resetns;
}
info_len = sizeof(info);
err = bpf_link_get_info_by_fd(link, &info, &info_len);
if (CHECK_FAIL(err)) {
perror("bpf_obj_get_info");
goto out_unlink;
}
CHECK_FAIL(info_len != sizeof(info));
/* Expect link info to be sane and match prog and netns details */
CHECK_FAIL(info.type != BPF_LINK_TYPE_NETNS);
CHECK_FAIL(info.id == 0);
CHECK_FAIL(info.prog_id != query_prog_id(prog1));
CHECK_FAIL(info.netns.netns_ino != netns_stat.st_ino);
CHECK_FAIL(info.netns.attach_type != BPF_FLOW_DISSECTOR);
update_opts.flags = 0;
update_opts.old_prog_fd = 0;
err = bpf_link_update(link, prog2, &update_opts);
if (CHECK_FAIL(err)) {
perror("bpf_link_update(prog2)");
goto out_unlink;
}
link_id = info.id;
info_len = sizeof(info);
err = bpf_link_get_info_by_fd(link, &info, &info_len);
if (CHECK_FAIL(err)) {
perror("bpf_obj_get_info");
goto out_unlink;
}
CHECK_FAIL(info_len != sizeof(info));
/* Expect no info change after update except in prog id */
CHECK_FAIL(info.type != BPF_LINK_TYPE_NETNS);
CHECK_FAIL(info.id != link_id);
CHECK_FAIL(info.prog_id != query_prog_id(prog2));
CHECK_FAIL(info.netns.netns_ino != netns_stat.st_ino);
CHECK_FAIL(info.netns.attach_type != BPF_FLOW_DISSECTOR);
/* Leave the netns the link is attached to and close the last FD to it */
err = setns(old_net, CLONE_NEWNET);
if (CHECK_FAIL(err)) {
perror("setns(NEWNET)");
goto out_unlink;
}
close(netns);
old_net = -1;
netns = -1;
info_len = sizeof(info);
err = bpf_link_get_info_by_fd(link, &info, &info_len);
if (CHECK_FAIL(err)) {
perror("bpf_obj_get_info");
goto out_unlink;
}
CHECK_FAIL(info_len != sizeof(info));
/* Expect netns_ino to change to 0 */
CHECK_FAIL(info.type != BPF_LINK_TYPE_NETNS);
CHECK_FAIL(info.id != link_id);
CHECK_FAIL(info.prog_id != query_prog_id(prog2));
CHECK_FAIL(info.netns.netns_ino != 0);
CHECK_FAIL(info.netns.attach_type != BPF_FLOW_DISSECTOR);
out_unlink:
close(link);
out_resetns:
if (old_net != -1)
setns(old_net, CLONE_NEWNET);
if (netns != -1)
close(netns);
}
static void run_tests(int netns)
{
struct test {
const char *test_name;
void (*test_func)(int netns, int prog1, int prog2);
} tests[] = {
{ "prog attach, prog attach",
test_prog_attach_prog_attach },
{ "link create, link create",
test_link_create_link_create },
{ "prog attach, link create",
test_prog_attach_link_create },
{ "link create, prog attach",
test_link_create_prog_attach },
{ "link create, prog detach",
test_link_create_prog_detach },
{ "prog attach, detach, query",
test_prog_attach_detach_query },
{ "link create, close, query",
test_link_create_close_query },
{ "link update no old prog",
test_link_update_no_old_prog },
{ "link update with replace old prog",
test_link_update_replace_old_prog },
{ "link update with same prog",
test_link_update_same_prog },
{ "link update invalid opts",
test_link_update_invalid_opts },
{ "link update invalid prog",
test_link_update_invalid_prog },
{ "link update netns gone",
test_link_update_netns_gone },
{ "link get info",
test_link_get_info },
};
int i, progs[2] = { -1, -1 };
char test_name[80];
for (i = 0; i < ARRAY_SIZE(progs); i++) {
progs[i] = load_prog(BPF_PROG_TYPE_FLOW_DISSECTOR);
if (progs[i] < 0)
goto out_close;
}
for (i = 0; i < ARRAY_SIZE(tests); i++) {
snprintf(test_name, sizeof(test_name),
"flow dissector %s%s",
tests[i].test_name,
netns == init_net ? " (init_net)" : "");
if (test__start_subtest(test_name))
tests[i].test_func(netns, progs[0], progs[1]);
}
out_close:
for (i = 0; i < ARRAY_SIZE(progs); i++) {
if (progs[i] >= 0)
CHECK_FAIL(close(progs[i]));
}
}
void serial_test_flow_dissector_reattach(void)
{
int err, new_net, saved_net;
saved_net = open("/proc/self/ns/net", O_RDONLY);
if (CHECK_FAIL(saved_net < 0)) {
perror("open(/proc/self/ns/net");
return;
}
init_net = open("/proc/1/ns/net", O_RDONLY);
if (CHECK_FAIL(init_net < 0)) {
perror("open(/proc/1/ns/net)");
goto out_close;
}
err = setns(init_net, CLONE_NEWNET);
if (CHECK_FAIL(err)) {
perror("setns(/proc/1/ns/net)");
goto out_close;
}
if (prog_is_attached(init_net)) {
test__skip();
printf("Can't test with flow dissector attached to init_net\n");
goto out_setns;
}
/* First run tests in root network namespace */
run_tests(init_net);
/* Then repeat tests in a non-root namespace */
new_net = unshare_net(init_net);
if (new_net < 0)
goto out_setns;
run_tests(new_net);
close(new_net);
out_setns:
/* Move back to netns we started in. */
err = setns(saved_net, CLONE_NEWNET);
if (CHECK_FAIL(err))
perror("setns(/proc/self/ns/net)");
out_close:
close(init_net);
close(saved_net);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.*/
#include <test_progs.h>
#include <network_helpers.h>
#include "test_ldsx_insn.skel.h"
static void test_map_val_and_probed_memory(void)
{
struct test_ldsx_insn *skel;
int err;
skel = test_ldsx_insn__open();
if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open"))
return;
if (skel->rodata->skip) {
test__skip();
goto out;
}
bpf_program__set_autoload(skel->progs.rdonly_map_prog, true);
bpf_program__set_autoload(skel->progs.map_val_prog, true);
bpf_program__set_autoload(skel->progs.test_ptr_struct_arg, true);
err = test_ldsx_insn__load(skel);
if (!ASSERT_OK(err, "test_ldsx_insn__load"))
goto out;
err = test_ldsx_insn__attach(skel);
if (!ASSERT_OK(err, "test_ldsx_insn__attach"))
goto out;
ASSERT_OK(trigger_module_test_read(256), "trigger_read");
ASSERT_EQ(skel->bss->done1, 1, "done1");
ASSERT_EQ(skel->bss->ret1, 1, "ret1");
ASSERT_EQ(skel->bss->done2, 1, "done2");
ASSERT_EQ(skel->bss->ret2, 1, "ret2");
ASSERT_EQ(skel->bss->int_member, -1, "int_member");
out:
test_ldsx_insn__destroy(skel);
}
static void test_ctx_member_sign_ext(void)
{
struct test_ldsx_insn *skel;
int err, fd, cgroup_fd;
char buf[16] = {0};
socklen_t optlen;
cgroup_fd = test__join_cgroup("/ldsx_test");
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /ldsx_test"))
return;
skel = test_ldsx_insn__open();
if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open"))
goto close_cgroup_fd;
if (skel->rodata->skip) {
test__skip();
goto destroy_skel;
}
bpf_program__set_autoload(skel->progs._getsockopt, true);
err = test_ldsx_insn__load(skel);
if (!ASSERT_OK(err, "test_ldsx_insn__load"))
goto destroy_skel;
skel->links._getsockopt =
bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link"))
goto destroy_skel;
fd = socket(AF_INET, SOCK_STREAM, 0);
if (!ASSERT_GE(fd, 0, "socket"))
goto destroy_skel;
optlen = sizeof(buf);
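/* getsockopt() triggers the attached cgroup/getsockopt program, which
* records the sign-extended ctx members checked below.
*/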
(void)getsockopt(fd, SOL_IP, IP_TTL, buf, &optlen);
ASSERT_EQ(skel->bss->set_optlen, -1, "optlen");
ASSERT_EQ(skel->bss->set_retval, -1, "retval");
close(fd);
destroy_skel:
test_ldsx_insn__destroy(skel);
close_cgroup_fd:
close(cgroup_fd);
}
static void test_ctx_member_narrow_sign_ext(void)
{
struct test_ldsx_insn *skel;
struct __sk_buff skb = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.ctx_in = &skb,
.ctx_size_in = sizeof(skb),
);
int err, prog_fd;
skel = test_ldsx_insn__open();
if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open"))
return;
if (skel->rodata->skip) {
test__skip();
goto out;
}
bpf_program__set_autoload(skel->progs._tc, true);
err = test_ldsx_insn__load(skel);
if (!ASSERT_OK(err, "test_ldsx_insn__load"))
goto out;
prog_fd = bpf_program__fd(skel->progs._tc);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(skel->bss->set_mark, -2, "set_mark");
out:
test_ldsx_insn__destroy(skel);
}
void test_ldsx_insn(void)
{
if (test__start_subtest("map_val and probed_memory"))
test_map_val_and_probed_memory();
if (test__start_subtest("ctx_member_sign_ext"))
test_ctx_member_sign_ext();
if (test__start_subtest("ctx_member_narrow_sign_ext"))
test_ctx_member_narrow_sign_ext();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "netns_cookie_prog.skel.h"
#include "network_helpers.h"
#ifndef SO_NETNS_COOKIE
#define SO_NETNS_COOKIE 71
#endif
static int duration;
void test_netns_cookie(void)
{
int server_fd = -1, client_fd = -1, cgroup_fd = -1;
int err, val, ret, map, verdict;
struct netns_cookie_prog *skel;
uint64_t cookie_expected_value;
socklen_t vallen = sizeof(cookie_expected_value);
static const char send_msg[] = "message";
skel = netns_cookie_prog__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
cgroup_fd = test__join_cgroup("/netns_cookie");
if (CHECK(cgroup_fd < 0, "join_cgroup", "cgroup creation failed\n"))
goto done;
skel->links.get_netns_cookie_sockops = bpf_program__attach_cgroup(
skel->progs.get_netns_cookie_sockops, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.get_netns_cookie_sockops, "prog_attach"))
goto done;
verdict = bpf_program__fd(skel->progs.get_netns_cookie_sk_msg);
map = bpf_map__fd(skel->maps.sock_map);
err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
if (!ASSERT_OK(err, "prog_attach"))
goto done;
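	/* Open a TCP connection and send one message so that both the
	 * sockops program and the sk_msg verdict program run and record
	 * the netns cookie for the client socket.
	 */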
server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
if (CHECK(server_fd < 0, "start_server", "errno %d\n", errno))
goto done;
client_fd = connect_to_fd(server_fd, 0);
if (CHECK(client_fd < 0, "connect_to_fd", "errno %d\n", errno))
goto done;
ret = send(client_fd, send_msg, sizeof(send_msg), 0);
if (CHECK(ret != sizeof(send_msg), "send(msg)", "ret:%d\n", ret))
goto done;
err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.sockops_netns_cookies),
&client_fd, &val);
if (!ASSERT_OK(err, "map_lookup(sockops_netns_cookies)"))
goto done;
err = getsockopt(client_fd, SOL_SOCKET, SO_NETNS_COOKIE,
&cookie_expected_value, &vallen);
if (!ASSERT_OK(err, "getsockopt"))
goto done;
ASSERT_EQ(val, cookie_expected_value, "cookie_value");
err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.sk_msg_netns_cookies),
&client_fd, &val);
if (!ASSERT_OK(err, "map_lookup(sk_msg_netns_cookies)"))
goto done;
ASSERT_EQ(val, cookie_expected_value, "cookie_value");
done:
if (server_fd != -1)
close(server_fd);
if (client_fd != -1)
close(client_fd);
if (cgroup_fd != -1)
close(cgroup_fd);
netns_cookie_prog__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/netns_cookie.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <test_progs.h>
#include "test_stacktrace_build_id.skel.h"
void test_get_stackid_cannot_attach(void)
{
struct perf_event_attr attr = {
/* .type = PERF_TYPE_SOFTWARE, */
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.precise_ip = 1,
.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK,
.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
PERF_SAMPLE_BRANCH_NO_FLAGS |
PERF_SAMPLE_BRANCH_NO_CYCLES |
PERF_SAMPLE_BRANCH_CALL_STACK,
.sample_period = 5000,
.size = sizeof(struct perf_event_attr),
};
struct test_stacktrace_build_id *skel;
__u32 duration = 0;
int pmu_fd, err;
skel = test_stacktrace_build_id__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
/* override program type */
bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);
err = test_stacktrace_build_id__load(skel);
if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
goto cleanup;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
if (pmu_fd < 0 && (errno == ENOENT || errno == EOPNOTSUPP)) {
printf("%s:SKIP:cannot open PERF_COUNT_HW_CPU_CYCLES with precise_ip > 0\n",
__func__);
test__skip();
goto cleanup;
}
if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
pmu_fd, errno))
goto cleanup;
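	/* sample_type has no PERF_SAMPLE_CALLCHAIN yet, attach should fail */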
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
ASSERT_ERR_PTR(skel->links.oncpu, "attach_perf_event_no_callchain");
close(pmu_fd);
/* add PERF_SAMPLE_CALLCHAIN, attach should succeed */
attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
pmu_fd, errno))
goto cleanup;
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event_callchain");
bpf_link__destroy(skel->links.oncpu);
close(pmu_fd);
/* add exclude_callchain_kernel, attach should fail */
attr.exclude_callchain_kernel = 1;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
pmu_fd, errno))
goto cleanup;
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
ASSERT_ERR_PTR(skel->links.oncpu, "attach_perf_event_exclude_callchain_kernel");
close(pmu_fd);
cleanup:
test_stacktrace_build_id__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
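/* Repeatedly run the spin-lock test program via BPF_PROG_TEST_RUN. */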
static void *spin_lock_thread(void *arg)
{
int err, prog_fd = *(u32 *) arg;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 10000,
);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run_opts err");
ASSERT_OK(topts.retval, "test_run_opts retval");
pthread_exit(arg);
}
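/* Do locked lookups in parallel with the prog runs and check that each
 * snapshot is consistent: vars[0] stays 0 and vars[2..16] all equal vars[1].
 */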
static void *parallel_map_access(void *arg)
{
int err, map_fd = *(u32 *) arg;
int vars[17], i, j, rnd, key = 0;
for (i = 0; i < 10000; i++) {
err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK);
if (CHECK_FAIL(err)) {
printf("lookup failed\n");
goto out;
}
if (CHECK_FAIL(vars[0] != 0)) {
printf("lookup #%d var[0]=%d\n", i, vars[0]);
goto out;
}
rnd = vars[1];
for (j = 2; j < 17; j++) {
if (vars[j] == rnd)
continue;
printf("lookup #%d var[1]=%d var[%d]=%d\n",
i, rnd, j, vars[j]);
CHECK_FAIL(vars[j] != rnd);
goto out;
}
}
out:
pthread_exit(arg);
}
void test_map_lock(void)
{
const char *file = "./test_map_lock.bpf.o";
int prog_fd, map_fd[2], vars[17] = {};
pthread_t thread_id[6];
struct bpf_object *obj = NULL;
int err = 0, key = 0, i;
void *ret;
err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
if (CHECK_FAIL(err)) {
printf("test_map_lock:bpf_prog_test_load errno %d\n", errno);
goto close_prog;
}
map_fd[0] = bpf_find_map(__func__, obj, "hash_map");
if (CHECK_FAIL(map_fd[0] < 0))
goto close_prog;
map_fd[1] = bpf_find_map(__func__, obj, "array_map");
if (CHECK_FAIL(map_fd[1] < 0))
goto close_prog;
bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK);
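	/* 4 threads run the spin-lock test prog, 2 threads do locked lookups */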
for (i = 0; i < 4; i++)
if (CHECK_FAIL(pthread_create(&thread_id[i], NULL,
&spin_lock_thread, &prog_fd)))
goto close_prog;
for (i = 4; i < 6; i++)
if (CHECK_FAIL(pthread_create(&thread_id[i], NULL,
				      &parallel_map_access,
&map_fd[i - 4])))
goto close_prog;
for (i = 0; i < 4; i++)
if (CHECK_FAIL(pthread_join(thread_id[i], &ret) ||
ret != (void *)&prog_fd))
goto close_prog;
for (i = 4; i < 6; i++)
if (CHECK_FAIL(pthread_join(thread_id[i], &ret) ||
ret != (void *)&map_fd[i - 4]))
goto close_prog;
close_prog:
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/map_lock.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
static void test_xdp_adjust_tail_shrink(void)
{
const char *file = "./test_xdp_adjust_tail_shrink.bpf.o";
__u32 expect_sz;
struct bpf_object *obj;
int err, prog_fd;
char buf[128];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = 1,
);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (!ASSERT_OK(err, "test_xdp_adjust_tail_shrink"))
return;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "ipv4");
ASSERT_EQ(topts.retval, XDP_DROP, "ipv4 retval");
expect_sz = sizeof(pkt_v6) - 20; /* Test shrink with 20 bytes */
topts.data_in = &pkt_v6;
topts.data_size_in = sizeof(pkt_v6);
topts.data_size_out = sizeof(buf);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "ipv6");
ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval");
ASSERT_EQ(topts.data_size_out, expect_sz, "ipv6 size");
bpf_object__close(obj);
}
static void test_xdp_adjust_tail_grow(void)
{
const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
struct bpf_object *obj;
char buf[4096]; /* avoid segfault: large buf to hold grow results */
__u32 expect_sz;
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = 1,
);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
return;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "ipv4");
ASSERT_EQ(topts.retval, XDP_DROP, "ipv4 retval");
expect_sz = sizeof(pkt_v6) + 40; /* Test grow with 40 bytes */
topts.data_in = &pkt_v6;
topts.data_size_in = sizeof(pkt_v6);
topts.data_size_out = sizeof(buf);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "ipv6");
ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval");
ASSERT_EQ(topts.data_size_out, expect_sz, "ipv6 size");
bpf_object__close(obj);
}
static void test_xdp_adjust_tail_grow2(void)
{
const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
char buf[4096]; /* avoid segfault: large buf to hold grow results */
struct bpf_object *obj;
int err, cnt, i;
int max_grow, prog_fd;
/* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
#if defined(__s390x__)
int tailroom = 512;
#else
int tailroom = 320;
#endif
LIBBPF_OPTS(bpf_test_run_opts, tattr,
.repeat = 1,
.data_in = &buf,
.data_out = &buf,
.data_size_in = 0, /* Per test */
.data_size_out = 0, /* Per test */
);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
return;
/* Test case-64 */
memset(buf, 1, sizeof(buf));
tattr.data_size_in = 64; /* Determine test case via pkt size */
tattr.data_size_out = 128; /* Limit copy_size */
/* Kernel side alloc packet memory area that is zero init */
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_EQ(errno, ENOSPC, "case-64 errno"); /* Due limit copy_size in bpf_test_finish */
ASSERT_EQ(tattr.retval, XDP_TX, "case-64 retval");
ASSERT_EQ(tattr.data_size_out, 192, "case-64 data_size_out"); /* Expected grow size */
/* Extra checks for data contents */
ASSERT_EQ(buf[0], 1, "case-64-data buf[0]"); /* 0-63 memset to 1 */
ASSERT_EQ(buf[63], 1, "case-64-data buf[63]");
ASSERT_EQ(buf[64], 0, "case-64-data buf[64]"); /* 64-127 memset to 0 */
ASSERT_EQ(buf[127], 0, "case-64-data buf[127]");
ASSERT_EQ(buf[128], 1, "case-64-data buf[128]"); /* 128-191 memset to 1 */
ASSERT_EQ(buf[191], 1, "case-64-data buf[191]");
/* Test case-128 */
memset(buf, 2, sizeof(buf));
tattr.data_size_in = 128; /* Determine test case via pkt size */
tattr.data_size_out = sizeof(buf); /* Copy everything */
err = bpf_prog_test_run_opts(prog_fd, &tattr);
max_grow = 4096 - XDP_PACKET_HEADROOM - tailroom; /* 3520 */
ASSERT_OK(err, "case-128");
ASSERT_EQ(tattr.retval, XDP_TX, "case-128 retval");
ASSERT_EQ(tattr.data_size_out, max_grow, "case-128 data_size_out"); /* Expect max grow */
/* Extra checks for data content: Count grow size, will contain zeros */
for (i = 0, cnt = 0; i < sizeof(buf); i++) {
if (buf[i] == 0)
cnt++;
}
ASSERT_EQ(cnt, max_grow - tattr.data_size_in, "case-128-data cnt"); /* Grow increase */
ASSERT_EQ(tattr.data_size_out, max_grow, "case-128-data data_size_out"); /* Total grow */
bpf_object__close(obj);
}
static void test_xdp_adjust_frags_tail_shrink(void)
{
const char *file = "./test_xdp_adjust_tail_shrink.bpf.o";
__u32 exp_size;
struct bpf_program *prog;
struct bpf_object *obj;
int err, prog_fd;
__u8 *buf;
LIBBPF_OPTS(bpf_test_run_opts, topts);
/* For the individual test cases, the first byte in the packet
* indicates which test will be run.
*/
obj = bpf_object__open(file);
if (libbpf_get_error(obj))
return;
prog = bpf_object__next_program(obj, NULL);
if (bpf_object__load(obj))
return;
prog_fd = bpf_program__fd(prog);
buf = malloc(9000);
if (!ASSERT_OK_PTR(buf, "alloc buf 9Kb"))
goto out;
memset(buf, 0, 9000);
/* Test case removing 10 bytes from last frag, NOT freeing it */
exp_size = 8990; /* 9000 - 10 */
topts.data_in = buf;
topts.data_out = buf;
topts.data_size_in = 9000;
topts.data_size_out = 9000;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "9Kb-10b");
ASSERT_EQ(topts.retval, XDP_TX, "9Kb-10b retval");
ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-10b size");
/* Test case removing one of two pages, assuming 4K pages */
buf[0] = 1;
exp_size = 4900; /* 9000 - 4100 */
topts.data_size_out = 9000; /* reset from previous invocation */
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "9Kb-4Kb");
ASSERT_EQ(topts.retval, XDP_TX, "9Kb-4Kb retval");
ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-4Kb size");
/* Test case removing two pages resulting in a linear xdp_buff */
buf[0] = 2;
exp_size = 800; /* 9000 - 8200 */
topts.data_size_out = 9000; /* reset from previous invocation */
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "9Kb-9Kb");
ASSERT_EQ(topts.retval, XDP_TX, "9Kb-9Kb retval");
ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-9Kb size");
free(buf);
out:
bpf_object__close(obj);
}
static void test_xdp_adjust_frags_tail_grow(void)
{
const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
__u32 exp_size;
struct bpf_program *prog;
struct bpf_object *obj;
int err, i, prog_fd;
__u8 *buf;
LIBBPF_OPTS(bpf_test_run_opts, topts);
obj = bpf_object__open(file);
if (libbpf_get_error(obj))
return;
prog = bpf_object__next_program(obj, NULL);
if (bpf_object__load(obj))
return;
prog_fd = bpf_program__fd(prog);
buf = malloc(16384);
if (!ASSERT_OK_PTR(buf, "alloc buf 16Kb"))
goto out;
/* Test case add 10 bytes to last frag */
memset(buf, 1, 16384);
exp_size = 9000 + 10;
topts.data_in = buf;
topts.data_out = buf;
topts.data_size_in = 9000;
topts.data_size_out = 16384;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "9Kb+10b");
ASSERT_EQ(topts.retval, XDP_TX, "9Kb+10b retval");
ASSERT_EQ(topts.data_size_out, exp_size, "9Kb+10b size");
for (i = 0; i < 9000; i++)
ASSERT_EQ(buf[i], 1, "9Kb+10b-old");
for (i = 9000; i < 9010; i++)
ASSERT_EQ(buf[i], 0, "9Kb+10b-new");
for (i = 9010; i < 16384; i++)
ASSERT_EQ(buf[i], 1, "9Kb+10b-untouched");
/* Test a too large grow */
memset(buf, 1, 16384);
exp_size = 9001;
topts.data_in = topts.data_out = buf;
topts.data_size_in = 9001;
topts.data_size_out = 16384;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "9Kb+10b");
ASSERT_EQ(topts.retval, XDP_DROP, "9Kb+10b retval");
ASSERT_EQ(topts.data_size_out, exp_size, "9Kb+10b size");
free(buf);
out:
bpf_object__close(obj);
}
void test_xdp_adjust_tail(void)
{
if (test__start_subtest("xdp_adjust_tail_shrink"))
test_xdp_adjust_tail_shrink();
if (test__start_subtest("xdp_adjust_tail_grow"))
test_xdp_adjust_tail_grow();
if (test__start_subtest("xdp_adjust_tail_grow2"))
test_xdp_adjust_tail_grow2();
if (test__start_subtest("xdp_adjust_frags_tail_shrink"))
test_xdp_adjust_frags_tail_shrink();
if (test__start_subtest("xdp_adjust_frags_tail_grow"))
test_xdp_adjust_frags_tail_grow();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Google LLC.
*/
#include <test_progs.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <malloc.h>
#include <stdlib.h>
#include "lsm.skel.h"
char *CMD_ARGS[] = {"true", NULL};
#define GET_PAGE_ADDR(ADDR, PAGE_SIZE) \
(char *)(((unsigned long) (ADDR + PAGE_SIZE)) & ~(PAGE_SIZE-1))
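/* mprotect() a page of a stack buffer; the LSM program is expected to deny
 * it, and the caller checks for -1/EPERM.
 */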
int stack_mprotect(void)
{
void *buf;
long sz;
int ret;
sz = sysconf(_SC_PAGESIZE);
if (sz < 0)
return sz;
buf = alloca(sz * 3);
ret = mprotect(GET_PAGE_ADDR(buf, sz), sz,
PROT_READ | PROT_WRITE | PROT_EXEC);
return ret;
}
int exec_cmd(int *monitored_pid)
{
int child_pid, child_status;
child_pid = fork();
if (child_pid == 0) {
*monitored_pid = getpid();
execvp(CMD_ARGS[0], CMD_ARGS);
return -EINVAL;
} else if (child_pid > 0) {
waitpid(child_pid, &child_status, 0);
return child_status;
}
return -EINVAL;
}
static int test_lsm(struct lsm *skel)
{
struct bpf_link *link;
int buf = 1234;
int err;
err = lsm__attach(skel);
if (!ASSERT_OK(err, "attach"))
return err;
/* Check that already linked program can't be attached again. */
link = bpf_program__attach(skel->progs.test_int_hook);
if (!ASSERT_ERR_PTR(link, "attach_link"))
return -1;
err = exec_cmd(&skel->bss->monitored_pid);
if (!ASSERT_OK(err, "exec_cmd"))
return err;
ASSERT_EQ(skel->bss->bprm_count, 1, "bprm_count");
skel->bss->monitored_pid = getpid();
err = stack_mprotect();
if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
!ASSERT_EQ(errno, EPERM, "stack_mprotect"))
return err;
ASSERT_EQ(skel->bss->mprotect_count, 1, "mprotect_count");
syscall(__NR_setdomainname, &buf, -2L);
syscall(__NR_setdomainname, 0, -3L);
syscall(__NR_setdomainname, ~0L, -4L);
ASSERT_EQ(skel->bss->copy_test, 3, "copy_test");
lsm__detach(skel);
skel->bss->copy_test = 0;
skel->bss->bprm_count = 0;
skel->bss->mprotect_count = 0;
return 0;
}
void test_test_lsm(void)
{
struct lsm *skel = NULL;
int err;
skel = lsm__open_and_load();
if (!ASSERT_OK_PTR(skel, "lsm_skel_load"))
goto close_prog;
err = test_lsm(skel);
if (!ASSERT_OK(err, "test_lsm_first_attach"))
goto close_prog;
err = test_lsm(skel);
ASSERT_OK(err, "test_lsm_second_attach");
close_prog:
lsm__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_lsm.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <sched.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "test_progs.h"
#include "cap_helpers.h"
#include "bind_perm.skel.h"
static int duration;
static int create_netns(void)
{
if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
return -1;
return 0;
}
void try_bind(int family, int port, int expected_errno)
{
struct sockaddr_storage addr = {};
struct sockaddr_in6 *sin6;
struct sockaddr_in *sin;
int fd = -1;
fd = socket(family, SOCK_STREAM, 0);
if (CHECK(fd < 0, "fd", "errno %d", errno))
goto close_socket;
if (family == AF_INET) {
sin = (struct sockaddr_in *)&addr;
sin->sin_family = family;
sin->sin_port = htons(port);
} else {
sin6 = (struct sockaddr_in6 *)&addr;
sin6->sin6_family = family;
sin6->sin6_port = htons(port);
}
errno = 0;
bind(fd, (struct sockaddr *)&addr, sizeof(addr));
ASSERT_EQ(errno, expected_errno, "bind");
close_socket:
if (fd >= 0)
close(fd);
}
void test_bind_perm(void)
{
const __u64 net_bind_svc_cap = 1ULL << CAP_NET_BIND_SERVICE;
struct bind_perm *skel;
__u64 old_caps = 0;
int cgroup_fd;
if (create_netns())
return;
cgroup_fd = test__join_cgroup("/bind_perm");
if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno))
return;
skel = bind_perm__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel"))
goto close_cgroup_fd;
skel->links.bind_v4_prog = bpf_program__attach_cgroup(skel->progs.bind_v4_prog, cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links.bind_v4_prog, "bind_v4_prog"))
goto close_skeleton;
skel->links.bind_v6_prog = bpf_program__attach_cgroup(skel->progs.bind_v6_prog, cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links.bind_v6_prog, "bind_v6_prog"))
goto close_skeleton;
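	/* Drop CAP_NET_BIND_SERVICE, then verify that binding to port 110
	 * fails with EACCES while binding to port 111 succeeds.
	 */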
ASSERT_OK(cap_disable_effective(net_bind_svc_cap, &old_caps),
"cap_disable_effective");
try_bind(AF_INET, 110, EACCES);
try_bind(AF_INET6, 110, EACCES);
try_bind(AF_INET, 111, 0);
try_bind(AF_INET6, 111, 0);
if (old_caps & net_bind_svc_cap)
ASSERT_OK(cap_enable_effective(net_bind_svc_cap, NULL),
"cap_enable_effective");
close_skeleton:
bind_perm__destroy(skel);
close_cgroup_fd:
close(cgroup_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/bind_perm.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022. Huawei Technologies Co., Ltd */
#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <test_progs.h>
#include "htab_update.skel.h"
struct htab_update_ctx {
int fd;
int loop;
bool stop;
};
static void test_reenter_update(void)
{
struct htab_update *skel;
unsigned int key, value;
int err;
skel = htab_update__open();
if (!ASSERT_OK_PTR(skel, "htab_update__open"))
return;
/* lookup_elem_raw() may be inlined and find_kernel_btf_id() will return -ESRCH */
bpf_program__set_autoload(skel->progs.lookup_elem_raw, true);
err = htab_update__load(skel);
if (!ASSERT_TRUE(!err || err == -ESRCH, "htab_update__load") || err)
goto out;
skel->bss->pid = getpid();
err = htab_update__attach(skel);
if (!ASSERT_OK(err, "htab_update__attach"))
goto out;
/* Will trigger the reentrancy of bpf_map_update_elem() */
key = 0;
value = 0;
err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, &value, 0);
if (!ASSERT_OK(err, "add element"))
goto out;
ASSERT_EQ(skel->bss->update_err, -EBUSY, "no reentrancy");
out:
htab_update__destroy(skel);
}
static void *htab_update_thread(void *arg)
{
struct htab_update_ctx *ctx = arg;
cpu_set_t cpus;
int i;
/* Pinned on CPU 0 */
CPU_ZERO(&cpus);
CPU_SET(0, &cpus);
pthread_setaffinity_np(pthread_self(), sizeof(cpus), &cpus);
i = 0;
while (i++ < ctx->loop && !ctx->stop) {
unsigned int key = 0, value = 0;
int err;
err = bpf_map_update_elem(ctx->fd, &key, &value, 0);
if (err) {
ctx->stop = true;
return (void *)(long)err;
}
}
return NULL;
}
static void test_concurrent_update(void)
{
struct htab_update_ctx ctx;
struct htab_update *skel;
unsigned int i, nr;
pthread_t *tids;
int err;
skel = htab_update__open_and_load();
if (!ASSERT_OK_PTR(skel, "htab_update__open_and_load"))
return;
ctx.fd = bpf_map__fd(skel->maps.htab);
ctx.loop = 1000;
ctx.stop = false;
nr = 4;
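	/* four threads, each pinned to CPU 0, update the same key concurrently */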
tids = calloc(nr, sizeof(*tids));
if (!ASSERT_NEQ(tids, NULL, "no mem"))
goto out;
for (i = 0; i < nr; i++) {
err = pthread_create(&tids[i], NULL, htab_update_thread, &ctx);
if (!ASSERT_OK(err, "pthread_create")) {
unsigned int j;
ctx.stop = true;
for (j = 0; j < i; j++)
pthread_join(tids[j], NULL);
goto out;
}
}
for (i = 0; i < nr; i++) {
void *thread_err = NULL;
pthread_join(tids[i], &thread_err);
ASSERT_EQ(thread_err, NULL, "update error");
}
out:
if (tids)
free(tids);
htab_update__destroy(skel);
}
void test_htab_update(void)
{
if (test__start_subtest("reenter_update"))
test_reenter_update();
if (test__start_subtest("concurrent_update"))
test_concurrent_update();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/htab_update.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "get_func_args_test.skel.h"
void test_get_func_args_test(void)
{
struct get_func_args_test *skel = NULL;
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
skel = get_func_args_test__open_and_load();
if (!ASSERT_OK_PTR(skel, "get_func_args_test__open_and_load"))
return;
err = get_func_args_test__attach(skel);
if (!ASSERT_OK(err, "get_func_args_test__attach"))
goto cleanup;
/* This runs bpf_fentry_test* functions and triggers
* fentry/fexit programs.
*/
prog_fd = bpf_program__fd(skel->progs.test1);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
/* This runs bpf_modify_return_test function and triggers
* fmod_ret_test and fexit_test programs.
*/
prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval >> 16, 1, "test_run");
ASSERT_EQ(topts.retval & 0xffff, 1234 + 29, "test_run");
ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
ASSERT_EQ(skel->bss->test2_result, 1, "test2_result");
ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
cleanup:
get_func_args_test__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/get_func_args_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 Facebook
* Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
*
* Author: Roberto Sassu <[email protected]>
*/
#include <test_progs.h>
#include "test_kfunc_dynptr_param.skel.h"
static struct {
const char *prog_name;
int expected_runtime_err;
} kfunc_dynptr_tests[] = {
{"dynptr_data_null", -EBADMSG},
};
static bool kfunc_not_supported;
static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt,
va_list args)
{
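	/* Watch the libbpf log for the message reporting that the
	 * bpf_verify_pkcs7_signature kfunc is not available.
	 */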
if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n"))
return 0;
if (strcmp(va_arg(args, char *), "bpf_verify_pkcs7_signature"))
return 0;
kfunc_not_supported = true;
return 0;
}
static bool has_pkcs7_kfunc_support(void)
{
struct test_kfunc_dynptr_param *skel;
libbpf_print_fn_t old_print_cb;
int err;
skel = test_kfunc_dynptr_param__open();
if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open"))
return false;
kfunc_not_supported = false;
old_print_cb = libbpf_set_print(libbpf_print_cb);
err = test_kfunc_dynptr_param__load(skel);
libbpf_set_print(old_print_cb);
if (err < 0 && kfunc_not_supported) {
fprintf(stderr,
"%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n",
__func__);
test_kfunc_dynptr_param__destroy(skel);
return false;
}
test_kfunc_dynptr_param__destroy(skel);
return true;
}
static void verify_success(const char *prog_name, int expected_runtime_err)
{
struct test_kfunc_dynptr_param *skel;
struct bpf_program *prog;
struct bpf_link *link;
__u32 next_id;
int err;
skel = test_kfunc_dynptr_param__open();
if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open"))
return;
skel->bss->pid = getpid();
err = test_kfunc_dynptr_param__load(skel);
if (!ASSERT_OK(err, "test_kfunc_dynptr_param__load"))
goto cleanup;
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto cleanup;
link = bpf_program__attach(prog);
if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
goto cleanup;
err = bpf_prog_get_next_id(0, &next_id);
bpf_link__destroy(link);
if (!ASSERT_OK(err, "bpf_prog_get_next_id"))
goto cleanup;
ASSERT_EQ(skel->bss->err, expected_runtime_err, "err");
cleanup:
test_kfunc_dynptr_param__destroy(skel);
}
void test_kfunc_dynptr_param(void)
{
int i;
if (!has_pkcs7_kfunc_support())
return;
for (i = 0; i < ARRAY_SIZE(kfunc_dynptr_tests); i++) {
if (!test__start_subtest(kfunc_dynptr_tests[i].prog_name))
continue;
verify_success(kfunc_dynptr_tests[i].prog_name,
kfunc_dynptr_tests[i].expected_runtime_err);
}
RUN_TESTS(test_kfunc_dynptr_param);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_enable_stats.skel.h"
void test_enable_stats(void)
{
struct test_enable_stats *skel;
int stats_fd, err, prog_fd;
struct bpf_prog_info info;
__u32 info_len = sizeof(info);
int duration = 0;
skel = test_enable_stats__open_and_load();
if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
return;
stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
if (CHECK(stats_fd < 0, "get_stats_fd", "failed %d\n", errno)) {
test_enable_stats__destroy(skel);
return;
}
err = test_enable_stats__attach(skel);
if (CHECK(err, "attach_raw_tp", "err %d\n", err))
goto cleanup;
test_enable_stats__detach(skel);
prog_fd = bpf_program__fd(skel->progs.test_enable_stats);
memset(&info, 0, info_len);
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err, "get_prog_info",
"failed to get bpf_prog_info for fd %d\n", prog_fd))
goto cleanup;
if (CHECK(info.run_time_ns == 0, "check_stats_enabled",
"failed to enable run_time_ns stats\n"))
goto cleanup;
CHECK(info.run_cnt != skel->bss->count, "check_run_cnt_valid",
"invalid run_cnt stats\n");
cleanup:
test_enable_stats__destroy(skel);
close(stats_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/enable_stats.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "tracing_struct.skel.h"
static void test_fentry(void)
{
struct tracing_struct *skel;
int err;
skel = tracing_struct__open_and_load();
if (!ASSERT_OK_PTR(skel, "tracing_struct__open_and_load"))
return;
err = tracing_struct__attach(skel);
if (!ASSERT_OK(err, "tracing_struct__attach"))
goto destroy_skel;
ASSERT_OK(trigger_module_test_read(256), "trigger_read");
ASSERT_EQ(skel->bss->t1_a_a, 2, "t1:a.a");
ASSERT_EQ(skel->bss->t1_a_b, 3, "t1:a.b");
ASSERT_EQ(skel->bss->t1_b, 1, "t1:b");
ASSERT_EQ(skel->bss->t1_c, 4, "t1:c");
ASSERT_EQ(skel->bss->t1_nregs, 4, "t1 nregs");
ASSERT_EQ(skel->bss->t1_reg0, 2, "t1 reg0");
ASSERT_EQ(skel->bss->t1_reg1, 3, "t1 reg1");
ASSERT_EQ(skel->bss->t1_reg2, 1, "t1 reg2");
ASSERT_EQ(skel->bss->t1_reg3, 4, "t1 reg3");
ASSERT_EQ(skel->bss->t1_ret, 10, "t1 ret");
ASSERT_EQ(skel->bss->t2_a, 1, "t2:a");
ASSERT_EQ(skel->bss->t2_b_a, 2, "t2:b.a");
ASSERT_EQ(skel->bss->t2_b_b, 3, "t2:b.b");
ASSERT_EQ(skel->bss->t2_c, 4, "t2:c");
ASSERT_EQ(skel->bss->t2_ret, 10, "t2 ret");
ASSERT_EQ(skel->bss->t3_a, 1, "t3:a");
ASSERT_EQ(skel->bss->t3_b, 4, "t3:b");
ASSERT_EQ(skel->bss->t3_c_a, 2, "t3:c.a");
ASSERT_EQ(skel->bss->t3_c_b, 3, "t3:c.b");
ASSERT_EQ(skel->bss->t3_ret, 10, "t3 ret");
ASSERT_EQ(skel->bss->t4_a_a, 10, "t4:a.a");
ASSERT_EQ(skel->bss->t4_b, 1, "t4:b");
ASSERT_EQ(skel->bss->t4_c, 2, "t4:c");
ASSERT_EQ(skel->bss->t4_d, 3, "t4:d");
ASSERT_EQ(skel->bss->t4_e_a, 2, "t4:e.a");
ASSERT_EQ(skel->bss->t4_e_b, 3, "t4:e.b");
ASSERT_EQ(skel->bss->t4_ret, 21, "t4 ret");
ASSERT_EQ(skel->bss->t5_ret, 1, "t5 ret");
ASSERT_EQ(skel->bss->t6, 1, "t6 ret");
ASSERT_EQ(skel->bss->t7_a, 16, "t7:a");
ASSERT_EQ(skel->bss->t7_b, 17, "t7:b");
ASSERT_EQ(skel->bss->t7_c, 18, "t7:c");
ASSERT_EQ(skel->bss->t7_d, 19, "t7:d");
ASSERT_EQ(skel->bss->t7_e, 20, "t7:e");
ASSERT_EQ(skel->bss->t7_f_a, 21, "t7:f.a");
ASSERT_EQ(skel->bss->t7_f_b, 22, "t7:f.b");
ASSERT_EQ(skel->bss->t7_ret, 133, "t7 ret");
ASSERT_EQ(skel->bss->t8_a, 16, "t8:a");
ASSERT_EQ(skel->bss->t8_b, 17, "t8:b");
ASSERT_EQ(skel->bss->t8_c, 18, "t8:c");
ASSERT_EQ(skel->bss->t8_d, 19, "t8:d");
ASSERT_EQ(skel->bss->t8_e, 20, "t8:e");
ASSERT_EQ(skel->bss->t8_f_a, 21, "t8:f.a");
ASSERT_EQ(skel->bss->t8_f_b, 22, "t8:f.b");
ASSERT_EQ(skel->bss->t8_g, 23, "t8:g");
ASSERT_EQ(skel->bss->t8_ret, 156, "t8 ret");
tracing_struct__detach(skel);
destroy_skel:
tracing_struct__destroy(skel);
}
void test_tracing_struct(void)
{
test_fentry();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/tracing_struct.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "async_stack_depth.skel.h"
void test_async_stack_depth(void)
{
RUN_TESTS(async_stack_depth);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/async_stack_depth.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#define nr_iters 2
void serial_test_bpf_obj_id(void)
{
const __u64 array_magic_value = 0xfaceb00c;
const __u32 array_key = 0;
const char *file = "./test_obj_id.bpf.o";
const char *expected_prog_name = "test_obj_id";
const char *expected_map_name = "test_map_id";
const __u64 nsec_per_sec = 1000000000;
struct bpf_object *objs[nr_iters] = {};
struct bpf_link *links[nr_iters] = {};
struct bpf_program *prog;
int prog_fds[nr_iters], map_fds[nr_iters];
/* +1 to test for the info_len returned by kernel */
struct bpf_prog_info prog_infos[nr_iters + 1];
struct bpf_map_info map_infos[nr_iters + 1];
struct bpf_link_info link_infos[nr_iters + 1];
/* Each prog only uses one map. +1 to test nr_map_ids
* returned by kernel.
*/
__u32 map_ids[nr_iters + 1];
char jited_insns[128], xlated_insns[128], zeros[128], tp_name[128];
__u32 i, next_id, info_len, nr_id_found, duration = 0;
struct timespec real_time_ts, boot_time_ts;
int err = 0;
__u64 array_value;
uid_t my_uid = getuid();
time_t now, load_time;
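	/* id 0 is never a valid object id; all three lookups must fail with ENOENT */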
err = bpf_prog_get_fd_by_id(0);
CHECK(err >= 0 || errno != ENOENT,
"get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
err = bpf_map_get_fd_by_id(0);
CHECK(err >= 0 || errno != ENOENT,
"get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
err = bpf_link_get_fd_by_id(0);
CHECK(err >= 0 || errno != ENOENT,
"get-fd-by-notexist-link-id", "err %d errno %d\n", err, errno);
/* Check bpf_map_get_info_by_fd() */
bzero(zeros, sizeof(zeros));
for (i = 0; i < nr_iters; i++) {
now = time(NULL);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT,
&objs[i], &prog_fds[i]);
/* test_obj_id.o is a dumb prog. It should never fail
* to load.
*/
if (CHECK_FAIL(err))
continue;
/* Insert a magic value to the map */
map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
if (CHECK_FAIL(map_fds[i] < 0))
goto done;
err = bpf_map_update_elem(map_fds[i], &array_key,
&array_magic_value, 0);
if (CHECK_FAIL(err))
goto done;
prog = bpf_object__find_program_by_name(objs[i],
"test_obj_id");
if (CHECK_FAIL(!prog))
goto done;
links[i] = bpf_program__attach(prog);
err = libbpf_get_error(links[i]);
if (CHECK(err, "prog_attach", "prog #%d, err %d\n", i, err)) {
links[i] = NULL;
goto done;
}
/* Check getting map info */
info_len = sizeof(struct bpf_map_info) * 2;
bzero(&map_infos[i], info_len);
err = bpf_map_get_info_by_fd(map_fds[i], &map_infos[i],
&info_len);
if (CHECK(err ||
map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
map_infos[i].key_size != sizeof(__u32) ||
map_infos[i].value_size != sizeof(__u64) ||
map_infos[i].max_entries != 1 ||
map_infos[i].map_flags != 0 ||
info_len != sizeof(struct bpf_map_info) ||
strcmp((char *)map_infos[i].name, expected_map_name),
"get-map-info(fd)",
"err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
err, errno,
map_infos[i].type, BPF_MAP_TYPE_ARRAY,
info_len, sizeof(struct bpf_map_info),
map_infos[i].key_size,
map_infos[i].value_size,
map_infos[i].max_entries,
map_infos[i].map_flags,
map_infos[i].name, expected_map_name))
goto done;
/* Check getting prog info */
info_len = sizeof(struct bpf_prog_info) * 2;
bzero(&prog_infos[i], info_len);
bzero(jited_insns, sizeof(jited_insns));
bzero(xlated_insns, sizeof(xlated_insns));
prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
prog_infos[i].jited_prog_len = sizeof(jited_insns);
prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
prog_infos[i].nr_map_ids = 2;
err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
if (CHECK_FAIL(err))
goto done;
err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
if (CHECK_FAIL(err))
goto done;
err = bpf_prog_get_info_by_fd(prog_fds[i], &prog_infos[i],
&info_len);
load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
+ (prog_infos[i].load_time / nsec_per_sec);
if (CHECK(err ||
prog_infos[i].type != BPF_PROG_TYPE_RAW_TRACEPOINT ||
info_len != sizeof(struct bpf_prog_info) ||
(env.jit_enabled && !prog_infos[i].jited_prog_len) ||
(env.jit_enabled &&
!memcmp(jited_insns, zeros, sizeof(zeros))) ||
!prog_infos[i].xlated_prog_len ||
!memcmp(xlated_insns, zeros, sizeof(zeros)) ||
load_time < now - 60 || load_time > now + 60 ||
prog_infos[i].created_by_uid != my_uid ||
prog_infos[i].nr_map_ids != 1 ||
*(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
strcmp((char *)prog_infos[i].name, expected_prog_name),
"get-prog-info(fd)",
"err %d errno %d i %d type %d(%d) info_len %u(%zu) "
"jit_enabled %d jited_prog_len %u xlated_prog_len %u "
"jited_prog %d xlated_prog %d load_time %lu(%lu) "
"uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) "
"name %s(%s)\n",
err, errno, i,
			  prog_infos[i].type, BPF_PROG_TYPE_RAW_TRACEPOINT,
info_len, sizeof(struct bpf_prog_info),
env.jit_enabled,
prog_infos[i].jited_prog_len,
prog_infos[i].xlated_prog_len,
!!memcmp(jited_insns, zeros, sizeof(zeros)),
!!memcmp(xlated_insns, zeros, sizeof(zeros)),
load_time, now,
prog_infos[i].created_by_uid, my_uid,
prog_infos[i].nr_map_ids, 1,
*(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
prog_infos[i].name, expected_prog_name))
goto done;
/* Check getting link info */
info_len = sizeof(struct bpf_link_info) * 2;
bzero(&link_infos[i], info_len);
link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name);
link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
err = bpf_link_get_info_by_fd(bpf_link__fd(links[i]),
&link_infos[i], &info_len);
if (CHECK(err ||
link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
link_infos[i].prog_id != prog_infos[i].id ||
link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) ||
strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter") ||
info_len != sizeof(struct bpf_link_info),
"get-link-info(fd)",
"err %d errno %d info_len %u(%zu) type %d(%d) id %d "
"prog_id %d (%d) tp_name %s(%s)\n",
err, errno,
info_len, sizeof(struct bpf_link_info),
link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT,
link_infos[i].id,
link_infos[i].prog_id, prog_infos[i].id,
(const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter"))
goto done;
}
/* Check bpf_prog_get_next_id() */
nr_id_found = 0;
next_id = 0;
while (!bpf_prog_get_next_id(next_id, &next_id)) {
struct bpf_prog_info prog_info = {};
__u32 saved_map_id;
int prog_fd;
info_len = sizeof(prog_info);
prog_fd = bpf_prog_get_fd_by_id(next_id);
if (prog_fd < 0 && errno == ENOENT)
/* The bpf_prog is in the dead row */
continue;
if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
"prog_fd %d next_id %d errno %d\n",
prog_fd, next_id, errno))
break;
for (i = 0; i < nr_iters; i++)
if (prog_infos[i].id == next_id)
break;
if (i == nr_iters)
continue;
nr_id_found++;
/* Negative test:
* prog_info.nr_map_ids = 1
* prog_info.map_ids = NULL
*/
prog_info.nr_map_ids = 1;
err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
if (CHECK(!err || errno != EFAULT,
"get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
err, errno, EFAULT))
break;
bzero(&prog_info, sizeof(prog_info));
info_len = sizeof(prog_info);
saved_map_id = *(int *)((long)prog_infos[i].map_ids);
prog_info.map_ids = prog_infos[i].map_ids;
prog_info.nr_map_ids = 2;
err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
prog_infos[i].jited_prog_insns = 0;
prog_infos[i].xlated_prog_insns = 0;
CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
memcmp(&prog_info, &prog_infos[i], info_len) ||
*(int *)(long)prog_info.map_ids != saved_map_id,
"get-prog-info(next_id->fd)",
"err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n",
err, errno, info_len, sizeof(struct bpf_prog_info),
memcmp(&prog_info, &prog_infos[i], info_len),
*(int *)(long)prog_info.map_ids, saved_map_id);
close(prog_fd);
}
CHECK(nr_id_found != nr_iters,
"check total prog id found by get_next_id",
"nr_id_found %u(%u)\n",
nr_id_found, nr_iters);
/* Check bpf_map_get_next_id() */
nr_id_found = 0;
next_id = 0;
while (!bpf_map_get_next_id(next_id, &next_id)) {
struct bpf_map_info map_info = {};
int map_fd;
info_len = sizeof(map_info);
map_fd = bpf_map_get_fd_by_id(next_id);
if (map_fd < 0 && errno == ENOENT)
/* The bpf_map is in the dead row */
continue;
if (CHECK(map_fd < 0, "get-map-fd(next_id)",
"map_fd %d next_id %u errno %d\n",
map_fd, next_id, errno))
break;
for (i = 0; i < nr_iters; i++)
if (map_infos[i].id == next_id)
break;
if (i == nr_iters)
continue;
nr_id_found++;
err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
if (CHECK_FAIL(err))
goto done;
err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
CHECK(err || info_len != sizeof(struct bpf_map_info) ||
memcmp(&map_info, &map_infos[i], info_len) ||
array_value != array_magic_value,
"check get-map-info(next_id->fd)",
"err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n",
err, errno, info_len, sizeof(struct bpf_map_info),
memcmp(&map_info, &map_infos[i], info_len),
array_value, array_magic_value);
close(map_fd);
}
CHECK(nr_id_found != nr_iters,
"check total map id found by get_next_id",
"nr_id_found %u(%u)\n",
nr_id_found, nr_iters);
/* Check bpf_link_get_next_id() */
nr_id_found = 0;
next_id = 0;
while (!bpf_link_get_next_id(next_id, &next_id)) {
struct bpf_link_info link_info;
int link_fd, cmp_res;
info_len = sizeof(link_info);
memset(&link_info, 0, info_len);
link_fd = bpf_link_get_fd_by_id(next_id);
if (link_fd < 0 && errno == ENOENT)
/* The bpf_link is in the dead row */
continue;
if (CHECK(link_fd < 0, "get-link-fd(next_id)",
"link_fd %d next_id %u errno %d\n",
link_fd, next_id, errno))
break;
for (i = 0; i < nr_iters; i++)
if (link_infos[i].id == next_id)
break;
if (i == nr_iters)
continue;
nr_id_found++;
err = bpf_link_get_info_by_fd(link_fd, &link_info, &info_len);
cmp_res = memcmp(&link_info, &link_infos[i],
offsetof(struct bpf_link_info, raw_tracepoint));
CHECK(err || info_len != sizeof(link_info) || cmp_res,
"check get-link-info(next_id->fd)",
"err %d errno %d info_len %u(%zu) memcmp %d\n",
err, errno, info_len, sizeof(struct bpf_link_info),
cmp_res);
close(link_fd);
}
CHECK(nr_id_found != nr_iters,
"check total link id found by get_next_id",
"nr_id_found %u(%u)\n", nr_id_found, nr_iters);
done:
for (i = 0; i < nr_iters; i++) {
bpf_link__destroy(links[i]);
bpf_object__close(objs[i]);
}
}
| linux-master | tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "jeq_infer_not_null_fail.skel.h"
void test_jeq_infer_not_null(void)
{
RUN_TESTS(jeq_infer_not_null_fail);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#define _GNU_SOURCE
#include <sched.h>
#include <pthread.h>
#include <stdbool.h>
#include <bpf/btf.h>
#include <test_progs.h>
#include "test_bpf_ma.skel.h"
void test_test_bpf_ma(void)
{
struct test_bpf_ma *skel;
struct btf *btf;
int i, err;
skel = test_bpf_ma__open();
if (!ASSERT_OK_PTR(skel, "open"))
return;
btf = bpf_object__btf(skel->obj);
if (!ASSERT_OK_PTR(btf, "btf"))
goto out;
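	/* resolve the BTF id of each bin_data_<size> struct and pass it to the prog */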
for (i = 0; i < ARRAY_SIZE(skel->rodata->data_sizes); i++) {
char name[32];
int id;
snprintf(name, sizeof(name), "bin_data_%u", skel->rodata->data_sizes[i]);
id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT);
if (!ASSERT_GT(id, 0, "bin_data"))
goto out;
skel->rodata->data_btf_ids[i] = id;
}
err = test_bpf_ma__load(skel);
if (!ASSERT_OK(err, "load"))
goto out;
err = test_bpf_ma__attach(skel);
if (!ASSERT_OK(err, "attach"))
goto out;
skel->bss->pid = getpid();
usleep(1);
ASSERT_OK(skel->bss->err, "test error");
out:
test_bpf_ma__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
struct bss {
unsigned did_run;
unsigned iters;
unsigned sum;
};
struct rdonly_map_subtest {
const char *subtest_name;
const char *prog_name;
unsigned exp_iters;
unsigned exp_sum;
};
void test_rdonly_maps(void)
{
const char *file = "test_rdonly_maps.bpf.o";
struct rdonly_map_subtest subtests[] = {
{ "skip loop", "skip_loop", 0, 0 },
{ "part loop", "part_loop", 3, 2 + 3 + 4 },
{ "full loop", "full_loop", 4, 2 + 3 + 4 + 5 },
};
int i, err, zero = 0, duration = 0;
struct bpf_link *link = NULL;
struct bpf_program *prog;
struct bpf_map *bss_map;
struct bpf_object *obj;
struct bss bss;
obj = bpf_object__open_file(file, NULL);
if (!ASSERT_OK_PTR(obj, "obj_open"))
return;
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
goto cleanup;
bss_map = bpf_object__find_map_by_name(obj, ".bss");
if (CHECK(!bss_map, "find_bss_map", "failed\n"))
goto cleanup;
for (i = 0; i < ARRAY_SIZE(subtests); i++) {
const struct rdonly_map_subtest *t = &subtests[i];
if (!test__start_subtest(t->subtest_name))
continue;
prog = bpf_object__find_program_by_name(obj, t->prog_name);
if (CHECK(!prog, "find_prog", "prog '%s' not found\n",
t->prog_name))
goto cleanup;
memset(&bss, 0, sizeof(bss));
err = bpf_map_update_elem(bpf_map__fd(bss_map), &zero, &bss, 0);
if (CHECK(err, "set_bss", "failed to set bss data: %d\n", err))
goto cleanup;
link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
if (!ASSERT_OK_PTR(link, "attach_prog"))
goto cleanup;
/* trigger probe */
usleep(1);
bpf_link__destroy(link);
link = NULL;
err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, &bss);
if (CHECK(err, "get_bss", "failed to get bss data: %d\n", err))
goto cleanup;
if (CHECK(bss.did_run == 0, "check_run",
"prog '%s' didn't run?\n", t->prog_name))
goto cleanup;
if (CHECK(bss.iters != t->exp_iters, "check_iters",
"prog '%s' iters: %d, expected: %d\n",
t->prog_name, bss.iters, t->exp_iters))
goto cleanup;
if (CHECK(bss.sum != t->exp_sum, "check_sum",
"prog '%s' sum: %d, expected: %d\n",
t->prog_name, bss.sum, t->exp_sum))
goto cleanup;
}
cleanup:
bpf_link__destroy(link);
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/rdonly_maps.c |
// SPDX-License-Identifier: GPL-2.0
#include <net/if.h>
#include <test_progs.h>
#include <network_helpers.h>
#define LOCAL_NETNS "xdp_dev_bound_only_netns"
static int load_dummy_prog(char *name, __u32 ifindex, __u32 flags)
{
struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN() };
LIBBPF_OPTS(bpf_prog_load_opts, opts);
opts.prog_flags = flags;
opts.prog_ifindex = ifindex;
return bpf_prog_load(BPF_PROG_TYPE_XDP, name, "GPL", insns, ARRAY_SIZE(insns), &opts);
}
/* A test case for bpf_offload_netdev->offload handling bug:
* - create a veth device (does not support offload);
* - create a device bound XDP program with BPF_F_XDP_DEV_BOUND_ONLY flag
* (such programs are not offloaded);
* - create a device bound XDP program without flags (such programs are offloaded).
* This might lead to 'BUG: kernel NULL pointer dereference'.
*/
void test_xdp_dev_bound_only_offdev(void)
{
struct nstoken *tok = NULL;
__u32 ifindex;
int fd1 = -1;
int fd2 = -1;
SYS(out, "ip netns add " LOCAL_NETNS);
tok = open_netns(LOCAL_NETNS);
if (!ASSERT_OK_PTR(tok, "open_netns"))
goto out;
SYS(out, "ip link add eth42 type veth");
ifindex = if_nametoindex("eth42");
if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex")) {
perror("if_nametoindex");
goto out;
}
fd1 = load_dummy_prog("dummy1", ifindex, BPF_F_XDP_DEV_BOUND_ONLY);
if (!ASSERT_GE(fd1, 0, "load_dummy_prog #1")) {
perror("load_dummy_prog #1");
goto out;
}
/* Program with ifindex is considered offloaded, however veth
* does not support offload => error should be reported.
*/
fd2 = load_dummy_prog("dummy2", ifindex, 0);
ASSERT_EQ(fd2, -EINVAL, "load_dummy_prog #2 (offloaded)");
out:
close(fd1);
close(fd2);
close_netns(tok);
/* eth42 was added inside netns, removing the netns will
* also remove eth42 veth pair.
*/
SYS_NOFAIL("ip netns del " LOCAL_NETNS);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_dev_bound_only.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Bytedance */
#include <sys/syscall.h>
#include <test_progs.h>
#include <cgroup_helpers.h>
#include "test_task_under_cgroup.skel.h"
#define FOO "/foo"
void test_task_under_cgroup(void)
{
struct test_task_under_cgroup *skel;
int ret, foo;
pid_t pid;
foo = test__join_cgroup(FOO);
	if (!ASSERT_GE(foo, 0, "cgroup_join_foo"))
return;
skel = test_task_under_cgroup__open();
if (!ASSERT_OK_PTR(skel, "test_task_under_cgroup__open"))
goto cleanup;
skel->rodata->local_pid = getpid();
skel->bss->remote_pid = getpid();
skel->rodata->cgid = get_cgroup_id(FOO);
ret = test_task_under_cgroup__load(skel);
if (!ASSERT_OK(ret, "test_task_under_cgroup__load"))
goto cleanup;
ret = test_task_under_cgroup__attach(skel);
if (!ASSERT_OK(ret, "test_task_under_cgroup__attach"))
goto cleanup;
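	/* Fork a child (it exits immediately); afterwards remote_pid, which was
	 * initialized to our own pid, is expected to have been changed.
	 */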
pid = fork();
if (pid == 0)
exit(0);
ret = (pid == -1);
if (ASSERT_OK(ret, "fork process"))
wait(NULL);
test_task_under_cgroup__detach(skel);
ASSERT_NEQ(skel->bss->remote_pid, skel->rodata->local_pid,
"test task_under_cgroup");
cleanup:
test_task_under_cgroup__destroy(skel);
close(foo);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Google */
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include "test_ksyms_btf.skel.h"
#include "test_ksyms_btf_null_check.skel.h"
#include "test_ksyms_weak.skel.h"
#include "test_ksyms_weak.lskel.h"
#include "test_ksyms_btf_write_check.skel.h"
static int duration;
static void test_basic(void)
{
__u64 runqueues_addr, bpf_prog_active_addr;
__u32 this_rq_cpu;
int this_bpf_prog_active;
struct test_ksyms_btf *skel = NULL;
struct test_ksyms_btf__data *data;
int err;
err = kallsyms_find("runqueues", &runqueues_addr);
if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
return;
if (CHECK(err == -ENOENT, "ksym_find", "symbol 'runqueues' not found\n"))
return;
err = kallsyms_find("bpf_prog_active", &bpf_prog_active_addr);
if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
return;
if (CHECK(err == -ENOENT, "ksym_find", "symbol 'bpf_prog_active' not found\n"))
return;
skel = test_ksyms_btf__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n"))
goto cleanup;
err = test_ksyms_btf__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
/* trigger tracepoint */
usleep(1);
data = skel->data;
CHECK(data->out__runqueues_addr != runqueues_addr, "runqueues_addr",
"got %llu, exp %llu\n",
(unsigned long long)data->out__runqueues_addr,
(unsigned long long)runqueues_addr);
CHECK(data->out__bpf_prog_active_addr != bpf_prog_active_addr, "bpf_prog_active_addr",
"got %llu, exp %llu\n",
(unsigned long long)data->out__bpf_prog_active_addr,
(unsigned long long)bpf_prog_active_addr);
CHECK(data->out__rq_cpu == -1, "rq_cpu",
"got %u, exp != -1\n", data->out__rq_cpu);
CHECK(data->out__bpf_prog_active < 0, "bpf_prog_active",
"got %d, exp >= 0\n", data->out__bpf_prog_active);
CHECK(data->out__cpu_0_rq_cpu != 0, "cpu_rq(0)->cpu",
"got %u, exp 0\n", data->out__cpu_0_rq_cpu);
this_rq_cpu = data->out__this_rq_cpu;
CHECK(this_rq_cpu != data->out__rq_cpu, "this_rq_cpu",
"got %u, exp %u\n", this_rq_cpu, data->out__rq_cpu);
this_bpf_prog_active = data->out__this_bpf_prog_active;
CHECK(this_bpf_prog_active != data->out__bpf_prog_active, "this_bpf_prog_active",
"got %d, exp %d\n", this_bpf_prog_active,
data->out__bpf_prog_active);
cleanup:
test_ksyms_btf__destroy(skel);
}
static void test_null_check(void)
{
struct test_ksyms_btf_null_check *skel;
skel = test_ksyms_btf_null_check__open_and_load();
CHECK(skel, "skel_open", "unexpected load of a prog missing null check\n");
test_ksyms_btf_null_check__destroy(skel);
}
static void test_weak_syms(void)
{
struct test_ksyms_weak *skel;
struct test_ksyms_weak__data *data;
int err;
skel = test_ksyms_weak__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_ksyms_weak__open_and_load"))
return;
err = test_ksyms_weak__attach(skel);
if (!ASSERT_OK(err, "test_ksyms_weak__attach"))
goto cleanup;
/* trigger tracepoint */
usleep(1);
data = skel->data;
ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym");
ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym");
ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym");
ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym");
cleanup:
test_ksyms_weak__destroy(skel);
}
static void test_weak_syms_lskel(void)
{
struct test_ksyms_weak_lskel *skel;
struct test_ksyms_weak_lskel__data *data;
int err;
skel = test_ksyms_weak_lskel__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_ksyms_weak_lskel__open_and_load"))
return;
err = test_ksyms_weak_lskel__attach(skel);
if (!ASSERT_OK(err, "test_ksyms_weak_lskel__attach"))
goto cleanup;
/* trigger tracepoint */
usleep(1);
data = skel->data;
ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym");
ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym");
ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym");
ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym");
cleanup:
test_ksyms_weak_lskel__destroy(skel);
}
static void test_write_check(bool test_handler1)
{
struct test_ksyms_btf_write_check *skel;
skel = test_ksyms_btf_write_check__open();
if (!ASSERT_OK_PTR(skel, "test_ksyms_btf_write_check__open"))
return;
bpf_program__set_autoload(test_handler1 ? skel->progs.handler2 : skel->progs.handler1, false);
ASSERT_ERR(test_ksyms_btf_write_check__load(skel),
"unexpected load of a prog writing to ksym memory\n");
test_ksyms_btf_write_check__destroy(skel);
}
void test_ksyms_btf(void)
{
int percpu_datasec;
struct btf *btf;
btf = libbpf_find_kernel_btf();
if (!ASSERT_OK_PTR(btf, "btf_exists"))
return;
percpu_datasec = btf__find_by_name_kind(btf, ".data..percpu",
BTF_KIND_DATASEC);
btf__free(btf);
if (percpu_datasec < 0) {
printf("%s:SKIP:no PERCPU DATASEC in kernel btf\n",
__func__);
test__skip();
return;
}
if (test__start_subtest("basic"))
test_basic();
if (test__start_subtest("null_check"))
test_null_check();
if (test__start_subtest("weak_ksyms"))
test_weak_syms();
if (test__start_subtest("weak_ksyms_lskel"))
test_weak_syms_lskel();
if (test__start_subtest("write_check1"))
test_write_check(true);
if (test__start_subtest("write_check2"))
test_write_check(false);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/ksyms_btf.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <linux/nbd.h>
void test_raw_tp_writable_reject_nbd_invalid(void)
{
__u32 duration = 0;
char error[4096];
int bpf_fd = -1, tp_fd = -1;
const struct bpf_insn program[] = {
/* r6 is our tp buffer */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
/* one byte beyond the end of the nbd_request struct */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6,
sizeof(struct nbd_request)),
BPF_EXIT_INSN(),
};
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.log_level = 2,
.log_buf = error,
.log_size = sizeof(error),
);
bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2",
program, sizeof(program) / sizeof(struct bpf_insn),
&opts);
if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable load",
"failed: %d errno %d\n", bpf_fd, errno))
return;
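	/* attaching to the writable tracepoint must be rejected: the prog
	 * reads one byte past the end of the nbd_request buffer
	 */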
tp_fd = bpf_raw_tracepoint_open("nbd_send_request", bpf_fd);
if (CHECK(tp_fd >= 0, "bpf_raw_tracepoint_writable open",
"erroneously succeeded\n"))
goto out_bpffd;
close(tp_fd);
out_bpffd:
close(bpf_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <ctype.h>
#include <test_progs.h>
#include <bpf/btf.h>
/*
* Utility function uppercasing an entire string.
*/
static void uppercase(char *s)
{
for (; *s != '\0'; s++)
*s = toupper(*s);
}
/*
* Test case to check that all bpf_attach_type variants are covered by
* libbpf_bpf_attach_type_str.
*/
static void test_libbpf_bpf_attach_type_str(void)
{
struct btf *btf;
const struct btf_type *t;
const struct btf_enum *e;
int i, n, id;
btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
if (!ASSERT_OK_PTR(btf, "btf_parse"))
return;
/* find enum bpf_attach_type and enumerate each value */
id = btf__find_by_name_kind(btf, "bpf_attach_type", BTF_KIND_ENUM);
if (!ASSERT_GT(id, 0, "bpf_attach_type_id"))
goto cleanup;
t = btf__type_by_id(btf, id);
e = btf_enum(t);
n = btf_vlen(t);
for (i = 0; i < n; e++, i++) {
enum bpf_attach_type attach_type = (enum bpf_attach_type)e->val;
const char *attach_type_name;
const char *attach_type_str;
char buf[256];
if (attach_type == __MAX_BPF_ATTACH_TYPE)
continue;
attach_type_name = btf__str_by_offset(btf, e->name_off);
attach_type_str = libbpf_bpf_attach_type_str(attach_type);
ASSERT_OK_PTR(attach_type_str, attach_type_name);
snprintf(buf, sizeof(buf), "BPF_%s", attach_type_str);
uppercase(buf);
ASSERT_STREQ(buf, attach_type_name, "exp_str_value");
}
cleanup:
btf__free(btf);
}
/*
* Test case to check that all bpf_link_type variants are covered by
* libbpf_bpf_link_type_str.
*/
static void test_libbpf_bpf_link_type_str(void)
{
struct btf *btf;
const struct btf_type *t;
const struct btf_enum *e;
int i, n, id;
btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
if (!ASSERT_OK_PTR(btf, "btf_parse"))
return;
/* find enum bpf_link_type and enumerate each value */
id = btf__find_by_name_kind(btf, "bpf_link_type", BTF_KIND_ENUM);
if (!ASSERT_GT(id, 0, "bpf_link_type_id"))
goto cleanup;
t = btf__type_by_id(btf, id);
e = btf_enum(t);
n = btf_vlen(t);
for (i = 0; i < n; e++, i++) {
enum bpf_link_type link_type = (enum bpf_link_type)e->val;
const char *link_type_name;
const char *link_type_str;
char buf[256];
if (link_type == MAX_BPF_LINK_TYPE)
continue;
link_type_name = btf__str_by_offset(btf, e->name_off);
link_type_str = libbpf_bpf_link_type_str(link_type);
ASSERT_OK_PTR(link_type_str, link_type_name);
snprintf(buf, sizeof(buf), "BPF_LINK_TYPE_%s", link_type_str);
uppercase(buf);
ASSERT_STREQ(buf, link_type_name, "exp_str_value");
}
cleanup:
btf__free(btf);
}
/*
* Test case to check that all bpf_map_type variants are covered by
* libbpf_bpf_map_type_str.
*/
static void test_libbpf_bpf_map_type_str(void)
{
struct btf *btf;
const struct btf_type *t;
const struct btf_enum *e;
int i, n, id;
btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
if (!ASSERT_OK_PTR(btf, "btf_parse"))
return;
/* find enum bpf_map_type and enumerate each value */
id = btf__find_by_name_kind(btf, "bpf_map_type", BTF_KIND_ENUM);
if (!ASSERT_GT(id, 0, "bpf_map_type_id"))
goto cleanup;
t = btf__type_by_id(btf, id);
e = btf_enum(t);
n = btf_vlen(t);
for (i = 0; i < n; e++, i++) {
enum bpf_map_type map_type = (enum bpf_map_type)e->val;
const char *map_type_name;
const char *map_type_str;
char buf[256];
map_type_name = btf__str_by_offset(btf, e->name_off);
map_type_str = libbpf_bpf_map_type_str(map_type);
ASSERT_OK_PTR(map_type_str, map_type_name);
snprintf(buf, sizeof(buf), "BPF_MAP_TYPE_%s", map_type_str);
uppercase(buf);
/* Special case for map_type_name BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED
* where it and BPF_MAP_TYPE_CGROUP_STORAGE have the same enum value
* (map_type). For this enum value, libbpf_bpf_map_type_str() picks
* BPF_MAP_TYPE_CGROUP_STORAGE.
*/
if (strcmp(map_type_name, "BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED") == 0)
continue;
ASSERT_STREQ(buf, map_type_name, "exp_str_value");
}
cleanup:
btf__free(btf);
}
/*
* Test case to check that all bpf_prog_type variants are covered by
* libbpf_bpf_prog_type_str.
*/
static void test_libbpf_bpf_prog_type_str(void)
{
struct btf *btf;
const struct btf_type *t;
const struct btf_enum *e;
int i, n, id;
btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
if (!ASSERT_OK_PTR(btf, "btf_parse"))
return;
/* find enum bpf_prog_type and enumerate each value */
id = btf__find_by_name_kind(btf, "bpf_prog_type", BTF_KIND_ENUM);
if (!ASSERT_GT(id, 0, "bpf_prog_type_id"))
goto cleanup;
t = btf__type_by_id(btf, id);
e = btf_enum(t);
n = btf_vlen(t);
for (i = 0; i < n; e++, i++) {
enum bpf_prog_type prog_type = (enum bpf_prog_type)e->val;
const char *prog_type_name;
const char *prog_type_str;
char buf[256];
prog_type_name = btf__str_by_offset(btf, e->name_off);
prog_type_str = libbpf_bpf_prog_type_str(prog_type);
ASSERT_OK_PTR(prog_type_str, prog_type_name);
snprintf(buf, sizeof(buf), "BPF_PROG_TYPE_%s", prog_type_str);
uppercase(buf);
ASSERT_STREQ(buf, prog_type_name, "exp_str_value");
}
cleanup:
btf__free(btf);
}
/*
* Run all libbpf str conversion tests.
*/
void test_libbpf_str(void)
{
if (test__start_subtest("bpf_attach_type_str"))
test_libbpf_bpf_attach_type_str();
if (test__start_subtest("bpf_link_type_str"))
test_libbpf_bpf_link_type_str();
if (test__start_subtest("bpf_map_type_str"))
test_libbpf_bpf_map_type_str();
if (test__start_subtest("bpf_prog_type_str"))
test_libbpf_bpf_prog_type_str();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/libbpf_str.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Check if we can migrate child sockets.
*
* 1. call listen() for 4 server sockets.
* 2. call connect() for 25 client sockets.
* 3. call listen() for 1 server socket. (migration target)
* 4. update a map to migrate all child sockets
* to the last server socket (migrate_map[cookie] = 4)
* 5. call shutdown() for first 4 server sockets
* and migrate the requests in the accept queue
* to the last server socket.
* 6. call listen() for the second server socket.
* 7. call shutdown() for the last server
* and migrate the requests in the accept queue
* to the second server socket.
* 8. call listen() for the last server.
* 9. call shutdown() for the second server
* and migrate the requests in the accept queue
* to the last server socket.
* 10. call accept() for the last server socket.
*
* Author: Kuniyuki Iwashima <[email protected]>
*/
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "test_progs.h"
#include "test_migrate_reuseport.skel.h"
#include "network_helpers.h"
#ifndef TCP_FASTOPEN_CONNECT
#define TCP_FASTOPEN_CONNECT 30
#endif
#define IFINDEX_LO 1
#define NR_SERVERS 5
#define NR_CLIENTS (NR_SERVERS * 5)
#define MIGRATED_TO (NR_SERVERS - 1)
/* fastopenq->max_qlen and sk->sk_max_ack_backlog */
#define QLEN (NR_CLIENTS * 5)
#define MSG "Hello World\0"
#define MSGLEN 12
static struct migrate_reuseport_test_case {
const char *name;
__s64 servers[NR_SERVERS];
__s64 clients[NR_CLIENTS];
struct sockaddr_storage addr;
socklen_t addrlen;
int family;
int state;
bool drop_ack;
bool expire_synack_timer;
bool fastopen;
struct bpf_link *link;
} test_cases[] = {
{
.name = "IPv4 TCP_ESTABLISHED inet_csk_listen_stop",
.family = AF_INET,
.state = BPF_TCP_ESTABLISHED,
.drop_ack = false,
.expire_synack_timer = false,
.fastopen = false,
},
{
.name = "IPv4 TCP_SYN_RECV inet_csk_listen_stop",
.family = AF_INET,
.state = BPF_TCP_SYN_RECV,
.drop_ack = true,
.expire_synack_timer = false,
.fastopen = true,
},
{
.name = "IPv4 TCP_NEW_SYN_RECV reqsk_timer_handler",
.family = AF_INET,
.state = BPF_TCP_NEW_SYN_RECV,
.drop_ack = true,
.expire_synack_timer = true,
.fastopen = false,
},
{
.name = "IPv4 TCP_NEW_SYN_RECV inet_csk_complete_hashdance",
.family = AF_INET,
.state = BPF_TCP_NEW_SYN_RECV,
.drop_ack = true,
.expire_synack_timer = false,
.fastopen = false,
},
{
.name = "IPv6 TCP_ESTABLISHED inet_csk_listen_stop",
.family = AF_INET6,
.state = BPF_TCP_ESTABLISHED,
.drop_ack = false,
.expire_synack_timer = false,
.fastopen = false,
},
{
.name = "IPv6 TCP_SYN_RECV inet_csk_listen_stop",
.family = AF_INET6,
.state = BPF_TCP_SYN_RECV,
.drop_ack = true,
.expire_synack_timer = false,
.fastopen = true,
},
{
.name = "IPv6 TCP_NEW_SYN_RECV reqsk_timer_handler",
.family = AF_INET6,
.state = BPF_TCP_NEW_SYN_RECV,
.drop_ack = true,
.expire_synack_timer = true,
.fastopen = false,
},
{
.name = "IPv6 TCP_NEW_SYN_RECV inet_csk_complete_hashdance",
.family = AF_INET6,
.state = BPF_TCP_NEW_SYN_RECV,
.drop_ack = true,
.expire_synack_timer = false,
.fastopen = false,
}
};
static void init_fds(__s64 fds[], int len)
{
int i;
for (i = 0; i < len; i++)
fds[i] = -1;
}
static void close_fds(__s64 fds[], int len)
{
int i;
for (i = 0; i < len; i++) {
if (fds[i] != -1) {
close(fds[i]);
fds[i] = -1;
}
}
}
static int setup_fastopen(char *buf, int size, int *saved_len, bool restore)
{
int err = 0, fd, len;
fd = open("/proc/sys/net/ipv4/tcp_fastopen", O_RDWR);
if (!ASSERT_NEQ(fd, -1, "open"))
return -1;
if (restore) {
len = write(fd, buf, *saved_len);
if (!ASSERT_EQ(len, *saved_len, "write - restore"))
err = -1;
} else {
*saved_len = read(fd, buf, size);
if (!ASSERT_GE(*saved_len, 1, "read")) {
err = -1;
goto close;
}
err = lseek(fd, 0, SEEK_SET);
if (!ASSERT_OK(err, "lseek"))
goto close;
/* (TFO_CLIENT_ENABLE | TFO_SERVER_ENABLE |
* TFO_CLIENT_NO_COOKIE | TFO_SERVER_COOKIE_NOT_REQD)
*/
len = write(fd, "519", 3);
if (!ASSERT_EQ(len, 3, "write - setup"))
err = -1;
}
close:
close(fd);
return err;
}
static int drop_ack(struct migrate_reuseport_test_case *test_case,
struct test_migrate_reuseport *skel)
{
if (test_case->family == AF_INET)
skel->bss->server_port = ((struct sockaddr_in *)
&test_case->addr)->sin_port;
else
skel->bss->server_port = ((struct sockaddr_in6 *)
&test_case->addr)->sin6_port;
test_case->link = bpf_program__attach_xdp(skel->progs.drop_ack,
IFINDEX_LO);
if (!ASSERT_OK_PTR(test_case->link, "bpf_program__attach_xdp"))
return -1;
return 0;
}
static int pass_ack(struct migrate_reuseport_test_case *test_case)
{
int err;
err = bpf_link__destroy(test_case->link);
if (!ASSERT_OK(err, "bpf_link__destroy"))
return -1;
test_case->link = NULL;
return 0;
}
static int start_servers(struct migrate_reuseport_test_case *test_case,
struct test_migrate_reuseport *skel)
{
int i, err, prog_fd, reuseport = 1, qlen = QLEN;
prog_fd = bpf_program__fd(skel->progs.migrate_reuseport);
make_sockaddr(test_case->family,
test_case->family == AF_INET ? "127.0.0.1" : "::1", 0,
&test_case->addr, &test_case->addrlen);
for (i = 0; i < NR_SERVERS; i++) {
test_case->servers[i] = socket(test_case->family, SOCK_STREAM,
IPPROTO_TCP);
if (!ASSERT_NEQ(test_case->servers[i], -1, "socket"))
return -1;
err = setsockopt(test_case->servers[i], SOL_SOCKET,
SO_REUSEPORT, &reuseport, sizeof(reuseport));
if (!ASSERT_OK(err, "setsockopt - SO_REUSEPORT"))
return -1;
err = bind(test_case->servers[i],
(struct sockaddr *)&test_case->addr,
test_case->addrlen);
if (!ASSERT_OK(err, "bind"))
return -1;
if (i == 0) {
err = setsockopt(test_case->servers[i], SOL_SOCKET,
SO_ATTACH_REUSEPORT_EBPF,
&prog_fd, sizeof(prog_fd));
if (!ASSERT_OK(err,
"setsockopt - SO_ATTACH_REUSEPORT_EBPF"))
return -1;
err = getsockname(test_case->servers[i],
(struct sockaddr *)&test_case->addr,
&test_case->addrlen);
if (!ASSERT_OK(err, "getsockname"))
return -1;
}
if (test_case->fastopen) {
err = setsockopt(test_case->servers[i],
SOL_TCP, TCP_FASTOPEN,
&qlen, sizeof(qlen));
if (!ASSERT_OK(err, "setsockopt - TCP_FASTOPEN"))
return -1;
}
/* All requests will be tied to the first four listeners */
if (i != MIGRATED_TO) {
err = listen(test_case->servers[i], qlen);
if (!ASSERT_OK(err, "listen"))
return -1;
}
}
return 0;
}
static int start_clients(struct migrate_reuseport_test_case *test_case)
{
char buf[MSGLEN] = MSG;
int i, err;
for (i = 0; i < NR_CLIENTS; i++) {
test_case->clients[i] = socket(test_case->family, SOCK_STREAM,
IPPROTO_TCP);
if (!ASSERT_NEQ(test_case->clients[i], -1, "socket"))
return -1;
/* The attached XDP program drops only the final ACK, so
* clients will transition to TCP_ESTABLISHED immediately.
*/
err = settimeo(test_case->clients[i], 100);
if (!ASSERT_OK(err, "settimeo"))
return -1;
if (test_case->fastopen) {
int fastopen = 1;
err = setsockopt(test_case->clients[i], IPPROTO_TCP,
TCP_FASTOPEN_CONNECT, &fastopen,
sizeof(fastopen));
if (!ASSERT_OK(err,
"setsockopt - TCP_FASTOPEN_CONNECT"))
return -1;
}
err = connect(test_case->clients[i],
(struct sockaddr *)&test_case->addr,
test_case->addrlen);
if (!ASSERT_OK(err, "connect"))
return -1;
err = write(test_case->clients[i], buf, MSGLEN);
if (!ASSERT_EQ(err, MSGLEN, "write"))
return -1;
}
return 0;
}
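/* A userspace lookup on the REUSEPORT_SOCKARRAY returns the stored socket's
 * cookie rather than an fd, so the loop below effectively does:
 *
 *   reuseport_map[i] = servers[i];
 *   migrate_map[cookie(servers[i])] = MIGRATED_TO;
 *
 * The BPF program later consults migrate_map[cookie] to pick the target.
 */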
static int update_maps(struct migrate_reuseport_test_case *test_case,
struct test_migrate_reuseport *skel)
{
int i, err, migrated_to = MIGRATED_TO;
int reuseport_map_fd, migrate_map_fd;
__u64 value;
reuseport_map_fd = bpf_map__fd(skel->maps.reuseport_map);
migrate_map_fd = bpf_map__fd(skel->maps.migrate_map);
for (i = 0; i < NR_SERVERS; i++) {
value = (__u64)test_case->servers[i];
err = bpf_map_update_elem(reuseport_map_fd, &i, &value,
BPF_NOEXIST);
if (!ASSERT_OK(err, "bpf_map_update_elem - reuseport_map"))
return -1;
err = bpf_map_lookup_elem(reuseport_map_fd, &i, &value);
if (!ASSERT_OK(err, "bpf_map_lookup_elem - reuseport_map"))
return -1;
err = bpf_map_update_elem(migrate_map_fd, &value, &migrated_to,
BPF_NOEXIST);
if (!ASSERT_OK(err, "bpf_map_update_elem - migrate_map"))
return -1;
}
return 0;
}
static int migrate_dance(struct migrate_reuseport_test_case *test_case)
{
int i, err;
/* Migrate TCP_ESTABLISHED and TCP_SYN_RECV requests
* to the last listener based on eBPF.
*/
for (i = 0; i < MIGRATED_TO; i++) {
err = shutdown(test_case->servers[i], SHUT_RDWR);
if (!ASSERT_OK(err, "shutdown"))
return -1;
}
/* No dance for TCP_NEW_SYN_RECV to migrate based on eBPF */
if (test_case->state == BPF_TCP_NEW_SYN_RECV)
return 0;
/* Note that we use the second listener instead of the
* first one here.
*
	 * The first listener is bind()ed with port 0, and
	 * SOCK_BINDPORT_LOCK is not set in sk_userlocks, so
* calling listen() again will bind() the first listener
* on a new ephemeral port and detach it from the existing
* reuseport group. (See: __inet_bind(), tcp_set_state())
*
* OTOH, the second one is bind()ed with a specific port,
* and SOCK_BINDPORT_LOCK is set. Thus, re-listen() will
* resurrect the listener on the existing reuseport group.
*/
err = listen(test_case->servers[1], QLEN);
if (!ASSERT_OK(err, "listen"))
return -1;
/* Migrate from the last listener to the second one.
*
* All listeners were detached out of the reuseport_map,
* so migration will be done by kernel random pick from here.
*/
err = shutdown(test_case->servers[MIGRATED_TO], SHUT_RDWR);
if (!ASSERT_OK(err, "shutdown"))
return -1;
/* Back to the existing reuseport group */
err = listen(test_case->servers[MIGRATED_TO], QLEN);
if (!ASSERT_OK(err, "listen"))
return -1;
/* Migrate back to the last one from the second one */
err = shutdown(test_case->servers[1], SHUT_RDWR);
if (!ASSERT_OK(err, "shutdown"))
return -1;
return 0;
}
static void count_requests(struct migrate_reuseport_test_case *test_case,
struct test_migrate_reuseport *skel)
{
struct sockaddr_storage addr;
socklen_t len = sizeof(addr);
int err, cnt = 0, client;
char buf[MSGLEN];
err = settimeo(test_case->servers[MIGRATED_TO], 4000);
if (!ASSERT_OK(err, "settimeo"))
goto out;
for (; cnt < NR_CLIENTS; cnt++) {
client = accept(test_case->servers[MIGRATED_TO],
(struct sockaddr *)&addr, &len);
if (!ASSERT_NEQ(client, -1, "accept"))
goto out;
memset(buf, 0, MSGLEN);
read(client, &buf, MSGLEN);
close(client);
if (!ASSERT_STREQ(buf, MSG, "read"))
goto out;
}
out:
ASSERT_EQ(cnt, NR_CLIENTS, "count in userspace");
switch (test_case->state) {
case BPF_TCP_ESTABLISHED:
cnt = skel->bss->migrated_at_close;
break;
case BPF_TCP_SYN_RECV:
cnt = skel->bss->migrated_at_close_fastopen;
break;
case BPF_TCP_NEW_SYN_RECV:
if (test_case->expire_synack_timer)
cnt = skel->bss->migrated_at_send_synack;
else
cnt = skel->bss->migrated_at_recv_ack;
break;
default:
cnt = 0;
}
ASSERT_EQ(cnt, NR_CLIENTS, "count in BPF prog");
}
static void run_test(struct migrate_reuseport_test_case *test_case,
struct test_migrate_reuseport *skel)
{
int err, saved_len;
char buf[16];
skel->bss->migrated_at_close = 0;
skel->bss->migrated_at_close_fastopen = 0;
skel->bss->migrated_at_send_synack = 0;
skel->bss->migrated_at_recv_ack = 0;
init_fds(test_case->servers, NR_SERVERS);
init_fds(test_case->clients, NR_CLIENTS);
if (test_case->fastopen) {
memset(buf, 0, sizeof(buf));
err = setup_fastopen(buf, sizeof(buf), &saved_len, false);
if (!ASSERT_OK(err, "setup_fastopen - setup"))
return;
}
err = start_servers(test_case, skel);
if (!ASSERT_OK(err, "start_servers"))
goto close_servers;
if (test_case->drop_ack) {
/* Drop the final ACK of the 3-way handshake and stick the
* in-flight requests on TCP_SYN_RECV or TCP_NEW_SYN_RECV.
*/
err = drop_ack(test_case, skel);
if (!ASSERT_OK(err, "drop_ack"))
goto close_servers;
}
/* Tie requests to the first four listeners */
err = start_clients(test_case);
if (!ASSERT_OK(err, "start_clients"))
goto close_clients;
err = listen(test_case->servers[MIGRATED_TO], QLEN);
if (!ASSERT_OK(err, "listen"))
goto close_clients;
err = update_maps(test_case, skel);
if (!ASSERT_OK(err, "fill_maps"))
goto close_clients;
/* Migrate the requests in the accept queue only.
* TCP_NEW_SYN_RECV requests are not migrated at this point.
*/
err = migrate_dance(test_case);
if (!ASSERT_OK(err, "migrate_dance"))
goto close_clients;
if (test_case->expire_synack_timer) {
/* Wait for SYN+ACK timers to expire so that
* reqsk_timer_handler() migrates TCP_NEW_SYN_RECV requests.
*/
sleep(1);
}
if (test_case->link) {
/* Resume 3WHS and migrate TCP_NEW_SYN_RECV requests */
err = pass_ack(test_case);
if (!ASSERT_OK(err, "pass_ack"))
goto close_clients;
}
count_requests(test_case, skel);
close_clients:
close_fds(test_case->clients, NR_CLIENTS);
if (test_case->link) {
err = pass_ack(test_case);
ASSERT_OK(err, "pass_ack - clean up");
}
close_servers:
close_fds(test_case->servers, NR_SERVERS);
if (test_case->fastopen) {
err = setup_fastopen(buf, sizeof(buf), &saved_len, true);
ASSERT_OK(err, "setup_fastopen - restore");
}
}
void serial_test_migrate_reuseport(void)
{
struct test_migrate_reuseport *skel;
int i;
skel = test_migrate_reuseport__open_and_load();
if (!ASSERT_OK_PTR(skel, "open_and_load"))
return;
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
test__start_subtest(test_cases[i].name);
run_test(&test_cases[i], skel);
}
test_migrate_reuseport__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include <net/if.h>
#include "empty_skb.skel.h"
void test_empty_skb(void)
{
LIBBPF_OPTS(bpf_test_run_opts, tattr);
struct empty_skb *bpf_obj = NULL;
struct nstoken *tok = NULL;
struct bpf_program *prog;
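	/* eth_hlen is a minimal ETH_HLEN(14)-byte frame and eth_hlen_pp is
	 * one byte longer; the tests only care about frame length, so the
	 * buffer contents are left uninitialized.
	 */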
char eth_hlen_pp[15];
char eth_hlen[14];
int veth_ifindex;
int ipip_ifindex;
int err;
int i;
struct {
const char *msg;
const void *data_in;
__u32 data_size_in;
int *ifindex;
int err;
int ret;
int lwt_egress_ret; /* expected retval at lwt/egress */
bool success_on_tc;
} tests[] = {
/* Empty packets are always rejected. */
{
/* BPF_PROG_RUN ETH_HLEN size check */
.msg = "veth empty ingress packet",
.data_in = NULL,
.data_size_in = 0,
.ifindex = &veth_ifindex,
.err = -EINVAL,
},
{
/* BPF_PROG_RUN ETH_HLEN size check */
.msg = "ipip empty ingress packet",
.data_in = NULL,
.data_size_in = 0,
.ifindex = &ipip_ifindex,
.err = -EINVAL,
},
/* ETH_HLEN-sized packets:
* - can not be redirected at LWT_XMIT
* - can be redirected at TC to non-tunneling dest
*/
{
/* __bpf_redirect_common */
.msg = "veth ETH_HLEN packet ingress",
.data_in = eth_hlen,
.data_size_in = sizeof(eth_hlen),
.ifindex = &veth_ifindex,
.ret = -ERANGE,
.lwt_egress_ret = -ERANGE,
.success_on_tc = true,
},
{
/* __bpf_redirect_no_mac
*
* lwt: skb->len=0 <= skb_network_offset=0
* tc: skb->len=14 <= skb_network_offset=14
*/
.msg = "ipip ETH_HLEN packet ingress",
.data_in = eth_hlen,
.data_size_in = sizeof(eth_hlen),
.ifindex = &ipip_ifindex,
.ret = -ERANGE,
.lwt_egress_ret = -ERANGE,
},
/* ETH_HLEN+1-sized packet should be redirected. */
{
.msg = "veth ETH_HLEN+1 packet ingress",
.data_in = eth_hlen_pp,
.data_size_in = sizeof(eth_hlen_pp),
.ifindex = &veth_ifindex,
.lwt_egress_ret = 1, /* veth_xmit NET_XMIT_DROP */
},
{
.msg = "ipip ETH_HLEN+1 packet ingress",
.data_in = eth_hlen_pp,
.data_size_in = sizeof(eth_hlen_pp),
.ifindex = &ipip_ifindex,
},
};
SYS(out, "ip netns add empty_skb");
tok = open_netns("empty_skb");
SYS(out, "ip link add veth0 type veth peer veth1");
SYS(out, "ip link set dev veth0 up");
SYS(out, "ip link set dev veth1 up");
SYS(out, "ip addr add 10.0.0.1/8 dev veth0");
SYS(out, "ip addr add 10.0.0.2/8 dev veth1");
veth_ifindex = if_nametoindex("veth0");
SYS(out, "ip link add ipip0 type ipip local 10.0.0.1 remote 10.0.0.2");
SYS(out, "ip link set ipip0 up");
SYS(out, "ip addr add 192.168.1.1/16 dev ipip0");
ipip_ifindex = if_nametoindex("ipip0");
bpf_obj = empty_skb__open_and_load();
if (!ASSERT_OK_PTR(bpf_obj, "open skeleton"))
goto out;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
bpf_object__for_each_program(prog, bpf_obj->obj) {
bool at_egress = strstr(bpf_program__name(prog), "egress") != NULL;
bool at_tc = !strncmp(bpf_program__section_name(prog), "tc", 2);
int expected_ret;
char buf[128];
expected_ret = at_egress && !at_tc ? tests[i].lwt_egress_ret : tests[i].ret;
tattr.data_in = tests[i].data_in;
tattr.data_size_in = tests[i].data_size_in;
tattr.data_size_out = 0;
bpf_obj->bss->ifindex = *tests[i].ifindex;
bpf_obj->bss->ret = 0;
err = bpf_prog_test_run_opts(bpf_program__fd(prog), &tattr);
sprintf(buf, "err: %s [%s]", tests[i].msg, bpf_program__name(prog));
if (at_tc && tests[i].success_on_tc)
ASSERT_GE(err, 0, buf);
else
ASSERT_EQ(err, tests[i].err, buf);
sprintf(buf, "ret: %s [%s]", tests[i].msg, bpf_program__name(prog));
if (at_tc && tests[i].success_on_tc)
ASSERT_GE(bpf_obj->bss->ret, 0, buf);
else
ASSERT_EQ(bpf_obj->bss->ret, expected_ret, buf);
}
}
out:
if (bpf_obj)
empty_skb__destroy(bpf_obj);
if (tok)
close_netns(tok);
SYS_NOFAIL("ip netns del empty_skb");
}
| linux-master | tools/testing/selftests/bpf/prog_tests/empty_skb.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
// Copyright (c) 2020 Cloudflare
/*
* Test BPF attach point for INET socket lookup (BPF_SK_LOOKUP).
*
* Tests exercise:
* - attaching/detaching/querying programs to BPF_SK_LOOKUP hook,
* - redirecting socket lookup to a socket selected by BPF program,
* - failing a socket lookup on BPF program's request,
* - error scenarios for selecting a socket from BPF program,
* - accessing BPF program context,
* - attaching and running multiple BPF programs.
*
* Tests run in a dedicated network namespace.
*/
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <error.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include "test_progs.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "testing_helpers.h"
#include "test_sk_lookup.skel.h"
/* External (address, port) pairs the client sends packets to. */
#define EXT_IP4 "127.0.0.1"
#define EXT_IP6 "fd00::1"
#define EXT_PORT 7007
/* Internal (address, port) pairs the server listens/receives at. */
#define INT_IP4 "127.0.0.2"
#define INT_IP4_V6 "::ffff:127.0.0.2"
#define INT_IP6 "fd00::2"
#define INT_PORT 8008
#define IO_TIMEOUT_SEC 3
enum server {
SERVER_A = 0,
SERVER_B = 1,
MAX_SERVERS,
};
enum {
PROG1 = 0,
PROG2,
};
struct inet_addr {
const char *ip;
unsigned short port;
};
struct test {
const char *desc;
struct bpf_program *lookup_prog;
struct bpf_program *reuseport_prog;
struct bpf_map *sock_map;
int sotype;
struct inet_addr connect_to;
struct inet_addr listen_at;
enum server accept_on;
bool reuseport_has_conns; /* Add a connected socket to reuseport group */
};
static __u32 duration; /* for CHECK macro */
static bool is_ipv6(const char *ip)
{
return !!strchr(ip, ':');
}
static int attach_reuseport(int sock_fd, struct bpf_program *reuseport_prog)
{
int err, prog_fd;
prog_fd = bpf_program__fd(reuseport_prog);
if (prog_fd < 0) {
errno = -prog_fd;
return -1;
}
err = setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
&prog_fd, sizeof(prog_fd));
if (err)
return -1;
return 0;
}
static socklen_t inetaddr_len(const struct sockaddr_storage *addr)
{
return (addr->ss_family == AF_INET ? sizeof(struct sockaddr_in) :
addr->ss_family == AF_INET6 ? sizeof(struct sockaddr_in6) : 0);
}
static int make_socket(int sotype, const char *ip, int port,
struct sockaddr_storage *addr)
{
struct timeval timeo = { .tv_sec = IO_TIMEOUT_SEC };
int err, family, fd;
family = is_ipv6(ip) ? AF_INET6 : AF_INET;
err = make_sockaddr(family, ip, port, addr, NULL);
if (CHECK(err, "make_address", "failed\n"))
return -1;
fd = socket(addr->ss_family, sotype, 0);
if (CHECK(fd < 0, "socket", "failed\n")) {
log_err("failed to make socket");
return -1;
}
err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
if (CHECK(err, "setsockopt(SO_SNDTIMEO)", "failed\n")) {
log_err("failed to set SNDTIMEO");
close(fd);
return -1;
}
err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
if (CHECK(err, "setsockopt(SO_RCVTIMEO)", "failed\n")) {
log_err("failed to set RCVTIMEO");
close(fd);
return -1;
}
return fd;
}
static int make_server(int sotype, const char *ip, int port,
struct bpf_program *reuseport_prog)
{
struct sockaddr_storage addr = {0};
const int one = 1;
int err, fd = -1;
fd = make_socket(sotype, ip, port, &addr);
if (fd < 0)
return -1;
/* Enabled for UDPv6 sockets for IPv4-mapped IPv6 to work. */
if (sotype == SOCK_DGRAM) {
err = setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &one,
sizeof(one));
if (CHECK(err, "setsockopt(IP_RECVORIGDSTADDR)", "failed\n")) {
log_err("failed to enable IP_RECVORIGDSTADDR");
goto fail;
}
}
if (sotype == SOCK_DGRAM && addr.ss_family == AF_INET6) {
err = setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one,
sizeof(one));
if (CHECK(err, "setsockopt(IPV6_RECVORIGDSTADDR)", "failed\n")) {
log_err("failed to enable IPV6_RECVORIGDSTADDR");
goto fail;
}
}
if (sotype == SOCK_STREAM) {
err = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one,
sizeof(one));
if (CHECK(err, "setsockopt(SO_REUSEADDR)", "failed\n")) {
log_err("failed to enable SO_REUSEADDR");
goto fail;
}
}
if (reuseport_prog) {
err = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one,
sizeof(one));
if (CHECK(err, "setsockopt(SO_REUSEPORT)", "failed\n")) {
log_err("failed to enable SO_REUSEPORT");
goto fail;
}
}
err = bind(fd, (void *)&addr, inetaddr_len(&addr));
if (CHECK(err, "bind", "failed\n")) {
log_err("failed to bind listen socket");
goto fail;
}
if (sotype == SOCK_STREAM) {
err = listen(fd, SOMAXCONN);
if (CHECK(err, "make_server", "listen")) {
log_err("failed to listen on port %d", port);
goto fail;
}
}
/* Late attach reuseport prog so we can have one init path */
if (reuseport_prog) {
err = attach_reuseport(fd, reuseport_prog);
if (CHECK(err, "attach_reuseport", "failed\n")) {
log_err("failed to attach reuseport prog");
goto fail;
}
}
return fd;
fail:
close(fd);
return -1;
}
static int make_client(int sotype, const char *ip, int port)
{
struct sockaddr_storage addr = {0};
int err, fd;
fd = make_socket(sotype, ip, port, &addr);
if (fd < 0)
return -1;
err = connect(fd, (void *)&addr, inetaddr_len(&addr));
if (CHECK(err, "make_client", "connect")) {
log_err("failed to connect client socket");
goto fail;
}
return fd;
fail:
close(fd);
return -1;
}
static __u64 socket_cookie(int fd)
{
__u64 cookie;
socklen_t cookie_len = sizeof(cookie);
if (CHECK(getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len) < 0,
"getsockopt(SO_COOKIE)", "%s\n", strerror(errno)))
return 0;
return cookie;
}
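/* Build a struct bpf_sk_lookup context for BPF_PROG_RUN. Note the UAPI
 * asymmetry mirrored below: local_port is in host byte order, while
 * remote_port is carried in network byte order.
 */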
static int fill_sk_lookup_ctx(struct bpf_sk_lookup *ctx, const char *local_ip, __u16 local_port,
const char *remote_ip, __u16 remote_port)
{
void *local, *remote;
int err;
memset(ctx, 0, sizeof(*ctx));
ctx->local_port = local_port;
ctx->remote_port = htons(remote_port);
if (is_ipv6(local_ip)) {
ctx->family = AF_INET6;
local = &ctx->local_ip6[0];
remote = &ctx->remote_ip6[0];
} else {
ctx->family = AF_INET;
local = &ctx->local_ip4;
remote = &ctx->remote_ip4;
}
err = inet_pton(ctx->family, local_ip, local);
if (CHECK(err != 1, "inet_pton", "local_ip failed\n"))
return 1;
err = inet_pton(ctx->family, remote_ip, remote);
if (CHECK(err != 1, "inet_pton", "remote_ip failed\n"))
return 1;
return 0;
}
static int send_byte(int fd)
{
ssize_t n;
errno = 0;
n = send(fd, "a", 1, 0);
if (CHECK(n <= 0, "send_byte", "send")) {
log_err("failed/partial send");
return -1;
}
return 0;
}
static int recv_byte(int fd)
{
char buf[1];
ssize_t n;
n = recv(fd, buf, sizeof(buf), 0);
if (CHECK(n <= 0, "recv_byte", "recv")) {
log_err("failed/partial recv");
return -1;
}
return 0;
}
static int tcp_recv_send(int server_fd)
{
char buf[1];
int ret, fd;
ssize_t n;
fd = accept(server_fd, NULL, NULL);
if (CHECK(fd < 0, "accept", "failed\n")) {
log_err("failed to accept");
return -1;
}
n = recv(fd, buf, sizeof(buf), 0);
if (CHECK(n <= 0, "recv", "failed\n")) {
log_err("failed/partial recv");
ret = -1;
goto close;
}
n = send(fd, buf, n, 0);
if (CHECK(n <= 0, "send", "failed\n")) {
log_err("failed/partial send");
ret = -1;
goto close;
}
ret = 0;
close:
close(fd);
return ret;
}
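/* Rewrite an IPv4 sockaddr in place as the equivalent IPv4-mapped IPv6
 * address (::ffff:a.b.c.d), preserving the port.
 */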
static void v4_to_v6(struct sockaddr_storage *ss)
{
struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)ss;
struct sockaddr_in v4 = *(struct sockaddr_in *)ss;
v6->sin6_family = AF_INET6;
v6->sin6_port = v4.sin_port;
v6->sin6_addr.s6_addr[10] = 0xff;
v6->sin6_addr.s6_addr[11] = 0xff;
memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4);
memset(&v6->sin6_addr.s6_addr[0], 0, 10);
}
static int udp_recv_send(int server_fd)
{
char cmsg_buf[CMSG_SPACE(sizeof(struct sockaddr_storage))];
struct sockaddr_storage _src_addr = { 0 };
struct sockaddr_storage *src_addr = &_src_addr;
struct sockaddr_storage *dst_addr = NULL;
struct msghdr msg = { 0 };
struct iovec iov = { 0 };
struct cmsghdr *cm;
char buf[1];
int ret, fd;
ssize_t n;
iov.iov_base = buf;
iov.iov_len = sizeof(buf);
msg.msg_name = src_addr;
msg.msg_namelen = sizeof(*src_addr);
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
msg.msg_control = cmsg_buf;
msg.msg_controllen = sizeof(cmsg_buf);
errno = 0;
n = recvmsg(server_fd, &msg, 0);
if (CHECK(n <= 0, "recvmsg", "failed\n")) {
log_err("failed to receive");
return -1;
}
if (CHECK(msg.msg_flags & MSG_CTRUNC, "recvmsg", "truncated cmsg\n"))
return -1;
for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
if ((cm->cmsg_level == SOL_IP &&
cm->cmsg_type == IP_ORIGDSTADDR) ||
(cm->cmsg_level == SOL_IPV6 &&
cm->cmsg_type == IPV6_ORIGDSTADDR)) {
dst_addr = (struct sockaddr_storage *)CMSG_DATA(cm);
break;
}
log_err("warning: ignored cmsg at level %d type %d",
cm->cmsg_level, cm->cmsg_type);
}
if (CHECK(!dst_addr, "recvmsg", "missing ORIGDSTADDR\n"))
return -1;
/* Server socket bound to IPv4-mapped IPv6 address */
if (src_addr->ss_family == AF_INET6 &&
dst_addr->ss_family == AF_INET) {
v4_to_v6(dst_addr);
}
/* Reply from original destination address. */
fd = socket(dst_addr->ss_family, SOCK_DGRAM, 0);
if (CHECK(fd < 0, "socket", "failed\n")) {
log_err("failed to create tx socket");
return -1;
}
ret = bind(fd, (struct sockaddr *)dst_addr, sizeof(*dst_addr));
if (CHECK(ret, "bind", "failed\n")) {
log_err("failed to bind tx socket");
goto out;
}
msg.msg_control = NULL;
msg.msg_controllen = 0;
n = sendmsg(fd, &msg, 0);
if (CHECK(n <= 0, "sendmsg", "failed\n")) {
log_err("failed to send echo reply");
ret = -1;
goto out;
}
ret = 0;
out:
close(fd);
return ret;
}
static int tcp_echo_test(int client_fd, int server_fd)
{
int err;
err = send_byte(client_fd);
if (err)
return -1;
err = tcp_recv_send(server_fd);
if (err)
return -1;
err = recv_byte(client_fd);
if (err)
return -1;
return 0;
}
static int udp_echo_test(int client_fd, int server_fd)
{
int err;
err = send_byte(client_fd);
if (err)
return -1;
err = udp_recv_send(server_fd);
if (err)
return -1;
err = recv_byte(client_fd);
if (err)
return -1;
return 0;
}
static struct bpf_link *attach_lookup_prog(struct bpf_program *prog)
{
struct bpf_link *link;
int net_fd;
net_fd = open("/proc/self/ns/net", O_RDONLY);
if (CHECK(net_fd < 0, "open", "failed\n")) {
log_err("failed to open /proc/self/ns/net");
return NULL;
}
link = bpf_program__attach_netns(prog, net_fd);
if (!ASSERT_OK_PTR(link, "bpf_program__attach_netns")) {
errno = -PTR_ERR(link);
log_err("failed to attach program '%s' to netns",
bpf_program__name(prog));
link = NULL;
}
close(net_fd);
return link;
}
static int update_lookup_map(struct bpf_map *map, int index, int sock_fd)
{
int err, map_fd;
uint64_t value;
map_fd = bpf_map__fd(map);
if (CHECK(map_fd < 0, "bpf_map__fd", "failed\n")) {
errno = -map_fd;
log_err("failed to get map FD");
return -1;
}
value = (uint64_t)sock_fd;
err = bpf_map_update_elem(map_fd, &index, &value, BPF_NOEXIST);
if (CHECK(err, "bpf_map_update_elem", "failed\n")) {
log_err("failed to update redir_map @ %d", index);
return -1;
}
return 0;
}
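/* Attach three lookup programs to the current netns and verify that
 * bpf_prog_query() reports them in attachment order. After detaching the
 * first link, its prog id must still be readable while netns_ino is zeroed.
 */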
static void query_lookup_prog(struct test_sk_lookup *skel)
{
struct bpf_link *link[3] = {};
struct bpf_link_info info;
__u32 attach_flags = 0;
__u32 prog_ids[3] = {};
__u32 prog_cnt = 3;
__u32 prog_id;
int net_fd;
int err;
net_fd = open("/proc/self/ns/net", O_RDONLY);
if (CHECK(net_fd < 0, "open", "failed\n")) {
log_err("failed to open /proc/self/ns/net");
return;
}
link[0] = attach_lookup_prog(skel->progs.lookup_pass);
if (!link[0])
goto close;
link[1] = attach_lookup_prog(skel->progs.lookup_pass);
if (!link[1])
goto detach;
link[2] = attach_lookup_prog(skel->progs.lookup_drop);
if (!link[2])
goto detach;
err = bpf_prog_query(net_fd, BPF_SK_LOOKUP, 0 /* query flags */,
&attach_flags, prog_ids, &prog_cnt);
if (CHECK(err, "bpf_prog_query", "failed\n")) {
log_err("failed to query lookup prog");
goto detach;
}
errno = 0;
if (CHECK(attach_flags != 0, "bpf_prog_query",
"wrong attach_flags on query: %u", attach_flags))
goto detach;
if (CHECK(prog_cnt != 3, "bpf_prog_query",
"wrong program count on query: %u", prog_cnt))
goto detach;
prog_id = link_info_prog_id(link[0], &info);
CHECK(prog_ids[0] != prog_id, "bpf_prog_query",
"invalid program #0 id on query: %u != %u\n",
prog_ids[0], prog_id);
CHECK(info.netns.netns_ino == 0, "netns_ino",
"unexpected netns_ino: %u\n", info.netns.netns_ino);
prog_id = link_info_prog_id(link[1], &info);
CHECK(prog_ids[1] != prog_id, "bpf_prog_query",
"invalid program #1 id on query: %u != %u\n",
prog_ids[1], prog_id);
CHECK(info.netns.netns_ino == 0, "netns_ino",
"unexpected netns_ino: %u\n", info.netns.netns_ino);
prog_id = link_info_prog_id(link[2], &info);
CHECK(prog_ids[2] != prog_id, "bpf_prog_query",
"invalid program #2 id on query: %u != %u\n",
prog_ids[2], prog_id);
CHECK(info.netns.netns_ino == 0, "netns_ino",
"unexpected netns_ino: %u\n", info.netns.netns_ino);
err = bpf_link__detach(link[0]);
if (CHECK(err, "link_detach", "failed %d\n", err))
goto detach;
/* prog id is still there, but netns_ino is zeroed out */
prog_id = link_info_prog_id(link[0], &info);
CHECK(prog_ids[0] != prog_id, "bpf_prog_query",
"invalid program #0 id on query: %u != %u\n",
prog_ids[0], prog_id);
CHECK(info.netns.netns_ino != 0, "netns_ino",
"unexpected netns_ino: %u\n", info.netns.netns_ino);
detach:
if (link[2])
bpf_link__destroy(link[2]);
if (link[1])
bpf_link__destroy(link[1]);
if (link[0])
bpf_link__destroy(link[0]);
close:
close(net_fd);
}
static void run_lookup_prog(const struct test *t)
{
int server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 };
int client_fd, reuse_conn_fd = -1;
struct bpf_link *lookup_link;
int i, err;
lookup_link = attach_lookup_prog(t->lookup_prog);
if (!lookup_link)
return;
for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
server_fds[i] = make_server(t->sotype, t->listen_at.ip,
t->listen_at.port,
t->reuseport_prog);
if (server_fds[i] < 0)
goto close;
err = update_lookup_map(t->sock_map, i, server_fds[i]);
if (err)
goto close;
/* want just one server for non-reuseport test */
if (!t->reuseport_prog)
break;
}
/* Regular UDP socket lookup with reuseport behaves
* differently when reuseport group contains connected
* sockets. Check that adding a connected UDP socket to the
* reuseport group does not affect how reuseport works with
* BPF socket lookup.
*/
if (t->reuseport_has_conns) {
struct sockaddr_storage addr = {};
socklen_t len = sizeof(addr);
/* Add an extra socket to reuseport group */
reuse_conn_fd = make_server(t->sotype, t->listen_at.ip,
t->listen_at.port,
t->reuseport_prog);
if (reuse_conn_fd < 0)
goto close;
/* Connect the extra socket to itself */
err = getsockname(reuse_conn_fd, (void *)&addr, &len);
if (CHECK(err, "getsockname", "errno %d\n", errno))
goto close;
err = connect(reuse_conn_fd, (void *)&addr, len);
if (CHECK(err, "connect", "errno %d\n", errno))
goto close;
}
client_fd = make_client(t->sotype, t->connect_to.ip, t->connect_to.port);
if (client_fd < 0)
goto close;
if (t->sotype == SOCK_STREAM)
tcp_echo_test(client_fd, server_fds[t->accept_on]);
else
udp_echo_test(client_fd, server_fds[t->accept_on]);
close(client_fd);
close:
if (reuse_conn_fd != -1)
close(reuse_conn_fd);
for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
if (server_fds[i] != -1)
close(server_fds[i]);
}
bpf_link__destroy(lookup_link);
}
static void test_redirect_lookup(struct test_sk_lookup *skel)
{
const struct test tests[] = {
{
.desc = "TCP IPv4 redir port",
.lookup_prog = skel->progs.redir_port,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { EXT_IP4, INT_PORT },
},
{
.desc = "TCP IPv4 redir addr",
.lookup_prog = skel->progs.redir_ip4,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { INT_IP4, EXT_PORT },
},
{
.desc = "TCP IPv4 redir with reuseport",
.lookup_prog = skel->progs.select_sock_a,
.reuseport_prog = skel->progs.select_sock_b,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { INT_IP4, INT_PORT },
.accept_on = SERVER_B,
},
{
.desc = "TCP IPv4 redir skip reuseport",
.lookup_prog = skel->progs.select_sock_a_no_reuseport,
.reuseport_prog = skel->progs.select_sock_b,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { INT_IP4, INT_PORT },
.accept_on = SERVER_A,
},
{
.desc = "TCP IPv6 redir port",
.lookup_prog = skel->progs.redir_port,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { EXT_IP6, INT_PORT },
},
{
.desc = "TCP IPv6 redir addr",
.lookup_prog = skel->progs.redir_ip6,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { INT_IP6, EXT_PORT },
},
{
.desc = "TCP IPv4->IPv6 redir port",
.lookup_prog = skel->progs.redir_port,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { INT_IP4_V6, INT_PORT },
},
{
.desc = "TCP IPv6 redir with reuseport",
.lookup_prog = skel->progs.select_sock_a,
.reuseport_prog = skel->progs.select_sock_b,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { INT_IP6, INT_PORT },
.accept_on = SERVER_B,
},
{
.desc = "TCP IPv6 redir skip reuseport",
.lookup_prog = skel->progs.select_sock_a_no_reuseport,
.reuseport_prog = skel->progs.select_sock_b,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { INT_IP6, INT_PORT },
.accept_on = SERVER_A,
},
{
.desc = "UDP IPv4 redir port",
.lookup_prog = skel->progs.redir_port,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { EXT_IP4, INT_PORT },
},
{
.desc = "UDP IPv4 redir addr",
.lookup_prog = skel->progs.redir_ip4,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { INT_IP4, EXT_PORT },
},
{
.desc = "UDP IPv4 redir with reuseport",
.lookup_prog = skel->progs.select_sock_a,
.reuseport_prog = skel->progs.select_sock_b,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { INT_IP4, INT_PORT },
.accept_on = SERVER_B,
},
{
.desc = "UDP IPv4 redir and reuseport with conns",
.lookup_prog = skel->progs.select_sock_a,
.reuseport_prog = skel->progs.select_sock_b,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { INT_IP4, INT_PORT },
.accept_on = SERVER_B,
.reuseport_has_conns = true,
},
{
.desc = "UDP IPv4 redir skip reuseport",
.lookup_prog = skel->progs.select_sock_a_no_reuseport,
.reuseport_prog = skel->progs.select_sock_b,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { INT_IP4, INT_PORT },
.accept_on = SERVER_A,
},
{
.desc = "UDP IPv6 redir port",
.lookup_prog = skel->progs.redir_port,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { EXT_IP6, INT_PORT },
},
{
.desc = "UDP IPv6 redir addr",
.lookup_prog = skel->progs.redir_ip6,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { INT_IP6, EXT_PORT },
},
{
.desc = "UDP IPv4->IPv6 redir port",
.lookup_prog = skel->progs.redir_port,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_DGRAM,
.listen_at = { INT_IP4_V6, INT_PORT },
.connect_to = { EXT_IP4, EXT_PORT },
},
{
.desc = "UDP IPv6 redir and reuseport",
.lookup_prog = skel->progs.select_sock_a,
.reuseport_prog = skel->progs.select_sock_b,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { INT_IP6, INT_PORT },
.accept_on = SERVER_B,
},
{
.desc = "UDP IPv6 redir and reuseport with conns",
.lookup_prog = skel->progs.select_sock_a,
.reuseport_prog = skel->progs.select_sock_b,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { INT_IP6, INT_PORT },
.accept_on = SERVER_B,
.reuseport_has_conns = true,
},
{
.desc = "UDP IPv6 redir skip reuseport",
.lookup_prog = skel->progs.select_sock_a_no_reuseport,
.reuseport_prog = skel->progs.select_sock_b,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { INT_IP6, INT_PORT },
.accept_on = SERVER_A,
},
};
const struct test *t;
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
if (test__start_subtest(t->desc))
run_lookup_prog(t);
}
}
static void drop_on_lookup(const struct test *t)
{
struct sockaddr_storage dst = {};
int client_fd, server_fd, err;
struct bpf_link *lookup_link;
ssize_t n;
lookup_link = attach_lookup_prog(t->lookup_prog);
if (!lookup_link)
return;
server_fd = make_server(t->sotype, t->listen_at.ip, t->listen_at.port,
t->reuseport_prog);
if (server_fd < 0)
goto detach;
client_fd = make_socket(t->sotype, t->connect_to.ip,
t->connect_to.port, &dst);
if (client_fd < 0)
goto close_srv;
err = connect(client_fd, (void *)&dst, inetaddr_len(&dst));
if (t->sotype == SOCK_DGRAM) {
err = send_byte(client_fd);
if (err)
goto close_all;
/* Read out asynchronous error */
n = recv(client_fd, NULL, 0, 0);
err = n == -1;
}
if (CHECK(!err || errno != ECONNREFUSED, "connect",
"unexpected success or error\n"))
log_err("expected ECONNREFUSED on connect");
close_all:
close(client_fd);
close_srv:
close(server_fd);
detach:
bpf_link__destroy(lookup_link);
}
static void test_drop_on_lookup(struct test_sk_lookup *skel)
{
const struct test tests[] = {
{
.desc = "TCP IPv4 drop on lookup",
.lookup_prog = skel->progs.lookup_drop,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { EXT_IP4, EXT_PORT },
},
{
.desc = "TCP IPv6 drop on lookup",
.lookup_prog = skel->progs.lookup_drop,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { EXT_IP6, EXT_PORT },
},
{
.desc = "UDP IPv4 drop on lookup",
.lookup_prog = skel->progs.lookup_drop,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { EXT_IP4, EXT_PORT },
},
{
.desc = "UDP IPv6 drop on lookup",
.lookup_prog = skel->progs.lookup_drop,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { EXT_IP6, INT_PORT },
},
		/* These entries use the check_ifindex program, which drops the
		 * packet when the lookup runs with ingress ifindex 1 (loopback),
		 * so a refused connection confirms the ifindex field in the
		 * lookup context was populated correctly.
		 */
{
.desc = "TCP IPv4 drop on valid ifindex",
.lookup_prog = skel->progs.check_ifindex,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { EXT_IP4, EXT_PORT },
},
{
.desc = "TCP IPv6 drop on valid ifindex",
.lookup_prog = skel->progs.check_ifindex,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { EXT_IP6, EXT_PORT },
},
{
.desc = "UDP IPv4 drop on valid ifindex",
.lookup_prog = skel->progs.check_ifindex,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { EXT_IP4, EXT_PORT },
},
{
.desc = "UDP IPv6 drop on valid ifindex",
.lookup_prog = skel->progs.check_ifindex,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { EXT_IP6, EXT_PORT },
},
};
const struct test *t;
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
if (test__start_subtest(t->desc))
drop_on_lookup(t);
}
}
static void drop_on_reuseport(const struct test *t)
{
struct sockaddr_storage dst = { 0 };
int client, server1, server2, err;
struct bpf_link *lookup_link;
ssize_t n;
lookup_link = attach_lookup_prog(t->lookup_prog);
if (!lookup_link)
return;
server1 = make_server(t->sotype, t->listen_at.ip, t->listen_at.port,
t->reuseport_prog);
if (server1 < 0)
goto detach;
err = update_lookup_map(t->sock_map, SERVER_A, server1);
if (err)
goto detach;
/* second server on destination address we should never reach */
server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port,
NULL /* reuseport prog */);
if (server2 < 0)
goto close_srv1;
client = make_socket(t->sotype, t->connect_to.ip,
t->connect_to.port, &dst);
if (client < 0)
goto close_srv2;
err = connect(client, (void *)&dst, inetaddr_len(&dst));
if (t->sotype == SOCK_DGRAM) {
err = send_byte(client);
if (err)
goto close_all;
/* Read out asynchronous error */
n = recv(client, NULL, 0, 0);
err = n == -1;
}
if (CHECK(!err || errno != ECONNREFUSED, "connect",
"unexpected success or error\n"))
log_err("expected ECONNREFUSED on connect");
close_all:
close(client);
close_srv2:
close(server2);
close_srv1:
close(server1);
detach:
bpf_link__destroy(lookup_link);
}
static void test_drop_on_reuseport(struct test_sk_lookup *skel)
{
const struct test tests[] = {
{
.desc = "TCP IPv4 drop on reuseport",
.lookup_prog = skel->progs.select_sock_a,
.reuseport_prog = skel->progs.reuseport_drop,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { INT_IP4, INT_PORT },
},
{
.desc = "TCP IPv6 drop on reuseport",
.lookup_prog = skel->progs.select_sock_a,
.reuseport_prog = skel->progs.reuseport_drop,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { INT_IP6, INT_PORT },
},
{
.desc = "UDP IPv4 drop on reuseport",
.lookup_prog = skel->progs.select_sock_a,
.reuseport_prog = skel->progs.reuseport_drop,
.sock_map = skel->maps.redir_map,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { INT_IP4, INT_PORT },
},
{
.desc = "TCP IPv6 drop on reuseport",
.lookup_prog = skel->progs.select_sock_a,
.reuseport_prog = skel->progs.reuseport_drop,
.sock_map = skel->maps.redir_map,
			.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { INT_IP6, INT_PORT },
},
};
const struct test *t;
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
if (test__start_subtest(t->desc))
drop_on_reuseport(t);
}
}
static void run_sk_assign(struct test_sk_lookup *skel,
struct bpf_program *lookup_prog,
const char *remote_ip, const char *local_ip)
{
int server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 };
struct bpf_sk_lookup ctx;
__u64 server_cookie;
int i, err;
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.ctx_in = &ctx,
.ctx_size_in = sizeof(ctx),
.ctx_out = &ctx,
.ctx_size_out = sizeof(ctx),
);
if (fill_sk_lookup_ctx(&ctx, local_ip, EXT_PORT, remote_ip, INT_PORT))
return;
ctx.protocol = IPPROTO_TCP;
for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
server_fds[i] = make_server(SOCK_STREAM, local_ip, 0, NULL);
if (server_fds[i] < 0)
goto close_servers;
err = update_lookup_map(skel->maps.redir_map, i,
server_fds[i]);
if (err)
goto close_servers;
}
server_cookie = socket_cookie(server_fds[SERVER_B]);
	if (!server_cookie)
		goto close_servers;
err = bpf_prog_test_run_opts(bpf_program__fd(lookup_prog), &opts);
if (CHECK(err, "test_run", "failed with error %d\n", errno))
goto close_servers;
if (CHECK(ctx.cookie == 0, "ctx.cookie", "no socket selected\n"))
goto close_servers;
CHECK(ctx.cookie != server_cookie, "ctx.cookie",
"selected sk %llu instead of %llu\n", ctx.cookie, server_cookie);
close_servers:
for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
if (server_fds[i] != -1)
close(server_fds[i]);
}
}
static void run_sk_assign_v4(struct test_sk_lookup *skel,
struct bpf_program *lookup_prog)
{
run_sk_assign(skel, lookup_prog, INT_IP4, EXT_IP4);
}
static void run_sk_assign_v6(struct test_sk_lookup *skel,
struct bpf_program *lookup_prog)
{
run_sk_assign(skel, lookup_prog, INT_IP6, EXT_IP6);
}
static void run_sk_assign_connected(struct test_sk_lookup *skel,
int sotype)
{
int err, client_fd, connected_fd, server_fd;
struct bpf_link *lookup_link;
server_fd = make_server(sotype, EXT_IP4, EXT_PORT, NULL);
if (server_fd < 0)
return;
connected_fd = make_client(sotype, EXT_IP4, EXT_PORT);
if (connected_fd < 0)
goto out_close_server;
/* Put a connected socket in redirect map */
err = update_lookup_map(skel->maps.redir_map, SERVER_A, connected_fd);
if (err)
goto out_close_connected;
lookup_link = attach_lookup_prog(skel->progs.sk_assign_esocknosupport);
if (!lookup_link)
goto out_close_connected;
/* Try to redirect TCP SYN / UDP packet to a connected socket */
client_fd = make_client(sotype, EXT_IP4, EXT_PORT);
if (client_fd < 0)
goto out_unlink_prog;
if (sotype == SOCK_DGRAM) {
send_byte(client_fd);
recv_byte(server_fd);
}
close(client_fd);
out_unlink_prog:
bpf_link__destroy(lookup_link);
out_close_connected:
close(connected_fd);
out_close_server:
close(server_fd);
}
static void test_sk_assign_helper(struct test_sk_lookup *skel)
{
if (test__start_subtest("sk_assign returns EEXIST"))
run_sk_assign_v4(skel, skel->progs.sk_assign_eexist);
if (test__start_subtest("sk_assign honors F_REPLACE"))
run_sk_assign_v4(skel, skel->progs.sk_assign_replace_flag);
if (test__start_subtest("sk_assign accepts NULL socket"))
run_sk_assign_v4(skel, skel->progs.sk_assign_null);
if (test__start_subtest("access ctx->sk"))
run_sk_assign_v4(skel, skel->progs.access_ctx_sk);
if (test__start_subtest("narrow access to ctx v4"))
run_sk_assign_v4(skel, skel->progs.ctx_narrow_access);
if (test__start_subtest("narrow access to ctx v6"))
run_sk_assign_v6(skel, skel->progs.ctx_narrow_access);
if (test__start_subtest("sk_assign rejects TCP established"))
run_sk_assign_connected(skel, SOCK_STREAM);
if (test__start_subtest("sk_assign rejects UDP connected"))
run_sk_assign_connected(skel, SOCK_DGRAM);
}
struct test_multi_prog {
const char *desc;
struct bpf_program *prog1;
struct bpf_program *prog2;
struct bpf_map *redir_map;
struct bpf_map *run_map;
int expect_errno;
struct inet_addr listen_at;
};
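/* run_map holds one "has run" flag per program (PROG1/PROG2). Reset both
 * flags, attach both programs, drive a single TCP connect, and verify that
 * both programs executed regardless of their individual verdicts.
 */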
static void run_multi_prog_lookup(const struct test_multi_prog *t)
{
struct sockaddr_storage dst = {};
int map_fd, server_fd, client_fd;
struct bpf_link *link1, *link2;
int prog_idx, done, err;
map_fd = bpf_map__fd(t->run_map);
done = 0;
prog_idx = PROG1;
err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY);
if (CHECK(err, "bpf_map_update_elem", "failed\n"))
return;
prog_idx = PROG2;
err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY);
if (CHECK(err, "bpf_map_update_elem", "failed\n"))
return;
link1 = attach_lookup_prog(t->prog1);
if (!link1)
return;
link2 = attach_lookup_prog(t->prog2);
if (!link2)
goto out_unlink1;
server_fd = make_server(SOCK_STREAM, t->listen_at.ip,
t->listen_at.port, NULL);
if (server_fd < 0)
goto out_unlink2;
err = update_lookup_map(t->redir_map, SERVER_A, server_fd);
if (err)
goto out_close_server;
client_fd = make_socket(SOCK_STREAM, EXT_IP4, EXT_PORT, &dst);
if (client_fd < 0)
goto out_close_server;
err = connect(client_fd, (void *)&dst, inetaddr_len(&dst));
if (CHECK(err && !t->expect_errno, "connect",
"unexpected error %d\n", errno))
goto out_close_client;
if (CHECK(err && t->expect_errno && errno != t->expect_errno,
"connect", "unexpected error %d\n", errno))
goto out_close_client;
done = 0;
prog_idx = PROG1;
err = bpf_map_lookup_elem(map_fd, &prog_idx, &done);
CHECK(err, "bpf_map_lookup_elem", "failed\n");
CHECK(!done, "bpf_map_lookup_elem", "PROG1 !done\n");
done = 0;
prog_idx = PROG2;
err = bpf_map_lookup_elem(map_fd, &prog_idx, &done);
CHECK(err, "bpf_map_lookup_elem", "failed\n");
CHECK(!done, "bpf_map_lookup_elem", "PROG2 !done\n");
out_close_client:
close(client_fd);
out_close_server:
close(server_fd);
out_unlink2:
bpf_link__destroy(link2);
out_unlink1:
bpf_link__destroy(link1);
}
static void test_multi_prog_lookup(struct test_sk_lookup *skel)
{
struct test_multi_prog tests[] = {
{
.desc = "multi prog - pass, pass",
.prog1 = skel->progs.multi_prog_pass1,
.prog2 = skel->progs.multi_prog_pass2,
.listen_at = { EXT_IP4, EXT_PORT },
},
{
.desc = "multi prog - drop, drop",
.prog1 = skel->progs.multi_prog_drop1,
.prog2 = skel->progs.multi_prog_drop2,
.listen_at = { EXT_IP4, EXT_PORT },
.expect_errno = ECONNREFUSED,
},
{
.desc = "multi prog - pass, drop",
.prog1 = skel->progs.multi_prog_pass1,
.prog2 = skel->progs.multi_prog_drop2,
.listen_at = { EXT_IP4, EXT_PORT },
.expect_errno = ECONNREFUSED,
},
{
.desc = "multi prog - drop, pass",
.prog1 = skel->progs.multi_prog_drop1,
.prog2 = skel->progs.multi_prog_pass2,
.listen_at = { EXT_IP4, EXT_PORT },
.expect_errno = ECONNREFUSED,
},
{
.desc = "multi prog - pass, redir",
.prog1 = skel->progs.multi_prog_pass1,
.prog2 = skel->progs.multi_prog_redir2,
.listen_at = { INT_IP4, INT_PORT },
},
{
.desc = "multi prog - redir, pass",
.prog1 = skel->progs.multi_prog_redir1,
.prog2 = skel->progs.multi_prog_pass2,
.listen_at = { INT_IP4, INT_PORT },
},
{
.desc = "multi prog - drop, redir",
.prog1 = skel->progs.multi_prog_drop1,
.prog2 = skel->progs.multi_prog_redir2,
.listen_at = { INT_IP4, INT_PORT },
},
{
.desc = "multi prog - redir, drop",
.prog1 = skel->progs.multi_prog_redir1,
.prog2 = skel->progs.multi_prog_drop2,
.listen_at = { INT_IP4, INT_PORT },
},
{
.desc = "multi prog - redir, redir",
.prog1 = skel->progs.multi_prog_redir1,
.prog2 = skel->progs.multi_prog_redir2,
.listen_at = { INT_IP4, INT_PORT },
},
};
struct test_multi_prog *t;
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
t->redir_map = skel->maps.redir_map;
t->run_map = skel->maps.run_map;
if (test__start_subtest(t->desc))
run_multi_prog_lookup(t);
}
}
static void run_tests(struct test_sk_lookup *skel)
{
if (test__start_subtest("query lookup prog"))
query_lookup_prog(skel);
test_redirect_lookup(skel);
test_drop_on_lookup(skel);
test_drop_on_reuseport(skel);
test_sk_assign_helper(skel);
test_multi_prog_lookup(skel);
}
static int switch_netns(void)
{
static const char * const setup_script[] = {
"ip -6 addr add dev lo " EXT_IP6 "/128",
"ip -6 addr add dev lo " INT_IP6 "/128",
"ip link set dev lo up",
NULL,
};
const char * const *cmd;
int err;
err = unshare(CLONE_NEWNET);
if (CHECK(err, "unshare", "failed\n")) {
log_err("unshare(CLONE_NEWNET)");
return -1;
}
for (cmd = setup_script; *cmd; cmd++) {
err = system(*cmd);
if (CHECK(err, "system", "failed\n")) {
log_err("system(%s)", *cmd);
return -1;
}
}
return 0;
}
void test_sk_lookup(void)
{
struct test_sk_lookup *skel;
int err;
err = switch_netns();
if (err)
return;
skel = test_sk_lookup__open_and_load();
if (CHECK(!skel, "skel open_and_load", "failed\n"))
return;
run_tests(skel);
test_sk_lookup__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sk_lookup.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <sys/types.h>
#include <unistd.h>
#include "find_vma.skel.h"
#include "find_vma_fail1.skel.h"
#include "find_vma_fail2.skel.h"
static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret, bool need_test)
{
if (need_test) {
ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec");
ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret");
ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret");
ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs");
}
skel->bss->found_vm_exec = 0;
skel->data->find_addr_ret = -1;
skel->data->find_zero_ret = -1;
skel->bss->d_iname[0] = 0;
}
static int open_pe(void)
{
struct perf_event_attr attr = {0};
int pfd;
/* create perf event */
attr.size = sizeof(attr);
attr.type = PERF_TYPE_HARDWARE;
attr.config = PERF_COUNT_HW_CPU_CYCLES;
attr.freq = 1;
attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
return pfd >= 0 ? pfd : -errno;
}
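/* True while the perf-event BPF program has not yet produced all expected
 * results; the caller below busy-loops until it fires or gives up.
 */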
static bool find_vma_pe_condition(struct find_vma *skel)
{
return skel->bss->found_vm_exec == 0 ||
skel->data->find_addr_ret != 0 ||
skel->data->find_zero_ret == -1 ||
strcmp(skel->bss->d_iname, "test_progs") != 0;
}
static void test_find_vma_pe(struct find_vma *skel)
{
struct bpf_link *link = NULL;
volatile int j = 0;
int pfd, i;
const int one_bn = 1000000000;
pfd = open_pe();
if (pfd < 0) {
if (pfd == -ENOENT || pfd == -EOPNOTSUPP) {
printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
test__skip();
goto cleanup;
}
if (!ASSERT_GE(pfd, 0, "perf_event_open"))
goto cleanup;
}
link = bpf_program__attach_perf_event(skel->progs.handle_pe, pfd);
if (!ASSERT_OK_PTR(link, "attach_perf_event"))
goto cleanup;
for (i = 0; i < one_bn && find_vma_pe_condition(skel); ++i)
++j;
test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */, i == one_bn);
cleanup:
bpf_link__destroy(link);
close(pfd);
}
static void test_find_vma_kprobe(struct find_vma *skel)
{
int err;
err = find_vma__attach(skel);
if (!ASSERT_OK(err, "get_branch_snapshot__attach"))
return;
getpgid(skel->bss->target_pid);
test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */, true);
}
static void test_illegal_write_vma(void)
{
struct find_vma_fail1 *skel;
skel = find_vma_fail1__open_and_load();
if (!ASSERT_ERR_PTR(skel, "find_vma_fail1__open_and_load"))
find_vma_fail1__destroy(skel);
}
static void test_illegal_write_task(void)
{
struct find_vma_fail2 *skel;
skel = find_vma_fail2__open_and_load();
if (!ASSERT_ERR_PTR(skel, "find_vma_fail2__open_and_load"))
find_vma_fail2__destroy(skel);
}
void serial_test_find_vma(void)
{
struct find_vma *skel;
skel = find_vma__open_and_load();
if (!ASSERT_OK_PTR(skel, "find_vma__open_and_load"))
return;
skel->bss->target_pid = getpid();
skel->bss->addr = (__u64)(uintptr_t)test_find_vma_pe;
test_find_vma_pe(skel);
test_find_vma_kprobe(skel);
find_vma__destroy(skel);
test_illegal_write_vma();
test_illegal_write_task();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/find_vma.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Topology:
* ---------
* NS0 namespace | NS1 namespace | NS2 namespace
* | |
* +---------------+ | +---------------+ |
* | ipsec0 |---------| ipsec0 | |
* | 192.168.1.100 | | | 192.168.1.200 | |
* | if_id: bpf | | +---------------+ |
* +---------------+ | |
* | | | +---------------+
* | | | | ipsec0 |
* \------------------------------------------| 192.168.1.200 |
* | | +---------------+
* | |
* | | (overlay network)
* ------------------------------------------------------
* | | (underlay network)
* +--------------+ | +--------------+ |
* | veth01 |----------| veth10 | |
* | 172.16.1.100 | | | 172.16.1.200 | |
* ---------------+ | +--------------+ |
* | |
* +--------------+ | | +--------------+
* | veth02 |-----------------------------------| veth20 |
* | 172.16.2.100 | | | | 172.16.2.200 |
* +--------------+ | | +--------------+
*
*
* Test Packet flow
* -----------
* The tests perform 'ping 192.168.1.200' from the NS0 namespace:
* 1) request is routed to NS0 ipsec0
* 2) NS0 ipsec0 tc egress BPF program is triggered and sets the if_id based
* on the requested value. This makes the ipsec0 device in external mode
* select the destination tunnel
* 3) ping reaches the other namespace (NS1 or NS2 based on which if_id was
* used) and response is sent
* 4) response is received on NS0 ipsec0, tc ingress program is triggered and
* records the response if_id
* 5) requested if_id is compared with received if_id
*/
#include <net/if.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include "test_progs.h"
#include "network_helpers.h"
#include "xfrm_info.skel.h"
#define NS0 "xfrm_test_ns0"
#define NS1 "xfrm_test_ns1"
#define NS2 "xfrm_test_ns2"
#define IF_ID_0_TO_1 1
#define IF_ID_0_TO_2 2
#define IF_ID_1 3
#define IF_ID_2 4
#define IP4_ADDR_VETH01 "172.16.1.100"
#define IP4_ADDR_VETH10 "172.16.1.200"
#define IP4_ADDR_VETH02 "172.16.2.100"
#define IP4_ADDR_VETH20 "172.16.2.200"
#define ESP_DUMMY_PARAMS \
"proto esp aead 'rfc4106(gcm(aes))' " \
"0xe4d8f4b4da1df18a3510b3781496daa82488b713 128 mode tunnel "
static int attach_tc_prog(struct bpf_tc_hook *hook, int igr_fd, int egr_fd)
{
LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1, .priority = 1,
.prog_fd = igr_fd);
LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1, .priority = 1,
.prog_fd = egr_fd);
int ret;
ret = bpf_tc_hook_create(hook);
if (!ASSERT_OK(ret, "create tc hook"))
return ret;
if (igr_fd >= 0) {
hook->attach_point = BPF_TC_INGRESS;
ret = bpf_tc_attach(hook, &opts1);
if (!ASSERT_OK(ret, "bpf_tc_attach")) {
bpf_tc_hook_destroy(hook);
return ret;
}
}
if (egr_fd >= 0) {
hook->attach_point = BPF_TC_EGRESS;
ret = bpf_tc_attach(hook, &opts2);
if (!ASSERT_OK(ret, "bpf_tc_attach")) {
bpf_tc_hook_destroy(hook);
return ret;
}
}
return 0;
}
static void cleanup(void)
{
SYS_NOFAIL("test -f /var/run/netns/" NS0 " && ip netns delete " NS0);
SYS_NOFAIL("test -f /var/run/netns/" NS1 " && ip netns delete " NS1);
SYS_NOFAIL("test -f /var/run/netns/" NS2 " && ip netns delete " NS2);
}
static int config_underlay(void)
{
SYS(fail, "ip netns add " NS0);
SYS(fail, "ip netns add " NS1);
SYS(fail, "ip netns add " NS2);
/* NS0 <-> NS1 [veth01 <-> veth10] */
SYS(fail, "ip link add veth01 netns " NS0 " type veth peer name veth10 netns " NS1);
SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH01 "/24 dev veth01");
SYS(fail, "ip -net " NS0 " link set dev veth01 up");
SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH10 "/24 dev veth10");
SYS(fail, "ip -net " NS1 " link set dev veth10 up");
/* NS0 <-> NS2 [veth02 <-> veth20] */
SYS(fail, "ip link add veth02 netns " NS0 " type veth peer name veth20 netns " NS2);
SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH02 "/24 dev veth02");
SYS(fail, "ip -net " NS0 " link set dev veth02 up");
SYS(fail, "ip -net " NS2 " addr add " IP4_ADDR_VETH20 "/24 dev veth20");
SYS(fail, "ip -net " NS2 " link set dev veth20 up");
return 0;
fail:
return -1;
}
static int setup_xfrm_tunnel_ns(const char *ns, const char *ipv4_local,
const char *ipv4_remote, int if_id)
{
/* State: local -> remote */
SYS(fail, "ip -net %s xfrm state add src %s dst %s spi 1 "
ESP_DUMMY_PARAMS "if_id %d", ns, ipv4_local, ipv4_remote, if_id);
/* State: local <- remote */
SYS(fail, "ip -net %s xfrm state add src %s dst %s spi 1 "
ESP_DUMMY_PARAMS "if_id %d", ns, ipv4_remote, ipv4_local, if_id);
/* Policy: local -> remote */
SYS(fail, "ip -net %s xfrm policy add dir out src 0.0.0.0/0 dst 0.0.0.0/0 "
"if_id %d tmpl src %s dst %s proto esp mode tunnel if_id %d", ns,
if_id, ipv4_local, ipv4_remote, if_id);
/* Policy: local <- remote */
SYS(fail, "ip -net %s xfrm policy add dir in src 0.0.0.0/0 dst 0.0.0.0/0 "
"if_id %d tmpl src %s dst %s proto esp mode tunnel if_id %d", ns,
if_id, ipv4_remote, ipv4_local, if_id);
return 0;
fail:
return -1;
}
static int setup_xfrm_tunnel(const char *ns_a, const char *ns_b,
const char *ipv4_a, const char *ipv4_b,
int if_id_a, int if_id_b)
{
return setup_xfrm_tunnel_ns(ns_a, ipv4_a, ipv4_b, if_id_a) ||
setup_xfrm_tunnel_ns(ns_b, ipv4_b, ipv4_a, if_id_b);
}
static struct rtattr *rtattr_add(struct nlmsghdr *nh, unsigned short type,
unsigned short len)
{
struct rtattr *rta =
(struct rtattr *)((uint8_t *)nh + RTA_ALIGN(nh->nlmsg_len));
rta->rta_type = type;
rta->rta_len = RTA_LENGTH(len);
nh->nlmsg_len = RTA_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
return rta;
}
static struct rtattr *rtattr_add_str(struct nlmsghdr *nh, unsigned short type,
const char *s)
{
struct rtattr *rta = rtattr_add(nh, type, strlen(s));
memcpy(RTA_DATA(rta), s, strlen(s));
return rta;
}
static struct rtattr *rtattr_begin(struct nlmsghdr *nh, unsigned short type)
{
return rtattr_add(nh, type, 0);
}
static void rtattr_end(struct nlmsghdr *nh, struct rtattr *attr)
{
uint8_t *end = (uint8_t *)nh + nh->nlmsg_len;
attr->rta_len = end - (uint8_t *)attr;
}
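/* Create an "ipsec0" xfrm interface in collect-metadata (external) mode by
 * sending a raw RTM_NEWLINK request with IFLA_XFRM_COLLECT_METADATA, since
 * older iproute2 cannot set up such a device.
 */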
static int setup_xfrmi_external_dev(const char *ns)
{
struct {
struct nlmsghdr nh;
struct ifinfomsg info;
unsigned char data[128];
} req;
struct rtattr *link_info, *info_data;
struct nstoken *nstoken;
int ret = -1, sock = -1;
struct nlmsghdr *nh;
memset(&req, 0, sizeof(req));
nh = &req.nh;
nh->nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
nh->nlmsg_type = RTM_NEWLINK;
nh->nlmsg_flags |= NLM_F_CREATE | NLM_F_REQUEST;
rtattr_add_str(nh, IFLA_IFNAME, "ipsec0");
link_info = rtattr_begin(nh, IFLA_LINKINFO);
rtattr_add_str(nh, IFLA_INFO_KIND, "xfrm");
info_data = rtattr_begin(nh, IFLA_INFO_DATA);
rtattr_add(nh, IFLA_XFRM_COLLECT_METADATA, 0);
rtattr_end(nh, info_data);
rtattr_end(nh, link_info);
nstoken = open_netns(ns);
if (!ASSERT_OK_PTR(nstoken, "setns"))
goto done;
sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
if (!ASSERT_GE(sock, 0, "netlink socket"))
goto done;
ret = send(sock, nh, nh->nlmsg_len, 0);
if (!ASSERT_EQ(ret, nh->nlmsg_len, "netlink send length"))
goto done;
ret = 0;
done:
if (sock != -1)
close(sock);
if (nstoken)
close_netns(nstoken);
return ret;
}
static int config_overlay(void)
{
if (setup_xfrm_tunnel(NS0, NS1, IP4_ADDR_VETH01, IP4_ADDR_VETH10,
IF_ID_0_TO_1, IF_ID_1))
goto fail;
if (setup_xfrm_tunnel(NS0, NS2, IP4_ADDR_VETH02, IP4_ADDR_VETH20,
IF_ID_0_TO_2, IF_ID_2))
goto fail;
/* Older iproute2 doesn't support this option */
if (!ASSERT_OK(setup_xfrmi_external_dev(NS0), "xfrmi"))
goto fail;
SYS(fail, "ip -net " NS0 " addr add 192.168.1.100/24 dev ipsec0");
SYS(fail, "ip -net " NS0 " link set dev ipsec0 up");
SYS(fail, "ip -net " NS1 " link add ipsec0 type xfrm if_id %d", IF_ID_1);
SYS(fail, "ip -net " NS1 " addr add 192.168.1.200/24 dev ipsec0");
SYS(fail, "ip -net " NS1 " link set dev ipsec0 up");
SYS(fail, "ip -net " NS2 " link add ipsec0 type xfrm if_id %d", IF_ID_2);
SYS(fail, "ip -net " NS2 " addr add 192.168.1.200/24 dev ipsec0");
SYS(fail, "ip -net " NS2 " link set dev ipsec0 up");
return 0;
fail:
return -1;
}
static int test_xfrm_ping(struct xfrm_info *skel, u32 if_id)
{
skel->bss->req_if_id = if_id;
SYS(fail, "ping -i 0.01 -c 3 -w 10 -q 192.168.1.200 > /dev/null");
if (!ASSERT_EQ(skel->bss->resp_if_id, if_id, "if_id"))
goto fail;
return 0;
fail:
return -1;
}
static void _test_xfrm_info(void)
{
LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
int get_xfrm_info_prog_fd, set_xfrm_info_prog_fd;
struct nstoken *nstoken = NULL;
struct xfrm_info *skel;
int ifindex;
/* load and attach bpf progs to ipsec dev tc hook point */
skel = xfrm_info__open_and_load();
if (!ASSERT_OK_PTR(skel, "xfrm_info__open_and_load"))
goto done;
nstoken = open_netns(NS0);
if (!ASSERT_OK_PTR(nstoken, "setns " NS0))
goto done;
ifindex = if_nametoindex("ipsec0");
if (!ASSERT_NEQ(ifindex, 0, "ipsec0 ifindex"))
goto done;
tc_hook.ifindex = ifindex;
set_xfrm_info_prog_fd = bpf_program__fd(skel->progs.set_xfrm_info);
get_xfrm_info_prog_fd = bpf_program__fd(skel->progs.get_xfrm_info);
if (!ASSERT_GE(set_xfrm_info_prog_fd, 0, "bpf_program__fd"))
goto done;
if (!ASSERT_GE(get_xfrm_info_prog_fd, 0, "bpf_program__fd"))
goto done;
if (attach_tc_prog(&tc_hook, get_xfrm_info_prog_fd,
set_xfrm_info_prog_fd))
goto done;
/* perform test */
if (!ASSERT_EQ(test_xfrm_ping(skel, IF_ID_0_TO_1), 0, "ping " NS1))
goto done;
if (!ASSERT_EQ(test_xfrm_ping(skel, IF_ID_0_TO_2), 0, "ping " NS2))
goto done;
done:
if (nstoken)
close_netns(nstoken);
xfrm_info__destroy(skel);
}
void test_xfrm_info(void)
{
cleanup();
if (!ASSERT_OK(config_underlay(), "config_underlay"))
goto done;
if (!ASSERT_OK(config_overlay(), "config_overlay"))
goto done;
if (test__start_subtest("xfrm_info"))
_test_xfrm_info();
done:
cleanup();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xfrm_info.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <sys/mman.h>
#include "test_mmap.skel.h"
struct map_data {
__u64 val[512 * 4];
};
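/* mmap() of BPF map values works on whole pages, so round sizes up to a
 * multiple of the page size.
 */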
static size_t roundup_page(size_t sz)
{
long page_size = sysconf(_SC_PAGE_SIZE);
return (sz + page_size - 1) / page_size * page_size;
}
void test_mmap(void)
{
const size_t bss_sz = roundup_page(sizeof(struct test_mmap__bss));
const size_t map_sz = roundup_page(sizeof(struct map_data));
const int zero = 0, one = 1, two = 2, far = 1500;
const long page_size = sysconf(_SC_PAGE_SIZE);
int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
struct bpf_map *data_map, *bss_map;
void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2;
struct test_mmap__bss *bss_data;
struct bpf_map_info map_info;
__u32 map_info_sz = sizeof(map_info);
struct map_data *map_data;
struct test_mmap *skel;
__u64 val = 0;
skel = test_mmap__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size);
if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
goto cleanup;
/* at least 4 pages of data */
err = bpf_map__set_max_entries(skel->maps.data_map,
4 * (page_size / sizeof(u64)));
if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
goto cleanup;
err = test_mmap__load(skel);
if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
goto cleanup;
bss_map = skel->maps.bss;
data_map = skel->maps.data_map;
data_map_fd = bpf_map__fd(data_map);
rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
munmap(tmp1, page_size);
goto cleanup;
}
/* now double-check if it's mmap()'able at all */
tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0);
if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
goto cleanup;
/* get map's ID */
memset(&map_info, 0, map_info_sz);
err = bpf_map_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
if (CHECK(err, "map_get_info", "failed %d\n", errno))
goto cleanup;
data_map_id = map_info.id;
/* mmap BSS map */
bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
bpf_map__fd(bss_map), 0);
if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
".bss mmap failed: %d\n", errno)) {
bss_mmaped = NULL;
goto cleanup;
}
/* map as R/W first */
map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
data_map_fd, 0);
if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
"data_map mmap failed: %d\n", errno)) {
map_mmaped = NULL;
goto cleanup;
}
bss_data = bss_mmaped;
map_data = map_mmaped;
CHECK_FAIL(bss_data->in_val);
CHECK_FAIL(bss_data->out_val);
CHECK_FAIL(skel->bss->in_val);
CHECK_FAIL(skel->bss->out_val);
CHECK_FAIL(map_data->val[0]);
CHECK_FAIL(map_data->val[1]);
CHECK_FAIL(map_data->val[2]);
CHECK_FAIL(map_data->val[far]);
err = test_mmap__attach(skel);
if (CHECK(err, "attach_raw_tp", "err %d\n", err))
goto cleanup;
bss_data->in_val = 123;
val = 111;
CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0));
usleep(1);
CHECK_FAIL(bss_data->in_val != 123);
CHECK_FAIL(bss_data->out_val != 123);
CHECK_FAIL(skel->bss->in_val != 123);
CHECK_FAIL(skel->bss->out_val != 123);
CHECK_FAIL(map_data->val[0] != 111);
CHECK_FAIL(map_data->val[1] != 222);
CHECK_FAIL(map_data->val[2] != 123);
CHECK_FAIL(map_data->val[far] != 3 * 123);
CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val));
CHECK_FAIL(val != 111);
CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val));
CHECK_FAIL(val != 222);
CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val));
CHECK_FAIL(val != 123);
CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val));
CHECK_FAIL(val != 3 * 123);
/* data_map freeze should fail due to R/W mmap() */
err = bpf_map_freeze(data_map_fd);
if (CHECK(!err || errno != EBUSY, "no_freeze",
"data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
goto cleanup;
err = mprotect(map_mmaped, map_sz, PROT_READ);
if (CHECK(err, "mprotect_ro", "mprotect to r/o failed %d\n", errno))
goto cleanup;
/* unmap R/W mapping */
err = munmap(map_mmaped, map_sz);
map_mmaped = NULL;
if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno))
goto cleanup;
/* re-map as R/O now */
map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
"data_map R/O mmap failed: %d\n", errno)) {
map_mmaped = NULL;
goto cleanup;
}
err = mprotect(map_mmaped, map_sz, PROT_WRITE);
if (CHECK(!err, "mprotect_wr", "mprotect() succeeded unexpectedly!\n"))
goto cleanup;
err = mprotect(map_mmaped, map_sz, PROT_EXEC);
if (CHECK(!err, "mprotect_ex", "mprotect() succeeded unexpectedly!\n"))
goto cleanup;
map_data = map_mmaped;
/* map/unmap in a loop to test ref counting */
for (i = 0; i < 10; i++) {
int flags = i % 2 ? PROT_READ : PROT_WRITE;
void *p;
p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0);
if (CHECK_FAIL(p == MAP_FAILED))
goto cleanup;
err = munmap(p, map_sz);
if (CHECK_FAIL(err))
goto cleanup;
}
/* data_map freeze should now succeed due to no R/W mapping */
err = bpf_map_freeze(data_map_fd);
if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n",
err, errno))
goto cleanup;
/* mapping as R/W now should fail */
tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
data_map_fd, 0);
if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) {
munmap(tmp1, map_sz);
goto cleanup;
}
bss_data->in_val = 321;
usleep(1);
CHECK_FAIL(bss_data->in_val != 321);
CHECK_FAIL(bss_data->out_val != 321);
CHECK_FAIL(skel->bss->in_val != 321);
CHECK_FAIL(skel->bss->out_val != 321);
CHECK_FAIL(map_data->val[0] != 111);
CHECK_FAIL(map_data->val[1] != 222);
CHECK_FAIL(map_data->val[2] != 321);
CHECK_FAIL(map_data->val[far] != 3 * 321);
/* check some more advanced mmap() manipulations */
tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS,
-1, 0);
if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno))
goto cleanup;
/* map all but last page: pages 1-3 mapped */
tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, 0);
if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) {
munmap(tmp0, 4 * page_size);
goto cleanup;
}
/* unmap second page: pages 1, 3 mapped */
err = munmap(tmp1 + page_size, page_size);
if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
munmap(tmp1, 4 * page_size);
goto cleanup;
}
/* map page 2 back */
tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ,
MAP_SHARED | MAP_FIXED, data_map_fd, 0);
if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
munmap(tmp1, page_size);
		munmap(tmp1 + 2 * page_size, 2 * page_size);
goto cleanup;
}
CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
"tmp1: %p, tmp2: %p\n", tmp1, tmp2);
/* re-map all 4 pages */
tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, 0);
if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
munmap(tmp1, 4 * page_size); /* unmap page 1 */
goto cleanup;
}
CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
map_data = tmp2;
CHECK_FAIL(bss_data->in_val != 321);
CHECK_FAIL(bss_data->out_val != 321);
CHECK_FAIL(skel->bss->in_val != 321);
CHECK_FAIL(skel->bss->out_val != 321);
CHECK_FAIL(map_data->val[0] != 111);
CHECK_FAIL(map_data->val[1] != 222);
CHECK_FAIL(map_data->val[2] != 321);
CHECK_FAIL(map_data->val[far] != 3 * 321);
munmap(tmp2, 4 * page_size);
/* map all 4 pages, but with pg_off=1 page, should fail */
tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, page_size /* initial page shift */);
if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) {
munmap(tmp1, 4 * page_size);
goto cleanup;
}
tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno))
goto cleanup;
test_mmap__destroy(skel);
skel = NULL;
CHECK_FAIL(munmap(bss_mmaped, bss_sz));
bss_mmaped = NULL;
CHECK_FAIL(munmap(map_mmaped, map_sz));
map_mmaped = NULL;
/* map should be still held by active mmap */
tmp_fd = bpf_map_get_fd_by_id(data_map_id);
if (CHECK(tmp_fd < 0, "get_map_by_id", "failed %d\n", errno)) {
munmap(tmp1, map_sz);
goto cleanup;
}
close(tmp_fd);
/* this should release data map finally */
munmap(tmp1, map_sz);
/* we need to wait for RCU grace period */
for (i = 0; i < 10000; i++) {
__u32 id = data_map_id - 1;
if (bpf_map_get_next_id(id, &id) || id > data_map_id)
break;
usleep(1);
}
/* should fail to get map FD by non-existing ID */
tmp_fd = bpf_map_get_fd_by_id(data_map_id);
if (CHECK(tmp_fd >= 0, "get_map_by_id_after",
"unexpectedly succeeded %d\n", tmp_fd)) {
close(tmp_fd);
goto cleanup;
}
cleanup:
if (bss_mmaped)
CHECK_FAIL(munmap(bss_mmaped, bss_sz));
if (map_mmaped)
CHECK_FAIL(munmap(map_mmaped, map_sz));
test_mmap__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/mmap.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Jesper Dangaard Brouer */
#include <linux/if_link.h> /* before test_progs.h, avoid bpf_util.h redefines */
#include <test_progs.h>
#include "test_check_mtu.skel.h"
#include "network_helpers.h"
#include <stdlib.h>
#include <inttypes.h>
#define IFINDEX_LO 1
static __u32 duration; /* Hint: needed for CHECK macro */
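/* Read the current MTU of the loopback device from sysfs so the BPF helper
 * results can be compared against the kernel's real value.
 */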
static int read_mtu_device_lo(void)
{
const char *filename = "/sys/class/net/lo/mtu";
char buf[11] = {};
int value, n, fd;
	fd = open(filename, O_RDONLY);
if (fd == -1)
return -1;
n = read(fd, buf, sizeof(buf));
close(fd);
if (n == -1)
return -2;
value = strtoimax(buf, NULL, 10);
if (errno == ERANGE)
return -3;
return value;
}
static void test_check_mtu_xdp_attach(void)
{
struct bpf_link_info link_info;
__u32 link_info_len = sizeof(link_info);
struct test_check_mtu *skel;
struct bpf_program *prog;
struct bpf_link *link;
int err = 0;
int fd;
skel = test_check_mtu__open_and_load();
if (CHECK(!skel, "open and load skel", "failed"))
return; /* Exit if e.g. helper unknown to kernel */
prog = skel->progs.xdp_use_helper_basic;
link = bpf_program__attach_xdp(prog, IFINDEX_LO);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto out;
skel->links.xdp_use_helper_basic = link;
memset(&link_info, 0, sizeof(link_info));
fd = bpf_link__fd(link);
err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len);
if (CHECK(err, "link_info", "failed: %d\n", err))
goto out;
CHECK(link_info.type != BPF_LINK_TYPE_XDP, "link_type",
"got %u != exp %u\n", link_info.type, BPF_LINK_TYPE_XDP);
CHECK(link_info.xdp.ifindex != IFINDEX_LO, "link_ifindex",
"got %u != exp %u\n", link_info.xdp.ifindex, IFINDEX_LO);
err = bpf_link__detach(link);
CHECK(err, "link_detach", "failed %d\n", err);
out:
test_check_mtu__destroy(skel);
}
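/* Run one XDP program via BPF_PROG_TEST_RUN and verify both the XDP_PASS
 * return code and the MTU value the program stored in a global variable.
 */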
static void test_check_mtu_run_xdp(struct test_check_mtu *skel,
struct bpf_program *prog,
__u32 mtu_expect)
{
int retval_expect = XDP_PASS;
__u32 mtu_result = 0;
char buf[256] = {};
int err, prog_fd = bpf_program__fd(prog);
LIBBPF_OPTS(bpf_test_run_opts, topts,
.repeat = 1,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = sizeof(buf),
);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, retval_expect, "retval");
/* Extract MTU that BPF-prog got */
mtu_result = skel->bss->global_bpf_mtu_xdp;
ASSERT_EQ(mtu_result, mtu_expect, "MTU-compare-user");
}
static void test_check_mtu_xdp(__u32 mtu, __u32 ifindex)
{
struct test_check_mtu *skel;
int err;
skel = test_check_mtu__open();
if (CHECK(!skel, "skel_open", "failed"))
return;
/* Update "constants" in BPF-prog *BEFORE* libbpf load */
skel->rodata->GLOBAL_USER_MTU = mtu;
skel->rodata->GLOBAL_USER_IFINDEX = ifindex;
err = test_check_mtu__load(skel);
if (CHECK(err, "skel_load", "failed: %d\n", err))
goto cleanup;
test_check_mtu_run_xdp(skel, skel->progs.xdp_use_helper, mtu);
test_check_mtu_run_xdp(skel, skel->progs.xdp_exceed_mtu, mtu);
test_check_mtu_run_xdp(skel, skel->progs.xdp_minus_delta, mtu);
test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len, mtu);
test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len_exceed, mtu);
cleanup:
test_check_mtu__destroy(skel);
}
static void test_check_mtu_run_tc(struct test_check_mtu *skel,
struct bpf_program *prog,
__u32 mtu_expect)
{
int retval_expect = BPF_OK;
__u32 mtu_result = 0;
char buf[256] = {};
int err, prog_fd = bpf_program__fd(prog);
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = 1,
);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, retval_expect, "retval");
/* Extract MTU that BPF-prog got */
mtu_result = skel->bss->global_bpf_mtu_tc;
ASSERT_EQ(mtu_result, mtu_expect, "MTU-compare-user");
}
static void test_check_mtu_tc(__u32 mtu, __u32 ifindex)
{
struct test_check_mtu *skel;
int err;
skel = test_check_mtu__open();
if (CHECK(!skel, "skel_open", "failed"))
return;
/* Update "constants" in BPF-prog *BEFORE* libbpf load */
skel->rodata->GLOBAL_USER_MTU = mtu;
skel->rodata->GLOBAL_USER_IFINDEX = ifindex;
err = test_check_mtu__load(skel);
if (CHECK(err, "skel_load", "failed: %d\n", err))
goto cleanup;
test_check_mtu_run_tc(skel, skel->progs.tc_use_helper, mtu);
test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu, mtu);
test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu_da, mtu);
test_check_mtu_run_tc(skel, skel->progs.tc_minus_delta, mtu);
test_check_mtu_run_tc(skel, skel->progs.tc_input_len, mtu);
test_check_mtu_run_tc(skel, skel->progs.tc_input_len_exceed, mtu);
cleanup:
test_check_mtu__destroy(skel);
}
void serial_test_check_mtu(void)
{
int mtu_lo;
if (test__start_subtest("bpf_check_mtu XDP-attach"))
test_check_mtu_xdp_attach();
mtu_lo = read_mtu_device_lo();
if (CHECK(mtu_lo < 0, "reading MTU value", "failed (err:%d)", mtu_lo))
return;
if (test__start_subtest("bpf_check_mtu XDP-run"))
test_check_mtu_xdp(mtu_lo, 0);
if (test__start_subtest("bpf_check_mtu XDP-run ifindex-lookup"))
test_check_mtu_xdp(mtu_lo, IFINDEX_LO);
if (test__start_subtest("bpf_check_mtu TC-run"))
test_check_mtu_tc(mtu_lo, 0);
if (test__start_subtest("bpf_check_mtu TC-run ifindex-lookup"))
test_check_mtu_tc(mtu_lo, IFINDEX_LO);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/check_mtu.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Bytedance */
#include <test_progs.h>
#include "test_map_lookup_percpu_elem.skel.h"
void test_map_lookup_percpu_elem(void)
{
struct test_map_lookup_percpu_elem *skel;
__u64 key = 0, sum;
int ret, i, nr_cpus = libbpf_num_possible_cpus();
__u64 *buf;
	buf = malloc(nr_cpus * sizeof(__u64));
if (!ASSERT_OK_PTR(buf, "malloc"))
return;
for (i = 0; i < nr_cpus; i++)
buf[i] = i;
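	/* per-CPU values are 0..nr_cpus-1, so the expected sum is n * (n - 1) / 2 */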
sum = (nr_cpus - 1) * nr_cpus / 2;
skel = test_map_lookup_percpu_elem__open();
if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open"))
goto exit;
skel->rodata->my_pid = getpid();
skel->rodata->nr_cpus = nr_cpus;
ret = test_map_lookup_percpu_elem__load(skel);
if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__load"))
goto cleanup;
ret = test_map_lookup_percpu_elem__attach(skel);
if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach"))
goto cleanup;
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0);
ASSERT_OK(ret, "percpu_array_map update");
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_hash_map), &key, buf, 0);
ASSERT_OK(ret, "percpu_hash_map update");
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_lru_hash_map), &key, buf, 0);
ASSERT_OK(ret, "percpu_lru_hash_map update");
syscall(__NR_getuid);
test_map_lookup_percpu_elem__detach(skel);
ASSERT_EQ(skel->bss->percpu_array_elem_sum, sum, "percpu_array lookup percpu elem");
ASSERT_EQ(skel->bss->percpu_hash_elem_sum, sum, "percpu_hash lookup percpu elem");
ASSERT_EQ(skel->bss->percpu_lru_hash_elem_sum, sum, "percpu_lru_hash lookup percpu elem");
cleanup:
test_map_lookup_percpu_elem__destroy(skel);
exit:
free(buf);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_hash_large_key.skel.h"
void test_hash_large_key(void)
{
int err, value = 21, duration = 0, hash_map_fd;
struct test_hash_large_key *skel;
struct bigelement {
int a;
char b[4096];
long long c;
} key;
bzero(&key, sizeof(key));
skel = test_hash_large_key__open_and_load();
if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
return;
hash_map_fd = bpf_map__fd(skel->maps.hash_map);
if (CHECK(hash_map_fd < 0, "bpf_map__fd", "failed\n"))
goto cleanup;
err = test_hash_large_key__attach(skel);
if (CHECK(err, "attach_raw_tp", "err %d\n", err))
goto cleanup;
err = bpf_map_update_elem(hash_map_fd, &key, &value, BPF_ANY);
if (CHECK(err, "bpf_map_update_elem", "errno=%d\n", errno))
goto cleanup;
key.c = 1;
err = bpf_map_lookup_elem(hash_map_fd, &key, &value);
if (CHECK(err, "bpf_map_lookup_elem", "errno=%d\n", errno))
goto cleanup;
CHECK_FAIL(value != 42);
cleanup:
test_hash_large_key__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/hash_large_key.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
void test_pkt_access(void)
{
const char *file = "./test_pkt_access.bpf.o";
struct bpf_object *obj;
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 100000,
);
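	/* Run the SCHED_CLS program 100000 times against a canned IPv4 packet,
	 * then against an IPv6 packet, expecting a zero return code each time.
	 */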
err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "ipv4 test_run_opts err");
ASSERT_OK(topts.retval, "ipv4 test_run_opts retval");
topts.data_in = &pkt_v6;
topts.data_size_in = sizeof(pkt_v6);
topts.data_size_out = 0; /* reset from last call */
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "ipv6 test_run_opts err");
ASSERT_OK(topts.retval, "ipv6 test_run_opts retval");
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/pkt_access.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>
#include "test_global_map_resize.skel.h"
#include "test_progs.h"
static void run_prog_bss_array_sum(void)
{
(void)syscall(__NR_getpid);
}
static void run_prog_data_array_sum(void)
{
(void)syscall(__NR_getuid);
}
static void global_map_resize_bss_subtest(void)
{
int err;
struct test_global_map_resize *skel;
struct bpf_map *map;
const __u32 desired_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2;
size_t array_len, actual_sz, new_sz;
skel = test_global_map_resize__open();
if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
goto teardown;
/* set some initial value before resizing.
* it is expected this non-zero value will be preserved
* while resizing.
*/
skel->bss->array[0] = 1;
/* resize map value and verify the new size */
map = skel->maps.bss;
err = bpf_map__set_value_size(map, desired_sz);
if (!ASSERT_OK(err, "bpf_map__set_value_size"))
goto teardown;
if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
goto teardown;
new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus();
err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz);
ASSERT_OK(err, "percpu_arr_resize");
/* set the expected number of elements based on the resized array */
array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->bss->array[0]);
if (!ASSERT_GT(array_len, 1, "array_len"))
goto teardown;
skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz);
if (!ASSERT_OK_PTR(skel->bss, "bpf_map__initial_value (ptr)"))
goto teardown;
if (!ASSERT_EQ(actual_sz, desired_sz, "bpf_map__initial_value (size)"))
goto teardown;
/* fill the newly resized array with ones,
* skipping the first element which was previously set
*/
for (int i = 1; i < array_len; i++)
skel->bss->array[i] = 1;
/* set global const values before loading */
skel->rodata->pid = getpid();
skel->rodata->bss_array_len = array_len;
skel->rodata->data_array_len = 1;
err = test_global_map_resize__load(skel);
if (!ASSERT_OK(err, "test_global_map_resize__load"))
goto teardown;
err = test_global_map_resize__attach(skel);
if (!ASSERT_OK(err, "test_global_map_resize__attach"))
goto teardown;
/* run the bpf program which will sum the contents of the array.
	 * since the array was filled with ones, verify the sum equals array_len
*/
run_prog_bss_array_sum();
if (!ASSERT_EQ(skel->bss->sum, array_len, "sum"))
goto teardown;
teardown:
test_global_map_resize__destroy(skel);
}
static void global_map_resize_data_subtest(void)
{
struct test_global_map_resize *skel;
struct bpf_map *map;
const __u32 desired_sz = sysconf(_SC_PAGE_SIZE) * 2;
size_t array_len, actual_sz, new_sz;
int err;
skel = test_global_map_resize__open();
if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
goto teardown;
/* set some initial value before resizing.
* it is expected this non-zero value will be preserved
* while resizing.
*/
skel->data_custom->my_array[0] = 1;
/* resize map value and verify the new size */
map = skel->maps.data_custom;
err = bpf_map__set_value_size(map, desired_sz);
if (!ASSERT_OK(err, "bpf_map__set_value_size"))
goto teardown;
if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
goto teardown;
new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus();
err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz);
ASSERT_OK(err, "percpu_arr_resize");
/* set the expected number of elements based on the resized array */
array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->data_custom->my_array[0]);
if (!ASSERT_GT(array_len, 1, "array_len"))
goto teardown;
skel->data_custom = bpf_map__initial_value(skel->maps.data_custom, &actual_sz);
if (!ASSERT_OK_PTR(skel->data_custom, "bpf_map__initial_value (ptr)"))
goto teardown;
if (!ASSERT_EQ(actual_sz, desired_sz, "bpf_map__initial_value (size)"))
goto teardown;
/* fill the newly resized array with ones,
* skipping the first element which was previously set
*/
for (int i = 1; i < array_len; i++)
skel->data_custom->my_array[i] = 1;
/* set global const values before loading */
skel->rodata->pid = getpid();
skel->rodata->bss_array_len = 1;
skel->rodata->data_array_len = array_len;
err = test_global_map_resize__load(skel);
if (!ASSERT_OK(err, "test_global_map_resize__load"))
goto teardown;
err = test_global_map_resize__attach(skel);
if (!ASSERT_OK(err, "test_global_map_resize__attach"))
goto teardown;
/* run the bpf program which will sum the contents of the array.
	 * since the array was filled with ones, verify the sum equals array_len
*/
run_prog_data_array_sum();
if (!ASSERT_EQ(skel->bss->sum, array_len, "sum"))
goto teardown;
teardown:
test_global_map_resize__destroy(skel);
}
static void global_map_resize_invalid_subtest(void)
{
int err;
struct test_global_map_resize *skel;
struct bpf_map *map;
__u32 element_sz, desired_sz;
skel = test_global_map_resize__open();
if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
return;
/* attempt to resize a global datasec map to size
* which does NOT align with array
*/
map = skel->maps.data_custom;
if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.custom initial btf"))
goto teardown;
/* set desired size a fraction of element size beyond an aligned size */
element_sz = sizeof(skel->data_custom->my_array[0]);
desired_sz = element_sz + element_sz / 2;
/* confirm desired size does NOT align with array */
if (!ASSERT_NEQ(desired_sz % element_sz, 0, "my_array alignment"))
goto teardown;
err = bpf_map__set_value_size(map, desired_sz);
/* confirm resize is OK but BTF info is cleared */
if (!ASSERT_OK(err, ".data.custom bpf_map__set_value_size") ||
!ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.custom clear btf key") ||
!ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.custom clear btf val"))
goto teardown;
/* attempt to resize a global datasec map whose only var is NOT an array */
map = skel->maps.data_non_array;
if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array initial btf"))
goto teardown;
/* set desired size to arbitrary value */
desired_sz = 1024;
err = bpf_map__set_value_size(map, desired_sz);
/* confirm resize is OK but BTF info is cleared */
if (!ASSERT_OK(err, ".data.non_array bpf_map__set_value_size") ||
!ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.non_array clear btf key") ||
!ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array clear btf val"))
goto teardown;
/* attempt to resize a global datasec map
* whose last var is NOT an array
*/
map = skel->maps.data_array_not_last;
if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last initial btf"))
goto teardown;
/* set desired size to a multiple of element size */
element_sz = sizeof(skel->data_array_not_last->my_array_first[0]);
desired_sz = element_sz * 8;
/* confirm desired size aligns with array */
if (!ASSERT_EQ(desired_sz % element_sz, 0, "my_array_first alignment"))
goto teardown;
err = bpf_map__set_value_size(map, desired_sz);
/* confirm resize is OK but BTF info is cleared */
if (!ASSERT_OK(err, ".data.array_not_last bpf_map__set_value_size") ||
!ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.array_not_last clear btf key") ||
!ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last clear btf val"))
goto teardown;
teardown:
test_global_map_resize__destroy(skel);
}
void test_global_map_resize(void)
{
if (test__start_subtest("global_map_resize_bss"))
global_map_resize_bss_subtest();
if (test__start_subtest("global_map_resize_data"))
global_map_resize_data_subtest();
if (test__start_subtest("global_map_resize_invalid"))
global_map_resize_invalid_subtest();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/global_map_resize.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <test_progs.h>
#include "inner_array_lookup.skel.h"
void test_inner_array_lookup(void)
{
int map1_fd, err;
int key = 3;
int val = 1;
struct inner_array_lookup *skel;
skel = inner_array_lookup__open_and_load();
if (!ASSERT_OK_PTR(skel, "open_load_skeleton"))
return;
err = inner_array_lookup__attach(skel);
if (!ASSERT_OK(err, "skeleton_attach"))
goto cleanup;
map1_fd = bpf_map__fd(skel->maps.inner_map1);
bpf_map_update_elem(map1_fd, &key, &val, 0);
/* Probe should have set the element at index 3 to 2 */
bpf_map_lookup_elem(map1_fd, &key, &val);
ASSERT_EQ(val, 2, "value_is_2");
cleanup:
inner_array_lookup__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/inner_array_lookup.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "get_branch_snapshot.skel.h"
static int *pfd_array;
static int cpu_cnt;
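/* LBR snapshots are generally unavailable inside guests, so detect the
 * "hypervisor" CPU flag in /proc/cpuinfo and skip the test when virtualized.
 */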
static bool is_hypervisor(void)
{
char *line = NULL;
bool ret = false;
	size_t len = 0;
FILE *fp;
fp = fopen("/proc/cpuinfo", "r");
if (!fp)
return false;
while (getline(&line, &len, fp) != -1) {
if (!strncmp(line, "flags", 5)) {
if (strstr(line, "hypervisor") != NULL)
ret = true;
break;
}
}
free(line);
fclose(fp);
return ret;
}
static int create_perf_events(void)
{
struct perf_event_attr attr = {0};
int cpu;
/* create perf event */
attr.size = sizeof(attr);
attr.type = PERF_TYPE_HARDWARE;
attr.config = PERF_COUNT_HW_CPU_CYCLES;
attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL |
PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
cpu_cnt = libbpf_num_possible_cpus();
pfd_array = malloc(sizeof(int) * cpu_cnt);
if (!pfd_array) {
cpu_cnt = 0;
return 1;
}
for (cpu = 0; cpu < cpu_cnt; cpu++) {
pfd_array[cpu] = syscall(__NR_perf_event_open, &attr,
-1, cpu, -1, PERF_FLAG_FD_CLOEXEC);
if (pfd_array[cpu] < 0)
break;
}
return cpu == 0;
}
static void close_perf_events(void)
{
int cpu, fd;
for (cpu = 0; cpu < cpu_cnt; cpu++) {
fd = pfd_array[cpu];
if (fd < 0)
break;
close(fd);
}
free(pfd_array);
}
void serial_test_get_branch_snapshot(void)
{
struct get_branch_snapshot *skel = NULL;
int err;
/* Skip the test before we fix LBR snapshot for hypervisor. */
if (is_hypervisor()) {
test__skip();
return;
}
if (create_perf_events()) {
test__skip(); /* system doesn't support LBR */
goto cleanup;
}
skel = get_branch_snapshot__open_and_load();
if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load"))
goto cleanup;
err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low);
if (!ASSERT_OK(err, "kallsyms_find"))
goto cleanup;
/* Just a guess for the end of this function, as module functions
* in /proc/kallsyms could come in any order.
*/
skel->bss->address_high = skel->bss->address_low + 128;
err = get_branch_snapshot__attach(skel);
if (!ASSERT_OK(err, "get_branch_snapshot__attach"))
goto cleanup;
trigger_module_test_read(100);
if (skel->bss->total_entries < 16) {
/* too few entries for the hit/waste test */
test__skip();
goto cleanup;
}
ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr");
/* Given we stop LBR in software, we will waste a few entries.
* But we should try to waste as few as possible entries. We are at
* about 7 on x86_64 systems.
* Add a check for < 10 so that we get heads-up when something
* changes and wastes too many entries.
*/
ASSERT_LT(skel->bss->wasted_entries, 10, "check_wasted_entries");
cleanup:
get_branch_snapshot__destroy(skel);
close_perf_events();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
/* TODO: corrupts other tests that use connect() */
void serial_test_probe_user(void)
{
static const char *const prog_names[] = {
"handle_sys_connect",
#if defined(__s390x__)
"handle_sys_socketcall",
#endif
};
enum { prog_count = ARRAY_SIZE(prog_names) };
const char *obj_file = "./test_probe_user.bpf.o";
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, );
int err, results_map_fd, sock_fd, duration = 0;
struct sockaddr curr, orig, tmp;
struct sockaddr_in *in = (struct sockaddr_in *)&curr;
struct bpf_link *kprobe_links[prog_count] = {};
struct bpf_program *kprobe_progs[prog_count];
struct bpf_object *obj;
static const int zero = 0;
size_t i;
obj = bpf_object__open_file(obj_file, &opts);
if (!ASSERT_OK_PTR(obj, "obj_open_file"))
return;
for (i = 0; i < prog_count; i++) {
kprobe_progs[i] =
bpf_object__find_program_by_name(obj, prog_names[i]);
if (CHECK(!kprobe_progs[i], "find_probe",
"prog '%s' not found\n", prog_names[i]))
goto cleanup;
}
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
goto cleanup;
results_map_fd = bpf_find_map(__func__, obj, "test_pro.bss");
if (CHECK(results_map_fd < 0, "find_bss_map",
"err %d\n", results_map_fd))
goto cleanup;
for (i = 0; i < prog_count; i++) {
kprobe_links[i] = bpf_program__attach(kprobe_progs[i]);
if (!ASSERT_OK_PTR(kprobe_links[i], "attach_kprobe"))
goto cleanup;
}
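	/* On connect() the kprobe should record this sockaddr into the results
	 * map and overwrite the user-space copy with 0xab bytes; both effects
	 * are checked below.
	 */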
memset(&curr, 0, sizeof(curr));
in->sin_family = AF_INET;
in->sin_port = htons(5555);
in->sin_addr.s_addr = inet_addr("255.255.255.255");
memcpy(&orig, &curr, sizeof(curr));
sock_fd = socket(AF_INET, SOCK_STREAM, 0);
if (CHECK(sock_fd < 0, "create_sock_fd", "err %d\n", sock_fd))
goto cleanup;
connect(sock_fd, &curr, sizeof(curr));
close(sock_fd);
err = bpf_map_lookup_elem(results_map_fd, &zero, &tmp);
if (CHECK(err, "get_kprobe_res",
"failed to get kprobe res: %d\n", err))
goto cleanup;
in = (struct sockaddr_in *)&tmp;
if (CHECK(memcmp(&tmp, &orig, sizeof(orig)), "check_kprobe_res",
"wrong kprobe res from probe read: %s:%u\n",
inet_ntoa(in->sin_addr), ntohs(in->sin_port)))
goto cleanup;
memset(&tmp, 0xab, sizeof(tmp));
in = (struct sockaddr_in *)&curr;
if (CHECK(memcmp(&curr, &tmp, sizeof(tmp)), "check_kprobe_res",
"wrong kprobe res from probe write: %s:%u\n",
inet_ntoa(in->sin_addr), ntohs(in->sin_port)))
goto cleanup;
cleanup:
for (i = 0; i < prog_count; i++)
bpf_link__destroy(kprobe_links[i]);
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/probe_user.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <linux/pkt_cls.h>
#include "cap_helpers.h"
#include "test_tc_bpf.skel.h"
#define LO_IFINDEX 1
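/* Declare a set of bpf_tc_opts permutations (handle/priority/prog_fd/prog_id/
 * flags set or unset) used below to exercise the libbpf TC API error paths.
 */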
#define TEST_DECLARE_OPTS(__fd) \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_h, .handle = 1); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_p, .priority = 1); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_f, .prog_fd = __fd); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hp, .handle = 1, .priority = 1); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hf, .handle = 1, .prog_fd = __fd); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_pf, .priority = 1, .prog_fd = __fd); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpf, .handle = 1, .priority = 1, .prog_fd = __fd); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpi, .handle = 1, .priority = 1, .prog_id = 42); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpr, .handle = 1, .priority = 1, \
.flags = BPF_TC_F_REPLACE); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpfi, .handle = 1, .priority = 1, .prog_fd = __fd, \
.prog_id = 42); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_prio_max, .handle = 1, .priority = UINT16_MAX + 1);
static int test_tc_bpf_basic(const struct bpf_tc_hook *hook, int fd)
{
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd);
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
int ret;
ret = bpf_prog_get_info_by_fd(fd, &info, &info_len);
if (!ASSERT_OK(ret, "bpf_prog_get_info_by_fd"))
return ret;
ret = bpf_tc_attach(hook, &opts);
if (!ASSERT_OK(ret, "bpf_tc_attach"))
return ret;
if (!ASSERT_EQ(opts.handle, 1, "handle set") ||
!ASSERT_EQ(opts.priority, 1, "priority set") ||
!ASSERT_EQ(opts.prog_id, info.id, "prog_id set"))
goto end;
opts.prog_id = 0;
opts.flags = BPF_TC_F_REPLACE;
ret = bpf_tc_attach(hook, &opts);
if (!ASSERT_OK(ret, "bpf_tc_attach replace mode"))
goto end;
opts.flags = opts.prog_fd = opts.prog_id = 0;
ret = bpf_tc_query(hook, &opts);
if (!ASSERT_OK(ret, "bpf_tc_query"))
goto end;
if (!ASSERT_EQ(opts.handle, 1, "handle set") ||
!ASSERT_EQ(opts.priority, 1, "priority set") ||
!ASSERT_EQ(opts.prog_id, info.id, "prog_id set"))
goto end;
end:
opts.flags = opts.prog_fd = opts.prog_id = 0;
ret = bpf_tc_detach(hook, &opts);
ASSERT_OK(ret, "bpf_tc_detach");
return ret;
}
static int test_tc_bpf_api(struct bpf_tc_hook *hook, int fd)
{
DECLARE_LIBBPF_OPTS(bpf_tc_opts, attach_opts, .handle = 1, .priority = 1, .prog_fd = fd);
DECLARE_LIBBPF_OPTS(bpf_tc_hook, inv_hook, .attach_point = BPF_TC_INGRESS);
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1);
int ret;
ret = bpf_tc_hook_create(NULL);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook = NULL"))
return -EINVAL;
/* hook ifindex = 0 */
ret = bpf_tc_hook_create(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook ifindex == 0"))
return -EINVAL;
ret = bpf_tc_hook_destroy(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook ifindex == 0"))
return -EINVAL;
ret = bpf_tc_attach(&inv_hook, &attach_opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook ifindex == 0"))
return -EINVAL;
attach_opts.prog_id = 0;
ret = bpf_tc_detach(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook ifindex == 0"))
return -EINVAL;
ret = bpf_tc_query(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook ifindex == 0"))
return -EINVAL;
/* hook ifindex < 0 */
inv_hook.ifindex = -1;
ret = bpf_tc_hook_create(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook ifindex < 0"))
return -EINVAL;
ret = bpf_tc_hook_destroy(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook ifindex < 0"))
return -EINVAL;
ret = bpf_tc_attach(&inv_hook, &attach_opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook ifindex < 0"))
return -EINVAL;
attach_opts.prog_id = 0;
ret = bpf_tc_detach(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook ifindex < 0"))
return -EINVAL;
ret = bpf_tc_query(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook ifindex < 0"))
return -EINVAL;
inv_hook.ifindex = LO_IFINDEX;
/* hook.attach_point invalid */
inv_hook.attach_point = 0xabcd;
ret = bpf_tc_hook_create(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook.attach_point"))
return -EINVAL;
ret = bpf_tc_hook_destroy(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook.attach_point"))
return -EINVAL;
ret = bpf_tc_attach(&inv_hook, &attach_opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook.attach_point"))
return -EINVAL;
ret = bpf_tc_detach(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook.attach_point"))
return -EINVAL;
ret = bpf_tc_query(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook.attach_point"))
return -EINVAL;
inv_hook.attach_point = BPF_TC_INGRESS;
/* hook.attach_point valid, but parent invalid */
inv_hook.parent = TC_H_MAKE(1UL << 16, 10);
ret = bpf_tc_hook_create(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook parent"))
return -EINVAL;
ret = bpf_tc_hook_destroy(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook parent"))
return -EINVAL;
ret = bpf_tc_attach(&inv_hook, &attach_opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook parent"))
return -EINVAL;
ret = bpf_tc_detach(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook parent"))
return -EINVAL;
ret = bpf_tc_query(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook parent"))
return -EINVAL;
inv_hook.attach_point = BPF_TC_CUSTOM;
inv_hook.parent = 0;
/* These return EOPNOTSUPP instead of EINVAL as parent is checked after
* attach_point of the hook.
*/
ret = bpf_tc_hook_create(&inv_hook);
if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_create invalid hook parent"))
return -EINVAL;
ret = bpf_tc_hook_destroy(&inv_hook);
if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_destroy invalid hook parent"))
return -EINVAL;
ret = bpf_tc_attach(&inv_hook, &attach_opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook parent"))
return -EINVAL;
ret = bpf_tc_detach(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook parent"))
return -EINVAL;
ret = bpf_tc_query(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook parent"))
return -EINVAL;
inv_hook.attach_point = BPF_TC_INGRESS;
/* detach */
{
TEST_DECLARE_OPTS(fd);
ret = bpf_tc_detach(NULL, &opts_hp);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook = NULL"))
return -EINVAL;
ret = bpf_tc_detach(hook, NULL);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid opts = NULL"))
return -EINVAL;
ret = bpf_tc_detach(hook, &opts_hpr);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid flags set"))
return -EINVAL;
ret = bpf_tc_detach(hook, &opts_hpf);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid prog_fd set"))
return -EINVAL;
ret = bpf_tc_detach(hook, &opts_hpi);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid prog_id set"))
return -EINVAL;
ret = bpf_tc_detach(hook, &opts_p);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid handle unset"))
return -EINVAL;
ret = bpf_tc_detach(hook, &opts_h);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid priority unset"))
return -EINVAL;
ret = bpf_tc_detach(hook, &opts_prio_max);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid priority > UINT16_MAX"))
return -EINVAL;
}
/* query */
{
TEST_DECLARE_OPTS(fd);
ret = bpf_tc_query(NULL, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook = NULL"))
return -EINVAL;
ret = bpf_tc_query(hook, NULL);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid opts = NULL"))
return -EINVAL;
ret = bpf_tc_query(hook, &opts_hpr);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid flags set"))
return -EINVAL;
ret = bpf_tc_query(hook, &opts_hpf);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid prog_fd set"))
return -EINVAL;
ret = bpf_tc_query(hook, &opts_hpi);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid prog_id set"))
return -EINVAL;
ret = bpf_tc_query(hook, &opts_p);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid handle unset"))
return -EINVAL;
ret = bpf_tc_query(hook, &opts_h);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid priority unset"))
return -EINVAL;
ret = bpf_tc_query(hook, &opts_prio_max);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid priority > UINT16_MAX"))
return -EINVAL;
/* when chain is not present, kernel returns -EINVAL */
ret = bpf_tc_query(hook, &opts_hp);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query valid handle, priority set"))
return -EINVAL;
}
/* attach */
{
TEST_DECLARE_OPTS(fd);
ret = bpf_tc_attach(NULL, &opts_hp);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook = NULL"))
return -EINVAL;
ret = bpf_tc_attach(hook, NULL);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid opts = NULL"))
return -EINVAL;
opts_hp.flags = 42;
ret = bpf_tc_attach(hook, &opts_hp);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid flags"))
return -EINVAL;
ret = bpf_tc_attach(hook, NULL);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid prog_fd unset"))
return -EINVAL;
ret = bpf_tc_attach(hook, &opts_hpi);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid prog_id set"))
return -EINVAL;
ret = bpf_tc_attach(hook, &opts_pf);
if (!ASSERT_OK(ret, "bpf_tc_attach valid handle unset"))
return -EINVAL;
opts_pf.prog_fd = opts_pf.prog_id = 0;
ASSERT_OK(bpf_tc_detach(hook, &opts_pf), "bpf_tc_detach");
ret = bpf_tc_attach(hook, &opts_hf);
if (!ASSERT_OK(ret, "bpf_tc_attach valid priority unset"))
return -EINVAL;
opts_hf.prog_fd = opts_hf.prog_id = 0;
ASSERT_OK(bpf_tc_detach(hook, &opts_hf), "bpf_tc_detach");
ret = bpf_tc_attach(hook, &opts_prio_max);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid priority > UINT16_MAX"))
return -EINVAL;
ret = bpf_tc_attach(hook, &opts_f);
if (!ASSERT_OK(ret, "bpf_tc_attach valid both handle and priority unset"))
return -EINVAL;
opts_f.prog_fd = opts_f.prog_id = 0;
ASSERT_OK(bpf_tc_detach(hook, &opts_f), "bpf_tc_detach");
}
return 0;
}
void tc_bpf_root(void)
{
DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX,
.attach_point = BPF_TC_INGRESS);
struct test_tc_bpf *skel = NULL;
bool hook_created = false;
int cls_fd, ret;
skel = test_tc_bpf__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_tc_bpf__open_and_load"))
return;
cls_fd = bpf_program__fd(skel->progs.cls);
ret = bpf_tc_hook_create(&hook);
if (ret == 0)
hook_created = true;
ret = ret == -EEXIST ? 0 : ret;
if (!ASSERT_OK(ret, "bpf_tc_hook_create(BPF_TC_INGRESS)"))
goto end;
hook.attach_point = BPF_TC_CUSTOM;
hook.parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
ret = bpf_tc_hook_create(&hook);
if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_create invalid hook.attach_point"))
goto end;
ret = test_tc_bpf_basic(&hook, cls_fd);
if (!ASSERT_OK(ret, "test_tc_internal ingress"))
goto end;
ret = bpf_tc_hook_destroy(&hook);
if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_destroy invalid hook.attach_point"))
goto end;
hook.attach_point = BPF_TC_INGRESS;
hook.parent = 0;
bpf_tc_hook_destroy(&hook);
ret = test_tc_bpf_basic(&hook, cls_fd);
if (!ASSERT_OK(ret, "test_tc_internal ingress"))
goto end;
bpf_tc_hook_destroy(&hook);
hook.attach_point = BPF_TC_EGRESS;
ret = test_tc_bpf_basic(&hook, cls_fd);
if (!ASSERT_OK(ret, "test_tc_internal egress"))
goto end;
bpf_tc_hook_destroy(&hook);
ret = test_tc_bpf_api(&hook, cls_fd);
if (!ASSERT_OK(ret, "test_tc_bpf_api"))
goto end;
bpf_tc_hook_destroy(&hook);
end:
if (hook_created) {
hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
bpf_tc_hook_destroy(&hook);
}
test_tc_bpf__destroy(skel);
}
void tc_bpf_non_root(void)
{
struct test_tc_bpf *skel = NULL;
__u64 caps = 0;
int ret;
/* In case CAP_BPF and CAP_PERFMON is not set */
ret = cap_enable_effective(1ULL << CAP_BPF | 1ULL << CAP_NET_ADMIN, &caps);
if (!ASSERT_OK(ret, "set_cap_bpf_cap_net_admin"))
return;
ret = cap_disable_effective(1ULL << CAP_SYS_ADMIN | 1ULL << CAP_PERFMON, NULL);
if (!ASSERT_OK(ret, "disable_cap_sys_admin"))
goto restore_cap;
skel = test_tc_bpf__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_tc_bpf__open_and_load"))
goto restore_cap;
test_tc_bpf__destroy(skel);
restore_cap:
if (caps)
cap_enable_effective(caps, NULL);
}
void test_tc_bpf(void)
{
if (test__start_subtest("tc_bpf_root"))
tc_bpf_root();
if (test__start_subtest("tc_bpf_non_root"))
tc_bpf_non_root();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/tc_bpf.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
static char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int map_fd = -1;
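/* Load a cgroup/skb program that adds 'val' to an array map element and to
 * cgroup storage, bumps per-cpu cgroup storage by one, and returns 'verdict'.
 */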
static int prog_load_cnt(int verdict, int val)
{
int cgroup_storage_fd, percpu_cgroup_storage_fd;
if (map_fd < 0)
map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
if (map_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}
cgroup_storage_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL,
sizeof(struct bpf_cgroup_storage_key), 8, 0, NULL);
if (cgroup_storage_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}
percpu_cgroup_storage_fd = bpf_map_create(
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL,
sizeof(struct bpf_cgroup_storage_key), 8, 0, NULL);
if (percpu_cgroup_storage_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}
struct bpf_insn prog[] = {
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
BPF_LD_MAP_FD(BPF_REG_1, map_fd),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_MOV64_IMM(BPF_REG_1, val),
BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
size_t insns_cnt = ARRAY_SIZE(prog);
int ret;
ret = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
close(cgroup_storage_fd);
return ret;
}
void serial_test_cgroup_attach_multi(void)
{
__u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id;
int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0;
DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts);
int allow_prog[7] = {-1};
unsigned long long value;
__u32 duration = 0;
int i = 0;
for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
allow_prog[i] = prog_load_cnt(1, 1 << i);
if (CHECK(allow_prog[i] < 0, "prog_load",
"verifier output:\n%s\n-------\n", bpf_log_buf))
goto err;
}
if (CHECK_FAIL(setup_cgroup_environment()))
goto err;
cg1 = create_and_get_cgroup("/cg1");
if (CHECK_FAIL(cg1 < 0))
goto err;
cg2 = create_and_get_cgroup("/cg1/cg2");
if (CHECK_FAIL(cg2 < 0))
goto err;
cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
if (CHECK_FAIL(cg3 < 0))
goto err;
cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
if (CHECK_FAIL(cg4 < 0))
goto err;
cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
if (CHECK_FAIL(cg5 < 0))
goto err;
if (CHECK_FAIL(join_cgroup("/cg1/cg2/cg3/cg4/cg5")))
goto err;
if (CHECK(bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI),
"prog0_attach_to_cg1_multi", "errno=%d\n", errno))
goto err;
if (CHECK(!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI),
"fail_same_prog_attach_to_cg1", "unexpected success\n"))
goto err;
if (CHECK(bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI),
"prog1_attach_to_cg1_multi", "errno=%d\n", errno))
goto err;
if (CHECK(bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"prog2_attach_to_cg2_override", "errno=%d\n", errno))
goto err;
if (CHECK(bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI),
"prog3_attach_to_cg3_multi", "errno=%d\n", errno))
goto err;
if (CHECK(bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"prog4_attach_to_cg4_override", "errno=%d\n", errno))
goto err;
if (CHECK(bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0),
"prog5_attach_to_cg5_none", "errno=%d\n", errno))
goto err;
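/*
 * Each allow_prog[i] adds 1 << i to the array map, so the value below
 * encodes which programs were effective for cg5: progs 0 and 1
 * (multi on cg1), prog 3 (multi on cg3) and prog 5 (attached to cg5).
 */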
CHECK_FAIL(system(PING_CMD));
CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
CHECK_FAIL(value != 1 + 2 + 8 + 32);
/* query the number of effective progs in cg5 */
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, NULL, NULL, &prog_cnt));
CHECK_FAIL(prog_cnt != 4);
/* retrieve prog_ids of effective progs in cg5 */
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, &attach_flags,
prog_ids, &prog_cnt));
CHECK_FAIL(prog_cnt != 4);
CHECK_FAIL(attach_flags != 0);
saved_prog_id = prog_ids[0];
/* check enospc handling */
prog_ids[0] = 0;
prog_cnt = 2;
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, &attach_flags,
prog_ids, &prog_cnt) >= 0);
CHECK_FAIL(errno != ENOSPC);
CHECK_FAIL(prog_cnt != 4);
/* check that prog_ids are returned even when buffer is too small */
CHECK_FAIL(prog_ids[0] != saved_prog_id);
/* retrieve prog_id of single attached prog in cg5 */
prog_ids[0] = 0;
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL,
prog_ids, &prog_cnt));
CHECK_FAIL(prog_cnt != 1);
CHECK_FAIL(prog_ids[0] != saved_prog_id);
/* detach bottom program and ping again */
if (CHECK(bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS),
"prog_detach_from_cg5", "errno=%d\n", errno))
goto err;
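/* with cg5's own prog gone, cg4's overridable prog 4 (1 << 4) becomes effective */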
value = 0;
CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
CHECK_FAIL(system(PING_CMD));
CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
CHECK_FAIL(value != 1 + 2 + 8 + 16);
/* test replace */
attach_opts.flags = BPF_F_ALLOW_OVERRIDE | BPF_F_REPLACE;
attach_opts.replace_prog_fd = allow_prog[0];
if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"fail_prog_replace_override", "unexpected success\n"))
goto err;
CHECK_FAIL(errno != EINVAL);
attach_opts.flags = BPF_F_REPLACE;
if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"fail_prog_replace_no_multi", "unexpected success\n"))
goto err;
CHECK_FAIL(errno != EINVAL);
attach_opts.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;
attach_opts.replace_prog_fd = -1;
if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"fail_prog_replace_bad_fd", "unexpected success\n"))
goto err;
CHECK_FAIL(errno != EBADF);
/* replacing a program that is not attached to cgroup should fail */
attach_opts.replace_prog_fd = allow_prog[3];
if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"fail_prog_replace_no_ent", "unexpected success\n"))
goto err;
CHECK_FAIL(errno != ENOENT);
/* replace 1st from the top program */
attach_opts.replace_prog_fd = allow_prog[0];
if (CHECK(bpf_prog_attach_opts(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"prog_replace", "errno=%d\n", errno))
goto err;
/* replace program with itself */
attach_opts.replace_prog_fd = allow_prog[6];
if (CHECK(bpf_prog_attach_opts(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"prog_replace", "errno=%d\n", errno))
goto err;
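/* prog 6 (1 << 6) now runs in place of prog 0, hence 64 below instead of 1 */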
value = 0;
CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
CHECK_FAIL(system(PING_CMD));
CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
CHECK_FAIL(value != 64 + 2 + 8 + 16);
/* detach 3rd from bottom program and ping again */
if (CHECK(!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS),
"fail_prog_detach_from_cg3", "unexpected success\n"))
goto err;
if (CHECK(bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS),
"prog3_detach_from_cg3", "errno=%d\n", errno))
goto err;
value = 0;
CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
CHECK_FAIL(system(PING_CMD));
CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
CHECK_FAIL(value != 64 + 2 + 16);
/* detach 2nd from bottom program and ping again */
if (CHECK(bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS),
"prog_detach_from_cg4", "errno=%d\n", errno))
goto err;
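/* with cg4's prog detached, cg2's overridable prog 2 (1 << 2) becomes effective */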
value = 0;
CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
CHECK_FAIL(system(PING_CMD));
CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
CHECK_FAIL(value != 64 + 2 + 4);
prog_cnt = 4;
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, &attach_flags,
prog_ids, &prog_cnt));
CHECK_FAIL(prog_cnt != 3);
CHECK_FAIL(attach_flags != 0);
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL,
prog_ids, &prog_cnt));
CHECK_FAIL(prog_cnt != 0);
err:
for (i = 0; i < ARRAY_SIZE(allow_prog); i++)
if (allow_prog[i] >= 0)
close(allow_prog[i]);
close(cg1);
close(cg2);
close(cg3);
close(cg4);
close(cg5);
cleanup_cgroup_environment();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_attach_kprobe_sleepable.skel.h"
#include "test_attach_probe_manual.skel.h"
#include "test_attach_probe.skel.h"
/* this is how a USDT semaphore is actually defined, except for the volatile modifier */
volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes")));
/* uprobe attach point */
static noinline void trigger_func(void)
{
asm volatile ("");
}
/* attach point for byname uprobe */
static noinline void trigger_func2(void)
{
asm volatile ("");
}
/* attach point for byname sleepable uprobe */
static noinline void trigger_func3(void)
{
asm volatile ("");
}
/* attach point for ref_ctr */
static noinline void trigger_func4(void)
{
asm volatile ("");
}
static char test_data[] = "test_data";
/* manual attach kprobe/kretprobe/uprobe/uretprobe testings */
static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
{
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
struct bpf_link *kprobe_link, *kretprobe_link;
struct bpf_link *uprobe_link, *uretprobe_link;
struct test_attach_probe_manual *skel;
ssize_t uprobe_offset;
skel = test_attach_probe_manual__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
return;
uprobe_offset = get_uprobe_offset(&trigger_func);
if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
goto cleanup;
/* manual-attach kprobe/kretprobe */
kprobe_opts.attach_mode = attach_mode;
kprobe_opts.retprobe = false;
kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
SYS_NANOSLEEP_KPROBE_NAME,
&kprobe_opts);
if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
goto cleanup;
skel->links.handle_kprobe = kprobe_link;
kprobe_opts.retprobe = true;
kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
SYS_NANOSLEEP_KPROBE_NAME,
&kprobe_opts);
if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe"))
goto cleanup;
skel->links.handle_kretprobe = kretprobe_link;
/* manual-attach uprobe/uretprobe */
uprobe_opts.attach_mode = attach_mode;
uprobe_opts.ref_ctr_offset = 0;
uprobe_opts.retprobe = false;
uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
0 /* self pid */,
"/proc/self/exe",
uprobe_offset,
&uprobe_opts);
if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe"))
goto cleanup;
skel->links.handle_uprobe = uprobe_link;
uprobe_opts.retprobe = true;
uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
-1 /* any pid */,
"/proc/self/exe",
uprobe_offset, &uprobe_opts);
if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe"))
goto cleanup;
skel->links.handle_uretprobe = uretprobe_link;
/* attach uprobe by function name manually */
uprobe_opts.func_name = "trigger_func2";
uprobe_opts.retprobe = false;
uprobe_opts.ref_ctr_offset = 0;
skel->links.handle_uprobe_byname =
bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname,
0 /* this pid */,
"/proc/self/exe",
0, &uprobe_opts);
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname"))
goto cleanup;
/* trigger & validate kprobe && kretprobe */
usleep(1);
/* trigger & validate uprobe & uretprobe */
trigger_func();
/* trigger & validate uprobe attached by name */
trigger_func2();
ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");
cleanup:
test_attach_probe_manual__destroy(skel);
}
static void test_attach_probe_auto(struct test_attach_probe *skel)
{
struct bpf_link *uprobe_err_link;
/* auto-attachable kprobe and kretprobe */
skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");
skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");
/* verify auto-attach fails for old-style uprobe definition */
uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
"auto-attach should fail for old-style name"))
return;
/* verify auto-attach works */
skel->links.handle_uretprobe_byname =
bpf_program__attach(skel->progs.handle_uretprobe_byname);
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname"))
return;
/* trigger & validate kprobe && kretprobe */
usleep(1);
/* trigger & validate uprobe attached by name */
trigger_func2();
ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
}
static void test_uprobe_lib(struct test_attach_probe *skel)
{
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
FILE *devnull;
/* test attach by name for a library function, using the library
* as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
*/
uprobe_opts.func_name = "fopen";
uprobe_opts.retprobe = false;
skel->links.handle_uprobe_byname2 =
bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2,
0 /* this pid */,
"libc.so.6",
0, &uprobe_opts);
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
return;
uprobe_opts.func_name = "fclose";
uprobe_opts.retprobe = true;
skel->links.handle_uretprobe_byname2 =
bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2,
-1 /* any pid */,
"libc.so.6",
0, &uprobe_opts);
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2"))
return;
/* trigger & validate shared library u[ret]probes attached by name */
devnull = fopen("/dev/null", "r");
fclose(devnull);
ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
}
static void test_uprobe_ref_ctr(struct test_attach_probe *skel)
{
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
struct bpf_link *uprobe_link, *uretprobe_link;
ssize_t uprobe_offset, ref_ctr_offset;
uprobe_offset = get_uprobe_offset(&trigger_func4);
if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset_ref_ctr"))
return;
ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
return;
ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
uprobe_opts.retprobe = false;
uprobe_opts.ref_ctr_offset = ref_ctr_offset;
uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_ref_ctr,
0 /* self pid */,
"/proc/self/exe",
uprobe_offset,
&uprobe_opts);
if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_ref_ctr"))
return;
skel->links.handle_uprobe_ref_ctr = uprobe_link;
ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");
/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
uprobe_opts.retprobe = true;
uprobe_opts.ref_ctr_offset = ref_ctr_offset;
uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_ref_ctr,
-1 /* any pid */,
"/proc/self/exe",
uprobe_offset, &uprobe_opts);
if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_ref_ctr"))
return;
skel->links.handle_uretprobe_ref_ctr = uretprobe_link;
}
static void test_kprobe_sleepable(void)
{
struct test_attach_kprobe_sleepable *skel;
skel = test_attach_kprobe_sleepable__open();
if (!ASSERT_OK_PTR(skel, "skel_kprobe_sleepable_open"))
return;
/* sleepable kprobe test case needs flags set before loading */
if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
goto cleanup;
if (!ASSERT_OK(test_attach_kprobe_sleepable__load(skel),
"skel_kprobe_sleepable_load"))
goto cleanup;
/* sleepable kprobes should not attach successfully */
skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable);
ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable");
cleanup:
test_attach_kprobe_sleepable__destroy(skel);
}
static void test_uprobe_sleepable(struct test_attach_probe *skel)
{
/* test sleepable uprobe and uretprobe variants */
skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable);
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable"))
return;
skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3);
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3"))
return;
skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable);
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable"))
return;
skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3);
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3"))
return;
skel->bss->user_ptr = test_data;
/* trigger & validate sleepable uprobe attached by name */
trigger_func3();
ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
ASSERT_EQ(skel->bss->uprobe_byname3_res, 10, "check_uprobe_byname3_res");
ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 11, "check_uretprobe_byname3_sleepable_res");
ASSERT_EQ(skel->bss->uretprobe_byname3_res, 12, "check_uretprobe_byname3_res");
}
void test_attach_probe(void)
{
struct test_attach_probe *skel;
skel = test_attach_probe__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
goto cleanup;
if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
goto cleanup;
if (test__start_subtest("manual-default"))
test_attach_probe_manual(PROBE_ATTACH_MODE_DEFAULT);
if (test__start_subtest("manual-legacy"))
test_attach_probe_manual(PROBE_ATTACH_MODE_LEGACY);
if (test__start_subtest("manual-perf"))
test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
if (test__start_subtest("manual-link"))
test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
if (test__start_subtest("auto"))
test_attach_probe_auto(skel);
if (test__start_subtest("kprobe-sleepable"))
test_kprobe_sleepable();
if (test__start_subtest("uprobe-lib"))
test_uprobe_lib(skel);
if (test__start_subtest("uprobe-sleepable"))
test_uprobe_sleepable(skel);
if (test__start_subtest("uprobe-ref_ctr"))
test_uprobe_ref_ctr(skel);
cleanup:
test_attach_probe__destroy(skel);
ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup");
}
| linux-master | tools/testing/selftests/bpf/prog_tests/attach_probe.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
void test_pkt_md_access(void)
{
const char *file = "./test_pkt_md_access.bpf.o";
struct bpf_object *obj;
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 10,
);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run_opts err");
ASSERT_OK(topts.retval, "test_run_opts retval");
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/pkt_md_access.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
*
* Author: Roberto Sassu <[email protected]>
*/
#include <test_progs.h>
#include "test_libbpf_get_fd_by_id_opts.skel.h"
void test_libbpf_get_fd_by_id_opts(void)
{
struct test_libbpf_get_fd_by_id_opts *skel;
struct bpf_map_info info_m = {};
__u32 len = sizeof(info_m), value;
int ret, zero = 0, fd = -1;
LIBBPF_OPTS(bpf_get_fd_by_id_opts, fd_opts_rdonly,
.open_flags = BPF_F_RDONLY,
);
skel = test_libbpf_get_fd_by_id_opts__open_and_load();
if (!ASSERT_OK_PTR(skel,
"test_libbpf_get_fd_by_id_opts__open_and_load"))
return;
ret = test_libbpf_get_fd_by_id_opts__attach(skel);
if (!ASSERT_OK(ret, "test_libbpf_get_fd_by_id_opts__attach"))
goto close_prog;
ret = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.data_input),
&info_m, &len);
if (!ASSERT_OK(ret, "bpf_map_get_info_by_fd"))
goto close_prog;
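/*
 * The skeleton attaches a BPF program that is expected to deny
 * non-read-only access to the map, so plain get_fd_by_id (and opts
 * without BPF_F_RDONLY) must fail below.
 */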
fd = bpf_map_get_fd_by_id(info_m.id);
if (!ASSERT_LT(fd, 0, "bpf_map_get_fd_by_id"))
goto close_prog;
fd = bpf_map_get_fd_by_id_opts(info_m.id, NULL);
if (!ASSERT_LT(fd, 0, "bpf_map_get_fd_by_id_opts"))
goto close_prog;
fd = bpf_map_get_fd_by_id_opts(info_m.id, &fd_opts_rdonly);
if (!ASSERT_GE(fd, 0, "bpf_map_get_fd_by_id_opts"))
goto close_prog;
/* Map lookup should work with read-only fd. */
ret = bpf_map_lookup_elem(fd, &zero, &value);
if (!ASSERT_OK(ret, "bpf_map_lookup_elem"))
goto close_prog;
if (!ASSERT_EQ(value, 0, "map value mismatch"))
goto close_prog;
/* Map update should not work with read-only fd. */
ret = bpf_map_update_elem(fd, &zero, &len, BPF_ANY);
if (!ASSERT_LT(ret, 0, "bpf_map_update_elem"))
goto close_prog;
/* Map update should work with read-write fd. */
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.data_input), &zero,
&len, BPF_ANY);
if (!ASSERT_OK(ret, "bpf_map_update_elem"))
goto close_prog;
/* Prog get fd with opts set should not work (no kernel support). */
ret = bpf_prog_get_fd_by_id_opts(0, &fd_opts_rdonly);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_prog_get_fd_by_id_opts"))
goto close_prog;
/* Link get fd with opts set should not work (no kernel support). */
ret = bpf_link_get_fd_by_id_opts(0, &fd_opts_rdonly);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_link_get_fd_by_id_opts"))
goto close_prog;
/* BTF get fd with opts set should not work (no kernel support). */
ret = bpf_btf_get_fd_by_id_opts(0, &fd_opts_rdonly);
ASSERT_EQ(ret, -EINVAL, "bpf_btf_get_fd_by_id_opts");
close_prog:
if (fd >= 0)
close(fd);
test_libbpf_get_fd_by_id_opts__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define _GNU_SOURCE
#include <sched.h>
#include <test_progs.h>
#include <time.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include "fexit_sleep.lskel.h"
static int do_sleep(void *skel)
{
struct fexit_sleep_lskel *fexit_skel = skel;
struct timespec ts1 = { .tv_nsec = 1 };
struct timespec ts2 = { .tv_sec = 10 };
fexit_skel->bss->pid = getpid();
(void)syscall(__NR_nanosleep, &ts1, NULL);
(void)syscall(__NR_nanosleep, &ts2, NULL);
return 0;
}
#define STACK_SIZE (1024 * 1024)
static char child_stack[STACK_SIZE];
void test_fexit_sleep(void)
{
struct fexit_sleep_lskel *fexit_skel = NULL;
int wstatus, duration = 0;
pid_t cpid;
int err, fexit_cnt;
fexit_skel = fexit_sleep_lskel__open_and_load();
if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
goto cleanup;
err = fexit_sleep_lskel__attach(fexit_skel);
if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
goto cleanup;
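/* spawn a child that blocks in sys_nanosleep so the fexit trampoline stays live on its stack */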
cpid = clone(do_sleep, child_stack + STACK_SIZE, CLONE_FILES | SIGCHLD, fexit_skel);
if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno)))
goto cleanup;
/* wait until first sys_nanosleep ends and second sys_nanosleep starts */
while (READ_ONCE(fexit_skel->bss->fentry_cnt) != 2);
fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt);
if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt))
goto cleanup;
/* Close the progs and detach them. That will trigger two nop5->jmp5
 * rewrites in the trampolines to skip the nanosleep_fexit prog.
 * The nanosleep_fentry prog will get detached first.
 * The nanosleep_fexit prog will get detached second.
 * Detaching will trigger freeing of both progs' JITed images.
 * There will be two dying bpf_tramp_image-s, but only the initial
 * bpf_tramp_image (the one with both _fentry and _fexit progs) will be
 * stuck waiting for percpu_ref_kill to confirm. The other one will be
 * freed quickly.
 */
close(fexit_skel->progs.nanosleep_fentry.prog_fd);
close(fexit_skel->progs.nanosleep_fexit.prog_fd);
fexit_sleep_lskel__detach(fexit_skel);
/* kill the thread to unwind sys_nanosleep stack through the trampoline */
kill(cpid, 9);
if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", "%s\n", strerror(errno)))
goto cleanup;
if (CHECK(WEXITSTATUS(wstatus) != 0, "exitstatus", "failed"))
goto cleanup;
/* The bypassed nanosleep_fexit prog shouldn't have executed.
 * Unlike the progs, the maps were not freed and are still directly
 * accessible.
 */
fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt);
if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt))
goto cleanup;
cleanup:
fexit_sleep_lskel__destroy(fexit_skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/fexit_sleep.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "test_xdp_noinline.skel.h"
void test_xdp_noinline(void)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
struct test_xdp_noinline *skel;
struct vip key = {.protocol = 6};
struct vip_meta {
__u32 flags;
__u32 vip_num;
} value = {.vip_num = VIP_NUM};
__u32 stats_key = VIP_NUM;
struct vip_stats {
__u64 bytes;
__u64 pkts;
} stats[nr_cpus];
struct real_definition {
union {
__be32 dst;
__be32 dstv6[4];
};
__u8 flags;
} real_def = {.dst = MAGIC_VAL};
__u32 ch_key = 11, real_num = 3;
int err, i;
__u64 bytes = 0, pkts = 0;
char buf[128];
u32 *magic = (u32 *)buf;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = NUM_ITER,
);
skel = test_xdp_noinline__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
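/* populate the LB maps: VIP -> vip_num, hash-ring slot -> real index, real index -> backend definition */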
bpf_map_update_elem(bpf_map__fd(skel->maps.vip_map), &key, &value, 0);
bpf_map_update_elem(bpf_map__fd(skel->maps.ch_rings), &ch_key, &real_num, 0);
bpf_map_update_elem(bpf_map__fd(skel->maps.reals), &real_num, &real_def, 0);
err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.balancer_ingress_v4), &topts);
ASSERT_OK(err, "ipv4 test_run");
ASSERT_EQ(topts.retval, 1, "ipv4 test_run retval");
ASSERT_EQ(topts.data_size_out, 54, "ipv4 test_run data_size_out");
ASSERT_EQ(*magic, MAGIC_VAL, "ipv4 test_run magic");
topts.data_in = &pkt_v6;
topts.data_size_in = sizeof(pkt_v6);
topts.data_out = buf;
topts.data_size_out = sizeof(buf);
err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.balancer_ingress_v6), &topts);
ASSERT_OK(err, "ipv6 test_run");
ASSERT_EQ(topts.retval, 1, "ipv6 test_run retval");
ASSERT_EQ(topts.data_size_out, 74, "ipv6 test_run data_size_out");
ASSERT_EQ(*magic, MAGIC_VAL, "ipv6 test_run magic");
bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), &stats_key, stats);
for (i = 0; i < nr_cpus; i++) {
bytes += stats[i].bytes;
pkts += stats[i].pkts;
}
ASSERT_EQ(bytes, MAGIC_BYTES * NUM_ITER * 2, "stats bytes");
ASSERT_EQ(pkts, NUM_ITER * 2, "stats pkts");
test_xdp_noinline__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_noinline.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define _GNU_SOURCE
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sched.h>
#include <net/if.h>
#include <linux/compiler.h>
#include <bpf/libbpf.h>
#include "network_helpers.h"
#include "test_progs.h"
#include "test_btf_skc_cls_ingress.skel.h"
static struct test_btf_skc_cls_ingress *skel;
static struct sockaddr_in6 srv_sa6;
static __u32 duration;
static int prepare_netns(void)
{
LIBBPF_OPTS(bpf_tc_hook, qdisc_lo, .attach_point = BPF_TC_INGRESS);
LIBBPF_OPTS(bpf_tc_opts, tc_attach,
.prog_fd = bpf_program__fd(skel->progs.cls_ingress));
if (CHECK(unshare(CLONE_NEWNET), "create netns",
"unshare(CLONE_NEWNET): %s (%d)",
strerror(errno), errno))
return -1;
if (CHECK(system("ip link set dev lo up"),
"ip link set dev lo up", "failed\n"))
return -1;
qdisc_lo.ifindex = if_nametoindex("lo");
if (!ASSERT_OK(bpf_tc_hook_create(&qdisc_lo), "qdisc add dev lo clsact"))
return -1;
if (!ASSERT_OK(bpf_tc_attach(&qdisc_lo, &tc_attach),
"filter add dev lo ingress"))
return -1;
/* Ensure 20 bytes of TCP options (i.e. a 40-byte TCP header in total)
 * for the bpf_tcp_gen_syncookie() helper.
 */
if (write_sysctl("/proc/sys/net/ipv4/tcp_window_scaling", "1") ||
write_sysctl("/proc/sys/net/ipv4/tcp_timestamps", "1") ||
write_sysctl("/proc/sys/net/ipv4/tcp_sack", "1"))
return -1;
return 0;
}
static void reset_test(void)
{
memset(&skel->bss->srv_sa6, 0, sizeof(skel->bss->srv_sa6));
skel->bss->listen_tp_sport = 0;
skel->bss->req_sk_sport = 0;
skel->bss->recv_cookie = 0;
skel->bss->gen_cookie = 0;
skel->bss->linum = 0;
}
static void print_err_line(void)
{
if (skel->bss->linum)
printf("bpf prog error at line %u\n", skel->bss->linum);
}
static void test_conn(void)
{
int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
socklen_t addrlen = sizeof(srv_sa6);
int srv_port;
if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
return;
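/* tcp_syncookies=1: a normal connect creates a req_sk and no cookie should be generated */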
listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
if (CHECK_FAIL(listen_fd == -1))
return;
err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err,
errno))
goto done;
memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6));
srv_port = ntohs(srv_sa6.sin6_port);
cli_fd = connect_to_fd(listen_fd, 0);
if (CHECK_FAIL(cli_fd == -1))
goto done;
srv_fd = accept(listen_fd, NULL, NULL);
if (CHECK_FAIL(srv_fd == -1))
goto done;
if (CHECK(skel->bss->listen_tp_sport != srv_port ||
skel->bss->req_sk_sport != srv_port,
"Unexpected sk src port",
"listen_tp_sport:%u req_sk_sport:%u expected:%u\n",
skel->bss->listen_tp_sport, skel->bss->req_sk_sport,
srv_port))
goto done;
if (CHECK(skel->bss->gen_cookie || skel->bss->recv_cookie,
"Unexpected syncookie states",
"gen_cookie:%u recv_cookie:%u\n",
skel->bss->gen_cookie, skel->bss->recv_cookie))
goto done;
CHECK(skel->bss->linum, "bpf prog detected error", "at line %u\n",
skel->bss->linum);
done:
if (listen_fd != -1)
close(listen_fd);
if (cli_fd != -1)
close(cli_fd);
if (srv_fd != -1)
close(srv_fd);
}
static void test_syncookie(void)
{
int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
socklen_t addrlen = sizeof(srv_sa6);
int srv_port;
/* Enforce syncookie mode */
if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "2"))
return;
listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
if (CHECK_FAIL(listen_fd == -1))
return;
err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err,
errno))
goto done;
memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6));
srv_port = ntohs(srv_sa6.sin6_port);
cli_fd = connect_to_fd(listen_fd, 0);
if (CHECK_FAIL(cli_fd == -1))
goto done;
srv_fd = accept(listen_fd, NULL, NULL);
if (CHECK_FAIL(srv_fd == -1))
goto done;
if (CHECK(skel->bss->listen_tp_sport != srv_port,
"Unexpected tp src port",
"listen_tp_sport:%u expected:%u\n",
skel->bss->listen_tp_sport, srv_port))
goto done;
if (CHECK(skel->bss->req_sk_sport,
"Unexpected req_sk src port",
"req_sk_sport:%u expected:0\n",
skel->bss->req_sk_sport))
goto done;
if (CHECK(!skel->bss->gen_cookie ||
skel->bss->gen_cookie != skel->bss->recv_cookie,
"Unexpected syncookie states",
"gen_cookie:%u recv_cookie:%u\n",
skel->bss->gen_cookie, skel->bss->recv_cookie))
goto done;
CHECK(skel->bss->linum, "bpf prog detected error", "at line %u\n",
skel->bss->linum);
done:
if (listen_fd != -1)
close(listen_fd);
if (cli_fd != -1)
close(cli_fd);
if (srv_fd != -1)
close(srv_fd);
}
struct test {
const char *desc;
void (*run)(void);
};
#define DEF_TEST(name) { #name, test_##name }
static struct test tests[] = {
DEF_TEST(conn),
DEF_TEST(syncookie),
};
void test_btf_skc_cls_ingress(void)
{
int i;
skel = test_btf_skc_cls_ingress__open_and_load();
if (CHECK(!skel, "test_btf_skc_cls_ingress__open_and_load", "failed\n"))
return;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
if (!test__start_subtest(tests[i].desc))
continue;
if (prepare_netns())
break;
tests[i].run();
print_err_line();
reset_test();
}
test_btf_skc_cls_ingress__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
*
* Author: Roberto Sassu <[email protected]>
*/
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <endian.h>
#include <limits.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <linux/keyctl.h>
#include <test_progs.h>
#include "test_verify_pkcs7_sig.skel.h"
#define MAX_DATA_SIZE (1024 * 1024)
#define MAX_SIG_SIZE 1024
#define VERIFY_USE_SECONDARY_KEYRING (1UL)
#define VERIFY_USE_PLATFORM_KEYRING (2UL)
/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
#define MODULE_SIG_STRING "~Module signature appended~\n"
/*
* Module signature information block.
*
* The constituents of the signature section are, in order:
*
* - Signer's name
* - Key identifier
* - Signature data
* - Information block
*/
struct module_signature {
__u8 algo; /* Public-key crypto algorithm [0] */
__u8 hash; /* Digest algorithm [0] */
__u8 id_type; /* Key identifier type [PKEY_ID_PKCS7] */
__u8 signer_len; /* Length of signer's name [0] */
__u8 key_id_len; /* Length of key identifier [0] */
__u8 __pad[3];
__be32 sig_len; /* Length of signature data */
};
struct data {
__u8 data[MAX_DATA_SIZE];
__u32 data_len;
__u8 sig[MAX_SIG_SIZE];
__u32 sig_len;
};
static bool kfunc_not_supported;
static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt,
va_list args)
{
if (level == LIBBPF_WARN)
vprintf(fmt, args);
if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n"))
return 0;
if (strcmp(va_arg(args, char *), "bpf_verify_pkcs7_signature"))
return 0;
kfunc_not_supported = true;
return 0;
}
static int _run_setup_process(const char *setup_dir, const char *cmd)
{
int child_pid, child_status;
child_pid = fork();
if (child_pid == 0) {
execlp("./verify_sig_setup.sh", "./verify_sig_setup.sh", cmd,
setup_dir, NULL);
exit(errno);
} else if (child_pid > 0) {
waitpid(child_pid, &child_status, 0);
return WEXITSTATUS(child_status);
}
return -EINVAL;
}
static int populate_data_item_str(const char *tmp_dir, struct data *data_item)
{
struct stat st;
char data_template[] = "/tmp/dataXXXXXX";
char path[PATH_MAX];
int ret, fd, child_status, child_pid;
data_item->data_len = 4;
memcpy(data_item->data, "test", data_item->data_len);
fd = mkstemp(data_template);
if (fd == -1)
return -errno;
ret = write(fd, data_item->data, data_item->data_len);
close(fd);
if (ret != data_item->data_len) {
ret = -EIO;
goto out;
}
child_pid = fork();
if (child_pid == -1) {
ret = -errno;
goto out;
}
if (child_pid == 0) {
snprintf(path, sizeof(path), "%s/signing_key.pem", tmp_dir);
return execlp("./sign-file", "./sign-file", "-d", "sha256",
path, path, data_template, NULL);
}
waitpid(child_pid, &child_status, 0);
ret = WEXITSTATUS(child_status);
if (ret)
goto out;
snprintf(path, sizeof(path), "%s.p7s", data_template);
ret = stat(path, &st);
if (ret == -1) {
ret = -errno;
goto out;
}
if (st.st_size > sizeof(data_item->sig)) {
ret = -EINVAL;
goto out_sig;
}
data_item->sig_len = st.st_size;
fd = open(path, O_RDONLY);
if (fd == -1) {
ret = -errno;
goto out_sig;
}
ret = read(fd, data_item->sig, data_item->sig_len);
close(fd);
if (ret != data_item->sig_len) {
ret = -EIO;
goto out_sig;
}
ret = 0;
out_sig:
unlink(path);
out:
unlink(data_template);
return ret;
}
static int populate_data_item_mod(struct data *data_item)
{
char mod_path[PATH_MAX], *mod_path_ptr;
struct stat st;
void *mod;
FILE *fp;
struct module_signature ms;
int ret, fd, modlen, marker_len, sig_len;
data_item->data_len = 0;
if (stat("/lib/modules", &st) == -1)
return 0;
/* Requires CONFIG_TCP_CONG_BIC=m. */
fp = popen("find /lib/modules/$(uname -r) -name tcp_bic.ko", "r");
if (!fp)
return 0;
mod_path_ptr = fgets(mod_path, sizeof(mod_path), fp);
pclose(fp);
if (!mod_path_ptr)
return 0;
mod_path_ptr = strchr(mod_path, '\n');
if (!mod_path_ptr)
return 0;
*mod_path_ptr = '\0';
if (stat(mod_path, &st) == -1)
return 0;
modlen = st.st_size;
marker_len = sizeof(MODULE_SIG_STRING) - 1;
fd = open(mod_path, O_RDONLY);
if (fd == -1)
return -errno;
mod = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
close(fd);
if (mod == MAP_FAILED)
return -errno;
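/*
 * A signed module is laid out as: payload | PKCS#7 signature |
 * struct module_signature | MODULE_SIG_STRING. Walk backwards from
 * the trailing marker to split payload and signature.
 */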
if (strncmp(mod + modlen - marker_len, MODULE_SIG_STRING, marker_len)) {
ret = -EINVAL;
goto out;
}
modlen -= marker_len;
memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms));
sig_len = __be32_to_cpu(ms.sig_len);
modlen -= sig_len + sizeof(ms);
if (modlen > sizeof(data_item->data)) {
ret = -E2BIG;
goto out;
}
memcpy(data_item->data, mod, modlen);
data_item->data_len = modlen;
if (sig_len > sizeof(data_item->sig)) {
ret = -E2BIG;
goto out;
}
memcpy(data_item->sig, mod + modlen, sig_len);
data_item->sig_len = sig_len;
ret = 0;
out:
munmap(mod, st.st_size);
return ret;
}
void test_verify_pkcs7_sig(void)
{
libbpf_print_fn_t old_print_cb;
char tmp_dir_template[] = "/tmp/verify_sigXXXXXX";
char *tmp_dir;
struct test_verify_pkcs7_sig *skel = NULL;
struct bpf_map *map;
struct data data;
int ret, zero = 0;
/* Trigger creation of session keyring. */
syscall(__NR_request_key, "keyring", "_uid.0", NULL,
KEY_SPEC_SESSION_KEYRING);
tmp_dir = mkdtemp(tmp_dir_template);
if (!ASSERT_OK_PTR(tmp_dir, "mkdtemp"))
return;
ret = _run_setup_process(tmp_dir, "setup");
if (!ASSERT_OK(ret, "_run_setup_process"))
goto close_prog;
skel = test_verify_pkcs7_sig__open();
if (!ASSERT_OK_PTR(skel, "test_verify_pkcs7_sig__open"))
goto close_prog;
old_print_cb = libbpf_set_print(libbpf_print_cb);
ret = test_verify_pkcs7_sig__load(skel);
libbpf_set_print(old_print_cb);
if (ret < 0 && kfunc_not_supported) {
printf(
"%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n",
__func__);
test__skip();
goto close_prog;
}
if (!ASSERT_OK(ret, "test_verify_pkcs7_sig__load"))
goto close_prog;
ret = test_verify_pkcs7_sig__attach(skel);
if (!ASSERT_OK(ret, "test_verify_pkcs7_sig__attach"))
goto close_prog;
map = bpf_object__find_map_by_name(skel->obj, "data_input");
if (!ASSERT_OK_PTR(map, "data_input not found"))
goto close_prog;
skel->bss->monitored_pid = getpid();
/* Test without data and signature. */
skel->bss->user_keyring_serial = KEY_SPEC_SESSION_KEYRING;
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input"))
goto close_prog;
/* Test successful signature verification with session keyring. */
ret = populate_data_item_str(tmp_dir, &data);
if (!ASSERT_OK(ret, "populate_data_item_str"))
goto close_prog;
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
if (!ASSERT_OK(ret, "bpf_map_update_elem data_input"))
goto close_prog;
/* Test successful signature verification with testing keyring. */
skel->bss->user_keyring_serial = syscall(__NR_request_key, "keyring",
"ebpf_testing_keyring", NULL,
KEY_SPEC_SESSION_KEYRING);
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
if (!ASSERT_OK(ret, "bpf_map_update_elem data_input"))
goto close_prog;
/*
* Ensure key_task_permission() is called and rejects the keyring
* (no Search permission).
*/
syscall(__NR_keyctl, KEYCTL_SETPERM, skel->bss->user_keyring_serial,
0x37373737);
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input"))
goto close_prog;
syscall(__NR_keyctl, KEYCTL_SETPERM, skel->bss->user_keyring_serial,
0x3f3f3f3f);
/*
* Ensure key_validate() is called and rejects the keyring (key expired)
*/
syscall(__NR_keyctl, KEYCTL_SET_TIMEOUT,
skel->bss->user_keyring_serial, 1);
sleep(1);
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input"))
goto close_prog;
skel->bss->user_keyring_serial = KEY_SPEC_SESSION_KEYRING;
/* Test with corrupted data (signature verification should fail). */
data.data[0] = 'a';
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input"))
goto close_prog;
ret = populate_data_item_mod(&data);
if (!ASSERT_OK(ret, "populate_data_item_mod"))
goto close_prog;
/* Test signature verification with system keyrings. */
if (data.data_len) {
skel->bss->user_keyring_serial = 0;
skel->bss->system_keyring_id = 0;
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data,
BPF_ANY);
if (!ASSERT_OK(ret, "bpf_map_update_elem data_input"))
goto close_prog;
skel->bss->system_keyring_id = VERIFY_USE_SECONDARY_KEYRING;
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data,
BPF_ANY);
if (!ASSERT_OK(ret, "bpf_map_update_elem data_input"))
goto close_prog;
skel->bss->system_keyring_id = VERIFY_USE_PLATFORM_KEYRING;
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data,
BPF_ANY);
ASSERT_LT(ret, 0, "bpf_map_update_elem data_input");
}
close_prog:
_run_setup_process(tmp_dir, "cleanup");
if (!skel)
return;
skel->bss->monitored_pid = 0;
test_verify_pkcs7_sig__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <net/if.h>
#include <linux/netfilter.h>
#include <network_helpers.h>
#include "ip_check_defrag.skel.h"
#include "ip_check_defrag_frags.h"
/*
* This selftest spins up a client and an echo server, each in their own
* network namespace. The client will send a fragmented message to the server.
* The prog attached to the server will shoot down any fragments. Thus, if
* the server is able to correctly echo back the message to the client, we will
* have verified that netfilter is reassembling packets for us.
*
* Topology:
* =========
* NS0 | NS1
* |
* client | server
* ---------- | ----------
* | veth0 | --------- | veth1 |
* ---------- peer ----------
* |
* | with bpf
*/
#define NS0 "defrag_ns0"
#define NS1 "defrag_ns1"
#define VETH0 "veth0"
#define VETH1 "veth1"
#define VETH0_ADDR "172.16.1.100"
#define VETH0_ADDR6 "fc00::100"
/* The following constants must stay in sync with `generate_udp_fragments.py` */
#define VETH1_ADDR "172.16.1.200"
#define VETH1_ADDR6 "fc00::200"
#define CLIENT_PORT 48878
#define SERVER_PORT 48879
#define MAGIC_MESSAGE "THIS IS THE ORIGINAL MESSAGE, PLEASE REASSEMBLE ME"
static int setup_topology(bool ipv6)
{
bool up;
int i;
SYS(fail, "ip netns add " NS0);
SYS(fail, "ip netns add " NS1);
SYS(fail, "ip link add " VETH0 " netns " NS0 " type veth peer name " VETH1 " netns " NS1);
if (ipv6) {
SYS(fail, "ip -6 -net " NS0 " addr add " VETH0_ADDR6 "/64 dev " VETH0 " nodad");
SYS(fail, "ip -6 -net " NS1 " addr add " VETH1_ADDR6 "/64 dev " VETH1 " nodad");
} else {
SYS(fail, "ip -net " NS0 " addr add " VETH0_ADDR "/24 dev " VETH0);
SYS(fail, "ip -net " NS1 " addr add " VETH1_ADDR "/24 dev " VETH1);
}
SYS(fail, "ip -net " NS0 " link set dev " VETH0 " up");
SYS(fail, "ip -net " NS1 " link set dev " VETH1 " up");
/* Wait for up to 5s for links to come up */
for (i = 0; i < 5; ++i) {
if (ipv6)
up = !system("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6 " &>/dev/null");
else
up = !system("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR " &>/dev/null");
if (up)
break;
}
return 0;
fail:
return -1;
}
static void cleanup_topology(void)
{
SYS_NOFAIL("test -f /var/run/netns/" NS0 " && ip netns delete " NS0);
SYS_NOFAIL("test -f /var/run/netns/" NS1 " && ip netns delete " NS1);
}
static int attach(struct ip_check_defrag *skel, bool ipv6)
{
LIBBPF_OPTS(bpf_netfilter_opts, opts,
.pf = ipv6 ? NFPROTO_IPV6 : NFPROTO_IPV4,
.priority = 42,
.flags = BPF_F_NETFILTER_IP_DEFRAG);
struct nstoken *nstoken;
int err = -1;
nstoken = open_netns(NS1);
skel->links.defrag = bpf_program__attach_netfilter(skel->progs.defrag, &opts);
if (!ASSERT_OK_PTR(skel->links.defrag, "program attach"))
goto out;
err = 0;
out:
close_netns(nstoken);
return err;
}
static int send_frags(int client)
{
struct sockaddr_storage saddr;
struct sockaddr *saddr_p;
socklen_t saddr_len;
int err;
saddr_p = (struct sockaddr *)&saddr;
err = make_sockaddr(AF_INET, VETH1_ADDR, SERVER_PORT, &saddr, &saddr_len);
if (!ASSERT_OK(err, "make_sockaddr"))
return -1;
err = sendto(client, frag_0, sizeof(frag_0), 0, saddr_p, saddr_len);
if (!ASSERT_GE(err, 0, "sendto frag_0"))
return -1;
err = sendto(client, frag_1, sizeof(frag_1), 0, saddr_p, saddr_len);
if (!ASSERT_GE(err, 0, "sendto frag_1"))
return -1;
err = sendto(client, frag_2, sizeof(frag_2), 0, saddr_p, saddr_len);
if (!ASSERT_GE(err, 0, "sendto frag_2"))
return -1;
return 0;
}
static int send_frags6(int client)
{
struct sockaddr_storage saddr;
struct sockaddr *saddr_p;
socklen_t saddr_len;
int err;
saddr_p = (struct sockaddr *)&saddr;
/* A raw IPv6 socket interprets a non-zero sin6_port as the protocol number, so leave it 0 */
err = make_sockaddr(AF_INET6, VETH1_ADDR6, 0, &saddr, &saddr_len);
if (!ASSERT_OK(err, "make_sockaddr"))
return -1;
err = sendto(client, frag6_0, sizeof(frag6_0), 0, saddr_p, saddr_len);
if (!ASSERT_GE(err, 0, "sendto frag6_0"))
return -1;
err = sendto(client, frag6_1, sizeof(frag6_1), 0, saddr_p, saddr_len);
if (!ASSERT_GE(err, 0, "sendto frag6_1"))
return -1;
err = sendto(client, frag6_2, sizeof(frag6_2), 0, saddr_p, saddr_len);
if (!ASSERT_GE(err, 0, "sendto frag6_2"))
return -1;
return 0;
}
void test_bpf_ip_check_defrag_ok(bool ipv6)
{
struct network_helper_opts rx_opts = {
.timeout_ms = 1000,
.noconnect = true,
};
struct network_helper_opts tx_ops = {
.timeout_ms = 1000,
.type = SOCK_RAW,
.proto = IPPROTO_RAW,
.noconnect = true,
};
struct sockaddr_storage caddr;
struct ip_check_defrag *skel;
struct nstoken *nstoken;
int client_tx_fd = -1;
int client_rx_fd = -1;
socklen_t caddr_len;
int srv_fd = -1;
char buf[1024];
int len, err;
skel = ip_check_defrag__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
if (!ASSERT_OK(setup_topology(ipv6), "setup_topology"))
goto out;
if (!ASSERT_OK(attach(skel, ipv6), "attach"))
goto out;
/* Start server in ns1 */
nstoken = open_netns(NS1);
if (!ASSERT_OK_PTR(nstoken, "setns ns1"))
goto out;
srv_fd = start_server(ipv6 ? AF_INET6 : AF_INET, SOCK_DGRAM, NULL, SERVER_PORT, 0);
close_netns(nstoken);
if (!ASSERT_GE(srv_fd, 0, "start_server"))
goto out;
/* Open tx raw socket in ns0 */
nstoken = open_netns(NS0);
if (!ASSERT_OK_PTR(nstoken, "setns ns0"))
goto out;
client_tx_fd = connect_to_fd_opts(srv_fd, &tx_ops);
close_netns(nstoken);
if (!ASSERT_GE(client_tx_fd, 0, "connect_to_fd_opts"))
goto out;
/* Open rx socket in ns0 */
nstoken = open_netns(NS0);
if (!ASSERT_OK_PTR(nstoken, "setns ns0"))
goto out;
client_rx_fd = connect_to_fd_opts(srv_fd, &rx_opts);
close_netns(nstoken);
if (!ASSERT_GE(client_rx_fd, 0, "connect_to_fd_opts"))
goto out;
/* Bind rx socket to a predetermined port */
memset(&caddr, 0, sizeof(caddr));
nstoken = open_netns(NS0);
if (!ASSERT_OK_PTR(nstoken, "setns ns0"))
goto out;
if (ipv6) {
struct sockaddr_in6 *c = (struct sockaddr_in6 *)&caddr;
c->sin6_family = AF_INET6;
inet_pton(AF_INET6, VETH0_ADDR6, &c->sin6_addr);
c->sin6_port = htons(CLIENT_PORT);
err = bind(client_rx_fd, (struct sockaddr *)c, sizeof(*c));
} else {
struct sockaddr_in *c = (struct sockaddr_in *)&caddr;
c->sin_family = AF_INET;
inet_pton(AF_INET, VETH0_ADDR, &c->sin_addr);
c->sin_port = htons(CLIENT_PORT);
err = bind(client_rx_fd, (struct sockaddr *)c, sizeof(*c));
}
close_netns(nstoken);
if (!ASSERT_OK(err, "bind"))
goto out;
/* Send message in fragments */
if (ipv6) {
if (!ASSERT_OK(send_frags6(client_tx_fd), "send_frags6"))
goto out;
} else {
if (!ASSERT_OK(send_frags(client_tx_fd), "send_frags"))
goto out;
}
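/* netfilter should have reassembled the fragments before our prog ran, so nothing was shot down */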
if (!ASSERT_EQ(skel->bss->shootdowns, 0, "shootdowns"))
goto out;
/* Receive reassembled msg on server and echo back to client */
caddr_len = sizeof(caddr);
len = recvfrom(srv_fd, buf, sizeof(buf), 0, (struct sockaddr *)&caddr, &caddr_len);
if (!ASSERT_GE(len, 0, "server recvfrom"))
goto out;
len = sendto(srv_fd, buf, len, 0, (struct sockaddr *)&caddr, caddr_len);
if (!ASSERT_GE(len, 0, "server sendto"))
goto out;
/* Expect the reassembled message to be echoed back */
len = recvfrom(client_rx_fd, buf, sizeof(buf), 0, NULL, NULL);
if (!ASSERT_EQ(len, sizeof(MAGIC_MESSAGE) - 1, "client short read"))
goto out;
out:
if (client_rx_fd != -1)
close(client_rx_fd);
if (client_tx_fd != -1)
close(client_tx_fd);
if (srv_fd != -1)
close(srv_fd);
cleanup_topology();
ip_check_defrag__destroy(skel);
}
void test_bpf_ip_check_defrag(void)
{
if (test__start_subtest("v4"))
test_bpf_ip_check_defrag_ok(false);
if (test__start_subtest("v6"))
test_bpf_ip_check_defrag_ok(true);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/in6.h>
#include "test_progs.h"
#include "network_helpers.h"
#include "decap_sanity.skel.h"
#define NS_TEST "decap_sanity_ns"
#define IPV6_IFACE_ADDR "face::1"
#define UDP_TEST_PORT 7777
void test_decap_sanity(void)
{
LIBBPF_OPTS(bpf_tc_hook, qdisc_hook, .attach_point = BPF_TC_EGRESS);
LIBBPF_OPTS(bpf_tc_opts, tc_attach);
struct nstoken *nstoken = NULL;
struct decap_sanity *skel;
struct sockaddr_in6 addr;
socklen_t addrlen;
char buf[128] = {};
int sockfd, err;
skel = decap_sanity__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
return;
SYS(fail, "ip netns add %s", NS_TEST);
SYS(fail, "ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR);
SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
nstoken = open_netns(NS_TEST);
if (!ASSERT_OK_PTR(nstoken, "open_netns"))
goto fail;
qdisc_hook.ifindex = if_nametoindex("lo");
if (!ASSERT_GT(qdisc_hook.ifindex, 0, "if_nametoindex lo"))
goto fail;
err = bpf_tc_hook_create(&qdisc_hook);
if (!ASSERT_OK(err, "create qdisc hook"))
goto fail;
tc_attach.prog_fd = bpf_program__fd(skel->progs.decap_sanity);
err = bpf_tc_attach(&qdisc_hook, &tc_attach);
if (!ASSERT_OK(err, "attach filter"))
goto fail;
addrlen = sizeof(addr);
err = make_sockaddr(AF_INET6, IPV6_IFACE_ADDR, UDP_TEST_PORT,
(void *)&addr, &addrlen);
if (!ASSERT_OK(err, "make_sockaddr"))
goto fail;
sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
if (!ASSERT_NEQ(sockfd, -1, "socket"))
goto fail;
err = sendto(sockfd, buf, sizeof(buf), 0, (void *)&addr, addrlen);
close(sockfd);
if (!ASSERT_EQ(err, sizeof(buf), "send"))
goto fail;
ASSERT_TRUE(skel->bss->init_csum_partial, "init_csum_partial");
ASSERT_TRUE(skel->bss->final_csum_none, "final_csum_none");
ASSERT_FALSE(skel->bss->broken_csum_start, "broken_csum_start");
fail:
if (nstoken) {
bpf_tc_hook_destroy(&qdisc_hook);
close_netns(nstoken);
}
SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
decap_sanity__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/decap_sanity.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Oracle and/or its affiliates. */
#include <test_progs.h>
#include "test_uprobe_autoattach.skel.h"
/* uprobe attach point */
static noinline int autoattach_trigger_func(int arg1, int arg2, int arg3,
int arg4, int arg5, int arg6,
int arg7, int arg8)
{
asm volatile ("");
return arg1 + arg2 + arg3 + arg4 + arg5 + arg6 + arg7 + arg8 + 1;
}
void test_uprobe_autoattach(void)
{
const char *devnull_str = "/dev/null";
struct test_uprobe_autoattach *skel;
int trigger_ret;
FILE *devnull;
skel = test_uprobe_autoattach__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
if (!ASSERT_OK(test_uprobe_autoattach__attach(skel), "skel_attach"))
goto cleanup;
skel->bss->test_pid = getpid();
/* trigger & validate uprobe & uretprobe */
trigger_ret = autoattach_trigger_func(1, 2, 3, 4, 5, 6, 7, 8);
skel->bss->test_pid = getpid();
/* trigger & validate shared library u[ret]probes attached by name */
devnull = fopen(devnull_str, "r");
ASSERT_EQ(skel->bss->uprobe_byname_parm1, 1, "check_uprobe_byname_parm1");
ASSERT_EQ(skel->bss->uprobe_byname_ran, 1, "check_uprobe_byname_ran");
ASSERT_EQ(skel->bss->uretprobe_byname_rc, trigger_ret, "check_uretprobe_byname_rc");
ASSERT_EQ(skel->bss->uretprobe_byname_ret, trigger_ret, "check_uretprobe_byname_ret");
ASSERT_EQ(skel->bss->uretprobe_byname_ran, 2, "check_uretprobe_byname_ran");
ASSERT_EQ(skel->bss->uprobe_byname2_parm1, (__u64)(long)devnull_str,
"check_uprobe_byname2_parm1");
ASSERT_EQ(skel->bss->uprobe_byname2_ran, 3, "check_uprobe_byname2_ran");
ASSERT_EQ(skel->bss->uretprobe_byname2_rc, (__u64)(long)devnull,
"check_uretprobe_byname2_rc");
ASSERT_EQ(skel->bss->uretprobe_byname2_ran, 4, "check_uretprobe_byname2_ran");
ASSERT_EQ(skel->bss->a[0], 1, "arg1");
ASSERT_EQ(skel->bss->a[1], 2, "arg2");
ASSERT_EQ(skel->bss->a[2], 3, "arg3");
#if FUNC_REG_ARG_CNT > 3
ASSERT_EQ(skel->bss->a[3], 4, "arg4");
#endif
#if FUNC_REG_ARG_CNT > 4
ASSERT_EQ(skel->bss->a[4], 5, "arg5");
#endif
#if FUNC_REG_ARG_CNT > 5
ASSERT_EQ(skel->bss->a[5], 6, "arg6");
#endif
#if FUNC_REG_ARG_CNT > 6
ASSERT_EQ(skel->bss->a[6], 7, "arg7");
#endif
#if FUNC_REG_ARG_CNT > 7
ASSERT_EQ(skel->bss->a[7], 8, "arg8");
#endif
fclose(devnull);
cleanup:
test_uprobe_autoattach__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c |
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021 Hengqi Chen */
#include <test_progs.h>
#include "test_prog_array_init.skel.h"
void test_prog_array_init(void)
{
struct test_prog_array_init *skel;
int err;
skel = test_prog_array_init__open();
if (!ASSERT_OK_PTR(skel, "could not open BPF object"))
return;
skel->rodata->my_pid = getpid();
err = test_prog_array_init__load(skel);
if (!ASSERT_OK(err, "could not load BPF object"))
goto cleanup;
skel->links.entry = bpf_program__attach_raw_tracepoint(skel->progs.entry, "sys_enter");
if (!ASSERT_OK_PTR(skel->links.entry, "could not attach BPF program"))
goto cleanup;
usleep(1);
ASSERT_EQ(skel->bss->value, 42, "unexpected value");
cleanup:
test_prog_array_init__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/prog_array_init.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <bpf/btf.h>
#include "test_log_buf.skel.h"
static bool check_prog_load(int prog_fd, bool expect_err, const char *tag)
{
if (expect_err) {
if (!ASSERT_LT(prog_fd, 0, tag)) {
close(prog_fd);
return false;
}
} else /* !expect_err */ {
if (!ASSERT_GT(prog_fd, 0, tag))
return false;
}
if (prog_fd >= 0)
close(prog_fd);
return true;
}
static struct {
/* strategically placed before others to avoid accidental modification by kernel */
char filler[1024];
char buf[1024];
/* strategically placed after buf[] to catch more accidental corruptions */
char reference[1024];
} logs;
static const struct bpf_insn *insns;
static size_t insn_cnt;
static int load_prog(struct bpf_prog_load_opts *opts, bool expect_load_error)
{
int prog_fd;
prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_prog",
"GPL", insns, insn_cnt, opts);
check_prog_load(prog_fd, expect_load_error, "prog_load");
return prog_fd;
}
static void verif_log_subtest(const char *name, bool expect_load_error, int log_level)
{
LIBBPF_OPTS(bpf_prog_load_opts, opts);
char *exp_log, prog_name[16], op_name[32];
struct test_log_buf *skel;
struct bpf_program *prog;
size_t fixed_log_sz;
__u32 log_true_sz_fixed, log_true_sz_rolling;
int i, mode, err, prog_fd, res;
skel = test_log_buf__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
bpf_object__for_each_program(prog, skel->obj) {
if (strcmp(bpf_program__name(prog), name) == 0)
bpf_program__set_autoload(prog, true);
else
bpf_program__set_autoload(prog, false);
}
err = test_log_buf__load(skel);
if (!expect_load_error && !ASSERT_OK(err, "unexpected_load_failure"))
goto cleanup;
if (expect_load_error && !ASSERT_ERR(err, "unexpected_load_success"))
goto cleanup;
insns = bpf_program__insns(skel->progs.good_prog);
insn_cnt = bpf_program__insn_cnt(skel->progs.good_prog);
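/* capture the full verifier log once, in fixed mode, to use as the reference copy */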
opts.log_buf = logs.reference;
opts.log_size = sizeof(logs.reference);
opts.log_level = log_level | 8 /* BPF_LOG_FIXED */;
load_prog(&opts, expect_load_error);
fixed_log_sz = strlen(logs.reference) + 1;
if (!ASSERT_GT(fixed_log_sz, 50, "fixed_log_sz"))
goto cleanup;
memset(logs.reference + fixed_log_sz, 0, sizeof(logs.reference) - fixed_log_sz);
/* validate that BPF_LOG_FIXED works as the verifier log used to work,
 * that is: we get -ENOSPC and the beginning of the full verifier log.
 * This only works for log_level 2 and log_level 1 + failed program.
 * For log level 2 we don't reset the log at all. For log_level 1 +
 * failed program we don't get to the verification stats output. With
 * log level 1 for a successful program the final result will be just
 * the verifier stats. But if the provided log buf is too short, the
 * kernel will NULL-out log->ubuf and will stop emitting further log.
 * This means we'll never see predictable verifier stats.
 * Long story short, we do the following -ENOSPC test only for
 * predictable combinations.
 */
if (log_level >= 2 || expect_load_error) {
opts.log_buf = logs.buf;
opts.log_level = log_level | 8; /* fixed-length log */
opts.log_size = 25;
prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_fixed25",
"GPL", insns, insn_cnt, &opts);
if (!ASSERT_EQ(prog_fd, -ENOSPC, "unexpected_log_fixed_prog_load_result")) {
if (prog_fd >= 0)
close(prog_fd);
goto cleanup;
}
if (!ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25"))
goto cleanup;
if (!ASSERT_STRNEQ(logs.buf, logs.reference, 24, "log_fixed_contents_25"))
goto cleanup;
}
/* validate rolling verifier log logic: try all variations of log buf
* length to force various truncation scenarios
*/
opts.log_buf = logs.buf;
/* rotating mode, then fixed mode */
for (mode = 1; mode >= 0; mode--) {
/* prefill logs.buf with 'A's to detect any write beyond allowed length */
memset(logs.filler, 'A', sizeof(logs.filler));
logs.filler[sizeof(logs.filler) - 1] = '\0';
memset(logs.buf, 'A', sizeof(logs.buf));
logs.buf[sizeof(logs.buf) - 1] = '\0';
for (i = 1; i < fixed_log_sz; i++) {
opts.log_size = i;
opts.log_level = log_level | (mode ? 0 : 8 /* BPF_LOG_FIXED */);
snprintf(prog_name, sizeof(prog_name),
"log_%s_%d", mode ? "roll" : "fixed", i);
prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, prog_name,
"GPL", insns, insn_cnt, &opts);
snprintf(op_name, sizeof(op_name),
"log_%s_prog_load_%d", mode ? "roll" : "fixed", i);
if (!ASSERT_EQ(prog_fd, -ENOSPC, op_name)) {
if (prog_fd >= 0)
close(prog_fd);
goto cleanup;
}
snprintf(op_name, sizeof(op_name),
"log_%s_strlen_%d", mode ? "roll" : "fixed", i);
ASSERT_EQ(strlen(logs.buf), i - 1, op_name);
if (mode)
exp_log = logs.reference + fixed_log_sz - i;
else
exp_log = logs.reference;
snprintf(op_name, sizeof(op_name),
"log_%s_contents_%d", mode ? "roll" : "fixed", i);
if (!ASSERT_STRNEQ(logs.buf, exp_log, i - 1, op_name)) {
printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
strncmp(logs.buf, exp_log, i - 1),
logs.buf, exp_log);
goto cleanup;
}
/* check that unused portions of logs.buf is not overwritten */
snprintf(op_name, sizeof(op_name),
"log_%s_unused_%d", mode ? "roll" : "fixed", i);
if (!ASSERT_STREQ(logs.buf + i, logs.filler + i, op_name)) {
printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
strcmp(logs.buf + i, logs.filler + i),
logs.buf + i, logs.filler + i);
goto cleanup;
}
}
}
/* (FIXED) get actual log size */
opts.log_buf = logs.buf;
opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
opts.log_size = sizeof(logs.buf);
opts.log_true_size = 0;
res = load_prog(&opts, expect_load_error);
ASSERT_NEQ(res, -ENOSPC, "prog_load_res_fixed");
log_true_sz_fixed = opts.log_true_size;
ASSERT_GT(log_true_sz_fixed, 0, "log_true_sz_fixed");
/* (FIXED, NULL) get actual log size */
opts.log_buf = NULL;
opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
opts.log_size = 0;
opts.log_true_size = 0;
res = load_prog(&opts, expect_load_error);
ASSERT_NEQ(res, -ENOSPC, "prog_load_res_fixed_null");
ASSERT_EQ(opts.log_true_size, log_true_sz_fixed, "log_sz_fixed_null_eq");
/* (ROLLING) get actual log size */
opts.log_buf = logs.buf;
opts.log_level = log_level;
opts.log_size = sizeof(logs.buf);
opts.log_true_size = 0;
res = load_prog(&opts, expect_load_error);
ASSERT_NEQ(res, -ENOSPC, "prog_load_res_rolling");
log_true_sz_rolling = opts.log_true_size;
ASSERT_EQ(log_true_sz_rolling, log_true_sz_fixed, "log_true_sz_eq");
/* (ROLLING, NULL) get actual log size */
opts.log_buf = NULL;
opts.log_level = log_level;
opts.log_size = 0;
opts.log_true_size = 0;
res = load_prog(&opts, expect_load_error);
ASSERT_NEQ(res, -ENOSPC, "prog_load_res_rolling_null");
ASSERT_EQ(opts.log_true_size, log_true_sz_rolling, "log_true_sz_null_eq");
/* (FIXED) expect -ENOSPC for one byte short log */
opts.log_buf = logs.buf;
opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
opts.log_size = log_true_sz_fixed - 1;
opts.log_true_size = 0;
res = load_prog(&opts, true /* should fail */);
ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_fixed");
/* (FIXED) expect *not* -ENOSPC with exact log_true_size buffer */
opts.log_buf = logs.buf;
opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
opts.log_size = log_true_sz_fixed;
opts.log_true_size = 0;
res = load_prog(&opts, expect_load_error);
ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_fixed");
/* (ROLLING) expect -ENOSPC for one byte short log */
opts.log_buf = logs.buf;
opts.log_level = log_level;
opts.log_size = log_true_sz_rolling - 1;
res = load_prog(&opts, true /* should fail */);
ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_rolling");
/* (ROLLING) expect *not* -ENOSPC with exact log_true_size buffer */
opts.log_buf = logs.buf;
opts.log_level = log_level;
opts.log_size = log_true_sz_rolling;
opts.log_true_size = 0;
res = load_prog(&opts, expect_load_error);
ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_rolling");
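/* Taken together, the checks above validate the intended user-space
 * pattern (a sketch, variable names invented for illustration): probe
 * with log_buf == NULL and log_size == 0 to learn log_true_size, then
 * retry with an exactly sized buffer, which must not hit -ENOSPC:
 *
 *   opts.log_buf = NULL;
 *   opts.log_size = 0;
 *   bpf_prog_load(...);                  // fills opts.log_true_size
 *   buf = malloc(opts.log_true_size);
 *   opts.log_buf = buf;
 *   opts.log_size = opts.log_true_size;
 *   bpf_prog_load(...);                  // full log, no truncation
 */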
cleanup:
test_log_buf__destroy(skel);
}
static const void *btf_data;
static u32 btf_data_sz;
static int load_btf(struct bpf_btf_load_opts *opts, bool expect_err)
{
int fd;
fd = bpf_btf_load(btf_data, btf_data_sz, opts);
if (fd >= 0)
close(fd);
if (expect_err)
ASSERT_LT(fd, 0, "btf_load_failure");
else /* !expect_err */
ASSERT_GT(fd, 0, "btf_load_success");
return fd;
}
static void verif_btf_log_subtest(bool bad_btf)
{
LIBBPF_OPTS(bpf_btf_load_opts, opts);
struct btf *btf;
struct btf_type *t;
char *exp_log, op_name[32];
size_t fixed_log_sz;
__u32 log_true_sz_fixed, log_true_sz_rolling;
int i, res;
/* prepare simple BTF contents */
btf = btf__new_empty();
if (!ASSERT_OK_PTR(btf, "btf_new_empty"))
return;
res = btf__add_int(btf, "whatever", 4, 0);
if (!ASSERT_GT(res, 0, "btf_add_int_id"))
goto cleanup;
if (bad_btf) {
/* btf__add_int() doesn't accept an invalid size, so force-cast the
 * btf_type pointer and manually override the size to an invalid value
 * of 3 when we need to simulate failure
 */
t = (void *)btf__type_by_id(btf, res);
if (!ASSERT_OK_PTR(t, "int_btf_type"))
goto cleanup;
t->size = 3;
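/* 3 is not a valid size for a BTF integer, so BTF validation in the
 * kernel is expected to fail and emit a log describing the error
 */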
}
btf_data = btf__raw_data(btf, &btf_data_sz);
if (!ASSERT_OK_PTR(btf_data, "btf_data"))
goto cleanup;
load_btf(&opts, bad_btf);
opts.log_buf = logs.reference;
opts.log_size = sizeof(logs.reference);
opts.log_level = 1 | 8 /* BPF_LOG_FIXED */;
load_btf(&opts, bad_btf);
fixed_log_sz = strlen(logs.reference) + 1;
if (!ASSERT_GT(fixed_log_sz, 50, "fixed_log_sz"))
goto cleanup;
memset(logs.reference + fixed_log_sz, 0, sizeof(logs.reference) - fixed_log_sz);
/* validate that BPF_LOG_FIXED truncation works the way the verifier log used to */
opts.log_buf = logs.buf;
opts.log_level = 1 | 8; /* fixed-length log */
opts.log_size = 25;
res = load_btf(&opts, true);
ASSERT_EQ(res, -ENOSPC, "half_log_fd");
ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25");
ASSERT_STRNEQ(logs.buf, logs.reference, 24, "log_fixed_contents_25");
/* validate rolling verifier log logic: try all variations of log buf
* length to force various truncation scenarios
*/
opts.log_buf = logs.buf;
opts.log_level = 1; /* rolling log */
/* prefill logs.buf with 'A's to detect any write beyond allowed length */
memset(logs.filler, 'A', sizeof(logs.filler));
logs.filler[sizeof(logs.filler) - 1] = '\0';
memset(logs.buf, 'A', sizeof(logs.buf));
logs.buf[sizeof(logs.buf) - 1] = '\0';
for (i = 1; i < fixed_log_sz; i++) {
opts.log_size = i;
snprintf(op_name, sizeof(op_name), "log_roll_btf_load_%d", i);
res = load_btf(&opts, true);
if (!ASSERT_EQ(res, -ENOSPC, op_name))
goto cleanup;
exp_log = logs.reference + fixed_log_sz - i;
snprintf(op_name, sizeof(op_name), "log_roll_contents_%d", i);
if (!ASSERT_STREQ(logs.buf, exp_log, op_name)) {
printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
strcmp(logs.buf, exp_log),
logs.buf, exp_log);
goto cleanup;
}
/* check that unused portions of logs.buf are not overwritten */
snprintf(op_name, sizeof(op_name), "log_roll_unused_tail_%d", i);
if (!ASSERT_STREQ(logs.buf + i, logs.filler + i, op_name)) {
printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
strcmp(logs.buf + i, logs.filler + i),
logs.buf + i, logs.filler + i);
goto cleanup;
}
}
/* (FIXED) get actual log size */
opts.log_buf = logs.buf;
opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
opts.log_size = sizeof(logs.buf);
opts.log_true_size = 0;
res = load_btf(&opts, bad_btf);
ASSERT_NEQ(res, -ENOSPC, "btf_load_res_fixed");
log_true_sz_fixed = opts.log_true_size;
ASSERT_GT(log_true_sz_fixed, 0, "log_true_sz_fixed");
/* (FIXED, NULL) get actual log size */
opts.log_buf = NULL;
opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
opts.log_size = 0;
opts.log_true_size = 0;
res = load_btf(&opts, bad_btf);
ASSERT_NEQ(res, -ENOSPC, "btf_load_res_fixed_null");
ASSERT_EQ(opts.log_true_size, log_true_sz_fixed, "log_sz_fixed_null_eq");
/* (ROLLING) get actual log size */
opts.log_buf = logs.buf;
opts.log_level = 1;
opts.log_size = sizeof(logs.buf);
opts.log_true_size = 0;
res = load_btf(&opts, bad_btf);
ASSERT_NEQ(res, -ENOSPC, "btf_load_res_rolling");
log_true_sz_rolling = opts.log_true_size;
ASSERT_EQ(log_true_sz_rolling, log_true_sz_fixed, "log_true_sz_eq");
/* (ROLLING, NULL) get actual log size */
opts.log_buf = NULL;
opts.log_level = 1;
opts.log_size = 0;
opts.log_true_size = 0;
res = load_btf(&opts, bad_btf);
ASSERT_NEQ(res, -ENOSPC, "btf_load_res_rolling_null");
ASSERT_EQ(opts.log_true_size, log_true_sz_rolling, "log_true_sz_null_eq");
/* (FIXED) expect -ENOSPC for one byte short log */
opts.log_buf = logs.buf;
opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
opts.log_size = log_true_sz_fixed - 1;
opts.log_true_size = 0;
res = load_btf(&opts, true);
ASSERT_EQ(res, -ENOSPC, "btf_load_res_too_short_fixed");
/* (FIXED) expect *not* -ENOSPC with exact log_true_size buffer */
opts.log_buf = logs.buf;
opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
opts.log_size = log_true_sz_fixed;
opts.log_true_size = 0;
res = load_btf(&opts, bad_btf);
ASSERT_NEQ(res, -ENOSPC, "btf_load_res_just_right_fixed");
/* (ROLLING) expect -ENOSPC for one byte short log */
opts.log_buf = logs.buf;
opts.log_level = 1;
opts.log_size = log_true_sz_rolling - 1;
res = load_btf(&opts, true);
ASSERT_EQ(res, -ENOSPC, "btf_load_res_too_short_rolling");
/* (ROLLING) expect *not* -ENOSPC with exact log_true_size buffer */
opts.log_buf = logs.buf;
opts.log_level = 1;
opts.log_size = log_true_sz_rolling;
opts.log_true_size = 0;
res = load_btf(&opts, bad_btf);
ASSERT_NEQ(res, -ENOSPC, "btf_load_res_just_right_rolling");
cleanup:
btf__free(btf);
}
void test_verifier_log(void)
{
if (test__start_subtest("good_prog-level1"))
verif_log_subtest("good_prog", false, 1);
if (test__start_subtest("good_prog-level2"))
verif_log_subtest("good_prog", false, 2);
if (test__start_subtest("bad_prog-level1"))
verif_log_subtest("bad_prog", true, 1);
if (test__start_subtest("bad_prog-level2"))
verif_log_subtest("bad_prog", true, 2);
if (test__start_subtest("bad_btf"))
verif_btf_log_subtest(true /* bad btf */);
if (test__start_subtest("good_btf"))
verif_btf_log_subtest(false /* !bad btf */);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/verifier_log.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <test_progs.h>
#include <sys/stat.h>
#include <linux/sched.h>
#include <sys/syscall.h>
#define MAX_PATH_LEN 128
#define MAX_FILES 7
#include "test_d_path.skel.h"
#include "test_d_path_check_rdonly_mem.skel.h"
#include "test_d_path_check_types.skel.h"
/* sys_close_range has not been around for very long, so define its
 * syscall number ourselves to make sure we can call it on systems with
 * older glibc
 */
#ifndef __NR_close_range
#ifdef __alpha__
#define __NR_close_range 546
#else
#define __NR_close_range 436
#endif
#endif
static int duration;
static struct {
__u32 cnt;
char paths[MAX_FILES][MAX_PATH_LEN];
} src;
static int set_pathname(int fd, pid_t pid)
{
char buf[MAX_PATH_LEN];
snprintf(buf, MAX_PATH_LEN, "/proc/%d/fd/%d", pid, fd);
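/* readlink() does not NUL-terminate, but src has static storage (zero
 * initialized), so the stored path stays NUL-terminated as long as it
 * is shorter than MAX_PATH_LEN
 */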
return readlink(buf, src.paths[src.cnt++], MAX_PATH_LEN);
}
static int trigger_fstat_events(pid_t pid)
{
int sockfd = -1, procfd = -1, devfd = -1;
int localfd = -1, indicatorfd = -1;
int pipefd[2] = { -1, -1 };
struct stat fileStat;
int ret = -1;
/* unmountable pseudo-filesystems */
if (CHECK(pipe(pipefd) < 0, "trigger", "pipe failed\n"))
return ret;
/* unmountable pseudo-filesystems */
sockfd = socket(AF_INET, SOCK_STREAM, 0);
if (CHECK(sockfd < 0, "trigger", "socket failed\n"))
goto out_close;
/* mountable pseudo-filesystems */
procfd = open("/proc/self/comm", O_RDONLY);
if (CHECK(procfd < 0, "trigger", "open /proc/self/comm failed\n"))
goto out_close;
devfd = open("/dev/urandom", O_RDONLY);
if (CHECK(devfd < 0, "trigger", "open /dev/urandom failed\n"))
goto out_close;
localfd = open("/tmp/d_path_loadgen.txt", O_CREAT | O_RDONLY, 0644);
if (CHECK(localfd < 0, "trigger", "open /tmp/d_path_loadgen.txt failed\n"))
goto out_close;
/* bpf_d_path will return path with (deleted) */
remove("/tmp/d_path_loadgen.txt");
indicatorfd = open("/tmp/", O_PATH);
if (CHECK(indicatorfd < 0, "trigger", "open /tmp/ failed\n"))
goto out_close;
ret = set_pathname(pipefd[0], pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for pipe[0]\n"))
goto out_close;
ret = set_pathname(pipefd[1], pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for pipe[1]\n"))
goto out_close;
ret = set_pathname(sockfd, pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for socket\n"))
goto out_close;
ret = set_pathname(procfd, pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for proc\n"))
goto out_close;
ret = set_pathname(devfd, pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for dev\n"))
goto out_close;
ret = set_pathname(localfd, pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for file\n"))
goto out_close;
ret = set_pathname(indicatorfd, pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for dir\n"))
goto out_close;
/* triggers vfs_getattr */
fstat(pipefd[0], &fileStat);
fstat(pipefd[1], &fileStat);
fstat(sockfd, &fileStat);
fstat(procfd, &fileStat);
fstat(devfd, &fileStat);
fstat(localfd, &fileStat);
fstat(indicatorfd, &fileStat);
out_close:
/* sys_close no longer triggers filp_close, but we can
* call sys_close_range instead which still does
*/
#define close(fd) syscall(__NR_close_range, fd, fd, 0)
close(pipefd[0]);
close(pipefd[1]);
close(sockfd);
close(procfd);
close(devfd);
close(localfd);
close(indicatorfd);
#undef close
return ret;
}
static void test_d_path_basic(void)
{
struct test_d_path__bss *bss;
struct test_d_path *skel;
int err;
skel = test_d_path__open_and_load();
if (CHECK(!skel, "setup", "d_path skeleton failed\n"))
goto cleanup;
err = test_d_path__attach(skel);
if (CHECK(err, "setup", "attach failed: %d\n", err))
goto cleanup;
bss = skel->bss;
bss->my_pid = getpid();
err = trigger_fstat_events(bss->my_pid);
if (err < 0)
goto cleanup;
if (CHECK(!bss->called_stat,
"stat",
"trampoline for security_inode_getattr was not called\n"))
goto cleanup;
if (CHECK(!bss->called_close,
"close",
"trampoline for filp_close was not called\n"))
goto cleanup;
for (int i = 0; i < MAX_FILES; i++) {
CHECK(strncmp(src.paths[i], bss->paths_stat[i], MAX_PATH_LEN),
"check",
"failed to get stat path[%d]: %s vs %s\n",
i, src.paths[i], bss->paths_stat[i]);
CHECK(strncmp(src.paths[i], bss->paths_close[i], MAX_PATH_LEN),
"check",
"failed to get close path[%d]: %s vs %s\n",
i, src.paths[i], bss->paths_close[i]);
/* The d_path helper returns size plus NUL char, hence + 1 */
CHECK(bss->rets_stat[i] != strlen(bss->paths_stat[i]) + 1,
"check",
"failed to match stat return [%d]: %d vs %zd [%s]\n",
i, bss->rets_stat[i], strlen(bss->paths_stat[i]) + 1,
bss->paths_stat[i]);
CHECK(bss->rets_close[i] != strlen(bss->paths_close[i]) + 1,
"check",
"failed to match close return [%d]: %d vs %zd [%s]\n",
i, bss->rets_close[i], strlen(bss->paths_close[i]) + 1,
bss->paths_close[i]);
}
cleanup:
test_d_path__destroy(skel);
}
static void test_d_path_check_rdonly_mem(void)
{
struct test_d_path_check_rdonly_mem *skel;
skel = test_d_path_check_rdonly_mem__open_and_load();
ASSERT_ERR_PTR(skel, "unexpected_load_overwriting_rdonly_mem");
test_d_path_check_rdonly_mem__destroy(skel);
}
static void test_d_path_check_types(void)
{
struct test_d_path_check_types *skel;
skel = test_d_path_check_types__open_and_load();
ASSERT_ERR_PTR(skel, "unexpected_load_passing_wrong_type");
test_d_path_check_types__destroy(skel);
}
void test_d_path(void)
{
if (test__start_subtest("basic"))
test_d_path_basic();
if (test__start_subtest("check_rdonly_mem"))
test_d_path_check_rdonly_mem();
if (test__start_subtest("check_alloc_mem"))
test_d_path_check_types();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/d_path.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright Amazon.com Inc. or its affiliates. */
#include <sys/socket.h>
#include <sys/un.h>
#include <test_progs.h>
#include "bpf_iter_setsockopt_unix.skel.h"
#define NR_CASES 5
static int create_unix_socket(struct bpf_iter_setsockopt_unix *skel)
{
struct sockaddr_un addr = {
.sun_family = AF_UNIX,
.sun_path = "",
};
socklen_t len;
int fd, err;
fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (!ASSERT_NEQ(fd, -1, "socket"))
return -1;
len = offsetof(struct sockaddr_un, sun_path);
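/* binding with a zero-length sun_path triggers unix autobind: the
 * kernel assigns a unique abstract-namespace address, which
 * getsockname() below retrieves and which is then copied into the BPF
 * program's bss (presumably so the iterator can identify this socket)
 */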
err = bind(fd, (struct sockaddr *)&addr, len);
if (!ASSERT_OK(err, "bind"))
return -1;
len = sizeof(addr);
err = getsockname(fd, (struct sockaddr *)&addr, &len);
if (!ASSERT_OK(err, "getsockname"))
return -1;
memcpy(&skel->bss->sun_path, &addr.sun_path,
len - offsetof(struct sockaddr_un, sun_path));
return fd;
}
static void test_sndbuf(struct bpf_iter_setsockopt_unix *skel, int fd)
{
socklen_t optlen;
int i, err;
for (i = 0; i < NR_CASES; i++) {
if (!ASSERT_NEQ(skel->data->sndbuf_getsockopt[i], -1,
"bpf_(get|set)sockopt"))
return;
err = setsockopt(fd, SOL_SOCKET, SO_SNDBUF,
&(skel->data->sndbuf_setsockopt[i]),
sizeof(skel->data->sndbuf_setsockopt[i]));
if (!ASSERT_OK(err, "setsockopt"))
return;
optlen = sizeof(skel->bss->sndbuf_getsockopt_expected[i]);
err = getsockopt(fd, SOL_SOCKET, SO_SNDBUF,
&(skel->bss->sndbuf_getsockopt_expected[i]),
&optlen);
if (!ASSERT_OK(err, "getsockopt"))
return;
if (!ASSERT_EQ(skel->data->sndbuf_getsockopt[i],
skel->bss->sndbuf_getsockopt_expected[i],
"bpf_(get|set)sockopt"))
return;
}
}
void test_bpf_iter_setsockopt_unix(void)
{
struct bpf_iter_setsockopt_unix *skel;
int err, unix_fd, iter_fd;
char buf;
skel = bpf_iter_setsockopt_unix__open_and_load();
if (!ASSERT_OK_PTR(skel, "open_and_load"))
return;
unix_fd = create_unix_socket(skel);
if (!ASSERT_NEQ(unix_fd, -1, "create_unix_server"))
goto destroy;
skel->links.change_sndbuf = bpf_program__attach_iter(skel->progs.change_sndbuf, NULL);
if (!ASSERT_OK_PTR(skel->links.change_sndbuf, "bpf_program__attach_iter"))
goto destroy;
iter_fd = bpf_iter_create(bpf_link__fd(skel->links.change_sndbuf));
if (!ASSERT_GE(iter_fd, 0, "bpf_iter_create"))
goto destroy;
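/* drain the iterator: reading iter_fd runs change_sndbuf over every
 * unix socket; a final return of 0 (EOF with no output) means the
 * iteration completed without error
 */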
while ((err = read(iter_fd, &buf, sizeof(buf))) == -1 &&
errno == EAGAIN)
;
if (!ASSERT_OK(err, "read iter error"))
goto destroy;
test_sndbuf(skel, unix_fd);
destroy:
bpf_iter_setsockopt_unix__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c |
// SPDX-License-Identifier: GPL-2.0
#include "bpf/libbpf.h"
#include <test_progs.h>
#include <network_helpers.h>
#include "cb_refs.skel.h"
static char log_buf[1024 * 1024];
struct {
const char *prog_name;
const char *err_msg;
} cb_refs_tests[] = {
{ "underflow_prog", "reference has not been acquired before" },
{ "leak_prog", "Unreleased reference" },
{ "nested_cb", "Unreleased reference id=4 alloc_insn=2" }, /* alloc_insn=2{4,5} */
{ "non_cb_transfer_ref", "Unreleased reference id=4 alloc_insn=1" }, /* alloc_insn=1{1,2} */
};
void test_cb_refs(void)
{
LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
.kernel_log_size = sizeof(log_buf),
.kernel_log_level = 1);
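/* every program in this test is expected to be rejected by the
 * verifier; the kernel log is captured in log_buf so the expected
 * error message can be matched below
 */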
struct bpf_program *prog;
struct cb_refs *skel;
int i;
for (i = 0; i < ARRAY_SIZE(cb_refs_tests); i++) {
LIBBPF_OPTS(bpf_test_run_opts, run_opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
skel = cb_refs__open_opts(&opts);
if (!ASSERT_OK_PTR(skel, "cb_refs__open_and_load"))
return;
prog = bpf_object__find_program_by_name(skel->obj, cb_refs_tests[i].prog_name);
bpf_program__set_autoload(prog, true);
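/* enable loading only for the program under test; the other programs
 * in the object stay non-autoloaded (they are presumably declared with
 * a '?' section prefix), so each iteration triggers exactly one
 * expected verifier failure
 */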
if (!ASSERT_ERR(cb_refs__load(skel), "cb_refs__load"))
bpf_prog_test_run_opts(bpf_program__fd(prog), &run_opts);
if (!ASSERT_OK_PTR(strstr(log_buf, cb_refs_tests[i].err_msg), "expected error message")) {
fprintf(stderr, "Expected: %s\n", cb_refs_tests[i].err_msg);
fprintf(stderr, "Verifier: %s\n", log_buf);
}
cb_refs__destroy(skel);
}
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cb_refs.c |