python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
int calls = 0;
int alt_calls = 0;
SEC("cgroup_skb/egress")
int egress(struct __sk_buff *skb)
{
__sync_fetch_and_add(&calls, 1);
return 1;
}
SEC("cgroup_skb/egress")
int egress_alt(struct __sk_buff *skb)
{
__sync_fetch_and_add(&alt_calls, 1);
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_cgroup_link.c |
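A minimal user-space sketch of how a cgroup_skb program like the one above is typically attached through a bpf_link with libbpf. The skeleton header, cgroup path, and sleep-based trigger are illustrative assumptions, not part of the selftest itself:
// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch: attach the egress program above via a bpf_link.
 * "test_cgroup_link.skel.h" (bpftool-generated skeleton) and the cgroup
 * path are assumptions for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/libbpf.h>
#include "test_cgroup_link.skel.h"
int main(void)
{
	struct test_cgroup_link *skel;
	struct bpf_link *link;
	int cg_fd, err = 1;
	skel = test_cgroup_link__open_and_load();
	if (!skel)
		return 1;
	cg_fd = open("/sys/fs/cgroup/test", O_RDONLY); /* assumed cgroup path */
	if (cg_fd < 0)
		goto out;
	/* link-based attach: detaches automatically once the link is destroyed */
	link = bpf_program__attach_cgroup(skel->progs.egress, cg_fd);
	if (!link)
		goto out_close;
	sleep(1); /* let some egress traffic accumulate */
	printf("egress calls: %d\n", skel->bss->calls);
	err = 0;
	bpf_link__destroy(link);
out_close:
	close(cg_fd);
out:
	test_cgroup_link__destroy(skel);
	return err;
}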
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/runtime_jit.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
void dummy_prog_42_socket(void);
void dummy_prog_24_socket(void);
void dummy_prog_loop1_socket(void);
void dummy_prog_loop2_socket(void);
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 4);
__uint(key_size, sizeof(int));
__array(values, void (void));
} map_prog1_socket SEC(".maps") = {
.values = {
[0] = (void *)&dummy_prog_42_socket,
[1] = (void *)&dummy_prog_loop1_socket,
[2] = (void *)&dummy_prog_24_socket,
},
};
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 8);
__uint(key_size, sizeof(int));
__array(values, void (void));
} map_prog2_socket SEC(".maps") = {
.values = {
[1] = (void *)&dummy_prog_loop2_socket,
[2] = (void *)&dummy_prog_24_socket,
[7] = (void *)&dummy_prog_42_socket,
},
};
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_42_socket(void)
{
asm volatile ("r0 = 42; exit;");
}
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_24_socket(void)
{
asm volatile ("r0 = 24; exit;");
}
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_loop1_socket(void)
{
asm volatile (" \
r3 = 1; \
r2 = %[map_prog1_socket] ll; \
call %[bpf_tail_call]; \
r0 = 41; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket)
: __clobber_all);
}
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_loop2_socket(void)
{
asm volatile (" \
r3 = 1; \
r2 = %[map_prog2_socket] ll; \
call %[bpf_tail_call]; \
r0 = 41; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog2_socket)
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: tail_call within bounds, prog once")
__success __success_unpriv __retval(42)
__naked void call_within_bounds_prog_once(void)
{
asm volatile (" \
r3 = 0; \
r2 = %[map_prog1_socket] ll; \
call %[bpf_tail_call]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket)
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: tail_call within bounds, prog loop")
__success __success_unpriv __retval(41)
__naked void call_within_bounds_prog_loop(void)
{
asm volatile (" \
r3 = 1; \
r2 = %[map_prog1_socket] ll; \
call %[bpf_tail_call]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket)
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: tail_call within bounds, no prog")
__success __success_unpriv __retval(1)
__naked void call_within_bounds_no_prog(void)
{
asm volatile (" \
r3 = 3; \
r2 = %[map_prog1_socket] ll; \
call %[bpf_tail_call]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket)
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: tail_call within bounds, key 2")
__success __success_unpriv __retval(24)
__naked void call_within_bounds_key_2(void)
{
asm volatile (" \
r3 = 2; \
r2 = %[map_prog1_socket] ll; \
call %[bpf_tail_call]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket)
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: tail_call within bounds, key 2 / key 2, first branch")
__success __success_unpriv __retval(24)
__naked void _2_key_2_first_branch(void)
{
asm volatile (" \
r0 = 13; \
*(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \
r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \
if r0 == 13 goto l0_%=; \
r3 = 2; \
r2 = %[map_prog1_socket] ll; \
goto l1_%=; \
l0_%=: r3 = 2; \
r2 = %[map_prog1_socket] ll; \
l1_%=: call %[bpf_tail_call]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket),
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: tail_call within bounds, key 2 / key 2, second branch")
__success __success_unpriv __retval(24)
__naked void _2_key_2_second_branch(void)
{
asm volatile (" \
r0 = 14; \
*(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \
r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \
if r0 == 13 goto l0_%=; \
r3 = 2; \
r2 = %[map_prog1_socket] ll; \
goto l1_%=; \
l0_%=: r3 = 2; \
r2 = %[map_prog1_socket] ll; \
l1_%=: call %[bpf_tail_call]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket),
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: tail_call within bounds, key 0 / key 2, first branch")
__success __success_unpriv __retval(24)
__naked void _0_key_2_first_branch(void)
{
asm volatile (" \
r0 = 13; \
*(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \
r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \
if r0 == 13 goto l0_%=; \
r3 = 0; \
r2 = %[map_prog1_socket] ll; \
goto l1_%=; \
l0_%=: r3 = 2; \
r2 = %[map_prog1_socket] ll; \
l1_%=: call %[bpf_tail_call]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket),
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: tail_call within bounds, key 0 / key 2, second branch")
__success __success_unpriv __retval(42)
__naked void _0_key_2_second_branch(void)
{
asm volatile (" \
r0 = 14; \
*(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \
r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \
if r0 == 13 goto l0_%=; \
r3 = 0; \
r2 = %[map_prog1_socket] ll; \
goto l1_%=; \
l0_%=: r3 = 2; \
r2 = %[map_prog1_socket] ll; \
l1_%=: call %[bpf_tail_call]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket),
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: tail_call within bounds, different maps, first branch")
__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr")
__retval(1)
__naked void bounds_different_maps_first_branch(void)
{
asm volatile (" \
r0 = 13; \
*(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \
r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \
if r0 == 13 goto l0_%=; \
r3 = 0; \
r2 = %[map_prog1_socket] ll; \
goto l1_%=; \
l0_%=: r3 = 0; \
r2 = %[map_prog2_socket] ll; \
l1_%=: call %[bpf_tail_call]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket),
__imm_addr(map_prog2_socket),
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: tail_call within bounds, different maps, second branch")
__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr")
__retval(42)
__naked void bounds_different_maps_second_branch(void)
{
asm volatile (" \
r0 = 14; \
*(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \
r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \
if r0 == 13 goto l0_%=; \
r3 = 0; \
r2 = %[map_prog1_socket] ll; \
goto l1_%=; \
l0_%=: r3 = 0; \
r2 = %[map_prog2_socket] ll; \
l1_%=: call %[bpf_tail_call]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket),
__imm_addr(map_prog2_socket),
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: tail_call out of bounds")
__success __success_unpriv __retval(2)
__naked void tail_call_out_of_bounds(void)
{
asm volatile (" \
r3 = 256; \
r2 = %[map_prog1_socket] ll; \
call %[bpf_tail_call]; \
r0 = 2; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket)
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: pass negative index to tail_call")
__success __success_unpriv __retval(2)
__naked void negative_index_to_tail_call(void)
{
asm volatile (" \
r3 = -1; \
r2 = %[map_prog1_socket] ll; \
call %[bpf_tail_call]; \
r0 = 2; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket)
: __clobber_all);
}
SEC("socket")
__description("runtime/jit: pass > 32bit index to tail_call")
__success __success_unpriv __retval(42)
/* Verifier rewrite for unpriv skips tail call here. */
__retval_unpriv(2)
__naked void _32bit_index_to_tail_call(void)
{
asm volatile (" \
r3 = 0x100000000 ll; \
r2 = %[map_prog1_socket] ll; \
call %[bpf_tail_call]; \
r0 = 2; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_runtime_jit.c |
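For readers less used to the naked-asm style above, the same within-bounds case can be written with the bpf_tail_call() helper in plain C. This is an illustrative sketch that assumes it lives in the same file, so map_prog1_socket and the dummy programs are in scope; it is not one of the converted test cases:
/* Hedged sketch: C-helper equivalent of the "tail_call within bounds,
 * prog once" case. Control only falls through to "return 1" if the tail
 * call fails (empty slot or out-of-bounds index).
 */
SEC("socket")
int tail_call_demo(struct __sk_buff *skb)
{
	bpf_tail_call(skb, &map_prog1_socket, 0); /* jumps to dummy_prog_42_socket */
	return 1;
}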
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <asm/unistd.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#define MY_TV_NSEC 1337
bool tp_called = false;
bool raw_tp_called = false;
bool tp_btf_called = false;
bool kprobe_called = false;
bool fentry_called = false;
SEC("tp/syscalls/sys_enter_nanosleep")
int handle__tp(struct trace_event_raw_sys_enter *args)
{
struct __kernel_timespec *ts;
long tv_nsec;
if (args->id != __NR_nanosleep)
return 0;
ts = (void *)args->args[0];
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
tp_called = true;
return 0;
}
SEC("raw_tp/sys_enter")
int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
{
struct __kernel_timespec *ts;
long tv_nsec;
if (id != __NR_nanosleep)
return 0;
ts = (void *)PT_REGS_PARM1_CORE_SYSCALL(regs);
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
raw_tp_called = true;
return 0;
}
SEC("tp_btf/sys_enter")
int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
{
struct __kernel_timespec *ts;
long tv_nsec;
if (id != __NR_nanosleep)
return 0;
ts = (void *)PT_REGS_PARM1_CORE_SYSCALL(regs);
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
tp_btf_called = true;
return 0;
}
SEC("kprobe/hrtimer_start_range_ns")
int BPF_KPROBE(handle__kprobe, struct hrtimer *timer, ktime_t tim, u64 delta_ns,
const enum hrtimer_mode mode)
{
if (tim == MY_TV_NSEC)
kprobe_called = true;
return 0;
}
SEC("fentry/hrtimer_start_range_ns")
int BPF_PROG(handle__fentry, struct hrtimer *timer, ktime_t tim, u64 delta_ns,
const enum hrtimer_mode mode)
{
if (tim == MY_TV_NSEC)
fentry_called = true;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_vmlinux.c |
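All of the handlers above fire only for a nanosleep whose tv_nsec equals MY_TV_NSEC (1337). A hedged user-space sketch of the trigger, assuming the programs have already been loaded and attached:
/* Hedged sketch: issue a raw nanosleep syscall so the tracepoint, raw
 * tracepoint, BTF tracepoint, kprobe and fentry handlers above all see
 * tv_nsec == MY_TV_NSEC.
 */
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
static void trigger_nanosleep(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1337 /* MY_TV_NSEC */ };
	syscall(__NR_nanosleep, &ts, NULL);
}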
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
struct map_value {
char buf[8];
struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
struct prog_test_ref_kfunc __kptr *ref_ptr;
struct prog_test_member __kptr *ref_memb_ptr;
};
struct array_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 1);
} array_map SEC(".maps");
SEC("?tc")
__failure __msg("kptr access size must be BPF_DW")
int size_not_bpf_dw(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
*(u32 *)&v->unref_ptr = 0;
return 0;
}
SEC("?tc")
__failure __msg("kptr access cannot have variable offset")
int non_const_var_off(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0, id;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
id = ctx->protocol;
if (id < 4 || id > 12)
return 0;
*(u64 *)((void *)v + id) = 0;
return 0;
}
SEC("?tc")
__failure __msg("R1 doesn't have constant offset. kptr has to be")
int non_const_var_off_kptr_xchg(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0, id;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
id = ctx->protocol;
if (id < 4 || id > 12)
return 0;
bpf_kptr_xchg((void *)v + id, NULL);
return 0;
}
SEC("?tc")
__failure __msg("kptr access misaligned expected=8 off=7")
int misaligned_access_write(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
*(void **)((void *)v + 7) = NULL;
return 0;
}
SEC("?tc")
__failure __msg("kptr access misaligned expected=8 off=1")
int misaligned_access_read(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
return *(u64 *)((void *)v + 1);
}
SEC("?tc")
__failure __msg("variable untrusted_ptr_ access var_off=(0x0; 0x1e0)")
int reject_var_off_store(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *unref_ptr;
struct map_value *v;
int key = 0, id;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
unref_ptr = v->unref_ptr;
if (!unref_ptr)
return 0;
id = ctx->protocol;
if (id < 4 || id > 12)
return 0;
unref_ptr += id;
v->unref_ptr = unref_ptr;
return 0;
}
SEC("?tc")
__failure __msg("invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc")
int reject_bad_type_match(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *unref_ptr;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
unref_ptr = v->unref_ptr;
if (!unref_ptr)
return 0;
unref_ptr = (void *)unref_ptr + 4;
v->unref_ptr = unref_ptr;
return 0;
}
SEC("?tc")
__failure __msg("R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_")
int marked_as_untrusted_or_null(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
bpf_this_cpu_ptr(v->unref_ptr);
return 0;
}
SEC("?tc")
__failure __msg("access beyond struct prog_test_ref_kfunc at off 32 size 4")
int correct_btf_id_check_size(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
p = v->unref_ptr;
if (!p)
return 0;
return *(int *)((void *)p + bpf_core_type_size(struct prog_test_ref_kfunc));
}
SEC("?tc")
__failure __msg("R1 type=untrusted_ptr_ expected=percpu_ptr_")
int inherit_untrusted_on_walk(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *unref_ptr;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
unref_ptr = v->unref_ptr;
if (!unref_ptr)
return 0;
unref_ptr = unref_ptr->next;
bpf_this_cpu_ptr(unref_ptr);
return 0;
}
SEC("?tc")
__failure __msg("off=8 kptr isn't referenced kptr")
int reject_kptr_xchg_on_unref(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
bpf_kptr_xchg(&v->unref_ptr, NULL);
return 0;
}
SEC("?tc")
__failure __msg("R1 type=rcu_ptr_or_null_ expected=percpu_ptr_")
int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
bpf_this_cpu_ptr(v->ref_ptr);
return 0;
}
SEC("?tc")
__failure __msg("store to referenced kptr disallowed")
int reject_untrusted_store_to_ref(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
p = v->ref_ptr;
if (!p)
return 0;
/* Checkmate, clang */
*(struct prog_test_ref_kfunc * volatile *)&v->ref_ptr = p;
return 0;
}
SEC("?tc")
__failure __msg("R2 must be referenced")
int reject_untrusted_xchg(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
p = v->ref_ptr;
if (!p)
return 0;
bpf_kptr_xchg(&v->ref_ptr, p);
return 0;
}
SEC("?tc")
__failure
__msg("invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member")
int reject_bad_type_xchg(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *ref_ptr;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
if (!ref_ptr)
return 0;
bpf_kptr_xchg(&v->ref_memb_ptr, ref_ptr);
return 0;
}
SEC("?tc")
__failure __msg("invalid kptr access, R2 type=ptr_prog_test_ref_kfunc")
int reject_member_of_ref_xchg(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *ref_ptr;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
if (!ref_ptr)
return 0;
bpf_kptr_xchg(&v->ref_memb_ptr, &ref_ptr->memb);
return 0;
}
SEC("?syscall")
__failure __msg("kptr cannot be accessed indirectly by helper")
int reject_indirect_helper_access(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
bpf_get_current_comm(v, sizeof(v->buf) + 1);
return 0;
}
__noinline
int write_func(int *p)
{
return p ? *p = 42 : 0;
}
SEC("?tc")
__failure __msg("kptr cannot be accessed indirectly by helper")
int reject_indirect_global_func_access(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
return write_func((void *)v + 5);
}
SEC("?tc")
__failure __msg("Unreleased reference id=5 alloc_insn=")
int kptr_xchg_ref_state(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
if (!p)
return 0;
bpf_kptr_xchg(&v->ref_ptr, p);
return 0;
}
SEC("?tc")
__failure __msg("Possibly NULL pointer passed to helper arg2")
int kptr_xchg_possibly_null(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
/* PTR_TO_BTF_ID | PTR_MAYBE_NULL passed to bpf_kptr_xchg() */
p = bpf_kptr_xchg(&v->ref_ptr, p);
if (p)
bpf_kfunc_call_test_release(p);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/map_kptr_fail.c |
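For contrast with the failure cases above, here is a sketch of a pattern the verifier is expected to accept: acquire a referenced pointer, NULL-check it, exchange it into the map slot, and release whatever the slot previously held. It reuses the map and kfunc declarations from this file and is illustrative, not one of the documented test cases:
/* Hedged sketch of an accepted referenced-kptr pattern: acquire,
 * NULL-check, xchg into the map, release the old value taken out.
 */
SEC("?tc")
int kptr_xchg_ok_sketch(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p, *old;
	struct map_value *v;
	int key = 0;
	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;
	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return 0;
	old = bpf_kptr_xchg(&v->ref_ptr, p); /* we now own the old pointer */
	if (old)
		bpf_kfunc_call_test_release(old);
	return 0;
}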
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
int vals[] SEC(".data.vals") = {1, 2, 3, 4};
__naked __noinline __used
static unsigned long identity_subprog()
{
/* the simplest *static* 64-bit identity function */
asm volatile (
"r0 = r1;"
"exit;"
);
}
__noinline __used
unsigned long global_identity_subprog(__u64 x)
{
/* the simplest *global* 64-bit identity function */
return x;
}
__naked __noinline __used
static unsigned long callback_subprog()
{
/* the simplest callback function */
asm volatile (
"r0 = 0;"
"exit;"
);
}
SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r0")
__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int subprog_result_precise(void)
{
asm volatile (
"r6 = 3;"
/* pass r6 through r1 into subprog to get it back as r0;
* this whole chain will have to be marked as precise later
*/
"r1 = r6;"
"call identity_subprog;"
/* now use subprog's returned value (which is a
* r6 -> r1 -> r0 chain), as index into vals array, forcing
* all of that to be known precisely
*/
"r0 *= 4;"
"r1 = %[vals];"
/* here r0->r1->r6 chain is forced to be precise and has to be
* propagated back to the beginning, including through the
* subprog call
*/
"r1 += r0;"
"r0 = *(u32 *)(r1 + 0);"
"exit;"
:
: __imm_ptr(vals)
: __clobber_common, "r6"
);
}
SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 9 first_idx 0")
__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7")
__naked int global_subprog_result_precise(void)
{
asm volatile (
"r6 = 3;"
/* pass r6 through r1 into subprog to get it back as r0;
* given global_identity_subprog is global, precision won't
* propagate all the way back to r6
*/
"r1 = r6;"
"call global_identity_subprog;"
/* now use subprog's returned value (which is unknown now, so
* we need to clamp it), as index into vals array, forcing r0
* to be marked precise (with no effect on r6, though)
*/
"if r0 < %[vals_arr_sz] goto 1f;"
"r0 = %[vals_arr_sz] - 1;"
"1:"
"r0 *= 4;"
"r1 = %[vals];"
/* here r0 is forced to be precise and has to be
* propagated back to the global subprog call, but it
* shouldn't go all the way to mark r6 as precise
*/
"r1 += r0;"
"r0 = *(u32 *)(r1 + 0);"
"exit;"
:
: __imm_ptr(vals),
__imm_const(vals_arr_sz, ARRAY_SIZE(vals))
: __clobber_common, "r6"
);
}
SEC("?raw_tp")
__success __log_level(2)
__msg("14: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 14 first_idx 10")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
__msg("mark_precise: frame0: parent state regs=r0 stack=:")
__msg("mark_precise: frame0: last_idx 18 first_idx 0")
__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit")
__naked int callback_result_precise(void)
{
asm volatile (
"r6 = 3;"
/* call subprog and use result; r0 shouldn't propagate back to
* callback_subprog
*/
"r1 = r6;" /* nr_loops */
"r2 = %[callback_subprog];" /* callback_fn */
"r3 = 0;" /* callback_ctx */
"r4 = 0;" /* flags */
"call %[bpf_loop];"
"r6 = r0;"
"if r6 > 3 goto 1f;"
"r6 *= 4;"
"r1 = %[vals];"
/* here r6 is forced to be precise and has to be propagated
* back to the bpf_loop() call, but not beyond
*/
"r1 += r6;"
"r0 = *(u32 *)(r1 + 0);"
"1:"
"exit;"
:
: __imm_ptr(vals),
__imm_ptr(callback_subprog),
__imm(bpf_loop)
: __clobber_common, "r6"
);
}
SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1")
__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise(void)
{
asm volatile (
"r6 = 3;"
/* call subprog and ignore result; we need this call only to
* complicate jump history
*/
"r1 = 0;"
"call identity_subprog;"
"r6 *= 4;"
"r1 = %[vals];"
/* here r6 is forced to be precise and has to be propagated
* back to the beginning, handling (and ignoring) subprog call
*/
"r1 += r6;"
"r0 = *(u32 *)(r1 + 0);"
"exit;"
:
: __imm_ptr(vals)
: __clobber_common, "r6"
);
}
SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise_global(void)
{
asm volatile (
"r6 = 3;"
/* call subprog and ignore result; we need this call only to
* complicate jump history
*/
"r1 = 0;"
"call global_identity_subprog;"
"r6 *= 4;"
"r1 = %[vals];"
/* here r6 is forced to be precise and has to be propagated
* back to the beginning, handling (and ignoring) subprog call
*/
"r1 += r6;"
"r0 = *(u32 *)(r1 + 0);"
"exit;"
:
: __imm_ptr(vals)
: __clobber_common, "r6"
);
}
SEC("?raw_tp")
__success __log_level(2)
__msg("12: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 12 first_idx 10")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
__msg("mark_precise: frame0: parent state regs=r6 stack=:")
__msg("mark_precise: frame0: last_idx 16 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise_with_callback(void)
{
asm volatile (
"r6 = 3;"
/* call subprog and ignore result; we need this call only to
* complicate jump history
*/
"r1 = 1;" /* nr_loops */
"r2 = %[callback_subprog];" /* callback_fn */
"r3 = 0;" /* callback_ctx */
"r4 = 0;" /* flags */
"call %[bpf_loop];"
"r6 *= 4;"
"r1 = %[vals];"
/* here r6 is forced to be precise and has to be propagated
* back to the beginning, handling (and ignoring) callback call
*/
"r1 += r6;"
"r0 = *(u32 *)(r1 + 0);"
"exit;"
:
: __imm_ptr(vals),
__imm_ptr(callback_subprog),
__imm(bpf_loop)
: __clobber_common, "r6"
);
}
SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 6")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 13 first_idx 0")
__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1")
__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_stack_slot_precise(void)
{
asm volatile (
/* spill reg */
"r6 = 3;"
"*(u64 *)(r10 - 8) = r6;"
/* call subprog and ignore result; we need this call only to
* complicate jump history
*/
"r1 = 0;"
"call identity_subprog;"
/* restore reg from stack; in this case we'll be carrying
* stack mask when going back into subprog through jump
* history
*/
"r6 = *(u64 *)(r10 - 8);"
"r6 *= 4;"
"r1 = %[vals];"
/* here r6 is forced to be precise and has to be propagated
* back to the beginning, handling (and ignoring) subprog call
*/
"r1 += r6;"
"r0 = *(u32 *)(r1 + 0);"
"exit;"
:
: __imm_ptr(vals)
: __clobber_common, "r6"
);
}
SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 6")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 5 first_idx 0")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_stack_slot_precise_global(void)
{
asm volatile (
/* spill reg */
"r6 = 3;"
"*(u64 *)(r10 - 8) = r6;"
/* call subprog and ignore result; we need this call only to
* complicate jump history
*/
"r1 = 0;"
"call global_identity_subprog;"
/* restore reg from stack; in this case we'll be carrying
* stack mask when going back into subprog through jump
* history
*/
"r6 = *(u64 *)(r10 - 8);"
"r6 *= 4;"
"r1 = %[vals];"
/* here r6 is forced to be precise and has to be propagated
* back to the beginning, handling (and ignoring) subprog call
*/
"r1 += r6;"
"r0 = *(u32 *)(r1 + 0);"
"exit;"
:
: __imm_ptr(vals)
: __clobber_common, "r6"
);
}
SEC("?raw_tp")
__success __log_level(2)
__msg("14: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 14 first_idx 11")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 18 first_idx 0")
__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
__naked int parent_stack_slot_precise_with_callback(void)
{
asm volatile (
/* spill reg */
"r6 = 3;"
"*(u64 *)(r10 - 8) = r6;"
/* ensure we have callback frame in jump history */
"r1 = r6;" /* nr_loops */
"r2 = %[callback_subprog];" /* callback_fn */
"r3 = 0;" /* callback_ctx */
"r4 = 0;" /* flags */
"call %[bpf_loop];"
/* restore reg from stack; in this case we'll be carrying
* stack mask when going back into subprog through jump
* history
*/
"r6 = *(u64 *)(r10 - 8);"
"r6 *= 4;"
"r1 = %[vals];"
/* here r6 is forced to be precise and has to be propagated
* back to the beginning, handling (and ignoring) subprog call
*/
"r1 += r6;"
"r0 = *(u32 *)(r1 + 0);"
"exit;"
:
: __imm_ptr(vals),
__imm_ptr(callback_subprog),
__imm(bpf_loop)
: __clobber_common, "r6"
);
}
__noinline __used
static __u64 subprog_with_precise_arg(__u64 x)
{
return vals[x]; /* x is forced to be precise */
}
SEC("?raw_tp")
__success __log_level(2)
__msg("8: (0f) r2 += r1")
__msg("mark_precise: frame1: last_idx 8 first_idx 0")
__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ")
__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2")
__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2")
__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3")
__naked int subprog_arg_precise(void)
{
asm volatile (
"r6 = 3;"
"r1 = r6;"
/* subprog_with_precise_arg expects its argument to be
* precise, so r1->r6 will be marked precise from inside the
* subprog
*/
"call subprog_with_precise_arg;"
"r0 += r6;"
"exit;"
:
:
: __clobber_common, "r6"
);
}
/* r1 is pointer to stack slot;
* r2 is a register to spill into that slot
* subprog also spills r2 into its own stack slot
*/
__naked __noinline __used
static __u64 subprog_spill_reg_precise(void)
{
asm volatile (
/* spill to parent stack */
"*(u64 *)(r1 + 0) = r2;"
/* spill to subprog stack (we use -16 offset to avoid
* accidental confusion with parent's -8 stack slot in
* verifier log output)
*/
"*(u64 *)(r10 - 16) = r2;"
/* use both spills as return result to propagate precision everywhere */
"r0 = *(u64 *)(r10 - 16);"
"r2 = *(u64 *)(r1 + 0);"
"r0 += r2;"
"exit;"
);
}
SEC("?raw_tp")
__success __log_level(2)
/* precision backtracking can't currently handle stack access not through r10,
* so we won't be able to mark stack slot fp-8 as precise, and so will
* fall back to forcing all as precise
*/
__msg("mark_precise: frame0: falling back to forcing all scalars precise")
__naked int subprog_spill_into_parent_stack_slot_precise(void)
{
asm volatile (
"r6 = 1;"
/* pass pointer to stack slot and r6 to subprog;
* r6 will be marked precise and spilled into fp-8 slot, which
* also should be marked precise
*/
"r1 = r10;"
"r1 += -8;"
"r2 = r6;"
"call subprog_spill_reg_precise;"
/* restore reg from stack; in this case we'll be carrying
* stack mask when going back into subprog through jump
* history
*/
"r7 = *(u64 *)(r10 - 8);"
"r7 *= 4;"
"r1 = %[vals];"
/* here r7 is forced to be precise and has to be propagated
* back to the beginning, handling subprog call and logic
*/
"r1 += r7;"
"r0 = *(u32 *)(r1 + 0);"
"exit;"
:
: __imm_ptr(vals)
: __clobber_common, "r6", "r7"
);
}
__naked __noinline __used
static __u64 subprog_with_checkpoint(void)
{
asm volatile (
"r0 = 0;"
/* guaranteed checkpoint if BPF_F_TEST_STATE_FREQ is used */
"goto +0;"
"exit;"
);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_subprog_precision.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 3);
__type(key, __u32);
__type(value, __u64);
} hashmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} percpu_map SEC(".maps");
struct callback_ctx {
struct __sk_buff *ctx;
int input;
int output;
};
static __u64
check_hash_elem(struct bpf_map *map, __u32 *key, __u64 *val,
struct callback_ctx *data)
{
struct __sk_buff *skb = data->ctx;
__u32 k;
__u64 v;
if (skb) {
k = *key;
v = *val;
if (skb->len == 10000 && k == 10 && v == 10)
data->output = 3; /* impossible path */
else
data->output = 4;
} else {
data->output = data->input;
bpf_map_delete_elem(map, key);
}
return 0;
}
__u32 cpu = 0;
__u32 percpu_called = 0;
__u32 percpu_key = 0;
__u64 percpu_val = 0;
int percpu_output = 0;
static __u64
check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
struct callback_ctx *unused)
{
struct callback_ctx data;
percpu_called++;
cpu = bpf_get_smp_processor_id();
percpu_key = *key;
percpu_val = *val;
data.ctx = 0;
data.input = 100;
data.output = 0;
bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0);
percpu_output = data.output;
return 0;
}
int hashmap_output = 0;
int hashmap_elems = 0;
int percpu_map_elems = 0;
SEC("tc")
int test_pkt_access(struct __sk_buff *skb)
{
struct callback_ctx data;
data.ctx = skb;
data.input = 10;
data.output = 0;
hashmap_elems = bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0);
hashmap_output = data.output;
percpu_map_elems = bpf_for_each_map_elem(&percpu_map, check_percpu_elem,
(void *)0, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c |
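A hedged user-space sketch of exercising the iteration above: populate the hash map, then run the tc program once through BPF_PROG_TEST_RUN so check_hash_elem() is invoked for every element. The skeleton header name and packet size are assumptions for illustration:
/* Hedged sketch: fill hashmap with three entries and trigger one test run
 * of test_pkt_access(). Minimal error handling, illustrative names.
 */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "for_each_hash_map_elem.skel.h"
static int run_once(void)
{
	struct for_each_hash_map_elem *skel;
	char pkt[64] = {}; /* dummy packet for the skb test run */
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = sizeof(pkt),
	);
	__u64 val;
	__u32 key;
	int err;
	skel = for_each_hash_map_elem__open_and_load();
	if (!skel)
		return -1;
	for (key = 1; key <= 3; key++) {
		val = key;
		bpf_map_update_elem(bpf_map__fd(skel->maps.hashmap), &key, &val, BPF_ANY);
	}
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	for_each_hash_map_elem__destroy(skel);
	return err;
}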
#include "core_reloc_types.h"
void f(struct core_reloc_type_based___all_missing x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___all_missing.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
const char cubic[] = "cubic";
void BPF_STRUCT_OPS(dctcp_nouse_release, struct sock *sk)
{
bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
(void *)cubic, sizeof(cubic));
}
SEC(".struct_ops")
struct tcp_congestion_ops dctcp_rel = {
.release = (void *)dctcp_nouse_release,
.name = "bpf_dctcp_rel",
};
| linux-master | tools/testing/selftests/bpf/progs/bpf_dctcp_release.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/cfg.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("socket")
__description("unreachable")
__failure __msg("unreachable")
__failure_unpriv
__naked void unreachable(void)
{
asm volatile (" \
exit; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unreachable2")
__failure __msg("unreachable")
__failure_unpriv
__naked void unreachable2(void)
{
asm volatile (" \
goto l0_%=; \
goto l0_%=; \
l0_%=: exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("out of range jump")
__failure __msg("jump out of range")
__failure_unpriv
__naked void out_of_range_jump(void)
{
asm volatile (" \
goto l0_%=; \
exit; \
l0_%=: \
" ::: __clobber_all);
}
SEC("socket")
__description("out of range jump2")
__failure __msg("jump out of range")
__failure_unpriv
__naked void out_of_range_jump2(void)
{
asm volatile (" \
goto -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("loop (back-edge)")
__failure __msg("unreachable insn 1")
__msg_unpriv("back-edge")
__naked void loop_back_edge(void)
{
asm volatile (" \
l0_%=: goto l0_%=; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("loop2 (back-edge)")
__failure __msg("unreachable insn 4")
__msg_unpriv("back-edge")
__naked void loop2_back_edge(void)
{
asm volatile (" \
l0_%=: r1 = r0; \
r2 = r0; \
r3 = r0; \
goto l0_%=; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("conditional loop")
__failure __msg("infinite loop detected")
__msg_unpriv("back-edge")
__naked void conditional_loop(void)
{
asm volatile (" \
r0 = r1; \
l0_%=: r2 = r0; \
r3 = r0; \
if r1 == 0 goto l0_%=; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_cfg.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct key_t {
int a;
int b;
int c;
};
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 3);
__type(key, __u32);
__type(value, __u32);
} arraymap1 SEC(".maps");
/* will be set before the prog runs */
volatile const __u32 num_cpus = 0;
__u32 key_sum = 0, val_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_percpu_array_map(struct bpf_iter__bpf_map_elem *ctx)
{
__u32 *key = ctx->key;
void *pptr = ctx->value;
__u32 step;
int i;
if (key == (void *)0 || pptr == (void *)0)
return 0;
key_sum += *key;
step = 8;
for (i = 0; i < num_cpus; i++) {
val_sum += *(__u32 *)pptr;
pptr += step;
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
SEC("xdp")
int xdp_tx(struct xdp_md *xdp)
{
return XDP_TX;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/xdp_tx.c |
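A hedged sketch of loading and attaching an XDP program such as xdp_tx from user space with libbpf; the object file name and interface name are illustrative assumptions:
/* Hedged sketch: open/load the object, find the xdp_tx program and attach
 * it to an interface. Minimal error handling, illustrative names.
 */
#include <net/if.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
static int attach_xdp_tx(const char *ifname)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex, prog_fd;
	ifindex = if_nametoindex(ifname);
	if (!ifindex)
		return -1;
	obj = bpf_object__open_file("xdp_tx.bpf.o", NULL); /* assumed object name */
	if (!obj || bpf_object__load(obj))
		return -1;
	prog = bpf_object__find_program_by_name(obj, "xdp_tx");
	if (!prog)
		return -1;
	prog_fd = bpf_program__fd(prog);
	/* no flags: let the kernel pick driver or generic mode */
	return bpf_xdp_attach(ifindex, prog_fd, 0, NULL);
}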
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct S {
int x;
};
__noinline int foo(const struct S *s)
{
return bpf_get_prandom_u32() < s->x;
}
SEC("cgroup_skb/ingress")
__failure __msg("invalid mem access 'mem_or_null'")
int global_func12(struct __sk_buff *skb)
{
const struct S s = {.x = skb->len };
return foo(&s);
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func12.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/spill_fill.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");
SEC("socket")
__description("check valid spill/fill")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(POINTER_VALUE)
__naked void check_valid_spill_fill(void)
{
asm volatile (" \
/* spill R1(ctx) into stack */ \
*(u64*)(r10 - 8) = r1; \
/* fill it back into R2 */ \
r2 = *(u64*)(r10 - 8); \
/* should be able to access R0 = *(R2 + 8) */ \
/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */\
r0 = r2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check valid spill/fill, skb mark")
__success __success_unpriv __retval(0)
__naked void valid_spill_fill_skb_mark(void)
{
asm volatile (" \
r6 = r1; \
*(u64*)(r10 - 8) = r6; \
r0 = *(u64*)(r10 - 8); \
r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
exit; \
" :
: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("socket")
__description("check valid spill/fill, ptr to mem")
__success __success_unpriv __retval(0)
__naked void spill_fill_ptr_to_mem(void)
{
asm volatile (" \
/* reserve 8 byte ringbuf memory */ \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r1 = %[map_ringbuf] ll; \
r2 = 8; \
r3 = 0; \
call %[bpf_ringbuf_reserve]; \
/* store a pointer to the reserved memory in R6 */\
r6 = r0; \
/* check whether the reservation was successful */\
if r0 == 0 goto l0_%=; \
/* spill R6(mem) into the stack */ \
*(u64*)(r10 - 8) = r6; \
/* fill it back in R7 */ \
r7 = *(u64*)(r10 - 8); \
/* should be able to access *(R7) = 0 */ \
r1 = 0; \
*(u64*)(r7 + 0) = r1; \
/* submit the reserved ringbuf memory */ \
r1 = r7; \
r2 = 0; \
call %[bpf_ringbuf_submit]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ringbuf_reserve),
__imm(bpf_ringbuf_submit),
__imm_addr(map_ringbuf)
: __clobber_all);
}
SEC("socket")
__description("check with invalid reg offset 0")
__failure __msg("R0 pointer arithmetic on ringbuf_mem_or_null prohibited")
__failure_unpriv
__naked void with_invalid_reg_offset_0(void)
{
asm volatile (" \
/* reserve 8 byte ringbuf memory */ \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r1 = %[map_ringbuf] ll; \
r2 = 8; \
r3 = 0; \
call %[bpf_ringbuf_reserve]; \
/* store a pointer to the reserved memory in R6 */\
r6 = r0; \
/* add invalid offset to memory or NULL */ \
r0 += 1; \
/* check whether the reservation was successful */\
if r0 == 0 goto l0_%=; \
/* should not be able to access *(R7) = 0 */ \
r1 = 0; \
*(u32*)(r6 + 0) = r1; \
/* submit the reserved ringbuf memory */ \
r1 = r6; \
r2 = 0; \
call %[bpf_ringbuf_submit]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ringbuf_reserve),
__imm(bpf_ringbuf_submit),
__imm_addr(map_ringbuf)
: __clobber_all);
}
SEC("socket")
__description("check corrupted spill/fill")
__failure __msg("R0 invalid mem access 'scalar'")
__msg_unpriv("attempt to corrupt spilled")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void check_corrupted_spill_fill(void)
{
asm volatile (" \
/* spill R1(ctx) into stack */ \
*(u64*)(r10 - 8) = r1; \
/* mess up with R1 pointer on stack */ \
r0 = 0x23; \
*(u8*)(r10 - 7) = r0; \
/* fill back into R0 is fine for priv. \
* R0 now becomes SCALAR_VALUE. \
*/ \
r0 = *(u64*)(r10 - 8); \
/* Load from R0 should fail. */ \
r0 = *(u64*)(r0 + 8); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check corrupted spill/fill, LSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
__naked void check_corrupted_spill_fill_lsb(void)
{
asm volatile (" \
*(u64*)(r10 - 8) = r1; \
r0 = 0xcafe; \
*(u16*)(r10 - 8) = r0; \
r0 = *(u64*)(r10 - 8); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check corrupted spill/fill, MSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
__naked void check_corrupted_spill_fill_msb(void)
{
asm volatile (" \
*(u64*)(r10 - 8) = r1; \
r0 = 0x12345678; \
*(u32*)(r10 - 4) = r0; \
r0 = *(u64*)(r10 - 8); \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("Spill and refill a u32 const scalar. Offset to skb->data")
__success __retval(0)
__naked void scalar_offset_to_skb_data_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
r4 = *(u32*)(r10 - 8); \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */ \
r0 += r4; \
/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
if r0 > r3 goto l0_%=; \
/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("socket")
__description("Spill a u32 const, refill from another half of the uninit u32 from the stack")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -4+0 size 4")
__retval(0)
__naked void uninit_u32_from_the_stack(void)
{
asm volatile (" \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/ \
r4 = *(u32*)(r10 - 4); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("Spill a u32 const scalar. Refill as u16. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void u16_offset_to_skb_data(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
r4 = *(u16*)(r10 - 8); \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
r0 += r4; \
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
if r0 > r3 goto l0_%=; \
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("Spill u32 const scalars. Refill as u64. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void u64_offset_to_skb_data(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w6 = 0; \
w7 = 20; \
*(u32*)(r10 - 4) = r6; \
*(u32*)(r10 - 8) = r7; \
r4 = *(u16*)(r10 - 8); \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
r0 += r4; \
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
if r0 > r3 goto l0_%=; \
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void _6_offset_to_skb_data(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
r4 = *(u16*)(r10 - 6); \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
r0 += r4; \
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
if r0 > r3 goto l0_%=; \
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void addr_offset_to_skb_data(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
*(u32*)(r10 - 4) = r4; \
r4 = *(u32*)(r10 - 4); \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */\
r0 += r4; \
/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\
if r0 > r3 goto l0_%=; \
/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("Spill and refill a umax=40 bounded scalar. Offset to skb->data")
__success __retval(0)
__naked void scalar_offset_to_skb_data_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r4 = *(u64*)(r1 + %[__sk_buff_tstamp]); \
if r4 <= 40 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: /* *(u32 *)(r10 -8) = r4 R4=umax=40 */ \
*(u32*)(r10 - 8) = r4; \
/* r4 = (*u32 *)(r10 - 8) */ \
r4 = *(u32*)(r10 - 8); \
/* r2 += r4 R2=pkt R4=umax=40 */ \
r2 += r4; \
/* r0 = r2 R2=pkt,umax=40 R4=umax=40 */ \
r0 = r2; \
/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */ \
r2 += 20; \
/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */\
if r2 > r3 goto l1_%=; \
/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */\
r0 = *(u32*)(r0 + 0); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
: __clobber_all);
}
SEC("tc")
__description("Spill a u32 scalar at fp-4 and then at fp-8")
__success __retval(0)
__naked void and_then_at_fp_8(void)
{
asm volatile (" \
w4 = 4321; \
*(u32*)(r10 - 4) = r4; \
*(u32*)(r10 - 8) = r4; \
r4 = *(u64*)(r10 - 8); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("xdp")
__description("32-bit spill of 64-bit reg should clear ID")
__failure __msg("math between ctx pointer and 4294967295 is not allowed")
__naked void spill_32bit_of_64bit_fail(void)
{
asm volatile (" \
r6 = r1; \
/* Roll one bit to force the verifier to track both branches. */\
call %[bpf_get_prandom_u32]; \
r0 &= 0x8; \
/* Put a large number into r1. */ \
r1 = 0xffffffff; \
r1 <<= 32; \
r1 += r0; \
/* Assign an ID to r1. */ \
r2 = r1; \
/* 32-bit spill r1 to stack - should clear the ID! */\
*(u32*)(r10 - 8) = r1; \
/* 32-bit fill r2 from stack. */ \
r2 = *(u32*)(r10 - 8); \
/* Compare r2 with another register to trigger find_equal_scalars.\
* Having one random bit is important here, otherwise the verifier cuts\
* the corners. If the ID was mistakenly preserved on spill, this would\
* cause the verifier to think that r1 is also equal to zero in one of\
* the branches, and equal to eight on the other branch.\
*/ \
r3 = 0; \
if r2 != r3 goto l0_%=; \
l0_%=: r1 >>= 32; \
/* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\
* read will happen, because it actually contains 0xffffffff.\
*/ \
r6 += r1; \
r0 = *(u32*)(r6 + 0); \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("xdp")
__description("16-bit spill of 32-bit reg should clear ID")
__failure __msg("dereference of modified ctx ptr R6 off=65535 disallowed")
__naked void spill_16bit_of_32bit_fail(void)
{
asm volatile (" \
r6 = r1; \
/* Roll one bit to force the verifier to track both branches. */\
call %[bpf_get_prandom_u32]; \
r0 &= 0x8; \
/* Put a large number into r1. */ \
w1 = 0xffff0000; \
r1 += r0; \
/* Assign an ID to r1. */ \
r2 = r1; \
/* 16-bit spill r1 to stack - should clear the ID! */\
*(u16*)(r10 - 8) = r1; \
/* 16-bit fill r2 from stack. */ \
r2 = *(u16*)(r10 - 8); \
/* Compare r2 with another register to trigger find_equal_scalars.\
* Having one random bit is important here, otherwise the verifier cuts\
* the corners. If the ID was mistakenly preserved on spill, this would\
* cause the verifier to think that r1 is also equal to zero in one of\
* the branches, and equal to eight on the other branch.\
*/ \
r3 = 0; \
if r2 != r3 goto l0_%=; \
l0_%=: r1 >>= 16; \
/* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\
* read will happen, because it actually contains 0xffff.\
*/ \
r6 += r1; \
r0 = *(u32*)(r6 + 0); \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_spill_fill.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#define VAR_NUM 16
struct hmap_elem {
struct bpf_spin_lock lock;
int var[VAR_NUM];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct hmap_elem);
} hash_map SEC(".maps");
struct array_elem {
struct bpf_spin_lock lock;
int var[VAR_NUM];
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct array_elem);
} array_map SEC(".maps");
SEC("cgroup/skb")
int bpf_map_lock_test(struct __sk_buff *skb)
{
struct hmap_elem *val;
int rnd = bpf_get_prandom_u32();
int key = 0, err = 1, i;
struct array_elem *q;
val = bpf_map_lookup_elem(&hash_map, &key);
if (!val)
goto err;
/* spin_lock in hash map */
bpf_spin_lock(&val->lock);
for (i = 0; i < VAR_NUM; i++)
val->var[i] = rnd;
bpf_spin_unlock(&val->lock);
/* spin_lock in array */
q = bpf_map_lookup_elem(&array_map, &key);
if (!q)
goto err;
bpf_spin_lock(&q->lock);
for (i = 0; i < VAR_NUM; i++)
q->var[i] = rnd;
bpf_spin_unlock(&q->lock);
err = 0;
err:
return err;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_map_lock.c |
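User space can read the spin-lock-protected element above atomically by passing BPF_F_LOCK to the lookup. A hedged sketch, assuming the map fd has already been obtained (for example from a skeleton) and mirroring struct hmap_elem's layout:
/* Hedged sketch: copy one locked hash_map element. BPF_F_LOCK makes the
 * kernel take the element's bpf_spin_lock for the duration of the copy.
 */
#include <bpf/bpf.h>
struct hmap_elem_user {
	unsigned int lock; /* mirrors struct bpf_spin_lock (a single u32) */
	int var[16];       /* VAR_NUM */
};
static int read_locked_elem(int map_fd)
{
	__u32 key = 0;
	struct hmap_elem_user val;
	return bpf_map_lookup_elem_flags(map_fd, &key, &val, BPF_F_LOCK);
}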
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Google */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
extern const int bpf_prog_active __ksym;
SEC("fentry/security_inode_getattr")
int BPF_PROG(d_path_check_rdonly_mem, struct path *path, struct kstat *stat,
__u32 request_mask, unsigned int query_flags)
{
void *active;
__u32 cpu;
cpu = bpf_get_smp_processor_id();
active = (void *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
if (active) {
/* FAIL here! 'active' points to readonly memory. bpf helpers
* that update their arguments cannot write into it.
*/
bpf_d_path(path, active, sizeof(int));
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Isovalent, Inc.
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <string.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 11);
__type(key, __u32);
__type(value, __u64);
} result_number SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 5);
__type(key, __u32);
const char (*value)[32];
} result_string SEC(".maps");
struct foo {
__u8 a;
__u32 b;
__u64 c;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 5);
__type(key, __u32);
__type(value, struct foo);
} result_struct SEC(".maps");
/* Relocation tests for __u64s. */
static __u64 num0;
static __u64 num1 = 42;
static const __u64 num2 = 24;
static __u64 num3 = 0;
static __u64 num4 = 0xffeeff;
static const __u64 num5 = 0xabab;
static const __u64 num6 = 0xab;
/* Relocation tests for strings. */
static const char str0[32] = "abcdefghijklmnopqrstuvwxyz";
static char str1[32] = "abcdefghijklmnopqrstuvwxyz";
static char str2[32];
/* Relocation tests for structs. */
static const struct foo struct0 = {
.a = 42,
.b = 0xfefeefef,
.c = 0x1111111111111111ULL,
};
static struct foo struct1;
static const struct foo struct2;
static struct foo struct3 = {
.a = 41,
.b = 0xeeeeefef,
.c = 0x2111111111111111ULL,
};
#define test_reloc(map, num, var) \
do { \
__u32 key = num; \
bpf_map_update_elem(&result_##map, &key, var, 0); \
} while (0)
SEC("tc")
int load_static_data(struct __sk_buff *skb)
{
static const __u64 bar = ~0;
test_reloc(number, 0, &num0);
test_reloc(number, 1, &num1);
test_reloc(number, 2, &num2);
test_reloc(number, 3, &num3);
test_reloc(number, 4, &num4);
test_reloc(number, 5, &num5);
num4 = 1234;
test_reloc(number, 6, &num4);
test_reloc(number, 7, &num0);
test_reloc(number, 8, &num6);
test_reloc(string, 0, str0);
test_reloc(string, 1, str1);
test_reloc(string, 2, str2);
str1[5] = 'x';
test_reloc(string, 3, str1);
__builtin_memcpy(&str2[2], "hello", sizeof("hello"));
test_reloc(string, 4, str2);
test_reloc(struct, 0, &struct0);
test_reloc(struct, 1, &struct1);
test_reloc(struct, 2, &struct2);
test_reloc(struct, 3, &struct3);
test_reloc(number, 9, &struct0.c);
test_reloc(number, 10, &bar);
return TC_ACT_OK;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_global_data.c |
#include "core_reloc_types.h"
void f(struct core_reloc_enum64val___diff x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
*
* Author: Roberto Sassu <[email protected]>
*/
#include "vmlinux.h"
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
__u32 monitored_pid;
__u32 key_serial;
__u32 key_id;
__u64 flags;
extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
extern void bpf_key_put(struct bpf_key *key) __ksym;
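/* LSM hook on the bpf() syscall: for the monitored process, look up either
 * a user key by serial or a system-defined key by id, then drop the
 * reference.
 */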
SEC("lsm.s/bpf")
int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size)
{
struct bpf_key *bkey;
__u32 pid;
pid = bpf_get_current_pid_tgid() >> 32;
if (pid != monitored_pid)
return 0;
if (key_serial)
bkey = bpf_lookup_user_key(key_serial, flags);
else
bkey = bpf_lookup_system_key(key_id);
if (!bkey)
return -ENOENT;
bpf_key_put(bkey);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_lookup_key.c |
#include "core_reloc_types.h"
void f(struct core_reloc_arrays___equiv_zero_sz_arr x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___equiv_zero_sz_arr.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <non_const> == <const>, 1")
__success __retval(0)
__naked void deducing_bounds_from_non_const_1(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 < 3 goto l0_%=; \
r2 = 2; \
if r0 == r2 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <non_const> == <const>, 2")
__success __retval(0)
__naked void deducing_bounds_from_non_const_2(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 > 3 goto l0_%=; \
r2 = 4; \
if r0 == r2 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <non_const> != <const>, 1")
__success __retval(0)
__naked void deducing_bounds_from_non_const_3(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 < 3 goto l0_%=; \
r2 = 2; \
if r0 != r2 goto l0_%=; \
goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <non_const> != <const>, 2")
__success __retval(0)
__naked void deducing_bounds_from_non_const_4(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 > 3 goto l0_%=; \
r2 = 4; \
if r0 != r2 goto l0_%=; \
goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <non_const> == <const>, 1")
__success __retval(0)
__naked void deducing_bounds_from_non_const_5(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 < 4 goto l0_%=; \
w2 = 3; \
if w0 == w2 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <non_const> == <const>, 2")
__success __retval(0)
__naked void deducing_bounds_from_non_const_6(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 > 4 goto l0_%=; \
w2 = 5; \
if w0 == w2 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <non_const> != <const>, 1")
__success __retval(0)
__naked void deducing_bounds_from_non_const_7(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 < 3 goto l0_%=; \
w2 = 2; \
if w0 != w2 goto l0_%=; \
goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <non_const> != <const>, 2")
__success __retval(0)
__naked void deducing_bounds_from_non_const_8(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 > 3 goto l0_%=; \
w2 = 4; \
if w0 != w2 goto l0_%=; \
goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> > <non_const>, 1")
__success __retval(0)
__naked void deducing_bounds_from_non_const_9(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
r2 = 0; \
if r2 > r0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> > <non_const>, 2")
__success __retval(0)
__naked void deducing_bounds_from_non_const_10(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 < 4 goto l0_%=; \
r2 = 4; \
if r2 > r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> >= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_11(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 < 4 goto l0_%=; \
r2 = 3; \
if r2 >= r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> < <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_12(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 > 4 goto l0_%=; \
r2 = 4; \
if r2 < r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> <= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_13(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 >= 4 goto l0_%=; \
r2 = 4; \
if r2 <= r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> == <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_14(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 < 3 goto l0_%=; \
r2 = 2; \
if r2 == r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> s> <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_15(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 s< 4 goto l0_%=; \
r2 = 4; \
if r2 s> r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> s>= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_16(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 s< 4 goto l0_%=; \
r2 = 3; \
if r2 s>= r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> s< <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_17(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 s> 4 goto l0_%=; \
r2 = 4; \
if r2 s< r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> s<= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_18(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 s> 4 goto l0_%=; \
r2 = 5; \
if r2 s<= r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> != <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_19(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 < 3 goto l0_%=; \
r2 = 2; \
if r2 != r0 goto l0_%=; \
goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> > <non_const>, 1")
__success __retval(0)
__naked void deducing_bounds_from_non_const_20(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
w2 = 0; \
if w2 > w0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> > <non_const>, 2")
__success __retval(0)
__naked void deducing_bounds_from_non_const_21(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 < 4 goto l0_%=; \
w2 = 4; \
if w2 > w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> >= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_22(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 < 4 goto l0_%=; \
w2 = 3; \
if w2 >= w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> < <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_23(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 > 4 goto l0_%=; \
w2 = 4; \
if w2 < w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> <= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_24(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 >= 4 goto l0_%=; \
w2 = 4; \
if w2 <= w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> == <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_25(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 < 4 goto l0_%=; \
w2 = 3; \
if w2 == w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> s> <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_26(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 s< 4 goto l0_%=; \
w2 = 4; \
if w2 s> w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> s>= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_27(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 s< 4 goto l0_%=; \
w2 = 3; \
if w2 s>= w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> s< <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_28(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 s> 4 goto l0_%=; \
w2 = 5; \
if w2 s< w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> s<= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_29(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 s>= 4 goto l0_%=; \
w2 = 4; \
if w2 s<= w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> != <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_30(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 < 3 goto l0_%=; \
w2 = 2; \
if w2 != w0 goto l0_%=; \
goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct bpf_testmod_btf_type_tag_1 {
int a;
};
struct bpf_testmod_btf_type_tag_2 {
struct bpf_testmod_btf_type_tag_1 *p;
};
int g;
SEC("fentry/bpf_testmod_test_btf_type_tag_user_1")
int BPF_PROG(test_user1, struct bpf_testmod_btf_type_tag_1 *arg)
{
g = arg->a;
return 0;
}
SEC("fentry/bpf_testmod_test_btf_type_tag_user_2")
int BPF_PROG(test_user2, struct bpf_testmod_btf_type_tag_2 *arg)
{
g = arg->p->a;
return 0;
}
/* int __sys_getsockname(int fd, struct sockaddr __user *usockaddr,
* int __user *usockaddr_len);
*/
SEC("fentry/__sys_getsockname")
int BPF_PROG(test_sys_getsockname, int fd, struct sockaddr *usockaddr,
int *usockaddr_len)
{
g = usockaddr->sa_family;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/btf_type_tag_user.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <stdbool.h>
#include <errno.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#define OP_BATCH 64
struct update_ctx {
unsigned int from;
unsigned int step;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, 4);
__uint(map_flags, BPF_F_NO_PREALLOC);
} htab SEC(".maps");
char _license[] SEC("license") = "GPL";
unsigned char zeroed_value[4096];
unsigned int nr_thread = 0;
long op_cnt = 0;
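/* bpf_loop() callback: update one element per invocation. Each program
 * starts from its own CPU id and steps by nr_thread, so concurrent threads
 * touch disjoint keys.
 */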
static int write_htab(unsigned int i, struct update_ctx *ctx, unsigned int flags)
{
bpf_map_update_elem(&htab, &ctx->from, zeroed_value, flags);
ctx->from += ctx->step;
return 0;
}
static int overwrite_htab(unsigned int i, struct update_ctx *ctx)
{
return write_htab(i, ctx, 0);
}
static int newwrite_htab(unsigned int i, struct update_ctx *ctx)
{
return write_htab(i, ctx, BPF_NOEXIST);
}
static int del_htab(unsigned int i, struct update_ctx *ctx)
{
bpf_map_delete_elem(&htab, &ctx->from);
ctx->from += ctx->step;
return 0;
}
SEC("?tp/syscalls/sys_enter_getpgid")
int overwrite(void *ctx)
{
struct update_ctx update;
update.from = bpf_get_smp_processor_id();
update.step = nr_thread;
bpf_loop(OP_BATCH, overwrite_htab, &update, 0);
__sync_fetch_and_add(&op_cnt, 1);
return 0;
}
SEC("?tp/syscalls/sys_enter_getpgid")
int batch_add_batch_del(void *ctx)
{
struct update_ctx update;
update.from = bpf_get_smp_processor_id();
update.step = nr_thread;
bpf_loop(OP_BATCH, overwrite_htab, &update, 0);
update.from = bpf_get_smp_processor_id();
bpf_loop(OP_BATCH, del_htab, &update, 0);
__sync_fetch_and_add(&op_cnt, 2);
return 0;
}
SEC("?tp/syscalls/sys_enter_getpgid")
int add_only(void *ctx)
{
struct update_ctx update;
update.from = bpf_get_smp_processor_id() / 2;
update.step = nr_thread / 2;
bpf_loop(OP_BATCH, newwrite_htab, &update, 0);
__sync_fetch_and_add(&op_cnt, 1);
return 0;
}
SEC("?tp/syscalls/sys_enter_getppid")
int del_only(void *ctx)
{
struct update_ctx update;
update.from = bpf_get_smp_processor_id() / 2;
update.step = nr_thread / 2;
bpf_loop(OP_BATCH, del_htab, &update, 0);
__sync_fetch_and_add(&op_cnt, 1);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/htab_mem_bench.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct hmap_elem {
volatile int cnt;
struct bpf_spin_lock lock;
int test_padding;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct hmap_elem);
} hmap SEC(".maps");
struct cls_elem {
struct bpf_spin_lock lock;
volatile int cnt;
};
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, struct cls_elem);
} cls_map SEC(".maps");
struct bpf_vqueue {
struct bpf_spin_lock lock;
/* 4 byte hole */
unsigned long long lasttime;
int credit;
unsigned int rate;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct bpf_vqueue);
} vqueue SEC(".maps");
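/* Credit accrued over 'delta' nanoseconds at 'rate': multiply, then shift
 * right by 20 as a cheap approximation of dividing by one million.
 */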
#define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
SEC("cgroup_skb/ingress")
int bpf_spin_lock_test(struct __sk_buff *skb)
{
volatile int credit = 0, max_credit = 100, pkt_len = 64;
struct hmap_elem zero = {}, *val;
unsigned long long curtime;
struct bpf_vqueue *q;
struct cls_elem *cls;
int key = 0;
int err = 0;
val = bpf_map_lookup_elem(&hmap, &key);
if (!val) {
bpf_map_update_elem(&hmap, &key, &zero, 0);
val = bpf_map_lookup_elem(&hmap, &key);
if (!val) {
err = 1;
goto err;
}
}
/* spin_lock in hash map run time test */
bpf_spin_lock(&val->lock);
if (val->cnt)
val->cnt--;
else
val->cnt++;
if (val->cnt != 0 && val->cnt != 1)
err = 1;
bpf_spin_unlock(&val->lock);
/* spin_lock in array. virtual queue demo */
q = bpf_map_lookup_elem(&vqueue, &key);
if (!q)
goto err;
curtime = bpf_ktime_get_ns();
bpf_spin_lock(&q->lock);
q->credit += CREDIT_PER_NS(curtime - q->lasttime, q->rate);
q->lasttime = curtime;
if (q->credit > max_credit)
q->credit = max_credit;
q->credit -= pkt_len;
credit = q->credit;
bpf_spin_unlock(&q->lock);
__sink(credit);
/* spin_lock in cgroup local storage */
cls = bpf_get_local_storage(&cls_map, 0);
bpf_spin_lock(&cls->lock);
cls->cnt++;
bpf_spin_unlock(&cls->lock);
err:
return err;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_spin_lock.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
int var6 = 6;
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 16);
} map2 SEC(".maps");
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
__uint(pinning, LIBBPF_PIN_BY_NAME);
} pinmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} nopinmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
__uint(pinning, LIBBPF_PIN_NONE);
} nopinmap2 SEC(".maps");
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_pinning.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
__u32 target_id;
__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;
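/* bpf_map iterator: print the summed per-CPU element count of the map whose
 * id matches target_id.
 */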
SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct bpf_map *map = ctx->map;
if (map && map->id == target_id)
BPF_SEQ_PRINTF(seq, "%lld", bpf_map_sum_elem_count(map));
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/map_percpu_stats.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
SEC("xdp")
int _xdp_adjust_tail_shrink(struct xdp_md *xdp)
{
__u8 *data_end = (void *)(long)xdp->data_end;
__u8 *data = (void *)(long)xdp->data;
int offset = 0;
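	/* Pick a shrink amount from the total buffer length: 54 bytes
	 * (sizeof(pkt_v4)) triggers an over-shrink, 9000 selects the
	 * non-linear cases keyed on the first payload byte, anything else
	 * shrinks by 20 bytes.
	 */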
switch (bpf_xdp_get_buff_len(xdp)) {
case 54:
/* sizeof(pkt_v4) */
offset = 256; /* shrink too much */
break;
case 9000:
/* non-linear buff test cases */
if (data + 1 > data_end)
return XDP_DROP;
switch (data[0]) {
case 0:
offset = 10;
break;
case 1:
offset = 4100;
break;
case 2:
offset = 8200;
break;
default:
return XDP_DROP;
}
break;
default:
offset = 20;
break;
}
if (bpf_xdp_adjust_tail(xdp, 0 - offset))
return XDP_DROP;
return XDP_TX;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c |
// SPDX-License-Identifier: GPL-2.0
/* This logic is lifted from a real-world use case of packet parsing, used in
* the open source library katran, a layer 4 load balancer.
*
* This test demonstrates how to parse packet contents using dynptrs. The
* original code (parsing without dynptrs) can be found in test_parse_tcp_hdr_opt.c
*/
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <linux/tcp.h>
#include <stdbool.h>
#include <linux/ipv6.h>
#include <linux/if_ether.h>
#include "test_tcp_hdr_options.h"
#include "bpf_kfuncs.h"
char _license[] SEC("license") = "GPL";
/* Kind number used for experiments */
const __u32 tcp_hdr_opt_kind_tpr = 0xFD;
/* Length of the tcp header option */
const __u32 tcp_hdr_opt_len_tpr = 6;
/* maximum number of header options to check to look up server_id */
const __u32 tcp_hdr_opt_max_opt_checks = 15;
__u32 server_id;
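/* Parse a single TCP header option through a dynptr slice. Advances *off and
 * *hdr_bytes_remaining; returns 1 once the experimental TPR option carrying
 * server_id is found, 0 to keep scanning, -1 on error or end of options.
 */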
static int parse_hdr_opt(struct bpf_dynptr *ptr, __u32 *off, __u8 *hdr_bytes_remaining,
__u32 *server_id)
{
__u8 kind, hdr_len;
__u8 buffer[sizeof(kind) + sizeof(hdr_len) + sizeof(*server_id)];
__u8 *data;
__builtin_memset(buffer, 0, sizeof(buffer));
data = bpf_dynptr_slice(ptr, *off, buffer, sizeof(buffer));
if (!data)
return -1;
kind = data[0];
if (kind == TCPOPT_EOL)
return -1;
if (kind == TCPOPT_NOP) {
*off += 1;
*hdr_bytes_remaining -= 1;
return 0;
}
if (*hdr_bytes_remaining < 2)
return -1;
hdr_len = data[1];
if (hdr_len > *hdr_bytes_remaining)
return -1;
if (kind == tcp_hdr_opt_kind_tpr) {
if (hdr_len != tcp_hdr_opt_len_tpr)
return -1;
__builtin_memcpy(server_id, (__u32 *)(data + 2), sizeof(*server_id));
return 1;
}
*off += hdr_len;
*hdr_bytes_remaining -= hdr_len;
return 0;
}
SEC("xdp")
int xdp_ingress_v6(struct xdp_md *xdp)
{
__u8 buffer[sizeof(struct tcphdr)] = {};
__u8 hdr_bytes_remaining;
struct tcphdr *tcp_hdr;
__u8 tcp_hdr_opt_len;
int err = 0;
__u32 off;
struct bpf_dynptr ptr;
bpf_dynptr_from_xdp(xdp, 0, &ptr);
off = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
tcp_hdr = bpf_dynptr_slice(&ptr, off, buffer, sizeof(buffer));
if (!tcp_hdr)
return XDP_DROP;
tcp_hdr_opt_len = (tcp_hdr->doff * 4) - sizeof(struct tcphdr);
if (tcp_hdr_opt_len < tcp_hdr_opt_len_tpr)
return XDP_DROP;
hdr_bytes_remaining = tcp_hdr_opt_len;
off += sizeof(struct tcphdr);
	/* TCP header options occupy at most 40 bytes */
for (int i = 0; i < tcp_hdr_opt_max_opt_checks; i++) {
err = parse_hdr_opt(&ptr, &off, &hdr_bytes_remaining, &server_id);
if (err || !hdr_bytes_remaining)
break;
}
if (!server_id)
return XDP_DROP;
return XDP_PASS;
}
| linux-master | tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct callback_ctx {
int dummy;
};
static long write_task(struct task_struct *task, struct vm_area_struct *vma,
struct callback_ctx *data)
{
/* writing to task, which is illegal */
task->mm = NULL;
return 0;
}
SEC("raw_tp/sys_enter")
int handle_getpid(void)
{
struct task_struct *task = bpf_get_current_task_btf();
struct callback_ctx data = {};
bpf_find_vma(task, 0, write_task, &data, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/find_vma_fail2.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define START_CHAR 'a'
#include "bpf_iter_test_kern_common.h"
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_test_kern1.c |
#include <stddef.h>
#include <inttypes.h>
#include <errno.h>
#include <linux/seg6_local.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
/* Packet parsing state machine helpers. */
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
#define SR6_FLAG_ALERT (1 << 4)
#define BPF_PACKET_HEADER __attribute__((packed))
struct ip6_t {
unsigned int ver:4;
unsigned int priority:8;
unsigned int flow_label:20;
unsigned short payload_len;
unsigned char next_header;
unsigned char hop_limit;
unsigned long long src_hi;
unsigned long long src_lo;
unsigned long long dst_hi;
unsigned long long dst_lo;
} BPF_PACKET_HEADER;
struct ip6_addr_t {
unsigned long long hi;
unsigned long long lo;
} BPF_PACKET_HEADER;
struct ip6_srh_t {
unsigned char nexthdr;
unsigned char hdrlen;
unsigned char type;
unsigned char segments_left;
unsigned char first_segment;
unsigned char flags;
unsigned short tag;
struct ip6_addr_t segments[0];
} BPF_PACKET_HEADER;
struct sr6_tlv_t {
unsigned char type;
unsigned char len;
unsigned char value[0];
} BPF_PACKET_HEADER;
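/* Validate that the packet is IPv6 carrying a type-4 (SRv6) routing header
 * and return a pointer to it, or NULL otherwise.
 */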
static __always_inline struct ip6_srh_t *get_srh(struct __sk_buff *skb)
{
void *cursor, *data_end;
struct ip6_srh_t *srh;
struct ip6_t *ip;
uint8_t *ipver;
data_end = (void *)(long)skb->data_end;
cursor = (void *)(long)skb->data;
ipver = (uint8_t *)cursor;
if ((void *)ipver + sizeof(*ipver) > data_end)
return NULL;
if ((*ipver >> 4) != 6)
return NULL;
ip = cursor_advance(cursor, sizeof(*ip));
if ((void *)ip + sizeof(*ip) > data_end)
return NULL;
if (ip->next_header != 43)
return NULL;
srh = cursor_advance(cursor, sizeof(*srh));
if ((void *)srh + sizeof(*srh) > data_end)
return NULL;
if (srh->type != 4)
return NULL;
return srh;
}
static __always_inline int update_tlv_pad(struct __sk_buff *skb,
uint32_t new_pad, uint32_t old_pad,
uint32_t pad_off)
{
int err;
if (new_pad != old_pad) {
err = bpf_lwt_seg6_adjust_srh(skb, pad_off,
(int) new_pad - (int) old_pad);
if (err)
return err;
}
if (new_pad > 0) {
char pad_tlv_buf[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0};
struct sr6_tlv_t *pad_tlv = (struct sr6_tlv_t *) pad_tlv_buf;
pad_tlv->type = SR6_TLV_PADDING;
pad_tlv->len = new_pad - 2;
err = bpf_lwt_seg6_store_bytes(skb, pad_off,
(void *)pad_tlv_buf, new_pad);
if (err)
return err;
}
return 0;
}
static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb,
struct ip6_srh_t *srh,
uint32_t *tlv_off,
uint32_t *pad_size,
uint32_t *pad_off)
{
uint32_t srh_off, cur_off;
int offset_valid = 0;
int err;
srh_off = (char *)srh - (char *)(long)skb->data;
// cur_off = end of segments, start of possible TLVs
cur_off = srh_off + sizeof(*srh) +
sizeof(struct ip6_addr_t) * (srh->first_segment + 1);
*pad_off = 0;
// we can only go as far as ~10 TLVs due to the BPF max stack size
// workaround: define induction variable "i" as "long" instead
// of "int" to prevent alu32 sub-register spilling.
#pragma clang loop unroll(disable)
for (long i = 0; i < 100; i++) {
struct sr6_tlv_t tlv;
if (cur_off == *tlv_off)
offset_valid = 1;
if (cur_off >= srh_off + ((srh->hdrlen + 1) << 3))
break;
err = bpf_skb_load_bytes(skb, cur_off, &tlv, sizeof(tlv));
if (err)
return err;
if (tlv.type == SR6_TLV_PADDING) {
*pad_size = tlv.len + sizeof(tlv);
*pad_off = cur_off;
if (*tlv_off == srh_off) {
*tlv_off = cur_off;
offset_valid = 1;
}
break;
} else if (tlv.type == SR6_TLV_HMAC) {
break;
}
cur_off += sizeof(tlv) + tlv.len;
} // we reached the padding or HMAC TLVs, or the end of the SRH
if (*pad_off == 0)
*pad_off = cur_off;
if (*tlv_off == -1)
*tlv_off = cur_off;
else if (!offset_valid)
return -EINVAL;
return 0;
}
static __always_inline int add_tlv(struct __sk_buff *skb,
struct ip6_srh_t *srh, uint32_t tlv_off,
struct sr6_tlv_t *itlv, uint8_t tlv_size)
{
uint32_t srh_off = (char *)srh - (char *)(long)skb->data;
uint8_t len_remaining, new_pad;
uint32_t pad_off = 0;
uint32_t pad_size = 0;
uint32_t partial_srh_len;
int err;
if (tlv_off != -1)
tlv_off += srh_off;
if (itlv->type == SR6_TLV_PADDING || itlv->type == SR6_TLV_HMAC)
return -EINVAL;
err = is_valid_tlv_boundary(skb, srh, &tlv_off, &pad_size, &pad_off);
if (err)
return err;
err = bpf_lwt_seg6_adjust_srh(skb, tlv_off, sizeof(*itlv) + itlv->len);
if (err)
return err;
err = bpf_lwt_seg6_store_bytes(skb, tlv_off, (void *)itlv, tlv_size);
if (err)
return err;
// the following can't be moved inside update_tlv_pad because the
// bpf verifier has some issues with it
pad_off += sizeof(*itlv) + itlv->len;
partial_srh_len = pad_off - srh_off;
len_remaining = partial_srh_len % 8;
new_pad = 8 - len_remaining;
if (new_pad == 1) // cannot pad for 1 byte only
new_pad = 9;
else if (new_pad == 8)
new_pad = 0;
return update_tlv_pad(skb, new_pad, pad_size, pad_off);
}
// Add an Egress TLV fc00::4, add the flag A,
// and apply End.X action to fc42::1
SEC("lwt_seg6local")
int __add_egr_x(struct __sk_buff *skb)
{
unsigned long long hi = 0xfc42000000000000;
unsigned long long lo = 0x1;
struct ip6_srh_t *srh = get_srh(skb);
uint8_t new_flags = SR6_FLAG_ALERT;
struct ip6_addr_t addr;
int err, offset;
if (srh == NULL)
return BPF_DROP;
uint8_t tlv[20] = {2, 18, 0, 0, 0xfd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4};
err = add_tlv(skb, srh, (srh->hdrlen+1) << 3,
(struct sr6_tlv_t *)&tlv, 20);
if (err)
return BPF_DROP;
offset = sizeof(struct ip6_t) + offsetof(struct ip6_srh_t, flags);
err = bpf_lwt_seg6_store_bytes(skb, offset,
(void *)&new_flags, sizeof(new_flags));
if (err)
return BPF_DROP;
addr.lo = bpf_cpu_to_be64(lo);
addr.hi = bpf_cpu_to_be64(hi);
err = bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X,
(void *)&addr, sizeof(addr));
if (err)
return BPF_DROP;
return BPF_REDIRECT;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_seg6_loop.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
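/* cpuv4 signed division/modulo: check truncation toward zero for mixed-sign
 * operands and the defined divide-by-zero behaviour (quotient 0, remainder
 * left equal to the dividend).
 */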
SEC("socket")
__description("SDIV32, non-zero imm divisor, check 1")
__success __success_unpriv __retval(-20)
__naked void sdiv32_non_zero_imm_1(void)
{
asm volatile (" \
w0 = -41; \
w0 s/= 2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero imm divisor, check 2")
__success __success_unpriv __retval(-20)
__naked void sdiv32_non_zero_imm_2(void)
{
asm volatile (" \
w0 = 41; \
w0 s/= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero imm divisor, check 3")
__success __success_unpriv __retval(20)
__naked void sdiv32_non_zero_imm_3(void)
{
asm volatile (" \
w0 = -41; \
w0 s/= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero imm divisor, check 4")
__success __success_unpriv __retval(-21)
__naked void sdiv32_non_zero_imm_4(void)
{
asm volatile (" \
w0 = -42; \
w0 s/= 2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero imm divisor, check 5")
__success __success_unpriv __retval(-21)
__naked void sdiv32_non_zero_imm_5(void)
{
asm volatile (" \
w0 = 42; \
w0 s/= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero imm divisor, check 6")
__success __success_unpriv __retval(21)
__naked void sdiv32_non_zero_imm_6(void)
{
asm volatile (" \
w0 = -42; \
w0 s/= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero imm divisor, check 7")
__success __success_unpriv __retval(21)
__naked void sdiv32_non_zero_imm_7(void)
{
asm volatile (" \
w0 = 42; \
w0 s/= 2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero imm divisor, check 8")
__success __success_unpriv __retval(20)
__naked void sdiv32_non_zero_imm_8(void)
{
asm volatile (" \
w0 = 41; \
w0 s/= 2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero reg divisor, check 1")
__success __success_unpriv __retval(-20)
__naked void sdiv32_non_zero_reg_1(void)
{
asm volatile (" \
w0 = -41; \
w1 = 2; \
w0 s/= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero reg divisor, check 2")
__success __success_unpriv __retval(-20)
__naked void sdiv32_non_zero_reg_2(void)
{
asm volatile (" \
w0 = 41; \
w1 = -2; \
w0 s/= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero reg divisor, check 3")
__success __success_unpriv __retval(20)
__naked void sdiv32_non_zero_reg_3(void)
{
asm volatile (" \
w0 = -41; \
w1 = -2; \
w0 s/= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero reg divisor, check 4")
__success __success_unpriv __retval(-21)
__naked void sdiv32_non_zero_reg_4(void)
{
asm volatile (" \
w0 = -42; \
w1 = 2; \
w0 s/= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero reg divisor, check 5")
__success __success_unpriv __retval(-21)
__naked void sdiv32_non_zero_reg_5(void)
{
asm volatile (" \
w0 = 42; \
w1 = -2; \
w0 s/= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero reg divisor, check 6")
__success __success_unpriv __retval(21)
__naked void sdiv32_non_zero_reg_6(void)
{
asm volatile (" \
w0 = -42; \
w1 = -2; \
w0 s/= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero reg divisor, check 7")
__success __success_unpriv __retval(21)
__naked void sdiv32_non_zero_reg_7(void)
{
asm volatile (" \
w0 = 42; \
w1 = 2; \
w0 s/= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, non-zero reg divisor, check 8")
__success __success_unpriv __retval(20)
__naked void sdiv32_non_zero_reg_8(void)
{
asm volatile (" \
w0 = 41; \
w1 = 2; \
w0 s/= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, non-zero imm divisor, check 1")
__success __success_unpriv __retval(-20)
__naked void sdiv64_non_zero_imm_1(void)
{
asm volatile (" \
r0 = -41; \
r0 s/= 2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, non-zero imm divisor, check 2")
__success __success_unpriv __retval(-20)
__naked void sdiv64_non_zero_imm_2(void)
{
asm volatile (" \
r0 = 41; \
r0 s/= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, non-zero imm divisor, check 3")
__success __success_unpriv __retval(20)
__naked void sdiv64_non_zero_imm_3(void)
{
asm volatile (" \
r0 = -41; \
r0 s/= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, non-zero imm divisor, check 4")
__success __success_unpriv __retval(-21)
__naked void sdiv64_non_zero_imm_4(void)
{
asm volatile (" \
r0 = -42; \
r0 s/= 2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, non-zero imm divisor, check 5")
__success __success_unpriv __retval(-21)
__naked void sdiv64_non_zero_imm_5(void)
{
asm volatile (" \
r0 = 42; \
r0 s/= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, non-zero imm divisor, check 6")
__success __success_unpriv __retval(21)
__naked void sdiv64_non_zero_imm_6(void)
{
asm volatile (" \
r0 = -42; \
r0 s/= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, non-zero reg divisor, check 1")
__success __success_unpriv __retval(-20)
__naked void sdiv64_non_zero_reg_1(void)
{
asm volatile (" \
r0 = -41; \
r1 = 2; \
r0 s/= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, non-zero reg divisor, check 2")
__success __success_unpriv __retval(-20)
__naked void sdiv64_non_zero_reg_2(void)
{
asm volatile (" \
r0 = 41; \
r1 = -2; \
r0 s/= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, non-zero reg divisor, check 3")
__success __success_unpriv __retval(20)
__naked void sdiv64_non_zero_reg_3(void)
{
asm volatile (" \
r0 = -41; \
r1 = -2; \
r0 s/= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, non-zero reg divisor, check 4")
__success __success_unpriv __retval(-21)
__naked void sdiv64_non_zero_reg_4(void)
{
asm volatile (" \
r0 = -42; \
r1 = 2; \
r0 s/= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, non-zero reg divisor, check 5")
__success __success_unpriv __retval(-21)
__naked void sdiv64_non_zero_reg_5(void)
{
asm volatile (" \
r0 = 42; \
r1 = -2; \
r0 s/= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, non-zero reg divisor, check 6")
__success __success_unpriv __retval(21)
__naked void sdiv64_non_zero_reg_6(void)
{
asm volatile (" \
r0 = -42; \
r1 = -2; \
r0 s/= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, non-zero imm divisor, check 1")
__success __success_unpriv __retval(-1)
__naked void smod32_non_zero_imm_1(void)
{
asm volatile (" \
w0 = -41; \
w0 s%%= 2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, non-zero imm divisor, check 2")
__success __success_unpriv __retval(1)
__naked void smod32_non_zero_imm_2(void)
{
asm volatile (" \
w0 = 41; \
w0 s%%= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, non-zero imm divisor, check 3")
__success __success_unpriv __retval(-1)
__naked void smod32_non_zero_imm_3(void)
{
asm volatile (" \
w0 = -41; \
w0 s%%= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, non-zero imm divisor, check 4")
__success __success_unpriv __retval(0)
__naked void smod32_non_zero_imm_4(void)
{
asm volatile (" \
w0 = -42; \
w0 s%%= 2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, non-zero imm divisor, check 5")
__success __success_unpriv __retval(0)
__naked void smod32_non_zero_imm_5(void)
{
asm volatile (" \
w0 = 42; \
w0 s%%= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, non-zero imm divisor, check 6")
__success __success_unpriv __retval(0)
__naked void smod32_non_zero_imm_6(void)
{
asm volatile (" \
w0 = -42; \
w0 s%%= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, non-zero reg divisor, check 1")
__success __success_unpriv __retval(-1)
__naked void smod32_non_zero_reg_1(void)
{
asm volatile (" \
w0 = -41; \
w1 = 2; \
w0 s%%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, non-zero reg divisor, check 2")
__success __success_unpriv __retval(1)
__naked void smod32_non_zero_reg_2(void)
{
asm volatile (" \
w0 = 41; \
w1 = -2; \
w0 s%%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, non-zero reg divisor, check 3")
__success __success_unpriv __retval(-1)
__naked void smod32_non_zero_reg_3(void)
{
asm volatile (" \
w0 = -41; \
w1 = -2; \
w0 s%%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, non-zero reg divisor, check 4")
__success __success_unpriv __retval(0)
__naked void smod32_non_zero_reg_4(void)
{
asm volatile (" \
w0 = -42; \
w1 = 2; \
w0 s%%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, non-zero reg divisor, check 5")
__success __success_unpriv __retval(0)
__naked void smod32_non_zero_reg_5(void)
{
asm volatile (" \
w0 = 42; \
w1 = -2; \
w0 s%%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, non-zero reg divisor, check 6")
__success __success_unpriv __retval(0)
__naked void smod32_non_zero_reg_6(void)
{
asm volatile (" \
w0 = -42; \
w1 = -2; \
w0 s%%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero imm divisor, check 1")
__success __success_unpriv __retval(-1)
__naked void smod64_non_zero_imm_1(void)
{
asm volatile (" \
r0 = -41; \
r0 s%%= 2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero imm divisor, check 2")
__success __success_unpriv __retval(1)
__naked void smod64_non_zero_imm_2(void)
{
asm volatile (" \
r0 = 41; \
r0 s%%= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero imm divisor, check 3")
__success __success_unpriv __retval(-1)
__naked void smod64_non_zero_imm_3(void)
{
asm volatile (" \
r0 = -41; \
r0 s%%= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero imm divisor, check 4")
__success __success_unpriv __retval(0)
__naked void smod64_non_zero_imm_4(void)
{
asm volatile (" \
r0 = -42; \
r0 s%%= 2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero imm divisor, check 5")
__success __success_unpriv __retval(-0)
__naked void smod64_non_zero_imm_5(void)
{
asm volatile (" \
r0 = 42; \
r0 s%%= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero imm divisor, check 6")
__success __success_unpriv __retval(0)
__naked void smod64_non_zero_imm_6(void)
{
asm volatile (" \
r0 = -42; \
r0 s%%= -2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero imm divisor, check 7")
__success __success_unpriv __retval(0)
__naked void smod64_non_zero_imm_7(void)
{
asm volatile (" \
r0 = 42; \
r0 s%%= 2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero imm divisor, check 8")
__success __success_unpriv __retval(1)
__naked void smod64_non_zero_imm_8(void)
{
asm volatile (" \
r0 = 41; \
r0 s%%= 2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero reg divisor, check 1")
__success __success_unpriv __retval(-1)
__naked void smod64_non_zero_reg_1(void)
{
asm volatile (" \
r0 = -41; \
r1 = 2; \
r0 s%%= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero reg divisor, check 2")
__success __success_unpriv __retval(1)
__naked void smod64_non_zero_reg_2(void)
{
asm volatile (" \
r0 = 41; \
r1 = -2; \
r0 s%%= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero reg divisor, check 3")
__success __success_unpriv __retval(-1)
__naked void smod64_non_zero_reg_3(void)
{
asm volatile (" \
r0 = -41; \
r1 = -2; \
r0 s%%= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero reg divisor, check 4")
__success __success_unpriv __retval(0)
__naked void smod64_non_zero_reg_4(void)
{
asm volatile (" \
r0 = -42; \
r1 = 2; \
r0 s%%= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero reg divisor, check 5")
__success __success_unpriv __retval(0)
__naked void smod64_non_zero_reg_5(void)
{
asm volatile (" \
r0 = 42; \
r1 = -2; \
r0 s%%= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero reg divisor, check 6")
__success __success_unpriv __retval(0)
__naked void smod64_non_zero_reg_6(void)
{
asm volatile (" \
r0 = -42; \
r1 = -2; \
r0 s%%= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero reg divisor, check 7")
__success __success_unpriv __retval(0)
__naked void smod64_non_zero_reg_7(void)
{
asm volatile (" \
r0 = 42; \
r1 = 2; \
r0 s%%= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, non-zero reg divisor, check 8")
__success __success_unpriv __retval(1)
__naked void smod64_non_zero_reg_8(void)
{
asm volatile (" \
r0 = 41; \
r1 = 2; \
r0 s%%= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV32, zero divisor")
__success __success_unpriv __retval(0)
__naked void sdiv32_zero_divisor(void)
{
asm volatile (" \
w0 = 42; \
w1 = 0; \
w2 = -1; \
w2 s/= w1; \
w0 = w2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SDIV64, zero divisor")
__success __success_unpriv __retval(0)
__naked void sdiv64_zero_divisor(void)
{
asm volatile (" \
r0 = 42; \
r1 = 0; \
r2 = -1; \
r2 s/= r1; \
r0 = r2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD32, zero divisor")
__success __success_unpriv __retval(-1)
__naked void smod32_zero_divisor(void)
{
asm volatile (" \
w0 = 42; \
w1 = 0; \
w2 = -1; \
w2 s%%= w1; \
w0 = w2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("SMOD64, zero divisor")
__success __success_unpriv __retval(-1)
__naked void smod64_zero_divisor(void)
{
asm volatile (" \
r0 = 42; \
r1 = 0; \
r2 = -1; \
r2 s%%= r1; \
r0 = r2; \
exit; \
" ::: __clobber_all);
}
#else
SEC("socket")
__description("cpuv4 is not supported by compiler or jit, use a dummy test")
__success
int dummy_test(void)
{
return 0;
}
#endif
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_sdiv.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct bpf_map;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 1000);
} map_random_data SEC(".maps");
struct map_bloom_type {
__uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
__type(value, __u32);
__uint(max_entries, 10000);
__uint(map_extra, 5);
} map_bloom SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__type(key, int);
__type(value, int);
__uint(max_entries, 1);
__array(values, struct map_bloom_type);
} outer_map SEC(".maps");
struct callback_ctx {
struct bpf_map *map;
};
int error = 0;
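/* for_each_map_elem() callback: check that every value stored in
 * map_random_data can be found in the bloom filter; record a failure in
 * 'error' and stop iterating otherwise.
 */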
static __u64
check_elem(struct bpf_map *map, __u32 *key, __u32 *val,
struct callback_ctx *data)
{
int err;
err = bpf_map_peek_elem(data->map, val);
if (err) {
error |= 1;
return 1; /* stop the iteration */
}
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int inner_map(void *ctx)
{
struct bpf_map *inner_map;
struct callback_ctx data;
int key = 0;
inner_map = bpf_map_lookup_elem(&outer_map, &key);
if (!inner_map) {
error |= 2;
return 0;
}
data.map = inner_map;
bpf_for_each_map_elem(&map_random_data, check_elem, &data, 0);
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int check_bloom(void *ctx)
{
struct callback_ctx data;
data.map = (struct bpf_map *)&map_bloom;
bpf_for_each_map_elem(&map_random_data, check_elem, &data, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bloom_filter_map.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/tcp.h>
#include <linux/socket.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define BPF_PROG_TEST_TCP_HDR_OPTIONS
#include "test_tcp_hdr_options.h"
#ifndef sizeof_field
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#endif
__u8 test_kind = TCPOPT_EXP;
__u16 test_magic = 0xeB9F;
__u32 inherit_cb_flags = 0;
struct bpf_test_option passive_synack_out = {};
struct bpf_test_option passive_fin_out = {};
struct bpf_test_option passive_estab_in = {};
struct bpf_test_option passive_fin_in = {};
struct bpf_test_option active_syn_out = {};
struct bpf_test_option active_fin_out = {};
struct bpf_test_option active_estab_in = {};
struct bpf_test_option active_fin_in = {};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct hdr_stg);
} hdr_stg_map SEC(".maps");
static bool skops_want_cookie(const struct bpf_sock_ops *skops)
{
return skops->args[0] == BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
}
static bool skops_current_mss(const struct bpf_sock_ops *skops)
{
return skops->args[0] == BPF_WRITE_HDR_TCP_CURRENT_MSS;
}
static __u8 option_total_len(__u8 flags)
{
__u8 i, len = 1; /* +1 for flags */
if (!flags)
return 0;
/* RESEND bit does not use a byte */
for (i = OPTION_RESEND + 1; i < __NR_OPTION_FLAGS; i++)
len += !!TEST_OPTION_FLAGS(flags, i);
if (test_kind == TCPOPT_EXP)
return len + TCP_BPF_EXPOPT_BASE_LEN;
else
return len + 2; /* +1 kind, +1 kind-len */
}
static void write_test_option(const struct bpf_test_option *test_opt,
__u8 *data)
{
__u8 offset = 0;
data[offset++] = test_opt->flags;
if (TEST_OPTION_FLAGS(test_opt->flags, OPTION_MAX_DELACK_MS))
data[offset++] = test_opt->max_delack_ms;
if (TEST_OPTION_FLAGS(test_opt->flags, OPTION_RAND))
data[offset++] = test_opt->rand;
}
static int store_option(struct bpf_sock_ops *skops,
const struct bpf_test_option *test_opt)
{
union {
struct tcp_exprm_opt exprm;
struct tcp_opt regular;
} write_opt;
int err;
if (test_kind == TCPOPT_EXP) {
write_opt.exprm.kind = TCPOPT_EXP;
write_opt.exprm.len = option_total_len(test_opt->flags);
write_opt.exprm.magic = __bpf_htons(test_magic);
write_opt.exprm.data32 = 0;
write_test_option(test_opt, write_opt.exprm.data);
err = bpf_store_hdr_opt(skops, &write_opt.exprm,
sizeof(write_opt.exprm), 0);
} else {
write_opt.regular.kind = test_kind;
write_opt.regular.len = option_total_len(test_opt->flags);
write_opt.regular.data32 = 0;
write_test_option(test_opt, write_opt.regular.data);
err = bpf_store_hdr_opt(skops, &write_opt.regular,
sizeof(write_opt.regular), 0);
}
if (err)
RET_CG_ERR(err);
return CG_OK;
}
static int parse_test_option(struct bpf_test_option *opt, const __u8 *start)
{
opt->flags = *start++;
if (TEST_OPTION_FLAGS(opt->flags, OPTION_MAX_DELACK_MS))
opt->max_delack_ms = *start++;
if (TEST_OPTION_FLAGS(opt->flags, OPTION_RAND))
opt->rand = *start++;
return 0;
}
static int load_option(struct bpf_sock_ops *skops,
struct bpf_test_option *test_opt, bool from_syn)
{
union {
struct tcp_exprm_opt exprm;
struct tcp_opt regular;
} search_opt;
int ret, load_flags = from_syn ? BPF_LOAD_HDR_OPT_TCP_SYN : 0;
if (test_kind == TCPOPT_EXP) {
search_opt.exprm.kind = TCPOPT_EXP;
search_opt.exprm.len = 4;
search_opt.exprm.magic = __bpf_htons(test_magic);
search_opt.exprm.data32 = 0;
ret = bpf_load_hdr_opt(skops, &search_opt.exprm,
sizeof(search_opt.exprm), load_flags);
if (ret < 0)
return ret;
return parse_test_option(test_opt, search_opt.exprm.data);
} else {
search_opt.regular.kind = test_kind;
search_opt.regular.len = 0;
search_opt.regular.data32 = 0;
ret = bpf_load_hdr_opt(skops, &search_opt.regular,
sizeof(search_opt.regular), load_flags);
if (ret < 0)
return ret;
return parse_test_option(test_opt, search_opt.regular.data);
}
}
static int synack_opt_len(struct bpf_sock_ops *skops)
{
struct bpf_test_option test_opt = {};
__u8 optlen;
int err;
if (!passive_synack_out.flags)
return CG_OK;
err = load_option(skops, &test_opt, true);
/* bpf_test_option is not found */
if (err == -ENOMSG)
return CG_OK;
if (err)
RET_CG_ERR(err);
optlen = option_total_len(passive_synack_out.flags);
if (optlen) {
err = bpf_reserve_hdr_opt(skops, optlen, 0);
if (err)
RET_CG_ERR(err);
}
return CG_OK;
}
static int write_synack_opt(struct bpf_sock_ops *skops)
{
struct bpf_test_option opt;
if (!passive_synack_out.flags)
/* We should not even be called since no header
* space has been reserved.
*/
RET_CG_ERR(0);
opt = passive_synack_out;
if (skops_want_cookie(skops))
SET_OPTION_FLAGS(opt.flags, OPTION_RESEND);
return store_option(skops, &opt);
}
static int syn_opt_len(struct bpf_sock_ops *skops)
{
__u8 optlen;
int err;
if (!active_syn_out.flags)
return CG_OK;
optlen = option_total_len(active_syn_out.flags);
if (optlen) {
err = bpf_reserve_hdr_opt(skops, optlen, 0);
if (err)
RET_CG_ERR(err);
}
return CG_OK;
}
static int write_syn_opt(struct bpf_sock_ops *skops)
{
if (!active_syn_out.flags)
RET_CG_ERR(0);
return store_option(skops, &active_syn_out);
}
static int fin_opt_len(struct bpf_sock_ops *skops)
{
struct bpf_test_option *opt;
struct hdr_stg *hdr_stg;
__u8 optlen;
int err;
if (!skops->sk)
RET_CG_ERR(0);
hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
if (!hdr_stg)
RET_CG_ERR(0);
if (hdr_stg->active)
opt = &active_fin_out;
else
opt = &passive_fin_out;
optlen = option_total_len(opt->flags);
if (optlen) {
err = bpf_reserve_hdr_opt(skops, optlen, 0);
if (err)
RET_CG_ERR(err);
}
return CG_OK;
}
static int write_fin_opt(struct bpf_sock_ops *skops)
{
struct bpf_test_option *opt;
struct hdr_stg *hdr_stg;
if (!skops->sk)
RET_CG_ERR(0);
hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
if (!hdr_stg)
RET_CG_ERR(0);
if (hdr_stg->active)
opt = &active_fin_out;
else
opt = &passive_fin_out;
if (!opt->flags)
RET_CG_ERR(0);
return store_option(skops, opt);
}
static int resend_in_ack(struct bpf_sock_ops *skops)
{
struct hdr_stg *hdr_stg;
if (!skops->sk)
return -1;
hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
if (!hdr_stg)
return -1;
return !!hdr_stg->resend_syn;
}
static int nodata_opt_len(struct bpf_sock_ops *skops)
{
int resend;
resend = resend_in_ack(skops);
if (resend < 0)
RET_CG_ERR(0);
if (resend)
return syn_opt_len(skops);
return CG_OK;
}
static int write_nodata_opt(struct bpf_sock_ops *skops)
{
int resend;
resend = resend_in_ack(skops);
if (resend < 0)
RET_CG_ERR(0);
if (resend)
return write_syn_opt(skops);
return CG_OK;
}
static int data_opt_len(struct bpf_sock_ops *skops)
{
/* Same as the nodata version. Mostly to show
	 * an example usage of skops->skb_len.
*/
return nodata_opt_len(skops);
}
static int write_data_opt(struct bpf_sock_ops *skops)
{
return write_nodata_opt(skops);
}
static int current_mss_opt_len(struct bpf_sock_ops *skops)
{
/* Reserve maximum that may be needed */
int err;
err = bpf_reserve_hdr_opt(skops, option_total_len(OPTION_MASK), 0);
if (err)
RET_CG_ERR(err);
return CG_OK;
}
static int handle_hdr_opt_len(struct bpf_sock_ops *skops)
{
__u8 tcp_flags = skops_tcp_flags(skops);
if ((tcp_flags & TCPHDR_SYNACK) == TCPHDR_SYNACK)
return synack_opt_len(skops);
if (tcp_flags & TCPHDR_SYN)
return syn_opt_len(skops);
if (tcp_flags & TCPHDR_FIN)
return fin_opt_len(skops);
if (skops_current_mss(skops))
/* The kernel is calculating the MSS */
return current_mss_opt_len(skops);
if (skops->skb_len)
return data_opt_len(skops);
return nodata_opt_len(skops);
}
static int handle_write_hdr_opt(struct bpf_sock_ops *skops)
{
__u8 tcp_flags = skops_tcp_flags(skops);
struct tcphdr *th;
if ((tcp_flags & TCPHDR_SYNACK) == TCPHDR_SYNACK)
return write_synack_opt(skops);
if (tcp_flags & TCPHDR_SYN)
return write_syn_opt(skops);
if (tcp_flags & TCPHDR_FIN)
return write_fin_opt(skops);
th = skops->skb_data;
if (th + 1 > skops->skb_data_end)
RET_CG_ERR(0);
if (skops->skb_len > tcp_hdrlen(th))
return write_data_opt(skops);
return write_nodata_opt(skops);
}
static int set_delack_max(struct bpf_sock_ops *skops, __u8 max_delack_ms)
{
__u32 max_delack_us = max_delack_ms * 1000;
return bpf_setsockopt(skops, SOL_TCP, TCP_BPF_DELACK_MAX,
&max_delack_us, sizeof(max_delack_us));
}
static int set_rto_min(struct bpf_sock_ops *skops, __u8 peer_max_delack_ms)
{
__u32 min_rto_us = peer_max_delack_ms * 1000;
return bpf_setsockopt(skops, SOL_TCP, TCP_BPF_RTO_MIN, &min_rto_us,
sizeof(min_rto_us));
}
static int handle_active_estab(struct bpf_sock_ops *skops)
{
struct hdr_stg init_stg = {
.active = true,
};
int err;
err = load_option(skops, &active_estab_in, false);
if (err && err != -ENOMSG)
RET_CG_ERR(err);
init_stg.resend_syn = TEST_OPTION_FLAGS(active_estab_in.flags,
OPTION_RESEND);
if (!skops->sk || !bpf_sk_storage_get(&hdr_stg_map, skops->sk,
&init_stg,
BPF_SK_STORAGE_GET_F_CREATE))
RET_CG_ERR(0);
if (init_stg.resend_syn)
/* Don't clear the write_hdr cb now because
* the ACK may get lost and retransmit may
* be needed.
*
* PARSE_ALL_HDR cb flag is set to learn if this
		 * resend_syn option has been received by the peer.
*
* The header option will be resent until a valid
* packet is received at handle_parse_hdr()
* and all hdr cb flags will be cleared in
* handle_parse_hdr().
*/
set_parse_all_hdr_cb_flags(skops);
else if (!active_fin_out.flags)
/* No options will be written from now */
clear_hdr_cb_flags(skops);
if (active_syn_out.max_delack_ms) {
err = set_delack_max(skops, active_syn_out.max_delack_ms);
if (err)
RET_CG_ERR(err);
}
if (active_estab_in.max_delack_ms) {
err = set_rto_min(skops, active_estab_in.max_delack_ms);
if (err)
RET_CG_ERR(err);
}
return CG_OK;
}
static int handle_passive_estab(struct bpf_sock_ops *skops)
{
struct hdr_stg init_stg = {};
struct tcphdr *th;
int err;
inherit_cb_flags = skops->bpf_sock_ops_cb_flags;
err = load_option(skops, &passive_estab_in, true);
if (err == -ENOENT) {
/* saved_syn is not found. It was in syncookie mode.
* We have asked the active side to resend the options
* in ACK, so try to find the bpf_test_option from ACK now.
*/
err = load_option(skops, &passive_estab_in, false);
init_stg.syncookie = true;
}
	/* ENOMSG: The bpf_test_option is not found, which is fine.
* Bail out now for all other errors.
*/
if (err && err != -ENOMSG)
RET_CG_ERR(err);
th = skops->skb_data;
if (th + 1 > skops->skb_data_end)
RET_CG_ERR(0);
if (th->syn) {
/* Fastopen */
/* Cannot clear cb_flags to stop write_hdr cb.
* synack is not sent yet for fast open.
		 * Even if it was, the synack may need to be retransmitted.
*
* PARSE_ALL_HDR cb flag is set to learn
* if synack has reached the peer.
* All cb_flags will be cleared in handle_parse_hdr().
*/
set_parse_all_hdr_cb_flags(skops);
init_stg.fastopen = true;
} else if (!passive_fin_out.flags) {
/* No options will be written from now */
clear_hdr_cb_flags(skops);
}
if (!skops->sk ||
!bpf_sk_storage_get(&hdr_stg_map, skops->sk, &init_stg,
BPF_SK_STORAGE_GET_F_CREATE))
RET_CG_ERR(0);
if (passive_synack_out.max_delack_ms) {
err = set_delack_max(skops, passive_synack_out.max_delack_ms);
if (err)
RET_CG_ERR(err);
}
if (passive_estab_in.max_delack_ms) {
err = set_rto_min(skops, passive_estab_in.max_delack_ms);
if (err)
RET_CG_ERR(err);
}
return CG_OK;
}
static int handle_parse_hdr(struct bpf_sock_ops *skops)
{
struct hdr_stg *hdr_stg;
struct tcphdr *th;
if (!skops->sk)
RET_CG_ERR(0);
th = skops->skb_data;
if (th + 1 > skops->skb_data_end)
RET_CG_ERR(0);
hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
if (!hdr_stg)
RET_CG_ERR(0);
if (hdr_stg->resend_syn || hdr_stg->fastopen)
/* The PARSE_ALL_HDR cb flag was turned on
* to ensure that the previously written
* options have reached the peer.
		 * Those previously written options include:
* - Active side: resend_syn in ACK during syncookie
* or
* - Passive side: SYNACK during fastopen
*
* A valid packet has been received here after
* the 3WHS, so the PARSE_ALL_HDR cb flag
* can be cleared now.
*/
clear_parse_all_hdr_cb_flags(skops);
if (hdr_stg->resend_syn && !active_fin_out.flags)
/* Active side resent the syn option in ACK
* because the server was in syncookie mode.
* A valid packet has been received, so
* clear header cb flags if there is no
* more option to send.
*/
clear_hdr_cb_flags(skops);
if (hdr_stg->fastopen && !passive_fin_out.flags)
/* Passive side was in fastopen.
* A valid packet has been received, so
* the SYNACK has reached the peer.
* Clear header cb flags if there is no more
* option to send.
*/
clear_hdr_cb_flags(skops);
if (th->fin) {
struct bpf_test_option *fin_opt;
int err;
if (hdr_stg->active)
fin_opt = &active_fin_in;
else
fin_opt = &passive_fin_in;
err = load_option(skops, fin_opt, false);
if (err && err != -ENOMSG)
RET_CG_ERR(err);
}
return CG_OK;
}
SEC("sockops")
int estab(struct bpf_sock_ops *skops)
{
int true_val = 1;
switch (skops->op) {
case BPF_SOCK_OPS_TCP_LISTEN_CB:
bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
&true_val, sizeof(true_val));
set_hdr_cb_flags(skops, BPF_SOCK_OPS_STATE_CB_FLAG);
break;
case BPF_SOCK_OPS_TCP_CONNECT_CB:
set_hdr_cb_flags(skops, 0);
break;
case BPF_SOCK_OPS_PARSE_HDR_OPT_CB:
return handle_parse_hdr(skops);
case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
return handle_hdr_opt_len(skops);
case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
return handle_write_hdr_opt(skops);
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
return handle_passive_estab(skops);
case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
return handle_active_estab(skops);
}
return CG_OK;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
__attribute__ ((noinline))
void foo(struct __sk_buff *skb)
{
skb->tc_index = 0;
}
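/* foo() is a global (non-static) function returning void.  The verifier
 * requires global functions to return a scalar, so loading this program is
 * expected to fail with the message in the __msg() annotation below.
 */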
SEC("tc")
__failure __msg("foo() doesn't return scalar")
int global_func7(struct __sk_buff *skb)
{
foo(skb);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func7.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
#include <linux/types.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#define STRNCMP_STR_SZ 4096
/* Will be updated by benchmark before program loading */
const volatile unsigned int cmp_str_len = 1;
const char target[STRNCMP_STR_SZ];
long hits = 0;
char str[STRNCMP_STR_SZ];
char _license[] SEC("license") = "GPL";
static __always_inline int local_strncmp(const char *s1, unsigned int sz,
const char *s2)
{
int ret = 0;
unsigned int i;
for (i = 0; i < sz; i++) {
/* E.g. 0xff > 0x31 */
ret = (unsigned char)s1[i] - (unsigned char)s2[i];
if (ret || !s1[i])
break;
}
return ret;
}
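/* The two programs below benchmark the open-coded loop above against the
 * bpf_strncmp() helper.  Both compare cmp_str_len + 1 bytes of str against
 * target, and hits counts how many comparisons returned a negative result.
 */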
SEC("tp/syscalls/sys_enter_getpgid")
int strncmp_no_helper(void *ctx)
{
if (local_strncmp(str, cmp_str_len + 1, target) < 0)
__sync_add_and_fetch(&hits, 1);
return 0;
}
SEC("tp/syscalls/sys_enter_getpgid")
int strncmp_helper(void *ctx)
{
if (bpf_strncmp(str, cmp_str_len + 1, target) < 0)
__sync_add_and_fetch(&hits, 1);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/strncmp_bench.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#if __has_attribute(btf_decl_tag)
#define __tag1 __attribute__((btf_decl_tag("tag1")))
#define __tag2 __attribute__((btf_decl_tag("tag2")))
volatile const bool skip_tests __tag1 __tag2 = false;
#else
#define __tag1
#define __tag2
volatile const bool skip_tests = true;
#endif
struct key_t {
int a;
int b __tag1 __tag2;
int c;
} __tag1 __tag2;
typedef struct {
int a;
int b;
} value_t __tag1 __tag2;
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 3);
__type(key, struct key_t);
__type(value, value_t);
} hashmap1 SEC(".maps");
static __noinline int foo(int x __tag1 __tag2) __tag1 __tag2
{
struct key_t key;
value_t val = {};
key.a = key.b = key.c = x;
bpf_map_update_elem(&hashmap1, &key, &val, 0);
return 0;
}
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(sub, int x)
{
return foo(x);
}
| linux-master | tools/testing/selftests/bpf/progs/test_btf_decl_tag.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/bpf.h>
#include <time.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
struct hmap_elem {
int counter;
struct bpf_timer timer;
struct bpf_spin_lock lock; /* unused */
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1000);
__type(key, int);
__type(value, struct hmap_elem);
} hmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(map_flags, BPF_F_NO_PREALLOC);
__uint(max_entries, 1000);
__type(key, int);
__type(value, struct hmap_elem);
} hmap_malloc SEC(".maps");
struct elem {
struct bpf_timer t;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 2);
__type(key, int);
__type(value, struct elem);
} array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, 4);
__type(key, int);
__type(value, struct elem);
} lru SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct elem);
} abs_timer SEC(".maps");
__u64 bss_data;
__u64 abs_data;
__u64 err;
__u64 ok;
__u64 callback_check = 52;
__u64 callback2_check = 52;
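/* err is a bitmask: each failed check below sets a distinct bit so userspace
 * can tell exactly which assertion tripped.  ok records which callback paths
 * ran to completion.
 */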
#define ARRAY 1
#define HTAB 2
#define HTAB_MALLOC 3
#define LRU 4
/* callback for array and lru timers */
static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
{
/* increment bss variable twice.
* Once via array timer callback and once via lru timer callback
*/
bss_data += 5;
	/* *key == ARRAY (1) - the callback was called for the array timer.
	 * *key == LRU (4) - the callback was called from the lru timer.
	 */
if (*key == ARRAY) {
struct bpf_timer *lru_timer;
int lru_key = LRU;
		/* rearm array timer to be called again in ~35 seconds (1ULL << 35 nsec) */
if (bpf_timer_start(timer, 1ull << 35, 0) != 0)
err |= 1;
lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
if (!lru_timer)
return 0;
bpf_timer_set_callback(lru_timer, timer_cb1);
if (bpf_timer_start(lru_timer, 0, 0) != 0)
err |= 2;
} else if (*key == LRU) {
int lru_key, i;
for (i = LRU + 1;
i <= 100 /* for current LRU eviction algorithm this number
* should be larger than ~ lru->max_entries * 2
*/;
i++) {
struct elem init = {};
/* lru_key cannot be used as loop induction variable
* otherwise the loop will be unbounded.
*/
lru_key = i;
/* add more elements into lru map to push out current
* element and force deletion of this timer
*/
bpf_map_update_elem(map, &lru_key, &init, 0);
/* look it up to bump it into active list */
bpf_map_lookup_elem(map, &lru_key);
/* keep adding until *key changes underneath,
* which means that key/timer memory was reused
*/
if (*key != LRU)
break;
}
/* check that the timer was removed */
if (bpf_timer_cancel(timer) != -EINVAL)
err |= 4;
ok |= 1;
}
return 0;
}
SEC("fentry/bpf_fentry_test1")
int BPF_PROG2(test1, int, a)
{
struct bpf_timer *arr_timer, *lru_timer;
struct elem init = {};
int lru_key = LRU;
int array_key = ARRAY;
arr_timer = bpf_map_lookup_elem(&array, &array_key);
if (!arr_timer)
return 0;
bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
bpf_map_update_elem(&lru, &lru_key, &init, 0);
lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
if (!lru_timer)
return 0;
bpf_timer_init(lru_timer, &lru, CLOCK_MONOTONIC);
bpf_timer_set_callback(arr_timer, timer_cb1);
bpf_timer_start(arr_timer, 0 /* call timer_cb1 asap */, 0);
/* init more timers to check that array destruction
* doesn't leak timer memory.
*/
array_key = 0;
arr_timer = bpf_map_lookup_elem(&array, &array_key);
if (!arr_timer)
return 0;
bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
return 0;
}
/* callback for prealloc and non-prealloc hashtab timers */
static int timer_cb2(void *map, int *key, struct hmap_elem *val)
{
if (*key == HTAB)
callback_check--;
else
callback2_check--;
if (val->counter > 0 && --val->counter) {
/* re-arm the timer again to execute after 1 usec */
bpf_timer_start(&val->timer, 1000, 0);
} else if (*key == HTAB) {
struct bpf_timer *arr_timer;
int array_key = ARRAY;
/* cancel arr_timer otherwise bpf_fentry_test1 prog
* will stay alive forever.
*/
arr_timer = bpf_map_lookup_elem(&array, &array_key);
if (!arr_timer)
return 0;
if (bpf_timer_cancel(arr_timer) != 1)
/* bpf_timer_cancel should return 1 to indicate
* that arr_timer was active at this time
*/
err |= 8;
/* try to cancel ourself. It shouldn't deadlock. */
if (bpf_timer_cancel(&val->timer) != -EDEADLK)
err |= 16;
/* delete this key and this timer anyway.
* It shouldn't deadlock either.
*/
bpf_map_delete_elem(map, key);
/* in preallocated hashmap both 'key' and 'val' could have been
* reused to store another map element (like in LRU above),
* but in controlled test environment the below test works.
* It's not a use-after-free. The memory is owned by the map.
*/
if (bpf_timer_start(&val->timer, 1000, 0) != -EINVAL)
err |= 32;
ok |= 2;
} else {
if (*key != HTAB_MALLOC)
err |= 64;
/* try to cancel ourself. It shouldn't deadlock. */
if (bpf_timer_cancel(&val->timer) != -EDEADLK)
err |= 128;
/* delete this key and this timer anyway.
* It shouldn't deadlock either.
*/
bpf_map_delete_elem(map, key);
ok |= 4;
}
return 0;
}
int bpf_timer_test(void)
{
struct hmap_elem *val;
int key = HTAB, key_malloc = HTAB_MALLOC;
val = bpf_map_lookup_elem(&hmap, &key);
if (val) {
if (bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME) != 0)
err |= 512;
bpf_timer_set_callback(&val->timer, timer_cb2);
bpf_timer_start(&val->timer, 1000, 0);
}
val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
if (val) {
if (bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME) != 0)
err |= 1024;
bpf_timer_set_callback(&val->timer, timer_cb2);
bpf_timer_start(&val->timer, 1000, 0);
}
return 0;
}
SEC("fentry/bpf_fentry_test2")
int BPF_PROG2(test2, int, a, int, b)
{
struct hmap_elem init = {}, *val;
int key = HTAB, key_malloc = HTAB_MALLOC;
init.counter = 10; /* number of times to trigger timer_cb2 */
bpf_map_update_elem(&hmap, &key, &init, 0);
val = bpf_map_lookup_elem(&hmap, &key);
if (val)
bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
/* update the same key to free the timer */
bpf_map_update_elem(&hmap, &key, &init, 0);
bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
if (val)
bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
/* update the same key to free the timer */
bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
/* init more timers to check that htab operations
* don't leak timer memory.
*/
key = 0;
bpf_map_update_elem(&hmap, &key, &init, 0);
val = bpf_map_lookup_elem(&hmap, &key);
if (val)
bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
bpf_map_delete_elem(&hmap, &key);
bpf_map_update_elem(&hmap, &key, &init, 0);
val = bpf_map_lookup_elem(&hmap, &key);
if (val)
bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
/* and with non-prealloc htab */
key_malloc = 0;
bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
if (val)
bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
bpf_map_delete_elem(&hmap_malloc, &key_malloc);
bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
if (val)
bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
return bpf_timer_test();
}
/* callback for absolute timer */
static int timer_cb3(void *map, int *key, struct bpf_timer *timer)
{
abs_data += 6;
if (abs_data < 12) {
bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
BPF_F_TIMER_ABS);
} else {
/* Re-arm timer ~35 seconds in future */
bpf_timer_start(timer, bpf_ktime_get_boot_ns() + (1ull << 35),
BPF_F_TIMER_ABS);
}
return 0;
}
SEC("fentry/bpf_fentry_test3")
int BPF_PROG2(test3, int, a)
{
int key = 0;
struct bpf_timer *timer;
bpf_printk("test3");
timer = bpf_map_lookup_elem(&abs_timer, &key);
if (timer) {
if (bpf_timer_init(timer, &abs_timer, CLOCK_BOOTTIME) != 0)
err |= 2048;
bpf_timer_set_callback(timer, timer_cb3);
bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
BPF_F_TIMER_ABS);
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/timer.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
__u32 unique_tgid_cnt = 0;
uintptr_t address = 0;
uintptr_t offset = 0;
__u32 last_tgid = 0;
__u32 pid = 0;
__u32 page_shift = 0;
SEC("iter/task_vma")
int get_vma_offset(struct bpf_iter__task_vma *ctx)
{
struct vm_area_struct *vma = ctx->vma;
struct seq_file *seq = ctx->meta->seq;
struct task_struct *task = ctx->task;
if (task == NULL || vma == NULL)
return 0;
if (last_tgid != task->tgid)
unique_tgid_cnt++;
last_tgid = task->tgid;
if (task->tgid != pid)
return 0;
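	/* Translate the virtual address into an offset in the backing file:
	 * distance from the VMA start plus the VMA's starting file offset
	 * (vm_pgoff is in pages, hence the shift by page_shift).
	 */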
if (vma->vm_start <= address && vma->vm_end > address) {
offset = address - vma->vm_start + (vma->vm_pgoff << page_shift);
BPF_SEQ_PRINTF(seq, "OK\n");
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 Google LLC.
*/
#include "vmlinux.h"
#include <errno.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} hash SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} lru_hash SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} percpu_array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} percpu_hash SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} lru_percpu_hash SEC(".maps");
struct inner_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, __u64);
} inner_map SEC(".maps");
struct outer_arr {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(int));
__array(values, struct inner_map);
} outer_arr SEC(".maps") = {
.values = { [0] = &inner_map },
};
struct outer_hash {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 1);
__uint(key_size, sizeof(int));
__array(values, struct inner_map);
} outer_hash SEC(".maps") = {
.values = { [0] = &inner_map },
};
char _license[] SEC("license") = "GPL";
int monitored_pid = 0;
int mprotect_count = 0;
int bprm_count = 0;
SEC("lsm/file_mprotect")
int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
unsigned long reqprot, unsigned long prot, int ret)
{
if (ret != 0)
return ret;
__u32 pid = bpf_get_current_pid_tgid() >> 32;
int is_stack = 0;
is_stack = (vma->vm_start <= vma->vm_mm->start_stack &&
vma->vm_end >= vma->vm_mm->start_stack);
if (is_stack && monitored_pid == pid) {
mprotect_count++;
ret = -EPERM;
}
return ret;
}
SEC("lsm.s/bprm_committed_creds")
int BPF_PROG(test_void_hook, struct linux_binprm *bprm)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
struct inner_map *inner_map;
char args[64];
__u32 key = 0;
__u64 *value;
if (monitored_pid == pid)
bprm_count++;
bpf_copy_from_user(args, sizeof(args), (void *)bprm->vma->vm_mm->arg_start);
bpf_copy_from_user(args, sizeof(args), (void *)bprm->mm->arg_start);
value = bpf_map_lookup_elem(&array, &key);
if (value)
*value = 0;
value = bpf_map_lookup_elem(&hash, &key);
if (value)
*value = 0;
value = bpf_map_lookup_elem(&lru_hash, &key);
if (value)
*value = 0;
value = bpf_map_lookup_elem(&percpu_array, &key);
if (value)
*value = 0;
value = bpf_map_lookup_elem(&percpu_hash, &key);
if (value)
*value = 0;
value = bpf_map_lookup_elem(&lru_percpu_hash, &key);
if (value)
*value = 0;
inner_map = bpf_map_lookup_elem(&outer_arr, &key);
if (inner_map) {
value = bpf_map_lookup_elem(inner_map, &key);
if (value)
*value = 0;
}
inner_map = bpf_map_lookup_elem(&outer_hash, &key);
if (inner_map) {
value = bpf_map_lookup_elem(inner_map, &key);
if (value)
*value = 0;
}
return 0;
}
SEC("lsm/task_free") /* lsm/ is ok, lsm.s/ fails */
int BPF_PROG(test_task_free, struct task_struct *task)
{
return 0;
}
int copy_test = 0;
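/* The fentry program below exercises bpf_copy_from_user().  The len argument
 * acts as a sentinel chosen by the userspace test to select the case being
 * checked: a readable pointer (expect success and buf == 1234) and two
 * faulting pointers (expect -EFAULT).
 */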
SEC("fentry.s/" SYS_PREFIX "sys_setdomainname")
int BPF_PROG(test_sys_setdomainname, struct pt_regs *regs)
{
void *ptr = (void *)PT_REGS_PARM1_SYSCALL(regs);
int len = PT_REGS_PARM2_SYSCALL(regs);
int buf = 0;
long ret;
ret = bpf_copy_from_user(&buf, sizeof(buf), ptr);
if (len == -2 && ret == 0 && buf == 1234)
copy_test++;
if (len == -3 && ret == -EFAULT)
copy_test++;
if (len == -4 && ret == -EFAULT)
copy_test++;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/lsm.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/bounds.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
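/* Most tests below look up this 8-byte-value hash map and then perform
 * pointer arithmetic on the returned map_value pointer.  The __success,
 * __failure and __msg annotations encode the verdict the verifier is
 * expected to reach for each program.
 */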
SEC("socket")
__description("subtraction bounds (map value) variant 1")
__failure __msg("R0 max value is outside of the allowed memory range")
__failure_unpriv
__naked void bounds_map_value_variant_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u8*)(r0 + 0); \
if r1 > 0xff goto l0_%=; \
r3 = *(u8*)(r0 + 1); \
if r3 > 0xff goto l0_%=; \
r1 -= r3; \
r1 >>= 56; \
r0 += r1; \
r0 = *(u8*)(r0 + 0); \
exit; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("subtraction bounds (map value) variant 2")
__failure
__msg("R0 min value is negative, either use unsigned index or do a if (index >=0) check.")
__msg_unpriv("R1 has unknown scalar with mixed signed bounds")
__naked void bounds_map_value_variant_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u8*)(r0 + 0); \
if r1 > 0xff goto l0_%=; \
r3 = *(u8*)(r0 + 1); \
if r3 > 0xff goto l0_%=; \
r1 -= r3; \
r0 += r1; \
r0 = *(u8*)(r0 + 0); \
exit; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("check subtraction on pointers for unpriv")
__success __failure_unpriv __msg_unpriv("R9 pointer -= pointer prohibited")
__retval(0)
__naked void subtraction_on_pointers_for_unpriv(void)
{
asm volatile (" \
r0 = 0; \
r1 = %[map_hash_8b] ll; \
r2 = r10; \
r2 += -8; \
r6 = 9; \
*(u64*)(r2 + 0) = r6; \
call %[bpf_map_lookup_elem]; \
r9 = r10; \
r9 -= r0; \
r1 = %[map_hash_8b] ll; \
r2 = r10; \
r2 += -8; \
r6 = 0; \
*(u64*)(r2 + 0) = r6; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: *(u64*)(r0 + 0) = r9; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check based on zero-extended MOV")
__success __success_unpriv __retval(0)
__naked void based_on_zero_extended_mov(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
/* r2 = 0x0000'0000'ffff'ffff */ \
w2 = 0xffffffff; \
/* r2 = 0 */ \
r2 >>= 32; \
/* no-op */ \
r0 += r2; \
/* access at offset 0 */ \
r0 = *(u8*)(r0 + 0); \
l0_%=: /* exit */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check based on sign-extended MOV. test1")
__failure __msg("map_value pointer and 4294967295")
__failure_unpriv
__naked void on_sign_extended_mov_test1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
/* r2 = 0xffff'ffff'ffff'ffff */ \
r2 = 0xffffffff; \
/* r2 = 0xffff'ffff */ \
r2 >>= 32; \
/* r0 = <oob pointer> */ \
r0 += r2; \
/* access to OOB pointer */ \
r0 = *(u8*)(r0 + 0); \
l0_%=: /* exit */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check based on sign-extended MOV. test2")
__failure __msg("R0 min value is outside of the allowed memory range")
__failure_unpriv
__naked void on_sign_extended_mov_test2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
/* r2 = 0xffff'ffff'ffff'ffff */ \
r2 = 0xffffffff; \
/* r2 = 0xfff'ffff */ \
r2 >>= 36; \
/* r0 = <oob pointer> */ \
r0 += r2; \
/* access to OOB pointer */ \
r0 = *(u8*)(r0 + 0); \
l0_%=: /* exit */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tc")
__description("bounds check based on reg_off + var_off + insn_off. test1")
__failure __msg("value_size=8 off=1073741825")
__naked void var_off_insn_off_test1(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_mark]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r6 &= 1; \
r6 += %[__imm_0]; \
r0 += r6; \
r0 += %[__imm_0]; \
l0_%=: r0 = *(u8*)(r0 + 3); \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(__imm_0, (1 << 29) - 1),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("tc")
__description("bounds check based on reg_off + var_off + insn_off. test2")
__failure __msg("value 1073741823")
__naked void var_off_insn_off_test2(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_mark]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r6 &= 1; \
r6 += %[__imm_0]; \
r0 += r6; \
r0 += %[__imm_1]; \
l0_%=: r0 = *(u8*)(r0 + 3); \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(__imm_0, (1 << 30) - 1),
__imm_const(__imm_1, (1 << 29) - 1),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("socket")
__description("bounds check after truncation of non-boundary-crossing range")
__success __success_unpriv __retval(0)
__naked void of_non_boundary_crossing_range(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
/* r1 = [0x00, 0xff] */ \
r1 = *(u8*)(r0 + 0); \
r2 = 1; \
/* r2 = 0x10'0000'0000 */ \
r2 <<= 36; \
/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ \
r1 += r2; \
/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ \
r1 += 0x7fffffff; \
/* r1 = [0x00, 0xff] */ \
w1 -= 0x7fffffff; \
/* r1 = 0 */ \
r1 >>= 8; \
/* no-op */ \
r0 += r1; \
/* access at offset 0 */ \
r0 = *(u8*)(r0 + 0); \
l0_%=: /* exit */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check after truncation of boundary-crossing range (1)")
__failure
/* not actually fully unbounded, but the bound is very high */
__msg("value -4294967168 makes map_value pointer be out of bounds")
__failure_unpriv
__naked void of_boundary_crossing_range_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
/* r1 = [0x00, 0xff] */ \
r1 = *(u8*)(r0 + 0); \
r1 += %[__imm_0]; \
/* r1 = [0xffff'ff80, 0x1'0000'007f] */ \
r1 += %[__imm_0]; \
/* r1 = [0xffff'ff80, 0xffff'ffff] or \
* [0x0000'0000, 0x0000'007f] \
*/ \
w1 += 0; \
r1 -= %[__imm_0]; \
/* r1 = [0x00, 0xff] or \
* [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\
*/ \
r1 -= %[__imm_0]; \
/* error on OOB pointer computation */ \
r0 += r1; \
/* exit */ \
r0 = 0; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(__imm_0, 0xffffff80 >> 1)
: __clobber_all);
}
SEC("socket")
__description("bounds check after truncation of boundary-crossing range (2)")
__failure __msg("value -4294967168 makes map_value pointer be out of bounds")
__failure_unpriv
__naked void of_boundary_crossing_range_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
/* r1 = [0x00, 0xff] */ \
r1 = *(u8*)(r0 + 0); \
r1 += %[__imm_0]; \
/* r1 = [0xffff'ff80, 0x1'0000'007f] */ \
r1 += %[__imm_0]; \
/* r1 = [0xffff'ff80, 0xffff'ffff] or \
* [0x0000'0000, 0x0000'007f] \
* difference to previous test: truncation via MOV32\
* instead of ALU32. \
*/ \
w1 = w1; \
r1 -= %[__imm_0]; \
/* r1 = [0x00, 0xff] or \
* [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\
*/ \
r1 -= %[__imm_0]; \
/* error on OOB pointer computation */ \
r0 += r1; \
/* exit */ \
r0 = 0; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(__imm_0, 0xffffff80 >> 1)
: __clobber_all);
}
SEC("socket")
__description("bounds check after wrapping 32-bit addition")
__success __success_unpriv __retval(0)
__naked void after_wrapping_32_bit_addition(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
/* r1 = 0x7fff'ffff */ \
r1 = 0x7fffffff; \
/* r1 = 0xffff'fffe */ \
r1 += 0x7fffffff; \
/* r1 = 0 */ \
w1 += 2; \
/* no-op */ \
r0 += r1; \
/* access at offset 0 */ \
r0 = *(u8*)(r0 + 0); \
l0_%=: /* exit */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check after shift with oversized count operand")
__failure __msg("R0 max value is outside of the allowed memory range")
__failure_unpriv
__naked void shift_with_oversized_count_operand(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = 32; \
r1 = 1; \
/* r1 = (u32)1 << (u32)32 = ? */ \
w1 <<= w2; \
/* r1 = [0x0000, 0xffff] */ \
r1 &= 0xffff; \
/* computes unknown pointer, potentially OOB */ \
r0 += r1; \
/* potentially OOB access */ \
r0 = *(u8*)(r0 + 0); \
l0_%=: /* exit */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check after right shift of maybe-negative number")
__failure __msg("R0 unbounded memory access")
__failure_unpriv
__naked void shift_of_maybe_negative_number(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
/* r1 = [0x00, 0xff] */ \
r1 = *(u8*)(r0 + 0); \
/* r1 = [-0x01, 0xfe] */ \
r1 -= 1; \
/* r1 = 0 or 0xff'ffff'ffff'ffff */ \
r1 >>= 8; \
/* r1 = 0 or 0xffff'ffff'ffff */ \
r1 >>= 8; \
/* computes unknown pointer, potentially OOB */ \
r0 += r1; \
/* potentially OOB access */ \
r0 = *(u8*)(r0 + 0); \
l0_%=: /* exit */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check after 32-bit right shift with 64-bit input")
__failure __msg("math between map_value pointer and 4294967294 is not allowed")
__failure_unpriv
__naked void shift_with_64_bit_input(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 2; \
/* r1 = 1<<32 */ \
r1 <<= 31; \
/* r1 = 0 (NOT 2!) */ \
w1 >>= 31; \
/* r1 = 0xffff'fffe (NOT 0!) */ \
w1 -= 2; \
/* error on computing OOB pointer */ \
r0 += r1; \
/* exit */ \
r0 = 0; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check map access with off+size signed 32bit overflow. test1")
__failure __msg("map_value pointer and 2147483646")
__failure_unpriv
__naked void size_signed_32bit_overflow_test1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r0 += 0x7ffffffe; \
r0 = *(u64*)(r0 + 0); \
goto l1_%=; \
l1_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check map access with off+size signed 32bit overflow. test2")
__failure __msg("pointer offset 1073741822")
__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
__naked void size_signed_32bit_overflow_test2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r0 += 0x1fffffff; \
r0 += 0x1fffffff; \
r0 += 0x1fffffff; \
r0 = *(u64*)(r0 + 0); \
goto l1_%=; \
l1_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check map access with off+size signed 32bit overflow. test3")
__failure __msg("pointer offset -1073741822")
__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
__naked void size_signed_32bit_overflow_test3(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r0 -= 0x1fffffff; \
r0 -= 0x1fffffff; \
r0 = *(u64*)(r0 + 2); \
goto l1_%=; \
l1_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check map access with off+size signed 32bit overflow. test4")
__failure __msg("map_value pointer and 1000000000000")
__failure_unpriv
__naked void size_signed_32bit_overflow_test4(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r1 = 1000000; \
r1 *= 1000000; \
r0 += r1; \
r0 = *(u64*)(r0 + 2); \
goto l1_%=; \
l1_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check mixed 32bit and 64bit arithmetic. test1")
__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'")
__retval(0)
__naked void _32bit_and_64bit_arithmetic_test1(void)
{
asm volatile (" \
r0 = 0; \
r1 = -1; \
r1 <<= 32; \
r1 += 1; \
/* r1 = 0xffffFFFF00000001 */ \
if w1 > 1 goto l0_%=; \
/* check ALU64 op keeps 32bit bounds */ \
r1 += 1; \
if w1 > 2 goto l0_%=; \
goto l1_%=; \
l0_%=: /* invalid ldx if bounds are lost above */ \
r0 = *(u64*)(r0 - 1); \
l1_%=: exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("bounds check mixed 32bit and 64bit arithmetic. test2")
__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'")
__retval(0)
__naked void _32bit_and_64bit_arithmetic_test2(void)
{
asm volatile (" \
r0 = 0; \
r1 = -1; \
r1 <<= 32; \
r1 += 1; \
/* r1 = 0xffffFFFF00000001 */ \
r2 = 3; \
/* r1 = 0x2 */ \
w1 += 1; \
/* check ALU32 op zero extends 64bit bounds */ \
if r1 > r2 goto l0_%=; \
goto l1_%=; \
l0_%=: /* invalid ldx if bounds are lost above */ \
r0 = *(u64*)(r0 - 1); \
l1_%=: exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("assigning 32bit bounds to 64bit for wA = 0, wB = wA")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void for_wa_0_wb_wa(void)
{
asm volatile (" \
r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r7 = *(u32*)(r1 + %[__sk_buff_data]); \
w9 = 0; \
w2 = w9; \
r6 = r7; \
r6 += r2; \
r3 = r6; \
r3 += 8; \
if r3 > r8 goto l0_%=; \
r5 = *(u32*)(r6 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("socket")
__description("bounds check for reg = 0, reg xor 1")
__success __failure_unpriv
__msg_unpriv("R0 min value is outside of the allowed memory range")
__retval(0)
__naked void reg_0_reg_xor_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r1 = 0; \
r1 ^= 1; \
if r1 != 0 goto l1_%=; \
r0 = *(u64*)(r0 + 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check for reg32 = 0, reg32 xor 1")
__success __failure_unpriv
__msg_unpriv("R0 min value is outside of the allowed memory range")
__retval(0)
__naked void reg32_0_reg32_xor_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: w1 = 0; \
w1 ^= 1; \
if w1 != 0 goto l1_%=; \
r0 = *(u64*)(r0 + 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check for reg = 2, reg xor 3")
__success __failure_unpriv
__msg_unpriv("R0 min value is outside of the allowed memory range")
__retval(0)
__naked void reg_2_reg_xor_3(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r1 = 2; \
r1 ^= 3; \
if r1 > 0 goto l1_%=; \
r0 = *(u64*)(r0 + 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check for reg = any, reg xor 3")
__failure __msg("invalid access to map value")
__msg_unpriv("invalid access to map value")
__naked void reg_any_reg_xor_3(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r1 = *(u64*)(r0 + 0); \
r1 ^= 3; \
if r1 != 0 goto l1_%=; \
r0 = *(u64*)(r0 + 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check for reg32 = any, reg32 xor 3")
__failure __msg("invalid access to map value")
__msg_unpriv("invalid access to map value")
__naked void reg32_any_reg32_xor_3(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r1 = *(u64*)(r0 + 0); \
w1 ^= 3; \
if w1 != 0 goto l1_%=; \
r0 = *(u64*)(r0 + 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check for reg > 0, reg xor 3")
__success __failure_unpriv
__msg_unpriv("R0 min value is outside of the allowed memory range")
__retval(0)
__naked void reg_0_reg_xor_3(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r1 = *(u64*)(r0 + 0); \
if r1 <= 0 goto l1_%=; \
r1 ^= 3; \
if r1 >= 0 goto l1_%=; \
r0 = *(u64*)(r0 + 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds check for reg32 > 0, reg32 xor 3")
__success __failure_unpriv
__msg_unpriv("R0 min value is outside of the allowed memory range")
__retval(0)
__naked void reg32_0_reg32_xor_3(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r1 = *(u64*)(r0 + 0); \
if w1 <= 0 goto l1_%=; \
w1 ^= 3; \
if w1 >= 0 goto l1_%=; \
r0 = *(u64*)(r0 + 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks after 32-bit truncation. test 1")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void _32_bit_truncation_test_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
/* This used to reduce the max bound to 0x7fffffff */\
if r1 == 0 goto l1_%=; \
if r1 > 0x7fffffff goto l0_%=; \
l1_%=: r0 = 0; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks after 32-bit truncation. test 2")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void _32_bit_truncation_test_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
if r1 s< 1 goto l1_%=; \
if w1 s< 0 goto l0_%=; \
l1_%=: r0 = 0; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("xdp")
__description("bound check with JMP_JLT for crossing 64-bit signed boundary")
__success __retval(0)
__naked void crossing_64_bit_signed_boundary_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 1; \
if r1 > r3 goto l0_%=; \
r1 = *(u8*)(r2 + 0); \
r0 = 0x7fffffffffffff10 ll; \
r1 += r0; \
r0 = 0x8000000000000000 ll; \
l1_%=: r0 += 1; \
/* r1 unsigned range is [0x7fffffffffffff10, 0x800000000000000f] */\
if r0 < r1 goto l1_%=; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("bound check with JMP_JSLT for crossing 64-bit signed boundary")
__success __retval(0)
__naked void crossing_64_bit_signed_boundary_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 1; \
if r1 > r3 goto l0_%=; \
r1 = *(u8*)(r2 + 0); \
r0 = 0x7fffffffffffff10 ll; \
r1 += r0; \
r2 = 0x8000000000000fff ll; \
r0 = 0x8000000000000000 ll; \
l1_%=: r0 += 1; \
if r0 s> r2 goto l0_%=; \
/* r1 signed range is [S64_MIN, S64_MAX] */ \
if r0 s< r1 goto l1_%=; \
r0 = 1; \
exit; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("bound check for loop upper bound greater than U32_MAX")
__success __retval(0)
__naked void bound_greater_than_u32_max(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 1; \
if r1 > r3 goto l0_%=; \
r1 = *(u8*)(r2 + 0); \
r0 = 0x100000000 ll; \
r1 += r0; \
r0 = 0x100000000 ll; \
l1_%=: r0 += 1; \
if r0 < r1 goto l1_%=; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("bound check with JMP32_JLT for crossing 32-bit signed boundary")
__success __retval(0)
__naked void crossing_32_bit_signed_boundary_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 1; \
if r1 > r3 goto l0_%=; \
r1 = *(u8*)(r2 + 0); \
w0 = 0x7fffff10; \
w1 += w0; \
w0 = 0x80000000; \
l1_%=: w0 += 1; \
/* r1 unsigned range is [0, 0x8000000f] */ \
if w0 < w1 goto l1_%=; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("bound check with JMP32_JSLT for crossing 32-bit signed boundary")
__success __retval(0)
__naked void crossing_32_bit_signed_boundary_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 1; \
if r1 > r3 goto l0_%=; \
r1 = *(u8*)(r2 + 0); \
w0 = 0x7fffff10; \
w1 += w0; \
w2 = 0x80000fff; \
w0 = 0x80000000; \
l1_%=: w0 += 1; \
if w0 s> w2 goto l0_%=; \
/* r1 signed range is [S32_MIN, S32_MAX] */ \
if w0 s< w1 goto l1_%=; \
r0 = 1; \
exit; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_bounds.c |
// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
struct grehdr {
__be16 flags;
__be16 protocol;
};
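/* Both programs below build an outer IP + GRE header on the stack, push it
 * in front of the packet with bpf_lwt_push_encap(BPF_LWT_ENCAP_IP), and
 * return BPF_LWT_REROUTE so the stack re-routes the packet based on the new
 * outer header.
 */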
SEC("encap_gre")
int bpf_lwt_encap_gre(struct __sk_buff *skb)
{
struct encap_hdr {
struct iphdr iph;
struct grehdr greh;
} hdr;
int err;
memset(&hdr, 0, sizeof(struct encap_hdr));
hdr.iph.ihl = 5;
hdr.iph.version = 4;
hdr.iph.ttl = 0x40;
hdr.iph.protocol = 47; /* IPPROTO_GRE */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
hdr.iph.saddr = 0x640110ac; /* 172.16.1.100 */
hdr.iph.daddr = 0x641010ac; /* 172.16.16.100 */
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
hdr.iph.saddr = 0xac100164; /* 172.16.1.100 */
hdr.iph.daddr = 0xac101064; /* 172.16.16.100 */
#else
#error "Fix your compiler's __BYTE_ORDER__?!"
#endif
hdr.iph.tot_len = bpf_htons(skb->len + sizeof(struct encap_hdr));
hdr.greh.protocol = skb->protocol;
err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
sizeof(struct encap_hdr));
if (err)
return BPF_DROP;
return BPF_LWT_REROUTE;
}
SEC("encap_gre6")
int bpf_lwt_encap_gre6(struct __sk_buff *skb)
{
struct encap_hdr {
struct ipv6hdr ip6hdr;
struct grehdr greh;
} hdr;
int err;
memset(&hdr, 0, sizeof(struct encap_hdr));
hdr.ip6hdr.version = 6;
hdr.ip6hdr.payload_len = bpf_htons(skb->len + sizeof(struct grehdr));
hdr.ip6hdr.nexthdr = 47; /* IPPROTO_GRE */
hdr.ip6hdr.hop_limit = 0x40;
/* fb01::1 */
hdr.ip6hdr.saddr.s6_addr[0] = 0xfb;
hdr.ip6hdr.saddr.s6_addr[1] = 1;
hdr.ip6hdr.saddr.s6_addr[15] = 1;
/* fb10::1 */
hdr.ip6hdr.daddr.s6_addr[0] = 0xfb;
hdr.ip6hdr.daddr.s6_addr[1] = 0x10;
hdr.ip6hdr.daddr.s6_addr[15] = 1;
hdr.greh.protocol = skb->protocol;
err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
sizeof(struct encap_hdr));
if (err)
return BPF_DROP;
return BPF_LWT_REROUTE;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <string.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define SRC_REWRITE_IP6_0 0
#define SRC_REWRITE_IP6_1 0
#define SRC_REWRITE_IP6_2 0
#define SRC_REWRITE_IP6_3 6
#define DST_REWRITE_IP6_0 0
#define DST_REWRITE_IP6_1 0
#define DST_REWRITE_IP6_2 0
#define DST_REWRITE_IP6_3 1
#define DST_REWRITE_PORT6 6666
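/* On connect() from an AF_INET6 socket: verify that a socket is reachable at
 * [::1]:6666 via bpf_sk_lookup_tcp()/bpf_sk_lookup_udp(), then rewrite the
 * connect destination to that address and bind the source address to ::6
 * (with port 0).
 */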
SEC("cgroup/connect6")
int connect_v6_prog(struct bpf_sock_addr *ctx)
{
struct bpf_sock_tuple tuple = {};
struct sockaddr_in6 sa;
struct bpf_sock *sk;
/* Verify that new destination is available. */
memset(&tuple.ipv6.saddr, 0, sizeof(tuple.ipv6.saddr));
memset(&tuple.ipv6.sport, 0, sizeof(tuple.ipv6.sport));
tuple.ipv6.daddr[0] = bpf_htonl(DST_REWRITE_IP6_0);
tuple.ipv6.daddr[1] = bpf_htonl(DST_REWRITE_IP6_1);
tuple.ipv6.daddr[2] = bpf_htonl(DST_REWRITE_IP6_2);
tuple.ipv6.daddr[3] = bpf_htonl(DST_REWRITE_IP6_3);
tuple.ipv6.dport = bpf_htons(DST_REWRITE_PORT6);
if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
return 0;
else if (ctx->type == SOCK_STREAM)
sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv6),
BPF_F_CURRENT_NETNS, 0);
else
sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv6),
BPF_F_CURRENT_NETNS, 0);
if (!sk)
return 0;
if (sk->src_ip6[0] != tuple.ipv6.daddr[0] ||
sk->src_ip6[1] != tuple.ipv6.daddr[1] ||
sk->src_ip6[2] != tuple.ipv6.daddr[2] ||
sk->src_ip6[3] != tuple.ipv6.daddr[3] ||
sk->src_port != DST_REWRITE_PORT6) {
bpf_sk_release(sk);
return 0;
}
bpf_sk_release(sk);
/* Rewrite destination. */
ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1);
ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
ctx->user_ip6[3] = bpf_htonl(DST_REWRITE_IP6_3);
ctx->user_port = bpf_htons(DST_REWRITE_PORT6);
/* Rewrite source. */
memset(&sa, 0, sizeof(sa));
sa.sin6_family = AF_INET6;
sa.sin6_port = bpf_htons(0);
sa.sin6_addr.s6_addr32[0] = bpf_htonl(SRC_REWRITE_IP6_0);
sa.sin6_addr.s6_addr32[1] = bpf_htonl(SRC_REWRITE_IP6_1);
sa.sin6_addr.s6_addr32[2] = bpf_htonl(SRC_REWRITE_IP6_2);
sa.sin6_addr.s6_addr32[3] = bpf_htonl(SRC_REWRITE_IP6_3);
if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0)
return 0;
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/connect6_prog.c |
#include "core_reloc_types.h"
void f(struct core_reloc_ints___reverse_sign x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_ints___reverse_sign.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define UNROLL
#define INLINE __always_inline
#include "profiler.inc.h"
| linux-master | tools/testing/selftests/bpf/progs/profiler1.c |
// SPDX-License-Identifier: GPL-2.0
#define BPF_NO_PRESERVE_ACCESS_INDEX
#include <vmlinux.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#define INLINE __always_inline
#define skb_shorter(skb, len) ((void *)(long)(skb)->data + (len) > (void *)(long)skb->data_end)
#define ETH_IPV4_TCP_SIZE (14 + sizeof(struct iphdr) + sizeof(struct tcphdr))
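/* skb_shorter() checks whether the first 'len' bytes of the packet are
 * directly accessible between data and data_end.  ETH_IPV4_TCP_SIZE is the
 * minimal Ethernet + IPv4 + TCP header length verified before the casts
 * below.
 */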
static INLINE struct iphdr *get_iphdr(struct __sk_buff *skb)
{
struct iphdr *ip = NULL;
struct ethhdr *eth;
if (skb_shorter(skb, ETH_IPV4_TCP_SIZE))
goto out;
eth = (void *)(long)skb->data;
ip = (void *)(eth + 1);
out:
return ip;
}
SEC("tc")
int main_prog(struct __sk_buff *skb)
{
struct iphdr *ip = NULL;
struct tcphdr *tcp;
__u8 proto = 0;
if (!(ip = get_iphdr(skb)))
goto out;
proto = ip->protocol;
if (proto != IPPROTO_TCP)
goto out;
tcp = (void*)(ip + 1);
if (tcp->dest != 0)
goto out;
if (!tcp)
goto out;
return tcp->urg_ptr;
out:
return -1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/skb_pkt_end.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ptrace.h>
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
/* typically virtio scsi has max SGs of 6 */
#define VIRTIO_MAX_SGS 6
/* Verifier will fail with SG_MAX = 128. The failure can be
* workarounded with a smaller SG_MAX, e.g. 10.
*/
#define WORKAROUND
#ifdef WORKAROUND
#define SG_MAX 10
#else
/* typically virtio blk has max SEG of 128 */
#define SG_MAX 128
#endif
#define SG_CHAIN 0x01UL
#define SG_END 0x02UL
struct scatterlist {
unsigned long page_link;
unsigned int offset;
unsigned int length;
};
#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
#define sg_is_last(sg) ((sg)->page_link & SG_END)
#define sg_chain_ptr(sg) \
((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
static inline struct scatterlist *__sg_next(struct scatterlist *sgp)
{
struct scatterlist sg;
bpf_probe_read_kernel(&sg, sizeof(sg), sgp);
if (sg_is_last(&sg))
return NULL;
sgp++;
bpf_probe_read_kernel(&sg, sizeof(sg), sgp);
if (sg_is_chain(&sg))
sgp = sg_chain_ptr(&sg);
return sgp;
}
static inline struct scatterlist *get_sgp(struct scatterlist **sgs, int i)
{
struct scatterlist *sgp;
bpf_probe_read_kernel(&sgp, sizeof(sgp), sgs + i);
return sgp;
}
int config = 0;
int result = 0;
SEC("kprobe/virtqueue_add_sgs")
int BPF_KPROBE(trace_virtqueue_add_sgs, void *unused, struct scatterlist **sgs,
unsigned int out_sgs, unsigned int in_sgs)
{
struct scatterlist *sgp = NULL;
__u64 length1 = 0, length2 = 0;
unsigned int i, n, len;
if (config != 0)
return 0;
for (i = 0; (i < VIRTIO_MAX_SGS) && (i < out_sgs); i++) {
__sink(out_sgs);
for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX);
sgp = __sg_next(sgp)) {
bpf_probe_read_kernel(&len, sizeof(len), &sgp->length);
length1 += len;
n++;
}
}
for (i = 0; (i < VIRTIO_MAX_SGS) && (i < in_sgs); i++) {
__sink(in_sgs);
for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX);
sgp = __sg_next(sgp)) {
bpf_probe_read_kernel(&len, sizeof(len), &sgp->length);
length2 += len;
n++;
}
}
config = 1;
result = length2 - length1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/loop6.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Cloudflare */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <errno.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 64);
__type(key, __u32);
__type(value, __u64);
} sockmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, 64);
__type(key, __u32);
__type(value, __u64);
} sockhash SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, 64);
__type(key, __u32);
__type(value, __u64);
} dst SEC(".maps");
__u32 elems = 0;
__u32 socks = 0;
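/* For every key visited by the sockmap iterator: elems counts keys, socks
 * counts keys that still have a live socket.  Live entries are copied into
 * the dst map and stale keys are deleted from it; the return value is
 * non-zero only if a map update/delete unexpectedly fails.
 */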
SEC("iter/sockmap")
int copy(struct bpf_iter__sockmap *ctx)
{
struct sock *sk = ctx->sk;
__u32 tmp, *key = ctx->key;
int ret;
if (!key)
return 0;
elems++;
/* We need a temporary buffer on the stack, since the verifier doesn't
* let us use the pointer from the context as an argument to the helper.
*/
tmp = *key;
if (sk) {
socks++;
return bpf_map_update_elem(&dst, &tmp, sk, 0) != 0;
}
ret = bpf_map_delete_elem(&dst, &tmp);
return ret && ret != -ENOENT;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c |
#include "core_reloc_types.h"
void f(struct core_reloc_mods x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_mods.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
struct core_reloc_bitfields {
/* unsigned bitfields */
uint8_t ub1: 1;
uint8_t ub2: 2;
uint32_t ub7: 7;
/* signed bitfields */
int8_t sb4: 4;
int32_t sb20: 20;
/* non-bitfields */
uint32_t u32;
int32_t s32;
};
/* bitfield read results, all as plain integers */
struct core_reloc_bitfields_output {
int64_t ub1;
int64_t ub2;
int64_t ub7;
int64_t sb4;
int64_t sb20;
int64_t u32;
int64_t s32;
};
struct pt_regs;
struct trace_sys_enter {
struct pt_regs *regs;
long id;
};
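/* Read each bitfield with the CO-RE BPF_CORE_READ_BITFIELD() macro (direct,
 * relocatable access) and widen the results to int64 so userspace can
 * compare them against the expected values.
 */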
SEC("tp_btf/sys_enter")
int test_core_bitfields_direct(void *ctx)
{
struct core_reloc_bitfields *in = (void *)&data.in;
struct core_reloc_bitfields_output *out = (void *)&data.out;
out->ub1 = BPF_CORE_READ_BITFIELD(in, ub1);
out->ub2 = BPF_CORE_READ_BITFIELD(in, ub2);
out->ub7 = BPF_CORE_READ_BITFIELD(in, ub7);
out->sb4 = BPF_CORE_READ_BITFIELD(in, sb4);
out->sb20 = BPF_CORE_READ_BITFIELD(in, sb20);
out->u32 = BPF_CORE_READ_BITFIELD(in, u32);
out->s32 = BPF_CORE_READ_BITFIELD(in, s32);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_iptunnel_common.h"
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 256);
__type(key, __u32);
__type(value, __u64);
} rxcnt SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, MAX_IPTNL_ENTRIES);
__type(key, struct vip);
__type(value, struct iptnl_info);
} vip2tnl SEC(".maps");
static __always_inline void count_tx(__u32 protocol)
{
__u64 *rxcnt_count;
rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol);
if (rxcnt_count)
*rxcnt_count += 1;
}
static __always_inline int get_dport(void *trans_data, void *data_end,
__u8 protocol)
{
struct tcphdr *th;
struct udphdr *uh;
switch (protocol) {
case IPPROTO_TCP:
th = (struct tcphdr *)trans_data;
if (th + 1 > data_end)
return -1;
return th->dest;
case IPPROTO_UDP:
uh = (struct udphdr *)trans_data;
if (uh + 1 > data_end)
return -1;
return uh->dest;
default:
return 0;
}
}
static __always_inline void set_ethhdr(struct ethhdr *new_eth,
const struct ethhdr *old_eth,
const struct iptnl_info *tnl,
__be16 h_proto)
{
memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source));
memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest));
new_eth->h_proto = h_proto;
}
static __always_inline int handle_ipv4(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
struct iptnl_info *tnl;
struct ethhdr *new_eth;
struct ethhdr *old_eth;
struct iphdr *iph = data + sizeof(struct ethhdr);
__u16 *next_iph;
__u16 payload_len;
struct vip vip = {};
int dport;
__u32 csum = 0;
int i;
if (iph + 1 > data_end)
return XDP_DROP;
dport = get_dport(iph + 1, data_end, iph->protocol);
if (dport == -1)
return XDP_DROP;
vip.protocol = iph->protocol;
vip.family = AF_INET;
vip.daddr.v4 = iph->daddr;
vip.dport = dport;
payload_len = bpf_ntohs(iph->tot_len);
tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
/* It only does v4-in-v4 */
if (!tnl || tnl->family != AF_INET)
return XDP_PASS;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
return XDP_DROP;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
iph = data + sizeof(*new_eth);
old_eth = data + sizeof(*iph);
if (new_eth + 1 > data_end ||
old_eth + 1 > data_end ||
iph + 1 > data_end)
return XDP_DROP;
set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IP));
iph->version = 4;
iph->ihl = sizeof(*iph) >> 2;
iph->frag_off = 0;
iph->protocol = IPPROTO_IPIP;
iph->check = 0;
iph->tos = 0;
iph->tot_len = bpf_htons(payload_len + sizeof(*iph));
iph->daddr = tnl->daddr.v4;
iph->saddr = tnl->saddr.v4;
iph->ttl = 8;
next_iph = (__u16 *)iph;
#pragma clang loop unroll(disable)
for (i = 0; i < sizeof(*iph) >> 1; i++)
csum += *next_iph++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
count_tx(vip.protocol);
return XDP_TX;
}
static __always_inline int handle_ipv6(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
struct iptnl_info *tnl;
struct ethhdr *new_eth;
struct ethhdr *old_eth;
struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
__u16 payload_len;
struct vip vip = {};
int dport;
if (ip6h + 1 > data_end)
return XDP_DROP;
dport = get_dport(ip6h + 1, data_end, ip6h->nexthdr);
if (dport == -1)
return XDP_DROP;
vip.protocol = ip6h->nexthdr;
vip.family = AF_INET6;
memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr));
vip.dport = dport;
payload_len = ip6h->payload_len;
tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
/* It only does v6-in-v6 */
if (!tnl || tnl->family != AF_INET6)
return XDP_PASS;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
return XDP_DROP;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
ip6h = data + sizeof(*new_eth);
old_eth = data + sizeof(*ip6h);
if (new_eth + 1 > data_end || old_eth + 1 > data_end ||
ip6h + 1 > data_end)
return XDP_DROP;
set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IPV6));
ip6h->version = 6;
ip6h->priority = 0;
memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
ip6h->payload_len = bpf_htons(bpf_ntohs(payload_len) + sizeof(*ip6h));
ip6h->nexthdr = IPPROTO_IPV6;
ip6h->hop_limit = 8;
memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6));
memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6));
count_tx(vip.protocol);
return XDP_TX;
}
SEC("xdp")
int _xdp_tx_iptunnel(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
struct ethhdr *eth = data;
__u16 h_proto;
if (eth + 1 > data_end)
return XDP_DROP;
h_proto = eth->h_proto;
if (h_proto == bpf_htons(ETH_P_IP))
return handle_ipv4(xdp);
else if (h_proto == bpf_htons(ETH_P_IPV6))
return handle_ipv6(xdp);
else
return XDP_DROP;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_loop.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
extern unsigned long CONFIG_HZ __kconfig;
const volatile char veth[IFNAMSIZ];
const volatile int veth_ifindex;
int nr_listen;
int nr_passive;
int nr_active;
int nr_connect;
int nr_binddev;
int nr_socket_post_create;
int nr_fin_wait1;
struct sockopt_test {
int opt;
int new;
int restore;
int expected;
int tcp_expected;
unsigned int flip:1;
};
static const char not_exist_cc[] = "not_exist";
static const char cubic_cc[] = "cubic";
static const char reno_cc[] = "reno";
static const struct sockopt_test sol_socket_tests[] = {
{ .opt = SO_REUSEADDR, .flip = 1, },
{ .opt = SO_SNDBUF, .new = 8123, .expected = 8123 * 2, },
{ .opt = SO_RCVBUF, .new = 8123, .expected = 8123 * 2, },
{ .opt = SO_KEEPALIVE, .flip = 1, },
{ .opt = SO_PRIORITY, .new = 0xeb9f, .expected = 0xeb9f, },
{ .opt = SO_REUSEPORT, .flip = 1, },
{ .opt = SO_RCVLOWAT, .new = 8123, .expected = 8123, },
{ .opt = SO_MARK, .new = 0xeb9f, .expected = 0xeb9f, },
{ .opt = SO_MAX_PACING_RATE, .new = 0xeb9f, .expected = 0xeb9f, },
{ .opt = SO_TXREHASH, .flip = 1, },
{ .opt = 0, },
};
static const struct sockopt_test sol_tcp_tests[] = {
{ .opt = TCP_NODELAY, .flip = 1, },
{ .opt = TCP_KEEPIDLE, .new = 123, .expected = 123, .restore = 321, },
{ .opt = TCP_KEEPINTVL, .new = 123, .expected = 123, .restore = 321, },
{ .opt = TCP_KEEPCNT, .new = 123, .expected = 123, .restore = 124, },
{ .opt = TCP_SYNCNT, .new = 123, .expected = 123, .restore = 124, },
{ .opt = TCP_WINDOW_CLAMP, .new = 8123, .expected = 8123, .restore = 8124, },
{ .opt = TCP_CONGESTION, },
{ .opt = TCP_THIN_LINEAR_TIMEOUTS, .flip = 1, },
{ .opt = TCP_USER_TIMEOUT, .new = 123400, .expected = 123400, },
{ .opt = TCP_NOTSENT_LOWAT, .new = 1314, .expected = 1314, },
{ .opt = 0, },
};
static const struct sockopt_test sol_ip_tests[] = {
{ .opt = IP_TOS, .new = 0xe1, .expected = 0xe1, .tcp_expected = 0xe0, },
{ .opt = 0, },
};
static const struct sockopt_test sol_ipv6_tests[] = {
{ .opt = IPV6_TCLASS, .new = 0xe1, .expected = 0xe1, .tcp_expected = 0xe0, },
{ .opt = IPV6_AUTOFLOWLABEL, .flip = 1, },
{ .opt = 0, },
};
struct loop_ctx {
void *ctx;
struct sock *sk;
};
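/* For boolean options: read the current value, flip it, verify that the new
 * value reads back, then restore the original value.
 */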
static int bpf_test_sockopt_flip(void *ctx, struct sock *sk,
const struct sockopt_test *t,
int level)
{
	int old, tmp, new, opt = t->opt;
if (bpf_getsockopt(ctx, level, opt, &old, sizeof(old)))
return 1;
	/* kernel initializes txrehash to 255 by default */
if (level == SOL_SOCKET && opt == SO_TXREHASH && old != 0 && old != 1)
old = 1;
new = !old;
if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new)))
return 1;
if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) ||
tmp != new)
return 1;
if (bpf_setsockopt(ctx, level, opt, &old, sizeof(old)))
return 1;
return 0;
}
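/* For integer options: set t->new, verify that getsockopt() returns the
 * expected value (TCP sockets may expect a different one), then restore
 * either the old value or t->restore.
 */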
static int bpf_test_sockopt_int(void *ctx, struct sock *sk,
const struct sockopt_test *t,
int level)
{
int old, tmp, new, expected, opt;
opt = t->opt;
new = t->new;
if (sk->sk_type == SOCK_STREAM && t->tcp_expected)
expected = t->tcp_expected;
else
expected = t->expected;
if (bpf_getsockopt(ctx, level, opt, &old, sizeof(old)) ||
old == new)
return 1;
if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new)))
return 1;
if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) ||
tmp != expected)
return 1;
if (t->restore)
old = t->restore;
if (bpf_setsockopt(ctx, level, opt, &old, sizeof(old)))
return 1;
return 0;
}
static int bpf_test_socket_sockopt(__u32 i, struct loop_ctx *lc)
{
const struct sockopt_test *t;
if (i >= ARRAY_SIZE(sol_socket_tests))
return 1;
t = &sol_socket_tests[i];
if (!t->opt)
return 1;
if (t->flip)
return bpf_test_sockopt_flip(lc->ctx, lc->sk, t, SOL_SOCKET);
return bpf_test_sockopt_int(lc->ctx, lc->sk, t, SOL_SOCKET);
}
static int bpf_test_ip_sockopt(__u32 i, struct loop_ctx *lc)
{
const struct sockopt_test *t;
if (i >= ARRAY_SIZE(sol_ip_tests))
return 1;
t = &sol_ip_tests[i];
if (!t->opt)
return 1;
if (t->flip)
return bpf_test_sockopt_flip(lc->ctx, lc->sk, t, IPPROTO_IP);
return bpf_test_sockopt_int(lc->ctx, lc->sk, t, IPPROTO_IP);
}
static int bpf_test_ipv6_sockopt(__u32 i, struct loop_ctx *lc)
{
const struct sockopt_test *t;
if (i >= ARRAY_SIZE(sol_ipv6_tests))
return 1;
t = &sol_ipv6_tests[i];
if (!t->opt)
return 1;
if (t->flip)
return bpf_test_sockopt_flip(lc->ctx, lc->sk, t, IPPROTO_IPV6);
return bpf_test_sockopt_int(lc->ctx, lc->sk, t, IPPROTO_IPV6);
}
static int bpf_test_tcp_sockopt(__u32 i, struct loop_ctx *lc)
{
const struct sockopt_test *t;
struct sock *sk;
void *ctx;
if (i >= ARRAY_SIZE(sol_tcp_tests))
return 1;
t = &sol_tcp_tests[i];
if (!t->opt)
return 1;
ctx = lc->ctx;
sk = lc->sk;
if (t->opt == TCP_CONGESTION) {
char old_cc[16], tmp_cc[16];
const char *new_cc;
int new_cc_len;
if (!bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION,
(void *)not_exist_cc, sizeof(not_exist_cc)))
return 1;
if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, old_cc, sizeof(old_cc)))
return 1;
if (!bpf_strncmp(old_cc, sizeof(old_cc), cubic_cc)) {
new_cc = reno_cc;
new_cc_len = sizeof(reno_cc);
} else {
new_cc = cubic_cc;
new_cc_len = sizeof(cubic_cc);
}
if (bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, (void *)new_cc,
new_cc_len))
return 1;
if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, tmp_cc, sizeof(tmp_cc)))
return 1;
if (bpf_strncmp(tmp_cc, sizeof(tmp_cc), new_cc))
return 1;
if (bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, old_cc, sizeof(old_cc)))
return 1;
return 0;
}
if (t->flip)
return bpf_test_sockopt_flip(ctx, sk, t, IPPROTO_TCP);
return bpf_test_sockopt_int(ctx, sk, t, IPPROTO_TCP);
}
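/* Run every sockopt test table that applies to this socket's protocol and
 * address family; returns 0 only if all tables complete successfully.
 */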
static int bpf_test_sockopt(void *ctx, struct sock *sk)
{
struct loop_ctx lc = { .ctx = ctx, .sk = sk, };
__u16 family, proto;
int n;
family = sk->sk_family;
proto = sk->sk_protocol;
n = bpf_loop(ARRAY_SIZE(sol_socket_tests), bpf_test_socket_sockopt, &lc, 0);
if (n != ARRAY_SIZE(sol_socket_tests))
return -1;
if (proto == IPPROTO_TCP) {
n = bpf_loop(ARRAY_SIZE(sol_tcp_tests), bpf_test_tcp_sockopt, &lc, 0);
if (n != ARRAY_SIZE(sol_tcp_tests))
return -1;
}
if (family == AF_INET) {
n = bpf_loop(ARRAY_SIZE(sol_ip_tests), bpf_test_ip_sockopt, &lc, 0);
if (n != ARRAY_SIZE(sol_ip_tests))
return -1;
} else {
n = bpf_loop(ARRAY_SIZE(sol_ipv6_tests), bpf_test_ipv6_sockopt, &lc, 0);
if (n != ARRAY_SIZE(sol_ipv6_tests))
return -1;
}
return 0;
}
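/* Exercise SO_BINDTODEVICE and SO_BINDTOIFINDEX: bind to the test veth by
 * name and by ifindex, and verify that unbinding resets the ifindex to 0.
 */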
static int binddev_test(void *ctx)
{
const char empty_ifname[] = "";
int ifindex, zero = 0;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
(void *)veth, sizeof(veth)))
return -1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
&ifindex, sizeof(int)) ||
ifindex != veth_ifindex)
return -1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
(void *)empty_ifname, sizeof(empty_ifname)))
return -1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
&ifindex, sizeof(int)) ||
ifindex != 0)
return -1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
(void *)&veth_ifindex, sizeof(int)))
return -1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
&ifindex, sizeof(int)) ||
ifindex != veth_ifindex)
return -1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
&zero, sizeof(int)))
return -1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
&ifindex, sizeof(int)) ||
ifindex != 0)
return -1;
return 0;
}
static int test_tcp_maxseg(void *ctx, struct sock *sk)
{
int val = 1314, tmp;
if (sk->sk_state != TCP_ESTABLISHED)
return bpf_setsockopt(ctx, IPPROTO_TCP, TCP_MAXSEG,
&val, sizeof(val));
if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_MAXSEG, &tmp, sizeof(tmp)) ||
tmp > val)
return -1;
return 0;
}
static int test_tcp_saved_syn(void *ctx, struct sock *sk)
{
__u8 saved_syn[20];
int one = 1;
if (sk->sk_state == TCP_LISTEN)
return bpf_setsockopt(ctx, IPPROTO_TCP, TCP_SAVE_SYN,
&one, sizeof(one));
return bpf_getsockopt(ctx, IPPROTO_TCP, TCP_SAVED_SYN,
saved_syn, sizeof(saved_syn));
}
SEC("lsm_cgroup/socket_post_create")
int BPF_PROG(socket_post_create, struct socket *sock, int family,
int type, int protocol, int kern)
{
struct sock *sk = sock->sk;
if (!sk)
return 1;
nr_socket_post_create += !bpf_test_sockopt(sk, sk);
nr_binddev += !binddev_test(sk);
return 1;
}
SEC("sockops")
int skops_sockopt(struct bpf_sock_ops *skops)
{
struct bpf_sock *bpf_sk = skops->sk;
struct sock *sk;
if (!bpf_sk)
return 1;
sk = (struct sock *)bpf_skc_to_tcp_sock(bpf_sk);
if (!sk)
return 1;
switch (skops->op) {
case BPF_SOCK_OPS_TCP_LISTEN_CB:
nr_listen += !(bpf_test_sockopt(skops, sk) ||
test_tcp_maxseg(skops, sk) ||
test_tcp_saved_syn(skops, sk));
break;
case BPF_SOCK_OPS_TCP_CONNECT_CB:
nr_connect += !(bpf_test_sockopt(skops, sk) ||
test_tcp_maxseg(skops, sk));
break;
case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
nr_active += !(bpf_test_sockopt(skops, sk) ||
test_tcp_maxseg(skops, sk));
break;
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
nr_passive += !(bpf_test_sockopt(skops, sk) ||
test_tcp_maxseg(skops, sk) ||
test_tcp_saved_syn(skops, sk));
bpf_sock_ops_cb_flags_set(skops,
skops->bpf_sock_ops_cb_flags |
BPF_SOCK_OPS_STATE_CB_FLAG);
break;
case BPF_SOCK_OPS_STATE_CB:
if (skops->args[1] == BPF_TCP_CLOSE_WAIT)
nr_fin_wait1 += !bpf_test_sockopt(skops, sk);
break;
}
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/setget_sockopt.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "test_user_ringbuf.h"
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
} user_ringbuf SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
} kernel_ringbuf SEC(".maps");
/* inputs */
int pid, err, val;
int read = 0;
/* Counter used for end-to-end protocol test */
__u64 kern_mutated = 0;
__u64 user_mutated = 0;
__u64 expected_user_mutated = 0;
static int
is_test_process(void)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
return cur_pid == pid;
}
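/* Drain callback: alternate between copying the sample out with
 * bpf_dynptr_read() and reading it in place via bpf_dynptr_data(),
 * counting every consumed sample.
 */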
static long
record_sample(struct bpf_dynptr *dynptr, void *context)
{
const struct sample *sample = NULL;
struct sample stack_sample;
int status;
static int num_calls;
if (num_calls++ % 2 == 0) {
status = bpf_dynptr_read(&stack_sample, sizeof(stack_sample), dynptr, 0, 0);
if (status) {
bpf_printk("bpf_dynptr_read() failed: %d\n", status);
err = 1;
return 1;
}
} else {
sample = bpf_dynptr_data(dynptr, 0, sizeof(*sample));
if (!sample) {
bpf_printk("Unexpectedly failed to get sample\n");
err = 2;
return 1;
}
stack_sample = *sample;
}
__sync_fetch_and_add(&read, 1);
return 0;
}
static void
handle_sample_msg(const struct test_msg *msg)
{
switch (msg->msg_op) {
case TEST_MSG_OP_INC64:
kern_mutated += msg->operand_64;
break;
case TEST_MSG_OP_INC32:
kern_mutated += msg->operand_32;
break;
case TEST_MSG_OP_MUL64:
kern_mutated *= msg->operand_64;
break;
case TEST_MSG_OP_MUL32:
kern_mutated *= msg->operand_32;
break;
default:
bpf_printk("Unrecognized op %d\n", msg->msg_op);
err = 2;
}
}
static long
read_protocol_msg(struct bpf_dynptr *dynptr, void *context)
{
const struct test_msg *msg = NULL;
msg = bpf_dynptr_data(dynptr, 0, sizeof(*msg));
if (!msg) {
err = 1;
bpf_printk("Unexpectedly failed to get msg\n");
return 0;
}
handle_sample_msg(msg);
return 0;
}
static int publish_next_kern_msg(__u32 index, void *context)
{
struct test_msg *msg = NULL;
int operand_64 = TEST_OP_64;
int operand_32 = TEST_OP_32;
msg = bpf_ringbuf_reserve(&kernel_ringbuf, sizeof(*msg), 0);
if (!msg) {
err = 4;
return 1;
}
switch (index % TEST_MSG_OP_NUM_OPS) {
case TEST_MSG_OP_INC64:
msg->operand_64 = operand_64;
msg->msg_op = TEST_MSG_OP_INC64;
expected_user_mutated += operand_64;
break;
case TEST_MSG_OP_INC32:
msg->operand_32 = operand_32;
msg->msg_op = TEST_MSG_OP_INC32;
expected_user_mutated += operand_32;
break;
case TEST_MSG_OP_MUL64:
msg->operand_64 = operand_64;
msg->msg_op = TEST_MSG_OP_MUL64;
expected_user_mutated *= operand_64;
break;
case TEST_MSG_OP_MUL32:
msg->operand_32 = operand_32;
msg->msg_op = TEST_MSG_OP_MUL32;
expected_user_mutated *= operand_32;
break;
default:
bpf_ringbuf_discard(msg, 0);
err = 5;
return 1;
}
bpf_ringbuf_submit(msg, 0);
return 0;
}
static void
publish_kern_messages(void)
{
if (expected_user_mutated != user_mutated) {
bpf_printk("%lu != %lu\n", expected_user_mutated, user_mutated);
err = 3;
return;
}
bpf_loop(8, publish_next_kern_msg, NULL, 0);
}
SEC("fentry/" SYS_PREFIX "sys_prctl")
int test_user_ringbuf_protocol(void *ctx)
{
long status = 0;
if (!is_test_process())
return 0;
status = bpf_user_ringbuf_drain(&user_ringbuf, read_protocol_msg, NULL, 0);
if (status < 0) {
bpf_printk("Drain returned: %ld\n", status);
err = 1;
return 0;
}
publish_kern_messages();
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int test_user_ringbuf(void *ctx)
{
if (!is_test_process())
return 0;
err = bpf_user_ringbuf_drain(&user_ringbuf, record_sample, NULL, 0);
return 0;
}
static long
do_nothing_cb(struct bpf_dynptr *dynptr, void *context)
{
__sync_fetch_and_add(&read, 1);
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_prlimit64")
int test_user_ringbuf_epoll(void *ctx)
{
long num_samples;
if (!is_test_process())
return 0;
num_samples = bpf_user_ringbuf_drain(&user_ringbuf, do_nothing_cb, NULL, 0);
if (num_samples <= 0)
err = 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/user_ringbuf_success.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 3);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
int selector = 0;
#define TAIL_FUNC(x) \
SEC("tc") \
int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
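/* Tail-call into jmp_table at the user-selected index; 3 is only returned
 * if the tail call fails (e.g. selector out of range or an empty slot).
 */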
SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call(skb, &jmp_table, selector);
return 3;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/tailcall4.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
struct core_reloc_ints {
uint8_t u8_field;
int8_t s8_field;
uint16_t u16_field;
int16_t s16_field;
uint32_t u32_field;
int32_t s32_field;
uint64_t u64_field;
int64_t s64_field;
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_ints(void *ctx)
{
struct core_reloc_ints *in = (void *)&data.in;
struct core_reloc_ints *out = (void *)&data.out;
if (CORE_READ(&out->u8_field, &in->u8_field) ||
CORE_READ(&out->s8_field, &in->s8_field) ||
CORE_READ(&out->u16_field, &in->u16_field) ||
CORE_READ(&out->s16_field, &in->s16_field) ||
CORE_READ(&out->u32_field, &in->u32_field) ||
CORE_READ(&out->s32_field, &in->s32_field) ||
CORE_READ(&out->u64_field, &in->u64_field) ||
CORE_READ(&out->s64_field, &in->s64_field))
return 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_ints.c |
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
__type(key, int);
__type(value, int);
} sock_map_rx SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
__type(key, int);
__type(value, int);
} sock_map_tx SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
__type(key, int);
__type(value, int);
} sock_map_msg SEC(".maps");
SEC("sk_skb")
int prog_skb_verdict(struct __sk_buff *skb)
{
return SK_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/subreg.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
/* This file contains sub-register zero extension checks for insns defining
* sub-registers, meaning:
* - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width
* forms (BPF_END) could define sub-registers.
* - Narrow direct loads, BPF_B/H/W | BPF_LDX.
* - BPF_LD is not exposed to JIT back-ends, so no need for testing.
*
* "get_prandom_u32" is used to initialize low 32-bit of some registers to
* prevent potential optimizations done by verifier or JIT back-ends which could
* optimize register back into constant when range info shows one register is a
* constant.
*/
SEC("socket")
__description("add32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void add32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = 0x100000000 ll; \
w0 += w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("add32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void add32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
/* An insn could have no effect on the low 32-bit, for example:\
* a = a + 0 \
* a = a | 0 \
* a = a & -1 \
* But, they should still zero high 32-bit. \
*/ \
w0 += 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 += -2; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("sub32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void sub32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = 0x1ffffffff ll; \
w0 -= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("sub32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void sub32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 -= 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 -= 1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("mul32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void mul32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = 0x100000001 ll; \
w0 *= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("mul32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void mul32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 *= 1; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 *= -1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("div32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void div32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = -1; \
w0 /= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("div32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void div32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 /= 1; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 /= 2; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("or32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void or32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = 0x100000001 ll; \
w0 |= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("or32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void or32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 |= 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 |= 1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("and32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void and32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x100000000 ll; \
r1 |= r0; \
r0 = 0x1ffffffff ll; \
w0 &= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("and32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void and32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 &= -1; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 &= -2; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("lsh32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void lsh32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x100000000 ll; \
r0 |= r1; \
r1 = 1; \
w0 <<= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("lsh32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void lsh32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 <<= 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 <<= 1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("rsh32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void rsh32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
r1 = 1; \
w0 >>= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("rsh32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void rsh32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 >>= 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 >>= 1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("neg32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void neg32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 = -w0; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("mod32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void mod32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = -1; \
w0 %%= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("mod32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void mod32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 %%= 1; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 %%= 2; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("xor32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void xor32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = 0x100000000 ll; \
w0 ^= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("xor32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void xor32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 ^= 1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("mov32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void mov32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x100000000 ll; \
r1 |= r0; \
r0 = 0x100000000 ll; \
w0 = w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("mov32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void mov32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 = 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 = 1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("arsh32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void arsh32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
r1 = 1; \
w0 s>>= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("arsh32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void arsh32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 s>>= 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 s>>= 1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("end16 (to_le) reg zero extend check")
__success __success_unpriv __retval(0)
__naked void le_reg_zero_extend_check_1(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r6 = r0; \
r6 <<= 32; \
call %[bpf_get_prandom_u32]; \
r0 |= r6; \
r0 = le16 r0; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("end32 (to_le) reg zero extend check")
__success __success_unpriv __retval(0)
__naked void le_reg_zero_extend_check_2(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r6 = r0; \
r6 <<= 32; \
call %[bpf_get_prandom_u32]; \
r0 |= r6; \
r0 = le32 r0; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("end16 (to_be) reg zero extend check")
__success __success_unpriv __retval(0)
__naked void be_reg_zero_extend_check_1(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r6 = r0; \
r6 <<= 32; \
call %[bpf_get_prandom_u32]; \
r0 |= r6; \
r0 = be16 r0; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("end32 (to_be) reg zero extend check")
__success __success_unpriv __retval(0)
__naked void be_reg_zero_extend_check_2(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r6 = r0; \
r6 <<= 32; \
call %[bpf_get_prandom_u32]; \
r0 |= r6; \
r0 = be32 r0; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("ldx_b zero extend check")
__success __success_unpriv __retval(0)
__naked void ldx_b_zero_extend_check(void)
{
asm volatile (" \
r6 = r10; \
r6 += -4; \
r7 = 0xfaceb00c; \
*(u32*)(r6 + 0) = r7; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
r0 = *(u8*)(r6 + 0); \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("ldx_h zero extend check")
__success __success_unpriv __retval(0)
__naked void ldx_h_zero_extend_check(void)
{
asm volatile (" \
r6 = r10; \
r6 += -4; \
r7 = 0xfaceb00c; \
*(u32*)(r6 + 0) = r7; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
r0 = *(u16*)(r6 + 0); \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("ldx_w zero extend check")
__success __success_unpriv __retval(0)
__naked void ldx_w_zero_extend_check(void)
{
asm volatile (" \
r6 = r10; \
r6 += -4; \
r7 = 0xfaceb00c; \
*(u32*)(r6 + 0) = r7; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
r0 = *(u32*)(r6 + 0); \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_subreg.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/btf_ctx_access.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("fentry/bpf_modify_return_test")
__description("btf_ctx_access accept")
__success __retval(0)
__naked void btf_ctx_access_accept(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + 8); /* load 2nd argument value (int pointer) */\
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("fentry/bpf_fentry_test9")
__description("btf_ctx_access u32 pointer accept")
__success __retval(0)
__naked void ctx_access_u32_pointer_accept(void)
{
asm volatile (" \
	r2 = *(u32*)(r1 + 0); /* load 1st argument value (u32 pointer) */\
r0 = 0; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "../bpf_testmod/bpf_testmod_kfunc.h"
extern const int bpf_prog_active __ksym;
int active_res = -1;
int sk_state_res = -1;
int __noinline f1(struct __sk_buff *skb)
{
struct bpf_sock *sk = skb->sk;
int *active;
if (!sk)
return -1;
sk = bpf_sk_fullsock(sk);
if (!sk)
return -1;
active = (int *)bpf_per_cpu_ptr(&bpf_prog_active,
bpf_get_smp_processor_id());
if (active)
active_res = *active;
sk_state_res = bpf_kfunc_call_test3((struct sock *)sk)->__sk_common.skc_state;
return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4);
}
SEC("tc")
int kfunc_call_test1(struct __sk_buff *skb)
{
return f1(skb);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ctx.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("tc")
__description("context stores via BPF_ATOMIC")
__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed")
__naked void context_stores_via_bpf_atomic(void)
{
asm volatile (" \
r0 = 0; \
lock *(u32 *)(r1 + %[__sk_buff_mark]) += w0; \
exit; \
" :
: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("tc")
__description("arithmetic ops make PTR_TO_CTX unusable")
__failure __msg("dereference of modified ctx ptr")
__naked void make_ptr_to_ctx_unusable(void)
{
asm volatile (" \
r1 += %[__imm_0]; \
r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
exit; \
" :
: __imm_const(__imm_0,
offsetof(struct __sk_buff, data) - offsetof(struct __sk_buff, mark)),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("tc")
__description("pass unmodified ctx pointer to helper")
__success __retval(0)
__naked void unmodified_ctx_pointer_to_helper(void)
{
asm volatile (" \
r2 = 0; \
call %[bpf_csum_update]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_csum_update)
: __clobber_all);
}
SEC("tc")
__description("pass modified ctx pointer to helper, 1")
__failure __msg("negative offset ctx ptr R1 off=-612 disallowed")
__naked void ctx_pointer_to_helper_1(void)
{
asm volatile (" \
r1 += -612; \
r2 = 0; \
call %[bpf_csum_update]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_csum_update)
: __clobber_all);
}
SEC("socket")
__description("pass modified ctx pointer to helper, 2")
__failure __msg("negative offset ctx ptr R1 off=-612 disallowed")
__failure_unpriv __msg_unpriv("negative offset ctx ptr R1 off=-612 disallowed")
__naked void ctx_pointer_to_helper_2(void)
{
asm volatile (" \
r1 += -612; \
call %[bpf_get_socket_cookie]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_socket_cookie)
: __clobber_all);
}
SEC("tc")
__description("pass modified ctx pointer to helper, 3")
__failure __msg("variable ctx access var_off=(0x0; 0x4)")
__naked void ctx_pointer_to_helper_3(void)
{
asm volatile (" \
r3 = *(u32*)(r1 + 0); \
r3 &= 4; \
r1 += r3; \
r2 = 0; \
call %[bpf_csum_update]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_csum_update)
: __clobber_all);
}
SEC("cgroup/sendmsg6")
__description("pass ctx or null check, 1: ctx")
__success
__naked void or_null_check_1_ctx(void)
{
asm volatile (" \
call %[bpf_get_netns_cookie]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_netns_cookie)
: __clobber_all);
}
SEC("cgroup/sendmsg6")
__description("pass ctx or null check, 2: null")
__success
__naked void or_null_check_2_null(void)
{
asm volatile (" \
r1 = 0; \
call %[bpf_get_netns_cookie]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_netns_cookie)
: __clobber_all);
}
SEC("cgroup/sendmsg6")
__description("pass ctx or null check, 3: 1")
__failure __msg("R1 type=scalar expected=ctx")
__naked void or_null_check_3_1(void)
{
asm volatile (" \
r1 = 1; \
call %[bpf_get_netns_cookie]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_netns_cookie)
: __clobber_all);
}
SEC("cgroup/sendmsg6")
__description("pass ctx or null check, 4: ctx - const")
__failure __msg("negative offset ctx ptr R1 off=-612 disallowed")
__naked void null_check_4_ctx_const(void)
{
asm volatile (" \
r1 += -612; \
call %[bpf_get_netns_cookie]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_netns_cookie)
: __clobber_all);
}
SEC("cgroup/connect4")
__description("pass ctx or null check, 5: null (connect)")
__success
__naked void null_check_5_null_connect(void)
{
asm volatile (" \
r1 = 0; \
call %[bpf_get_netns_cookie]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_netns_cookie)
: __clobber_all);
}
SEC("cgroup/post_bind4")
__description("pass ctx or null check, 6: null (bind)")
__success
__naked void null_check_6_null_bind(void)
{
asm volatile (" \
r1 = 0; \
call %[bpf_get_netns_cookie]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_netns_cookie)
: __clobber_all);
}
SEC("cgroup/post_bind4")
__description("pass ctx or null check, 7: ctx (bind)")
__success
__naked void null_check_7_ctx_bind(void)
{
asm volatile (" \
call %[bpf_get_socket_cookie]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_socket_cookie)
: __clobber_all);
}
SEC("cgroup/post_bind4")
__description("pass ctx or null check, 8: null (bind)")
__failure __msg("R1 type=scalar expected=ctx")
__naked void null_check_8_null_bind(void)
{
asm volatile (" \
r1 = 0; \
call %[bpf_get_socket_cookie]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_socket_cookie)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_ctx.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 2);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
__noinline
int subprog_tail2(struct __sk_buff *skb)
{
volatile char arr[64] = {};
if (load_word(skb, 0) || load_half(skb, 0))
bpf_tail_call_static(skb, &jmp_table, 10);
else
bpf_tail_call_static(skb, &jmp_table, 1);
__sink(arr[sizeof(arr) - 1]);
return skb->len;
}
static __noinline
int subprog_tail(struct __sk_buff *skb)
{
volatile char arr[64] = {};
bpf_tail_call_static(skb, &jmp_table, 0);
__sink(arr[sizeof(arr) - 1]);
return skb->len * 2;
}
SEC("tc")
int classifier_0(struct __sk_buff *skb)
{
volatile char arr[128] = {};
__sink(arr[sizeof(arr) - 1]);
return subprog_tail2(skb);
}
SEC("tc")
int classifier_1(struct __sk_buff *skb)
{
volatile char arr[128] = {};
__sink(arr[sizeof(arr) - 1]);
return skb->len * 3;
}
SEC("tc")
int entry(struct __sk_buff *skb)
{
volatile char arr[128] = {};
__sink(arr[sizeof(arr) - 1]);
return subprog_tail(skb);
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} sk_stg_map SEC(".maps");
__u32 val_sum = 0;
__u32 ipv6_sk_count = 0;
__u32 to_add_val = 0;
SEC("iter/bpf_sk_storage_map")
int rw_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
{
struct sock *sk = ctx->sk;
__u32 *val = ctx->value;
if (sk == NULL || val == NULL)
return 0;
if (sk->sk_family == AF_INET6)
ipv6_sk_count++;
val_sum += *val;
*val += to_add_val;
return 0;
}
SEC("iter/bpf_sk_storage_map")
int oob_write_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
{
struct sock *sk = ctx->sk;
__u32 *val = ctx->value;
if (sk == NULL || val == NULL)
return 0;
*(val + 1) = 0xdeadbeef;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Google */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
int terminate_early = 0;
u64 terminal_cgroup = 0;
static inline u64 cgroup_id(struct cgroup *cgrp)
{
return cgrp->kn->id;
}
SEC("iter/cgroup")
int cgroup_id_printer(struct bpf_iter__cgroup *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct cgroup *cgrp = ctx->cgroup;
/* epilogue */
if (cgrp == NULL) {
BPF_SEQ_PRINTF(seq, "epilogue\n");
return 0;
}
/* prologue */
if (ctx->meta->seq_num == 0)
BPF_SEQ_PRINTF(seq, "prologue\n");
BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp));
if (terminal_cgroup == cgroup_id(cgrp))
return 1;
return terminate_early ? 1 : 0;
}
| linux-master | tools/testing/selftests/bpf/progs/cgroup_iter.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <string.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/if.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_tcp_helpers.h"
#define SRC_REWRITE_IP4 0x7f000004U
#define DST_REWRITE_IP4 0x7f000001U
#define DST_REWRITE_PORT4 4444
#ifndef TCP_CA_NAME_MAX
#define TCP_CA_NAME_MAX 16
#endif
#ifndef TCP_NOTSENT_LOWAT
#define TCP_NOTSENT_LOWAT 25
#endif
#ifndef IFNAMSIZ
#define IFNAMSIZ 16
#endif
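/* Bind the socket to SRC_REWRITE_IP4 with an ephemeral port before the
 * connect() proceeds.
 */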
__attribute__ ((noinline)) __weak
int do_bind(struct bpf_sock_addr *ctx)
{
struct sockaddr_in sa = {};
sa.sin_family = AF_INET;
sa.sin_port = bpf_htons(0);
sa.sin_addr.s_addr = bpf_htonl(SRC_REWRITE_IP4);
if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0)
return 0;
return 1;
}
static __inline int verify_cc(struct bpf_sock_addr *ctx,
char expected[TCP_CA_NAME_MAX])
{
char buf[TCP_CA_NAME_MAX];
int i;
if (bpf_getsockopt(ctx, SOL_TCP, TCP_CONGESTION, &buf, sizeof(buf)))
return 1;
for (i = 0; i < TCP_CA_NAME_MAX; i++) {
if (buf[i] != expected[i])
return 1;
if (buf[i] == 0)
break;
}
return 0;
}
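/* Switch TCP_CONGESTION between reno and cubic, verifying that each newly
 * set value can be read back.
 */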
static __inline int set_cc(struct bpf_sock_addr *ctx)
{
char reno[TCP_CA_NAME_MAX] = "reno";
char cubic[TCP_CA_NAME_MAX] = "cubic";
if (bpf_setsockopt(ctx, SOL_TCP, TCP_CONGESTION, &reno, sizeof(reno)))
return 1;
if (verify_cc(ctx, reno))
return 1;
if (bpf_setsockopt(ctx, SOL_TCP, TCP_CONGESTION, &cubic, sizeof(cubic)))
return 1;
if (verify_cc(ctx, cubic))
return 1;
return 0;
}
static __inline int bind_to_device(struct bpf_sock_addr *ctx)
{
char veth1[IFNAMSIZ] = "test_sock_addr1";
char veth2[IFNAMSIZ] = "test_sock_addr2";
char missing[IFNAMSIZ] = "nonexistent_dev";
char del_bind[IFNAMSIZ] = "";
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
&veth1, sizeof(veth1)))
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
&veth2, sizeof(veth2)))
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
&missing, sizeof(missing)) != -ENODEV)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
&del_bind, sizeof(del_bind)))
return 1;
return 0;
}
static __inline int set_keepalive(struct bpf_sock_addr *ctx)
{
int zero = 0, one = 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one)))
return 1;
if (ctx->type == SOCK_STREAM) {
if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPIDLE, &one, sizeof(one)))
return 1;
if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPINTVL, &one, sizeof(one)))
return 1;
if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPCNT, &one, sizeof(one)))
return 1;
if (bpf_setsockopt(ctx, SOL_TCP, TCP_SYNCNT, &one, sizeof(one)))
return 1;
if (bpf_setsockopt(ctx, SOL_TCP, TCP_USER_TIMEOUT, &one, sizeof(one)))
return 1;
}
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &zero, sizeof(zero)))
return 1;
return 0;
}
static __inline int set_notsent_lowat(struct bpf_sock_addr *ctx)
{
int lowat = 65535;
if (ctx->type == SOCK_STREAM) {
if (bpf_setsockopt(ctx, SOL_TCP, TCP_NOTSENT_LOWAT, &lowat, sizeof(lowat)))
return 1;
}
return 0;
}
SEC("cgroup/connect4")
int connect_v4_prog(struct bpf_sock_addr *ctx)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
/* Verify that new destination is available. */
memset(&tuple.ipv4.saddr, 0, sizeof(tuple.ipv4.saddr));
memset(&tuple.ipv4.sport, 0, sizeof(tuple.ipv4.sport));
tuple.ipv4.daddr = bpf_htonl(DST_REWRITE_IP4);
tuple.ipv4.dport = bpf_htons(DST_REWRITE_PORT4);
/* Bind to device and unbind it. */
if (bind_to_device(ctx))
return 0;
if (set_keepalive(ctx))
return 0;
if (set_notsent_lowat(ctx))
return 0;
if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
return 0;
else if (ctx->type == SOCK_STREAM)
sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
BPF_F_CURRENT_NETNS, 0);
else
sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv4),
BPF_F_CURRENT_NETNS, 0);
if (!sk)
return 0;
if (sk->src_ip4 != tuple.ipv4.daddr ||
sk->src_port != DST_REWRITE_PORT4) {
bpf_sk_release(sk);
return 0;
}
bpf_sk_release(sk);
/* Rewrite congestion control. */
if (ctx->type == SOCK_STREAM && set_cc(ctx))
return 0;
/* Rewrite destination. */
ctx->user_ip4 = bpf_htonl(DST_REWRITE_IP4);
ctx->user_port = bpf_htons(DST_REWRITE_PORT4);
return do_bind(ctx) ? 1 : 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/connect4_prog.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
extern int bpf_testmod_ksym_percpu __ksym;
SEC("tc")
int ksym_fail(struct __sk_buff *ctx)
{
return *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/ksym_race.c |
#include "core_reloc_types.h"
void f(struct core_reloc_ints x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_ints.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
#define SOCKMAP
#define TEST_MAP_TYPE BPF_MAP_TYPE_SOCKMAP
#include "./test_sockmap_kern.h"
| linux-master | tools/testing/selftests/bpf/progs/test_sockmap_kern.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/regalloc.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
SEC("tracepoint")
__description("regalloc basic")
__success __flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_basic(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r7 = r0; \
call %[bpf_get_prandom_u32]; \
r2 = r0; \
if r0 s> 20 goto l0_%=; \
if r2 s< 0 goto l0_%=; \
r7 += r0; \
r7 += r2; \
r0 = *(u64*)(r7 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("regalloc negative")
__failure __msg("invalid access to map value, value_size=48 off=48 size=1")
__naked void regalloc_negative(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r7 = r0; \
call %[bpf_get_prandom_u32]; \
r2 = r0; \
if r0 s> 24 goto l0_%=; \
if r2 s< 0 goto l0_%=; \
r7 += r0; \
r7 += r2; \
r0 = *(u8*)(r7 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("regalloc src_reg mark")
__success __flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_src_reg_mark(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r7 = r0; \
call %[bpf_get_prandom_u32]; \
r2 = r0; \
if r0 s> 20 goto l0_%=; \
r3 = 0; \
if r3 s>= r2 goto l0_%=; \
r7 += r0; \
r7 += r2; \
r0 = *(u64*)(r7 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("regalloc src_reg negative")
__failure __msg("invalid access to map value, value_size=48 off=44 size=8")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_src_reg_negative(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r7 = r0; \
call %[bpf_get_prandom_u32]; \
r2 = r0; \
if r0 s> 22 goto l0_%=; \
r3 = 0; \
if r3 s>= r2 goto l0_%=; \
r7 += r0; \
r7 += r2; \
r0 = *(u64*)(r7 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("regalloc and spill")
__success __flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_and_spill(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r7 = r0; \
call %[bpf_get_prandom_u32]; \
r2 = r0; \
if r0 s> 20 goto l0_%=; \
/* r0 has upper bound that should propagate into r2 */\
*(u64*)(r10 - 8) = r2; /* spill r2 */ \
r0 = 0; \
r2 = 0; /* clear r0 and r2 */\
r3 = *(u64*)(r10 - 8); /* fill r3 */ \
if r0 s>= r3 goto l0_%=; \
/* r3 has lower and upper bounds */ \
r7 += r3; \
r0 = *(u64*)(r7 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("regalloc and spill negative")
__failure __msg("invalid access to map value, value_size=48 off=48 size=8")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_and_spill_negative(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r7 = r0; \
call %[bpf_get_prandom_u32]; \
r2 = r0; \
if r0 s> 48 goto l0_%=; \
/* r0 has upper bound that should propagate into r2 */\
*(u64*)(r10 - 8) = r2; /* spill r2 */ \
r0 = 0; \
r2 = 0; /* clear r0 and r2 */\
r3 = *(u64*)(r10 - 8); /* fill r3 */\
if r0 s>= r3 goto l0_%=; \
/* r3 has lower and upper bounds */ \
r7 += r3; \
r0 = *(u64*)(r7 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("regalloc three regs")
__success __flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_three_regs(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r7 = r0; \
call %[bpf_get_prandom_u32]; \
r2 = r0; \
r4 = r2; \
if r0 s> 12 goto l0_%=; \
if r2 s< 0 goto l0_%=; \
r7 += r0; \
r7 += r2; \
r7 += r4; \
r0 = *(u64*)(r7 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("regalloc after call")
__success __flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_after_call(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r7 = r0; \
call %[bpf_get_prandom_u32]; \
r8 = r0; \
r9 = r0; \
call regalloc_after_call__1; \
if r8 s> 20 goto l0_%=; \
if r9 s< 0 goto l0_%=; \
r7 += r8; \
r7 += r9; \
r0 = *(u64*)(r7 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
static __naked __noinline __attribute__((used))
void regalloc_after_call__1(void)
{
asm volatile (" \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("tracepoint")
__description("regalloc in callee")
__success __flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_in_callee(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r7 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r2 = r0; \
r3 = r7; \
call regalloc_in_callee__1; \
l0_%=: exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
static __naked __noinline __attribute__((used))
void regalloc_in_callee__1(void)
{
asm volatile (" \
if r1 s> 20 goto l0_%=; \
if r2 s< 0 goto l0_%=; \
r3 += r1; \
r3 += r2; \
r0 = *(u64*)(r3 + 0); \
exit; \
l0_%=: r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("tracepoint")
__description("regalloc, spill, JEQ")
__success
__naked void regalloc_spill_jeq(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
*(u64*)(r10 - 8) = r0; /* spill r0 */ \
if r0 == 0 goto l0_%=; \
l0_%=: /* The verifier will walk the rest twice with r0 == 0 and r0 == map_value */\
call %[bpf_get_prandom_u32]; \
r2 = r0; \
if r2 == 20 goto l1_%=; \
l1_%=: /* The verifier will walk the rest two more times with r0 == 20 and r0 == unknown */\
r3 = *(u64*)(r10 - 8); /* fill r3 with map_value */\
if r3 == 0 goto l2_%=; /* skip ldx if map_value == NULL */\
/* Buggy verifier will think that r3 == 20 here */\
r0 = *(u64*)(r3 + 0); /* read from map_value */\
l2_%=: exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_regalloc.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2019 Facebook
#define STROBE_MAX_INTS 2
#define STROBE_MAX_STRS 25
#define STROBE_MAX_MAPS 30
#define STROBE_MAX_MAP_ENTRIES 20
#define NO_UNROLL
#include "strobemeta.h"
| linux-master | tools/testing/selftests/bpf/progs/strobemeta_nounroll2.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
struct core_reloc_mods_output {
int a, b, c, d, e, f, g, h;
};
typedef const int int_t;
typedef const char *char_ptr_t;
typedef const int arr_t[7];
struct core_reloc_mods_substruct {
int x;
int y;
};
typedef struct {
int x;
int y;
} core_reloc_mods_substruct_t;
struct core_reloc_mods {
int a;
int_t b;
char *c;
char_ptr_t d;
int e[3];
arr_t f;
struct core_reloc_mods_substruct g;
core_reloc_mods_substruct_t h;
};
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
#else
#define CORE_READ(dst, src) ({ \
int __sz = sizeof(*(dst)) < sizeof(*(src)) ? sizeof(*(dst)) : \
sizeof(*(src)); \
bpf_core_read((char *)(dst) + sizeof(*(dst)) - __sz, __sz, \
(const char *)(src) + sizeof(*(src)) - __sz); \
})
#endif
SEC("raw_tracepoint/sys_enter")
int test_core_mods(void *ctx)
{
struct core_reloc_mods *in = (void *)&data.in;
struct core_reloc_mods_output *out = (void *)&data.out;
if (CORE_READ(&out->a, &in->a) ||
CORE_READ(&out->b, &in->b) ||
CORE_READ(&out->c, &in->c) ||
CORE_READ(&out->d, &in->d) ||
CORE_READ(&out->e, &in->e[2]) ||
CORE_READ(&out->f, &in->f[1]) ||
CORE_READ(&out->g, &in->g.x) ||
CORE_READ(&out->h, &in->h.y))
return 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_mods.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
const volatile int skip = 0;
#else
const volatile int skip = 1;
#endif
volatile const short val1 = -1;
volatile const int val2 = -1;
short val3 = -1;
int val4 = -1;
int done1, done2, ret1, ret2;
SEC("?raw_tp/sys_enter")
int rdonly_map_prog(const void *ctx)
{
if (done1)
return 0;
done1 = 1;
/* val1/val2 readonly map */
if (val1 == val2)
ret1 = 1;
return 0;
}
SEC("?raw_tp/sys_enter")
int map_val_prog(const void *ctx)
{
if (done2)
return 0;
done2 = 1;
	/* val3/val4 regular read/write map */
if (val3 == val4)
ret2 = 1;
return 0;
}
struct bpf_testmod_struct_arg_1 {
int a;
};
long long int_member;
SEC("?fentry/bpf_testmod_test_arg_ptr_to_struct")
int BPF_PROG2(test_ptr_struct_arg, struct bpf_testmod_struct_arg_1 *, p)
{
/* probed memory access */
int_member = p->a;
return 0;
}
long long set_optlen, set_retval;
SEC("?cgroup/getsockopt")
int _getsockopt(volatile struct bpf_sockopt *ctx)
{
int old_optlen, old_retval;
old_optlen = ctx->optlen;
old_retval = ctx->retval;
ctx->optlen = -1;
ctx->retval = -1;
/* sign extension for ctx member */
set_optlen = ctx->optlen;
set_retval = ctx->retval;
ctx->optlen = old_optlen;
ctx->retval = old_retval;
return 0;
}
long long set_mark;
SEC("?tc")
int _tc(volatile struct __sk_buff *skb)
{
long long tmp_mark;
int old_mark;
old_mark = skb->mark;
skb->mark = 0xf6fe;
/* narrowed sign extension for ctx member */
#if __clang_major__ >= 18
/* force narrow one-byte signed load. Otherwise, compiler may
* generate a 32-bit unsigned load followed by an s8 movsx.
*/
asm volatile ("r1 = *(s8 *)(%[ctx] + %[off_mark])\n\t"
"%[tmp_mark] = r1"
: [tmp_mark]"=r"(tmp_mark)
: [ctx]"r"(skb),
[off_mark]"i"(offsetof(struct __sk_buff, mark))
: "r1");
#else
tmp_mark = (char)skb->mark;
#endif
set_mark = tmp_mark;
skb->mark = old_mark;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_ldsx_insn.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
__u32 set_pid = 0;
__u64 set_key = 0;
__u64 set_value = 0;
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 2);
__type(key, __u64);
__type(value, __u64);
} hash_map SEC(".maps");
SEC("tp/syscalls/sys_enter_getpgid")
int bpf_lookup_and_delete_test(const void *ctx)
{
if (set_pid == bpf_get_current_pid_tgid() >> 32)
bpf_map_update_elem(&hash_map, &set_key, &set_value, BPF_NOEXIST);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_lookup_and_delete.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#define ETH_ALEN 6
#define HDR_SZ (sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + sizeof(struct udphdr))
/**
* enum frame_mark - magics to distinguish page/packet paths
* @MARK_XMIT: page was recycled due to the frame being "xmitted" by the NIC.
* @MARK_IN: frame is being processed by the input XDP prog.
* @MARK_SKB: frame did hit the TC ingress hook as an skb.
*/
enum frame_mark {
MARK_XMIT = 0U,
MARK_IN = 0x42,
MARK_SKB = 0x45,
};
const volatile int ifindex_out;
const volatile int ifindex_in;
const volatile __u8 expect_dst[ETH_ALEN];
volatile int pkts_seen_xdp = 0;
volatile int pkts_seen_zero = 0;
volatile int pkts_seen_tc = 0;
volatile int retcode = XDP_REDIRECT;
SEC("xdp")
int xdp_redirect(struct xdp_md *xdp)
{
__u32 *metadata = (void *)(long)xdp->data_meta;
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
__u8 *payload = data + HDR_SZ;
int ret = retcode;
if (payload + 1 > data_end)
return XDP_ABORTED;
if (xdp->ingress_ifindex != ifindex_in)
return XDP_ABORTED;
if (metadata + 1 > data)
return XDP_ABORTED;
if (*metadata != 0x42)
return XDP_ABORTED;
if (*payload == MARK_XMIT)
pkts_seen_zero++;
*payload = MARK_IN;
if (bpf_xdp_adjust_meta(xdp, sizeof(__u64)))
return XDP_ABORTED;
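	/* retcode starts at XDP_REDIRECT and is stepped down once per packet
	 * until it reaches XDP_PASS, so the first frames return XDP_REDIRECT,
	 * then XDP_TX, then XDP_PASS for the rest (ret was captured above,
	 * before the decrement).
	 */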
if (retcode > XDP_PASS)
retcode--;
if (ret == XDP_REDIRECT)
return bpf_redirect(ifindex_out, 0);
return ret;
}
static bool check_pkt(void *data, void *data_end, const __u32 mark)
{
struct ipv6hdr *iph = data + sizeof(struct ethhdr);
__u8 *payload = data + HDR_SZ;
if (payload + 1 > data_end)
return false;
if (iph->nexthdr != IPPROTO_UDP || *payload != MARK_IN)
return false;
/* reset the payload so the same packet doesn't get counted twice when
* it cycles back through the kernel path and out the dst veth
*/
*payload = mark;
return true;
}
SEC("xdp")
int xdp_count_pkts(struct xdp_md *xdp)
{
void *data = (void *)(long)xdp->data;
void *data_end = (void *)(long)xdp->data_end;
if (check_pkt(data, data_end, MARK_XMIT))
pkts_seen_xdp++;
/* Return %XDP_DROP to recycle the data page with %MARK_XMIT, like
* it exited a physical NIC. Those pages will be counted in the
* pkts_seen_zero counter above.
*/
return XDP_DROP;
}
SEC("tc")
int tc_count_pkts(struct __sk_buff *skb)
{
void *data = (void *)(long)skb->data;
void *data_end = (void *)(long)skb->data_end;
if (check_pkt(data, data_end, MARK_SKB))
pkts_seen_tc++;
/* Will be either recycled or freed, %MARK_SKB makes sure it won't
* hit any of the counters above.
*/
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Facebook */
#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
#define ITER_HELPERS \
__imm(bpf_iter_num_new), \
__imm(bpf_iter_num_next), \
__imm(bpf_iter_num_destroy)
SEC("?raw_tp")
__success
int force_clang_to_emit_btf_for_externs(void *ctx)
{
	/* we need this as a workaround to force the compiler to emit BTF
	 * information for the bpf_iter_num_{new,next,destroy}() kfuncs,
	 * since, apparently, it doesn't emit it for symbols that are only
	 * referenced from assembly (or from a cleanup attribute, for that
	 * matter)
	 */
bpf_repeat(0);
return 0;
}
SEC("?raw_tp")
__success __log_level(2)
__msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)")
int create_and_destroy(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("Unreleased reference id=1")
int create_and_forget_to_destroy_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected an initialized iter_num as arg #1")
int destroy_without_creating_fail(void *ctx)
{
/* init with zeros to stop verifier complaining about uninit stack */
struct bpf_iter_num iter;
asm volatile (
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected an initialized iter_num as arg #1")
int compromise_iter_w_direct_write_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* directly write over first half of iter state */
"*(u64 *)(%[iter] + 0) = r0;"
/* (attempt to) destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("Unreleased reference id=1")
int compromise_iter_w_direct_write_and_skip_destroy_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* directly write over first half of iter state */
"*(u64 *)(%[iter] + 0) = r0;"
/* don't destroy iter, leaking ref, which should fail */
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected an initialized iter_num as arg #1")
int compromise_iter_w_helper_write_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* overwrite 8th byte with bpf_probe_read_kernel() */
"r1 = %[iter];"
"r1 += 7;"
"r2 = 1;"
"r3 = 0;" /* NULL */
"call %[bpf_probe_read_kernel];"
/* (attempt to) destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS, __imm(bpf_probe_read_kernel)
: __clobber_common
);
return 0;
}
static __noinline void subprog_with_iter(void)
{
struct bpf_iter_num iter;
bpf_iter_num_new(&iter, 0, 1);
return;
}
SEC("?raw_tp")
__failure
/* ensure there was a call to subprog, which might not happen without __noinline */
__msg("returning from callee:")
__msg("Unreleased reference id=1")
int leak_iter_from_subprog_fail(void *ctx)
{
subprog_with_iter();
return 0;
}
SEC("?raw_tp")
__success __log_level(2)
__msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)")
int valid_stack_reuse(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
/* now reuse same stack slots */
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected uninitialized iter_num as arg #1")
int double_create_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* (attempt to) create iterator again */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected an initialized iter_num as arg #1")
int double_destroy_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
/* (attempt to) destroy iterator again */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected an initialized iter_num as arg #1")
int next_without_new_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
		/* don't create iterator and try to iterate */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("expected an initialized iter_num as arg #1")
int next_after_destroy_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
		/* iterator was destroyed above, try to iterate anyway */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("invalid read from stack")
int __naked read_from_iter_slot_fail(void)
{
asm volatile (
/* r6 points to struct bpf_iter_num on the stack */
"r6 = r10;"
"r6 += -24;"
/* create iterator */
"r1 = r6;"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
		/* attempt to leak bpf_iter_num state */
"r7 = *(u64 *)(r6 + 0);"
"r8 = *(u64 *)(r6 + 8);"
/* destroy iterator */
"r1 = r6;"
"call %[bpf_iter_num_destroy];"
/* leak bpf_iter_num state */
"r0 = r7;"
"if r7 > r8 goto +1;"
"r0 = r8;"
"exit;"
:
: ITER_HELPERS
: __clobber_common, "r6", "r7", "r8"
);
}
int zero;
SEC("?raw_tp")
__failure
__flag(BPF_F_TEST_STATE_FREQ)
__msg("Unreleased reference")
int stacksafe_should_not_conflate_stack_spill_and_iter(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* Create a fork in logic, with general setup as follows:
* - fallthrough (first) path is valid;
* - branch (second) path is invalid.
* Then depending on what we do in fallthrough vs branch path,
* we try to detect bugs in func_states_equal(), regsafe(),
* refsafe(), stack_safe(), and similar by tricking verifier
* into believing that branch state is a valid subset of
* a fallthrough state. Verifier should reject overall
* validation, unless there is a bug somewhere in verifier
* logic.
*/
"call %[bpf_get_prandom_u32];"
"r6 = r0;"
"call %[bpf_get_prandom_u32];"
"r7 = r0;"
"if r6 > r7 goto bad;" /* fork */
/* spill r6 into stack slot of bpf_iter_num var */
"*(u64 *)(%[iter] + 0) = r6;"
"goto skip_bad;"
"bad:"
/* create iterator in the same stack slot */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* but then forget about it and overwrite it back to r6 spill */
"*(u64 *)(%[iter] + 0) = r6;"
"skip_bad:"
"goto +0;" /* force checkpoint */
		/* corrupt the stack slots, if they really hold iterator state */
"*(u64 *)(%[iter] + 0) = r6;"
:
: __imm_ptr(iter),
__imm_addr(zero),
__imm(bpf_get_prandom_u32),
__imm(bpf_dynptr_from_mem),
ITER_HELPERS
: __clobber_common, "r6", "r7"
);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/iters_state_safety.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* volatile to force a read, compiler may assume 0 otherwise */
const volatile int rovar1;
int out1;
/* Override weak symbol in test_subskeleton_lib */
int var5 = 5;
extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
extern int lib_routine(void);
SEC("raw_tp/sys_enter")
int handler1(const void *ctx)
{
(void) CONFIG_BPF_SYSCALL;
out1 = lib_routine() * rovar1;
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_subskeleton.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define ATTR __attribute__((noinline))
#include "test_jhash.h"
SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
void *ptr;
int nh_off, i = 0;
nh_off = 14;
/* pragma unroll doesn't work on large loops */
#define C do { \
ptr = data + i; \
if (ptr + nh_off > data_end) \
break; \
ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
} while (0);
#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
C30;C30;C30; /* 90 calls */
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_verif_scale1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, long);
} map_a SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, long);
} map_b SEC(".maps");
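/* These two fentry programs attach to the local-storage helpers themselves,
 * so the cgroup storage operations they perform re-enter the local-storage
 * code; the test relies on this to exercise the kernel's recursion handling.
 */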
SEC("fentry/bpf_local_storage_lookup")
int BPF_PROG(on_lookup)
{
struct task_struct *task = bpf_get_current_task_btf();
bpf_cgrp_storage_delete(&map_a, task->cgroups->dfl_cgrp);
bpf_cgrp_storage_delete(&map_b, task->cgroups->dfl_cgrp);
return 0;
}
SEC("fentry/bpf_local_storage_update")
int BPF_PROG(on_update)
{
struct task_struct *task = bpf_get_current_task_btf();
long *ptr;
ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr)
*ptr += 1;
ptr = bpf_cgrp_storage_get(&map_b, task->cgroups->dfl_cgrp, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr)
*ptr += 1;
return 0;
}
SEC("tp_btf/sys_enter")
int BPF_PROG(on_enter, struct pt_regs *regs, long id)
{
struct task_struct *task;
long *ptr;
task = bpf_get_current_task_btf();
ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr)
*ptr = 200;
ptr = bpf_cgrp_storage_get(&map_b, task->cgroups->dfl_cgrp, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr)
*ptr = 100;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper tests for struct packing determination.
*
* Copyright (c) 2019 Facebook
*/
/* ----- START-EXPECTED-OUTPUT ----- */
struct packed_trailing_space {
int a;
short b;
} __attribute__((packed));
struct non_packed_trailing_space {
int a;
short b;
};
struct packed_fields {
short a;
int b;
} __attribute__((packed));
struct non_packed_fields {
short a;
int b;
};
struct nested_packed {
char: 4;
int a: 4;
long b;
struct {
char c;
int d;
} __attribute__((packed)) e;
} __attribute__((packed));
union union_is_never_packed {
int a: 4;
char b;
char c: 1;
};
union union_does_not_need_packing {
struct {
long a;
int b;
} __attribute__((packed));
int c;
};
union jump_code_union {
char code[5];
struct {
char jump;
int offset;
} __attribute__((packed));
};
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct nested_packed_but_aligned_struct {
* int x1;
* int x2;
*};
*
*struct outer_implicitly_packed_struct {
* char y1;
* struct nested_packed_but_aligned_struct y2;
*} __attribute__((packed));
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct nested_packed_but_aligned_struct {
int x1;
int x2;
} __attribute__((packed));
struct outer_implicitly_packed_struct {
char y1;
struct nested_packed_but_aligned_struct y2;
};
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct usb_ss_ep_comp_descriptor {
* char: 8;
* char bDescriptorType;
* char bMaxBurst;
* short wBytesPerInterval;
*};
*
*struct usb_host_endpoint {
* long: 64;
* char: 8;
* struct usb_ss_ep_comp_descriptor ss_ep_comp;
* long: 0;
*} __attribute__((packed));
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct usb_ss_ep_comp_descriptor {
char: 8;
char bDescriptorType;
char bMaxBurst;
int: 0;
short wBytesPerInterval;
} __attribute__((packed));
struct usb_host_endpoint {
long: 64;
char: 8;
struct usb_ss_ep_comp_descriptor ss_ep_comp;
long: 0;
};
/* ----- START-EXPECTED-OUTPUT ----- */
struct nested_packed_struct {
int a;
char b;
} __attribute__((packed));
struct outer_nonpacked_struct {
short a;
struct nested_packed_struct b;
};
struct outer_packed_struct {
short a;
struct nested_packed_struct b;
} __attribute__((packed));
/* ------ END-EXPECTED-OUTPUT ------ */
int f(struct {
struct packed_trailing_space _1;
struct non_packed_trailing_space _2;
struct packed_fields _3;
struct non_packed_fields _4;
struct nested_packed _5;
union union_is_never_packed _6;
union union_does_not_need_packing _7;
union jump_code_union _8;
struct outer_implicitly_packed_struct _9;
struct usb_host_endpoint _10;
struct outer_nonpacked_struct _11;
struct outer_packed_struct _12;
} *_)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Isovalent, Inc. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct inner {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, int);
__uint(max_entries, 4);
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 0); /* This will make map creation fail */
__type(key, __u32);
__array(values, struct inner);
} mim SEC(".maps");
SEC("xdp")
int xdp_noop0(struct xdp_md *ctx)
{
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c |
#include "core_reloc_types.h"
void f(struct core_reloc_flavors x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_flavors.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta */
#include <stddef.h>
#include <string.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_iptunnel_common.h"
#include "bpf_kfuncs.h"
const size_t tcphdr_sz = sizeof(struct tcphdr);
const size_t udphdr_sz = sizeof(struct udphdr);
const size_t ethhdr_sz = sizeof(struct ethhdr);
const size_t iphdr_sz = sizeof(struct iphdr);
const size_t ipv6hdr_sz = sizeof(struct ipv6hdr);
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 256);
__type(key, __u32);
__type(value, __u64);
} rxcnt SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, MAX_IPTNL_ENTRIES);
__type(key, struct vip);
__type(value, struct iptnl_info);
} vip2tnl SEC(".maps");
static __always_inline void count_tx(__u32 protocol)
{
__u64 *rxcnt_count;
rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol);
if (rxcnt_count)
*rxcnt_count += 1;
}
static __always_inline int get_dport(void *trans_data, __u8 protocol)
{
struct tcphdr *th;
struct udphdr *uh;
switch (protocol) {
case IPPROTO_TCP:
th = (struct tcphdr *)trans_data;
return th->dest;
case IPPROTO_UDP:
uh = (struct udphdr *)trans_data;
return uh->dest;
default:
return 0;
}
}
static __always_inline void set_ethhdr(struct ethhdr *new_eth,
const struct ethhdr *old_eth,
const struct iptnl_info *tnl,
__be16 h_proto)
{
memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source));
memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest));
new_eth->h_proto = h_proto;
}
static __always_inline int handle_ipv4(struct xdp_md *xdp, struct bpf_dynptr *xdp_ptr)
{
__u8 eth_buffer[ethhdr_sz + iphdr_sz + ethhdr_sz];
__u8 iph_buffer_tcp[iphdr_sz + tcphdr_sz];
__u8 iph_buffer_udp[iphdr_sz + udphdr_sz];
struct bpf_dynptr new_xdp_ptr;
struct iptnl_info *tnl;
struct ethhdr *new_eth;
struct ethhdr *old_eth;
struct iphdr *iph;
__u16 *next_iph;
__u16 payload_len;
struct vip vip = {};
int dport;
__u32 csum = 0;
int i;
__builtin_memset(eth_buffer, 0, sizeof(eth_buffer));
__builtin_memset(iph_buffer_tcp, 0, sizeof(iph_buffer_tcp));
__builtin_memset(iph_buffer_udp, 0, sizeof(iph_buffer_udp));
if (ethhdr_sz + iphdr_sz + tcphdr_sz > xdp->data_end - xdp->data)
iph = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, iph_buffer_udp, sizeof(iph_buffer_udp));
else
iph = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, iph_buffer_tcp, sizeof(iph_buffer_tcp));
if (!iph)
return XDP_DROP;
dport = get_dport(iph + 1, iph->protocol);
if (dport == -1)
return XDP_DROP;
vip.protocol = iph->protocol;
vip.family = AF_INET;
vip.daddr.v4 = iph->daddr;
vip.dport = dport;
payload_len = bpf_ntohs(iph->tot_len);
tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
/* It only does v4-in-v4 */
if (!tnl || tnl->family != AF_INET)
return XDP_PASS;
if (bpf_xdp_adjust_head(xdp, 0 - (int)iphdr_sz))
return XDP_DROP;
bpf_dynptr_from_xdp(xdp, 0, &new_xdp_ptr);
new_eth = bpf_dynptr_slice_rdwr(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer));
if (!new_eth)
return XDP_DROP;
iph = (struct iphdr *)(new_eth + 1);
old_eth = (struct ethhdr *)(iph + 1);
set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IP));
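	/* bpf_dynptr_slice_rdwr() returns a pointer straight into the packet
	 * when the requested bytes are linear, otherwise it copies them into
	 * the supplied buffer; only in the buffer case does the modified data
	 * need to be written back below.
	 */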
if (new_eth == eth_buffer)
bpf_dynptr_write(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer), 0);
iph->version = 4;
iph->ihl = iphdr_sz >> 2;
iph->frag_off = 0;
iph->protocol = IPPROTO_IPIP;
iph->check = 0;
iph->tos = 0;
iph->tot_len = bpf_htons(payload_len + iphdr_sz);
iph->daddr = tnl->daddr.v4;
iph->saddr = tnl->saddr.v4;
iph->ttl = 8;
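	/* Sum the ten 16-bit words of the header (check is still 0), then
	 * fold the carries back in and invert to form the IPv4 header
	 * checksum.
	 */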
next_iph = (__u16 *)iph;
for (i = 0; i < iphdr_sz >> 1; i++)
csum += *next_iph++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
count_tx(vip.protocol);
return XDP_TX;
}
static __always_inline int handle_ipv6(struct xdp_md *xdp, struct bpf_dynptr *xdp_ptr)
{
__u8 eth_buffer[ethhdr_sz + ipv6hdr_sz + ethhdr_sz];
__u8 ip6h_buffer_tcp[ipv6hdr_sz + tcphdr_sz];
__u8 ip6h_buffer_udp[ipv6hdr_sz + udphdr_sz];
struct bpf_dynptr new_xdp_ptr;
struct iptnl_info *tnl;
struct ethhdr *new_eth;
struct ethhdr *old_eth;
struct ipv6hdr *ip6h;
__u16 payload_len;
struct vip vip = {};
int dport;
__builtin_memset(eth_buffer, 0, sizeof(eth_buffer));
__builtin_memset(ip6h_buffer_tcp, 0, sizeof(ip6h_buffer_tcp));
__builtin_memset(ip6h_buffer_udp, 0, sizeof(ip6h_buffer_udp));
if (ethhdr_sz + iphdr_sz + tcphdr_sz > xdp->data_end - xdp->data)
ip6h = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, ip6h_buffer_udp, sizeof(ip6h_buffer_udp));
else
ip6h = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, ip6h_buffer_tcp, sizeof(ip6h_buffer_tcp));
if (!ip6h)
return XDP_DROP;
dport = get_dport(ip6h + 1, ip6h->nexthdr);
if (dport == -1)
return XDP_DROP;
vip.protocol = ip6h->nexthdr;
vip.family = AF_INET6;
memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr));
vip.dport = dport;
payload_len = ip6h->payload_len;
tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
/* It only does v6-in-v6 */
if (!tnl || tnl->family != AF_INET6)
return XDP_PASS;
if (bpf_xdp_adjust_head(xdp, 0 - (int)ipv6hdr_sz))
return XDP_DROP;
bpf_dynptr_from_xdp(xdp, 0, &new_xdp_ptr);
new_eth = bpf_dynptr_slice_rdwr(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer));
if (!new_eth)
return XDP_DROP;
ip6h = (struct ipv6hdr *)(new_eth + 1);
old_eth = (struct ethhdr *)(ip6h + 1);
set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IPV6));
if (new_eth == eth_buffer)
bpf_dynptr_write(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer), 0);
ip6h->version = 6;
ip6h->priority = 0;
memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
ip6h->payload_len = bpf_htons(bpf_ntohs(payload_len) + ipv6hdr_sz);
ip6h->nexthdr = IPPROTO_IPV6;
ip6h->hop_limit = 8;
memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6));
memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6));
count_tx(vip.protocol);
return XDP_TX;
}
SEC("xdp")
int _xdp_tx_iptunnel(struct xdp_md *xdp)
{
__u8 buffer[ethhdr_sz];
struct bpf_dynptr ptr;
struct ethhdr *eth;
__u16 h_proto;
__builtin_memset(buffer, 0, sizeof(buffer));
bpf_dynptr_from_xdp(xdp, 0, &ptr);
eth = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
if (!eth)
return XDP_DROP;
h_proto = eth->h_proto;
if (h_proto == bpf_htons(ETH_P_IP))
return handle_ipv4(xdp, &ptr);
else if (h_proto == bpf_htons(ETH_P_IPV6))
return handle_ipv6(xdp, &ptr);
else
return XDP_DROP;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_dynptr.c |
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
void *data_end, __u16 eth_proto,
bool *ipv4)
{
struct bpf_sock_tuple *result;
__u64 ihl_len = 0;
__u8 proto = 0;
if (eth_proto == bpf_htons(ETH_P_IP)) {
struct iphdr *iph = (struct iphdr *)(data + nh_off);
if (iph + 1 > data_end)
return NULL;
ihl_len = iph->ihl * 4;
proto = iph->protocol;
*ipv4 = true;
result = (struct bpf_sock_tuple *)&iph->saddr;
} else if (eth_proto == bpf_htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + nh_off);
if (ip6h + 1 > data_end)
return NULL;
ihl_len = sizeof(*ip6h);
proto = ip6h->nexthdr;
		*ipv4 = false;
result = (struct bpf_sock_tuple *)&ip6h->saddr;
}
if (data + nh_off + ihl_len > data_end || proto != IPPROTO_TCP)
return NULL;
return result;
}
SEC("?tc")
int sk_lookup_success(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
struct ethhdr *eth = (struct ethhdr *)(data);
struct bpf_sock_tuple *tuple;
struct bpf_sock *sk;
size_t tuple_len;
bool ipv4;
if (eth + 1 > data_end)
return TC_ACT_SHOT;
tuple = get_tuple(data, sizeof(*eth), data_end, eth->h_proto, &ipv4);
if (!tuple || tuple + sizeof *tuple > data_end)
return TC_ACT_SHOT;
tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
bpf_printk("sk=%d\n", sk ? 1 : 0);
if (sk)
bpf_sk_release(sk);
return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
}
SEC("?tc")
int sk_lookup_success_simple(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
if (sk)
bpf_sk_release(sk);
return 0;
}
SEC("?tc")
int err_use_after_free(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
__u32 family = 0;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
if (sk) {
bpf_sk_release(sk);
family = sk->family;
}
return family;
}
SEC("?tc")
int err_modify_sk_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
if (sk) {
sk += 1;
bpf_sk_release(sk);
}
return 0;
}
SEC("?tc")
int err_modify_sk_or_null_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
sk += 1;
if (sk)
bpf_sk_release(sk);
return 0;
}
SEC("?tc")
int err_no_release(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
return 0;
}
SEC("?tc")
int err_release_twice(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
bpf_sk_release(sk);
bpf_sk_release(sk);
return 0;
}
SEC("?tc")
int err_release_unchecked(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
bpf_sk_release(sk);
return 0;
}
void lookup_no_release(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
}
SEC("?tc")
int err_no_release_subcall(struct __sk_buff *skb)
{
lookup_no_release(skb);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
int probe_res;
char input[4] = {};
int test_pid;
SEC("tracepoint/syscalls/sys_enter_nanosleep")
int probe(void *ctx)
{
/* This BPF program performs variable-offset reads and writes on a
* stack-allocated buffer.
*/
char stack_buf[16];
unsigned long len;
unsigned long last;
if ((bpf_get_current_pid_tgid() >> 32) != test_pid)
return 0;
/* Copy the input to the stack. */
__builtin_memcpy(stack_buf, input, 4);
/* The first byte in the buffer indicates the length. */
len = stack_buf[0] & 0xf;
last = (len - 1) & 0xf;
/* Append something to the buffer. The offset where we write is not
* statically known; this is a variable-offset stack write.
*/
stack_buf[len] = 42;
/* Index into the buffer at an unknown offset. This is a
* variable-offset stack read.
*
* Note that if it wasn't for the preceding variable-offset write, this
* read would be rejected because the stack slot cannot be verified as
* being initialized. With the preceding variable-offset write, the
* stack slot still cannot be verified, but the write inhibits the
* respective check on the reasoning that, if there was a
	 * variable-offset write to a higher-or-equal spot, we're probably reading
* what we just wrote.
*/
probe_res = stack_buf[last];
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_stack_var_off.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Isovalent, Inc.
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 2);
__type(key, __u32);
__type(value, __u64);
} sock_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, 2);
__type(key, __u32);
__type(value, __u64);
} sock_hash SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, __u32);
__type(value, __u64);
} socket_storage SEC(".maps");
SEC("sk_msg")
int prog_msg_verdict(struct sk_msg_md *msg)
{
struct task_struct *task = (struct task_struct *)bpf_get_current_task();
int verdict = SK_PASS;
__u32 pid, tpid;
__u64 *sk_stg;
pid = bpf_get_current_pid_tgid() >> 32;
sk_stg = bpf_sk_storage_get(&socket_storage, msg->sk, 0, BPF_SK_STORAGE_GET_F_CREATE);
if (!sk_stg)
return SK_DROP;
*sk_stg = pid;
	bpf_probe_read_kernel(&tpid, sizeof(tpid), &task->tgid);
if (pid != tpid)
verdict = SK_DROP;
bpf_sk_storage_delete(&socket_storage, (void *)msg->sk);
return verdict;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/usdt.bpf.h>
int urand_pid;
int urand_read_without_sema_call_cnt;
int urand_read_without_sema_buf_sz_sum;
SEC("usdt/./urandom_read:urand:read_without_sema")
int BPF_USDT(urand_read_without_sema, int iter_num, int iter_cnt, int buf_sz)
{
if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
__sync_fetch_and_add(&urand_read_without_sema_call_cnt, 1);
__sync_fetch_and_add(&urand_read_without_sema_buf_sz_sum, buf_sz);
return 0;
}
int urand_read_with_sema_call_cnt;
int urand_read_with_sema_buf_sz_sum;
SEC("usdt/./urandom_read:urand:read_with_sema")
int BPF_USDT(urand_read_with_sema, int iter_num, int iter_cnt, int buf_sz)
{
if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
__sync_fetch_and_add(&urand_read_with_sema_call_cnt, 1);
__sync_fetch_and_add(&urand_read_with_sema_buf_sz_sum, buf_sz);
return 0;
}
int urandlib_read_without_sema_call_cnt;
int urandlib_read_without_sema_buf_sz_sum;
SEC("usdt/./liburandom_read.so:urandlib:read_without_sema")
int BPF_USDT(urandlib_read_without_sema, int iter_num, int iter_cnt, int buf_sz)
{
if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
__sync_fetch_and_add(&urandlib_read_without_sema_call_cnt, 1);
__sync_fetch_and_add(&urandlib_read_without_sema_buf_sz_sum, buf_sz);
return 0;
}
int urandlib_read_with_sema_call_cnt;
int urandlib_read_with_sema_buf_sz_sum;
SEC("usdt/./liburandom_read.so:urandlib:read_with_sema")
int BPF_USDT(urandlib_read_with_sema, int iter_num, int iter_cnt, int buf_sz)
{
if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
__sync_fetch_and_add(&urandlib_read_with_sema_call_cnt, 1);
__sync_fetch_and_add(&urandlib_read_with_sema_buf_sz_sum, buf_sz);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_urandom_usdt.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
#define AF_INET6 10
struct socket_cookie {
__u64 cookie_key;
__u32 cookie_value;
};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct socket_cookie);
} socket_cookies SEC(".maps");
/*
* These three programs get executed in a row on connect() syscalls. The
* userspace side of the test creates a client socket, issues a connect() on it
* and then checks that the local storage associated with this socket has:
* cookie_value == local_port << 8 | 0xFF
* The different parts of this cookie_value are appended by those hooks if they
* all agree on the output of bpf_get_socket_cookie().
*/
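/* Concretely: set_cookie() stores 0x0F, update_cookie_sockops() ORs in
 * local_port << 8, and update_cookie_tracing() ORs in 0xF0, which combine
 * into the expected (local_port << 8) | 0xFF.
 */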
SEC("cgroup/connect6")
int set_cookie(struct bpf_sock_addr *ctx)
{
struct socket_cookie *p;
if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
return 1;
p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!p)
return 1;
p->cookie_value = 0xF;
p->cookie_key = bpf_get_socket_cookie(ctx);
return 1;
}
SEC("sockops")
int update_cookie_sockops(struct bpf_sock_ops *ctx)
{
struct bpf_sock *sk = ctx->sk;
struct socket_cookie *p;
if (ctx->family != AF_INET6)
return 1;
if (ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB)
return 1;
if (!sk)
return 1;
p = bpf_sk_storage_get(&socket_cookies, sk, 0, 0);
if (!p)
return 1;
if (p->cookie_key != bpf_get_socket_cookie(ctx))
return 1;
p->cookie_value |= (ctx->local_port << 8);
return 1;
}
SEC("fexit/inet_stream_connect")
int BPF_PROG(update_cookie_tracing, struct socket *sock,
struct sockaddr *uaddr, int addr_len, int flags)
{
struct socket_cookie *p;
if (uaddr->sa_family != AF_INET6)
return 0;
p = bpf_sk_storage_get(&socket_cookies, sock->sk, 0, 0);
if (!p)
return 0;
if (p->cookie_key != bpf_get_socket_cookie(sock->sk))
return 0;
p->cookie_value |= 0xF0;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/socket_cookie_prog.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
/* weak and shared between both files */
const volatile int my_tid __weak;
long syscall_id __weak;
int output_val2;
int output_ctx2;
int output_weak2; /* should stay zero */
/* same "subprog" name in all files, but it's ok because they all are static */
static __noinline int subprog(int x)
{
/* but different formula */
return x * 2;
}
/* Global functions can't be void */
int set_output_val2(int x)
{
output_val2 = 2 * x + 2 * subprog(x);
return 2 * x;
}
/* This function can't be verified as global, as it assumes raw_tp/sys_enter
* context and accesses syscall id (second argument). So we mark it as
* __hidden, so that libbpf will mark it as static in the final object file,
* right before verifying it in the kernel.
*
* But we don't mark it as __hidden here, rather at extern site. __hidden is
* "contaminating" visibility, so it will get propagated from either extern or
* actual definition (including from the losing __weak definition).
*/
void set_output_ctx2(__u64 *ctx)
{
output_ctx2 = ctx[1]; /* long id, same as in BPF_PROG below */
}
/* this weak instance should lose, because it will be processed second */
__weak int set_output_weak(int x)
{
static volatile int whatever;
/* make sure we use CO-RE relocations in a weak function, this used to
* cause problems for BPF static linker
*/
whatever = 2 * bpf_core_type_size(struct task_struct);
__sink(whatever);
output_weak2 = x;
return 2 * x;
}
extern int set_output_val1(int x);
/* here we'll force set_output_ctx1() to be __hidden in the final obj file */
__hidden extern void set_output_ctx1(__u64 *ctx);
SEC("?raw_tp/sys_enter")
int BPF_PROG(handler2, struct pt_regs *regs, long id)
{
static volatile int whatever;
if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
return 0;
/* make sure we have CO-RE relocations in main program */
whatever = bpf_core_type_size(struct task_struct);
__sink(whatever);
set_output_val1(2000);
set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */
/* keep input value the same across both files to avoid dependency on
* handler call order; differentiate by output_weak1 vs output_weak2.
*/
set_output_weak(42);
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/linked_funcs2.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
static __always_inline int bind_prog(struct bpf_sock_addr *ctx, int family)
{
struct bpf_sock *sk;
sk = ctx->sk;
if (!sk)
return 0;
if (sk->family != family)
return 0;
if (ctx->type != SOCK_STREAM)
return 0;
	/* Return 1 (allow) with the second bit (value 2) also set to
	 * indicate that the CAP_NET_BIND_SERVICE check should be bypassed
	 * for this bind.
	 */
if (ctx->user_port == bpf_htons(111))
return (1 | 2);
return 1;
}
SEC("cgroup/bind4")
int bind_v4_prog(struct bpf_sock_addr *ctx)
{
return bind_prog(ctx, AF_INET);
}
SEC("cgroup/bind6")
int bind_v6_prog(struct bpf_sock_addr *ctx)
{
return bind_prog(ctx, AF_INET6);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/bind_perm.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "../bpf_experimental.h"
#include "bpf_misc.h"
struct node_data {
long key;
long data;
struct bpf_rb_node node;
};
struct map_value {
struct node_data __kptr *node;
};
struct node_data2 {
long key[4];
};
/* This is necessary so that LLVM generates BTF for node_data struct
* If it's not included, a fwd reference for node_data will be generated but
* no struct. Example BTF of "node" field in map_value when not included:
*
* [10] PTR '(anon)' type_id=35
* [34] FWD 'node_data' fwd_kind=struct
* [35] TYPE_TAG 'kptr_ref' type_id=34
*/
struct node_data *just_here_because_btf_bug;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 2);
} some_nodes SEC(".maps");
SEC("tc")
__failure __msg("invalid kptr access, R2 type=ptr_node_data2 expected=ptr_node_data")
long stash_rb_nodes(void *ctx)
{
struct map_value *mapval;
struct node_data2 *res;
int idx = 0;
mapval = bpf_map_lookup_elem(&some_nodes, &idx);
if (!mapval)
return 1;
res = bpf_obj_new(typeof(*res));
if (!res)
return 1;
res->key[0] = 40;
res = bpf_kptr_xchg(&mapval->node, res);
if (res)
bpf_obj_drop(res);
return 0;
}
SEC("tc")
__failure __msg("R1 must have zero offset when passed to release func")
long drop_rb_node_off(void *ctx)
{
struct map_value *mapval;
struct node_data *res;
int idx = 0;
mapval = bpf_map_lookup_elem(&some_nodes, &idx);
if (!mapval)
return 1;
res = bpf_obj_new(typeof(*res));
if (!res)
return 1;
/* Try releasing with graph node offset */
bpf_obj_drop(&res->node);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Oracle and/or its affiliates. */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <errno.h>
char _license[] SEC("license") = "GPL";
long tasks = 0;
long seq_err = 0;
bool skip = false;
SEC("iter/task")
int dump_task_struct(struct bpf_iter__task *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct task_struct *task = ctx->task;
static struct btf_ptr ptr = { };
long ret;
#if __has_builtin(__builtin_btf_type_id)
ptr.type_id = bpf_core_type_id_kernel(struct task_struct);
ptr.ptr = task;
if (ctx->meta->seq_num == 0)
BPF_SEQ_PRINTF(seq, "Raw BTF task\n");
ret = bpf_seq_printf_btf(seq, &ptr, sizeof(ptr), 0);
switch (ret) {
case 0:
tasks++;
break;
case -ERANGE:
/* NULL task or task->fs, don't count it as an error. */
break;
case -E2BIG:
return 1;
default:
seq_err = ret;
break;
}
#else
skip = true;
#endif
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022. Huawei Technologies Co., Ltd */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} htab SEC(".maps");
int pid = 0;
int update_err = 0;
SEC("?fentry/lookup_elem_raw")
int lookup_elem_raw(void *ctx)
{
__u32 key = 0, value = 1;
if ((bpf_get_current_pid_tgid() >> 32) != pid)
return 0;
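	/* This runs from an fentry program attached to lookup_elem_raw(), so
	 * the update below re-enters the hash map code; it is expected to be
	 * rejected, with the resulting errno reported via update_err.
	 */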
update_err = bpf_map_update_elem(&htab, &key, &value, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/htab_update.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <errno.h>
char _license[] SEC("license") = "GPL";
__u64 test1_result = 0;
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(test1)
{
__u64 cnt = bpf_get_func_arg_cnt(ctx);
__u64 a = 0, z = 0, ret = 0;
__s64 err;
test1_result = cnt == 1;
/* valid arguments */
err = bpf_get_func_arg(ctx, 0, &a);
/* We need to cast access to traced function argument values with
* proper type cast, because trampoline uses type specific instruction
* to save it, like for 'int a' with 32-bit mov like:
*
* mov %edi,-0x8(%rbp)
*
* so the upper 4 bytes are not zeroed.
*/
test1_result &= err == 0 && ((int) a == 1);
/* not valid argument */
err = bpf_get_func_arg(ctx, 1, &z);
test1_result &= err == -EINVAL;
/* return value fails in fentry */
err = bpf_get_func_ret(ctx, &ret);
test1_result &= err == -EOPNOTSUPP;
return 0;
}
__u64 test2_result = 0;
SEC("fexit/bpf_fentry_test2")
int BPF_PROG(test2)
{
__u64 cnt = bpf_get_func_arg_cnt(ctx);
__u64 a = 0, b = 0, z = 0, ret = 0;
__s64 err;
test2_result = cnt == 2;
/* valid arguments */
err = bpf_get_func_arg(ctx, 0, &a);
test2_result &= err == 0 && (int) a == 2;
err = bpf_get_func_arg(ctx, 1, &b);
test2_result &= err == 0 && b == 3;
/* not valid argument */
err = bpf_get_func_arg(ctx, 2, &z);
test2_result &= err == -EINVAL;
/* return value */
err = bpf_get_func_ret(ctx, &ret);
test2_result &= err == 0 && ret == 5;
return 0;
}
__u64 test3_result = 0;
SEC("fmod_ret/bpf_modify_return_test")
int BPF_PROG(fmod_ret_test, int _a, int *_b, int _ret)
{
__u64 cnt = bpf_get_func_arg_cnt(ctx);
__u64 a = 0, b = 0, z = 0, ret = 0;
__s64 err;
test3_result = cnt == 2;
/* valid arguments */
err = bpf_get_func_arg(ctx, 0, &a);
test3_result &= err == 0 && ((int) a == 1);
err = bpf_get_func_arg(ctx, 1, &b);
test3_result &= err == 0 && ((int *) b == _b);
/* not valid argument */
err = bpf_get_func_arg(ctx, 2, &z);
test3_result &= err == -EINVAL;
/* return value */
err = bpf_get_func_ret(ctx, &ret);
test3_result &= err == 0 && ret == 0;
/* change return value, it's checked in fexit_test program */
return 1234;
}
__u64 test4_result = 0;
SEC("fexit/bpf_modify_return_test")
int BPF_PROG(fexit_test, int _a, int *_b, int _ret)
{
__u64 cnt = bpf_get_func_arg_cnt(ctx);
__u64 a = 0, b = 0, z = 0, ret = 0;
__s64 err;
test4_result = cnt == 2;
/* valid arguments */
err = bpf_get_func_arg(ctx, 0, &a);
test4_result &= err == 0 && ((int) a == 1);
err = bpf_get_func_arg(ctx, 1, &b);
test4_result &= err == 0 && ((int *) b == _b);
/* not valid argument */
err = bpf_get_func_arg(ctx, 2, &z);
test4_result &= err == -EINVAL;
/* return value */
err = bpf_get_func_ret(ctx, &ret);
test4_result &= err == 0 && ret == 1234;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/get_func_args_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
SEC("socket")
__description("BSWAP, 16")
__success __success_unpriv __retval(0x23ff)
__naked void bswap_16(void)
{
asm volatile (" \
r0 = 0xff23; \
r0 = bswap16 r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("BSWAP, 32")
__success __success_unpriv __retval(0x23ff0000)
__naked void bswap_32(void)
{
asm volatile (" \
r0 = 0xff23; \
r0 = bswap32 r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("BSWAP, 64")
__success __success_unpriv __retval(0x34ff12ff)
__naked void bswap_64(void)
{
asm volatile (" \
r0 = %[u64_val] ll; \
r0 = bswap64 r0; \
exit; \
" :
: [u64_val]"i"(0xff12ff34ff56ff78ull)
: __clobber_all);
}
#else
SEC("socket")
__description("cpuv4 is not supported by compiler or jit, use a dummy test")
__success
int dummy_test(void)
{
return 0;
}
#endif
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_bswap.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Facebook */
#include <string.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "bpf_kfuncs.h"
#include "errno.h"
char _license[] SEC("license") = "GPL";
int pid, err, val;
struct sample {
int pid;
int seq;
long value;
char comm[16];
};
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 4096);
} ringbuf SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} array_map SEC(".maps");
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_read_write(void *ctx)
{
char write_data[64] = "hello there, world!!";
char read_data[64] = {};
struct bpf_dynptr ptr;
int i;
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(write_data), 0, &ptr);
/* Write data into the dynptr */
err = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
/* Read the data that was written into the dynptr */
err = err ?: bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
/* Ensure the data we read matches the data we wrote */
for (i = 0; i < sizeof(read_data); i++) {
if (read_data[i] != write_data[i]) {
err = 1;
break;
}
}
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_dynptr_data(void *ctx)
{
__u32 key = 0, val = 235, *map_val;
struct bpf_dynptr ptr;
__u32 map_val_size;
void *data;
map_val_size = sizeof(*map_val);
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
bpf_map_update_elem(&array_map, &key, &val, 0);
map_val = bpf_map_lookup_elem(&array_map, &key);
if (!map_val) {
err = 1;
return 0;
}
bpf_dynptr_from_mem(map_val, map_val_size, 0, &ptr);
/* Try getting a data slice that is out of range */
data = bpf_dynptr_data(&ptr, map_val_size + 1, 1);
if (data) {
err = 2;
return 0;
}
/* Try getting more bytes than available */
data = bpf_dynptr_data(&ptr, 0, map_val_size + 1);
if (data) {
err = 3;
return 0;
}
data = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
if (!data) {
err = 4;
return 0;
}
*(__u32 *)data = 999;
err = bpf_probe_read_kernel(&val, sizeof(val), data);
if (err)
return 0;
if (val != *(int *)data)
err = 5;
return 0;
}
static int ringbuf_callback(__u32 index, void *data)
{
struct sample *sample;
struct bpf_dynptr *ptr = (struct bpf_dynptr *)data;
sample = bpf_dynptr_data(ptr, 0, sizeof(*sample));
if (!sample)
err = 2;
else
sample->pid += index;
return 0;
}
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_ringbuf(void *ctx)
{
struct bpf_dynptr ptr;
struct sample *sample;
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
val = 100;
	/* check that a dynamically sized reservation works */
err = bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
sample = err ? NULL : bpf_dynptr_data(&ptr, 0, sizeof(*sample));
if (!sample) {
err = 1;
goto done;
}
sample->pid = 10;
/* Can pass dynptr to callback functions */
bpf_loop(10, ringbuf_callback, &ptr, 0);
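	/* ringbuf_callback() adds the loop index to sample->pid, so after 10
	 * iterations pid should be 10 + (0 + 1 + ... + 9) = 55.
	 */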
if (sample->pid != 55)
err = 2;
done:
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
SEC("?cgroup_skb/egress")
int test_skb_readonly(struct __sk_buff *skb)
{
__u8 write_data[2] = {1, 2};
struct bpf_dynptr ptr;
int ret;
if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
err = 1;
return 1;
}
/* since cgroup skbs are read only, writes should fail */
ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
if (ret != -EINVAL) {
err = 2;
return 1;
}
return 1;
}
SEC("?cgroup_skb/egress")
int test_dynptr_skb_data(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
__u64 *data;
if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
err = 1;
return 1;
}
/* This should return NULL. Must use bpf_dynptr_slice API */
data = bpf_dynptr_data(&ptr, 0, 1);
if (data) {
err = 2;
return 1;
}
return 1;
}
SEC("tp/syscalls/sys_enter_nanosleep")
int test_adjust(void *ctx)
{
struct bpf_dynptr ptr;
__u32 bytes = 64;
__u32 off = 10;
__u32 trim = 15;
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
err = bpf_ringbuf_reserve_dynptr(&ringbuf, bytes, 0, &ptr);
if (err) {
err = 1;
goto done;
}
if (bpf_dynptr_size(&ptr) != bytes) {
err = 2;
goto done;
}
/* Advance the dynptr by off */
err = bpf_dynptr_adjust(&ptr, off, bpf_dynptr_size(&ptr));
if (err) {
err = 3;
goto done;
}
if (bpf_dynptr_size(&ptr) != bytes - off) {
err = 4;
goto done;
}
/* Trim the dynptr */
err = bpf_dynptr_adjust(&ptr, off, 15);
if (err) {
err = 5;
goto done;
}
/* Check that the size was adjusted correctly */
if (bpf_dynptr_size(&ptr) != trim - off) {
err = 6;
goto done;
}
done:
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
SEC("tp/syscalls/sys_enter_nanosleep")
int test_adjust_err(void *ctx)
{
char write_data[45] = "hello there, world!!";
struct bpf_dynptr ptr;
__u32 size = 64;
__u32 off = 20;
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
err = 1;
goto done;
}
/* Check that start can't be greater than end */
if (bpf_dynptr_adjust(&ptr, 5, 1) != -EINVAL) {
err = 2;
goto done;
}
/* Check that start can't be greater than size */
if (bpf_dynptr_adjust(&ptr, size + 1, size + 1) != -ERANGE) {
err = 3;
goto done;
}
/* Check that end can't be greater than size */
if (bpf_dynptr_adjust(&ptr, 0, size + 1) != -ERANGE) {
err = 4;
goto done;
}
if (bpf_dynptr_adjust(&ptr, off, size)) {
err = 5;
goto done;
}
/* Check that you can't write more bytes than available into the dynptr
* after you've adjusted it
*/
if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
err = 6;
goto done;
}
/* Check that even after adjusting, submitting/discarding
* a ringbuf dynptr works
*/
bpf_ringbuf_submit_dynptr(&ptr, 0);
return 0;
done:
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
SEC("tp/syscalls/sys_enter_nanosleep")
int test_zero_size_dynptr(void *ctx)
{
char write_data = 'x', read_data;
struct bpf_dynptr ptr;
__u32 size = 64;
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
err = 1;
goto done;
}
/* After this, the dynptr has a size of 0 */
if (bpf_dynptr_adjust(&ptr, size, size)) {
err = 2;
goto done;
}
/* Test that reading + writing non-zero bytes is not ok */
if (bpf_dynptr_read(&read_data, sizeof(read_data), &ptr, 0, 0) != -E2BIG) {
err = 3;
goto done;
}
if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
err = 4;
goto done;
}
/* Test that reading + writing 0 bytes from a 0-size dynptr is ok */
if (bpf_dynptr_read(&read_data, 0, &ptr, 0, 0)) {
err = 5;
goto done;
}
if (bpf_dynptr_write(&ptr, 0, &write_data, 0, 0)) {
err = 6;
goto done;
}
err = 0;
done:
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
SEC("tp/syscalls/sys_enter_nanosleep")
int test_dynptr_is_null(void *ctx)
{
struct bpf_dynptr ptr1;
struct bpf_dynptr ptr2;
__u64 size = 4;
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
/* Pass in invalid flags, get back an invalid dynptr */
if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) {
err = 1;
goto exit_early;
}
/* Test that the invalid dynptr is null */
if (!bpf_dynptr_is_null(&ptr1)) {
err = 2;
goto exit_early;
}
/* Get a valid dynptr */
if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) {
err = 3;
goto exit;
}
/* Test that the valid dynptr is not null */
if (bpf_dynptr_is_null(&ptr2)) {
err = 4;
goto exit;
}
exit:
bpf_ringbuf_discard_dynptr(&ptr2, 0);
exit_early:
bpf_ringbuf_discard_dynptr(&ptr1, 0);
return 0;
}
SEC("cgroup_skb/egress")
int test_dynptr_is_rdonly(struct __sk_buff *skb)
{
struct bpf_dynptr ptr1;
struct bpf_dynptr ptr2;
struct bpf_dynptr ptr3;
/* Pass in invalid flags, get back an invalid dynptr */
if (bpf_dynptr_from_skb(skb, 123, &ptr1) != -EINVAL) {
err = 1;
return 0;
}
/* Test that an invalid dynptr is_rdonly returns false */
if (bpf_dynptr_is_rdonly(&ptr1)) {
err = 2;
return 0;
}
/* Get a read-only dynptr */
if (bpf_dynptr_from_skb(skb, 0, &ptr2)) {
err = 3;
return 0;
}
/* Test that the dynptr is read-only */
if (!bpf_dynptr_is_rdonly(&ptr2)) {
err = 4;
return 0;
}
/* Get a read-writeable dynptr */
if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) {
err = 5;
goto done;
}
	/* Test that the dynptr is not read-only */
if (bpf_dynptr_is_rdonly(&ptr3)) {
err = 6;
goto done;
}
done:
bpf_ringbuf_discard_dynptr(&ptr3, 0);
return 0;
}
SEC("cgroup_skb/egress")
int test_dynptr_clone(struct __sk_buff *skb)
{
struct bpf_dynptr ptr1;
struct bpf_dynptr ptr2;
__u32 off = 2, size;
/* Get a dynptr */
if (bpf_dynptr_from_skb(skb, 0, &ptr1)) {
err = 1;
return 0;
}
if (bpf_dynptr_adjust(&ptr1, off, bpf_dynptr_size(&ptr1))) {
err = 2;
return 0;
}
/* Clone the dynptr */
if (bpf_dynptr_clone(&ptr1, &ptr2)) {
err = 3;
return 0;
}
size = bpf_dynptr_size(&ptr1);
/* Check that the clone has the same size and rd-only */
if (bpf_dynptr_size(&ptr2) != size) {
err = 4;
return 0;
}
if (bpf_dynptr_is_rdonly(&ptr2) != bpf_dynptr_is_rdonly(&ptr1)) {
err = 5;
return 0;
}
/* Advance and trim the original dynptr */
bpf_dynptr_adjust(&ptr1, 5, 5);
/* Check that only original dynptr was affected, and the clone wasn't */
if (bpf_dynptr_size(&ptr2) != size) {
err = 6;
return 0;
}
return 0;
}
SEC("?cgroup_skb/egress")
int test_dynptr_skb_no_buff(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
__u64 *data;
if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
err = 1;
return 1;
}
/* This may return NULL. SKB may require a buffer */
data = bpf_dynptr_slice(&ptr, 0, NULL, 1);
return !!data;
}
SEC("?cgroup_skb/egress")
int test_dynptr_skb_strcmp(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
char *data;
if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
err = 1;
return 1;
}
/* This may return NULL. SKB may require a buffer */
data = bpf_dynptr_slice(&ptr, 0, NULL, 10);
if (data) {
bpf_strncmp(data, 10, "foo");
return 1;
}
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/dynptr_success.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
struct bpf_testmod_struct_arg_1 {
int a;
};
struct bpf_testmod_struct_arg_2 {
long a;
long b;
};
struct bpf_testmod_struct_arg_3 {
int a;
int b[];
};
struct bpf_testmod_struct_arg_4 {
u64 a;
int b;
};
long t1_a_a, t1_a_b, t1_b, t1_c, t1_ret, t1_nregs;
__u64 t1_reg0, t1_reg1, t1_reg2, t1_reg3;
long t2_a, t2_b_a, t2_b_b, t2_c, t2_ret;
long t3_a, t3_b, t3_c_a, t3_c_b, t3_ret;
long t4_a_a, t4_b, t4_c, t4_d, t4_e_a, t4_e_b, t4_ret;
long t5_ret;
int t6;
long t7_a, t7_b, t7_c, t7_d, t7_e, t7_f_a, t7_f_b, t7_ret;
long t8_a, t8_b, t8_c, t8_d, t8_e, t8_f_a, t8_f_b, t8_g, t8_ret;
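/* BPF_PROG2 (rather than BPF_PROG) is used below because several traced
 * functions take struct arguments by value, which may span multiple argument
 * slots; BPF_PROG2 takes explicit type/name pairs so such arguments can be
 * reassembled correctly.
 */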
SEC("fentry/bpf_testmod_test_struct_arg_1")
int BPF_PROG2(test_struct_arg_1, struct bpf_testmod_struct_arg_2, a, int, b, int, c)
{
t1_a_a = a.a;
t1_a_b = a.b;
t1_b = b;
t1_c = c;
return 0;
}
SEC("fexit/bpf_testmod_test_struct_arg_1")
int BPF_PROG2(test_struct_arg_2, struct bpf_testmod_struct_arg_2, a, int, b, int, c, int, ret)
{
t1_nregs = bpf_get_func_arg_cnt(ctx);
/* a.a */
bpf_get_func_arg(ctx, 0, &t1_reg0);
/* a.b */
bpf_get_func_arg(ctx, 1, &t1_reg1);
/* b */
bpf_get_func_arg(ctx, 2, &t1_reg2);
t1_reg2 = (int)t1_reg2;
/* c */
bpf_get_func_arg(ctx, 3, &t1_reg3);
t1_reg3 = (int)t1_reg3;
t1_ret = ret;
return 0;
}
SEC("fentry/bpf_testmod_test_struct_arg_2")
int BPF_PROG2(test_struct_arg_3, int, a, struct bpf_testmod_struct_arg_2, b, int, c)
{
t2_a = a;
t2_b_a = b.a;
t2_b_b = b.b;
t2_c = c;
return 0;
}
SEC("fexit/bpf_testmod_test_struct_arg_2")
int BPF_PROG2(test_struct_arg_4, int, a, struct bpf_testmod_struct_arg_2, b, int, c, int, ret)
{
t2_ret = ret;
return 0;
}
SEC("fentry/bpf_testmod_test_struct_arg_3")
int BPF_PROG2(test_struct_arg_5, int, a, int, b, struct bpf_testmod_struct_arg_2, c)
{
t3_a = a;
t3_b = b;
t3_c_a = c.a;
t3_c_b = c.b;
return 0;
}
SEC("fexit/bpf_testmod_test_struct_arg_3")
int BPF_PROG2(test_struct_arg_6, int, a, int, b, struct bpf_testmod_struct_arg_2, c, int, ret)
{
t3_ret = ret;
return 0;
}
SEC("fentry/bpf_testmod_test_struct_arg_4")
int BPF_PROG2(test_struct_arg_7, struct bpf_testmod_struct_arg_1, a, int, b,
int, c, int, d, struct bpf_testmod_struct_arg_2, e)
{
t4_a_a = a.a;
t4_b = b;
t4_c = c;
t4_d = d;
t4_e_a = e.a;
t4_e_b = e.b;
return 0;
}
SEC("fexit/bpf_testmod_test_struct_arg_4")
int BPF_PROG2(test_struct_arg_8, struct bpf_testmod_struct_arg_1, a, int, b,
int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret)
{
t4_ret = ret;
return 0;
}
SEC("fentry/bpf_testmod_test_struct_arg_5")
int BPF_PROG2(test_struct_arg_9)
{
return 0;
}
SEC("fexit/bpf_testmod_test_struct_arg_5")
int BPF_PROG2(test_struct_arg_10, int, ret)
{
t5_ret = ret;
return 0;
}
SEC("fentry/bpf_testmod_test_struct_arg_6")
int BPF_PROG2(test_struct_arg_11, struct bpf_testmod_struct_arg_3 *, a)
{
t6 = a->b[0];
return 0;
}
SEC("fentry/bpf_testmod_test_struct_arg_7")
int BPF_PROG2(test_struct_arg_12, __u64, a, void *, b, short, c, int, d,
void *, e, struct bpf_testmod_struct_arg_4, f)
{
t7_a = a;
t7_b = (long)b;
t7_c = c;
t7_d = d;
t7_e = (long)e;
t7_f_a = f.a;
t7_f_b = f.b;
return 0;
}
SEC("fexit/bpf_testmod_test_struct_arg_7")
int BPF_PROG2(test_struct_arg_13, __u64, a, void *, b, short, c, int, d,
void *, e, struct bpf_testmod_struct_arg_4, f, int, ret)
{
t7_ret = ret;
return 0;
}
SEC("fentry/bpf_testmod_test_struct_arg_8")
int BPF_PROG2(test_struct_arg_14, __u64, a, void *, b, short, c, int, d,
void *, e, struct bpf_testmod_struct_arg_4, f, int, g)
{
t8_a = a;
t8_b = (long)b;
t8_c = c;
t8_d = d;
t8_e = (long)e;
t8_f_a = f.a;
t8_f_b = f.b;
t8_g = g;
return 0;
}
SEC("fexit/bpf_testmod_test_struct_arg_8")
int BPF_PROG2(test_struct_arg_15, __u64, a, void *, b, short, c, int, d,
void *, e, struct bpf_testmod_struct_arg_4, f, int, g,
int, ret)
{
t8_ret = ret;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/tracing_struct.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct hmap_elem {
struct bpf_timer timer;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 64);
__type(key, int);
__type(value, struct hmap_elem);
} hmap SEC(".maps");
__attribute__((noinline))
static int timer_cb(void *map, int *key, struct bpf_timer *timer)
{
volatile char buf[256] = {};
return buf[69];
}
__attribute__((noinline))
static int bad_timer_cb(void *map, int *key, struct bpf_timer *timer)
{
volatile char buf[300] = {};
return buf[255] + timer_cb(NULL, NULL, NULL);
}
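/* Neither callback is meant to run; the oversized stack buffers exist only so
 * the verifier's combined stack-depth check rejects the programs below at load
 * time, as asserted by the __failure/__msg annotations.
 */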
SEC("tc")
__failure __msg("combined stack size of 2 calls is 576. Too large")
int pseudo_call_check(struct __sk_buff *ctx)
{
struct hmap_elem *elem;
volatile char buf[256] = {};
elem = bpf_map_lookup_elem(&hmap, &(int){0});
if (!elem)
return 0;
timer_cb(NULL, NULL, NULL);
return bpf_timer_set_callback(&elem->timer, timer_cb) + buf[0];
}
SEC("tc")
__failure __msg("combined stack size of 2 calls is 608. Too large")
int async_call_root_check(struct __sk_buff *ctx)
{
struct hmap_elem *elem;
volatile char buf[256] = {};
elem = bpf_map_lookup_elem(&hmap, &(int){0});
if (!elem)
return 0;
return bpf_timer_set_callback(&elem->timer, bad_timer_cb) + buf[0];
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/async_stack_depth.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_tracing_net.h"
char _license[] SEC("license") = "GPL";
struct socket_cookie {
__u64 cookie_key;
__u64 cookie_value;
};
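/* cookie_value is assembled in three steps by the programs below: connect6
 * initializes it to 0xF, the sockops TCP_CONNECT_CB hook ORs in the local
 * port shifted left by 8, and the fexit on inet_stream_connect ORs in 0xF0.
 */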
struct {
__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct socket_cookie);
} socket_cookies SEC(".maps");
SEC("cgroup/connect6")
int set_cookie(struct bpf_sock_addr *ctx)
{
struct socket_cookie *p;
struct tcp_sock *tcp_sk;
struct bpf_sock *sk;
if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
return 1;
sk = ctx->sk;
if (!sk)
return 1;
tcp_sk = bpf_skc_to_tcp_sock(sk);
if (!tcp_sk)
return 1;
p = bpf_cgrp_storage_get(&socket_cookies,
tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!p)
return 1;
p->cookie_value = 0xF;
p->cookie_key = bpf_get_socket_cookie(ctx);
return 1;
}
SEC("sockops")
int update_cookie_sockops(struct bpf_sock_ops *ctx)
{
struct socket_cookie *p;
struct tcp_sock *tcp_sk;
struct bpf_sock *sk;
if (ctx->family != AF_INET6 || ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB)
return 1;
sk = ctx->sk;
if (!sk)
return 1;
tcp_sk = bpf_skc_to_tcp_sock(sk);
if (!tcp_sk)
return 1;
p = bpf_cgrp_storage_get(&socket_cookies,
tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0, 0);
if (!p)
return 1;
if (p->cookie_key != bpf_get_socket_cookie(ctx))
return 1;
p->cookie_value |= (ctx->local_port << 8);
return 1;
}
SEC("fexit/inet_stream_connect")
int BPF_PROG(update_cookie_tracing, struct socket *sock,
struct sockaddr *uaddr, int addr_len, int flags)
{
struct socket_cookie *p;
if (uaddr->sa_family != AF_INET6)
return 0;
p = bpf_cgrp_storage_get(&socket_cookies, sock->sk->sk_cgrp_data.cgroup, 0, 0);
if (!p)
return 0;
if (p->cookie_key != bpf_get_socket_cookie(sock->sk))
return 0;
p->cookie_value |= 0xF0;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct S {
int v;
};
struct S global_variable = {};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 7);
__type(key, __u32);
__type(value, int);
} values SEC(".maps");
static void save_value(__u32 index, int value)
{
bpf_map_update_elem(&values, &index, &value, 0);
}
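/* save_value() records one observation per call; test_cls() below bumps
 * 'index' on every call so the values map holds the observations in order,
 * presumably for the user-space part of the test to compare against.
 */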
__noinline int foo(__u32 index, struct S *s)
{
if (s) {
save_value(index, s->v);
return ++s->v;
}
save_value(index, 0);
return 1;
}
__noinline int bar(__u32 index, volatile struct S *s)
{
if (s) {
save_value(index, s->v);
return ++s->v;
}
save_value(index, 0);
return 1;
}
__noinline int baz(struct S **s)
{
if (s)
*s = 0;
return 0;
}
SEC("cgroup_skb/ingress")
int test_cls(struct __sk_buff *skb)
{
__u32 index = 0;
{
const int v = foo(index++, 0);
save_value(index++, v);
}
{
struct S s = { .v = 100 };
foo(index++, &s);
save_value(index++, s.v);
}
{
global_variable.v = 42;
bar(index++, &global_variable);
save_value(index++, global_variable.v);
}
{
struct S v, *p = &v;
baz(&p);
save_value(index++, !p);
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_global_func_args.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
struct core_reloc_existence_output {
int a_exists;
int a_value;
int b_exists;
int b_value;
int c_exists;
int c_value;
int arr_exists;
int arr_value;
int s_exists;
int s_value;
};
struct core_reloc_existence {
struct {
int x;
} s;
int arr[1];
int a;
struct {
int b;
};
int c;
};
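/* For fields that do not exist in the kernel's view of the struct, the
 * corresponding *_value is filled with a distinctive 0xff0000XX marker so the
 * test can tell "field absent" apart from a legitimately read zero.
 */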
SEC("raw_tracepoint/sys_enter")
int test_core_existence(void *ctx)
{
struct core_reloc_existence *in = (void *)&data.in;
struct core_reloc_existence_output *out = (void *)&data.out;
out->a_exists = bpf_core_field_exists(in->a);
if (bpf_core_field_exists(struct core_reloc_existence, a))
out->a_value = BPF_CORE_READ(in, a);
else
out->a_value = 0xff000001u;
out->b_exists = bpf_core_field_exists(in->b);
if (bpf_core_field_exists(struct core_reloc_existence, b))
out->b_value = BPF_CORE_READ(in, b);
else
out->b_value = 0xff000002u;
out->c_exists = bpf_core_field_exists(in->c);
if (bpf_core_field_exists(struct core_reloc_existence, c))
out->c_value = BPF_CORE_READ(in, c);
else
out->c_value = 0xff000003u;
out->arr_exists = bpf_core_field_exists(in->arr);
if (bpf_core_field_exists(struct core_reloc_existence, arr))
out->arr_value = BPF_CORE_READ(in, arr[0]);
else
out->arr_value = 0xff000004u;
out->s_exists = bpf_core_field_exists(in->s);
if (bpf_core_field_exists(struct core_reloc_existence, s))
out->s_value = BPF_CORE_READ(in, s.x);
else
out->s_value = 0xff000005u;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_existence.c |
/* SPDX-License-Identifier: GPL-2.0
* Copyright(c) 2018 Jesper Dangaard Brouer.
*
* XDP/TC VLAN manipulation example
*
* GOTCHA: Remember to disable NIC hardware offloading of VLANs,
* else the VLAN tags are NOT inlined in the packet payload:
*
* # ethtool -K ixgbe2 rxvlan off
*
* Verify setting:
* # ethtool -k ixgbe2 | grep rx-vlan-offload
* rx-vlan-offload: off
*
*/
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
/* linux/if_vlan.h has not exposed this as UAPI, so mirror part of it here
*
* struct vlan_hdr - vlan header
* @h_vlan_TCI: priority and VLAN ID
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct _vlan_hdr {
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT 13
#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
#define VLAN_TAG_PRESENT VLAN_CFI_MASK
#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
#define VLAN_N_VID 4096
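/* Example (for reference only, not used by the programs below): with the
 * masks above, a tag's priority code point could be extracted as
 *	prio = (bpf_ntohs(vlan_hdr->h_vlan_TCI) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 * this file only ever looks at the VID bits.
 */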
struct parse_pkt {
__u16 l3_proto;
__u16 l3_offset;
__u16 vlan_outer;
__u16 vlan_inner;
__u8 vlan_outer_offset;
__u8 vlan_inner_offset;
};
char _license[] SEC("license") = "GPL";
static __always_inline
bool parse_eth_frame(struct ethhdr *eth, void *data_end, struct parse_pkt *pkt)
{
__u16 eth_type;
__u8 offset;
offset = sizeof(*eth);
/* Make sure packet is large enough for parsing eth + 2 VLAN headers */
if ((void *)eth + offset + (2*sizeof(struct _vlan_hdr)) > data_end)
return false;
eth_type = eth->h_proto;
/* Handle outer VLAN tag */
if (eth_type == bpf_htons(ETH_P_8021Q)
|| eth_type == bpf_htons(ETH_P_8021AD)) {
struct _vlan_hdr *vlan_hdr;
vlan_hdr = (void *)eth + offset;
pkt->vlan_outer_offset = offset;
pkt->vlan_outer = bpf_ntohs(vlan_hdr->h_vlan_TCI)
& VLAN_VID_MASK;
eth_type = vlan_hdr->h_vlan_encapsulated_proto;
offset += sizeof(*vlan_hdr);
}
/* Handle inner (double) VLAN tag */
if (eth_type == bpf_htons(ETH_P_8021Q)
|| eth_type == bpf_htons(ETH_P_8021AD)) {
struct _vlan_hdr *vlan_hdr;
vlan_hdr = (void *)eth + offset;
pkt->vlan_inner_offset = offset;
pkt->vlan_inner = bpf_ntohs(vlan_hdr->h_vlan_TCI)
& VLAN_VID_MASK;
eth_type = vlan_hdr->h_vlan_encapsulated_proto;
offset += sizeof(*vlan_hdr);
}
pkt->l3_proto = bpf_ntohs(eth_type); /* Convert to host-byte-order */
pkt->l3_offset = offset;
return true;
}
/* Hint: the VLAN IDs are chosen to expose network-byte-order issues */
#define TESTVLAN 4011 /* 0xFAB */
// #define TO_VLAN 4000 /* 0xFA0 (hint 0x0A0 = 160) */
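/* Worked example of the byte-order pitfall hinted at above (illustrative,
 * not part of the original test): VID 4011 == 0x0FAB travels on the wire as
 * bytes 0x0F 0xAB; a little-endian host that skips bpf_ntohs() would read
 * them back as 0xAB0F == 43791 and never match TESTVLAN.
 */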
SEC("xdp_drop_vlan_4011")
int xdp_prognum0(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct parse_pkt pkt = { 0 };
if (!parse_eth_frame(data, data_end, &pkt))
return XDP_ABORTED;
/* Drop specific VLAN ID example */
if (pkt.vlan_outer == TESTVLAN)
return XDP_ABORTED;
/*
* Using XDP_ABORTED makes it possible to record this event,
* via tracepoint xdp:xdp_exception like:
* # perf record -a -e xdp:xdp_exception
* # perf script
*/
return XDP_PASS;
}
/*
Commands to set up a VLAN on Linux to test that packets get dropped:
export ROOTDEV=ixgbe2
export VLANID=4011
ip link add link $ROOTDEV name $ROOTDEV.$VLANID type vlan id $VLANID
ip link set dev $ROOTDEV.$VLANID up
ip link set dev $ROOTDEV mtu 1508
ip addr add 100.64.40.11/24 dev $ROOTDEV.$VLANID
Load prog with ip tool:
ip link set $ROOTDEV xdp off
ip link set $ROOTDEV xdp object xdp_vlan01_kern.o section xdp_drop_vlan_4011
*/
/* Changing the VLAN ID to zero has the same practical effect as removing the VLAN. */
#define TO_VLAN 0
SEC("xdp_vlan_change")
int xdp_prognum1(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct parse_pkt pkt = { 0 };
if (!parse_eth_frame(data, data_end, &pkt))
return XDP_ABORTED;
/* Change specific VLAN ID */
if (pkt.vlan_outer == TESTVLAN) {
struct _vlan_hdr *vlan_hdr = data + pkt.vlan_outer_offset;
/* Modify the VLAN ID, preserving the top 4 TCI bits (PCP + CFI) */
vlan_hdr->h_vlan_TCI =
bpf_htons((bpf_ntohs(vlan_hdr->h_vlan_TCI) & 0xf000)
| TO_VLAN);
}
return XDP_PASS;
}
/*
 * Show that XDP and TC can cooperate to build a VLAN rewriter.
* 1. Create a XDP prog that can "pop"/remove a VLAN header.
* 2. Create a TC-bpf prog that egress can add a VLAN header.
*/
#ifndef ETH_ALEN /* Ethernet MAC address length */
#define ETH_ALEN 6 /* bytes */
#endif
#define VLAN_HDR_SZ 4 /* bytes */
SEC("xdp_vlan_remove_outer")
int xdp_prognum2(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct parse_pkt pkt = { 0 };
char *dest;
if (!parse_eth_frame(data, data_end, &pkt))
return XDP_ABORTED;
/* Skip packet if no outer VLAN was detected */
if (pkt.vlan_outer_offset == 0)
return XDP_PASS;
/* Move the Ethernet header; dest overlaps with src, which memmove handles */
dest = data;
dest += VLAN_HDR_SZ;
/*
 * Notice: by moving only the two MAC addresses (12 bytes) and leaving the
 * last 2 bytes untouched, vlan_hdr->h_vlan_encapsulated_proto stays in place
 * and takes over as the new ethhdr->h_proto
*/
__builtin_memmove(dest, data, ETH_ALEN * 2);
/* Note: LLVM only inlines the built-in memmove when the size is a compile-time constant */
/* Move start of packet header seen by Linux kernel stack */
bpf_xdp_adjust_head(ctx, VLAN_HDR_SZ);
return XDP_PASS;
}
static __always_inline
void shift_mac_4bytes_32bit(void *data)
{
__u32 *p = data;
/* Assumes a VLAN header is present. The 4 bytes in p[3] that get
 * overwritten are ethhdr->h_proto and vlan_hdr->h_vlan_TCI;
 * vlan_hdr->h_vlan_encapsulated_proto takes over the role of
 * ethhdr->h_proto.
*/
p[3] = p[2];
p[2] = p[1];
p[1] = p[0];
}
SEC("xdp_vlan_remove_outer2")
int xdp_prognum3(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct ethhdr *orig_eth = data;
struct parse_pkt pkt = { 0 };
if (!parse_eth_frame(orig_eth, data_end, &pkt))
return XDP_ABORTED;
/* Skip packet if no outer VLAN was detected */
if (pkt.vlan_outer_offset == 0)
return XDP_PASS;
/* Simply shift down MAC addrs 4 bytes, overwrite h_proto + TCI */
shift_mac_4bytes_32bit(data);
/* Move start of packet header seen by Linux kernel stack */
bpf_xdp_adjust_head(ctx, VLAN_HDR_SZ);
return XDP_PASS;
}
/*=====================================
* BELOW: TC-hook based ebpf programs
* ====================================
 * The TC-clsact eBPF programs (currently) need to be attached via TC commands
*/
SEC("tc_vlan_push")
int _tc_progA(struct __sk_buff *ctx)
{
bpf_skb_vlan_push(ctx, bpf_htons(ETH_P_8021Q), TESTVLAN);
return TC_ACT_OK;
}
/*
Commands to set up TC to use the above bpf prog:
export ROOTDEV=ixgbe2
export FILE=xdp_vlan01_kern.o
# Re-attach clsact to clear/flush existing role
tc qdisc del dev $ROOTDEV clsact 2> /dev/null ;\
tc qdisc add dev $ROOTDEV clsact
# Attach BPF prog EGRESS
tc filter add dev $ROOTDEV egress \
prio 1 handle 1 bpf da obj $FILE sec tc_vlan_push
tc filter show dev $ROOTDEV egress
*/
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_vlan.c |