python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct S {
int x;
};
__noinline int foo(const struct S *s)
{
if (s)
return bpf_get_prandom_u32() < s->x;
return 0;
}
SEC("cgroup_skb/ingress")
__failure __msg("Caller passes invalid args into func#1")
int global_func13(struct __sk_buff *skb)
{
const struct S *s = (const struct S *)(0xbedabeda);
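/* To the verifier, 0xbedabeda is just a scalar, not a valid pointer to
 * struct S, so the call into the global function below is rejected.
 */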
return foo(s);
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func13.c |
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/* Sample program which should always load for testing control paths. */
int func()
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/sample_ret0.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ringbuf.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");
SEC("socket")
__description("ringbuf: invalid reservation offset 1")
__failure __msg("R1 must have zero offset when passed to release func")
__failure_unpriv
__naked void ringbuf_invalid_reservation_offset_1(void)
{
asm volatile (" \
/* reserve 8 byte ringbuf memory */ \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r1 = %[map_ringbuf] ll; \
r2 = 8; \
r3 = 0; \
call %[bpf_ringbuf_reserve]; \
/* store a pointer to the reserved memory in R6 */\
r6 = r0; \
/* check whether the reservation was successful */\
if r0 == 0 goto l0_%=; \
/* spill R6(mem) into the stack */ \
*(u64*)(r10 - 8) = r6; \
/* fill it back in R7 */ \
r7 = *(u64*)(r10 - 8); \
/* should be able to access *(R7) = 0 */ \
r1 = 0; \
*(u64*)(r7 + 0) = r1; \
/* submit the reserved ringbuf memory */ \
r1 = r7; \
/* add invalid offset to reserved ringbuf memory */\
r1 += 0xcafe; \
r2 = 0; \
call %[bpf_ringbuf_submit]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ringbuf_reserve),
__imm(bpf_ringbuf_submit),
__imm_addr(map_ringbuf)
: __clobber_all);
}
SEC("socket")
__description("ringbuf: invalid reservation offset 2")
__failure __msg("R7 min value is outside of the allowed memory range")
__failure_unpriv
__naked void ringbuf_invalid_reservation_offset_2(void)
{
asm volatile (" \
/* reserve 8 byte ringbuf memory */ \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r1 = %[map_ringbuf] ll; \
r2 = 8; \
r3 = 0; \
call %[bpf_ringbuf_reserve]; \
/* store a pointer to the reserved memory in R6 */\
r6 = r0; \
/* check whether the reservation was successful */\
if r0 == 0 goto l0_%=; \
/* spill R6(mem) into the stack */ \
*(u64*)(r10 - 8) = r6; \
/* fill it back in R7 */ \
r7 = *(u64*)(r10 - 8); \
/* add invalid offset to reserved ringbuf memory */\
r7 += 0xcafe; \
/* should be able to access *(R7) = 0 */ \
r1 = 0; \
*(u64*)(r7 + 0) = r1; \
/* submit the reserved ringbuf memory */ \
r1 = r7; \
r2 = 0; \
call %[bpf_ringbuf_submit]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ringbuf_reserve),
__imm(bpf_ringbuf_submit),
__imm_addr(map_ringbuf)
: __clobber_all);
}
SEC("xdp")
__description("ringbuf: check passing rb mem to helpers")
__success __retval(0)
__naked void passing_rb_mem_to_helpers(void)
{
asm volatile (" \
r6 = r1; \
/* reserve 8 byte ringbuf memory */ \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r1 = %[map_ringbuf] ll; \
r2 = 8; \
r3 = 0; \
call %[bpf_ringbuf_reserve]; \
r7 = r0; \
/* check whether the reservation was successful */\
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: /* pass allocated ring buffer memory to fib lookup */\
r1 = r6; \
r2 = r0; \
r3 = 8; \
r4 = 0; \
call %[bpf_fib_lookup]; \
/* submit the ringbuf memory */ \
r1 = r7; \
r2 = 0; \
call %[bpf_ringbuf_submit]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_fib_lookup),
__imm(bpf_ringbuf_reserve),
__imm(bpf_ringbuf_submit),
__imm_addr(map_ringbuf)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_ringbuf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, long);
} map_a SEC(".maps");
SEC("tp_btf/sys_enter")
int BPF_PROG(on_enter, struct pt_regs *regs, long id)
{
struct task_struct *task;
task = bpf_get_current_task_btf();
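/* Negative test: pass a task_struct where the helper expects a cgroup,
 * which the verifier should reject at load time.
 */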
(void)bpf_cgrp_storage_get(&map_a, (struct cgroup *)task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/cgrp_ls_negative.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/const_or.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("tracepoint")
__description("constant register |= constant should keep constant type")
__success
__naked void constant_should_keep_constant_type(void)
{
asm volatile (" \
r1 = r10; \
r1 += -48; \
r2 = 34; \
r2 |= 13; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("constant register |= constant should not bypass stack boundary checks")
__failure __msg("invalid indirect access to stack R1 off=-48 size=58")
__naked void not_bypass_stack_boundary_checks_1(void)
{
asm volatile (" \
r1 = r10; \
r1 += -48; \
r2 = 34; \
r2 |= 24; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("constant register |= constant register should keep constant type")
__success
__naked void register_should_keep_constant_type(void)
{
asm volatile (" \
r1 = r10; \
r1 += -48; \
r2 = 34; \
r4 = 13; \
r2 |= r4; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("constant register |= constant register should not bypass stack boundary checks")
__failure __msg("invalid indirect access to stack R1 off=-48 size=58")
__naked void not_bypass_stack_boundary_checks_2(void)
{
asm volatile (" \
r1 = r10; \
r1 += -48; \
r2 = 34; \
r4 = 24; \
r2 |= r4; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_const_or.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "cgrp_kfunc_common.h"
char _license[] SEC("license") = "GPL";
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(cgroup_mkdir,
* TP_PROTO(struct cgroup *cgrp, const char *path),
* TP_ARGS(cgrp, path)
*/
static struct __cgrps_kfunc_map_value *insert_lookup_cgrp(struct cgroup *cgrp)
{
int status;
status = cgrps_kfunc_map_insert(cgrp);
if (status)
return NULL;
return cgrps_kfunc_map_value_lookup(cgrp);
}
SEC("tp_btf/cgroup_mkdir")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
struct __cgrps_kfunc_map_value *v;
v = insert_lookup_cgrp(cgrp);
if (!v)
return 0;
/* Can't invoke bpf_cgroup_acquire() on an untrusted pointer. */
acquired = bpf_cgroup_acquire(v->cgrp);
if (acquired)
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(cgrp_kfunc_acquire_no_null_check, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
acquired = bpf_cgroup_acquire(cgrp);
/*
* Can't invoke bpf_cgroup_release() without checking the return value
* of bpf_cgroup_acquire().
*/
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
__failure __msg("arg#0 pointer type STRUCT cgroup must point")
int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired, *stack_cgrp = (struct cgroup *)&path;
/* Can't invoke bpf_cgroup_acquire() on a random frame pointer. */
acquired = bpf_cgroup_acquire((struct cgroup *)&stack_cgrp);
if (acquired)
bpf_cgroup_release(acquired);
return 0;
}
SEC("kretprobe/cgroup_destroy_locked")
__failure __msg("reg type unsupported for arg#0 function")
int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp)
{
struct cgroup *acquired;
/* Can't acquire an untrusted struct cgroup * pointer. */
acquired = bpf_cgroup_acquire(cgrp);
if (acquired)
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
__failure __msg("cgrp_kfunc_acquire_trusted_walked")
int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
/* Can't invoke bpf_cgroup_acquire() on a pointer obtained from walking a trusted cgroup. */
acquired = bpf_cgroup_acquire(cgrp->old_dom_cgrp);
if (acquired)
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
/* Can't invoke bpf_cgroup_acquire() on a NULL pointer. */
acquired = bpf_cgroup_acquire(NULL);
if (acquired)
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
__failure __msg("Unreleased reference")
int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
acquired = bpf_cgroup_acquire(cgrp);
/* Acquired cgroup is never released. */
__sink(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
__failure __msg("Unreleased reference")
int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
struct __cgrps_kfunc_map_value *v;
v = insert_lookup_cgrp(cgrp);
if (!v)
return 0;
kptr = bpf_kptr_xchg(&v->cgrp, NULL);
if (!kptr)
return 0;
/* Kptr retrieved from map is never released. */
return 0;
}
SEC("tp_btf/cgroup_mkdir")
__failure __msg("must be referenced or trusted")
int BPF_PROG(cgrp_kfunc_rcu_get_release, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
struct __cgrps_kfunc_map_value *v;
v = insert_lookup_cgrp(cgrp);
if (!v)
return 0;
bpf_rcu_read_lock();
kptr = v->cgrp;
if (kptr)
/* Can't release a cgroup kptr stored in a map. */
bpf_cgroup_release(kptr);
bpf_rcu_read_unlock();
return 0;
}
SEC("tp_btf/cgroup_mkdir")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path)
{
struct __cgrps_kfunc_map_value *v;
v = insert_lookup_cgrp(cgrp);
if (!v)
return 0;
/* Can't invoke bpf_cgroup_release() on an untrusted pointer. */
bpf_cgroup_release(v->cgrp);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
__failure __msg("arg#0 pointer type STRUCT cgroup must point")
int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired = (struct cgroup *)&path;
/* Cannot release random frame pointer. */
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
{
struct __cgrps_kfunc_map_value local, *v;
long status;
struct cgroup *acquired, *old;
s32 id;
status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
if (status)
return 0;
local.cgrp = NULL;
status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST);
if (status)
return status;
v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
if (!v)
return -ENOENT;
acquired = bpf_cgroup_acquire(cgrp);
if (!acquired)
return -ENOENT;
old = bpf_kptr_xchg(&v->cgrp, acquired);
/* old cannot be passed to bpf_cgroup_release() without a NULL check. */
bpf_cgroup_release(old);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
__failure __msg("release kernel function bpf_cgroup_release expects")
int BPF_PROG(cgrp_kfunc_release_unacquired, struct cgroup *cgrp, const char *path)
{
/* Cannot release trusted cgroup pointer which was not acquired. */
bpf_cgroup_release(cgrp);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */
#include <stdlib.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <linux/if_ether.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
#include "test_select_reuseport_common.h"
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} outer_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, NR_RESULTS);
__type(key, __u32);
__type(value, __u32);
} result_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, int);
} tmp_index_ovr_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} linum_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct data_check);
} data_check_map SEC(".maps");
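/* Record the verdict and the __LINE__ that produced it, then jump to the
 * common exit path that bumps the per-result counter and stores linum and
 * data_check into their maps.
 */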
#define GOTO_DONE(_result) ({ \
result = (_result); \
linum = __LINE__; \
goto done; \
})
SEC("sk_reuseport")
int _select_by_skb_data(struct sk_reuseport_md *reuse_md)
{
__u32 linum, index = 0, flags = 0, index_zero = 0;
__u32 *result_cnt;
struct data_check data_check = {};
struct cmd *cmd, cmd_copy;
void *data, *data_end;
void *reuseport_array;
enum result result;
int *index_ovr;
int err;
data = reuse_md->data;
data_end = reuse_md->data_end;
data_check.len = reuse_md->len;
data_check.eth_protocol = reuse_md->eth_protocol;
data_check.ip_protocol = reuse_md->ip_protocol;
data_check.hash = reuse_md->hash;
data_check.bind_inany = reuse_md->bind_inany;
if (data_check.eth_protocol == bpf_htons(ETH_P_IP)) {
if (bpf_skb_load_bytes_relative(reuse_md,
offsetof(struct iphdr, saddr),
data_check.skb_addrs, 8,
BPF_HDR_START_NET))
GOTO_DONE(DROP_MISC);
} else {
if (bpf_skb_load_bytes_relative(reuse_md,
offsetof(struct ipv6hdr, saddr),
data_check.skb_addrs, 32,
BPF_HDR_START_NET))
GOTO_DONE(DROP_MISC);
}
/*
* The ip_protocol could be a compile time decision
* if the bpf_prog.o is dedicated to either TCP or
* UDP.
*
* Otherwise, reuse_md->ip_protocol or
* the protocol field in the iphdr can be used.
*/
if (data_check.ip_protocol == IPPROTO_TCP) {
struct tcphdr *th = data;
if (th + 1 > data_end)
GOTO_DONE(DROP_MISC);
data_check.skb_ports[0] = th->source;
data_check.skb_ports[1] = th->dest;
if (th->fin)
/* The connection is being torn down at the end of a
* test. It can't contain a cmd, so return early.
*/
return SK_PASS;
if ((th->doff << 2) + sizeof(*cmd) > data_check.len)
GOTO_DONE(DROP_ERR_SKB_DATA);
if (bpf_skb_load_bytes(reuse_md, th->doff << 2, &cmd_copy,
sizeof(cmd_copy)))
GOTO_DONE(DROP_MISC);
cmd = &cmd_copy;
} else if (data_check.ip_protocol == IPPROTO_UDP) {
struct udphdr *uh = data;
if (uh + 1 > data_end)
GOTO_DONE(DROP_MISC);
data_check.skb_ports[0] = uh->source;
data_check.skb_ports[1] = uh->dest;
if (sizeof(struct udphdr) + sizeof(*cmd) > data_check.len)
GOTO_DONE(DROP_ERR_SKB_DATA);
if (data + sizeof(struct udphdr) + sizeof(*cmd) > data_end) {
if (bpf_skb_load_bytes(reuse_md, sizeof(struct udphdr),
&cmd_copy, sizeof(cmd_copy)))
GOTO_DONE(DROP_MISC);
cmd = &cmd_copy;
} else {
cmd = data + sizeof(struct udphdr);
}
} else {
GOTO_DONE(DROP_MISC);
}
reuseport_array = bpf_map_lookup_elem(&outer_map, &index_zero);
if (!reuseport_array)
GOTO_DONE(DROP_ERR_INNER_MAP);
index = cmd->reuseport_index;
index_ovr = bpf_map_lookup_elem(&tmp_index_ovr_map, &index_zero);
if (!index_ovr)
GOTO_DONE(DROP_MISC);
if (*index_ovr != -1) {
index = *index_ovr;
*index_ovr = -1;
}
err = bpf_sk_select_reuseport(reuse_md, reuseport_array, &index,
flags);
if (!err)
GOTO_DONE(PASS);
if (cmd->pass_on_failure)
GOTO_DONE(PASS_ERR_SK_SELECT_REUSEPORT);
else
GOTO_DONE(DROP_ERR_SK_SELECT_REUSEPORT);
done:
result_cnt = bpf_map_lookup_elem(&result_map, &result);
if (!result_cnt)
return SK_DROP;
bpf_map_update_elem(&linum_map, &index_zero, &linum, BPF_ANY);
bpf_map_update_elem(&data_check_map, &index_zero, &data_check, BPF_ANY);
(*result_cnt)++;
return result < PASS ? SK_DROP : SK_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct key_t {
int a;
int b;
int c;
};
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(max_entries, 3);
__type(key, struct key_t);
__type(value, __u32);
} hashmap1 SEC(".maps");
/* will be set before prog run */
volatile const __u32 num_cpus = 0;
/* will collect results during prog run */
__u32 key_sum_a = 0, key_sum_b = 0, key_sum_c = 0;
__u32 val_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_percpu_hash_map(struct bpf_iter__bpf_map_elem *ctx)
{
struct key_t *key = ctx->key;
void *pptr = ctx->value;
__u32 step;
int i;
if (key == (void *)0 || pptr == (void *)0)
return 0;
key_sum_a += key->a;
key_sum_b += key->b;
key_sum_c += key->c;
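/* ctx->value points to a buffer with one copy of the value per possible
 * CPU, each entry rounded up to 8 bytes, hence the 8-byte stride below
 * for the 4-byte __u32 value.
 */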
step = 8;
for (i = 0; i < num_cpus; i++) {
val_sum += *(__u32 *)pptr;
pptr += step;
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper test for topological sorting of dependent structs.
*
* Copyright (c) 2019 Facebook
*/
/* ----- START-EXPECTED-OUTPUT ----- */
struct s1 {};
struct s3;
struct s4;
struct s2 {
struct s2 *s2;
struct s3 *s3;
struct s4 *s4;
};
struct s3 {
struct s1 s1;
struct s2 s2;
};
struct s4 {
struct s1 s1;
struct s3 s3;
};
struct list_head {
struct list_head *next;
struct list_head *prev;
};
struct hlist_node {
struct hlist_node *next;
struct hlist_node **pprev;
};
struct hlist_head {
struct hlist_node *first;
};
struct callback_head {
struct callback_head *next;
void (*func)(struct callback_head *);
};
struct root_struct {
struct s4 s4;
struct list_head l;
struct hlist_node n;
struct hlist_head h;
struct callback_head cb;
};
/*------ END-EXPECTED-OUTPUT ------ */
int f(struct root_struct *root)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/btf_dump_test_case_ordering.c |
#include "core_reloc_types.h"
void f(struct core_reloc_arrays x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_arrays.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/div0.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("socket")
__description("DIV32 by 0, zero check 1")
__success __success_unpriv __retval(42)
__naked void by_0_zero_check_1_1(void)
{
asm volatile (" \
w0 = 42; \
w1 = 0; \
w2 = 1; \
w2 /= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("DIV32 by 0, zero check 2")
__success __success_unpriv __retval(42)
__naked void by_0_zero_check_2_1(void)
{
asm volatile (" \
w0 = 42; \
r1 = 0xffffffff00000000LL ll; \
w2 = 1; \
w2 /= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("DIV64 by 0, zero check")
__success __success_unpriv __retval(42)
__naked void div64_by_0_zero_check(void)
{
asm volatile (" \
w0 = 42; \
w1 = 0; \
w2 = 1; \
r2 /= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("MOD32 by 0, zero check 1")
__success __success_unpriv __retval(42)
__naked void by_0_zero_check_1_2(void)
{
asm volatile (" \
w0 = 42; \
w1 = 0; \
w2 = 1; \
w2 %%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("MOD32 by 0, zero check 2")
__success __success_unpriv __retval(42)
__naked void by_0_zero_check_2_2(void)
{
asm volatile (" \
w0 = 42; \
r1 = 0xffffffff00000000LL ll; \
w2 = 1; \
w2 %%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("MOD64 by 0, zero check")
__success __success_unpriv __retval(42)
__naked void mod64_by_0_zero_check(void)
{
asm volatile (" \
w0 = 42; \
w1 = 0; \
w2 = 1; \
r2 %%= r1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("DIV32 by 0, zero check ok, cls")
__success __retval(8)
__naked void _0_zero_check_ok_cls_1(void)
{
asm volatile (" \
w0 = 42; \
w1 = 2; \
w2 = 16; \
w2 /= w1; \
r0 = r2; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("DIV32 by 0, zero check 1, cls")
__success __retval(0)
__naked void _0_zero_check_1_cls_1(void)
{
asm volatile (" \
w1 = 0; \
w0 = 1; \
w0 /= w1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("DIV32 by 0, zero check 2, cls")
__success __retval(0)
__naked void _0_zero_check_2_cls_1(void)
{
asm volatile (" \
r1 = 0xffffffff00000000LL ll; \
w0 = 1; \
w0 /= w1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("DIV64 by 0, zero check, cls")
__success __retval(0)
__naked void by_0_zero_check_cls(void)
{
asm volatile (" \
w1 = 0; \
w0 = 1; \
r0 /= r1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("MOD32 by 0, zero check ok, cls")
__success __retval(2)
__naked void _0_zero_check_ok_cls_2(void)
{
asm volatile (" \
w0 = 42; \
w1 = 3; \
w2 = 5; \
w2 %%= w1; \
r0 = r2; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("MOD32 by 0, zero check 1, cls")
__success __retval(1)
__naked void _0_zero_check_1_cls_2(void)
{
asm volatile (" \
w1 = 0; \
w0 = 1; \
w0 %%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("MOD32 by 0, zero check 2, cls")
__success __retval(1)
__naked void _0_zero_check_2_cls_2(void)
{
asm volatile (" \
r1 = 0xffffffff00000000LL ll; \
w0 = 1; \
w0 %%= w1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("MOD64 by 0, zero check 1, cls")
__success __retval(2)
__naked void _0_zero_check_1_cls_3(void)
{
asm volatile (" \
w1 = 0; \
w0 = 2; \
r0 %%= r1; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("MOD64 by 0, zero check 2, cls")
__success __retval(-1)
__naked void _0_zero_check_2_cls_3(void)
{
asm volatile (" \
w1 = 0; \
w0 = -1; \
r0 %%= r1; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_div0.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/and.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
SEC("socket")
__description("invalid and of negative number")
__failure __msg("R0 max value is outside of the allowed memory range")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void invalid_and_of_negative_number(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u8*)(r0 + 0); \
r1 &= -4; \
r1 <<= 2; \
r0 += r1; \
l0_%=: r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("socket")
__description("invalid range check")
__failure __msg("R0 max value is outside of the allowed memory range")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void invalid_range_check(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
r9 = 1; \
w1 %%= 2; \
w1 += 1; \
w9 &= w1; \
w9 += 1; \
w9 >>= 1; \
w3 = 1; \
w3 -= w9; \
w3 *= 0x10000000; \
r0 += r3; \
*(u32*)(r0 + 0) = r3; \
l0_%=: r0 = r0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("socket")
__description("check known subreg with unknown reg")
__success __failure_unpriv __msg_unpriv("R1 !read_ok")
__retval(0)
__naked void known_subreg_with_unknown_reg(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r0 <<= 32; \
r0 += 1; \
r0 &= 0xFFFF1234; \
/* Upper bits are unknown but AND above masks out 1 zero'ing lower bits */\
if w0 < 1 goto l0_%=; \
r1 = *(u32*)(r1 + 512); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_and.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
__u32 load_offset = 0;
int test_result = 0;
SEC("tc")
int skb_process(struct __sk_buff *skb)
{
char buf[16];
test_result = bpf_skb_load_bytes(skb, load_offset, buf, 10);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/skb_load_bytes.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_devmap_val));
__uint(max_entries, 4);
} dm_ports SEC(".maps");
/* valid program on DEVMAP entry via SEC name;
* has access to egress and ingress ifindex
*/
SEC("xdp/devmap")
int xdp_dummy_dm(struct xdp_md *ctx)
{
return XDP_PASS;
}
SEC("xdp.frags/devmap")
int xdp_dummy_dm_frags(struct xdp_md *ctx)
{
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_with_devmap_frags_helpers.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* 8-byte aligned .data */
static volatile long static_var1 = 2;
static volatile int static_var2 = 3;
int var1 = -1;
/* 4-byte aligned .rodata */
const volatile int rovar1;
/* same "subprog" name in both files */
static __noinline int subprog(int x)
{
/* but different formula */
return x * 2;
}
SEC("raw_tp/sys_enter")
int handler1(const void *ctx)
{
var1 = subprog(rovar1) + static_var1 + static_var2;
return 0;
}
char LICENSE[] SEC("license") = "GPL";
int VERSION SEC("version") = 1;
| linux-master | tools/testing/selftests/bpf/progs/test_static_linked1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
static volatile int zero = 0;
int my_pid;
int arr[256];
int small_arr[16] SEC(".data.small_arr");
#ifdef REAL_TEST
#define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0
#else
#define MY_PID_GUARD() ({ })
#endif
SEC("?raw_tp")
__failure __msg("math between map_value pointer and register with unbounded min value is not allowed")
int iter_err_unsafe_c_loop(const void *ctx)
{
struct bpf_iter_num it;
int *v, i = zero; /* obscure initial value of i */
MY_PID_GUARD();
bpf_iter_num_new(&it, 0, 1000);
while ((v = bpf_iter_num_next(&it))) {
i++;
}
bpf_iter_num_destroy(&it);
small_arr[i] = 123; /* invalid */
return 0;
}
SEC("?raw_tp")
__failure __msg("unbounded memory access")
int iter_err_unsafe_asm_loop(const void *ctx)
{
struct bpf_iter_num it;
MY_PID_GUARD();
asm volatile (
"r6 = %[zero];" /* iteration counter */
"r1 = %[it];" /* iterator state */
"r2 = 0;"
"r3 = 1000;"
"r4 = 1;"
"call %[bpf_iter_num_new];"
"loop:"
"r1 = %[it];"
"call %[bpf_iter_num_next];"
"if r0 == 0 goto out;"
"r6 += 1;"
"goto loop;"
"out:"
"r1 = %[it];"
"call %[bpf_iter_num_destroy];"
"r1 = %[small_arr];"
"r2 = r6;"
"r2 <<= 2;"
"r1 += r2;"
"*(u32 *)(r1 + 0) = r6;" /* invalid */
:
: [it]"r"(&it),
[small_arr]"p"(small_arr),
[zero]"p"(zero),
__imm(bpf_iter_num_new),
__imm(bpf_iter_num_next),
__imm(bpf_iter_num_destroy)
: __clobber_common, "r6"
);
return 0;
}
SEC("raw_tp")
__success
int iter_while_loop(const void *ctx)
{
struct bpf_iter_num it;
int *v;
MY_PID_GUARD();
bpf_iter_num_new(&it, 0, 3);
while ((v = bpf_iter_num_next(&it))) {
bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
}
bpf_iter_num_destroy(&it);
return 0;
}
SEC("raw_tp")
__success
int iter_while_loop_auto_cleanup(const void *ctx)
{
__attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it;
int *v;
MY_PID_GUARD();
bpf_iter_num_new(&it, 0, 3);
while ((v = bpf_iter_num_next(&it))) {
bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
}
/* (!) no explicit bpf_iter_num_destroy() */
return 0;
}
SEC("raw_tp")
__success
int iter_for_loop(const void *ctx)
{
struct bpf_iter_num it;
int *v;
MY_PID_GUARD();
bpf_iter_num_new(&it, 5, 10);
for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
}
bpf_iter_num_destroy(&it);
return 0;
}
SEC("raw_tp")
__success
int iter_bpf_for_each_macro(const void *ctx)
{
int *v;
MY_PID_GUARD();
bpf_for_each(num, v, 5, 10) {
bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
}
return 0;
}
SEC("raw_tp")
__success
int iter_bpf_for_macro(const void *ctx)
{
int i;
MY_PID_GUARD();
bpf_for(i, 5, 10) {
bpf_printk("ITER_BASIC: E2 VAL: v=%d", i);
}
return 0;
}
SEC("raw_tp")
__success
int iter_pragma_unroll_loop(const void *ctx)
{
struct bpf_iter_num it;
int *v, i;
MY_PID_GUARD();
bpf_iter_num_new(&it, 0, 2);
#pragma nounroll
for (i = 0; i < 3; i++) {
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
}
bpf_iter_num_destroy(&it);
return 0;
}
SEC("raw_tp")
__success
int iter_manual_unroll_loop(const void *ctx)
{
struct bpf_iter_num it;
int *v;
MY_PID_GUARD();
bpf_iter_num_new(&it, 100, 200);
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
bpf_iter_num_destroy(&it);
return 0;
}
SEC("raw_tp")
__success
int iter_multiple_sequential_loops(const void *ctx)
{
struct bpf_iter_num it;
int *v, i;
MY_PID_GUARD();
bpf_iter_num_new(&it, 0, 3);
while ((v = bpf_iter_num_next(&it))) {
bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
}
bpf_iter_num_destroy(&it);
bpf_iter_num_new(&it, 5, 10);
for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
}
bpf_iter_num_destroy(&it);
bpf_iter_num_new(&it, 0, 2);
#pragma nounroll
for (i = 0; i < 3; i++) {
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
}
bpf_iter_num_destroy(&it);
bpf_iter_num_new(&it, 100, 200);
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
bpf_iter_num_destroy(&it);
return 0;
}
SEC("raw_tp")
__success
int iter_limit_cond_break_loop(const void *ctx)
{
struct bpf_iter_num it;
int *v, i = 0, sum = 0;
MY_PID_GUARD();
bpf_iter_num_new(&it, 0, 10);
while ((v = bpf_iter_num_next(&it))) {
bpf_printk("ITER_SIMPLE: i=%d v=%d", i, *v);
sum += *v;
i++;
if (i > 3)
break;
}
bpf_iter_num_destroy(&it);
bpf_printk("ITER_SIMPLE: sum=%d\n", sum);
return 0;
}
SEC("raw_tp")
__success
int iter_obfuscate_counter(const void *ctx)
{
struct bpf_iter_num it;
int *v, sum = 0;
/* Make i's initial value unknowable for verifier to prevent it from
* pruning if/else branch inside the loop body and marking i as precise.
*/
int i = zero;
MY_PID_GUARD();
bpf_iter_num_new(&it, 0, 10);
while ((v = bpf_iter_num_next(&it))) {
int x;
i += 1;
/* If we initialized i as `int i = 0;` above, verifier would
* track that i becomes 1 on first iteration after increment
* above, and here verifier would eagerly prune else branch
* and mark i as precise, ruining open-coded iterator logic
* completely, as each next iteration would have a different
* *precise* value of i, and thus there would be no
* convergence of state. This would result in reaching maximum
* instruction limit, no matter what the limit is.
*/
if (i == 1)
x = 123;
else
x = i * 3 + 1;
bpf_printk("ITER_OBFUSCATE_COUNTER: i=%d v=%d x=%d", i, *v, x);
sum += x;
}
bpf_iter_num_destroy(&it);
bpf_printk("ITER_OBFUSCATE_COUNTER: sum=%d\n", sum);
return 0;
}
SEC("raw_tp")
__success
int iter_search_loop(const void *ctx)
{
struct bpf_iter_num it;
int *v, *elem = NULL;
bool found = false;
MY_PID_GUARD();
bpf_iter_num_new(&it, 0, 10);
while ((v = bpf_iter_num_next(&it))) {
bpf_printk("ITER_SEARCH_LOOP: v=%d", *v);
if (*v == 2) {
found = true;
elem = v;
barrier_var(elem);
}
}
/* should fail to verify if bpf_iter_num_destroy() is here */
if (found)
/* here found element will be wrong, we should have copied
* value to a variable, but here we want to make sure we can
* access memory after the loop anyways
*/
bpf_printk("ITER_SEARCH_LOOP: FOUND IT = %d!\n", *elem);
else
bpf_printk("ITER_SEARCH_LOOP: NOT FOUND IT!\n");
bpf_iter_num_destroy(&it);
return 0;
}
SEC("raw_tp")
__success
int iter_array_fill(const void *ctx)
{
int sum, i;
MY_PID_GUARD();
bpf_for(i, 0, ARRAY_SIZE(arr)) {
arr[i] = i * 2;
}
sum = 0;
bpf_for(i, 0, ARRAY_SIZE(arr)) {
sum += arr[i];
}
bpf_printk("ITER_ARRAY_FILL: sum=%d (should be %d)\n", sum, 255 * 256);
return 0;
}
static int arr2d[4][5];
static int arr2d_row_sums[4];
static int arr2d_col_sums[5];
SEC("raw_tp")
__success
int iter_nested_iters(const void *ctx)
{
int sum, row, col;
MY_PID_GUARD();
bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
arr2d[row][col] = row * col;
}
}
/* zero-initialize sums */
sum = 0;
bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
arr2d_row_sums[row] = 0;
}
bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
arr2d_col_sums[col] = 0;
}
/* calculate sums */
bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
sum += arr2d[row][col];
arr2d_row_sums[row] += arr2d[row][col];
arr2d_col_sums[col] += arr2d[row][col];
}
}
bpf_printk("ITER_NESTED_ITERS: total sum=%d", sum);
bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
bpf_printk("ITER_NESTED_ITERS: row #%d sum=%d", row, arr2d_row_sums[row]);
}
bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
bpf_printk("ITER_NESTED_ITERS: col #%d sum=%d%s",
col, arr2d_col_sums[col],
col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");
}
return 0;
}
SEC("raw_tp")
__success
int iter_nested_deeply_iters(const void *ctx)
{
int sum = 0;
MY_PID_GUARD();
bpf_repeat(10) {
bpf_repeat(10) {
bpf_repeat(10) {
bpf_repeat(10) {
bpf_repeat(10) {
sum += 1;
}
}
}
}
/* validate that we can break from inside bpf_repeat() */
break;
}
return sum;
}
static __noinline void fill_inner_dimension(int row)
{
int col;
bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
arr2d[row][col] = row * col;
}
}
static __noinline int sum_inner_dimension(int row)
{
int sum = 0, col;
bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
sum += arr2d[row][col];
arr2d_row_sums[row] += arr2d[row][col];
arr2d_col_sums[col] += arr2d[row][col];
}
return sum;
}
SEC("raw_tp")
__success
int iter_subprog_iters(const void *ctx)
{
int sum, row, col;
MY_PID_GUARD();
bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
fill_inner_dimension(row);
}
/* zero-initialize sums */
sum = 0;
bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
arr2d_row_sums[row] = 0;
}
bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
arr2d_col_sums[col] = 0;
}
/* calculate sums */
bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
sum += sum_inner_dimension(row);
}
bpf_printk("ITER_SUBPROG_ITERS: total sum=%d", sum);
bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
bpf_printk("ITER_SUBPROG_ITERS: row #%d sum=%d",
row, arr2d_row_sums[row]);
}
bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
bpf_printk("ITER_SUBPROG_ITERS: col #%d sum=%d%s",
col, arr2d_col_sums[col],
col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");
}
return 0;
}
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, int);
__uint(max_entries, 1000);
} arr_map SEC(".maps");
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int iter_err_too_permissive1(const void *ctx)
{
int *map_val = NULL;
int key = 0;
MY_PID_GUARD();
map_val = bpf_map_lookup_elem(&arr_map, &key);
if (!map_val)
return 0;
bpf_repeat(1000000) {
map_val = NULL;
}
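/* map_val was overwritten with NULL inside the loop, so by now it is just
 * a scalar to the verifier and this store must be rejected.
 */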
*map_val = 123;
return 0;
}
SEC("?raw_tp")
__failure __msg("invalid mem access 'map_value_or_null'")
int iter_err_too_permissive2(const void *ctx)
{
int *map_val = NULL;
int key = 0;
MY_PID_GUARD();
map_val = bpf_map_lookup_elem(&arr_map, &key);
if (!map_val)
return 0;
bpf_repeat(1000000) {
map_val = bpf_map_lookup_elem(&arr_map, &key);
}
*map_val = 123;
return 0;
}
SEC("?raw_tp")
__failure __msg("invalid mem access 'map_value_or_null'")
int iter_err_too_permissive3(const void *ctx)
{
int *map_val = NULL;
int key = 0;
bool found = false;
MY_PID_GUARD();
bpf_repeat(1000000) {
map_val = bpf_map_lookup_elem(&arr_map, &key);
found = true;
}
if (found)
*map_val = 123;
return 0;
}
SEC("raw_tp")
__success
int iter_tricky_but_fine(const void *ctx)
{
int *map_val = NULL;
int key = 0;
bool found = false;
MY_PID_GUARD();
bpf_repeat(1000000) {
map_val = bpf_map_lookup_elem(&arr_map, &key);
if (map_val) {
found = true;
break;
}
}
if (found)
*map_val = 123;
return 0;
}
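/* Reading from kernel address 0 always fails, and bpf_probe_read_kernel()
 * then zero-fills the destination; the verifier only records that the
 * stack is initialized with unknown bytes, not that it is all zeros.
 */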
#define __bpf_memzero(p, sz) bpf_probe_read_kernel((p), (sz), 0)
SEC("raw_tp")
__success
int iter_stack_array_loop(const void *ctx)
{
long arr1[16], arr2[16], sum = 0;
int i;
MY_PID_GUARD();
/* zero-init arr1 and arr2 in such a way that verifier doesn't know
* it's all zeros; if we don't do that, we'll make BPF verifier track
* all combination of zero/non-zero stack slots for arr1/arr2, which
* will lead to O(2^(ARRAY_SIZE(arr1)+ARRAY_SIZE(arr2))) different
* states
*/
__bpf_memzero(arr1, sizeof(arr1));
__bpf_memzero(arr2, sizeof(arr1));
/* validate that we can break and continue when using bpf_for() */
bpf_for(i, 0, ARRAY_SIZE(arr1)) {
if (i & 1) {
arr1[i] = i;
continue;
} else {
arr2[i] = i;
break;
}
}
bpf_for(i, 0, ARRAY_SIZE(arr1)) {
sum += arr1[i] + arr2[i];
}
return sum;
}
static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul)
{
int *t, i;
while ((t = bpf_iter_num_next(it))) {
i = *t;
if (i >= n)
break;
arr[i] = i * mul;
}
}
static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
{
int *t, i, sum = 0;
while ((t = bpf_iter_num_next(it))) {
i = *t;
if (i >= n)
break;
sum += arr[i];
}
return sum;
}
SEC("raw_tp")
__success
int iter_pass_iter_ptr_to_subprog(const void *ctx)
{
int arr1[16], arr2[32];
struct bpf_iter_num it;
int n, sum1, sum2;
MY_PID_GUARD();
/* fill arr1 */
n = ARRAY_SIZE(arr1);
bpf_iter_num_new(&it, 0, n);
fill(&it, arr1, n, 2);
bpf_iter_num_destroy(&it);
/* fill arr2 */
n = ARRAY_SIZE(arr2);
bpf_iter_num_new(&it, 0, n);
fill(&it, arr2, n, 10);
bpf_iter_num_destroy(&it);
/* sum arr1 */
n = ARRAY_SIZE(arr1);
bpf_iter_num_new(&it, 0, n);
sum1 = sum(&it, arr1, n);
bpf_iter_num_destroy(&it);
/* sum arr2 */
n = ARRAY_SIZE(arr2);
bpf_iter_num_new(&it, 0, n);
sum2 = sum(&it, arr2, n);
bpf_iter_num_destroy(&it);
bpf_printk("sum1=%d, sum2=%d", sum1, sum2);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/iters.c |
#include "core_reloc_types.h"
void f(struct core_reloc_enumval x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_enumval.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} cg_ids SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} pidmap SEC(".maps");
SEC("tracepoint/syscalls/sys_enter_nanosleep")
int trace(void *ctx)
{
__u32 pid = bpf_get_current_pid_tgid();
__u32 key = 0, *expected_pid;
__u64 *val;
expected_pid = bpf_map_lookup_elem(&pidmap, &key);
if (!expected_pid || *expected_pid != pid)
return 0;
val = bpf_map_lookup_elem(&cg_ids, &key);
if (val)
*val = bpf_get_current_cgroup_id();
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#define MAX_PATH_LEN 128
#define MAX_FILES 7
pid_t my_pid = 0;
__u32 cnt_stat = 0;
__u32 cnt_close = 0;
char paths_stat[MAX_FILES][MAX_PATH_LEN] = {};
char paths_close[MAX_FILES][MAX_PATH_LEN] = {};
int rets_stat[MAX_FILES] = {};
int rets_close[MAX_FILES] = {};
int called_stat = 0;
int called_close = 0;
SEC("fentry/security_inode_getattr")
int BPF_PROG(prog_stat, struct path *path, struct kstat *stat,
__u32 request_mask, unsigned int query_flags)
{
pid_t pid = bpf_get_current_pid_tgid() >> 32;
__u32 cnt = cnt_stat;
int ret;
called_stat = 1;
if (pid != my_pid)
return 0;
if (cnt >= MAX_FILES)
return 0;
ret = bpf_d_path(path, paths_stat[cnt], MAX_PATH_LEN);
rets_stat[cnt] = ret;
cnt_stat++;
return 0;
}
SEC("fentry/filp_close")
int BPF_PROG(prog_close, struct file *file, void *id)
{
pid_t pid = bpf_get_current_pid_tgid() >> 32;
__u32 cnt = cnt_close;
int ret;
called_close = 1;
if (pid != my_pid)
return 0;
if (cnt >= MAX_FILES)
return 0;
ret = bpf_d_path(&file->f_path,
paths_close[cnt], MAX_PATH_LEN);
rets_close[cnt] = ret;
cnt_close++;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_d_path.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_call_fail(struct __sk_buff *ctx)
{
bpf_testmod_test_mod_kfunc(0);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/kfunc_call_race.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC.
*/
#include <errno.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} test_result SEC(".maps");
SEC("cgroup_skb/egress")
int load_bytes_relative(struct __sk_buff *skb)
{
struct ethhdr eth;
struct iphdr iph;
__u32 map_key = 0;
__u32 test_passed = 0;
/* MAC header is not set by the time cgroup_skb/egress triggers */
if (bpf_skb_load_bytes_relative(skb, 0, &eth, sizeof(eth),
BPF_HDR_START_MAC) != -EFAULT)
goto fail;
if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph),
BPF_HDR_START_NET))
goto fail;
if (bpf_skb_load_bytes_relative(skb, 0xffff, &iph, sizeof(iph),
BPF_HDR_START_NET) != -EFAULT)
goto fail;
test_passed = 1;
fail:
bpf_map_update_elem(&test_result, &map_key, &test_passed, BPF_ANY);
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/load_bytes_relative.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/usdt.bpf.h>
int my_pid;
int usdt0_called;
u64 usdt0_cookie;
int usdt0_arg_cnt;
int usdt0_arg_ret;
SEC("usdt")
int usdt0(struct pt_regs *ctx)
{
long tmp;
if (my_pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
__sync_fetch_and_add(&usdt0_called, 1);
usdt0_cookie = bpf_usdt_cookie(ctx);
usdt0_arg_cnt = bpf_usdt_arg_cnt(ctx);
/* should return -ENOENT for any arg_num */
usdt0_arg_ret = bpf_usdt_arg(ctx, bpf_get_prandom_u32(), &tmp);
return 0;
}
int usdt3_called;
u64 usdt3_cookie;
int usdt3_arg_cnt;
int usdt3_arg_rets[3];
u64 usdt3_args[3];
SEC("usdt//proc/self/exe:test:usdt3")
int usdt3(struct pt_regs *ctx)
{
long tmp;
if (my_pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
__sync_fetch_and_add(&usdt3_called, 1);
usdt3_cookie = bpf_usdt_cookie(ctx);
usdt3_arg_cnt = bpf_usdt_arg_cnt(ctx);
usdt3_arg_rets[0] = bpf_usdt_arg(ctx, 0, &tmp);
usdt3_args[0] = (int)tmp;
usdt3_arg_rets[1] = bpf_usdt_arg(ctx, 1, &tmp);
usdt3_args[1] = (long)tmp;
usdt3_arg_rets[2] = bpf_usdt_arg(ctx, 2, &tmp);
usdt3_args[2] = (uintptr_t)tmp;
return 0;
}
int usdt12_called;
u64 usdt12_cookie;
int usdt12_arg_cnt;
u64 usdt12_args[12];
SEC("usdt//proc/self/exe:test:usdt12")
int BPF_USDT(usdt12, int a1, int a2, long a3, long a4, unsigned a5,
long a6, __u64 a7, uintptr_t a8, int a9, short a10,
short a11, signed char a12)
{
if (my_pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
__sync_fetch_and_add(&usdt12_called, 1);
usdt12_cookie = bpf_usdt_cookie(ctx);
usdt12_arg_cnt = bpf_usdt_arg_cnt(ctx);
usdt12_args[0] = a1;
usdt12_args[1] = a2;
usdt12_args[2] = a3;
usdt12_args[3] = a4;
usdt12_args[4] = a5;
usdt12_args[5] = a6;
usdt12_args[6] = a7;
usdt12_args[7] = a8;
usdt12_args[8] = a9;
usdt12_args[9] = a10;
usdt12_args[10] = a11;
usdt12_args[11] = a12;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_usdt.c |
#include "core_reloc_types.h"
void f(struct core_reloc_nesting___err_nonstruct_container x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting___err_nonstruct_container.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
{
return skb->len;
}
int f3(int, struct __sk_buff *skb);
__attribute__ ((noinline))
int f2(int val, struct __sk_buff *skb)
{
return f1(skb) + f3(val, skb + 1); /* type mismatch */
}
__attribute__ ((noinline))
int f3(int val, struct __sk_buff *skb)
{
return skb->ifindex * val;
}
SEC("tc")
__failure __msg("modified ctx ptr R2")
int global_func6(struct __sk_buff *skb)
{
return f1(skb) + f2(2, skb) + f3(3, skb);
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func6.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} sock_map SEC(".maps");
SEC("sk_skb")
int prog_skb_verdict(struct __sk_buff *skb)
{
return SK_PASS;
}
SEC("sk_msg")
int prog_skmsg_verdict(struct sk_msg_md *msg)
{
return SK_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sockmap_progs_query.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
struct core_reloc_ptr_as_arr {
int a;
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_ptr_as_arr(void *ctx)
{
struct core_reloc_ptr_as_arr *in = (void *)&data.in;
struct core_reloc_ptr_as_arr *out = (void *)&data.out;
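/* CO-RE should compute the offset of in[2].a as if the pointer were an
 * array, i.e. scale the index by the target kernel's actual struct size.
 */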
if (CORE_READ(&out->a, &in[2].a))
return 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);
int proto_out;
SEC("raw_tracepoint/sys_enter")
int core_relo_proto(void *ctx)
{
proto_out = bpf_core_type_exists(func_proto_typedef_nested2);
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/core_kern_overflow.c |
#include "core_reloc_types.h"
void f(struct core_reloc_type_based___diff x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf_sockopt_helpers.h>
char _license[] SEC("license") = "GPL";
struct svc_addr {
__be32 addr;
__be16 port;
};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct svc_addr);
} service_mapping SEC(".maps");
SEC("cgroup/connect4")
int connect4(struct bpf_sock_addr *ctx)
{
struct sockaddr_in sa = {};
struct svc_addr *orig;
/* Force local address to 127.0.0.1:22222. */
sa.sin_family = AF_INET;
sa.sin_port = bpf_htons(22222);
sa.sin_addr.s_addr = bpf_htonl(0x7f000001);
if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0)
return 0;
/* Rewire service 1.2.3.4:60000 to backend 127.0.0.1:60123. */
if (ctx->user_port == bpf_htons(60000)) {
orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!orig)
return 0;
orig->addr = ctx->user_ip4;
orig->port = ctx->user_port;
ctx->user_ip4 = bpf_htonl(0x7f000001);
ctx->user_port = bpf_htons(60123);
}
return 1;
}
SEC("cgroup/getsockname4")
int getsockname4(struct bpf_sock_addr *ctx)
{
if (!get_set_sk_priority(ctx))
return 1;
/* Expose local server as 1.2.3.4:60000 to client. */
if (ctx->user_port == bpf_htons(60123)) {
ctx->user_ip4 = bpf_htonl(0x01020304);
ctx->user_port = bpf_htons(60000);
}
return 1;
}
SEC("cgroup/getpeername4")
int getpeername4(struct bpf_sock_addr *ctx)
{
struct svc_addr *orig;
if (!get_set_sk_priority(ctx))
return 1;
/* Expose service 1.2.3.4:60000 as peer instead of backend. */
if (ctx->user_port == bpf_htons(60123)) {
orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0, 0);
if (orig) {
ctx->user_ip4 = orig->addr;
ctx->user_port = orig->port;
}
}
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/connect_force_port4.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022 Google LLC.
*/
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct percpu_attach_counter {
/* Previous percpu state, to figure out if we have new updates */
__u64 prev;
/* Current percpu state */
__u64 state;
};
struct attach_counter {
/* State propagated through children, pending aggregation */
__u64 pending;
/* Total state, including all cpus and all children */
__u64 state;
};
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(max_entries, 1024);
__type(key, __u64);
__type(value, struct percpu_attach_counter);
} percpu_attach_counters SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1024);
__type(key, __u64);
__type(value, struct attach_counter);
} attach_counters SEC(".maps");
extern void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) __ksym;
extern void cgroup_rstat_flush(struct cgroup *cgrp) __ksym;
static uint64_t cgroup_id(struct cgroup *cgrp)
{
return cgrp->kn->id;
}
static int create_percpu_attach_counter(__u64 cg_id, __u64 state)
{
struct percpu_attach_counter pcpu_init = {.state = state, .prev = 0};
return bpf_map_update_elem(&percpu_attach_counters, &cg_id,
&pcpu_init, BPF_NOEXIST);
}
static int create_attach_counter(__u64 cg_id, __u64 state, __u64 pending)
{
struct attach_counter init = {.state = state, .pending = pending};
return bpf_map_update_elem(&attach_counters, &cg_id,
&init, BPF_NOEXIST);
}
SEC("fentry/cgroup_attach_task")
int BPF_PROG(counter, struct cgroup *dst_cgrp, struct task_struct *leader,
bool threadgroup)
{
__u64 cg_id = cgroup_id(dst_cgrp);
struct percpu_attach_counter *pcpu_counter = bpf_map_lookup_elem(
&percpu_attach_counters,
&cg_id);
if (pcpu_counter)
pcpu_counter->state += 1;
else if (create_percpu_attach_counter(cg_id, 1))
return 0;
cgroup_rstat_updated(dst_cgrp, bpf_get_smp_processor_id());
return 0;
}
SEC("fentry/bpf_rstat_flush")
int BPF_PROG(flusher, struct cgroup *cgrp, struct cgroup *parent, int cpu)
{
struct percpu_attach_counter *pcpu_counter;
struct attach_counter *total_counter, *parent_counter;
__u64 cg_id = cgroup_id(cgrp);
__u64 parent_cg_id = parent ? cgroup_id(parent) : 0;
__u64 state;
__u64 delta = 0;
/* Add CPU changes on this level since the last flush */
pcpu_counter = bpf_map_lookup_percpu_elem(&percpu_attach_counters,
&cg_id, cpu);
if (pcpu_counter) {
state = pcpu_counter->state;
delta += state - pcpu_counter->prev;
pcpu_counter->prev = state;
}
total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id);
if (!total_counter) {
if (create_attach_counter(cg_id, delta, 0))
return 0;
goto update_parent;
}
/* Collect pending stats from subtree */
if (total_counter->pending) {
delta += total_counter->pending;
total_counter->pending = 0;
}
/* Propagate changes to this cgroup's total */
total_counter->state += delta;
update_parent:
/* Skip if there are no changes to propagate, or no parent */
if (!delta || !parent_cg_id)
return 0;
/* Propagate changes to cgroup's parent */
parent_counter = bpf_map_lookup_elem(&attach_counters,
&parent_cg_id);
if (parent_counter)
parent_counter->pending += delta;
else
create_attach_counter(parent_cg_id, 0, delta);
return 0;
}
SEC("iter.s/cgroup")
int BPF_PROG(dumper, struct bpf_iter_meta *meta, struct cgroup *cgrp)
{
struct seq_file *seq = meta->seq;
struct attach_counter *total_counter;
__u64 cg_id = cgrp ? cgroup_id(cgrp) : 0;
/* Do nothing for the terminal call */
if (!cg_id)
return 1;
/* Flush the stats to make sure we get the most updated numbers */
cgroup_rstat_flush(cgrp);
total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id);
if (!total_counter) {
BPF_SEQ_PRINTF(seq, "cg_id: %llu, attach_counter: 0\n",
cg_id);
} else {
BPF_SEQ_PRINTF(seq, "cg_id: %llu, attach_counter: %llu\n",
cg_id, total_counter->state);
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c |
#include "core_reloc_types.h"
void f(struct core_reloc_enum64val___err_missing x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
int null_data_vprintk_ret = 0;
int trace_vprintk_ret = 0;
int trace_vprintk_ran = 0;
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int sys_enter(void *ctx)
{
static const char one[] = "1";
static const char three[] = "3";
static const char five[] = "5";
static const char seven[] = "7";
static const char nine[] = "9";
static const char f[] = "%pS\n";
/* runner doesn't search for \t, just ensure it compiles */
bpf_printk("\t");
trace_vprintk_ret = __bpf_vprintk("%s,%d,%s,%d,%s,%d,%s,%d,%s,%d %d\n",
one, 2, three, 4, five, 6, seven, 8, nine, 10, ++trace_vprintk_ran);
/* non-NULL fmt w/ NULL data should result in error */
null_data_vprintk_ret = bpf_trace_vprintk(f, sizeof(f), NULL, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/trace_vprintk.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
enum core_reloc_primitives_enum {
A = 0,
B = 1,
};
struct core_reloc_primitives {
char a;
int b;
enum core_reloc_primitives_enum c;
void *d;
int (*f)(const char *);
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_primitives(void *ctx)
{
struct core_reloc_primitives *in = (void *)&data.in;
struct core_reloc_primitives *out = (void *)&data.out;
if (CORE_READ(&out->a, &in->a) ||
CORE_READ(&out->b, &in->b) ||
CORE_READ(&out->c, &in->c) ||
CORE_READ(&out->d, &in->d) ||
CORE_READ(&out->f, &in->f))
return 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
__uint(pinning, 2); /* invalid */
} nopinmap3 SEC(".maps");
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_pinning_invalid.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
static int hlist_unhashed_lockless(const struct hlist_node *h)
{
return !(h->pprev);
}
static int timer_pending(const struct timer_list * timer)
{
return !hlist_unhashed_lockless(&timer->entry);
}
extern unsigned CONFIG_HZ __kconfig;
#define USER_HZ 100
#define NSEC_PER_SEC 1000000000ULL
static clock_t jiffies_to_clock_t(unsigned long x)
{
	/* The implementation here is tailored to a particular
	 * setting of USER_HZ.
	 */
u64 tick_nsec = (NSEC_PER_SEC + CONFIG_HZ/2) / CONFIG_HZ;
u64 user_hz_nsec = NSEC_PER_SEC / USER_HZ;
if ((tick_nsec % user_hz_nsec) == 0) {
if (CONFIG_HZ < USER_HZ)
return x * (USER_HZ / CONFIG_HZ);
else
return x / (CONFIG_HZ / USER_HZ);
}
return x * tick_nsec/user_hz_nsec;
}
static clock_t jiffies_delta_to_clock_t(long delta)
{
if (delta <= 0)
return 0;
return jiffies_to_clock_t(delta);
}
static long sock_i_ino(const struct sock *sk)
{
const struct socket *sk_socket = sk->sk_socket;
const struct inode *inode;
unsigned long ino;
if (!sk_socket)
return 0;
inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
return ino;
}
static bool
inet_csk_in_pingpong_mode(const struct inet_connection_sock *icsk)
{
return icsk->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}
static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp)
{
return tcp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}
static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp,
uid_t uid, __u32 seq_num)
{
const struct inet_connection_sock *icsk;
const struct fastopen_queue *fastopenq;
const struct in6_addr *dest, *src;
const struct inet_sock *inet;
unsigned long timer_expires;
const struct sock *sp;
__u16 destp, srcp;
int timer_active;
int rx_queue;
int state;
icsk = &tp->tcp.inet_conn;
inet = &icsk->icsk_inet;
sp = &inet->sk;
fastopenq = &icsk->icsk_accept_queue.fastopenq;
dest = &sp->sk_v6_daddr;
src = &sp->sk_v6_rcv_saddr;
destp = bpf_ntohs(inet->inet_dport);
srcp = bpf_ntohs(inet->inet_sport);
if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
timer_expires = icsk->icsk_timeout;
} else if (timer_pending(&sp->sk_timer)) {
timer_active = 2;
timer_expires = sp->sk_timer.expires;
} else {
timer_active = 0;
timer_expires = bpf_jiffies64();
}
state = sp->sk_state;
if (state == TCP_LISTEN) {
rx_queue = sp->sk_ack_backlog;
} else {
rx_queue = tp->tcp.rcv_nxt - tp->tcp.copied_seq;
if (rx_queue < 0)
rx_queue = 0;
}
BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ",
seq_num,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ",
state,
tp->tcp.write_seq - tp->tcp.snd_una, rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - bpf_jiffies64()),
icsk->icsk_retransmits, uid,
icsk->icsk_probes_out,
sock_i_ino(sp),
sp->sk_refcnt.refs.counter);
BPF_SEQ_PRINTF(seq, "%pK %lu %lu %u %u %d\n",
tp,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(icsk),
tp->tcp.snd_cwnd,
state == TCP_LISTEN ? fastopenq->max_qlen
: (tcp_in_initial_slowstart(&tp->tcp) ? -1
: tp->tcp.snd_ssthresh)
);
return 0;
}
static int dump_tw_sock(struct seq_file *seq, struct tcp_timewait_sock *ttw,
uid_t uid, __u32 seq_num)
{
struct inet_timewait_sock *tw = &ttw->tw_sk;
const struct in6_addr *dest, *src;
__u16 destp, srcp;
long delta;
delta = tw->tw_timer.expires - bpf_jiffies64();
dest = &tw->tw_v6_daddr;
src = &tw->tw_v6_rcv_saddr;
destp = bpf_ntohs(tw->tw_dport);
srcp = bpf_ntohs(tw->tw_sport);
BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ",
seq_num,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
tw->tw_substate, 0, 0,
3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
tw->tw_refcnt.refs.counter, tw);
return 0;
}
static int dump_req_sock(struct seq_file *seq, struct tcp_request_sock *treq,
uid_t uid, __u32 seq_num)
{
struct inet_request_sock *irsk = &treq->req;
struct request_sock *req = &irsk->req;
struct in6_addr *src, *dest;
long ttd;
ttd = req->rsk_timer.expires - bpf_jiffies64();
src = &irsk->ir_v6_loc_addr;
dest = &irsk->ir_v6_rmt_addr;
if (ttd < 0)
ttd = 0;
BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ",
seq_num,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3],
irsk->ir_num,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3],
bpf_ntohs(irsk->ir_rmt_port));
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
TCP_SYN_RECV, 0, 0, 1, jiffies_to_clock_t(ttd),
req->num_timeout, uid, 0, 0, 0, req);
return 0;
}
SEC("iter/tcp")
int dump_tcp6(struct bpf_iter__tcp *ctx)
{
struct sock_common *sk_common = ctx->sk_common;
struct seq_file *seq = ctx->meta->seq;
struct tcp_timewait_sock *tw;
struct tcp_request_sock *req;
struct tcp6_sock *tp;
uid_t uid = ctx->uid;
__u32 seq_num;
if (sk_common == (void *)0)
return 0;
seq_num = ctx->meta->seq_num;
if (seq_num == 0)
BPF_SEQ_PRINTF(seq, " sl "
"local_address "
"remote_address "
"st tx_queue rx_queue tr tm->when retrnsmt"
" uid timeout inode\n");
if (sk_common->skc_family != AF_INET6)
return 0;
tp = bpf_skc_to_tcp6_sock(sk_common);
if (tp)
return dump_tcp6_sock(seq, tp, uid, seq_num);
tw = bpf_skc_to_tcp_timewait_sock(sk_common);
if (tw)
return dump_tw_sock(seq, tw, uid, seq_num);
req = bpf_skc_to_tcp_request_sock(sk_common);
if (req)
return dump_req_sock(seq, req, uid, seq_num);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct sk_buff {
unsigned int len;
};
__u64 test_result = 0;
SEC("fexit/test_pkt_md_access")
int BPF_PROG(test_main2, struct sk_buff *skb, int ret)
{
int len;
__builtin_preserve_access_index(({
len = skb->len;
}));
if (len != 74 || ret != 0)
return 0;
test_result = 1;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
#define SOL_CUSTOM 0xdeadbeef
#define CUSTOM_INHERIT1 0
#define CUSTOM_INHERIT2 1
#define CUSTOM_LISTENER 2
__u32 page_size = 0;
struct sockopt_inherit {
__u8 val;
};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
__type(key, int);
__type(value, struct sockopt_inherit);
} cloned1_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
__type(key, int);
__type(value, struct sockopt_inherit);
} cloned2_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct sockopt_inherit);
} listener_only_map SEC(".maps");
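/* Pick the per-optname socket storage map: CUSTOM_INHERIT1/2 use the
 * clonable maps, anything else falls back to the listener-only map.
 */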
static __inline struct sockopt_inherit *get_storage(struct bpf_sockopt *ctx)
{
if (ctx->optname == CUSTOM_INHERIT1)
return bpf_sk_storage_get(&cloned1_map, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
else if (ctx->optname == CUSTOM_INHERIT2)
return bpf_sk_storage_get(&cloned2_map, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
else
return bpf_sk_storage_get(&listener_only_map, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
}
SEC("cgroup/getsockopt")
int _getsockopt(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
struct sockopt_inherit *storage;
__u8 *optval = ctx->optval;
if (ctx->level != SOL_CUSTOM)
goto out; /* only interested in SOL_CUSTOM */
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
storage = get_storage(ctx);
if (!storage)
return 0; /* EPERM, couldn't get sk storage */
ctx->retval = 0; /* Reset system call return value to zero */
optval[0] = storage->val;
ctx->optlen = 1;
return 1;
out:
	/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 1;
}
SEC("cgroup/setsockopt")
int _setsockopt(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
struct sockopt_inherit *storage;
__u8 *optval = ctx->optval;
if (ctx->level != SOL_CUSTOM)
goto out; /* only interested in SOL_CUSTOM */
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
storage = get_storage(ctx);
if (!storage)
return 0; /* EPERM, couldn't get sk storage */
storage->val = optval[0];
ctx->optlen = -1;
return 1;
out:
	/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/sockopt_inherit.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct tcp_rtt_storage {
__u32 invoked;
__u32 dsack_dups;
__u32 delivered;
__u32 delivered_ce;
__u32 icsk_retransmits;
};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct tcp_rtt_storage);
} socket_storage_map SEC(".maps");
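/* On every RTT callback, snapshot a few tcp_sock counters into per-socket
 * storage so they can be read back from userspace.
 */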
SEC("sockops")
int _sockops(struct bpf_sock_ops *ctx)
{
struct tcp_rtt_storage *storage;
struct bpf_tcp_sock *tcp_sk;
int op = (int) ctx->op;
struct bpf_sock *sk;
sk = ctx->sk;
if (!sk)
return 1;
storage = bpf_sk_storage_get(&socket_storage_map, sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!storage)
return 1;
if (op == BPF_SOCK_OPS_TCP_CONNECT_CB) {
bpf_sock_ops_cb_flags_set(ctx, BPF_SOCK_OPS_RTT_CB_FLAG);
return 1;
}
if (op != BPF_SOCK_OPS_RTT_CB)
return 1;
tcp_sk = bpf_tcp_sock(sk);
if (!tcp_sk)
return 1;
storage->invoked++;
storage->dsack_dups = tcp_sk->dsack_dups;
storage->delivered = tcp_sk->delivered;
storage->delivered_ce = tcp_sk->delivered_ce;
storage->icsk_retransmits = tcp_sk->icsk_retransmits;
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/tcp_rtt.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
struct core_reloc_bitfields {
/* unsigned bitfields */
uint8_t ub1: 1;
uint8_t ub2: 2;
uint32_t ub7: 7;
/* signed bitfields */
int8_t sb4: 4;
int32_t sb20: 20;
/* non-bitfields */
uint32_t u32;
int32_t s32;
};
/* bitfield read results, all as plain integers */
struct core_reloc_bitfields_output {
int64_t ub1;
int64_t ub2;
int64_t ub7;
int64_t sb4;
int64_t sb20;
int64_t u32;
int64_t s32;
};
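/* Probe-read each field of data.in and store it, widened to 64 bits,
 * into data.out.
 */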
SEC("raw_tracepoint/sys_enter")
int test_core_bitfields(void *ctx)
{
struct core_reloc_bitfields *in = (void *)&data.in;
struct core_reloc_bitfields_output *out = (void *)&data.out;
out->ub1 = BPF_CORE_READ_BITFIELD_PROBED(in, ub1);
out->ub2 = BPF_CORE_READ_BITFIELD_PROBED(in, ub2);
out->ub7 = BPF_CORE_READ_BITFIELD_PROBED(in, ub7);
out->sb4 = BPF_CORE_READ_BITFIELD_PROBED(in, sb4);
out->sb20 = BPF_CORE_READ_BITFIELD_PROBED(in, sb20);
out->u32 = BPF_CORE_READ_BITFIELD_PROBED(in, u32);
out->s32 = BPF_CORE_READ_BITFIELD_PROBED(in, s32);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <stdbool.h>
int lookup_status;
bool test_xdp;
bool tcp_skc;
#define CUR_NS BPF_F_CURRENT_NETNS
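/* Parse an Ethernet/IPv4 packet and try a socket lookup in the current
 * netns; lookup_status records whether a socket was found.
 */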
static void socket_lookup(void *ctx, void *data_end, void *data)
{
struct ethhdr *eth = data;
struct bpf_sock_tuple *tp;
struct bpf_sock *sk;
struct iphdr *iph;
int tplen;
if (eth + 1 > data_end)
return;
if (eth->h_proto != bpf_htons(ETH_P_IP))
return;
iph = (struct iphdr *)(eth + 1);
if (iph + 1 > data_end)
return;
tp = (struct bpf_sock_tuple *)&iph->saddr;
tplen = sizeof(tp->ipv4);
if ((void *)tp + tplen > data_end)
return;
switch (iph->protocol) {
case IPPROTO_TCP:
if (tcp_skc)
sk = bpf_skc_lookup_tcp(ctx, tp, tplen, CUR_NS, 0);
else
sk = bpf_sk_lookup_tcp(ctx, tp, tplen, CUR_NS, 0);
break;
case IPPROTO_UDP:
sk = bpf_sk_lookup_udp(ctx, tp, tplen, CUR_NS, 0);
break;
default:
return;
}
lookup_status = 0;
if (sk) {
bpf_sk_release(sk);
lookup_status = 1;
}
}
SEC("tc")
int tc_socket_lookup(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
if (test_xdp)
return TC_ACT_UNSPEC;
socket_lookup(skb, data_end, data);
return TC_ACT_UNSPEC;
}
SEC("xdp")
int xdp_socket_lookup(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
if (!test_xdp)
return XDP_PASS;
socket_lookup(xdp, data_end, data);
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/vrf_socket_lookup.c |
// SPDX-License-Identifier: GPL-2.0
#include <limits.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/pkt_cls.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_packet.h>
#include <sys/socket.h>
#include <linux/if_tunnel.h>
#include <linux/mpls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define PROG(F) PROG_(F, _##F)
#define PROG_(NUM, NAME) SEC("flow_dissector") int flow_dissector_##NUM
#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */
/* These are the identifiers of the BPF programs that will be used in tail
 * calls. Program names are limited to 16 characters; after the terminating
 * character and the bpf_func_ prefix above, only 6 characters are left to
 * work with, and anything beyond that is cropped.
 */
#define IP 0
#define IPV6 1
#define IPV6OP 2 /* Destination/Hop-by-Hop Options IPv6 Ext. Header */
#define IPV6FR 3 /* Fragmentation IPv6 Extension Header */
#define MPLS 4
#define VLAN 5
#define MAX_PROG 6
#define IP_MF 0x2000
#define IP_OFFSET 0x1FFF
#define IP6_MF 0x0001
#define IP6_OFFSET 0xFFF8
struct vlan_hdr {
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
struct gre_hdr {
__be16 flags;
__be16 proto;
};
struct frag_hdr {
__u8 nexthdr;
__u8 reserved;
__be16 frag_off;
__be32 identification;
};
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, MAX_PROG);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
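/* Tail-call targets indexed by the IP/IPV6/... identifiers above;
 * the entries are expected to be installed by the test loader.
 */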
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1024);
__type(key, __u32);
__type(value, struct bpf_flow_keys);
} last_dissection SEC(".maps");
static __always_inline int export_flow_keys(struct bpf_flow_keys *keys,
int ret)
{
__u32 key = (__u32)(keys->sport) << 16 | keys->dport;
struct bpf_flow_keys val;
memcpy(&val, keys, sizeof(val));
bpf_map_update_elem(&last_dissection, &key, &val, BPF_ANY);
return ret;
}
#define IPV6_FLOWLABEL_MASK __bpf_constant_htonl(0x000FFFFF)
static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr)
{
return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK;
}
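/* Return a pointer to hdr_size bytes at the current thoff: directly into
 * the linear packet data when possible, otherwise copied into the
 * caller-provided buffer.
 */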
static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
__u16 hdr_size,
void *buffer)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
__u16 thoff = skb->flow_keys->thoff;
__u8 *hdr;
/* Verifies this variable offset does not overflow */
if (thoff > (USHRT_MAX - hdr_size))
return NULL;
hdr = data + thoff;
if (hdr + hdr_size <= data_end)
return hdr;
if (bpf_skb_load_bytes(skb, thoff, buffer, hdr_size))
return NULL;
return buffer;
}
/* Dispatches on ETHERTYPE */
static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
{
struct bpf_flow_keys *keys = skb->flow_keys;
switch (proto) {
case bpf_htons(ETH_P_IP):
bpf_tail_call_static(skb, &jmp_table, IP);
break;
case bpf_htons(ETH_P_IPV6):
bpf_tail_call_static(skb, &jmp_table, IPV6);
break;
case bpf_htons(ETH_P_MPLS_MC):
case bpf_htons(ETH_P_MPLS_UC):
bpf_tail_call_static(skb, &jmp_table, MPLS);
break;
case bpf_htons(ETH_P_8021Q):
case bpf_htons(ETH_P_8021AD):
bpf_tail_call_static(skb, &jmp_table, VLAN);
break;
default:
/* Protocol not supported */
return export_flow_keys(keys, BPF_DROP);
}
return export_flow_keys(keys, BPF_DROP);
}
SEC("flow_dissector")
int _dissect(struct __sk_buff *skb)
{
struct bpf_flow_keys *keys = skb->flow_keys;
if (keys->n_proto == bpf_htons(ETH_P_IP)) {
		/* IP traffic from FLOW_CONTINUE_SADDR falls back to the
		 * standard dissector
		 */
struct iphdr *iph, _iph;
iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph);
if (iph && iph->ihl == 5 &&
iph->saddr == bpf_htonl(FLOW_CONTINUE_SADDR)) {
return BPF_FLOW_DISSECTOR_CONTINUE;
}
}
return parse_eth_proto(skb, keys->n_proto);
}
/* Parses on IPPROTO_* */
static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
{
struct bpf_flow_keys *keys = skb->flow_keys;
void *data_end = (void *)(long)skb->data_end;
struct icmphdr *icmp, _icmp;
struct gre_hdr *gre, _gre;
struct ethhdr *eth, _eth;
struct tcphdr *tcp, _tcp;
struct udphdr *udp, _udp;
switch (proto) {
case IPPROTO_ICMP:
icmp = bpf_flow_dissect_get_header(skb, sizeof(*icmp), &_icmp);
if (!icmp)
return export_flow_keys(keys, BPF_DROP);
return export_flow_keys(keys, BPF_OK);
case IPPROTO_IPIP:
keys->is_encap = true;
if (keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP)
return export_flow_keys(keys, BPF_OK);
return parse_eth_proto(skb, bpf_htons(ETH_P_IP));
case IPPROTO_IPV6:
keys->is_encap = true;
if (keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP)
return export_flow_keys(keys, BPF_OK);
return parse_eth_proto(skb, bpf_htons(ETH_P_IPV6));
case IPPROTO_GRE:
gre = bpf_flow_dissect_get_header(skb, sizeof(*gre), &_gre);
if (!gre)
return export_flow_keys(keys, BPF_DROP);
if (bpf_htons(gre->flags & GRE_VERSION))
/* Only inspect standard GRE packets with version 0 */
return export_flow_keys(keys, BPF_OK);
keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
if (GRE_IS_CSUM(gre->flags))
			keys->thoff += 4; /* Step over checksum and padding */
if (GRE_IS_KEY(gre->flags))
keys->thoff += 4; /* Step over key */
if (GRE_IS_SEQ(gre->flags))
keys->thoff += 4; /* Step over sequence number */
keys->is_encap = true;
if (keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP)
return export_flow_keys(keys, BPF_OK);
if (gre->proto == bpf_htons(ETH_P_TEB)) {
eth = bpf_flow_dissect_get_header(skb, sizeof(*eth),
&_eth);
if (!eth)
return export_flow_keys(keys, BPF_DROP);
keys->thoff += sizeof(*eth);
return parse_eth_proto(skb, eth->h_proto);
} else {
return parse_eth_proto(skb, gre->proto);
}
case IPPROTO_TCP:
tcp = bpf_flow_dissect_get_header(skb, sizeof(*tcp), &_tcp);
if (!tcp)
return export_flow_keys(keys, BPF_DROP);
if (tcp->doff < 5)
return export_flow_keys(keys, BPF_DROP);
if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
return export_flow_keys(keys, BPF_DROP);
keys->sport = tcp->source;
keys->dport = tcp->dest;
return export_flow_keys(keys, BPF_OK);
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
udp = bpf_flow_dissect_get_header(skb, sizeof(*udp), &_udp);
if (!udp)
return export_flow_keys(keys, BPF_DROP);
keys->sport = udp->source;
keys->dport = udp->dest;
return export_flow_keys(keys, BPF_OK);
default:
return export_flow_keys(keys, BPF_DROP);
}
return export_flow_keys(keys, BPF_DROP);
}
static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
{
struct bpf_flow_keys *keys = skb->flow_keys;
switch (nexthdr) {
case IPPROTO_HOPOPTS:
case IPPROTO_DSTOPTS:
bpf_tail_call_static(skb, &jmp_table, IPV6OP);
break;
case IPPROTO_FRAGMENT:
bpf_tail_call_static(skb, &jmp_table, IPV6FR);
break;
default:
return parse_ip_proto(skb, nexthdr);
}
return export_flow_keys(keys, BPF_DROP);
}
PROG(IP)(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
struct bpf_flow_keys *keys = skb->flow_keys;
void *data = (void *)(long)skb->data;
struct iphdr *iph, _iph;
bool done = false;
iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph);
if (!iph)
return export_flow_keys(keys, BPF_DROP);
/* IP header cannot be smaller than 20 bytes */
if (iph->ihl < 5)
return export_flow_keys(keys, BPF_DROP);
keys->addr_proto = ETH_P_IP;
keys->ipv4_src = iph->saddr;
keys->ipv4_dst = iph->daddr;
keys->ip_proto = iph->protocol;
keys->thoff += iph->ihl << 2;
if (data + keys->thoff > data_end)
return export_flow_keys(keys, BPF_DROP);
if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
keys->is_frag = true;
if (iph->frag_off & bpf_htons(IP_OFFSET)) {
			/* From the second fragment on, packets do not have
			 * headers we can parse.
			 */
done = true;
} else {
keys->is_first_frag = true;
			/* No need to parse a fragmented packet unless
			 * explicitly asked to.
			 */
if (!(keys->flags &
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
done = true;
}
}
if (done)
return export_flow_keys(keys, BPF_OK);
return parse_ip_proto(skb, iph->protocol);
}
PROG(IPV6)(struct __sk_buff *skb)
{
struct bpf_flow_keys *keys = skb->flow_keys;
struct ipv6hdr *ip6h, _ip6h;
ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
if (!ip6h)
return export_flow_keys(keys, BPF_DROP);
keys->addr_proto = ETH_P_IPV6;
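	/* saddr and daddr are adjacent in the IPv6 header, as are ipv6_src and
	 * ipv6_dst in the flow keys, so both addresses are copied in one go.
	 */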
memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
keys->thoff += sizeof(struct ipv6hdr);
keys->ip_proto = ip6h->nexthdr;
keys->flow_label = ip6_flowlabel(ip6h);
if (keys->flow_label && keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
return export_flow_keys(keys, BPF_OK);
return parse_ipv6_proto(skb, ip6h->nexthdr);
}
PROG(IPV6OP)(struct __sk_buff *skb)
{
struct bpf_flow_keys *keys = skb->flow_keys;
struct ipv6_opt_hdr *ip6h, _ip6h;
ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
if (!ip6h)
return export_flow_keys(keys, BPF_DROP);
	/* hdrlen is in units of 8 octets and does not include the first
	 * 8 bytes of the header
	 */
keys->thoff += (1 + ip6h->hdrlen) << 3;
keys->ip_proto = ip6h->nexthdr;
return parse_ipv6_proto(skb, ip6h->nexthdr);
}
PROG(IPV6FR)(struct __sk_buff *skb)
{
struct bpf_flow_keys *keys = skb->flow_keys;
struct frag_hdr *fragh, _fragh;
fragh = bpf_flow_dissect_get_header(skb, sizeof(*fragh), &_fragh);
if (!fragh)
return export_flow_keys(keys, BPF_DROP);
keys->thoff += sizeof(*fragh);
keys->is_frag = true;
keys->ip_proto = fragh->nexthdr;
if (!(fragh->frag_off & bpf_htons(IP6_OFFSET))) {
keys->is_first_frag = true;
		/* No need to parse a fragmented packet unless
		 * explicitly asked to.
		 */
if (!(keys->flags & BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
return export_flow_keys(keys, BPF_OK);
} else {
return export_flow_keys(keys, BPF_OK);
}
return parse_ipv6_proto(skb, fragh->nexthdr);
}
PROG(MPLS)(struct __sk_buff *skb)
{
struct bpf_flow_keys *keys = skb->flow_keys;
struct mpls_label *mpls, _mpls;
mpls = bpf_flow_dissect_get_header(skb, sizeof(*mpls), &_mpls);
if (!mpls)
return export_flow_keys(keys, BPF_DROP);
return export_flow_keys(keys, BPF_OK);
}
PROG(VLAN)(struct __sk_buff *skb)
{
struct bpf_flow_keys *keys = skb->flow_keys;
struct vlan_hdr *vlan, _vlan;
/* Account for double-tagging */
if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
if (!vlan)
return export_flow_keys(keys, BPF_DROP);
if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
return export_flow_keys(keys, BPF_DROP);
keys->nhoff += sizeof(*vlan);
keys->thoff += sizeof(*vlan);
}
vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
if (!vlan)
return export_flow_keys(keys, BPF_DROP);
keys->nhoff += sizeof(*vlan);
keys->thoff += sizeof(*vlan);
	/* Only allow 8021AD + 8021Q double tagging; no triple tagging. */
if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
return export_flow_keys(keys, BPF_DROP);
keys->n_proto = vlan->h_vlan_encapsulated_proto;
return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/bpf_flow.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
struct task_struct {
int tgid;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} exp_tgid_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} results SEC(".maps");
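/* exp_tgid_map holds the test process's tgid (written from userspace);
 * results receives the tgid observed via the CO-RE read below.
 */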
SEC("tp/raw_syscalls/sys_enter")
int handle_sys_enter(void *ctx)
{
struct task_struct *task = (void *)bpf_get_current_task();
int tgid = BPF_CORE_READ(task, tgid);
int zero = 0;
int real_tgid = bpf_get_current_pid_tgid() >> 32;
int *exp_tgid = bpf_map_lookup_elem(&exp_tgid_map, &zero);
	/* only pass through sys_enters from the test process */
if (!exp_tgid || *exp_tgid != real_tgid)
return 0;
bpf_map_update_elem(&results, &zero, &tgid, 0);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_core_retro.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
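/* test_1 returns the previous state->val and leaves 0x5a behind as a
 * marker; a NULL state returns a fixed sentinel instead.
 */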
SEC("struct_ops/test_1")
int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
{
int ret;
if (!state)
return 0xf2f3f4f5;
ret = state->val;
state->val = 0x5a;
return ret;
}
__u64 test_2_args[5];
SEC("struct_ops/test_2")
int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
char a3, unsigned long a4)
{
test_2_args[0] = (unsigned long)state;
test_2_args[1] = a1;
test_2_args[2] = a2;
test_2_args[3] = a3;
test_2_args[4] = a4;
return 0;
}
SEC("struct_ops.s/test_sleepable")
int BPF_PROG(test_sleepable, struct bpf_dummy_ops_state *state)
{
return 0;
}
SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
.test_1 = (void *)test_1,
.test_2 = (void *)test_2,
.test_sleepable = (void *)test_sleepable,
};
| linux-master | tools/testing/selftests/bpf/progs/dummy_st_ops_success.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
/* modifiers and typedefs are ignored when comparing key/value types */
typedef struct my_key { long x; } key_type;
typedef struct my_value { long x; } value_type;
extern struct {
__uint(max_entries, 16);
__type(key, key_type);
__type(value, value_type);
__uint(type, BPF_MAP_TYPE_HASH);
} map1 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, int);
__uint(max_entries, 8);
} map2 SEC(".maps");
/* this definition will lose, but it has to exactly match the winner */
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, int);
__uint(max_entries, 16);
} map_weak __weak SEC(".maps");
int output_first2;
int output_second2;
int output_weak2;
SEC("raw_tp/sys_enter")
int BPF_PROG(handler_enter2)
{
/* update values with key = 2 */
int key = 2, val = 2;
key_type key_struct = { .x = 2 };
value_type val_struct = { .x = 2000 };
bpf_map_update_elem(&map1, &key_struct, &val_struct, 0);
bpf_map_update_elem(&map2, &key, &val, 0);
bpf_map_update_elem(&map_weak, &key, &val, 0);
return 0;
}
SEC("raw_tp/sys_exit")
int BPF_PROG(handler_exit2)
{
/* lookup values with key = 1, set in another file */
int key = 1, *val;
key_type key_struct = { .x = 1 };
value_type *value_struct;
value_struct = bpf_map_lookup_elem(&map1, &key_struct);
if (value_struct)
output_first2 = value_struct->x;
val = bpf_map_lookup_elem(&map2, &key);
if (val)
output_second2 = *val;
val = bpf_map_lookup_elem(&map_weak, &key);
if (val)
output_weak2 = *val;
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/linked_maps2.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Oracle and/or its affiliates. */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
unsigned long last_sym_value = 0;
static inline char to_lower(char c)
{
if (c >= 'A' && c <= 'Z')
c += ('a' - 'A');
return c;
}
static inline char to_upper(char c)
{
if (c >= 'a' && c <= 'z')
c -= ('a' - 'A');
return c;
}
/* Dump symbols with their maximum size. The size is calculated by caching
 * symbol N's value; when iterating on symbol N+1, the maximum size of
 * symbol N can be printed as (address of N+1) - (address of N).
 */
SEC("iter/ksym")
int dump_ksym(struct bpf_iter__ksym *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct kallsym_iter *iter = ctx->ksym;
__u32 seq_num = ctx->meta->seq_num;
unsigned long value;
char type;
if (!iter)
return 0;
if (seq_num == 0) {
BPF_SEQ_PRINTF(seq, "ADDR TYPE NAME MODULE_NAME KIND MAX_SIZE\n");
return 0;
}
if (last_sym_value)
BPF_SEQ_PRINTF(seq, "0x%x\n", iter->value - last_sym_value);
else
BPF_SEQ_PRINTF(seq, "\n");
value = iter->show_value ? iter->value : 0;
last_sym_value = value;
type = iter->type;
if (iter->module_name[0]) {
type = iter->exported ? to_upper(type) : to_lower(type);
BPF_SEQ_PRINTF(seq, "0x%llx %c %s [ %s ] ",
value, type, iter->name, iter->module_name);
} else {
BPF_SEQ_PRINTF(seq, "0x%llx %c %s ", value, type, iter->name);
}
if (!iter->pos_mod_end || iter->pos_mod_end > iter->pos)
BPF_SEQ_PRINTF(seq, "MOD ");
else if (!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > iter->pos)
BPF_SEQ_PRINTF(seq, "FTRACE_MOD ");
else if (!iter->pos_bpf_end || iter->pos_bpf_end > iter->pos)
BPF_SEQ_PRINTF(seq, "BPF ");
else
BPF_SEQ_PRINTF(seq, "KPROBE ");
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_ksym.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#define X_0(x)
#define X_1(x) x X_0(x)
#define X_2(x) x X_1(x)
#define X_3(x) x X_2(x)
#define X_4(x) x X_3(x)
#define X_5(x) x X_4(x)
#define X_6(x) x X_5(x)
#define X_7(x) x X_6(x)
#define X_8(x) x X_7(x)
#define X_9(x) x X_8(x)
#define X_10(x) x X_9(x)
#define REPEAT_256(Y) X_2(X_10(X_10(Y))) X_5(X_10(Y)) X_6(Y)
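/* REPEAT_256(Y) expands Y 256 times (2*100 + 5*10 + 6); used below to
 * stress BTF fd reuse for repeated module kfunc calls.
 */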
extern const int bpf_testmod_ksym_percpu __ksym;
extern void bpf_testmod_test_mod_kfunc(int i) __ksym;
extern void bpf_testmod_invalid_mod_kfunc(void) __ksym __weak;
int out_bpf_testmod_ksym = 0;
const volatile int x = 0;
SEC("tc")
int load(struct __sk_buff *skb)
{
	/* This call is kept by clang but removed by the verifier. Since it is
	 * marked as __weak, libbpf and gen_loader don't error out if no BTF ID
	 * is found for it; instead, its imm and off are set to 0.
	 */
if (x)
bpf_testmod_invalid_mod_kfunc();
bpf_testmod_test_mod_kfunc(42);
out_bpf_testmod_ksym = *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu);
return 0;
}
SEC("tc")
int load_256(struct __sk_buff *skb)
{
/* this will fail if kfunc doesn't reuse its own btf fd index */
REPEAT_256(bpf_testmod_test_mod_kfunc(42););
bpf_testmod_test_mod_kfunc(42);
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_ksyms_module.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
const struct {
unsigned a[4];
/*
	 * if the struct's size is a multiple of 16, the compiler will put it
	 * into the .rodata.cst16 section, which is not recognized by libbpf;
	 * work around this by ensuring the struct is not 16-byte aligned
*/
char _y;
} rdonly_values = { .a = {2, 3, 4, 5} };
struct {
unsigned did_run;
unsigned iters;
unsigned sum;
} res = {};
SEC("raw_tracepoint/sys_enter:skip_loop")
int skip_loop(struct pt_regs *ctx)
{
	/* prevent the compiler from optimizing everything out */
unsigned * volatile p = (void *)&rdonly_values.a;
unsigned iters = 0, sum = 0;
/* we should never enter this loop */
while (*p & 1) {
iters++;
sum += *p;
p++;
}
res.did_run = 1;
res.iters = iters;
res.sum = sum;
return 0;
}
SEC("raw_tracepoint/sys_enter:part_loop")
int part_loop(struct pt_regs *ctx)
{
	/* prevent the compiler from optimizing everything out */
unsigned * volatile p = (void *)&rdonly_values.a;
unsigned iters = 0, sum = 0;
	/* validate that the verifier can derive loop termination */
while (*p < 5) {
iters++;
sum += *p;
p++;
}
res.did_run = 1;
res.iters = iters;
res.sum = sum;
return 0;
}
SEC("raw_tracepoint/sys_enter:full_loop")
int full_loop(struct pt_regs *ctx)
{
	/* prevent the compiler from optimizing everything out */
unsigned * volatile p = (void *)&rdonly_values.a;
int i = sizeof(rdonly_values.a) / sizeof(rdonly_values.a[0]);
unsigned iters = 0, sum = 0;
	/* validate that the verifier allows the full loop as well */
	while (i > 0) {
iters++;
sum += *p;
p++;
i--;
}
res.did_run = 1;
res.iters = iters;
res.sum = sum;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_rdonly_maps.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/div_overflow.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <limits.h>
#include "bpf_misc.h"
/* Just make sure that JITs use udiv/umod, since otherwise we would get an
 * exception from INT_MIN/-1 overflow, similar to division by zero.
 */
SEC("tc")
__description("DIV32 overflow, check 1")
__success __retval(0)
__naked void div32_overflow_check_1(void)
{
asm volatile (" \
w1 = -1; \
w0 = %[int_min]; \
w0 /= w1; \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
SEC("tc")
__description("DIV32 overflow, check 2")
__success __retval(0)
__naked void div32_overflow_check_2(void)
{
asm volatile (" \
w0 = %[int_min]; \
w0 /= -1; \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
SEC("tc")
__description("DIV64 overflow, check 1")
__success __retval(0)
__naked void div64_overflow_check_1(void)
{
asm volatile (" \
r1 = -1; \
r2 = %[llong_min] ll; \
r2 /= r1; \
w0 = 0; \
if r0 == r2 goto l0_%=; \
w0 = 1; \
l0_%=: exit; \
" :
: __imm_const(llong_min, LLONG_MIN)
: __clobber_all);
}
SEC("tc")
__description("DIV64 overflow, check 2")
__success __retval(0)
__naked void div64_overflow_check_2(void)
{
asm volatile (" \
r1 = %[llong_min] ll; \
r1 /= -1; \
w0 = 0; \
if r0 == r1 goto l0_%=; \
w0 = 1; \
l0_%=: exit; \
" :
: __imm_const(llong_min, LLONG_MIN)
: __clobber_all);
}
SEC("tc")
__description("MOD32 overflow, check 1")
__success __retval(INT_MIN)
__naked void mod32_overflow_check_1(void)
{
asm volatile (" \
w1 = -1; \
w0 = %[int_min]; \
w0 %%= w1; \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
SEC("tc")
__description("MOD32 overflow, check 2")
__success __retval(INT_MIN)
__naked void mod32_overflow_check_2(void)
{
asm volatile (" \
w0 = %[int_min]; \
w0 %%= -1; \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
SEC("tc")
__description("MOD64 overflow, check 1")
__success __retval(1)
__naked void mod64_overflow_check_1(void)
{
asm volatile (" \
r1 = -1; \
r2 = %[llong_min] ll; \
r3 = r2; \
r2 %%= r1; \
w0 = 0; \
if r3 != r2 goto l0_%=; \
w0 = 1; \
l0_%=: exit; \
" :
: __imm_const(llong_min, LLONG_MIN)
: __clobber_all);
}
SEC("tc")
__description("MOD64 overflow, check 2")
__success __retval(1)
__naked void mod64_overflow_check_2(void)
{
asm volatile (" \
r2 = %[llong_min] ll; \
r3 = r2; \
r2 %%= -1; \
w0 = 0; \
if r3 != r2 goto l0_%=; \
w0 = 1; \
l0_%=: exit; \
" :
: __imm_const(llong_min, LLONG_MIN)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_div_overflow.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
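/* Single-slot program array for the bpf2bpf tail-call test; slot 0 is
 * presumably filled with classifier_0 by the test loader, while the
 * index-1 tail call can only fall through.
 */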
static __noinline
int subprog_tail(struct __sk_buff *skb)
{
if (load_byte(skb, 0))
bpf_tail_call_static(skb, &jmp_table, 1);
else
bpf_tail_call_static(skb, &jmp_table, 0);
return 1;
}
int count = 0;
SEC("tc")
int classifier_0(struct __sk_buff *skb)
{
count++;
return subprog_tail(skb);
}
SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
return 0;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c |
#include "core_reloc_types.h"
void f(struct core_reloc_enum64val___val3_missing x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
struct syscall_test_args {
__u8 data[16];
size_t size;
};
SEC("?syscall")
int kfunc_syscall_test_fail(struct syscall_test_args *args)
{
bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args) + 1);
return 0;
}
SEC("?syscall")
int kfunc_syscall_test_null_fail(struct syscall_test_args *args)
{
	/* Must be called with args as a NULL pointer.
	 * We deliberately do not check for NULL so that the verifier
	 * considers the pointer possibly non-NULL and lets us load from it.
	 *
	 * So the following cannot be added:
	 *
	 *	if (args)
	 *		return -22;
	 */
bpf_kfunc_call_test_mem_len_pass1(args, sizeof(*args));
return 0;
}
SEC("?tc")
int kfunc_call_test_get_mem_fail_rdonly(struct __sk_buff *skb)
{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
if (p)
p[0] = 42; /* this is a read-only buffer, so -EACCES */
else
ret = -1;
bpf_kfunc_call_test_release(pt);
}
return ret;
}
SEC("?tc")
int kfunc_call_test_get_mem_fail_use_after_free(struct __sk_buff *skb)
{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int));
if (p) {
p[0] = 42;
ret = p[1]; /* 108 */
} else {
ret = -1;
}
bpf_kfunc_call_test_release(pt);
}
if (p)
ret = p[0]; /* p is not valid anymore */
return ret;
}
SEC("?tc")
int kfunc_call_test_get_mem_fail_oob(struct __sk_buff *skb)
{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
if (p)
ret = p[2 * sizeof(int)]; /* oob access, so -EACCES */
else
ret = -1;
bpf_kfunc_call_test_release(pt);
}
return ret;
}
int not_const_size = 2 * sizeof(int);
SEC("?tc")
int kfunc_call_test_get_mem_fail_not_const(struct __sk_buff *skb)
{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
p = bpf_kfunc_call_test_get_rdonly_mem(pt, not_const_size); /* non const size, -EINVAL */
if (p)
ret = p[0];
else
ret = -1;
bpf_kfunc_call_test_release(pt);
}
return ret;
}
SEC("?tc")
int kfunc_call_test_mem_acquire_fail(struct __sk_buff *skb)
{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
		/* this one fails because we are not acquiring a PTR_TO_BTF_ID (a struct pointer) */
p = bpf_kfunc_call_test_acq_rdonly_mem(pt, 2 * sizeof(int));
if (p)
ret = p[0];
else
ret = -1;
bpf_kfunc_call_int_mem_release(p);
bpf_kfunc_call_test_release(pt);
}
return ret;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/kfunc_call_fail.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(value_size, sizeof(int));
__uint(key_size, sizeof(int));
} perfbuf SEC(".maps");
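/* Per-CPU perf event array used as the output channel for the benchmark
 * samples emitted below.
 */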
const volatile int batch_cnt = 0;
long sample_val = 42;
long dropped __attribute__((aligned(128))) = 0;
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bench_perfbuf(void *ctx)
{
int i;
for (i = 0; i < batch_cnt; i++) {
if (bpf_perf_event_output(ctx, &perfbuf, BPF_F_CURRENT_CPU,
&sample_val, sizeof(sample_val)))
__sync_add_and_fetch(&dropped, 1);
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/perfbuf_bench.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Facebook */
#include "vmlinux.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
void *local_storage_ptr = NULL;
void *sk_ptr = NULL;
int cookie_found = 0;
__u64 cookie = 0;
__u32 omem = 0;
void *bpf_rdonly_cast(void *, __u32) __ksym;
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} sk_storage SEC(".maps");
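/* Per-socket storage; a value of 0xdeadbeef (presumably set by the
 * userspace test) marks the socket under test, see inet6_sock_destruct.
 */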
SEC("fexit/bpf_local_storage_destroy")
int BPF_PROG(bpf_local_storage_destroy, struct bpf_local_storage *local_storage)
{
struct sock *sk;
if (local_storage_ptr != local_storage)
return 0;
sk = bpf_rdonly_cast(sk_ptr, bpf_core_type_id_kernel(struct sock));
if (sk->sk_cookie.counter != cookie)
return 0;
cookie_found++;
omem = sk->sk_omem_alloc.counter;
local_storage_ptr = NULL;
return 0;
}
SEC("fentry/inet6_sock_destruct")
int BPF_PROG(inet6_sock_destruct, struct sock *sk)
{
int *value;
if (!cookie || sk->sk_cookie.counter != cookie)
return 0;
value = bpf_sk_storage_get(&sk_storage, sk, 0, 0);
if (value && *value == 0xdeadbeef) {
cookie_found++;
sk_ptr = sk;
local_storage_ptr = sk->sk_bpf_storage;
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/basic_stack.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
SEC("socket")
__description("stack out of bounds")
__failure __msg("invalid write to stack")
__failure_unpriv
__naked void stack_out_of_bounds(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 + 8) = r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("uninitialized stack1")
__failure __msg("invalid indirect read from stack")
__failure_unpriv
__naked void uninitialized_stack1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("uninitialized stack2")
__failure __msg("invalid read from stack")
__failure_unpriv
__naked void uninitialized_stack2(void)
{
asm volatile (" \
r2 = r10; \
r0 = *(u64*)(r2 - 8); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("invalid fp arithmetic")
__failure __msg("R1 subtraction from stack pointer")
__failure_unpriv
__naked void invalid_fp_arithmetic(void)
{
	/* If this ever gets changed, make sure JITs can deal with it. */
asm volatile (" \
r0 = 0; \
r1 = r10; \
r1 -= 8; \
*(u64*)(r1 + 0) = r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("non-invalid fp arithmetic")
__success __success_unpriv __retval(0)
__naked void non_invalid_fp_arithmetic(void)
{
asm volatile (" \
r0 = 0; \
*(u64*)(r10 - 8) = r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("misaligned read from stack")
__failure __msg("misaligned stack access")
__failure_unpriv
__naked void misaligned_read_from_stack(void)
{
asm volatile (" \
r2 = r10; \
r0 = *(u64*)(r2 - 4); \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_basic_stack.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/value.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
SEC("socket")
__description("map element value store of cleared call register")
__failure __msg("R1 !read_ok")
__failure_unpriv __msg_unpriv("R1 !read_ok")
__naked void store_of_cleared_call_register(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("socket")
__description("map element value with unaligned store")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void element_value_with_unaligned_store(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 += 3; \
r1 = 42; \
*(u64*)(r0 + 0) = r1; \
r1 = 43; \
*(u64*)(r0 + 2) = r1; \
r1 = 44; \
*(u64*)(r0 - 2) = r1; \
r8 = r0; \
r1 = 32; \
*(u64*)(r8 + 0) = r1; \
r1 = 33; \
*(u64*)(r8 + 2) = r1; \
r1 = 34; \
*(u64*)(r8 - 2) = r1; \
r8 += 5; \
r1 = 22; \
*(u64*)(r8 + 0) = r1; \
r1 = 23; \
*(u64*)(r8 + 4) = r1; \
r1 = 24; \
*(u64*)(r8 - 7) = r1; \
r7 = r8; \
r7 += 3; \
r1 = 22; \
*(u64*)(r7 + 0) = r1; \
r1 = 23; \
*(u64*)(r7 + 4) = r1; \
r1 = 24; \
*(u64*)(r7 - 4) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("socket")
__description("map element value with unaligned load")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void element_value_with_unaligned_load(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
if r1 >= %[max_entries] goto l0_%=; \
r0 += 3; \
r7 = *(u64*)(r0 + 0); \
r7 = *(u64*)(r0 + 2); \
r8 = r0; \
r7 = *(u64*)(r8 + 0); \
r7 = *(u64*)(r8 + 2); \
r0 += 5; \
r7 = *(u64*)(r0 + 0); \
r7 = *(u64*)(r0 + 4); \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(max_entries, MAX_ENTRIES)
: __clobber_all);
}
SEC("socket")
__description("map element value is preserved across register spilling")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void is_preserved_across_register_spilling(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 += %[test_val_foo]; \
r1 = 42; \
*(u64*)(r0 + 0) = r1; \
r1 = r10; \
r1 += -184; \
*(u64*)(r1 + 0) = r0; \
r3 = *(u64*)(r1 + 0); \
r1 = 42; \
*(u64*)(r3 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_value.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
/* Full unroll of 600 iterations will have total
* program size close to 298k insns and this may
* cause BPF_JMP insn out of 16-bit integer range.
* So limit the unroll size to 150 so the
* total program size is around 80k insns but
* the loop will still execute 600 times.
*/
#define UNROLL_COUNT 150
#include "pyperf.h"
| linux-master | tools/testing/selftests/bpf/progs/pyperf600.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
SEC("socket")
__description("bounds checks mixing signed and unsigned, positive bounds")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_positive_bounds(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = 2; \
if r2 >= r1 goto l0_%=; \
if r1 s> 4 goto l0_%=; \
r0 += r1; \
r1 = 0; \
*(u8*)(r0 + 0) = r1; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void checks_mixing_signed_and_unsigned(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = -1; \
if r1 > r2 goto l0_%=; \
if r1 s> 1 goto l0_%=; \
r0 += r1; \
r1 = 0; \
*(u8*)(r0 + 0) = r1; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 2")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_2(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = -1; \
if r1 > r2 goto l0_%=; \
r8 = 0; \
r8 += r1; \
if r8 s> 1 goto l0_%=; \
r0 += r8; \
r0 = 0; \
*(u8*)(r8 + 0) = r0; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 3")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_3(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = -1; \
if r1 > r2 goto l0_%=; \
r8 = r1; \
if r8 s> 1 goto l0_%=; \
r0 += r8; \
r0 = 0; \
*(u8*)(r8 + 0) = r0; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 4")
__success __success_unpriv __retval(0)
__naked void signed_and_unsigned_variant_4(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = 1; \
r1 &= r2; \
if r1 s> 1 goto l0_%=; \
r0 += r1; \
r1 = 0; \
*(u8*)(r0 + 0) = r1; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 5")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_5(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = -1; \
if r1 > r2 goto l0_%=; \
if r1 s> 1 goto l0_%=; \
r0 += 4; \
r0 -= r1; \
r1 = 0; \
*(u8*)(r0 + 0) = r1; \
r0 = 0; \
l0_%=: exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 6")
__failure __msg("R4 min value is negative, either use unsigned")
__failure_unpriv
__naked void signed_and_unsigned_variant_6(void)
{
asm volatile (" \
r9 = r1; \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = r9; \
r2 = 0; \
r3 = r10; \
r3 += -512; \
r4 = *(u64*)(r10 - 16); \
r6 = -1; \
if r4 > r6 goto l0_%=; \
if r4 s> 1 goto l0_%=; \
r4 += 1; \
r5 = 0; \
r6 = 0; \
*(u16*)(r10 - 512) = r6; \
call %[bpf_skb_load_bytes]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_skb_load_bytes)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 7")
__success __success_unpriv __retval(0)
__naked void signed_and_unsigned_variant_7(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = %[__imm_0]; \
if r1 > r2 goto l0_%=; \
if r1 s> 1 goto l0_%=; \
r0 += r1; \
r1 = 0; \
*(u8*)(r0 + 0) = r1; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(__imm_0, 1024 * 1024 * 1024)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 8")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_8(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = -1; \
if r2 > r1 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: if r1 s> 1 goto l0_%=; \
r0 += r1; \
r1 = 0; \
*(u8*)(r0 + 0) = r1; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 9")
__success __success_unpriv __retval(0)
__naked void signed_and_unsigned_variant_9(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = -9223372036854775808ULL ll; \
if r2 > r1 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: if r1 s> 1 goto l0_%=; \
r0 += r1; \
r1 = 0; \
*(u8*)(r0 + 0) = r1; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 10")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_10(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = -1; \
if r2 > r1 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: if r1 s> 1 goto l0_%=; \
r0 += r1; \
r1 = 0; \
*(u8*)(r0 + 0) = r1; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 11")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_11(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = -1; \
if r2 >= r1 goto l1_%=; \
/* Dead branch. */ \
r0 = 0; \
exit; \
l1_%=: if r1 s> 1 goto l0_%=; \
r0 += r1; \
r1 = 0; \
*(u8*)(r0 + 0) = r1; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 12")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_12(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = -6; \
if r2 >= r1 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: if r1 s> 1 goto l0_%=; \
r0 += r1; \
r1 = 0; \
*(u8*)(r0 + 0) = r1; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 13")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_13(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = 2; \
if r2 >= r1 goto l0_%=; \
r7 = 1; \
if r7 s> 0 goto l1_%=; \
l0_%=: r0 = 0; \
exit; \
l1_%=: r7 += r1; \
if r7 s> 4 goto l2_%=; \
r0 += r7; \
r1 = 0; \
*(u8*)(r0 + 0) = r1; \
l2_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 14")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_14(void)
{
asm volatile (" \
r9 = *(u32*)(r1 + %[__sk_buff_mark]); \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = -1; \
r8 = 2; \
if r9 == 42 goto l1_%=; \
if r8 s> r1 goto l2_%=; \
l3_%=: if r1 s> 1 goto l2_%=; \
r0 += r1; \
l0_%=: r1 = 0; \
*(u8*)(r0 + 0) = r1; \
l2_%=: r0 = 0; \
exit; \
l1_%=: if r1 > r2 goto l2_%=; \
goto l3_%=; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 15")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_15(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64*)(r10 - 16) = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = -6; \
if r2 >= r1 goto l1_%=; \
l0_%=: r0 = 0; \
exit; \
l1_%=: r0 += r1; \
if r0 > 1 goto l2_%=; \
r0 = 0; \
exit; \
l2_%=: r1 = 0; \
*(u8*)(r0 + 0) = r1; \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct callback_ctx {
int output;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 32);
__type(key, int);
__type(value, int);
} map1 SEC(".maps");
/* These should be set by the user program */
u32 nested_callback_nr_loops;
u32 stop_index = -1;
u32 nr_loops;
int pid;
int callback_selector;
/* These are global variables so that the userspace program
 * can verify the output through the skeleton.
*/
int nr_loops_returned;
int g_output;
int err;
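/* Loop body for bpf_loop(): returning 1 terminates the loop early, so
 * iteration stops once index reaches stop_index (or after nr_loops
 * iterations, whichever comes first).
 */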
static int callback(__u32 index, void *data)
{
struct callback_ctx *ctx = data;
if (index >= stop_index)
return 1;
ctx->output += index;
return 0;
}
static int empty_callback(__u32 index, void *data)
{
return 0;
}
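/* Nested bpf_loop() calls: prog_nested_calls loops over nested_callback1,
 * which loops over nested_callback2, which in turn loops over callback and
 * adds the number of inner iterations to nr_loops_returned.
 */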
static int nested_callback2(__u32 index, void *data)
{
nr_loops_returned += bpf_loop(nested_callback_nr_loops, callback, data, 0);
return 0;
}
static int nested_callback1(__u32 index, void *data)
{
bpf_loop(nested_callback_nr_loops, nested_callback2, data, 0);
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int test_prog(void *ctx)
{
struct callback_ctx data = {};
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
nr_loops_returned = bpf_loop(nr_loops, callback, &data, 0);
if (nr_loops_returned < 0)
err = nr_loops_returned;
else
g_output = data.output;
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int prog_null_ctx(void *ctx)
{
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
nr_loops_returned = bpf_loop(nr_loops, empty_callback, NULL, 0);
return 0;
}
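/* bpf_loop() currently accepts only flags == 0; passing 1 here is expected
 * to make the helper return an error, which is exposed through err.
 */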
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int prog_invalid_flags(void *ctx)
{
struct callback_ctx data = {};
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
err = bpf_loop(nr_loops, callback, &data, 1);
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int prog_nested_calls(void *ctx)
{
struct callback_ctx data = {};
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
nr_loops_returned = 0;
bpf_loop(nr_loops, nested_callback1, &data, 0);
g_output = data.output;
return 0;
}
static int callback_set_f0(int i, void *ctx)
{
g_output = 0xF0;
return 0;
}
static int callback_set_0f(int i, void *ctx)
{
g_output = 0x0F;
return 0;
}
/*
 * A non-constant callback is a corner case for the bpf_loop inlining logic.
*/
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int prog_non_constant_callback(void *ctx)
{
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
int (*callback)(int i, void *ctx);
g_output = 0;
if (callback_selector == 0x0F)
callback = callback_set_0f;
else
callback = callback_set_f0;
bpf_loop(1, callback, NULL, 0);
return 0;
}
static int stack_check_inner_callback(void *ctx)
{
return 0;
}
static int map1_lookup_elem(int key)
{
int *val = bpf_map_lookup_elem(&map1, &key);
return val ? *val : -1;
}
static void map1_update_elem(int key, int val)
{
bpf_map_update_elem(&map1, &key, &val, BPF_ANY);
}
static int stack_check_outer_callback(void *ctx)
{
int a = map1_lookup_elem(1);
int b = map1_lookup_elem(2);
int c = map1_lookup_elem(3);
int d = map1_lookup_elem(4);
int e = map1_lookup_elem(5);
int f = map1_lookup_elem(6);
bpf_loop(1, stack_check_inner_callback, NULL, 0);
map1_update_elem(1, a + 1);
map1_update_elem(2, b + 1);
map1_update_elem(3, c + 1);
map1_update_elem(4, d + 1);
map1_update_elem(5, e + 1);
map1_update_elem(6, f + 1);
return 0;
}
/* Some of the local variables in stack_check and
* stack_check_outer_callback would be allocated on stack by
* compiler. This test should verify that stack content for these
* variables is preserved between calls to bpf_loop (might be an issue
* if loop inlining allocates stack slots incorrectly).
*/
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int stack_check(void *ctx)
{
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
int a = map1_lookup_elem(7);
int b = map1_lookup_elem(8);
int c = map1_lookup_elem(9);
int d = map1_lookup_elem(10);
int e = map1_lookup_elem(11);
int f = map1_lookup_elem(12);
bpf_loop(1, stack_check_outer_callback, NULL, 0);
map1_update_elem(7, a + 1);
map1_update_elem(8, b + 1);
map1_update_elem(9, c + 1);
map1_update_elem(10, d + 1);
map1_update_elem(11, e + 1);
map1_update_elem(12, f + 1);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_loop.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 3);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
int selector = 0;
#define TAIL_FUNC(x) \
SEC("tc") \
int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
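/* entry() maps the user-set selector to a tail-call slot: 1234 -> 1,
 * 5678 -> 2, anything else -> 0. A successful tail call never returns
 * here; if it fails (e.g. the slot is empty) execution falls through
 * and 3 is returned instead.
 */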
SEC("tc")
int entry(struct __sk_buff *skb)
{
int idx = 0;
if (selector == 1234)
idx = 1;
else if (selector == 5678)
idx = 2;
bpf_tail_call(skb, &jmp_table, idx);
return 3;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/tailcall5.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/map_ptr.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct test_val);
} map_array_48b SEC(".maps");
struct other_val {
long long foo;
long long bar;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct other_val);
} map_hash_16b SEC(".maps");
SEC("socket")
__description("bpf_map_ptr: read with negative offset rejected")
__failure __msg("R1 is bpf_array invalid negative access: off=-8")
__failure_unpriv
__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
__naked void read_with_negative_offset_rejected(void)
{
asm volatile (" \
r1 = r10; \
r1 = %[map_array_48b] ll; \
r6 = *(u64*)(r1 - 8); \
r0 = 1; \
exit; \
" :
: __imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("bpf_map_ptr: write rejected")
__failure __msg("only read from bpf_array is supported")
__failure_unpriv
__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
__naked void bpf_map_ptr_write_rejected(void)
{
asm volatile (" \
r0 = 0; \
*(u64*)(r10 - 8) = r0; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
*(u64*)(r1 + 0) = r2; \
r0 = 1; \
exit; \
" :
: __imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("bpf_map_ptr: read non-existent field rejected")
__failure
__msg("cannot access ptr member ops with moff 0 in struct bpf_map with off 1 size 4")
__failure_unpriv
__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void read_non_existent_field_rejected(void)
{
asm volatile (" \
r6 = 0; \
r1 = %[map_array_48b] ll; \
r6 = *(u32*)(r1 + 1); \
r0 = 1; \
exit; \
" :
: __imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("bpf_map_ptr: read ops field accepted")
__success __failure_unpriv
__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
__retval(1)
__naked void ptr_read_ops_field_accepted(void)
{
asm volatile (" \
r6 = 0; \
r1 = %[map_array_48b] ll; \
r6 = *(u64*)(r1 + 0); \
r0 = 1; \
exit; \
" :
: __imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("bpf_map_ptr: r = 0, map_ptr = map_ptr + r")
__success __failure_unpriv
__msg_unpriv("R1 has pointer with unsupported alu operation")
__retval(0)
__naked void map_ptr_map_ptr_r(void)
{
asm volatile (" \
r0 = 0; \
*(u64*)(r10 - 8) = r0; \
r2 = r10; \
r2 += -8; \
r0 = 0; \
r1 = %[map_hash_16b] ll; \
r1 += r0; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_16b)
: __clobber_all);
}
SEC("socket")
__description("bpf_map_ptr: r = 0, r = r + map_ptr")
__success __failure_unpriv
__msg_unpriv("R0 has pointer with unsupported alu operation")
__retval(0)
__naked void _0_r_r_map_ptr(void)
{
asm volatile (" \
r0 = 0; \
*(u64*)(r10 - 8) = r0; \
r2 = r10; \
r2 += -8; \
r1 = 0; \
r0 = %[map_hash_16b] ll; \
r1 += r0; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_16b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_map_ptr.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/map_ptr_mixing.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct test_val);
} map_array_48b SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
});
} map_in_map SEC(".maps");
void dummy_prog_42_socket(void);
void dummy_prog_24_socket(void);
void dummy_prog_loop1_socket(void);
void dummy_prog_loop2_socket(void);
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 4);
__uint(key_size, sizeof(int));
__array(values, void (void));
} map_prog1_socket SEC(".maps") = {
.values = {
[0] = (void *)&dummy_prog_42_socket,
[1] = (void *)&dummy_prog_loop1_socket,
[2] = (void *)&dummy_prog_24_socket,
},
};
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 8);
__uint(key_size, sizeof(int));
__array(values, void (void));
} map_prog2_socket SEC(".maps") = {
.values = {
[1] = (void *)&dummy_prog_loop2_socket,
[2] = (void *)&dummy_prog_24_socket,
[7] = (void *)&dummy_prog_42_socket,
},
};
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_42_socket(void)
{
asm volatile ("r0 = 42; exit;");
}
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_24_socket(void)
{
asm volatile ("r0 = 24; exit;");
}
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_loop1_socket(void)
{
asm volatile (" \
r3 = 1; \
r2 = %[map_prog1_socket] ll; \
call %[bpf_tail_call]; \
r0 = 41; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket)
: __clobber_all);
}
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_loop2_socket(void)
{
asm volatile (" \
r3 = 1; \
r2 = %[map_prog2_socket] ll; \
call %[bpf_tail_call]; \
r0 = 41; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog2_socket)
: __clobber_all);
}
SEC("tc")
__description("calls: two calls returning different map pointers for lookup (hash, array)")
__success __retval(1)
__naked void pointers_for_lookup_hash_array(void)
{
asm volatile (" \
/* main prog */ \
if r1 != 0 goto l0_%=; \
call pointers_for_lookup_hash_array__1; \
goto l1_%=; \
l0_%=: call pointers_for_lookup_hash_array__2; \
l1_%=: r1 = r0; \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
r2 = r10; \
r2 += -8; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
r0 = 1; \
l2_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
static __naked __noinline __attribute__((used))
void pointers_for_lookup_hash_array__1(void)
{
asm volatile (" \
r0 = %[map_hash_48b] ll; \
exit; \
" :
: __imm_addr(map_hash_48b)
: __clobber_all);
}
static __naked __noinline __attribute__((used))
void pointers_for_lookup_hash_array__2(void)
{
asm volatile (" \
r0 = %[map_array_48b] ll; \
exit; \
" :
: __imm_addr(map_array_48b)
: __clobber_all);
}
SEC("tc")
__description("calls: two calls returning different map pointers for lookup (hash, map in map)")
__failure __msg("only read from bpf_array is supported")
__naked void lookup_hash_map_in_map(void)
{
asm volatile (" \
/* main prog */ \
if r1 != 0 goto l0_%=; \
call lookup_hash_map_in_map__1; \
goto l1_%=; \
l0_%=: call lookup_hash_map_in_map__2; \
l1_%=: r1 = r0; \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
r2 = r10; \
r2 += -8; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
r0 = 1; \
l2_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
static __naked __noinline __attribute__((used))
void lookup_hash_map_in_map__1(void)
{
asm volatile (" \
r0 = %[map_array_48b] ll; \
exit; \
" :
: __imm_addr(map_array_48b)
: __clobber_all);
}
static __naked __noinline __attribute__((used))
void lookup_hash_map_in_map__2(void)
{
asm volatile (" \
r0 = %[map_in_map] ll; \
exit; \
" :
: __imm_addr(map_in_map)
: __clobber_all);
}
SEC("socket")
__description("cond: two branches returning different map pointers for lookup (tail, tail)")
__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr")
__retval(42)
__naked void pointers_for_lookup_tail_tail_1(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_mark]); \
if r6 != 0 goto l0_%=; \
r2 = %[map_prog2_socket] ll; \
goto l1_%=; \
l0_%=: r2 = %[map_prog1_socket] ll; \
l1_%=: r3 = 7; \
call %[bpf_tail_call]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_socket),
__imm_addr(map_prog2_socket),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("socket")
__description("cond: two branches returning same map pointers for lookup (tail, tail)")
__success __success_unpriv __retval(42)
__naked void pointers_for_lookup_tail_tail_2(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_mark]); \
if r6 == 0 goto l0_%=; \
r2 = %[map_prog2_socket] ll; \
goto l1_%=; \
l0_%=: r2 = %[map_prog2_socket] ll; \
l1_%=: r3 = 7; \
call %[bpf_tail_call]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog2_socket),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
// Copyright (c) 2019, 2020 Cloudflare
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_cls_redirect.h"
#ifdef SUBPROGS
#define INLINING __noinline
#else
#define INLINING __always_inline
#endif
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
#define IP_OFFSET_MASK (0x1FFF)
#define IP_MF (0x2000)
char _license[] SEC("license") = "Dual BSD/GPL";
/**
* Destination port and IP used for UDP encapsulation.
*/
volatile const __be16 ENCAPSULATION_PORT;
volatile const __be32 ENCAPSULATION_IP;
typedef struct {
uint64_t processed_packets_total;
uint64_t l3_protocol_packets_total_ipv4;
uint64_t l3_protocol_packets_total_ipv6;
uint64_t l4_protocol_packets_total_tcp;
uint64_t l4_protocol_packets_total_udp;
uint64_t accepted_packets_total_syn;
uint64_t accepted_packets_total_syn_cookies;
uint64_t accepted_packets_total_last_hop;
uint64_t accepted_packets_total_icmp_echo_request;
uint64_t accepted_packets_total_established;
uint64_t forwarded_packets_total_gue;
uint64_t forwarded_packets_total_gre;
uint64_t errors_total_unknown_l3_proto;
uint64_t errors_total_unknown_l4_proto;
uint64_t errors_total_malformed_ip;
uint64_t errors_total_fragmented_ip;
uint64_t errors_total_malformed_icmp;
uint64_t errors_total_unwanted_icmp;
uint64_t errors_total_malformed_icmp_pkt_too_big;
uint64_t errors_total_malformed_tcp;
uint64_t errors_total_malformed_udp;
uint64_t errors_total_icmp_echo_replies;
uint64_t errors_total_malformed_encapsulation;
uint64_t errors_total_encap_adjust_failed;
uint64_t errors_total_encap_buffer_too_small;
uint64_t errors_total_redirect_loop;
uint64_t errors_total_encap_mtu_violate;
} metrics_t;
typedef enum {
INVALID = 0,
UNKNOWN,
ECHO_REQUEST,
SYN,
SYN_COOKIE,
ESTABLISHED,
} verdict_t;
typedef struct {
uint16_t src, dst;
} flow_ports_t;
_Static_assert(
sizeof(flow_ports_t) !=
offsetofend(struct bpf_sock_tuple, ipv4.dport) -
offsetof(struct bpf_sock_tuple, ipv4.sport) - 1,
"flow_ports_t must match sport and dport in struct bpf_sock_tuple");
_Static_assert(
sizeof(flow_ports_t) !=
offsetofend(struct bpf_sock_tuple, ipv6.dport) -
offsetof(struct bpf_sock_tuple, ipv6.sport) - 1,
"flow_ports_t must match sport and dport in struct bpf_sock_tuple");
typedef int ret_t;
/* This is a bit of a hack. We need a return value which allows us to
* indicate that the regular flow of the program should continue,
* while allowing functions to use XDP_PASS and XDP_DROP, etc.
*/
static const ret_t CONTINUE_PROCESSING = -1;
/* Convenience macro to call functions which return ret_t.
*/
#define MAYBE_RETURN(x) \
do { \
ret_t __ret = x; \
if (__ret != CONTINUE_PROCESSING) \
return __ret; \
} while (0)
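/* Example usage (see get_next_hop() below):
 *
 *	MAYBE_RETURN(skip_next_hops(pkt, encap->unigue.next_hop));
 *
 * A TC_ACT_* verdict from the callee is returned immediately, while
 * CONTINUE_PROCESSING lets the caller keep going.
 */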
/* Linux packet pointers are either aligned to NET_IP_ALIGN (aka 2 bytes),
* or not aligned if the arch supports efficient unaligned access.
*
* Since the verifier ensures that eBPF packet accesses follow these rules,
* we can tell LLVM to emit code as if we always had a larger alignment.
* It will yell at us if we end up on a platform where this is not valid.
*/
typedef uint8_t *net_ptr __attribute__((align_value(8)));
typedef struct buf {
struct __sk_buff *skb;
net_ptr head;
	/* NB: tail mustn't have alignment other than 1, otherwise
* LLVM will go and eliminate code, e.g. when checking packet lengths.
*/
uint8_t *const tail;
} buf_t;
static __always_inline size_t buf_off(const buf_t *buf)
{
/* Clang seems to optimize constructs like
* a - b + c
* if c is known:
* r? = c
* r? -= b
* r? += a
*
* This is a problem if a and b are packet pointers,
* since the verifier allows subtracting two pointers to
* get a scalar, but not a scalar and a pointer.
*
* Use inline asm to break this optimization.
*/
size_t off = (size_t)buf->head;
asm("%0 -= %1" : "+r"(off) : "r"(buf->skb->data));
return off;
}
static __always_inline bool buf_copy(buf_t *buf, void *dst, size_t len)
{
if (bpf_skb_load_bytes(buf->skb, buf_off(buf), dst, len)) {
return false;
}
buf->head += len;
return true;
}
static __always_inline bool buf_skip(buf_t *buf, const size_t len)
{
/* Check whether off + len is valid in the non-linear part. */
if (buf_off(buf) + len > buf->skb->len) {
return false;
}
buf->head += len;
return true;
}
/* Returns a pointer to the current read position in buf, or NULL if len is
* larger than the remaining data. Consumes len bytes on a successful
* call.
*
* If scratch is not NULL, the function will attempt to load non-linear
* data via bpf_skb_load_bytes. On success, scratch is returned.
*/
static __always_inline void *buf_assign(buf_t *buf, const size_t len, void *scratch)
{
if (buf->head + len > buf->tail) {
if (scratch == NULL) {
return NULL;
}
return buf_copy(buf, scratch, len) ? scratch : NULL;
}
void *ptr = buf->head;
buf->head += len;
return ptr;
}
static INLINING bool pkt_skip_ipv4_options(buf_t *buf, const struct iphdr *ipv4)
{
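	/* ihl counts 32-bit words; the mandatory header is 5 words, so any
	 * options occupy (ihl - 5) * 4 bytes.
	 */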
if (ipv4->ihl <= 5) {
return true;
}
return buf_skip(buf, (ipv4->ihl - 5) * 4);
}
static INLINING bool ipv4_is_fragment(const struct iphdr *ip)
{
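	/* Fragmented if More Fragments is set (first or middle fragment) or
	 * the fragment offset is non-zero (middle or last fragment).
	 */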
uint16_t frag_off = ip->frag_off & bpf_htons(IP_OFFSET_MASK);
return (ip->frag_off & bpf_htons(IP_MF)) != 0 || frag_off > 0;
}
static __always_inline struct iphdr *pkt_parse_ipv4(buf_t *pkt, struct iphdr *scratch)
{
struct iphdr *ipv4 = buf_assign(pkt, sizeof(*ipv4), scratch);
if (ipv4 == NULL) {
return NULL;
}
if (ipv4->ihl < 5) {
return NULL;
}
if (!pkt_skip_ipv4_options(pkt, ipv4)) {
return NULL;
}
return ipv4;
}
/* Parse the L4 ports from a packet, assuming a layout like TCP or UDP. */
static INLINING bool pkt_parse_icmp_l4_ports(buf_t *pkt, flow_ports_t *ports)
{
if (!buf_copy(pkt, ports, sizeof(*ports))) {
return false;
}
/* Ports in the L4 headers are reversed, since we are parsing an ICMP
* payload which is going towards the eyeball.
*/
uint16_t dst = ports->src;
ports->src = ports->dst;
ports->dst = dst;
return true;
}
static INLINING uint16_t pkt_checksum_fold(uint32_t csum)
{
/* The highest reasonable value for an IPv4 header
* checksum requires two folds, so we just do that always.
*/
csum = (csum & 0xffff) + (csum >> 16);
csum = (csum & 0xffff) + (csum >> 16);
return (uint16_t)~csum;
}
static INLINING void pkt_ipv4_checksum(struct iphdr *iph)
{
iph->check = 0;
/* An IP header without options is 20 bytes. Two of those
* are the checksum, which we always set to zero. Hence,
* the maximum accumulated value is 18 / 2 * 0xffff = 0x8fff7,
* which fits in 32 bit.
*/
_Static_assert(sizeof(struct iphdr) == 20, "iphdr must be 20 bytes");
uint32_t acc = 0;
uint16_t *ipw = (uint16_t *)iph;
#pragma clang loop unroll(full)
for (size_t i = 0; i < sizeof(struct iphdr) / 2; i++) {
acc += ipw[i];
}
iph->check = pkt_checksum_fold(acc);
}
static INLINING
bool pkt_skip_ipv6_extension_headers(buf_t *pkt,
const struct ipv6hdr *ipv6,
uint8_t *upper_proto,
bool *is_fragment)
{
/* We understand five extension headers.
* https://tools.ietf.org/html/rfc8200#section-4.1 states that all
* headers should occur once, except Destination Options, which may
* occur twice. Hence we give up after 6 headers.
*/
struct {
uint8_t next;
uint8_t len;
} exthdr = {
.next = ipv6->nexthdr,
};
*is_fragment = false;
#pragma clang loop unroll(full)
for (int i = 0; i < 6; i++) {
switch (exthdr.next) {
case IPPROTO_FRAGMENT:
*is_fragment = true;
/* NB: We don't check that hdrlen == 0 as per spec. */
/* fallthrough; */
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING:
case IPPROTO_DSTOPTS:
case IPPROTO_MH:
if (!buf_copy(pkt, &exthdr, sizeof(exthdr))) {
return false;
}
/* hdrlen is in 8-octet units, and excludes the first 8 octets. */
if (!buf_skip(pkt,
(exthdr.len + 1) * 8 - sizeof(exthdr))) {
return false;
}
/* Decode next header */
break;
default:
/* The next header is not one of the known extension
* headers, treat it as the upper layer header.
*
* This handles IPPROTO_NONE.
*
* Encapsulating Security Payload (50) and Authentication
* Header (51) also end up here (and will trigger an
* unknown proto error later). They have a custom header
* format and seem too esoteric to care about.
*/
*upper_proto = exthdr.next;
return true;
}
}
/* We never found an upper layer header. */
return false;
}
/* This function has to be inlined, because the verifier otherwise rejects it
* due to returning a pointer to the stack. This is technically correct, since
* scratch is allocated on the stack. However, this usage should be safe since
 * it's the caller's stack after all.
*/
static __always_inline struct ipv6hdr *
pkt_parse_ipv6(buf_t *pkt, struct ipv6hdr *scratch, uint8_t *proto,
bool *is_fragment)
{
struct ipv6hdr *ipv6 = buf_assign(pkt, sizeof(*ipv6), scratch);
if (ipv6 == NULL) {
return NULL;
}
if (!pkt_skip_ipv6_extension_headers(pkt, ipv6, proto, is_fragment)) {
return NULL;
}
return ipv6;
}
/* Global metrics, per CPU
*/
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, unsigned int);
__type(value, metrics_t);
} metrics_map SEC(".maps");
static INLINING metrics_t *get_global_metrics(void)
{
uint64_t key = 0;
return bpf_map_lookup_elem(&metrics_map, &key);
}
static INLINING ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap)
{
const int payload_off =
sizeof(*encap) +
sizeof(struct in_addr) * encap->unigue.hop_count;
int32_t encap_overhead = payload_off - sizeof(struct ethhdr);
	/* Change the ethertype if the encapsulated packet is IPv6. */
if (encap->gue.proto_ctype == IPPROTO_IPV6) {
encap->eth.h_proto = bpf_htons(ETH_P_IPV6);
}
if (bpf_skb_adjust_room(skb, -encap_overhead, BPF_ADJ_ROOM_MAC,
BPF_F_ADJ_ROOM_FIXED_GSO |
BPF_F_ADJ_ROOM_NO_CSUM_RESET) ||
bpf_csum_level(skb, BPF_CSUM_LEVEL_DEC))
return TC_ACT_SHOT;
return bpf_redirect(skb->ifindex, BPF_F_INGRESS);
}
static INLINING ret_t forward_with_gre(struct __sk_buff *skb, encap_headers_t *encap,
struct in_addr *next_hop, metrics_t *metrics)
{
metrics->forwarded_packets_total_gre++;
const int payload_off =
sizeof(*encap) +
sizeof(struct in_addr) * encap->unigue.hop_count;
int32_t encap_overhead =
payload_off - sizeof(struct ethhdr) - sizeof(struct iphdr);
int32_t delta = sizeof(struct gre_base_hdr) - encap_overhead;
uint16_t proto = ETH_P_IP;
uint32_t mtu_len = 0;
/* Loop protection: the inner packet's TTL is decremented as a safeguard
* against any forwarding loop. As the only interesting field is the TTL
* hop limit for IPv6, it is easier to use bpf_skb_load_bytes/bpf_skb_store_bytes
* as they handle the split packets if needed (no need for the data to be
* in the linear section).
*/
if (encap->gue.proto_ctype == IPPROTO_IPV6) {
proto = ETH_P_IPV6;
uint8_t ttl;
int rc;
rc = bpf_skb_load_bytes(
skb, payload_off + offsetof(struct ipv6hdr, hop_limit),
&ttl, 1);
if (rc != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (ttl == 0) {
metrics->errors_total_redirect_loop++;
return TC_ACT_SHOT;
}
ttl--;
rc = bpf_skb_store_bytes(
skb, payload_off + offsetof(struct ipv6hdr, hop_limit),
&ttl, 1, 0);
if (rc != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
} else {
uint8_t ttl;
int rc;
rc = bpf_skb_load_bytes(
skb, payload_off + offsetof(struct iphdr, ttl), &ttl,
1);
if (rc != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (ttl == 0) {
metrics->errors_total_redirect_loop++;
return TC_ACT_SHOT;
}
/* IPv4 also has a checksum to patch. While the TTL is only one byte,
		 * this function only works for 2- and 4-byte arguments (the result is
* the same).
*/
rc = bpf_l3_csum_replace(
skb, payload_off + offsetof(struct iphdr, check), ttl,
ttl - 1, 2);
if (rc != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
ttl--;
rc = bpf_skb_store_bytes(
skb, payload_off + offsetof(struct iphdr, ttl), &ttl, 1,
0);
if (rc != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
}
if (bpf_check_mtu(skb, skb->ifindex, &mtu_len, delta, 0)) {
metrics->errors_total_encap_mtu_violate++;
return TC_ACT_SHOT;
}
if (bpf_skb_adjust_room(skb, delta, BPF_ADJ_ROOM_NET,
BPF_F_ADJ_ROOM_FIXED_GSO |
BPF_F_ADJ_ROOM_NO_CSUM_RESET) ||
bpf_csum_level(skb, BPF_CSUM_LEVEL_INC)) {
metrics->errors_total_encap_adjust_failed++;
return TC_ACT_SHOT;
}
if (bpf_skb_pull_data(skb, sizeof(encap_gre_t))) {
metrics->errors_total_encap_buffer_too_small++;
return TC_ACT_SHOT;
}
buf_t pkt = {
.skb = skb,
.head = (uint8_t *)(long)skb->data,
.tail = (uint8_t *)(long)skb->data_end,
};
encap_gre_t *encap_gre = buf_assign(&pkt, sizeof(encap_gre_t), NULL);
if (encap_gre == NULL) {
metrics->errors_total_encap_buffer_too_small++;
return TC_ACT_SHOT;
}
encap_gre->ip.protocol = IPPROTO_GRE;
encap_gre->ip.daddr = next_hop->s_addr;
encap_gre->ip.saddr = ENCAPSULATION_IP;
encap_gre->ip.tot_len =
bpf_htons(bpf_ntohs(encap_gre->ip.tot_len) + delta);
encap_gre->gre.flags = 0;
encap_gre->gre.protocol = bpf_htons(proto);
pkt_ipv4_checksum((void *)&encap_gre->ip);
return bpf_redirect(skb->ifindex, 0);
}
static INLINING ret_t forward_to_next_hop(struct __sk_buff *skb, encap_headers_t *encap,
struct in_addr *next_hop, metrics_t *metrics)
{
/* swap L2 addresses */
/* This assumes that packets are received from a router.
* So just swapping the MAC addresses here will make the packet go back to
* the router, which will send it to the appropriate machine.
*/
unsigned char temp[ETH_ALEN];
memcpy(temp, encap->eth.h_dest, sizeof(temp));
memcpy(encap->eth.h_dest, encap->eth.h_source,
sizeof(encap->eth.h_dest));
memcpy(encap->eth.h_source, temp, sizeof(encap->eth.h_source));
if (encap->unigue.next_hop == encap->unigue.hop_count - 1 &&
encap->unigue.last_hop_gre) {
return forward_with_gre(skb, encap, next_hop, metrics);
}
metrics->forwarded_packets_total_gue++;
uint32_t old_saddr = encap->ip.saddr;
encap->ip.saddr = encap->ip.daddr;
encap->ip.daddr = next_hop->s_addr;
if (encap->unigue.next_hop < encap->unigue.hop_count) {
encap->unigue.next_hop++;
}
/* Remove ip->saddr, add next_hop->s_addr */
const uint64_t off = offsetof(typeof(*encap), ip.check);
int ret = bpf_l3_csum_replace(skb, off, old_saddr, next_hop->s_addr, 4);
if (ret < 0) {
return TC_ACT_SHOT;
}
return bpf_redirect(skb->ifindex, 0);
}
static INLINING ret_t skip_next_hops(buf_t *pkt, int n)
{
switch (n) {
case 1:
if (!buf_skip(pkt, sizeof(struct in_addr)))
return TC_ACT_SHOT;
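		/* fall through: one next hop consumed */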
case 0:
return CONTINUE_PROCESSING;
default:
return TC_ACT_SHOT;
}
}
/* Get the next hop from the GLB header.
*
* Sets next_hop->s_addr to 0 if there are no more hops left.
* pkt is positioned just after the variable length GLB header
* iff the call is successful.
*/
static INLINING ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
struct in_addr *next_hop)
{
if (encap->unigue.next_hop > encap->unigue.hop_count) {
return TC_ACT_SHOT;
}
/* Skip "used" next hops. */
MAYBE_RETURN(skip_next_hops(pkt, encap->unigue.next_hop));
if (encap->unigue.next_hop == encap->unigue.hop_count) {
/* No more next hops, we are at the end of the GLB header. */
next_hop->s_addr = 0;
return CONTINUE_PROCESSING;
}
if (!buf_copy(pkt, next_hop, sizeof(*next_hop))) {
return TC_ACT_SHOT;
}
/* Skip the remaining next hops (may be zero). */
return skip_next_hops(pkt, encap->unigue.hop_count -
encap->unigue.next_hop - 1);
}
/* Fill a bpf_sock_tuple to be used with the socket lookup functions.
 * This is a kludge that lets us work around verifier limitations:
*
* fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321)
*
* clang will substitute a constant for sizeof, which allows the verifier
* to track its value. Based on this, it can figure out the constant
* return value, and calling code works while still being "generic" to
* IPv4 and IPv6.
*/
static INLINING uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,
uint64_t iphlen, uint16_t sport, uint16_t dport)
{
switch (iphlen) {
case sizeof(struct iphdr): {
struct iphdr *ipv4 = (struct iphdr *)iph;
tuple->ipv4.daddr = ipv4->daddr;
tuple->ipv4.saddr = ipv4->saddr;
tuple->ipv4.sport = sport;
tuple->ipv4.dport = dport;
return sizeof(tuple->ipv4);
}
case sizeof(struct ipv6hdr): {
struct ipv6hdr *ipv6 = (struct ipv6hdr *)iph;
memcpy(&tuple->ipv6.daddr, &ipv6->daddr,
sizeof(tuple->ipv6.daddr));
memcpy(&tuple->ipv6.saddr, &ipv6->saddr,
sizeof(tuple->ipv6.saddr));
tuple->ipv6.sport = sport;
tuple->ipv6.dport = dport;
return sizeof(tuple->ipv6);
}
default:
return 0;
}
}
static INLINING verdict_t classify_tcp(struct __sk_buff *skb,
struct bpf_sock_tuple *tuple, uint64_t tuplen,
void *iph, struct tcphdr *tcp)
{
struct bpf_sock *sk =
bpf_skc_lookup_tcp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0);
if (sk == NULL) {
return UNKNOWN;
}
if (sk->state != BPF_TCP_LISTEN) {
bpf_sk_release(sk);
return ESTABLISHED;
}
if (iph != NULL && tcp != NULL) {
/* Kludge: we've run out of arguments, but need the length of the ip header. */
uint64_t iphlen = sizeof(struct iphdr);
if (tuplen == sizeof(tuple->ipv6)) {
iphlen = sizeof(struct ipv6hdr);
}
if (bpf_tcp_check_syncookie(sk, iph, iphlen, tcp,
sizeof(*tcp)) == 0) {
bpf_sk_release(sk);
return SYN_COOKIE;
}
}
bpf_sk_release(sk);
return UNKNOWN;
}
static INLINING verdict_t classify_udp(struct __sk_buff *skb,
struct bpf_sock_tuple *tuple, uint64_t tuplen)
{
struct bpf_sock *sk =
bpf_sk_lookup_udp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0);
if (sk == NULL) {
return UNKNOWN;
}
if (sk->state == BPF_TCP_ESTABLISHED) {
bpf_sk_release(sk);
return ESTABLISHED;
}
bpf_sk_release(sk);
return UNKNOWN;
}
static INLINING verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto,
struct bpf_sock_tuple *tuple, uint64_t tuplen,
metrics_t *metrics)
{
switch (proto) {
case IPPROTO_TCP:
return classify_tcp(skb, tuple, tuplen, NULL, NULL);
case IPPROTO_UDP:
return classify_udp(skb, tuple, tuplen);
default:
metrics->errors_total_malformed_icmp++;
return INVALID;
}
}
static INLINING verdict_t process_icmpv4(buf_t *pkt, metrics_t *metrics)
{
struct icmphdr icmp;
if (!buf_copy(pkt, &icmp, sizeof(icmp))) {
metrics->errors_total_malformed_icmp++;
return INVALID;
}
/* We should never receive encapsulated echo replies. */
if (icmp.type == ICMP_ECHOREPLY) {
metrics->errors_total_icmp_echo_replies++;
return INVALID;
}
if (icmp.type == ICMP_ECHO) {
return ECHO_REQUEST;
}
if (icmp.type != ICMP_DEST_UNREACH || icmp.code != ICMP_FRAG_NEEDED) {
metrics->errors_total_unwanted_icmp++;
return INVALID;
}
struct iphdr _ip4;
const struct iphdr *ipv4 = pkt_parse_ipv4(pkt, &_ip4);
if (ipv4 == NULL) {
metrics->errors_total_malformed_icmp_pkt_too_big++;
return INVALID;
}
/* The source address in the outer IP header is from the entity that
* originated the ICMP message. Use the original IP header to restore
* the correct flow tuple.
*/
struct bpf_sock_tuple tuple;
tuple.ipv4.saddr = ipv4->daddr;
tuple.ipv4.daddr = ipv4->saddr;
if (!pkt_parse_icmp_l4_ports(pkt, (flow_ports_t *)&tuple.ipv4.sport)) {
metrics->errors_total_malformed_icmp_pkt_too_big++;
return INVALID;
}
return classify_icmp(pkt->skb, ipv4->protocol, &tuple,
sizeof(tuple.ipv4), metrics);
}
static INLINING verdict_t process_icmpv6(buf_t *pkt, metrics_t *metrics)
{
struct icmp6hdr icmp6;
if (!buf_copy(pkt, &icmp6, sizeof(icmp6))) {
metrics->errors_total_malformed_icmp++;
return INVALID;
}
/* We should never receive encapsulated echo replies. */
if (icmp6.icmp6_type == ICMPV6_ECHO_REPLY) {
metrics->errors_total_icmp_echo_replies++;
return INVALID;
}
if (icmp6.icmp6_type == ICMPV6_ECHO_REQUEST) {
return ECHO_REQUEST;
}
if (icmp6.icmp6_type != ICMPV6_PKT_TOOBIG) {
metrics->errors_total_unwanted_icmp++;
return INVALID;
}
bool is_fragment;
uint8_t l4_proto;
struct ipv6hdr _ipv6;
const struct ipv6hdr *ipv6 =
pkt_parse_ipv6(pkt, &_ipv6, &l4_proto, &is_fragment);
if (ipv6 == NULL) {
metrics->errors_total_malformed_icmp_pkt_too_big++;
return INVALID;
}
if (is_fragment) {
metrics->errors_total_fragmented_ip++;
return INVALID;
}
/* Swap source and dest addresses. */
struct bpf_sock_tuple tuple;
memcpy(&tuple.ipv6.saddr, &ipv6->daddr, sizeof(tuple.ipv6.saddr));
memcpy(&tuple.ipv6.daddr, &ipv6->saddr, sizeof(tuple.ipv6.daddr));
if (!pkt_parse_icmp_l4_ports(pkt, (flow_ports_t *)&tuple.ipv6.sport)) {
metrics->errors_total_malformed_icmp_pkt_too_big++;
return INVALID;
}
return classify_icmp(pkt->skb, l4_proto, &tuple, sizeof(tuple.ipv6),
metrics);
}
static INLINING verdict_t process_tcp(buf_t *pkt, void *iph, uint64_t iphlen,
metrics_t *metrics)
{
metrics->l4_protocol_packets_total_tcp++;
struct tcphdr _tcp;
struct tcphdr *tcp = buf_assign(pkt, sizeof(_tcp), &_tcp);
if (tcp == NULL) {
metrics->errors_total_malformed_tcp++;
return INVALID;
}
if (tcp->syn) {
return SYN;
}
struct bpf_sock_tuple tuple;
uint64_t tuplen =
fill_tuple(&tuple, iph, iphlen, tcp->source, tcp->dest);
return classify_tcp(pkt->skb, &tuple, tuplen, iph, tcp);
}
static INLINING verdict_t process_udp(buf_t *pkt, void *iph, uint64_t iphlen,
metrics_t *metrics)
{
metrics->l4_protocol_packets_total_udp++;
struct udphdr _udp;
struct udphdr *udph = buf_assign(pkt, sizeof(_udp), &_udp);
if (udph == NULL) {
metrics->errors_total_malformed_udp++;
return INVALID;
}
struct bpf_sock_tuple tuple;
uint64_t tuplen =
fill_tuple(&tuple, iph, iphlen, udph->source, udph->dest);
return classify_udp(pkt->skb, &tuple, tuplen);
}
static INLINING verdict_t process_ipv4(buf_t *pkt, metrics_t *metrics)
{
metrics->l3_protocol_packets_total_ipv4++;
struct iphdr _ip4;
struct iphdr *ipv4 = pkt_parse_ipv4(pkt, &_ip4);
if (ipv4 == NULL) {
metrics->errors_total_malformed_ip++;
return INVALID;
}
if (ipv4->version != 4) {
metrics->errors_total_malformed_ip++;
return INVALID;
}
if (ipv4_is_fragment(ipv4)) {
metrics->errors_total_fragmented_ip++;
return INVALID;
}
switch (ipv4->protocol) {
case IPPROTO_ICMP:
return process_icmpv4(pkt, metrics);
case IPPROTO_TCP:
return process_tcp(pkt, ipv4, sizeof(*ipv4), metrics);
case IPPROTO_UDP:
return process_udp(pkt, ipv4, sizeof(*ipv4), metrics);
default:
metrics->errors_total_unknown_l4_proto++;
return INVALID;
}
}
static INLINING verdict_t process_ipv6(buf_t *pkt, metrics_t *metrics)
{
metrics->l3_protocol_packets_total_ipv6++;
uint8_t l4_proto;
bool is_fragment;
struct ipv6hdr _ipv6;
struct ipv6hdr *ipv6 =
pkt_parse_ipv6(pkt, &_ipv6, &l4_proto, &is_fragment);
if (ipv6 == NULL) {
metrics->errors_total_malformed_ip++;
return INVALID;
}
if (ipv6->version != 6) {
metrics->errors_total_malformed_ip++;
return INVALID;
}
if (is_fragment) {
metrics->errors_total_fragmented_ip++;
return INVALID;
}
switch (l4_proto) {
case IPPROTO_ICMPV6:
return process_icmpv6(pkt, metrics);
case IPPROTO_TCP:
return process_tcp(pkt, ipv6, sizeof(*ipv6), metrics);
case IPPROTO_UDP:
return process_udp(pkt, ipv6, sizeof(*ipv6), metrics);
default:
metrics->errors_total_unknown_l4_proto++;
return INVALID;
}
}
SEC("tc")
int cls_redirect(struct __sk_buff *skb)
{
metrics_t *metrics = get_global_metrics();
if (metrics == NULL) {
return TC_ACT_SHOT;
}
metrics->processed_packets_total++;
/* Pass bogus packets as long as we're not sure they're
* destined for us.
*/
if (skb->protocol != bpf_htons(ETH_P_IP)) {
return TC_ACT_OK;
}
encap_headers_t *encap;
/* Make sure that all encapsulation headers are available in
* the linear portion of the skb. This makes it easy to manipulate them.
*/
if (bpf_skb_pull_data(skb, sizeof(*encap))) {
return TC_ACT_OK;
}
buf_t pkt = {
.skb = skb,
.head = (uint8_t *)(long)skb->data,
.tail = (uint8_t *)(long)skb->data_end,
};
encap = buf_assign(&pkt, sizeof(*encap), NULL);
if (encap == NULL) {
return TC_ACT_OK;
}
if (encap->ip.ihl != 5) {
/* We never have any options. */
return TC_ACT_OK;
}
if (encap->ip.daddr != ENCAPSULATION_IP ||
encap->ip.protocol != IPPROTO_UDP) {
return TC_ACT_OK;
}
/* TODO Check UDP length? */
if (encap->udp.dest != ENCAPSULATION_PORT) {
return TC_ACT_OK;
}
	/* We now know that the packet is destined for us, so we can
	 * drop bogus ones.
*/
if (ipv4_is_fragment((void *)&encap->ip)) {
metrics->errors_total_fragmented_ip++;
return TC_ACT_SHOT;
}
if (encap->gue.variant != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (encap->gue.control != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (encap->gue.flags != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
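	/* gue.hlen is in 32-bit words and must cover the unigue block plus
	 * one word per hop address that follows it.
	 */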
if (encap->gue.hlen !=
sizeof(encap->unigue) / 4 + encap->unigue.hop_count) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (encap->unigue.version != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (encap->unigue.reserved != 0) {
return TC_ACT_SHOT;
}
struct in_addr next_hop;
MAYBE_RETURN(get_next_hop(&pkt, encap, &next_hop));
if (next_hop.s_addr == 0) {
metrics->accepted_packets_total_last_hop++;
return accept_locally(skb, encap);
}
verdict_t verdict;
switch (encap->gue.proto_ctype) {
case IPPROTO_IPIP:
verdict = process_ipv4(&pkt, metrics);
break;
case IPPROTO_IPV6:
verdict = process_ipv6(&pkt, metrics);
break;
default:
metrics->errors_total_unknown_l3_proto++;
return TC_ACT_SHOT;
}
switch (verdict) {
case INVALID:
/* metrics have already been bumped */
return TC_ACT_SHOT;
case UNKNOWN:
return forward_to_next_hop(skb, encap, &next_hop, metrics);
case ECHO_REQUEST:
metrics->accepted_packets_total_icmp_echo_request++;
break;
case SYN:
if (encap->unigue.forward_syn) {
return forward_to_next_hop(skb, encap, &next_hop,
metrics);
}
metrics->accepted_packets_total_syn++;
break;
case SYN_COOKIE:
metrics->accepted_packets_total_syn_cookies++;
break;
case ESTABLISHED:
metrics->accepted_packets_total_established++;
break;
}
return accept_locally(skb, encap);
}
| linux-master | tools/testing/selftests/bpf/progs/test_cls_redirect.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
SEC("lsm_cgroup/inet_csk_clone")
int BPF_PROG(nonvoid_socket_clone, struct sock *newsk, const struct request_sock *req)
{
	/* Cannot return any errors from void LSM hooks. */
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/meta_access.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("xdp")
__description("meta access, test1")
__success __retval(0)
__naked void meta_access_test1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("meta access, test2")
__failure __msg("invalid access to packet, off=-8")
__naked void meta_access_test2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r0 = r2; \
r0 -= 8; \
r4 = r2; \
r4 += 8; \
if r4 > r3 goto l0_%=; \
r0 = *(u8*)(r0 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("meta access, test3")
__failure __msg("invalid access to packet")
__naked void meta_access_test3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("meta access, test4")
__failure __msg("invalid access to packet")
__naked void meta_access_test4(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r4 = *(u32*)(r1 + %[xdp_md_data]); \
r0 = r4; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("meta access, test5")
__failure __msg("R3 !read_ok")
__naked void meta_access_test5(void)
{
asm volatile (" \
r3 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r4 = *(u32*)(r1 + %[xdp_md_data]); \
r0 = r3; \
r0 += 8; \
if r0 > r4 goto l0_%=; \
r2 = -8; \
call %[bpf_xdp_adjust_meta]; \
r0 = *(u8*)(r3 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_xdp_adjust_meta),
__imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("meta access, test6")
__failure __msg("invalid access to packet")
__naked void meta_access_test6(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r0 = r3; \
r0 += 8; \
r4 = r2; \
r4 += 8; \
if r4 > r0 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("meta access, test7")
__success __retval(0)
__naked void meta_access_test7(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r0 = r3; \
r0 += 8; \
r4 = r2; \
r4 += 8; \
if r4 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("meta access, test8")
__success __retval(0)
__naked void meta_access_test8(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r4 = r2; \
r4 += 0xFFFF; \
if r4 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("meta access, test9")
__failure __msg("invalid access to packet")
__naked void meta_access_test9(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r4 = r2; \
r4 += 0xFFFF; \
r4 += 1; \
if r4 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("meta access, test10")
__failure __msg("invalid access to packet")
__naked void meta_access_test10(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r4 = *(u32*)(r1 + %[xdp_md_data_end]); \
r5 = 42; \
r6 = 24; \
*(u64*)(r10 - 8) = r5; \
lock *(u64 *)(r10 - 8) += r6; \
r5 = *(u64*)(r10 - 8); \
if r5 > 100 goto l0_%=; \
r3 += r5; \
r5 = r3; \
r6 = r2; \
r6 += 8; \
if r6 > r5 goto l0_%=; \
r2 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("meta access, test11")
__success __retval(0)
__naked void meta_access_test11(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r5 = 42; \
r6 = 24; \
*(u64*)(r10 - 8) = r5; \
lock *(u64 *)(r10 - 8) += r6; \
r5 = *(u64*)(r10 - 8); \
if r5 > 100 goto l0_%=; \
r2 += r5; \
r5 = r2; \
r6 = r2; \
r6 += 8; \
if r6 > r3 goto l0_%=; \
r5 = *(u8*)(r5 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("meta access, test12")
__success __retval(0)
__naked void meta_access_test12(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r4 = *(u32*)(r1 + %[xdp_md_data_end]); \
r5 = r3; \
r5 += 16; \
if r5 > r4 goto l0_%=; \
r0 = *(u8*)(r3 + 0); \
r5 = r2; \
r5 += 16; \
if r5 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_meta_access.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_XSKMAP);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} map_xskmap SEC(".maps");
/* This is equivalent to the following program:
*
* r6 = skb->sk;
* r7 = sk_fullsock(r6);
* r0 = sk_fullsock(r6);
* if (r0 == 0) return 0; (a)
* if (r0 != r7) return 0; (b)
* *r7->type; (c)
* return 0;
*
* It is safe to dereference r7 at point (c), because of (a) and (b).
* The test verifies that relation r0 == r7 is propagated from (b) to (c).
*/
SEC("cgroup/skb")
__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JNE false branch")
__success __failure_unpriv __msg_unpriv("R7 pointer comparison")
__retval(0)
__naked void socket_for_jne_false_branch(void)
{
asm volatile (" \
/* r6 = skb->sk; */ \
r6 = *(u64*)(r1 + %[__sk_buff_sk]); \
/* if (r6 == 0) return 0; */ \
if r6 == 0 goto l0_%=; \
/* r7 = sk_fullsock(skb); */ \
r1 = r6; \
call %[bpf_sk_fullsock]; \
r7 = r0; \
/* r0 = sk_fullsock(skb); */ \
r1 = r6; \
call %[bpf_sk_fullsock]; \
/* if (r0 == null) return 0; */ \
if r0 == 0 goto l0_%=; \
/* if (r0 == r7) r0 = *(r7->type); */ \
if r0 != r7 goto l0_%=; /* Use ! JNE ! */\
r0 = *(u32*)(r7 + %[bpf_sock_type]); \
l0_%=: /* return 0 */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
: __clobber_all);
}
/* Same as above, but verify that another branch of JNE still
* prohibits access to PTR_MAYBE_NULL.
*/
SEC("cgroup/skb")
__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JNE true branch")
__failure __msg("R7 invalid mem access 'sock_or_null'")
__failure_unpriv __msg_unpriv("R7 pointer comparison")
__naked void unchanged_for_jne_true_branch(void)
{
asm volatile (" \
/* r6 = skb->sk */ \
r6 = *(u64*)(r1 + %[__sk_buff_sk]); \
/* if (r6 == 0) return 0; */ \
if r6 == 0 goto l0_%=; \
/* r7 = sk_fullsock(skb); */ \
r1 = r6; \
call %[bpf_sk_fullsock]; \
r7 = r0; \
/* r0 = sk_fullsock(skb); */ \
r1 = r6; \
call %[bpf_sk_fullsock]; \
/* if (r0 == null) return 0; */ \
if r0 != 0 goto l0_%=; \
/* if (r0 == r7) return 0; */ \
if r0 != r7 goto l1_%=; /* Use ! JNE ! */\
goto l0_%=; \
l1_%=: /* r0 = *(r7->type); */ \
r0 = *(u32*)(r7 + %[bpf_sock_type]); \
l0_%=: /* return 0 */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
: __clobber_all);
}
/* Same as a first test, but not null should be inferred for JEQ branch */
SEC("cgroup/skb")
__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JEQ true branch")
__success __failure_unpriv __msg_unpriv("R7 pointer comparison")
__retval(0)
__naked void socket_for_jeq_true_branch(void)
{
asm volatile (" \
/* r6 = skb->sk; */ \
r6 = *(u64*)(r1 + %[__sk_buff_sk]); \
/* if (r6 == null) return 0; */ \
if r6 == 0 goto l0_%=; \
/* r7 = sk_fullsock(skb); */ \
r1 = r6; \
call %[bpf_sk_fullsock]; \
r7 = r0; \
/* r0 = sk_fullsock(skb); */ \
r1 = r6; \
call %[bpf_sk_fullsock]; \
/* if (r0 == null) return 0; */ \
if r0 == 0 goto l0_%=; \
/* if (r0 != r7) return 0; */ \
if r0 == r7 goto l1_%=; /* Use ! JEQ ! */\
goto l0_%=; \
l1_%=: /* r0 = *(r7->type); */ \
r0 = *(u32*)(r7 + %[bpf_sock_type]); \
l0_%=: /* return 0; */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
: __clobber_all);
}
/* Same as above, but verify that another branch of JNE still
* prohibits access to PTR_MAYBE_NULL.
*/
SEC("cgroup/skb")
__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JEQ false branch")
__failure __msg("R7 invalid mem access 'sock_or_null'")
__failure_unpriv __msg_unpriv("R7 pointer comparison")
__naked void unchanged_for_jeq_false_branch(void)
{
asm volatile (" \
/* r6 = skb->sk; */ \
r6 = *(u64*)(r1 + %[__sk_buff_sk]); \
/* if (r6 == null) return 0; */ \
if r6 == 0 goto l0_%=; \
/* r7 = sk_fullsock(skb); */ \
r1 = r6; \
call %[bpf_sk_fullsock]; \
r7 = r0; \
/* r0 = sk_fullsock(skb); */ \
r1 = r6; \
call %[bpf_sk_fullsock]; \
/* if (r0 == null) return 0; */ \
if r0 == 0 goto l0_%=; \
/* if (r0 != r7) r0 = *(r7->type); */ \
if r0 == r7 goto l0_%=; /* Use ! JEQ ! */\
r0 = *(u32*)(r7 + %[bpf_sock_type]); \
l0_%=: /* return 0; */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
: __clobber_all);
}
/* Maps are treated in a different branch of `mark_ptr_not_null_reg`,
* so separate test for maps case.
*/
SEC("xdp")
__description("jne/jeq infer not null, PTR_TO_MAP_VALUE_OR_NULL -> PTR_TO_MAP_VALUE")
__success __retval(0)
__naked void null_ptr_to_map_value(void)
{
asm volatile (" \
/* r9 = &some stack to use as key */ \
r1 = 0; \
*(u32*)(r10 - 8) = r1; \
r9 = r10; \
r9 += -8; \
/* r8 = process local map */ \
r8 = %[map_xskmap] ll; \
/* r6 = map_lookup_elem(r8, r9); */ \
r1 = r8; \
r2 = r9; \
call %[bpf_map_lookup_elem]; \
r6 = r0; \
/* r7 = map_lookup_elem(r8, r9); */ \
r1 = r8; \
r2 = r9; \
call %[bpf_map_lookup_elem]; \
r7 = r0; \
/* if (r6 == 0) return 0; */ \
if r6 == 0 goto l0_%=; \
/* if (r6 != r7) return 0; */ \
if r6 != r7 goto l0_%=; \
/* read *r7; */ \
r0 = *(u32*)(r7 + %[bpf_xdp_sock_queue_id]); \
l0_%=: /* return 0; */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_xskmap),
__imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
__uint(map_flags, 0);
__type(key, __u32);
__type(value, __u32);
} mim_array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 1);
__uint(map_flags, 0);
__type(key, int);
__type(value, __u32);
} mim_hash SEC(".maps");
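/* Look up the inner map stored at key 0 of each outer map (the inner maps
 * are expected to be installed by the userspace side of the test) and do a
 * simple update/lookup round trip through it.
 */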
SEC("xdp")
int xdp_mimtest0(struct xdp_md *ctx)
{
int value = 123;
int *value_p;
int key = 0;
void *map;
map = bpf_map_lookup_elem(&mim_array, &key);
if (!map)
return XDP_DROP;
bpf_map_update_elem(map, &key, &value, 0);
value_p = bpf_map_lookup_elem(map, &key);
if (!value_p || *value_p != 123)
return XDP_DROP;
map = bpf_map_lookup_elem(&mim_hash, &key);
if (!map)
return XDP_DROP;
bpf_map_update_elem(map, &key, &value, 0);
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_map_in_map.c |
#include "core_reloc_types.h"
void f(struct core_reloc_nesting___err_missing_field x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting___err_missing_field.c |
#include "core_reloc_types.h"
void f(struct core_reloc_nesting___extra_nesting x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting___extra_nesting.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/helper_access_var_len.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");
SEC("tracepoint")
__description("helper access to variable memory: stack, bitwise AND + JMP, correct bounds")
__success
__naked void bitwise_and_jmp_correct_bounds(void)
{
asm volatile (" \
r1 = r10; \
r1 += -64; \
r0 = 0; \
*(u64*)(r10 - 64) = r0; \
*(u64*)(r10 - 56) = r0; \
*(u64*)(r10 - 48) = r0; \
*(u64*)(r10 - 40) = r0; \
*(u64*)(r10 - 32) = r0; \
*(u64*)(r10 - 24) = r0; \
*(u64*)(r10 - 16) = r0; \
*(u64*)(r10 - 8) = r0; \
r2 = 16; \
*(u64*)(r1 - 128) = r2; \
r2 = *(u64*)(r1 - 128); \
r2 &= 64; \
r4 = 0; \
if r4 >= r2 goto l0_%=; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("socket")
__description("helper access to variable memory: stack, bitwise AND, zero included")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64")
__retval(0)
__naked void stack_bitwise_and_zero_included(void)
{
asm volatile (" \
/* set max stack size */ \
r6 = 0; \
*(u64*)(r10 - 128) = r6; \
/* set r3 to a random value */ \
call %[bpf_get_prandom_u32]; \
r3 = r0; \
/* use bitwise AND to limit r3 range to [0, 64] */\
r3 &= 64; \
r1 = %[map_ringbuf] ll; \
r2 = r10; \
r2 += -64; \
r4 = 0; \
/* Call bpf_ringbuf_output(), it is one of a few helper functions with\
* ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\
* For unpriv this should signal an error, because memory at &fp[-64] is\
* not initialized. \
*/ \
call %[bpf_ringbuf_output]; \
exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_ringbuf_output),
__imm_addr(map_ringbuf)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: stack, bitwise AND + JMP, wrong max")
__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
__naked void bitwise_and_jmp_wrong_max(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + 8); \
r1 = r10; \
r1 += -64; \
*(u64*)(r1 - 128) = r2; \
r2 = *(u64*)(r1 - 128); \
r2 &= 65; \
r4 = 0; \
if r4 >= r2 goto l0_%=; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, correct bounds")
__success
__naked void memory_stack_jmp_correct_bounds(void)
{
asm volatile (" \
r1 = r10; \
r1 += -64; \
r0 = 0; \
*(u64*)(r10 - 64) = r0; \
*(u64*)(r10 - 56) = r0; \
*(u64*)(r10 - 48) = r0; \
*(u64*)(r10 - 40) = r0; \
*(u64*)(r10 - 32) = r0; \
*(u64*)(r10 - 24) = r0; \
*(u64*)(r10 - 16) = r0; \
*(u64*)(r10 - 8) = r0; \
r2 = 16; \
*(u64*)(r1 - 128) = r2; \
r2 = *(u64*)(r1 - 128); \
if r2 > 64 goto l0_%=; \
r4 = 0; \
if r4 >= r2 goto l0_%=; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP (signed), correct bounds")
__success
__naked void stack_jmp_signed_correct_bounds(void)
{
asm volatile (" \
r1 = r10; \
r1 += -64; \
r0 = 0; \
*(u64*)(r10 - 64) = r0; \
*(u64*)(r10 - 56) = r0; \
*(u64*)(r10 - 48) = r0; \
*(u64*)(r10 - 40) = r0; \
*(u64*)(r10 - 32) = r0; \
*(u64*)(r10 - 24) = r0; \
*(u64*)(r10 - 16) = r0; \
*(u64*)(r10 - 8) = r0; \
r2 = 16; \
*(u64*)(r1 - 128) = r2; \
r2 = *(u64*)(r1 - 128); \
if r2 s> 64 goto l0_%=; \
r4 = 0; \
if r4 s>= r2 goto l0_%=; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, bounds + offset")
__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
__naked void memory_stack_jmp_bounds_offset(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + 8); \
r1 = r10; \
r1 += -64; \
*(u64*)(r1 - 128) = r2; \
r2 = *(u64*)(r1 - 128); \
if r2 > 64 goto l0_%=; \
r4 = 0; \
if r4 >= r2 goto l0_%=; \
r2 += 1; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, wrong max")
__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
__naked void memory_stack_jmp_wrong_max(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + 8); \
r1 = r10; \
r1 += -64; \
*(u64*)(r1 - 128) = r2; \
r2 = *(u64*)(r1 - 128); \
if r2 > 65 goto l0_%=; \
r4 = 0; \
if r4 >= r2 goto l0_%=; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, no max check")
__failure
/* because max wasn't checked, signed min is negative */
__msg("R2 min value is negative, either use unsigned or 'var &= const'")
__naked void stack_jmp_no_max_check(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + 8); \
r1 = r10; \
r1 += -64; \
*(u64*)(r1 - 128) = r2; \
r2 = *(u64*)(r1 - 128); \
r4 = 0; \
if r4 >= r2 goto l0_%=; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("socket")
__description("helper access to variable memory: stack, JMP, no min check")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64")
__retval(0)
__naked void stack_jmp_no_min_check(void)
{
asm volatile (" \
/* set max stack size */ \
r6 = 0; \
*(u64*)(r10 - 128) = r6; \
/* set r3 to a random value */ \
call %[bpf_get_prandom_u32]; \
r3 = r0; \
/* use JMP to limit r3 range to [0, 64] */ \
if r3 > 64 goto l0_%=; \
r1 = %[map_ringbuf] ll; \
r2 = r10; \
r2 += -64; \
r4 = 0; \
/* Call bpf_ringbuf_output(), it is one of a few helper functions with\
* ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\
* For unpriv this should signal an error, because memory at &fp[-64] is\
* not initialized. \
*/ \
call %[bpf_ringbuf_output]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_ringbuf_output),
__imm_addr(map_ringbuf)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP (signed), no min check")
__failure __msg("R2 min value is negative")
__naked void jmp_signed_no_min_check(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + 8); \
r1 = r10; \
r1 += -64; \
*(u64*)(r1 - 128) = r2; \
r2 = *(u64*)(r1 - 128); \
if r2 s> 64 goto l0_%=; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
r0 = 0; \
l0_%=: exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: map, JMP, correct bounds")
__success
__naked void memory_map_jmp_correct_bounds(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = %[sizeof_test_val]; \
*(u64*)(r10 - 128) = r2; \
r2 = *(u64*)(r10 - 128); \
if r2 s> %[sizeof_test_val] goto l1_%=; \
r4 = 0; \
if r4 s>= r2 goto l1_%=; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l1_%=: r0 = 0; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(sizeof_test_val, sizeof(struct test_val))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: map, JMP, wrong max")
__failure __msg("invalid access to map value, value_size=48 off=0 size=49")
__naked void memory_map_jmp_wrong_max(void)
{
asm volatile (" \
r6 = *(u64*)(r1 + 8); \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = r6; \
*(u64*)(r10 - 128) = r2; \
r2 = *(u64*)(r10 - 128); \
if r2 s> %[__imm_0] goto l1_%=; \
r4 = 0; \
if r4 s>= r2 goto l1_%=; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l1_%=: r0 = 0; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, sizeof(struct test_val) + 1)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: map adjusted, JMP, correct bounds")
__success
__naked void map_adjusted_jmp_correct_bounds(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r1 += 20; \
r2 = %[sizeof_test_val]; \
*(u64*)(r10 - 128) = r2; \
r2 = *(u64*)(r10 - 128); \
if r2 s> %[__imm_0] goto l1_%=; \
r4 = 0; \
if r4 s>= r2 goto l1_%=; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l1_%=: r0 = 0; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, sizeof(struct test_val) - 20),
__imm_const(sizeof_test_val, sizeof(struct test_val))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: map adjusted, JMP, wrong max")
__failure __msg("R1 min value is outside of the allowed memory range")
__naked void map_adjusted_jmp_wrong_max(void)
{
asm volatile (" \
r6 = *(u64*)(r1 + 8); \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r1 += 20; \
r2 = r6; \
*(u64*)(r10 - 128) = r2; \
r2 = *(u64*)(r10 - 128); \
if r2 s> %[__imm_0] goto l1_%=; \
r4 = 0; \
if r4 s>= r2 goto l1_%=; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l1_%=: r0 = 0; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, sizeof(struct test_val) - 19)
: __clobber_all);
}
SEC("tc")
__description("helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_1(void)
{
asm volatile (" \
r1 = 0; \
r2 = 0; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
exit; \
" :
: __imm(bpf_csum_diff)
: __clobber_all);
}
SEC("tc")
__description("helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)")
__failure __msg("R1 type=scalar expected=fp")
__naked void ptr_to_mem_or_null_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + 0); \
r1 = 0; \
*(u64*)(r10 - 128) = r2; \
r2 = *(u64*)(r10 - 128); \
r2 &= 64; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
exit; \
" :
: __imm(bpf_csum_diff)
: __clobber_all);
}
SEC("tc")
__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_3(void)
{
asm volatile (" \
r1 = r10; \
r1 += -8; \
r2 = 0; \
*(u64*)(r1 + 0) = r2; \
r2 &= 8; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
exit; \
" :
: __imm(bpf_csum_diff)
: __clobber_all);
}
SEC("tc")
__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_4(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = 0; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: exit; \
" :
: __imm(bpf_csum_diff),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tc")
__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_5(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = *(u64*)(r0 + 0); \
if r2 > 8 goto l0_%=; \
r1 = r10; \
r1 += -8; \
*(u64*)(r1 + 0) = r2; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: exit; \
" :
: __imm(bpf_csum_diff),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tc")
__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_6(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = *(u64*)(r0 + 0); \
if r2 > 8 goto l0_%=; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: exit; \
" :
: __imm(bpf_csum_diff),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tc")
__description("helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
/* csum_diff of 64-byte packet */
__flag(BPF_F_ANY_ALIGNMENT)
__naked void ptr_to_mem_or_null_7(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r6; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r1 = r6; \
r2 = *(u64*)(r6 + 0); \
if r2 > 8 goto l0_%=; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: exit; \
" :
: __imm(bpf_csum_diff),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)")
__failure __msg("R1 type=scalar expected=fp")
__naked void ptr_to_mem_or_null_8(void)
{
asm volatile (" \
r1 = 0; \
r2 = 0; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)")
__failure __msg("R1 type=scalar expected=fp")
__naked void ptr_to_mem_or_null_9(void)
{
asm volatile (" \
r1 = 0; \
r2 = 1; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_10(void)
{
asm volatile (" \
r1 = r10; \
r1 += -8; \
r2 = 0; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_11(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = 0; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_12(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = *(u64*)(r0 + 0); \
if r2 > 8 goto l0_%=; \
r1 = r10; \
r1 += -8; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_13(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = *(u64*)(r0 + 0); \
if r2 > 8 goto l0_%=; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("helper access to variable memory: 8 bytes leak")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -64+32 size 64")
__retval(0)
__naked void variable_memory_8_bytes_leak(void)
{
asm volatile (" \
/* set max stack size */ \
r6 = 0; \
*(u64*)(r10 - 128) = r6; \
/* set r3 to a random value */ \
call %[bpf_get_prandom_u32]; \
r3 = r0; \
r1 = %[map_ringbuf] ll; \
r2 = r10; \
r2 += -64; \
r0 = 0; \
*(u64*)(r10 - 64) = r0; \
*(u64*)(r10 - 56) = r0; \
*(u64*)(r10 - 48) = r0; \
*(u64*)(r10 - 40) = r0; \
/* Note: fp[-32] left uninitialized */ \
*(u64*)(r10 - 24) = r0; \
*(u64*)(r10 - 16) = r0; \
*(u64*)(r10 - 8) = r0; \
/* Limit r3 range to [1, 64] */ \
r3 &= 63; \
r3 += 1; \
r4 = 0; \
/* Call bpf_ringbuf_output(), it is one of a few helper functions with\
* ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\
* For unpriv this should signal an error, because memory region [1, 64]\
* at &fp[-64] is not fully initialized. \
*/ \
call %[bpf_ringbuf_output]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_ringbuf_output),
__imm_addr(map_ringbuf)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to variable memory: 8 bytes no leak (init memory)")
__success
__naked void bytes_no_leak_init_memory(void)
{
asm volatile (" \
r1 = r10; \
r0 = 0; \
r0 = 0; \
*(u64*)(r10 - 64) = r0; \
*(u64*)(r10 - 56) = r0; \
*(u64*)(r10 - 48) = r0; \
*(u64*)(r10 - 40) = r0; \
*(u64*)(r10 - 32) = r0; \
*(u64*)(r10 - 24) = r0; \
*(u64*)(r10 - 16) = r0; \
*(u64*)(r10 - 8) = r0; \
r1 += -64; \
r2 = 0; \
r2 &= 32; \
r2 += 32; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
r1 = *(u64*)(r10 - 16); \
exit; \
" :
: __imm(bpf_probe_read_kernel)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct bpf_testmod_test_read_ctx {
/* field order is mixed up */
size_t len;
char *buf;
loff_t off;
} __attribute__((preserve_access_index));
struct {
char in[256];
char out[256];
bool skip;
uint64_t my_pid_tgid;
} data = {};
struct core_reloc_module_output {
long long len;
long long off;
int read_ctx_sz;
bool read_ctx_exists;
bool buf_exists;
bool len_exists;
bool off_exists;
/* we have test_progs[-flavor], so cut flavor part */
char comm[sizeof("test_progs")];
int comm_len;
};
SEC("raw_tp/bpf_testmod_test_read")
int BPF_PROG(test_core_module_probed,
struct task_struct *task,
struct bpf_testmod_test_read_ctx *read_ctx)
{
#if __has_builtin(__builtin_preserve_enum_value)
struct core_reloc_module_output *out = (void *)&data.out;
__u64 pid_tgid = bpf_get_current_pid_tgid();
__u32 real_tgid = (__u32)(pid_tgid >> 32);
__u32 real_pid = (__u32)pid_tgid;
if (data.my_pid_tgid != pid_tgid)
return 0;
if (BPF_CORE_READ(task, pid) != real_pid || BPF_CORE_READ(task, tgid) != real_tgid)
return 0;
out->len = BPF_CORE_READ(read_ctx, len);
out->off = BPF_CORE_READ(read_ctx, off);
out->read_ctx_sz = bpf_core_type_size(struct bpf_testmod_test_read_ctx);
out->read_ctx_exists = bpf_core_type_exists(struct bpf_testmod_test_read_ctx);
out->buf_exists = bpf_core_field_exists(read_ctx->buf);
out->off_exists = bpf_core_field_exists(read_ctx->off);
out->len_exists = bpf_core_field_exists(read_ctx->len);
out->comm_len = BPF_CORE_READ_STR_INTO(&out->comm, task, comm);
#else
data.skip = true;
#endif
return 0;
}
SEC("tp_btf/bpf_testmod_test_read")
int BPF_PROG(test_core_module_direct,
struct task_struct *task,
struct bpf_testmod_test_read_ctx *read_ctx)
{
#if __has_builtin(__builtin_preserve_enum_value)
struct core_reloc_module_output *out = (void *)&data.out;
__u64 pid_tgid = bpf_get_current_pid_tgid();
__u32 real_tgid = (__u32)(pid_tgid >> 32);
__u32 real_pid = (__u32)pid_tgid;
if (data.my_pid_tgid != pid_tgid)
return 0;
if (task->pid != real_pid || task->tgid != real_tgid)
return 0;
out->len = read_ctx->len;
out->off = read_ctx->off;
out->read_ctx_sz = bpf_core_type_size(struct bpf_testmod_test_read_ctx);
out->read_ctx_exists = bpf_core_type_exists(struct bpf_testmod_test_read_ctx);
out->buf_exists = bpf_core_field_exists(read_ctx->buf);
out->off_exists = bpf_core_field_exists(read_ctx->off);
out->len_exists = bpf_core_field_exists(read_ctx->len);
out->comm_len = BPF_CORE_READ_STR_INTO(&out->comm, task, comm);
#else
data.skip = true;
#endif
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_module.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
*
* Author: Roberto Sassu <[email protected]>
*/
#include "vmlinux.h"
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#define MAX_DATA_SIZE (1024 * 1024)
#define MAX_SIG_SIZE 1024
extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
extern void bpf_key_put(struct bpf_key *key) __ksym;
extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
struct bpf_dynptr *sig_ptr,
struct bpf_key *trusted_keyring) __ksym;
__u32 monitored_pid;
__u32 user_keyring_serial;
__u64 system_keyring_id;
struct data {
__u8 data[MAX_DATA_SIZE];
__u32 data_len;
__u8 sig[MAX_SIG_SIZE];
__u32 sig_len;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct data);
} data_input SEC(".maps");
char _license[] SEC("license") = "GPL";
SEC("lsm.s/bpf")
int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size)
{
struct bpf_dynptr data_ptr, sig_ptr;
struct data *data_val;
struct bpf_key *trusted_keyring;
__u32 pid;
__u64 value;
int ret, zero = 0;
pid = bpf_get_current_pid_tgid() >> 32;
if (pid != monitored_pid)
return 0;
data_val = bpf_map_lookup_elem(&data_input, &zero);
if (!data_val)
return 0;
ret = bpf_probe_read_kernel(&value, sizeof(value), &attr->value);
if (ret)
return ret;
ret = bpf_copy_from_user(data_val, sizeof(struct data),
(void *)(unsigned long)value);
if (ret)
return ret;
if (data_val->data_len > sizeof(data_val->data))
return -EINVAL;
bpf_dynptr_from_mem(data_val->data, data_val->data_len, 0, &data_ptr);
if (data_val->sig_len > sizeof(data_val->sig))
return -EINVAL;
bpf_dynptr_from_mem(data_val->sig, data_val->sig_len, 0, &sig_ptr);
if (user_keyring_serial)
trusted_keyring = bpf_lookup_user_key(user_keyring_serial, 0);
else
trusted_keyring = bpf_lookup_system_key(system_keyring_id);
if (!trusted_keyring)
return -ENOENT;
ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, trusted_keyring);
bpf_key_put(trusted_keyring);
return ret;
}
| linux-master | tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/direct_packet_access.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("tc")
__description("pkt_end - pkt_start is allowed")
__success __retval(TEST_DATA_LEN)
__naked void end_pkt_start_is_allowed(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r0 -= r2; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test1")
__success __retval(0)
__naked void direct_packet_access_test1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test2")
__success __retval(0)
__naked void direct_packet_access_test2(void)
{
asm volatile (" \
r0 = 1; \
r4 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r3 = *(u32*)(r1 + %[__sk_buff_data]); \
r5 = r3; \
r5 += 14; \
if r5 > r4 goto l0_%=; \
r0 = *(u8*)(r3 + 7); \
r4 = *(u8*)(r3 + 12); \
r4 *= 14; \
r3 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 += r4; \
r2 = *(u32*)(r1 + %[__sk_buff_len]); \
r2 <<= 49; \
r2 >>= 49; \
r3 += r2; \
r2 = r3; \
r2 += 8; \
r1 = *(u32*)(r1 + %[__sk_buff_data_end]); \
if r2 > r1 goto l1_%=; \
r1 = *(u8*)(r3 + 4); \
l1_%=: r0 = 0; \
l0_%=: exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
: __clobber_all);
}
SEC("socket")
__description("direct packet access: test3")
__failure __msg("invalid bpf_context access off=76")
__failure_unpriv
__naked void direct_packet_access_test3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test4 (write)")
__success __retval(0)
__naked void direct_packet_access_test4_write(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
*(u8*)(r2 + 0) = r2; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test5 (pkt_end >= reg, good access)")
__success __retval(0)
__naked void pkt_end_reg_good_access(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r3 >= r0 goto l0_%=; \
r0 = 1; \
exit; \
l0_%=: r0 = *(u8*)(r2 + 0); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test6 (pkt_end >= reg, bad access)")
__failure __msg("invalid access to packet")
__naked void pkt_end_reg_bad_access(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r3 >= r0 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
r0 = 1; \
exit; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test7 (pkt_end >= reg, both accesses)")
__failure __msg("invalid access to packet")
__naked void pkt_end_reg_both_accesses(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r3 >= r0 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
r0 = 1; \
exit; \
l0_%=: r0 = *(u8*)(r2 + 0); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test8 (double test, variant 1)")
__success __retval(0)
__naked void test8_double_test_variant_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r3 >= r0 goto l0_%=; \
if r0 > r3 goto l1_%=; \
r0 = *(u8*)(r2 + 0); \
l1_%=: r0 = 1; \
exit; \
l0_%=: r0 = *(u8*)(r2 + 0); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test9 (double test, variant 2)")
__success __retval(0)
__naked void test9_double_test_variant_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r3 >= r0 goto l0_%=; \
r0 = 1; \
exit; \
l0_%=: if r0 > r3 goto l1_%=; \
r0 = *(u8*)(r2 + 0); \
l1_%=: r0 = *(u8*)(r2 + 0); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test10 (write invalid)")
__failure __msg("invalid access to packet")
__naked void packet_access_test10_write_invalid(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: *(u8*)(r2 + 0) = r2; \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test11 (shift, good access)")
__success __retval(1)
__naked void access_test11_shift_good_access(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 22; \
if r0 > r3 goto l0_%=; \
r3 = 144; \
r5 = r3; \
r5 += 23; \
r5 >>= 3; \
r6 = r2; \
r6 += r5; \
r0 = 1; \
exit; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test12 (and, good access)")
__success __retval(1)
__naked void access_test12_and_good_access(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 22; \
if r0 > r3 goto l0_%=; \
r3 = 144; \
r5 = r3; \
r5 += 23; \
r5 &= 15; \
r6 = r2; \
r6 += r5; \
r0 = 1; \
exit; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test13 (branches, good access)")
__success __retval(1)
__naked void access_test13_branches_good_access(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 22; \
if r0 > r3 goto l0_%=; \
r3 = *(u32*)(r1 + %[__sk_buff_mark]); \
r4 = 1; \
if r3 > r4 goto l1_%=; \
r3 = 14; \
goto l2_%=; \
l1_%=: r3 = 24; \
l2_%=: r5 = r3; \
r5 += 23; \
r5 &= 15; \
r6 = r2; \
r6 += r5; \
r0 = 1; \
exit; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)")
__success __retval(1)
__naked void _0_const_imm_good_access(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 22; \
if r0 > r3 goto l0_%=; \
r5 = 12; \
r5 >>= 4; \
r6 = r2; \
r6 += r5; \
r0 = *(u8*)(r6 + 0); \
r0 = 1; \
exit; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test15 (spill with xadd)")
__failure __msg("R2 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void access_test15_spill_with_xadd(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r5 = 4096; \
r4 = r10; \
r4 += -8; \
*(u64*)(r4 + 0) = r2; \
lock *(u64 *)(r4 + 0) += r5; \
r2 = *(u64*)(r4 + 0); \
*(u32*)(r2 + 0) = r5; \
r0 = 0; \
l0_%=: exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test16 (arith on data_end)")
__failure __msg("R3 pointer arithmetic on pkt_end")
__naked void test16_arith_on_data_end(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
r3 += 16; \
if r0 > r3 goto l0_%=; \
*(u8*)(r2 + 0) = r2; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test17 (pruning, alignment)")
__failure __msg("misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4")
__flag(BPF_F_STRICT_ALIGNMENT)
__naked void packet_access_test17_pruning_alignment(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r7 = *(u32*)(r1 + %[__sk_buff_mark]); \
r0 = r2; \
r0 += 14; \
if r7 > 1 goto l0_%=; \
l2_%=: if r0 > r3 goto l1_%=; \
*(u32*)(r0 - 4) = r0; \
l1_%=: r0 = 0; \
exit; \
l0_%=: r0 += 1; \
goto l2_%=; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test18 (imm += pkt_ptr, 1)")
__success __retval(0)
__naked void test18_imm_pkt_ptr_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = 8; \
r0 += r2; \
if r0 > r3 goto l0_%=; \
*(u8*)(r2 + 0) = r2; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test19 (imm += pkt_ptr, 2)")
__success __retval(0)
__naked void test19_imm_pkt_ptr_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r4 = 4; \
r4 += r2; \
*(u8*)(r4 + 0) = r4; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test20 (x += pkt_ptr, 1)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test20_x_pkt_ptr_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = 0xffffffff; \
*(u64*)(r10 - 8) = r0; \
r0 = *(u64*)(r10 - 8); \
r0 &= 0x7fff; \
r4 = r0; \
r4 += r2; \
r5 = r4; \
r4 += %[__imm_0]; \
if r4 > r3 goto l0_%=; \
*(u64*)(r5 + 0) = r4; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__imm_0, 0x7fff - 1),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test21 (x += pkt_ptr, 2)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test21_x_pkt_ptr_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r4 = 0xffffffff; \
*(u64*)(r10 - 8) = r4; \
r4 = *(u64*)(r10 - 8); \
r4 &= 0x7fff; \
r4 += r2; \
r5 = r4; \
r4 += %[__imm_0]; \
if r4 > r3 goto l0_%=; \
*(u64*)(r5 + 0) = r4; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__imm_0, 0x7fff - 1),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test22 (x += pkt_ptr, 3)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test22_x_pkt_ptr_3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
*(u64*)(r10 - 8) = r2; \
*(u64*)(r10 - 16) = r3; \
r3 = *(u64*)(r10 - 16); \
if r0 > r3 goto l0_%=; \
r2 = *(u64*)(r10 - 8); \
r4 = 0xffffffff; \
lock *(u64 *)(r10 - 8) += r4; \
r4 = *(u64*)(r10 - 8); \
r4 >>= 49; \
r4 += r2; \
r0 = r4; \
r0 += 2; \
if r0 > r3 goto l0_%=; \
r2 = 1; \
*(u16*)(r4 + 0) = r2; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test23 (x += pkt_ptr, 4)")
__failure __msg("invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void test23_x_pkt_ptr_4(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
*(u64*)(r10 - 8) = r0; \
r0 = *(u64*)(r10 - 8); \
r0 &= 0xffff; \
r4 = r0; \
r0 = 31; \
r0 += r4; \
r0 += r2; \
r5 = r0; \
r0 += %[__imm_0]; \
if r0 > r3 goto l0_%=; \
*(u64*)(r5 + 0) = r0; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__imm_0, 0xffff - 1),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test24 (x += pkt_ptr, 5)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test24_x_pkt_ptr_5(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = 0xffffffff; \
*(u64*)(r10 - 8) = r0; \
r0 = *(u64*)(r10 - 8); \
r0 &= 0xff; \
r4 = r0; \
r0 = 64; \
r0 += r4; \
r0 += r2; \
r5 = r0; \
r0 += %[__imm_0]; \
if r0 > r3 goto l0_%=; \
*(u64*)(r5 + 0) = r0; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__imm_0, 0x7fff - 1),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test25 (marking on <, good access)")
__success __retval(0)
__naked void test25_marking_on_good_access(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 < r3 goto l0_%=; \
l1_%=: r0 = 0; \
exit; \
l0_%=: r0 = *(u8*)(r2 + 0); \
goto l1_%=; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test26 (marking on <, bad access)")
__failure __msg("invalid access to packet")
__naked void test26_marking_on_bad_access(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 < r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l1_%=: r0 = 0; \
exit; \
l0_%=: goto l1_%=; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test27 (marking on <=, good access)")
__success __retval(1)
__naked void test27_marking_on_good_access(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r3 <= r0 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test28 (marking on <=, bad access)")
__failure __msg("invalid access to packet")
__naked void test28_marking_on_bad_access(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r3 <= r0 goto l0_%=; \
l1_%=: r0 = 1; \
exit; \
l0_%=: r0 = *(u8*)(r2 + 0); \
goto l1_%=; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("direct packet access: test29 (reg > pkt_end in subprog)")
__success __retval(0)
__naked void reg_pkt_end_in_subprog(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r2 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r3 = r6; \
r3 += 8; \
call reg_pkt_end_in_subprog__1; \
if r0 == 0 goto l0_%=; \
r0 = *(u8*)(r6 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
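/* Subprogram used by test29: returns 1 when r3 (pkt_start + 8) does not
 * exceed r2 (pkt_end), i.e. the 8-byte packet access is in bounds, and 0
 * otherwise.
 */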
static __naked __noinline __attribute__((used))
void reg_pkt_end_in_subprog__1(void)
{
asm volatile (" \
r0 = 0; \
if r3 > r2 goto l0_%=; \
r0 = 1; \
l0_%=: exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("direct packet access: test30 (check_id() in regsafe(), bad access)")
__failure __msg("invalid access to packet, off=0 size=1, R2")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void id_in_regsafe_bad_access(void)
{
asm volatile (" \
/* r9 = ctx */ \
r9 = r1; \
/* r7 = ktime_get_ns() */ \
call %[bpf_ktime_get_ns]; \
r7 = r0; \
/* r6 = ktime_get_ns() */ \
call %[bpf_ktime_get_ns]; \
r6 = r0; \
/* r2 = ctx->data \
* r3 = ctx->data \
* r4 = ctx->data_end \
*/ \
r2 = *(u32*)(r9 + %[__sk_buff_data]); \
r3 = *(u32*)(r9 + %[__sk_buff_data]); \
r4 = *(u32*)(r9 + %[__sk_buff_data_end]); \
/* if r6 > 100 goto exit \
* if r7 > 100 goto exit \
*/ \
if r6 > 100 goto l0_%=; \
if r7 > 100 goto l0_%=; \
/* r2 += r6 ; this forces assignment of ID to r2\
* r2 += 1 ; get some fixed off for r2\
* r3 += r7 ; this forces assignment of ID to r3\
* r3 += 1 ; get some fixed off for r3\
*/ \
r2 += r6; \
r2 += 1; \
r3 += r7; \
r3 += 1; \
/* if r6 > r7 goto +1 ; no new information about the state is derived from\
* ; this check, thus produced verifier states differ\
* ; only in 'insn_idx' \
* r2 = r3 ; optionally share ID between r2 and r3\
*/ \
if r6 != r7 goto l1_%=; \
r2 = r3; \
l1_%=: /* if r3 > ctx->data_end goto exit */ \
if r3 > r4 goto l0_%=; \
/* r5 = *(u8 *) (r2 - 1) ; access packet memory using r2,\
* ; this is not always safe\
*/ \
r5 = *(u8*)(r2 - 1); \
l0_%=: /* exit(0) */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} array_map SEC(".maps");
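/* Callback for bpf_for_each_map_elem() below; it writes to the map key
 * pointer (via bpf_get_current_comm), exercising the verifier's handling
 * of writes through the callback key argument.
 */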
static __u64
check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
void *data)
{
bpf_get_current_comm(key, sizeof(*key));
return 0;
}
SEC("raw_tp/sys_enter")
int test_map_key_write(const void *ctx)
{
bpf_for_each_map_elem(&array_map, check_array_elem, NULL, 0);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/usdt.bpf.h>
/* this file is linked together with test_usdt.c to validate that usdt.bpf.h
 * can be included in multiple .bpf.c files forming a single final BPF
 * object file
*/
extern int my_pid;
int usdt_100_called;
int usdt_100_sum;
SEC("usdt//proc/self/exe:test:usdt_100")
int BPF_USDT(usdt_100, int x)
{
if (my_pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
__sync_fetch_and_add(&usdt_100_called, 1);
__sync_fetch_and_add(&usdt_100_sum, x);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_usdt_multispec.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <linux/tcp.h>
#include <linux/bpf.h>
#include <netinet/in.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
int page_size = 0; /* userspace should set it */
#ifndef SOL_TCP
#define SOL_TCP IPPROTO_TCP
#endif
#define SOL_CUSTOM 0xdeadbeef
struct sockopt_sk {
__u8 val;
};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct sockopt_sk);
} socket_storage_map SEC(".maps");
SEC("cgroup/getsockopt")
int _getsockopt(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
struct sockopt_sk *storage;
struct bpf_sock *sk;
/* Bypass AF_NETLINK. */
sk = ctx->sk;
if (sk && sk->family == AF_NETLINK)
goto out;
/* Make sure bpf_get_netns_cookie is callable.
*/
if (bpf_get_netns_cookie(NULL) == 0)
return 0;
if (bpf_get_netns_cookie(ctx) == 0)
return 0;
if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
/* Not interested in SOL_IP:IP_TOS;
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
goto out;
}
if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
/* Not interested in SOL_SOCKET:SO_SNDBUF;
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
goto out;
}
if (ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION) {
/* Not interested in SOL_TCP:TCP_CONGESTION;
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
goto out;
}
if (ctx->level == SOL_TCP && ctx->optname == TCP_ZEROCOPY_RECEIVE) {
/* Verify that TCP_ZEROCOPY_RECEIVE triggers.
* It has a custom implementation for performance
* reasons.
*/
/* Check that optval contains address (__u64) */
if (optval + sizeof(__u64) > optval_end)
return 0; /* bounds check */
if (((struct tcp_zerocopy_receive *)optval)->address != 0)
return 0; /* unexpected data */
goto out;
}
if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
if (optval + 1 > optval_end)
return 0; /* bounds check */
ctx->retval = 0; /* Reset system call return value to zero */
/* Always export 0x55 */
optval[0] = 0x55;
ctx->optlen = 1;
/* Userspace buffer is PAGE_SIZE * 2, but BPF
* program can only see the first PAGE_SIZE
* bytes of data.
*/
if (optval_end - optval != page_size)
return 0; /* unexpected data size */
return 1;
}
if (ctx->level != SOL_CUSTOM)
return 0; /* deny everything except custom level */
if (optval + 1 > optval_end)
return 0; /* bounds check */
storage = bpf_sk_storage_get(&socket_storage_map, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!storage)
return 0; /* couldn't get sk storage */
if (!ctx->retval)
return 0; /* kernel should not have handled
* SOL_CUSTOM, something is wrong!
*/
ctx->retval = 0; /* Reset system call return value to zero */
optval[0] = storage->val;
ctx->optlen = 1;
return 1;
out:
/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 1;
}
SEC("cgroup/setsockopt")
int _setsockopt(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
struct sockopt_sk *storage;
struct bpf_sock *sk;
/* Bypass AF_NETLINK. */
sk = ctx->sk;
if (sk && sk->family == AF_NETLINK)
goto out;
/* Make sure bpf_get_netns_cookie is callable.
*/
if (bpf_get_netns_cookie(NULL) == 0)
return 0;
if (bpf_get_netns_cookie(ctx) == 0)
return 0;
if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
/* Not interested in SOL_IP:IP_TOS;
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
return 1;
}
if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
/* Overwrite SO_SNDBUF value */
if (optval + sizeof(__u32) > optval_end)
return 0; /* bounds check */
*(__u32 *)optval = 0x55AA;
ctx->optlen = 4;
return 1;
}
if (ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION) {
/* Always use cubic */
if (optval + 5 > optval_end)
return 0; /* bounds check */
memcpy(optval, "cubic", 5);
ctx->optlen = 5;
return 1;
}
if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
/* Original optlen is larger than PAGE_SIZE. */
if (ctx->optlen != page_size * 2)
return 0; /* unexpected data size */
if (optval + 1 > optval_end)
return 0; /* bounds check */
/* Make sure we can trim the buffer. */
optval[0] = 0;
ctx->optlen = 1;
/* Userspace buffer is PAGE_SIZE * 2, but BPF
* program can only see the first PAGE_SIZE
* bytes of data.
*/
if (optval_end - optval != page_size)
return 0; /* unexpected data size */
return 1;
}
if (ctx->level != SOL_CUSTOM)
return 0; /* deny everything except custom level */
if (optval + 1 > optval_end)
return 0; /* bounds check */
storage = bpf_sk_storage_get(&socket_storage_map, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!storage)
return 0; /* couldn't get sk storage */
storage->val = optval[0];
ctx->optlen = -1; /* BPF has consumed this option, don't call kernel
* setsockopt handler.
*/
return 1;
out:
/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/sockopt_sk.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} control_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 16384);
__type(key, __u32);
__type(value, __u32);
} stackid_hmap SEC(".maps");
typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 16384);
__type(key, __u32);
__type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 16384);
__type(key, __u32);
__type(value, stack_trace_t);
} stack_amap SEC(".maps");
/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */
struct sched_switch_args {
unsigned long long pad;
char prev_comm[TASK_COMM_LEN];
int prev_pid;
int prev_prio;
long long prev_state;
char next_comm[TASK_COMM_LEN];
int next_pid;
int next_prio;
};
SEC("tracepoint/sched/sched_switch")
int oncpu(struct sched_switch_args *ctx)
{
__u32 max_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
__u32 key = 0, val = 0, *value_p;
void *stack_p;
value_p = bpf_map_lookup_elem(&control_map, &key);
if (value_p && *value_p)
return 0; /* skip if non-zero *value_p */
/* The size of stackmap and stackid_hmap should be the same */
key = bpf_get_stackid(ctx, &stackmap, 0);
if ((int)key >= 0) {
bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
stack_p = bpf_map_lookup_elem(&stack_amap, &key);
if (stack_p)
bpf_get_stack(ctx, stack_p, max_len, 0);
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_stacktrace_map.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
struct sk_stg {
__u32 pid;
__u32 last_notclose_state;
char comm[16];
};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct sk_stg);
} sk_stg_map SEC(".maps");
/* Testing delete */
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} del_sk_stg_map SEC(".maps");
char task_comm[16] = "";
SEC("tp_btf/inet_sock_set_state")
int BPF_PROG(trace_inet_sock_set_state, struct sock *sk, int oldstate,
int newstate)
{
struct sk_stg *stg;
if (newstate == BPF_TCP_CLOSE)
return 0;
stg = bpf_sk_storage_get(&sk_stg_map, sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!stg)
return 0;
stg->last_notclose_state = newstate;
bpf_sk_storage_delete(&del_sk_stg_map, sk);
return 0;
}
static void set_task_info(struct sock *sk)
{
struct task_struct *task;
struct sk_stg *stg;
stg = bpf_sk_storage_get(&sk_stg_map, sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!stg)
return;
stg->pid = bpf_get_current_pid_tgid();
task = (struct task_struct *)bpf_get_current_task();
bpf_core_read_str(&stg->comm, sizeof(stg->comm), &task->comm);
bpf_core_read_str(&task_comm, sizeof(task_comm), &task->comm);
}
SEC("fentry/inet_csk_listen_start")
int BPF_PROG(trace_inet_csk_listen_start, struct sock *sk)
{
set_task_info(sk);
return 0;
}
SEC("fentry/tcp_connect")
int BPF_PROG(trace_tcp_connect, struct sock *sk)
{
set_task_info(sk);
return 0;
}
SEC("fexit/inet_csk_accept")
int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern,
struct sock *accepted_sk)
{
set_task_info(accepted_sk);
return 0;
}
SEC("tp_btf/tcp_retransmit_synack")
int BPF_PROG(tcp_retransmit_synack, struct sock* sk, struct request_sock* req)
{
/* load only test */
bpf_sk_storage_get(&sk_stg_map, sk, 0, 0);
bpf_sk_storage_get(&sk_stg_map, req->sk, 0, 0);
return 0;
}
SEC("tp_btf/tcp_bad_csum")
int BPF_PROG(tcp_bad_csum, struct sk_buff* skb)
{
bpf_sk_storage_get(&sk_stg_map, skb->sk, 0, 0);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#if __has_attribute(btf_type_tag)
#define __tag1 __attribute__((btf_type_tag("tag1")))
#define __tag2 __attribute__((btf_type_tag("tag2")))
volatile const bool skip_tests = false;
#else
#define __tag1
#define __tag2
volatile const bool skip_tests = true;
#endif
struct btf_type_tag_test {
int __tag1 * __tag1 __tag2 *p;
} g;
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(sub, int x)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/btf_type_tag.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
*/
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} test_map_id SEC(".maps");
SEC("raw_tp/sys_enter")
int test_obj_id(void *ctx)
{
__u32 key = 0;
__u64 *value;
value = bpf_map_lookup_elem(&test_map_id, &key);
__sink(value);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_obj_id.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Google */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
extern const int bpf_prog_active __ksym; /* int type global var. */
SEC("raw_tp/sys_enter")
int handler1(const void *ctx)
{
int *active;
__u32 cpu;
cpu = bpf_get_smp_processor_id();
active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
if (active) {
/* Kernel memory obtained from bpf_{per,this}_cpu_ptr
* is read-only, should _not_ pass verification.
*/
/* WRITE_ONCE */
*(volatile int *)active = -1;
}
return 0;
}
__noinline int write_active(int *p)
{
return p ? (*p = 42) : 0;
}
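/* Same read-only check as handler1, but the write to the per-CPU kernel
 * pointer happens inside a separate noinline subprogram.
 */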
SEC("raw_tp/sys_enter")
int handler2(const void *ctx)
{
int *active;
active = bpf_this_cpu_ptr(&bpf_prog_active);
write_active(active);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"
#include "bpf_misc.h"
struct node_data {
long key;
long data;
struct bpf_rb_node node;
};
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);
private(A) struct bpf_rb_root groot2 __contains(node_data, node);
static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
struct node_data *node_a;
struct node_data *node_b;
node_a = container_of(a, struct node_data, node);
node_b = container_of(b, struct node_data, node);
return node_a->key < node_b->key;
}
SEC("?tc")
__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
long rbtree_api_nolock_add(void *ctx)
{
struct node_data *n;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
bpf_rbtree_add(&groot, &n->node, less);
return 0;
}
SEC("?tc")
__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
long rbtree_api_nolock_remove(void *ctx)
{
struct node_data *n;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less);
bpf_spin_unlock(&glock);
bpf_rbtree_remove(&groot, &n->node);
return 0;
}
SEC("?tc")
__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
long rbtree_api_nolock_first(void *ctx)
{
bpf_rbtree_first(&groot);
return 0;
}
SEC("?tc")
__failure __msg("rbtree_remove node input must be non-owning ref")
long rbtree_api_remove_unadded_node(void *ctx)
{
struct node_data *n, *m;
struct bpf_rb_node *res;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
m = bpf_obj_new(typeof(*m));
if (!m) {
bpf_obj_drop(n);
return 1;
}
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less);
/* This remove should pass verifier */
res = bpf_rbtree_remove(&groot, &n->node);
n = container_of(res, struct node_data, node);
/* This remove shouldn't, m isn't in an rbtree */
res = bpf_rbtree_remove(&groot, &m->node);
m = container_of(res, struct node_data, node);
bpf_spin_unlock(&glock);
if (n)
bpf_obj_drop(n);
if (m)
bpf_obj_drop(m);
return 0;
}
SEC("?tc")
__failure __msg("Unreleased reference id=3 alloc_insn=10")
long rbtree_api_remove_no_drop(void *ctx)
{
struct bpf_rb_node *res;
struct node_data *n;
bpf_spin_lock(&glock);
res = bpf_rbtree_first(&groot);
if (!res)
goto unlock_err;
res = bpf_rbtree_remove(&groot, res);
if (res) {
n = container_of(res, struct node_data, node);
__sink(n);
}
bpf_spin_unlock(&glock);
/* if (res) { bpf_obj_drop(n); } is missing here */
return 0;
unlock_err:
bpf_spin_unlock(&glock);
return 1;
}
SEC("?tc")
__failure __msg("arg#1 expected pointer to allocated object")
long rbtree_api_add_to_multiple_trees(void *ctx)
{
struct node_data *n;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less);
/* This add should fail since n already in groot's tree */
bpf_rbtree_add(&groot2, &n->node, less);
bpf_spin_unlock(&glock);
return 0;
}
SEC("?tc")
__failure __msg("dereference of modified ptr_or_null_ ptr R2 off=16 disallowed")
long rbtree_api_use_unchecked_remove_retval(void *ctx)
{
struct bpf_rb_node *res;
bpf_spin_lock(&glock);
res = bpf_rbtree_first(&groot);
if (!res)
goto err_out;
res = bpf_rbtree_remove(&groot, res);
bpf_spin_unlock(&glock);
bpf_spin_lock(&glock);
/* Must check res for NULL before using in rbtree_add below */
bpf_rbtree_add(&groot, res, less);
bpf_spin_unlock(&glock);
return 0;
err_out:
bpf_spin_unlock(&glock);
return 1;
}
SEC("?tc")
__failure __msg("rbtree_remove node input must be non-owning ref")
long rbtree_api_add_release_unlock_escape(void *ctx)
{
struct node_data *n;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less);
bpf_spin_unlock(&glock);
bpf_spin_lock(&glock);
/* After add() in previous critical section, n should be
* release_on_unlock and released after previous spin_unlock,
* so should not be possible to use it here
*/
bpf_rbtree_remove(&groot, &n->node);
bpf_spin_unlock(&glock);
return 0;
}
SEC("?tc")
__failure __msg("rbtree_remove node input must be non-owning ref")
long rbtree_api_first_release_unlock_escape(void *ctx)
{
struct bpf_rb_node *res;
struct node_data *n;
bpf_spin_lock(&glock);
res = bpf_rbtree_first(&groot);
if (!res) {
bpf_spin_unlock(&glock);
return 1;
}
n = container_of(res, struct node_data, node);
bpf_spin_unlock(&glock);
bpf_spin_lock(&glock);
/* After first() in previous critical section, n should be
* release_on_unlock and released after previous spin_unlock,
* so should not be possible to use it here
*/
bpf_rbtree_remove(&groot, &n->node);
bpf_spin_unlock(&glock);
return 0;
}
static bool less__bad_fn_call_add(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
struct node_data *node_a;
struct node_data *node_b;
node_a = container_of(a, struct node_data, node);
node_b = container_of(b, struct node_data, node);
bpf_rbtree_add(&groot, &node_a->node, less);
return node_a->key < node_b->key;
}
static bool less__bad_fn_call_remove(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
struct node_data *node_a;
struct node_data *node_b;
node_a = container_of(a, struct node_data, node);
node_b = container_of(b, struct node_data, node);
bpf_rbtree_remove(&groot, &node_a->node);
return node_a->key < node_b->key;
}
static bool less__bad_fn_call_first_unlock_after(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
struct node_data *node_a;
struct node_data *node_b;
node_a = container_of(a, struct node_data, node);
node_b = container_of(b, struct node_data, node);
bpf_rbtree_first(&groot);
bpf_spin_unlock(&glock);
return node_a->key < node_b->key;
}
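/* Allocate a node and add it to groot under the lock using the supplied
 * comparison callback; used to exercise invalid operations inside the
 * rbtree_add() callback.
 */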
static __always_inline
long add_with_cb(bool (cb)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
{
struct node_data *n;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, cb);
bpf_spin_unlock(&glock);
return 0;
}
SEC("?tc")
__failure __msg("arg#1 expected pointer to allocated object")
long rbtree_api_add_bad_cb_bad_fn_call_add(void *ctx)
{
return add_with_cb(less__bad_fn_call_add);
}
SEC("?tc")
__failure __msg("rbtree_remove not allowed in rbtree cb")
long rbtree_api_add_bad_cb_bad_fn_call_remove(void *ctx)
{
return add_with_cb(less__bad_fn_call_remove);
}
SEC("?tc")
__failure __msg("can't spin_{lock,unlock} in rbtree cb")
long rbtree_api_add_bad_cb_bad_fn_call_first_unlock_after(void *ctx)
{
return add_with_cb(less__bad_fn_call_first_unlock_after);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/rbtree_fail.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/raw_tp_writable.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
SEC("raw_tracepoint.w")
__description("raw_tracepoint_writable: reject variable offset")
__failure
__msg("R6 invalid variable buffer offset: off=0, var_off=(0x0; 0xffffffff)")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void tracepoint_writable_reject_variable_offset(void)
{
asm volatile (" \
/* r6 is our tp buffer */ \
r6 = *(u64*)(r1 + 0); \
r1 = %[map_hash_8b] ll; \
/* move the key (== 0) to r10-8 */ \
w0 = 0; \
r2 = r10; \
r2 += -8; \
*(u64*)(r2 + 0) = r0; \
/* lookup in the map */ \
call %[bpf_map_lookup_elem]; \
/* exit clean if null */ \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: /* shift the buffer pointer to a variable location */\
r0 = *(u32*)(r0 + 0); \
r6 += r0; \
/* clobber whatever's there */ \
r7 = 4242; \
*(u64*)(r6 + 0) = r7; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
char _license[] SEC("license") = "GPL";
extern const void bpf_fentry_test1 __ksym;
extern const void bpf_fentry_test2 __ksym;
extern const void bpf_fentry_test3 __ksym;
extern const void bpf_fentry_test4 __ksym;
extern const void bpf_fentry_test5 __ksym;
extern const void bpf_fentry_test6 __ksym;
extern const void bpf_fentry_test7 __ksym;
extern const void bpf_fentry_test8 __ksym;
int pid = 0;
bool test_cookie = false;
__u64 kprobe_test1_result = 0;
__u64 kprobe_test2_result = 0;
__u64 kprobe_test3_result = 0;
__u64 kprobe_test4_result = 0;
__u64 kprobe_test5_result = 0;
__u64 kprobe_test6_result = 0;
__u64 kprobe_test7_result = 0;
__u64 kprobe_test8_result = 0;
__u64 kretprobe_test1_result = 0;
__u64 kretprobe_test2_result = 0;
__u64 kretprobe_test3_result = 0;
__u64 kretprobe_test4_result = 0;
__u64 kretprobe_test5_result = 0;
__u64 kretprobe_test6_result = 0;
__u64 kretprobe_test7_result = 0;
__u64 kretprobe_test8_result = 0;
static void kprobe_multi_check(void *ctx, bool is_return)
{
if (bpf_get_current_pid_tgid() >> 32 != pid)
return;
__u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
__u64 addr = bpf_get_func_ip(ctx);
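	/* Record a hit for the matching test function; when cookie testing is
	 * enabled, the attach cookie must match as well.
	 */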
#define SET(__var, __addr, __cookie) ({ \
if (((const void *) addr == __addr) && \
(!test_cookie || (cookie == __cookie))) \
__var = 1; \
})
if (is_return) {
SET(kretprobe_test1_result, &bpf_fentry_test1, 8);
SET(kretprobe_test2_result, &bpf_fentry_test2, 2);
SET(kretprobe_test3_result, &bpf_fentry_test3, 7);
SET(kretprobe_test4_result, &bpf_fentry_test4, 6);
SET(kretprobe_test5_result, &bpf_fentry_test5, 5);
SET(kretprobe_test6_result, &bpf_fentry_test6, 4);
SET(kretprobe_test7_result, &bpf_fentry_test7, 3);
SET(kretprobe_test8_result, &bpf_fentry_test8, 1);
} else {
SET(kprobe_test1_result, &bpf_fentry_test1, 1);
SET(kprobe_test2_result, &bpf_fentry_test2, 7);
SET(kprobe_test3_result, &bpf_fentry_test3, 2);
SET(kprobe_test4_result, &bpf_fentry_test4, 3);
SET(kprobe_test5_result, &bpf_fentry_test5, 4);
SET(kprobe_test6_result, &bpf_fentry_test6, 5);
SET(kprobe_test7_result, &bpf_fentry_test7, 6);
SET(kprobe_test8_result, &bpf_fentry_test8, 8);
}
#undef SET
}
/*
* No tests in here, just to trigger 'bpf_fentry_test*'
* through tracing test_run
*/
SEC("fentry/bpf_modify_return_test")
int BPF_PROG(trigger)
{
return 0;
}
SEC("kprobe.multi/bpf_fentry_tes??")
int test_kprobe(struct pt_regs *ctx)
{
kprobe_multi_check(ctx, false);
return 0;
}
SEC("kretprobe.multi/bpf_fentry_test*")
int test_kretprobe(struct pt_regs *ctx)
{
kprobe_multi_check(ctx, true);
return 0;
}
SEC("kprobe.multi")
int test_kprobe_manual(struct pt_regs *ctx)
{
kprobe_multi_check(ctx, false);
return 0;
}
SEC("kretprobe.multi")
int test_kretprobe_manual(struct pt_regs *ctx)
{
kprobe_multi_check(ctx, true);
return 0;
}
extern const void bpf_testmod_fentry_test1 __ksym;
extern const void bpf_testmod_fentry_test2 __ksym;
extern const void bpf_testmod_fentry_test3 __ksym;
__u64 kprobe_testmod_test1_result = 0;
__u64 kprobe_testmod_test2_result = 0;
__u64 kprobe_testmod_test3_result = 0;
__u64 kretprobe_testmod_test1_result = 0;
__u64 kretprobe_testmod_test2_result = 0;
__u64 kretprobe_testmod_test3_result = 0;
static void kprobe_multi_testmod_check(void *ctx, bool is_return)
{
if (bpf_get_current_pid_tgid() >> 32 != pid)
return;
__u64 addr = bpf_get_func_ip(ctx);
if (is_return) {
if ((const void *) addr == &bpf_testmod_fentry_test1)
kretprobe_testmod_test1_result = 1;
if ((const void *) addr == &bpf_testmod_fentry_test2)
kretprobe_testmod_test2_result = 1;
if ((const void *) addr == &bpf_testmod_fentry_test3)
kretprobe_testmod_test3_result = 1;
} else {
if ((const void *) addr == &bpf_testmod_fentry_test1)
kprobe_testmod_test1_result = 1;
if ((const void *) addr == &bpf_testmod_fentry_test2)
kprobe_testmod_test2_result = 1;
if ((const void *) addr == &bpf_testmod_fentry_test3)
kprobe_testmod_test3_result = 1;
}
}
SEC("kprobe.multi")
int test_kprobe_testmod(struct pt_regs *ctx)
{
kprobe_multi_testmod_check(ctx, false);
return 0;
}
SEC("kretprobe.multi")
int test_kretprobe_testmod(struct pt_regs *ctx)
{
kprobe_multi_testmod_check(ctx, true);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/kprobe_multi.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <stdint.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
bool skip;
} data = {};
enum named_unsigned_enum64 {
UNSIGNED_ENUM64_VAL1 = 0x1ffffffffULL,
UNSIGNED_ENUM64_VAL2 = 0x2ffffffffULL,
UNSIGNED_ENUM64_VAL3 = 0x3ffffffffULL,
};
enum named_signed_enum64 {
SIGNED_ENUM64_VAL1 = 0x1ffffffffLL,
SIGNED_ENUM64_VAL2 = -2,
SIGNED_ENUM64_VAL3 = 0x3ffffffffLL,
};
struct core_reloc_enum64val_output {
bool unsigned_val1_exists;
bool unsigned_val2_exists;
bool unsigned_val3_exists;
bool signed_val1_exists;
bool signed_val2_exists;
bool signed_val3_exists;
long unsigned_val1;
long unsigned_val2;
long signed_val1;
long signed_val2;
};
SEC("raw_tracepoint/sys_enter")
int test_core_enum64val(void *ctx)
{
#if __clang_major__ >= 15
struct core_reloc_enum64val_output *out = (void *)&data.out;
enum named_unsigned_enum64 named_unsigned = 0;
enum named_signed_enum64 named_signed = 0;
out->unsigned_val1_exists = bpf_core_enum_value_exists(named_unsigned, UNSIGNED_ENUM64_VAL1);
out->unsigned_val2_exists = bpf_core_enum_value_exists(enum named_unsigned_enum64, UNSIGNED_ENUM64_VAL2);
out->unsigned_val3_exists = bpf_core_enum_value_exists(enum named_unsigned_enum64, UNSIGNED_ENUM64_VAL3);
out->signed_val1_exists = bpf_core_enum_value_exists(named_signed, SIGNED_ENUM64_VAL1);
out->signed_val2_exists = bpf_core_enum_value_exists(enum named_signed_enum64, SIGNED_ENUM64_VAL2);
out->signed_val3_exists = bpf_core_enum_value_exists(enum named_signed_enum64, SIGNED_ENUM64_VAL3);
out->unsigned_val1 = bpf_core_enum_value(named_unsigned, UNSIGNED_ENUM64_VAL1);
out->unsigned_val2 = bpf_core_enum_value(named_unsigned, UNSIGNED_ENUM64_VAL2);
out->signed_val1 = bpf_core_enum_value(named_signed, SIGNED_ENUM64_VAL1);
out->signed_val2 = bpf_core_enum_value(named_signed, SIGNED_ENUM64_VAL2);
/* NAMED_ENUM64_VAL3 value is optional */
#else
data.skip = true;
#endif
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Google */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct bpf_testmod_btf_type_tag_1 {
int a;
};
struct bpf_testmod_btf_type_tag_2 {
struct bpf_testmod_btf_type_tag_1 *p;
};
__u64 g;
SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_1")
int BPF_PROG(test_percpu1, struct bpf_testmod_btf_type_tag_1 *arg)
{
g = arg->a;
return 0;
}
SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_2")
int BPF_PROG(test_percpu2, struct bpf_testmod_btf_type_tag_2 *arg)
{
g = arg->p->a;
return 0;
}
/* trace_cgroup_mkdir(struct cgroup *cgrp, const char *path)
*
* struct cgroup_rstat_cpu {
* ...
* struct cgroup *updated_children;
* ...
* };
*
* struct cgroup {
* ...
* struct cgroup_rstat_cpu __percpu *rstat_cpu;
* ...
* };
*/
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_percpu_load, struct cgroup *cgrp, const char *path)
{
g = (__u64)cgrp->rstat_cpu->updated_children;
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_percpu_helper, struct cgroup *cgrp, const char *path)
{
struct cgroup_rstat_cpu *rstat;
__u32 cpu;
cpu = bpf_get_smp_processor_id();
rstat = (struct cgroup_rstat_cpu *)bpf_per_cpu_ptr(cgrp->rstat_cpu, cpu);
if (rstat) {
/* READ_ONCE */
*(volatile int *)rstat;
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
#ifdef ENABLE_ATOMICS_TESTS
bool skip_tests __attribute((__section__(".data"))) = false;
#else
bool skip_tests = true;
#endif
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(sub, int x)
{
#ifdef ENABLE_ATOMICS_TESTS
int a = 0;
int b = __sync_fetch_and_add(&a, 1);
/* b is certainly 0 here. Can the verifier tell? */
while (b)
continue;
#endif
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/atomic_bounds.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
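/* Local copy of the kernel's SOCK_INODE() helper. */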
static __attribute__((noinline)) struct inode *SOCK_INODE(struct socket *socket)
{
return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}
SEC("iter/netlink")
int dump_netlink(struct bpf_iter__netlink *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct netlink_sock *nlk = ctx->sk;
unsigned long group, ino;
struct inode *inode;
struct socket *sk;
struct sock *s;
if (nlk == (void *)0)
return 0;
if (ctx->meta->seq_num == 0)
BPF_SEQ_PRINTF(seq, "sk Eth Pid Groups "
"Rmem Wmem Dump Locks Drops "
"Inode\n");
s = &nlk->sk;
BPF_SEQ_PRINTF(seq, "%pK %-3d ", s, s->sk_protocol);
if (!nlk->groups) {
group = 0;
} else {
		/* FIXME: temporarily use bpf_probe_read_kernel here; needs
		 * verifier support to do direct access.
		 */
bpf_probe_read_kernel(&group, sizeof(group), &nlk->groups[0]);
}
BPF_SEQ_PRINTF(seq, "%-10u %08x %-8d %-8d %-5d %-8d ",
nlk->portid, (u32)group,
s->sk_rmem_alloc.counter,
s->sk_wmem_alloc.refs.counter - 1,
nlk->cb_running, s->sk_refcnt.refs.counter);
sk = s->sk_socket;
if (!sk) {
ino = 0;
} else {
/* FIXME: container_of inside SOCK_INODE has a forced
* type conversion, and direct access cannot be used
* with current verifier.
*/
inode = SOCK_INODE(sk);
bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
}
BPF_SEQ_PRINTF(seq, "%-8u %-8lu\n", s->sk_drops.counter, ino);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_netlink.c |
// SPDX-License-Identifier: GPL-2.0
/* fails to load without expected_attach_type = BPF_XDP_DEVMAP
* because of access to egress_ifindex
*/
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
SEC("xdp")
int xdpdm_devlog(struct xdp_md *ctx)
{
char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
unsigned int len = data_end - data;
bpf_trace_printk(fmt, sizeof(fmt),
ctx->ingress_ifindex, ctx->egress_ifindex, len);
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c |
// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/pkt_cls.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#ifndef ctx_ptr
# define ctx_ptr(field) (void *)(long)(field)
#endif
#define ip4_src 0xac100164 /* 172.16.1.100 */
#define ip4_dst 0xac100264 /* 172.16.2.100 */
#define ip6_src { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x01, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe }
#define ip6_dst { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x02, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe }
#ifndef v6_equal
# define v6_equal(a, b) (a.s6_addr32[0] == b.s6_addr32[0] && \
a.s6_addr32[1] == b.s6_addr32[1] && \
a.s6_addr32[2] == b.s6_addr32[2] && \
a.s6_addr32[3] == b.s6_addr32[3])
#endif
volatile const __u32 IFINDEX_SRC;
volatile const __u32 IFINDEX_DST;
static __always_inline bool is_remote_ep_v4(struct __sk_buff *skb,
__be32 addr)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
struct iphdr *ip4h;
if (data + sizeof(struct ethhdr) > data_end)
return false;
ip4h = (struct iphdr *)(data + sizeof(struct ethhdr));
if ((void *)(ip4h + 1) > data_end)
return false;
return ip4h->daddr == addr;
}
static __always_inline bool is_remote_ep_v6(struct __sk_buff *skb,
struct in6_addr addr)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
struct ipv6hdr *ip6h;
if (data + sizeof(struct ethhdr) > data_end)
return false;
ip6h = (struct ipv6hdr *)(data + sizeof(struct ethhdr));
if ((void *)(ip6h + 1) > data_end)
return false;
return v6_equal(ip6h->daddr, addr);
}
SEC("tc")
int tc_chk(struct __sk_buff *skb)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
__u32 *raw = data;
if (data + sizeof(struct ethhdr) > data_end)
return TC_ACT_SHOT;
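	/* tc_src/tc_dst zero the Ethernet addresses before redirecting; if
	 * they are still zero here, neighbour resolution did not rewrite
	 * them, so drop the packet.
	 */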
return !raw[0] && !raw[1] && !raw[2] ? TC_ACT_SHOT : TC_ACT_OK;
}
SEC("tc")
int tc_dst(struct __sk_buff *skb)
{
__u8 zero[ETH_ALEN * 2];
bool redirect = false;
switch (skb->protocol) {
case __bpf_constant_htons(ETH_P_IP):
redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_src));
break;
case __bpf_constant_htons(ETH_P_IPV6):
redirect = is_remote_ep_v6(skb, (struct in6_addr){{ip6_src}});
break;
}
if (!redirect)
return TC_ACT_OK;
__builtin_memset(&zero, 0, sizeof(zero));
if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
return TC_ACT_SHOT;
return bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0);
}
SEC("tc")
int tc_src(struct __sk_buff *skb)
{
__u8 zero[ETH_ALEN * 2];
bool redirect = false;
switch (skb->protocol) {
case __bpf_constant_htons(ETH_P_IP):
redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_dst));
break;
case __bpf_constant_htons(ETH_P_IPV6):
redirect = is_remote_ep_v6(skb, (struct in6_addr){{ip6_dst}});
break;
}
if (!redirect)
return TC_ACT_OK;
__builtin_memset(&zero, 0, sizeof(zero));
if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
return TC_ACT_SHOT;
return bpf_redirect_neigh(IFINDEX_DST, NULL, 0, 0);
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_tc_neigh.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
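/* Sign-extending move (MOVSX) instructions are part of the BPF cpu=v4
 * instruction set, hence the architecture and compiler-version guard above.
 */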
SEC("socket")
__description("MOV32SX, S8")
__success __success_unpriv __retval(0x23)
__naked void mov32sx_s8(void)
{
asm volatile (" \
w0 = 0xff23; \
w0 = (s8)w0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("MOV32SX, S16")
__success __success_unpriv __retval(0xFFFFff23)
__naked void mov32sx_s16(void)
{
asm volatile (" \
w0 = 0xff23; \
w0 = (s16)w0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("MOV64SX, S8")
__success __success_unpriv __retval(-2)
__naked void mov64sx_s8(void)
{
asm volatile (" \
r0 = 0x1fe; \
r0 = (s8)r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("MOV64SX, S16")
__success __success_unpriv __retval(0xf23)
__naked void mov64sx_s16(void)
{
asm volatile (" \
r0 = 0xf0f23; \
r0 = (s16)r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("MOV64SX, S32")
__success __success_unpriv __retval(-1)
__naked void mov64sx_s32(void)
{
asm volatile (" \
r0 = 0xfffffffe; \
r0 = (s32)r0; \
r0 >>= 1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("MOV32SX, S8, range_check")
__success __success_unpriv __retval(1)
__naked void mov32sx_s8_range(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
w1 = (s8)w0; \
/* w1 with s8 range */ \
if w1 s> 0x7f goto l0_%=; \
if w1 s< -0x80 goto l0_%=; \
r0 = 1; \
l1_%=: \
exit; \
l0_%=: \
r0 = 2; \
goto l1_%=; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("MOV32SX, S16, range_check")
__success __success_unpriv __retval(1)
__naked void mov32sx_s16_range(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
w1 = (s16)w0; \
/* w1 with s16 range */ \
if w1 s> 0x7fff goto l0_%=; \
if w1 s< -0x80ff goto l0_%=; \
r0 = 1; \
l1_%=: \
exit; \
l0_%=: \
r0 = 2; \
goto l1_%=; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("MOV32SX, S16, range_check 2")
__success __success_unpriv __retval(1)
__naked void mov32sx_s16_range_2(void)
{
asm volatile (" \
r1 = 65535; \
w2 = (s16)w1; \
r2 >>= 1; \
if r2 != 0x7fffFFFF goto l0_%=; \
r0 = 1; \
l1_%=: \
exit; \
l0_%=: \
r0 = 0; \
goto l1_%=; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("MOV64SX, S8, range_check")
__success __success_unpriv __retval(1)
__naked void mov64sx_s8_range(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = (s8)r0; \
/* r1 with s8 range */ \
if r1 s> 0x7f goto l0_%=; \
if r1 s< -0x80 goto l0_%=; \
r0 = 1; \
l1_%=: \
exit; \
l0_%=: \
r0 = 2; \
goto l1_%=; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("MOV64SX, S16, range_check")
__success __success_unpriv __retval(1)
__naked void mov64sx_s16_range(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = (s16)r0; \
/* r1 with s16 range */ \
if r1 s> 0x7fff goto l0_%=; \
if r1 s< -0x8000 goto l0_%=; \
r0 = 1; \
l1_%=: \
exit; \
l0_%=: \
r0 = 2; \
goto l1_%=; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("MOV64SX, S32, range_check")
__success __success_unpriv __retval(1)
__naked void mov64sx_s32_range(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = (s32)r0; \
/* r1 with s32 range */ \
if r1 s> 0x7fffffff goto l0_%=; \
if r1 s< -0x80000000 goto l0_%=; \
r0 = 1; \
l1_%=: \
exit; \
l0_%=: \
r0 = 2; \
goto l1_%=; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("MOV64SX, S16, R10 Sign Extension")
__failure __msg("R1 type=scalar expected=fp, pkt, pkt_meta, map_key, map_value, mem, ringbuf_mem, buf, trusted_ptr_")
__failure_unpriv __msg_unpriv("R10 sign-extension part of pointer")
__naked void mov64sx_s16_r10(void)
{
asm volatile (" \
r1 = 553656332; \
*(u32 *)(r10 - 8) = r1; \
r1 = (s16)r10; \
r1 += -8; \
r2 = 3; \
if r2 <= r1 goto l0_%=; \
l0_%=: \
call %[bpf_trace_printk]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_trace_printk)
: __clobber_all);
}
#else
SEC("socket")
__description("cpuv4 is not supported by compiler or jit, use a dummy test")
__success
int dummy_test(void)
{
return 0;
}
#endif
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_movsx.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Bytedance */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;
void bpf_cgroup_release(struct cgroup *p) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;
const volatile int local_pid;
const volatile __u64 cgid;
int remote_pid;
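/* On task creation by the test process, record the new task's tgid if it
 * runs under the cgroup identified by cgid.
 */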
SEC("tp_btf/task_newtask")
int BPF_PROG(handle__task_newtask, struct task_struct *task, u64 clone_flags)
{
struct cgroup *cgrp = NULL;
struct task_struct *acquired;
if (local_pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
acquired = bpf_task_acquire(task);
if (!acquired)
return 0;
if (local_pid == acquired->tgid)
goto out;
cgrp = bpf_cgroup_from_id(cgid);
if (!cgrp)
goto out;
if (bpf_task_under_cgroup(acquired, cgrp))
remote_pid = acquired->tgid;
out:
if (cgrp)
bpf_cgroup_release(cgrp);
bpf_task_release(acquired);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_task_under_cgroup.c |
#include "core_reloc_types.h"
void f1(struct core_reloc_nesting___err_partial_match_dups__a x) {}
void f2(struct core_reloc_nesting___err_partial_match_dups__b x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting___err_partial_match_dups.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#define MAX_LEN 256
char buf_in1[MAX_LEN] = {};
char buf_in2[MAX_LEN] = {};
int test_pid = 0;
bool capture = false;
/* .bss */
__u64 payload1_len1 = 0;
__u64 payload1_len2 = 0;
__u64 total1 = 0;
char payload1[MAX_LEN + MAX_LEN] = {};
__u64 ret_bad_read = 0;
/* .data */
int payload2_len1 = -1;
int payload2_len2 = -1;
int total2 = -1;
char payload2[MAX_LEN + MAX_LEN] = { 1 };
int payload3_len1 = -1;
int payload3_len2 = -1;
int total3 = -1;
char payload3[MAX_LEN + MAX_LEN] = { 1 };
int payload4_len1 = -1;
int payload4_len2 = -1;
int total4 = -1;
char payload4[MAX_LEN + MAX_LEN] = { 1 };
char payload_bad[5] = { 0x42, 0x42, 0x42, 0x42, 0x42 };
SEC("raw_tp/sys_enter")
int handler64_unsigned(void *regs)
{
int pid = bpf_get_current_pid_tgid() >> 32;
void *payload = payload1;
long len;
/* ignore irrelevant invocations */
if (test_pid != pid || !capture)
return 0;
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
if (len >= 0) {
payload += len;
payload1_len1 = len;
}
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
if (len >= 0) {
payload += len;
payload1_len2 = len;
}
total1 = payload - (void *)payload1;
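	/* Deliberately read from an invalid kernel address to capture the
	 * error return code.
	 */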
ret_bad_read = bpf_probe_read_kernel_str(payload_bad + 2, 1, (void *) -1);
return 0;
}
SEC("raw_tp/sys_exit")
int handler64_signed(void *regs)
{
int pid = bpf_get_current_pid_tgid() >> 32;
void *payload = payload3;
long len;
/* ignore irrelevant invocations */
if (test_pid != pid || !capture)
return 0;
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
if (len >= 0) {
payload += len;
payload3_len1 = len;
}
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
if (len >= 0) {
payload += len;
payload3_len2 = len;
}
total3 = payload - (void *)payload3;
return 0;
}
SEC("tp/raw_syscalls/sys_enter")
int handler32_unsigned(void *regs)
{
int pid = bpf_get_current_pid_tgid() >> 32;
void *payload = payload2;
u32 len;
/* ignore irrelevant invocations */
if (test_pid != pid || !capture)
return 0;
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
if (len <= MAX_LEN) {
payload += len;
payload2_len1 = len;
}
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
if (len <= MAX_LEN) {
payload += len;
payload2_len2 = len;
}
total2 = payload - (void *)payload2;
return 0;
}
SEC("tp/raw_syscalls/sys_exit")
int handler32_signed(void *regs)
{
int pid = bpf_get_current_pid_tgid() >> 32;
void *payload = payload4;
long len;
/* ignore irrelevant invocations */
if (test_pid != pid || !capture)
return 0;
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
if (len >= 0) {
payload += len;
payload4_len1 = len;
}
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
if (len >= 0) {
payload += len;
payload4_len2 = len;
}
total4 = payload - (void *)payload4;
return 0;
}
SEC("tp/syscalls/sys_exit_getpid")
int handler_exit(void *regs)
{
long bla;
if (bpf_probe_read_kernel(&bla, sizeof(bla), 0))
return 1;
else
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_varlen.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} src SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} dst_sock_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} dst_sock_hash SEC(".maps");
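/* Look up the socket stored at key 0 in the source sockmap and copy it into
 * both the destination sockmap and the destination sockhash.
 */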
SEC("tc")
int copy_sock_map(void *ctx)
{
struct bpf_sock *sk;
bool failed = false;
__u32 key = 0;
sk = bpf_map_lookup_elem(&src, &key);
if (!sk)
return SK_DROP;
if (bpf_map_update_elem(&dst_sock_map, &key, sk, 0))
failed = true;
if (bpf_map_update_elem(&dst_sock_hash, &key, sk, 0))
failed = true;
bpf_sk_release(sk);
return failed ? SK_DROP : SK_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sockmap_update.c |
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32);
__type(value, long);
__uint(max_entries, 2);
} htab SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, long);
__uint(max_entries, 2);
} array SEC(".maps");
/* Sample program which should always load for testing control paths. */
SEC(".text") int func()
{
__u64 key64 = 0;
__u32 key = 0;
long *value;
value = bpf_map_lookup_elem(&htab, &key);
if (!value)
return 1;
value = bpf_map_lookup_elem(&array, &key64);
if (!value)
return 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/sample_map_ret0.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright Leon Hwang */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define ERRMSG_LEN 64
struct xdp_errmsg {
char msg[ERRMSG_LEN];
};
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__type(key, int);
__type(value, int);
} xdp_errmsg_pb SEC(".maps");
struct xdp_attach_error_ctx {
unsigned long unused;
/*
* bpf does not support tracepoint __data_loc directly.
*
* Actually, this field is a 32 bit integer whose value encodes
* information on where to find the actual data. The first 2 bytes is
* the size of the data. The last 2 bytes is the offset from the start
* of the tracepoint struct where the data begins.
* -- https://github.com/iovisor/bpftrace/pull/1542
*/
__u32 msg; // __data_loc char[] msg;
};
/*
* Catch the error message at the tracepoint.
*/
SEC("tp/xdp/bpf_xdp_link_attach_failed")
int tp__xdp__bpf_xdp_link_attach_failed(struct xdp_attach_error_ctx *ctx)
{
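	/* The low 16 bits of ctx->msg hold the offset of the string from the
	 * start of the tracepoint record, hence the __u16 cast below.
	 */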
char *msg = (void *)(__u64) ((void *) ctx + (__u16) ctx->msg);
struct xdp_errmsg errmsg = {};
bpf_probe_read_kernel_str(&errmsg.msg, ERRMSG_LEN, msg);
bpf_perf_event_output(ctx, &xdp_errmsg_pb, BPF_F_CURRENT_CPU, &errmsg,
ERRMSG_LEN);
return 0;
}
/*
* Reuse the XDP program in xdp_dummy.c.
*/
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_attach_fail.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
int count;
SEC("uprobe.multi/./uprobe_multi:uprobe_multi_func_*")
int uprobe_bench(struct pt_regs *ctx)
{
count++;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/uprobe_multi_bench.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
int in = 0;
int out = 0;
SEC("raw_tp/sys_enter")
int raw_tp_prog(const void *ctx)
{
out = in;
return 0;
}
SEC("tp_btf/sys_enter")
int tp_btf_prog(const void *ctx)
{
out = in;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_link_pinning.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 3);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
#define TAIL_FUNC(x) \
SEC("tc") \
int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
SEC("tc")
int entry(struct __sk_buff *skb)
{
/* Multiple locations to make sure we patch
* all of them.
*/
bpf_tail_call_static(skb, &jmp_table, 0);
bpf_tail_call_static(skb, &jmp_table, 0);
bpf_tail_call_static(skb, &jmp_table, 0);
bpf_tail_call_static(skb, &jmp_table, 0);
bpf_tail_call_static(skb, &jmp_table, 1);
bpf_tail_call_static(skb, &jmp_table, 1);
bpf_tail_call_static(skb, &jmp_table, 1);
bpf_tail_call_static(skb, &jmp_table, 1);
bpf_tail_call_static(skb, &jmp_table, 2);
bpf_tail_call_static(skb, &jmp_table, 2);
bpf_tail_call_static(skb, &jmp_table, 2);
bpf_tail_call_static(skb, &jmp_table, 2);
return 3;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/tailcall1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#ifndef EBUSY
#define EBUSY 16
#endif
char _license[] SEC("license") = "GPL";
int nr_del_errs = 0;
int test_pid = 0;
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, long);
} map_a SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, long);
} map_b SEC(".maps");
SEC("fentry/bpf_local_storage_lookup")
int BPF_PROG(on_lookup)
{
struct task_struct *task = bpf_get_current_task_btf();
if (!test_pid || task->pid != test_pid)
return 0;
/* The bpf_task_storage_delete will call
* bpf_local_storage_lookup. The prog->active will
* stop the recursion.
*/
bpf_task_storage_delete(&map_a, task);
bpf_task_storage_delete(&map_b, task);
return 0;
}
SEC("fentry/bpf_local_storage_update")
int BPF_PROG(on_update)
{
struct task_struct *task = bpf_get_current_task_btf();
long *ptr;
if (!test_pid || task->pid != test_pid)
return 0;
ptr = bpf_task_storage_get(&map_a, task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
/* ptr will not be NULL when it is called from
* the bpf_task_storage_get(&map_b,...F_CREATE) in
* the BPF_PROG(on_enter) below. It is because
* the value can be found in map_a and the kernel
* does not need to acquire any spin_lock.
*/
if (ptr) {
int err;
*ptr += 1;
err = bpf_task_storage_delete(&map_a, task);
if (err == -EBUSY)
nr_del_errs++;
}
	/* This will still fail: map_b is empty and this
	 * BPF_PROG(on_update) has failed to acquire the
	 * percpu busy lock, so a potential deadlock is
	 * detected and no new storage is created.
	 */
ptr = bpf_task_storage_get(&map_b, task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr)
*ptr += 1;
return 0;
}
SEC("tp_btf/sys_enter")
int BPF_PROG(on_enter, struct pt_regs *regs, long id)
{
struct task_struct *task;
long *ptr;
task = bpf_get_current_task_btf();
if (!test_pid || task->pid != test_pid)
return 0;
ptr = bpf_task_storage_get(&map_a, task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr && !*ptr)
*ptr = 200;
ptr = bpf_task_storage_get(&map_b, task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr && !*ptr)
*ptr = 100;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/task_ls_recursion.c |