python_code (string, 0-1.8M) | repo_name (string, 7 classes) | file_path (string, 5-99 chars)
---|---|---|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Oracle and/or its affiliates. */
#include "btf_ptr.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <errno.h>
long ret = 0;
int num_subtests = 0;
int ran_subtests = 0;
bool skip = false;
#define STRSIZE 2048
#define EXPECTED_STRSIZE 256
#if defined(bpf_target_s390)
/* NULL points to a readable struct lowcore on s390, so take the last page */
#define BADPTR ((void *)0xFFFFFFFFFFFFF000ULL)
#else
#define BADPTR 0
#endif
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, char[STRSIZE]);
} strdata SEC(".maps");
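/* Byte-wise comparison helper: BPF programs cannot call into libc, so
 * open-code a bounded strncmp-style routine.
 */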
static int __strncmp(const void *m1, const void *m2, size_t len)
{
const unsigned char *s1 = m1;
const unsigned char *s2 = m2;
int i, delta = 0;
for (i = 0; i < len; i++) {
delta = s1[i] - s2[i];
if (delta || s1[i] == 0 || s2[i] == 0)
break;
}
return delta;
}
#if __has_builtin(__builtin_btf_type_id)
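/* TEST_BTF() resolves the kernel BTF id for _type, renders _ptrdata with
 * bpf_snprintf_btf() using _flags (BTF_F_COMPACT is always added), and
 * compares the output with _expected; any mismatch records an error in ret
 * and skips the remaining subtests.
 */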
#define TEST_BTF(_str, _type, _flags, _expected, ...) \
do { \
static const char _expectedval[EXPECTED_STRSIZE] = \
_expected; \
__u64 _hflags = _flags | BTF_F_COMPACT; \
static _type _ptrdata = __VA_ARGS__; \
static struct btf_ptr _ptr = { }; \
int _cmp; \
\
++num_subtests; \
if (ret < 0) \
break; \
++ran_subtests; \
_ptr.ptr = &_ptrdata; \
_ptr.type_id = bpf_core_type_id_kernel(_type); \
if (_ptr.type_id <= 0) { \
ret = -EINVAL; \
break; \
} \
ret = bpf_snprintf_btf(_str, STRSIZE, \
&_ptr, sizeof(_ptr), _hflags); \
if (ret) \
break; \
_cmp = __strncmp(_str, _expectedval, EXPECTED_STRSIZE); \
if (_cmp != 0) { \
bpf_printk("(%d) got %s", _cmp, _str); \
bpf_printk("(%d) expected %s", _cmp, \
_expectedval); \
ret = -EBADMSG; \
break; \
} \
} while (0)
#endif
/* Use where expected data string matches its stringified declaration */
#define TEST_BTF_C(_str, _type, _flags, ...) \
TEST_BTF(_str, _type, _flags, "(" #_type ")" #__VA_ARGS__, \
__VA_ARGS__)
/* TRACE_EVENT(netif_receive_skb,
* TP_PROTO(struct sk_buff *skb),
*/
SEC("tp_btf/netif_receive_skb")
int BPF_PROG(trace_netif_receive_skb, struct sk_buff *skb)
{
static __u64 flags[] = { 0, BTF_F_COMPACT, BTF_F_ZERO, BTF_F_PTR_RAW,
BTF_F_NONAME, BTF_F_COMPACT | BTF_F_ZERO |
BTF_F_PTR_RAW | BTF_F_NONAME };
static struct btf_ptr p = { };
__u32 key = 0;
int i, __ret;
char *str;
#if __has_builtin(__builtin_btf_type_id)
str = bpf_map_lookup_elem(&strdata, &key);
if (!str)
return 0;
/* Ensure we can write skb string representation */
p.type_id = bpf_core_type_id_kernel(struct sk_buff);
p.ptr = skb;
for (i = 0; i < ARRAY_SIZE(flags); i++) {
++num_subtests;
ret = bpf_snprintf_btf(str, STRSIZE, &p, sizeof(p), flags[i]);
if (ret < 0)
bpf_printk("returned %d when writing skb", ret);
++ran_subtests;
}
/* Check invalid ptr value */
p.ptr = BADPTR;
__ret = bpf_snprintf_btf(str, STRSIZE, &p, sizeof(p), 0);
if (__ret >= 0) {
bpf_printk("printing %llx should generate error, got (%d)",
(unsigned long long)BADPTR, __ret);
ret = -ERANGE;
}
/* Verify type display for various types. */
/* simple int */
TEST_BTF_C(str, int, 0, 1234);
TEST_BTF(str, int, BTF_F_NONAME, "1234", 1234);
/* zero value should be printed at toplevel */
TEST_BTF(str, int, 0, "(int)0", 0);
TEST_BTF(str, int, BTF_F_NONAME, "0", 0);
TEST_BTF(str, int, BTF_F_ZERO, "(int)0", 0);
TEST_BTF(str, int, BTF_F_NONAME | BTF_F_ZERO, "0", 0);
TEST_BTF_C(str, int, 0, -4567);
TEST_BTF(str, int, BTF_F_NONAME, "-4567", -4567);
/* simple char */
TEST_BTF_C(str, char, 0, 100);
TEST_BTF(str, char, BTF_F_NONAME, "100", 100);
/* zero value should be printed at toplevel */
TEST_BTF(str, char, 0, "(char)0", 0);
TEST_BTF(str, char, BTF_F_NONAME, "0", 0);
TEST_BTF(str, char, BTF_F_ZERO, "(char)0", 0);
TEST_BTF(str, char, BTF_F_NONAME | BTF_F_ZERO, "0", 0);
/* simple typedef */
TEST_BTF_C(str, uint64_t, 0, 100);
TEST_BTF(str, u64, BTF_F_NONAME, "1", 1);
/* zero value should be printed at toplevel */
TEST_BTF(str, u64, 0, "(u64)0", 0);
TEST_BTF(str, u64, BTF_F_NONAME, "0", 0);
TEST_BTF(str, u64, BTF_F_ZERO, "(u64)0", 0);
TEST_BTF(str, u64, BTF_F_NONAME|BTF_F_ZERO, "0", 0);
/* typedef struct */
TEST_BTF_C(str, atomic_t, 0, {.counter = (int)1,});
TEST_BTF(str, atomic_t, BTF_F_NONAME, "{1,}", {.counter = 1,});
/* typedef with 0 value should be printed at toplevel */
TEST_BTF(str, atomic_t, 0, "(atomic_t){}", {.counter = 0,});
TEST_BTF(str, atomic_t, BTF_F_NONAME, "{}", {.counter = 0,});
TEST_BTF(str, atomic_t, BTF_F_ZERO, "(atomic_t){.counter = (int)0,}",
{.counter = 0,});
TEST_BTF(str, atomic_t, BTF_F_NONAME|BTF_F_ZERO,
"{0,}", {.counter = 0,});
/* enum where enum value does (and does not) exist */
TEST_BTF_C(str, enum bpf_cmd, 0, BPF_MAP_CREATE);
TEST_BTF(str, enum bpf_cmd, 0, "(enum bpf_cmd)BPF_MAP_CREATE", 0);
TEST_BTF(str, enum bpf_cmd, BTF_F_NONAME, "BPF_MAP_CREATE",
BPF_MAP_CREATE);
TEST_BTF(str, enum bpf_cmd, BTF_F_NONAME|BTF_F_ZERO,
"BPF_MAP_CREATE", 0);
TEST_BTF(str, enum bpf_cmd, BTF_F_ZERO, "(enum bpf_cmd)BPF_MAP_CREATE",
BPF_MAP_CREATE);
TEST_BTF(str, enum bpf_cmd, BTF_F_NONAME|BTF_F_ZERO,
"BPF_MAP_CREATE", BPF_MAP_CREATE);
TEST_BTF_C(str, enum bpf_cmd, 0, 2000);
TEST_BTF(str, enum bpf_cmd, BTF_F_NONAME, "2000", 2000);
/* simple struct */
TEST_BTF_C(str, struct btf_enum, 0,
{.name_off = (__u32)3,.val = (__s32)-1,});
TEST_BTF(str, struct btf_enum, BTF_F_NONAME, "{3,-1,}",
{ .name_off = 3, .val = -1,});
TEST_BTF(str, struct btf_enum, BTF_F_NONAME, "{-1,}",
{ .name_off = 0, .val = -1,});
TEST_BTF(str, struct btf_enum, BTF_F_NONAME|BTF_F_ZERO, "{0,-1,}",
{ .name_off = 0, .val = -1,});
/* empty struct should be printed */
TEST_BTF(str, struct btf_enum, 0, "(struct btf_enum){}",
{ .name_off = 0, .val = 0,});
TEST_BTF(str, struct btf_enum, BTF_F_NONAME, "{}",
{ .name_off = 0, .val = 0,});
TEST_BTF(str, struct btf_enum, BTF_F_ZERO,
"(struct btf_enum){.name_off = (__u32)0,.val = (__s32)0,}",
{ .name_off = 0, .val = 0,});
/* struct with pointers */
TEST_BTF(str, struct list_head, BTF_F_PTR_RAW,
"(struct list_head){.next = (struct list_head *)0x0000000000000001,}",
{ .next = (struct list_head *)1 });
/* NULL pointer should not be displayed */
TEST_BTF(str, struct list_head, BTF_F_PTR_RAW,
"(struct list_head){}",
{ .next = (struct list_head *)0 });
/* struct with char array */
TEST_BTF(str, struct bpf_prog_info, 0,
"(struct bpf_prog_info){.name = (char[])['f','o','o',],}",
{ .name = "foo",});
TEST_BTF(str, struct bpf_prog_info, BTF_F_NONAME,
"{['f','o','o',],}",
{.name = "foo",});
/* leading null char means do not display string */
TEST_BTF(str, struct bpf_prog_info, 0,
"(struct bpf_prog_info){}",
{.name = {'\0', 'f', 'o', 'o'}});
/* handle non-printable characters */
TEST_BTF(str, struct bpf_prog_info, 0,
"(struct bpf_prog_info){.name = (char[])[1,2,3,],}",
{ .name = {1, 2, 3, 0}});
/* struct with non-char array */
TEST_BTF(str, struct __sk_buff, 0,
"(struct __sk_buff){.cb = (__u32[])[1,2,3,4,5,],}",
{ .cb = {1, 2, 3, 4, 5,},});
TEST_BTF(str, struct __sk_buff, BTF_F_NONAME,
"{[1,2,3,4,5,],}",
{ .cb = { 1, 2, 3, 4, 5},});
/* For non-char arrays, show non-zero values only */
TEST_BTF(str, struct __sk_buff, 0,
"(struct __sk_buff){.cb = (__u32[])[1,],}",
{ .cb = { 0, 0, 1, 0, 0},});
/* struct with bitfields */
TEST_BTF_C(str, struct bpf_insn, 0,
{.code = (__u8)1,.dst_reg = (__u8)0x2,.src_reg = (__u8)0x3,.off = (__s16)4,.imm = (__s32)5,});
TEST_BTF(str, struct bpf_insn, BTF_F_NONAME, "{1,0x2,0x3,4,5,}",
{.code = 1, .dst_reg = 0x2, .src_reg = 0x3, .off = 4,
.imm = 5,});
#else
skip = true;
#endif
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/netif_receive_skb.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"
#include "bpf_misc.h"
struct node_acquire {
long key;
long data;
struct bpf_rb_node node;
struct bpf_refcount refcount;
};
extern void bpf_rcu_read_lock(void) __ksym;
extern void bpf_rcu_read_unlock(void) __ksym;
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
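/* private(name) places a global into a hidden, 8-byte-aligned ".data.<name>"
 * section, so the lock and the rbtree root below end up in the same section.
 */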
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_acquire, node);
static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
struct node_acquire *node_a;
struct node_acquire *node_b;
node_a = container_of(a, struct node_acquire, node);
node_b = container_of(b, struct node_acquire, node);
return node_a->key < node_b->key;
}
SEC("?tc")
__failure __msg("Unreleased reference id=4 alloc_insn=21")
long rbtree_refcounted_node_ref_escapes(void *ctx)
{
struct node_acquire *n, *m;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less);
/* m becomes an owning ref but is never drop'd or added to a tree */
m = bpf_refcount_acquire(n);
bpf_spin_unlock(&glock);
if (!m)
return 2;
m->key = 2;
return 0;
}
SEC("?tc")
__failure __msg("Unreleased reference id=3 alloc_insn=9")
long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
{
struct node_acquire *n, *m;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
/* m becomes an owning ref but is never drop'd or added to a tree */
m = bpf_refcount_acquire(n);
m->key = 2;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less);
bpf_spin_unlock(&glock);
return 0;
}
SEC("?fentry.s/bpf_testmod_test_read")
__failure __msg("function calls are not allowed while holding a lock")
int BPF_PROG(rbtree_fail_sleepable_lock_across_rcu,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
{
struct node_acquire *n;
n = bpf_obj_new(typeof(*n));
if (!n)
return 0;
/* spin_{lock,unlock} are in different RCU CS */
bpf_rcu_read_lock();
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less);
bpf_rcu_read_unlock();
bpf_rcu_read_lock();
bpf_spin_unlock(&glock);
bpf_rcu_read_unlock();
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 180
#include "pyperf.h"
| linux-master | tools/testing/selftests/bpf/progs/pyperf180.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
volatile __u64 test_get_constant = 0;
SEC("freplace/get_constant")
int security_new_get_constant(long val)
{
if (val != 123)
return 0;
test_get_constant = 1;
return test_get_constant; /* original get_constant() returns val - 122 */
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/freplace_get_constant.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, long);
} enter_id SEC(".maps");
#define IFNAMSIZ 16
int ifindex, ingress_ifindex;
char name[IFNAMSIZ];
unsigned int inum;
unsigned int meta_len, frag0_len, kskb_len, kskb2_len;
void *bpf_cast_to_kern_ctx(void *) __ksym;
void *bpf_rdonly_cast(void *, __u32) __ksym;
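/* kfuncs: bpf_cast_to_kern_ctx() converts a UAPI context pointer into the
 * kernel's underlying struct, and bpf_rdonly_cast() reinterprets a pointer
 * as a read-only pointer to the BTF type given by the second argument.
 */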
SEC("?xdp")
int md_xdp(struct xdp_md *ctx)
{
struct xdp_buff *kctx = bpf_cast_to_kern_ctx(ctx);
struct net_device *dev;
dev = kctx->rxq->dev;
ifindex = dev->ifindex;
inum = dev->nd_net.net->ns.inum;
__builtin_memcpy(name, dev->name, IFNAMSIZ);
ingress_ifindex = ctx->ingress_ifindex;
return XDP_PASS;
}
SEC("?tc")
int md_skb(struct __sk_buff *skb)
{
struct sk_buff *kskb = bpf_cast_to_kern_ctx(skb);
struct skb_shared_info *shared_info;
struct sk_buff *kskb2;
kskb_len = kskb->len;
/* Simulate the following kernel macro:
* #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
*/
shared_info = bpf_rdonly_cast(kskb->head + kskb->end,
bpf_core_type_id_kernel(struct skb_shared_info));
meta_len = shared_info->meta_len;
frag0_len = shared_info->frag_list->len;
/* kskb2 should be equal to kskb */
kskb2 = bpf_rdonly_cast(kskb, bpf_core_type_id_kernel(struct sk_buff));
kskb2_len = kskb2->len;
return 0;
}
SEC("?tp_btf/sys_enter")
int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id)
{
struct task_struct *task, *task_dup;
task = bpf_get_current_task_btf();
task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct));
(void)bpf_task_storage_get(&enter_id, task_dup, 0, 0);
return 0;
}
SEC("?tracepoint/syscalls/sys_enter_nanosleep")
int kctx_u64(void *ctx)
{
u64 *kctx = bpf_rdonly_cast(ctx, bpf_core_type_id_kernel(u64));
(void)kctx;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/type_cast.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
int kprobe_res = 0;
int kretprobe_res = 0;
int uprobe_res = 0;
int uretprobe_res = 0;
int uprobe_byname_res = 0;
void *user_ptr = 0;
SEC("kprobe")
int handle_kprobe(struct pt_regs *ctx)
{
kprobe_res = 1;
return 0;
}
SEC("kretprobe")
int handle_kretprobe(struct pt_regs *ctx)
{
kretprobe_res = 2;
return 0;
}
SEC("uprobe")
int handle_uprobe(struct pt_regs *ctx)
{
uprobe_res = 3;
return 0;
}
SEC("uretprobe")
int handle_uretprobe(struct pt_regs *ctx)
{
uretprobe_res = 4;
return 0;
}
SEC("uprobe")
int handle_uprobe_byname(struct pt_regs *ctx)
{
uprobe_byname_res = 5;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_attach_probe_manual.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
int count = 0;
int tgid = 0;
int last_tgid = 0;
int unique_tgid_count = 0;
SEC("iter/task_file")
int dump_task_file(struct bpf_iter__task_file *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct task_struct *task = ctx->task;
struct file *file = ctx->file;
__u32 fd = ctx->fd;
if (task == (void *)0 || file == (void *)0)
return 0;
if (ctx->meta->seq_num == 0) {
count = 0;
BPF_SEQ_PRINTF(seq, " tgid gid fd file\n");
}
if (tgid == task->tgid && task->tgid != task->pid)
count++;
if (last_tgid != task->tgid) {
last_tgid = task->tgid;
unique_tgid_count++;
}
BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd,
(long)file->f_op);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_task_file.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__type(key, int);
__type(value, int);
} perf_buf_map SEC(".maps");
#define _(P) (__builtin_preserve_access_index(P))
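/* _(P) marks a field access for CO-RE, so libbpf relocates the offset
 * against the running kernel's BTF at load time.
 */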
/* define a few structs that the BPF program needs to access */
struct callback_head {
struct callback_head *next;
void (*func)(struct callback_head *head);
};
struct dev_ifalias {
struct callback_head rcuhead;
};
struct net_device /* same as kernel's struct net_device */ {
int ifindex;
struct dev_ifalias *ifalias;
};
typedef struct {
int counter;
} atomic_t;
typedef struct refcount_struct {
atomic_t refs;
} refcount_t;
struct sk_buff {
/* field names and sizes should match those in the kernel */
unsigned int len, data_len;
__u16 mac_len, hdr_len, queue_mapping;
struct net_device *dev;
/* order of the fields doesn't matter */
refcount_t users;
unsigned char *data;
char __pkt_type_offset[0];
char cb[48];
};
struct meta {
int ifindex;
__u32 cb32_0;
__u8 cb8_0;
};
/* TRACE_EVENT(kfree_skb,
* TP_PROTO(struct sk_buff *skb, void *location),
*/
SEC("tp_btf/kfree_skb")
int BPF_PROG(trace_kfree_skb, struct sk_buff *skb, void *location)
{
struct net_device *dev;
struct callback_head *ptr;
void *func;
int users;
unsigned char *data;
unsigned short pkt_data;
struct meta meta = {};
char pkt_type;
__u32 *cb32;
__u8 *cb8;
__builtin_preserve_access_index(({
users = skb->users.refs.counter;
data = skb->data;
dev = skb->dev;
ptr = dev->ifalias->rcuhead.next;
func = ptr->func;
cb8 = (__u8 *)&skb->cb;
cb32 = (__u32 *)&skb->cb;
}));
meta.ifindex = _(dev->ifindex);
meta.cb8_0 = cb8[8];
meta.cb32_0 = cb32[2];
bpf_probe_read_kernel(&pkt_type, sizeof(pkt_type), _(&skb->__pkt_type_offset));
pkt_type &= 7;
/* read eth proto */
bpf_probe_read_kernel(&pkt_data, sizeof(pkt_data), data + 12);
bpf_printk("rcuhead.next %llx func %llx\n", ptr, func);
bpf_printk("skb->len %d users %d pkt_type %x\n",
_(skb->len), users, pkt_type);
bpf_printk("skb->queue_mapping %d\n", _(skb->queue_mapping));
bpf_printk("dev->ifindex %d data %llx pkt_data %x\n",
meta.ifindex, data, pkt_data);
bpf_printk("cb8_0:%x cb32_0:%x\n", meta.cb8_0, meta.cb32_0);
if (users != 1 || pkt_data != bpf_htons(0x86dd) || meta.ifindex != 1)
/* raw tp ignores return value */
return 0;
/* send the first 72 bytes of the packet to user space */
bpf_skb_output(skb, &perf_buf_map, (72ull << 32) | BPF_F_CURRENT_CPU,
&meta, sizeof(meta));
return 0;
}
struct {
bool fentry_test_ok;
bool fexit_test_ok;
} result = {};
SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb, struct net_device *dev,
unsigned short protocol)
{
int len, ifindex;
__builtin_preserve_access_index(({
len = skb->len;
ifindex = dev->ifindex;
}));
/* fentry sees full packet including L2 header */
if (len != 74 || ifindex != 1)
return 0;
result.fentry_test_ok = true;
return 0;
}
SEC("fexit/eth_type_trans")
int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb, struct net_device *dev,
unsigned short protocol)
{
int len, ifindex;
__builtin_preserve_access_index(({
len = skb->len;
ifindex = dev->ifindex;
}));
/* fexit sees packet without L2 header that eth_type_trans should have
* consumed.
*/
if (len != 60 || protocol != bpf_htons(0x86dd) || ifindex != 1)
return 0;
result.fexit_test_ok = true;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/kfree_skb.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_CPUMAP);
__type(key, __u32);
__type(value, struct bpf_cpumap_val);
__uint(max_entries, 1);
} cpu_map SEC(".maps");
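/* xdp_drop_prog is an entry point meant for a cpumap slot and drops every
 * packet; xdp_cpumap_prog is a generic freplace program that redirects to
 * CPU 0 through cpu_map, falling back to XDP_PASS.
 */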
SEC("xdp/cpumap")
int xdp_drop_prog(struct xdp_md *ctx)
{
return XDP_DROP;
}
SEC("freplace")
int xdp_cpumap_prog(struct xdp_md *ctx)
{
return bpf_redirect_map(&cpu_map, 0, XDP_PASS);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/freplace_progmap.c |
#define SUBPROGS
#include "test_cls_redirect.c"
| linux-master | tools/testing/selftests/bpf/progs/test_cls_redirect_subprogs.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
SEC("kprobe.multi/")
int test_kprobe_empty(struct pt_regs *ctx)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/kprobe_multi_empty.c |
#include "core_reloc_types.h"
void f1(struct core_reloc_nesting___dup_compat_types x) {}
void f2(struct core_reloc_nesting___dup_compat_types__2 x) {}
void f3(struct core_reloc_nesting___dup_compat_types__3 x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting___dup_compat_types.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
struct core_reloc_flavors {
int a;
int b;
int c;
};
/* local flavor with reversed layout */
struct core_reloc_flavors___reversed {
int c;
int b;
int a;
};
/* local flavor with nested/overlapping layout */
struct core_reloc_flavors___weird {
struct {
int b;
};
/* a and c overlap in local flavor, but this should still work
* correctly with target original flavor
*/
union {
int a;
int c;
};
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
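/* CORE_READ() reads sizeof(*dst) bytes from src, with the field access
 * relocated via CO-RE.
 */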
SEC("raw_tracepoint/sys_enter")
int test_core_flavors(void *ctx)
{
struct core_reloc_flavors *in_orig = (void *)&data.in;
struct core_reloc_flavors___reversed *in_rev = (void *)&data.in;
struct core_reloc_flavors___weird *in_weird = (void *)&data.in;
struct core_reloc_flavors *out = (void *)&data.out;
/* read a using weird layout */
if (CORE_READ(&out->a, &in_weird->a))
return 1;
/* read b using reversed layout */
if (CORE_READ(&out->b, &in_rev->b))
return 1;
/* read c using original layout */
if (CORE_READ(&out->c, &in_orig->c))
return 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} task_storage SEC(".maps");
long hits;
long gp_hits;
long gp_times;
long current_gp_start;
long unexpected;
bool postgp_seen;
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int get_local(void *ctx)
{
struct task_struct *task;
int idx;
int *s;
idx = 0;
task = bpf_get_current_task_btf();
s = bpf_task_storage_get(&task_storage, task, &idx,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!s)
return 0;
*s = 3;
bpf_task_storage_delete(&task_storage, task);
__sync_add_and_fetch(&hits, 1);
return 0;
}
SEC("fentry/rcu_tasks_trace_pregp_step")
int pregp_step(struct pt_regs *ctx)
{
current_gp_start = bpf_ktime_get_ns();
return 0;
}
SEC("fentry/rcu_tasks_trace_postgp")
int postgp(struct pt_regs *ctx)
{
if (!current_gp_start && postgp_seen) {
/* Will only happen if prog tracing rcu_tasks_trace_pregp_step doesn't
* execute before this prog
*/
__sync_add_and_fetch(&unexpected, 1);
return 0;
}
__sync_add_and_fetch(&gp_times, bpf_ktime_get_ns() - current_gp_start);
__sync_add_and_fetch(&gp_hits, 1);
current_gp_start = 0;
postgp_seen = true;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c |
#include "core_reloc_types.h"
void f(struct core_reloc_bitfields___err_too_big_bitfield x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <sys/socket.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
int invocations = 0, in_use = 0;
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} sk_map SEC(".maps");
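/* Allow only one in-use UDP socket per cgroup: sock_create tags each socket
 * in sk storage and bumps in_use, while sock_release decrements it only for
 * sockets carrying the tag.
 */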
SEC("cgroup/sock_create")
int sock(struct bpf_sock *ctx)
{
int *sk_storage;
if (ctx->type != SOCK_DGRAM)
return 1;
sk_storage = bpf_sk_storage_get(&sk_map, ctx, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!sk_storage)
return 0;
*sk_storage = 0xdeadbeef;
__sync_fetch_and_add(&invocations, 1);
if (in_use > 0) {
/* BPF_CGROUP_INET_SOCK_RELEASE is _not_ called
* when we return an error from the BPF
* program!
*/
return 0;
}
__sync_fetch_and_add(&in_use, 1);
return 1;
}
SEC("cgroup/sock_release")
int sock_release(struct bpf_sock *ctx)
{
int *sk_storage;
if (ctx->type != SOCK_DGRAM)
return 1;
sk_storage = bpf_sk_storage_get(&sk_map, ctx, 0, 0);
if (!sk_storage || *sk_storage != 0xdeadbeef)
return 0;
__sync_fetch_and_add(&invocations, 1);
__sync_fetch_and_add(&in_use, -1);
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/udp_limit.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2019 Facebook
#define STROBE_MAX_INTS 2
#define STROBE_MAX_STRS 25
#define STROBE_MAX_MAPS 13
#define STROBE_MAX_MAP_ENTRIES 20
#define NO_UNROLL
#include "strobemeta.h"
| linux-master | tools/testing/selftests/bpf/progs/strobemeta_nounroll1.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
int val = 0;
SEC("fentry/test_1")
int BPF_PROG(fentry_test_1, __u64 *st_ops_ctx)
{
__u64 state;
/* Read the traced st_ops arg1 which is a pointer */
bpf_probe_read_kernel(&state, sizeof(__u64), (void *)st_ops_ctx);
/* Read state->val */
bpf_probe_read_kernel(&val, sizeof(__u32), (void *)state);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/trace_dummy_st_ops.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/stack_ptr.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <limits.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct test_val);
} map_array_48b SEC(".maps");
SEC("socket")
__description("PTR_TO_STACK store/load")
__success __success_unpriv __retval(0xfaceb00c)
__naked void ptr_to_stack_store_load(void)
{
asm volatile (" \
r1 = r10; \
r1 += -10; \
r0 = 0xfaceb00c; \
*(u64*)(r1 + 2) = r0; \
r0 = *(u64*)(r1 + 2); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK store/load - bad alignment on off")
__failure __msg("misaligned stack access off (0x0; 0x0)+-8+2 size 8")
__failure_unpriv
__naked void load_bad_alignment_on_off(void)
{
asm volatile (" \
r1 = r10; \
r1 += -8; \
r0 = 0xfaceb00c; \
*(u64*)(r1 + 2) = r0; \
r0 = *(u64*)(r1 + 2); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK store/load - bad alignment on reg")
__failure __msg("misaligned stack access off (0x0; 0x0)+-10+8 size 8")
__failure_unpriv
__naked void load_bad_alignment_on_reg(void)
{
asm volatile (" \
r1 = r10; \
r1 += -10; \
r0 = 0xfaceb00c; \
*(u64*)(r1 + 8) = r0; \
r0 = *(u64*)(r1 + 8); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK store/load - out of bounds low")
__failure __msg("invalid write to stack R1 off=-79992 size=8")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void load_out_of_bounds_low(void)
{
asm volatile (" \
r1 = r10; \
r1 += -80000; \
r0 = 0xfaceb00c; \
*(u64*)(r1 + 8) = r0; \
r0 = *(u64*)(r1 + 8); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK store/load - out of bounds high")
__failure __msg("invalid write to stack R1 off=0 size=8")
__failure_unpriv
__naked void load_out_of_bounds_high(void)
{
asm volatile (" \
r1 = r10; \
r1 += -8; \
r0 = 0xfaceb00c; \
*(u64*)(r1 + 8) = r0; \
r0 = *(u64*)(r1 + 8); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check high 1")
__success __success_unpriv __retval(42)
__naked void to_stack_check_high_1(void)
{
asm volatile (" \
r1 = r10; \
r1 += -1; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check high 2")
__success __success_unpriv __retval(42)
__naked void to_stack_check_high_2(void)
{
asm volatile (" \
r1 = r10; \
r0 = 42; \
*(u8*)(r1 - 1) = r0; \
r0 = *(u8*)(r1 - 1); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check high 3")
__success __failure_unpriv
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__retval(42)
__naked void to_stack_check_high_3(void)
{
asm volatile (" \
r1 = r10; \
r1 += 0; \
r0 = 42; \
*(u8*)(r1 - 1) = r0; \
r0 = *(u8*)(r1 - 1); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check high 4")
__failure __msg("invalid write to stack R1 off=0 size=1")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void to_stack_check_high_4(void)
{
asm volatile (" \
r1 = r10; \
r1 += 0; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check high 5")
__failure __msg("invalid write to stack R1")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void to_stack_check_high_5(void)
{
asm volatile (" \
r1 = r10; \
r1 += %[__imm_0]; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" :
: __imm_const(__imm_0, (1 << 29) - 1)
: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check high 6")
__failure __msg("invalid write to stack")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void to_stack_check_high_6(void)
{
asm volatile (" \
r1 = r10; \
r1 += %[__imm_0]; \
r0 = 42; \
*(u8*)(r1 + %[shrt_max]) = r0; \
r0 = *(u8*)(r1 + %[shrt_max]); \
exit; \
" :
: __imm_const(__imm_0, (1 << 29) - 1),
__imm_const(shrt_max, SHRT_MAX)
: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check high 7")
__failure __msg("fp pointer offset")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void to_stack_check_high_7(void)
{
asm volatile (" \
r1 = r10; \
r1 += %[__imm_0]; \
r1 += %[__imm_0]; \
r0 = 42; \
*(u8*)(r1 + %[shrt_max]) = r0; \
r0 = *(u8*)(r1 + %[shrt_max]); \
exit; \
" :
: __imm_const(__imm_0, (1 << 29) - 1),
__imm_const(shrt_max, SHRT_MAX)
: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check low 1")
__success __success_unpriv __retval(42)
__naked void to_stack_check_low_1(void)
{
asm volatile (" \
r1 = r10; \
r1 += -512; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check low 2")
__success __failure_unpriv
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__retval(42)
__naked void to_stack_check_low_2(void)
{
asm volatile (" \
r1 = r10; \
r1 += -513; \
r0 = 42; \
*(u8*)(r1 + 1) = r0; \
r0 = *(u8*)(r1 + 1); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check low 3")
__failure __msg("invalid write to stack R1 off=-513 size=1")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void to_stack_check_low_3(void)
{
asm volatile (" \
r1 = r10; \
r1 += -513; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check low 4")
__failure __msg("math between fp pointer")
__failure_unpriv
__naked void to_stack_check_low_4(void)
{
asm volatile (" \
r1 = r10; \
r1 += %[int_min]; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check low 5")
__failure __msg("invalid write to stack")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void to_stack_check_low_5(void)
{
asm volatile (" \
r1 = r10; \
r1 += %[__imm_0]; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" :
: __imm_const(__imm_0, -((1 << 29) - 1))
: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check low 6")
__failure __msg("invalid write to stack")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void to_stack_check_low_6(void)
{
asm volatile (" \
r1 = r10; \
r1 += %[__imm_0]; \
r0 = 42; \
*(u8*)(r1 %[shrt_min]) = r0; \
r0 = *(u8*)(r1 %[shrt_min]); \
exit; \
" :
: __imm_const(__imm_0, -((1 << 29) - 1)),
__imm_const(shrt_min, SHRT_MIN)
: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK check low 7")
__failure __msg("fp pointer offset")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void to_stack_check_low_7(void)
{
asm volatile (" \
r1 = r10; \
r1 += %[__imm_0]; \
r1 += %[__imm_0]; \
r0 = 42; \
*(u8*)(r1 %[shrt_min]) = r0; \
r0 = *(u8*)(r1 %[shrt_min]); \
exit; \
" :
: __imm_const(__imm_0, -((1 << 29) - 1)),
__imm_const(shrt_min, SHRT_MIN)
: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK mixed reg/k, 1")
__success __success_unpriv __retval(42)
__naked void stack_mixed_reg_k_1(void)
{
asm volatile (" \
r1 = r10; \
r1 += -3; \
r2 = -3; \
r1 += r2; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK mixed reg/k, 2")
__success __success_unpriv __retval(42)
__naked void stack_mixed_reg_k_2(void)
{
asm volatile (" \
r0 = 0; \
*(u64*)(r10 - 8) = r0; \
r0 = 0; \
*(u64*)(r10 - 16) = r0; \
r1 = r10; \
r1 += -3; \
r2 = -3; \
r1 += r2; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r5 = r10; \
r0 = *(u8*)(r5 - 6); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK mixed reg/k, 3")
__success __success_unpriv __retval(-3)
__naked void stack_mixed_reg_k_3(void)
{
asm volatile (" \
r1 = r10; \
r1 += -3; \
r2 = -3; \
r1 += r2; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = r2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("PTR_TO_STACK reg")
__success __success_unpriv __retval(42)
__naked void ptr_to_stack_reg(void)
{
asm volatile (" \
r1 = r10; \
r2 = -3; \
r1 += r2; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("stack pointer arithmetic")
__success __success_unpriv __retval(0)
__naked void stack_pointer_arithmetic(void)
{
asm volatile (" \
r1 = 4; \
goto l0_%=; \
l0_%=: r7 = r10; \
r7 += -10; \
r7 += -10; \
r2 = r7; \
r2 += r1; \
r0 = 0; \
*(u32*)(r2 + 4) = r0; \
r2 = r7; \
r2 += 8; \
r0 = 0; \
*(u32*)(r2 + 4) = r0; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("store PTR_TO_STACK in R10 to array map using BPF_B")
__success __retval(42)
__naked void array_map_using_bpf_b(void)
{
asm volatile (" \
/* Load pointer to map. */ \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
r0 = 2; \
exit; \
l0_%=: r1 = r0; \
/* Copy R10 to R9. */ \
r9 = r10; \
/* Pollute other registers with unaligned values. */\
r2 = -1; \
r3 = -1; \
r4 = -1; \
r5 = -1; \
r6 = -1; \
r7 = -1; \
r8 = -1; \
/* Store both R9 and R10 with BPF_B and read back. */\
*(u8*)(r1 + 0) = r10; \
r2 = *(u8*)(r1 + 0); \
*(u8*)(r1 + 0) = r9; \
r3 = *(u8*)(r1 + 0); \
/* Should read back as same value. */ \
if r2 == r3 goto l1_%=; \
r0 = 1; \
exit; \
l1_%=: r0 = 42; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_stack_ptr.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf_sockopt_helpers.h>
#define SRC1_IP4 0xAC100001U /* 172.16.0.1 */
#define SRC2_IP4 0x00000000U
#define SRC_REWRITE_IP4 0x7f000004U
#define DST_IP4 0xC0A801FEU /* 192.168.1.254 */
#define DST_REWRITE_IP4 0x7f000001U
#define DST_PORT 4040
#define DST_REWRITE_PORT4 4444
SEC("cgroup/sendmsg4")
int sendmsg_v4_prog(struct bpf_sock_addr *ctx)
{
if (ctx->type != SOCK_DGRAM)
return 0;
if (!get_set_sk_priority(ctx))
return 0;
/* Rewrite source. */
if (ctx->msg_src_ip4 == bpf_htonl(SRC1_IP4) ||
ctx->msg_src_ip4 == bpf_htonl(SRC2_IP4)) {
ctx->msg_src_ip4 = bpf_htonl(SRC_REWRITE_IP4);
} else {
/* Unexpected source. Reject sendmsg. */
return 0;
}
/* Rewrite destination. */
if ((ctx->user_ip4 >> 24) == (bpf_htonl(DST_IP4) >> 24) &&
ctx->user_port == bpf_htons(DST_PORT)) {
ctx->user_ip4 = bpf_htonl(DST_REWRITE_IP4);
ctx->user_port = bpf_htons(DST_REWRITE_PORT4);
} else {
/* Unexpected destination. Reject sendmsg. */
return 0;
}
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/sendmsg4_prog.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <linux/types.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_tracing_net.h"
struct bpf_fib_lookup fib_params = {};
int fib_lookup_ret = 0;
int lookup_flags = 0;
SEC("tc")
int fib_lookup(struct __sk_buff *skb)
{
fib_lookup_ret = bpf_fib_lookup(skb, &fib_params, sizeof(fib_params),
lookup_flags);
return TC_ACT_SHOT;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/fib_lookup.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#define ATTR __always_inline
#include "test_jhash.h"
SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
void *ptr;
int nh_off, i = 0;
nh_off = 14;
/* pragma unroll doesn't work on large loops */
#define C do { \
ptr = data + i; \
if (ptr + nh_off > data_end) \
break; \
ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
} while (0);
#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
C30;C30;C30; /* 90 calls */
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_verif_scale2.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/search_pruning.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
SEC("socket")
__description("pointer/scalar confusion in state equality check (way 1)")
__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value")
__retval(POINTER_VALUE)
__naked void state_equality_check_way_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 = *(u64*)(r0 + 0); \
goto l1_%=; \
l0_%=: r0 = r10; \
l1_%=: goto l2_%=; \
l2_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("pointer/scalar confusion in state equality check (way 2)")
__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value")
__retval(POINTER_VALUE)
__naked void state_equality_check_way_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
r0 = r10; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r0 + 0); \
l1_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("lwt_in")
__description("liveness pruning and write screening")
__failure __msg("R0 !read_ok")
__naked void liveness_pruning_and_write_screening(void)
{
asm volatile (" \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* branch conditions teach us nothing about R2 */\
if r2 >= 0 goto l0_%=; \
r0 = 0; \
l0_%=: if r2 >= 0 goto l1_%=; \
r0 = 0; \
l1_%=: exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("varlen_map_value_access pruning")
__failure __msg("R0 unbounded memory access")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void varlen_map_value_access_pruning(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r0 + 0); \
w2 = %[max_entries]; \
if r2 s> r1 goto l1_%=; \
w1 = 0; \
l1_%=: w1 <<= 2; \
r0 += r1; \
goto l2_%=; \
l2_%=: r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(max_entries, MAX_ENTRIES),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("search pruning: all branches should be verified (nop operation)")
__failure __msg("R6 invalid mem access 'scalar'")
__naked void should_be_verified_nop_operation(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r3 = *(u64*)(r0 + 0); \
if r3 == 0xbeef goto l1_%=; \
r4 = 0; \
goto l2_%=; \
l1_%=: r4 = 1; \
l2_%=: *(u64*)(r10 - 16) = r4; \
call %[bpf_ktime_get_ns]; \
r5 = *(u64*)(r10 - 16); \
if r5 == 0 goto l0_%=; \
r6 = 0; \
r1 = 0xdead; \
*(u64*)(r6 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("search pruning: all branches should be verified (invalid stack access)")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -16+0 size 8")
__retval(0)
__naked void be_verified_invalid_stack_access(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r3 = *(u64*)(r0 + 0); \
r4 = 0; \
if r3 == 0xbeef goto l1_%=; \
*(u64*)(r10 - 16) = r4; \
goto l2_%=; \
l1_%=: *(u64*)(r10 - 24) = r4; \
l2_%=: call %[bpf_ktime_get_ns]; \
r5 = *(u64*)(r10 - 16); \
l0_%=: exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tracepoint")
__description("precision tracking for u32 spill/fill")
__failure __msg("R0 min value is outside of the allowed memory range")
__naked void tracking_for_u32_spill_fill(void)
{
asm volatile (" \
r7 = r1; \
call %[bpf_get_prandom_u32]; \
w6 = 32; \
if r0 == 0 goto l0_%=; \
w6 = 4; \
l0_%=: /* Additional insns to introduce a pruning point. */\
call %[bpf_get_prandom_u32]; \
r3 = 0; \
r3 = 0; \
if r0 == 0 goto l1_%=; \
r3 = 0; \
l1_%=: /* u32 spill/fill */ \
*(u32*)(r10 - 8) = r6; \
r8 = *(u32*)(r10 - 8); \
/* out-of-bound map value access for r6=32 */ \
r1 = 0; \
*(u64*)(r10 - 16) = r1; \
r2 = r10; \
r2 += -16; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r0 += r8; \
r1 = *(u32*)(r0 + 0); \
l2_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tracepoint")
__description("precision tracking for u32 spills, u64 fill")
__failure __msg("div by zero")
__naked void for_u32_spills_u64_fill(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r6 = r0; \
w7 = 0xffffffff; \
/* Additional insns to introduce a pruning point. */\
r3 = 1; \
r3 = 1; \
r3 = 1; \
r3 = 1; \
call %[bpf_get_prandom_u32]; \
if r0 == 0 goto l0_%=; \
r3 = 1; \
l0_%=: w3 /= 0; \
/* u32 spills, u64 fill */ \
*(u32*)(r10 - 4) = r6; \
*(u32*)(r10 - 8) = r7; \
r8 = *(u64*)(r10 - 8); \
/* if r8 != X goto pc+1 r8 known in fallthrough branch */\
if r8 != 0xffffffff goto l1_%=; \
r3 = 1; \
l1_%=: /* if r8 == X goto pc+1 condition always true on first\
* traversal, so starts backtracking to mark r8 as requiring\
* precision. r7 marked as needing precision. r6 not marked\
* since it's not tracked. \
*/ \
if r8 == 0xffffffff goto l2_%=; \
/* fails if r8 correctly marked unknown after fill. */\
w3 /= 0; \
l2_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("allocated_stack")
__success __msg("processed 15 insns")
__success_unpriv __msg_unpriv("") __log_level(1) __retval(0)
__naked void allocated_stack(void)
{
asm volatile (" \
r6 = r1; \
call %[bpf_get_prandom_u32]; \
r7 = r0; \
if r0 == 0 goto l0_%=; \
r0 = 0; \
*(u64*)(r10 - 8) = r6; \
r6 = *(u64*)(r10 - 8); \
*(u8*)(r10 - 9) = r7; \
r7 = *(u8*)(r10 - 9); \
l0_%=: if r0 != 0 goto l1_%=; \
l1_%=: if r0 != 0 goto l2_%=; \
l2_%=: if r0 != 0 goto l3_%=; \
l3_%=: if r0 != 0 goto l4_%=; \
l4_%=: exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
/* The test performs a conditional 64-bit write to a stack location
* fp[-8], this is followed by an unconditional 8-bit write to fp[-8],
* then data is read from fp[-8]. This sequence is unsafe.
*
* The test would be mistakenly marked as safe w/o dst register parent
* preservation in verifier.c:copy_register_state() function.
*
* Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the
* checkpoint state after conditional 64-bit assignment.
*/
SEC("socket")
__description("write tracking and register parent chain bug")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -8+1 size 8")
__retval(0) __flag(BPF_F_TEST_STATE_FREQ)
__naked void and_register_parent_chain_bug(void)
{
asm volatile (" \
/* r6 = ktime_get_ns() */ \
call %[bpf_ktime_get_ns]; \
r6 = r0; \
/* r0 = ktime_get_ns() */ \
call %[bpf_ktime_get_ns]; \
/* if r0 > r6 goto +1 */ \
if r0 > r6 goto l0_%=; \
/* *(u64 *)(r10 - 8) = 0xdeadbeef */ \
r0 = 0xdeadbeef; \
*(u64*)(r10 - 8) = r0; \
l0_%=: r1 = 42; \
*(u8*)(r10 - 8) = r1; \
r2 = *(u64*)(r10 - 8); \
/* exit(0) */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_search_pruning.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <[email protected]> */
#include "vmlinux.h"
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
/* This function is here to have CONFIG_X86_KERNEL_IBT
* used and added to object BTF.
*/
int unused(void)
{
return CONFIG_X86_KERNEL_IBT ? 0 : 1;
}
SEC("kprobe")
int BPF_PROG(kprobe_run)
{
return 0;
}
SEC("uprobe")
int BPF_PROG(uprobe_run)
{
return 0;
}
SEC("tracepoint")
int BPF_PROG(tp_run)
{
return 0;
}
SEC("kprobe.multi")
int BPF_PROG(kmulti_run)
{
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_fill_link_info.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
struct map_value {
struct task_struct __kptr_untrusted *ptr;
};
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct map_value);
} lru_map SEC(".maps");
int pid = 0;
int result = 1;
SEC("fentry/bpf_ktime_get_ns")
int printk(void *ctx)
{
struct map_value v = {};
if (pid == bpf_get_current_task_btf()->pid)
bpf_map_update_elem(&lru_map, &(int){0}, &v, 0);
return 0;
}
SEC("fentry/do_nanosleep")
int nanosleep(void *ctx)
{
struct map_value val = {}, *v;
struct task_struct *current;
bpf_map_update_elem(&lru_map, &(int){0}, &val, 0);
v = bpf_map_lookup_elem(&lru_map, &(int){0});
if (!v)
return 0;
bpf_map_delete_elem(&lru_map, &(int){0});
current = bpf_get_current_task_btf();
v->ptr = current;
pid = current->pid;
bpf_ktime_get_ns();
result = !v->ptr;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/lru_bug.c |
#include "core_reloc_types.h"
void f(struct core_reloc_arrays___err_wrong_val_type x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_destructive_test(void)
{
bpf_kfunc_call_test_destructive();
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/kfunc_call_destructive.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
unsigned long uprobe_trigger_body;
__u64 test1_result = 0;
SEC("uprobe//proc/self/exe:uprobe_trigger_body+1")
int BPF_UPROBE(test1)
{
__u64 addr = bpf_get_func_ip(ctx);
test1_result = (const void *) addr == (const void *) uprobe_trigger_body + 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/get_func_ip_uprobe_test.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Google */
#include "vmlinux.h"
#include <bpf/bpf_tracing.h>
bool prog1_called = false;
bool prog2_called = false;
SEC("raw_tp/sys_enter")
int prog1(const void *ctx)
{
prog1_called = true;
return 0;
}
SEC("raw_tp/sys_exit")
int prog2(const void *ctx)
{
prog2_called = true;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_autoattach.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define MAX_STACK_RAWTP 10
SEC("raw_tracepoint/sys_enter")
int bpf_prog2(void *ctx)
{
__u64 stack[MAX_STACK_RAWTP];
int error;
/* set all the flags which should return -EINVAL */
error = bpf_get_stack(ctx, stack, 0, -1);
if (error < 0)
goto loop;
return error;
loop:
while (1) {
error++;
}
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c |
#include "core_reloc_types.h"
void f(struct core_reloc_existence___minimal x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_existence___minimal.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define IFINDEX_LO 1
struct {
__uint(type, BPF_MAP_TYPE_CPUMAP);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_cpumap_val));
__uint(max_entries, 4);
} cpu_map SEC(".maps");
SEC("xdp/cpumap")
int xdp_dummy_cm(struct xdp_md *ctx)
{
return XDP_PASS;
}
SEC("xdp.frags/cpumap")
int xdp_dummy_cm_frags(struct xdp_md *ctx)
{
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_frags_helpers.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023, SUSE. */
#include "vmlinux.h"
#include <bpf/bpf_tracing.h>
#include "bpf_tracing_net.h"
char _license[] SEC("license") = "GPL";
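/* fmod_ret hook on update_socket_protocol(): upgrade plain TCP stream
 * sockets (AF_INET/AF_INET6, SOCK_STREAM, protocol 0 or IPPROTO_TCP) to
 * IPPROTO_MPTCP; any other request keeps its original protocol.
 */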
SEC("fmod_ret/update_socket_protocol")
int BPF_PROG(mptcpify, int family, int type, int protocol)
{
if ((family == AF_INET || family == AF_INET6) &&
type == SOCK_STREAM &&
(!protocol || protocol == IPPROTO_TCP)) {
return IPPROTO_MPTCP;
}
return protocol;
}
| linux-master | tools/testing/selftests/bpf/progs/mptcpify.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#define STROBE_MAX_INTS 2
#define STROBE_MAX_STRS 25
#define STROBE_MAX_MAPS 100
#define STROBE_MAX_MAP_ENTRIES 20
#define USE_BPF_LOOP
#include "strobemeta.h"
| linux-master | tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
SEC("socket")
int while_true(volatile struct __sk_buff* skb)
{
int i = 0;
while (1) {
if (skb->len)
i += 3;
else
i += 7;
if (i == 9)
break;
barrier();
if (i == 10)
break;
barrier();
if (i == 13)
break;
barrier();
if (i == 14)
break;
}
return i;
}
| linux-master | tools/testing/selftests/bpf/progs/loop5.c |
#include "core_reloc_types.h"
void f(struct core_reloc_primitives___err_non_ptr x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_primitives___err_non_ptr.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Carlos Neira [email protected] */
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
__u64 user_pid = 0;
__u64 user_tgid = 0;
__u64 dev = 0;
__u64 ino = 0;
SEC("tracepoint/syscalls/sys_enter_nanosleep")
int handler(const void *ctx)
{
struct bpf_pidns_info nsdata;
if (bpf_get_ns_current_pid_tgid(dev, ino, &nsdata, sizeof(struct bpf_pidns_info)))
return 0;
user_pid = nsdata.pid;
user_tgid = nsdata.tgid;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"
#include "linked_list.h"
#define INIT \
struct map_value *v, *v2, *iv, *iv2; \
struct foo *f, *f1, *f2; \
struct bar *b; \
void *map; \
\
map = bpf_map_lookup_elem(&map_of_maps, &(int){ 0 }); \
if (!map) \
return 0; \
v = bpf_map_lookup_elem(&array_map, &(int){ 0 }); \
if (!v) \
return 0; \
v2 = bpf_map_lookup_elem(&array_map, &(int){ 0 }); \
if (!v2) \
return 0; \
iv = bpf_map_lookup_elem(map, &(int){ 0 }); \
if (!iv) \
return 0; \
iv2 = bpf_map_lookup_elem(map, &(int){ 0 }); \
if (!iv2) \
return 0; \
f = bpf_obj_new(typeof(*f)); \
if (!f) \
return 0; \
f1 = f; \
f2 = bpf_obj_new(typeof(*f2)); \
if (!f2) { \
bpf_obj_drop(f1); \
return 0; \
} \
b = bpf_obj_new(typeof(*b)); \
if (!b) { \
bpf_obj_drop(f2); \
bpf_obj_drop(f1); \
return 0; \
}
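/* Each CHECK() below expands to a standalone test program that performs a
 * list operation either with no lock held or while holding a lock that does
 * not protect the target list; the verifier is expected to reject them all.
 */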
#define CHECK(test, op, hexpr) \
SEC("?tc") \
int test##_missing_lock_##op(void *ctx) \
{ \
INIT; \
void (*p)(void *) = (void *)&bpf_list_##op; \
p(hexpr); \
return 0; \
}
CHECK(kptr, pop_front, &f->head);
CHECK(kptr, pop_back, &f->head);
CHECK(global, pop_front, &ghead);
CHECK(global, pop_back, &ghead);
CHECK(map, pop_front, &v->head);
CHECK(map, pop_back, &v->head);
CHECK(inner_map, pop_front, &iv->head);
CHECK(inner_map, pop_back, &iv->head);
#undef CHECK
#define CHECK(test, op, hexpr, nexpr) \
SEC("?tc") \
int test##_missing_lock_##op(void *ctx) \
{ \
INIT; \
bpf_list_##op(hexpr, nexpr); \
return 0; \
}
CHECK(kptr, push_front, &f->head, &b->node);
CHECK(kptr, push_back, &f->head, &b->node);
CHECK(global, push_front, &ghead, &f->node2);
CHECK(global, push_back, &ghead, &f->node2);
CHECK(map, push_front, &v->head, &f->node2);
CHECK(map, push_back, &v->head, &f->node2);
CHECK(inner_map, push_front, &iv->head, &f->node2);
CHECK(inner_map, push_back, &iv->head, &f->node2);
#undef CHECK
#define CHECK(test, op, lexpr, hexpr) \
SEC("?tc") \
int test##_incorrect_lock_##op(void *ctx) \
{ \
INIT; \
void (*p)(void *) = (void *)&bpf_list_##op; \
bpf_spin_lock(lexpr); \
p(hexpr); \
return 0; \
}
#define CHECK_OP(op) \
CHECK(kptr_kptr, op, &f1->lock, &f2->head); \
CHECK(kptr_global, op, &f1->lock, &ghead); \
CHECK(kptr_map, op, &f1->lock, &v->head); \
CHECK(kptr_inner_map, op, &f1->lock, &iv->head); \
\
CHECK(global_global, op, &glock2, &ghead); \
CHECK(global_kptr, op, &glock, &f1->head); \
CHECK(global_map, op, &glock, &v->head); \
CHECK(global_inner_map, op, &glock, &iv->head); \
\
CHECK(map_map, op, &v->lock, &v2->head); \
CHECK(map_kptr, op, &v->lock, &f2->head); \
CHECK(map_global, op, &v->lock, &ghead); \
CHECK(map_inner_map, op, &v->lock, &iv->head); \
\
CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head); \
CHECK(inner_map_kptr, op, &iv->lock, &f2->head); \
CHECK(inner_map_global, op, &iv->lock, &ghead); \
CHECK(inner_map_map, op, &iv->lock, &v->head);
CHECK_OP(pop_front);
CHECK_OP(pop_back);
#undef CHECK
#undef CHECK_OP
#define CHECK(test, op, lexpr, hexpr, nexpr) \
SEC("?tc") \
int test##_incorrect_lock_##op(void *ctx) \
{ \
INIT; \
bpf_spin_lock(lexpr); \
bpf_list_##op(hexpr, nexpr); \
return 0; \
}
#define CHECK_OP(op) \
CHECK(kptr_kptr, op, &f1->lock, &f2->head, &b->node); \
CHECK(kptr_global, op, &f1->lock, &ghead, &f->node2); \
CHECK(kptr_map, op, &f1->lock, &v->head, &f->node2); \
CHECK(kptr_inner_map, op, &f1->lock, &iv->head, &f->node2); \
\
CHECK(global_global, op, &glock2, &ghead, &f->node2); \
CHECK(global_kptr, op, &glock, &f1->head, &b->node); \
CHECK(global_map, op, &glock, &v->head, &f->node2); \
CHECK(global_inner_map, op, &glock, &iv->head, &f->node2); \
\
CHECK(map_map, op, &v->lock, &v2->head, &f->node2); \
CHECK(map_kptr, op, &v->lock, &f2->head, &b->node); \
CHECK(map_global, op, &v->lock, &ghead, &f->node2); \
CHECK(map_inner_map, op, &v->lock, &iv->head, &f->node2); \
\
CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, &f->node2);\
CHECK(inner_map_kptr, op, &iv->lock, &f2->head, &b->node); \
CHECK(inner_map_global, op, &iv->lock, &ghead, &f->node2); \
CHECK(inner_map_map, op, &iv->lock, &v->head, &f->node2);
CHECK_OP(push_front);
CHECK_OP(push_back);
#undef CHECK
#undef CHECK_OP
#undef INIT
SEC("?kprobe/xyz")
int map_compat_kprobe(void *ctx)
{
bpf_list_push_front(&ghead, NULL);
return 0;
}
SEC("?kretprobe/xyz")
int map_compat_kretprobe(void *ctx)
{
bpf_list_push_front(&ghead, NULL);
return 0;
}
SEC("?tracepoint/xyz")
int map_compat_tp(void *ctx)
{
bpf_list_push_front(&ghead, NULL);
return 0;
}
SEC("?perf_event")
int map_compat_perf(void *ctx)
{
bpf_list_push_front(&ghead, NULL);
return 0;
}
SEC("?raw_tp/xyz")
int map_compat_raw_tp(void *ctx)
{
bpf_list_push_front(&ghead, NULL);
return 0;
}
SEC("?raw_tp.w/xyz")
int map_compat_raw_tp_w(void *ctx)
{
bpf_list_push_front(&ghead, NULL);
return 0;
}
SEC("?tc")
int obj_type_id_oor(void *ctx)
{
bpf_obj_new_impl(~0UL, NULL);
return 0;
}
SEC("?tc")
int obj_new_no_composite(void *ctx)
{
bpf_obj_new_impl(bpf_core_type_id_local(int), (void *)42);
return 0;
}
SEC("?tc")
int obj_new_no_struct(void *ctx)
{
bpf_obj_new(union { int data; unsigned udata; });
return 0;
}
SEC("?tc")
int obj_drop_non_zero_off(void *ctx)
{
void *f;
f = bpf_obj_new(struct foo);
if (!f)
return 0;
bpf_obj_drop(f+1);
return 0;
}
SEC("?tc")
int new_null_ret(void *ctx)
{
return bpf_obj_new(struct foo)->data;
}
SEC("?tc")
int obj_new_acq(void *ctx)
{
bpf_obj_new(struct foo);
return 0;
}
SEC("?tc")
int use_after_drop(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_obj_drop(f);
return f->data;
}
SEC("?tc")
int ptr_walk_scalar(void *ctx)
{
struct test1 {
struct test2 {
struct test2 *next;
} *ptr;
} *p;
p = bpf_obj_new(typeof(*p));
if (!p)
return 0;
bpf_this_cpu_ptr(p->ptr);
return 0;
}
SEC("?tc")
int direct_read_lock(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
return *(int *)&f->lock;
}
SEC("?tc")
int direct_write_lock(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
*(int *)&f->lock = 0;
return 0;
}
SEC("?tc")
int direct_read_head(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
return *(int *)&f->head;
}
SEC("?tc")
int direct_write_head(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
*(int *)&f->head = 0;
return 0;
}
SEC("?tc")
int direct_read_node(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
return *(int *)&f->node2;
}
SEC("?tc")
int direct_write_node(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
*(int *)&f->node2 = 0;
return 0;
}
static __always_inline
int use_after_unlock(bool push_front)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_spin_lock(&glock);
f->data = 42;
if (push_front)
bpf_list_push_front(&ghead, &f->node2);
else
bpf_list_push_back(&ghead, &f->node2);
bpf_spin_unlock(&glock);
return f->data;
}
SEC("?tc")
int use_after_unlock_push_front(void *ctx)
{
return use_after_unlock(true);
}
SEC("?tc")
int use_after_unlock_push_back(void *ctx)
{
return use_after_unlock(false);
}
static __always_inline
int list_double_add(bool push_front)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_spin_lock(&glock);
if (push_front) {
bpf_list_push_front(&ghead, &f->node2);
bpf_list_push_front(&ghead, &f->node2);
} else {
bpf_list_push_back(&ghead, &f->node2);
bpf_list_push_back(&ghead, &f->node2);
}
bpf_spin_unlock(&glock);
return 0;
}
SEC("?tc")
int double_push_front(void *ctx)
{
return list_double_add(true);
}
SEC("?tc")
int double_push_back(void *ctx)
{
return list_double_add(false);
}
SEC("?tc")
int no_node_value_type(void *ctx)
{
void *p;
p = bpf_obj_new(struct { int data; });
if (!p)
return 0;
bpf_spin_lock(&glock);
bpf_list_push_front(&ghead, p);
bpf_spin_unlock(&glock);
return 0;
}
SEC("?tc")
int incorrect_value_type(void *ctx)
{
struct bar *b;
b = bpf_obj_new(typeof(*b));
if (!b)
return 0;
bpf_spin_lock(&glock);
bpf_list_push_front(&ghead, &b->node);
bpf_spin_unlock(&glock);
return 0;
}
SEC("?tc")
int incorrect_node_var_off(struct __sk_buff *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_spin_lock(&glock);
bpf_list_push_front(&ghead, (void *)&f->node2 + ctx->protocol);
bpf_spin_unlock(&glock);
return 0;
}
SEC("?tc")
int incorrect_node_off1(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_spin_lock(&glock);
bpf_list_push_front(&ghead, (void *)&f->node2 + 1);
bpf_spin_unlock(&glock);
return 0;
}
SEC("?tc")
int incorrect_node_off2(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_spin_lock(&glock);
bpf_list_push_front(&ghead, &f->node);
bpf_spin_unlock(&glock);
return 0;
}
SEC("?tc")
int no_head_type(void *ctx)
{
void *p;
p = bpf_obj_new(typeof(struct { int data; }));
if (!p)
return 0;
bpf_spin_lock(&glock);
bpf_list_push_front(p, NULL);
	bpf_spin_unlock(&glock);
return 0;
}
SEC("?tc")
int incorrect_head_var_off1(struct __sk_buff *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_spin_lock(&glock);
bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node2);
bpf_spin_unlock(&glock);
return 0;
}
SEC("?tc")
int incorrect_head_var_off2(struct __sk_buff *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_spin_lock(&glock);
bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node2);
bpf_spin_unlock(&glock);
return 0;
}
SEC("?tc")
int incorrect_head_off1(void *ctx)
{
struct foo *f;
struct bar *b;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
b = bpf_obj_new(typeof(*b));
if (!b) {
bpf_obj_drop(f);
return 0;
}
bpf_spin_lock(&f->lock);
bpf_list_push_front((void *)&f->head + 1, &b->node);
bpf_spin_unlock(&f->lock);
return 0;
}
SEC("?tc")
int incorrect_head_off2(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_spin_lock(&glock);
bpf_list_push_front((void *)&ghead + 1, &f->node2);
bpf_spin_unlock(&glock);
return 0;
}
static __always_inline
int pop_ptr_off(void *(*op)(void *head))
{
struct {
struct bpf_list_head head __contains(foo, node2);
struct bpf_spin_lock lock;
} *p;
struct bpf_list_node *n;
p = bpf_obj_new(typeof(*p));
if (!p)
return 0;
bpf_spin_lock(&p->lock);
n = op(&p->head);
bpf_spin_unlock(&p->lock);
bpf_this_cpu_ptr(n);
return 0;
}
SEC("?tc")
int pop_front_off(void *ctx)
{
return pop_ptr_off((void *)bpf_list_pop_front);
}
SEC("?tc")
int pop_back_off(void *ctx)
{
return pop_ptr_off((void *)bpf_list_pop_back);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/linked_list_fail.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Bytedance */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
#define MAX_ENTRIES 1000
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u32);
__type(value, u64);
__uint(max_entries, MAX_ENTRIES);
} hash_map_bench SEC(".maps");
u64 __attribute__((__aligned__(256))) percpu_time[256];
u64 nr_loops;
static int loop_update_callback(__u32 index, u32 *key)
{
u64 init_val = 1;
bpf_map_update_elem(&hash_map_bench, key, &init_val, BPF_ANY);
return 0;
}
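/* Note: bpf_loop() below invokes this callback nr_loops times, passing the
 * iteration index and the &key pointer supplied as its context argument;
 * every iteration updates the same key, so the benchmark measures repeated
 * bpf_map_update_elem() calls on a single hash map entry.
 */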
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int benchmark(void *ctx)
{
u32 cpu = bpf_get_smp_processor_id();
u32 key = cpu + MAX_ENTRIES;
u64 start_time = bpf_ktime_get_ns();
bpf_loop(nr_loops, loop_update_callback, &key, 0);
percpu_time[cpu & 255] = bpf_ktime_get_ns() - start_time;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/bpf_get_stack.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct test_val);
} map_array_48b SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
SEC("tracepoint")
__description("bpf_get_stack return R0 within range")
__success
__naked void stack_return_r0_within_range(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r7 = r0; \
r9 = %[__imm_0]; \
r1 = r6; \
r2 = r7; \
r3 = %[__imm_0]; \
r4 = 256; \
call %[bpf_get_stack]; \
r1 = 0; \
r8 = r0; \
r8 <<= 32; \
r8 s>>= 32; \
if r1 s> r8 goto l0_%=; \
r9 -= r8; \
r2 = r7; \
r2 += r8; \
r1 = r9; \
r1 <<= 32; \
r1 s>>= 32; \
r3 = r2; \
r3 += r1; \
r1 = r7; \
r5 = %[__imm_0]; \
r1 += r5; \
if r3 >= r1 goto l0_%=; \
r1 = r6; \
r3 = r9; \
r4 = 0; \
call %[bpf_get_stack]; \
l0_%=: exit; \
" :
: __imm(bpf_get_stack),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, sizeof(struct test_val) / 2)
: __clobber_all);
}
SEC("iter/task")
__description("bpf_get_task_stack return R0 range is refined")
__success
__naked void return_r0_range_is_refined(void)
{
asm volatile (" \
r6 = *(u64*)(r1 + 0); \
r6 = *(u64*)(r6 + 0); /* ctx->meta->seq */\
r7 = *(u64*)(r1 + 8); /* ctx->task */\
r1 = %[map_array_48b] ll; /* fixup_map_array_48b */\
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
r2 = r10; \
r2 += -8; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: if r7 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r1 = r7; \
r2 = r0; \
r9 = r0; /* keep buf for seq_write */\
r3 = 48; \
r4 = 0; \
call %[bpf_get_task_stack]; \
if r0 s> 0 goto l2_%=; \
r0 = 0; \
exit; \
l2_%=: r1 = r6; \
r2 = r9; \
r3 = r0; \
call %[bpf_seq_write]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_task_stack),
__imm(bpf_map_lookup_elem),
__imm(bpf_seq_write),
__imm_addr(map_array_48b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
/* fields of exactly the same size */
struct test_struct___samesize {
void *ptr;
unsigned long long val1;
unsigned int val2;
unsigned short val3;
unsigned char val4;
} __attribute((preserve_access_index));
/* unsigned fields that have to be downsized by libbpf */
struct test_struct___downsize {
void *ptr;
unsigned long val1;
unsigned long val2;
unsigned long val3;
unsigned long val4;
/* total sz: 40 */
} __attribute__((preserve_access_index));
/* fields with signed integers of wrong size, should be rejected */
struct test_struct___signed {
void *ptr;
long val1;
long val2;
long val3;
long val4;
} __attribute((preserve_access_index));
/* real layout and sizes according to test's (32-bit) BTF */
struct test_struct___real {
	unsigned int ptr; /* can't use `void *`, it is always 8 bytes on the BPF target */
unsigned int val2;
unsigned long long val1;
unsigned short val3;
unsigned char val4;
unsigned char _pad;
/* total sz: 20 */
};
struct test_struct___real input = {
.ptr = 0x01020304,
.val1 = 0x1020304050607080,
.val2 = 0x0a0b0c0d,
.val3 = 0xfeed,
.val4 = 0xb9,
._pad = 0xff, /* make sure no accidental zeros are present */
};
unsigned long long ptr_samesized = 0;
unsigned long long val1_samesized = 0;
unsigned long long val2_samesized = 0;
unsigned long long val3_samesized = 0;
unsigned long long val4_samesized = 0;
struct test_struct___real output_samesized = {};
unsigned long long ptr_downsized = 0;
unsigned long long val1_downsized = 0;
unsigned long long val2_downsized = 0;
unsigned long long val3_downsized = 0;
unsigned long long val4_downsized = 0;
struct test_struct___real output_downsized = {};
unsigned long long ptr_probed = 0;
unsigned long long val1_probed = 0;
unsigned long long val2_probed = 0;
unsigned long long val3_probed = 0;
unsigned long long val4_probed = 0;
unsigned long long ptr_signed = 0;
unsigned long long val1_signed = 0;
unsigned long long val2_signed = 0;
unsigned long long val3_signed = 0;
unsigned long long val4_signed = 0;
struct test_struct___real output_signed = {};
SEC("raw_tp/sys_exit")
int handle_samesize(void *ctx)
{
struct test_struct___samesize *in = (void *)&input;
struct test_struct___samesize *out = (void *)&output_samesized;
ptr_samesized = (unsigned long long)in->ptr;
val1_samesized = in->val1;
val2_samesized = in->val2;
val3_samesized = in->val3;
val4_samesized = in->val4;
out->ptr = in->ptr;
out->val1 = in->val1;
out->val2 = in->val2;
out->val3 = in->val3;
out->val4 = in->val4;
return 0;
}
SEC("raw_tp/sys_exit")
int handle_downsize(void *ctx)
{
struct test_struct___downsize *in = (void *)&input;
struct test_struct___downsize *out = (void *)&output_downsized;
ptr_downsized = (unsigned long long)in->ptr;
val1_downsized = in->val1;
val2_downsized = in->val2;
val3_downsized = in->val3;
val4_downsized = in->val4;
out->ptr = in->ptr;
out->val1 = in->val1;
out->val2 = in->val2;
out->val3 = in->val3;
out->val4 = in->val4;
return 0;
}
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define bpf_core_read_int bpf_core_read
#else
#define bpf_core_read_int(dst, sz, src) ({ \
/* Prevent "subtraction from stack pointer prohibited" */ \
volatile long __off = sizeof(*dst) - (sz); \
bpf_core_read((char *)(dst) + __off, sz, src); \
})
#endif
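/* Worked example (assuming a big-endian target): reading a 4-byte source
 * field into an 8-byte tmp gives __off = sizeof(tmp) - 4 = 4, so the bytes
 * land in the low-order half of tmp and the numeric value matches what a
 * little-endian target gets by writing at offset 0.
 */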
SEC("raw_tp/sys_enter")
int handle_probed(void *ctx)
{
struct test_struct___downsize *in = (void *)&input;
__u64 tmp;
tmp = 0;
bpf_core_read_int(&tmp, bpf_core_field_size(in->ptr), &in->ptr);
ptr_probed = tmp;
tmp = 0;
bpf_core_read_int(&tmp, bpf_core_field_size(in->val1), &in->val1);
val1_probed = tmp;
tmp = 0;
bpf_core_read_int(&tmp, bpf_core_field_size(in->val2), &in->val2);
val2_probed = tmp;
tmp = 0;
bpf_core_read_int(&tmp, bpf_core_field_size(in->val3), &in->val3);
val3_probed = tmp;
tmp = 0;
bpf_core_read_int(&tmp, bpf_core_field_size(in->val4), &in->val4);
val4_probed = tmp;
return 0;
}
SEC("raw_tp/sys_enter")
int handle_signed(void *ctx)
{
struct test_struct___signed *in = (void *)&input;
struct test_struct___signed *out = (void *)&output_signed;
val2_signed = in->val2;
val3_signed = in->val3;
val4_signed = in->val4;
	out->val2 = in->val2;
	out->val3 = in->val3;
	out->val4 = in->val4;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_autosize.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/d_path.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("fentry/dentry_open")
__description("d_path accept")
__success __retval(0)
__naked void d_path_accept(void)
{
asm volatile (" \
r1 = *(u32*)(r1 + 0); \
r2 = r10; \
r2 += -8; \
r6 = 0; \
*(u64*)(r2 + 0) = r6; \
r3 = 8 ll; \
call %[bpf_d_path]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_d_path)
: __clobber_all);
}
SEC("fentry/d_path")
__description("d_path reject")
__failure __msg("helper call is not allowed in probe")
__naked void d_path_reject(void)
{
asm volatile (" \
r1 = *(u32*)(r1 + 0); \
r2 = r10; \
r2 += -8; \
r6 = 0; \
*(u64*)(r2 + 0) = r6; \
r3 = 8 ll; \
call %[bpf_d_path]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_d_path)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_d_path.c |
#include "core_reloc_types.h"
void f(struct core_reloc_existence x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_existence.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2021 Google LLC.
*/
#include <errno.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
__u32 invocations = 0;
__u32 assertion_error = 0;
__u32 retval_value = 0;
__u32 page_size = 0;
SEC("cgroup/setsockopt")
int get_retval(struct bpf_sockopt *ctx)
{
retval_value = bpf_get_retval();
__sync_fetch_and_add(&invocations, 1);
	/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 1;
}
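/* Note on return codes (general cgroup/setsockopt convention): returning 1
 * lets the setsockopt call proceed, while returning 0 rejects it; unless
 * bpf_set_retval() installed a specific error, a rejection is reported to
 * userspace as -EPERM (exercised by legacy_eperm below).
 */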
SEC("cgroup/setsockopt")
int set_eunatch(struct bpf_sockopt *ctx)
{
__sync_fetch_and_add(&invocations, 1);
if (bpf_set_retval(-EUNATCH))
assertion_error = 1;
	/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 0;
}
SEC("cgroup/setsockopt")
int set_eisconn(struct bpf_sockopt *ctx)
{
__sync_fetch_and_add(&invocations, 1);
if (bpf_set_retval(-EISCONN))
assertion_error = 1;
	/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 0;
}
SEC("cgroup/setsockopt")
int legacy_eperm(struct bpf_sockopt *ctx)
{
__sync_fetch_and_add(&invocations, 1);
	/* An optval larger than PAGE_SIZE uses the kernel's buffer. */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 Google LLC.
*/
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
static int sequence = 0;
__s32 input_retval = 0;
__u64 fentry_result = 0;
SEC("fentry/bpf_modify_return_test")
int BPF_PROG(fentry_test, int a, __u64 b)
{
sequence++;
fentry_result = (sequence == 1);
return 0;
}
__u64 fmod_ret_result = 0;
SEC("fmod_ret/bpf_modify_return_test")
int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
{
sequence++;
/* This is the first fmod_ret program, the ret passed should be 0 */
fmod_ret_result = (sequence == 2 && ret == 0);
return input_retval;
}
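/* A non-zero return from an fmod_ret program overrides the return value of
 * the attached function, so when input_retval != 0 the fexit program below
 * should observe ret == input_retval instead of the function's own result.
 */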
__u64 fexit_result = 0;
SEC("fexit/bpf_modify_return_test")
int BPF_PROG(fexit_test, int a, __u64 b, int ret)
{
sequence++;
	/* If the input_retval is non-zero a successful modification should have
* occurred.
*/
if (input_retval)
fexit_result = (sequence == 3 && ret == input_retval);
else
fexit_result = (sequence == 3 && ret == 4);
return 0;
}
static int sequence2;
__u64 fentry_result2 = 0;
SEC("fentry/bpf_modify_return_test2")
int BPF_PROG(fentry_test2, int a, int *b, short c, int d, void *e, char f,
int g)
{
sequence2++;
fentry_result2 = (sequence2 == 1);
return 0;
}
__u64 fmod_ret_result2 = 0;
SEC("fmod_ret/bpf_modify_return_test2")
int BPF_PROG(fmod_ret_test2, int a, int *b, short c, int d, void *e, char f,
int g, int ret)
{
sequence2++;
/* This is the first fmod_ret program, the ret passed should be 0 */
fmod_ret_result2 = (sequence2 == 2 && ret == 0);
return input_retval;
}
__u64 fexit_result2 = 0;
SEC("fexit/bpf_modify_return_test2")
int BPF_PROG(fexit_test2, int a, int *b, short c, int d, void *e, char f,
int g, int ret)
{
sequence2++;
	/* If the input_retval is non-zero a successful modification should have
* occurred.
*/
if (input_retval)
fexit_result2 = (sequence2 == 3 && ret == input_retval);
else
fexit_result2 = (sequence2 == 3 && ret == 29);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/modify_return.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"
extern void bpf_rcu_read_lock(void) __ksym;
extern void bpf_rcu_read_unlock(void) __ksym;
struct node_data {
long key;
long list_data;
struct bpf_rb_node r;
struct bpf_list_node l;
struct bpf_refcount ref;
};
struct map_value {
struct node_data __kptr *node;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 2);
} stashed_nodes SEC(".maps");
struct node_acquire {
long key;
long data;
struct bpf_rb_node node;
struct bpf_refcount refcount;
};
#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock lock;
private(A) struct bpf_rb_root root __contains(node_data, r);
private(A) struct bpf_list_head head __contains(node_data, l);
private(B) struct bpf_spin_lock alock;
private(B) struct bpf_rb_root aroot __contains(node_acquire, node);
private(C) struct bpf_spin_lock block;
private(C) struct bpf_rb_root broot __contains(node_data, r);
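/* Grouping note: each private(X) section places a spin lock together with the
 * rbtree/list heads it protects, so the verifier can associate 'lock' with
 * 'root' and 'head' while treating 'alock'/'aroot' and 'block'/'broot' as
 * independent lock domains.
 */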
static bool less(struct bpf_rb_node *node_a, const struct bpf_rb_node *node_b)
{
struct node_data *a;
struct node_data *b;
a = container_of(node_a, struct node_data, r);
b = container_of(node_b, struct node_data, r);
return a->key < b->key;
}
static bool less_a(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
struct node_acquire *node_a;
struct node_acquire *node_b;
node_a = container_of(a, struct node_acquire, node);
node_b = container_of(b, struct node_acquire, node);
return node_a->key < node_b->key;
}
static long __insert_in_tree_and_list(struct bpf_list_head *head,
struct bpf_rb_root *root,
struct bpf_spin_lock *lock)
{
struct node_data *n, *m;
n = bpf_obj_new(typeof(*n));
if (!n)
return -1;
m = bpf_refcount_acquire(n);
m->key = 123;
m->list_data = 456;
bpf_spin_lock(lock);
if (bpf_rbtree_add(root, &n->r, less)) {
/* Failure to insert - unexpected */
bpf_spin_unlock(lock);
bpf_obj_drop(m);
return -2;
}
bpf_spin_unlock(lock);
bpf_spin_lock(lock);
if (bpf_list_push_front(head, &m->l)) {
/* Failure to insert - unexpected */
bpf_spin_unlock(lock);
return -3;
}
bpf_spin_unlock(lock);
return 0;
}
static long __stash_map_insert_tree(int idx, int val, struct bpf_rb_root *root,
struct bpf_spin_lock *lock)
{
struct map_value *mapval;
struct node_data *n, *m;
mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
if (!mapval)
return -1;
n = bpf_obj_new(typeof(*n));
if (!n)
return -2;
n->key = val;
m = bpf_refcount_acquire(n);
n = bpf_kptr_xchg(&mapval->node, n);
if (n) {
bpf_obj_drop(n);
bpf_obj_drop(m);
return -3;
}
bpf_spin_lock(lock);
if (bpf_rbtree_add(root, &m->r, less)) {
/* Failure to insert - unexpected */
bpf_spin_unlock(lock);
return -4;
}
bpf_spin_unlock(lock);
return 0;
}
static long __read_from_tree(struct bpf_rb_root *root,
struct bpf_spin_lock *lock,
bool remove_from_tree)
{
struct bpf_rb_node *rb;
struct node_data *n;
long res = -99;
bpf_spin_lock(lock);
rb = bpf_rbtree_first(root);
if (!rb) {
bpf_spin_unlock(lock);
return -1;
}
n = container_of(rb, struct node_data, r);
res = n->key;
if (!remove_from_tree) {
bpf_spin_unlock(lock);
return res;
}
rb = bpf_rbtree_remove(root, rb);
bpf_spin_unlock(lock);
if (!rb)
return -2;
n = container_of(rb, struct node_data, r);
bpf_obj_drop(n);
return res;
}
static long __read_from_list(struct bpf_list_head *head,
struct bpf_spin_lock *lock,
bool remove_from_list)
{
struct bpf_list_node *l;
struct node_data *n;
long res = -99;
bpf_spin_lock(lock);
l = bpf_list_pop_front(head);
if (!l) {
bpf_spin_unlock(lock);
return -1;
}
n = container_of(l, struct node_data, l);
res = n->list_data;
if (!remove_from_list) {
if (bpf_list_push_back(head, &n->l)) {
bpf_spin_unlock(lock);
return -2;
}
}
bpf_spin_unlock(lock);
if (remove_from_list)
bpf_obj_drop(n);
return res;
}
static long __read_from_unstash(int idx)
{
struct node_data *n = NULL;
struct map_value *mapval;
long val = -99;
mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
if (!mapval)
return -1;
n = bpf_kptr_xchg(&mapval->node, n);
if (!n)
return -2;
val = n->key;
bpf_obj_drop(n);
return val;
}
#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \
SEC("tc") \
__description(desc) \
__success __retval(579) \
long insert_and_remove_tree_##rem_tree##_list_##rem_list(void *ctx) \
{ \
long err, tree_data, list_data; \
\
err = __insert_in_tree_and_list(&head, &root, &lock); \
if (err) \
return err; \
\
err = __read_from_tree(&root, &lock, rem_tree); \
if (err < 0) \
return err; \
else \
tree_data = err; \
\
err = __read_from_list(&head, &lock, rem_list); \
if (err < 0) \
return err; \
else \
list_data = err; \
\
return tree_data + list_data; \
}
/* After successful insert of struct node_data into both collections:
* - it should have refcount = 2
* - removing / not removing the node_data from a collection after
* reading should have no effect on ability to read / remove from
* the other collection
*/
INSERT_READ_BOTH(true, true, "insert_read_both: remove from tree + list");
INSERT_READ_BOTH(false, false, "insert_read_both: remove from neither");
INSERT_READ_BOTH(true, false, "insert_read_both: remove from tree");
INSERT_READ_BOTH(false, true, "insert_read_both: remove from list");
#undef INSERT_READ_BOTH
#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \
SEC("tc") \
__description(desc) \
__success __retval(579) \
long insert_and_remove_lf_tree_##rem_tree##_list_##rem_list(void *ctx) \
{ \
long err, tree_data, list_data; \
\
err = __insert_in_tree_and_list(&head, &root, &lock); \
if (err) \
return err; \
\
err = __read_from_list(&head, &lock, rem_list); \
if (err < 0) \
return err; \
else \
list_data = err; \
\
err = __read_from_tree(&root, &lock, rem_tree); \
if (err < 0) \
return err; \
else \
tree_data = err; \
\
return tree_data + list_data; \
}
/* Similar to insert_read_both, but list data is read and possibly removed
* first
*
* Results should be no different than reading and possibly removing rbtree
* node first
*/
INSERT_READ_BOTH(true, true, "insert_read_both_list_first: remove from tree + list");
INSERT_READ_BOTH(false, false, "insert_read_both_list_first: remove from neither");
INSERT_READ_BOTH(true, false, "insert_read_both_list_first: remove from tree");
INSERT_READ_BOTH(false, true, "insert_read_both_list_first: remove from list");
#define INSERT_DOUBLE_READ_AND_DEL(read_fn, read_root, desc) \
SEC("tc") \
__description(desc) \
__success __retval(-1) \
long insert_double_##read_fn##_and_del_##read_root(void *ctx) \
{ \
long err, list_data; \
\
err = __insert_in_tree_and_list(&head, &root, &lock); \
if (err) \
return err; \
\
err = read_fn(&read_root, &lock, true); \
if (err < 0) \
return err; \
else \
list_data = err; \
\
err = read_fn(&read_root, &lock, true); \
if (err < 0) \
return err; \
\
return err + list_data; \
}
/* Insert into both tree and list, then try reading-and-removing from either twice
*
* The second read-and-remove should fail on read step since the node has
* already been removed
*/
INSERT_DOUBLE_READ_AND_DEL(__read_from_tree, root, "insert_double_del: 2x read-and-del from tree");
INSERT_DOUBLE_READ_AND_DEL(__read_from_list, head, "insert_double_del: 2x read-and-del from list");
#define INSERT_STASH_READ(rem_tree, desc) \
SEC("tc") \
__description(desc) \
__success __retval(84) \
long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \
{ \
long err, tree_data, map_data; \
\
err = __stash_map_insert_tree(0, 42, &root, &lock); \
if (err) \
return err; \
\
err = __read_from_tree(&root, &lock, rem_tree); \
if (err < 0) \
return err; \
else \
tree_data = err; \
\
err = __read_from_unstash(0); \
if (err < 0) \
return err; \
else \
map_data = err; \
\
return tree_data + map_data; \
}
/* Stash a refcounted node in map_val, insert same node into tree, then try
* reading data from tree then unstashed map_val, possibly removing from tree
*
* Removing from tree should have no effect on map_val kptr validity
*/
INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");
SEC("tc")
__success
long rbtree_refcounted_node_ref_escapes(void *ctx)
{
struct node_acquire *n, *m;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
bpf_spin_lock(&alock);
bpf_rbtree_add(&aroot, &n->node, less_a);
m = bpf_refcount_acquire(n);
bpf_spin_unlock(&alock);
if (!m)
return 2;
m->key = 2;
bpf_obj_drop(m);
return 0;
}
SEC("tc")
__success
long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
{
struct node_acquire *n, *m;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
m = bpf_refcount_acquire(n);
m->key = 2;
bpf_spin_lock(&alock);
bpf_rbtree_add(&aroot, &n->node, less_a);
bpf_spin_unlock(&alock);
bpf_obj_drop(m);
return 0;
}
static long __stash_map_empty_xchg(struct node_data *n, int idx)
{
struct map_value *mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
if (!mapval) {
bpf_obj_drop(n);
return 1;
}
n = bpf_kptr_xchg(&mapval->node, n);
if (n) {
bpf_obj_drop(n);
return 2;
}
return 0;
}
SEC("tc")
long rbtree_wrong_owner_remove_fail_a1(void *ctx)
{
struct node_data *n, *m;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
m = bpf_refcount_acquire(n);
if (__stash_map_empty_xchg(n, 0)) {
bpf_obj_drop(m);
return 2;
}
if (__stash_map_empty_xchg(m, 1))
return 3;
return 0;
}
SEC("tc")
long rbtree_wrong_owner_remove_fail_b(void *ctx)
{
struct map_value *mapval;
struct node_data *n;
int idx = 0;
mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
if (!mapval)
return 1;
n = bpf_kptr_xchg(&mapval->node, NULL);
if (!n)
return 2;
bpf_spin_lock(&block);
bpf_rbtree_add(&broot, &n->r, less);
bpf_spin_unlock(&block);
return 0;
}
SEC("tc")
long rbtree_wrong_owner_remove_fail_a2(void *ctx)
{
struct map_value *mapval;
struct bpf_rb_node *res;
struct node_data *m;
int idx = 1;
mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
if (!mapval)
return 1;
m = bpf_kptr_xchg(&mapval->node, NULL);
if (!m)
return 2;
bpf_spin_lock(&lock);
/* make m non-owning ref */
bpf_list_push_back(&head, &m->l);
res = bpf_rbtree_remove(&root, &m->r);
bpf_spin_unlock(&lock);
if (res) {
bpf_obj_drop(container_of(res, struct node_data, r));
return 3;
}
return 0;
}
SEC("?fentry.s/bpf_testmod_test_read")
__success
int BPF_PROG(rbtree_sleepable_rcu,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
{
struct bpf_rb_node *rb;
struct node_data *n, *m = NULL;
n = bpf_obj_new(typeof(*n));
if (!n)
return 0;
bpf_rcu_read_lock();
bpf_spin_lock(&lock);
bpf_rbtree_add(&root, &n->r, less);
rb = bpf_rbtree_first(&root);
if (!rb)
goto err_out;
rb = bpf_rbtree_remove(&root, rb);
if (!rb)
goto err_out;
m = container_of(rb, struct node_data, r);
err_out:
bpf_spin_unlock(&lock);
bpf_rcu_read_unlock();
if (m)
bpf_obj_drop(m);
return 0;
}
SEC("?fentry.s/bpf_testmod_test_read")
__success
int BPF_PROG(rbtree_sleepable_rcu_no_explicit_rcu_lock,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
{
struct bpf_rb_node *rb;
struct node_data *n, *m = NULL;
n = bpf_obj_new(typeof(*n));
if (!n)
return 0;
/* No explicit bpf_rcu_read_lock */
bpf_spin_lock(&lock);
bpf_rbtree_add(&root, &n->r, less);
rb = bpf_rbtree_first(&root);
if (!rb)
goto err_out;
rb = bpf_rbtree_remove(&root, rb);
if (!rb)
goto err_out;
m = container_of(rb, struct node_data, r);
err_out:
bpf_spin_unlock(&lock);
/* No explicit bpf_rcu_read_unlock */
if (m)
bpf_obj_drop(m);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/refcounted_kptr.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <asm/unistd.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
long hits = 0;
SEC("tp/syscalls/sys_enter_getpgid")
int bench_trigger_tp(void *ctx)
{
__sync_add_and_fetch(&hits, 1);
return 0;
}
SEC("raw_tp/sys_enter")
int BPF_PROG(bench_trigger_raw_tp, struct pt_regs *regs, long id)
{
if (id == __NR_getpgid)
__sync_add_and_fetch(&hits, 1);
return 0;
}
SEC("kprobe/" SYS_PREFIX "sys_getpgid")
int bench_trigger_kprobe(void *ctx)
{
__sync_add_and_fetch(&hits, 1);
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bench_trigger_fentry(void *ctx)
{
__sync_add_and_fetch(&hits, 1);
return 0;
}
SEC("fentry.s/" SYS_PREFIX "sys_getpgid")
int bench_trigger_fentry_sleep(void *ctx)
{
__sync_add_and_fetch(&hits, 1);
return 0;
}
SEC("fmod_ret/" SYS_PREFIX "sys_getpgid")
int bench_trigger_fmodret(void *ctx)
{
__sync_add_and_fetch(&hits, 1);
return -22;
}
SEC("uprobe")
int bench_trigger_uprobe(void *ctx)
{
__sync_add_and_fetch(&hits, 1);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/trigger_bench.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
struct core_reloc_nesting_substruct {
int a;
};
union core_reloc_nesting_subunion {
int b;
};
/* int a.a.a and b.b.b accesses */
struct core_reloc_nesting {
union {
struct core_reloc_nesting_substruct a;
} a;
struct {
union core_reloc_nesting_subunion b;
} b;
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
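/* CORE_READ is a thin wrapper: bpf_core_read() records a CO-RE relocation for
 * the source expression, so field offsets inside core_reloc_nesting are fixed
 * up at load time against the target BTF rather than the layout compiled into
 * this object.
 */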
SEC("raw_tracepoint/sys_enter")
int test_core_nesting(void *ctx)
{
struct core_reloc_nesting *in = (void *)&data.in;
struct core_reloc_nesting *out = (void *)&data.out;
if (CORE_READ(&out->a.a.a, &in->a.a.a))
return 1;
if (CORE_READ(&out->b.b.b, &in->b.b.b))
return 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/helper_restricted.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct val {
int cnt;
struct bpf_spin_lock l;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct val);
} map_spin_lock SEC(".maps");
struct timer {
struct bpf_timer t;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct timer);
} map_timer SEC(".maps");
SEC("kprobe")
__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE")
__failure __msg("unknown func bpf_ktime_get_coarse_ns")
__naked void in_bpf_prog_type_kprobe_1(void)
{
asm volatile (" \
call %[bpf_ktime_get_coarse_ns]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_coarse_ns)
: __clobber_all);
}
SEC("tracepoint")
__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT")
__failure __msg("unknown func bpf_ktime_get_coarse_ns")
__naked void in_bpf_prog_type_tracepoint_1(void)
{
asm volatile (" \
call %[bpf_ktime_get_coarse_ns]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_coarse_ns)
: __clobber_all);
}
SEC("perf_event")
__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT")
__failure __msg("unknown func bpf_ktime_get_coarse_ns")
__naked void bpf_prog_type_perf_event_1(void)
{
asm volatile (" \
call %[bpf_ktime_get_coarse_ns]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_coarse_ns)
: __clobber_all);
}
SEC("raw_tracepoint")
__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT")
__failure __msg("unknown func bpf_ktime_get_coarse_ns")
__naked void bpf_prog_type_raw_tracepoint_1(void)
{
asm volatile (" \
call %[bpf_ktime_get_coarse_ns]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_coarse_ns)
: __clobber_all);
}
SEC("kprobe")
__description("bpf_timer_init isn restricted in BPF_PROG_TYPE_KPROBE")
__failure __msg("tracing progs cannot use bpf_timer yet")
__naked void in_bpf_prog_type_kprobe_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_timer] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = %[map_timer] ll; \
r3 = 1; \
l0_%=: call %[bpf_timer_init]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_timer_init),
__imm_addr(map_timer)
: __clobber_all);
}
SEC("perf_event")
__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT")
__failure __msg("tracing progs cannot use bpf_timer yet")
__naked void bpf_prog_type_perf_event_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_timer] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = %[map_timer] ll; \
r3 = 1; \
l0_%=: call %[bpf_timer_init]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_timer_init),
__imm_addr(map_timer)
: __clobber_all);
}
SEC("tracepoint")
__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT")
__failure __msg("tracing progs cannot use bpf_timer yet")
__naked void in_bpf_prog_type_tracepoint_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_timer] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = %[map_timer] ll; \
r3 = 1; \
l0_%=: call %[bpf_timer_init]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_timer_init),
__imm_addr(map_timer)
: __clobber_all);
}
SEC("raw_tracepoint")
__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT")
__failure __msg("tracing progs cannot use bpf_timer yet")
__naked void bpf_prog_type_raw_tracepoint_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_timer] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = %[map_timer] ll; \
r3 = 1; \
l0_%=: call %[bpf_timer_init]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_timer_init),
__imm_addr(map_timer)
: __clobber_all);
}
SEC("kprobe")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void in_bpf_prog_type_kprobe_3(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
call %[bpf_spin_lock]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("tracepoint")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void in_bpf_prog_type_tracepoint_3(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
call %[bpf_spin_lock]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("perf_event")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void bpf_prog_type_perf_event_3(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
call %[bpf_spin_lock]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("raw_tracepoint")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void bpf_prog_type_raw_tracepoint_3(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
call %[bpf_spin_lock]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_helper_restricted.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
__u64 count = 0;
SEC("raw_tracepoint/sys_enter")
int test_enable_stats(void *ctx)
{
__sync_fetch_and_add(&count, 1);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_enable_stats.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Google */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
__u64 out__runqueues_addr = -1;
__u64 out__bpf_prog_active_addr = -1;
__u32 out__rq_cpu = -1; /* percpu struct fields */
int out__bpf_prog_active = -1; /* percpu int */
__u32 out__this_rq_cpu = -1;
int out__this_bpf_prog_active = -1;
__u32 out__cpu_0_rq_cpu = -1; /* cpu_rq(0)->cpu */
extern const struct rq runqueues __ksym; /* struct type global var. */
extern const int bpf_prog_active __ksym; /* int type global var. */
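/* The __ksym externs above are resolved against kernel BTF at load time.
 * bpf_per_cpu_ptr() returns the per-CPU copy for a given CPU (or NULL for an
 * invalid CPU, hence the checks below), while bpf_this_cpu_ptr() returns the
 * current CPU's copy and never yields NULL.
 */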
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
struct rq *rq;
int *active;
__u32 cpu;
out__runqueues_addr = (__u64)&runqueues;
out__bpf_prog_active_addr = (__u64)&bpf_prog_active;
cpu = bpf_get_smp_processor_id();
/* test bpf_per_cpu_ptr() */
rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, cpu);
if (rq)
out__rq_cpu = rq->cpu;
active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
if (active)
out__bpf_prog_active = *active;
rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
if (rq) /* should always be valid, but we can't spare the check. */
out__cpu_0_rq_cpu = rq->cpu;
/* test bpf_this_cpu_ptr */
rq = (struct rq *)bpf_this_cpu_ptr(&runqueues);
out__this_rq_cpu = rq->cpu;
active = (int *)bpf_this_cpu_ptr(&bpf_prog_active);
out__this_bpf_prog_active = *active;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_ksyms_btf.c |
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021 Hengqi Chen */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_tracing_net.h"
const volatile pid_t my_pid = 0;
char path[256] = {};
SEC("fentry/unix_listen")
int BPF_PROG(unix_listen, struct socket *sock, int backlog)
{
pid_t pid = bpf_get_current_pid_tgid() >> 32;
struct unix_sock *unix_sk;
int i, len;
if (pid != my_pid)
return 0;
unix_sk = (struct unix_sock *)bpf_skc_to_unix_sock(sock->sk);
if (!unix_sk)
return 0;
if (unix_sk->addr->name->sun_path[0])
return 0;
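	/* Only abstract sockets reach this point: their sun_path starts with a
	 * NUL byte, which is conventionally rendered as a leading '@' below.
	 */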
len = unix_sk->addr->len - sizeof(short);
path[0] = '@';
for (i = 1; i < len; i++) {
if (i >= sizeof(struct sockaddr_un))
break;
path[i] = unix_sk->addr->name->sun_path[i];
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} nop_table SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 3);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
int count = 0;
int noise = 0;
static __always_inline int subprog_noise(void)
{
__u32 key = 0;
bpf_map_lookup_elem(&nop_table, &key);
return 0;
}
__noinline
int subprog_tail_2(struct __sk_buff *skb)
{
if (noise)
subprog_noise();
bpf_tail_call_static(skb, &jmp_table, 2);
return skb->len * 3;
}
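/* Reminder: bpf_tail_call_static() does not return on success, so the
 * "return skb->len * N" lines in these subprograms only execute when the
 * corresponding jmp_table slot is empty and the tail call falls through.
 */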
__noinline
int subprog_tail_1(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 1);
return skb->len * 2;
}
__noinline
int subprog_tail(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
return skb->len;
}
SEC("tc")
int classifier_1(struct __sk_buff *skb)
{
return subprog_tail_2(skb);
}
SEC("tc")
int classifier_2(struct __sk_buff *skb)
{
count++;
return subprog_tail_2(skb);
}
SEC("tc")
int classifier_0(struct __sk_buff *skb)
{
return subprog_tail_1(skb);
}
SEC("tc")
int entry(struct __sk_buff *skb)
{
return subprog_tail(skb);
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016 VMware
* Copyright (c) 2016 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_tunnel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/pkt_cls.h>
#include <linux/erspan.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define log_err(__ret) bpf_printk("ERROR line:%d ret:%d\n", __LINE__, __ret)
#define VXLAN_UDP_PORT 4789
/* Only IPv4 address assigned to veth1.
* 172.16.1.200
*/
#define ASSIGNED_ADDR_VETH1 0xac1001c8
struct geneve_opt {
__be16 opt_class;
__u8 type;
__u8 length:5;
__u8 r3:1;
__u8 r2:1;
__u8 r1:1;
	__u8 opt_data[8]; /* hard-coded to 8 bytes */
};
struct vxlanhdr {
__be32 vx_flags;
__be32 vx_vni;
} __attribute__((packed));
struct vxlan_metadata {
__u32 gbp;
};
struct bpf_fou_encap {
__be16 sport;
__be16 dport;
};
enum bpf_fou_encap_type {
FOU_BPF_ENCAP_FOU,
FOU_BPF_ENCAP_GUE,
};
int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx,
struct bpf_fou_encap *encap, int type) __ksym;
int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx,
struct bpf_fou_encap *encap) __ksym;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} local_ip_map SEC(".maps");
SEC("tc")
int gre_set_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
__builtin_memset(&key, 0x0, sizeof(key));
key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
key.tunnel_id = 2;
key.tunnel_tos = 0;
key.tunnel_ttl = 64;
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX | BPF_F_SEQ_NUMBER);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int gre_set_tunnel_no_key(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
__builtin_memset(&key, 0x0, sizeof(key));
key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
key.tunnel_ttl = 64;
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX | BPF_F_SEQ_NUMBER |
BPF_F_NO_TUNNEL_KEY);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int gre_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
bpf_printk("key %d remote ip 0x%x\n", key.tunnel_id, key.remote_ipv4);
return TC_ACT_OK;
}
SEC("tc")
int ip6gretap_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
int ret;
__builtin_memset(&key, 0x0, sizeof(key));
key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
key.tunnel_id = 2;
key.tunnel_tos = 0;
key.tunnel_ttl = 64;
key.tunnel_label = 0xabcde;
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
BPF_F_SEQ_NUMBER);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int ip6gretap_get_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
int ret;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
bpf_printk("key %d remote ip6 ::%x label %x\n",
key.tunnel_id, key.remote_ipv6[3], key.tunnel_label);
return TC_ACT_OK;
}
SEC("tc")
int erspan_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct erspan_metadata md;
int ret;
__builtin_memset(&key, 0x0, sizeof(key));
key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
key.tunnel_id = 2;
key.tunnel_tos = 0;
key.tunnel_ttl = 64;
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
__builtin_memset(&md, 0, sizeof(md));
#ifdef ERSPAN_V1
md.version = 1;
md.u.index = bpf_htonl(123);
#else
__u8 direction = 1;
__u8 hwid = 7;
md.version = 2;
md.u.md2.dir = direction;
md.u.md2.hwid = hwid & 0xf;
md.u.md2.hwid_upper = (hwid >> 4) & 0x3;
#endif
ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int erspan_get_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct erspan_metadata md;
int ret;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
bpf_printk("key %d remote ip 0x%x erspan version %d\n",
key.tunnel_id, key.remote_ipv4, md.version);
#ifdef ERSPAN_V1
	__u32 index = bpf_ntohl(md.u.index);
bpf_printk("\tindex %x\n", index);
#else
bpf_printk("\tdirection %d hwid %x timestamp %u\n",
md.u.md2.dir,
(md.u.md2.hwid_upper << 4) + md.u.md2.hwid,
bpf_ntohl(md.u.md2.timestamp));
#endif
return TC_ACT_OK;
}
SEC("tc")
int ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct erspan_metadata md;
int ret;
__builtin_memset(&key, 0x0, sizeof(key));
key.remote_ipv6[3] = bpf_htonl(0x11);
key.tunnel_id = 2;
key.tunnel_tos = 0;
key.tunnel_ttl = 64;
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
__builtin_memset(&md, 0, sizeof(md));
#ifdef ERSPAN_V1
md.u.index = bpf_htonl(123);
md.version = 1;
#else
__u8 direction = 0;
__u8 hwid = 17;
md.version = 2;
md.u.md2.dir = direction;
md.u.md2.hwid = hwid & 0xf;
md.u.md2.hwid_upper = (hwid >> 4) & 0x3;
#endif
ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int ip4ip6erspan_get_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct erspan_metadata md;
int ret;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
bpf_printk("ip6erspan get key %d remote ip6 ::%x erspan version %d\n",
key.tunnel_id, key.remote_ipv4, md.version);
#ifdef ERSPAN_V1
	__u32 index = bpf_ntohl(md.u.index);
bpf_printk("\tindex %x\n", index);
#else
bpf_printk("\tdirection %d hwid %x timestamp %u\n",
md.u.md2.dir,
(md.u.md2.hwid_upper << 4) + md.u.md2.hwid,
bpf_ntohl(md.u.md2.timestamp));
#endif
return TC_ACT_OK;
}
SEC("tc")
int vxlan_set_tunnel_dst(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct vxlan_metadata md;
__u32 index = 0;
__u32 *local_ip = NULL;
int ret = 0;
local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
if (!local_ip) {
log_err(ret);
return TC_ACT_SHOT;
}
__builtin_memset(&key, 0x0, sizeof(key));
key.local_ipv4 = 0xac100164; /* 172.16.1.100 */
key.remote_ipv4 = *local_ip;
key.tunnel_id = 2;
key.tunnel_tos = 0;
key.tunnel_ttl = 64;
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
md.gbp = 0x800FF; /* Set VXLAN Group Policy extension */
ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int vxlan_set_tunnel_src(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct vxlan_metadata md;
__u32 index = 0;
__u32 *local_ip = NULL;
int ret = 0;
local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
if (!local_ip) {
log_err(ret);
return TC_ACT_SHOT;
}
__builtin_memset(&key, 0x0, sizeof(key));
key.local_ipv4 = *local_ip;
key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
key.tunnel_id = 2;
key.tunnel_tos = 0;
key.tunnel_ttl = 64;
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
md.gbp = 0x800FF; /* Set VXLAN Group Policy extension */
ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int vxlan_get_tunnel_src(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
struct vxlan_metadata md;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_FLAGS);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
if (key.local_ipv4 != ASSIGNED_ADDR_VETH1 || md.gbp != 0x800FF ||
!(key.tunnel_flags & TUNNEL_KEY) ||
(key.tunnel_flags & TUNNEL_CSUM)) {
bpf_printk("vxlan key %d local ip 0x%x remote ip 0x%x gbp 0x%x flags 0x%x\n",
key.tunnel_id, key.local_ipv4,
key.remote_ipv4, md.gbp,
bpf_ntohs(key.tunnel_flags));
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int veth_set_outer_dst(struct __sk_buff *skb)
{
struct ethhdr *eth = (struct ethhdr *)(long)skb->data;
__u32 assigned_ip = bpf_htonl(ASSIGNED_ADDR_VETH1);
void *data_end = (void *)(long)skb->data_end;
struct udphdr *udph;
struct iphdr *iph;
int ret = 0;
__s64 csum;
if ((void *)eth + sizeof(*eth) > data_end) {
log_err(ret);
return TC_ACT_SHOT;
}
if (eth->h_proto != bpf_htons(ETH_P_IP))
return TC_ACT_OK;
iph = (struct iphdr *)(eth + 1);
if ((void *)iph + sizeof(*iph) > data_end) {
log_err(ret);
return TC_ACT_SHOT;
}
if (iph->protocol != IPPROTO_UDP)
return TC_ACT_OK;
udph = (struct udphdr *)(iph + 1);
if ((void *)udph + sizeof(*udph) > data_end) {
log_err(ret);
return TC_ACT_SHOT;
}
if (udph->dest != bpf_htons(VXLAN_UDP_PORT))
return TC_ACT_OK;
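	/* Rewrite sketch: bpf_csum_diff() computes the checksum delta for
	 * replacing daddr, bpf_skb_store_bytes() writes the new address, and
	 * bpf_l3_csum_replace() folds the delta into the IPv4 header checksum.
	 */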
if (iph->daddr != assigned_ip) {
csum = bpf_csum_diff(&iph->daddr, sizeof(__u32), &assigned_ip,
sizeof(__u32), 0);
if (bpf_skb_store_bytes(skb, ETH_HLEN + offsetof(struct iphdr, daddr),
&assigned_ip, sizeof(__u32), 0) < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
if (bpf_l3_csum_replace(skb, ETH_HLEN + offsetof(struct iphdr, check),
0, csum, 0) < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
bpf_skb_change_type(skb, PACKET_HOST);
}
return TC_ACT_OK;
}
SEC("tc")
int ip6vxlan_set_tunnel_dst(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
__u32 index = 0;
__u32 *local_ip;
int ret = 0;
local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
if (!local_ip) {
log_err(ret);
return TC_ACT_SHOT;
}
__builtin_memset(&key, 0x0, sizeof(key));
key.local_ipv6[3] = bpf_htonl(0x11); /* ::11 */
key.remote_ipv6[3] = bpf_htonl(*local_ip);
key.tunnel_id = 22;
key.tunnel_tos = 0;
key.tunnel_ttl = 64;
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int ip6vxlan_set_tunnel_src(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
__u32 index = 0;
__u32 *local_ip;
int ret = 0;
local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
if (!local_ip) {
log_err(ret);
return TC_ACT_SHOT;
}
__builtin_memset(&key, 0x0, sizeof(key));
key.local_ipv6[3] = bpf_htonl(*local_ip);
key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
key.tunnel_id = 22;
key.tunnel_tos = 0;
key.tunnel_ttl = 64;
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int ip6vxlan_get_tunnel_src(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
__u32 index = 0;
__u32 *local_ip;
int ret = 0;
local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
if (!local_ip) {
log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6 | BPF_F_TUNINFO_FLAGS);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
if (bpf_ntohl(key.local_ipv6[3]) != *local_ip ||
!(key.tunnel_flags & TUNNEL_KEY) ||
!(key.tunnel_flags & TUNNEL_CSUM)) {
bpf_printk("ip6vxlan key %d local ip6 ::%x remote ip6 ::%x label 0x%x flags 0x%x\n",
key.tunnel_id, bpf_ntohl(key.local_ipv6[3]),
bpf_ntohl(key.remote_ipv6[3]), key.tunnel_label,
bpf_ntohs(key.tunnel_flags));
bpf_printk("local_ip 0x%x\n", *local_ip);
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int geneve_set_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
struct geneve_opt gopt;
__builtin_memset(&key, 0x0, sizeof(key));
key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
key.tunnel_id = 2;
key.tunnel_tos = 0;
key.tunnel_ttl = 64;
__builtin_memset(&gopt, 0x0, sizeof(gopt));
gopt.opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */
gopt.type = 0x08;
gopt.r1 = 0;
gopt.r2 = 0;
gopt.r3 = 0;
gopt.length = 2; /* 4-byte multiple */
*(int *) &gopt.opt_data = bpf_htonl(0xdeadbeef);
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt));
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int geneve_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
struct geneve_opt gopt;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
if (ret < 0)
gopt.opt_class = 0;
bpf_printk("key %d remote ip 0x%x geneve class 0x%x\n",
key.tunnel_id, key.remote_ipv4, gopt.opt_class);
return TC_ACT_OK;
}
SEC("tc")
int ip6geneve_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct geneve_opt gopt;
int ret;
__builtin_memset(&key, 0x0, sizeof(key));
key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
key.tunnel_id = 22;
key.tunnel_tos = 0;
key.tunnel_ttl = 64;
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
__builtin_memset(&gopt, 0x0, sizeof(gopt));
gopt.opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */
gopt.type = 0x08;
gopt.r1 = 0;
gopt.r2 = 0;
gopt.r3 = 0;
gopt.length = 2; /* 4-byte multiple */
*(int *) &gopt.opt_data = bpf_htonl(0xfeedbeef);
ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt));
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int ip6geneve_get_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct geneve_opt gopt;
int ret;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
if (ret < 0)
gopt.opt_class = 0;
bpf_printk("key %d remote ip 0x%x geneve class 0x%x\n",
key.tunnel_id, key.remote_ipv4, gopt.opt_class);
return TC_ACT_OK;
}
SEC("tc")
int ipip_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
void *data = (void *)(long)skb->data;
struct iphdr *iph = data;
void *data_end = (void *)(long)skb->data_end;
int ret;
/* single length check */
if (data + sizeof(*iph) > data_end) {
log_err(1);
return TC_ACT_SHOT;
}
key.tunnel_ttl = 64;
if (iph->protocol == IPPROTO_ICMP) {
key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
}
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int ipip_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
bpf_printk("remote ip 0x%x\n", key.remote_ipv4);
return TC_ACT_OK;
}
SEC("tc")
int ipip_gue_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
struct bpf_fou_encap encap = {};
void *data = (void *)(long)skb->data;
struct iphdr *iph = data;
void *data_end = (void *)(long)skb->data_end;
int ret;
if (data + sizeof(*iph) > data_end) {
log_err(1);
return TC_ACT_SHOT;
}
key.tunnel_ttl = 64;
if (iph->protocol == IPPROTO_ICMP)
key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
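/* Request GUE (UDP) encapsulation of the IPIP tunnel towards dport 5555. */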
encap.sport = 0;
encap.dport = bpf_htons(5555);
ret = bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_GUE);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int ipip_fou_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
struct bpf_fou_encap encap = {};
void *data = (void *)(long)skb->data;
struct iphdr *iph = data;
void *data_end = (void *)(long)skb->data_end;
int ret;
if (data + sizeof(*iph) > data_end) {
log_err(1);
return TC_ACT_SHOT;
}
key.tunnel_ttl = 64;
if (iph->protocol == IPPROTO_ICMP)
key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
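/* Same flow as the GUE variant above, but with plain FOU encapsulation. */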
encap.sport = 0;
encap.dport = bpf_htons(5555);
ret = bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_FOU);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int ipip_encap_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key = {};
struct bpf_fou_encap encap = {};
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_fou_encap(skb, &encap);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
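/* Packets not encapsulated towards UDP port 5555 are dropped. */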
if (bpf_ntohs(encap.dport) != 5555)
return TC_ACT_SHOT;
bpf_printk("%d remote ip 0x%x, sport %d, dport %d\n", ret,
key.remote_ipv4, bpf_ntohs(encap.sport),
bpf_ntohs(encap.dport));
return TC_ACT_OK;
}
SEC("tc")
int ipip6_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
void *data = (void *)(long)skb->data;
struct iphdr *iph = data;
void *data_end = (void *)(long)skb->data_end;
int ret;
/* single length check */
if (data + sizeof(*iph) > data_end) {
log_err(1);
return TC_ACT_SHOT;
}
__builtin_memset(&key, 0x0, sizeof(key));
key.tunnel_ttl = 64;
if (iph->protocol == IPPROTO_ICMP) {
key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
}
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int ipip6_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
bpf_printk("remote ip6 %x::%x\n", bpf_htonl(key.remote_ipv6[0]),
bpf_htonl(key.remote_ipv6[3]));
return TC_ACT_OK;
}
SEC("tc")
int ip6ip6_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
void *data = (void *)(long)skb->data;
struct ipv6hdr *iph = data;
void *data_end = (void *)(long)skb->data_end;
int ret;
/* single length check */
if (data + sizeof(*iph) > data_end) {
log_err(1);
return TC_ACT_SHOT;
}
key.tunnel_ttl = 64;
if (iph->nexthdr == 58 /* NEXTHDR_ICMP */) {
key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
}
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
SEC("tc")
int ip6ip6_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
}
bpf_printk("remote ip6 %x::%x\n", bpf_htonl(key.remote_ipv6[0]),
bpf_htonl(key.remote_ipv6[3]));
return TC_ACT_OK;
}
SEC("tc")
int xfrm_get_state(struct __sk_buff *skb)
{
struct bpf_xfrm_state x;
int ret;
ret = bpf_skb_get_xfrm_state(skb, 0, &x, sizeof(x), 0);
if (ret < 0)
return TC_ACT_OK;
bpf_printk("reqid %d spi 0x%x remote ip 0x%x\n",
x.reqid, bpf_ntohl(x.spi),
bpf_ntohl(x.remote_ipv4));
return TC_ACT_OK;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_tunnel_kern.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include "xdp_metadata.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
struct {
__uint(type, BPF_MAP_TYPE_XSKMAP);
__uint(max_entries, 4);
__type(key, __u32);
__type(value, __u32);
} xsk SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} prog_arr SEC(".maps");
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
__u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
enum xdp_rss_hash_type *rss_type) __ksym;
SEC("xdp")
int rx(struct xdp_md *ctx)
{
void *data, *data_meta;
struct xdp_meta *meta;
u64 timestamp = -1;
int ret;
/* Reserve enough for all custom metadata. */
ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta));
if (ret != 0)
return XDP_DROP;
data = (void *)(long)ctx->data;
data_meta = (void *)(long)ctx->data_meta;
if (data_meta + sizeof(struct xdp_meta) > data)
return XDP_DROP;
meta = data_meta;
/* Export metadata. */
/* We expect veth bpf_xdp_metadata_rx_timestamp to return 0 HW
* timestamp, so put some non-zero value into AF_XDP frame for
* the userspace.
*/
bpf_xdp_metadata_rx_timestamp(ctx, &timestamp);
if (timestamp == 0)
meta->rx_timestamp = 1;
bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type);
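/* Deliver the frame to the AF_XDP socket bound to this rx queue. */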
return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/xdp_metadata.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <linux/bpf.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
/* This function extracts the last byte of the daddr, and uses it
* as output dev index.
*/
SEC("lwt_xmit")
int test_lwt_reroute(struct __sk_buff *skb)
{
struct iphdr *iph = NULL;
void *start = (void *)(long)skb->data;
void *end = (void *)(long)skb->data_end;
/* set mark at most once */
if (skb->mark != 0)
return BPF_OK;
if (start + sizeof(*iph) > end)
return BPF_DROP;
iph = (struct iphdr *)start;
skb->mark = bpf_ntohl(iph->daddr) & 0xff;
/* do not reroute x.x.x.0 packets */
if (skb->mark == 0)
return BPF_OK;
return BPF_LWT_REROUTE;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_lwt_reroute.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "../bpf_experimental.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
struct node_data {
long key;
long data;
struct bpf_rb_node node;
};
struct plain_local {
long key;
long data;
};
struct map_value {
struct prog_test_ref_kfunc *not_kptr;
struct prog_test_ref_kfunc __kptr *val;
struct node_data __kptr *node;
struct plain_local __kptr *plain;
};
/* This is necessary so that LLVM generates BTF for node_data struct
* If it's not included, a fwd reference for node_data will be generated but
* no struct. Example BTF of "node" field in map_value when not included:
*
* [10] PTR '(anon)' type_id=35
* [34] FWD 'node_data' fwd_kind=struct
* [35] TYPE_TAG 'kptr_ref' type_id=34
*
* (with no node_data struct defined)
* Had to do the same w/ bpf_kfunc_call_test_release below
*/
struct node_data *just_here_because_btf_bug;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 2);
} some_nodes SEC(".maps");
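/* Allocate a node_data, set its key, and stash it in the map value's node
 * kptr; any previously stashed node is dropped.
 */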
static int create_and_stash(int idx, int val)
{
struct map_value *mapval;
struct node_data *res;
mapval = bpf_map_lookup_elem(&some_nodes, &idx);
if (!mapval)
return 1;
res = bpf_obj_new(typeof(*res));
if (!res)
return 1;
res->key = val;
res = bpf_kptr_xchg(&mapval->node, res);
if (res)
bpf_obj_drop(res);
return 0;
}
SEC("tc")
long stash_rb_nodes(void *ctx)
{
return create_and_stash(0, 41) ?: create_and_stash(1, 42);
}
SEC("tc")
long stash_plain(void *ctx)
{
struct map_value *mapval;
struct plain_local *res;
int idx = 0;
mapval = bpf_map_lookup_elem(&some_nodes, &idx);
if (!mapval)
return 1;
res = bpf_obj_new(typeof(*res));
if (!res)
return 1;
res->key = 41;
res = bpf_kptr_xchg(&mapval->plain, res);
if (res)
bpf_obj_drop(res);
return 0;
}
SEC("tc")
long unstash_rb_node(void *ctx)
{
struct map_value *mapval;
struct node_data *res;
long retval;
int key = 1;
mapval = bpf_map_lookup_elem(&some_nodes, &key);
if (!mapval)
return 1;
res = bpf_kptr_xchg(&mapval->node, NULL);
if (res) {
retval = res->key;
bpf_obj_drop(res);
return retval;
}
return 1;
}
SEC("tc")
long stash_test_ref_kfunc(void *ctx)
{
struct prog_test_ref_kfunc *res;
struct map_value *mapval;
int key = 0;
mapval = bpf_map_lookup_elem(&some_nodes, &key);
if (!mapval)
return 1;
res = bpf_kptr_xchg(&mapval->val, NULL);
if (res)
bpf_kfunc_call_test_release(res);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/local_kptr_stash.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 Google LLC.
*/
#include "vmlinux.h"
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
u32 monitored_pid = 0;
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 1 << 12);
} ringbuf SEC(".maps");
char _license[] SEC("license") = "GPL";
bool use_ima_file_hash;
bool enable_bprm_creds_for_exec;
bool enable_kernel_read_file;
bool test_deny;
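/* For the monitored pid, compute the file's IMA hash (inode- or file-based
 * depending on use_ima_file_hash) and publish it to the ring buffer.
 */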
static void ima_test_common(struct file *file)
{
u64 ima_hash = 0;
u64 *sample;
int ret;
u32 pid;
pid = bpf_get_current_pid_tgid() >> 32;
if (pid == monitored_pid) {
if (!use_ima_file_hash)
ret = bpf_ima_inode_hash(file->f_inode, &ima_hash,
sizeof(ima_hash));
else
ret = bpf_ima_file_hash(file, &ima_hash,
sizeof(ima_hash));
if (ret < 0 || ima_hash == 0)
return;
sample = bpf_ringbuf_reserve(&ringbuf, sizeof(u64), 0);
if (!sample)
return;
*sample = ima_hash;
bpf_ringbuf_submit(sample, 0);
}
return;
}
static int ima_test_deny(void)
{
u32 pid;
pid = bpf_get_current_pid_tgid() >> 32;
if (pid == monitored_pid && test_deny)
return -EPERM;
return 0;
}
SEC("lsm.s/bprm_committed_creds")
void BPF_PROG(bprm_committed_creds, struct linux_binprm *bprm)
{
ima_test_common(bprm->file);
}
SEC("lsm.s/bprm_creds_for_exec")
int BPF_PROG(bprm_creds_for_exec, struct linux_binprm *bprm)
{
if (!enable_bprm_creds_for_exec)
return 0;
ima_test_common(bprm->file);
return 0;
}
SEC("lsm.s/kernel_read_file")
int BPF_PROG(kernel_read_file, struct file *file, enum kernel_read_file_id id,
bool contents)
{
int ret;
if (!enable_kernel_read_file)
return 0;
if (!contents)
return 0;
if (id != READING_POLICY)
return 0;
ret = ima_test_deny();
if (ret < 0)
return ret;
ima_test_common(file);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/ima.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 2);
__type(key, struct bigelement);
__type(value, __u32);
} hash_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct bigelement);
} key_map SEC(".maps");
struct bigelement {
int a;
char b[4096];
long long c;
};
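/* Use a ~4KB key taken from the per-cpu array to exercise hash map updates
 * with large keys.
 */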
SEC("raw_tracepoint/sys_enter")
int bpf_hash_large_key_test(void *ctx)
{
int zero = 0, value = 42;
struct bigelement *key;
key = bpf_map_lookup_elem(&key_map, &zero);
if (!key)
return 0;
key->c = 1;
if (bpf_map_update_elem(&hash_map, key, &value, BPF_ANY))
return 0;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_hash_large_key.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
SEC("raw_tracepoint/kfree_skb")
int nested_loops(volatile struct pt_regs* ctx)
{
int i, j, sum = 0, m;
for (j = 0; j < 300; j++)
for (i = 0; i < j; i++) {
if (j & 1)
m = PT_REGS_RC(ctx);
else
m = j;
sum += i * m;
}
return sum;
}
| linux-master | tools/testing/selftests/bpf/progs/loop1.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/value_adj_spill.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
SEC("socket")
__description("map element value is preserved across register spilling")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void is_preserved_across_register_spilling(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 42; \
*(u64*)(r0 + 0) = r1; \
r1 = r10; \
r1 += -184; \
*(u64*)(r1 + 0) = r0; \
r3 = *(u64*)(r1 + 0); \
r1 = 42; \
*(u64*)(r3 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("socket")
__description("map element value or null is marked on register spilling")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void is_marked_on_register_spilling(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
r1 = r10; \
r1 += -152; \
*(u64*)(r1 + 0) = r0; \
if r0 == 0 goto l0_%=; \
r3 = *(u64*)(r1 + 0); \
r1 = 42; \
*(u64*)(r3 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <stdint.h>
#include <stdbool.h>
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* non-existing BPF helper, to test dead code elimination */
static int (*bpf_missing_helper)(const void *arg1, int arg2) = (void *) 999;
extern int LINUX_KERNEL_VERSION __kconfig;
extern int LINUX_UNKNOWN_VIRTUAL_EXTERN __kconfig __weak;
extern bool CONFIG_BPF_SYSCALL __kconfig; /* strong */
extern enum libbpf_tristate CONFIG_TRISTATE __kconfig __weak;
extern bool CONFIG_BOOL __kconfig __weak;
extern char CONFIG_CHAR __kconfig __weak;
extern uint16_t CONFIG_USHORT __kconfig __weak;
extern int CONFIG_INT __kconfig __weak;
extern uint64_t CONFIG_ULONG __kconfig __weak;
extern const char CONFIG_STR[8] __kconfig __weak;
extern uint64_t CONFIG_MISSING __kconfig __weak;
uint64_t kern_ver = -1;
uint64_t unkn_virt_val = -1;
uint64_t bpf_syscall = -1;
uint64_t tristate_val = -1;
uint64_t bool_val = -1;
uint64_t char_val = -1;
uint64_t ushort_val = -1;
uint64_t int_val = -1;
uint64_t ulong_val = -1;
char str_val[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
uint64_t missing_val = -1;
SEC("raw_tp/sys_enter")
int handle_sys_enter(struct pt_regs *ctx)
{
int i;
kern_ver = LINUX_KERNEL_VERSION;
unkn_virt_val = LINUX_UNKNOWN_VIRTUAL_EXTERN;
bpf_syscall = CONFIG_BPF_SYSCALL;
tristate_val = CONFIG_TRISTATE;
bool_val = CONFIG_BOOL;
char_val = CONFIG_CHAR;
ushort_val = CONFIG_USHORT;
int_val = CONFIG_INT;
ulong_val = CONFIG_ULONG;
for (i = 0; i < sizeof(CONFIG_STR); i++) {
str_val[i] = CONFIG_STR[i];
}
if (CONFIG_MISSING)
/* invalid, but dead code - never executed */
missing_val = bpf_missing_helper(ctx, 123);
else
missing_val = 0xDEADC0DE;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_core_extern.c |
#include "core_reloc_types.h"
void f(struct core_reloc_ptr_as_arr x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_ptr_as_arr.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/leak_ptr.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
SEC("socket")
__description("leak pointer into ctx 1")
__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed")
__failure_unpriv __msg_unpriv("R2 leaks addr into mem")
__naked void leak_pointer_into_ctx_1(void)
{
asm volatile (" \
r0 = 0; \
*(u64*)(r1 + %[__sk_buff_cb_0]) = r0; \
r2 = %[map_hash_8b] ll; \
lock *(u64 *)(r1 + %[__sk_buff_cb_0]) += r2; \
exit; \
" :
: __imm_addr(map_hash_8b),
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
: __clobber_all);
}
SEC("socket")
__description("leak pointer into ctx 2")
__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed")
__failure_unpriv __msg_unpriv("R10 leaks addr into mem")
__naked void leak_pointer_into_ctx_2(void)
{
asm volatile (" \
r0 = 0; \
*(u64*)(r1 + %[__sk_buff_cb_0]) = r0; \
lock *(u64 *)(r1 + %[__sk_buff_cb_0]) += r10; \
exit; \
" :
: __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
: __clobber_all);
}
SEC("socket")
__description("leak pointer into ctx 3")
__success __failure_unpriv __msg_unpriv("R2 leaks addr into ctx")
__retval(0)
__naked void leak_pointer_into_ctx_3(void)
{
asm volatile (" \
r0 = 0; \
r2 = %[map_hash_8b] ll; \
*(u64*)(r1 + %[__sk_buff_cb_0]) = r2; \
exit; \
" :
: __imm_addr(map_hash_8b),
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
: __clobber_all);
}
SEC("socket")
__description("leak pointer into map val")
__success __failure_unpriv __msg_unpriv("R6 leaks addr into mem")
__retval(0)
__naked void leak_pointer_into_map_val(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r3 = 0; \
*(u64*)(r0 + 0) = r3; \
lock *(u64 *)(r0 + 0) += r6; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_leak_ptr.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include "bpf_misc.h"
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
SEC("netfilter")
__description("netfilter invalid context access, size too short")
__failure __msg("invalid bpf_context access")
__naked void with_invalid_ctx_access_test1(void)
{
asm volatile (" \
r2 = *(u8*)(r1 + %[__bpf_nf_ctx_state]); \
r0 = 0; \
exit; \
" :
: __imm_const(__bpf_nf_ctx_state, offsetof(struct bpf_nf_ctx, state))
: __clobber_all);
}
SEC("netfilter")
__description("netfilter invalid context access, size too short")
__failure __msg("invalid bpf_context access")
__naked void with_invalid_ctx_access_test2(void)
{
asm volatile (" \
r2 = *(u16*)(r1 + %[__bpf_nf_ctx_skb]); \
r0 = 0; \
exit; \
" :
: __imm_const(__bpf_nf_ctx_skb, offsetof(struct bpf_nf_ctx, skb))
: __clobber_all);
}
SEC("netfilter")
__description("netfilter invalid context access, past end of ctx")
__failure __msg("invalid bpf_context access")
__naked void with_invalid_ctx_access_test3(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[__bpf_nf_ctx_size]); \
r0 = 0; \
exit; \
" :
: __imm_const(__bpf_nf_ctx_size, sizeof(struct bpf_nf_ctx))
: __clobber_all);
}
SEC("netfilter")
__description("netfilter invalid context, write")
__failure __msg("invalid bpf_context access")
__naked void with_invalid_ctx_access_test4(void)
{
asm volatile (" \
r2 = r1; \
*(u64*)(r2 + 0) = r1; \
r0 = 1; \
exit; \
" :
: __imm_const(__bpf_nf_ctx_skb, offsetof(struct bpf_nf_ctx, skb))
: __clobber_all);
}
#define NF_DROP 0
#define NF_ACCEPT 1
SEC("netfilter")
__description("netfilter valid context read and invalid write")
__failure __msg("only read is supported")
int with_invalid_ctx_access_test5(struct bpf_nf_ctx *ctx)
{
struct nf_hook_state *state = (void *)ctx->state;
state->sk = NULL;
return NF_ACCEPT;
}
extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags,
struct bpf_dynptr *ptr__uninit) __ksym;
extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
void *buffer, uint32_t buffer__sz) __ksym;
SEC("netfilter")
__description("netfilter test prog with skb and state read access")
__success __failure_unpriv
__retval(0)
int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx)
{
const struct nf_hook_state *state = ctx->state;
struct sk_buff *skb = ctx->skb;
const struct iphdr *iph;
const struct tcphdr *th;
u8 buffer_iph[20] = {};
u8 buffer_th[40] = {};
struct bpf_dynptr ptr;
uint8_t ihl;
if (skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr))
return NF_ACCEPT;
iph = bpf_dynptr_slice(&ptr, 0, buffer_iph, sizeof(buffer_iph));
if (!iph)
return NF_ACCEPT;
if (state->pf != 2)
return NF_ACCEPT;
ihl = iph->ihl << 2;
th = bpf_dynptr_slice(&ptr, ihl, buffer_th, sizeof(buffer_th));
if (!th)
return NF_ACCEPT;
return th->dest == bpf_htons(22) ? NF_ACCEPT : NF_DROP;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
int count = 0;
SEC("tc")
int classifier_0(struct __sk_buff *skb)
{
count++;
bpf_tail_call_static(skb, &jmp_table, 0);
return 1;
}
SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
return 0;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/tailcall3.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
extern const int bpf_prog_active __ksym;
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 1 << 12);
} ringbuf SEC(".maps");
SEC("fentry/security_inode_getattr")
int BPF_PROG(d_path_check_rdonly_mem, struct path *path, struct kstat *stat,
__u32 request_mask, unsigned int query_flags)
{
void *active;
u32 cpu;
cpu = bpf_get_smp_processor_id();
active = (void *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
if (active) {
/* FAIL here! 'active' points to 'regular' memory. It
* cannot be submitted to ring buffer.
*/
bpf_ringbuf_submit(active, 0);
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_d_path_check_types.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/sock.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
struct {
__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} map_reuseport_array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} map_sockhash SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} map_sockmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_XSKMAP);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} map_xskmap SEC(".maps");
struct val {
int cnt;
struct bpf_spin_lock l;
};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(max_entries, 0);
__type(key, int);
__type(value, struct val);
__uint(map_flags, BPF_F_NO_PREALLOC);
} sk_storage_map SEC(".maps");
SEC("cgroup/skb")
__description("skb->sk: no NULL check")
__failure __msg("invalid mem access 'sock_common_or_null'")
__failure_unpriv
__naked void skb_sk_no_null_check(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
r0 = *(u32*)(r1 + 0); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
SEC("cgroup/skb")
__description("skb->sk: sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_1(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_family]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
: __clobber_all);
}
SEC("cgroup/skb")
__description("skb->sk: sk->type [fullsock field]")
__failure __msg("invalid sock_common access")
__failure_unpriv
__naked void sk_sk_type_fullsock_field_1(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_type]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
: __clobber_all);
}
SEC("cgroup/skb")
__description("bpf_sk_fullsock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_1(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
call %[bpf_sk_fullsock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_1(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
r0 = *(u32*)(r0 + %[bpf_sock_type]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_fullsock_field_2(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_type]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_2(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_family]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->state [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_state_narrow_load(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_state]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_state, offsetof(struct bpf_sock, state))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)")
__success __success_unpriv __retval(0)
__naked void port_word_load_backward_compatibility(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_dst_port]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_half_load(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_1(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r0 = *(u16*)(r0 + %[__imm_0]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_byte_load(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r2 = *(u8*)(r0 + %[bpf_sock_dst_port]); \
r2 = *(u8*)(r0 + %[__imm_0]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_byte_load_invalid(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_2(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]")
__success __success_unpriv __retval(0)
__naked void dst_ip6_load_2nd_byte(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_narrow_load(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_type]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->protocol [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_protocol_narrow_load(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_protocol]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol))
: __clobber_all);
}
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): beyond last field")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_1(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping))
: __clobber_all);
}
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_2(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
call %[bpf_tcp_sock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_tcp_sock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'tcp_sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_2(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_tcp_sock]; \
r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_tcp_sock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
: __clobber_all);
}
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_1(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_tcp_sock]; \
if r0 != 0 goto l1_%=; \
exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_tcp_sock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
: __clobber_all);
}
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->bytes_acked")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_bytes_acked(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_tcp_sock]; \
if r0 != 0 goto l1_%=; \
exit; \
l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_tcp_sock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked))
: __clobber_all);
}
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): beyond last field")
__failure __msg("invalid tcp_sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_2(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_tcp_sock]; \
if r0 != 0 goto l1_%=; \
exit; \
l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\
r0 = 0; \
exit; \
" :
: __imm(bpf_tcp_sock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked))
: __clobber_all);
}
SEC("cgroup/skb")
__description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_2(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
exit; \
l1_%=: r1 = r0; \
call %[bpf_tcp_sock]; \
if r0 != 0 goto l2_%=; \
exit; \
l2_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm(bpf_tcp_sock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
__imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
: __clobber_all);
}
SEC("tc")
__description("bpf_sk_release(skb->sk)")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_release_skb_sk(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_sk_release),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
SEC("tc")
__description("bpf_sk_release(bpf_sk_fullsock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_fullsock_skb_sk(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
exit; \
l1_%=: r1 = r0; \
call %[bpf_sk_release]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm(bpf_sk_release),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
SEC("tc")
__description("bpf_sk_release(bpf_tcp_sock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_tcp_sock_skb_sk(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_tcp_sock]; \
if r0 != 0 goto l1_%=; \
exit; \
l1_%=: r1 = r0; \
call %[bpf_sk_release]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_sk_release),
__imm(bpf_tcp_sock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
SEC("tc")
__description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL")
__success __retval(0)
__naked void sk_null_0_value_null(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r4 = 0; \
r3 = 0; \
r2 = r0; \
r1 = %[sk_storage_map] ll; \
call %[bpf_sk_storage_get]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm(bpf_sk_storage_get),
__imm_addr(sk_storage_map),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
SEC("tc")
__description("sk_storage_get(map, skb->sk, 1, 1): value == 1")
__failure __msg("R3 type=scalar expected=fp")
__naked void sk_1_1_value_1(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r4 = 1; \
r3 = 1; \
r2 = r0; \
r1 = %[sk_storage_map] ll; \
call %[bpf_sk_storage_get]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm(bpf_sk_storage_get),
__imm_addr(sk_storage_map),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
SEC("tc")
__description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value")
__success __retval(0)
__naked void stack_value_1_stack_value(void)
{
asm volatile (" \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r4 = 1; \
r3 = r10; \
r3 += -8; \
r2 = r0; \
r1 = %[sk_storage_map] ll; \
call %[bpf_sk_storage_get]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm(bpf_sk_storage_get),
__imm_addr(sk_storage_map),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
SEC("tc")
__description("bpf_map_lookup_elem(smap, &key)")
__failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem")
__naked void map_lookup_elem_smap_key(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[sk_storage_map] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(sk_storage_map)
: __clobber_all);
}
SEC("xdp")
__description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id")
__success __retval(0)
__naked void xskmap_key_xs_queue_id(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_xskmap] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]); \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_xskmap),
__imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
: __clobber_all);
}
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockmap_key(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_sockmap] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_sockmap)
: __clobber_all);
}
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockhash_key(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_sockhash] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_sockhash)
: __clobber_all);
}
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_1(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_sockmap] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r1 = r0; \
r0 = *(u32*)(r0 + %[bpf_sock_type]); \
call %[bpf_sk_release]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_sk_release),
__imm_addr(map_sockmap),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
: __clobber_all);
}
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_2(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_sockhash] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r1 = r0; \
r0 = *(u32*)(r0 + %[bpf_sock_type]); \
call %[bpf_sk_release]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_sk_release),
__imm_addr(map_sockhash),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
: __clobber_all);
}
SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)")
__success
__naked void ctx_reuseport_array_key_flags(void)
{
asm volatile (" \
r4 = 0; \
r2 = 0; \
*(u32*)(r10 - 4) = r2; \
r3 = r10; \
r3 += -4; \
r2 = %[map_reuseport_array] ll; \
call %[bpf_sk_select_reuseport]; \
exit; \
" :
: __imm(bpf_sk_select_reuseport),
__imm_addr(map_reuseport_array)
: __clobber_all);
}
SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)")
__success
__naked void reuseport_ctx_sockmap_key_flags(void)
{
asm volatile (" \
r4 = 0; \
r2 = 0; \
*(u32*)(r10 - 4) = r2; \
r3 = r10; \
r3 += -4; \
r2 = %[map_sockmap] ll; \
call %[bpf_sk_select_reuseport]; \
exit; \
" :
: __imm(bpf_sk_select_reuseport),
__imm_addr(map_sockmap)
: __clobber_all);
}
SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)")
__success
__naked void reuseport_ctx_sockhash_key_flags(void)
{
asm volatile (" \
r4 = 0; \
r2 = 0; \
*(u32*)(r10 - 4) = r2; \
r3 = r10; \
r3 += -4; \
r2 = %[map_sockmap] ll; \
call %[bpf_sk_select_reuseport]; \
exit; \
" :
: __imm(bpf_sk_select_reuseport),
__imm_addr(map_sockmap)
: __clobber_all);
}
SEC("tc")
__description("mark null check on return value of bpf_skc_to helpers")
__failure __msg("invalid mem access")
__naked void of_bpf_skc_to_helpers(void)
{
asm volatile (" \
r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
if r1 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: r6 = r1; \
call %[bpf_skc_to_tcp_sock]; \
r7 = r0; \
r1 = r6; \
call %[bpf_skc_to_tcp_request_sock]; \
r8 = r0; \
if r8 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r0 = *(u8*)(r7 + 0); \
exit; \
" :
: __imm(bpf_skc_to_tcp_request_sock),
__imm(bpf_skc_to_tcp_sock),
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_sock.c |
#include "core_reloc_types.h"
void f(struct core_reloc_primitives x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_primitives.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/loops1.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("xdp")
__description("bounded loop, count to 4")
__success __retval(4)
__naked void bounded_loop_count_to_4(void)
{
asm volatile (" \
r0 = 0; \
l0_%=: r0 += 1; \
if r0 < 4 goto l0_%=; \
exit; \
" ::: __clobber_all);
}
SEC("tracepoint")
__description("bounded loop, count to 20")
__success
__naked void bounded_loop_count_to_20(void)
{
asm volatile (" \
r0 = 0; \
l0_%=: r0 += 3; \
if r0 < 20 goto l0_%=; \
exit; \
" ::: __clobber_all);
}
SEC("tracepoint")
__description("bounded loop, count from positive unknown to 4")
__success
__naked void from_positive_unknown_to_4(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
if r0 s< 0 goto l0_%=; \
l1_%=: r0 += 1; \
if r0 < 4 goto l1_%=; \
l0_%=: exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("tracepoint")
__description("bounded loop, count from totally unknown to 4")
__success
__naked void from_totally_unknown_to_4(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
l0_%=: r0 += 1; \
if r0 < 4 goto l0_%=; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("tracepoint")
__description("bounded loop, count to 4 with equality")
__success
__naked void count_to_4_with_equality(void)
{
asm volatile (" \
r0 = 0; \
l0_%=: r0 += 1; \
if r0 != 4 goto l0_%=; \
exit; \
" ::: __clobber_all);
}
SEC("tracepoint")
__description("bounded loop, start in the middle")
__failure __msg("back-edge")
__naked void loop_start_in_the_middle(void)
{
asm volatile (" \
r0 = 0; \
goto l0_%=; \
l1_%=: r0 += 1; \
l0_%=: if r0 < 4 goto l1_%=; \
exit; \
" ::: __clobber_all);
}
SEC("xdp")
__description("bounded loop containing a forward jump")
__success __retval(4)
__naked void loop_containing_a_forward_jump(void)
{
asm volatile (" \
r0 = 0; \
l1_%=: r0 += 1; \
if r0 == r0 goto l0_%=; \
l0_%=: if r0 < 4 goto l1_%=; \
exit; \
" ::: __clobber_all);
}
SEC("tracepoint")
__description("bounded loop that jumps out rather than in")
__success
__naked void jumps_out_rather_than_in(void)
{
asm volatile (" \
r6 = 0; \
l1_%=: r6 += 1; \
if r6 > 10000 goto l0_%=; \
call %[bpf_get_prandom_u32]; \
goto l1_%=; \
l0_%=: exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("tracepoint")
__description("infinite loop after a conditional jump")
__failure __msg("program is too large")
__naked void loop_after_a_conditional_jump(void)
{
asm volatile (" \
r0 = 5; \
if r0 < 4 goto l0_%=; \
l1_%=: r0 += 1; \
goto l1_%=; \
l0_%=: exit; \
" ::: __clobber_all);
}
SEC("tracepoint")
__description("bounded recursion")
__failure __msg("back-edge")
__naked void bounded_recursion(void)
{
asm volatile (" \
r1 = 0; \
call bounded_recursion__1; \
exit; \
" ::: __clobber_all);
}
static __naked __noinline __attribute__((used))
void bounded_recursion__1(void)
{
asm volatile (" \
r1 += 1; \
r0 = r1; \
if r1 < 4 goto l0_%=; \
exit; \
l0_%=: call bounded_recursion__1; \
exit; \
" ::: __clobber_all);
}
SEC("tracepoint")
__description("infinite loop in two jumps")
__failure __msg("loop detected")
__naked void infinite_loop_in_two_jumps(void)
{
asm volatile (" \
r0 = 0; \
l1_%=: goto l0_%=; \
l0_%=: if r0 < 4 goto l1_%=; \
exit; \
" ::: __clobber_all);
}
SEC("tracepoint")
__description("infinite loop: three-jump trick")
__failure __msg("loop detected")
__naked void infinite_loop_three_jump_trick(void)
{
asm volatile (" \
r0 = 0; \
l2_%=: r0 += 1; \
r0 &= 1; \
if r0 < 2 goto l0_%=; \
exit; \
l0_%=: r0 += 1; \
r0 &= 1; \
if r0 < 2 goto l1_%=; \
exit; \
l1_%=: r0 += 1; \
r0 &= 1; \
if r0 < 2 goto l2_%=; \
exit; \
" ::: __clobber_all);
}
SEC("xdp")
__description("not-taken loop with back jump to 1st insn")
__success __retval(123)
__naked void back_jump_to_1st_insn_1(void)
{
asm volatile (" \
l0_%=: r0 = 123; \
if r0 == 4 goto l0_%=; \
exit; \
" ::: __clobber_all);
}
SEC("xdp")
__description("taken loop with back jump to 1st insn")
__success __retval(55)
__naked void back_jump_to_1st_insn_2(void)
{
asm volatile (" \
r1 = 10; \
r2 = 0; \
call back_jump_to_1st_insn_2__1; \
exit; \
" ::: __clobber_all);
}
static __naked __noinline __attribute__((used))
void back_jump_to_1st_insn_2__1(void)
{
asm volatile (" \
l0_%=: r2 += r1; \
r1 -= 1; \
if r1 != 0 goto l0_%=; \
r0 = r2; \
exit; \
" ::: __clobber_all);
}
SEC("xdp")
__description("taken loop with back jump to 1st insn, 2")
__success __retval(55)
__naked void jump_to_1st_insn_2(void)
{
asm volatile (" \
r1 = 10; \
r2 = 0; \
call jump_to_1st_insn_2__1; \
exit; \
" ::: __clobber_all);
}
static __naked __noinline __attribute__((used))
void jump_to_1st_insn_2__1(void)
{
asm volatile (" \
l0_%=: r2 += r1; \
r1 -= 1; \
if w1 != 0 goto l0_%=; \
r0 = r2; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_loops1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <../../../tools/include/linux/filter.h>
#include <linux/btf.h>
char _license[] SEC("license") = "GPL";
struct args {
__u64 log_buf;
__u32 log_size;
int max_entries;
int map_fd;
int prog_fd;
int btf_fd;
};
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
BTF_INT_ENC(encoding, bits_offset, bits)
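/* Load a minimal raw BTF blob containing two integer types ('long' and
 * 'unsigned long') that the map created below references as its key and
 * value types.
 */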
static int btf_load(void)
{
struct btf_blob {
struct btf_header btf_hdr;
__u32 types[8];
__u32 str;
} raw_btf = {
.btf_hdr = {
.magic = BTF_MAGIC,
.version = BTF_VERSION,
.hdr_len = sizeof(struct btf_header),
.type_len = sizeof(__u32) * 8,
.str_off = sizeof(__u32) * 8,
.str_len = sizeof(__u32),
},
.types = {
/* long */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [1] */
/* unsigned long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
},
};
static union bpf_attr btf_load_attr = {
.btf_size = sizeof(raw_btf),
};
btf_load_attr.btf = (long)&raw_btf;
return bpf_sys_bpf(BPF_BTF_LOAD, &btf_load_attr, sizeof(btf_load_attr));
}
SEC("syscall")
int bpf_prog(struct args *ctx)
{
static char license[] = "GPL";
static struct bpf_insn insns[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
static union bpf_attr map_create_attr = {
.map_type = BPF_MAP_TYPE_HASH,
.key_size = 8,
.value_size = 8,
.btf_key_type_id = 1,
.btf_value_type_id = 2,
};
static union bpf_attr map_update_attr = { .map_fd = 1, };
static __u64 key = 12;
static __u64 value = 34;
static union bpf_attr prog_load_attr = {
.prog_type = BPF_PROG_TYPE_XDP,
.insn_cnt = sizeof(insns) / sizeof(insns[0]),
};
int ret;
ret = btf_load();
if (ret <= 0)
return ret;
ctx->btf_fd = ret;
map_create_attr.max_entries = ctx->max_entries;
map_create_attr.btf_fd = ret;
prog_load_attr.license = (long) license;
prog_load_attr.insns = (long) insns;
prog_load_attr.log_buf = ctx->log_buf;
prog_load_attr.log_size = ctx->log_size;
prog_load_attr.log_level = 1;
ret = bpf_sys_bpf(BPF_MAP_CREATE, &map_create_attr, sizeof(map_create_attr));
if (ret <= 0)
return ret;
ctx->map_fd = ret;
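/* Patch the freshly created map fd into the BPF_LD_MAP_FD instruction above. */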
insns[3].imm = ret;
map_update_attr.map_fd = ret;
map_update_attr.key = (long) &key;
map_update_attr.value = (long) &value;
ret = bpf_sys_bpf(BPF_MAP_UPDATE_ELEM, &map_update_attr, sizeof(map_update_attr));
if (ret < 0)
return ret;
ret = bpf_sys_bpf(BPF_PROG_LOAD, &prog_load_attr, sizeof(prog_load_attr));
if (ret <= 0)
return ret;
ctx->prog_fd = ret;
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/syscall.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#define VAR_NUM 2
struct hmap_elem {
struct bpf_spin_lock lock;
int var[VAR_NUM];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct hmap_elem);
} hash_map SEC(".maps");
SEC("freplace/handle_kprobe")
int new_handle_kprobe(struct pt_regs *ctx)
{
struct hmap_elem *val;
int key = 0;
val = bpf_map_lookup_elem(&hash_map, &key);
if (!val)
return 1;
/* spin_lock in hash map */
bpf_spin_lock(&val->lock);
val->var[0] = 99;
bpf_spin_unlock(&val->lock);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/freplace_attach_probe.c |
/* Copyright (c) 2017 VMware
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
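/* Each program below unconditionally redirects ingress frames to a fixed
 * ifindex to exercise bpf_redirect().
 */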
SEC("redirect_to_111")
int xdp_redirect_to_111(struct xdp_md *xdp)
{
return bpf_redirect(111, 0);
}
SEC("redirect_to_222")
int xdp_redirect_to_222(struct xdp_md *xdp)
{
return bpf_redirect(222, 0);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_redirect.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
/* weak and shared between two files */
const volatile int my_tid __weak;
long syscall_id __weak;
int output_val1;
int output_ctx1;
int output_weak1;
/* same "subprog" name in all files, but it's ok because they all are static */
static __noinline int subprog(int x)
{
/* but different formula */
return x * 1;
}
/* Global functions can't be void */
int set_output_val1(int x)
{
output_val1 = x + subprog(x);
return x;
}
/* This function can't be verified as global, as it assumes raw_tp/sys_enter
* context and accesses syscall id (second argument). So we mark it as
* __hidden, so that libbpf will mark it as static in the final object file,
* right before verifying it in the kernel.
*
* But we don't mark it as __hidden here, rather at extern site. __hidden is
* "contaminating" visibility, so it will get propagated from either extern or
* actual definition (including from the losing __weak definition).
*/
void set_output_ctx1(__u64 *ctx)
{
output_ctx1 = ctx[1]; /* long id, same as in BPF_PROG below */
}
/* this weak instance should win because it's the first one */
__weak int set_output_weak(int x)
{
static volatile int whatever;
/* make sure we use CO-RE relocations in a weak function, this used to
* cause problems for BPF static linker
*/
whatever = bpf_core_type_size(struct task_struct);
__sink(whatever);
output_weak1 = x;
return x;
}
extern int set_output_val2(int x);
/* here we'll force set_output_ctx2() to be __hidden in the final obj file */
__hidden extern void set_output_ctx2(__u64 *ctx);
SEC("?raw_tp/sys_enter")
int BPF_PROG(handler1, struct pt_regs *regs, long id)
{
static volatile int whatever;
if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
return 0;
/* make sure we have CO-RE relocations in main program */
whatever = bpf_core_type_size(struct task_struct);
__sink(whatever);
set_output_val2(1000);
set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */
/* keep input value the same across both files to avoid dependency on
* handler call order; differentiate by output_weak1 vs output_weak2.
*/
set_output_weak(42);
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/linked_funcs1.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ctx_sk_msg.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("sk_msg")
__description("valid access family in SK_MSG")
__success
__naked void access_family_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_family]); \
exit; \
" :
: __imm_const(sk_msg_md_family, offsetof(struct sk_msg_md, family))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access remote_ip4 in SK_MSG")
__success
__naked void remote_ip4_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip4]); \
exit; \
" :
: __imm_const(sk_msg_md_remote_ip4, offsetof(struct sk_msg_md, remote_ip4))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access local_ip4 in SK_MSG")
__success
__naked void local_ip4_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip4]); \
exit; \
" :
: __imm_const(sk_msg_md_local_ip4, offsetof(struct sk_msg_md, local_ip4))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access remote_port in SK_MSG")
__success
__naked void remote_port_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_port]); \
exit; \
" :
: __imm_const(sk_msg_md_remote_port, offsetof(struct sk_msg_md, remote_port))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access local_port in SK_MSG")
__success
__naked void local_port_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_local_port]); \
exit; \
" :
: __imm_const(sk_msg_md_local_port, offsetof(struct sk_msg_md, local_port))
: __clobber_all);
}
SEC("sk_skb")
__description("valid access remote_ip6 in SK_MSG")
__success
__naked void remote_ip6_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_0]); \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_1]); \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_2]); \
r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_3]); \
exit; \
" :
: __imm_const(sk_msg_md_remote_ip6_0, offsetof(struct sk_msg_md, remote_ip6[0])),
__imm_const(sk_msg_md_remote_ip6_1, offsetof(struct sk_msg_md, remote_ip6[1])),
__imm_const(sk_msg_md_remote_ip6_2, offsetof(struct sk_msg_md, remote_ip6[2])),
__imm_const(sk_msg_md_remote_ip6_3, offsetof(struct sk_msg_md, remote_ip6[3]))
: __clobber_all);
}
SEC("sk_skb")
__description("valid access local_ip6 in SK_MSG")
__success
__naked void local_ip6_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_0]); \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_1]); \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_2]); \
r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_3]); \
exit; \
" :
: __imm_const(sk_msg_md_local_ip6_0, offsetof(struct sk_msg_md, local_ip6[0])),
__imm_const(sk_msg_md_local_ip6_1, offsetof(struct sk_msg_md, local_ip6[1])),
__imm_const(sk_msg_md_local_ip6_2, offsetof(struct sk_msg_md, local_ip6[2])),
__imm_const(sk_msg_md_local_ip6_3, offsetof(struct sk_msg_md, local_ip6[3]))
: __clobber_all);
}
SEC("sk_msg")
__description("valid access size in SK_MSG")
__success
__naked void access_size_in_sk_msg(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[sk_msg_md_size]); \
exit; \
" :
: __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size))
: __clobber_all);
}
SEC("sk_msg")
__description("invalid 64B read of size in SK_MSG")
__failure __msg("invalid bpf_context access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void of_size_in_sk_msg(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[sk_msg_md_size]); \
exit; \
" :
: __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size))
: __clobber_all);
}
SEC("sk_msg")
__description("invalid read past end of SK_MSG")
__failure __msg("invalid bpf_context access")
__naked void past_end_of_sk_msg(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__imm_0]); \
exit; \
" :
: __imm_const(__imm_0, offsetof(struct sk_msg_md, size) + 4)
: __clobber_all);
}
SEC("sk_msg")
__description("invalid read offset in SK_MSG")
__failure __msg("invalid bpf_context access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void read_offset_in_sk_msg(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__imm_0]); \
exit; \
" :
: __imm_const(__imm_0, offsetof(struct sk_msg_md, family) + 1)
: __clobber_all);
}
SEC("sk_msg")
__description("direct packet read for SK_MSG")
__success
__naked void packet_read_for_sk_msg(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
__imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
: __clobber_all);
}
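/* Rough C equivalent of the naked asm above (illustrative sketch only; the
* test intentionally stays in asm so the verifier sees exactly these
* instructions):
*
*	SEC("sk_msg")
*	int direct_read_sketch(struct sk_msg_md *msg)
*	{
*		void *data = msg->data;
*		void *data_end = msg->data_end;
*
*		if (data + 8 <= data_end)
*			return *(__u8 *)data;
*		return 0;
*	}
*/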
SEC("sk_msg")
__description("direct packet write for SK_MSG")
__success
__naked void packet_write_for_sk_msg(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
*(u8*)(r2 + 0) = r2; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
__imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
: __clobber_all);
}
SEC("sk_msg")
__description("overlapping checks for direct packet access SK_MSG")
__success
__naked void direct_packet_access_sk_msg(void)
{
asm volatile (" \
r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r1 = r2; \
r1 += 6; \
if r1 > r3 goto l0_%=; \
r0 = *(u16*)(r2 + 6); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
__imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
__u64 fentry_called = 0;
SEC("fentry/test_pkt_md_access_new")
int BPF_PROG(fentry, struct sk_buff *skb)
{
fentry_called = skb->len;
return 0;
}
__u64 fexit_called = 0;
SEC("fexit/test_pkt_md_access_new")
int BPF_PROG(fexit, struct sk_buff *skb)
{
fexit_called = skb->len;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_trace_ext_tracing.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test1")
__failure __msg("R0 has value (0x0; 0xffffffff)")
__naked void with_invalid_return_code_test1(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test2")
__success
__naked void with_invalid_return_code_test2(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + 0); \
r0 &= 1; \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test3")
__failure __msg("R0 has value (0x0; 0x3)")
__naked void with_invalid_return_code_test3(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + 0); \
r0 &= 3; \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test4")
__success
__naked void with_invalid_return_code_test4(void)
{
asm volatile (" \
r0 = 1; \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test5")
__failure __msg("R0 has value (0x2; 0x0)")
__naked void with_invalid_return_code_test5(void)
{
asm volatile (" \
r0 = 2; \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test6")
__failure __msg("R0 is not a known value (ctx)")
__naked void with_invalid_return_code_test6(void)
{
asm volatile (" \
r0 = r1; \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test7")
__failure __msg("R0 has unknown scalar value")
__naked void with_invalid_return_code_test7(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + 0); \
r2 = *(u32*)(r1 + 4); \
r0 *= r2; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
#define ITER_HELPERS \
__imm(bpf_iter_num_new), \
__imm(bpf_iter_num_next), \
__imm(bpf_iter_num_destroy)
SEC("?raw_tp")
__success
int force_clang_to_emit_btf_for_externs(void *ctx)
{
/* we need this as a workaround to enforce compiler emitting BTF
* information for bpf_iter_num_{new,next,destroy}() kfuncs,
* as, apparently, it doesn't emit it for symbols that are only referenced
* from assembly (or from a cleanup attribute, for that matter)
*/
bpf_repeat(0);
return 0;
}
SEC("?raw_tp")
__success
int consume_first_item_only(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* consume first item */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
"if r0 == 0 goto +1;"
"r0 = *(u32 *)(r0 + 0);"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure __msg("R0 invalid mem access 'scalar'")
int missing_null_check_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* consume first element */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
/* FAIL: deref with no NULL check */
"r1 = *(u32 *)(r0 + 0);"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__failure
__msg("invalid access to memory, mem_size=4 off=0 size=8")
__msg("R0 min value is outside of the allowed memory range")
int wrong_sized_read_fail(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 1000;"
"call %[bpf_iter_num_new];"
/* consume first element */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
"if r0 == 0 goto +1;"
/* FAIL: deref more than available 4 bytes */
"r0 = *(u64 *)(r0 + 0);"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common
);
return 0;
}
SEC("?raw_tp")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
int simplest_loop(void *ctx)
{
struct bpf_iter_num iter;
asm volatile (
"r6 = 0;" /* init sum */
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 10;"
"call %[bpf_iter_num_new];"
"1:"
/* consume next item */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
"if r0 == 0 goto 2f;"
"r0 = *(u32 *)(r0 + 0);"
"r6 += r0;" /* accumulate sum */
"goto 1b;"
"2:"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common, "r6"
);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/iters_looping.c |
#include "core_reloc_types.h"
void f(struct core_reloc_enumval___val3_missing x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___val3_missing.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */
#include <stdbool.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
#include <linux/pkt_cls.h>
char LICENSE[] SEC("license") = "GPL";
__u64 sk_cookie_seen;
__u64 reuseport_executed;
union {
struct tcphdr tcp;
struct udphdr udp;
} headers;
const volatile __u16 dest_port;
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} sk_map SEC(".maps");
SEC("sk_reuseport")
int reuse_accept(struct sk_reuseport_md *ctx)
{
reuseport_executed++;
if (ctx->ip_protocol == IPPROTO_TCP) {
if (ctx->data + sizeof(headers.tcp) > ctx->data_end)
return SK_DROP;
if (__builtin_memcmp(&headers.tcp, ctx->data, sizeof(headers.tcp)) != 0)
return SK_DROP;
} else if (ctx->ip_protocol == IPPROTO_UDP) {
if (ctx->data + sizeof(headers.udp) > ctx->data_end)
return SK_DROP;
if (__builtin_memcmp(&headers.udp, ctx->data, sizeof(headers.udp)) != 0)
return SK_DROP;
} else {
return SK_DROP;
}
sk_cookie_seen = bpf_get_socket_cookie(ctx->sk);
return SK_PASS;
}
SEC("sk_reuseport")
int reuse_drop(struct sk_reuseport_md *ctx)
{
reuseport_executed++;
sk_cookie_seen = 0;
return SK_DROP;
}
static int
assign_sk(struct __sk_buff *skb)
{
int zero = 0, ret = 0;
struct bpf_sock *sk;
sk = bpf_map_lookup_elem(&sk_map, &zero);
if (!sk)
return TC_ACT_SHOT;
ret = bpf_sk_assign(skb, sk, 0);
bpf_sk_release(sk);
return ret ? TC_ACT_SHOT : TC_ACT_OK;
}
static int
maybe_assign_tcp(struct __sk_buff *skb, struct tcphdr *th)
{
if (th + 1 > (void *)(long)(skb->data_end))
return TC_ACT_SHOT;
if (!th->syn || th->ack || th->dest != bpf_htons(dest_port))
return TC_ACT_OK;
__builtin_memcpy(&headers.tcp, th, sizeof(headers.tcp));
return assign_sk(skb);
}
static int
maybe_assign_udp(struct __sk_buff *skb, struct udphdr *uh)
{
if (uh + 1 > (void *)(long)(skb->data_end))
return TC_ACT_SHOT;
if (uh->dest != bpf_htons(dest_port))
return TC_ACT_OK;
__builtin_memcpy(&headers.udp, uh, sizeof(headers.udp));
return assign_sk(skb);
}
SEC("tc")
int tc_main(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
struct ethhdr *eth;
eth = (struct ethhdr *)(data);
if (eth + 1 > data_end)
return TC_ACT_SHOT;
if (eth->h_proto == bpf_htons(ETH_P_IP)) {
struct iphdr *iph = (struct iphdr *)(data + sizeof(*eth));
if (iph + 1 > data_end)
return TC_ACT_SHOT;
if (iph->protocol == IPPROTO_TCP)
return maybe_assign_tcp(skb, (struct tcphdr *)(iph + 1));
else if (iph->protocol == IPPROTO_UDP)
return maybe_assign_udp(skb, (struct udphdr *)(iph + 1));
else
return TC_ACT_SHOT;
} else {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + sizeof(*eth));
if (ip6h + 1 > data_end)
return TC_ACT_SHOT;
if (ip6h->nexthdr == IPPROTO_TCP)
return maybe_assign_tcp(skb, (struct tcphdr *)(ip6h + 1));
else if (ip6h->nexthdr == IPPROTO_UDP)
return maybe_assign_udp(skb, (struct udphdr *)(ip6h + 1));
else
return TC_ACT_SHOT;
}
}
| linux-master | tools/testing/selftests/bpf/progs/test_assign_reuse.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Tencent */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
__u64 test1_result = 0;
SEC("fentry/bpf_testmod_fentry_test7")
int BPF_PROG(test1, __u64 a, void *b, short c, int d, void *e, char f,
int g)
{
test1_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
e == (void *)20 && f == 21 && g == 22;
return 0;
}
__u64 test2_result = 0;
SEC("fentry/bpf_testmod_fentry_test11")
int BPF_PROG(test2, __u64 a, void *b, short c, int d, void *e, char f,
int g, unsigned int h, long i, __u64 j, unsigned long k)
{
test2_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
e == (void *)20 && f == 21 && g == 22 && h == 23 &&
i == 24 && j == 25 && k == 26;
return 0;
}
__u64 test3_result = 0;
SEC("fentry/bpf_testmod_fentry_test11")
int BPF_PROG(test3, __u64 a, __u64 b, __u64 c, __u64 d, __u64 e, __u64 f,
__u64 g, __u64 h, __u64 i, __u64 j, __u64 k)
{
test3_result = a == 16 && b == 17 && c == 18 && d == 19 &&
e == 20 && f == 21 && g == 22 && h == 23 &&
i == 24 && j == 25 && k == 26;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/fentry_many_args.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
#include <linux/ip.h>
#include "bpf_tracing_net.h"
/* We don't care whether the packet can be received by the network stack.
* We only care that the packet is sent to the correct device in the correct
* direction and does not panic the kernel.
*/
static int prepend_dummy_mac(struct __sk_buff *skb)
{
char mac[] = {0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0xf,
0xe, 0xd, 0xc, 0xb, 0xa, 0x08, 0x00};
if (bpf_skb_change_head(skb, ETH_HLEN, 0))
return -1;
if (bpf_skb_store_bytes(skb, 0, mac, sizeof(mac), 0))
return -1;
return 0;
}
/* Use the last byte of IP address to redirect the packet */
static int get_redirect_target(struct __sk_buff *skb)
{
struct iphdr *iph = NULL;
void *start = (void *)(long)skb->data;
void *end = (void *)(long)skb->data_end;
if (start + sizeof(*iph) > end)
return -1;
iph = (struct iphdr *)start;
return bpf_ntohl(iph->daddr) & 0xff;
}
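/* Worked example (illustrative): for a packet addressed to 10.0.0.7,
* bpf_ntohl(iph->daddr) is 0x0a000007, so "& 0xff" yields 7 and the packet is
* redirected to ifindex 7.
*/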
SEC("redir_ingress")
int test_lwt_redirect_in(struct __sk_buff *skb)
{
int target = get_redirect_target(skb);
if (target < 0)
return BPF_OK;
if (prepend_dummy_mac(skb))
return BPF_DROP;
return bpf_redirect(target, BPF_F_INGRESS);
}
SEC("redir_egress")
int test_lwt_redirect_out(struct __sk_buff *skb)
{
int target = get_redirect_target(skb);
if (target < 0)
return BPF_OK;
if (prepend_dummy_mac(skb))
return BPF_DROP;
return bpf_redirect(target, 0);
}
SEC("redir_egress_nomac")
int test_lwt_redirect_out_nomac(struct __sk_buff *skb)
{
int target = get_redirect_target(skb);
if (target < 0)
return BPF_OK;
return bpf_redirect(target, 0);
}
SEC("redir_ingress_nomac")
int test_lwt_redirect_in_nomac(struct __sk_buff *skb)
{
int target = get_redirect_target(skb);
if (target < 0)
return BPF_OK;
return bpf_redirect(target, BPF_F_INGRESS);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_lwt_redirect.c |
#include "core_reloc_types.h"
void f(struct core_reloc_existence___wrong_field_defs x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
/* rodata section */
const volatile pid_t pid;
const volatile size_t bss_array_len;
const volatile size_t data_array_len;
/* bss section */
int sum = 0;
int array[1];
/* custom data section */
int my_array[1] SEC(".data.custom");
/* custom data section which should NOT be resizable,
* since it contains a single var which is not an array
*/
int my_int SEC(".data.non_array");
/* custom data section which should NOT be resizable,
* since its last var is not an array
*/
int my_array_first[1] SEC(".data.array_not_last");
int my_int_last SEC(".data.array_not_last");
int percpu_arr[1] SEC(".data.percpu_arr");
SEC("tp/syscalls/sys_enter_getpid")
int bss_array_sum(void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
/* this will be zero, we just rely on verifier not rejecting this */
sum = percpu_arr[bpf_get_smp_processor_id()];
for (size_t i = 0; i < bss_array_len; ++i)
sum += array[i];
return 0;
}
SEC("tp/syscalls/sys_enter_getuid")
int data_array_sum(void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
/* this will be zero, we just rely on verifier not rejecting this */
sum = percpu_arr[bpf_get_smp_processor_id()];
for (size_t i = 0; i < data_array_len; ++i)
sum += my_array[i];
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_map_resize.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "task_kfunc_common.h"
char _license[] SEC("license") = "GPL";
int err, pid;
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(task_newtask,
* TP_PROTO(struct task_struct *p, u64 clone_flags)
*/
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
/* The two-param bpf_task_acquire doesn't exist */
struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
/* Incorrect type for first param */
struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak;
void invalid_kfunc(void) __ksym __weak;
void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;
static bool is_test_kfunc_task(void)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
return pid == cur_pid;
}
static int test_acquire_release(struct task_struct *task)
{
struct task_struct *acquired = NULL;
if (!bpf_ksym_exists(bpf_task_acquire)) {
err = 3;
return 0;
}
if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) {
err = 4;
return 0;
}
if (bpf_ksym_exists(invalid_kfunc)) {
/* the verifier's dead code elimination should remove this */
err = 5;
asm volatile ("goto -1"); /* for (;;); */
}
acquired = bpf_task_acquire(task);
if (acquired)
bpf_task_release(acquired);
else
err = 6;
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired = NULL;
int fake_ctx = 42;
if (bpf_ksym_exists(bpf_task_acquire___one)) {
acquired = bpf_task_acquire___one(task);
} else if (bpf_ksym_exists(bpf_task_acquire___two)) {
/* Here, bpf_object__resolve_ksym_func_btf_id's find_ksym_btf_id
* call will find vmlinux's bpf_task_acquire, but subsequent
* bpf_core_types_are_compat will fail
*/
acquired = bpf_task_acquire___two(task, &fake_ctx);
err = 3;
return 0;
} else if (bpf_ksym_exists(bpf_task_acquire___three)) {
/* bpf_core_types_are_compat will fail similarly to above case */
acquired = bpf_task_acquire___three(&fake_ctx);
err = 4;
return 0;
}
if (acquired)
bpf_task_release(acquired);
else
err = 5;
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)
{
/* Neither symbol should successfully resolve.
* Success or failure of one ___flavor should not affect others
*/
if (bpf_ksym_exists(bpf_task_acquire___two))
err = 1;
else if (bpf_ksym_exists(bpf_task_acquire___three))
err = 2;
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
{
if (!is_test_kfunc_task())
return 0;
return test_acquire_release(task);
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
{
if (!is_test_kfunc_task())
return 0;
return test_acquire_release(bpf_get_current_task_btf());
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
{
long status;
if (!is_test_kfunc_task())
return 0;
status = tasks_kfunc_map_insert(task);
if (status)
err = 1;
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
{
struct task_struct *kptr;
struct __tasks_kfunc_map_value *v;
long status;
if (!is_test_kfunc_task())
return 0;
status = tasks_kfunc_map_insert(task);
if (status) {
err = 1;
return 0;
}
v = tasks_kfunc_map_value_lookup(task);
if (!v) {
err = 2;
return 0;
}
kptr = bpf_kptr_xchg(&v->task, NULL);
if (!kptr) {
err = 3;
return 0;
}
bpf_task_release(kptr);
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags)
{
struct task_struct *kptr;
struct __tasks_kfunc_map_value *v;
long status;
if (!is_test_kfunc_task())
return 0;
status = tasks_kfunc_map_insert(task);
if (status) {
err = 1;
return 0;
}
v = tasks_kfunc_map_value_lookup(task);
if (!v) {
err = 2;
return 0;
}
bpf_rcu_read_lock();
kptr = v->task;
if (!kptr) {
err = 3;
} else {
kptr = bpf_task_acquire(kptr);
if (!kptr)
err = 4;
else
bpf_task_release(kptr);
}
bpf_rcu_read_unlock();
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
{
struct task_struct *current, *acquired;
if (!is_test_kfunc_task())
return 0;
current = bpf_get_current_task_btf();
acquired = bpf_task_acquire(current);
if (acquired)
bpf_task_release(acquired);
else
err = 1;
return 0;
}
static void lookup_compare_pid(const struct task_struct *p)
{
struct task_struct *acquired;
acquired = bpf_task_from_pid(p->pid);
if (!acquired) {
err = 1;
return;
}
if (acquired->pid != p->pid)
err = 2;
bpf_task_release(acquired);
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
{
if (!is_test_kfunc_task())
return 0;
lookup_compare_pid(task);
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
{
if (!is_test_kfunc_task())
return 0;
lookup_compare_pid(bpf_get_current_task_btf());
return 0;
}
static int is_pid_lookup_valid(s32 pid)
{
struct task_struct *acquired;
acquired = bpf_task_from_pid(pid);
if (acquired) {
bpf_task_release(acquired);
return 1;
}
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
{
if (!is_test_kfunc_task())
return 0;
bpf_strncmp(task->comm, 12, "foo");
bpf_strncmp(task->comm, 16, "foo");
bpf_strncmp(&task->comm[8], 4, "foo");
if (is_pid_lookup_valid(-1)) {
err = 1;
return 0;
}
if (is_pid_lookup_valid(0xcafef00d)) {
err = 2;
return 0;
}
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
/* task->group_leader is listed as a trusted, non-NULL field of task struct. */
acquired = bpf_task_acquire(task->group_leader);
if (acquired)
bpf_task_release(acquired);
else
err = 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/task_kfunc_success.c |
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021 Hengqi Chen */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
const volatile pid_t my_pid = 0;
int value = 0;
SEC("raw_tp/sys_enter")
int tailcall_1(void *ctx)
{
value = 42;
return 0;
}
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 2);
__uint(key_size, sizeof(__u32));
__array(values, int (void *));
} prog_array_init SEC(".maps") = {
.values = {
[1] = (void *)&tailcall_1,
},
};
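/* Illustrative note (not part of the test): the static .values initialization
* above replaces the older runtime setup from user space, which with a
* hypothetical skeleton pointer "skel" would look roughly like:
*
*	__u32 key = 1;
*	int prog_fd = bpf_program__fd(skel->progs.tailcall_1);
*
*	bpf_map_update_elem(bpf_map__fd(skel->maps.prog_array_init),
*			    &key, &prog_fd, BPF_ANY);
*/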
SEC("raw_tp/sys_enter")
int entry(void *ctx)
{
pid_t pid = bpf_get_current_pid_tgid() >> 32;
if (pid != my_pid)
return 0;
bpf_tail_call(ctx, &prog_array_init, 1);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_prog_array_init.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
struct core_reloc_size_output {
int int_sz;
int int_off;
int struct_sz;
int struct_off;
int union_sz;
int union_off;
int arr_sz;
int arr_off;
int arr_elem_sz;
int arr_elem_off;
int ptr_sz;
int ptr_off;
int enum_sz;
int enum_off;
int float_sz;
int float_off;
};
struct core_reloc_size {
int int_field;
struct { int x; } struct_field;
union { int x; } union_field;
int arr_field[4];
void *ptr_field;
enum { VALUE = 123 } enum_field;
float float_field;
};
SEC("raw_tracepoint/sys_enter")
int test_core_size(void *ctx)
{
struct core_reloc_size *in = (void *)&data.in;
struct core_reloc_size_output *out = (void *)&data.out;
out->int_sz = bpf_core_field_size(in->int_field);
out->int_off = bpf_core_field_offset(in->int_field);
out->struct_sz = bpf_core_field_size(in->struct_field);
out->struct_off = bpf_core_field_offset(in->struct_field);
out->union_sz = bpf_core_field_size(in->union_field);
out->union_off = bpf_core_field_offset(in->union_field);
out->arr_sz = bpf_core_field_size(in->arr_field);
out->arr_off = bpf_core_field_offset(in->arr_field);
out->arr_elem_sz = bpf_core_field_size(struct core_reloc_size, arr_field[1]);
out->arr_elem_off = bpf_core_field_offset(struct core_reloc_size, arr_field[1]);
out->ptr_sz = bpf_core_field_size(struct core_reloc_size, ptr_field);
out->ptr_off = bpf_core_field_offset(struct core_reloc_size, ptr_field);
out->enum_sz = bpf_core_field_size(struct core_reloc_size, enum_field);
out->enum_off = bpf_core_field_offset(struct core_reloc_size, enum_field);
out->float_sz = bpf_core_field_size(struct core_reloc_size, float_field);
out->float_off = bpf_core_field_offset(struct core_reloc_size, float_field);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_size.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
bool skip;
} data = {};
/* some types are shared with test_core_reloc_type_based.c */
struct a_struct {
int x;
};
union a_union {
int y;
int z;
};
enum an_enum {
AN_ENUM_VAL1 = 1,
AN_ENUM_VAL2 = 2,
AN_ENUM_VAL3 = 3,
};
typedef struct a_struct named_struct_typedef;
typedef int (*func_proto_typedef)(long);
typedef char arr_typedef[20];
struct core_reloc_type_id_output {
int local_anon_struct;
int local_anon_union;
int local_anon_enum;
int local_anon_func_proto_ptr;
int local_anon_void_ptr;
int local_anon_arr;
int local_struct;
int local_union;
int local_enum;
int local_int;
int local_struct_typedef;
int local_func_proto_typedef;
int local_arr_typedef;
int targ_struct;
int targ_union;
int targ_enum;
int targ_int;
int targ_struct_typedef;
int targ_func_proto_typedef;
int targ_arr_typedef;
};
/* preserve types even if Clang doesn't support built-in */
struct a_struct t1 = {};
union a_union t2 = {};
enum an_enum t3 = 0;
named_struct_typedef t4 = {};
func_proto_typedef t5 = 0;
arr_typedef t6 = {};
SEC("raw_tracepoint/sys_enter")
int test_core_type_id(void *ctx)
{
/* We use __builtin_btf_type_id() in this test, but up until the time
* __builtin_preserve_type_info() was added it contained a bug that
* would make this test fail. The bug was fixed ([0]) with the addition of
* __builtin_preserve_type_info(), so that's what we use to detect
* whether this test has to be executed, however strange that might look.
*
* [0] https://reviews.llvm.org/D85174
*/
#if __has_builtin(__builtin_preserve_type_info)
struct core_reloc_type_id_output *out = (void *)&data.out;
out->local_anon_struct = bpf_core_type_id_local(struct { int marker_field; });
out->local_anon_union = bpf_core_type_id_local(union { int marker_field; });
out->local_anon_enum = bpf_core_type_id_local(enum { MARKER_ENUM_VAL = 123 });
out->local_anon_func_proto_ptr = bpf_core_type_id_local(_Bool(*)(int));
out->local_anon_void_ptr = bpf_core_type_id_local(void *);
out->local_anon_arr = bpf_core_type_id_local(_Bool[47]);
out->local_struct = bpf_core_type_id_local(struct a_struct);
out->local_union = bpf_core_type_id_local(union a_union);
out->local_enum = bpf_core_type_id_local(enum an_enum);
out->local_int = bpf_core_type_id_local(int);
out->local_struct_typedef = bpf_core_type_id_local(named_struct_typedef);
out->local_func_proto_typedef = bpf_core_type_id_local(func_proto_typedef);
out->local_arr_typedef = bpf_core_type_id_local(arr_typedef);
out->targ_struct = bpf_core_type_id_kernel(struct a_struct);
out->targ_union = bpf_core_type_id_kernel(union a_union);
out->targ_enum = bpf_core_type_id_kernel(enum an_enum);
out->targ_int = bpf_core_type_id_kernel(int);
out->targ_struct_typedef = bpf_core_type_id_kernel(named_struct_typedef);
out->targ_func_proto_typedef = bpf_core_type_id_kernel(func_proto_typedef);
out->targ_arr_typedef = bpf_core_type_id_kernel(arr_typedef);
#else
data.skip = true;
#endif
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
__u64 inKey = 0;
__u64 inValue = 0;
__u32 inPid = 0;
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(max_entries, 2);
__type(key, __u64);
__type(value, __u64);
} hashmap1 SEC(".maps");
SEC("tp/syscalls/sys_enter_getpgid")
int sysenter_getpgid(const void *ctx)
{
/* Only do this once, when called from our own test prog. This
* ensures the map value is only updated for a single CPU.
*/
int cur_pid = bpf_get_current_pid_tgid() >> 32;
if (cur_pid == inPid)
bpf_map_update_elem(&hashmap1, &inKey, &inValue, BPF_NOEXIST);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_map_init.c |
// SPDX-License-Identifier: GPL-2.0-only
/* WARNING: This implementation is not necessarily the same
* as the tcp_cubic.c. The purpose is mainly for testing
* the kernel BPF logic.
*
* Highlights:
* 1. CONFIG_HZ .kconfig map is used.
* 2. In bictcp_update(), calculation is changed to use usec
* resolution (i.e. USEC_PER_JIFFY) instead of using jiffies.
* Thus, usecs_to_jiffies() is not used in the bpf_cubic.c.
* 3. In bictcp_update() [under tcp_friendliness], the original
* "while (ca->ack_cnt > delta)" loop is changed to the equivalent
* "ca->ack_cnt / delta" operation.
*/
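/* Illustrative sketch of point 3 above (the original tcp_cubic.c loop is
* paraphrased from memory, so treat it as an assumption):
*
*	while (ca->ack_cnt > delta) {
*		ca->ack_cnt -= delta;
*		ca->tcp_cwnd++;
*	}
*
* becomes the equivalent division in bictcp_update() below:
*
*	if (ca->ack_cnt > delta && delta) {
*		n = ca->ack_cnt / delta;
*		ca->ack_cnt -= n * delta;
*		ca->tcp_cwnd += n;
*	}
*
* which avoids a loop whose bound the verifier cannot reason about.
*/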
#include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/tcp.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation
* max_cwnd = snd_cwnd * beta
*/
#define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */
/* Two methods of hybrid slow start */
#define HYSTART_ACK_TRAIN 0x1
#define HYSTART_DELAY 0x2
/* Number of delay samples for detecting the increase of delay */
#define HYSTART_MIN_SAMPLES 8
#define HYSTART_DELAY_MIN (4000U) /* 4ms */
#define HYSTART_DELAY_MAX (16000U) /* 16 ms */
#define HYSTART_DELAY_THRESH(x) clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
static int fast_convergence = 1;
static const int beta = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh;
static const int bic_scale = 41;
static int tcp_friendliness = 1;
static int hystart = 1;
static int hystart_detect = HYSTART_ACK_TRAIN | HYSTART_DELAY;
static int hystart_low_window = 16;
static int hystart_ack_delta_us = 2000;
static const __u32 cube_rtt_scale = (bic_scale * 10); /* 1024*c/rtt */
static const __u32 beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
/ (BICTCP_BETA_SCALE - beta);
/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
* so K = cubic_root( (wmax-cwnd)*rtt/c )
* the unit of K is bictcp_HZ=2^10, not HZ
*
* c = bic_scale >> 10
* rtt = 100ms
*
* the following code has been designed and tested for
* cwnd < 1 million packets
* RTT < 100 seconds
* HZ < 1,000,000 (corresponding to 10 nano-second)
*/
/* 1/c * 2^2*bictcp_HZ * srtt, 2^40 */
static const __u64 cube_factor = (__u64)(1ull << (10+3*BICTCP_HZ))
/ (bic_scale * 10);
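/* Worked example (illustrative, using the defaults above): cube_factor is
* 2^40 / 410 ~= 2.68e9. For a loss that leaves cwnd 100 packets below the
* previous maximum, bictcp_update() computes
*
*	bic_K = cubic_root(cube_factor * 100) ~= 6450
*
* in 2^-10 second units, i.e. roughly 6.3 seconds to reach the origin point,
* matching K = cbrt((wmax - cwnd) * rtt / c) with rtt = 100 ms and
* c = bic_scale / 1024 ~= 0.04.
*/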
/* BIC TCP Parameters */
struct bictcp {
__u32 cnt; /* increase cwnd by 1 after ACKs */
__u32 last_max_cwnd; /* last maximum snd_cwnd */
__u32 last_cwnd; /* the last snd_cwnd */
__u32 last_time; /* time when updated last_cwnd */
__u32 bic_origin_point;/* origin point of bic function */
__u32 bic_K; /* time to origin point
from the beginning of the current epoch */
__u32 delay_min; /* min delay (usec) */
__u32 epoch_start; /* beginning of an epoch */
__u32 ack_cnt; /* number of acks */
__u32 tcp_cwnd; /* estimated tcp cwnd */
__u16 unused;
__u8 sample_cnt; /* number of samples to decide curr_rtt */
__u8 found; /* the exit point is found? */
__u32 round_start; /* beginning of each round */
__u32 end_seq; /* end_seq of the round */
__u32 last_ack; /* last time when the ACK spacing is close */
__u32 curr_rtt; /* the minimum rtt of current round */
};
static inline void bictcp_reset(struct bictcp *ca)
{
ca->cnt = 0;
ca->last_max_cwnd = 0;
ca->last_cwnd = 0;
ca->last_time = 0;
ca->bic_origin_point = 0;
ca->bic_K = 0;
ca->delay_min = 0;
ca->epoch_start = 0;
ca->ack_cnt = 0;
ca->tcp_cwnd = 0;
ca->found = 0;
}
extern unsigned long CONFIG_HZ __kconfig;
#define HZ CONFIG_HZ
#define USEC_PER_MSEC 1000UL
#define USEC_PER_SEC 1000000UL
#define USEC_PER_JIFFY (USEC_PER_SEC / HZ)
static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
{
return dividend / divisor;
}
#define div64_ul div64_u64
#define BITS_PER_U64 (sizeof(__u64) * 8)
static __always_inline int fls64(__u64 x)
{
int num = BITS_PER_U64 - 1;
if (x == 0)
return 0;
if (!(x & (~0ull << (BITS_PER_U64-32)))) {
num -= 32;
x <<= 32;
}
if (!(x & (~0ull << (BITS_PER_U64-16)))) {
num -= 16;
x <<= 16;
}
if (!(x & (~0ull << (BITS_PER_U64-8)))) {
num -= 8;
x <<= 8;
}
if (!(x & (~0ull << (BITS_PER_U64-4)))) {
num -= 4;
x <<= 4;
}
if (!(x & (~0ull << (BITS_PER_U64-2)))) {
num -= 2;
x <<= 2;
}
if (!(x & (~0ull << (BITS_PER_U64-1))))
num -= 1;
return num + 1;
}
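/* Sanity examples (illustrative): fls64() returns the 1-based index of the
* most significant set bit, so fls64(0) == 0, fls64(1) == 1, fls64(8) == 4
* and fls64(1ULL << 63) == 64.
*/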
static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
{
return tcp_sk(sk)->tcp_mstamp;
}
static __always_inline void bictcp_hystart_reset(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
ca->round_start = ca->last_ack = bictcp_clock_us(sk);
ca->end_seq = tp->snd_nxt;
ca->curr_rtt = ~0U;
ca->sample_cnt = 0;
}
/* "struct_ops/" prefix is a requirement */
SEC("struct_ops/bpf_cubic_init")
void BPF_PROG(bpf_cubic_init, struct sock *sk)
{
struct bictcp *ca = inet_csk_ca(sk);
bictcp_reset(ca);
if (hystart)
bictcp_hystart_reset(sk);
if (!hystart && initial_ssthresh)
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
/* "struct_ops" prefix is a requirement */
SEC("struct_ops/bpf_cubic_cwnd_event")
void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_TX_START) {
struct bictcp *ca = inet_csk_ca(sk);
__u32 now = tcp_jiffies32;
__s32 delta;
delta = now - tcp_sk(sk)->lsndtime;
/* We were application limited (idle) for a while.
* Shift epoch_start to keep cwnd growth to cubic curve.
*/
if (ca->epoch_start && delta > 0) {
ca->epoch_start += delta;
if (after(ca->epoch_start, now))
ca->epoch_start = now;
}
return;
}
}
/*
* cbrt(x) MSB values for x MSB values in [0..63].
* Precomputed then refined by hand - Willy Tarreau
*
* For x in [0..63],
* v = cbrt(x << 18) - 1
* cbrt(x) = (v[x] + 10) >> 6
*/
static const __u8 v[] = {
/* 0x00 */ 0, 54, 54, 54, 118, 118, 118, 118,
/* 0x08 */ 123, 129, 134, 138, 143, 147, 151, 156,
/* 0x10 */ 157, 161, 164, 168, 170, 173, 176, 179,
/* 0x18 */ 181, 185, 187, 190, 192, 194, 197, 199,
/* 0x20 */ 200, 202, 204, 206, 209, 211, 213, 215,
/* 0x28 */ 217, 219, 221, 222, 224, 225, 227, 229,
/* 0x30 */ 231, 232, 234, 236, 237, 239, 240, 242,
/* 0x38 */ 244, 245, 246, 248, 250, 251, 252, 254,
};
/* calculate the cubic root of x using a table lookup followed by one
* Newton-Raphson iteration.
* Avg err ~= 0.195%
*/
static __always_inline __u32 cubic_root(__u64 a)
{
__u32 x, b, shift;
if (a < 64) {
/* a in [0..63] */
return ((__u32)v[(__u32)a] + 35) >> 6;
}
b = fls64(a);
b = ((b * 84) >> 8) - 1;
shift = (a >> (b * 3));
/* it is needed for verifier's bound check on v */
if (shift >= 64)
return 0;
x = ((__u32)(((__u32)v[shift] + 10) << b)) >> 6;
/*
* Newton-Raphson iteration
* 2
* x = ( 2 * x + a / x ) / 3
* k+1 k k
*/
x = (2 * x + (__u32)div64_u64(a, (__u64)x * (__u64)(x - 1)));
x = ((x * 341) >> 10);
return x;
}
/*
* Compute congestion window to use.
*/
static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
__u32 acked)
{
__u32 delta, bic_target, max_cnt;
__u64 offs, t;
ca->ack_cnt += acked; /* count the number of ACKed packets */
if (ca->last_cwnd == cwnd &&
(__s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
return;
/* The CUBIC function can update ca->cnt at most once per jiffy.
* On all cwnd reduction events, ca->epoch_start is set to 0,
* which will force a recalculation of ca->cnt.
*/
if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
goto tcp_friendliness;
ca->last_cwnd = cwnd;
ca->last_time = tcp_jiffies32;
if (ca->epoch_start == 0) {
ca->epoch_start = tcp_jiffies32; /* record beginning */
ca->ack_cnt = acked; /* start counting */
ca->tcp_cwnd = cwnd; /* sync with cubic */
if (ca->last_max_cwnd <= cwnd) {
ca->bic_K = 0;
ca->bic_origin_point = cwnd;
} else {
/* Compute new K based on
* (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
*/
ca->bic_K = cubic_root(cube_factor
* (ca->last_max_cwnd - cwnd));
ca->bic_origin_point = ca->last_max_cwnd;
}
}
/* cubic function - calc*/
/* calculate c * time^3 / rtt,
* while considering overflow in calculation of time^3
* (so time^3 is done by using 64 bit)
* and without the support of division of 64bit numbers
* (so all divisions are done by using 32 bit)
* also NOTE the unit of those variables
* time = (t - K) / 2^bictcp_HZ
* c = bic_scale >> 10
* rtt = (srtt >> 3) / HZ
* !!! The following code does not have overflow problems,
* if the cwnd < 1 million packets !!!
*/
t = (__s32)(tcp_jiffies32 - ca->epoch_start) * USEC_PER_JIFFY;
t += ca->delay_min;
/* change the unit from usec to bictcp_HZ */
t <<= BICTCP_HZ;
t /= USEC_PER_SEC;
if (t < ca->bic_K) /* t - K */
offs = ca->bic_K - t;
else
offs = t - ca->bic_K;
/* c/rtt * (t-K)^3 */
delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
if (t < ca->bic_K) /* below origin*/
bic_target = ca->bic_origin_point - delta;
else /* above origin*/
bic_target = ca->bic_origin_point + delta;
/* cubic function - calc bictcp_cnt*/
if (bic_target > cwnd) {
ca->cnt = cwnd / (bic_target - cwnd);
} else {
ca->cnt = 100 * cwnd; /* very small increment*/
}
/*
* The initial growth of cubic function may be too conservative
* when the available bandwidth is still unknown.
*/
if (ca->last_max_cwnd == 0 && ca->cnt > 20)
ca->cnt = 20; /* increase cwnd 5% per RTT */
tcp_friendliness:
/* TCP Friendly */
if (tcp_friendliness) {
__u32 scale = beta_scale;
__u32 n;
/* update tcp cwnd */
delta = (cwnd * scale) >> 3;
if (ca->ack_cnt > delta && delta) {
n = ca->ack_cnt / delta;
ca->ack_cnt -= n * delta;
ca->tcp_cwnd += n;
}
if (ca->tcp_cwnd > cwnd) { /* if bic is slower than tcp */
delta = ca->tcp_cwnd - cwnd;
max_cnt = cwnd / delta;
if (ca->cnt > max_cnt)
ca->cnt = max_cnt;
}
}
/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
* 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
*/
ca->cnt = max(ca->cnt, 2U);
}
/* Or simply use the BPF_STRUCT_OPS to avoid the SEC boiler plate. */
void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
if (!tcp_is_cwnd_limited(sk))
return;
if (tcp_in_slow_start(tp)) {
if (hystart && after(ack, ca->end_seq))
bictcp_hystart_reset(sk);
acked = tcp_slow_start(tp, acked);
if (!acked)
return;
}
bictcp_update(ca, tp->snd_cwnd, acked);
tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
ca->epoch_start = 0; /* end of epoch */
/* Wmax and fast convergence */
if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
/ (2 * BICTCP_BETA_SCALE);
else
ca->last_max_cwnd = tp->snd_cwnd;
return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}
void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
{
if (new_state == TCP_CA_Loss) {
bictcp_reset(inet_csk_ca(sk));
bictcp_hystart_reset(sk);
}
}
#define GSO_MAX_SIZE 65536
/* Account for TSO/GRO delays.
* Otherwise short RTT flows could get too small ssthresh, since during
* slow start we begin with small TSO packets and ca->delay_min would
* not account for long aggregation delay when TSO packets get bigger.
* Ideally even with a very small RTT we would like to have at least one
* TSO packet being sent and received by GRO, and another one in qdisc layer.
* We apply another 100% factor because @rate is doubled at this point.
* We cap the cushion to 1ms.
*/
static __always_inline __u32 hystart_ack_delay(struct sock *sk)
{
unsigned long rate;
rate = sk->sk_pacing_rate;
if (!rate)
return 0;
return min((__u64)USEC_PER_MSEC,
div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
}
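/* Worked example (illustrative): at sk_pacing_rate = 1.25e9 bytes/sec
* (~10 Gbit/s) the cushion is 65536 * 4 * USEC_PER_SEC / rate ~= 210 usec,
* well below the 1 ms cap; at low pacing rates the USEC_PER_MSEC cap wins.
*/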
static __always_inline void hystart_update(struct sock *sk, __u32 delay)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
__u32 threshold;
if (hystart_detect & HYSTART_ACK_TRAIN) {
__u32 now = bictcp_clock_us(sk);
/* first detection parameter - ack-train detection */
if ((__s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
ca->last_ack = now;
threshold = ca->delay_min + hystart_ack_delay(sk);
/* Hystart ack train triggers if we get ack past
* ca->delay_min/2.
* Pacing might have delayed packets up to RTT/2
* during slow start.
*/
if (sk->sk_pacing_status == SK_PACING_NONE)
threshold >>= 1;
if ((__s32)(now - ca->round_start) > threshold) {
ca->found = 1;
tp->snd_ssthresh = tp->snd_cwnd;
}
}
}
if (hystart_detect & HYSTART_DELAY) {
/* obtain the minimum delay seen over the sampling packets */
if (ca->curr_rtt > delay)
ca->curr_rtt = delay;
if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
ca->sample_cnt++;
} else {
if (ca->curr_rtt > ca->delay_min +
HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
ca->found = 1;
tp->snd_ssthresh = tp->snd_cwnd;
}
}
}
}
void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
const struct ack_sample *sample)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
__u32 delay;
/* Some calls are for duplicates without timestamps */
if (sample->rtt_us < 0)
return;
/* Discard delay samples right after fast recovery */
if (ca->epoch_start && (__s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
return;
delay = sample->rtt_us;
if (delay == 0)
delay = 1;
/* first time call or link delay decreases */
if (ca->delay_min == 0 || ca->delay_min > delay)
ca->delay_min = delay;
/* hystart triggers when cwnd is larger than some threshold */
if (!ca->found && tcp_in_slow_start(tp) && hystart &&
tp->snd_cwnd >= hystart_low_window)
hystart_update(sk, delay);
}
extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
{
return tcp_reno_undo_cwnd(sk);
}
SEC(".struct_ops")
struct tcp_congestion_ops cubic = {
.init = (void *)bpf_cubic_init,
.ssthresh = (void *)bpf_cubic_recalc_ssthresh,
.cong_avoid = (void *)bpf_cubic_cong_avoid,
.set_state = (void *)bpf_cubic_state,
.undo_cwnd = (void *)bpf_cubic_undo_cwnd,
.cwnd_event = (void *)bpf_cubic_cwnd_event,
.pkts_acked = (void *)bpf_cubic_acked,
.name = "bpf_cubic",
};
| linux-master | tools/testing/selftests/bpf/progs/bpf_cubic.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct sample {
int pid;
int seq;
long value;
char comm[16];
};
struct {
__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
__uint(max_entries, 4096);
} user_ringbuf SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 2);
} ringbuf SEC(".maps");
static int map_value;
static long
bad_access1(struct bpf_dynptr *dynptr, void *context)
{
const struct sample *sample;
sample = bpf_dynptr_data(dynptr - 1, 0, sizeof(*sample));
bpf_printk("Was able to pass bad pointer %lx\n", (__u64)dynptr - 1);
return 0;
}
/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
* not be able to read before the pointer.
*/
SEC("?raw_tp")
__failure __msg("negative offset dynptr_ptr ptr")
int user_ringbuf_callback_bad_access1(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, bad_access1, NULL, 0);
return 0;
}
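/* For contrast with the failure cases in this file, a well-formed drain
* callback (illustrative sketch, modeled on the success-path selftest rather
* than taken from it) bounds its access to the dynptr it was handed and
* checks the result:
*
*	static long good_access(struct bpf_dynptr *dynptr, void *context)
*	{
*		const struct sample *sample;
*
*		sample = bpf_dynptr_data(dynptr, 0, sizeof(*sample));
*		if (!sample)
*			return 0;
*		bpf_printk("pid=%d seq=%d", sample->pid, sample->seq);
*		return 0;
*	}
*/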
static long
bad_access2(struct bpf_dynptr *dynptr, void *context)
{
const struct sample *sample;
sample = bpf_dynptr_data(dynptr + 1, 0, sizeof(*sample));
bpf_printk("Was able to pass bad pointer %lx\n", (__u64)dynptr + 1);
return 0;
}
/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
* not be able to read past the end of the pointer.
*/
SEC("?raw_tp")
__failure __msg("dereference of modified dynptr_ptr ptr")
int user_ringbuf_callback_bad_access2(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, bad_access2, NULL, 0);
return 0;
}
static long
write_forbidden(struct bpf_dynptr *dynptr, void *context)
{
*((long *)dynptr) = 0;
return 0;
}
/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should
* not be able to write to that pointer.
*/
SEC("?raw_tp")
__failure __msg("invalid mem access 'dynptr_ptr'")
int user_ringbuf_callback_write_forbidden(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, write_forbidden, NULL, 0);
return 0;
}
static long
null_context_write(struct bpf_dynptr *dynptr, void *context)
{
*((__u64 *)context) = 0;
return 0;
}
/* A bpf_user_ringbuf_drain callback should not be able to write through a
* NULL context pointer.
*/
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int user_ringbuf_callback_null_context_write(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, null_context_write, NULL, 0);
return 0;
}
static long
null_context_read(struct bpf_dynptr *dynptr, void *context)
{
__u64 id = *((__u64 *)context);
bpf_printk("Read id %lu\n", id);
return 0;
}
/* A bpf_user_ringbuf_drain callback should not be able to read through a
* NULL context pointer.
*/
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int user_ringbuf_callback_null_context_read(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, null_context_read, NULL, 0);
return 0;
}
static long
try_discard_dynptr(struct bpf_dynptr *dynptr, void *context)
{
bpf_ringbuf_discard_dynptr(dynptr, 0);
return 0;
}
/* A bpf_user_ringbuf_drain callback should not be able to discard the
* drain-provided dynptr, which it does not own.
*/
SEC("?raw_tp")
__failure __msg("cannot release unowned const bpf_dynptr")
int user_ringbuf_callback_discard_dynptr(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, try_discard_dynptr, NULL, 0);
return 0;
}
static long
try_submit_dynptr(struct bpf_dynptr *dynptr, void *context)
{
bpf_ringbuf_submit_dynptr(dynptr, 0);
return 0;
}
/* A bpf_user_ringbuf_drain callback should not be able to submit the
* drain-provided dynptr, which it does not own.
*/
SEC("?raw_tp")
__failure __msg("cannot release unowned const bpf_dynptr")
int user_ringbuf_callback_submit_dynptr(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, try_submit_dynptr, NULL, 0);
return 0;
}
static long
invalid_drain_callback_return(struct bpf_dynptr *dynptr, void *context)
{
return 2;
}
/* A bpf_user_ringbuf_drain callback must return 0 or 1; any other return
* value should be rejected by the verifier.
*/
SEC("?raw_tp")
__failure __msg("At callback return the register R0 has value")
int user_ringbuf_callback_invalid_return(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, invalid_drain_callback_return, NULL, 0);
return 0;
}
static long
try_reinit_dynptr_mem(struct bpf_dynptr *dynptr, void *context)
{
bpf_dynptr_from_mem(&map_value, 4, 0, dynptr);
return 0;
}
static long
try_reinit_dynptr_ringbuf(struct bpf_dynptr *dynptr, void *context)
{
bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, dynptr);
return 0;
}
SEC("?raw_tp")
__failure __msg("Dynptr has to be an uninitialized dynptr")
int user_ringbuf_callback_reinit_dynptr_mem(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_mem, NULL, 0);
return 0;
}
SEC("?raw_tp")
__failure __msg("Dynptr has to be an uninitialized dynptr")
int user_ringbuf_callback_reinit_dynptr_ringbuf(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_ringbuf, NULL, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/user_ringbuf_fail.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct S;
__noinline int foo(const struct S *s)
{
if (s)
return bpf_get_prandom_u32() < *(const int *) s;
return 0;
}
SEC("cgroup_skb/ingress")
__failure __msg("reference type('FWD S') size cannot be determined")
int global_func14(struct __sk_buff *skb)
{
return foo(NULL);
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func14.c |
/* Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <linux/bpf.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
SEC("cgroup/dev")
int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx)
{
short type = ctx->access_type & 0xFFFF;
#ifdef DEBUG
short access = ctx->access_type >> 16;
char fmt[] = " %d:%d \n";
switch (type) {
case BPF_DEVCG_DEV_BLOCK:
fmt[0] = 'b';
break;
case BPF_DEVCG_DEV_CHAR:
fmt[0] = 'c';
break;
default:
fmt[0] = '?';
break;
}
if (access & BPF_DEVCG_ACC_READ)
fmt[8] = 'r';
if (access & BPF_DEVCG_ACC_WRITE)
fmt[9] = 'w';
if (access & BPF_DEVCG_ACC_MKNOD)
fmt[10] = 'm';
bpf_trace_printk(fmt, sizeof(fmt), ctx->major, ctx->minor);
#endif
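/* Decoding example (illustrative): access_type packs the access mask in the
* upper 16 bits and the device type in the lower 16 bits, so a value of
* 0x00060002 means BPF_DEVCG_DEV_CHAR with BPF_DEVCG_ACC_READ |
* BPF_DEVCG_ACC_WRITE.
*/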
/* Allow access to /dev/zero and /dev/urandom.
* Forbid everything else.
*/
if (ctx->major != 1 || type != BPF_DEVCG_DEV_CHAR)
return 0;
switch (ctx->minor) {
case 5: /* 1:5 /dev/zero */
case 9: /* 1:9 /dev/urandom */
return 1;
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/dev_cgroup.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, long);
} map_a SEC(".maps");
__u32 target_pid;
__u64 cgroup_id;
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
SEC("?iter.s/cgroup")
int cgroup_iter(struct bpf_iter__cgroup *ctx)
{
struct cgroup *cgrp = ctx->cgroup;
long *ptr;
if (cgrp == NULL)
return 0;
ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr)
cgroup_id = cgrp->kn->id;
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int no_rcu_lock(void *ctx)
{
struct task_struct *task;
struct cgroup *cgrp;
long *ptr;
task = bpf_get_current_task_btf();
if (task->pid != target_pid)
return 0;
/* task->cgroups is untrusted in sleepable prog outside of RCU CS */
cgrp = task->cgroups->dfl_cgrp;
ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr)
cgroup_id = cgrp->kn->id;
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int yes_rcu_lock(void *ctx)
{
struct task_struct *task;
struct cgroup *cgrp;
long *ptr;
task = bpf_get_current_task_btf();
if (task->pid != target_pid)
return 0;
bpf_rcu_read_lock();
cgrp = task->cgroups->dfl_cgrp;
/* cgrp is trusted under RCU CS */
ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr)
cgroup_id = cgrp->kn->id;
bpf_rcu_read_unlock();
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)
_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");
enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
int page_size = 0; /* userspace should set it */
#define VERIFY_TYPE(type, func) ({ \
g_map_type = type; \
if (!func()) \
return 0; \
})
#define VERIFY(expr) ({ \
g_line = __LINE__; \
if (!(expr)) \
return 0; \
})
struct bpf_map {
enum bpf_map_type map_type;
__u32 key_size;
__u32 value_size;
__u32 max_entries;
__u32 id;
} __attribute__((preserve_access_index));
static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
__u32 value_size, __u32 max_entries)
{
VERIFY(map->map_type == g_map_type);
VERIFY(map->key_size == key_size);
VERIFY(map->value_size == value_size);
VERIFY(map->max_entries == max_entries);
VERIFY(map->id > 0);
return 1;
}
static inline int check_bpf_map_ptr(struct bpf_map *indirect,
struct bpf_map *direct)
{
VERIFY(indirect->map_type == direct->map_type);
VERIFY(indirect->key_size == direct->key_size);
VERIFY(indirect->value_size == direct->value_size);
VERIFY(indirect->max_entries == direct->max_entries);
VERIFY(indirect->id == direct->id);
return 1;
}
static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
__u32 key_size, __u32 value_size, __u32 max_entries)
{
VERIFY(check_bpf_map_ptr(indirect, direct));
VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
max_entries));
return 1;
}
static inline int check_default(struct bpf_map *indirect,
struct bpf_map *direct)
{
VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
MAX_ENTRIES));
return 1;
}
static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
MAX_ENTRIES));
return 1;
}
typedef struct {
int counter;
} atomic_t;
struct bpf_htab {
struct bpf_map map;
atomic_t count;
__u32 n_buckets;
__u32 elem_size;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_hash SEC(".maps");
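/* kfunc: returns the map's current element count, summed across CPUs */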
__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;
static inline int check_hash(void)
{
struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
struct bpf_map *map = (struct bpf_map *)&m_hash;
int i;
VERIFY(check_default_noinline(&hash->map, map));
VERIFY(hash->n_buckets == MAX_ENTRIES);
VERIFY(hash->elem_size == 64);
VERIFY(hash->count.counter == 0);
VERIFY(bpf_map_sum_elem_count(map) == 0);
for (i = 0; i < HALF_ENTRIES; ++i) {
const __u32 key = i;
const __u32 val = 1;
if (bpf_map_update_elem(hash, &key, &val, 0))
return 0;
}
VERIFY(hash->count.counter == HALF_ENTRIES);
VERIFY(bpf_map_sum_elem_count(map) == HALF_ENTRIES);
return 1;
}
struct bpf_array {
struct bpf_map map;
__u32 elem_size;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_array SEC(".maps");
static inline int check_array(void)
{
struct bpf_array *array = (struct bpf_array *)&m_array;
struct bpf_map *map = (struct bpf_map *)&m_array;
int i, n_lookups = 0, n_keys = 0;
VERIFY(check_default(&array->map, map));
VERIFY(array->elem_size == 8);
for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
const __u32 key = i;
__u32 *val = bpf_map_lookup_elem(array, &key);
++n_lookups;
if (val)
++n_keys;
}
VERIFY(n_lookups == MAX_ENTRIES);
VERIFY(n_keys == MAX_ENTRIES);
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_prog_array SEC(".maps");
static inline int check_prog_array(void)
{
struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
struct bpf_map *map = (struct bpf_map *)&m_prog_array;
VERIFY(check_default(&prog_array->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_perf_event_array SEC(".maps");
static inline int check_perf_event_array(void)
{
struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;
VERIFY(check_default(&perf_event_array->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_percpu_hash SEC(".maps");
static inline int check_percpu_hash(void)
{
struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;
VERIFY(check_default(&percpu_hash->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_percpu_array SEC(".maps");
static inline int check_percpu_array(void)
{
struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
struct bpf_map *map = (struct bpf_map *)&m_percpu_array;
VERIFY(check_default(&percpu_array->map, map));
return 1;
}
struct bpf_stack_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u64);
} m_stack_trace SEC(".maps");
static inline int check_stack_trace(void)
{
struct bpf_stack_map *stack_trace =
(struct bpf_stack_map *)&m_stack_trace;
struct bpf_map *map = (struct bpf_map *)&m_stack_trace;
VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
MAX_ENTRIES));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_cgroup_array SEC(".maps");
static inline int check_cgroup_array(void)
{
struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;
VERIFY(check_default(&cgroup_array->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_lru_hash SEC(".maps");
static inline int check_lru_hash(void)
{
struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
struct bpf_map *map = (struct bpf_map *)&m_lru_hash;
VERIFY(check_default(&lru_hash->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_lru_percpu_hash SEC(".maps");
static inline int check_lru_percpu_hash(void)
{
struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;
VERIFY(check_default(&lru_percpu_hash->map, map));
return 1;
}
struct lpm_trie {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct lpm_key {
struct bpf_lpm_trie_key trie_key;
__u32 data;
};
struct {
__uint(type, BPF_MAP_TYPE_LPM_TRIE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__uint(max_entries, MAX_ENTRIES);
__type(key, struct lpm_key);
__type(value, __u32);
} m_lpm_trie SEC(".maps");
static inline int check_lpm_trie(void)
{
struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;
VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
MAX_ENTRIES));
return 1;
}
#define INNER_MAX_ENTRIES 1234
struct inner_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, INNER_MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} inner_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, INNER_MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
});
} m_array_of_maps SEC(".maps") = {
.values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};
static inline int check_array_of_maps(void)
{
struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;
struct bpf_array *inner_map;
int key = 0;
VERIFY(check_default(&array_of_maps->map, map));
inner_map = bpf_map_lookup_elem(array_of_maps, &key);
VERIFY(inner_map != NULL);
VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
__array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
.values = {
[2] = &inner_map,
},
};
static inline int check_hash_of_maps(void)
{
struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;
struct bpf_htab *inner_map;
int key = 2;
VERIFY(check_default(&hash_of_maps->map, map));
inner_map = bpf_map_lookup_elem(hash_of_maps, &key);
VERIFY(inner_map != NULL);
VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
return 1;
}
struct bpf_dtab {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_devmap SEC(".maps");
static inline int check_devmap(void)
{
struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
struct bpf_map *map = (struct bpf_map *)&m_devmap;
VERIFY(check_default(&devmap->map, map));
return 1;
}
struct bpf_stab {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_sockmap SEC(".maps");
static inline int check_sockmap(void)
{
struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
struct bpf_map *map = (struct bpf_map *)&m_sockmap;
VERIFY(check_default(&sockmap->map, map));
return 1;
}
struct bpf_cpu_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_CPUMAP);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_cpumap SEC(".maps");
static inline int check_cpumap(void)
{
struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
struct bpf_map *map = (struct bpf_map *)&m_cpumap;
VERIFY(check_default(&cpumap->map, map));
return 1;
}
struct xsk_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_XSKMAP);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_xskmap SEC(".maps");
static inline int check_xskmap(void)
{
struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
struct bpf_map *map = (struct bpf_map *)&m_xskmap;
VERIFY(check_default(&xskmap->map, map));
return 1;
}
struct bpf_shtab {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_sockhash SEC(".maps");
static inline int check_sockhash(void)
{
struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
struct bpf_map *map = (struct bpf_map *)&m_sockhash;
VERIFY(check_default(&sockhash->map, map));
return 1;
}
struct bpf_cgroup_storage_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, __u32);
} m_cgroup_storage SEC(".maps");
static inline int check_cgroup_storage(void)
{
struct bpf_cgroup_storage_map *cgroup_storage =
(struct bpf_cgroup_storage_map *)&m_cgroup_storage;
struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;
VERIFY(check(&cgroup_storage->map, map,
sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));
return 1;
}
struct reuseport_array {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_reuseport_sockarray SEC(".maps");
static inline int check_reuseport_sockarray(void)
{
struct reuseport_array *reuseport_sockarray =
(struct reuseport_array *)&m_reuseport_sockarray;
struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;
VERIFY(check_default(&reuseport_sockarray->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");
static inline int check_percpu_cgroup_storage(void)
{
struct bpf_cgroup_storage_map *percpu_cgroup_storage =
(struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;
VERIFY(check(&percpu_cgroup_storage->map, map,
sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));
return 1;
}
struct bpf_queue_stack {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_QUEUE);
__uint(max_entries, MAX_ENTRIES);
__type(value, __u32);
} m_queue SEC(".maps");
static inline int check_queue(void)
{
struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
struct bpf_map *map = (struct bpf_map *)&m_queue;
VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_STACK);
__uint(max_entries, MAX_ENTRIES);
__type(value, __u32);
} m_stack SEC(".maps");
static inline int check_stack(void)
{
struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
struct bpf_map *map = (struct bpf_map *)&m_stack;
VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));
return 1;
}
struct bpf_local_storage_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, __u32);
__type(value, __u32);
} m_sk_storage SEC(".maps");
static inline int check_sk_storage(void)
{
struct bpf_local_storage_map *sk_storage =
(struct bpf_local_storage_map *)&m_sk_storage;
struct bpf_map *map = (struct bpf_map *)&m_sk_storage;
VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_devmap_hash SEC(".maps");
static inline int check_devmap_hash(void)
{
struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;
VERIFY(check_default(&devmap_hash->map, map));
return 1;
}
struct bpf_ringbuf_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
} m_ringbuf SEC(".maps");
static inline int check_ringbuf(void)
{
struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
struct bpf_map *map = (struct bpf_map *)&m_ringbuf;
VERIFY(check(&ringbuf->map, map, 0, 0, page_size));
return 1;
}
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
check_reuseport_sockarray);
VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
check_percpu_cgroup_storage);
VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/map_ptr_kern.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/pkt_cls.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#ifndef ctx_ptr
# define ctx_ptr(field) (void *)(long)(field)
#endif
#define AF_INET 2
#define AF_INET6 10
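/* Populate a bpf_fib_lookup from the packet's L3 header; return -1 if the
 * header does not fit within the linear packet data.
 */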
static __always_inline int fill_fib_params_v4(struct __sk_buff *skb,
struct bpf_fib_lookup *fib_params)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
struct iphdr *ip4h;
if (data + sizeof(struct ethhdr) > data_end)
return -1;
ip4h = (struct iphdr *)(data + sizeof(struct ethhdr));
if ((void *)(ip4h + 1) > data_end)
return -1;
fib_params->family = AF_INET;
fib_params->tos = ip4h->tos;
fib_params->l4_protocol = ip4h->protocol;
fib_params->sport = 0;
fib_params->dport = 0;
fib_params->tot_len = bpf_ntohs(ip4h->tot_len);
fib_params->ipv4_src = ip4h->saddr;
fib_params->ipv4_dst = ip4h->daddr;
return 0;
}
static __always_inline int fill_fib_params_v6(struct __sk_buff *skb,
struct bpf_fib_lookup *fib_params)
{
struct in6_addr *src = (struct in6_addr *)fib_params->ipv6_src;
struct in6_addr *dst = (struct in6_addr *)fib_params->ipv6_dst;
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
struct ipv6hdr *ip6h;
if (data + sizeof(struct ethhdr) > data_end)
return -1;
ip6h = (struct ipv6hdr *)(data + sizeof(struct ethhdr));
if ((void *)(ip6h + 1) > data_end)
return -1;
fib_params->family = AF_INET6;
fib_params->flowinfo = 0;
fib_params->l4_protocol = ip6h->nexthdr;
fib_params->sport = 0;
fib_params->dport = 0;
fib_params->tot_len = bpf_ntohs(ip6h->payload_len);
*src = ip6h->saddr;
*dst = ip6h->daddr;
return 0;
}
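/* Drop any frame whose first 12 bytes (destination and source MAC) are still
 * all zero, i.e. the redirect path did not rewrite the Ethernet header.
 */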
SEC("tc")
int tc_chk(struct __sk_buff *skb)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
__u32 *raw = data;
if (data + sizeof(struct ethhdr) > data_end)
return TC_ACT_SHOT;
return !raw[0] && !raw[1] && !raw[2] ? TC_ACT_SHOT : TC_ACT_OK;
}
static __always_inline int tc_redir(struct __sk_buff *skb)
{
struct bpf_fib_lookup fib_params = { .ifindex = skb->ingress_ifindex };
__u8 zero[ETH_ALEN * 2];
int ret = -1;
switch (skb->protocol) {
case __bpf_constant_htons(ETH_P_IP):
ret = fill_fib_params_v4(skb, &fib_params);
break;
case __bpf_constant_htons(ETH_P_IPV6):
ret = fill_fib_params_v6(skb, &fib_params);
break;
}
if (ret)
return TC_ACT_OK;
ret = bpf_fib_lookup(skb, &fib_params, sizeof(fib_params), 0);
if (ret == BPF_FIB_LKUP_RET_NOT_FWDED || ret < 0)
return TC_ACT_OK;
__builtin_memset(&zero, 0, sizeof(zero));
if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
return TC_ACT_SHOT;
if (ret == BPF_FIB_LKUP_RET_NO_NEIGH) {
struct bpf_redir_neigh nh_params = {};
nh_params.nh_family = fib_params.family;
__builtin_memcpy(&nh_params.ipv6_nh, &fib_params.ipv6_dst,
sizeof(nh_params.ipv6_nh));
return bpf_redirect_neigh(fib_params.ifindex, &nh_params,
sizeof(nh_params), 0);
} else if (ret == BPF_FIB_LKUP_RET_SUCCESS) {
void *data_end = ctx_ptr(skb->data_end);
struct ethhdr *eth = ctx_ptr(skb->data);
if (eth + 1 > data_end)
return TC_ACT_SHOT;
__builtin_memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN);
__builtin_memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
return bpf_redirect(fib_params.ifindex, 0);
}
return TC_ACT_SHOT;
}
/* these are identical, but keep them separate for compatibility with the
* section names expected by test_tc_redirect.sh
*/
SEC("tc")
int tc_dst(struct __sk_buff *skb)
{
return tc_redir(skb);
}
SEC("tc")
int tc_src(struct __sk_buff *skb)
{
return tc_redir(skb);
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
bool skip;
} data = {};
enum named_enum {
NAMED_ENUM_VAL1 = 1,
NAMED_ENUM_VAL2 = 2,
NAMED_ENUM_VAL3 = 3,
};
typedef enum {
ANON_ENUM_VAL1 = 0x10,
ANON_ENUM_VAL2 = 0x20,
ANON_ENUM_VAL3 = 0x30,
} anon_enum;
struct core_reloc_enumval_output {
bool named_val1_exists;
bool named_val2_exists;
bool named_val3_exists;
bool anon_val1_exists;
bool anon_val2_exists;
bool anon_val3_exists;
int named_val1;
int named_val2;
int anon_val1;
int anon_val2;
};
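/* Probe named and anonymous enum values via CO-RE enum-value relocations;
 * mark the test as skipped when the compiler lacks
 * __builtin_preserve_enum_value.
 */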
SEC("raw_tracepoint/sys_enter")
int test_core_enumval(void *ctx)
{
#if __has_builtin(__builtin_preserve_enum_value)
struct core_reloc_enumval_output *out = (void *)&data.out;
enum named_enum named = 0;
anon_enum anon = 0;
out->named_val1_exists = bpf_core_enum_value_exists(named, NAMED_ENUM_VAL1);
out->named_val2_exists = bpf_core_enum_value_exists(enum named_enum, NAMED_ENUM_VAL2);
out->named_val3_exists = bpf_core_enum_value_exists(enum named_enum, NAMED_ENUM_VAL3);
out->anon_val1_exists = bpf_core_enum_value_exists(anon, ANON_ENUM_VAL1);
out->anon_val2_exists = bpf_core_enum_value_exists(anon_enum, ANON_ENUM_VAL2);
out->anon_val3_exists = bpf_core_enum_value_exists(anon_enum, ANON_ENUM_VAL3);
out->named_val1 = bpf_core_enum_value(named, NAMED_ENUM_VAL1);
out->named_val2 = bpf_core_enum_value(named, NAMED_ENUM_VAL2);
/* NAMED_ENUM_VAL3 value is optional */
out->anon_val1 = bpf_core_enum_value(anon, ANON_ENUM_VAL1);
out->anon_val2 = bpf_core_enum_value(anon, ANON_ENUM_VAL2);
/* ANON_ENUM_VAL3 value is optional */
#else
data.skip = true;
#endif
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_enumval.c |
#include "core_reloc_types.h"
void f(struct core_reloc_size x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_size.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
extern bool CONFIG_SECURITY_SELINUX __kconfig __weak;
extern bool CONFIG_SECURITY_SMACK __kconfig __weak;
extern bool CONFIG_SECURITY_APPARMOR __kconfig __weak;
#ifndef AF_PACKET
#define AF_PACKET 17
#endif
#ifndef AF_UNIX
#define AF_UNIX 1
#endif
#ifndef EPERM
#define EPERM 1
#endif
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, __u64);
__type(value, __u64);
} cgroup_storage SEC(".maps");
int called_socket_post_create;
int called_socket_post_create2;
int called_socket_bind;
int called_socket_bind2;
int called_socket_alloc;
int called_socket_clone;
static __always_inline int test_local_storage(void)
{
__u64 *val;
val = bpf_get_local_storage(&cgroup_storage, 0);
if (!val)
return 0;
*val += 1;
return 1;
}
static __always_inline int real_create(struct socket *sock, int family,
int protocol)
{
struct sock *sk;
int prio = 123;
/* Reject non-tx-only AF_PACKET. */
if (family == AF_PACKET && protocol != 0)
return 0; /* EPERM */
sk = sock->sk;
if (!sk)
return 1;
/* The rest of the sockets get default policy. */
if (bpf_setsockopt(sk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
return 0; /* EPERM */
/* Make sure bpf_getsockopt is allowed and works. */
prio = 0;
if (bpf_getsockopt(sk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
return 0; /* EPERM */
if (prio != 123)
return 0; /* EPERM */
/* Can access cgroup local storage. */
if (!test_local_storage())
return 0; /* EPERM */
return 1;
}
/* __cgroup_bpf_run_lsm_socket */
SEC("lsm_cgroup/socket_post_create")
int BPF_PROG(socket_post_create, struct socket *sock, int family,
int type, int protocol, int kern)
{
called_socket_post_create++;
return real_create(sock, family, protocol);
}
/* __cgroup_bpf_run_lsm_socket */
SEC("lsm_cgroup/socket_post_create")
int BPF_PROG(socket_post_create2, struct socket *sock, int family,
int type, int protocol, int kern)
{
called_socket_post_create2++;
return real_create(sock, family, protocol);
}
static __always_inline int real_bind(struct socket *sock,
struct sockaddr *address,
int addrlen)
{
struct sockaddr_ll sa = {};
if (sock->sk->__sk_common.skc_family != AF_PACKET)
return 1;
if (sock->sk->sk_kern_sock)
return 1;
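/* Copy the generic sockaddr into a sockaddr_ll via probe read before
 * inspecting the AF_PACKET protocol field.
 */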
bpf_probe_read_kernel(&sa, sizeof(sa), address);
if (sa.sll_protocol)
return 0; /* EPERM */
/* Can access cgroup local storage. */
if (!test_local_storage())
return 0; /* EPERM */
return 1;
}
/* __cgroup_bpf_run_lsm_socket */
SEC("lsm_cgroup/socket_bind")
int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
int addrlen)
{
called_socket_bind++;
return real_bind(sock, address, addrlen);
}
/* __cgroup_bpf_run_lsm_socket */
SEC("lsm_cgroup/socket_bind")
int BPF_PROG(socket_bind2, struct socket *sock, struct sockaddr *address,
int addrlen)
{
called_socket_bind2++;
return real_bind(sock, address, addrlen);
}
/* __cgroup_bpf_run_lsm_current (via bpf_lsm_current_hooks) */
SEC("lsm_cgroup/sk_alloc_security")
int BPF_PROG(socket_alloc, struct sock *sk, int family, gfp_t priority)
{
called_socket_alloc++;
/* If non-BPF LSMs are already installed, returning EPERM here would leak their allocations */
if (CONFIG_SECURITY_SELINUX || CONFIG_SECURITY_SMACK || CONFIG_SECURITY_APPARMOR)
return 1;
if (family == AF_UNIX)
return 0; /* EPERM */
/* Can access cgroup local storage. */
if (!test_local_storage())
return 0; /* EPERM */
return 1;
}
/* __cgroup_bpf_run_lsm_sock */
SEC("lsm_cgroup/inet_csk_clone")
int BPF_PROG(socket_clone, struct sock *newsk, const struct request_sock *req)
{
int prio = 234;
if (!newsk)
return 1;
/* Accepted request sockets get a different priority. */
if (bpf_setsockopt(newsk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
return 1;
/* Make sure bpf_getsockopt is allowed and works. */
prio = 0;
if (bpf_getsockopt(newsk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
return 1;
if (prio != 234)
return 1;
/* Can access cgroup local storage. */
if (!test_local_storage())
return 1;
called_socket_clone++;
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/lsm_cgroup.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <stdint.h>
#include <string.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
#define TCP_MEM_LOOPS 20 /* because 30 doesn't fit into 512 bytes of stack */
#define MAX_ULONG_STR_LEN 7
#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
{
unsigned char i;
char name[sizeof(tcp_mem_name)];
int ret;
memset(name, 0, sizeof(name));
ret = bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
#pragma clang loop unroll(disable)
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
return 1;
}
SEC("cgroup/sysctl")
int sysctl_tcp_mem(struct bpf_sysctl *ctx)
{
unsigned long tcp_mem[TCP_MEM_LOOPS] = {};
char value[MAX_VALUE_STR_LEN];
unsigned char i, off = 0;
int ret;
if (ctx->write)
return 0;
if (!is_tcp_mem(ctx))
return 0;
ret = bpf_sysctl_get_current_value(ctx, value, MAX_VALUE_STR_LEN);
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
#pragma clang loop unroll(disable)
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
if (ret <= 0 || ret > MAX_ULONG_STR_LEN)
return 0;
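/* ret is known to be in [1, MAX_ULONG_STR_LEN]; masking keeps off provably bounded */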
off += ret & MAX_ULONG_STR_LEN;
}
return tcp_mem[0] < tcp_mem[1] && tcp_mem[1] < tcp_mem[2];
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sysctl_loop2.c |