python_code | repo_name | file_path
---|---|---
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
SEC("kprobe.multi")
int test_override(struct pt_regs *ctx)
{
bpf_override_return(ctx, 123);
return 0;
}
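/* Note: bpf_override_return() only works on kernels built with
 * CONFIG_BPF_KPROBE_OVERRIDE and only on functions whitelisted for error
 * injection (ALLOW_ERROR_INJECTION). A minimal userspace attach sketch,
 * assuming a libbpf skeleton named kprobe_multi_override and a hypothetical
 * target function; error handling elided:
 *
 *	struct kprobe_multi_override *skel = kprobe_multi_override__open_and_load();
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override,
 *						     "bpf_fentry_test1", NULL);
 */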
| linux-master | tools/testing/selftests/bpf/progs/kprobe_multi_override.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper test for the majority of C syntax quirks.
*
* Copyright (c) 2019 Facebook
*/
/* ----- START-EXPECTED-OUTPUT ----- */
enum e1 {
A = 0,
B = 1,
};
enum e2 {
C = 100,
D = 4294967295,
E = 0,
};
typedef enum e2 e2_t;
typedef enum {
F = 0,
G = 1,
H = 2,
} e3_t;
/* ----- END-EXPECTED-OUTPUT ----- */
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*enum e_byte {
* EBYTE_1 = 0,
* EBYTE_2 = 1,
*} __attribute__((mode(byte)));
*
*/
/* ----- END-EXPECTED-OUTPUT ----- */
enum e_byte {
EBYTE_1,
EBYTE_2,
} __attribute__((mode(byte)));
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*enum e_word {
* EWORD_1 = 0LL,
* EWORD_2 = 1LL,
*} __attribute__((mode(word)));
*
*/
/* ----- END-EXPECTED-OUTPUT ----- */
enum e_word {
EWORD_1,
EWORD_2,
} __attribute__((mode(word))); /* force to use 8-byte backing for this enum */
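/* A compile-time sanity check for the forced backing sizes (a sketch; it
 * assumes a 64-bit target, where mode(word) selects the register width):
 *
 *	_Static_assert(sizeof(enum e_byte) == 1, "mode(byte) backing");
 *	_Static_assert(sizeof(enum e_word) == 8, "mode(word) backing");
 */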
/* ----- START-EXPECTED-OUTPUT ----- */
enum e_big {
EBIG_1 = 1000000000000ULL,
};
typedef int int_t;
typedef volatile const int * volatile const crazy_ptr_t;
typedef int *****we_need_to_go_deeper_ptr_t;
typedef volatile const we_need_to_go_deeper_ptr_t * restrict * volatile * const * restrict volatile * restrict const * volatile const * restrict volatile const how_about_this_ptr_t;
typedef int *ptr_arr_t[10];
typedef void (*fn_ptr1_t)(int);
typedef void (*printf_fn_t)(const char *, ...);
/* ------ END-EXPECTED-OUTPUT ------ */
/*
* While previous function pointers are pretty trivial (C-syntax-level
* trivial), the following are deciphered here for future generations:
*
* - `fn_ptr2_t`: function, taking anonymous struct as a first arg and pointer
* to a function, that takes int and returns int, as a second arg; returning
* a pointer to a const pointer to a char. Equivalent to:
* typedef struct { int a; } s_t;
* typedef int (*fn_t)(int);
* typedef char * const * (*fn_ptr2_t)(s_t, fn_t);
*
* - `fn_complex_t`: pointer to a function returning struct and accepting
* union and struct. All structs and enum are anonymous and defined inline.
*
* - `signal_t`: pointer to a function accepting a pointer to a function as an
* argument and returning pointer to a function as a result. Sane equivalent:
* typedef void (*signal_handler_t)(int);
* typedef signal_handler_t (*signal_ptr_t)(int, signal_handler_t);
*
* - `fn_ptr_arr1_t`: array of pointers to a function accepting pointer to
* a pointer to an int and returning pointer to a char. Easy.
*
* - `fn_ptr_arr2_t`: array of const pointers to a function taking no arguments
* and returning a const pointer to a function, that takes pointer to a
* `int -> char *` function and returns pointer to a char. Equivalent:
* typedef char * (*fn_input_t)(int);
* typedef char * (*fn_output_outer_t)(fn_input_t);
* typedef const fn_output_outer_t (* fn_output_inner_t)();
* typedef const fn_output_inner_t fn_ptr_arr2_t[5];
*/
/* ----- START-EXPECTED-OUTPUT ----- */
typedef char * const * (*fn_ptr2_t)(struct {
int a;
}, int (*)(int));
typedef struct {
int a;
void (*b)(int, struct {
int c;
}, union {
char d;
int e[5];
});
} (*fn_complex_t)(union {
void *f;
char g[16];
}, struct {
int h;
});
typedef void (* (*signal_t)(int, void (*)(int)))(int);
typedef char * (*fn_ptr_arr1_t[10])(int **);
typedef char * (* (* const fn_ptr_arr2_t[5])())(char * (*)(int));
struct struct_w_typedefs {
int_t a;
crazy_ptr_t b;
we_need_to_go_deeper_ptr_t c;
how_about_this_ptr_t d;
ptr_arr_t e;
fn_ptr1_t f;
printf_fn_t g;
fn_ptr2_t h;
fn_complex_t i;
signal_t j;
fn_ptr_arr1_t k;
fn_ptr_arr2_t l;
};
typedef struct {
int x;
int y;
int z;
} anon_struct_t;
struct struct_fwd;
typedef struct struct_fwd struct_fwd_t;
typedef struct struct_fwd *struct_fwd_ptr_t;
union union_fwd;
typedef union union_fwd union_fwd_t;
typedef union union_fwd *union_fwd_ptr_t;
struct struct_empty {};
struct struct_simple {
int a;
char b;
const int_t *p;
struct struct_empty s;
enum e2 e;
enum {
ANON_VAL1 = 1,
ANON_VAL2 = 2,
} f;
int arr1[13];
enum e2 arr2[5];
};
union union_empty {};
union union_simple {
void *ptr;
int num;
int_t num2;
union union_empty u;
};
struct struct_in_struct {
struct struct_simple simple;
union union_simple also_simple;
struct {
int a;
} not_so_hard_as_well;
union {
int b;
int c;
} anon_union_is_good;
struct {
int d;
int e;
};
union {
int f;
int g;
};
};
struct struct_in_array {};
struct struct_in_array_typed {};
typedef struct struct_in_array_typed struct_in_array_t[2];
struct struct_with_embedded_stuff {
int a;
struct {
int b;
struct {
struct struct_with_embedded_stuff *c;
const char *d;
} e;
union {
volatile long f;
void * restrict g;
};
};
union {
const int_t *h;
void (*i)(char, int, void *);
} j;
enum {
K = 100,
L = 200,
} m;
char n[16];
struct {
char o;
int p;
void (*q)(int);
} r[5];
struct struct_in_struct s[10];
int t[11];
struct struct_in_array (*u)[2];
struct_in_array_t *v;
};
struct float_struct {
float f;
const double *d;
volatile long double *ld;
};
struct root_struct {
enum e1 _1;
enum e2 _2;
e2_t _2_1;
e3_t _2_2;
enum e_byte _100;
enum e_word _101;
enum e_big _102;
struct struct_w_typedefs _3;
anon_struct_t _7;
struct struct_fwd *_8;
struct_fwd_t *_9;
struct_fwd_ptr_t _10;
union union_fwd *_11;
union_fwd_t *_12;
union_fwd_ptr_t _13;
struct struct_with_embedded_stuff _14;
struct float_struct _15;
};
/* ------ END-EXPECTED-OUTPUT ------ */
int f(struct root_struct *s)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/if.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define SERV4_IP 0xc0a801feU /* 192.168.1.254 */
#define SERV4_PORT 4040
#define SERV4_REWRITE_IP 0x7f000001U /* 127.0.0.1 */
#define SERV4_REWRITE_PORT 4444
#ifndef IFNAMSIZ
#define IFNAMSIZ 16
#endif
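/* Note: ctx->user_ip4 and ctx->user_port are stored in network byte order,
 * which is why every comparison below goes through bpf_htonl()/bpf_htons().
 */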
static __inline int bind_to_device(struct bpf_sock_addr *ctx)
{
char veth1[IFNAMSIZ] = "test_sock_addr1";
char veth2[IFNAMSIZ] = "test_sock_addr2";
char missing[IFNAMSIZ] = "nonexistent_dev";
char del_bind[IFNAMSIZ] = "";
int veth1_idx, veth2_idx;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
&veth1, sizeof(veth1)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
&veth1_idx, sizeof(veth1_idx)) || !veth1_idx)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
&veth2, sizeof(veth2)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
&veth2_idx, sizeof(veth2_idx)) || !veth2_idx ||
veth1_idx == veth2_idx)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
&missing, sizeof(missing)) != -ENODEV)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
&veth1_idx, sizeof(veth1_idx)))
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
&del_bind, sizeof(del_bind)))
return 1;
return 0;
}
static __inline int bind_reuseport(struct bpf_sock_addr *ctx)
{
int val = 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)) || !val)
return 1;
val = 0;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)) || val)
return 1;
return 0;
}
static __inline int misc_opts(struct bpf_sock_addr *ctx, int opt)
{
int old, tmp, new = 0xeb9f;
	/* The socket in the test case guarantees that old never equals new. */
if (bpf_getsockopt(ctx, SOL_SOCKET, opt, &old, sizeof(old)) ||
old == new)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, opt, &new, sizeof(new)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, opt, &tmp, sizeof(tmp)) ||
tmp != new)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, opt, &old, sizeof(old)))
return 1;
return 0;
}
SEC("cgroup/bind4")
int bind_v4_prog(struct bpf_sock_addr *ctx)
{
struct bpf_sock *sk;
__u32 user_ip4;
__u16 user_port;
sk = ctx->sk;
if (!sk)
return 0;
if (sk->family != AF_INET)
return 0;
if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
return 0;
if (ctx->user_ip4 != bpf_htonl(SERV4_IP) ||
ctx->user_port != bpf_htons(SERV4_PORT))
return 0;
// u8 narrow loads:
user_ip4 = 0;
user_ip4 |= ((volatile __u8 *)&ctx->user_ip4)[0] << 0;
user_ip4 |= ((volatile __u8 *)&ctx->user_ip4)[1] << 8;
user_ip4 |= ((volatile __u8 *)&ctx->user_ip4)[2] << 16;
user_ip4 |= ((volatile __u8 *)&ctx->user_ip4)[3] << 24;
if (ctx->user_ip4 != user_ip4)
return 0;
user_port = 0;
user_port |= ((volatile __u8 *)&ctx->user_port)[0] << 0;
user_port |= ((volatile __u8 *)&ctx->user_port)[1] << 8;
if (ctx->user_port != user_port)
return 0;
// u16 narrow loads:
user_ip4 = 0;
user_ip4 |= ((volatile __u16 *)&ctx->user_ip4)[0] << 0;
user_ip4 |= ((volatile __u16 *)&ctx->user_ip4)[1] << 16;
if (ctx->user_ip4 != user_ip4)
return 0;
/* Bind to device and unbind it. */
if (bind_to_device(ctx))
return 0;
/* Test for misc socket options. */
if (misc_opts(ctx, SO_MARK) || misc_opts(ctx, SO_PRIORITY))
return 0;
/* Set reuseport and unset */
if (bind_reuseport(ctx))
return 0;
ctx->user_ip4 = bpf_htonl(SERV4_REWRITE_IP);
ctx->user_port = bpf_htons(SERV4_REWRITE_PORT);
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/bind4_prog.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
SEC("socket")
int combinations(volatile struct __sk_buff* skb)
{
int ret = 0, i;
#pragma nounroll
for (i = 0; i < 20; i++)
if (skb->len)
ret |= 1 << i;
return ret;
}
| linux-master | tools/testing/selftests/bpf/progs/loop4.c |
#include "core_reloc_types.h"
void f(struct core_reloc_primitives___diff_ptr_type x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_primitives___diff_ptr_type.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Meta
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/pkt_cls.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
/* veth_src --- veth_src_fwd --- veth_dst_fwd --- veth_dst
 *           |                                  |
 *  ns_src   |              ns_fwd              |  ns_dst
 *
 * ns_src and ns_dst: ENDHOST namespace
 * ns_fwd: Forwarding namespace
 */
#define ctx_ptr(field) (void *)(long)(field)
#define ip4_src __bpf_htonl(0xac100164) /* 172.16.1.100 */
#define ip4_dst __bpf_htonl(0xac100264) /* 172.16.2.100 */
#define ip6_src { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x01, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe }
#define ip6_dst { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x02, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe }
#define v6_equal(a, b) (a.s6_addr32[0] == b.s6_addr32[0] && \
a.s6_addr32[1] == b.s6_addr32[1] && \
a.s6_addr32[2] == b.s6_addr32[2] && \
a.s6_addr32[3] == b.s6_addr32[3])
volatile const __u32 IFINDEX_SRC;
volatile const __u32 IFINDEX_DST;
#define EGRESS_ENDHOST_MAGIC 0x0b9fbeef
#define INGRESS_FWDNS_MAGIC 0x1b9fbeef
#define EGRESS_FWDNS_MAGIC 0x2b9fbeef
enum {
INGRESS_FWDNS_P100,
INGRESS_FWDNS_P101,
EGRESS_FWDNS_P100,
EGRESS_FWDNS_P101,
INGRESS_ENDHOST,
EGRESS_ENDHOST,
SET_DTIME,
__MAX_CNT,
};
enum {
TCP_IP6_CLEAR_DTIME,
TCP_IP4,
TCP_IP6,
UDP_IP4,
UDP_IP6,
TCP_IP4_RT_FWD,
TCP_IP6_RT_FWD,
UDP_IP4_RT_FWD,
UDP_IP6_RT_FWD,
UKN_TEST,
__NR_TESTS,
};
enum {
SRC_NS = 1,
DST_NS,
};
__u32 dtimes[__NR_TESTS][__MAX_CNT] = {};
__u32 errs[__NR_TESTS][__MAX_CNT] = {};
__u32 test = 0;
static void inc_dtimes(__u32 idx)
{
if (test < __NR_TESTS)
dtimes[test][idx]++;
else
dtimes[UKN_TEST][idx]++;
}
static void inc_errs(__u32 idx)
{
if (test < __NR_TESTS)
errs[test][idx]++;
else
errs[UKN_TEST][idx]++;
}
static int skb_proto(int type)
{
return type & 0xff;
}
static int skb_ns(int type)
{
return (type >> 8) & 0xff;
}
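/* Example of the packing produced by skb_get_type() below: a TCP packet sent
 * from the source namespace encodes to (SRC_NS << 8 | IPPROTO_TCP) == 0x106,
 * so skb_proto() recovers 6 and skb_ns() recovers 1.
 */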
static bool fwdns_clear_dtime(void)
{
return test == TCP_IP6_CLEAR_DTIME;
}
static bool bpf_fwd(void)
{
return test < TCP_IP4_RT_FWD;
}
static __u8 get_proto(void)
{
switch (test) {
case UDP_IP4:
case UDP_IP6:
case UDP_IP4_RT_FWD:
case UDP_IP6_RT_FWD:
return IPPROTO_UDP;
default:
return IPPROTO_TCP;
}
}
/* -1: parse error: TC_ACT_SHOT
* 0: not testing traffic: TC_ACT_OK
* >0: first byte is the inet_proto, second byte has the netns
* of the sender
*/
static int skb_get_type(struct __sk_buff *skb)
{
__u16 dst_ns_port = __bpf_htons(50000 + test);
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
__u8 inet_proto = 0, ns = 0;
struct ipv6hdr *ip6h;
__u16 sport, dport;
struct iphdr *iph;
struct tcphdr *th;
struct udphdr *uh;
void *trans;
switch (skb->protocol) {
case __bpf_htons(ETH_P_IP):
iph = data + sizeof(struct ethhdr);
if (iph + 1 > data_end)
return -1;
if (iph->saddr == ip4_src)
ns = SRC_NS;
else if (iph->saddr == ip4_dst)
ns = DST_NS;
inet_proto = iph->protocol;
trans = iph + 1;
break;
case __bpf_htons(ETH_P_IPV6):
ip6h = data + sizeof(struct ethhdr);
if (ip6h + 1 > data_end)
return -1;
if (v6_equal(ip6h->saddr, (struct in6_addr){{ip6_src}}))
ns = SRC_NS;
else if (v6_equal(ip6h->saddr, (struct in6_addr){{ip6_dst}}))
ns = DST_NS;
inet_proto = ip6h->nexthdr;
trans = ip6h + 1;
break;
default:
return 0;
}
	/* The skb is not from src_ns/dst_ns, or it does not carry the
	 * IPPROTO under test.
	 */
if (!ns || inet_proto != get_proto())
return 0;
switch (inet_proto) {
case IPPROTO_TCP:
th = trans;
if (th + 1 > data_end)
return -1;
sport = th->source;
dport = th->dest;
break;
case IPPROTO_UDP:
uh = trans;
if (uh + 1 > data_end)
return -1;
sport = uh->source;
dport = uh->dest;
break;
default:
return 0;
}
/* The skb is the testing traffic */
if ((ns == SRC_NS && dport == dst_ns_port) ||
(ns == DST_NS && sport == dst_ns_port))
return (ns << 8 | inet_proto);
return 0;
}
/* format: direction@iface@netns
* egress@veth_(src|dst)@ns_(src|dst)
*/
SEC("tc")
int egress_host(struct __sk_buff *skb)
{
int skb_type;
skb_type = skb_get_type(skb);
if (skb_type == -1)
return TC_ACT_SHOT;
if (!skb_type)
return TC_ACT_OK;
if (skb_proto(skb_type) == IPPROTO_TCP) {
if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO &&
skb->tstamp)
inc_dtimes(EGRESS_ENDHOST);
else
inc_errs(EGRESS_ENDHOST);
} else {
if (skb->tstamp_type == BPF_SKB_TSTAMP_UNSPEC &&
skb->tstamp)
inc_dtimes(EGRESS_ENDHOST);
else
inc_errs(EGRESS_ENDHOST);
}
skb->tstamp = EGRESS_ENDHOST_MAGIC;
return TC_ACT_OK;
}
/* ingress@veth_(src|dst)@ns_(src|dst) */
SEC("tc")
int ingress_host(struct __sk_buff *skb)
{
int skb_type;
skb_type = skb_get_type(skb);
if (skb_type == -1)
return TC_ACT_SHOT;
if (!skb_type)
return TC_ACT_OK;
if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO &&
skb->tstamp == EGRESS_FWDNS_MAGIC)
inc_dtimes(INGRESS_ENDHOST);
else
inc_errs(INGRESS_ENDHOST);
return TC_ACT_OK;
}
/* ingress@veth_(src|dst)_fwd@ns_fwd priority 100 */
SEC("tc")
int ingress_fwdns_prio100(struct __sk_buff *skb)
{
int skb_type;
skb_type = skb_get_type(skb);
if (skb_type == -1)
return TC_ACT_SHOT;
if (!skb_type)
return TC_ACT_OK;
	/* The delivery_time is only available at ingress if the tc-bpf
	 * program checks skb->tstamp_type.
	 */
if (skb->tstamp == EGRESS_ENDHOST_MAGIC)
inc_errs(INGRESS_FWDNS_P100);
if (fwdns_clear_dtime())
skb->tstamp = 0;
return TC_ACT_UNSPEC;
}
/* egress@veth_(src|dst)_fwd@ns_fwd priority 100 */
SEC("tc")
int egress_fwdns_prio100(struct __sk_buff *skb)
{
int skb_type;
skb_type = skb_get_type(skb);
if (skb_type == -1)
return TC_ACT_SHOT;
if (!skb_type)
return TC_ACT_OK;
	/* The delivery_time is always available at egress, even if the
	 * tc-bpf program did not check skb->tstamp_type.
	 */
if (skb->tstamp == INGRESS_FWDNS_MAGIC)
inc_dtimes(EGRESS_FWDNS_P100);
else
inc_errs(EGRESS_FWDNS_P100);
if (fwdns_clear_dtime())
skb->tstamp = 0;
return TC_ACT_UNSPEC;
}
/* ingress@veth_(src|dst)_fwd@ns_fwd priority 101 */
SEC("tc")
int ingress_fwdns_prio101(struct __sk_buff *skb)
{
__u64 expected_dtime = EGRESS_ENDHOST_MAGIC;
int skb_type;
skb_type = skb_get_type(skb);
if (skb_type == -1 || !skb_type)
/* Should have handled in prio100 */
return TC_ACT_SHOT;
if (skb_proto(skb_type) == IPPROTO_UDP)
expected_dtime = 0;
if (skb->tstamp_type) {
if (fwdns_clear_dtime() ||
skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO ||
skb->tstamp != expected_dtime)
inc_errs(INGRESS_FWDNS_P101);
else
inc_dtimes(INGRESS_FWDNS_P101);
} else {
if (!fwdns_clear_dtime() && expected_dtime)
inc_errs(INGRESS_FWDNS_P101);
}
if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) {
skb->tstamp = INGRESS_FWDNS_MAGIC;
} else {
if (bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
BPF_SKB_TSTAMP_DELIVERY_MONO))
inc_errs(SET_DTIME);
if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
BPF_SKB_TSTAMP_UNSPEC))
inc_errs(SET_DTIME);
}
if (skb_ns(skb_type) == SRC_NS)
return bpf_fwd() ?
bpf_redirect_neigh(IFINDEX_DST, NULL, 0, 0) : TC_ACT_OK;
else
return bpf_fwd() ?
bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0) : TC_ACT_OK;
}
/* egress@veth_(src|dst)_fwd@ns_fwd priority 101 */
SEC("tc")
int egress_fwdns_prio101(struct __sk_buff *skb)
{
int skb_type;
skb_type = skb_get_type(skb);
if (skb_type == -1 || !skb_type)
/* Should have handled in prio100 */
return TC_ACT_SHOT;
if (skb->tstamp_type) {
if (fwdns_clear_dtime() ||
skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO ||
skb->tstamp != INGRESS_FWDNS_MAGIC)
inc_errs(EGRESS_FWDNS_P101);
else
inc_dtimes(EGRESS_FWDNS_P101);
} else {
if (!fwdns_clear_dtime())
inc_errs(EGRESS_FWDNS_P101);
}
if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) {
skb->tstamp = EGRESS_FWDNS_MAGIC;
} else {
if (bpf_skb_set_tstamp(skb, EGRESS_FWDNS_MAGIC,
BPF_SKB_TSTAMP_DELIVERY_MONO))
inc_errs(SET_DTIME);
if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
BPF_SKB_TSTAMP_UNSPEC))
inc_errs(SET_DTIME);
}
return TC_ACT_OK;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_tc_dtime.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/prevent_map_lookup.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} map_stacktrace SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 8);
__uint(key_size, sizeof(int));
__array(values, void (void));
} map_prog2_socket SEC(".maps");
SEC("perf_event")
__description("prevent map lookup in stack trace")
__failure __msg("cannot pass map_type 7 into func bpf_map_lookup_elem")
__naked void map_lookup_in_stack_trace(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_stacktrace] ll; \
call %[bpf_map_lookup_elem]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_stacktrace)
: __clobber_all);
}
SEC("socket")
__description("prevent map lookup in prog array")
__failure __msg("cannot pass map_type 3 into func bpf_map_lookup_elem")
__failure_unpriv
__naked void map_lookup_in_prog_array(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_prog2_socket] ll; \
call %[bpf_map_lookup_elem]; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_prog2_socket)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <stddef.h>
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
int valid = 0;
int required_size_out = 0;
int written_stack_out = 0;
int written_global_out = 0;
struct {
__u64 _a;
__u64 _b;
__u64 _c;
} fpbe[30] = {0};
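/* The layout above mirrors struct perf_branch_entry (three __u64s: from, to,
 * and the flags bitfields), leaving room for 30 records in global memory.
 */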
SEC("perf_event")
int perf_branches(void *ctx)
{
__u64 entries[4 * 3] = {0};
int required_size, written_stack, written_global;
/* write to stack */
written_stack = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
/* ignore spurious events */
if (!written_stack)
return 1;
/* get required size */
required_size = bpf_read_branch_records(ctx, NULL, 0,
BPF_F_GET_BRANCH_RECORDS_SIZE);
written_global = bpf_read_branch_records(ctx, fpbe, sizeof(fpbe), 0);
/* ignore spurious events */
if (!written_global)
return 1;
required_size_out = required_size;
written_stack_out = written_stack;
written_global_out = written_global;
valid = 1;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_perf_branches.c |
#include "core_reloc_types.h"
void f(struct core_reloc_enumval___err_missing x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___err_missing.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 5);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
SEC("tc")
int classifier_0(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 1);
return 0;
}
SEC("tc")
int classifier_1(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 2);
return 1;
}
SEC("tc")
int classifier_2(struct __sk_buff *skb)
{
return 2;
}
SEC("tc")
int classifier_3(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 4);
return 3;
}
SEC("tc")
int classifier_4(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 3);
return 4;
}
SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
/* Check multi-prog update. */
bpf_tail_call_static(skb, &jmp_table, 2);
/* Check tail call limit. */
bpf_tail_call_static(skb, &jmp_table, 3);
return 3;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/tailcall2.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/bounds_deduction.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("socket")
__description("check deducing bounds from const, 1")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_1(void)
{
asm volatile (" \
r0 = 1; \
if r0 s>= 1 goto l0_%=; \
l0_%=: r0 -= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 2")
__success __failure_unpriv
__msg_unpriv("R1 has pointer with unsupported alu operation")
__retval(1)
__naked void deducing_bounds_from_const_2(void)
{
asm volatile (" \
r0 = 1; \
if r0 s>= 1 goto l0_%=; \
exit; \
l0_%=: if r0 s<= 1 goto l1_%=; \
exit; \
l1_%=: r1 -= r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 3")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_3(void)
{
asm volatile (" \
r0 = 0; \
if r0 s<= 0 goto l0_%=; \
l0_%=: r0 -= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 4")
__success __failure_unpriv
__msg_unpriv("R6 has pointer with unsupported alu operation")
__retval(0)
__naked void deducing_bounds_from_const_4(void)
{
asm volatile (" \
r6 = r1; \
r0 = 0; \
if r0 s<= 0 goto l0_%=; \
exit; \
l0_%=: if r0 s>= 0 goto l1_%=; \
exit; \
l1_%=: r6 -= r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 5")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_5(void)
{
asm volatile (" \
r0 = 0; \
if r0 s>= 1 goto l0_%=; \
r0 -= r1; \
l0_%=: exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 6")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_6(void)
{
asm volatile (" \
r0 = 0; \
if r0 s>= 0 goto l0_%=; \
exit; \
l0_%=: r0 -= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 7")
__failure __msg("dereference of modified ctx ptr")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void deducing_bounds_from_const_7(void)
{
asm volatile (" \
r0 = %[__imm_0]; \
if r0 s>= 0 goto l0_%=; \
l0_%=: r1 -= r0; \
r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
exit; \
" :
: __imm_const(__imm_0, ~0),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 8")
__failure __msg("negative offset ctx ptr R1 off=-1 disallowed")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void deducing_bounds_from_const_8(void)
{
asm volatile (" \
r0 = %[__imm_0]; \
if r0 s>= 0 goto l0_%=; \
r1 += r0; \
l0_%=: r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
exit; \
" :
: __imm_const(__imm_0, ~0),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 9")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_9(void)
{
asm volatile (" \
r0 = 0; \
if r0 s>= 0 goto l0_%=; \
l0_%=: r0 -= r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from const, 10")
__failure
__msg("math between ctx pointer and register with unbounded min value is not allowed")
__failure_unpriv
__naked void deducing_bounds_from_const_10(void)
{
asm volatile (" \
r0 = 0; \
if r0 s<= 0 goto l0_%=; \
l0_%=: /* Marks reg as unknown. */ \
r0 = -r0; \
r0 -= r1; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"
/* BTF load should fail as bpf_rb_root __contains this type and points to
* 'node', but 'node' is not a bpf_rb_node
*/
struct node_data {
int key;
int data;
struct bpf_list_node node;
};
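/* For reference, a well-formed node for this rbtree root would embed a
 * struct bpf_rb_node instead (a sketch of the passing variant):
 *
 *	struct node_data {
 *		int key;
 *		int data;
 *		struct bpf_rb_node node;
 *	};
 */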
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);
SEC("tc")
long rbtree_api_add__wrong_node_type(void *ctx)
{
struct node_data *n;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
bpf_spin_lock(&glock);
bpf_rbtree_first(&groot);
bpf_spin_unlock(&glock);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/uninit.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
SEC("socket")
__description("read uninitialized register")
__failure __msg("R2 !read_ok")
__failure_unpriv
__naked void read_uninitialized_register(void)
{
asm volatile (" \
r0 = r2; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("read invalid register")
__failure __msg("R15 is invalid")
__failure_unpriv
__naked void read_invalid_register(void)
{
asm volatile (" \
.8byte %[mov64_reg]; \
exit; \
" :
: __imm_insn(mov64_reg, BPF_MOV64_REG(BPF_REG_0, -1))
: __clobber_all);
}
SEC("socket")
__description("program doesn't init R0 before exit")
__failure __msg("R0 !read_ok")
__failure_unpriv
__naked void t_init_r0_before_exit(void)
{
asm volatile (" \
r2 = r1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("program doesn't init R0 before exit in all branches")
__failure __msg("R0 !read_ok")
__msg_unpriv("R1 pointer comparison")
__naked void before_exit_in_all_branches(void)
{
asm volatile (" \
if r1 >= 0 goto l0_%=; \
r0 = 1; \
r0 += 2; \
l0_%=: exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_uninit.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/lwt.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("lwt_in")
__description("invalid direct packet write for LWT_IN")
__failure __msg("cannot write into packet")
__naked void packet_write_for_lwt_in(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
*(u8*)(r2 + 0) = r2; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("lwt_out")
__description("invalid direct packet write for LWT_OUT")
__failure __msg("cannot write into packet")
__naked void packet_write_for_lwt_out(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
*(u8*)(r2 + 0) = r2; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("lwt_xmit")
__description("direct packet write for LWT_XMIT")
__success __retval(0)
__naked void packet_write_for_lwt_xmit(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
*(u8*)(r2 + 0) = r2; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("lwt_in")
__description("direct packet read for LWT_IN")
__success __retval(0)
__naked void packet_read_for_lwt_in(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("lwt_out")
__description("direct packet read for LWT_OUT")
__success __retval(0)
__naked void packet_read_for_lwt_out(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("lwt_xmit")
__description("direct packet read for LWT_XMIT")
__success __retval(0)
__naked void packet_read_for_lwt_xmit(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("lwt_xmit")
__description("overlapping checks for direct packet access")
__success __retval(0)
__naked void checks_for_direct_packet_access(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r1 = r2; \
r1 += 6; \
if r1 > r3 goto l0_%=; \
r0 = *(u16*)(r2 + 6); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("lwt_xmit")
__description("make headroom for LWT_XMIT")
__success __retval(0)
__naked void make_headroom_for_lwt_xmit(void)
{
asm volatile (" \
r6 = r1; \
r2 = 34; \
r3 = 0; \
call %[bpf_skb_change_head]; \
/* split for s390 to succeed */ \
r1 = r6; \
r2 = 42; \
r3 = 0; \
call %[bpf_skb_change_head]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_skb_change_head)
: __clobber_all);
}
SEC("socket")
__description("invalid access of tc_classid for LWT_IN")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_lwt_in(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \
exit; \
" :
: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
: __clobber_all);
}
SEC("socket")
__description("invalid access of tc_classid for LWT_OUT")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_lwt_out(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \
exit; \
" :
: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
: __clobber_all);
}
SEC("socket")
__description("invalid access of tc_classid for LWT_XMIT")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_lwt_xmit(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \
exit; \
" :
: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
: __clobber_all);
}
SEC("lwt_in")
__description("check skb->tc_classid half load not permitted for lwt prog")
__failure __msg("invalid bpf_context access")
__naked void not_permitted_for_lwt_prog(void)
{
asm volatile (
"r0 = 0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
"r0 = *(u16*)(r1 + %[__sk_buff_tc_classid]);"
#else
"r0 = *(u16*)(r1 + %[__imm_0]);"
#endif
"exit;"
:
: __imm_const(__imm_0, offsetof(struct __sk_buff, tc_classid) + 2),
__imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_lwt.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Cloudflare Ltd.
// Copyright (c) 2020 Isovalent, Inc.
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_misc.h"
#if defined(IPROUTE2_HAVE_LIBBPF)
/* Use a new-style map definition. */
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__type(key, int);
__type(value, __u64);
__uint(pinning, LIBBPF_PIN_BY_NAME);
__uint(max_entries, 1);
} server_map SEC(".maps");
#else
/* Pin map under /sys/fs/bpf/tc/globals/<map name> */
#define PIN_GLOBAL_NS 2
/* Must match struct bpf_elf_map layout from iproute2 */
struct {
__u32 type;
__u32 size_key;
__u32 size_value;
__u32 max_elem;
__u32 flags;
__u32 id;
__u32 pinning;
} server_map SEC("maps") = {
.type = BPF_MAP_TYPE_SOCKMAP,
.size_key = sizeof(int),
.size_value = sizeof(__u64),
.max_elem = 1,
.pinning = PIN_GLOBAL_NS,
};
#endif
char _license[] SEC("license") = "GPL";
/* Fill 'tuple' with L3 info, and attempt to find L4. On failure, return NULL. */
static inline struct bpf_sock_tuple *
get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
struct bpf_sock_tuple *result;
struct ethhdr *eth;
__u8 proto = 0;
__u64 ihl_len;
eth = (struct ethhdr *)(data);
if (eth + 1 > data_end)
return NULL;
if (eth->h_proto == bpf_htons(ETH_P_IP)) {
struct iphdr *iph = (struct iphdr *)(data + sizeof(*eth));
if (iph + 1 > data_end)
return NULL;
if (iph->ihl != 5)
/* Options are not supported */
return NULL;
ihl_len = iph->ihl * 4;
proto = iph->protocol;
*ipv4 = true;
result = (struct bpf_sock_tuple *)&iph->saddr;
} else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + sizeof(*eth));
if (ip6h + 1 > data_end)
return NULL;
ihl_len = sizeof(*ip6h);
proto = ip6h->nexthdr;
*ipv4 = false;
result = (struct bpf_sock_tuple *)&ip6h->saddr;
} else {
return (struct bpf_sock_tuple *)data;
}
if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
return NULL;
*tcp = (proto == IPPROTO_TCP);
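	/* __sink() (a bpf_misc.h macro) keeps ihl_len alive so the compiler
	 * does not optimize the otherwise-unused computation away.
	 */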
__sink(ihl_len);
return result;
}
static inline int
handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
struct bpf_sock *sk;
const int zero = 0;
size_t tuple_len;
__be16 dport;
int ret;
tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
if ((void *)tuple + tuple_len > (void *)(long)skb->data_end)
return TC_ACT_SHOT;
sk = bpf_sk_lookup_udp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
if (sk)
goto assign;
dport = ipv4 ? tuple->ipv4.dport : tuple->ipv6.dport;
if (dport != bpf_htons(4321))
return TC_ACT_OK;
sk = bpf_map_lookup_elem(&server_map, &zero);
if (!sk)
return TC_ACT_SHOT;
assign:
ret = bpf_sk_assign(skb, sk, 0);
bpf_sk_release(sk);
return ret;
}
static inline int
handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
struct bpf_sock *sk;
const int zero = 0;
size_t tuple_len;
__be16 dport;
int ret;
tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
if ((void *)tuple + tuple_len > (void *)(long)skb->data_end)
return TC_ACT_SHOT;
sk = bpf_skc_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
if (sk) {
if (sk->state != BPF_TCP_LISTEN)
goto assign;
bpf_sk_release(sk);
}
dport = ipv4 ? tuple->ipv4.dport : tuple->ipv6.dport;
if (dport != bpf_htons(4321))
return TC_ACT_OK;
sk = bpf_map_lookup_elem(&server_map, &zero);
if (!sk)
return TC_ACT_SHOT;
if (sk->state != BPF_TCP_LISTEN) {
bpf_sk_release(sk);
return TC_ACT_SHOT;
}
assign:
ret = bpf_sk_assign(skb, sk, 0);
bpf_sk_release(sk);
return ret;
}
SEC("tc")
int bpf_sk_assign_test(struct __sk_buff *skb)
{
struct bpf_sock_tuple *tuple;
bool ipv4 = false;
bool tcp = false;
int ret = 0;
tuple = get_tuple(skb, &ipv4, &tcp);
if (!tuple)
return TC_ACT_SHOT;
/* Note that the verifier socket return type for bpf_skc_lookup_tcp()
* differs from bpf_sk_lookup_udp(), so even though the C-level type is
* the same here, if we try to share the implementations they will
* fail to verify because we're crossing pointer types.
*/
if (tcp)
ret = handle_tcp(skb, tuple, ipv4);
else
ret = handle_udp(skb, tuple, ipv4);
return ret == 0 ? TC_ACT_OK : TC_ACT_SHOT;
}
| linux-master | tools/testing/selftests/bpf/progs/test_sk_assign.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
extern int LINUX_KERNEL_VERSION __kconfig;
/* this weak extern is resolved as strict (non-weak) due to the other file's strong extern */
extern bool CONFIG_BPF_SYSCALL __kconfig __weak;
extern const void bpf_link_fops __ksym __weak;
int input_bss1;
int input_data1 = 1;
const volatile int input_rodata1 = 11;
int input_bss_weak __weak;
/* these two definitions should win */
int input_data_weak __weak = 10;
const volatile int input_rodata_weak __weak = 100;
extern int input_bss2;
extern int input_data2;
extern const int input_rodata2;
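/* The three externs above are resolved at static-linking time against strong
 * definitions in the companion object of this test pair (linked_vars2.c).
 */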
int output_bss1;
int output_data1;
int output_rodata1;
long output_sink1;
static __noinline int get_bss_res(void)
{
/* just make sure all the relocations work against .text as well */
return input_bss1 + input_bss2 + input_bss_weak;
}
SEC("raw_tp/sys_enter")
int BPF_PROG(handler1)
{
output_bss1 = get_bss_res();
output_data1 = input_data1 + input_data2 + input_data_weak;
output_rodata1 = input_rodata1 + input_rodata2 + input_rodata_weak;
	/* make sure we actually use the special externs above; otherwise the
	 * compiler will optimize them out
	 */
output_sink1 = LINUX_KERNEL_VERSION
+ CONFIG_BPF_SYSCALL
+ (long)&bpf_link_fops;
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/linked_vars1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Google LLC. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* The format string is filled in from userspace so that loading fails */
const char fmt[10];
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
unsigned long long arg = 42;
bpf_snprintf(NULL, 0, fmt, &arg, sizeof(arg));
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_snprintf_single.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/pkt_cls.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include "test_iptunnel_common.h"
#include <bpf/bpf_endian.h>
#include "bpf_kfuncs.h"
static __always_inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
}
/* Copy-paste of jhash from kernel sources, to make sure llvm
 * can compile it into a valid sequence of BPF instructions
 */
#define __jhash_mix(a, b, c) \
{ \
a -= c; a ^= rol32(c, 4); c += b; \
b -= a; b ^= rol32(a, 6); a += c; \
c -= b; c ^= rol32(b, 8); b += a; \
a -= c; a ^= rol32(c, 16); c += b; \
b -= a; b ^= rol32(a, 19); a += c; \
c -= b; c ^= rol32(b, 4); b += a; \
}
#define __jhash_final(a, b, c) \
{ \
c ^= b; c -= rol32(b, 14); \
a ^= c; a -= rol32(c, 11); \
b ^= a; b -= rol32(a, 25); \
c ^= b; c -= rol32(b, 16); \
a ^= c; a -= rol32(c, 4); \
b ^= a; b -= rol32(a, 14); \
c ^= b; c -= rol32(b, 24); \
}
#define JHASH_INITVAL 0xdeadbeef
typedef unsigned int u32;
static __noinline u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
const unsigned char *k = key;
a = b = c = JHASH_INITVAL + length + initval;
while (length > 12) {
a += *(u32 *)(k);
b += *(u32 *)(k + 4);
c += *(u32 *)(k + 8);
__jhash_mix(a, b, c);
length -= 12;
k += 12;
}
switch (length) {
case 12: c += (u32)k[11]<<24;
case 11: c += (u32)k[10]<<16;
case 10: c += (u32)k[9]<<8;
case 9: c += k[8];
case 8: b += (u32)k[7]<<24;
case 7: b += (u32)k[6]<<16;
case 6: b += (u32)k[5]<<8;
case 5: b += k[4];
case 4: a += (u32)k[3]<<24;
case 3: a += (u32)k[2]<<16;
case 2: a += (u32)k[1]<<8;
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
break;
}
return c;
}
static __noinline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
b += initval;
c += initval;
__jhash_final(a, b, c);
return c;
}
static __noinline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
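/* 65343 == 0xff3f == htons(0x3fff) on a little-endian host, i.e. the IP_MF
 * bit plus the fragment-offset mask applied to iph->frag_off in network order.
 */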
#define PCKT_FRAGMENTED 65343
#define IPV4_HDR_LEN_NO_OPT 20
#define IPV4_PLUS_ICMP_HDR 28
#define IPV6_PLUS_ICMP_HDR 48
#define RING_SIZE 2
#define MAX_VIPS 12
#define MAX_REALS 5
#define CTL_MAP_SIZE 16
#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE)
#define F_IPV6 (1 << 0)
#define F_HASH_NO_SRC_PORT (1 << 0)
#define F_ICMP (1 << 0)
#define F_SYN_SET (1 << 1)
struct packet_description {
union {
__be32 src;
__be32 srcv6[4];
};
union {
__be32 dst;
__be32 dstv6[4];
};
union {
__u32 ports;
__u16 port16[2];
};
__u8 proto;
__u8 flags;
};
struct ctl_value {
union {
__u64 value;
__u32 ifindex;
__u8 mac[6];
};
};
struct vip_meta {
__u32 flags;
__u32 vip_num;
};
struct real_definition {
union {
__be32 dst;
__be32 dstv6[4];
};
__u8 flags;
};
struct vip_stats {
__u64 bytes;
__u64 pkts;
};
struct eth_hdr {
unsigned char eth_dest[ETH_ALEN];
unsigned char eth_source[ETH_ALEN];
unsigned short eth_proto;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, MAX_VIPS);
__type(key, struct vip);
__type(value, struct vip_meta);
} vip_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, CH_RINGS_SIZE);
__type(key, __u32);
__type(value, __u32);
} ch_rings SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, MAX_REALS);
__type(key, __u32);
__type(value, struct real_definition);
} reals SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, MAX_VIPS);
__type(key, __u32);
__type(value, struct vip_stats);
} stats SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, CTL_MAP_SIZE);
__type(key, __u32);
__type(value, struct ctl_value);
} ctl_array SEC(".maps");
static __noinline __u32 get_packet_hash(struct packet_description *pckt, bool ipv6)
{
if (ipv6)
return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
pckt->ports, CH_RINGS_SIZE);
else
return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
}
static __noinline bool get_packet_dst(struct real_definition **real,
struct packet_description *pckt,
struct vip_meta *vip_info,
bool is_ipv6)
{
__u32 hash = get_packet_hash(pckt, is_ipv6);
__u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE;
__u32 *real_pos;
if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
return false;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
return false;
key = *real_pos;
*real = bpf_map_lookup_elem(&reals, &key);
if (!(*real))
return false;
return true;
}
static __noinline int parse_icmpv6(struct bpf_dynptr *skb_ptr, __u64 off,
struct packet_description *pckt)
{
__u8 buffer[sizeof(struct ipv6hdr)] = {};
struct icmp6hdr *icmp_hdr;
struct ipv6hdr *ip6h;
icmp_hdr = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
if (!icmp_hdr)
return TC_ACT_SHOT;
if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG)
return TC_ACT_OK;
off += sizeof(struct icmp6hdr);
ip6h = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
if (!ip6h)
return TC_ACT_SHOT;
pckt->proto = ip6h->nexthdr;
pckt->flags |= F_ICMP;
memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16);
memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16);
return TC_ACT_UNSPEC;
}
static __noinline int parse_icmp(struct bpf_dynptr *skb_ptr, __u64 off,
struct packet_description *pckt)
{
__u8 buffer_icmp[sizeof(struct iphdr)] = {};
__u8 buffer_ip[sizeof(struct iphdr)] = {};
struct icmphdr *icmp_hdr;
struct iphdr *iph;
icmp_hdr = bpf_dynptr_slice(skb_ptr, off, buffer_icmp, sizeof(buffer_icmp));
if (!icmp_hdr)
return TC_ACT_SHOT;
if (icmp_hdr->type != ICMP_DEST_UNREACH ||
icmp_hdr->code != ICMP_FRAG_NEEDED)
return TC_ACT_OK;
off += sizeof(struct icmphdr);
iph = bpf_dynptr_slice(skb_ptr, off, buffer_ip, sizeof(buffer_ip));
if (!iph || iph->ihl != 5)
return TC_ACT_SHOT;
pckt->proto = iph->protocol;
pckt->flags |= F_ICMP;
pckt->src = iph->daddr;
pckt->dst = iph->saddr;
return TC_ACT_UNSPEC;
}
static __noinline bool parse_udp(struct bpf_dynptr *skb_ptr, __u64 off,
struct packet_description *pckt)
{
__u8 buffer[sizeof(struct udphdr)] = {};
struct udphdr *udp;
udp = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
if (!udp)
return false;
if (!(pckt->flags & F_ICMP)) {
pckt->port16[0] = udp->source;
pckt->port16[1] = udp->dest;
} else {
pckt->port16[0] = udp->dest;
pckt->port16[1] = udp->source;
}
return true;
}
static __noinline bool parse_tcp(struct bpf_dynptr *skb_ptr, __u64 off,
struct packet_description *pckt)
{
__u8 buffer[sizeof(struct tcphdr)] = {};
struct tcphdr *tcp;
tcp = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
if (!tcp)
return false;
if (tcp->syn)
pckt->flags |= F_SYN_SET;
if (!(pckt->flags & F_ICMP)) {
pckt->port16[0] = tcp->source;
pckt->port16[1] = tcp->dest;
} else {
pckt->port16[0] = tcp->dest;
pckt->port16[1] = tcp->source;
}
return true;
}
static __noinline int process_packet(struct bpf_dynptr *skb_ptr,
struct eth_hdr *eth, __u64 off,
bool is_ipv6, struct __sk_buff *skb)
{
struct packet_description pckt = {};
struct bpf_tunnel_key tkey = {};
struct vip_stats *data_stats;
struct real_definition *dst;
struct vip_meta *vip_info;
struct ctl_value *cval;
__u32 v4_intf_pos = 1;
__u32 v6_intf_pos = 2;
struct ipv6hdr *ip6h;
struct vip vip = {};
struct iphdr *iph;
int tun_flag = 0;
__u16 pkt_bytes;
__u64 iph_len;
__u32 ifindex;
__u8 protocol;
__u32 vip_num;
int action;
tkey.tunnel_ttl = 64;
if (is_ipv6) {
__u8 buffer[sizeof(struct ipv6hdr)] = {};
ip6h = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
if (!ip6h)
return TC_ACT_SHOT;
iph_len = sizeof(struct ipv6hdr);
protocol = ip6h->nexthdr;
pckt.proto = protocol;
pkt_bytes = bpf_ntohs(ip6h->payload_len);
off += iph_len;
if (protocol == IPPROTO_FRAGMENT) {
return TC_ACT_SHOT;
} else if (protocol == IPPROTO_ICMPV6) {
action = parse_icmpv6(skb_ptr, off, &pckt);
if (action >= 0)
return action;
off += IPV6_PLUS_ICMP_HDR;
} else {
memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16);
memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16);
}
} else {
__u8 buffer[sizeof(struct iphdr)] = {};
iph = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
if (!iph || iph->ihl != 5)
return TC_ACT_SHOT;
protocol = iph->protocol;
pckt.proto = protocol;
pkt_bytes = bpf_ntohs(iph->tot_len);
off += IPV4_HDR_LEN_NO_OPT;
if (iph->frag_off & PCKT_FRAGMENTED)
return TC_ACT_SHOT;
if (protocol == IPPROTO_ICMP) {
action = parse_icmp(skb_ptr, off, &pckt);
if (action >= 0)
return action;
off += IPV4_PLUS_ICMP_HDR;
} else {
pckt.src = iph->saddr;
pckt.dst = iph->daddr;
}
}
protocol = pckt.proto;
if (protocol == IPPROTO_TCP) {
if (!parse_tcp(skb_ptr, off, &pckt))
return TC_ACT_SHOT;
} else if (protocol == IPPROTO_UDP) {
if (!parse_udp(skb_ptr, off, &pckt))
return TC_ACT_SHOT;
} else {
return TC_ACT_SHOT;
}
if (is_ipv6)
memcpy(vip.daddr.v6, pckt.dstv6, 16);
else
vip.daddr.v4 = pckt.dst;
vip.dport = pckt.port16[1];
vip.protocol = pckt.proto;
vip_info = bpf_map_lookup_elem(&vip_map, &vip);
if (!vip_info) {
vip.dport = 0;
vip_info = bpf_map_lookup_elem(&vip_map, &vip);
if (!vip_info)
return TC_ACT_SHOT;
pckt.port16[1] = 0;
}
if (vip_info->flags & F_HASH_NO_SRC_PORT)
pckt.port16[0] = 0;
if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6))
return TC_ACT_SHOT;
if (dst->flags & F_IPV6) {
cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos);
if (!cval)
return TC_ACT_SHOT;
ifindex = cval->ifindex;
memcpy(tkey.remote_ipv6, dst->dstv6, 16);
tun_flag = BPF_F_TUNINFO_IPV6;
} else {
cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos);
if (!cval)
return TC_ACT_SHOT;
ifindex = cval->ifindex;
tkey.remote_ipv4 = dst->dst;
}
vip_num = vip_info->vip_num;
data_stats = bpf_map_lookup_elem(&stats, &vip_num);
if (!data_stats)
return TC_ACT_SHOT;
data_stats->pkts++;
data_stats->bytes += pkt_bytes;
bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
*(u32 *)eth->eth_dest = tkey.remote_ipv4;
return bpf_redirect(ifindex, 0);
}
SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
__u8 buffer[sizeof(struct eth_hdr)] = {};
struct bpf_dynptr ptr;
struct eth_hdr *eth;
__u32 eth_proto;
__u32 nh_off;
int err;
nh_off = sizeof(struct eth_hdr);
bpf_dynptr_from_skb(ctx, 0, &ptr);
eth = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
if (!eth)
return TC_ACT_SHOT;
eth_proto = eth->eth_proto;
if (eth_proto == bpf_htons(ETH_P_IP))
err = process_packet(&ptr, eth, nh_off, false, ctx);
else if (eth_proto == bpf_htons(ETH_P_IPV6))
err = process_packet(&ptr, eth, nh_off, true, ctx);
else
return TC_ACT_SHOT;
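	/* bpf_dynptr_slice_rdwr() may return a pointer either into the skb
	 * itself or into the local buffer (e.g. for non-linear skbs). In the
	 * latter case process_packet() only edited the copy, so flush the
	 * ethernet header back with bpf_dynptr_write().
	 */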
if (eth == buffer)
bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
return err;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Google LLC. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
__u32 pid = 0;
char num_out[64] = {};
long num_ret = 0;
char ip_out[64] = {};
long ip_ret = 0;
char sym_out[64] = {};
long sym_ret = 0;
char addr_out[64] = {};
long addr_ret = 0;
char str_out[64] = {};
long str_ret = 0;
char over_out[6] = {};
long over_ret = 0;
char pad_out[10] = {};
long pad_ret = 0;
char noarg_out[64] = {};
long noarg_ret = 0;
long nobuf_ret = 0;
extern const void schedule __ksym;
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
/* Convenient values to pretty-print */
const __u8 ex_ipv4[] = {127, 0, 0, 1};
const __u8 ex_ipv6[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
static const char str1[] = "str1";
static const char longstr[] = "longstr";
if ((int)bpf_get_current_pid_tgid() != pid)
return 0;
/* Integer types */
num_ret = BPF_SNPRINTF(num_out, sizeof(num_out),
"%d %u %x %li %llu %lX",
-8, 9, 150, -424242, 1337, 0xDABBAD00);
/* IP addresses */
ip_ret = BPF_SNPRINTF(ip_out, sizeof(ip_out), "%pi4 %pI6",
&ex_ipv4, &ex_ipv6);
/* Symbol lookup formatting */
sym_ret = BPF_SNPRINTF(sym_out, sizeof(sym_out), "%ps %pS %pB",
&schedule, &schedule, &schedule);
/* Kernel pointers */
addr_ret = BPF_SNPRINTF(addr_out, sizeof(addr_out), "%pK %px %p",
0, 0xFFFF00000ADD4E55, 0xFFFF00000ADD4E55);
/* Strings and single-byte character embedding */
str_ret = BPF_SNPRINTF(str_out, sizeof(str_out), "%s % 9c %+2c %-3c %04c %0c %+05s",
str1, 'a', 'b', 'c', 'd', 'e', longstr);
/* Overflow */
over_ret = BPF_SNPRINTF(over_out, sizeof(over_out), "%%overflow");
/* Padding of fixed width numbers */
pad_ret = BPF_SNPRINTF(pad_out, sizeof(pad_out), "%5d %0900000X", 4, 4);
/* No args */
noarg_ret = BPF_SNPRINTF(noarg_out, sizeof(noarg_out), "simple case");
/* No buffer */
nobuf_ret = BPF_SNPRINTF(NULL, 0, "only interested in length %d", 60);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_snprintf.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/value_ptr_arith.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <errno.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct test_val);
} map_array_48b SEC(".maps");
struct other_val {
long long foo;
long long bar;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct other_val);
} map_hash_16b SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
SEC("socket")
__description("map access: known scalar += value_ptr unknown vs const")
__success __failure_unpriv
__msg_unpriv("R1 tried to add from different maps, paths or scalars")
__retval(1)
__naked void value_ptr_unknown_vs_const(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_len]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
if r0 == 1 goto l0_%=; \
r1 = %[map_hash_16b] ll; \
if r0 != 1 goto l1_%=; \
l0_%=: r1 = %[map_array_48b] ll; \
l1_%=: call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r4 = *(u8*)(r0 + 0); \
if r4 == 1 goto l3_%=; \
r1 = 6; \
r1 = -r1; \
r1 &= 0x7; \
goto l4_%=; \
l3_%=: r1 = 3; \
l4_%=: r1 += r0; \
r0 = *(u8*)(r1 + 0); \
l2_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_addr(map_hash_16b),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
: __clobber_all);
}
SEC("socket")
__description("map access: known scalar += value_ptr const vs unknown")
__success __failure_unpriv
__msg_unpriv("R1 tried to add from different maps, paths or scalars")
__retval(1)
__naked void value_ptr_const_vs_unknown(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_len]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
if r0 == 1 goto l0_%=; \
r1 = %[map_hash_16b] ll; \
if r0 != 1 goto l1_%=; \
l0_%=: r1 = %[map_array_48b] ll; \
l1_%=: call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r4 = *(u8*)(r0 + 0); \
if r4 == 1 goto l3_%=; \
r1 = 3; \
goto l4_%=; \
l3_%=: r1 = 6; \
r1 = -r1; \
r1 &= 0x7; \
l4_%=: r1 += r0; \
r0 = *(u8*)(r1 + 0); \
l2_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_addr(map_hash_16b),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
: __clobber_all);
}
SEC("socket")
__description("map access: known scalar += value_ptr const vs const (ne)")
__success __failure_unpriv
__msg_unpriv("R1 tried to add from different maps, paths or scalars")
__retval(1)
__naked void ptr_const_vs_const_ne(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_len]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
if r0 == 1 goto l0_%=; \
r1 = %[map_hash_16b] ll; \
if r0 != 1 goto l1_%=; \
l0_%=: r1 = %[map_array_48b] ll; \
l1_%=: call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r4 = *(u8*)(r0 + 0); \
if r4 == 1 goto l3_%=; \
r1 = 3; \
goto l4_%=; \
l3_%=: r1 = 5; \
l4_%=: r1 += r0; \
r0 = *(u8*)(r1 + 0); \
l2_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_addr(map_hash_16b),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
: __clobber_all);
}
SEC("socket")
__description("map access: known scalar += value_ptr const vs const (eq)")
__success __success_unpriv __retval(1)
__naked void ptr_const_vs_const_eq(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_len]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
if r0 == 1 goto l0_%=; \
r1 = %[map_hash_16b] ll; \
if r0 != 1 goto l1_%=; \
l0_%=: r1 = %[map_array_48b] ll; \
l1_%=: call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r4 = *(u8*)(r0 + 0); \
if r4 == 1 goto l3_%=; \
r1 = 5; \
goto l4_%=; \
l3_%=: r1 = 5; \
l4_%=: r1 += r0; \
r0 = *(u8*)(r1 + 0); \
l2_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_addr(map_hash_16b),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
: __clobber_all);
}
SEC("socket")
__description("map access: known scalar += value_ptr unknown vs unknown (eq)")
__success __success_unpriv __retval(1)
__naked void ptr_unknown_vs_unknown_eq(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_len]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
if r0 == 1 goto l0_%=; \
r1 = %[map_hash_16b] ll; \
if r0 != 1 goto l1_%=; \
l0_%=: r1 = %[map_array_48b] ll; \
l1_%=: call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r4 = *(u8*)(r0 + 0); \
if r4 == 1 goto l3_%=; \
r1 = 6; \
r1 = -r1; \
r1 &= 0x7; \
goto l4_%=; \
l3_%=: r1 = 6; \
r1 = -r1; \
r1 &= 0x7; \
l4_%=: r1 += r0; \
r0 = *(u8*)(r1 + 0); \
l2_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_addr(map_hash_16b),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
: __clobber_all);
}
SEC("socket")
__description("map access: known scalar += value_ptr unknown vs unknown (lt)")
__success __failure_unpriv
__msg_unpriv("R1 tried to add from different maps, paths or scalars")
__retval(1)
__naked void ptr_unknown_vs_unknown_lt(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_len]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
if r0 == 1 goto l0_%=; \
r1 = %[map_hash_16b] ll; \
if r0 != 1 goto l1_%=; \
l0_%=: r1 = %[map_array_48b] ll; \
l1_%=: call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r4 = *(u8*)(r0 + 0); \
if r4 == 1 goto l3_%=; \
r1 = 6; \
r1 = -r1; \
r1 &= 0x3; \
goto l4_%=; \
l3_%=: r1 = 6; \
r1 = -r1; \
r1 &= 0x7; \
l4_%=: r1 += r0; \
r0 = *(u8*)(r1 + 0); \
l2_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_addr(map_hash_16b),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
: __clobber_all);
}
SEC("socket")
__description("map access: known scalar += value_ptr unknown vs unknown (gt)")
__success __failure_unpriv
__msg_unpriv("R1 tried to add from different maps, paths or scalars")
__retval(1)
__naked void ptr_unknown_vs_unknown_gt(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_len]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
if r0 == 1 goto l0_%=; \
r1 = %[map_hash_16b] ll; \
if r0 != 1 goto l1_%=; \
l0_%=: r1 = %[map_array_48b] ll; \
l1_%=: call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r4 = *(u8*)(r0 + 0); \
if r4 == 1 goto l3_%=; \
r1 = 6; \
r1 = -r1; \
r1 &= 0x7; \
goto l4_%=; \
l3_%=: r1 = 6; \
r1 = -r1; \
r1 &= 0x3; \
l4_%=: r1 += r0; \
r0 = *(u8*)(r1 + 0); \
l2_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_addr(map_hash_16b),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
: __clobber_all);
}
SEC("socket")
__description("map access: known scalar += value_ptr from different maps")
__success __success_unpriv __retval(1)
__naked void value_ptr_from_different_maps(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_len]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
if r0 == 1 goto l0_%=; \
r1 = %[map_hash_16b] ll; \
if r0 != 1 goto l1_%=; \
l0_%=: r1 = %[map_array_48b] ll; \
l1_%=: call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r1 = 4; \
r1 += r0; \
r0 = *(u8*)(r1 + 0); \
l2_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_addr(map_hash_16b),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr -= known scalar from different maps")
__success __failure_unpriv
__msg_unpriv("R0 min value is outside of the allowed memory range")
__retval(1)
__naked void known_scalar_from_different_maps(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_len]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
if r0 == 1 goto l0_%=; \
r1 = %[map_hash_16b] ll; \
if r0 != 1 goto l1_%=; \
l0_%=: r1 = %[map_array_48b] ll; \
l1_%=: call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r1 = 4; \
r0 -= r1; \
r0 += r1; \
r0 = *(u8*)(r0 + 0); \
l2_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_addr(map_hash_16b),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
: __clobber_all);
}
SEC("socket")
__description("map access: known scalar += value_ptr from different maps, but same value properties")
__success __success_unpriv __retval(1)
__naked void maps_but_same_value_properties(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_len]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
if r0 == 1 goto l0_%=; \
r1 = %[map_hash_48b] ll; \
if r0 != 1 goto l1_%=; \
l0_%=: r1 = %[map_array_48b] ll; \
l1_%=: call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l2_%=; \
r1 = 4; \
r1 += r0; \
r0 = *(u8*)(r1 + 0); \
l2_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_addr(map_hash_48b),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
: __clobber_all);
}
SEC("socket")
__description("map access: mixing value pointer and scalar, 1")
__success __failure_unpriv __msg_unpriv("R2 pointer comparison prohibited")
__retval(0)
__naked void value_pointer_and_scalar_1(void)
{
asm volatile (" \
/* load map value pointer into r0 and r2 */ \
r0 = 1; \
r1 = %[map_array_48b] ll; \
r2 = r10; \
r2 += -16; \
r6 = 0; \
*(u64*)(r10 - 16) = r6; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: /* load some number from the map into r1 */ \
r1 = *(u8*)(r0 + 0); \
/* depending on r1, branch: */ \
if r1 != 0 goto l1_%=; \
/* branch A */ \
r2 = r0; \
r3 = 0; \
goto l2_%=; \
l1_%=: /* branch B */ \
r2 = 0; \
r3 = 0x100000; \
l2_%=: /* common instruction */ \
r2 += r3; \
/* depending on r1, branch: */ \
if r1 != 0 goto l3_%=; \
/* branch A */ \
goto l4_%=; \
l3_%=: /* branch B */ \
r0 = 0x13371337; \
/* verifier follows fall-through */ \
if r2 != 0x100000 goto l4_%=; \
r0 = 0; \
exit; \
l4_%=: /* fake-dead code; targeted from branch A to \
* prevent dead code sanitization \
*/ \
r0 = *(u8*)(r0 + 0); \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: mixing value pointer and scalar, 2")
__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'")
__retval(0)
__naked void value_pointer_and_scalar_2(void)
{
asm volatile (" \
/* load map value pointer into r0 and r2 */ \
r0 = 1; \
r1 = %[map_array_48b] ll; \
r2 = r10; \
r2 += -16; \
r6 = 0; \
*(u64*)(r10 - 16) = r6; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: /* load some number from the map into r1 */ \
r1 = *(u8*)(r0 + 0); \
/* depending on r1, branch: */ \
if r1 == 0 goto l1_%=; \
/* branch A */ \
r2 = 0; \
r3 = 0x100000; \
goto l2_%=; \
l1_%=: /* branch B */ \
r2 = r0; \
r3 = 0; \
l2_%=: /* common instruction */ \
r2 += r3; \
/* depending on r1, branch: */ \
if r1 != 0 goto l3_%=; \
/* branch A */ \
goto l4_%=; \
l3_%=: /* branch B */ \
r0 = 0x13371337; \
/* verifier follows fall-through */ \
if r2 != 0x100000 goto l4_%=; \
r0 = 0; \
exit; \
l4_%=: /* fake-dead code; targeted from branch A to \
* prevent dead code sanitization, rejected \
* via branch B however \
*/ \
r0 = *(u8*)(r0 + 0); \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("sanitation: alu with different scalars 1")
__success __success_unpriv __retval(0x100000)
__naked void alu_with_different_scalars_1(void)
{
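/* Both branches feed a different pair of plain scalars into the shared
 * "r2 += r3" below.  Scalar-plus-scalar is always permitted; the
 * __retval() above checks that ALU sanitation does not disturb the
 * runtime result of whichever branch actually executes.
 */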
asm volatile (" \
r0 = 1; \
r1 = %[map_array_48b] ll; \
r2 = r10; \
r2 += -16; \
r6 = 0; \
*(u64*)(r10 - 16) = r6; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r1 = *(u32*)(r0 + 0); \
if r1 == 0 goto l1_%=; \
r2 = 0; \
r3 = 0x100000; \
goto l2_%=; \
l1_%=: r2 = 42; \
r3 = 0x100001; \
l2_%=: r2 += r3; \
r0 = r2; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("sanitation: alu with different scalars 2")
__success __success_unpriv __retval(0)
__naked void alu_with_different_scalars_2(void)
{
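/* bpf_map_delete_elem() on an array map fails with -EINVAL, so r6 and
 * r7 each hold -EINVAL here; their sum plus twice EINVAL must come out
 * to the expected return value of 0.
 */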
asm volatile (" \
r0 = 1; \
r1 = %[map_array_48b] ll; \
r6 = r1; \
r2 = r10; \
r2 += -16; \
r7 = 0; \
*(u64*)(r10 - 16) = r7; \
call %[bpf_map_delete_elem]; \
r7 = r0; \
r1 = r6; \
r2 = r10; \
r2 += -16; \
call %[bpf_map_delete_elem]; \
r6 = r0; \
r8 = r6; \
r8 += r7; \
r0 = r8; \
r0 += %[einval]; \
r0 += %[einval]; \
exit; \
" :
: __imm(bpf_map_delete_elem),
__imm_addr(map_array_48b),
__imm_const(einval, EINVAL)
: __clobber_all);
}
SEC("socket")
__description("sanitation: alu with different scalars 3")
__success __success_unpriv __retval(0)
__naked void alu_with_different_scalars_3(void)
{
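/* Same arithmetic as the previous test, but the two -EINVAL scalars are
 * produced by multiplication rather than by helper return values.
 */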
asm volatile (" \
r0 = %[einval]; \
r0 *= -1; \
r7 = r0; \
r0 = %[einval]; \
r0 *= -1; \
r6 = r0; \
r8 = r6; \
r8 += r7; \
r0 = r8; \
r0 += %[einval]; \
r0 += %[einval]; \
exit; \
" :
: __imm_const(einval, EINVAL)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += known scalar, upper oob arith, test 1")
__success __failure_unpriv
__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
__retval(1)
__naked void upper_oob_arith_test_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 48; \
r0 += r1; \
r0 -= r1; \
r0 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += known scalar, upper oob arith, test 2")
__success __failure_unpriv
__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
__retval(1)
__naked void upper_oob_arith_test_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 49; \
r0 += r1; \
r0 -= r1; \
r0 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += known scalar, upper oob arith, test 3")
__success __success_unpriv __retval(1)
__naked void upper_oob_arith_test_3(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 47; \
r0 += r1; \
r0 -= r1; \
r0 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr -= known scalar, lower oob arith, test 1")
__failure __msg("R0 min value is outside of the allowed memory range")
__failure_unpriv
__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
__naked void lower_oob_arith_test_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 47; \
r0 += r1; \
r1 = 48; \
r0 -= r1; \
r0 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr -= known scalar, lower oob arith, test 2")
__success __failure_unpriv
__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
__retval(1)
__naked void lower_oob_arith_test_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 47; \
r0 += r1; \
r1 = 48; \
r0 -= r1; \
r1 = 1; \
r0 += r1; \
r0 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr -= known scalar, lower oob arith, test 3")
__success __success_unpriv __retval(1)
__naked void lower_oob_arith_test_3(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 47; \
r0 += r1; \
r1 = 47; \
r0 -= r1; \
r0 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: known scalar += value_ptr")
__success __success_unpriv __retval(1)
__naked void access_known_scalar_value_ptr_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 4; \
r1 += r0; \
r0 = *(u8*)(r1 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += known scalar, 1")
__success __success_unpriv __retval(1)
__naked void value_ptr_known_scalar_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 4; \
r0 += r1; \
r1 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += known scalar, 2")
__failure __msg("invalid access to map value")
__failure_unpriv
__naked void value_ptr_known_scalar_2_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 49; \
r0 += r1; \
r1 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += known scalar, 3")
__failure __msg("invalid access to map value")
__failure_unpriv
__naked void value_ptr_known_scalar_3(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = -1; \
r0 += r1; \
r1 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += known scalar, 4")
__success __success_unpriv __retval(1)
__naked void value_ptr_known_scalar_4(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 5; \
r0 += r1; \
r1 = -2; \
r0 += r1; \
r1 = -1; \
r0 += r1; \
r1 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += known scalar, 5")
__success __success_unpriv __retval(0xabcdef12)
__naked void value_ptr_known_scalar_5(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = %[__imm_0]; \
r1 += r0; \
r0 = *(u32*)(r1 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_const(__imm_0, (6 + 1) * sizeof(int))
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += known scalar, 6")
__success __success_unpriv __retval(0xabcdef12)
__naked void value_ptr_known_scalar_6(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = %[__imm_0]; \
r0 += r1; \
r1 = %[__imm_1]; \
r0 += r1; \
r0 = *(u32*)(r0 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_const(__imm_0, (3 + 1) * sizeof(int)),
__imm_const(__imm_1, 3 * sizeof(int))
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += N, value_ptr -= N known scalar")
__success __success_unpriv __retval(0x12345678)
__naked void value_ptr_n_known_scalar(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
w1 = 0x12345678; \
*(u32*)(r0 + 0) = r1; \
r0 += 2; \
r1 = 2; \
r0 -= r1; \
r0 = *(u32*)(r0 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: unknown scalar += value_ptr, 1")
__success __success_unpriv __retval(1)
__naked void unknown_scalar_value_ptr_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u8*)(r0 + 0); \
r1 &= 0xf; \
r1 += r0; \
r0 = *(u8*)(r1 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: unknown scalar += value_ptr, 2")
__success __success_unpriv __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT)
__naked void unknown_scalar_value_ptr_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
r1 &= 31; \
r1 += r0; \
r0 = *(u32*)(r1 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: unknown scalar += value_ptr, 3")
__success __failure_unpriv
__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
__retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT)
__naked void unknown_scalar_value_ptr_3(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = -1; \
r0 += r1; \
r1 = 1; \
r0 += r1; \
r1 = *(u32*)(r0 + 0); \
r1 &= 31; \
r1 += r0; \
r0 = *(u32*)(r1 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: unknown scalar += value_ptr, 4")
__failure __msg("R1 max value is outside of the allowed memory range")
__msg_unpriv("R1 pointer arithmetic of map value goes out of range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void unknown_scalar_value_ptr_4(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 19; \
r0 += r1; \
r1 = *(u32*)(r0 + 0); \
r1 &= 31; \
r1 += r0; \
r0 = *(u32*)(r1 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += unknown scalar, 1")
__success __success_unpriv __retval(1)
__naked void value_ptr_unknown_scalar_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u8*)(r0 + 0); \
r1 &= 0xf; \
r0 += r1; \
r1 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += unknown scalar, 2")
__success __success_unpriv __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT)
__naked void value_ptr_unknown_scalar_2_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
r1 &= 31; \
r0 += r1; \
r0 = *(u32*)(r0 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += unknown scalar, 3")
__success __success_unpriv __retval(1)
__naked void value_ptr_unknown_scalar_3(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r0 + 0); \
r2 = *(u64*)(r0 + 8); \
r3 = *(u64*)(r0 + 16); \
r1 &= 0xf; \
r3 &= 1; \
r3 |= 1; \
if r2 > r3 goto l0_%=; \
r0 += r3; \
r0 = *(u8*)(r0 + 0); \
r0 = 1; \
l1_%=: exit; \
l0_%=: r0 = 2; \
goto l1_%=; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr += value_ptr")
__failure __msg("R0 pointer += pointer prohibited")
__failure_unpriv
__naked void access_value_ptr_value_ptr_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 += r0; \
r1 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: known scalar -= value_ptr")
__failure __msg("R1 tried to subtract pointer from scalar")
__failure_unpriv
__naked void access_known_scalar_value_ptr_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 4; \
r1 -= r0; \
r0 = *(u8*)(r1 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr -= known scalar")
__failure __msg("R0 min value is outside of the allowed memory range")
__failure_unpriv
__naked void access_value_ptr_known_scalar(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 4; \
r0 -= r1; \
r1 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr -= known scalar, 2")
__success __success_unpriv __retval(1)
__naked void value_ptr_known_scalar_2_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 6; \
r2 = 4; \
r0 += r1; \
r0 -= r2; \
r1 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: unknown scalar -= value_ptr")
__failure __msg("R1 tried to subtract pointer from scalar")
__failure_unpriv
__naked void access_unknown_scalar_value_ptr(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u8*)(r0 + 0); \
r1 &= 0xf; \
r1 -= r0; \
r0 = *(u8*)(r1 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr -= unknown scalar")
__failure __msg("R0 min value is negative")
__failure_unpriv
__naked void access_value_ptr_unknown_scalar(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u8*)(r0 + 0); \
r1 &= 0xf; \
r0 -= r1; \
r1 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr -= unknown scalar, 2")
__success __failure_unpriv
__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
__retval(1)
__naked void value_ptr_unknown_scalar_2_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u8*)(r0 + 0); \
r1 &= 0xf; \
r1 |= 0x7; \
r0 += r1; \
r1 = *(u8*)(r0 + 0); \
r1 &= 0x7; \
r0 -= r1; \
r1 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: value_ptr -= value_ptr")
__failure __msg("R0 invalid mem access 'scalar'")
__msg_unpriv("R0 pointer -= pointer prohibited")
__naked void access_value_ptr_value_ptr_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 -= r0; \
r1 = *(u8*)(r0 + 0); \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("socket")
__description("map access: trying to leak tainted dst reg")
__failure __msg("math between map_value pointer and 4294967295 is not allowed")
__failure_unpriv
__naked void to_leak_tainted_dst_reg(void)
{
asm volatile (" \
r0 = 0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r2 = r0; \
w1 = 0xFFFFFFFF; \
w1 = w1; \
r2 -= r1; \
*(u64*)(r0 + 0) = r2; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}
SEC("tc")
__description("32bit pkt_ptr -= scalar")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void _32bit_pkt_ptr_scalar(void)
{
asm volatile (" \
r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r7 = *(u32*)(r1 + %[__sk_buff_data]); \
r6 = r7; \
r6 += 40; \
if r6 > r8 goto l0_%=; \
w4 = w7; \
w6 -= w4; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("32bit scalar -= pkt_ptr")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void _32bit_scalar_pkt_ptr(void)
{
asm volatile (" \
r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r7 = *(u32*)(r1 + %[__sk_buff_data]); \
r6 = r7; \
r6 += 40; \
if r6 > r8 goto l0_%=; \
w4 = w6; \
w4 -= w7; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
int kprobe_res = 0;
/*
* This program will be manually made sleepable on the userspace side
* and should thus be unattachable.
*/
SEC("kprobe/" SYS_PREFIX "sys_nanosleep")
int handle_kprobe_sleepable(struct pt_regs *ctx)
{
kprobe_res = 1;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c |
#include "core_reloc_types.h"
void f(struct core_reloc_bitfields x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
SEC("struct_ops.s/test_2")
__failure __msg("attach to unsupported member test_2 of struct bpf_dummy_ops")
int BPF_PROG(test_unsupported_field_sleepable,
struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
char a3, unsigned long a4)
{
/* Tries to mark an unsleepable field in struct bpf_dummy_ops as sleepable. */
return 0;
}
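/* Wiring the sleepable program above into the test_2 slot is what must
 * be rejected: bpf_dummy_ops does not allow test_2 to be sleepable, so
 * loading fails with the message annotated above.
 */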
SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
.test_1 = NULL,
.test_2 = (void *)test_unsupported_field_sleepable,
.test_sleepable = (void *)NULL,
};
| linux-master | tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("socket")
__description("direct stack access with 32-bit wraparound. test1")
__failure __msg("fp pointer and 2147483647")
__failure_unpriv
__naked void with_32_bit_wraparound_test1(void)
{
asm volatile (" \
r1 = r10; \
r1 += 0x7fffffff; \
r1 += 0x7fffffff; \
w0 = 0; \
*(u8*)(r1 + 0) = r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("direct stack access with 32-bit wraparound. test2")
__failure __msg("fp pointer and 1073741823")
__failure_unpriv
__naked void with_32_bit_wraparound_test2(void)
{
asm volatile (" \
r1 = r10; \
r1 += 0x3fffffff; \
r1 += 0x3fffffff; \
w0 = 0; \
*(u8*)(r1 + 0) = r0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("direct stack access with 32-bit wraparound. test3")
__failure __msg("fp pointer offset 1073741822")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void with_32_bit_wraparound_test3(void)
{
asm volatile (" \
r1 = r10; \
r1 += 0x1fffffff; \
r1 += 0x1fffffff; \
w0 = 0; \
*(u8*)(r1 + 0) = r0; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
static struct prog_test_ref_kfunc __kptr *v;
long total_sum = -1;
SEC("tc")
int test_jit_probe_mem(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
unsigned long zero = 0, sum;
p = bpf_kfunc_call_test_acquire(&zero);
if (!p)
return 1;
p = bpf_kptr_xchg(&v, p);
if (p)
goto release_out;
/* Direct map value access of kptr, should be PTR_UNTRUSTED */
p = v;
if (!p)
return 1;
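/* The hand-written loads below go through the untrusted kptr, so the
 * JIT must emit them as probe-memory (exception-table) accesses; user
 * space verifies the summed fields via total_sum.
 */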
asm volatile (
"r9 = %[p];"
"%[sum] = 0;"
/* r8 = p->a */
"r8 = *(u32 *)(r9 + 0);"
"%[sum] += r8;"
/* r8 = p->b */
"r8 = *(u32 *)(r9 + 4);"
"%[sum] += r8;"
"r9 += 8;"
/* r9 = p->a */
"r9 = *(u32 *)(r9 - 8);"
"%[sum] += r9;"
: [sum] "=r"(sum)
: [p] "r"(p)
: "r8", "r9"
);
total_sum = sum;
return 0;
release_out:
bpf_kfunc_call_test_release(p);
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/jit_probe_mem.c |
#include "core_reloc_types.h"
void f(struct core_reloc_bitfields___bit_sz_change x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "cpumask_common.h"
char _license[] SEC("license") = "GPL";
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(task_newtask,
* TP_PROTO(struct task_struct *p, u64 clone_flags)
*/
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
cpumask = create_cpumask();
__sink(cpumask);
/* cpumask is never released. */
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("NULL pointer passed to trusted arg0")
int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
cpumask = create_cpumask();
/* cpumask is released twice. */
bpf_cpumask_release(cpumask);
bpf_cpumask_release(cpumask);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("must be referenced")
int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
/* Can't acquire a non-struct bpf_cpumask. */
cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr);
__sink(cpumask);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("bpf_cpumask_set_cpu args#1 expected pointer to STRUCT bpf_cpumask")
int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)
{
/* Can't set the CPU of a non-struct bpf_cpumask. */
bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
struct __cpumask_map_value *v;
cpumask = create_cpumask();
if (!cpumask)
return 0;
if (cpumask_map_insert(cpumask))
return 0;
v = cpumask_map_value_lookup();
if (!v)
return 0;
cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
/* cpumask is never released. */
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("NULL pointer passed to trusted arg0")
int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags)
{
/* NULL passed to KF_TRUSTED_ARGS kfunc. */
bpf_cpumask_empty(NULL);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("R2 must be a rcu pointer")
int BPF_PROG(test_global_mask_out_of_rcu, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *local, *prev;
local = create_cpumask();
if (!local)
return 0;
prev = bpf_kptr_xchg(&global_mask, local);
if (prev) {
bpf_cpumask_release(prev);
err = 3;
return 0;
}
bpf_rcu_read_lock();
local = global_mask;
if (!local) {
err = 4;
bpf_rcu_read_unlock();
return 0;
}
bpf_rcu_read_unlock();
/* RCU region is exited before calling KF_RCU kfunc. */
bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("NULL pointer passed to trusted arg1")
int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *local, *prev;
local = create_cpumask();
if (!local)
return 0;
prev = bpf_kptr_xchg(&global_mask, local);
if (prev) {
bpf_cpumask_release(prev);
err = 3;
return 0;
}
bpf_rcu_read_lock();
local = global_mask;
/* No NULL check is performed on global cpumask kptr. */
bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
bpf_rcu_read_unlock();
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to helper arg2")
int BPF_PROG(test_global_mask_rcu_no_null_check, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *prev, *curr;
curr = bpf_cpumask_create();
if (!curr)
return 0;
prev = bpf_kptr_xchg(&global_mask, curr);
if (prev)
bpf_cpumask_release(prev);
bpf_rcu_read_lock();
curr = global_mask;
/* PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU passed to bpf_kptr_xchg() */
prev = bpf_kptr_xchg(&global_mask, curr);
bpf_rcu_read_unlock();
if (prev)
bpf_cpumask_release(prev);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/cpumask_failure.c |
#include "core_reloc_types.h"
void f(struct core_reloc_type_based___incompat x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___incompat.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
*
* Author: Roberto Sassu <[email protected]>
*/
#include "vmlinux.h"
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
extern void bpf_key_put(struct bpf_key *key) __ksym;
extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
struct bpf_dynptr *sig_ptr,
struct bpf_key *trusted_keyring) __ksym;
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 4096);
} ringbuf SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} array_map SEC(".maps");
int err, pid;
char _license[] SEC("license") = "GPL";
SEC("?lsm.s/bpf")
__failure __msg("cannot pass in dynptr at an offset=-8")
int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size)
{
unsigned long val;
return bpf_verify_pkcs7_signature((struct bpf_dynptr *)&val,
(struct bpf_dynptr *)&val, NULL);
}
SEC("?lsm.s/bpf")
__failure __msg("arg#0 expected pointer to stack or dynptr_ptr")
int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size)
{
unsigned long val = 0;
return bpf_verify_pkcs7_signature((struct bpf_dynptr *)val,
(struct bpf_dynptr *)val, NULL);
}
SEC("lsm.s/bpf")
int BPF_PROG(dynptr_data_null, int cmd, union bpf_attr *attr, unsigned int size)
{
struct bpf_key *trusted_keyring;
struct bpf_dynptr ptr;
__u32 *value;
int ret, zero = 0;
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
value = bpf_map_lookup_elem(&array_map, &zero);
if (!value)
return 0;
/* Pass invalid flags. */
ret = bpf_dynptr_from_mem(value, sizeof(*value), ((__u64)~0ULL), &ptr);
if (ret != -EINVAL)
return 0;
trusted_keyring = bpf_lookup_system_key(0);
if (!trusted_keyring)
return 0;
err = bpf_verify_pkcs7_signature(&ptr, &ptr, trusted_keyring);
bpf_key_put(trusted_keyring);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
SEC("xdp")
int _xdp_adjust_tail_grow(struct xdp_md *xdp)
{
int data_len = bpf_xdp_get_buff_len(xdp);
int offset = 0;
/* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
#if defined(__TARGET_ARCH_s390)
int tailroom = 512;
#else
int tailroom = 320;
#endif
/* Data length determines the test case */
if (data_len == 54) { /* sizeof(pkt_v4) */
offset = 4096; /* test too large offset */
} else if (data_len == 74) { /* sizeof(pkt_v6) */
offset = 40;
} else if (data_len == 64) {
offset = 128;
} else if (data_len == 128) {
/* Max tail grow: 4096 - 256 - tailroom = 3520 bytes (with the 320-byte tailroom) */
offset = 4096 - 256 - tailroom - data_len;
} else if (data_len == 9000) {
offset = 10;
} else if (data_len == 9001) {
offset = 4096;
} else {
return XDP_ABORTED; /* No matching test */
}
if (bpf_xdp_adjust_tail(xdp, offset))
return XDP_DROP;
return XDP_TX;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
__u32 count = 0;
__u32 on_cpu = 0xffffffff;
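/* The test runner invokes this program via BPF_PROG_TEST_RUN with a
 * crafted context (task == 0x1234, comm == 0x5678) and checks both the
 * invocation count and the CPU the program ran on.
 */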
SEC("raw_tp/task_rename")
int BPF_PROG(rename, struct task_struct *task, char *comm)
{
count++;
if ((__u64) task == 0x1234ULL && (__u64) comm == 0x5678ULL) {
on_cpu = bpf_get_smp_processor_id();
return (long)task + (long)comm;
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* volatile to force a read */
const volatile int var1;
volatile int var2 = 1;
struct {
int var3_1;
__s64 var3_2;
} var3;
int libout1;
extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
int var4[4];
__weak int var5 SEC(".data");
/* Fully contained within library extern-and-definition */
extern int var6;
int var7 SEC(".data.custom");
int (*fn_ptr)(void);
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 16);
} map1 SEC(".maps");
extern struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 16);
} map2 SEC(".maps");
int lib_routine(void)
{
__u32 key = 1, value = 2;
(void) CONFIG_BPF_SYSCALL;
bpf_map_update_elem(&map2, &key, &value, BPF_ANY);
libout1 = var1 + var2 + var3.var3_1 + var3.var3_2 + var5 + var6;
return libout1;
}
SEC("perf_event")
int lib_perf_handler(struct pt_regs *ctx)
{
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_subskeleton_lib.c |
#include "core_reloc_types.h"
void f(struct core_reloc_bitfields___bitfield_vs_int x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
} ringbuf SEC(".maps");
const volatile int batch_cnt = 0;
const volatile long use_output = 0;
long sample_val = 42;
long dropped __attribute__((aligned(128))) = 0;
const volatile long wakeup_data_size = 0;
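/* Pick notification flags for the ringbuf commit: once at least
 * wakeup_data_size bytes are outstanding, force a consumer wakeup;
 * below that threshold, suppress the default wakeup.
 */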
static __always_inline long get_flags(void)
{
long sz;
if (!wakeup_data_size)
return 0;
sz = bpf_ringbuf_query(&ringbuf, BPF_RB_AVAIL_DATA);
return sz >= wakeup_data_size ? BPF_RB_FORCE_WAKEUP : BPF_RB_NO_WAKEUP;
}
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bench_ringbuf(void *ctx)
{
long *sample, flags;
int i;
if (!use_output) {
for (i = 0; i < batch_cnt; i++) {
sample = bpf_ringbuf_reserve(&ringbuf,
sizeof(sample_val), 0);
if (!sample) {
__sync_add_and_fetch(&dropped, 1);
} else {
*sample = sample_val;
flags = get_flags();
bpf_ringbuf_submit(sample, flags);
}
}
} else {
for (i = 0; i < batch_cnt; i++) {
flags = get_flags();
if (bpf_ringbuf_output(&ringbuf, &sample_val,
sizeof(sample_val), flags))
__sync_add_and_fetch(&dropped, 1);
}
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/ringbuf_bench.c |
/* Copyright (c) 2016,2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_iptunnel_common.h"
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 256);
__type(key, __u32);
__type(value, __u64);
} rxcnt SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, MAX_IPTNL_ENTRIES);
__type(key, struct vip);
__type(value, struct iptnl_info);
} vip2tnl SEC(".maps");
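/* Per-protocol TX counters and the VIP -> tunnel lookup table.  The
 * handlers below encapsulate matching packets (v4-in-v4 or v6-in-v6)
 * and bounce them back out the same interface with XDP_TX.
 */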
static __always_inline void count_tx(__u32 protocol)
{
__u64 *rxcnt_count;
rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol);
if (rxcnt_count)
*rxcnt_count += 1;
}
static __always_inline int get_dport(void *trans_data, void *data_end,
__u8 protocol)
{
struct tcphdr *th;
struct udphdr *uh;
switch (protocol) {
case IPPROTO_TCP:
th = (struct tcphdr *)trans_data;
if (th + 1 > data_end)
return -1;
return th->dest;
case IPPROTO_UDP:
uh = (struct udphdr *)trans_data;
if (uh + 1 > data_end)
return -1;
return uh->dest;
default:
return 0;
}
}
static __always_inline void set_ethhdr(struct ethhdr *new_eth,
const struct ethhdr *old_eth,
const struct iptnl_info *tnl,
__be16 h_proto)
{
memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source));
memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest));
new_eth->h_proto = h_proto;
}
static __always_inline int handle_ipv4(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
struct iptnl_info *tnl;
struct ethhdr *new_eth;
struct ethhdr *old_eth;
struct iphdr *iph = data + sizeof(struct ethhdr);
__u16 *next_iph;
__u16 payload_len;
struct vip vip = {};
int dport;
__u32 csum = 0;
int i;
if (iph + 1 > data_end)
return XDP_DROP;
dport = get_dport(iph + 1, data_end, iph->protocol);
if (dport == -1)
return XDP_DROP;
vip.protocol = iph->protocol;
vip.family = AF_INET;
vip.daddr.v4 = iph->daddr;
vip.dport = dport;
payload_len = bpf_ntohs(iph->tot_len);
tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
/* It only does v4-in-v4 */
if (!tnl || tnl->family != AF_INET)
return XDP_PASS;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
return XDP_DROP;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
iph = data + sizeof(*new_eth);
old_eth = data + sizeof(*iph);
if (new_eth + 1 > data_end ||
old_eth + 1 > data_end ||
iph + 1 > data_end)
return XDP_DROP;
set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IP));
iph->version = 4;
iph->ihl = sizeof(*iph) >> 2;
iph->frag_off = 0;
iph->protocol = IPPROTO_IPIP;
iph->check = 0;
iph->tos = 0;
iph->tot_len = bpf_htons(payload_len + sizeof(*iph));
iph->daddr = tnl->daddr.v4;
iph->saddr = tnl->saddr.v4;
iph->ttl = 8;
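/* Recompute the IPv4 header checksum by hand: sum all 16-bit words of
 * the new header, fold the carry back in once, and store the one's
 * complement.
 */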
next_iph = (__u16 *)iph;
#pragma clang loop unroll(full)
for (i = 0; i < sizeof(*iph) >> 1; i++)
csum += *next_iph++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
count_tx(vip.protocol);
return XDP_TX;
}
static __always_inline int handle_ipv6(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
struct iptnl_info *tnl;
struct ethhdr *new_eth;
struct ethhdr *old_eth;
struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
__u16 payload_len;
struct vip vip = {};
int dport;
if (ip6h + 1 > data_end)
return XDP_DROP;
dport = get_dport(ip6h + 1, data_end, ip6h->nexthdr);
if (dport == -1)
return XDP_DROP;
vip.protocol = ip6h->nexthdr;
vip.family = AF_INET6;
memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr));
vip.dport = dport;
payload_len = ip6h->payload_len;
tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
/* It only does v6-in-v6 */
if (!tnl || tnl->family != AF_INET6)
return XDP_PASS;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
return XDP_DROP;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
ip6h = data + sizeof(*new_eth);
old_eth = data + sizeof(*ip6h);
if (new_eth + 1 > data_end || old_eth + 1 > data_end ||
ip6h + 1 > data_end)
return XDP_DROP;
set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IPV6));
ip6h->version = 6;
ip6h->priority = 0;
memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
ip6h->payload_len = bpf_htons(bpf_ntohs(payload_len) + sizeof(*ip6h));
ip6h->nexthdr = IPPROTO_IPV6;
ip6h->hop_limit = 8;
memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6));
memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6));
count_tx(vip.protocol);
return XDP_TX;
}
SEC("xdp")
int _xdp_tx_iptunnel(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
struct ethhdr *eth = data;
__u16 h_proto;
if (eth + 1 > data_end)
return XDP_DROP;
h_proto = eth->h_proto;
if (h_proto == bpf_htons(ETH_P_IP))
return handle_ipv4(xdp);
else if (h_proto == bpf_htons(ETH_P_IPV6))
return handle_ipv6(xdp);
else
return XDP_DROP;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define TEST_COMM_LEN 16
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
__uint(max_entries, 1);
__type(key, u32);
__type(value, u32);
} cgroup_map SEC(".maps");
char _license[] SEC("license") = "GPL";
SEC("tc")
int test_skb_helpers(struct __sk_buff *skb)
{
struct task_struct *task;
char comm[TEST_COMM_LEN];
__u32 tpid;
task = (struct task_struct *)bpf_get_current_task();
bpf_probe_read_kernel(&tpid, sizeof(tpid), &task->tgid);
bpf_probe_read_kernel_str(&comm, sizeof(comm), &task->comm);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_skb_helpers.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/int_ptr.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("cgroup/sysctl")
__description("ARG_PTR_TO_LONG uninitialized")
__failure __msg("invalid indirect read from stack R4 off -16+0 size 8")
__naked void arg_ptr_to_long_uninitialized(void)
{
asm volatile (" \
/* bpf_strtoul arg1 (buf) */ \
r7 = r10; \
r7 += -8; \
r0 = 0x00303036; \
*(u64*)(r7 + 0) = r0; \
r1 = r7; \
/* bpf_strtoul arg2 (buf_len) */ \
r2 = 4; \
/* bpf_strtoul arg3 (flags) */ \
r3 = 0; \
/* bpf_strtoul arg4 (res) */ \
r7 += -8; \
r4 = r7; \
/* bpf_strtoul() */ \
call %[bpf_strtoul]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_strtoul)
: __clobber_all);
}
SEC("socket")
__description("ARG_PTR_TO_LONG half-uninitialized")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R4 off -16+4 size 8")
__retval(0)
__naked void ptr_to_long_half_uninitialized(void)
{
asm volatile (" \
/* bpf_strtoul arg1 (buf) */ \
r7 = r10; \
r7 += -8; \
r0 = 0x00303036; \
*(u64*)(r7 + 0) = r0; \
r1 = r7; \
/* bpf_strtoul arg2 (buf_len) */ \
r2 = 4; \
/* bpf_strtoul arg3 (flags) */ \
r3 = 0; \
/* bpf_strtoul arg4 (res) */ \
r7 += -8; \
*(u32*)(r7 + 0) = r0; \
r4 = r7; \
/* bpf_strtoul() */ \
call %[bpf_strtoul]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_strtoul)
: __clobber_all);
}
SEC("cgroup/sysctl")
__description("ARG_PTR_TO_LONG misaligned")
__failure __msg("misaligned stack access off (0x0; 0x0)+-20+0 size 8")
__naked void arg_ptr_to_long_misaligned(void)
{
asm volatile (" \
/* bpf_strtoul arg1 (buf) */ \
r7 = r10; \
r7 += -8; \
r0 = 0x00303036; \
*(u64*)(r7 + 0) = r0; \
r1 = r7; \
/* bpf_strtoul arg2 (buf_len) */ \
r2 = 4; \
/* bpf_strtoul arg3 (flags) */ \
r3 = 0; \
/* bpf_strtoul arg4 (res) */ \
r7 += -12; \
r0 = 0; \
*(u32*)(r7 + 0) = r0; \
*(u64*)(r7 + 4) = r0; \
r4 = r7; \
/* bpf_strtoul() */ \
call %[bpf_strtoul]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_strtoul)
: __clobber_all);
}
SEC("cgroup/sysctl")
__description("ARG_PTR_TO_LONG size < sizeof(long)")
__failure __msg("invalid indirect access to stack R4 off=-4 size=8")
__naked void to_long_size_sizeof_long(void)
{
asm volatile (" \
/* bpf_strtoul arg1 (buf) */ \
r7 = r10; \
r7 += -16; \
r0 = 0x00303036; \
*(u64*)(r7 + 0) = r0; \
r1 = r7; \
/* bpf_strtoul arg2 (buf_len) */ \
r2 = 4; \
/* bpf_strtoul arg3 (flags) */ \
r3 = 0; \
/* bpf_strtoul arg4 (res) */ \
r7 += 12; \
*(u32*)(r7 + 0) = r0; \
r4 = r7; \
/* bpf_strtoul() */ \
call %[bpf_strtoul]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_strtoul)
: __clobber_all);
}
SEC("cgroup/sysctl")
__description("ARG_PTR_TO_LONG initialized")
__success
__naked void arg_ptr_to_long_initialized(void)
{
asm volatile (" \
/* bpf_strtoul arg1 (buf) */ \
r7 = r10; \
r7 += -8; \
r0 = 0x00303036; \
*(u64*)(r7 + 0) = r0; \
r1 = r7; \
/* bpf_strtoul arg2 (buf_len) */ \
r2 = 4; \
/* bpf_strtoul arg3 (flags) */ \
r3 = 0; \
/* bpf_strtoul arg4 (res) */ \
r7 += -8; \
*(u64*)(r7 + 0) = r0; \
r4 = r7; \
/* bpf_strtoul() */ \
call %[bpf_strtoul]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_strtoul)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_int_ptr.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/stddef.h>
#include <linux/ipv6.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
SEC("freplace/connect_v4_prog")
int new_connect_v4_prog(struct bpf_sock_addr *ctx)
{
// return value that's in an invalid range
return 255;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c |
#include "core_reloc_types.h"
void f(struct core_reloc_mods___typedefs x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_mods___typedefs.c |
#include "core_reloc_types.h"
void f(struct core_reloc_primitives___diff_func_proto x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_primitives___diff_func_proto.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define HASHMAP_SZ 4194304
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1000);
__type(key, int);
__type(value, int);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
});
} array_of_local_storage_maps SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1000);
__type(key, int);
__type(value, int);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, HASHMAP_SZ);
__type(key, int);
__type(value, int);
});
} array_of_hash_maps SEC(".maps");
long important_hits;
long hits;
/* set from user-space */
const volatile unsigned int use_hashmap;
const volatile unsigned int hashmap_num_keys;
const volatile unsigned int num_maps;
const volatile unsigned int interleave;
struct loop_ctx {
struct task_struct *task;
long loop_hits;
long loop_important_hits;
};
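/* One benchmark iteration: look up an element in the chosen inner map
 * (hash or task-local storage) and bump the hit counters; lookups of
 * map 0 also count as "important" hits.  With interleave set, every map
 * index divisible by 3 triggers an extra lookup of map 0.
 */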
static int do_lookup(unsigned int elem, struct loop_ctx *lctx)
{
void *map, *inner_map;
int idx = 0;
if (use_hashmap)
map = &array_of_hash_maps;
else
map = &array_of_local_storage_maps;
inner_map = bpf_map_lookup_elem(map, &elem);
if (!inner_map)
return -1;
if (use_hashmap) {
idx = bpf_get_prandom_u32() % hashmap_num_keys;
bpf_map_lookup_elem(inner_map, &idx);
} else {
bpf_task_storage_get(inner_map, lctx->task, &idx,
BPF_LOCAL_STORAGE_GET_F_CREATE);
}
lctx->loop_hits++;
if (!elem)
lctx->loop_important_hits++;
return 0;
}
static long loop(u32 index, void *ctx)
{
struct loop_ctx *lctx = (struct loop_ctx *)ctx;
unsigned int map_idx = index % num_maps;
do_lookup(map_idx, lctx);
if (interleave && map_idx % 3 == 0)
do_lookup(0, lctx);
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int get_local(void *ctx)
{
struct loop_ctx lctx;
lctx.task = bpf_get_current_task_btf();
lctx.loop_hits = 0;
lctx.loop_important_hits = 0;
bpf_loop(10000, &loop, &lctx, 0);
__sync_add_and_fetch(&hits, lctx.loop_hits);
__sync_add_and_fetch(&important_hits, lctx.loop_important_hits);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/local_storage_bench.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Red Hat, Inc. */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
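/* Walks every bpf_link in the system and emits each link id as raw
 * bytes through the seq file; user space reads them back from the
 * iterator fd.
 */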
SEC("iter/bpf_link")
int dump_bpf_link(struct bpf_iter__bpf_link *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct bpf_link *link = ctx->link;
int link_id;
if (!link)
return 0;
link_id = link->id;
bpf_seq_write(seq, &link_id, sizeof(link_id));
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper tests for implicit and explicit padding between fields and
* at the end of a struct.
*
* Copyright (c) 2019 Facebook
*/
/* ----- START-EXPECTED-OUTPUT ----- */
struct padded_implicitly {
int a;
long b;
char c;
};
/* ------ END-EXPECTED-OUTPUT ------ */
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct padded_explicitly {
* int a;
* long: 0;
* int b;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct padded_explicitly {
int a;
int: 1; /* algo will emit aligning `long: 0;` here */
int b;
};
/* ----- START-EXPECTED-OUTPUT ----- */
struct padded_a_lot {
int a;
long: 64;
long: 64;
int b;
};
/* ------ END-EXPECTED-OUTPUT ------ */
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct padded_cache_line {
* int a;
* long: 64;
* long: 64;
* long: 64;
* int b;
* long: 64;
* long: 64;
* long: 64;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct padded_cache_line {
int a;
int b __attribute__((aligned(32)));
};
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct zone_padding {
* char x[0];
*};
*
*struct zone {
* int a;
* short b;
* long: 0;
* struct zone_padding __pad__;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct zone_padding {
char x[0];
} __attribute__((__aligned__(8)));
struct zone {
int a;
short b;
struct zone_padding __pad__;
};
/* ----- START-EXPECTED-OUTPUT ----- */
struct padding_wo_named_members {
long: 64;
long: 64;
};
struct padding_weird_1 {
int a;
long: 64;
short: 16;
short b;
};
/* ------ END-EXPECTED-OUTPUT ------ */
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct padding_weird_2 {
* long: 56;
* char a;
* long: 56;
* char b;
* char: 8;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct padding_weird_2 {
int: 32; /* these paddings will be collapsed into `long: 56;` */
short: 16;
char: 8;
char a;
int: 32; /* these paddings will be collapsed into `long: 56;` */
short: 16;
char: 8;
char b;
char: 8;
};
/* ----- START-EXPECTED-OUTPUT ----- */
struct exact_1byte {
char x;
};
struct padded_1byte {
char: 8;
};
struct exact_2bytes {
short x;
};
struct padded_2bytes {
short: 16;
};
struct exact_4bytes {
int x;
};
struct padded_4bytes {
int: 32;
};
struct exact_8bytes {
long x;
};
struct padded_8bytes {
long: 64;
};
struct ff_periodic_effect {
int: 32;
short magnitude;
long: 0;
short phase;
long: 0;
int: 32;
int custom_len;
short *custom_data;
};
struct ib_wc {
long: 64;
long: 64;
int: 32;
int byte_len;
void *qp;
union {} ex;
long: 64;
int slid;
int wc_flags;
long: 64;
char smac[6];
long: 0;
char network_hdr_type;
};
struct acpi_object_method {
long: 64;
char: 8;
char type;
short reference_count;
char flags;
short: 0;
char: 8;
char sync_level;
long: 64;
void *node;
void *aml_start;
union {} dispatch;
long: 64;
int aml_length;
};
struct nested_unpacked {
int x;
};
struct nested_packed {
struct nested_unpacked a;
char c;
} __attribute__((packed));
struct outer_mixed_but_unpacked {
struct nested_packed b1;
short a1;
struct nested_packed b2;
};
/* ------ END-EXPECTED-OUTPUT ------ */
int f(struct {
struct padded_implicitly _1;
struct padded_explicitly _2;
struct padded_a_lot _3;
struct padded_cache_line _4;
struct zone _5;
struct padding_wo_named_members _6;
struct padding_weird_1 _7;
struct padding_weird_2 _8;
struct exact_1byte _100;
struct padded_1byte _101;
struct exact_2bytes _102;
struct padded_2bytes _103;
struct exact_4bytes _104;
struct padded_4bytes _105;
struct exact_8bytes _106;
struct padded_8bytes _107;
struct ff_periodic_effect _200;
struct ib_wc _201;
struct acpi_object_method _202;
struct outer_mixed_but_unpacked _203;
} *_)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("netfilter")
__description("bpf_exit with invalid return code. test1")
__failure __msg("R0 is not a known value")
__naked void with_invalid_return_code_test1(void)
{
asm volatile (" \
r0 = *(u64*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}
SEC("netfilter")
__description("bpf_exit with valid return code. test2")
__success
__naked void with_valid_return_code_test2(void)
{
asm volatile (" \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("netfilter")
__description("bpf_exit with valid return code. test3")
__success
__naked void with_valid_return_code_test3(void)
{
asm volatile (" \
r0 = 1; \
exit; \
" ::: __clobber_all);
}
SEC("netfilter")
__description("bpf_exit with invalid return code. test4")
__failure __msg("R0 has value (0x2; 0x0)")
__naked void with_invalid_return_code_test4(void)
{
asm volatile (" \
r0 = 2; \
exit; \
" ::: __clobber_all);
}
| linux-master | tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
__u64 out__bpf_link_fops = -1;
__u64 out__bpf_link_fops1 = -1;
__u64 out__btf_size = -1;
__u64 out__per_cpu_start = -1;
extern const void bpf_link_fops __ksym;
extern const void __start_BTF __ksym;
extern const void __stop_BTF __ksym;
extern const void __per_cpu_start __ksym;
/* non-existing symbol, weak, default to zero */
extern const void bpf_link_fops1 __ksym __weak;
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
out__bpf_link_fops = (__u64)&bpf_link_fops;
out__btf_size = (__u64)(&__stop_BTF - &__start_BTF);
out__per_cpu_start = (__u64)&__per_cpu_start;
out__bpf_link_fops1 = (__u64)&bpf_link_fops1;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_ksyms.c |
#include "core_reloc_types.h"
void f(struct core_reloc_mods___mod_swap x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_mods___mod_swap.c |
#include "core_reloc_types.h"
void f(struct core_reloc_size___err_ambiguous1 x,
struct core_reloc_size___err_ambiguous2 y) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_size___err_ambiguous.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Linutronix GmbH */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
SEC("tc")
int time_tai(struct __sk_buff *skb)
{
__u64 ts1, ts2;
/* Get TAI timestamps */
ts1 = bpf_ktime_get_tai_ns();
ts2 = bpf_ktime_get_tai_ns();
/* Save TAI timestamps (Note: skb->hwtstamp is read-only) */
skb->tstamp = ts1;
skb->cb[0] = ts2 & 0xffffffff;
skb->cb[1] = ts2 >> 32;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_time_tai.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
__u32 map1_id = 0, map2_id = 0;
__u32 map1_accessed = 0, map2_accessed = 0;
__u64 map1_seqnum = 0, map2_seqnum1 = 0, map2_seqnum2 = 0;
volatile const __u32 print_len;
volatile const __u32 ret1;
SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct bpf_map *map = ctx->map;
__u64 seq_num;
int i, ret = 0;
if (map == (void *)0)
return 0;
/* only dump map1_id and map2_id */
if (map->id != map1_id && map->id != map2_id)
return 0;
seq_num = ctx->meta->seq_num;
if (map->id == map1_id) {
map1_seqnum = seq_num;
map1_accessed++;
}
if (map->id == map2_id) {
if (map2_accessed == 0) {
map2_seqnum1 = seq_num;
if (ret1)
ret = 1;
} else {
map2_seqnum2 = seq_num;
}
map2_accessed++;
}
/* fill seq_file buffer */
for (i = 0; i < print_len; i++)
bpf_seq_write(seq, &seq_num, sizeof(seq_num));
return ret;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} control_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 16384);
__type(key, __u32);
__type(value, __u32);
} stackid_hmap SEC(".maps");
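/* With BPF_F_STACK_BUILD_ID, each stack entry is a struct bpf_stack_build_id
 * (build ID plus file offset) rather than a raw instruction pointer.
 */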
typedef struct bpf_stack_build_id stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 128);
__uint(map_flags, BPF_F_STACK_BUILD_ID);
__type(key, __u32);
__type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 128);
__type(key, __u32);
__type(value, stack_trace_t);
} stack_amap SEC(".maps");
SEC("kprobe/urandom_read_iter")
int oncpu(struct pt_regs *args)
{
__u32 max_len = sizeof(struct bpf_stack_build_id)
* PERF_MAX_STACK_DEPTH;
__u32 key = 0, val = 0, *value_p;
void *stack_p;
value_p = bpf_map_lookup_elem(&control_map, &key);
if (value_p && *value_p)
return 0; /* skip if non-zero *value_p */
/* The size of stackmap and stackid_hmap should be the same */
key = bpf_get_stackid(args, &stackmap, BPF_F_USER_STACK);
if ((int)key >= 0) {
bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
stack_p = bpf_map_lookup_elem(&stack_amap, &key);
if (stack_p)
bpf_get_stack(args, stack_p, max_len,
BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <stdint.h>
#include <string.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
#define TCP_MEM_LOOPS 28 /* because 30 doesn't fit into 512 bytes of stack */
#define MAX_ULONG_STR_LEN 7
#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
{
unsigned char i;
char name[sizeof(tcp_mem_name)];
int ret;
memset(name, 0, sizeof(name));
ret = bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
#pragma clang loop unroll(disable)
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
return 1;
}
SEC("cgroup/sysctl")
int sysctl_tcp_mem(struct bpf_sysctl *ctx)
{
unsigned long tcp_mem[TCP_MEM_LOOPS] = {};
char value[MAX_VALUE_STR_LEN];
unsigned char i, off = 0;
	/* a workaround to prevent the compiler from generating
	 * code the verifier cannot handle yet.
	 */
volatile int ret;
if (ctx->write)
return 0;
if (!is_tcp_mem(ctx))
return 0;
ret = bpf_sysctl_get_current_value(ctx, value, MAX_VALUE_STR_LEN);
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
#pragma clang loop unroll(disable)
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
if (ret <= 0 || ret > MAX_ULONG_STR_LEN)
return 0;
off += ret & MAX_ULONG_STR_LEN;
}
return tcp_mem[0] < tcp_mem[1] && tcp_mem[1] < tcp_mem[2];
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sysctl_loop1.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, int);
__uint(max_entries, 1);
} my_pid_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__type(key, int);
__type(value, int);
} perf_buf_map SEC(".maps");
SEC("tp/raw_syscalls/sys_enter")
int handle_sys_enter(void *ctx)
{
int zero = 0, *my_pid, cur_pid;
int cpu = bpf_get_smp_processor_id();
my_pid = bpf_map_lookup_elem(&my_pid_map, &zero);
if (!my_pid)
return 1;
cur_pid = bpf_get_current_pid_tgid() >> 32;
if (cur_pid != *my_pid)
return 1;
bpf_perf_event_output(ctx, &perf_buf_map, BPF_F_CURRENT_CPU,
&cpu, sizeof(cpu));
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_perf_buffer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
int _version SEC("version") = 1;
SEC("xdp.frags")
int xdp_adjust_frags(struct xdp_md *xdp)
{
__u8 *data_end = (void *)(long)xdp->data_end;
__u8 *data = (void *)(long)xdp->data;
__u8 val[16] = {};
__u32 offset;
int err;
if (data + sizeof(__u32) > data_end)
return XDP_DROP;
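	/* the first 4 bytes of the test packet carry the offset at which
	 * to load and store the 16-byte marker
	 */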
offset = *(__u32 *)data;
err = bpf_xdp_load_bytes(xdp, offset, val, sizeof(val));
if (err < 0)
return XDP_DROP;
if (val[0] != 0xaa || val[15] != 0xaa) /* marker */
return XDP_DROP;
val[0] = 0xbb; /* update the marker */
val[15] = 0xbb;
err = bpf_xdp_store_bytes(xdp, offset, val, sizeof(val));
if (err < 0)
return XDP_DROP;
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_update_frags.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
/* Dummy prog to test TC-BPF API */
SEC("tc")
int cls(struct __sk_buff *skb)
{
return 0;
}
/* Prog to verify tc-bpf without cap_sys_admin and cap_perfmon */
SEC("tcx/ingress")
int pkt_ptr(struct __sk_buff *skb)
{
struct iphdr *iph = (void *)(long)skb->data + sizeof(struct ethhdr);
if ((long)(iph + 1) > (long)skb->data_end)
return 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_tc_bpf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
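/* Statement-expression macro: assigns the enclosing scope's `sk` and `tp`
 * as a side effect and evaluates to `tp` (NULL when `skc` is NULL).
 */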
#define bpf_tcp_sk(skc) ({ \
struct sock_common *_skc = skc; \
sk = NULL; \
tp = NULL; \
if (_skc) { \
tp = bpf_skc_to_tcp_sock(_skc); \
sk = (struct sock *)tp; \
} \
tp; \
})
unsigned short reuse_listen_hport = 0;
unsigned short listen_hport = 0;
char cubic_cc[TCP_CA_NAME_MAX] = "bpf_cubic";
char dctcp_cc[TCP_CA_NAME_MAX] = "bpf_dctcp";
bool random_retry = false;
static bool tcp_cc_eq(const char *a, const char *b)
{
int i;
for (i = 0; i < TCP_CA_NAME_MAX; i++) {
if (a[i] != b[i])
return false;
if (!a[i])
break;
}
return true;
}
SEC("iter/tcp")
int change_tcp_cc(struct bpf_iter__tcp *ctx)
{
char cur_cc[TCP_CA_NAME_MAX];
struct tcp_sock *tp;
struct sock *sk;
if (!bpf_tcp_sk(ctx->sk_common))
return 0;
if (sk->sk_family != AF_INET6 ||
(sk->sk_state != TCP_LISTEN &&
sk->sk_state != TCP_ESTABLISHED) ||
(sk->sk_num != reuse_listen_hport &&
sk->sk_num != listen_hport &&
bpf_ntohs(sk->sk_dport) != listen_hport))
return 0;
if (bpf_getsockopt(tp, SOL_TCP, TCP_CONGESTION,
cur_cc, sizeof(cur_cc)))
return 0;
if (!tcp_cc_eq(cur_cc, cubic_cc))
return 0;
if (random_retry && bpf_get_prandom_u32() % 4 == 1)
return 1;
bpf_setsockopt(tp, SOL_TCP, TCP_CONGESTION, dctcp_cc, sizeof(dctcp_cc));
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_tcp_helpers.h"
enum bpf_linum_array_idx {
EGRESS_LINUM_IDX,
INGRESS_LINUM_IDX,
READ_SK_DST_PORT_LINUM_IDX,
__NR_BPF_LINUM_ARRAY_IDX,
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, __NR_BPF_LINUM_ARRAY_IDX);
__type(key, __u32);
__type(value, __u32);
} linum_map SEC(".maps");
struct bpf_spinlock_cnt {
struct bpf_spin_lock lock;
__u32 cnt;
};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct bpf_spinlock_cnt);
} sk_pkt_out_cnt SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct bpf_spinlock_cnt);
} sk_pkt_out_cnt10 SEC(".maps");
struct bpf_tcp_sock listen_tp = {};
struct sockaddr_in6 srv_sa6 = {};
struct bpf_tcp_sock cli_tp = {};
struct bpf_tcp_sock srv_tp = {};
struct bpf_sock listen_sk = {};
struct bpf_sock srv_sk = {};
struct bpf_sock cli_sk = {};
__u64 parent_cg_id = 0;
__u64 child_cg_id = 0;
__u64 lsndtime = 0;
static bool is_loopback6(__u32 *a6)
{
return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1);
}
static void skcpy(struct bpf_sock *dst,
const struct bpf_sock *src)
{
dst->bound_dev_if = src->bound_dev_if;
dst->family = src->family;
dst->type = src->type;
dst->protocol = src->protocol;
dst->mark = src->mark;
dst->priority = src->priority;
dst->src_ip4 = src->src_ip4;
dst->src_ip6[0] = src->src_ip6[0];
dst->src_ip6[1] = src->src_ip6[1];
dst->src_ip6[2] = src->src_ip6[2];
dst->src_ip6[3] = src->src_ip6[3];
dst->src_port = src->src_port;
dst->dst_ip4 = src->dst_ip4;
dst->dst_ip6[0] = src->dst_ip6[0];
dst->dst_ip6[1] = src->dst_ip6[1];
dst->dst_ip6[2] = src->dst_ip6[2];
dst->dst_ip6[3] = src->dst_ip6[3];
dst->dst_port = src->dst_port;
dst->state = src->state;
}
static void tpcpy(struct bpf_tcp_sock *dst,
const struct bpf_tcp_sock *src)
{
dst->snd_cwnd = src->snd_cwnd;
dst->srtt_us = src->srtt_us;
dst->rtt_min = src->rtt_min;
dst->snd_ssthresh = src->snd_ssthresh;
dst->rcv_nxt = src->rcv_nxt;
dst->snd_nxt = src->snd_nxt;
dst->snd_una = src->snd_una;
dst->mss_cache = src->mss_cache;
dst->ecn_flags = src->ecn_flags;
dst->rate_delivered = src->rate_delivered;
dst->rate_interval_us = src->rate_interval_us;
dst->packets_out = src->packets_out;
dst->retrans_out = src->retrans_out;
dst->total_retrans = src->total_retrans;
dst->segs_in = src->segs_in;
dst->data_segs_in = src->data_segs_in;
dst->segs_out = src->segs_out;
dst->data_segs_out = src->data_segs_out;
dst->lost_out = src->lost_out;
dst->sacked_out = src->sacked_out;
dst->bytes_received = src->bytes_received;
dst->bytes_acked = src->bytes_acked;
}
/* Always return CG_OK so that no pkt will be filtered out */
#define CG_OK 1
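/* Record the failing source line in linum_map so userspace can tell which
 * check bailed out.
 */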
#define RET_LOG() ({ \
linum = __LINE__; \
bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_ANY); \
return CG_OK; \
})
SEC("cgroup_skb/egress")
int egress_read_sock_fields(struct __sk_buff *skb)
{
struct bpf_spinlock_cnt cli_cnt_init = { .lock = {}, .cnt = 0xeB9F };
struct bpf_spinlock_cnt *pkt_out_cnt, *pkt_out_cnt10;
struct bpf_tcp_sock *tp, *tp_ret;
struct bpf_sock *sk, *sk_ret;
__u32 linum, linum_idx;
struct tcp_sock *ktp;
linum_idx = EGRESS_LINUM_IDX;
sk = skb->sk;
if (!sk)
RET_LOG();
	/* Skip traffic that is not under test, and the listening socket,
	 * which is covered by the cgroup_skb/ingress test program.
	 */
if (sk->family != AF_INET6 || !is_loopback6(sk->src_ip6) ||
sk->state == BPF_TCP_LISTEN)
return CG_OK;
if (sk->src_port == bpf_ntohs(srv_sa6.sin6_port)) {
/* Server socket */
sk_ret = &srv_sk;
tp_ret = &srv_tp;
} else if (sk->dst_port == srv_sa6.sin6_port) {
/* Client socket */
sk_ret = &cli_sk;
tp_ret = &cli_tp;
} else {
		/* Not the egress traffic under test */
return CG_OK;
}
/* It must be a fullsock for cgroup_skb/egress prog */
sk = bpf_sk_fullsock(sk);
if (!sk)
RET_LOG();
	/* Not the egress traffic under test */
if (sk->protocol != IPPROTO_TCP)
return CG_OK;
tp = bpf_tcp_sock(sk);
if (!tp)
RET_LOG();
skcpy(sk_ret, sk);
tpcpy(tp_ret, tp);
if (sk_ret == &srv_sk) {
ktp = bpf_skc_to_tcp_sock(sk);
if (!ktp)
RET_LOG();
lsndtime = ktp->lsndtime;
child_cg_id = bpf_sk_cgroup_id(ktp);
if (!child_cg_id)
RET_LOG();
parent_cg_id = bpf_sk_ancestor_cgroup_id(ktp, 2);
if (!parent_cg_id)
RET_LOG();
		/* Userspace has already created it for the srv sk */
pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, ktp, 0, 0);
pkt_out_cnt10 = bpf_sk_storage_get(&sk_pkt_out_cnt10, ktp,
0, 0);
} else {
pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk,
&cli_cnt_init,
BPF_SK_STORAGE_GET_F_CREATE);
pkt_out_cnt10 = bpf_sk_storage_get(&sk_pkt_out_cnt10,
sk, &cli_cnt_init,
BPF_SK_STORAGE_GET_F_CREATE);
}
if (!pkt_out_cnt || !pkt_out_cnt10)
RET_LOG();
	/* Even though both cnt and cnt10 have a lock defined in their BTF,
	 * intentionally one takes the lock while the other does not,
	 * as a test for the spinlock support in BPF_MAP_TYPE_SK_STORAGE.
	 */
pkt_out_cnt->cnt += 1;
bpf_spin_lock(&pkt_out_cnt10->lock);
pkt_out_cnt10->cnt += 10;
bpf_spin_unlock(&pkt_out_cnt10->lock);
return CG_OK;
}
SEC("cgroup_skb/ingress")
int ingress_read_sock_fields(struct __sk_buff *skb)
{
struct bpf_tcp_sock *tp;
__u32 linum, linum_idx;
struct bpf_sock *sk;
linum_idx = INGRESS_LINUM_IDX;
sk = skb->sk;
if (!sk)
RET_LOG();
	/* Not the ingress traffic under test to the server */
if (sk->family != AF_INET6 || !is_loopback6(sk->src_ip6) ||
sk->src_port != bpf_ntohs(srv_sa6.sin6_port))
return CG_OK;
/* Only interested in the listening socket */
if (sk->state != BPF_TCP_LISTEN)
return CG_OK;
/* It must be a fullsock for cgroup_skb/ingress prog */
sk = bpf_sk_fullsock(sk);
if (!sk)
RET_LOG();
tp = bpf_tcp_sock(sk);
if (!tp)
RET_LOG();
skcpy(&listen_sk, sk);
tpcpy(&listen_tp, tp);
return CG_OK;
}
/*
* NOTE: 4-byte load from bpf_sock at dst_port offset is quirky. It
* gets rewritten by the access converter to a 2-byte load for
* backward compatibility. Treating the load result as a be16 value
* makes the code portable across little- and big-endian platforms.
*/
static __noinline bool sk_dst_port__load_word(struct bpf_sock *sk)
{
__u32 *word = (__u32 *)&sk->dst_port;
return word[0] == bpf_htons(0xcafe);
}
static __noinline bool sk_dst_port__load_half(struct bpf_sock *sk)
{
__u16 *half;
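	/* empty asm acts as a compiler barrier, presumably to keep the
	 * half-word load below from being widened or merged (assumption)
	 */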
asm volatile ("");
half = (__u16 *)&sk->dst_port;
return half[0] == bpf_htons(0xcafe);
}
static __noinline bool sk_dst_port__load_byte(struct bpf_sock *sk)
{
__u8 *byte = (__u8 *)&sk->dst_port;
return byte[0] == 0xca && byte[1] == 0xfe;
}
SEC("cgroup_skb/egress")
int read_sk_dst_port(struct __sk_buff *skb)
{
__u32 linum, linum_idx;
struct bpf_sock *sk;
linum_idx = READ_SK_DST_PORT_LINUM_IDX;
sk = skb->sk;
if (!sk)
RET_LOG();
/* Ignore everything but the SYN from the client socket */
if (sk->state != BPF_TCP_SYN_SENT)
return CG_OK;
if (!sk_dst_port__load_word(sk))
RET_LOG();
if (!sk_dst_port__load_half(sk))
RET_LOG();
if (!sk_dst_port__load_byte(sk))
RET_LOG();
return CG_OK;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sock_fields.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper test validating no name versioning happens between
* independent C namespaces (struct/union/enum vs typedef/enum values).
*
* Copyright (c) 2019 Facebook
*/
/* ----- START-EXPECTED-OUTPUT ----- */
struct S {
int S;
int U;
};
typedef struct S S;
union U {
int S;
int U;
};
typedef union U U;
enum E {
V = 0,
};
typedef enum E E;
struct A {};
union B {};
enum C {
A = 1,
B = 2,
C = 3,
};
struct X {};
union Y {};
enum Z;
typedef int X;
typedef int Y;
typedef int Z;
/* ------ END-EXPECTED-OUTPUT ------ */
int f(struct {
struct S _1;
S _2;
union U _3;
U _4;
enum E _5;
E _6;
struct A a;
union B b;
enum C c;
struct X x;
union Y y;
enum Z *z;
X xx;
Y yy;
Z zz;
} *_)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/btf_dump_test_case_namespacing.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
struct S {
int a;
int b;
int c;
};
union U {
int a;
int b;
int c;
};
struct S1 {
int a;
int b;
int c;
};
union U1 {
int a;
int b;
int c;
};
typedef int T;
typedef int S;
typedef int U;
typedef int T1;
typedef int S1;
typedef int U1;
struct root_struct {
S m_1;
T m_2;
U m_3;
S1 m_4;
T1 m_5;
U1 m_6;
struct S m_7;
struct S1 m_8;
union U m_9;
union U1 m_10;
};
int func(struct root_struct *root)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/btf_data.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC.
*/
#include <errno.h>
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include "progs/cg_storage_multi.h"
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, struct cgroup_value);
} cgroup_storage SEC(".maps");
__u32 invocations = 0;
SEC("cgroup_skb/egress")
int egress1(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}
SEC("cgroup_skb/egress")
int egress2(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}
SEC("cgroup_skb/ingress")
int ingress(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->ingress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
struct task_struct___bad {
int pid;
int fake_field;
void *fake_field_subprog;
} __attribute__((preserve_access_index));
SEC("?raw_tp/sys_enter")
int bad_relo(const void *ctx)
{
static struct task_struct___bad *t;
return bpf_core_field_size(t->fake_field);
}
static __noinline int bad_subprog(void)
{
static struct task_struct___bad *t;
/* ugliness below is a field offset relocation */
return (void *)&t->fake_field_subprog - (void *)t;
}
SEC("?raw_tp/sys_enter")
int bad_relo_subprog(const void *ctx)
{
static struct task_struct___bad *t;
return bad_subprog() + bpf_core_field_size(t->pid);
}
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} existing_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} missing_map SEC(".maps");
SEC("?raw_tp/sys_enter")
int use_missing_map(const void *ctx)
{
int zero = 0, *value;
value = bpf_map_lookup_elem(&existing_map, &zero);
value = bpf_map_lookup_elem(&missing_map, &zero);
return value != NULL;
}
extern int bpf_nonexistent_kfunc(void) __ksym __weak;
SEC("?raw_tp/sys_enter")
int use_missing_kfunc(const void *ctx)
{
bpf_nonexistent_kfunc();
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_log_fixup.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
__noinline int foo(struct __sk_buff *skb)
{
return bpf_get_prandom_u32();
}
SEC("cgroup_skb/ingress")
__success
int global_func8(struct __sk_buff *skb)
{
if (!foo(skb))
return 0;
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func8.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdint.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/pkt_cls.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
volatile const __u32 IFINDEX_SRC;
volatile const __u32 IFINDEX_DST;
static const __u8 src_mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
static const __u8 dst_mac[] = {0x00, 0x22, 0x33, 0x44, 0x55, 0x66};
SEC("tc")
int tc_chk(struct __sk_buff *skb)
{
return TC_ACT_SHOT;
}
SEC("tc")
int tc_dst(struct __sk_buff *skb)
{
return bpf_redirect_peer(IFINDEX_SRC, 0);
}
SEC("tc")
int tc_src(struct __sk_buff *skb)
{
return bpf_redirect_peer(IFINDEX_DST, 0);
}
SEC("tc")
int tc_dst_l3(struct __sk_buff *skb)
{
return bpf_redirect(IFINDEX_SRC, 0);
}
SEC("tc")
int tc_src_l3(struct __sk_buff *skb)
{
__u16 proto = skb->protocol;
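	/* rebuild the Ethernet header (presumably stripped by an L3 device)
	 * before redirecting to the peer
	 */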
if (bpf_skb_change_head(skb, ETH_HLEN, 0) != 0)
return TC_ACT_SHOT;
if (bpf_skb_store_bytes(skb, 0, &src_mac, ETH_ALEN, 0) != 0)
return TC_ACT_SHOT;
if (bpf_skb_store_bytes(skb, ETH_ALEN, &dst_mac, ETH_ALEN, 0) != 0)
return TC_ACT_SHOT;
if (bpf_skb_store_bytes(skb, ETH_ALEN + ETH_ALEN, &proto, sizeof(__u16), 0) != 0)
return TC_ACT_SHOT;
return bpf_redirect_peer(IFINDEX_DST, 0);
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_tc_peer.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */
struct sched_switch_args {
unsigned long long pad;
char prev_comm[TASK_COMM_LEN];
int prev_pid;
int prev_prio;
long long prev_state;
char next_comm[TASK_COMM_LEN];
int next_pid;
int next_prio;
};
SEC("tracepoint/sched/sched_switch")
int oncpu(struct sched_switch_args *ctx)
{
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_tracepoint.c |
#include "core_reloc_types.h"
void f(struct core_reloc_arrays___err_too_shallow x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_too_shallow.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/ipv6.h>
#include <linux/bpf.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
struct sk_buff {
unsigned int len;
};
__u64 test_result = 0;
SEC("fexit/test_pkt_access")
int BPF_PROG(test_main, struct sk_buff *skb, int ret)
{
int len;
__builtin_preserve_access_index(({
len = skb->len;
}));
if (len != 74 || ret != 0)
return 0;
test_result = 1;
return 0;
}
__u64 test_result_subprog1 = 0;
SEC("fexit/test_pkt_access_subprog1")
int BPF_PROG(test_subprog1, struct sk_buff *skb, int ret)
{
int len;
__builtin_preserve_access_index(({
len = skb->len;
}));
if (len != 74 || ret != 148)
return 0;
test_result_subprog1 = 1;
return 0;
}
/* Though test_pkt_access_subprog2() is defined in C as:
* static __attribute__ ((noinline))
* int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
* {
* return skb->len * val;
* }
* llvm optimizations remove 'int val' argument and generate BPF assembly:
* r0 = *(u32 *)(r1 + 0)
* w0 <<= 1
* exit
 * In such a case the verifier falls back to conservative mode and the
 * tracing program can access arguments and the return value as u64
 * instead of accurate types.
*/
struct args_subprog2 {
__u64 args[5];
__u64 ret;
};
__u64 test_result_subprog2 = 0;
SEC("fexit/test_pkt_access_subprog2")
int test_subprog2(struct args_subprog2 *ctx)
{
struct sk_buff *skb = (void *)ctx->args[0];
__u64 ret;
int len;
bpf_probe_read_kernel(&len, sizeof(len),
__builtin_preserve_access_index(&skb->len));
ret = ctx->ret;
/* bpf_prog_test_load() loads "test_pkt_access.bpf.o" with
* BPF_F_TEST_RND_HI32 which randomizes upper 32 bits after BPF_ALU32
* insns. Hence after 'w0 <<= 1' upper bits of $rax are random. That is
* expected and correct. Trim them.
*/
ret = (__u32) ret;
if (len != 74 || ret != 148)
return 0;
test_result_subprog2 = 1;
return 0;
}
__u64 test_result_subprog3 = 0;
SEC("fexit/test_pkt_access_subprog3")
int BPF_PROG(test_subprog3, int val, struct sk_buff *skb, int ret)
{
int len;
__builtin_preserve_access_index(({
len = skb->len;
}));
if (len != 74 || ret != 74 * val || val != 3)
return 0;
test_result_subprog3 = 1;
return 0;
}
__u64 test_get_skb_len = 0;
SEC("freplace/get_skb_len")
int new_get_skb_len(struct __sk_buff *skb)
{
int len = skb->len;
if (len != 74)
return 0;
test_get_skb_len = 1;
return 74; /* original get_skb_len() returns skb->len */
}
__u64 test_get_skb_ifindex = 0;
SEC("freplace/get_skb_ifindex")
int new_get_skb_ifindex(int val, struct __sk_buff *skb, int var)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
struct ipv6hdr ip6, *ip6p;
int ifindex = skb->ifindex;
/* check that BPF extension can read packet via direct packet access */
if (data + 14 + sizeof(ip6) > data_end)
return 0;
ip6p = data + 14;
if (ip6p->nexthdr != 6 || ip6p->payload_len != __bpf_constant_htons(123))
return 0;
/* check that legacy packet access helper works too */
if (bpf_skb_load_bytes(skb, 14, &ip6, sizeof(ip6)) < 0)
return 0;
ip6p = &ip6;
if (ip6p->nexthdr != 6 || ip6p->payload_len != __bpf_constant_htons(123))
return 0;
if (ifindex != 1 || val != 3 || var != 1)
return 0;
test_get_skb_ifindex = 1;
return 3; /* original get_skb_ifindex() returns val * ifindex * var */
}
volatile __u64 test_get_constant = 0;
SEC("freplace/get_constant")
int new_get_constant(long val)
{
if (val != 123)
return 0;
test_get_constant = 1;
return test_get_constant; /* original get_constant() returns val - 122 */
}
__u64 test_pkt_write_access_subprog = 0;
SEC("freplace/test_pkt_write_access_subprog")
int new_test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
{
void *data = (void *)(long)skb->data;
void *data_end = (void *)(long)skb->data_end;
struct tcphdr *tcp;
if (off > sizeof(struct ethhdr) + sizeof(struct ipv6hdr))
return -1;
tcp = data + off;
if (tcp + 1 > data_end)
return -1;
/* make modifications to the packet data */
tcp->check++;
tcp->syn = 0;
test_pkt_write_access_subprog = 1;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct net_device {
/* Structure does not need to contain all entries,
* as "preserve_access_index" will use BTF to fix this...
*/
int ifindex;
} __attribute__((preserve_access_index));
struct xdp_rxq_info {
/* Structure does not need to contain all entries,
* as "preserve_access_index" will use BTF to fix this...
*/
struct net_device *dev;
__u32 queue_index;
} __attribute__((preserve_access_index));
struct xdp_buff {
void *data;
void *data_end;
void *data_meta;
void *data_hard_start;
unsigned long handle;
struct xdp_rxq_info *rxq;
} __attribute__((preserve_access_index));
struct meta {
int ifindex;
int pkt_len;
};
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__type(key, int);
__type(value, int);
} perf_buf_map SEC(".maps");
__u64 test_result_fentry = 0;
SEC("fentry/FUNC")
int BPF_PROG(trace_on_entry, struct xdp_buff *xdp)
{
struct meta meta;
meta.ifindex = xdp->rxq->dev->ifindex;
meta.pkt_len = bpf_xdp_get_buff_len((struct xdp_md *)xdp);
bpf_xdp_output(xdp, &perf_buf_map,
((__u64) meta.pkt_len << 32) |
BPF_F_CURRENT_CPU,
&meta, sizeof(meta));
test_result_fentry = xdp->rxq->dev->ifindex;
return 0;
}
__u64 test_result_fexit = 0;
SEC("fexit/FUNC")
int BPF_PROG(trace_on_exit, struct xdp_buff *xdp, int ret)
{
test_result_fexit = ret;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
volatile const char bpf_metadata_a[] SEC(".rodata") = "foo";
volatile const int bpf_metadata_b SEC(".rodata") = 1;
SEC("cgroup_skb/egress")
int prog(struct xdp_md *ctx)
{
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/metadata_unused.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Politecnico di Torino
#define MAP_TYPE BPF_MAP_TYPE_STACK
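/* the shared header expands into the map definition and test programs,
 * parameterized by MAP_TYPE (a BPF stack here)
 */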
#include "test_queue_stack_map.h"
| linux-master | tools/testing/selftests/bpf/progs/test_stack_map.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/map_in_map.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
});
} map_in_map SEC(".maps");
SEC("socket")
__description("map in map access")
__success __success_unpriv __retval(0)
__naked void map_in_map_access(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_in_map] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = r0; \
call %[bpf_map_lookup_elem]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_in_map)
: __clobber_all);
}
SEC("xdp")
__description("map in map state pruning")
__success __msg("processed 26 insns")
__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ)
__naked void map_in_map_state_pruning(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r6 = r10; \
r6 += -4; \
r2 = r6; \
r1 = %[map_in_map] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r2 = r6; \
r1 = r0; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l1_%=; \
r2 = r6; \
r1 = %[map_in_map] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l2_%=; \
exit; \
l2_%=: r2 = r6; \
r1 = r0; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l1_%=; \
exit; \
l1_%=: r0 = *(u32*)(r0 + 0); \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_in_map)
: __clobber_all);
}
SEC("socket")
__description("invalid inner map pointer")
__failure __msg("R1 pointer arithmetic on map_ptr prohibited")
__failure_unpriv
__naked void invalid_inner_map_pointer(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_in_map] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = r0; \
r1 += 8; \
call %[bpf_map_lookup_elem]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_in_map)
: __clobber_all);
}
SEC("socket")
__description("forgot null checking on the inner map pointer")
__failure __msg("R1 type=map_value_or_null expected=map_ptr")
__failure_unpriv
__naked void on_the_inner_map_pointer(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_in_map] ll; \
call %[bpf_map_lookup_elem]; \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = r0; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_in_map)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_map_in_map.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_STACK (512 - 3 * 32)
static __attribute__ ((noinline))
int f0(int var, struct __sk_buff *skb)
{
return skb->len;
}
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
{
volatile char buf[MAX_STACK] = {};
__sink(buf[MAX_STACK - 1]);
return f0(0, skb) + skb->len;
}
int f3(int, struct __sk_buff *skb, int);
__attribute__ ((noinline))
int f2(int val, struct __sk_buff *skb)
{
return f1(skb) + f3(val, skb, 1);
}
__attribute__ ((noinline))
int f3(int val, struct __sk_buff *skb, int var)
{
volatile char buf[MAX_STACK] = {};
__sink(buf[MAX_STACK - 1]);
return skb->ifindex * val * var;
}
SEC("tc")
__success
int global_func2(struct __sk_buff *skb)
{
return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func2.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <stdint.h>
#include <string.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */
#define MAX_ULONG_STR_LEN 0xF
/* Max supported length of sysctl value string (pow2). */
#define MAX_VALUE_STR_LEN 0x40
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
const char tcp_mem_name[] = "net/ipv4/tcp_mem";
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
{
unsigned char i;
char name[sizeof(tcp_mem_name)];
int ret;
memset(name, 0, sizeof(name));
ret = bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
#pragma clang loop unroll(full)
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
return 1;
}
SEC("cgroup/sysctl")
int sysctl_tcp_mem(struct bpf_sysctl *ctx)
{
unsigned long tcp_mem[3] = {0, 0, 0};
char value[MAX_VALUE_STR_LEN];
unsigned char i, off = 0;
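	/* same volatile workaround as in test_sysctl_loop1.c: prevents the
	 * compiler from generating code the verifier cannot handle yet
	 */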
volatile int ret;
if (ctx->write)
return 0;
if (!is_tcp_mem(ctx))
return 0;
ret = bpf_sysctl_get_current_value(ctx, value, MAX_VALUE_STR_LEN);
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
#pragma clang loop unroll(full)
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
if (ret <= 0 || ret > MAX_ULONG_STR_LEN)
return 0;
off += ret & MAX_ULONG_STR_LEN;
}
return tcp_mem[0] < tcp_mem[1] && tcp_mem[1] < tcp_mem[2];
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sysctl_prog.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
__u64 ext_called = 0;
SEC("freplace/test_pkt_md_access")
int test_pkt_md_access_new(struct __sk_buff *skb)
{
ext_called = skb->len;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_trace_ext.c |
#include "core_reloc_types.h"
void f(struct core_reloc_primitives___diff_enum_def x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_primitives___diff_enum_def.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
struct core_reloc_misc_output {
int a, b, c;
};
struct core_reloc_misc___a {
int a1;
int a2;
};
struct core_reloc_misc___b {
int b1;
int b2;
};
/* first two members are fixed; can be extended with new fields */
struct core_reloc_misc_extensible {
int a;
int b;
};
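/* CO-RE-relocatable read of sizeof(*dst) bytes from src into dst */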
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_misc(void *ctx)
{
struct core_reloc_misc___a *in_a = (void *)&data.in;
struct core_reloc_misc___b *in_b = (void *)&data.in;
struct core_reloc_misc_extensible *in_ext = (void *)&data.in;
struct core_reloc_misc_output *out = (void *)&data.out;
/* record two different relocations with the same accessor string */
if (CORE_READ(&out->a, &in_a->a1) || /* accessor: 0:0 */
CORE_READ(&out->b, &in_b->b1)) /* accessor: 0:0 */
return 1;
	/* Validate relocations capture array-only accesses for structs with
	 * a fixed header but a potentially extendable tail. This reads the
	 * first 4 bytes of the element at index 2 of the in_ext array of
	 * potentially variably sized struct core_reloc_misc_extensible. */
if (CORE_READ(&out->c, &in_ext[2])) /* accessor: 2 */
return 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_misc.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/xdp.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("xdp")
__description("XDP, using ifindex from netdev")
__success __retval(1)
__naked void xdp_using_ifindex_from_netdev(void)
{
asm volatile (" \
r0 = 0; \
r2 = *(u32*)(r1 + %[xdp_md_ingress_ifindex]); \
if r2 < 1 goto l0_%=; \
r0 = 1; \
l0_%=: exit; \
" :
: __imm_const(xdp_md_ingress_ifindex, offsetof(struct xdp_md, ingress_ifindex))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_xdp.c |
#include "core_reloc_types.h"
void f(struct core_reloc_primitives___err_non_int x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_primitives___err_non_int.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
#define USEC_PER_SEC 1000000UL
#define min(a, b) ((a) < (b) ? (a) : (b))
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
return (struct tcp_sock *)sk;
}
static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
return tp->sacked_out + tp->lost_out;
}
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
SEC("struct_ops/write_sk_pacing_init")
void BPF_PROG(write_sk_pacing_init, struct sock *sk)
{
#ifdef ENABLE_ATOMICS_TESTS
__sync_bool_compare_and_swap(&sk->sk_pacing_status, SK_PACING_NONE,
SK_PACING_NEEDED);
#else
sk->sk_pacing_status = SK_PACING_NEEDED;
#endif
}
SEC("struct_ops/write_sk_pacing_cong_control")
void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk,
const struct rate_sample *rs)
{
struct tcp_sock *tp = tcp_sk(sk);
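	/* tp->srtt_us is the smoothed RTT stored left-shifted by 3, so the
	 * numerator is scaled by the same amount: rate ~= cwnd * mss / srtt
	 */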
unsigned long rate =
((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) /
(tp->srtt_us ?: 1U << 3);
sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);
tp->app_limited = (tp->delivered + tcp_packets_in_flight(tp)) ?: 1;
}
SEC("struct_ops/write_sk_pacing_ssthresh")
__u32 BPF_PROG(write_sk_pacing_ssthresh, struct sock *sk)
{
return tcp_sk(sk)->snd_ssthresh;
}
SEC("struct_ops/write_sk_pacing_undo_cwnd")
__u32 BPF_PROG(write_sk_pacing_undo_cwnd, struct sock *sk)
{
return tcp_sk(sk)->snd_cwnd;
}
SEC(".struct_ops")
struct tcp_congestion_ops write_sk_pacing = {
.init = (void *)write_sk_pacing_init,
.cong_control = (void *)write_sk_pacing_cong_control,
.ssthresh = (void *)write_sk_pacing_ssthresh,
.undo_cwnd = (void *)write_sk_pacing_undo_cwnd,
.name = "bpf_w_sk_pacing",
};
| linux-master | tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021, Oracle and/or its affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
unsigned int exception_triggered;
int test_pid;
/* TRACE_EVENT(task_newtask,
* TP_PROTO(struct task_struct *p, u64 clone_flags)
*/
SEC("tp_btf/task_newtask")
int BPF_PROG(trace_task_newtask, struct task_struct *task, u64 clone_flags)
{
int pid = bpf_get_current_pid_tgid() >> 32;
struct callback_head *work;
void *func;
if (test_pid != pid)
return 0;
/* To verify we hit an exception we dereference task->task_works->func.
* If task work has been added,
* - task->task_works is non-NULL; and
	 * - task->task_works->func is non-NULL also (the callback function
	 *   must be specified for the task work).
*
* However, for a newly-created task, task->task_works is NULLed,
* so we know the exception handler triggered if task_works is
* NULL and func is NULL.
*/
work = task->task_works;
func = work->func;
	/* Currently the verifier will fail on a `btf_ptr |= btf_ptr`
	 * instruction. To work around the issue, use barrier_var() and
	 * rewrite as below to prevent the compiler from generating
	 * verifier-unfriendly code.
	 */
barrier_var(work);
if (work)
return 0;
barrier_var(func);
if (func)
return 0;
exception_triggered++;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/exhandler_kern.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct sample {
int pid;
int seq;
long value;
char comm[16];
};
struct ringbuf_map {
__uint(type, BPF_MAP_TYPE_RINGBUF);
/* libbpf will adjust to valid page size */
__uint(max_entries, 1000);
} ringbuf1 SEC(".maps"),
ringbuf2 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 4);
__type(key, int);
__array(values, struct ringbuf_map);
} ringbuf_arr SEC(".maps") = {
.values = {
[0] = &ringbuf1,
[2] = &ringbuf2,
},
};
struct {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 1);
__type(key, int);
__array(values, struct ringbuf_map);
} ringbuf_hash SEC(".maps") = {
.values = {
[0] = &ringbuf1,
},
};
/* inputs */
int pid = 0;
int target_ring = 0;
long value = 0;
/* outputs */
long total = 0;
long dropped = 0;
long skipped = 0;
SEC("tp/syscalls/sys_enter_getpgid")
int test_ringbuf(void *ctx)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
struct sample *sample;
void *rb;
if (cur_pid != pid)
return 0;
rb = bpf_map_lookup_elem(&ringbuf_arr, &target_ring);
if (!rb) {
skipped += 1;
return 1;
}
sample = bpf_ringbuf_reserve(rb, sizeof(*sample), 0);
if (!sample) {
dropped += 1;
return 1;
}
sample->pid = pid;
bpf_get_current_comm(sample->comm, sizeof(sample->comm));
sample->value = value;
sample->seq = total;
total += 1;
bpf_ringbuf_submit(sample, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_ringbuf_multi.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 100
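/* pyperf.h expands into the profiler program, sized by STACK_MAX_LEN */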
#include "pyperf.h"
| linux-master | tools/testing/selftests/bpf/progs/pyperf100.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/xadd.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
SEC("tc")
__description("xadd/w check unaligned stack")
__failure __msg("misaligned stack access off")
__naked void xadd_w_check_unaligned_stack(void)
{
asm volatile (" \
r0 = 1; \
*(u64*)(r10 - 8) = r0; \
lock *(u32 *)(r10 - 7) += w0; \
r0 = *(u64*)(r10 - 8); \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("xadd/w check unaligned map")
__failure __msg("misaligned value access off")
__naked void xadd_w_check_unaligned_map(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r1 = 1; \
lock *(u32 *)(r0 + 3) += w1; \
r0 = *(u32*)(r0 + 3); \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("xdp")
__description("xadd/w check unaligned pkt")
__failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void xadd_w_check_unaligned_pkt(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 < r3 goto l0_%=; \
r0 = 99; \
goto l1_%=; \
l0_%=: r0 = 1; \
r1 = 0; \
*(u32*)(r2 + 0) = r1; \
r1 = 0; \
*(u32*)(r2 + 3) = r1; \
lock *(u32 *)(r2 + 1) += w0; \
lock *(u32 *)(r2 + 2) += w0; \
r0 = *(u32*)(r2 + 1); \
l1_%=: exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("tc")
__description("xadd/w check whether src/dst got mangled, 1")
__success __retval(3)
__naked void src_dst_got_mangled_1(void)
{
asm volatile (" \
r0 = 1; \
r6 = r0; \
r7 = r10; \
*(u64*)(r10 - 8) = r0; \
lock *(u64 *)(r10 - 8) += r0; \
lock *(u64 *)(r10 - 8) += r0; \
if r6 != r0 goto l0_%=; \
if r7 != r10 goto l0_%=; \
r0 = *(u64*)(r10 - 8); \
exit; \
l0_%=: r0 = 42; \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("xadd/w check whether src/dst got mangled, 2")
__success __retval(3)
__naked void src_dst_got_mangled_2(void)
{
asm volatile (" \
r0 = 1; \
r6 = r0; \
r7 = r10; \
*(u32*)(r10 - 8) = r0; \
lock *(u32 *)(r10 - 8) += w0; \
lock *(u32 *)(r10 - 8) += w0; \
if r6 != r0 goto l0_%=; \
if r7 != r10 goto l0_%=; \
r0 = *(u32*)(r10 - 8); \
exit; \
l0_%=: r0 = 42; \
exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_xadd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022 Google LLC.
*/
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <netinet/in.h>
#include <sys/socket.h>
/* 2001:db8::1 */
#define BINDADDR_V6 { { { 0x20,0x01,0x0d,0xb8,0,0,0,0,0,0,0,0,0,0,0,1 } } }
__u32 do_bind = 0;
__u32 has_error = 0;
__u32 invocations_v4 = 0;
__u32 invocations_v6 = 0;
SEC("cgroup/connect4")
int connect_v4_prog(struct bpf_sock_addr *ctx)
{
struct sockaddr_in sa = {
.sin_family = AF_INET,
.sin_addr.s_addr = bpf_htonl(0x01010101),
};
__sync_fetch_and_add(&invocations_v4, 1);
if (do_bind && bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
has_error = 1;
return 1;
}
SEC("cgroup/connect6")
int connect_v6_prog(struct bpf_sock_addr *ctx)
{
struct sockaddr_in6 sa = {
.sin6_family = AF_INET6,
.sin6_addr = BINDADDR_V6,
};
__sync_fetch_and_add(&invocations_v6, 1);
if (do_bind && bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
has_error = 1;
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/connect_ping.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include <errno.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 2);
__type(key, __u32);
__type(value, __u64);
} sock_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, 2);
__type(key, __u32);
__type(value, __u64);
} sock_hash SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 2);
__type(key, int);
__type(value, unsigned int);
} verdict_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} parser_map SEC(".maps");
bool test_sockmap = false; /* toggled by user-space */
bool test_ingress = false; /* toggled by user-space */
SEC("sk_skb/stream_parser")
int prog_stream_parser(struct __sk_buff *skb)
{
int *value;
__u32 key = 0;
value = bpf_map_lookup_elem(&parser_map, &key);
if (value && *value)
return *value;
return skb->len;
}
SEC("sk_skb/stream_verdict")
int prog_stream_verdict(struct __sk_buff *skb)
{
unsigned int *count;
__u32 zero = 0;
int verdict;
if (test_sockmap)
verdict = bpf_sk_redirect_map(skb, &sock_map, zero, 0);
else
verdict = bpf_sk_redirect_hash(skb, &sock_hash, &zero, 0);
count = bpf_map_lookup_elem(&verdict_map, &verdict);
if (count)
(*count)++;
return verdict;
}
SEC("sk_skb")
int prog_skb_verdict(struct __sk_buff *skb)
{
unsigned int *count;
__u32 zero = 0;
int verdict;
if (test_sockmap)
verdict = bpf_sk_redirect_map(skb, &sock_map, zero,
test_ingress ? BPF_F_INGRESS : 0);
else
verdict = bpf_sk_redirect_hash(skb, &sock_hash, &zero,
test_ingress ? BPF_F_INGRESS : 0);
count = bpf_map_lookup_elem(&verdict_map, &verdict);
if (count)
(*count)++;
return verdict;
}
SEC("sk_msg")
int prog_msg_verdict(struct sk_msg_md *msg)
{
unsigned int *count;
__u32 zero = 0;
int verdict;
if (test_sockmap)
verdict = bpf_msg_redirect_map(msg, &sock_map, zero, 0);
else
verdict = bpf_msg_redirect_hash(msg, &sock_hash, &zero, 0);
count = bpf_map_lookup_elem(&verdict_map, &verdict);
if (count)
(*count)++;
return verdict;
}
SEC("sk_reuseport")
int prog_reuseport(struct sk_reuseport_md *reuse)
{
unsigned int *count;
int err, verdict;
__u32 zero = 0;
if (test_sockmap)
err = bpf_sk_select_reuseport(reuse, &sock_map, &zero, 0);
else
err = bpf_sk_select_reuseport(reuse, &sock_hash, &zero, 0);
verdict = err ? SK_DROP : SK_PASS;
count = bpf_map_lookup_elem(&verdict_map, &verdict);
if (count)
(*count)++;
return verdict;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sockmap_listen.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "cgrp_kfunc_common.h"
char _license[] SEC("license") = "GPL";
int err, pid, invocations;
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(cgroup_mkdir,
* TP_PROTO(struct cgroup *cgrp, const char *path),
* TP_ARGS(cgrp, path)
*/
static bool is_test_kfunc_task(void)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
bool same = pid == cur_pid;
if (same)
__sync_fetch_and_add(&invocations, 1);
return same;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_acquire_release_argument, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
if (!is_test_kfunc_task())
return 0;
acquired = bpf_cgroup_acquire(cgrp);
if (!acquired)
err = 1;
else
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_acquire_leave_in_map, struct cgroup *cgrp, const char *path)
{
long status;
if (!is_test_kfunc_task())
return 0;
status = cgrps_kfunc_map_insert(cgrp);
if (status)
err = 1;
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr, *cg;
struct __cgrps_kfunc_map_value *v;
long status;
if (!is_test_kfunc_task())
return 0;
status = cgrps_kfunc_map_insert(cgrp);
if (status) {
err = 1;
return 0;
}
v = cgrps_kfunc_map_value_lookup(cgrp);
if (!v) {
err = 2;
return 0;
}
kptr = v->cgrp;
if (!kptr) {
err = 4;
return 0;
}
cg = bpf_cgroup_ancestor(kptr, 1);
if (cg) /* verifier only check */
bpf_cgroup_release(cg);
kptr = bpf_kptr_xchg(&v->cgrp, NULL);
if (!kptr) {
err = 3;
return 0;
}
bpf_cgroup_release(kptr);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_get_release, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
struct __cgrps_kfunc_map_value *v;
long status;
if (!is_test_kfunc_task())
return 0;
status = cgrps_kfunc_map_insert(cgrp);
if (status) {
err = 1;
return 0;
}
v = cgrps_kfunc_map_value_lookup(cgrp);
if (!v) {
err = 2;
return 0;
}
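	/* the cgroup kptr may be read under the RCU read lock without
	 * acquiring a reference
	 */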
bpf_rcu_read_lock();
kptr = v->cgrp;
if (!kptr)
err = 3;
bpf_rcu_read_unlock();
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_get_ancestors, struct cgroup *cgrp, const char *path)
{
struct cgroup *self, *ancestor1, *invalid;
if (!is_test_kfunc_task())
return 0;
self = bpf_cgroup_ancestor(cgrp, cgrp->level);
if (!self) {
err = 1;
return 0;
}
if (self->self.id != cgrp->self.id) {
bpf_cgroup_release(self);
err = 2;
return 0;
}
bpf_cgroup_release(self);
ancestor1 = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
if (!ancestor1) {
err = 3;
return 0;
}
bpf_cgroup_release(ancestor1);
invalid = bpf_cgroup_ancestor(cgrp, 10000);
if (invalid) {
bpf_cgroup_release(invalid);
err = 4;
return 0;
}
invalid = bpf_cgroup_ancestor(cgrp, -1);
if (invalid) {
bpf_cgroup_release(invalid);
err = 5;
return 0;
}
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_from_id, struct cgroup *cgrp, const char *path)
{
struct cgroup *parent, *res;
u64 parent_cgid;
if (!is_test_kfunc_task())
return 0;
/* @cgrp's ID is not visible yet, let's test with the parent */
parent = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
if (!parent) {
err = 1;
return 0;
}
parent_cgid = parent->kn->id;
bpf_cgroup_release(parent);
res = bpf_cgroup_from_id(parent_cgid);
if (!res) {
err = 2;
return 0;
}
bpf_cgroup_release(res);
if (res != parent) {
err = 3;
return 0;
}
res = bpf_cgroup_from_id((u64)-1);
if (res) {
bpf_cgroup_release(res);
err = 4;
return 0;
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#define PT_REGS_SIZE sizeof(struct pt_regs)
/*
* The kernel struct pt_regs isn't exported in its entirety to userspace.
* Pass it as an array to task_pt_regs.c
*/
char current_regs[PT_REGS_SIZE] = {};
char ctx_regs[PT_REGS_SIZE] = {};
int uprobe_res = 0;
SEC("uprobe")
int handle_uprobe(struct pt_regs *ctx)
{
struct task_struct *current;
struct pt_regs *regs;
current = bpf_get_current_task_btf();
regs = (struct pt_regs *) bpf_task_pt_regs(current);
if (bpf_probe_read_kernel(current_regs, PT_REGS_SIZE, regs))
return 0;
if (bpf_probe_read_kernel(ctx_regs, PT_REGS_SIZE, ctx))
return 0;
/* Prove that uprobe was run */
uprobe_res = 1;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_task_pt_regs.c |
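The comment in the file above explains why the registers are exported as raw byte arrays. A hedged sketch of the userspace check this enables, assuming the generated skeleton header test_task_pt_regs.skel.h: once the uprobe has fired, bpf_task_pt_regs(current) and the uprobe's own ctx should describe the same register set byte for byte.

#include <string.h>
#include "test_task_pt_regs.skel.h" /* assumed generated skeleton */

static int regs_match(struct test_task_pt_regs *skel)
{
	if (!skel->bss->uprobe_res)
		return 0; /* uprobe never ran */

	/* both snapshots were read from the same struct pt_regs */
	return memcmp(skel->bss->current_regs, skel->bss->ctx_regs,
		      sizeof(skel->bss->current_regs)) == 0;
}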
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
__u32 sig = 0, pid = 0, status = 0, signal_thread = 0;
static __always_inline int bpf_send_signal_test(void *ctx)
{
int ret;
if (status != 0 || pid == 0)
return 0;
if ((bpf_get_current_pid_tgid() >> 32) == pid) {
if (signal_thread)
ret = bpf_send_signal_thread(sig);
else
ret = bpf_send_signal(sig);
if (ret == 0)
status = 1;
}
return 0;
}
SEC("tracepoint/syscalls/sys_enter_nanosleep")
int send_signal_tp(void *ctx)
{
return bpf_send_signal_test(ctx);
}
SEC("tracepoint/sched/sched_switch")
int send_signal_tp_sched(void *ctx)
{
return bpf_send_signal_test(ctx);
}
SEC("perf_event")
int send_signal_perf(void *ctx)
{
return bpf_send_signal_test(ctx);
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_send_signal_kern.c |
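A hedged loader sketch for the program above, assuming the generated skeleton header test_send_signal_kern.skel.h: the globals sit in .bss, so userspace configures pid and sig through the skeleton, installs a handler so the delivered signal is not fatal, and triggers the nanosleep tracepoint with usleep() (which is commonly implemented via nanosleep).

#include <signal.h>
#include <unistd.h>
#include "test_send_signal_kern.skel.h" /* assumed generated skeleton */

static void on_sig(int signo) { (void)signo; }

int main(void)
{
	struct test_send_signal_kern *skel;
	int ok = 0;

	signal(SIGUSR1, on_sig); /* survive the signal the prog sends us */

	skel = test_send_signal_kern__open_and_load();
	if (!skel)
		return 1;

	skel->bss->pid = getpid();
	skel->bss->sig = SIGUSR1;

	if (test_send_signal_kern__attach(skel))
		goto out;

	usleep(1000); /* enters nanosleep, firing the tracepoint */
	ok = (skel->bss->status == 1);
out:
	test_send_signal_kern__destroy(skel);
	return ok ? 0 : 1;
}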
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC.
*/
#include <errno.h>
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include "progs/cg_storage_multi.h"
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, __u64);
__type(value, struct cgroup_value);
} cgroup_storage SEC(".maps");
__u32 invocations = 0;
SEC("cgroup_skb/egress")
int egress1(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}
SEC("cgroup_skb/egress")
int egress2(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}
SEC("cgroup_skb/ingress")
int ingress(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->ingress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c |
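Because the map above declares a plain __u64 key (the cgroup id) rather than struct bpf_cgroup_storage_key, all three programs share one storage slot per cgroup. A hedged userspace reader, assuming the map fd and cgroup id are obtained elsewhere (e.g. the id via name_to_handle_at() on the cgroup directory):

#include <stdio.h>
#include <bpf/bpf.h>
#include "progs/cg_storage_multi.h" /* same header the BPF side includes */

static void dump_counts(int map_fd, __u64 cgrp_id)
{
	struct cgroup_value val;

	/* one shared entry per cgroup, updated by egress1/egress2/ingress */
	if (!bpf_map_lookup_elem(map_fd, &cgrp_id, &val))
		printf("egress=%llu ingress=%llu\n",
		       (unsigned long long)val.egress_pkts,
		       (unsigned long long)val.ingress_pkts);
}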
#include "core_reloc_types.h"
void f(struct core_reloc_nesting___anon_embed x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting___anon_embed.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Tencent */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
__u64 test1_result = 0;
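/* The bpf_testmod targets are assumed to return the sum of their
 * arguments: 16+17+...+22 == 133 for fentry_test7 below, and
 * 16+17+...+26 == 231 for fentry_test11. */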
SEC("fexit/bpf_testmod_fentry_test7")
int BPF_PROG(test1, __u64 a, void *b, short c, int d, void *e, char f,
int g, int ret)
{
test1_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
e == (void *)20 && f == 21 && g == 22 && ret == 133;
return 0;
}
__u64 test2_result = 0;
SEC("fexit/bpf_testmod_fentry_test11")
int BPF_PROG(test2, __u64 a, void *b, short c, int d, void *e, char f,
int g, unsigned int h, long i, __u64 j, unsigned long k,
int ret)
{
test2_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
e == (void *)20 && f == 21 && g == 22 && h == 23 &&
i == 24 && j == 25 && k == 26 && ret == 231;
return 0;
}
__u64 test3_result = 0;
SEC("fexit/bpf_testmod_fentry_test11")
int BPF_PROG(test3, __u64 a, __u64 b, __u64 c, __u64 d, __u64 e, __u64 f,
__u64 g, __u64 h, __u64 i, __u64 j, __u64 k, __u64 ret)
{
test3_result = a == 16 && b == 17 && c == 18 && d == 19 &&
e == 20 && f == 21 && g == 22 && h == 23 &&
i == 24 && j == 25 && k == 26 && ret == 231;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/fexit_many_args.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
SEC("struct_ops/unsupp_cong_op_get_info")
size_t BPF_PROG(unsupp_cong_op_get_info, struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info)
{
return 0;
}
SEC(".struct_ops")
struct tcp_congestion_ops unsupp_cong_op = {
.get_info = (void *)unsupp_cong_op_get_info,
.name = "bpf_unsupp_op",
};
| linux-master | tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
char _license[] SEC("license") = "GPL";
__u64 uprobe_multi_func_1_addr = 0;
__u64 uprobe_multi_func_2_addr = 0;
__u64 uprobe_multi_func_3_addr = 0;
__u64 uprobe_multi_func_1_result = 0;
__u64 uprobe_multi_func_2_result = 0;
__u64 uprobe_multi_func_3_result = 0;
__u64 uretprobe_multi_func_1_result = 0;
__u64 uretprobe_multi_func_2_result = 0;
__u64 uretprobe_multi_func_3_result = 0;
__u64 uprobe_multi_sleep_result = 0;
int pid = 0;
int child_pid = 0;
bool test_cookie = false;
void *user_ptr = 0;
static __always_inline bool verify_sleepable_user_copy(void)
{
char data[9];
bpf_copy_from_user(data, sizeof(data), user_ptr);
return bpf_strncmp(data, sizeof(data), "test_data") == 0;
}
static void uprobe_multi_check(void *ctx, bool is_return, bool is_sleep)
{
child_pid = bpf_get_current_pid_tgid() >> 32;
if (pid && child_pid != pid)
return;
__u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
__u64 addr = bpf_get_func_ip(ctx);
#define SET(__var, __addr, __cookie) ({ \
if (addr == __addr && \
(!test_cookie || (cookie == __cookie))) \
__var += 1; \
})
if (is_return) {
SET(uretprobe_multi_func_1_result, uprobe_multi_func_1_addr, 2);
SET(uretprobe_multi_func_2_result, uprobe_multi_func_2_addr, 3);
SET(uretprobe_multi_func_3_result, uprobe_multi_func_3_addr, 1);
} else {
SET(uprobe_multi_func_1_result, uprobe_multi_func_1_addr, 3);
SET(uprobe_multi_func_2_result, uprobe_multi_func_2_addr, 1);
SET(uprobe_multi_func_3_result, uprobe_multi_func_3_addr, 2);
}
#undef SET
if (is_sleep && verify_sleepable_user_copy())
uprobe_multi_sleep_result += 1;
}
SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*")
int uprobe(struct pt_regs *ctx)
{
uprobe_multi_check(ctx, false, false);
return 0;
}
SEC("uretprobe.multi//proc/self/exe:uprobe_multi_func_*")
int uretprobe(struct pt_regs *ctx)
{
uprobe_multi_check(ctx, true, false);
return 0;
}
SEC("uprobe.multi.s//proc/self/exe:uprobe_multi_func_*")
int uprobe_sleep(struct pt_regs *ctx)
{
uprobe_multi_check(ctx, false, true);
return 0;
}
SEC("uretprobe.multi.s//proc/self/exe:uprobe_multi_func_*")
int uretprobe_sleep(struct pt_regs *ctx)
{
uprobe_multi_check(ctx, true, true);
return 0;
}
SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*")
int uprobe_extra(struct pt_regs *ctx)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/uprobe_multi.c |
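The SEC strings above resolve the glob uprobe_multi_func_* against the test binary itself (/proc/self/exe). A hedged sketch of what the matching userspace targets could look like; the function bodies are assumptions, only the names come from the file:

/* compiled into the test binary; noinline keeps distinct symbols */
__attribute__((noinline)) int uprobe_multi_func_1(int v) { return v + 1; }
__attribute__((noinline)) int uprobe_multi_func_2(int v) { return v + 2; }
__attribute__((noinline)) int uprobe_multi_func_3(int v) { return v + 3; }

static void trigger_all(void)
{
	/* each call fires both the entry and the return probes */
	uprobe_multi_func_1(0);
	uprobe_multi_func_2(0);
	uprobe_multi_func_3(0);
}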
#include "core_reloc_types.h"
void f1(struct core_reloc_misc___a x) {}
void f2(struct core_reloc_misc___b x) {}
void f3(struct core_reloc_misc_extensible x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_misc.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif
typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 16384);
__type(key, __u32);
__type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, stack_trace_t);
} stackdata_map SEC(".maps");
long stackid_kernel = 1;
long stackid_user = 1;
long stack_kernel = 1;
long stack_user = 1;
SEC("perf_event")
int oncpu(void *ctx)
{
stack_trace_t *trace;
__u32 key = 0;
long val;
val = bpf_get_stackid(ctx, &stackmap, 0);
if (val >= 0)
stackid_kernel = 2;
val = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
if (val >= 0)
stackid_user = 2;
trace = bpf_map_lookup_elem(&stackdata_map, &key);
if (!trace)
return 0;
val = bpf_get_stack(ctx, trace, sizeof(stack_trace_t), 0);
if (val > 0)
stack_kernel = 2;
val = bpf_get_stack(ctx, trace, sizeof(stack_trace_t), BPF_F_USER_STACK);
if (val > 0)
stack_user = 2;
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/perf_event_stackmap.c |
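A hedged sketch of wiring the "oncpu" program above to a perf event: open a software CPU-clock sampling event with perf_event_open(2) and hand the fd to libbpf. The sampling rate and CPU selection are arbitrary choices for the sketch.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <bpf/libbpf.h>

static struct bpf_link *attach_oncpu(struct bpf_program *prog, int cpu)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.size = sizeof(attr),
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.freq = 1,
		.sample_freq = 99, /* 99 Hz, a common profiling rate */
	};
	int fd = syscall(__NR_perf_event_open, &attr, -1 /* any pid */,
			 cpu, -1 /* no group */, 0);

	if (fd < 0)
		return NULL;
	return bpf_program__attach_perf_event(prog, fd);
}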
// SPDX-License-Identifier: GPL-2.0-or-later
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#define NF_ACCEPT 1
SEC("netfilter")
int nf_link_attach_test(struct bpf_nf_ctx *ctx)
{
return NF_ACCEPT;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_netfilter_link_attach.c |
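A hedged attach sketch for the netfilter program above, using libbpf's bpf_program__attach_netfilter() API; the hook choice (IPv4 LOCAL_IN) and priority are arbitrary picks for the sketch, the constants come from linux/netfilter.h.

#include <linux/netfilter.h>
#include <bpf/libbpf.h>

static struct bpf_link *attach_nf(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_netfilter_opts, opts,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_LOCAL_IN,
		.priority = -128,
	);

	return bpf_program__attach_netfilter(prog, &opts);
}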
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
volatile const char bpf_metadata_a[] SEC(".rodata") = "bar";
volatile const int bpf_metadata_b SEC(".rodata") = 2;
SEC("cgroup_skb/egress")
int prog(struct xdp_md *ctx)
{
return bpf_metadata_b ? 1 : 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/metadata_used.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Tessares SA. */
/* Copyright (c) 2022, SUSE. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
__u32 token = 0;
struct mptcp_storage {
__u32 invoked;
__u32 is_mptcp;
struct sock *sk;
__u32 token;
struct sock *first;
char ca_name[TCP_CA_NAME_MAX];
};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct mptcp_storage);
} socket_storage_map SEC(".maps");
SEC("sockops")
int _sockops(struct bpf_sock_ops *ctx)
{
struct mptcp_storage *storage;
struct mptcp_sock *msk;
int op = (int)ctx->op;
struct tcp_sock *tsk;
struct bpf_sock *sk;
bool is_mptcp;
if (op != BPF_SOCK_OPS_TCP_CONNECT_CB)
return 1;
sk = ctx->sk;
if (!sk)
return 1;
tsk = bpf_skc_to_tcp_sock(sk);
if (!tsk)
return 1;
is_mptcp = bpf_core_field_exists(tsk->is_mptcp) ? tsk->is_mptcp : 0;
if (!is_mptcp) {
storage = bpf_sk_storage_get(&socket_storage_map, sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!storage)
return 1;
storage->token = 0;
__builtin_memset(storage->ca_name, 0, TCP_CA_NAME_MAX);
storage->first = NULL;
} else {
msk = bpf_skc_to_mptcp_sock(sk);
if (!msk)
return 1;
storage = bpf_sk_storage_get(&socket_storage_map, msk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!storage)
return 1;
storage->token = msk->token;
__builtin_memcpy(storage->ca_name, msk->ca_name, TCP_CA_NAME_MAX);
storage->first = msk->first;
}
storage->invoked++;
storage->is_mptcp = is_mptcp;
storage->sk = (struct sock *)sk;
return 1;
}
SEC("fentry/mptcp_pm_new_connection")
int BPF_PROG(trace_mptcp_pm_new_connection, struct mptcp_sock *msk,
const struct sock *ssk, int server_side)
{
if (!server_side)
token = msk->token;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/mptcp_sock.c |
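SK_STORAGE maps are keyed from userspace by the owning socket's fd, so a test harness can read back what _sockops stored. A hedged fragment, assuming a userspace mirror of struct mptcp_storage in which the kernel pointers are carried as __u64 (same 64-bit layout):

#include <linux/types.h>
#include <bpf/bpf.h>

struct mptcp_storage_user {
	__u32 invoked;
	__u32 is_mptcp;
	__u64 sk;
	__u32 token;
	__u64 first;
	char ca_name[16]; /* TCP_CA_NAME_MAX */
};

static int read_storage(int map_fd, int sock_fd,
			struct mptcp_storage_user *val)
{
	/* key is the connected TCP/MPTCP socket's file descriptor */
	return bpf_map_lookup_elem(map_fd, &sock_fd, val);
}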
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
#define IPV6_SEQ_DGRAM_HEADER \
" sl " \
"local_address " \
"remote_address " \
"st tx_queue rx_queue tr tm->when retrnsmt" \
" uid timeout inode ref pointer drops\n"
static long sock_i_ino(const struct sock *sk)
{
const struct socket *sk_socket = sk->sk_socket;
const struct inode *inode;
unsigned long ino;
if (!sk_socket)
return 0;
inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
return ino;
}
SEC("iter/udp")
int dump_udp6(struct bpf_iter__udp *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct udp_sock *udp_sk = ctx->udp_sk;
const struct in6_addr *dest, *src;
struct udp6_sock *udp6_sk;
struct inet_sock *inet;
__u16 srcp, destp;
__u32 seq_num;
int rqueue;
if (udp_sk == (void *)0)
return 0;
seq_num = ctx->meta->seq_num;
if (seq_num == 0)
BPF_SEQ_PRINTF(seq, IPV6_SEQ_DGRAM_HEADER);
udp6_sk = bpf_skc_to_udp6_sock(udp_sk);
if (udp6_sk == (void *)0)
return 0;
inet = &udp_sk->inet;
srcp = bpf_ntohs(inet->inet_sport);
destp = bpf_ntohs(inet->inet_dport);
rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit;
dest = &inet->sk.sk_v6_daddr;
src = &inet->sk.sk_v6_rcv_saddr;
BPF_SEQ_PRINTF(seq, "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ",
ctx->bucket,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n",
inet->sk.sk_state,
inet->sk.sk_wmem_alloc.refs.counter - 1,
rqueue,
0, 0L, 0, ctx->uid, 0,
sock_i_ino(&inet->sk),
inet->sk.sk_refcnt.refs.counter, udp_sk,
inet->sk.sk_drops.counter);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_udp6.c |
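A hedged sketch of consuming the iterator above: attach dump_udp6, create an iterator instance from the link, and read() the rendered /proc/net/udp6-style text out of the fd. The program handle would come from an assumed skeleton.

#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int cat_udp6(struct bpf_program *prog)
{
	struct bpf_link *link = bpf_program__attach_iter(prog, NULL);
	char buf[4096];
	int iter_fd, n;

	if (!link)
		return -1;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0) {
		bpf_link__destroy(link);
		return -1;
	}
	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
		write(STDOUT_FILENO, buf, n); /* each read runs the iter */

	close(iter_fd);
	bpf_link__destroy(link);
	return 0;
}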
// SPDX-License-Identifier: GPL-2.0-only
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
__noinline int foo(int *p)
{
return p ? (*p = 42) : 0;
}
const volatile int i;
SEC("tc")
__failure __msg("Caller passes invalid args into func#1")
int global_func17(struct __sk_buff *skb)
{
return foo((int *)&i);
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func17.c |
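The expected failure above comes from handing a pointer into read-only .rodata (the const volatile global) to a global function that writes through its argument. A hedged counterpart, not part of the test file, reusing foo() from above: pointing the same call at writable stack memory is the pattern the verifier accepts.

SEC("tc")
int global_func17_ok(struct __sk_buff *skb)
{
	int v = 0;

	return foo(&v); /* writable stack slot: verifies fine */
}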
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
return (struct tcp_sock *)sk;
}
SEC("struct_ops/incompl_cong_ops_ssthresh")
__u32 BPF_PROG(incompl_cong_ops_ssthresh, struct sock *sk)
{
return tcp_sk(sk)->snd_ssthresh;
}
SEC("struct_ops/incompl_cong_ops_undo_cwnd")
__u32 BPF_PROG(incompl_cong_ops_undo_cwnd, struct sock *sk)
{
return tcp_sk(sk)->snd_cwnd;
}
SEC(".struct_ops")
struct tcp_congestion_ops incompl_cong_ops = {
/* Intentionally leaving out any of the required cong_avoid() and
* cong_control() here.
*/
.ssthresh = (void *)incompl_cong_ops_ssthresh,
.undo_cwnd = (void *)incompl_cong_ops_undo_cwnd,
.name = "bpf_incompl_ops",
};
| linux-master | tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c |
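A hedged loader-side sketch for this negative test: with neither cong_avoid() nor cong_control() implemented, the kernel is expected to reject the congestion control, so registration should fail. Depending on whether the struct_ops map uses link-based registration, the failure surfaces at skeleton load or at the attach call below; the map handle is assumed to come from a skeleton.

#include <bpf/libbpf.h>

static int expect_reject(struct bpf_map *incompl_cong_ops_map)
{
	struct bpf_link *link;

	link = bpf_map__attach_struct_ops(incompl_cong_ops_map);
	if (!link)
		return 0; /* rejected, as the test expects */

	bpf_link__destroy(link); /* unexpected acceptance */
	return -1;
}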
#include "core_reloc_types.h"
void f(struct core_reloc_type_id x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_type_id.c |